Diffstat
-rw-r--r--drivers/acpi/apei/hest.c5
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/cppc_acpi.c43
-rw-r--r--drivers/acpi/dock.c8
-rw-r--r--drivers/acpi/glue.c66
-rw-r--r--drivers/acpi/internal.h1
-rw-r--r--drivers/acpi/power.c104
-rw-r--r--drivers/acpi/pptt.c67
-rw-r--r--drivers/acpi/prmt.c35
-rw-r--r--drivers/acpi/processor_idle.c3
-rw-r--r--drivers/acpi/resource.c56
-rw-r--r--drivers/acpi/scan.c7
-rw-r--r--drivers/acpi/sleep.c10
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/amba/bus.c100
-rw-r--r--drivers/android/binder.c41
-rw-r--r--drivers/android/binder_internal.h4
-rw-r--r--drivers/ata/ahci.c13
-rw-r--r--drivers/ata/libata-core.c59
-rw-r--r--drivers/ata/libata-scsi.c52
-rw-r--r--drivers/ata/pata_ali.c4
-rw-r--r--drivers/ata/pata_amd.c2
-rw-r--r--drivers/ata/pata_optidma.c4
-rw-r--r--drivers/ata/pata_radisys.c4
-rw-r--r--drivers/ata/sata_mv.c4
-rw-r--r--drivers/base/arch_topology.c18
-rw-r--r--drivers/base/component.c6
-rw-r--r--drivers/base/core.c17
-rw-r--r--drivers/base/firmware_loader/builtin/Makefile6
-rw-r--r--drivers/base/firmware_loader/builtin/main.c106
-rw-r--r--drivers/base/firmware_loader/firmware.h17
-rw-r--r--drivers/base/firmware_loader/main.c65
-rw-r--r--drivers/base/platform.c3
-rw-r--r--drivers/base/power/main.c14
-rw-r--r--drivers/base/power/power.h7
-rw-r--r--drivers/base/power/runtime.c6
-rw-r--r--drivers/base/power/wakeirq.c101
-rw-r--r--drivers/base/property.c63
-rw-r--r--drivers/base/regmap/regcache-rbtree.c7
-rw-r--r--drivers/base/regmap/regmap-mdio.c6
-rw-r--r--drivers/base/regmap/regmap-spi.c36
-rw-r--r--drivers/base/swnode.c6
-rw-r--r--drivers/base/topology.c10
-rw-r--r--drivers/bcma/main.c2
-rw-r--r--drivers/block/Kconfig26
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/amiflop.c9
-rw-r--r--drivers/block/aoe/aoeblk.c19
-rw-r--r--drivers/block/ataflop.c110
-rw-r--r--drivers/block/brd.c12
-rw-r--r--drivers/block/cryptoloop.c206
-rw-r--r--drivers/block/drbd/drbd_int.h5
-rw-r--r--drivers/block/drbd/drbd_main.c6
-rw-r--r--drivers/block/drbd/drbd_req.c3
-rw-r--r--drivers/block/floppy.c35
-rw-r--r--drivers/block/loop.c420
-rw-r--r--drivers/block/loop.h30
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c6
-rw-r--r--drivers/block/n64cart.c24
-rw-r--r--drivers/block/nbd.c176
-rw-r--r--drivers/block/null_blk/main.c195
-rw-r--r--drivers/block/null_blk/null_blk.h6
-rw-r--r--drivers/block/paride/pcd.c312
-rw-r--r--drivers/block/paride/pd.c148
-rw-r--r--drivers/block/paride/pf.c236
-rw-r--r--drivers/block/pktcdvd.c20
-rw-r--r--drivers/block/ps3vram.c6
-rw-r--r--drivers/block/rbd.c8
-rw-r--r--drivers/block/rnbd/rnbd-clt.c15
-rw-r--r--drivers/block/rnbd/rnbd-proto.h2
-rw-r--r--drivers/block/rsxx/core.c4
-rw-r--r--drivers/block/rsxx/dev.c19
-rw-r--r--drivers/block/swim.c36
-rw-r--r--drivers/block/swim3.c5
-rw-r--r--drivers/block/sx8.c15
-rw-r--r--drivers/block/virtio_blk.c194
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/block/xen-blkfront.c9
-rw-r--r--drivers/block/zram/zram_drv.c10
-rw-r--r--drivers/bluetooth/btintel.c239
-rw-r--r--drivers/bluetooth/btintel.h11
-rw-r--r--drivers/bluetooth/btmrvl_main.c6
-rw-r--r--drivers/bluetooth/btmtkuart.c13
-rw-r--r--drivers/bluetooth/btrsi.c1
-rw-r--r--drivers/bluetooth/btrtl.c26
-rw-r--r--drivers/bluetooth/btusb.c64
-rw-r--r--drivers/bluetooth/hci_h5.c35
-rw-r--r--drivers/bluetooth/hci_ldisc.c3
-rw-r--r--drivers/bluetooth/hci_qca.c5
-rw-r--r--drivers/bluetooth/hci_vhci.c122
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/bus/brcmstb_gisb.c7
-rw-r--r--drivers/bus/fsl-mc/Makefile3
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-private.h39
-rw-r--r--drivers/bus/fsl-mc/obj-api.c103
-rw-r--r--drivers/bus/sun50i-de2.c7
-rw-r--r--drivers/bus/ti-sysc.c276
-rw-r--r--drivers/cdrom/cdrom.c63
-rw-r--r--drivers/cdrom/gdrom.c7
-rw-r--r--drivers/char/hw_random/Kconfig12
-rw-r--r--drivers/char/hw_random/ixp4xx-rng.c4
-rw-r--r--drivers/char/hw_random/meson-rng.c5
-rw-r--r--drivers/char/hw_random/mtk-rng.c9
-rw-r--r--drivers/char/hw_random/s390-trng.c4
-rw-r--r--drivers/char/hw_random/virtio-rng.c86
-rw-r--r--drivers/char/ipmi/Kconfig11
-rw-r--r--drivers/char/ipmi/Makefile1
-rw-r--r--drivers/char/ipmi/bt-bmc.c69
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c8
-rw-r--r--drivers/char/ipmi/ipmi_ipmb.c539
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c330
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c8
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c4
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c25
-rw-r--r--drivers/char/ipmi/kcs_bmc_serio.c4
-rw-r--r--drivers/char/mem.c8
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c9
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm2-space.c3
-rw-r--r--drivers/char/tpm/tpm_tis_core.c26
-rw-r--r--drivers/char/tpm/tpm_tis_core.h4
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c1
-rw-r--r--drivers/char/virtio_console.c9
-rw-r--r--drivers/char/xillybus/xillybus.h31
-rw-r--r--drivers/char/xillybus/xillybus_core.c131
-rw-r--r--drivers/char/xillybus/xillybus_of.c86
-rw-r--r--drivers/char/xillybus/xillybus_pcie.c99
-rw-r--r--drivers/char/xillybus/xillyusb.c1
-rw-r--r--drivers/clk/at91/at91rm9200.c2
-rw-r--r--drivers/clk/at91/at91sam9260.c2
-rw-r--r--drivers/clk/at91/at91sam9g45.c2
-rw-r--r--drivers/clk/at91/at91sam9n12.c2
-rw-r--r--drivers/clk/at91/at91sam9rl.c2
-rw-r--r--drivers/clk/at91/at91sam9x5.c2
-rw-r--r--drivers/clk/at91/clk-generated.c46
-rw-r--r--drivers/clk/at91/clk-main.c66
-rw-r--r--drivers/clk/at91/clk-master.c463
-rw-r--r--drivers/clk/at91/clk-peripheral.c40
-rw-r--r--drivers/clk/at91/clk-pll.c39
-rw-r--r--drivers/clk/at91/clk-programmable.c29
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c174
-rw-r--r--drivers/clk/at91/clk-system.c20
-rw-r--r--drivers/clk/at91/clk-usb.c27
-rw-r--r--drivers/clk/at91/clk-utmi.c39
-rw-r--r--drivers/clk/at91/dt-compat.c2
-rw-r--r--drivers/clk/at91/pmc.c178
-rw-r--r--drivers/clk/at91/pmc.h29
-rw-r--r--drivers/clk/at91/sam9x60.c6
-rw-r--r--drivers/clk/at91/sama5d2.c2
-rw-r--r--drivers/clk/at91/sama5d3.c2
-rw-r--r--drivers/clk/at91/sama5d4.c2
-rw-r--r--drivers/clk/at91/sama7g5.c29
-rw-r--r--drivers/clk/clk-composite.c77
-rw-r--r--drivers/clk/clk.c5
-rw-r--r--drivers/clk/imx/Kconfig7
-rw-r--r--drivers/clk/imx/Makefile2
-rw-r--r--drivers/clk/imx/clk-composite-7ulp.c88
-rw-r--r--drivers/clk/imx/clk-composite-8m.c4
-rw-r--r--drivers/clk/imx/clk-imx6ul.c9
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c20
-rw-r--r--drivers/clk/imx/clk-imx8ulp.c569
-rw-r--r--drivers/clk/imx/clk-pfdv2.c23
-rw-r--r--drivers/clk/imx/clk-pllv4.c35
-rw-r--r--drivers/clk/imx/clk.h457
-rw-r--r--drivers/clk/mediatek/Kconfig28
-rw-r--r--drivers/clk/mediatek/Makefile8
-rw-r--r--drivers/clk/mediatek/clk-apmixed.c3
-rw-r--r--drivers/clk/mediatek/clk-cpumux.c3
-rw-r--r--drivers/clk/mediatek/clk-gate.c8
-rw-r--r--drivers/clk/mediatek/clk-mt6779-aud.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-cam.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-ipe.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mfg.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-venc.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779.c2
-rw-r--r--drivers/clk/mediatek/clk-mt8195-apmixedsys.c145
-rw-r--r--drivers/clk/mediatek/clk-mt8195-apusys_pll.c92
-rw-r--r--drivers/clk/mediatek/clk-mt8195-cam.c142
-rw-r--r--drivers/clk/mediatek/clk-mt8195-ccu.c50
-rw-r--r--drivers/clk/mediatek/clk-mt8195-img.c96
-rw-r--r--drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c68
-rw-r--r--drivers/clk/mediatek/clk-mt8195-infra_ao.c206
-rw-r--r--drivers/clk/mediatek/clk-mt8195-ipe.c51
-rw-r--r--drivers/clk/mediatek/clk-mt8195-mfg.c47
-rw-r--r--drivers/clk/mediatek/clk-mt8195-peri_ao.c62
-rw-r--r--drivers/clk/mediatek/clk-mt8195-scp_adsp.c47
-rw-r--r--drivers/clk/mediatek/clk-mt8195-topckgen.c1273
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdec.c104
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo0.c123
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo1.c140
-rw-r--r--drivers/clk/mediatek/clk-mt8195-venc.c69
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vpp0.c110
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vpp1.c108
-rw-r--r--drivers/clk/mediatek/clk-mt8195-wpe.c143
-rw-r--r--drivers/clk/mediatek/clk-mtk.c29
-rw-r--r--drivers/clk/mediatek/clk-mtk.h1
-rw-r--r--drivers/clk/mediatek/clk-mux.c6
-rw-r--r--drivers/clk/mediatek/clk-pll.c6
-rw-r--r--drivers/clk/mediatek/reset.c2
-rw-r--r--drivers/clk/meson/meson8b.c163
-rw-r--r--drivers/clk/meson/meson8b.h26
-rw-r--r--drivers/clk/mvebu/ap-cpu-clk.c14
-rw-r--r--drivers/clk/qcom/Kconfig43
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/a53-pll.c4
-rw-r--r--drivers/clk/qcom/camcc-sc7280.c2484
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c135
-rw-r--r--drivers/clk/qcom/common.c8
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c27
-rw-r--r--drivers/clk/qcom/gcc-msm8953.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c1384
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c705
-rw-r--r--drivers/clk/qcom/gcc-qcm2290.c3044
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c85
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c80
-rw-r--r--drivers/clk/qcom/gdsc.c51
-rw-r--r--drivers/clk/qcom/gdsc.h2
-rw-r--r--drivers/clk/qcom/gpucc-msm8998.c13
-rw-r--r--drivers/clk/qcom/gpucc-sdm660.c15
-rw-r--r--drivers/clk/qcom/kpss-xcc.c4
-rw-r--r--drivers/clk/qcom/lpasscc-sc7280.c216
-rw-r--r--drivers/clk/qcom/mmcc-msm8998.c183
-rw-r--r--drivers/clk/qcom/mmcc-sdm660.c75
-rw-r--r--drivers/clk/qcom/videocc-sm8250.c27
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c191
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c83
-rw-r--r--drivers/clk/renesas/rcar-cpg-lib.c83
-rw-r--r--drivers/clk/renesas/rcar-cpg-lib.h7
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c89
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c212
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.h45
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c17
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c2
-rw-r--r--drivers/clk/samsung/Kconfig30
-rw-r--r--drivers/clk/samsung/Makefile1
-rw-r--r--drivers/clk/samsung/clk-cpu.c18
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c4
-rw-r--r--drivers/clk/samsung/clk-exynos4412-isp.c4
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c124
-rw-r--r--drivers/clk/samsung/clk-exynos850.c835
-rw-r--r--drivers/clk/samsung/clk-pll.c196
-rw-r--r--drivers/clk/samsung/clk-pll.h2
-rw-r--r--drivers/clk/samsung/clk-s5pv210-audss.c4
-rw-r--r--drivers/clk/samsung/clk.c2
-rw-r--r--drivers/clk/samsung/clk.h26
-rw-r--r--drivers/clk/sunxi-ng/Kconfig1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c3
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a100.c3
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.c96
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.h6
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.h1
-rw-r--r--drivers/clk/sunxi/clk-mod0.c4
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0-gates.c4
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0.c4
-rw-r--r--drivers/clk/sunxi/clk-sun6i-ar100.c4
-rw-r--r--drivers/clk/sunxi/clk-sun8i-apb0.c4
-rw-r--r--drivers/clk/ux500/Makefile3
-rw-r--r--drivers/clk/ux500/prcc.h19
-rw-r--r--drivers/clk/ux500/reset-prcc.c181
-rw-r--r--drivers/clk/ux500/reset-prcc.h23
-rw-r--r--drivers/clk/ux500/u8500_of_clk.c30
-rw-r--r--drivers/clk/versatile/Kconfig3
-rw-r--r--drivers/clk/versatile/Makefile2
-rw-r--r--drivers/clk/versatile/clk-icst.c3
-rw-r--r--drivers/clocksource/Kconfig3
-rw-r--r--drivers/clocksource/arc_timer.c6
-rw-r--r--drivers/clocksource/arm_arch_timer.c243
-rw-r--r--drivers/clocksource/timer-riscv.c9
-rw-r--r--drivers/comedi/drivers/dt9812.c115
-rw-r--r--drivers/comedi/drivers/ni_usb6501.c10
-rw-r--r--drivers/comedi/drivers/vmk80xx.c28
-rw-r--r--drivers/counter/104-quad-8.c699
-rw-r--r--drivers/counter/Kconfig6
-rw-r--r--drivers/counter/Makefile1
-rw-r--r--drivers/counter/counter-chrdev.c573
-rw-r--r--drivers/counter/counter-chrdev.h14
-rw-r--r--drivers/counter/counter-core.c191
-rw-r--r--drivers/counter/counter-sysfs.c959
-rw-r--r--drivers/counter/counter-sysfs.h13
-rw-r--r--drivers/counter/counter.c1496
-rw-r--r--drivers/counter/ftm-quaddec.c60
-rw-r--r--drivers/counter/intel-qep.c146
-rw-r--r--drivers/counter/interrupt-cnt.c62
-rw-r--r--drivers/counter/microchip-tcb-capture.c93
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c212
-rw-r--r--drivers/counter/stm32-timer-cnt.c195
-rw-r--r--drivers/counter/ti-eqep.c180
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c3
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c3
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq.c19
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c6
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c16
-rw-r--r--drivers/cpufreq/intel_pstate.c120
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c2
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c4
-rw-r--r--drivers/cpufreq/s3c2440-cpufreq.c2
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c2
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c4
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c8
-rw-r--r--drivers/cpuidle/Kconfig.arm3
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c318
-rw-r--r--drivers/cpuidle/cpuidle-tegra.c3
-rw-r--r--drivers/cpuidle/sysfs.c5
-rw-r--r--drivers/crypto/caam/caampkc.c19
-rw-r--r--drivers/crypto/caam/regs.h3
-rw-r--r--drivers/crypto/ccp/ccp-dev-v3.c5
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c5
-rw-r--r--drivers/crypto/ccp/sev-dev.c2
-rw-r--r--drivers/crypto/ccree/cc_driver.c3
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h14
-rw-r--r--drivers/crypto/hisilicon/qm.c74
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c2
-rw-r--r--drivers/crypto/img-hash.c7
-rw-r--r--drivers/crypto/keembay/Kconfig19
-rw-r--r--drivers/crypto/keembay/Makefile2
-rw-r--r--drivers/crypto/keembay/keembay-ocs-ecc.c1017
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c1
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c35
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h10
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c89
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h13
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c87
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h12
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h29
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h9
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.c98
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.h27
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c5
-rw-r--r--drivers/crypto/qat/qat_common/adf_isr.c190
-rw-r--r--drivers/crypto/qat/qat_common/adf_pf2vf_msg.c238
-rw-r--r--drivers/crypto/qat/qat_common/adf_pf2vf_msg.h9
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf2pf_msg.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf_isr.c30
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c123
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h14
-rw-r--r--drivers/crypto/s5p-sss.c2
-rw-r--r--drivers/crypto/sa2ul.c13
-rw-r--r--drivers/cxl/cxl.h61
-rw-r--r--drivers/devfreq/devfreq.c2
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c12
-rw-r--r--drivers/dma-buf/Makefile2
-rw-r--r--drivers/dma-buf/dma-buf.c187
-rw-r--r--drivers/dma-buf/dma-fence.c13
-rw-r--r--drivers/dma-buf/dma-resv.c442
-rw-r--r--drivers/dma-buf/heaps/system_heap.c5
-rw-r--r--drivers/dma-buf/seqno-fence.c71
-rw-r--r--drivers/dma/pxa_dma.c3
-rw-r--r--drivers/edac/al_mc_edac.c12
-rw-r--r--drivers/edac/amd64_edac.c22
-rw-r--r--drivers/edac/edac_mc.c42
-rw-r--r--drivers/edac/edac_mc_sysfs.c8
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/edac/ti_edac.c7
-rw-r--r--drivers/extcon/Kconfig2
-rw-r--r--drivers/extcon/extcon-axp288.c31
-rw-r--r--drivers/extcon/extcon-max3355.c1
-rw-r--r--drivers/extcon/extcon-usb-gpio.c3
-rw-r--r--drivers/extcon/extcon-usbc-tusb320.c163
-rw-r--r--drivers/firewire/core-cdev.c32
-rw-r--r--drivers/firewire/net.c14
-rw-r--r--drivers/firmware/Kconfig1
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/arm_ffa/driver.c53
-rw-r--r--drivers/firmware/cirrus/Kconfig5
-rw-r--r--drivers/firmware/cirrus/Makefile3
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c3109
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/firmware/psci/psci_checker.c2
-rw-r--r--drivers/firmware/qcom_scm.c6
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c26
-rw-r--r--drivers/firmware/tegra/bpmp-tegra210.c7
-rw-r--r--drivers/firmware/xilinx/zynqmp.c17
-rw-r--r--drivers/fsi/fsi-occ.c218
-rw-r--r--drivers/fsi/fsi-sbefifo.c28
-rw-r--r--drivers/gpio/gpio-amdpt.c4
-rw-r--r--drivers/gpio/gpio-mlxbf2.c5
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c5
-rw-r--r--drivers/gpu/drm/Kconfig11
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c256
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_df.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c871
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c669
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c755
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c394
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c175
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c119
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c191
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_1.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c403
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c227
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c136
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mca_v3_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c382
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c346
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c51
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c79
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c267
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c153
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c35
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c109
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c192
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c19
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1024
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h103
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c34
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c16
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c71
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c258
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c381
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c1107
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c1822
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c962
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c496
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c361
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c165
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h107
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h302
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c164
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c128
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/Makefile36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c84
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c316
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c150
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h132
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c630
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c209
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c125
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h86
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c203
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c1307
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c200
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/Makefile26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c263
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h126
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h115
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c383
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c616
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h222
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c752
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h241
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c113
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c345
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c87
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h162
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c (renamed from drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h (renamed from drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c158
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c156
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c236
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c156
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c132
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c390
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c166
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c256
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c246
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h (renamed from drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c291
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h94
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c195
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c259
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h99
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h97
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h87
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c374
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h10
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h275
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c12
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c21
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c16
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h8
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h17
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_defs.h12
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h8
-rw-r--r--drivers/gpu/drm/amd/display/include/i2caux_interface.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h86
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c32
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c15
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h5
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h32
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h37
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h6193
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h22091
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h132
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h151
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h952
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h355
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h4
-rw-r--r--drivers/gpu/drm/amd/include/soc15_hw_ip.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c22
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h4
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_0.h4
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v13_0.h4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c135
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c64
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c119
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c117
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c96
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c33
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c2
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c9
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c27
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c18
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c15
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c27
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c4
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c21
-rw-r--r--drivers/gpu/drm/bridge/panel.c37
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c292
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c6
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c25
-rw-r--r--drivers/gpu/drm/drm_bridge.c78
-rw-r--r--drivers/gpu/drm/drm_cache.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c83
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h2
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c42
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c42
-rw-r--r--drivers/gpu/drm/drm_edid.c367
-rw-r--r--drivers/gpu/drm/drm_format_helper.c88
-rw-r--r--drivers/gpu/drm/drm_fourcc.c3
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c3
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c25
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c1
-rw-r--r--drivers/gpu/drm/drm_ioctl.c21
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c11
-rw-r--r--drivers/gpu/drm/drm_lease.c39
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c81
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_of.c3
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c61
-rw-r--r--drivers/gpu/drm/drm_prime.c3
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c119
-rw-r--r--drivers/gpu/drm/drm_property.c9
-rw-r--r--drivers/gpu/drm/drm_sysfs.c87
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c3
-rw-r--r--drivers/gpu/drm/gma500/backlight.c12
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c24
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c10
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c12
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c22
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c16
-rw-r--r--drivers/gpu/drm/gma500/gem.c2
-rw-r--r--drivers/gpu/drm/gma500/gma_device.c2
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c14
-rw-r--r--drivers/gpu/drm/gma500/gtt.c18
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c10
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c12
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c11
-rw-r--r--drivers/gpu/drm/gma500/mmu.c12
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c8
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c20
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c18
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c14
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c2
-rw-r--r--drivers/gpu/drm/gma500/opregion.c14
-rw-r--r--drivers/gpu/drm/gma500/power.c20
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c16
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c147
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h24
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c10
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c31
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c10
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c26
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c2
-rw-r--r--drivers/gpu/drm/gud/Kconfig2
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c6
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h12
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c6
-rw-r--r--drivers/gpu/drm/i915/Kconfig12
-rw-r--r--drivers/gpu/drm/i915/Makefile36
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c90
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c165
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c209
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c1776
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.h52
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c420
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c348
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c140
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c535
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c672
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c2509
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h47
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c127
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h48
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c780
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c78
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c467
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c49
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c674
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c239
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c437
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c606
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c274
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c292
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c321
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c70
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c1833
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h48
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c283
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c59
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c476
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c225
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c290
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c77
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h6
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c58
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c53
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c25
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c57
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c514
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h19
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h58
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c75
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c823
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c33
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c70
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h57
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c91
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c70
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.h37
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c201
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.h14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c206
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h26
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c22
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c48
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c29
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c36
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c190
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c5
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_engines.h14
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt.c47
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt_pm.h14
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c7
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h56
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h153
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c150
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c36
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c52
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c104
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.h (renamed from drivers/gpu/drm/i915/gt/debugfs_gt.h)18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c (renamed from drivers/gpu/drm/i915/gt/debugfs_engines.c)10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c (renamed from drivers/gpu/drm/i915/gt/debugfs_gt_pm.c)197
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c93
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c176
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c65
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c262
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.h2
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c28
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c10
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c39
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h119
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c28
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c60
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c18
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h34
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c2298
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c93
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h9
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c127
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c179
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c2
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c45
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.h8
-rw-r--r--drivers/gpu/drm/i915/i915_config.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c286
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c17
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h168
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_ww.h25
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c42
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c94
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h51
-rw-r--r--drivers/gpu/drm/i915/i915_module.c4
-rw-r--r--drivers/gpu/drm/i915/i915_params.h2
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c14
-rw-r--r--drivers/gpu/drm/i915/i915_pci.h12
-rw-r--r--drivers/gpu/drm/i915/i915_query.c5
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h180
-rw-r--r--drivers/gpu/drm/i915/i915_request.c157
-rw-r--r--drivers/gpu/drm/i915/i915_request.h49
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c1
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h19
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c20
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h14
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c21
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h13
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h7
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c36
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c12
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h4
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c235
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.h26
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c307
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h3
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h2
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.c73
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.h23
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c577
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c447
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h20
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h12
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c299
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h64
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c141
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h15
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c78
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h21
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.c101
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.h32
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.c46
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.h24
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c175
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.h15
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c172
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.h17
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h36
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h83
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c34
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c2
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c266
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.h (renamed from drivers/gpu/drm/i915/intel_sideband.h)34
-rw-r--r--drivers/gpu/drm/kmb/kmb_crtc.c41
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c2
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.h10
-rw-r--r--drivers/gpu/drm/kmb/kmb_dsi.c25
-rw-r--r--drivers/gpu/drm/kmb/kmb_dsi.h2
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c43
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.h6
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c9
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c28
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h6
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c4
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c5
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c3
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c4
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c27
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mm.c35
-rw-r--r--drivers/gpu/drm/msm/Kconfig6
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c256
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c147
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c267
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h92
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c56
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c70
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c89
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c12
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c8
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c64
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c294
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.h4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c143
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c13
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.c138
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.h14
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c147
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c66
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c25
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c5
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c38
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c20
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c24
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c33
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c21
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c33
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h31
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c35
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h11
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c41
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h3
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c12
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c1
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c2
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/panel/Kconfig27
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c743
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c1896
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c12
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c85
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d27a1.c320
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c1098
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c33
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c48
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h5
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c42
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c1
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.c11
-rw-r--r--drivers/gpu/drm/radeon/atombios.h2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h11
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c108
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h26
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c12
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h7
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c36
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c4
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig1
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c4
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c3
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c33
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c26
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c140
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c62
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c199
-rw-r--r--drivers/gpu/drm/selftests/test-drm_damage_helper.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c4
-rw-r--r--drivers/gpu/drm/stm/ltdc.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.h4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c21
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c4
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/gem.c3
-rw-r--r--drivers/gpu/drm/tegra/plane.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c4
-rw-r--r--drivers/gpu/drm/tiny/Kconfig4
-rw-r--r--drivers/gpu/drm/tiny/bochs.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c71
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c22
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c15
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c48
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c42
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c49
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c69
-rw-r--r--drivers/gpu/drm/udl/Kconfig1
-rw-r--r--drivers/gpu/drm/v3d/Kconfig2
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c15
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h30
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c473
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c44
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c5
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c17
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c28
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c5
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c342
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c44
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h36
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c30
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c195
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c32
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c27
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_memory.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c10
-rw-r--r--drivers/gpu/drm/zte/Kconfig10
-rw-r--r--drivers/gpu/drm/zte/Makefile10
-rw-r--r--drivers/gpu/drm/zte/zx_common_regs.h28
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c184
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.h34
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c760
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi_regs.h66
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c537
-rw-r--r--drivers/gpu/drm/zte/zx_plane.h26
-rw-r--r--drivers/gpu/drm/zte/zx_plane_regs.h120
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c400
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc_regs.h27
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c527
-rw-r--r--drivers/gpu/drm/zte/zx_vga_regs.h33
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c921
-rw-r--r--drivers/gpu/drm/zte/zx_vou.h64
-rw-r--r--drivers/gpu/drm/zte/zx_vou_regs.h212
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c31
-rw-r--r--drivers/hid/hid-cp2112.c14
-rw-r--r--drivers/hid/hid-roccat-kone.c2
-rw-r--r--drivers/hid/hid-roccat-kone.h12
-rw-r--r--drivers/hid/surface-hid/surface_hid.c4
-rw-r--r--drivers/hsi/clients/ssi_protocol.c4
-rw-r--r--drivers/hv/Kconfig1
-rw-r--r--drivers/hv/channel.c72
-rw-r--r--drivers/hv/channel_mgmt.c34
-rw-r--r--drivers/hv/connection.c101
-rw-r--r--drivers/hv/hv.c82
-rw-r--r--drivers/hv/hv_common.c12
-rw-r--r--drivers/hv/hyperv_vmbus.h3
-rw-r--r--drivers/hv/ring_buffer.c57
-rw-r--r--drivers/hwmon/Kconfig15
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/abituguru3.c6
-rw-r--r--drivers/hwmon/acpi_power_meter.c13
-rw-r--r--drivers/hwmon/ad7414.c4
-rw-r--r--drivers/hwmon/ad7418.c6
-rw-r--r--drivers/hwmon/adm1021.c4
-rw-r--r--drivers/hwmon/adm1025.c4
-rw-r--r--drivers/hwmon/adm1026.c4
-rw-r--r--drivers/hwmon/adm1029.c4
-rw-r--r--drivers/hwmon/adm1031.c6
-rw-r--r--drivers/hwmon/adt7310.c3
-rw-r--r--drivers/hwmon/adt7410.c3
-rw-r--r--drivers/hwmon/adt7x10.c3
-rw-r--r--drivers/hwmon/adt7x10.h2
-rw-r--r--drivers/hwmon/amc6821.c8
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/asb100.c4
-rw-r--r--drivers/hwmon/asc7621.c4
-rw-r--r--drivers/hwmon/atxp1.c10
-rw-r--r--drivers/hwmon/coretemp.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c103
-rw-r--r--drivers/hwmon/dme1737.c4
-rw-r--r--drivers/hwmon/ds1621.c4
-rw-r--r--drivers/hwmon/ds620.c4
-rw-r--r--drivers/hwmon/emc6w201.c4
-rw-r--r--drivers/hwmon/f71805f.c4
-rw-r--r--drivers/hwmon/f71882fg.c4
-rw-r--r--drivers/hwmon/f75375s.c4
-rw-r--r--drivers/hwmon/fschmd.c4
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/gl518sm.c4
-rw-r--r--drivers/hwmon/gl520sm.c4
-rw-r--r--drivers/hwmon/hwmon.c6
-rw-r--r--drivers/hwmon/i5500_temp.c114
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/ibmpex.c4
-rw-r--r--drivers/hwmon/it87.c12
-rw-r--r--drivers/hwmon/lineage-pem.c2
-rw-r--r--drivers/hwmon/lm63.c6
-rw-r--r--drivers/hwmon/lm77.c4
-rw-r--r--drivers/hwmon/lm78.c4
-rw-r--r--drivers/hwmon/lm80.c6
-rw-r--r--drivers/hwmon/lm83.c4
-rw-r--r--drivers/hwmon/lm85.c4
-rw-r--r--drivers/hwmon/lm87.c4
-rw-r--r--drivers/hwmon/lm90.c75
-rw-r--r--drivers/hwmon/lm92.c4
-rw-r--r--drivers/hwmon/lm93.c4
-rw-r--r--drivers/hwmon/lm95241.c8
-rw-r--r--drivers/hwmon/ltc4151.c2
-rw-r--r--drivers/hwmon/ltc4215.c2
-rw-r--r--drivers/hwmon/ltc4261.c4
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1619.c4
-rw-r--r--drivers/hwmon/max1668.c4
-rw-r--r--drivers/hwmon/max31722.c8
-rw-r--r--drivers/hwmon/max6620.c514
-rw-r--r--drivers/hwmon/max6639.c4
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/mlxreg-fan.c138
-rw-r--r--drivers/hwmon/nct6683.c3
-rw-r--r--drivers/hwmon/nct6775.c717
-rw-r--r--drivers/hwmon/nct7802.c131
-rw-r--r--drivers/hwmon/occ/common.c30
-rw-r--r--drivers/hwmon/occ/common.h3
-rw-r--r--drivers/hwmon/occ/p8_i2c.c15
-rw-r--r--drivers/hwmon/occ/p9_sbe.c90
-rw-r--r--drivers/hwmon/pc87360.c4
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c23
-rw-r--r--drivers/hwmon/pmbus/lm25066.c88
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c2
-rw-r--r--drivers/hwmon/sch5636.c4
-rw-r--r--drivers/hwmon/sht21.c4
-rw-r--r--drivers/hwmon/sis5595.c4
-rw-r--r--drivers/hwmon/smm665.c2
-rw-r--r--drivers/hwmon/smsc47b397.c4
-rw-r--r--drivers/hwmon/smsc47m192.c4
-rw-r--r--drivers/hwmon/thmc50.c4
-rw-r--r--drivers/hwmon/tmp103.c105
-rw-r--r--drivers/hwmon/tmp401.c31
-rw-r--r--drivers/hwmon/tmp421.c186
-rw-r--r--drivers/hwmon/via686a.c4
-rw-r--r--drivers/hwmon/vt1211.c4
-rw-r--r--drivers/hwmon/vt8231.c4
-rw-r--r--drivers/hwmon/w83627ehf.c8
-rw-r--r--drivers/hwmon/w83627hf.c6
-rw-r--r--drivers/hwmon/w83781d.c4
-rw-r--r--drivers/hwmon/w83791d.c4
-rw-r--r--drivers/hwmon/w83792d.c6
-rw-r--r--drivers/hwmon/w83793.c6
-rw-r--r--drivers/hwmon/w83795.c6
-rw-r--r--drivers/hwmon/w83l785ts.c4
-rw-r--r--drivers/hwmon/w83l786ng.c4
-rw-r--r--drivers/hwmon/xgene-hwmon.c35
-rw-r--r--drivers/hwtracing/coresight/Kconfig13
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c56
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c101
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-self-hosted-trace.h33
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c21
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c52
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h6
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c534
-rw-r--r--drivers/i2c/busses/i2c-virtio.c56
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c33
-rw-r--r--drivers/i2c/i2c-core-acpi.c22
-rw-r--r--drivers/idle/intel_idle.c13
-rw-r--r--drivers/iio/accel/Kconfig62
-rw-r--r--drivers/iio/accel/Makefile6
-rw-r--r--drivers/iio/accel/adxl313.h54
-rw-r--r--drivers/iio/accel/adxl313_core.c332
-rw-r--r--drivers/iio/accel/adxl313_i2c.c66
-rw-r--r--drivers/iio/accel/adxl313_spi.c92
-rw-r--r--drivers/iio/accel/adxl355.h21
-rw-r--r--drivers/iio/accel/adxl355_core.c765
-rw-r--r--drivers/iio/accel/adxl355_i2c.c62
-rw-r--r--drivers/iio/accel/adxl355_spi.c65
-rw-r--r--drivers/iio/accel/adxl372.c1
-rw-r--r--drivers/iio/accel/bma400.h2
-rw-r--r--drivers/iio/accel/bma400_core.c7
-rw-r--r--drivers/iio/accel/bma400_i2c.c4
-rw-r--r--drivers/iio/accel/bma400_spi.c4
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c5
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c4
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c4
-rw-r--r--drivers/iio/accel/bmc150-accel.h2
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c4
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c4
-rw-r--r--drivers/iio/accel/bmi088-accel.h2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c347
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c4
-rw-r--r--drivers/iio/accel/kxsd9-spi.c4
-rw-r--r--drivers/iio/accel/kxsd9.c4
-rw-r--r--drivers/iio/accel/kxsd9.h2
-rw-r--r--drivers/iio/accel/mma7455.h2
-rw-r--r--drivers/iio/accel/mma7455_core.c4
-rw-r--r--drivers/iio/accel/mma7455_i2c.c4
-rw-r--r--drivers/iio/accel/mma7455_spi.c4
-rw-r--r--drivers/iio/accel/mma7660.c2
-rw-r--r--drivers/iio/accel/sca3000.c3
-rw-r--r--drivers/iio/accel/st_accel_core.c31
-rw-r--r--drivers/iio/accel/st_accel_i2c.c23
-rw-r--r--drivers/iio/accel/st_accel_spi.c23
-rw-r--r--drivers/iio/adc/Kconfig18
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c22
-rw-r--r--drivers/iio/adc/ad7291.c70
-rw-r--r--drivers/iio/adc/ad7949.c254
-rw-r--r--drivers/iio/adc/ad799x.c68
-rw-r--r--drivers/iio/adc/aspeed_adc.c598
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c598
-rw-r--r--drivers/iio/adc/axp288_adc.c28
-rw-r--r--drivers/iio/adc/berlin2-adc.c34
-rw-r--r--drivers/iio/adc/da9150-gpadc.c27
-rw-r--r--drivers/iio/adc/ep93xx_adc.c4
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c55
-rw-r--r--drivers/iio/adc/imx7d_adc.c18
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c494
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c24
-rw-r--r--drivers/iio/adc/lp8788_adc.c31
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c75
-rw-r--r--drivers/iio/adc/max1027.c278
-rw-r--r--drivers/iio/adc/max1118.c7
-rw-r--r--drivers/iio/adc/max1241.c17
-rw-r--r--drivers/iio/adc/max1363.c82
-rw-r--r--drivers/iio/adc/meson_saradc.c39
-rw-r--r--drivers/iio/adc/nau7802.c50
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c9
-rw-r--r--drivers/iio/adc/rn5t618-adc.c13
-rw-r--r--drivers/iio/adc/rockchip_saradc.c31
-rw-r--r--drivers/iio/adc/stm32-adc-core.c1
-rw-r--r--drivers/iio/adc/stm32-adc-core.h10
-rw-r--r--drivers/iio/adc/stm32-adc.c422
-rw-r--r--drivers/iio/adc/ti-adc108s102.c11
-rw-r--r--drivers/iio/adc/ti-adc128s052.c33
-rw-r--r--drivers/iio/adc/ti-ads7950.c4
-rw-r--r--drivers/iio/adc/ti-ads8344.c27
-rw-r--r--drivers/iio/adc/ti-tsc2046.c2
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c6
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c5
-rw-r--r--drivers/iio/adc/xilinx-xadc.h1
-rw-r--r--drivers/iio/buffer/industrialio-triggered-buffer.c8
-rw-r--r--drivers/iio/buffer/kfifo_buf.c50
-rw-r--r--drivers/iio/chemical/Kconfig24
-rw-r--r--drivers/iio/chemical/Makefile2
-rw-r--r--drivers/iio/chemical/scd4x.c696
-rw-r--r--drivers/iio/chemical/sunrise_co2.c537
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c5
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c48
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c1
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c1
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c53
-rw-r--r--drivers/iio/dac/ad5064.c49
-rw-r--r--drivers/iio/dac/ad5380.c15
-rw-r--r--drivers/iio/dac/ad5446.c21
-rw-r--r--drivers/iio/dac/ad5592r-base.c4
-rw-r--r--drivers/iio/dac/ad5592r-base.h2
-rw-r--r--drivers/iio/dac/ad5592r.c4
-rw-r--r--drivers/iio/dac/ad5593r.c4
-rw-r--r--drivers/iio/dac/ad5686-spi.c4
-rw-r--r--drivers/iio/dac/ad5686.c4
-rw-r--r--drivers/iio/dac/ad5686.h2
-rw-r--r--drivers/iio/dac/ad5696-i2c.c4
-rw-r--r--drivers/iio/dac/ad5766.c48
-rw-r--r--drivers/iio/dac/ad5770r.c2
-rw-r--r--drivers/iio/dac/ad7303.c47
-rw-r--r--drivers/iio/dac/ad8801.c11
-rw-r--r--drivers/iio/dac/ds4424.c9
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c14
-rw-r--r--drivers/iio/dac/ltc1660.c7
-rw-r--r--drivers/iio/dac/max5821.c9
-rw-r--r--drivers/iio/dac/mcp4922.c7
-rw-r--r--drivers/iio/dac/stm32-dac-core.c18
-rw-r--r--drivers/iio/dac/ti-dac7311.c7
-rw-r--r--drivers/iio/frequency/Kconfig12
-rw-r--r--drivers/iio/frequency/Makefile1
-rw-r--r--drivers/iio/frequency/adrf6780.c527
-rw-r--r--drivers/iio/gyro/Kconfig1
-rw-r--r--drivers/iio/gyro/adis16080.c11
-rw-r--r--drivers/iio/gyro/mpu3050-core.c24
-rw-r--r--drivers/iio/gyro/st_gyro_core.c27
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c23
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c23
-rw-r--r--drivers/iio/health/afe4403.c14
-rw-r--r--drivers/iio/health/afe4404.c8
-rw-r--r--drivers/iio/iio_core.h4
-rw-r--r--drivers/iio/imu/adis.c17
-rw-r--r--drivers/iio/imu/adis16400.c20
-rw-r--r--drivers/iio/imu/adis16460.c18
-rw-r--r--drivers/iio/imu/adis16475.c19
-rw-r--r--drivers/iio/imu/adis_trigger.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c36
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h4
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c22
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h1
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c29
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c6
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c6
-rw-r--r--drivers/iio/industrialio-buffer.c201
-rw-r--r--drivers/iio/industrialio-core.c10
-rw-r--r--drivers/iio/inkern.c17
-rw-r--r--drivers/iio/light/cm3605.c29
-rw-r--r--drivers/iio/light/cm36651.c7
-rw-r--r--drivers/iio/light/gp2ap002.c24
-rw-r--r--drivers/iio/light/ltr501.c37
-rw-r--r--drivers/iio/light/max44000.c17
-rw-r--r--drivers/iio/light/noa1305.c7
-rw-r--r--drivers/iio/magnetometer/Kconfig2
-rw-r--r--drivers/iio/magnetometer/ak8975.c35
-rw-r--r--drivers/iio/magnetometer/hmc5843.h2
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c4
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c4
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c4
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c29
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c23
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c23
-rw-r--r--drivers/iio/multiplexer/iio-mux.c7
-rw-r--r--drivers/iio/potentiometer/max5487.c7
-rw-r--r--drivers/iio/pressure/ms5611.h2
-rw-r--r--drivers/iio/pressure/ms5611_core.c4
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c4
-rw-r--r--drivers/iio/pressure/ms5611_spi.c4
-rw-r--r--drivers/iio/pressure/st_pressure_core.c27
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c23
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c27
-rw-r--r--drivers/iio/temperature/Kconfig10
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/ltc2983.c16
-rw-r--r--drivers/iio/temperature/max31865.c349
-rw-r--r--drivers/infiniband/core/cma.c34
-rw-r--r--drivers/infiniband/core/cma_priv.h11
-rw-r--r--drivers/infiniband/core/counters.c40
-rw-r--r--drivers/infiniband/core/device.c1
-rw-r--r--drivers/infiniband/core/iwpm_util.c2
-rw-r--r--drivers/infiniband/core/nldev.c278
-rw-r--r--drivers/infiniband/core/rw.c66
-rw-r--r--drivers/infiniband/core/sa_query.c6
-rw-r--r--drivers/infiniband/core/sysfs.c58
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c54
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c3
-rw-r--r--drivers/infiniband/core/verbs.c49
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h19
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c380
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h30
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c45
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c16
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c15
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c6
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c22
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c57
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h33
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h85
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c22
-rw-r--r--drivers/infiniband/hw/efa/efa.h23
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h100
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h41
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c164
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h38
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c35
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h10
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c182
-rw-r--r--drivers/infiniband/hw/efa/efa_regs_defs.h7
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c213
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c3
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c3
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.c10
-rw-r--r--drivers/infiniband/hw/hfi1/init.c3
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h76
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c2
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c314
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c9
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h71
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c5
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c53
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h26
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c32
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c142
-rw-r--r--drivers/infiniband/hw/irdma/cm.h12
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c43
-rw-r--r--drivers/infiniband/hw/irdma/hw.c7
-rw-r--r--drivers/infiniband/hw/irdma/main.h5
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h1
-rw-r--r--drivers/infiniband/hw/irdma/protos.h2
-rw-r--r--drivers/infiniband/hw/irdma/trace_cm.h8
-rw-r--r--drivers/infiniband/hw/irdma/type.h3
-rw-r--r--drivers/infiniband/hw/irdma/uk.c105
-rw-r--r--drivers/infiniband/hw/irdma/user.h32
-rw-r--r--drivers/infiniband/hw/irdma/utils.c49
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c154
-rw-r--r--drivers/infiniband/hw/irdma/ws.c13
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c46
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c26
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h2
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c283
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c13
-rw-r--r--drivers/infiniband/hw/mlx5/devx.h2
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c187
-rw-r--r--drivers/infiniband/hw/mlx5/main.c55
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h59
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c111
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c79
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c10
-rw-r--r--drivers/infiniband/hw/qedr/main.c3
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h1
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c30
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h1
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c33
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c20
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c57
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c28
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.c42
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c267
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c36
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h34
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c41
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c30
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h292
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c65
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c50
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c139
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h60
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c18
-rw-r--r--drivers/infiniband/ulp/opa_vnic/Kconfig4
-rw-r--r--drivers/infiniband/ulp/opa_vnic/Makefile3
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c7
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c49
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c11
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c6
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h13
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c6
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c31
-rw-r--r--drivers/input/misc/axp20x-pek.c26
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c263
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h56
-rw-r--r--drivers/interconnect/qcom/msm8916.c1214
-rw-r--r--drivers/interconnect/qcom/msm8939.c1283
-rw-r--r--drivers/interconnect/qcom/qcs404.c967
-rw-r--r--drivers/interconnect/qcom/sdm660.c1940
-rw-r--r--drivers/interconnect/samsung/Kconfig6
-rw-r--r--drivers/iommu/amd/init.c16
-rw-r--r--drivers/iommu/amd/iommu.c3
-rw-r--r--drivers/iommu/amd/iommu_v2.c3
-rw-r--r--drivers/iommu/iommu.c3
-rw-r--r--drivers/ipack/devices/ipoctal.c48
-rw-r--r--drivers/irqchip/Kconfig25
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-apple-aic.c20
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c13
-rw-r--r--drivers/irqchip/irq-aspeed-vic.c2
-rw-r--r--drivers/irqchip/irq-ativic32.c22
-rw-r--r--drivers/irqchip/irq-atmel-aic.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c2
-rw-r--r--drivers/irqchip/irq-bcm2835.c2
-rw-r--r--drivers/irqchip/irq-bcm2836.c2
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c6
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c47
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c21
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c16
-rw-r--r--drivers/irqchip/irq-clps711x.c8
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c2
-rw-r--r--drivers/irqchip/irq-csky-mpintc.c4
-rw-r--r--drivers/irqchip/irq-davinci-aintc.c2
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c2
-rw-r--r--drivers/irqchip/irq-digicolor.c2
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c2
-rw-r--r--drivers/irqchip/irq-ftintc010.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-hip04.c2
-rw-r--r--drivers/irqchip/irq-ixp4xx.c4
-rw-r--r--drivers/irqchip/irq-lpc32xx.c2
-rw-r--r--drivers/irqchip/irq-mchp-eic.c280
-rw-r--r--drivers/irqchip/irq-meson-gpio.c15
-rw-r--r--drivers/irqchip/irq-mips-gic.c37
-rw-r--r--drivers/irqchip/irq-mmp.c4
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c4
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c4
-rw-r--r--drivers/irqchip/irq-mxs.c2
-rw-r--r--drivers/irqchip/irq-nvic.c17
-rw-r--r--drivers/irqchip/irq-omap-intc.c2
-rw-r--r--drivers/irqchip/irq-or1k-pic.c2
-rw-r--r--drivers/irqchip/irq-orion.c4
-rw-r--r--drivers/irqchip/irq-rda-intc.c2
-rw-r--r--drivers/irqchip/irq-riscv-intc.c2
-rw-r--r--drivers/irqchip/irq-sa11x0.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c4
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c4
-rw-r--r--drivers/irqchip/irq-ts4800.c4
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c2
-rw-r--r--drivers/irqchip/irq-vic.c2
-rw-r--r--drivers/irqchip/irq-vt8500.c2
-rw-r--r--drivers/irqchip/irq-wpcm450-aic.c2
-rw-r--r--drivers/irqchip/irq-zevio.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c8
-rw-r--r--drivers/leds/led-class-flash.c2
-rw-r--r--drivers/leds/led-triggers.c41
-rw-r--r--drivers/leds/trigger/Kconfig1
-rw-r--r--drivers/macintosh/smu.c3
-rw-r--r--drivers/mailbox/Kconfig12
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/apple-mailbox.c384
-rw-r--r--drivers/mailbox/bcm2835-mailbox.c4
-rw-r--r--drivers/mailbox/hi3660-mailbox.c4
-rw-r--r--drivers/mailbox/hi6220-mailbox.c7
-rw-r--r--drivers/mailbox/imx-mailbox.c124
-rw-r--r--drivers/mailbox/mailbox-altera.c5
-rw-r--r--drivers/mailbox/mailbox-sti.c4
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c4
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c15
-rw-r--r--drivers/mailbox/omap-mailbox.c4
-rw-r--r--drivers/mailbox/pcc.c598
-rw-r--r--drivers/mailbox/platform_mhu.c4
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c31
-rw-r--r--drivers/mailbox/stm32-ipcc.c4
-rw-r--r--drivers/mailbox/sun6i-msgbox.c9
-rw-r--r--drivers/md/bcache/bcache.h6
-rw-r--r--drivers/md/bcache/bcache_ondisk.h445
-rw-r--r--drivers/md/bcache/bset.h2
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/bcache/debug.c15
-rw-r--r--drivers/md/bcache/features.c2
-rw-r--r--drivers/md/bcache/features.h3
-rw-r--r--drivers/md/bcache/io.c16
-rw-r--r--drivers/md/bcache/request.c19
-rw-r--r--drivers/md/bcache/request.h4
-rw-r--r--drivers/md/bcache/super.c91
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/bcache/sysfs.h18
-rw-r--r--drivers/md/bcache/util.h29
-rw-r--r--drivers/md/bcache/writeback.c2
-rw-r--r--drivers/md/dm-bio-record.h1
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-cache-metadata.c2
-rw-r--r--drivers/md/dm-cache-target.c2
-rw-r--r--drivers/md/dm-clone-target.c2
-rw-r--r--drivers/md/dm-core.h4
-rw-r--r--drivers/md/dm-crypt.c1
-rw-r--r--drivers/md/dm-dust.c5
-rw-r--r--drivers/md/dm-ebs-target.c2
-rw-r--r--drivers/md/dm-era-target.c2
-rw-r--r--drivers/md/dm-exception-store.h2
-rw-r--r--drivers/md/dm-flakey.c3
-rw-r--r--drivers/md/dm-ima.c1
-rw-r--r--drivers/md/dm-integrity.c6
-rw-r--r--drivers/md/dm-linear.c3
-rw-r--r--drivers/md/dm-log-writes.c4
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c6
-rw-r--r--drivers/md/dm-ps-historical-service-time.c1
-rw-r--r--drivers/md/dm-raid.c6
-rw-r--r--drivers/md/dm-rq.c1
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-table.c172
-rw-r--r--drivers/md/dm-thin-metadata.c2
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm-verity-target.c4
-rw-r--r--drivers/md/dm-writecache.c2
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c42
-rw-r--r--drivers/md/md.c130
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid10.c2
-rw-r--r--drivers/md/raid5.c7
-rw-r--r--drivers/media/cec/Kconfig4
-rw-r--r--drivers/media/cec/core/cec-pin.c4
-rw-r--r--drivers/media/cec/platform/meson/ao-cec-g12a.c4
-rw-r--r--drivers/media/cec/platform/meson/ao-cec.c4
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.c4
-rw-r--r--drivers/media/cec/platform/sti/stih-cec.c4
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c4
-rw-r--r--drivers/media/common/siano/smscoreapi.c7
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c150
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c199
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c40
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c59
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c31
-rw-r--r--drivers/media/dvb-core/dvb_net.c8
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2099.c9
-rw-r--r--drivers/media/dvb-frontends/cxd2099.h9
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_priv.h2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c4
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c18
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c9
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.h9
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_defs.h4
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_regs.h10
-rw-r--r--drivers/media/dvb-frontends/mxl692.c9
-rw-r--r--drivers/media/dvb-frontends/mxl692.h9
-rw-r--r--drivers/media/dvb-frontends/mxl692_defs.h9
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c5
-rw-r--r--drivers/media/dvb-frontends/stv0910.c9
-rw-r--r--drivers/media/dvb-frontends/stv0910.h9
-rw-r--r--drivers/media/dvb-frontends/stv6111.c9
-rw-r--r--drivers/media/dvb-frontends/stv6111.h9
-rw-r--r--drivers/media/firewire/firedtv-avc.c14
-rw-r--r--drivers/media/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/i2c/Kconfig27
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/adv7604.c15
-rw-r--r--drivers/media/i2c/dw9714.c14
-rw-r--r--drivers/media/i2c/hi846.c2190
-rw-r--r--drivers/media/i2c/imx258.c12
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c1
-rw-r--r--drivers/media/i2c/max9286.c17
-rw-r--r--drivers/media/i2c/mt9p031.c80
-rw-r--r--drivers/media/i2c/ov13858.c11
-rw-r--r--drivers/media/i2c/ov13b10.c1491
-rw-r--r--drivers/media/i2c/ov5670.c11
-rw-r--r--drivers/media/i2c/ov8856.c83
-rw-r--r--drivers/media/i2c/st-mipid02.c22
-rw-r--r--drivers/media/i2c/tda1997x.c131
-rw-r--r--drivers/media/i2c/tda1997x_regs.h3
-rw-r--r--drivers/media/i2c/video-i2c.c21
-rw-r--r--drivers/media/mc/Kconfig8
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c4
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c4
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c13
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c24
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-main.c4
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c60
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.h9
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2-main.c274
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.c18
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c22
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c19
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c10
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c12
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c29
-rw-r--r--drivers/media/pci/pluto2/pluto2.c20
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c53
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c29
-rw-r--r--drivers/media/pci/saa7134/saa7134.h1
-rw-r--r--drivers/media/pci/saa7164/saa7164-api.c2
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c2
-rw-r--r--drivers/media/platform/Kconfig20
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c311
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.c23
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.h10
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.c74
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.h200
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.c202
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.h189
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c23
-rw-r--r--drivers/media/platform/aspeed-video.c133
-rw-r--r--drivers/media/platform/atmel/atmel-isc-base.c29
-rw-r--r--drivers/media/platform/atmel/atmel-isc.h2
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c17
-rw-r--r--drivers/media/platform/atmel/atmel-sama5d2-isc.c54
-rw-r--r--drivers/media/platform/atmel/atmel-sama7g5-isc.c37
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c18
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c4
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c3
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c9
-rw-r--r--drivers/media/platform/davinci/vpif.c5
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c21
-rw-r--r--drivers/media/platform/davinci/vpss.c10
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c3
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c20
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c4
-rw-r--r--drivers/media/platform/imx-jpeg/mxc-jpeg.c109
-rw-r--r--drivers/media/platform/imx-jpeg/mxc-jpeg.h2
-rw-r--r--drivers/media/platform/imx-pxp.c4
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c9
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c10
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c6
-rw-r--r--drivers/media/platform/meson/ge2d/ge2d.c10
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/Makefile3
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c820
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h27
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c65
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c628
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c360
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h59
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c148
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c75
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c774
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.h1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h23
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c43
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h5
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c5
-rw-r--r--drivers/media/platform/mx2_emmaprp.c4
-rw-r--r--drivers/media/platform/omap/omap_vout.c18
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c2
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h2
-rw-r--r--drivers/media/platform/omap3isp/isp.c21
-rw-r--r--drivers/media/platform/pxa_camera.c26
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-170.c9
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c28
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c18
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-8.c17
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h2
-rw-r--r--drivers/media/platform/qcom/camss/camss.c18
-rw-r--r--drivers/media/platform/qcom/venus/core.c135
-rw-r--r--drivers/media/platform/qcom/venus/core.h9
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c42
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c81
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h4
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c48
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h14
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c13
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c4
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h2
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c13
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c67
-rw-r--r--drivers/media/platform/qcom/venus/venc.c116
-rw-r--r--drivers/media/platform/rcar-isp.c515
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c959
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c241
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c40
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c25
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h25
-rw-r--r--drivers/media/platform/rcar_drif.c17
-rw-r--r--drivers/media/platform/rcar_fdp1.c4
-rw-r--r--drivers/media/platform/rcar_jpu.c4
-rw-r--r--drivers/media/platform/renesas-ceu.c33
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c5
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c9
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h44
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c98
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c29
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c557
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h406
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c107
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c6
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c4
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c5
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c9
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c3
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c1
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c1
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c4
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c37
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c16
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c33
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c8
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c4
-rw-r--r--drivers/media/platform/ti-vpe/cal.c16
-rw-r--r--drivers/media/platform/via-camera.c6
-rw-r--r--drivers/media/platform/video-mux.c17
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c18
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h11
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c4
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c17
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/rc/Kconfig8
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c4
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/media/rc/ir-hix5hd2.c4
-rw-r--r--drivers/media/rc/ir_toy.c63
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/mceusb.c2
-rw-r--r--drivers/media/rc/meson-ir-tx.c1
-rw-r--r--drivers/media/rc/meson-ir.c4
-rw-r--r--drivers/media/rc/mtk-cir.c4
-rw-r--r--drivers/media/rc/sir_ir.c438
-rw-r--r--drivers/media/rc/st_rc.c5
-rw-r--r--drivers/media/rc/streamzap.c1
-rw-r--r--drivers/media/rc/sunxi-cir.c4
-rw-r--r--drivers/media/spi/cxd2880-spi.c2
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c4
-rw-r--r--drivers/media/test-drivers/vim2m.c5
-rw-r--r--drivers/media/test-drivers/vimc/vimc-scaler.c366
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c341
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.h9
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c52
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h23
-rw-r--r--drivers/media/tuners/mxl5007t.c9
-rw-r--r--drivers/media/tuners/tuner-types.c4
-rw-r--r--drivers/media/usb/airspy/airspy.c5
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c16
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c1
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c12
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c5
-rw-r--r--drivers/media/usb/gspca/gl860/gl860-mi1320.c87
-rw-r--r--drivers/media/usb/gspca/gl860/gl860-ov9655.c169
-rw-r--r--drivers/media/usb/gspca/gspca.c2
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_ov7660.h1
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c22
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ctrl.c25
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c11
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c3
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c10
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c260
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c16
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c2
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c103
-rw-r--r--drivers/media/usb/uvc/uvc_video.c5
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h17
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c168
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c83
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c77
-rw-r--r--drivers/memory/Kconfig5
-rw-r--r--drivers/memory/fsl_ifc.c13
-rw-r--r--drivers/memory/jedec_ddr.h47
-rw-r--r--drivers/memory/jedec_ddr_data.c41
-rw-r--r--drivers/memory/mtk-smi.c596
-rw-r--r--drivers/memory/of_memory.c87
-rw-r--r--drivers/memory/of_memory.h9
-rw-r--r--drivers/memory/renesas-rpc-if.c159
-rw-r--r--drivers/memory/samsung/Kconfig13
-rw-r--r--drivers/memory/tegra/Kconfig1
-rw-r--r--drivers/memory/tegra/mc.c25
-rw-r--r--drivers/memory/tegra/tegra186-emc.c5
-rw-r--r--drivers/memory/tegra/tegra20-emc.c200
-rw-r--r--drivers/memory/tegra/tegra210-emc-cc-r21021.c2
-rw-r--r--drivers/memory/tegra/tegra210-emc-core.c6
-rw-r--r--drivers/memory/tegra/tegra30-emc.c4
-rw-r--r--drivers/memstick/core/ms_block.c8
-rw-r--r--drivers/memstick/core/mspro_block.c6
-rw-r--r--drivers/memstick/host/jmb38x_ms.c5
-rw-r--r--drivers/memstick/host/r592.c8
-rw-r--r--drivers/message/fusion/mptlan.c2
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c3
-rw-r--r--drivers/misc/ad525x_dpot-spi.c3
-rw-r--r--drivers/misc/ad525x_dpot.c4
-rw-r--r--drivers/misc/ad525x_dpot.h2
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c2
-rw-r--r--drivers/misc/enclosure.c16
-rw-r--r--drivers/misc/fastrpc.c21
-rw-r--r--drivers/misc/genwqe/card_utils.c10
-rw-r--r--drivers/misc/habanalabs/Kconfig2
-rw-r--r--drivers/misc/habanalabs/common/Makefile2
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c105
-rw-r--r--drivers/misc/habanalabs/common/context.c8
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c51
-rw-r--r--drivers/misc/habanalabs/common/device.c159
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c28
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h64
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c24
-rw-r--r--drivers/misc/habanalabs/common/hwmgr.c (renamed from drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c)38
-rw-r--r--drivers/misc/habanalabs/common/hwmon.c194
-rw-r--r--drivers/misc/habanalabs/common/irq.c5
-rw-r--r--drivers/misc/habanalabs/common/memory.c515
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c30
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c6
-rw-r--r--drivers/misc/habanalabs/gaudi/Makefile2
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c22
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h4
-rw-r--r--drivers/misc/habanalabs/goya/goya.c13
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h1
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c31
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h22
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h189
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h10
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h1
-rw-r--r--drivers/misc/hisi_hikey_usb.c119
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c3
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h2
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_spi.c4
-rw-r--r--drivers/misc/lkdtm/bugs.c77
-rw-r--r--drivers/misc/lkdtm/core.c1
-rw-r--r--drivers/misc/lkdtm/lkdtm.h1
-rw-r--r--drivers/misc/mei/Kconfig2
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/pci-txe.c4
-rw-r--r--drivers/misc/mei/pxp/Kconfig13
-rw-r--r--drivers/misc/mei/pxp/Makefile7
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c229
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.h18
-rw-r--r--drivers/misc/pvpanic/pvpanic-mmio.c9
-rw-r--r--drivers/misc/pvpanic/pvpanic-pci.c26
-rw-r--r--drivers/misc/pvpanic/pvpanic.c16
-rw-r--r--drivers/misc/sgi-xp/xpnet.c9
-rw-r--r--drivers/misc/tifm_7xx1.c2
-rw-r--r--drivers/misc/tifm_core.c8
-rw-r--r--drivers/mmc/core/block.c27
-rw-r--r--drivers/mmc/core/crypto.c11
-rw-r--r--drivers/mmc/core/mmc.c8
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/sd.c1
-rw-r--r--drivers/mmc/core/slot-gpio.c42
-rw-r--r--drivers/mmc/host/Kconfig10
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/cqhci-core.c7
-rw-r--r--drivers/mmc/host/cqhci-crypto.c33
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c26
-rw-r--r--drivers/mmc/host/dw_mmc.c42
-rw-r--r--drivers/mmc/host/mmci.c4
-rw-r--r--drivers/mmc/host/moxart-mmc.c29
-rw-r--r--drivers/mmc/host/mtk-sd.c137
-rw-r--r--drivers/mmc/host/mxs-mmc.c10
-rw-r--r--drivers/mmc/host/omap_hsmmc.c12
-rw-r--r--drivers/mmc/host/sdhci-acpi.c14
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c33
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c29
-rw-r--r--drivers/mmc/host/sdhci-omap.c322
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c159
-rw-r--r--drivers/mmc/host/sdhci-pci-data.c6
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c2
-rw-r--r--drivers/mmc/host/sdhci-pci.h5
-rw-r--r--drivers/mmc/host/sdhci-s3c.c1
-rw-r--r--drivers/mmc/host/sdhci-sprd.c13
-rw-r--r--drivers/mmc/host/sdhci.c48
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c17
-rw-r--r--drivers/mmc/host/vub300.c18
-rw-r--r--drivers/most/most_usb.c5
-rw-r--r--drivers/mtd/mtd_blkdevs.c6
-rw-r--r--drivers/mtd/mtdsuper.c1
-rw-r--r--drivers/mux/core.c38
-rw-r--r--drivers/net/Kconfig18
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/amt.c3296
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/appletalk/ltpc.c3
-rw-r--r--drivers/net/arcnet/arc-rimi.c5
-rw-r--r--drivers/net/arcnet/arcdevice.h5
-rw-r--r--drivers/net/arcnet/com20020-isa.c2
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/arcnet/com20020.c4
-rw-r--r--drivers/net/arcnet/com20020_cs.c2
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/arcnet/com90xx.c3
-rw-r--r--drivers/net/bareudp.c7
-rw-r--r--drivers/net/bonding/bond_alb.c28
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/bonding/bond_sysfs.c4
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/dev/bittiming.c30
-rw-r--r--drivers/net/can/dev/netlink.c221
-rw-r--r--drivers/net/can/flexcan.c68
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/m_can/m_can_platform.c14
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c6
-rw-r--r--drivers/net/can/rcar/Kconfig4
-rw-r--r--drivers/net/can/rcar/rcar_can.c20
-rw-r--r--drivers/net/can/sja1000/peak_pci.c9
-rw-r--r--drivers/net/can/usb/etas_es58x/es581_4.h2
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c7
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.h2
-rw-r--r--drivers/net/can/usb/gs_usb.c12
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c13
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c17
-rw-r--r--drivers/net/can/xilinx_can.c7
-rw-r--r--drivers/net/dsa/Kconfig1
-rw-r--r--drivers/net/dsa/Makefile2
-rw-r--r--drivers/net/dsa/b53/b53_common.c101
-rw-r--r--drivers/net/dsa/b53/b53_priv.h2
-rw-r--r--drivers/net/dsa/bcm_sf2.c12
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c6
-rw-r--r--drivers/net/dsa/lantiq_gswip.c44
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c8
-rw-r--r--drivers/net/dsa/mt7530.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c5
-rw-r--r--drivers/net/dsa/ocelot/felix.c4
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c8
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c8
-rw-r--r--drivers/net/dsa/qca/ar9331.c10
-rw-r--r--drivers/net/dsa/qca8k.c435
-rw-r--r--drivers/net/dsa/qca8k.h35
-rw-r--r--drivers/net/dsa/realtek-smi-core.c4
-rw-r--r--drivers/net/dsa/realtek-smi-core.h4
-rw-r--r--drivers/net/dsa/rtl8365mb.c1982
-rw-r--r--drivers/net/dsa/rtl8366.c96
-rw-r--r--drivers/net/dsa/rtl8366rb.c301
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h29
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c35
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c91
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c144
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c15
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c8
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_mdio.c12
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c5
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c11
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c10
-rw-r--r--drivers/net/ethernet/3com/3c59x.c4
-rw-r--r--drivers/net/ethernet/8390/apne.c3
-rw-r--r--drivers/net/ethernet/8390/ax88796.c12
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c7
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c3
-rw-r--r--drivers/net/ethernet/8390/ne.c4
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c22
-rw-r--r--drivers/net/ethernet/8390/stnic.c5
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c3
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c6
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c14
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c8
-rw-r--r--drivers/net/ethernet/agere/et131x.c4
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c4
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/alteon/acenic.c20
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c6
-rw-r--r--drivers/net/ethernet/amd/atarilance.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c5
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c15
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c4
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c20
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mac.c2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c2
-rw-r--r--drivers/net/ethernet/apple/bmac.c15
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c2
-rw-r--r--drivers/net/ethernet/arc/Kconfig4
-rw-r--r--drivers/net/ethernet/arc/emac_main.c4
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c9
-rw-r--r--drivers/net/ethernet/asix/Kconfig35
-rw-r--r--drivers/net/ethernet/asix/Makefile6
-rw-r--r--drivers/net/ethernet/asix/ax88796c_ioctl.c239
-rw-r--r--drivers/net/ethernet/asix/ax88796c_ioctl.h26
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c1164
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.h568
-rw-r--r--drivers/net/ethernet/asix/ax88796c_spi.c115
-rw-r--r--drivers/net/ethernet/asix/ax88796c_spi.h69
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c12
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c12
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c10
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c12
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c37
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c283
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h113
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c444
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h51
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c785
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h27
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c400
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h155
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c87
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h10
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c202
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c60
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c5
-rw-r--r--drivers/net/ethernet/cadence/macb.h7
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c42
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c13
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c40
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c4
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c15
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/gmac.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/xgmac.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h2
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c13
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c9
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c6
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c15
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c35
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c45
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c11
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c5
-rw-r--r--drivers/net/ethernet/dlink/sundance.c6
-rw-r--r--drivers/net/ethernet/dnet.c8
-rw-r--r--drivers/net/ethernet/ec_bhf.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c7
-rw-r--r--drivers/net/ethernet/ethoc.c28
-rw-r--r--drivers/net/ethernet/ezchip/Kconfig2
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c4
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c9
-rw-r--r--drivers/net/ethernet/fealnx.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c21
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c24
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h7
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c58
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c332
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c37
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c18
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c16
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c7
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve.h52
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c61
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h15
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc.h13
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c7
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c109
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c413
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c68
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c117
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c84
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c37
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c215
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c41
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c33
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c591
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h35
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c13
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c12
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c7
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c14
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c46
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c666
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h10
-rw-r--r--drivers/net/ethernet/intel/Kconfig14
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h5
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c31
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c50
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c52
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h48
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c238
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c6
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h215
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h94
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c121
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c131
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c225
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c216
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c192
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c259
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c655
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h83
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c236
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c307
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c80
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c864
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c1645
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h204
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c375
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c151
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c389
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c197
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c2830
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h152
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c1369
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h162
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c326
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h147
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c102
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c447
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h74
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c158
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h20
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c27
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c2
-rw-r--r--drivers/net/ethernet/jme.c4
-rw-r--r--drivers/net/ethernet/korina.c4
-rw-r--r--drivers/net/ethernet/lantiq_etop.c21
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c74
-rw-r--r--drivers/net/ethernet/litex/Kconfig2
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c16
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c75
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c117
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h138
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h958
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c133
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c76
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c601
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c266
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c225
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c96
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c52
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c43
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c234
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c133
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c273
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c8
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h69
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c35
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.h4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c219
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.h6
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c1064
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h47
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c161
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c114
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c7
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c21
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.c99
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c163
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c134
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c457
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c427
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c668
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c589
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c293
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c147
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag.c)106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag.h)9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c)17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h)2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c611
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c162
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h173
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c272
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c90
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c372
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h357
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c390
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c432
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c583
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c662
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c11
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c15
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c14
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c4
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c16
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c7
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c7
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c39
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h3
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c91
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c7
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c155
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c75
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h4
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c96
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c3
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/mscc/Kconfig2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c327
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c125
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c24
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c10
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c9
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c6
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c11
-rw-r--r--drivers/net/ethernet/neterion/s2io.c6
-rw-r--r--drivers/net/ethernet/neterion/s2io.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/qdisc.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c51
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c15
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c4
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c5
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c6
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c48
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c41
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c264
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h49
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c92
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c241
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c121
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h44
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h143
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h1491
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c1389
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c126
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h347
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h12339
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h222
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c405
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c98
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.h60
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h286
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h500
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c43
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h135
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c167
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h131
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c23
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c66
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h765
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h2474
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.h30
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h223
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c63
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c201
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h138
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h311
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c53
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c21
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c5
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c24
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c7
-rw-r--r--drivers/net/ethernet/realtek/8139too.c7
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c45
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c59
-rw-r--r--drivers/net/ethernet/renesas/ravb.h52
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c728
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c18
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c3
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c9
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h6
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c4
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c10
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c6
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c37
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c4
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c4
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.h2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/silan/sc92031.c14
-rw-r--r--drivers/net/ethernet/sis/sis190.c10
-rw-r--r--drivers/net/ethernet/sis/sis900.c19
-rw-r--r--drivers/net/ethernet/smsc/epic100.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c15
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c22
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c26
-rw-r--r--drivers/net/ethernet/socionext/netsec.c46
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c7
-rw-r--r--drivers/net/ethernet/sun/niu.c46
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c6
-rw-r--r--drivers/net/ethernet/sun/sungem.c15
-rw-r--r--drivers/net/ethernet/sun/sunhme.c23
-rw-r--r--drivers/net/ethernet/sun/sunqe.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c8
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c26
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c17
-rw-r--r--drivers/net/ethernet/ti/cpts.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c8
-rw-r--r--drivers/net/ethernet/ti/tlan.c14
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c11
-rw-r--r--drivers/net/ethernet/via/via-rhine.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c11
-rw-r--r--drivers/net/ethernet/wiznet/w5100.h2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c10
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c11
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c14
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c7
-rw-r--r--drivers/net/fddi/defxx.c12
-rw-r--r--drivers/net/fddi/defza.c2
-rw-r--r--drivers/net/fddi/skfp/h/smc.h2
-rw-r--r--drivers/net/fddi/skfp/skfddi.c9
-rw-r--r--drivers/net/fddi/skfp/smtinit.c4
-rw-r--r--drivers/net/fjes/fjes_hw.c3
-rw-r--r--drivers/net/fjes/fjes_hw.h2
-rw-r--r--drivers/net/fjes/fjes_main.c14
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/hamradio/6pack.c6
-rw-r--r--drivers/net/hamradio/baycom_epp.c10
-rw-r--r--drivers/net/hamradio/bpqether.c7
-rw-r--r--drivers/net/hamradio/dmascc.c5
-rw-r--r--drivers/net/hamradio/hdlcdrv.c4
-rw-r--r--drivers/net/hamradio/mkiss.c6
-rw-r--r--drivers/net/hamradio/scc.c7
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hippi/rrunner.c6
-rw-r--r--drivers/net/hyperv/hyperv_net.h5
-rw-r--r--drivers/net/hyperv/netvsc.c15
-rw-r--r--drivers/net/hyperv/netvsc_drv.c6
-rw-r--r--drivers/net/ieee802154/ca8210.c2
-rw-r--r--drivers/net/ifb.c5
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/ipvlan/ipvtap.c2
-rw-r--r--drivers/net/macsec.c4
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/net_failover.c3
-rw-r--r--drivers/net/netdevsim/bus.c155
-rw-r--r--drivers/net/netdevsim/dev.c204
-rw-r--r--drivers/net/netdevsim/ethtool.c28
-rw-r--r--drivers/net/netdevsim/health.c32
-rw-r--r--drivers/net/netdevsim/netdev.c72
-rw-r--r--drivers/net/netdevsim/netdevsim.h57
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/pcs/pcs-xpcs.c2
-rw-r--r--drivers/net/phy/at803x.c778
-rw-r--r--drivers/net/phy/bcm7xxx.c203
-rw-r--r--drivers/net/phy/broadcom.c106
-rw-r--r--drivers/net/phy/dp83867.c23
-rw-r--r--drivers/net/phy/dp83869.c4
-rw-r--r--drivers/net/phy/marvell10g.c107
-rw-r--r--drivers/net/phy/mdio_bus.c28
-rw-r--r--drivers/net/phy/micrel.c107
-rw-r--r--drivers/net/phy/microchip_t1.c239
-rw-r--r--drivers/net/phy/mscc/mscc_main.c2
-rw-r--r--drivers/net/phy/phy-c45.c35
-rw-r--r--drivers/net/phy/phy.c140
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/phylink.c142
-rw-r--r--drivers/net/phy/realtek.c8
-rw-r--r--drivers/net/phy/sfp-bus.c2
-rw-r--r--drivers/net/plip/plip.c8
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/rionet.c14
-rw-r--r--drivers/net/sb1000.c12
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/thunderbolt.c8
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/aqc111.c4
-rw-r--r--drivers/net/usb/asix_common.c2
-rw-r--r--drivers/net/usb/asix_devices.c2
-rw-r--r--drivers/net/usb/ax88172a.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c12
-rw-r--r--drivers/net/usb/catc.c24
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/ch9200.c4
-rw-r--r--drivers/net/usb/cx82310_eth.c5
-rw-r--r--drivers/net/usb/dm9601.c7
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kalmia.c2
-rw-r--r--drivers/net/usb/kaweth.c3
-rw-r--r--drivers/net/usb/lan78xx.c10
-rw-r--r--drivers/net/usb/mcs7830.c9
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c4
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/sierra_net.c6
-rw-r--r--drivers/net/usb/smsc75xx.c9
-rw-r--r--drivers/net/usb/smsc95xx.c9
-rw-r--r--drivers/net/usb/sr9700.c9
-rw-r--r--drivers/net/usb/sr9800.c7
-rw-r--r--drivers/net/usb/usbnet.c11
-rw-r--r--drivers/net/virtio_net.c54
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c10
-rw-r--r--drivers/net/vrf.c32
-rw-r--r--drivers/net/wan/hdlc_fr.c4
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c45
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c77
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c73
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h49
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c27
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c4344
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h226
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c282
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c36
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c56
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h24
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c1445
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c45
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c349
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c18
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c42
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c162
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h107
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c9
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c105
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c10
-rw-r--r--drivers/net/wireless/ath/spectral_common.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c49
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h38
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c55
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c13
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c189
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c147
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/atmel/atmel.c19
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_g.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/radio.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c4
-rw-r--r--drivers/net/wireless/cisco/airo.c27
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c150
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h57
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h234
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c252
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c228
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c362
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c106
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c269
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c182
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c119
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c117
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c306
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c90
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c5
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c4
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c163
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c18
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c384
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c21
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c36
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c16
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c242
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c90
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c296
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c357
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c542
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c170
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c652
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c366
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c1192
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h128
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h161
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c100
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c96
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c776
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c328
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c448
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h179
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c66
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c348
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c115
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h58
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c317
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c220
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c135
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c197
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c303
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h (renamed from drivers/net/wireless/mediatek/mt76/mt7615/sdio.h)33
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c (renamed from drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c)134
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c84
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c11
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c31
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c14
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c91
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c134
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_if.h7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/realtek/Kconfig1
-rw-r--r--drivers/net/wireless/realtek/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c14
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h24
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h49
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c119
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c753
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c47
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig50
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile25
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c695
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h165
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c5716
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h181
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c2502
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h3384
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c2489
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h77
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c188
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c1641
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h1378
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c3836
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h860
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c676
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c3060
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h630
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c2868
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h311
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c150
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h16
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h2159
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c353
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c2036
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h109
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c3911
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h24
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c1607
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h133
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.c48725
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.h28
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c190
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h26
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c491
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h358
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h17
-rw-r--r--drivers/net/wireless/rndis_wlan.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c10
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c74
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c17
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c24
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h11
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h15
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c9
-rw-r--r--drivers/net/wireless/wl3501_cs.c3
-rw-r--r--drivers/net/wireless/zydas/zd1201.c9
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/wwan/Kconfig1
-rw-r--r--drivers/net/wwan/iosm/Makefile5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.c125
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.h59
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c321
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.h205
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_flash.c594
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_flash.h229
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c107
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h18
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c317
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.h49
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/net/xen-netfront.c12
-rw-r--r--drivers/nfc/fdp/i2c.c1
-rw-r--r--drivers/nfc/microread/i2c.c4
-rw-r--r--drivers/nfc/microread/mei.c6
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c4
-rw-r--r--drivers/nfc/pn533/i2c.c6
-rw-r--r--drivers/nfc/pn533/pn533.c6
-rw-r--r--drivers/nfc/pn533/pn533.h4
-rw-r--r--drivers/nfc/pn533/uart.c4
-rw-r--r--drivers/nfc/pn533/usb.c2
-rw-r--r--drivers/nfc/pn544/mei.c8
-rw-r--r--drivers/nfc/port100.c4
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c29
-rw-r--r--drivers/nfc/s3fwrn5/nci.c18
-rw-r--r--drivers/nfc/st-nci/i2c.c4
-rw-r--r--drivers/nfc/st-nci/ndlc.c4
-rw-r--r--drivers/nfc/st-nci/se.c6
-rw-r--r--drivers/nfc/st-nci/spi.c4
-rw-r--r--drivers/nfc/st21nfca/i2c.c4
-rw-r--r--drivers/nfc/st21nfca/se.c4
-rw-r--r--drivers/nfc/st95hf/core.c6
-rw-r--r--drivers/nfc/trf7970a.c8
-rw-r--r--drivers/nvdimm/blk.c5
-rw-r--r--drivers/nvdimm/btt.c5
-rw-r--r--drivers/nvdimm/core.c1
-rw-r--r--drivers/nvdimm/pmem.c36
-rw-r--r--drivers/nvme/host/core.c140
-rw-r--r--drivers/nvme/host/fabrics.c6
-rw-r--r--drivers/nvme/host/fabrics.h8
-rw-r--r--drivers/nvme/host/fc.c34
-rw-r--r--drivers/nvme/host/multipath.c54
-rw-r--r--drivers/nvme/host/nvme.h19
-rw-r--r--drivers/nvme/host/pci.c58
-rw-r--r--drivers/nvme/host/rdma.c28
-rw-r--r--drivers/nvme/host/tcp.c29
-rw-r--r--drivers/nvme/host/zns.c2
-rw-r--r--drivers/nvme/target/admin-cmd.c18
-rw-r--r--drivers/nvme/target/configfs.c41
-rw-r--r--drivers/nvme/target/core.c18
-rw-r--r--drivers/nvme/target/discovery.c19
-rw-r--r--drivers/nvme/target/fabrics-cmd.c3
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c5
-rw-r--r--drivers/nvme/target/io-cmd-file.c4
-rw-r--r--drivers/nvme/target/loop.c6
-rw-r--r--drivers/nvme/target/nvmet.h6
-rw-r--r--drivers/nvme/target/rdma.c31
-rw-r--r--drivers/nvme/target/tcp.c23
-rw-r--r--drivers/nvmem/core.c174
-rw-r--r--drivers/nvmem/imx-ocotp.c25
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c22
-rw-r--r--drivers/of/fdt.c52
-rw-r--r--drivers/of/kobj.c4
-rw-r--r--drivers/of/of_net.c145
-rw-r--r--drivers/of/of_numa.c2
-rw-r--r--drivers/of/of_private.h10
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/platform.c7
-rw-r--r--drivers/of/unittest-data/Makefile8
-rw-r--r--drivers/of/unittest-data/tests-interrupts.dtsi19
-rw-r--r--drivers/of/unittest.c24
-rw-r--r--drivers/pci/pci-acpi.c74
-rw-r--r--drivers/pci/pci-mid.c37
-rw-r--r--drivers/pci/pci-sysfs.c6
-rw-r--r--drivers/pci/pci.c156
-rw-r--r--drivers/pci/pci.h96
-rw-r--r--drivers/pcmcia/db1xxx_ss.c1
-rw-r--r--drivers/pcmcia/pcmcia_cis.c5
-rw-r--r--drivers/perf/Kconfig12
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c2
-rw-r--r--drivers/perf/qcom_l2_pmu.c7
-rw-r--r--drivers/perf/thunderx2_pmu.c2
-rw-r--r--drivers/phy/broadcom/Kconfig4
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb3.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-pcie.c6
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c316
-rw-r--r--drivers/phy/hisilicon/Kconfig10
-rw-r--r--drivers/phy/hisilicon/Makefile1
-rw-r--r--drivers/phy/hisilicon/phy-hi3670-pcie.c845
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c10
-rw-r--r--drivers/phy/microchip/sparx5_serdes.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c157
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c21
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c11
-rw-r--r--drivers/phy/samsung/Kconfig16
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c203
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c29
-rw-r--r--drivers/pinctrl/pinctrl-amd.c31
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c4
-rw-r--r--drivers/platform/mellanox/Kconfig12
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c123
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c906
-rw-r--r--drivers/platform/surface/surface3-wmi.c9
-rw-r--r--drivers/platform/surface/surface3_power.c3
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c66
-rw-r--r--drivers/platform/surface/surface_gpe.c13
-rw-r--r--drivers/platform/x86/Kconfig29
-rw-r--r--drivers/platform/x86/Makefile4
-rw-r--r--drivers/platform/x86/acer-wmi.c14
-rw-r--r--drivers/platform/x86/amd-pmc.c152
-rw-r--r--drivers/platform/x86/asus-wmi.c12
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c436
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c76
-rw-r--r--drivers/platform/x86/hp-wmi.c337
-rw-r--r--drivers/platform/x86/hp_accel.c3
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c31
-rw-r--r--drivers/platform/x86/ideapad-laptop.c35
-rw-r--r--drivers/platform/x86/intel/Kconfig16
-rw-r--r--drivers/platform/x86/intel/Makefile1
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c14
-rw-r--r--drivers/platform/x86/intel/ishtp_eclite.c701
-rw-r--r--drivers/platform/x86/lg-laptop.c11
-rw-r--r--drivers/platform/x86/mlx-platform.c1958
-rw-r--r--drivers/platform/x86/nvidia-wmi-ec-backlight.c213
-rw-r--r--drivers/platform/x86/panasonic-laptop.c18
-rw-r--r--drivers/platform/x86/sony-laptop.c46
-rw-r--r--drivers/platform/x86/system76_acpi.c427
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c195
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c25
-rw-r--r--drivers/platform/x86/wmi.c375
-rw-r--r--drivers/pnp/system.c2
-rw-r--r--drivers/powercap/dtpm.c78
-rw-r--r--drivers/powercap/dtpm_cpu.c228
-rw-r--r--drivers/ptp/idt8a340_reg.h720
-rw-r--r--drivers/ptp/ptp_clock.c16
-rw-r--r--drivers/ptp/ptp_clockmatrix.c1452
-rw-r--r--drivers/ptp/ptp_clockmatrix.h109
-rw-r--r--drivers/ptp/ptp_kvm_x86.c4
-rw-r--r--drivers/ptp/ptp_ocp.c1316
-rw-r--r--drivers/regulator/Kconfig15
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/bd71815-regulator.c4
-rw-r--r--drivers/regulator/core.c14
-rw-r--r--drivers/regulator/dummy.c3
-rw-r--r--drivers/regulator/lp872x.c52
-rw-r--r--drivers/regulator/max8973-regulator.c4
-rw-r--r--drivers/regulator/pwm-regulator.c12
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c32
-rw-r--r--drivers/regulator/qcom_smd-regulator.c49
-rw-r--r--drivers/regulator/rtq6752-regulator.c18
-rw-r--r--drivers/regulator/s5m8767.c21
-rw-r--r--drivers/regulator/sy7636a-regulator.c2
-rw-r--r--drivers/regulator/ti-abb-regulator.c31
-rw-r--r--drivers/regulator/tps62360-regulator.c59
-rw-r--r--drivers/regulator/tps80031-regulator.c753
-rw-r--r--drivers/regulator/uniphier-regulator.c4
-rw-r--r--drivers/regulator/vqmmc-ipq4019-regulator.c4
-rw-r--r--drivers/reset/Kconfig8
-rw-r--r--drivers/reset/reset-brcmstb-rescal.c2
-rw-r--r--drivers/reset/reset-microchip-sparx5.c40
-rw-r--r--drivers/reset/reset-socfpga.c26
-rw-r--r--drivers/reset/reset-uniphier-glue.c4
-rw-r--r--drivers/reset/reset-uniphier.c27
-rw-r--r--drivers/reset/tegra/reset-bpmp.c9
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/s390/block/dasd.c9
-rw-r--r--drivers/s390/block/dasd_3990_erp.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c294
-rw-r--r--drivers/s390/block/dasd_eckd.h13
-rw-r--r--drivers/s390/block/dasd_erp.c8
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/block/dasd_int.h11
-rw-r--r--drivers/s390/block/dasd_ioctl.c4
-rw-r--r--drivers/s390/block/dcssblk.c7
-rw-r--r--drivers/s390/cio/qdio_setup.c34
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c136
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c142
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h5
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c2
-rw-r--r--drivers/s390/net/ctcm_fsms.c60
-rw-r--r--drivers/s390/net/ctcm_main.c38
-rw-r--r--drivers/s390/net/ctcm_mpc.c8
-rw-r--r--drivers/s390/net/fsm.c2
-rw-r--r--drivers/s390/net/ism_drv.c2
-rw-r--r--drivers/s390/net/lcs.c123
-rw-r--r--drivers/s390/net/netiucv.c104
-rw-r--r--drivers/s390/net/qeth_core.h4
-rw-r--r--drivers/s390/net/qeth_core_main.c63
-rw-r--r--drivers/s390/net/qeth_l2_main.c33
-rw-r--r--drivers/s390/net/qeth_l3_main.c15
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c1
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c9
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.c8
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.h2
-rw-r--r--drivers/scsi/qedf/qedf.h4
-rw-r--r--drivers/scsi/qedf/qedf_els.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c12
-rw-r--r--drivers/scsi/qedf/qedf_main.c10
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c4
-rw-r--r--drivers/scsi/qedi/qedi_fw.c40
-rw-r--r--drivers/scsi/qedi/qedi_fw_api.c22
-rw-r--r--drivers/scsi/qedi/qedi_fw_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_main.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c14
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h4
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_bsg.c6
-rw-r--r--drivers/scsi/scsi_debug.c10
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/scsi_ioctl.c8
-rw-r--r--drivers/scsi/scsi_lib.c32
-rw-r--r--drivers/scsi/scsi_scan.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c9
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/sd.c128
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/sd_dif.c2
-rw-r--r--drivers/scsi/sg.c11
-rw-r--r--drivers/scsi/sr.c5
-rw-r--r--drivers/scsi/st.c7
-rw-r--r--drivers/scsi/storvsc_drv.c32
-rw-r--r--drivers/scsi/ufs/ufs-exynos.c6
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.c32
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.h9
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c33
-rw-r--r--drivers/scsi/ufs/ufshcd.c29
-rw-r--r--drivers/scsi/ufs/ufshcd.h6
-rw-r--r--drivers/scsi/ufs/ufshpb.c287
-rw-r--r--drivers/scsi/ufs/ufshpb.h2
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/soc/amlogic/meson-canvas.c4
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c4
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c1
-rw-r--r--drivers/soc/aspeed/Kconfig10
-rw-r--r--drivers/soc/aspeed/Makefile9
-rw-r--r--drivers/soc/aspeed/aspeed-uart-routing.c603
-rw-r--r--drivers/soc/bcm/bcm63xx/bcm-pmb.c4
-rw-r--r--drivers/soc/bcm/bcm63xx/bcm63xx-power.c4
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c2
-rw-r--r--drivers/soc/fsl/Kconfig1
-rw-r--r--drivers/soc/fsl/dpio/dpio-cmd.h3
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c1
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c119
-rw-r--r--drivers/soc/fsl/dpio/dpio.c1
-rw-r--r--drivers/soc/fsl/dpio/dpio.h2
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c66
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.h13
-rw-r--r--drivers/soc/fsl/guts.c4
-rw-r--r--drivers/soc/fsl/rcpm.c7
-rw-r--r--drivers/soc/imx/Kconfig1
-rw-r--r--drivers/soc/imx/Makefile1
-rw-r--r--drivers/soc/imx/gpcv2.c134
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c523
-rw-r--r--drivers/soc/mediatek/mt8192-mmsys.h76
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c79
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.h2
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c35
-rw-r--r--drivers/soc/qcom/Kconfig21
-rw-r--r--drivers/soc/qcom/Makefile2
-rw-r--r--drivers/soc/qcom/apr.c289
-rw-r--r--drivers/soc/qcom/cpr.c4
-rw-r--r--drivers/soc/qcom/llcc-qcom.c18
-rw-r--r--drivers/soc/qcom/ocmem.c4
-rw-r--r--drivers/soc/qcom/pdr_interface.c12
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c4
-rw-r--r--drivers/soc/qcom/qcom_aoss.c165
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c4
-rw-r--r--drivers/soc/qcom/qcom_stats.c277
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c4
-rw-r--r--drivers/soc/qcom/rpmhpd.c36
-rw-r--r--drivers/soc/qcom/rpmpd.c24
-rw-r--r--drivers/soc/qcom/smd-rpm.c2
-rw-r--r--drivers/soc/qcom/smem.c57
-rw-r--r--drivers/soc/qcom/smp2p.c154
-rw-r--r--drivers/soc/qcom/socinfo.c18
-rw-r--r--drivers/soc/qcom/spm.c279
-rw-r--r--drivers/soc/renesas/Kconfig7
-rw-r--r--drivers/soc/renesas/renesas-soc.c7
-rw-r--r--drivers/soc/samsung/Kconfig5
-rw-r--r--drivers/soc/samsung/Makefile3
-rw-r--r--drivers/soc/samsung/exynos-chipid.c94
-rw-r--r--drivers/soc/samsung/exynos5422-asv.c1
-rw-r--r--drivers/soc/samsung/pm_domains.c1
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c4
-rw-r--r--drivers/soc/tegra/Makefile1
-rw-r--r--drivers/soc/tegra/ari-tegra186.c80
-rw-r--r--drivers/soc/tegra/pmc.c28
-rw-r--r--drivers/soundwire/bus.c2
-rw-r--r--drivers/soundwire/debugfs.c2
-rw-r--r--drivers/soundwire/qcom.c27
-rw-r--r--drivers/spi/Kconfig26
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel-quadspi.c2
-rw-r--r--drivers/spi/spi-altera-dfl.c2
-rw-r--r--drivers/spi/spi-altera-platform.c2
-rw-r--r--drivers/spi/spi-amd.c113
-rw-r--r--drivers/spi/spi-at91-usart.c27
-rw-r--r--drivers/spi/spi-bcm-qspi.c193
-rw-r--r--drivers/spi/spi-cadence-quadspi.c214
-rw-r--r--drivers/spi/spi-cadence-xspi.c642
-rw-r--r--drivers/spi/spi-fsi.c121
-rw-r--r--drivers/spi/spi-geni-qcom.c254
-rw-r--r--drivers/spi/spi-ingenic.c482
-rw-r--r--drivers/spi/spi-mtk-nor.c2
-rw-r--r--drivers/spi/spi-orion.c1
-rw-r--r--drivers/spi/spi-pl022.c5
-rw-r--r--drivers/spi/spi-rpc-if.c4
-rw-r--r--drivers/spi/spi-rspi.c1
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/spi/spi-stm32-qspi.c2
-rw-r--r--drivers/spi/spi-tegra20-slink.c6
-rw-r--r--drivers/spi/spi-tegra210-quad.c4
-rw-r--r--drivers/spi/spi-tle62x0.c2
-rw-r--r--drivers/spi/spi.c278
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c88
-rw-r--r--drivers/staging/fbtft/fbtft-core.c11
-rw-r--r--drivers/staging/fbtft/fbtft.h8
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c8
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c4
-rw-r--r--drivers/staging/iio/cdc/ad7746.c4
-rw-r--r--drivers/staging/iio/frequency/ad9832.c82
-rw-r--r--drivers/staging/ks7010/Kconfig3
-rw-r--r--drivers/staging/ks7010/ks_hostif.c2
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c37
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c70
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c12
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c2
-rw-r--r--drivers/staging/media/hantro/hantro_g1_regs.h2
-rw-r--r--drivers/staging/media/hantro/hantro_g1_vp8_dec.c3
-rw-r--r--drivers/staging/media/hantro/hantro_g2_hevc_dec.c52
-rw-r--r--drivers/staging/media/hantro/hantro_hevc.c21
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h4
-rw-r--r--drivers/staging/media/imx/TODO5
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c23
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c9
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c6
-rw-r--r--drivers/staging/media/imx/imx-media-of.c6
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c17
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c24
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c16
-rw-r--r--drivers/staging/media/imx/imx8mq-mipi-csi2.c16
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h7
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.c7
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.h2
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c19
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.h1
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c13
-rw-r--r--drivers/staging/media/ipu3/ipu3.h12
-rw-r--r--drivers/staging/media/meson/vdec/esparser.h6
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c7
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h16
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.h3
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c5
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c44
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c56
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c113
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c100
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c7
-rw-r--r--drivers/staging/media/tegra-vde/dmabuf-cache.c3
-rw-r--r--drivers/staging/media/tegra-video/vi.c17
-rw-r--r--drivers/staging/most/dim2/Makefile2
-rw-r--r--drivers/staging/most/dim2/dim2.c115
-rw-r--r--drivers/staging/most/dim2/sysfs.c49
-rw-r--r--drivers/staging/most/dim2/sysfs.h11
-rw-r--r--drivers/staging/most/net/net.c2
-rw-r--r--drivers/staging/mt7621-dma/hsdma-mt7621.c6
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts3
-rw-r--r--drivers/staging/mt7621-dts/gbpc2.dts1
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi74
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c2
-rw-r--r--drivers/staging/octeon/ethernet.c4
-rw-r--r--drivers/staging/pi433/pi433_if.c18
-rw-r--r--drivers/staging/pi433/pi433_if.h23
-rw-r--r--drivers/staging/qlge/qlge_main.c30
-rw-r--r--drivers/staging/qlge/qlge_mpi.c2
-rw-r--r--drivers/staging/r8188eu/Kconfig10
-rw-r--r--drivers/staging/r8188eu/Makefile155
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c607
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c3
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c618
-rw-r--r--drivers/staging/r8188eu/core/rtw_debug.c904
-rw-r--r--drivers/staging/r8188eu/core/rtw_efuse.c582
-rw-r--r--drivers/staging/r8188eu/core/rtw_ieee80211.c339
-rw-r--r--drivers/staging/r8188eu/core/rtw_io.c299
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c397
-rw-r--r--drivers/staging/r8188eu/core/rtw_iol.c34
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c1189
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c126
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c386
-rw-r--r--drivers/staging/r8188eu/core/rtw_mp.c935
-rw-r--r--drivers/staging/r8188eu/core/rtw_mp_ioctl.c1170
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c43
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c140
-rw-r--r--drivers/staging/r8188eu/core/rtw_recv.c116
-rw-r--r--drivers/staging/r8188eu/core/rtw_rf.c17
-rw-r--r--drivers/staging/r8188eu/core/rtw_security.c197
-rw-r--r--drivers/staging/r8188eu/core/rtw_sreset.c62
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c34
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c157
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c121
-rw-r--r--drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c22
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c32
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c10
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c15
-rw-r--r--drivers/staging/r8188eu/hal/HalPhyRf_8188e.c171
-rw-r--r--drivers/staging/r8188eu/hal/hal_com.c26
-rw-r--r--drivers/staging/r8188eu/hal/hal_intf.c391
-rw-r--r--drivers/staging/r8188eu/hal/odm.c1188
-rw-r--r--drivers/staging/r8188eu/hal/odm_HWConfig.c393
-rw-r--r--drivers/staging/r8188eu/hal/odm_RTL8188E.c31
-rw-r--r--drivers/staging/r8188eu/hal/odm_RegConfig8188E.c8
-rw-r--r--drivers/staging/r8188eu/hal/odm_interface.c85
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c48
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_dm.c93
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c310
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_mp.c798
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c215
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rf6052.c226
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c2
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_sreset.c27
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_recv.c4
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c60
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c328
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c256
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPhyCfg.h91
-rw-r--r--drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h2
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_FW.h16
-rw-r--r--drivers/staging/r8188eu/include/HalVerDef.h70
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h37
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h312
-rw-r--r--drivers/staging/r8188eu/include/ieee80211.h77
-rw-r--r--drivers/staging/r8188eu/include/ioctl_cfg80211.h2
-rw-r--r--drivers/staging/r8188eu/include/mp_custom_oid.h333
-rw-r--r--drivers/staging/r8188eu/include/odm.h457
-rw-r--r--drivers/staging/r8188eu/include/odm_HWConfig.h11
-rw-r--r--drivers/staging/r8188eu/include/odm_RTL8188E.h2
-rw-r--r--drivers/staging/r8188eu/include/odm_RegConfig8188E.h3
-rw-r--r--drivers/staging/r8188eu/include/odm_RegDefine11AC.h29
-rw-r--r--drivers/staging/r8188eu/include/odm_RegDefine11N.h112
-rw-r--r--drivers/staging/r8188eu/include/odm_interface.h88
-rw-r--r--drivers/staging/r8188eu/include/odm_precomp.h22
-rw-r--r--drivers/staging/r8188eu/include/odm_reg.h89
-rw-r--r--drivers/staging/r8188eu/include/odm_types.h24
-rw-r--r--drivers/staging/r8188eu/include/osdep_intf.h5
-rw-r--r--drivers/staging/r8188eu/include/osdep_service.h42
-rw-r--r--drivers/staging/r8188eu/include/recv_osdep.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_cmd.h16
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_dm.h13
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h102
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_led.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_rf.h1
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h4
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_sreset.h2
-rw-r--r--drivers/staging/r8188eu/include/rtw_ap.h11
-rw-r--r--drivers/staging/r8188eu/include/rtw_br_ext.h3
-rw-r--r--drivers/staging/r8188eu/include/rtw_cmd.h27
-rw-r--r--drivers/staging/r8188eu/include/rtw_debug.h156
-rw-r--r--drivers/staging/r8188eu/include/rtw_eeprom.h57
-rw-r--r--drivers/staging/r8188eu/include/rtw_efuse.h21
-rw-r--r--drivers/staging/r8188eu/include/rtw_io.h87
-rw-r--r--drivers/staging/r8188eu/include/rtw_ioctl_rtl.h63
-rw-r--r--drivers/staging/r8188eu/include/rtw_ioctl_set.h8
-rw-r--r--drivers/staging/r8188eu/include/rtw_iol.h5
-rw-r--r--drivers/staging/r8188eu/include/rtw_led.h20
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme.h11
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h14
-rw-r--r--drivers/staging/r8188eu/include/rtw_mp.h474
-rw-r--r--drivers/staging/r8188eu/include/rtw_mp_ioctl.h242
-rw-r--r--drivers/staging/r8188eu/include/rtw_mp_phy_regdef.h1063
-rw-r--r--drivers/staging/r8188eu/include/rtw_p2p.h1
-rw-r--r--drivers/staging/r8188eu/include/rtw_pwrctrl.h130
-rw-r--r--drivers/staging/r8188eu/include/rtw_recv.h6
-rw-r--r--drivers/staging/r8188eu/include/rtw_rf.h12
-rw-r--r--drivers/staging/r8188eu/include/rtw_security.h20
-rw-r--r--drivers/staging/r8188eu/include/rtw_sreset.h34
-rw-r--r--drivers/staging/r8188eu/include/rtw_xmit.h6
-rw-r--r--drivers/staging/r8188eu/include/sta_info.h7
-rw-r--r--drivers/staging/r8188eu/include/usb_ops.h5
-rw-r--r--drivers/staging/r8188eu/include/usb_ops_linux.h8
-rw-r--r--drivers/staging/r8188eu/include/usb_osintf.h5
-rw-r--r--drivers/staging/r8188eu/include/wifi.h52
-rw-r--r--drivers/staging/r8188eu/include/xmit_osdep.h2
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c2247
-rw-r--r--drivers/staging/r8188eu/os_dep/mlme_linux.c6
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c399
-rw-r--r--drivers/staging/r8188eu/os_dep/osdep_service.c82
-rw-r--r--drivers/staging/r8188eu/os_dep/recv_linux.c14
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c285
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_ops_linux.c40
-rw-r--r--drivers/staging/r8188eu/os_dep/xmit_linux.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c7
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c9
-rw-r--r--drivers/staging/rtl8192u/r8192U.h3
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c36
-rw-r--r--drivers/staging/rtl8712/ieee80211.h4
-rw-r--r--drivers/staging/rtl8712/os_intfs.c9
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h10
-rw-r--r--drivers/staging/rtl8712/usb_intf.c6
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c2
-rw-r--r--drivers/staging/rtl8723bs/Kconfig1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c23
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c210
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c24
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c79
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c22
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c48
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c49
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h6
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c26
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c34
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c8
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c11
-rw-r--r--drivers/staging/rts5208/ms.c42
-rw-r--r--drivers/staging/rts5208/rtsx.c2
-rw-r--r--drivers/staging/rts5208/rtsx_card.c8
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c16
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c106
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c6
-rw-r--r--drivers/staging/rts5208/sd.c68
-rw-r--r--drivers/staging/rts5208/xd.c48
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c298
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h52
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c20
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c771
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h107
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c182
-rw-r--r--drivers/staging/vt6655/baseband.c74
-rw-r--r--drivers/staging/vt6655/baseband.h2
-rw-r--r--drivers/staging/vt6655/card.c98
-rw-r--r--drivers/staging/vt6655/channel.c12
-rw-r--r--drivers/staging/vt6655/device.h10
-rw-r--r--drivers/staging/vt6655/device_main.c162
-rw-r--r--drivers/staging/vt6655/dpc.c2
-rw-r--r--drivers/staging/vt6655/key.c2
-rw-r--r--drivers/staging/vt6655/mac.c50
-rw-r--r--drivers/staging/vt6655/mac.h6
-rw-r--r--drivers/staging/vt6655/power.c24
-rw-r--r--drivers/staging/vt6655/rf.c140
-rw-r--r--drivers/staging/vt6655/rf.h2
-rw-r--r--drivers/staging/vt6655/rxtx.c64
-rw-r--r--drivers/staging/wfx/bh.c37
-rw-r--r--drivers/staging/wfx/bh.h4
-rw-r--r--drivers/staging/wfx/bus_sdio.c25
-rw-r--r--drivers/staging/wfx/bus_spi.c22
-rw-r--r--drivers/staging/wfx/data_rx.c7
-rw-r--r--drivers/staging/wfx/data_rx.h4
-rw-r--r--drivers/staging/wfx/data_tx.c87
-rw-r--r--drivers/staging/wfx/data_tx.h6
-rw-r--r--drivers/staging/wfx/debug.c56
-rw-r--r--drivers/staging/wfx/debug.h2
-rw-r--r--drivers/staging/wfx/fwio.c26
-rw-r--r--drivers/staging/wfx/fwio.h2
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h14
-rw-r--r--drivers/staging/wfx/hif_api_general.h25
-rw-r--r--drivers/staging/wfx/hif_api_mib.h85
-rw-r--r--drivers/staging/wfx/hif_rx.c23
-rw-r--r--drivers/staging/wfx/hif_rx.h3
-rw-r--r--drivers/staging/wfx/hif_tx.c60
-rw-r--r--drivers/staging/wfx/hif_tx.h6
-rw-r--r--drivers/staging/wfx/hif_tx_mib.c14
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h2
-rw-r--r--drivers/staging/wfx/hwio.c6
-rw-r--r--drivers/staging/wfx/hwio.h20
-rw-r--r--drivers/staging/wfx/key.c30
-rw-r--r--drivers/staging/wfx/key.h4
-rw-r--r--drivers/staging/wfx/main.c37
-rw-r--r--drivers/staging/wfx/main.h3
-rw-r--r--drivers/staging/wfx/queue.c43
-rw-r--r--drivers/staging/wfx/queue.h6
-rw-r--r--drivers/staging/wfx/scan.c51
-rw-r--r--drivers/staging/wfx/scan.h4
-rw-r--r--drivers/staging/wfx/sta.c118
-rw-r--r--drivers/staging/wfx/sta.h8
-rw-r--r--drivers/staging/wfx/traces.h2
-rw-r--r--drivers/staging/wfx/wfx.h14
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h2
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c24
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c2
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h2
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h2
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h2
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h2
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h2
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h2
-rw-r--r--drivers/staging/wlan-ng/p80211req.c2
-rw-r--r--drivers/staging/wlan-ng/p80211req.h2
-rw-r--r--drivers/staging/wlan-ng/p80211types.h2
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h2
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c2
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c6
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c3
-rw-r--r--drivers/target/target_core_file.c5
-rw-r--r--drivers/target/target_core_iblock.c6
-rw-r--r--drivers/target/target_core_pscsi.c7
-rw-r--r--drivers/tee/optee/Makefile5
-rw-r--r--drivers/tee/optee/call.c445
-rw-r--r--drivers/tee/optee/core.c719
-rw-r--r--drivers/tee/optee/ffa_abi.c911
-rw-r--r--drivers/tee/optee/optee_ffa.h153
-rw-r--r--drivers/tee/optee/optee_msg.h27
-rw-r--r--drivers/tee/optee/optee_private.h157
-rw-r--r--drivers/tee/optee/rpc.c237
-rw-r--r--drivers/tee/optee/shm_pool.c101
-rw-r--r--drivers/tee/optee/shm_pool.h14
-rw-r--r--drivers/tee/optee/smc_abi.c1362
-rw-r--r--drivers/tee/tee_shm.c3
-rw-r--r--drivers/thermal/gov_user_space.c9
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c9
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3401_thermal.c8
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c36
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c18
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c8
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c8
-rw-r--r--drivers/thermal/qcom/Kconfig2
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c41
-rw-r--r--drivers/thermal/qcom/tsens.c29
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c113
-rw-r--r--drivers/thermal/rockchip_thermal.c2
-rw-r--r--drivers/thermal/thermal_core.c22
-rw-r--r--drivers/thermal/thermal_mmio.c2
-rw-r--r--drivers/thermal/thermal_netlink.c11
-rw-r--r--drivers/thermal/thermal_netlink.h8
-rw-r--r--drivers/thermal/thermal_sysfs.c3
-rw-r--r--drivers/thermal/uniphier_thermal.c4
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/xdomain.c2
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/uio/uio_hv_generic.c18
-rw-r--r--drivers/usb/atm/usbatm.c4
-rw-r--r--drivers/usb/chipidea/core.c23
-rw-r--r--drivers/usb/chipidea/udc.c8
-rw-r--r--drivers/usb/class/cdc-acm.c1
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/devio.c144
-rw-r--r--drivers/usb/core/hcd.c6
-rw-r--r--drivers/usb/dwc2/core.h19
-rw-r--r--drivers/usb/dwc2/debugfs.c4
-rw-r--r--drivers/usb/dwc2/drd.c24
-rw-r--r--drivers/usb/dwc2/gadget.c1
-rw-r--r--drivers/usb/dwc2/hcd.c12
-rw-r--r--drivers/usb/dwc2/params.c75
-rw-r--r--drivers/usb/dwc3/Kconfig7
-rw-r--r--drivers/usb/dwc3/core.c29
-rw-r--r--drivers/usb/dwc3/core.h25
-rw-r--r--drivers/usb/dwc3/gadget.c14
-rw-r--r--drivers/usb/gadget/configfs.c26
-rw-r--r--drivers/usb/gadget/epautoconf.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c97
-rw-r--r--drivers/usb/gadget/function/f_phonet.c5
-rw-r--r--drivers/usb/gadget/function/f_uac1.c1
-rw-r--r--drivers/usb/gadget/function/f_uac2.c24
-rw-r--r--drivers/usb/gadget/function/f_uvc.c8
-rw-r--r--drivers/usb/gadget/function/u_audio.c96
-rw-r--r--drivers/usb/gadget/function/u_audio.h10
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/function/u_uac2.h1
-rw-r--r--drivers/usb/gadget/function/uvc.h6
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c52
-rw-r--r--drivers/usb/gadget/function/uvc_video.c71
-rw-r--r--drivers/usb/gadget/function/uvc_video.h2
-rw-r--r--drivers/usb/gadget/legacy/hid.c4
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/usb/gadget/udc/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.h1
-rw-r--r--drivers/usb/gadget/udc/core.c10
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c6
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c5
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c25
-rw-r--r--drivers/usb/host/Kconfig6
-rw-r--r--drivers/usb/host/ehci-atmel.c8
-rw-r--r--drivers/usb/host/ehci-hcd.c13
-rw-r--r--drivers/usb/host/ehci-hub.c11
-rw-r--r--drivers/usb/host/ehci-mem.c3
-rw-r--r--drivers/usb/host/ehci-mv.c2
-rw-r--r--drivers/usb/host/ehci-platform.c6
-rw-r--r--drivers/usb/host/ehci.h1
-rw-r--r--drivers/usb/host/fotg210-hcd.c5
-rw-r--r--drivers/usb/host/max3421-hcd.c25
-rw-r--r--drivers/usb/host/ohci-hcd.c3
-rw-r--r--drivers/usb/host/ohci-hub.c3
-rw-r--r--drivers/usb/host/ohci-tmio.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c2
-rw-r--r--drivers/usb/host/xhci-mtk.c2
-rw-r--r--drivers/usb/host/xhci-pci.c16
-rw-r--r--drivers/usb/misc/iowarrior.c8
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c2
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/mediatek.c1
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/sunxi.c8
-rw-r--r--drivers/usb/musb/tusb6010.c5
-rw-r--r--drivers/usb/phy/phy-tahvo.c4
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c198
-rw-r--r--drivers/usb/serial/ch341.c85
-rw-r--r--drivers/usb/serial/cp210x.c109
-rw-r--r--drivers/usb/serial/f81232.c96
-rw-r--r--drivers/usb/serial/ftdi_sio.c53
-rw-r--r--drivers/usb/serial/keyspan.c15
-rw-r--r--drivers/usb/serial/keyspan_pda.c67
-rw-r--r--drivers/usb/serial/kl5kusb105.c115
-rw-r--r--drivers/usb/serial/usb-serial.c59
-rw-r--r--drivers/usb/storage/unusual_devs.h10
-rw-r--r--drivers/usb/typec/Kconfig4
-rw-r--r--drivers/usb/typec/altmodes/Kconfig1
-rw-r--r--drivers/usb/typec/altmodes/displayport.c58
-rw-r--r--drivers/usb/typec/hd3ss3220.c8
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c2
-rw-r--r--drivers/usb/typec/tipd/core.c223
-rw-r--r--drivers/usb/typec/tipd/tps6598x.h12
-rw-r--r--drivers/usb/typec/tipd/trace.h23
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c337
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h3
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c2
-rw-r--r--drivers/usb/usb-skeleton.c2
-rw-r--r--drivers/vdpa/Kconfig8
-rw-r--r--drivers/vdpa/Makefile1
-rw-r--r--drivers/vdpa/alibaba/Makefile3
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c553
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c3
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h10
-rw-r--r--drivers/vdpa/mlx5/core/mr.c8
-rw-r--r--drivers/vdpa/mlx5/core/resources.c13
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c204
-rw-r--r--drivers/vdpa/vdpa.c261
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c38
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c32
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c12
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c62
-rw-r--r--drivers/vfio/mdev/mdev_driver.c45
-rw-r--r--drivers/vfio/mdev/vfio_mdev.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c13
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c234
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c13
-rw-r--r--drivers/vfio/vfio.c622
-rw-r--r--drivers/vfio/vfio.h72
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c6
-rw-r--r--drivers/vfio/vfio_iommu_type1.c256
-rw-r--r--drivers/vhost/vdpa.c3
-rw-r--r--drivers/video/fbdev/efifb.c21
-rw-r--r--drivers/virt/acrn/hsm.c49
-rw-r--r--drivers/virt/acrn/hypercall.h52
-rw-r--r--drivers/virt/nitro_enclaves/Kconfig8
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev.c17
-rw-r--r--drivers/virt/nitro_enclaves/ne_pci_dev.c2
-rw-r--r--drivers/virt/nitro_enclaves/ne_pci_dev.h8
-rw-r--r--drivers/virtio/Kconfig10
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio_dma_buf.c1
-rw-r--r--drivers/virtio/virtio_pci_common.c58
-rw-r--r--drivers/virtio/virtio_pci_common.h16
-rw-r--r--drivers/virtio/virtio_pci_legacy.c106
-rw-r--r--drivers/virtio/virtio_pci_legacy_dev.c220
-rw-r--r--drivers/virtio/virtio_pci_modern.c6
-rw-r--r--drivers/virtio/virtio_ring.c92
-rw-r--r--drivers/virtio/virtio_vdpa.c19
-rw-r--r--drivers/watchdog/iTCO_wdt.c12
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c2
-rw-r--r--drivers/watchdog/mtk_wdt.c6
-rw-r--r--drivers/watchdog/omap_wdt.c6
-rw-r--r--drivers/watchdog/sbsa_gwdt.c5
-rw-r--r--drivers/xen/gntdev-dmabuf.c3
4352 files changed, 334920 insertions, 105391 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 1da360c51d66..cdbdf68bd98f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -71,7 +71,7 @@ config ACPI_DEBUGGER
if ACPI_DEBUGGER
config ACPI_DEBUGGER_USER
- tristate "Userspace debugger accessiblity"
+ tristate "Userspace debugger accessibility"
depends on DEBUG_FS
help
Export /sys/kernel/debug/acpi/acpidbg for userspace utilities
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index b0cb662233f1..81aff651a0d4 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -61,6 +61,7 @@ static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
static int ac_sleep_before_get_state_ms;
static int ac_check_pmic = 1;
+static int ac_only;
static struct acpi_driver acpi_ac_driver = {
.name = "ac",
@@ -93,6 +94,11 @@ static int acpi_ac_get_state(struct acpi_ac *ac)
if (!ac)
return -EINVAL;
+ if (ac_only) {
+ ac->state = 1;
+ return 0;
+ }
+
status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
&ac->state);
if (ACPI_FAILURE(status)) {
@@ -200,6 +206,12 @@ static int __init ac_do_not_check_pmic_quirk(const struct dmi_system_id *d)
return 0;
}
+static int __init ac_only_quirk(const struct dmi_system_id *d)
+{
+ ac_only = 1;
+ return 0;
+}
+
/* Please keep this list alphabetically sorted */
static const struct dmi_system_id ac_dmi_table[] __initconst = {
{
@@ -210,6 +222,13 @@ static const struct dmi_system_id ac_dmi_table[] __initconst = {
},
},
{
+ /* Kodlix GK45 returning incorrect state */
+ .callback = ac_only_quirk,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "GK45"),
+ },
+ },
+ {
/* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
.callback = ac_do_not_check_pmic_quirk,
.matches = {
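For context, a minimal sketch of the DMI quirk pattern that the GK45 entry above extends; the vendor and board names here are hypothetical, not from the patch:

#include <linux/dmi.h>

static int example_quirk_applies;

static int __init example_quirk_cb(const struct dmi_system_id *d)
{
	example_quirk_applies = 1;	/* remember that the quirk matched */
	return 0;
}

static const struct dmi_system_id example_dmi_table[] __initconst = {
	{
		.callback = example_quirk_cb,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),	/* hypothetical */
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBoard"),	/* hypothetical */
		},
	},
	{ }	/* terminating entry */
};

/* Run once at init time: dmi_check_system(example_dmi_table); */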
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 30b1f511c2af..bcae0f03572b 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -712,14 +712,13 @@ static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
- struct acpi_device *adev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct lpss_private_data *pdata;
unsigned long flags;
int ret;
- ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
- if (WARN_ON(ret))
- return ret;
+ if (WARN_ON(!adev))
+ return -ENODEV;
spin_lock_irqsave(&dev->power.lock, flags);
if (pm_runtime_suspended(dev)) {
@@ -732,6 +731,7 @@ static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
goto out;
}
*val = __lpss_reg_read(pdata, reg);
+ ret = 0;
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -750,7 +750,7 @@ static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
+ return sysfs_emit(buf, "%08x\n", ltr_value);
}
static ssize_t lpss_ltr_mode_show(struct device *dev,
@@ -1266,7 +1266,8 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
if (!id || !id->driver_data)
return 0;
- if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
return 0;
pdata = acpi_driver_data(adev);
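A minimal sketch of the ACPI_COMPANION() idiom that acpi_lpss.c now uses in place of acpi_bus_get_device(); the function name is hypothetical:

#include <linux/acpi.h>

/* ACPI_COMPANION() returns the cached ACPI companion of a struct device
 * (or NULL if there is none), so no handle-to-device lookup is needed.
 */
static int example_use_companion(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return -ENODEV;

	/* ... use adev, e.g. acpi_driver_data(adev) ... */
	return 0;
}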
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 8f2dc176bb41..ffdcfcd4a10d 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -156,8 +156,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */
{"BRI1400"}, /* Boca Research 33,600 ACF Modem */
{"BRI3400"}, /* Boca 33.6 Kbps Internal FD34FSVD */
- {"BRI0A49"}, /* Boca 33.6 Kbps Internal FD34FSVD */
- {"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */
{"CPI4050"}, /* Computer Peripherals Inc. EuroViVa CommCenter-33.6 SP PnP */
{"CTL3001"}, /* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */
{"CTL3011"}, /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index d41b810e367c..4366d36ef119 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -226,6 +226,8 @@ extern struct acpi_bit_register_info
acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a_s0);
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b_s0);
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 803402aefaeb..808fdf54aeeb 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -147,17 +147,13 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
{
- acpi_status status;
u8 sleep_type_value;
ACPI_FUNCTION_TRACE(hw_extended_wake_prep);
- status = acpi_get_sleep_type_data(ACPI_STATE_S0,
- &acpi_gbl_sleep_type_a,
- &acpi_gbl_sleep_type_b);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) {
sleep_type_value =
- ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
+ ((acpi_gbl_sleep_type_a_s0 << ACPI_X_SLEEP_TYPE_POSITION) &
ACPI_X_SLEEP_TYPE_MASK);
(void)acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE),
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 14baa13bf848..34a3825f25d3 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -179,7 +179,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
{
- acpi_status status;
+ acpi_status status = AE_OK;
struct acpi_bit_register_info *sleep_type_reg_info;
struct acpi_bit_register_info *sleep_enable_reg_info;
u32 pm1a_control;
@@ -192,10 +192,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
* This is unclear from the ACPI Spec, but it is required
* by some machines.
*/
- status = acpi_get_sleep_type_data(ACPI_STATE_S0,
- &acpi_gbl_sleep_type_a,
- &acpi_gbl_sleep_type_b);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) {
sleep_type_reg_info =
acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE);
sleep_enable_reg_info =
@@ -216,9 +213,9 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
/* Insert the SLP_TYP bits */
- pm1a_control |= (acpi_gbl_sleep_type_a <<
+ pm1a_control |= (acpi_gbl_sleep_type_a_s0 <<
sleep_type_reg_info->bit_position);
- pm1b_control |= (acpi_gbl_sleep_type_b <<
+ pm1b_control |= (acpi_gbl_sleep_type_b_s0 <<
sleep_type_reg_info->bit_position);
/* Write the control registers and ignore any errors */
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 89b12afed564..e4cde23a2906 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -217,6 +217,13 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
return_ACPI_STATUS(status);
}
+ status = acpi_get_sleep_type_data(ACPI_STATE_S0,
+ &acpi_gbl_sleep_type_a_s0,
+ &acpi_gbl_sleep_type_b_s0);
+ if (ACPI_FAILURE(status)) {
+ acpi_gbl_sleep_type_a_s0 = ACPI_SLEEP_TYPE_INVALID;
+ }
+
/* Execute the _PTS method (Prepare To Sleep) */
arg_list.count = 1;
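Taken together, the hwesleep.c, hwsleep.c and hwxfsleep.c hunks replace a repeated S0 sleep-type evaluation at wake time with values cached once during sleep preparation. A condensed, self-contained sketch of that cache-with-sentinel pattern (all names hypothetical):

#include <stdio.h>

#define EXAMPLE_INVALID 0xFFu	/* sentinel: no valid cached value */

static unsigned char cached_type = EXAMPLE_INVALID;

/* Stub standing in for the one-time firmware query. */
static int query_firmware(unsigned char *out)
{
	*out = 0x05;
	return 0;	/* 0 == success */
}

static void prepare_sleep(void)
{
	if (query_firmware(&cached_type) != 0)
		cached_type = EXAMPLE_INVALID;	/* record the failure */
}

static void wake_prep(void)
{
	/* Consumers test the sentinel instead of re-querying firmware. */
	if (cached_type != EXAMPLE_INVALID)
		printf("applying cached sleep type 0x%02x\n", cached_type);
}

int main(void)
{
	prepare_sleep();
	wake_prep();
	return 0;
}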
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 7b8e8bf1e824..8afa1ccaf12e 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -73,6 +73,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2018", NULL, 0, ACPI_OSI_WIN_10_RS4}, /* Windows 10 version 1803 - Added 11/2018 */
{"Windows 2018.2", NULL, 0, ACPI_OSI_WIN_10_RS5}, /* Windows 10 version 1809 - Added 11/2018 */
{"Windows 2019", NULL, 0, ACPI_OSI_WIN_10_19H1}, /* Windows 10 version 1903 - Added 08/2019 */
+ {"Windows 2020", NULL, 0, ACPI_OSI_WIN_10_20H1}, /* Windows 10 version 2004 - Added 08/2021 */
/* Feature Group Strings */
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 2882450c443e..edb2622fd35f 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -28,9 +28,10 @@
#undef pr_fmt
#define pr_fmt(fmt) "EINJ: " fmt
-#define SPIN_UNIT 100 /* 100ns */
-/* Firmware should respond within 1 milliseconds */
-#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+#define SLEEP_UNIT_MIN 1000 /* 1ms */
+#define SLEEP_UNIT_MAX 5000 /* 5ms */
+/* Firmware should respond within 1 second */
+#define FIRMWARE_TIMEOUT (1 * USEC_PER_SEC)
#define ACPI5_VENDOR_BIT BIT(31)
#define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \
ACPI_EINJ_MEMORY_UNCORRECTABLE | \
@@ -171,13 +172,13 @@ static int einj_get_available_error_type(u32 *type)
static int einj_timedout(u64 *t)
{
- if ((s64)*t < SPIN_UNIT) {
+ if ((s64)*t < SLEEP_UNIT_MIN) {
pr_warn(FW_WARN "Firmware does not respond in time\n");
return 1;
}
- *t -= SPIN_UNIT;
- ndelay(SPIN_UNIT);
- touch_nmi_watchdog();
+ *t -= SLEEP_UNIT_MIN;
+ usleep_range(SLEEP_UNIT_MIN, SLEEP_UNIT_MAX);
+
return 0;
}
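Net effect: einj_timedout() now sleeps between polls instead of busy-waiting with ndelay(), and the overall budget grows from 1 ms to 1 s. A sketch of the resulting poll loop, assuming a hypothetical is_done() predicate for the firmware acknowledgement:

	u64 t = FIRMWARE_TIMEOUT;

	while (!is_done()) {		/* is_done() is hypothetical */
		if (einj_timedout(&t))
			return -EIO;	/* firmware never responded */
	}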
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 277f00b288d1..0edc1ed47673 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -86,7 +86,9 @@ static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
return len;
};
-int apei_hest_parse(apei_hest_func_t func, void *data)
+typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
+
+static int apei_hest_parse(apei_hest_func_t func, void *data)
{
struct acpi_hest_header *hest_hdr;
int i, rc, len;
@@ -121,7 +123,6 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
return 0;
}
-EXPORT_SYMBOL_GPL(apei_hest_parse);
/*
* Check if firmware advertises firmware first mode. We need FF bit to be set
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index dae91f906cea..8afa85d6eb6a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -169,7 +169,7 @@ static int acpi_battery_is_charged(struct acpi_battery *battery)
return 1;
/* fallback to using design values for broken batteries */
- if (battery->design_capacity == battery->capacity_now)
+ if (battery->design_capacity <= battery->capacity_now)
return 1;
/* we don't do any sort of metric based on percentages */
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index bd482108310c..a85c351589be 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -43,7 +43,7 @@
#include <acpi/cppc_acpi.h>
struct cppc_pcc_data {
- struct mbox_chan *pcc_channel;
+ struct pcc_mbox_chan *pcc_channel;
void __iomem *pcc_comm_addr;
bool pcc_channel_acquired;
unsigned int deadline_us;
@@ -295,7 +295,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
pcc_ss_data->platform_owns_pcc = true;
/* Ring doorbell */
- ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
+ ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
if (ret < 0) {
pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
pcc_ss_id, cmd, ret);
@@ -308,10 +308,10 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
if (pcc_ss_data->pcc_mrtt)
pcc_ss_data->last_cmd_cmpl_time = ktime_get();
- if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
- mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
+ if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
+ mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
else
- mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
+ mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
end:
if (cmd == CMD_WRITE) {
@@ -493,46 +493,33 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
static int register_pcc_channel(int pcc_ss_idx)
{
- struct acpi_pcct_hw_reduced *cppc_ss;
+ struct pcc_mbox_chan *pcc_chan;
u64 usecs_lat;
if (pcc_ss_idx >= 0) {
- pcc_data[pcc_ss_idx]->pcc_channel =
- pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
+ pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
- if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
+ if (IS_ERR(pcc_chan)) {
pr_err("Failed to find PCC channel for subspace %d\n",
pcc_ss_idx);
return -ENODEV;
}
- /*
- * The PCC mailbox controller driver should
- * have parsed the PCCT (global table of all
- * PCC channels) and stored pointers to the
- * subspace communication region in con_priv.
- */
- cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
-
- if (!cppc_ss) {
- pr_err("No PCC subspace found for %d CPPC\n",
- pcc_ss_idx);
- return -ENODEV;
- }
-
+ pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
/*
* cppc_ss->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
*/
- usecs_lat = NUM_RETRIES * cppc_ss->latency;
+ usecs_lat = NUM_RETRIES * pcc_chan->latency;
pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
- pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
- pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
- pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
+ pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
+ pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
+ pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
pcc_data[pcc_ss_idx]->pcc_comm_addr =
- acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
+ acpi_os_ioremap(pcc_chan->shmem_base_addr,
+ pcc_chan->shmem_size);
if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
pr_err("Failed to ioremap PCC comm region mem for %d\n",
pcc_ss_idx);
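The conversion leans on the PCC mailbox layer now handing out a struct pcc_mbox_chan that already carries the parsed PCCT fields, so CPPC no longer digs them out of con_priv. At the time of this series the structure looked roughly like this (a sketch of the contract, not a verbatim header):

	struct pcc_mbox_chan {
		struct mbox_chan *mchan;	/* underlying mailbox channel */
		u64 shmem_base_addr;		/* shared-memory region from the PCCT */
		u64 shmem_size;
		u32 latency;			/* nominal latency, in us */
		u32 max_access_rate;
		u16 min_turnaround_time;
	};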
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 7cf92158008f..c8e9b962e18c 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -492,7 +492,7 @@ static ssize_t docked_show(struct device *dev,
struct acpi_device *adev = NULL;
acpi_bus_get_device(dock_station->handle, &adev);
- return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
+ return sysfs_emit(buf, "%u\n", acpi_device_enumerated(adev));
}
static DEVICE_ATTR_RO(docked);
@@ -504,7 +504,7 @@ static ssize_t flags_show(struct device *dev,
{
struct dock_station *dock_station = dev->platform_data;
- return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
+ return sysfs_emit(buf, "%d\n", dock_station->flags);
}
static DEVICE_ATTR_RO(flags);
@@ -543,7 +543,7 @@ static ssize_t uid_show(struct device *dev,
if (ACPI_FAILURE(status))
return 0;
- return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
+ return sysfs_emit(buf, "%llx\n", lbuf);
}
static DEVICE_ATTR_RO(uid);
@@ -562,7 +562,7 @@ static ssize_t type_show(struct device *dev,
else
type = "unknown";
- return snprintf(buf, PAGE_SIZE, "%s\n", type);
+ return sysfs_emit(buf, "%s\n", type);
}
static DEVICE_ATTR_RO(type);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 7a33a6d985f8..7cd0009e7ff3 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -17,6 +17,8 @@
#include <linux/rwsem.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/platform_device.h>
#include "internal.h"
@@ -111,13 +113,10 @@ struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
return NULL;
list_for_each_entry(adev, &parent->children, node) {
- unsigned long long addr;
- acpi_status status;
+ acpi_bus_address addr = acpi_device_adr(adev);
int score;
- status = acpi_evaluate_integer(adev->handle, METHOD_NAME__ADR,
- NULL, &addr);
- if (ACPI_FAILURE(status) || addr != address)
+ if (!adev->pnp.type.bus_address || addr != address)
continue;
if (!ret) {
@@ -287,12 +286,13 @@ EXPORT_SYMBOL_GPL(acpi_unbind_one);
void acpi_device_notify(struct device *dev)
{
- struct acpi_bus_type *type = acpi_get_bus_type(dev);
struct acpi_device *adev;
int ret;
ret = acpi_bind_one(dev, NULL);
if (ret) {
+ struct acpi_bus_type *type = acpi_get_bus_type(dev);
+
if (!type)
goto err;
@@ -304,17 +304,26 @@ void acpi_device_notify(struct device *dev)
ret = acpi_bind_one(dev, adev);
if (ret)
goto err;
- }
- adev = ACPI_COMPANION(dev);
- if (dev_is_platform(dev))
- acpi_configure_pmsi_domain(dev);
+ if (type->setup) {
+ type->setup(dev);
+ goto done;
+ }
+ } else {
+ adev = ACPI_COMPANION(dev);
+
+ if (dev_is_pci(dev)) {
+ pci_acpi_setup(dev, adev);
+ goto done;
+ } else if (dev_is_platform(dev)) {
+ acpi_configure_pmsi_domain(dev);
+ }
+ }
- if (type && type->setup)
- type->setup(dev);
- else if (adev->handler && adev->handler->bind)
+ if (adev->handler && adev->handler->bind)
adev->handler->bind(dev);
+done:
acpi_handle_debug(ACPI_HANDLE(dev), "Bound to device %s\n",
dev_name(dev));
@@ -327,16 +336,39 @@ err:
void acpi_device_notify_remove(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- struct acpi_bus_type *type;
if (!adev)
return;
- type = acpi_get_bus_type(dev);
- if (type && type->cleanup)
- type->cleanup(dev);
+ if (dev_is_pci(dev))
+ pci_acpi_cleanup(dev, adev);
else if (adev->handler && adev->handler->unbind)
adev->handler->unbind(dev);
acpi_unbind_one(dev);
}
+
+int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ /*
+ * Skip device objects with device IDs, because they may be in use even
+ * if they are not companions of any physical device objects.
+ */
+ if (adev->pnp.type.hardware_id)
+ return 0;
+
+ mutex_lock(&adev->physical_node_lock);
+
+ /*
+ * Device objects without device IDs are not in use if they have no
+ * corresponding physical device objects.
+ */
+ if (list_empty(&adev->physical_node_list))
+ acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
+
+ mutex_unlock(&adev->physical_node_lock);
+
+ return 0;
+}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index d91b560e8867..8fbdc172864b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -117,6 +117,7 @@ bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev);
int acpi_bus_register_early_device(int type);
+int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used);
/* --------------------------------------------------------------------------
Device Matching and Notification
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index b9863e22b952..112256154880 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -52,7 +52,6 @@ struct acpi_power_resource {
u32 order;
unsigned int ref_count;
u8 state;
- bool wakeup_enabled;
struct mutex resource_lock;
struct list_head dependents;
};
@@ -615,20 +614,19 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
- int result;
u8 state;
mutex_lock(&resource->resource_lock);
- result = acpi_power_get_state(resource, &state);
- if (result) {
- mutex_unlock(&resource->resource_lock);
- return result;
- }
- if (state == ACPI_POWER_RESOURCE_STATE_ON) {
- resource->ref_count++;
- resource->wakeup_enabled = true;
- }
+ /*
+ * Make sure that the power resource state and its reference
+ * counter value are consistent with each other.
+ */
+ if (!resource->ref_count &&
+ !acpi_power_get_state(resource, &state) &&
+ state == ACPI_POWER_RESOURCE_STATE_ON)
+ __acpi_power_off(resource);
+
if (system_level > resource->system_level)
system_level = resource->system_level;
@@ -711,7 +709,6 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
*/
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
{
- struct acpi_power_resource_entry *entry;
int err = 0;
if (!dev || !dev->wakeup.flags.valid)
@@ -722,33 +719,22 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
if (dev->wakeup.prepare_count++)
goto out;
- list_for_each_entry(entry, &dev->wakeup.resources, node) {
- struct acpi_power_resource *resource = entry->resource;
-
- mutex_lock(&resource->resource_lock);
-
- if (!resource->wakeup_enabled) {
- err = acpi_power_on_unlocked(resource);
- if (!err)
- resource->wakeup_enabled = true;
- }
-
- mutex_unlock(&resource->resource_lock);
-
- if (err) {
- dev_err(&dev->dev,
- "Cannot turn wakeup power resources on\n");
- dev->wakeup.flags.valid = 0;
- goto out;
- }
+ err = acpi_power_on_list(&dev->wakeup.resources);
+ if (err) {
+ dev_err(&dev->dev, "Cannot turn on wakeup power resources\n");
+ dev->wakeup.flags.valid = 0;
+ goto out;
}
+
/*
* Passing 3 as the third argument below means the device may be
* put into arbitrary power state afterward.
*/
err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
- if (err)
+ if (err) {
+ acpi_power_off_list(&dev->wakeup.resources);
dev->wakeup.prepare_count = 0;
+ }
out:
mutex_unlock(&acpi_device_lock);
@@ -771,39 +757,33 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
mutex_lock(&acpi_device_lock);
- if (--dev->wakeup.prepare_count > 0)
+ if (dev->wakeup.prepare_count > 1) {
+ dev->wakeup.prepare_count--;
goto out;
+ }
- /*
- * Executing the code below even if prepare_count is already zero when
- * the function is called may be useful, for example for initialisation.
- */
- if (dev->wakeup.prepare_count < 0)
- dev->wakeup.prepare_count = 0;
+ /* Do nothing if wakeup power has not been enabled for this device. */
+ if (!dev->wakeup.prepare_count)
+ goto out;
err = acpi_device_sleep_wake(dev, 0, 0, 0);
if (err)
goto out;
+ /*
+ * All of the power resources in the list need to be turned off even if
+ * there are errors.
+ */
list_for_each_entry(entry, &dev->wakeup.resources, node) {
- struct acpi_power_resource *resource = entry->resource;
-
- mutex_lock(&resource->resource_lock);
-
- if (resource->wakeup_enabled) {
- err = acpi_power_off_unlocked(resource);
- if (!err)
- resource->wakeup_enabled = false;
- }
-
- mutex_unlock(&resource->resource_lock);
+ int ret;
- if (err) {
- dev_err(&dev->dev,
- "Cannot turn wakeup power resources off\n");
- dev->wakeup.flags.valid = 0;
- break;
- }
+ ret = acpi_power_off(entry->resource);
+ if (ret && !err)
+ err = ret;
+ }
+ if (err) {
+ dev_err(&dev->dev, "Cannot turn off wakeup power resources\n");
+ dev->wakeup.flags.valid = 0;
}
out:
@@ -943,6 +923,7 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
union acpi_object acpi_object;
struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
acpi_status status;
+ u8 state_dummy;
int result;
acpi_bus_get_device(handle, &device);
@@ -971,6 +952,10 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
resource->order = acpi_object.power_resource.resource_order;
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
+ /* Get the initial state or just flip it on if that fails. */
+ if (acpi_power_get_state(resource, &state_dummy))
+ __acpi_power_on(resource);
+
pr_info("%s [%s]\n", acpi_device_name(device), acpi_device_bid(device));
device->flags.match_driver = true;
@@ -1035,13 +1020,8 @@ void acpi_turn_off_unused_power_resources(void)
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
mutex_lock(&resource->resource_lock);
- /*
- * Turn off power resources in an unknown state too, because the
- * platform firmware on some system expects the OS to turn off
- * power resources without any users unconditionally.
- */
if (!resource->ref_count &&
- resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
+ resource->state == ACPI_POWER_RESOURCE_STATE_ON) {
acpi_handle_debug(resource->device.handle, "Turning OFF\n");
__acpi_power_off(resource);
}
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index fe69dc518f31..701f61c01359 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -747,6 +747,73 @@ int find_acpi_cpu_topology_package(unsigned int cpu)
}
/**
+ * find_acpi_cpu_topology_cluster() - Determine a unique CPU cluster value
+ * @cpu: Kernel logical CPU number
+ *
+ * Determine a topology unique cluster ID for the given CPU/thread.
+ * This ID can then be used to group peers, which will have matching ids.
+ *
+ * The cluster, if present is the level of topology above CPUs. In a
+ * multi-thread CPU, it will be the level above the CPU, not the thread.
+ * It may not exist in single CPU systems. In simple multi-CPU systems,
+ * it may be equal to the package topology level.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found
+ * or there is no topology level above the CPU.
+ * Otherwise returns a value which represents the cluster for this CPU.
+ */
+int find_acpi_cpu_topology_cluster(unsigned int cpu)
+{
+ struct acpi_table_header *table;
+ acpi_status status;
+ struct acpi_pptt_processor *cpu_node, *cluster_node;
+ u32 acpi_cpu_id;
+ int retval;
+ int is_thread;
+
+ status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+ if (ACPI_FAILURE(status)) {
+ acpi_pptt_warn_missing();
+ return -ENOENT;
+ }
+
+ acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (cpu_node == NULL || !cpu_node->parent) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+
+ is_thread = cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD;
+ cluster_node = fetch_pptt_node(table, cpu_node->parent);
+ if (cluster_node == NULL) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+ if (is_thread) {
+ if (!cluster_node->parent) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+ cluster_node = fetch_pptt_node(table, cluster_node->parent);
+ if (cluster_node == NULL) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+ }
+ if (cluster_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
+ retval = cluster_node->acpi_processor_id;
+ else
+ retval = ACPI_PTR_DIFF(cluster_node, table);
+
+put_table:
+ acpi_put_table(table);
+
+ return retval;
+}
+
+/**
* find_acpi_cpu_topology_hetero_id() - Get a core architecture tag
* @cpu: Kernel logical CPU number
*
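Two CPUs belong to the same cluster exactly when find_acpi_cpu_topology_cluster() returns the same non-negative value for both; a hypothetical helper built on top of it:

	static bool same_cluster(unsigned int a, unsigned int b)
	{
		int ca = find_acpi_cpu_topology_cluster(a);
		int cb = find_acpi_cpu_topology_cluster(b);

		return ca >= 0 && ca == cb;
	}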
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 89c22bc55057..4d3a219c67f8 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -49,7 +49,6 @@ struct prm_context_buffer {
};
#pragma pack()
-
static LIST_HEAD(prm_module_list);
struct prm_handler_info {
@@ -73,7 +72,6 @@ struct prm_module_info {
struct prm_handler_info handlers[];
};
-
static u64 efi_pa_va_lookup(u64 pa)
{
efi_memory_desc_t *md;
@@ -88,7 +86,6 @@ static u64 efi_pa_va_lookup(u64 pa)
return 0;
}
-
#define get_first_handler(a) ((struct acpi_prmt_handler_info *) ((char *) (a) + a->handler_info_offset))
#define get_next_handler(a) ((struct acpi_prmt_handler_info *) (sizeof(struct acpi_prmt_handler_info) + (char *) a))
@@ -99,7 +96,7 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
struct acpi_prmt_handler_info *handler_info;
struct prm_handler_info *th;
struct prm_module_info *tm;
- u64 mmio_count = 0;
+ u64 *mmio_count;
u64 cur_handler = 0;
u32 module_info_size = 0;
u64 mmio_range_size = 0;
@@ -108,6 +105,8 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
module_info = (struct acpi_prmt_module_info *) header;
module_info_size = struct_size(tm, handlers, module_info->handler_info_count);
tm = kmalloc(module_info_size, GFP_KERNEL);
+ if (!tm)
+ goto parse_prmt_out1;
guid_copy(&tm->guid, (guid_t *) module_info->module_guid);
tm->major_rev = module_info->major_rev;
@@ -120,14 +119,24 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
* Each module is associated with a list of addr
* ranges that it can use during the service
*/
- mmio_count = *(u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB);
- mmio_range_size = struct_size(tm->mmio_info, addr_ranges, mmio_count);
+ mmio_count = (u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB);
+ if (!mmio_count)
+ goto parse_prmt_out2;
+
+ mmio_range_size = struct_size(tm->mmio_info, addr_ranges, *mmio_count);
tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
+ if (!tm->mmio_info)
+ goto parse_prmt_out3;
+
temp_mmio = memremap(module_info->mmio_list_pointer, mmio_range_size, MEMREMAP_WB);
+ if (!temp_mmio)
+ goto parse_prmt_out4;
memmove(tm->mmio_info, temp_mmio, mmio_range_size);
} else {
- mmio_range_size = struct_size(tm->mmio_info, addr_ranges, mmio_count);
- tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
+ tm->mmio_info = kmalloc(sizeof(*tm->mmio_info), GFP_KERNEL);
+ if (!tm->mmio_info)
+ goto parse_prmt_out2;
+
tm->mmio_info->mmio_count = 0;
}
@@ -145,6 +154,15 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0;
+
+parse_prmt_out4:
+ kfree(tm->mmio_info);
+parse_prmt_out3:
+ memunmap(mmio_count);
+parse_prmt_out2:
+ kfree(tm);
+parse_prmt_out1:
+ return -ENOMEM;
}
#define GET_MODULE 0
@@ -171,7 +189,6 @@ static void *find_guid_info(const guid_t *guid, u8 mode)
return NULL;
}
-
static struct prm_module_info *find_prm_module(const guid_t *guid)
{
return (struct prm_module_info *)find_guid_info(guid, GET_MODULE);
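The added failure handling follows the kernel's usual unwind-ladder idiom: each acquisition gets a label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A generic sketch with hypothetical resources and sizes:

	static int acquire_two(void)
	{
		void *a, *b;

		a = kmalloc(64, GFP_KERNEL);
		if (!a)
			goto out;

		b = memremap(0x1000, 64, MEMREMAP_WB);	/* address is illustrative */
		if (!b)
			goto free_a;

		return 0;	/* success: both resources stay held */

	free_a:
		kfree(a);
	out:
		return -ENOMEM;
	}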
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f37fba9e5ba0..76ef1bcc8848 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -789,7 +789,8 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
state->enter = acpi_idle_enter;
state->flags = 0;
- if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
+ if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
+ cx->type == ACPI_STATE_C3) {
state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index ee78a210c606..3c25ce8c95ba 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -16,6 +16,7 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/irq.h>
+#include <linux/dmi.h>
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
@@ -380,9 +381,58 @@ unsigned int acpi_dev_get_irq_type(int triggering, int polarity)
}
EXPORT_SYMBOL_GPL(acpi_dev_get_irq_type);
+static const struct dmi_system_id medion_laptop[] = {
+ {
+ .ident = "MEDION P15651",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_BOARD_NAME, "M15T"),
+ },
+ },
+ {
+ .ident = "MEDION S17405",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_BOARD_NAME, "M17T"),
+ },
+ },
+ { }
+};
+
+struct irq_override_cmp {
+ const struct dmi_system_id *system;
+ unsigned char irq;
+ unsigned char triggering;
+ unsigned char polarity;
+ unsigned char shareable;
+};
+
+static const struct irq_override_cmp skip_override_table[] = {
+ { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
+};
+
+static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
+ u8 shareable)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
+ const struct irq_override_cmp *entry = &skip_override_table[i];
+
+ if (dmi_check_system(entry->system) &&
+ entry->irq == gsi &&
+ entry->triggering == triggering &&
+ entry->polarity == polarity &&
+ entry->shareable == shareable)
+ return false;
+ }
+
+ return true;
+}
+
static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 triggering, u8 polarity, u8 shareable,
- bool legacy)
+ bool check_override)
{
int irq, p, t;
@@ -401,7 +451,9 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
* using extended IRQ descriptors we take the IRQ configuration
* from _CRS directly.
*/
- if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
+ if (check_override &&
+ acpi_dev_irq_override(gsi, triggering, polarity, shareable) &&
+ !acpi_get_override_irq(gsi, &t, &p)) {
u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
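Future quirks slot into the same two tables; a sketch with a made-up machine (vendor and board strings are hypothetical):

	static const struct dmi_system_id example_laptop[] = {
		{
			.ident = "EXAMPLE X1",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
				DMI_MATCH(DMI_BOARD_NAME, "X1"),
			},
		},
		{ }
	};

	static const struct irq_override_cmp skip_override_table[] = {
		{ medion_laptop,  1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
		{ example_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
	};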
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5b54c80b9d32..dce2c291b982 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -608,6 +608,7 @@ struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
{
return handle_to_device(handle, get_acpi_device);
}
+EXPORT_SYMBOL_GPL(acpi_bus_get_acpi_device);
static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
{
@@ -2559,6 +2560,12 @@ int __init acpi_scan_init(void)
}
}
+ /*
+ * Make sure that power management resources are not blocked by ACPI
+ * device objects with no users.
+ */
+ bus_for_each_dev(&acpi_bus_type, NULL, NULL, acpi_dev_turn_off_if_unused);
+
acpi_turn_off_unused_power_resources();
acpi_scan_initialized = true;
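bus_for_each_dev() aborts the walk as soon as the callback returns non-zero, which is why acpi_dev_turn_off_if_unused() always returns 0: every ACPI device object must be visited. The iterator's contract, for reference:

	/* Walks each device on @bus after @start, passing @data to @fn;
	 * stops early if @fn returns non-zero and propagates that value. */
	int bus_for_each_dev(struct bus_type *bus, struct device *start,
			     void *data, int (*fn)(struct device *dev, void *data));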
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 3023224515ab..eaa47753b758 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -815,14 +815,18 @@ void __weak acpi_s2idle_setup(void)
static void acpi_sleep_suspend_setup(void)
{
+ bool suspend_ops_needed = false;
int i;
for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
- if (acpi_sleep_state_supported(i))
+ if (acpi_sleep_state_supported(i)) {
sleep_states[i] = 1;
+ suspend_ops_needed = true;
+ }
- suspend_set_ops(old_suspend_ordering ?
- &acpi_suspend_ops_old : &acpi_suspend_ops);
+ if (suspend_ops_needed)
+ suspend_set_ops(old_suspend_ordering ?
+ &acpi_suspend_ops_old : &acpi_suspend_ops);
acpi_s2idle_setup();
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f9383736fa0f..71419eb16e09 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -21,6 +21,7 @@
#include <linux/earlycpio.h>
#include <linux/initrd.h>
#include <linux/security.h>
+#include <linux/kmemleak.h>
#include "internal.h"
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -601,6 +602,8 @@ void __init acpi_table_upgrade(void)
*/
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+ kmemleak_ignore_phys(acpi_tables_addr);
+
/*
* early_ioremap only can remap 256k one time. If we map all
* tables one time, we will hit the limit. Need to map chunks
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 962041148482..720aa6cdd402 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -19,6 +19,7 @@
#include <linux/clk/clk-conf.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/of_irq.h>
#include <asm/irq.h>
@@ -371,14 +372,37 @@ static void amba_device_release(struct device *dev)
kfree(d);
}
+static int of_amba_device_decode_irq(struct amba_device *dev)
+{
+ struct device_node *node = dev->dev.of_node;
+ int i, irq = 0;
+
+ if (IS_ENABLED(CONFIG_OF_IRQ) && node) {
+ /* Decode the IRQs and address ranges */
+ for (i = 0; i < AMBA_NR_IRQS; i++) {
+ irq = of_irq_get(node, i);
+ if (irq < 0) {
+ if (irq == -EPROBE_DEFER)
+ return irq;
+ irq = 0;
+ }
+
+ dev->irq[i] = irq;
+ }
+ }
+
+ return 0;
+}
+
static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
{
u32 size;
void __iomem *tmp;
int i, ret;
- WARN_ON(dev->irq[0] == (unsigned int)-1);
- WARN_ON(dev->irq[1] == (unsigned int)-1);
+ ret = of_amba_device_decode_irq(dev);
+ if (ret)
+ goto err_out;
ret = request_resource(parent, &dev->res);
if (ret)
@@ -579,78 +603,6 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
}
EXPORT_SYMBOL_GPL(amba_device_add);
-static struct amba_device *
-amba_aphb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1, int irq2,
- void *pdata, unsigned int periphid, u64 dma_mask,
- struct resource *resbase)
-{
- struct amba_device *dev;
- int ret;
-
- dev = amba_device_alloc(name, base, size);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- dev->dev.coherent_dma_mask = dma_mask;
- dev->irq[0] = irq1;
- dev->irq[1] = irq2;
- dev->periphid = periphid;
- dev->dev.platform_data = pdata;
- dev->dev.parent = parent;
-
- ret = amba_device_add(dev, resbase);
- if (ret) {
- amba_device_put(dev);
- return ERR_PTR(ret);
- }
-
- return dev;
-}
-
-struct amba_device *
-amba_apb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1, int irq2,
- void *pdata, unsigned int periphid)
-{
- return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
- periphid, 0, &iomem_resource);
-}
-EXPORT_SYMBOL_GPL(amba_apb_device_add);
-
-struct amba_device *
-amba_ahb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1, int irq2,
- void *pdata, unsigned int periphid)
-{
- return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
- periphid, ~0ULL, &iomem_resource);
-}
-EXPORT_SYMBOL_GPL(amba_ahb_device_add);
-
-struct amba_device *
-amba_apb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase)
-{
- return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
- periphid, 0, resbase);
-}
-EXPORT_SYMBOL_GPL(amba_apb_device_add_res);
-
-struct amba_device *
-amba_ahb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase)
-{
- return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
- periphid, ~0ULL, resbase);
-}
-EXPORT_SYMBOL_GPL(amba_ahb_device_add_res);
-
-
static void amba_device_initialize(struct amba_device *dev, const char *name)
{
device_initialize(&dev->dev);
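The decode helper propagates -EPROBE_DEFER so an AMBA device is retried once its interrupt controller has probed, while any other lookup failure degrades to "no IRQ". The core idiom, isolated:

	int irq = of_irq_get(node, 0);

	if (irq == -EPROBE_DEFER)
		return irq;	/* irqchip not ready yet: retry the probe later */
	if (irq < 0)
		irq = 0;	/* any other failure means "no IRQ" */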
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 9edacc8b9768..49fb74196d02 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1870,7 +1870,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
binder_dec_node(buffer->target_node, 1, 0);
off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
- off_end_offset = is_failure ? failed_at :
+ off_end_offset = is_failure && failed_at ? failed_at :
off_start_offset + buffer->offsets_size;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
@@ -1956,9 +1956,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
binder_size_t fd_buf_size;
binder_size_t num_valid;
- if (proc->tsk != current->group_leader) {
+ if (is_failure) {
/*
- * Nothing to do if running in sender context
* The fd fixups have not been applied so no
* fds need to be closed.
*/
@@ -2056,7 +2055,7 @@ static int binder_translate_binder(struct flat_binder_object *fp,
ret = -EINVAL;
goto done;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
ret = -EPERM;
goto done;
}
@@ -2102,7 +2101,7 @@ static int binder_translate_handle(struct flat_binder_object *fp,
proc->pid, thread->pid, fp->handle);
return -EINVAL;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
ret = -EPERM;
goto done;
}
@@ -2190,7 +2189,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
ret = -EBADF;
goto err_fget;
}
- ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
+ ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
if (ret < 0) {
ret = -EPERM;
goto err_security;
@@ -2595,8 +2594,8 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_invalid_target_handle;
}
- if (security_binder_transaction(proc->tsk,
- target_proc->tsk) < 0) {
+ if (security_binder_transaction(proc->cred,
+ target_proc->cred) < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = -EPERM;
return_error_line = __LINE__;
@@ -2711,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
t->from = thread;
else
t->from = NULL;
- t->sender_euid = task_euid(proc->tsk);
+ t->sender_euid = proc->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
@@ -2722,16 +2721,7 @@ static void binder_transaction(struct binder_proc *proc,
u32 secid;
size_t added_size;
- /*
- * Arguably this should be the task's subjective LSM secid but
- * we can't reliably access the subjective creds of a task
- * other than our own so we must use the objective creds, which
- * are safe to access. The downside is that if a task is
- * temporarily overriding it's creds it will not be reflected
- * here; however, it isn't clear that binder would handle that
- * case well anyway.
- */
- security_task_getsecid_obj(proc->tsk, &secid);
+ security_cred_getsecid(proc->cred, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
if (ret) {
return_error = BR_FAILED_REPLY;
@@ -3185,6 +3175,7 @@ err_invalid_target_handle:
* binder_free_buf() - free the specified buffer
* @proc: binder proc that owns buffer
* @buffer: buffer to be freed
+ * @is_failure: true if the buffer is being freed after a failed transaction
*
* If buffer for an async transaction, enqueue the next async
* transaction from the node.
@@ -3194,7 +3185,7 @@ err_invalid_target_handle:
static void
binder_free_buf(struct binder_proc *proc,
struct binder_thread *thread,
- struct binder_buffer *buffer)
+ struct binder_buffer *buffer, bool is_failure)
{
binder_inner_proc_lock(proc);
if (buffer->transaction) {
@@ -3222,7 +3213,7 @@ binder_free_buf(struct binder_proc *proc,
binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
- binder_transaction_buffer_release(proc, thread, buffer, 0, false);
+ binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
binder_alloc_free_buf(&proc->alloc, buffer);
}
@@ -3424,7 +3415,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
- binder_free_buf(proc, thread, buffer);
+ binder_free_buf(proc, thread, buffer, false);
break;
}
@@ -4117,7 +4108,7 @@ retry:
buffer->transaction = NULL;
binder_cleanup_transaction(t, "fd fixups failed",
BR_FAILED_REPLY);
- binder_free_buf(proc, thread, buffer);
+ binder_free_buf(proc, thread, buffer, true);
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
proc->pid, thread->pid,
@@ -4353,6 +4344,7 @@ static void binder_free_proc(struct binder_proc *proc)
}
binder_alloc_deferred_release(&proc->alloc);
put_task_struct(proc->tsk);
+ put_cred(proc->cred);
binder_stats_deleted(BINDER_STAT_PROC);
kfree(proc);
}
@@ -4564,7 +4556,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
ret = -EBUSY;
goto out;
}
- ret = security_binder_set_context_mgr(proc->tsk);
+ ret = security_binder_set_context_mgr(proc->cred);
if (ret < 0)
goto out;
if (uid_valid(context->binder_context_mgr_uid)) {
@@ -5055,6 +5047,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
+ proc->cred = get_cred(filp->f_cred);
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->freeze_wait);
proc->default_priority = task_nice(current);
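The fix pins the opener's credentials once, at open time, and uses them for every later security check instead of deriving credentials from proc->tsk per transaction. The general pattern for a character device, sketched with a hypothetical context struct:

	struct example_ctx {
		const struct cred *cred;
	};

	static int example_open(struct inode *inode, struct file *filp)
	{
		struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;
		/* f_cred is the opener's credentials, pinned for the file's lifetime */
		ctx->cred = get_cred(filp->f_cred);
		filp->private_data = ctx;
		return 0;
	}

	static int example_release(struct inode *inode, struct file *filp)
	{
		struct example_ctx *ctx = filp->private_data;

		put_cred(ctx->cred);	/* balances the get_cred() in open */
		kfree(ctx);
		return 0;
	}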
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 402c4d4362a8..d6b6b8cb7346 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -364,6 +364,9 @@ struct binder_ref {
* (invariant after initialized)
* @tsk task_struct for group_leader of process
* (invariant after initialized)
+ * @cred struct cred associated with the `struct file`
+ * in binder_open()
+ * (invariant after initialized)
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -426,6 +429,7 @@ struct binder_proc {
struct list_head waiting_threads;
int pid;
struct task_struct *tsk;
+ const struct cred *cred;
struct hlist_node deferred_work_node;
int deferred_work;
int outstanding_txns;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 186cbf90c8ea..d60f34718b5d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -258,7 +258,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
- { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8/Lewisburg RAID */
{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
@@ -316,7 +316,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
{ PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
+	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG/Lewisburg RAID */
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
{ PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
@@ -358,8 +358,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg/Lewisburg AHCI */
+	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg/Lewisburg RAID */
{ PCI_VDEVICE(INTEL, 0x43d4), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x43d5), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x43d6), board_ahci }, /* Rocket Lake PCH-H RAID */
@@ -394,10 +394,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
@@ -592,6 +588,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
+ { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
/*
* Samsung SSDs found on some macbooks. NCQ times out if MSI is
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index eed65311b5d1..3018ca84a3d8 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2007,7 +2007,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
retry:
ata_tf_init(dev, &tf);
- if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
+ if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
!(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
tf.command = ATA_CMD_READ_LOG_DMA_EXT;
tf.protocol = ATA_PROT_DMA;
@@ -2459,18 +2459,70 @@ static void ata_dev_config_devslp(struct ata_device *dev)
}
}
+static void ata_dev_config_cpr(struct ata_device *dev)
+{
+ unsigned int err_mask;
+ size_t buf_len;
+ int i, nr_cpr = 0;
+ struct ata_cpr_log *cpr_log = NULL;
+ u8 *desc, *buf = NULL;
+
+ if (!ata_identify_page_supported(dev,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+ goto out;
+
+ /*
+ * Read IDENTIFY DEVICE data log, page 0x47
+ * (concurrent positioning ranges). We can have at most 255 32B range
+ * descriptors plus a 64B header.
+ */
+ buf_len = (64 + 255 * 32 + 511) & ~511;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES,
+ buf, buf_len >> 9);
+ if (err_mask)
+ goto out;
+
+ nr_cpr = buf[0];
+ if (!nr_cpr)
+ goto out;
+
+ cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
+ if (!cpr_log)
+ goto out;
+
+ cpr_log->nr_cpr = nr_cpr;
+ desc = &buf[64];
+ for (i = 0; i < nr_cpr; i++, desc += 32) {
+ cpr_log->cpr[i].num = desc[0];
+ cpr_log->cpr[i].num_storage_elements = desc[1];
+ cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
+ cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
+ }
+
+out:
+ swap(dev->cpr_log, cpr_log);
+ kfree(cpr_log);
+ kfree(buf);
+}
+
static void ata_dev_print_features(struct ata_device *dev)
{
if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
return;
ata_dev_info(dev,
- "Features:%s%s%s%s%s\n",
+ "Features:%s%s%s%s%s%s\n",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
- dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "");
+ dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
+ dev->cpr_log ? " CPR" : "");
}
/**
@@ -2634,6 +2686,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_sense_reporting(dev);
ata_dev_config_zac(dev);
ata_dev_config_trusted(dev);
+ ata_dev_config_cpr(dev);
dev->cdb_len = 32;
if (ata_msg_drv(ap) && print_info)
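For reference, the byte layout that ata_dev_config_cpr() parses out of log page 0x47 is a 64-byte header whose first byte holds the range count, followed by one 32-byte descriptor per range. A sketch of a single descriptor (for illustration, not a kernel struct):

	struct cpr_desc_raw {
		u8 num;				/* byte 0: range number */
		u8 num_storage_elements;	/* byte 1 */
		u8 reserved0[6];		/* bytes 2..7 */
		__le64 start_lba;		/* bytes 8..15 */
		__le64 num_lbas;		/* bytes 16..23 */
		u8 reserved1[8];		/* bytes 24..31 */
	};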
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1fb4611f7eeb..8a6b7b913d64 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1895,7 +1895,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
*/
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
{
- int num_pages;
+ int i, num_pages = 0;
static const u8 pages[] = {
0x00, /* page 0x00, this page */
0x80, /* page 0x80, unit serial no page */
@@ -1905,13 +1905,17 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
0xb1, /* page 0xb1, block device characteristics page */
0xb2, /* page 0xb2, thin provisioning page */
0xb6, /* page 0xb6, zoned block device characteristics */
+ 0xb9, /* page 0xb9, concurrent positioning ranges */
};
- num_pages = sizeof(pages);
- if (!(args->dev->flags & ATA_DFLAG_ZAC))
- num_pages--;
+ for (i = 0; i < sizeof(pages); i++) {
+ if (pages[i] == 0xb6 &&
+ !(args->dev->flags & ATA_DFLAG_ZAC))
+ continue;
+ rbuf[num_pages + 4] = pages[i];
+ num_pages++;
+ }
rbuf[3] = num_pages; /* number of supported VPD pages */
- memcpy(rbuf + 4, pages, num_pages);
return 0;
}
@@ -2121,6 +2125,26 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
return 0;
}
+static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+{
+ struct ata_cpr_log *cpr_log = args->dev->cpr_log;
+ u8 *desc = &rbuf[64];
+ int i;
+
+ /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
+ rbuf[1] = 0xb9;
+ put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]);
+
+ for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
+ desc[0] = cpr_log->cpr[i].num;
+ desc[1] = cpr_log->cpr[i].num_storage_elements;
+ put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]);
+ put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
+ }
+
+ return 0;
+}
+
/**
* modecpy - Prepare response for MODE SENSE
* @dest: output buffer
@@ -2981,7 +3005,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
ata_qc_set_pc_nbytes(qc);
/* We may not issue DMA commands if no DMA mode is set */
- if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) {
+ if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) {
fp = 1;
goto invalid_fld;
}
@@ -3131,7 +3155,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
u8 unmap = cdb[1] & 0x8;
/* we may not issue DMA commands if no DMA mode is set */
- if (unlikely(!dev->dma_mode))
+ if (unlikely(!ata_dma_enabled(dev)))
goto invalid_opcode;
/*
@@ -4120,11 +4144,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
break;
case 0xb6:
- if (dev->flags & ATA_DFLAG_ZAC) {
+ if (dev->flags & ATA_DFLAG_ZAC)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
- break;
- }
- fallthrough;
+ else
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ break;
+ case 0xb9:
+ if (dev->cpr_log)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
+ else
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ break;
default:
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
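In the SCSI VPD page that ata_scsiop_inq_b9() builds, the length field at bytes 2-3 counts only the payload after the 4-byte header, which is where the 64 + n*32 - 4 arithmetic comes from; as a tiny helper:

	/* 64-byte fixed area plus one 32-byte descriptor per range, minus the
	 * 4-byte VPD header that the length field excludes. */
	static u16 cpr_vpd_page_len(unsigned int nr_cpr)
	{
		return 64 + nr_cpr * 32 - 4;
	}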
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 557ecf466102..b7ff63ed3bbb 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -215,7 +215,7 @@ static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
- if (pair->dma_mode) {
+ if (ata_dma_enabled(pair)) {
ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
}
@@ -264,7 +264,7 @@ static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
- if (pair->dma_mode) {
+ if (ata_dma_enabled(pair)) {
ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
}
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index c8acba162d02..154748cfcc79 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -66,7 +66,7 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
if (peer) {
/* This may be over conservative */
- if (peer->dma_mode) {
+ if (ata_dma_enabled(peer)) {
ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
}
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index f6278d9de348..ad1090b90e52 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -153,7 +153,7 @@ static void optidma_mode_setup(struct ata_port *ap, struct ata_device *adev, u8
if (pair) {
u8 pair_addr;
/* Hardware constraint */
- if (pair->dma_mode)
+ if (ata_dma_enabled(pair))
pair_addr = 0;
else
pair_addr = addr_timing[pci_clock][pair->pio_mode - XFER_PIO_0];
@@ -301,7 +301,7 @@ static u8 optidma_make_bits43(struct ata_device *adev)
};
if (!ata_dev_enabled(adev))
return 0;
- if (adev->dma_mode)
+ if (ata_dma_enabled(adev))
return adev->dma_mode - XFER_MW_DMA_0;
return bits43[adev->pio_mode - XFER_PIO_0];
}
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 8fde4a86401b..3aca8fe3fdb6 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -172,8 +172,8 @@ static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
if (adev != ap->private_data) {
/* UDMA timing is not shared */
- if (adev->dma_mode < XFER_UDMA_0) {
- if (adev->dma_mode)
+ if (adev->dma_mode < XFER_UDMA_0 || !ata_dma_enabled(adev)) {
+ if (ata_dma_enabled(adev))
radisys_set_dmamode(ap, adev);
else if (adev->pio_mode)
radisys_set_piomode(ap, adev);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9d86203e1e7a..c53633d47bfb 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3896,8 +3896,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
default:
- dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
- return 1;
+ dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
+ return -EINVAL;
}
hpriv->hp_flags = hp_flags;
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 43407665918f..981e72a3dafb 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -12,15 +12,12 @@
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
-#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
-#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
-#include <linux/smp.h>
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
@@ -600,6 +597,11 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
return core_mask;
}
+const struct cpumask *cpu_clustergroup_mask(int cpu)
+{
+ return &cpu_topology[cpu].cluster_sibling;
+}
+
void update_siblings_masks(unsigned int cpuid)
{
struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -617,6 +619,12 @@ void update_siblings_masks(unsigned int cpuid)
if (cpuid_topo->package_id != cpu_topo->package_id)
continue;
+ if (cpuid_topo->cluster_id == cpu_topo->cluster_id &&
+ cpuid_topo->cluster_id != -1) {
+ cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
+ cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
+ }
+
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
@@ -635,6 +643,9 @@ static void clear_cpu_topology(int cpu)
cpumask_clear(&cpu_topo->llc_sibling);
cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+ cpumask_clear(&cpu_topo->cluster_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
+
cpumask_clear(&cpu_topo->core_sibling);
cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
@@ -650,6 +661,7 @@ void __init reset_cpu_topology(void)
cpu_topo->thread_id = -1;
cpu_topo->core_id = -1;
+ cpu_topo->cluster_id = -1;
cpu_topo->package_id = -1;
cpu_topo->llc_id = -1;
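With cluster_sibling populated, cpu_clustergroup_mask() hands any consumer a ready-made sibling mask; iterating it is the usual cpumask pattern (cpu is a hypothetical CPU of interest):

	unsigned int sibling;

	for_each_cpu(sibling, cpu_clustergroup_mask(cpu))
		pr_info("CPU%u shares a cluster with CPU%u\n", cpu, sibling);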
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 5e79299f6c3f..2d25a6416587 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -9,7 +9,6 @@
*/
#include <linux/component.h>
#include <linux/device.h>
-#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -246,7 +245,7 @@ static int try_to_bring_up_master(struct master *master,
return 0;
}
- if (!devres_open_group(master->parent, NULL, GFP_KERNEL))
+ if (!devres_open_group(master->parent, master, GFP_KERNEL))
return -ENOMEM;
/* Found all components */
@@ -258,6 +257,7 @@ static int try_to_bring_up_master(struct master *master,
return ret;
}
+ devres_close_group(master->parent, NULL);
master->bound = true;
return 1;
}
@@ -282,7 +282,7 @@ static void take_down_master(struct master *master)
{
if (master->bound) {
master->ops->unbind(master->parent);
- devres_release_group(master->parent, NULL);
+ devres_release_group(master->parent, master);
master->bound = false;
}
}
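Keying the devres group on the master pointer, rather than NULL, makes the open/release pair act on exactly this group even when other groups nest on the same parent device. The idiom in isolation (key is any unique pointer):

	if (!devres_open_group(dev, key, GFP_KERNEL))
		return -ENOMEM;

	/* ... allocate devres-managed resources ... */

	devres_close_group(dev, NULL);	/* NULL closes the most recently opened group */

	/* later, on teardown: */
	devres_release_group(dev, key);	/* frees everything allocated in the group */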
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 249da496581a..fd034d742447 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -821,9 +821,7 @@ struct device_link *device_link_add(struct device *consumer,
dev_bus_name(supplier), dev_name(supplier),
dev_bus_name(consumer), dev_name(consumer));
if (device_register(&link->link_dev)) {
- put_device(consumer);
- put_device(supplier);
- kfree(link);
+ put_device(&link->link_dev);
link = NULL;
goto out;
}
@@ -2875,7 +2873,7 @@ void device_initialize(struct device *dev)
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
device_pm_init(dev);
- set_dev_node(dev, -1);
+ set_dev_node(dev, NUMA_NO_NODE);
#ifdef CONFIG_GENERIC_MSI_IRQ
raw_spin_lock_init(&dev->msi_lock);
INIT_LIST_HEAD(&dev->msi_list);
@@ -4690,6 +4688,11 @@ define_dev_printk_level(_dev_info, KERN_INFO);
*
* return dev_err_probe(dev, err, ...);
*
+ * Note that it is deemed acceptable to use this function for error
+ * prints during probe even if the @err is known to never be -EPROBE_DEFER.
+ * The benefit compared to a normal dev_err() is the standardized format
+ * of the error code and the fact that the error code is returned.
+ *
* Returns @err.
*
*/
@@ -4835,6 +4838,12 @@ int device_match_acpi_dev(struct device *dev, const void *adev)
}
EXPORT_SYMBOL(device_match_acpi_dev);
+int device_match_acpi_handle(struct device *dev, const void *handle)
+{
+ return ACPI_HANDLE(dev) == handle;
+}
+EXPORT_SYMBOL(device_match_acpi_handle);
+
int device_match_any(struct device *dev, const void *unused)
{
return 1;
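The new matcher plugs into the generic device-lookup helpers; a hypothetical lookup of the struct device bound to a given ACPI handle:

	struct device *dev;

	dev = bus_find_device(&platform_bus_type, NULL, handle,	/* handle obtained elsewhere */
			      device_match_acpi_handle);
	if (dev)
		put_device(dev);	/* bus_find_device() takes a reference */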
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index 101754ad48d9..eb4be452062a 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -1,11 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
+obj-y += main.o
# Create $(fwdir) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a
# leading /, it's relative to $(srctree).
fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR))
fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
-obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
+firmware := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
+obj-y += $(firmware)
FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
FWSTR = $(subst $(comma),_,$(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))))
@@ -34,7 +36,7 @@ $(obj)/%.gen.S: FORCE
$(call filechk,fwbin)
# The .o files depend on the binaries directly; the .S files don't.
-$(addprefix $(obj)/, $(obj-y)): $(obj)/%.gen.o: $(fwdir)/%
+$(addprefix $(obj)/, $(firmware)): $(obj)/%.gen.o: $(fwdir)/%
targets := $(patsubst $(obj)/%,%, \
$(shell find $(obj) -name \*.gen.S 2>/dev/null))
diff --git a/drivers/base/firmware_loader/builtin/main.c b/drivers/base/firmware_loader/builtin/main.c
new file mode 100644
index 000000000000..a065c3150897
--- /dev/null
+++ b/drivers/base/firmware_loader/builtin/main.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Builtin firmware support */
+
+#include <linux/firmware.h>
+#include "../firmware.h"
+
+/* Only if FW_LOADER=y */
+#ifdef CONFIG_FW_LOADER
+
+struct builtin_fw {
+ char *name;
+ void *data;
+ unsigned long size;
+};
+
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+
+static bool fw_copy_to_prealloc_buf(struct firmware *fw,
+ void *buf, size_t size)
+{
+ if (!buf)
+ return true;
+ if (size < fw->size)
+ return false;
+ memcpy(buf, fw->data, fw->size);
+ return true;
+}
+
+/**
+ * firmware_request_builtin() - load builtin firmware
+ * @fw: pointer to firmware struct
+ * @name: name of firmware file
+ *
+ * Some kernel code must avoid the memory allocator because it runs early in
+ * the boot process; the x86 CPU microcode loader is one example. In these
+ * cases the caller only wants to know whether the firmware was built in and,
+ * if so, to use it right away. This helper serves such cases.
+ *
+ * This looks for the firmware in the kernel image itself. It returns
+ * successfully only if the firmware you are looking for was built into the
+ * kernel.
+ *
+ * Callers of this API do not need to use release_firmware() as the pointer to
+ * the firmware is expected to be provided locally on the stack of the caller.
+ **/
+bool firmware_request_builtin(struct firmware *fw, const char *name)
+{
+ struct builtin_fw *b_fw;
+
+ if (!fw)
+ return false;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
+ if (strcmp(name, b_fw->name) == 0) {
+ fw->size = b_fw->size;
+ fw->data = b_fw->data;
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, TEST_FIRMWARE);
+
+/**
+ * firmware_request_builtin_buf() - load builtin firmware into optional buffer
+ * @fw: pointer to firmware struct
+ * @name: name of firmware file
+ * @buf: optional pre-allocated buffer into which the built-in firmware is
+ * copied. May be NULL. Used by callers such as request_firmware_into_buf()
+ * and request_partial_firmware_into_buf().
+ * @size: if @buf was provided, the size of that pre-allocated buffer. If
+ * the built-in firmware does not fit into @buf, this call will fail.
+ *
+ * This looks for the firmware in the kernel image itself. It can succeed
+ * only if the firmware you are looking for was built into the kernel. If a
+ * @buf was passed, the firmware is copied into it only if it fits within the
+ * pre-allocated size given in @size.
+ *
+ * This helper is to be used internally by the firmware loader only.
+ **/
+bool firmware_request_builtin_buf(struct firmware *fw, const char *name,
+ void *buf, size_t size)
+{
+ if (!firmware_request_builtin(fw, name))
+ return false;
+
+ return fw_copy_to_prealloc_buf(fw, buf, size);
+}
+
+bool firmware_is_builtin(const struct firmware *fw)
+{
+ struct builtin_fw *b_fw;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
+ if (fw->data == b_fw->data)
+ return true;
+
+ return false;
+}
+
+#endif
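Typical use from early code, sketched below; consume_fw() and the firmware name are illustrative, and no release_firmware() is needed because the struct lives on the caller's stack while the data sits in the kernel image:

	struct firmware fw;

	if (firmware_request_builtin(&fw, "example/fw.bin"))
		consume_fw(fw.data, fw.size);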
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index a3014e9e2c85..2889f446ad41 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -151,6 +151,23 @@ static inline void fw_state_done(struct fw_priv *fw_priv)
int assign_fw(struct firmware *fw, struct device *device);
+#ifdef CONFIG_FW_LOADER
+bool firmware_is_builtin(const struct firmware *fw);
+bool firmware_request_builtin_buf(struct firmware *fw, const char *name,
+ void *buf, size_t size);
+#else /* module case */
+static inline bool firmware_is_builtin(const struct firmware *fw)
+{
+ return false;
+}
+static inline bool firmware_request_builtin_buf(struct firmware *fw,
+ const char *name,
+ void *buf, size_t size)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_FW_LOADER_PAGED_BUF
void fw_free_paged_buf(struct fw_priv *fw_priv);
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index bdbedc6660a8..94d1789a233e 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -93,65 +93,6 @@ DEFINE_MUTEX(fw_lock);
static struct firmware_cache fw_cache;
-/* Builtin firmware support */
-
-#ifdef CONFIG_FW_LOADER
-
-extern struct builtin_fw __start_builtin_fw[];
-extern struct builtin_fw __end_builtin_fw[];
-
-static void fw_copy_to_prealloc_buf(struct firmware *fw,
- void *buf, size_t size)
-{
- if (!buf || size < fw->size)
- return;
- memcpy(buf, fw->data, fw->size);
-}
-
-static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
- void *buf, size_t size)
-{
- struct builtin_fw *b_fw;
-
- for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
- if (strcmp(name, b_fw->name) == 0) {
- fw->size = b_fw->size;
- fw->data = b_fw->data;
- fw_copy_to_prealloc_buf(fw, buf, size);
-
- return true;
- }
- }
-
- return false;
-}
-
-static bool fw_is_builtin_firmware(const struct firmware *fw)
-{
- struct builtin_fw *b_fw;
-
- for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
- if (fw->data == b_fw->data)
- return true;
-
- return false;
-}
-
-#else /* Module case - no builtin firmware support */
-
-static inline bool fw_get_builtin_firmware(struct firmware *fw,
- const char *name, void *buf,
- size_t size)
-{
- return false;
-}
-
-static inline bool fw_is_builtin_firmware(const struct firmware *fw)
-{
- return false;
-}
-#endif
-
static void fw_state_init(struct fw_priv *fw_priv)
{
struct fw_state *fw_st = &fw_priv->fw_st;
@@ -736,7 +677,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return -ENOMEM;
}
- if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
+ if (firmware_request_builtin_buf(firmware, name, dbuf, size)) {
dev_dbg(device, "using built-in %s\n", name);
return 0; /* assigned */
}
@@ -1051,7 +992,7 @@ EXPORT_SYMBOL(request_partial_firmware_into_buf);
void release_firmware(const struct firmware *fw)
{
if (fw) {
- if (!fw_is_builtin_firmware(fw))
+ if (!firmware_is_builtin(fw))
firmware_free_data(fw);
kfree(fw);
}
@@ -1215,7 +1156,7 @@ static int uncache_firmware(const char *fw_name)
pr_debug("%s: %s\n", __func__, fw_name);
- if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
+ if (firmware_request_builtin(&fw, fw_name))
return 0;
fw_priv = lookup_fw_priv(fw_name);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 652531f67135..598acf93a360 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1466,8 +1466,7 @@ int platform_dma_configure(struct device *dev)
}
static const struct dev_pm_ops platform_dev_pm_ops = {
- .runtime_suspend = pm_generic_runtime_suspend,
- .runtime_resume = pm_generic_runtime_resume,
+ SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
USE_PLATFORM_PM_SLEEP_OPS
};
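
SET_RUNTIME_PM_OPS() has the advantage that the callbacks compile away when
runtime PM is disabled; roughly, the macro in <linux/pm.h> behaves like the
following (paraphrased, not a verbatim copy):

    #ifdef CONFIG_PM
    #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
            .runtime_suspend = suspend_fn, \
            .runtime_resume = resume_fn, \
            .runtime_idle = idle_fn,
    #else
    #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
    #endif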
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index cbea78e79f3d..ac4dde8fdb8b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,7 +32,6 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
-#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
@@ -747,8 +746,6 @@ void dpm_resume_noirq(pm_message_t state)
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
-
- cpuidle_resume();
}
/**
@@ -1051,7 +1048,7 @@ static void device_complete(struct device *dev, pm_message_t state)
const char *info = NULL;
if (dev->power.syscore)
- return;
+ goto out;
device_lock(dev);
@@ -1081,6 +1078,7 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
+out:
pm_runtime_put(dev);
}
@@ -1336,8 +1334,6 @@ int dpm_suspend_noirq(pm_message_t state)
{
int ret;
- cpuidle_pause();
-
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
@@ -1794,9 +1790,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
int (*callback)(struct device *) = NULL;
int ret = 0;
- if (dev->power.syscore)
- return 0;
-
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
@@ -1805,6 +1798,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
*/
pm_runtime_get_noresume(dev);
+ if (dev->power.syscore)
+ return 0;
+
device_lock(dev);
dev->power.wakeup_path = false;
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 54292cdd7808..0eb7f02b3ad5 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -25,8 +25,10 @@ extern u64 pm_runtime_active_time(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
+#define WAKE_IRQ_DEDICATED_REVERSE BIT(2)
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
- WAKE_IRQ_DEDICATED_MANAGED)
+ WAKE_IRQ_DEDICATED_MANAGED | \
+ WAKE_IRQ_DEDICATED_REVERSE)
struct wake_irq {
struct device *dev;
@@ -39,7 +41,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status);
-extern void dev_pm_disable_wake_irq_check(struct device *dev);
+extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
+extern void dev_pm_enable_wake_irq_complete(struct device *dev);
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index ec94049442b9..d504cd4ab3cb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -645,6 +645,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
if (retval)
goto fail;
+ dev_pm_enable_wake_irq_complete(dev);
+
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -690,7 +692,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
- dev_pm_disable_wake_irq_check(dev);
+ dev_pm_disable_wake_irq_check(dev, true);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -873,7 +875,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
- dev_pm_disable_wake_irq_check(dev);
+ dev_pm_disable_wake_irq_check(dev, false);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index b91a3a9bf9f6..0004db4a9d3b 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -142,24 +142,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
return IRQ_HANDLED;
}
-/**
- * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
- * @dev: Device entry
- * @irq: Device wake-up interrupt
- *
- * Unless your hardware has separate wake-up interrupts in addition
- * to the device IO interrupts, you don't need this.
- *
- * Sets up a threaded interrupt handler for a device that has
- * a dedicated wake-up interrupt in addition to the device IO
- * interrupt.
- *
- * The interrupt starts disabled, and needs to be managed for
- * the device by the bus code or the device driver using
- * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
- * functions.
- */
-int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
struct wake_irq *wirq;
int err;
@@ -197,7 +180,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_irq;
- wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
return err;
@@ -210,9 +193,58 @@ err_free:
return err;
}
+
+/**
+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has
+ * a dedicated wake-up interrupt in addition to the device IO
+ * interrupt.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
+}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
/**
+ * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
+ * with reverse enable ordering
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has a dedicated
+ * wake-up interrupt in addition to the device IO interrupt. It sets the
+ * WAKE_IRQ_DEDICATED_REVERSE status flag to tell rpm_suspend() to enable the
+ * dedicated wake-up interrupt after running the runtime suspend callback
+ * for @dev.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
+{
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
+
+/**
* dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device
*
@@ -282,28 +314,55 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
return;
enable:
- enable_irq(wirq->irq);
+ if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+ enable_irq(wirq->irq);
}
/**
* dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
* @dev: Device
+ * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
*
* Disables wake-up interrupt conditionally based on status.
* Should be only called from rpm_suspend() and rpm_resume() path.
*/
-void dev_pm_disable_wake_irq_check(struct device *dev)
+void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
+ if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+ return;
+
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
disable_irq_nosync(wirq->irq);
}
/**
+ * dev_pm_enable_wake_irq_complete - enable a wake IRQ not enabled before
+ * @dev: Device using the wake IRQ
+ *
+ * Enable the wake IRQ conditionally based on status, mainly used when the
+ * wake IRQ should be enabled after running ->runtime_suspend(), as indicated
+ * by WAKE_IRQ_DEDICATED_REVERSE.
+ *
+ * Should be only called from rpm_suspend() path.
+ */
+void dev_pm_enable_wake_irq_complete(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+ enable_irq(wirq->irq);
+}
+
+/**
* dev_pm_arm_wake_irq - Arm device wake-up
* @wirq: Device wake-up interrupt
*
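
A driver whose wake-up line may only be unmasked once ->runtime_suspend() has
quiesced the hardware would request it with the new _reverse variant; a
minimal probe-time sketch with hypothetical names (foo_probe, IRQ index 1):

    static int foo_probe(struct platform_device *pdev)
    {
            int irq, ret;

            irq = platform_get_irq(pdev, 1);    /* dedicated wake-up line */
            if (irq < 0)
                    return irq;

            device_init_wakeup(&pdev->dev, true);
            /* enable_irq() now happens after ->runtime_suspend() has run */
            ret = dev_pm_set_dedicated_wake_irq_reverse(&pdev->dev, irq);
            if (ret)
                    return ret;

            pm_runtime_enable(&pdev->dev);
            return 0;
    }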
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 453918eb7390..f1f35b48ab8b 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -15,7 +15,6 @@
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/property.h>
-#include <linux/etherdevice.h>
#include <linux/phy.h>
struct fwnode_handle *dev_fwnode(struct device *dev)
@@ -935,68 +934,6 @@ int device_get_phy_mode(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_get_phy_mode);
-static void *fwnode_get_mac_addr(struct fwnode_handle *fwnode,
- const char *name, char *addr,
- int alen)
-{
- int ret = fwnode_property_read_u8_array(fwnode, name, addr, alen);
-
- if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
- return addr;
- return NULL;
-}
-
-/**
- * fwnode_get_mac_address - Get the MAC from the firmware node
- * @fwnode: Pointer to the firmware node
- * @addr: Address of buffer to store the MAC in
- * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
- *
- * Search the firmware node for the best MAC address to use. 'mac-address' is
- * checked first, because that is supposed to contain to "most recent" MAC
- * address. If that isn't set, then 'local-mac-address' is checked next,
- * because that is the default address. If that isn't set, then the obsolete
- * 'address' is checked, just in case we're using an old device tree.
- *
- * Note that the 'address' property is supposed to contain a virtual address of
- * the register set, but some DTS files have redefined that property to be the
- * MAC address.
- *
- * All-zero MAC addresses are rejected, because those could be properties that
- * exist in the firmware tables, but were not updated by the firmware. For
- * example, the DTS could define 'mac-address' and 'local-mac-address', with
- * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'.
- * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
- * exists but is all zeros.
-*/
-void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen)
-{
- char *res;
-
- res = fwnode_get_mac_addr(fwnode, "mac-address", addr, alen);
- if (res)
- return res;
-
- res = fwnode_get_mac_addr(fwnode, "local-mac-address", addr, alen);
- if (res)
- return res;
-
- return fwnode_get_mac_addr(fwnode, "address", addr, alen);
-}
-EXPORT_SYMBOL(fwnode_get_mac_address);
-
-/**
- * device_get_mac_address - Get the MAC for a given device
- * @dev: Pointer to the device
- * @addr: Address of buffer to store the MAC in
- * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
- */
-void *device_get_mac_address(struct device *dev, char *addr, int alen)
-{
- return fwnode_get_mac_address(dev_fwnode(dev), addr, alen);
-}
-EXPORT_SYMBOL(device_get_mac_address);
-
/**
* fwnode_irq_get - Get IRQ directly from a fwnode
* @fwnode: Pointer to the firmware node
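
The MAC helpers are not dropped from the kernel, only from drivers/base: they
move to the networking core in this release with an int-returning, ETH_ALEN
only signature. The prototypes below are an assumption based on the companion
net-next changes, which are not part of this diff:

    /* assumed new home: net/ethernet/eth.c, declared in <linux/etherdevice.h> */
    int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);
    int device_get_mac_address(struct device *dev, char *addr);

    /* typical caller; addr must hold ETH_ALEN bytes, 0 means success */
    char mac[ETH_ALEN];
    int err = device_get_mac_address(dev, mac);

    if (!err)
            pr_info("MAC: %pM\n", mac);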
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index cfa29dc89bbf..fabf87058d80 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (!blk)
return -ENOMEM;
+ rbnode->block = blk;
+
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
present = krealloc(rbnode->cache_present,
BITS_TO_LONGS(blklen) * sizeof(*present),
GFP_KERNEL);
- if (!present) {
- kfree(blk);
+ if (!present)
return -ENOMEM;
- }
memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
(BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
@@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
}
/* update the rbnode block, its size and the base register */
- rbnode->block = blk;
rbnode->blklen = blklen;
rbnode->base_reg = base_reg;
rbnode->cache_present = present;
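
The reordering closes a double-free window: blk may be rbnode->block resized
in place by krealloc(), so the old error path's kfree(blk) could free memory
still reachable through rbnode->block and trigger a double free when the cache
is torn down. A condensed sketch of the hazard and the fix:

    blk = krealloc(rbnode->block, blklen * map->cache_word_size, GFP_KERNEL);
    if (!blk)
            return -ENOMEM;

    rbnode->block = blk;    /* take ownership immediately, so that... */

    present = krealloc(rbnode->cache_present,
                       BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
    if (!present)
            return -ENOMEM; /* ...no kfree(blk) here: the node still owns a
                             * valid block and frees it on teardown */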
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
index 6a20201299f5..f7293040a2b1 100644
--- a/drivers/base/regmap/regmap-mdio.c
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -14,7 +14,7 @@ static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int
{
int ret;
- ret = mdiobus_read(mdio_dev->bus, mdio_dev->addr, reg);
+ ret = mdiodev_read(mdio_dev, reg);
if (ret < 0)
return ret;
@@ -24,7 +24,7 @@ static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int
static int regmap_mdio_write(struct mdio_device *mdio_dev, u32 reg, unsigned int val)
{
- return mdiobus_write(mdio_dev->bus, mdio_dev->addr, reg, val);
+ return mdiodev_write(mdio_dev, reg, val);
}
static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
@@ -44,7 +44,7 @@ static int regmap_mdio_c22_write(void *context, unsigned int reg, unsigned int v
if (unlikely(reg & ~REGNUM_C22_MASK))
return -ENXIO;
- return mdiobus_write(mdio_dev->bus, mdio_dev->addr, reg, val);
+ return mdiodev_write(mdio_dev, reg, val);
}
static const struct regmap_bus regmap_mdio_c22_bus = {
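
mdiodev_read()/mdiodev_write() are thin wrappers that fold in the bus and
address of the mdio_device; their definitions in <linux/mdio.h> are along
these lines (paraphrased):

    static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
    {
            return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
    }

    static inline int mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
                                    u16 val)
    {
            return mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
    }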
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index c1894e93c378..719323bc6c7f 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -109,13 +109,37 @@ static const struct regmap_bus regmap_spi = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
+static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
+ const struct regmap_config *config)
+{
+ size_t max_size = spi_max_transfer_size(spi);
+ struct regmap_bus *bus;
+
+ if (max_size != SIZE_MAX) {
+ bus = kmemdup(&regmap_spi, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return ERR_PTR(-ENOMEM);
+
+ bus->free_on_exit = true;
+ bus->max_raw_read = max_size;
+ bus->max_raw_write = max_size;
+ return bus;
+ }
+
+ return &regmap_spi;
+}
+
struct regmap *__regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
- return __regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
- lock_key, lock_name);
+ const struct regmap_bus *bus = regmap_get_spi_bus(spi, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&spi->dev, bus, &spi->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_spi);
@@ -124,8 +148,12 @@ struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
struct lock_class_key *lock_key,
const char *lock_name)
{
- return __devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
- lock_key, lock_name);
+ const struct regmap_bus *bus = regmap_get_spi_bus(spi, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&spi->dev, bus, &spi->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index c46f6a8e14d2..4debcea4fb12 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -413,9 +413,6 @@ software_node_get_name(const struct fwnode_handle *fwnode)
{
const struct swnode *swnode = to_swnode(fwnode);
- if (!swnode)
- return "(null)";
-
return kobject_name(&swnode->kobj);
}
@@ -507,9 +504,6 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
int error;
int i;
- if (!swnode)
- return -ENOENT;
-
prop = property_entry_get(swnode->node->properties, propname);
if (!prop)
return -ENOENT;
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 43c0940643f5..8f2b641d0b8c 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -48,6 +48,9 @@ static DEVICE_ATTR_RO(physical_package_id);
define_id_show_func(die_id);
static DEVICE_ATTR_RO(die_id);
+define_id_show_func(cluster_id);
+static DEVICE_ATTR_RO(cluster_id);
+
define_id_show_func(core_id);
static DEVICE_ATTR_RO(core_id);
@@ -63,6 +66,10 @@ define_siblings_read_func(core_siblings, core_cpumask);
static BIN_ATTR_RO(core_siblings, 0);
static BIN_ATTR_RO(core_siblings_list, 0);
+define_siblings_read_func(cluster_cpus, cluster_cpumask);
+static BIN_ATTR_RO(cluster_cpus, 0);
+static BIN_ATTR_RO(cluster_cpus_list, 0);
+
define_siblings_read_func(die_cpus, die_cpumask);
static BIN_ATTR_RO(die_cpus, 0);
static BIN_ATTR_RO(die_cpus_list, 0);
@@ -94,6 +101,8 @@ static struct bin_attribute *bin_attrs[] = {
&bin_attr_thread_siblings_list,
&bin_attr_core_siblings,
&bin_attr_core_siblings_list,
+ &bin_attr_cluster_cpus,
+ &bin_attr_cluster_cpus_list,
&bin_attr_die_cpus,
&bin_attr_die_cpus_list,
&bin_attr_package_cpus,
@@ -112,6 +121,7 @@ static struct bin_attribute *bin_attrs[] = {
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
&dev_attr_die_id.attr,
+ &dev_attr_cluster_id.attr,
&dev_attr_core_id.attr,
#ifdef CONFIG_SCHED_BOOK
&dev_attr_book_id.attr,
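
Userspace reads the new attributes like any other topology file; a small
standalone sketch (the path assumes CPU0 and a kernel exposing cluster_id):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/system/cpu/cpu0/topology/cluster_id", "r");
            int id;

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &id) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("cluster_id: %d\n", id);
            return 0;
    }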
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index c6d6ba0d00b1..8e7ca3e4c8c4 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -20,7 +20,7 @@ MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
/* contains the number the next bus should get. */
-static unsigned int bcma_bus_next_num = 0;
+static unsigned int bcma_bus_next_num;
/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ab3e37aa1830..2a51dfb09c8f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -180,14 +180,6 @@ config BLK_DEV_LOOP
bits of, say, a sound file). This is also safe if the file resides
on a remote file server.
- There are several ways of encrypting disks. Some of these require
- kernel patches. The vanilla kernel offers the cryptoloop option
- and a Device Mapper target (which is superior, as it supports all
- file systems). If you want to use the cryptoloop, say Y to both
- LOOP and CRYPTOLOOP, and make sure you have a recent (version 2.12
- or later) version of util-linux. Additionally, be aware that
- the cryptoloop is not safe for storing journaled filesystems.
-
Note that this loop device has nothing to do with the loopback
device used for network connections from the machine to itself.
@@ -211,21 +203,6 @@ config BLK_DEV_LOOP_MIN_COUNT
is used, it can be set to 0, since needed loop devices can be
dynamically allocated with the /dev/loop-control interface.
-config BLK_DEV_CRYPTOLOOP
- tristate "Cryptoloop Support (DEPRECATED)"
- select CRYPTO
- select CRYPTO_CBC
- depends on BLK_DEV_LOOP
- help
- Say Y here if you want to be able to use the ciphers that are
- provided by the CryptoAPI as loop transformation. This might be
- used as hard disk encryption.
-
- WARNING: This device is not safe for journaled file systems like
- ext3 or Reiserfs. Please use the Device Mapper crypto module
- instead, which can be configured to be on-disk compatible with the
- cryptoloop device. cryptoloop support will be removed in Linux 5.16.
-
source "drivers/block/drbd/Kconfig"
config BLK_DEV_NBD
@@ -304,8 +281,8 @@ config BLK_DEV_RAM_SIZE
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
+ depends on SCSI
select CDROM
- select SCSI_COMMON
help
Note: This driver is deprecated and will be removed from the
kernel in the near future!
@@ -394,6 +371,7 @@ config XEN_BLKDEV_BACKEND
config VIRTIO_BLK
tristate "Virtio block driver"
depends on VIRTIO
+ select SG_POOL
help
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index bc68817ef496..11a74f17c9ad 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
-obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8b1714021498..bf5c124c5452 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -61,10 +61,10 @@
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/blk-mq.h>
-#include <linux/elevator.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -1780,6 +1780,7 @@ static const struct blk_mq_ops amiflop_mq_ops = {
static int fd_alloc_disk(int drive, int system)
{
struct gendisk *disk;
+ int err;
disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
if (IS_ERR(disk))
@@ -1798,8 +1799,10 @@ static int fd_alloc_disk(int drive, int system)
set_capacity(disk, 880 * 2);
unit[drive].gendisk[system] = disk;
- add_disk(disk);
- return 0;
+ err = add_disk(disk);
+ if (err)
+ blk_cleanup_disk(disk);
+ return err;
}
static int fd_alloc_drive(int drive)
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 06b360f7123a..52484bcdedb9 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -37,8 +37,7 @@ static ssize_t aoedisk_show_state(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
- return snprintf(page, PAGE_SIZE,
- "%s%s\n",
+ return sysfs_emit(page, "%s%s\n",
(d->flags & DEVFL_UP) ? "up" : "down",
(d->flags & DEVFL_KICKME) ? ",kickme" :
(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
@@ -52,8 +51,8 @@ static ssize_t aoedisk_show_mac(struct device *dev,
struct aoetgt *t = d->targets[0];
if (t == NULL)
- return snprintf(page, PAGE_SIZE, "none\n");
- return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
+ return sysfs_emit(page, "none\n");
+ return sysfs_emit(page, "%pm\n", t->addr);
}
static ssize_t aoedisk_show_netif(struct device *dev,
struct device_attribute *attr, char *page)
@@ -85,7 +84,7 @@ static ssize_t aoedisk_show_netif(struct device *dev,
ne = nd;
nd = nds;
if (*nd == NULL)
- return snprintf(page, PAGE_SIZE, "none\n");
+ return sysfs_emit(page, "none\n");
for (p = page; nd < ne; nd++)
p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
p == page ? "" : ",", (*nd)->name);
@@ -99,7 +98,7 @@ static ssize_t aoedisk_show_fwver(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
- return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
+ return sysfs_emit(page, "0x%04x\n", (unsigned int) d->fw_ver);
}
static ssize_t aoedisk_show_payload(struct device *dev,
struct device_attribute *attr, char *page)
@@ -107,7 +106,7 @@ static ssize_t aoedisk_show_payload(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
- return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
+ return sysfs_emit(page, "%lu\n", d->maxbcnt);
}
static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
@@ -417,7 +416,9 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
- device_add_disk(NULL, gd, aoe_attr_groups);
+ err = device_add_disk(NULL, gd, aoe_attr_groups);
+ if (err)
+ goto out_disk_cleanup;
aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
@@ -426,6 +427,8 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
return;
+out_disk_cleanup:
+ blk_cleanup_disk(gd);
err_tagset:
blk_mq_free_tag_set(set);
err_mempool:
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index a093644ac39f..d14bdc3589b2 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -68,6 +68,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/blk-mq.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/wait.h>
@@ -298,6 +299,7 @@ static struct atari_floppy_struct {
disk change detection) */
int flags; /* flags */
struct gendisk *disk[NUM_DISK_MINORS];
+ bool registered[NUM_DISK_MINORS];
int ref;
int type;
struct blk_mq_tag_set tag_set;
@@ -456,10 +458,20 @@ static DEFINE_TIMER(fd_timer, check_change);
static void fd_end_request_cur(blk_status_t err)
{
+ DPRINT(("fd_end_request_cur(), bytes %d of %d\n",
+ blk_rq_cur_bytes(fd_request),
+ blk_rq_bytes(fd_request)));
+
if (!blk_update_request(fd_request, err,
blk_rq_cur_bytes(fd_request))) {
+ DPRINT(("calling __blk_mq_end_request()\n"));
__blk_mq_end_request(fd_request, err);
fd_request = NULL;
+ } else {
+ /* requeue rest of request */
+ DPRINT(("calling blk_mq_requeue_request()\n"));
+ blk_mq_requeue_request(fd_request, true);
+ fd_request = NULL;
}
}
@@ -653,9 +665,6 @@ static inline void copy_buffer(void *from, void *to)
*p2++ = *p1++;
}
-
-
-
/* General Interrupt Handling */
static void (*FloppyIRQHandler)( int status ) = NULL;
@@ -700,12 +709,21 @@ static void fd_error( void )
if (fd_request->error_count >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
fd_end_request_cur(BLK_STS_IOERR);
+ finish_fdc();
+ return;
}
else if (fd_request->error_count == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
}
+ /* need to re-run request to recalibrate */
+ atari_disable_irq( IRQ_MFP_FDC );
+
+ setup_req_params( SelectedDrive );
+ do_fd_action( SelectedDrive );
+
+ atari_enable_irq( IRQ_MFP_FDC );
}
@@ -732,8 +750,10 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
if (type) {
type--;
if (type >= NUM_DISK_MINORS ||
- minor2disktype[type].drive_types > DriveType)
+ minor2disktype[type].drive_types > DriveType) {
+ finish_fdc();
return -EINVAL;
+ }
}
q = unit[drive].disk[type]->queue;
@@ -751,6 +771,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
}
if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
+ finish_fdc();
ret = -EINVAL;
goto out;
}
@@ -791,6 +812,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
wait_for_completion(&format_wait);
+ finish_fdc();
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
@@ -825,6 +847,7 @@ static void do_fd_action( int drive )
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
+ finish_fdc();
return;
}
}
@@ -1229,6 +1252,7 @@ static void fd_rwsec_done1(int status)
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
+ finish_fdc();
}
return;
@@ -1350,7 +1374,7 @@ static void fd_times_out(struct timer_list *unused)
static void finish_fdc( void )
{
- if (!NeedSeek) {
+ if (!NeedSeek || !stdma_is_locked_by(floppy_irq)) {
finish_fdc_done( 0 );
}
else {
@@ -1385,7 +1409,8 @@ static void finish_fdc_done( int dummy )
start_motor_off_timer();
local_irq_save(flags);
- stdma_release();
+ if (stdma_is_locked_by(floppy_irq))
+ stdma_release();
local_irq_restore(flags);
DPRINT(("finish_fdc() finished\n"));
@@ -1435,8 +1460,7 @@ static int floppy_revalidate(struct gendisk *disk)
unsigned int drive = p - unit;
if (test_bit(drive, &changed_floppies) ||
- test_bit(drive, &fake_change) ||
- p->disktype == 0) {
+ test_bit(drive, &fake_change) || !p->disktype) {
if (UD.flags & FTD_MSG)
printk(KERN_ERR "floppy: clear format %p!\n", UDT);
BufferDrive = -1;
@@ -1475,15 +1499,6 @@ static void setup_req_params( int drive )
ReqTrack, ReqSector, (unsigned long)ReqData ));
}
-static void ataflop_commit_rqs(struct blk_mq_hw_ctx *hctx)
-{
- spin_lock_irq(&ataflop_lock);
- atari_disable_irq(IRQ_MFP_FDC);
- finish_fdc();
- atari_enable_irq(IRQ_MFP_FDC);
- spin_unlock_irq(&ataflop_lock);
-}
-
static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1491,6 +1506,10 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
int drive = floppy - unit;
int type = floppy->type;
+ DPRINT(("Queue request: drive %d type %d sectors %d of %d last %d\n",
+ drive, type, blk_rq_cur_sectors(bd->rq),
+ blk_rq_sectors(bd->rq), bd->last));
+
spin_lock_irq(&ataflop_lock);
if (fd_request) {
spin_unlock_irq(&ataflop_lock);
@@ -1511,6 +1530,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
fd_end_request_cur(BLK_STS_IOERR);
+ stdma_release();
goto out;
}
@@ -1527,11 +1547,13 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
+ stdma_release();
goto out;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
+ stdma_release();
goto out;
}
type = minor2disktype[type].index;
@@ -1550,8 +1572,6 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
setup_req_params( drive );
do_fd_action( drive );
- if (bd->last)
- finish_fdc();
atari_enable_irq( IRQ_MFP_FDC );
out:
@@ -1634,6 +1654,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* what if type > 0 here? Overwrite specified entry ? */
if (type) {
/* refuse to re-set a predefined type for now */
+ finish_fdc();
return -EINVAL;
}
@@ -1701,8 +1722,10 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* sanity check */
if (setprm.track != dtp->blocks/dtp->spt/2 ||
- setprm.head != 2)
+ setprm.head != 2) {
+ finish_fdc();
return -EINVAL;
+ }
UDT = dtp;
set_capacity(disk, UDT->blocks);
@@ -1962,7 +1985,6 @@ static const struct block_device_operations floppy_fops = {
static const struct blk_mq_ops ataflop_mq_ops = {
.queue_rq = ataflop_queue_rq,
- .commit_rqs = ataflop_commit_rqs,
};
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
@@ -2000,12 +2022,28 @@ static void ataflop_probe(dev_t dev)
return;
mutex_lock(&ataflop_probe_lock);
if (!unit[drive].disk[type]) {
- if (ataflop_alloc_disk(drive, type) == 0)
+ if (ataflop_alloc_disk(drive, type) == 0) {
add_disk(unit[drive].disk[type]);
+ unit[drive].registered[type] = true;
+ }
}
mutex_unlock(&ataflop_probe_lock);
}
+static void atari_cleanup_floppy_disk(struct atari_floppy_struct *fs)
+{
+ int type;
+
+ for (type = 0; type < NUM_DISK_MINORS; type++) {
+ if (!fs->disk[type])
+ continue;
+ if (fs->registered[type])
+ del_gendisk(fs->disk[type]);
+ blk_cleanup_disk(fs->disk[type]);
+ }
+ blk_mq_free_tag_set(&fs->tag_set);
+}
+
static int __init atari_floppy_init (void)
{
int i;
@@ -2064,7 +2102,10 @@ static int __init atari_floppy_init (void)
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].track = -1;
unit[i].flags = 0;
- add_disk(unit[i].disk[0]);
+ ret = add_disk(unit[i].disk[0]);
+ if (ret)
+ goto err_out_dma;
+ unit[i].registered[0] = true;
}
printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
@@ -2074,12 +2115,11 @@ static int __init atari_floppy_init (void)
return 0;
+err_out_dma:
+ atari_stram_free(DMABuffer);
err:
- while (--i >= 0) {
- blk_cleanup_queue(unit[i].disk[0]->queue);
- put_disk(unit[i].disk[0]);
- blk_mq_free_tag_set(&unit[i].tag_set);
- }
+ while (--i >= 0)
+ atari_cleanup_floppy_disk(&unit[i]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_unlock:
@@ -2128,18 +2168,10 @@ __setup("floppy=", atari_floppy_setup);
static void __exit atari_floppy_exit(void)
{
- int i, type;
+ int i;
- for (i = 0; i < FD_MAX_UNITS; i++) {
- for (type = 0; type < NUM_DISK_MINORS; type++) {
- if (!unit[i].disk[type])
- continue;
- del_gendisk(unit[i].disk[type]);
- blk_cleanup_queue(unit[i].disk[type]->queue);
- put_disk(unit[i].disk[type]);
- }
- blk_mq_free_tag_set(&unit[i].tag_set);
- }
+ for (i = 0; i < FD_MAX_UNITS; i++)
+ atari_cleanup_floppy_disk(&unit[i]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
del_timer_sync(&fd_timer);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 530b31240203..aa0472718dce 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -282,7 +282,7 @@ out:
return err;
}
-static blk_qc_t brd_submit_bio(struct bio *bio)
+static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
sector_t sector = bio->bi_iter.bi_sector;
@@ -299,16 +299,14 @@ static blk_qc_t brd_submit_bio(struct bio *bio)
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
bio_op(bio), sector);
- if (err)
- goto io_error;
+ if (err) {
+ bio_io_error(bio);
+ return;
+ }
sector += len >> SECTOR_SHIFT;
}
bio_endio(bio);
- return BLK_QC_T_NONE;
-io_error:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
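
This follows the block-layer change that made ->submit_bio() return void, with
bio polling handled through the bio itself rather than a returned cookie. For
reference, brd's operations table then looks like this sketch:

    static const struct block_device_operations brd_fops = {
            .owner          = THIS_MODULE,
            .submit_bio     = brd_submit_bio,   /* void, no blk_qc_t cookie */
            .rw_page        = brd_rw_page,
    };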
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
deleted file mode 100644
index f0a91faa43a8..000000000000
--- a/drivers/block/cryptoloop.c
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- Linux loop encryption enabling module
-
- Copyright (C) 2002 Herbert Valerio Riedel <hvr@gnu.org>
- Copyright (C) 2003 Fruhwirth Clemens <clemens@endorphin.org>
-
- */
-
-#include <linux/module.h>
-
-#include <crypto/skcipher.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/blkdev.h>
-#include <linux/scatterlist.h>
-#include <linux/uaccess.h>
-#include "loop.h"
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI");
-MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
-
-#define LOOP_IV_SECTOR_BITS 9
-#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS)
-
-static int
-cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
-{
- int err = -EINVAL;
- int cipher_len;
- int mode_len;
- char cms[LO_NAME_SIZE]; /* cipher-mode string */
- char *mode;
- char *cmsp = cms; /* c-m string pointer */
- struct crypto_sync_skcipher *tfm;
-
- /* encryption breaks for non sector aligned offsets */
-
- if (info->lo_offset % LOOP_IV_SECTOR_SIZE)
- goto out;
-
- strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
- cms[LO_NAME_SIZE - 1] = 0;
-
- cipher_len = strcspn(cmsp, "-");
-
- mode = cmsp + cipher_len;
- mode_len = 0;
- if (*mode) {
- mode++;
- mode_len = strcspn(mode, "-");
- }
-
- if (!mode_len) {
- mode = "cbc";
- mode_len = 3;
- }
-
- if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
- return -EINVAL;
-
- memmove(cms, mode, mode_len);
- cmsp = cms + mode_len;
- *cmsp++ = '(';
- memcpy(cmsp, info->lo_crypt_name, cipher_len);
- cmsp += cipher_len;
- *cmsp++ = ')';
- *cmsp = 0;
-
- tfm = crypto_alloc_sync_skcipher(cms, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- err = crypto_sync_skcipher_setkey(tfm, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
-
- if (err != 0)
- goto out_free_tfm;
-
- lo->key_data = tfm;
- return 0;
-
- out_free_tfm:
- crypto_free_sync_skcipher(tfm);
-
- out:
- return err;
-}
-
-
-typedef int (*encdec_cbc_t)(struct skcipher_request *req);
-
-static int
-cryptoloop_transfer(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t IV)
-{
- struct crypto_sync_skcipher *tfm = lo->key_data;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- struct scatterlist sg_out;
- struct scatterlist sg_in;
-
- encdec_cbc_t encdecfunc;
- struct page *in_page, *out_page;
- unsigned in_offs, out_offs;
- int err;
-
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
-
- sg_init_table(&sg_out, 1);
- sg_init_table(&sg_in, 1);
-
- if (cmd == READ) {
- in_page = raw_page;
- in_offs = raw_off;
- out_page = loop_page;
- out_offs = loop_off;
- encdecfunc = crypto_skcipher_decrypt;
- } else {
- in_page = loop_page;
- in_offs = loop_off;
- out_page = raw_page;
- out_offs = raw_off;
- encdecfunc = crypto_skcipher_encrypt;
- }
-
- while (size > 0) {
- const int sz = min(size, LOOP_IV_SECTOR_SIZE);
- u32 iv[4] = { 0, };
- iv[0] = cpu_to_le32(IV & 0xffffffff);
-
- sg_set_page(&sg_in, in_page, sz, in_offs);
- sg_set_page(&sg_out, out_page, sz, out_offs);
-
- skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv);
- err = encdecfunc(req);
- if (err)
- goto out;
-
- IV++;
- size -= sz;
- in_offs += sz;
- out_offs += sz;
- }
-
- err = 0;
-
-out:
- skcipher_request_zero(req);
- return err;
-}
-
-static int
-cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
-{
- return -EINVAL;
-}
-
-static int
-cryptoloop_release(struct loop_device *lo)
-{
- struct crypto_sync_skcipher *tfm = lo->key_data;
- if (tfm != NULL) {
- crypto_free_sync_skcipher(tfm);
- lo->key_data = NULL;
- return 0;
- }
- printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n");
- return -EINVAL;
-}
-
-static struct loop_func_table cryptoloop_funcs = {
- .number = LO_CRYPT_CRYPTOAPI,
- .init = cryptoloop_init,
- .ioctl = cryptoloop_ioctl,
- .transfer = cryptoloop_transfer,
- .release = cryptoloop_release,
- .owner = THIS_MODULE
-};
-
-static int __init
-init_cryptoloop(void)
-{
- int rc = loop_register_transfer(&cryptoloop_funcs);
-
- if (rc)
- printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
- else
- pr_warn("the cryptoloop driver has been deprecated and will be removed in in Linux 5.16\n");
- return rc;
-}
-
-static void __exit
-cleanup_cryptoloop(void)
-{
- if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI))
- printk(KERN_ERR
- "cryptoloop: loop_unregister_transfer failed\n");
-}
-
-module_init(init_cryptoloop);
-module_exit(cleanup_cryptoloop);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 5d9181382ce1..f27d5b0f9a0b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1448,7 +1448,7 @@ extern void conn_free_crypto(struct drbd_connection *connection);
/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *);
-extern blk_qc_t drbd_submit_bio(struct bio *bio);
+void drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@@ -1826,8 +1826,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
/* Returns the number of 512 byte sectors of the device */
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
- /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
- return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
+ return bdev ? bdev_nr_sectors(bdev) : 0;
}
/**
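
bdev_nr_sectors() is the generic replacement for such open-coded conversions;
at the time of this series it is roughly the following inline (paraphrased
from the block headers):

    static inline sector_t bdev_nr_sectors(struct block_device *bdev)
    {
            return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
    }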
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 55234a558e98..19db80a1e409 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2794,7 +2794,9 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
goto out_idr_remove_vol;
}
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_cleanup_disk;
/* inherit the connection state */
device->state.conn = first_connection(resource)->cstate;
@@ -2808,6 +2810,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_debugfs_device_add(device);
return NO_ERROR;
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
out_idr_remove_vol:
idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 5ca233644d70..3235532ae077 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1596,7 +1596,7 @@ void do_submit(struct work_struct *ws)
}
}
-blk_qc_t drbd_submit_bio(struct bio *bio)
+void drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
@@ -1609,7 +1609,6 @@ blk_qc_t drbd_submit_bio(struct bio *bio)
inc_ap_bio(device);
__drbd_make_request(device, bio);
- return BLK_QC_T_NONE;
}
static bool net_timeout_reached(struct drbd_request *net_req,
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index fef79ea52e3e..3873e789478e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -184,6 +184,7 @@ static int print_unex = 1;
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/major.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
@@ -4478,6 +4479,7 @@ static const struct blk_mq_ops floppy_mq_ops = {
};
static struct platform_device floppy_device[N_DRIVE];
+static bool registered[N_DRIVE];
static bool floppy_available(int drive)
{
@@ -4693,8 +4695,12 @@ static int __init do_floppy_init(void)
if (err)
goto out_remove_drives;
- device_add_disk(&floppy_device[drive].dev, disks[drive][0],
- NULL);
+ registered[drive] = true;
+
+ err = device_add_disk(&floppy_device[drive].dev,
+ disks[drive][0], NULL);
+ if (err)
+ goto out_remove_drives;
}
return 0;
@@ -4703,7 +4709,8 @@ out_remove_drives:
while (drive--) {
if (floppy_available(drive)) {
del_gendisk(disks[drive][0]);
- platform_device_unregister(&floppy_device[drive]);
+ if (registered[drive])
+ platform_device_unregister(&floppy_device[drive]);
}
}
out_release_dma:
@@ -4946,30 +4953,14 @@ static void __exit floppy_module_exit(void)
if (disks[drive][i])
del_gendisk(disks[drive][i]);
}
- platform_device_unregister(&floppy_device[drive]);
+ if (registered[drive])
+ platform_device_unregister(&floppy_device[drive]);
}
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
- blk_cleanup_queue(disks[drive][i]->queue);
+ blk_cleanup_disk(disks[drive][i]);
}
blk_mq_free_tag_set(&tag_sets[drive]);
-
- /*
- * These disks have not called add_disk(). Don't put down
- * queue reference in put_disk().
- */
- if (!(allowed_drive_mask & (1 << drive)) ||
- fdc_state[FDC(drive)].version == FDC_NONE) {
- for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
- if (disks[drive][i])
- disks[drive][i]->queue = NULL;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
- if (disks[drive][i])
- put_disk(disks[drive][i]);
- }
}
cancel_delayed_work_sync(&fd_timeout);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 7bf4686af774..3c09a33fa1c7 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -133,58 +133,6 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
static int max_part;
static int part_shift;
-static int transfer_xor(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block)
-{
- char *raw_buf = kmap_atomic(raw_page) + raw_off;
- char *loop_buf = kmap_atomic(loop_page) + loop_off;
- char *in, *out, *key;
- int i, keysize;
-
- if (cmd == READ) {
- in = raw_buf;
- out = loop_buf;
- } else {
- in = loop_buf;
- out = raw_buf;
- }
-
- key = lo->lo_encrypt_key;
- keysize = lo->lo_encrypt_key_size;
- for (i = 0; i < size; i++)
- *out++ = *in++ ^ key[(i & 511) % keysize];
-
- kunmap_atomic(loop_buf);
- kunmap_atomic(raw_buf);
- cond_resched();
- return 0;
-}
-
-static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
-{
- if (unlikely(info->lo_encrypt_key_size <= 0))
- return -EINVAL;
- return 0;
-}
-
-static struct loop_func_table none_funcs = {
- .number = LO_CRYPT_NONE,
-};
-
-static struct loop_func_table xor_funcs = {
- .number = LO_CRYPT_XOR,
- .transfer = transfer_xor,
- .init = xor_init
-};
-
-/* xfer_funcs[0] is special - its release function is never called */
-static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
- &none_funcs,
- &xor_funcs
-};
-
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
loff_t loopsize;
@@ -228,8 +176,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
/*
* We support direct I/O only if lo_offset is aligned with the
* logical I/O size of backing device, and the logical block
- * size of loop is bigger than the backing device's and the loop
- * needn't transform transfer.
+ * size of loop is bigger than the backing device's.
*
* TODO: the above condition may be loosed in the future, and
* direct I/O may be switched runtime at that time because most
@@ -238,8 +185,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
if (dio) {
if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
!(lo->lo_offset & dio_align) &&
- mapping->a_ops->direct_IO &&
- !lo->transfer)
+ mapping->a_ops->direct_IO)
use_dio = true;
else
use_dio = false;
@@ -273,19 +219,6 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
}
/**
- * loop_validate_block_size() - validates the passed in block size
- * @bsize: size to validate
- */
-static int
-loop_validate_block_size(unsigned short bsize)
-{
- if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
- return -EINVAL;
-
- return 0;
-}
-
-/**
* loop_set_size() - sets device size and notifies userspace
* @lo: struct loop_device to set the size for
* @size: new size of the loop device
@@ -299,24 +232,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
-static inline int
-lo_do_transfer(struct loop_device *lo, int cmd,
- struct page *rpage, unsigned roffs,
- struct page *lpage, unsigned loffs,
- int size, sector_t rblock)
-{
- int ret;
-
- ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
- if (likely(!ret))
- return 0;
-
- printk_ratelimited(KERN_ERR
- "loop: Transfer error at byte offset %llu, length %i.\n",
- (unsigned long long)rblock << 9, size);
- return ret;
-}
-
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
@@ -356,41 +271,6 @@ static int lo_write_simple(struct loop_device *lo, struct request *rq,
return ret;
}
-/*
- * This is the slow, transforming version that needs to double buffer the
- * data as it cannot do the transformations in place without having direct
- * access to the destination pages of the backing file.
- */
-static int lo_write_transfer(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec, b;
- struct req_iterator iter;
- struct page *page;
- int ret = 0;
-
- page = alloc_page(GFP_NOIO);
- if (unlikely(!page))
- return -ENOMEM;
-
- rq_for_each_segment(bvec, rq, iter) {
- ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
- bvec.bv_offset, bvec.bv_len, pos >> 9);
- if (unlikely(ret))
- break;
-
- b.bv_page = page;
- b.bv_offset = 0;
- b.bv_len = bvec.bv_len;
- ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
- if (ret < 0)
- break;
- }
-
- __free_page(page);
- return ret;
-}
-
static int lo_read_simple(struct loop_device *lo, struct request *rq,
loff_t pos)
{
@@ -420,64 +300,12 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
return 0;
}
-static int lo_read_transfer(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec, b;
- struct req_iterator iter;
- struct iov_iter i;
- struct page *page;
- ssize_t len;
- int ret = 0;
-
- page = alloc_page(GFP_NOIO);
- if (unlikely(!page))
- return -ENOMEM;
-
- rq_for_each_segment(bvec, rq, iter) {
- loff_t offset = pos;
-
- b.bv_page = page;
- b.bv_offset = 0;
- b.bv_len = bvec.bv_len;
-
- iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
- len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
- if (len < 0) {
- ret = len;
- goto out_free_page;
- }
-
- ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
- bvec.bv_offset, len, offset >> 9);
- if (ret)
- goto out_free_page;
-
- flush_dcache_page(bvec.bv_page);
-
- if (len != bvec.bv_len) {
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- zero_fill_bio(bio);
- break;
- }
- }
-
- ret = 0;
-out_free_page:
- __free_page(page);
- return ret;
-}
-
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
int mode)
{
/*
* We use fallocate to manipulate the space mappings used by the image
- * a.k.a. discard/zerorange. However we do not support this if
- * encryption is enabled, because it may give an attacker useful
- * information.
+ * a.k.a. discard/zerorange.
*/
struct file *file = lo->lo_backing_file;
struct request_queue *q = lo->lo_queue;
@@ -554,7 +382,7 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
blk_mq_complete_request(rq);
}
-static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
{
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
@@ -627,7 +455,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
lo_rw_aio_do_completion(cmd);
if (ret != -EIOCBQUEUED)
- cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
+ lo_rw_aio_complete(&cmd->iocb, ret);
return 0;
}
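
Calling lo_rw_aio_complete() directly keeps the synchronous-return path on the
same completion code as queued I/O. This matches the tree-wide change that
dropped the unused second result argument from ->ki_complete(); the kiocb
callback is assumed to now be:

    /* <linux/fs.h>, after the tree-wide conversion */
    void (*ki_complete)(struct kiocb *iocb, long ret);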
@@ -660,16 +488,12 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
- if (lo->transfer)
- return lo_write_transfer(lo, rq, pos);
- else if (cmd->use_aio)
+ if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, WRITE);
else
return lo_write_simple(lo, rq, pos);
case REQ_OP_READ:
- if (lo->transfer)
- return lo_read_transfer(lo, rq, pos);
- else if (cmd->use_aio)
+ if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, READ);
else
return lo_read_simple(lo, rq, pos);
@@ -934,7 +758,7 @@ static void loop_config_discard(struct loop_device *lo)
* not blkdev_issue_discard(). This maintains consistent behavior with
* file-backed loop devices: discarded regions read back as zero.
*/
- if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
+ if (S_ISBLK(inode->i_mode)) {
struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
@@ -943,11 +767,9 @@ static void loop_config_discard(struct loop_device *lo)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do not support discard if
- * encryption is enabled, because it may give an attacker
- * useful information.
+ * image a.k.a. discard.
*/
- } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
+ } else if (!file->f_op->fallocate) {
max_discard_sectors = 0;
granularity = 0;
@@ -1084,43 +906,6 @@ static void loop_update_rotational(struct loop_device *lo)
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}
-static int
-loop_release_xfer(struct loop_device *lo)
-{
- int err = 0;
- struct loop_func_table *xfer = lo->lo_encryption;
-
- if (xfer) {
- if (xfer->release)
- err = xfer->release(lo);
- lo->transfer = NULL;
- lo->lo_encryption = NULL;
- module_put(xfer->owner);
- }
- return err;
-}
-
-static int
-loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
- const struct loop_info64 *i)
-{
- int err = 0;
-
- if (xfer) {
- struct module *owner = xfer->owner;
-
- if (!try_module_get(owner))
- return -EINVAL;
- if (xfer->init)
- err = xfer->init(lo, i);
- if (err)
- module_put(owner);
- else
- lo->lo_encryption = xfer;
- }
- return err;
-}
-
/**
* loop_set_status_from_info - configure device from loop_info
* @lo: struct loop_device to configure
@@ -1133,55 +918,27 @@ static int
loop_set_status_from_info(struct loop_device *lo,
const struct loop_info64 *info)
{
- int err;
- struct loop_func_table *xfer;
- kuid_t uid = current_uid();
-
if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
return -EINVAL;
- err = loop_release_xfer(lo);
- if (err)
- return err;
-
- if (info->lo_encrypt_type) {
- unsigned int type = info->lo_encrypt_type;
-
- if (type >= MAX_LO_CRYPT)
- return -EINVAL;
- xfer = xfer_funcs[type];
- if (xfer == NULL)
- return -EINVAL;
- } else
- xfer = NULL;
-
- err = loop_init_xfer(lo, xfer, info);
- if (err)
- return err;
+ switch (info->lo_encrypt_type) {
+ case LO_CRYPT_NONE:
+ break;
+ case LO_CRYPT_XOR:
+ pr_warn("support for the xor transformation has been removed.\n");
+ return -EINVAL;
+ case LO_CRYPT_CRYPTOAPI:
+ pr_warn("support for cryptoloop has been removed. Use dm-crypt instead.\n");
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
- memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
-
- if (!xfer)
- xfer = &none_funcs;
- lo->transfer = xfer->transfer;
- lo->ioctl = xfer->ioctl;
-
lo->lo_flags = info->lo_flags;
-
- lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
- lo->lo_init[0] = info->lo_init[0];
- lo->lo_init[1] = info->lo_init[1];
- if (info->lo_encrypt_key_size) {
- memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
- lo->lo_key_owner = uid;
- }
-
return 0;
}
@@ -1236,7 +993,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
}
if (config->block_size) {
- error = loop_validate_block_size(config->block_size);
+ error = blk_validate_block_size(config->block_size);
if (error)
goto out_unlock;
}
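
loop's private validator was promoted to a generic block-layer helper; its
body is essentially the removed loop code (roughly, from <linux/blkdev.h>):

    static inline int blk_validate_block_size(unsigned long bsize)
    {
            if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
                    return -EINVAL;

            return 0;
    }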
@@ -1329,7 +1086,6 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
{
struct file *filp = NULL;
gfp_t gfp = lo->old_gfp_mask;
- struct block_device *bdev = lo->lo_device;
int err = 0;
bool partscan = false;
int lo_number;
@@ -1381,36 +1137,23 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
- loop_release_xfer(lo);
- lo->transfer = NULL;
- lo->ioctl = NULL;
lo->lo_device = NULL;
- lo->lo_encryption = NULL;
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
- lo->lo_encrypt_key_size = 0;
- memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
- memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
blk_queue_logical_block_size(lo->lo_queue, 512);
blk_queue_physical_block_size(lo->lo_queue, 512);
blk_queue_io_min(lo->lo_queue, 512);
- if (bdev) {
- invalidate_bdev(bdev);
- bdev->bd_inode->i_mapping->wb_err = 0;
- }
- set_capacity(lo->lo_disk, 0);
+ invalidate_disk(lo->lo_disk);
loop_sysfs_exit(lo);
- if (bdev) {
- /* let user-space know about this change */
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
- }
+ /* let user-space know about this change */
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(filp->f_mapping, gfp);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
blk_mq_unfreeze_queue(lo->lo_queue);
- partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
+ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
lo_number = lo->lo_number;
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
out_unlock:
@@ -1498,7 +1241,6 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- kuid_t uid = current_uid();
int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
@@ -1506,12 +1248,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
- if (lo->lo_encrypt_key_size &&
- !uid_eq(lo->lo_key_owner, uid) &&
- !capable(CAP_SYS_ADMIN)) {
- err = -EPERM;
- goto out_unlock;
- }
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
@@ -1597,14 +1333,6 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
info->lo_sizelimit = lo->lo_sizelimit;
info->lo_flags = lo->lo_flags;
memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
- memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
- info->lo_encrypt_type =
- lo->lo_encryption ? lo->lo_encryption->number : 0;
- if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
- info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
- memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
- lo->lo_encrypt_key_size);
- }
/* Drop lo_mutex while we call into the filesystem. */
path = lo->lo_backing_file->f_path;
@@ -1630,16 +1358,8 @@ loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
info64->lo_rdevice = info->lo_rdevice;
info64->lo_offset = info->lo_offset;
info64->lo_sizelimit = 0;
- info64->lo_encrypt_type = info->lo_encrypt_type;
- info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
info64->lo_flags = info->lo_flags;
- info64->lo_init[0] = info->lo_init[0];
- info64->lo_init[1] = info->lo_init[1];
- if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
- else
- memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
- memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
}
static int
@@ -1651,16 +1371,8 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
info->lo_inode = info64->lo_inode;
info->lo_rdevice = info64->lo_rdevice;
info->lo_offset = info64->lo_offset;
- info->lo_encrypt_type = info64->lo_encrypt_type;
- info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
info->lo_flags = info64->lo_flags;
- info->lo_init[0] = info64->lo_init[0];
- info->lo_init[1] = info64->lo_init[1];
- if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
- else
- memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
- memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
/* error in case values were truncated */
if (info->lo_device != info64->lo_device ||
@@ -1759,7 +1471,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (lo->lo_state != Lo_bound)
return -ENXIO;
- err = loop_validate_block_size(arg);
+ err = blk_validate_block_size(arg);
if (err)
return err;
@@ -1809,7 +1521,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
err = loop_set_block_size(lo, arg);
break;
default:
- err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+ err = -EINVAL;
}
mutex_unlock(&lo->lo_mutex);
return err;
@@ -1885,7 +1597,6 @@ struct compat_loop_info {
compat_ulong_t lo_inode; /* ioctl r/o */
compat_dev_t lo_rdevice; /* ioctl r/o */
compat_int_t lo_offset;
- compat_int_t lo_encrypt_type;
compat_int_t lo_encrypt_key_size; /* ioctl w/o */
compat_int_t lo_flags; /* ioctl r/o */
char lo_name[LO_NAME_SIZE];
@@ -1914,16 +1625,8 @@ loop_info64_from_compat(const struct compat_loop_info __user *arg,
info64->lo_rdevice = info.lo_rdevice;
info64->lo_offset = info.lo_offset;
info64->lo_sizelimit = 0;
- info64->lo_encrypt_type = info.lo_encrypt_type;
- info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
info64->lo_flags = info.lo_flags;
- info64->lo_init[0] = info.lo_init[0];
- info64->lo_init[1] = info.lo_init[1];
- if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
- else
- memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
- memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
return 0;
}
@@ -1943,24 +1646,14 @@ loop_info64_to_compat(const struct loop_info64 *info64,
info.lo_inode = info64->lo_inode;
info.lo_rdevice = info64->lo_rdevice;
info.lo_offset = info64->lo_offset;
- info.lo_encrypt_type = info64->lo_encrypt_type;
- info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
info.lo_flags = info64->lo_flags;
- info.lo_init[0] = info64->lo_init[0];
- info.lo_init[1] = info64->lo_init[1];
- if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
- else
- memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
- memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
/* error in case values were truncated */
if (info.lo_device != info64->lo_device ||
info.lo_rdevice != info64->lo_rdevice ||
info.lo_inode != info64->lo_inode ||
- info.lo_offset != info64->lo_offset ||
- info.lo_init[0] != info64->lo_init[0] ||
- info.lo_init[1] != info64->lo_init[1])
+ info.lo_offset != info64->lo_offset)
return -EOVERFLOW;
if (copy_to_user(arg, &info, sizeof(info)))
@@ -2101,43 +1794,6 @@ MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
-int loop_register_transfer(struct loop_func_table *funcs)
-{
- unsigned int n = funcs->number;
-
- if (n >= MAX_LO_CRYPT || xfer_funcs[n])
- return -EINVAL;
- xfer_funcs[n] = funcs;
- return 0;
-}
-
-int loop_unregister_transfer(int number)
-{
- unsigned int n = number;
- struct loop_func_table *xfer;
-
- if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
- return -EINVAL;
- /*
- * This function is called from only cleanup_cryptoloop().
- * Given that each loop device that has a transfer enabled holds a
- * reference to the module implementing it we should never get here
- * with a transfer that is set (unless forced module unloading is
- * requested). Thus, check module's refcount and warn if this is
- * not a clean unloading.
- */
-#ifdef CONFIG_MODULE_UNLOAD
- if (xfer->owner && module_refcount(xfer->owner) != -1)
- pr_err("Danger! Unregistering an in use transfer function.\n");
-#endif
-
- xfer_funcs[n] = NULL;
- return 0;
-}
-
-EXPORT_SYMBOL(loop_register_transfer);
-EXPORT_SYMBOL(loop_unregister_transfer);
-
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -2394,13 +2050,19 @@ static int loop_add(int i)
disk->event_flags = DISK_EVENT_FLAG_UEVENT;
sprintf(disk->disk_name, "loop%d", i);
/* Make this loop device reachable from pathname. */
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_cleanup_disk;
+
/* Show this loop device. */
mutex_lock(&loop_ctl_mutex);
lo->idr_visible = true;
mutex_unlock(&loop_ctl_mutex);
+
return i;
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
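
Both loop and nbd now defer block-size checks to a common block-layer helper instead of open-coding them. A hedged sketch of the rule blk_validate_block_size() enforces, inferred from the open-coded test the nbd hunk further below deletes; the real helper lives in the block layer, and the _sketch name is illustrative only:

static inline int blk_validate_block_size_sketch(unsigned long bsize)
{
	/* same rule the drivers used to open-code: a power of two
	 * between 512 bytes and PAGE_SIZE inclusive */
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}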
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 04c88dd6eabd..082d4b6bfc6a 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -32,23 +32,10 @@ struct loop_device {
loff_t lo_offset;
loff_t lo_sizelimit;
int lo_flags;
- int (*transfer)(struct loop_device *, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block);
char lo_file_name[LO_NAME_SIZE];
- char lo_crypt_name[LO_NAME_SIZE];
- char lo_encrypt_key[LO_KEY_SIZE];
- int lo_encrypt_key_size;
- struct loop_func_table *lo_encryption;
- __u32 lo_init[2];
- kuid_t lo_key_owner; /* Who set the key */
- int (*ioctl)(struct loop_device *, int cmd,
- unsigned long arg);
struct file * lo_backing_file;
struct block_device *lo_device;
- void *key_data;
gfp_t old_gfp_mask;
@@ -82,21 +69,4 @@ struct loop_cmd {
struct cgroup_subsys_state *memcg_css;
};
-/* Support for loadable transfer modules */
-struct loop_func_table {
- int number; /* filter type */
- int (*transfer)(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block);
- int (*init)(struct loop_device *, const struct loop_info64 *);
- /* release is called from loop_unregister_transfer or clr_fd */
- int (*release)(struct loop_device *);
- int (*ioctl)(struct loop_device *, int cmd, unsigned long arg);
- struct module *owner;
-};
-
-int loop_register_transfer(struct loop_func_table *funcs);
-int loop_unregister_transfer(int number);
-
#endif
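
With the transfer hooks gone from struct loop_device, legacy callers that still request an encryption type are refused outright. A userspace sketch of the new behaviour (it assumes /dev/loop0 is already bound to a backing file; the path and program are illustrative):

#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct loop_info64 info;
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&info, 0, sizeof(info));
	info.lo_encrypt_type = LO_CRYPT_CRYPTOAPI;
	/* expected to fail with EINVAL now; the kernel logs a hint
	 * to use dm-crypt instead */
	if (ioctl(fd, LOOP_SET_STATUS64, &info) < 0)
		perror("LOOP_SET_STATUS64");
	close(fd);
	return 0;
}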
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 901855717cb5..c91b9010c1a6 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3633,7 +3633,9 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
- device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
+ rv = device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
+ if (rv)
+ goto read_capacity_error;
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -4061,7 +4063,6 @@ block_initialize_err:
msi_initialize_err:
if (dd->isr_workq) {
- flush_workqueue(dd->isr_workq);
destroy_workqueue(dd->isr_workq);
drop_cpu(dd->work[0].cpu_binding);
drop_cpu(dd->work[1].cpu_binding);
@@ -4119,7 +4120,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
mtip_block_remove(dd);
if (dd->isr_workq) {
- flush_workqueue(dd->isr_workq);
destroy_workqueue(dd->isr_workq);
drop_cpu(dd->work[0].cpu_binding);
drop_cpu(dd->work[1].cpu_binding);
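
The two flush_workqueue() deletions in mtip32xx rely on destroy_workqueue() draining all pending work itself before tearing the queue down, so the explicit flush was redundant and the teardown reduces to:

	if (dd->isr_workq)
		destroy_workqueue(dd->isr_workq);	/* implies a final drain */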
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index 26798da661bd..78282f01f581 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -84,7 +84,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
return true;
}
-static blk_qc_t n64cart_submit_bio(struct bio *bio)
+static void n64cart_submit_bio(struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
@@ -92,16 +92,14 @@ static blk_qc_t n64cart_submit_bio(struct bio *bio)
u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
bio_for_each_segment(bvec, bio, iter) {
- if (!n64cart_do_bvec(dev, &bvec, pos))
- goto io_error;
+ if (!n64cart_do_bvec(dev, &bvec, pos)) {
+ bio_io_error(bio);
+ return;
+ }
pos += bvec.bv_len;
}
bio_endio(bio);
- return BLK_QC_T_NONE;
-io_error:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static const struct block_device_operations n64cart_fops = {
@@ -117,6 +115,7 @@ static const struct block_device_operations n64cart_fops = {
static int __init n64cart_probe(struct platform_device *pdev)
{
struct gendisk *disk;
+ int err = -ENOMEM;
if (!start || !size) {
pr_err("start or size not specified\n");
@@ -134,7 +133,7 @@ static int __init n64cart_probe(struct platform_device *pdev)
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
- return -ENOMEM;
+ goto out;
disk->first_minor = 0;
disk->flags = GENHD_FL_NO_PART_SCAN;
@@ -149,11 +148,18 @@ static int __init n64cart_probe(struct platform_device *pdev)
blk_queue_physical_block_size(disk->queue, 4096);
blk_queue_logical_block_size(disk->queue, 4096);
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_cleanup_disk;
pr_info("n64cart: %u kb disk\n", size / 1024);
return 0;
+
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
+out:
+ return err;
}
static struct platform_driver n64cart_driver = {
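
n64cart, null_blk and pktcdvd all track the same interface change in this series: ->submit_bio() no longer returns a blk_qc_t cookie, so bio-based drivers return void and signal failure via bio_io_error(). A hypothetical minimal driver showing the new shape (my_do_io is a placeholder, not a real helper):

static void my_submit_bio(struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		if (!my_do_io(&bvec)) {
			bio_io_error(bio);	/* no cookie to return anymore */
			return;
		}
	}
	bio_endio(bio);
}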
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1183f7872b71..b47b2a87ae8f 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -122,15 +122,21 @@ struct nbd_device {
struct work_struct remove_work;
struct list_head list;
- struct task_struct *task_recv;
struct task_struct *task_setup;
unsigned long flags;
+ pid_t pid; /* pid of nbd-client, if attached */
char *backend;
};
#define NBD_CMD_REQUEUED 1
+/*
+ * This flag will be set if nbd_queue_rq() succeeds, and will be checked and
+ * cleared in completion. Both setting and clearing of the flag are protected
+ * by cmd->lock.
+ */
+#define NBD_CMD_INFLIGHT 2
struct nbd_cmd {
struct nbd_device *nbd;
@@ -217,7 +223,7 @@ static ssize_t pid_show(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
- return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
+ return sprintf(buf, "%d\n", nbd->pid);
}
static const struct device_attribute pid_attr = {
@@ -310,26 +316,19 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static void nbd_size_clear(struct nbd_device *nbd)
-{
- if (nbd->config->bytesize) {
- set_capacity(nbd->disk, 0);
- kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
- }
-}
-
static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
loff_t blksize)
{
if (!blksize)
blksize = 1u << NBD_DEF_BLKSIZE_BITS;
- if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
+
+ if (blk_validate_block_size(blksize))
return -EINVAL;
nbd->config->bytesize = bytesize;
nbd->config->blksize_bits = __ffs(blksize);
- if (!nbd->task_recv)
+ if (!nbd->pid)
return 0;
if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
@@ -405,6 +404,11 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
+ if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return BLK_EH_DONE;
+ }
+
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
mutex_unlock(&cmd->lock);
@@ -484,7 +488,8 @@ done:
}
/*
- * Send or receive packet.
+ * Send or receive packet. Return a positive value on success and
+ * negative value on failure, and never return 0.
*/
static int sock_xmit(struct nbd_device *nbd, int index, int send,
struct iov_iter *iter, int msg_flags, int *sent)
@@ -610,7 +615,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
trace_nbd_header_sent(req, handle);
- if (result <= 0) {
+ if (result < 0) {
if (was_interrupted(result)) {
/* If we haven't sent anything we can just return BUSY,
* however if we have sent something we need to make
@@ -654,7 +659,7 @@ send_pages:
skip = 0;
}
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
- if (result <= 0) {
+ if (result < 0) {
if (was_interrupted(result)) {
/* We've already sent the header, we
* have no choice but to set pending and
@@ -688,38 +693,45 @@ out:
return 0;
}
-/* NULL returned = something went wrong, inform userspace */
-static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
+static int nbd_read_reply(struct nbd_device *nbd, int index,
+ struct nbd_reply *reply)
{
- struct nbd_config *config = nbd->config;
- int result;
- struct nbd_reply reply;
- struct nbd_cmd *cmd;
- struct request *req = NULL;
- u64 handle;
- u16 hwq;
- u32 tag;
- struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
+ struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
struct iov_iter to;
- int ret = 0;
+ int result;
- reply.magic = 0;
- iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
+ reply->magic = 0;
+ iov_iter_kvec(&to, READ, &iov, 1, sizeof(*reply));
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
- if (result <= 0) {
- if (!nbd_disconnected(config))
+ if (result < 0) {
+ if (!nbd_disconnected(nbd->config))
dev_err(disk_to_dev(nbd->disk),
"Receive control failed (result %d)\n", result);
- return ERR_PTR(result);
+ return result;
}
- if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
+ if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
- (unsigned long)ntohl(reply.magic));
- return ERR_PTR(-EPROTO);
+ (unsigned long)ntohl(reply->magic));
+ return -EPROTO;
}
- memcpy(&handle, reply.handle, sizeof(handle));
+ return 0;
+}
+
+/* NULL returned = something went wrong, inform userspace */
+static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
+ struct nbd_reply *reply)
+{
+ int result;
+ struct nbd_cmd *cmd;
+ struct request *req = NULL;
+ u64 handle;
+ u16 hwq;
+ u32 tag;
+ int ret = 0;
+
+ memcpy(&handle, reply->handle, sizeof(handle));
tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
@@ -734,6 +746,16 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
+ if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
+ tag, cmd->status, cmd->flags);
+ ret = -ENOENT;
+ goto out;
+ }
+ if (cmd->index != index) {
+ dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
+ tag, index, cmd->index);
+ }
if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
@@ -752,9 +774,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
ret = -ENOENT;
goto out;
}
- if (ntohl(reply.error)) {
+ if (ntohl(reply->error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
- ntohl(reply.error));
+ ntohl(reply->error));
cmd->status = BLK_STS_IOERR;
goto out;
}
@@ -763,11 +785,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (rq_data_dir(req) != WRITE) {
struct req_iterator iter;
struct bio_vec bvec;
+ struct iov_iter to;
rq_for_each_segment(bvec, req, iter) {
iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
- if (result <= 0) {
+ if (result < 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
/*
@@ -776,7 +799,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
* and let the timeout stuff handle resubmitting
* this request onto another connection.
*/
- if (nbd_disconnected(config)) {
+ if (nbd_disconnected(nbd->config)) {
cmd->status = BLK_STS_IOERR;
goto out;
}
@@ -800,24 +823,46 @@ static void recv_work(struct work_struct *work)
work);
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
+ struct request_queue *q = nbd->disk->queue;
+ struct nbd_sock *nsock;
struct nbd_cmd *cmd;
struct request *rq;
while (1) {
- cmd = nbd_read_stat(nbd, args->index);
- if (IS_ERR(cmd)) {
- struct nbd_sock *nsock = config->socks[args->index];
+ struct nbd_reply reply;
- mutex_lock(&nsock->tx_lock);
- nbd_mark_nsock_dead(nbd, nsock, 1);
- mutex_unlock(&nsock->tx_lock);
+ if (nbd_read_reply(nbd, args->index, &reply))
+ break;
+
+ /*
+ * Grab .q_usage_counter so the request pool won't go away; then no
+ * request use-after-free is possible during nbd_handle_reply().
+ * If the queue is frozen, there won't be any inflight requests, so
+ * we need not handle the incoming garbage message.
+ */
+ if (!percpu_ref_tryget(&q->q_usage_counter)) {
+ dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
+ __func__);
+ break;
+ }
+
+ cmd = nbd_handle_reply(nbd, args->index, &reply);
+ if (IS_ERR(cmd)) {
+ percpu_ref_put(&q->q_usage_counter);
break;
}
rq = blk_mq_rq_from_pdu(cmd);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
+ percpu_ref_put(&q->q_usage_counter);
}
+
+ nsock = config->socks[args->index];
+ mutex_lock(&nsock->tx_lock);
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ mutex_unlock(&nsock->tx_lock);
+
nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
@@ -833,6 +878,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
return true;
mutex_lock(&cmd->lock);
+ if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return true;
+ }
cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock);
@@ -914,7 +963,6 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
if (!refcount_inc_not_zero(&nbd->config_refs)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
- blk_mq_start_request(req);
return -EINVAL;
}
config = nbd->config;
@@ -923,7 +971,6 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
nbd_config_put(nbd);
- blk_mq_start_request(req);
return -EINVAL;
}
cmd->status = BLK_STS_OK;
@@ -947,7 +994,6 @@ again:
*/
sock_shutdown(nbd);
nbd_config_put(nbd);
- blk_mq_start_request(req);
return -EIO;
}
goto again;
@@ -969,7 +1015,13 @@ again:
* returns EAGAIN can be retried on a different socket.
*/
ret = nbd_send_cmd(nbd, cmd, index);
- if (ret == -EAGAIN) {
+ /*
+ * Access to this flag is protected by cmd->lock; thus it's safe to set
+ * the flag after nbd_send_cmd() succeeds in sending the request.
+ */
+ if (!ret)
+ __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+ else if (ret == -EAGAIN) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
@@ -1206,7 +1258,7 @@ static void send_disconnects(struct nbd_device *nbd)
iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
- if (ret <= 0)
+ if (ret < 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
mutex_unlock(&nsock->tx_lock);
@@ -1237,11 +1289,13 @@ static void nbd_config_put(struct nbd_device *nbd)
&nbd->config_lock)) {
struct nbd_config *config = nbd->config;
nbd_dev_dbg_close(nbd);
- nbd_size_clear(nbd);
+ invalidate_disk(nbd->disk);
+ if (nbd->config->bytesize)
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
&config->runtime_flags))
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
- nbd->task_recv = NULL;
+ nbd->pid = 0;
if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
&config->runtime_flags)) {
device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
@@ -1282,7 +1336,7 @@ static int nbd_start_device(struct nbd_device *nbd)
int num_connections = config->num_connections;
int error = 0, i;
- if (nbd->task_recv)
+ if (nbd->pid)
return -EBUSY;
if (!config->socks)
return -EINVAL;
@@ -1301,7 +1355,7 @@ static int nbd_start_device(struct nbd_device *nbd)
}
blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
- nbd->task_recv = current;
+ nbd->pid = task_pid_nr(current);
nbd_parse_flags(nbd);
@@ -1557,8 +1611,8 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
struct nbd_device *nbd = s->private;
- if (nbd->task_recv)
- seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
+ if (nbd->pid)
+ seq_printf(s, "recv: %d\n", nbd->pid);
return 0;
}
@@ -1762,7 +1816,9 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_err_disk;
/*
* Now publish the device.
@@ -1771,6 +1827,8 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd_total_devices++;
return nbd;
+out_err_disk:
+ blk_cleanup_disk(disk);
out_free_idr:
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, index);
@@ -2135,7 +2193,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&nbd->config_lock);
config = nbd->config;
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
- !nbd->task_recv) {
+ !nbd->pid) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
ret = -EINVAL;
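
A condensed view of the NBD_CMD_INFLIGHT protocol these nbd hunks introduce (a sketch, not the driver source): the flag is set under cmd->lock only once the request has actually been sent, and every completion path (reply handling, timeout, queue clearing) must win the test-and-clear before touching the command, so each request is completed exactly once:

	/* submission side, called with cmd->lock held */
	if (!nbd_send_cmd(nbd, cmd, index))
		__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);

	/* any completion path */
	mutex_lock(&cmd->lock);
	if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
		mutex_unlock(&cmd->lock);
		return;		/* lost the race; another path completes it */
	}
	/* ... this path now owns the completion ... */
	mutex_unlock(&cmd->lock);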
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 187d779c8ca0..323af5c9c802 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -92,6 +92,10 @@ static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
+static int g_poll_queues = 1;
+module_param_named(poll_queues, g_poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");
+
static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");
@@ -324,29 +328,69 @@ nullb_device_##NAME##_store(struct config_item *item, const char *page, \
} \
CONFIGFS_ATTR(nullb_device_, NAME);
-static int nullb_apply_submit_queues(struct nullb_device *dev,
- unsigned int submit_queues)
+static int nullb_update_nr_hw_queues(struct nullb_device *dev,
+ unsigned int submit_queues,
+ unsigned int poll_queues)
+
{
- struct nullb *nullb = dev->nullb;
struct blk_mq_tag_set *set;
+ int ret, nr_hw_queues;
- if (!nullb)
+ if (!dev->nullb)
return 0;
/*
+ * Make sure at least one queue exists for each of submit and poll.
+ */
+ if (!submit_queues || !poll_queues)
+ return -EINVAL;
+
+ /*
* Make sure that null_init_hctx() does not access nullb->queues[] past
* the end of that array.
*/
- if (submit_queues > nr_cpu_ids)
+ if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues)
return -EINVAL;
- set = nullb->tag_set;
- blk_mq_update_nr_hw_queues(set, submit_queues);
- return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
+
+ /*
+ * Keep previous and new queue numbers in nullb_device for reference in
+ * the callback function null_map_queues().
+ */
+ dev->prev_submit_queues = dev->submit_queues;
+ dev->prev_poll_queues = dev->poll_queues;
+ dev->submit_queues = submit_queues;
+ dev->poll_queues = poll_queues;
+
+ set = dev->nullb->tag_set;
+ nr_hw_queues = submit_queues + poll_queues;
+ blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+ ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM;
+
+ if (ret) {
+ /* on error, revert the queue numbers */
+ dev->submit_queues = dev->prev_submit_queues;
+ dev->poll_queues = dev->prev_poll_queues;
+ }
+
+ return ret;
+}
+
+static int nullb_apply_submit_queues(struct nullb_device *dev,
+ unsigned int submit_queues)
+{
+ return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
+}
+
+static int nullb_apply_poll_queues(struct nullb_device *dev,
+ unsigned int poll_queues)
+{
+ return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
}
NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
+NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
@@ -466,6 +510,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_size,
&nullb_device_attr_completion_nsec,
&nullb_device_attr_submit_queues,
+ &nullb_device_attr_poll_queues,
&nullb_device_attr_home_node,
&nullb_device_attr_queue_mode,
&nullb_device_attr_blocksize,
@@ -593,6 +638,9 @@ static struct nullb_device *null_alloc_dev(void)
dev->size = g_gb * 1024;
dev->completion_nsec = g_completion_nsec;
dev->submit_queues = g_submit_queues;
+ dev->prev_submit_queues = g_submit_queues;
+ dev->poll_queues = g_poll_queues;
+ dev->prev_poll_queues = g_poll_queues;
dev->home_node = g_home_node;
dev->queue_mode = g_queue_mode;
dev->blocksize = g_bs;
@@ -1422,7 +1470,7 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
return &nullb->queues[index];
}
-static blk_qc_t null_submit_bio(struct bio *bio)
+static void null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
@@ -1434,7 +1482,6 @@ static blk_qc_t null_submit_bio(struct bio *bio)
cmd->bio = bio;
null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
- return BLK_QC_T_NONE;
}
static bool should_timeout_request(struct request *rq)
@@ -1455,12 +1502,100 @@ static bool should_requeue_request(struct request *rq)
return false;
}
+static int null_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nullb *nullb = set->driver_data;
+ int i, qoff;
+ unsigned int submit_queues = g_submit_queues;
+ unsigned int poll_queues = g_poll_queues;
+
+ if (nullb) {
+ struct nullb_device *dev = nullb->dev;
+
+ /*
+ * Check nr_hw_queues of the tag set to see whether the expected
+ * number of hardware queues was prepared. If the block layer failed
+ * to prepare them, fall back to the previous numbers of submit and
+ * poll queues to map queues.
+ */
+ if (set->nr_hw_queues ==
+ dev->submit_queues + dev->poll_queues) {
+ submit_queues = dev->submit_queues;
+ poll_queues = dev->poll_queues;
+ } else if (set->nr_hw_queues ==
+ dev->prev_submit_queues + dev->prev_poll_queues) {
+ submit_queues = dev->prev_submit_queues;
+ poll_queues = dev->prev_poll_queues;
+ } else {
+ pr_warn("tag set has unexpected nr_hw_queues: %d\n",
+ set->nr_hw_queues);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0, qoff = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ switch (i) {
+ case HCTX_TYPE_DEFAULT:
+ map->nr_queues = submit_queues;
+ break;
+ case HCTX_TYPE_READ:
+ map->nr_queues = 0;
+ continue;
+ case HCTX_TYPE_POLL:
+ map->nr_queues = poll_queues;
+ break;
+ }
+ map->queue_offset = qoff;
+ qoff += map->nr_queues;
+ blk_mq_map_queues(map);
+ }
+
+ return 0;
+}
+
+static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+ struct nullb_queue *nq = hctx->driver_data;
+ LIST_HEAD(list);
+ int nr = 0;
+
+ spin_lock(&nq->poll_lock);
+ list_splice_init(&nq->poll_list, &list);
+ spin_unlock(&nq->poll_lock);
+
+ while (!list_empty(&list)) {
+ struct nullb_cmd *cmd;
+ struct request *req;
+
+ req = list_first_entry(&list, struct request, queuelist);
+ list_del_init(&req->queuelist);
+ cmd = blk_mq_rq_to_pdu(req);
+ cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
+ blk_rq_sectors(req));
+ end_cmd(cmd);
+ nr++;
+ }
+
+ return nr;
+}
+
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
pr_info("rq %p timed out\n", rq);
+ if (hctx->type == HCTX_TYPE_POLL) {
+ struct nullb_queue *nq = hctx->driver_data;
+
+ spin_lock(&nq->poll_lock);
+ list_del_init(&rq->queuelist);
+ spin_unlock(&nq->poll_lock);
+ }
+
/*
* If the device is marked as blocking (i.e. memory backed or zoned
* device), the submission path may be blocked waiting for resources
@@ -1481,10 +1616,11 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nullb_queue *nq = hctx->driver_data;
sector_t nr_sectors = blk_rq_sectors(bd->rq);
sector_t sector = blk_rq_pos(bd->rq);
+ const bool is_poll = hctx->type == HCTX_TYPE_POLL;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
- if (nq->dev->irqmode == NULL_IRQ_TIMER) {
+ if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
@@ -1508,6 +1644,13 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
}
+
+ if (is_poll) {
+ spin_lock(&nq->poll_lock);
+ list_add_tail(&bd->rq->queuelist, &nq->poll_list);
+ spin_unlock(&nq->poll_lock);
+ return BLK_STS_OK;
+ }
if (cmd->fake_timeout)
return BLK_STS_OK;
@@ -1543,6 +1686,8 @@ static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
init_waitqueue_head(&nq->wait);
nq->queue_depth = nullb->queue_depth;
nq->dev = nullb->dev;
+ INIT_LIST_HEAD(&nq->poll_list);
+ spin_lock_init(&nq->poll_lock);
}
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
@@ -1568,6 +1713,8 @@ static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.complete = null_complete_rq,
.timeout = null_timeout_rq,
+ .poll = null_poll,
+ .map_queues = null_map_queues,
.init_hctx = null_init_hctx,
.exit_hctx = null_exit_hctx,
};
@@ -1664,13 +1811,17 @@ static int setup_commands(struct nullb_queue *nq)
static int setup_queues(struct nullb *nullb)
{
- nullb->queues = kcalloc(nr_cpu_ids, sizeof(struct nullb_queue),
+ int nqueues = nr_cpu_ids;
+
+ if (g_poll_queues)
+ nqueues += g_poll_queues;
+
+ nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
GFP_KERNEL);
if (!nullb->queues)
return -ENOMEM;
nullb->queue_depth = nullb->dev->hw_queue_depth;
-
return 0;
}
@@ -1722,9 +1873,14 @@ static int null_gendisk_register(struct nullb *nullb)
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
+ int poll_queues;
+
set->ops = &null_mq_ops;
set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
g_submit_queues;
+ poll_queues = nullb ? nullb->dev->poll_queues : g_poll_queues;
+ if (poll_queues)
+ set->nr_hw_queues += poll_queues;
set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
g_hw_queue_depth;
set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
@@ -1734,7 +1890,11 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
set->flags |= BLK_MQ_F_NO_SCHED;
if (g_shared_tag_bitmap)
set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
- set->driver_data = NULL;
+ set->driver_data = nullb;
+ if (g_poll_queues)
+ set->nr_maps = 3;
+ else
+ set->nr_maps = 1;
if ((nullb && nullb->dev->blocking) || g_blocking)
set->flags |= BLK_MQ_F_BLOCKING;
@@ -1754,6 +1914,13 @@ static int null_validate_conf(struct nullb_device *dev)
dev->submit_queues = nr_cpu_ids;
else if (dev->submit_queues == 0)
dev->submit_queues = 1;
+ dev->prev_submit_queues = dev->submit_queues;
+
+ if (dev->poll_queues > g_poll_queues)
+ dev->poll_queues = g_poll_queues;
+ else if (dev->poll_queues == 0)
+ dev->poll_queues = 1;
+ dev->prev_poll_queues = dev->poll_queues;
dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
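
The generic shape of the three-map ->map_queues callback null_blk now provides, with submit_queues and poll_queues standing in for the per-device counts resolved above (a hedged sketch; the read map is deliberately left empty so reads share the default queues):

static int sketch_map_queues(struct blk_mq_tag_set *set)
{
	struct blk_mq_queue_map *map;
	unsigned int qoff = 0;

	map = &set->map[HCTX_TYPE_DEFAULT];
	map->nr_queues = submit_queues;
	map->queue_offset = qoff;
	qoff += map->nr_queues;
	blk_mq_map_queues(map);

	set->map[HCTX_TYPE_READ].nr_queues = 0;

	map = &set->map[HCTX_TYPE_POLL];
	map->nr_queues = poll_queues;
	map->queue_offset = qoff;	/* poll hctxs follow the default ones */
	blk_mq_map_queues(map);

	return 0;
}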
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 64bef125d1df..78eb56b0ca55 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -32,6 +32,9 @@ struct nullb_queue {
struct nullb_device *dev;
unsigned int requeue_selection;
+ struct list_head poll_list;
+ spinlock_t poll_lock;
+
struct nullb_cmd *cmds;
};
@@ -83,6 +86,9 @@ struct nullb_device {
unsigned int zone_max_open; /* max number of open zones */
unsigned int zone_max_active; /* max number of active zones */
unsigned int submit_queues; /* number of submission queues */
+ unsigned int prev_submit_queues; /* number of submission queues before change */
+ unsigned int poll_queues; /* number of IOPOLL submission queues */
+ unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
unsigned int home_node; /* home node for the device */
unsigned int queue_mode; /* block interface */
unsigned int blocksize; /* block size */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index f9cdd11f02f5..f6b1d63e96e1 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -183,8 +183,6 @@ static int pcd_audio_ioctl(struct cdrom_device_info *cdi,
static int pcd_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc);
-static int pcd_detect(void);
-static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd);
@@ -302,53 +300,6 @@ static const struct blk_mq_ops pcd_mq_ops = {
.queue_rq = pcd_queue_rq,
};
-static void pcd_init_units(void)
-{
- struct pcd_unit *cd;
- int unit;
-
- pcd_drive_count = 0;
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- struct gendisk *disk;
-
- if (blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE))
- continue;
-
- disk = blk_mq_alloc_disk(&cd->tag_set, cd);
- if (IS_ERR(disk)) {
- blk_mq_free_tag_set(&cd->tag_set);
- continue;
- }
-
- INIT_LIST_HEAD(&cd->rq_list);
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- cd->disk = disk;
- cd->pi = &cd->pia;
- cd->present = 0;
- cd->last_sense = 0;
- cd->changed = 1;
- cd->drive = (*drives[unit])[D_SLV];
- if ((*drives[unit])[D_PRT])
- pcd_drive_count++;
-
- cd->name = &cd->info.name[0];
- snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
- cd->info.ops = &pcd_dops;
- cd->info.handle = cd;
- cd->info.speed = 0;
- cd->info.capacity = 1;
- cd->info.mask = 0;
- disk->major = major;
- disk->first_minor = unit;
- disk->minors = 1;
- strcpy(disk->disk_name, cd->name); /* umm... */
- disk->fops = &pcd_bdops;
- disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- }
-}
-
static int pcd_open(struct cdrom_device_info *cdi, int purpose)
{
struct pcd_unit *cd = cdi->handle;
@@ -630,10 +581,11 @@ static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr)
return CDS_DISC_OK;
}
-static int pcd_identify(struct pcd_unit *cd, char *id)
+static int pcd_identify(struct pcd_unit *cd)
{
- int k, s;
char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
+ char id[18];
+ int k, s;
pcd_bufblk = -1;
@@ -661,108 +613,47 @@ static int pcd_identify(struct pcd_unit *cd, char *id)
}
/*
- * returns 0, with id set if drive is detected
- * -1, if drive detection failed
+ * returns 0, with id set if drive is detected, otherwise an error code.
*/
-static int pcd_probe(struct pcd_unit *cd, int ms, char *id)
+static int pcd_probe(struct pcd_unit *cd, int ms)
{
if (ms == -1) {
for (cd->drive = 0; cd->drive <= 1; cd->drive++)
- if (!pcd_reset(cd) && !pcd_identify(cd, id))
+ if (!pcd_reset(cd) && !pcd_identify(cd))
return 0;
} else {
cd->drive = ms;
- if (!pcd_reset(cd) && !pcd_identify(cd, id))
+ if (!pcd_reset(cd) && !pcd_identify(cd))
return 0;
}
- return -1;
+ return -ENODEV;
}
-static void pcd_probe_capabilities(void)
+static int pcd_probe_capabilities(struct pcd_unit *cd)
{
- int unit, r;
- char buffer[32];
char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 };
- struct pcd_unit *cd;
-
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->present)
- continue;
- r = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
- if (r)
- continue;
- /* we should now have the cap page */
- if ((buffer[11] & 1) == 0)
- cd->info.mask |= CDC_CD_R;
- if ((buffer[11] & 2) == 0)
- cd->info.mask |= CDC_CD_RW;
- if ((buffer[12] & 1) == 0)
- cd->info.mask |= CDC_PLAY_AUDIO;
- if ((buffer[14] & 1) == 0)
- cd->info.mask |= CDC_LOCK;
- if ((buffer[14] & 8) == 0)
- cd->info.mask |= CDC_OPEN_TRAY;
- if ((buffer[14] >> 6) == 0)
- cd->info.mask |= CDC_CLOSE_TRAY;
- }
-}
-
-static int pcd_detect(void)
-{
- char id[18];
- int k, unit;
- struct pcd_unit *cd;
+ char buffer[32];
+ int ret;
- printk("%s: %s version %s, major %d, nice %d\n",
- name, name, PCD_VERSION, major, nice);
+ ret = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
+ if (ret)
+ return ret;
+
+ /* we should now have the cap page */
+ if ((buffer[11] & 1) == 0)
+ cd->info.mask |= CDC_CD_R;
+ if ((buffer[11] & 2) == 0)
+ cd->info.mask |= CDC_CD_RW;
+ if ((buffer[12] & 1) == 0)
+ cd->info.mask |= CDC_PLAY_AUDIO;
+ if ((buffer[14] & 1) == 0)
+ cd->info.mask |= CDC_LOCK;
+ if ((buffer[14] & 8) == 0)
+ cd->info.mask |= CDC_OPEN_TRAY;
+ if ((buffer[14] >> 6) == 0)
+ cd->info.mask |= CDC_CLOSE_TRAY;
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- return -1;
- }
-
- k = 0;
- if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
- cd = pcd;
- if (cd->disk && pi_init(cd->pi, 1, -1, -1, -1, -1, -1,
- pcd_buffer, PI_PCD, verbose, cd->name)) {
- if (!pcd_probe(cd, -1, id)) {
- cd->present = 1;
- k++;
- } else
- pi_release(cd->pi);
- }
- } else {
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- int *conf = *drives[unit];
- if (!conf[D_PRT])
- continue;
- if (!cd->disk)
- continue;
- if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD],
- conf[D_UNI], conf[D_PRO], conf[D_DLY],
- pcd_buffer, PI_PCD, verbose, cd->name))
- continue;
- if (!pcd_probe(cd, conf[D_SLV], id)) {
- cd->present = 1;
- k++;
- } else
- pi_release(cd->pi);
- }
- }
- if (k)
- return 0;
-
- printk("%s: No CD-ROM drive found\n", name);
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->disk)
- continue;
- blk_cleanup_disk(cd->disk);
- blk_mq_free_tag_set(&cd->tag_set);
- }
- pi_unregister_driver(par_drv);
- return -1;
+ return 0;
}
/* I/O request processing */
@@ -999,43 +890,130 @@ static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
return 0;
}
+static int pcd_init_unit(struct pcd_unit *cd, bool autoprobe, int port,
+ int mode, int unit, int protocol, int delay, int ms)
+{
+ struct gendisk *disk;
+ int ret;
+
+ ret = blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (ret)
+ return ret;
+
+ disk = blk_mq_alloc_disk(&cd->tag_set, cd);
+ if (IS_ERR(disk)) {
+ ret = PTR_ERR(disk);
+ goto out_free_tag_set;
+ }
+
+ INIT_LIST_HEAD(&cd->rq_list);
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
+ cd->disk = disk;
+ cd->pi = &cd->pia;
+ cd->present = 0;
+ cd->last_sense = 0;
+ cd->changed = 1;
+ cd->drive = (*drives[cd - pcd])[D_SLV];
+
+ cd->name = &cd->info.name[0];
+ snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
+ cd->info.ops = &pcd_dops;
+ cd->info.handle = cd;
+ cd->info.speed = 0;
+ cd->info.capacity = 1;
+ cd->info.mask = 0;
+ disk->major = major;
+ disk->first_minor = unit;
+ disk->minors = 1;
+ strcpy(disk->disk_name, cd->name); /* umm... */
+ disk->fops = &pcd_bdops;
+ disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
+
+ if (!pi_init(cd->pi, autoprobe, port, mode, unit, protocol, delay,
+ pcd_buffer, PI_PCD, verbose, cd->name)) {
+ ret = -ENODEV;
+ goto out_free_disk;
+ }
+ ret = pcd_probe(cd, ms);
+ if (ret)
+ goto out_pi_release;
+
+ cd->present = 1;
+ pcd_probe_capabilities(cd);
+ ret = register_cdrom(cd->disk, &cd->info);
+ if (ret)
+ goto out_pi_release;
+ ret = add_disk(cd->disk);
+ if (ret)
+ goto out_unreg_cdrom;
+ return 0;
+
+out_unreg_cdrom:
+ unregister_cdrom(&cd->info);
+out_pi_release:
+ pi_release(cd->pi);
+out_free_disk:
+ blk_cleanup_disk(cd->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(&cd->tag_set);
+ return ret;
+}
+
static int __init pcd_init(void)
{
- struct pcd_unit *cd;
- int unit;
+ int found = 0, unit;
if (disable)
return -EINVAL;
- pcd_init_units();
+ if (register_blkdev(major, name))
+ return -EBUSY;
- if (pcd_detect())
- return -ENODEV;
+ pr_info("%s: %s version %s, major %d, nice %d\n",
+ name, name, PCD_VERSION, major, nice);
- /* get the atapi capabilities page */
- pcd_probe_capabilities();
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ goto out_unregister_blkdev;
+ }
- if (register_blkdev(major, name)) {
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->disk)
- continue;
+ for (unit = 0; unit < PCD_UNITS; unit++) {
+ if ((*drives[unit])[D_PRT])
+ pcd_drive_count++;
+ }
+
+ if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
+ if (!pcd_init_unit(pcd, 1, -1, -1, -1, -1, -1, -1))
+ found++;
+ } else {
+ for (unit = 0; unit < PCD_UNITS; unit++) {
+ struct pcd_unit *cd = &pcd[unit];
+ int *conf = *drives[unit];
- blk_cleanup_queue(cd->disk->queue);
- blk_mq_free_tag_set(&cd->tag_set);
- put_disk(cd->disk);
+ if (!conf[D_PRT])
+ continue;
+ if (!pcd_init_unit(cd, 0, conf[D_PRT], conf[D_MOD],
+ conf[D_UNI], conf[D_PRO], conf[D_DLY],
+ conf[D_SLV]))
+ found++;
}
- return -EBUSY;
}
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (cd->present) {
- register_cdrom(cd->disk, &cd->info);
- cd->disk->private_data = cd;
- add_disk(cd->disk);
- }
+ if (!found) {
+ pr_info("%s: No CD-ROM drive found\n", name);
+ goto out_unregister_pi_driver;
}
return 0;
+
+out_unregister_pi_driver:
+ pi_unregister_driver(par_drv);
+out_unregister_blkdev:
+ unregister_blkdev(major, name);
+ return -ENODEV;
}
static void __exit pcd_exit(void)
@@ -1044,20 +1022,18 @@ static void __exit pcd_exit(void)
int unit;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->disk)
+ if (!cd->present)
continue;
- if (cd->present) {
- del_gendisk(cd->disk);
- pi_release(cd->pi);
- unregister_cdrom(&cd->info);
- }
- blk_cleanup_queue(cd->disk->queue);
+ unregister_cdrom(&cd->info);
+ del_gendisk(cd->disk);
+ pi_release(cd->pi);
+ blk_cleanup_disk(cd->disk);
+
blk_mq_free_tag_set(&cd->tag_set);
- put_disk(cd->disk);
}
- unregister_blkdev(major, name);
pi_unregister_driver(par_drv);
+ unregister_blkdev(major, name);
}
MODULE_LICENSE("GPL");
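
pcd, pd and pf are all converted to one shared per-unit pattern: allocate the tag set and disk, attach the parallel-port adapter, probe the drive, and only then publish the disk, unwinding in strict reverse order on any failure. A hedged skeleton with the paride specifics reduced to placeholders (sketch_unit, sketch_mq_ops and sketch_probe are illustrative names):

static int sketch_init_unit(struct sketch_unit *u)
{
	struct gendisk *disk;
	int ret;

	ret = blk_mq_alloc_sq_tag_set(&u->tag_set, &sketch_mq_ops, 1,
				      BLK_MQ_F_SHOULD_MERGE);
	if (ret)
		return ret;

	disk = blk_mq_alloc_disk(&u->tag_set, u);
	if (IS_ERR(disk)) {
		ret = PTR_ERR(disk);
		goto out_free_tag_set;
	}
	u->disk = disk;

	if (!pi_init(u->pi, 1, -1, -1, -1, -1, -1, u->buf,
		     PI_PCD, 0, u->name)) {
		ret = -ENODEV;
		goto out_cleanup_disk;
	}
	ret = sketch_probe(u);		/* talk to the hardware */
	if (ret)
		goto out_pi_release;
	ret = add_disk(disk);		/* publish last */
	if (ret)
		goto out_pi_release;
	return 0;

out_pi_release:
	pi_release(u->pi);
out_cleanup_disk:
	blk_cleanup_disk(disk);
out_free_tag_set:
	blk_mq_free_tag_set(&u->tag_set);
	return ret;
}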
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 675327df6aff..fba865058a17 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -775,14 +775,14 @@ static int pd_special_command(struct pd_unit *disk,
struct request *rq;
struct pd_req *req;
- rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
+ rq = blk_mq_alloc_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
req = blk_mq_rq_to_pdu(rq);
req->func = func;
blk_execute_rq(disk->gd, rq, 0);
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return 0;
}
@@ -875,9 +875,27 @@ static const struct blk_mq_ops pd_mq_ops = {
.queue_rq = pd_queue_rq,
};
-static void pd_probe_drive(struct pd_unit *disk)
+static int pd_probe_drive(struct pd_unit *disk, int autoprobe, int port,
+ int mode, int unit, int protocol, int delay)
{
+ int index = disk - pd;
+ int *parm = *drives[index];
struct gendisk *p;
+ int ret;
+
+ disk->pi = &disk->pia;
+ disk->access = 0;
+ disk->changed = 1;
+ disk->capacity = 0;
+ disk->drive = parm[D_SLV];
+ snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a' + index);
+ disk->alt_geom = parm[D_GEO];
+ disk->standby = parm[D_SBY];
+ INIT_LIST_HEAD(&disk->rq_list);
+
+ if (!pi_init(disk->pi, autoprobe, port, mode, unit, protocol, delay,
+ pd_scratch, PI_PD, verbose, disk->name))
+ return -ENXIO;
memset(&disk->tag_set, 0, sizeof(disk->tag_set));
disk->tag_set.ops = &pd_mq_ops;
@@ -887,14 +905,14 @@ static void pd_probe_drive(struct pd_unit *disk)
disk->tag_set.queue_depth = 2;
disk->tag_set.numa_node = NUMA_NO_NODE;
disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
-
- if (blk_mq_alloc_tag_set(&disk->tag_set))
- return;
+ ret = blk_mq_alloc_tag_set(&disk->tag_set);
+ if (ret)
+ goto pi_release;
p = blk_mq_alloc_disk(&disk->tag_set, disk);
if (IS_ERR(p)) {
- blk_mq_free_tag_set(&disk->tag_set);
- return;
+ ret = PTR_ERR(p);
+ goto free_tag_set;
}
disk->gd = p;
@@ -905,102 +923,88 @@ static void pd_probe_drive(struct pd_unit *disk)
p->minors = 1 << PD_BITS;
p->events = DISK_EVENT_MEDIA_CHANGE;
p->private_data = disk;
-
blk_queue_max_hw_sectors(p->queue, cluster);
blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
if (disk->drive == -1) {
- for (disk->drive = 0; disk->drive <= 1; disk->drive++)
- if (pd_special_command(disk, pd_identify) == 0)
- return;
- } else if (pd_special_command(disk, pd_identify) == 0)
- return;
- disk->gd = NULL;
+ for (disk->drive = 0; disk->drive <= 1; disk->drive++) {
+ ret = pd_special_command(disk, pd_identify);
+ if (ret == 0)
+ break;
+ }
+ } else {
+ ret = pd_special_command(disk, pd_identify);
+ }
+ if (ret)
+ goto put_disk;
+ set_capacity(disk->gd, disk->capacity);
+ ret = add_disk(disk->gd);
+ if (ret)
+ goto cleanup_disk;
+ return 0;
+cleanup_disk:
+ blk_cleanup_disk(disk->gd);
+put_disk:
put_disk(p);
+ disk->gd = NULL;
+free_tag_set:
+ blk_mq_free_tag_set(&disk->tag_set);
+pi_release:
+ pi_release(disk->pi);
+ return ret;
}
-static int pd_detect(void)
+static int __init pd_init(void)
{
int found = 0, unit, pd_drive_count = 0;
struct pd_unit *disk;
- for (unit = 0; unit < PD_UNITS; unit++) {
- int *parm = *drives[unit];
- struct pd_unit *disk = pd + unit;
- disk->pi = &disk->pia;
- disk->access = 0;
- disk->changed = 1;
- disk->capacity = 0;
- disk->drive = parm[D_SLV];
- snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit);
- disk->alt_geom = parm[D_GEO];
- disk->standby = parm[D_SBY];
- if (parm[D_PRT])
- pd_drive_count++;
- INIT_LIST_HEAD(&disk->rq_list);
- }
+ if (disable)
+ return -ENODEV;
+
+ if (register_blkdev(major, name))
+ return -ENODEV;
+
+ printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
+ name, name, PD_VERSION, major, cluster, nice);
par_drv = pi_register_driver(name);
if (!par_drv) {
pr_err("failed to register %s driver\n", name);
- return -1;
+ goto out_unregister_blkdev;
}
- if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
- disk = pd;
- if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
- PI_PD, verbose, disk->name)) {
- pd_probe_drive(disk);
- if (!disk->gd)
- pi_release(disk->pi);
- }
+ for (unit = 0; unit < PD_UNITS; unit++) {
+ int *parm = *drives[unit];
+ if (parm[D_PRT])
+ pd_drive_count++;
+ }
+
+ if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
+ if (!pd_probe_drive(pd, 1, -1, -1, -1, -1, -1))
+ found++;
} else {
for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
int *parm = *drives[unit];
if (!parm[D_PRT])
continue;
- if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
- parm[D_UNI], parm[D_PRO], parm[D_DLY],
- pd_scratch, PI_PD, verbose, disk->name)) {
- pd_probe_drive(disk);
- if (!disk->gd)
- pi_release(disk->pi);
- }
- }
- }
- for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
- if (disk->gd) {
- set_capacity(disk->gd, disk->capacity);
- add_disk(disk->gd);
- found = 1;
+ if (!pd_probe_drive(disk, 0, parm[D_PRT], parm[D_MOD],
+ parm[D_UNI], parm[D_PRO], parm[D_DLY]))
+ found++;
}
}
if (!found) {
printk("%s: no valid drive found\n", name);
- pi_unregister_driver(par_drv);
+ goto out_pi_unregister_driver;
}
- return found;
-}
-
-static int __init pd_init(void)
-{
- if (disable)
- goto out1;
-
- if (register_blkdev(major, name))
- goto out1;
-
- printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
- name, name, PD_VERSION, major, cluster, nice);
- if (!pd_detect())
- goto out2;
return 0;
-out2:
+out_pi_unregister_driver:
+ pi_unregister_driver(par_drv);
+out_unregister_blkdev:
unregister_blkdev(major, name);
-out1:
return -ENODEV;
}
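
pd's special-command path also moves from blk_get_request()/blk_put_request() to the blk-mq spellings; the synchronous driver-private request pattern is otherwise unchanged (a fragment, assuming the signatures used in the pd hunk above):

	struct request *rq;

	rq = blk_mq_alloc_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* fill the request PDU, then execute synchronously */
	blk_execute_rq(disk->gd, rq, 0);
	blk_mq_free_request(rq);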
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index d5b9c88ba76f..bf8d0ef41a0a 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -214,7 +214,6 @@ static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static void pf_release(struct gendisk *disk, fmode_t mode);
-static int pf_detect(void);
static void do_pf_read(void);
static void do_pf_read_start(void);
static void do_pf_write(void);
@@ -285,45 +284,6 @@ static const struct blk_mq_ops pf_mq_ops = {
.queue_rq = pf_queue_rq,
};
-static void __init pf_init_units(void)
-{
- struct pf_unit *pf;
- int unit;
-
- pf_drive_count = 0;
- for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
- struct gendisk *disk;
-
- if (blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE))
- continue;
-
- disk = blk_mq_alloc_disk(&pf->tag_set, pf);
- if (IS_ERR(disk)) {
- blk_mq_free_tag_set(&pf->tag_set);
- continue;
- }
-
- INIT_LIST_HEAD(&pf->rq_list);
- blk_queue_max_segments(disk->queue, cluster);
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- pf->disk = disk;
- pf->pi = &pf->pia;
- pf->media_status = PF_NM;
- pf->drive = (*drives[unit])[D_SLV];
- pf->lun = (*drives[unit])[D_LUN];
- snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit);
- disk->major = major;
- disk->first_minor = unit;
- disk->minors = 1;
- strcpy(disk->disk_name, pf->name);
- disk->fops = &pf_fops;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- if (!(*drives[unit])[D_PRT])
- pf_drive_count++;
- }
-}
-
static int pf_open(struct block_device *bdev, fmode_t mode)
{
struct pf_unit *pf = bdev->bd_disk->private_data;
@@ -691,9 +651,9 @@ static int pf_identify(struct pf_unit *pf)
return 0;
}
-/* returns 0, with id set if drive is detected
- -1, if drive detection failed
-*/
+/*
+ * returns 0, with id set if drive is detected, otherwise an error code.
+ */
static int pf_probe(struct pf_unit *pf)
{
if (pf->drive == -1) {
@@ -715,60 +675,7 @@ static int pf_probe(struct pf_unit *pf)
if (!pf_identify(pf))
return 0;
}
- return -1;
-}
-
-static int pf_detect(void)
-{
- struct pf_unit *pf = units;
- int k, unit;
-
- printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
- name, name, PF_VERSION, major, cluster, nice);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- return -1;
- }
- k = 0;
- if (pf_drive_count == 0) {
- if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
- verbose, pf->name)) {
- if (!pf_probe(pf) && pf->disk) {
- pf->present = 1;
- k++;
- } else
- pi_release(pf->pi);
- }
-
- } else
- for (unit = 0; unit < PF_UNITS; unit++, pf++) {
- int *conf = *drives[unit];
- if (!conf[D_PRT])
- continue;
- if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
- conf[D_UNI], conf[D_PRO], conf[D_DLY],
- pf_scratch, PI_PF, verbose, pf->name)) {
- if (pf->disk && !pf_probe(pf)) {
- pf->present = 1;
- k++;
- } else
- pi_release(pf->pi);
- }
- }
- if (k)
- return 0;
-
- printk("%s: No ATAPI disk detected\n", name);
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->disk)
- continue;
- blk_cleanup_disk(pf->disk);
- blk_mq_free_tag_set(&pf->tag_set);
- }
- pi_unregister_driver(par_drv);
- return -1;
+ return -ENODEV;
}
/* The i/o request engine */
@@ -1014,61 +921,134 @@ static void do_pf_write_done(void)
next_request(0);
}
+static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
+ int mode, int unit, int protocol, int delay, int ms)
+{
+ struct gendisk *disk;
+ int ret;
+
+ ret = blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (ret)
+ return ret;
+
+ disk = blk_mq_alloc_disk(&pf->tag_set, pf);
+ if (IS_ERR(disk)) {
+ ret = PTR_ERR(disk);
+ goto out_free_tag_set;
+ }
+ disk->major = major;
+ disk->first_minor = pf - units;
+ disk->minors = 1;
+ strcpy(disk->disk_name, pf->name);
+ disk->fops = &pf_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
+ disk->private_data = pf;
+
+ blk_queue_max_segments(disk->queue, cluster);
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
+
+ INIT_LIST_HEAD(&pf->rq_list);
+ pf->disk = disk;
+ pf->pi = &pf->pia;
+ pf->media_status = PF_NM;
+ pf->drive = (*drives[disk->first_minor])[D_SLV];
+ pf->lun = (*drives[disk->first_minor])[D_LUN];
+ snprintf(pf->name, PF_NAMELEN, "%s%d", name, disk->first_minor);
+
+ if (!pi_init(pf->pi, autoprobe, port, mode, unit, protocol, delay,
+ pf_scratch, PI_PF, verbose, pf->name)) {
+ ret = -ENODEV;
+ goto out_free_disk;
+ }
+ ret = pf_probe(pf);
+ if (ret)
+ goto out_pi_release;
+
+ ret = add_disk(disk);
+ if (ret)
+ goto out_pi_release;
+ pf->present = 1;
+ return 0;
+
+out_pi_release:
+ pi_release(pf->pi);
+out_free_disk:
+ blk_cleanup_disk(pf->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(&pf->tag_set);
+ return ret;
+}
+
static int __init pf_init(void)
{ /* preliminary initialisation */
struct pf_unit *pf;
- int unit;
+ int found = 0, unit;
if (disable)
return -EINVAL;
- pf_init_units();
+ if (register_blkdev(major, name))
+ return -EBUSY;
- if (pf_detect())
- return -ENODEV;
- pf_busy = 0;
+ printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
+ name, name, PF_VERSION, major, cluster, nice);
- if (register_blkdev(major, name)) {
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->disk)
- continue;
- blk_cleanup_queue(pf->disk->queue);
- blk_mq_free_tag_set(&pf->tag_set);
- put_disk(pf->disk);
- }
- return -EBUSY;
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ goto out_unregister_blkdev;
}
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- struct gendisk *disk = pf->disk;
+ for (unit = 0; unit < PF_UNITS; unit++) {
+ if (!(*drives[unit])[D_PRT])
+ pf_drive_count++;
+ }
- if (!pf->present)
- continue;
- disk->private_data = pf;
- add_disk(disk);
+ pf = units;
+ if (pf_drive_count == 0) {
+ if (pf_init_unit(pf, 1, -1, -1, -1, -1, -1, verbose))
+ found++;
+ } else {
+ for (unit = 0; unit < PF_UNITS; unit++, pf++) {
+ int *conf = *drives[unit];
+ if (!conf[D_PRT])
+ continue;
+ if (pf_init_unit(pf, 0, conf[D_PRT], conf[D_MOD],
+ conf[D_UNI], conf[D_PRO], conf[D_DLY],
+ verbose))
+ found++;
+ }
+ }
+ if (!found) {
+ printk("%s: No ATAPI disk detected\n", name);
+ goto out_unregister_pi_driver;
}
+ pf_busy = 0;
return 0;
+
+out_unregister_pi_driver:
+ pi_unregister_driver(par_drv);
+out_unregister_blkdev:
+ unregister_blkdev(major, name);
+ return -ENODEV;
}
static void __exit pf_exit(void)
{
struct pf_unit *pf;
int unit;
- unregister_blkdev(major, name);
+
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->disk)
+ if (!pf->present)
continue;
-
- if (pf->present)
- del_gendisk(pf->disk);
-
- blk_cleanup_queue(pf->disk->queue);
+ del_gendisk(pf->disk);
+ blk_cleanup_disk(pf->disk);
blk_mq_free_tag_set(&pf->tag_set);
- put_disk(pf->disk);
-
- if (pf->present)
- pi_release(pf->pi);
+ pi_release(pf->pi);
}
+
+ unregister_blkdev(major, name);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 0f26b2510a75..b53f648302c1 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -703,7 +703,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
struct request *rq;
int ret = 0;
- rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
+ rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -726,7 +726,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
if (scsi_req(rq)->result)
ret = -EIO;
out:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return ret;
}
@@ -2400,7 +2400,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
}
}
-static blk_qc_t pkt_submit_bio(struct bio *bio)
+static void pkt_submit_bio(struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
@@ -2423,7 +2423,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
*/
if (bio_data_dir(bio) == READ) {
pkt_make_request_read(pd, bio);
- return BLK_QC_T_NONE;
+ return;
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2455,10 +2455,9 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
} while (split != bio);
- return BLK_QC_T_NONE;
+ return;
end_io:
bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static void pkt_init_queue(struct pktcdvd_device *pd)
@@ -2537,6 +2536,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
int i;
char b[BDEVNAME_SIZE];
struct block_device *bdev;
+ struct scsi_device *sdev;
if (pd->pkt_dev == dev) {
pkt_err(pd, "recursive setup not allowed\n");
@@ -2560,10 +2560,12 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
- if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+ sdev = scsi_device_from_queue(bdev->bd_disk->queue);
+ if (!sdev) {
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
}
+ put_device(&sdev->sdev_gendev);
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
@@ -2729,7 +2731,9 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
- add_disk(disk);
+ ret = add_disk(disk);
+ if (ret)
+ goto out_mem2;
pkt_sysfs_dev_new(pd);
pkt_debugfs_dev_new(pd);
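
Two API shifts are visible in the pktcdvd hunks above: passthrough
requests on SCSI queues are now allocated with scsi_alloc_request() and
freed with blk_mq_free_request() instead of the generic
blk_get_request()/blk_put_request() pair, and the "is this a SCSI
device?" test moves from blk_queue_scsi_passthrough() to
scsi_device_from_queue(), which returns a referenced scsi_device. A
minimal sketch of the new probe, assuming bdev was already opened with
blkdev_get_by_dev():

	struct scsi_device *sdev;

	sdev = scsi_device_from_queue(bdev->bd_disk->queue);
	if (!sdev)
		return -EINVAL;	/* not backed by a SCSI device */
	/* pktcdvd only needs the existence check, so drop the ref */
	put_device(&sdev->sdev_gendev);
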
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index c7b19e128b03..d1ebf193cb9a 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -578,7 +578,7 @@ out:
return next;
}
-static blk_qc_t ps3vram_submit_bio(struct bio *bio)
+static void ps3vram_submit_bio(struct bio *bio)
{
struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -594,13 +594,11 @@ static blk_qc_t ps3vram_submit_bio(struct bio *bio)
spin_unlock_irq(&priv->lock);
if (busy)
- return BLK_QC_T_NONE;
+ return;
do {
bio = ps3vram_do_bio(dev, bio);
} while (bio);
-
- return BLK_QC_T_NONE;
}
static const struct block_device_operations ps3vram_fops = {
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e65c9d706f6f..953fa134cd3d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -836,7 +836,7 @@ struct rbd_options {
u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};
-#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
+#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_DEFAULT_RQ
#define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
#define RBD_READ_ONLY_DEFAULT false
@@ -7054,7 +7054,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
if (rc)
goto err_out_image_lock;
- device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
+ rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
+ if (rc)
+ goto err_out_cleanup_disk;
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
@@ -7068,6 +7070,8 @@ out:
module_put(THIS_MODULE);
return rc;
+err_out_cleanup_disk:
+ rbd_free_disk(rbd_dev);
err_out_image_lock:
rbd_dev_image_unlock(rbd_dev);
rbd_dev_device_release(rbd_dev);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index bd4a41afbbfc..2df0657cdf00 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1176,7 +1176,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
return ret;
}
-static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx)
+static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct rnbd_queue *q = hctx->driver_data;
struct rnbd_clt_dev *dev = q->dev;
@@ -1384,8 +1384,10 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
}
-static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
+static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
{
+ int err;
+
dev->gd->major = rnbd_client_major;
dev->gd->first_minor = idx << RNBD_PART_BITS;
dev->gd->minors = 1 << RNBD_PART_BITS;
@@ -1410,7 +1412,11 @@ static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
if (!dev->rotational)
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
- add_disk(dev->gd);
+ err = add_disk(dev->gd);
+ if (err)
+ blk_cleanup_disk(dev->gd);
+
+ return err;
}
static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
@@ -1426,8 +1432,7 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
rnbd_init_mq_hw_queues(dev);
setup_request_queue(dev);
- rnbd_clt_setup_gen_disk(dev, idx);
- return 0;
+ return rnbd_clt_setup_gen_disk(dev, idx);
}
static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index c1bc5c0fef71..de5d5a8df81d 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -10,7 +10,7 @@
#define RNBD_PROTO_H
#include <linux/types.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/limits.h>
#include <linux/inet.h>
#include <linux/in.h>
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 83636714b8d7..8d9d69f5dfbc 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -935,7 +935,9 @@ static int rsxx_pci_probe(struct pci_dev *dev,
card->size8 = 0;
}
- rsxx_attach_dev(card);
+ st = rsxx_attach_dev(card);
+ if (st)
+ goto failed_create_dev;
/************* Setup Debugfs *************/
rsxx_debugfs_dev_new(card);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 1cc40b0ea761..dd33f1bdf3b8 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -50,7 +50,7 @@ struct rsxx_bio_meta {
static struct kmem_cache *bio_meta_pool;
-static blk_qc_t rsxx_submit_bio(struct bio *bio);
+static void rsxx_submit_bio(struct bio *bio);
/*----------------- Block Device Operations -----------------*/
static int rsxx_blkdev_ioctl(struct block_device *bdev,
@@ -120,7 +120,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
}
}
-static blk_qc_t rsxx_submit_bio(struct bio *bio)
+static void rsxx_submit_bio(struct bio *bio)
{
struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
struct rsxx_bio_meta *bio_meta;
@@ -169,7 +169,7 @@ static blk_qc_t rsxx_submit_bio(struct bio *bio)
if (st)
goto queue_err;
- return BLK_QC_T_NONE;
+ return;
queue_err:
kmem_cache_free(bio_meta_pool, bio_meta);
@@ -177,7 +177,6 @@ req_err:
if (st)
bio->bi_status = st;
bio_endio(bio);
- return BLK_QC_T_NONE;
}
/*----------------- Device Setup -------------------*/
@@ -192,6 +191,8 @@ static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
+ int err = 0;
+
mutex_lock(&card->dev_lock);
/* The block device requires the stripe size from the config. */
@@ -200,13 +201,17 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
set_capacity(card->gendisk, card->size8 >> 9);
else
set_capacity(card->gendisk, 0);
- device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
- card->bdev_attached = 1;
+ err = device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
+ if (err == 0)
+ card->bdev_attached = 1;
}
mutex_unlock(&card->dev_lock);
- return 0;
+ if (err)
+ blk_cleanup_disk(card->gendisk);
+
+ return err;
}
void rsxx_detach_dev(struct rsxx_cardinfo *card)
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 7ccc8d2a41bc..821594cd1315 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -16,6 +16,7 @@
#include <linux/fd.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
@@ -184,6 +185,7 @@ struct floppy_state {
int track;
int ref_count;
+ bool registered;
struct gendisk *disk;
struct blk_mq_tag_set tag_set;
@@ -771,6 +773,20 @@ static const struct blk_mq_ops swim_mq_ops = {
.queue_rq = swim_queue_rq,
};
+static void swim_cleanup_floppy_disk(struct floppy_state *fs)
+{
+ struct gendisk *disk = fs->disk;
+
+ if (!disk)
+ return;
+
+ if (fs->registered)
+ del_gendisk(fs->disk);
+
+ blk_cleanup_disk(disk);
+ blk_mq_free_tag_set(&fs->tag_set);
+}
+
static int swim_floppy_init(struct swim_priv *swd)
{
int err;
@@ -827,7 +843,10 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
- add_disk(swd->unit[drive].disk);
+ err = add_disk(swd->unit[drive].disk);
+ if (err)
+ goto exit_put_disks;
+ swd->unit[drive].registered = true;
}
return 0;
@@ -835,12 +854,7 @@ static int swim_floppy_init(struct swim_priv *swd)
exit_put_disks:
unregister_blkdev(FLOPPY_MAJOR, "fd");
do {
- struct gendisk *disk = swd->unit[drive].disk;
-
- if (!disk)
- continue;
- blk_cleanup_disk(disk);
- blk_mq_free_tag_set(&swd->unit[drive].tag_set);
+ swim_cleanup_floppy_disk(&swd->unit[drive]);
} while (drive--);
return err;
}
@@ -909,12 +923,8 @@ static int swim_remove(struct platform_device *dev)
int drive;
struct resource *res;
- for (drive = 0; drive < swd->floppy_count; drive++) {
- del_gendisk(swd->unit[drive].disk);
- blk_cleanup_queue(swd->unit[drive].disk->queue);
- blk_mq_free_tag_set(&swd->unit[drive].tag_set);
- put_disk(swd->unit[drive].disk);
- }
+ for (drive = 0; drive < swd->floppy_count; drive++)
+ swim_cleanup_floppy_disk(&swd->unit[drive]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 965af0a3e95b..4b91c9aa5892 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include <linux/major.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
@@ -1229,7 +1230,9 @@ static int swim3_attach(struct macio_dev *mdev,
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
- add_disk(disk);
+ rc = add_disk(disk);
+ if (rc)
+ goto out_cleanup_disk;
disks[floppy_count++] = disk;
return 0;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 420cd952ddc4..d1676fe0da1a 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -297,6 +297,7 @@ struct carm_host {
struct work_struct fsm_task;
+ int probe_err;
struct completion probe_comp;
};
@@ -1181,8 +1182,11 @@ static void carm_fsm_task (struct work_struct *work)
struct gendisk *disk = port->disk;
set_capacity(disk, port->capacity);
- add_disk(disk);
- activated++;
+ host->probe_err = add_disk(disk);
+ if (!host->probe_err)
+ activated++;
+ else
+ break;
}
printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
@@ -1192,11 +1196,9 @@ static void carm_fsm_task (struct work_struct *work)
reschedule = 1;
break;
}
-
case HST_PROBE_FINISHED:
complete(&host->probe_comp);
break;
-
case HST_ERROR:
/* FIXME: TODO */
break;
@@ -1507,7 +1509,12 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_irq;
DPRINTK("waiting for probe_comp\n");
+ host->probe_err = -ENODEV;
wait_for_completion(&host->probe_comp);
+ if (host->probe_err) {
+ rc = host->probe_err;
+ goto err_out_free_irq;
+ }
printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 303caf2d17d0..97bf051a50ce 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -24,6 +24,19 @@
/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define VIRTIO_BLK_INLINE_SG_CNT 0
+#else
+#define VIRTIO_BLK_INLINE_SG_CNT 2
+#endif
+
+static unsigned int num_request_queues;
+module_param(num_request_queues, uint, 0644);
+MODULE_PARM_DESC(num_request_queues,
+ "Limit the number of request queues to use for blk device. "
+ "0 for no limit. "
+ "Values > nr_cpu_ids truncated to nr_cpu_ids.");
+
static int major;
static DEFINE_IDA(vd_index_ida);
@@ -77,6 +90,7 @@ struct virtio_blk {
struct virtblk_req {
struct virtio_blk_outhdr out_hdr;
u8 status;
+ struct sg_table sg_table;
struct scatterlist sg[];
};
@@ -162,12 +176,93 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
return 0;
}
-static inline void virtblk_request_done(struct request *req)
+static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ if (blk_rq_nr_phys_segments(req))
+ sg_free_table_chained(&vbr->sg_table,
+ VIRTIO_BLK_INLINE_SG_CNT);
+}
+
+static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
+ struct virtblk_req *vbr)
+{
+ int err;
+
+ if (!blk_rq_nr_phys_segments(req))
+ return 0;
+
+ vbr->sg_table.sgl = vbr->sg;
+ err = sg_alloc_table_chained(&vbr->sg_table,
+ blk_rq_nr_phys_segments(req),
+ vbr->sg_table.sgl,
+ VIRTIO_BLK_INLINE_SG_CNT);
+ if (unlikely(err))
+ return -ENOMEM;
+ return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
+}
+
+static void virtblk_cleanup_cmd(struct request *req)
+{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
kfree(bvec_virt(&req->special_vec));
+}
+
+static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
+ struct request *req,
+ struct virtblk_req *vbr)
+{
+ bool unmap = false;
+ u32 type;
+
+ vbr->out_hdr.sector = 0;
+
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ type = VIRTIO_BLK_T_IN;
+ vbr->out_hdr.sector = cpu_to_virtio64(vdev,
+ blk_rq_pos(req));
+ break;
+ case REQ_OP_WRITE:
+ type = VIRTIO_BLK_T_OUT;
+ vbr->out_hdr.sector = cpu_to_virtio64(vdev,
+ blk_rq_pos(req));
+ break;
+ case REQ_OP_FLUSH:
+ type = VIRTIO_BLK_T_FLUSH;
+ break;
+ case REQ_OP_DISCARD:
+ type = VIRTIO_BLK_T_DISCARD;
+ break;
+ case REQ_OP_WRITE_ZEROES:
+ type = VIRTIO_BLK_T_WRITE_ZEROES;
+ unmap = !(req->cmd_flags & REQ_NOUNMAP);
+ break;
+ case REQ_OP_DRV_IN:
+ type = VIRTIO_BLK_T_GET_ID;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return BLK_STS_IOERR;
+ }
+
+ vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
+
+ if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
+ if (virtblk_setup_discard_write_zeroes(req, unmap))
+ return BLK_STS_RESOURCE;
+ }
+
+ return 0;
+}
+
+static inline void virtblk_request_done(struct request *req)
+{
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ virtblk_unmap_data(req, vbr);
+ virtblk_cleanup_cmd(req);
blk_mq_end_request(req, virtblk_result(vbr));
}
@@ -223,59 +318,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
unsigned long flags;
unsigned int num;
int qid = hctx->queue_num;
- int err;
bool notify = false;
- bool unmap = false;
- u32 type;
+ blk_status_t status;
+ int err;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
- switch (req_op(req)) {
- case REQ_OP_READ:
- case REQ_OP_WRITE:
- type = 0;
- break;
- case REQ_OP_FLUSH:
- type = VIRTIO_BLK_T_FLUSH;
- break;
- case REQ_OP_DISCARD:
- type = VIRTIO_BLK_T_DISCARD;
- break;
- case REQ_OP_WRITE_ZEROES:
- type = VIRTIO_BLK_T_WRITE_ZEROES;
- unmap = !(req->cmd_flags & REQ_NOUNMAP);
- break;
- case REQ_OP_DRV_IN:
- type = VIRTIO_BLK_T_GET_ID;
- break;
- default:
- WARN_ON_ONCE(1);
- return BLK_STS_IOERR;
- }
-
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
- vbr->out_hdr.sector = type ?
- 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
+ status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+ if (unlikely(status))
+ return status;
blk_mq_start_request(req);
- if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
- err = virtblk_setup_discard_write_zeroes(req, unmap);
- if (err)
- return BLK_STS_RESOURCE;
- }
-
- num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
- if (num) {
- if (rq_data_dir(req) == WRITE)
- vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
- else
- vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
+ num = virtblk_map_data(hctx, req, vbr);
+ if (unlikely(num < 0)) {
+ virtblk_cleanup_cmd(req);
+ return BLK_STS_RESOURCE;
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
+ err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
/* Don't stop the queue if -ENOMEM: we may have failed to
@@ -284,6 +346,8 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
if (err == -ENOSPC)
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
+ virtblk_unmap_data(req, vbr);
+ virtblk_cleanup_cmd(req);
switch (err) {
case -ENOSPC:
return BLK_STS_DEV_RESOURCE;
@@ -312,7 +376,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
struct request *req;
int err;
- req = blk_get_request(q, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -323,7 +387,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
blk_execute_rq(vblk->disk, req, false);
err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
- blk_put_request(req);
+ blk_mq_free_request(req);
return err;
}
@@ -497,8 +561,14 @@ static int init_vq(struct virtio_blk *vblk)
&num_vqs);
if (err)
num_vqs = 1;
+ if (!err && !num_vqs) {
+ dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
+ return -EINVAL;
+ }
- num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+ num_vqs = min_t(unsigned int,
+ min_not_zero(num_request_queues, nr_cpu_ids),
+ num_vqs);
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
@@ -624,7 +694,7 @@ cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
u8 writeback = virtblk_get_cache_mode(vblk->vdev);
BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
- return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
+ return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}
static DEVICE_ATTR_RW(cache_type);
@@ -660,16 +730,6 @@ static const struct attribute_group *virtblk_attr_groups[] = {
NULL,
};
-static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct virtio_blk *vblk = set->driver_data;
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
-
- sg_init_table(vbr->sg, vblk->sg_elems);
- return 0;
-}
-
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
@@ -682,7 +742,6 @@ static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.commit_rqs = virtio_commit_rqs,
.complete = virtblk_request_done,
- .init_request = virtblk_init_request,
.map_queues = virtblk_map_queues,
};
@@ -762,7 +821,7 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
- sizeof(struct scatterlist) * sg_elems;
+ sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
vblk->tag_set.driver_data = vblk;
vblk->tag_set.nr_hw_queues = vblk->num_vqs;
@@ -815,9 +874,17 @@ static int virtblk_probe(struct virtio_device *vdev)
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
struct virtio_blk_config, blk_size,
&blk_size);
- if (!err)
+ if (!err) {
+ err = blk_validate_block_size(blk_size);
+ if (err) {
+ dev_err(&vdev->dev,
+ "virtio_blk: invalid block size: 0x%x\n",
+ blk_size);
+ goto out_cleanup_disk;
+ }
+
blk_queue_logical_block_size(q, blk_size);
- else
+ } else
blk_size = queue_logical_block_size(q);
/* Use topology information if available */
@@ -982,6 +1049,7 @@ static struct virtio_driver virtio_blk = {
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
+ .suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
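
The virtio-blk rework above stops embedding a full scatterlist array in
every request PDU (sized by sg_elems) and instead chains scatterlists
on demand, keeping only VIRTIO_BLK_INLINE_SG_CNT entries inline. The
map/unmap pairing that virtblk_map_data() and virtblk_unmap_data()
implement boils down to this sketch (two inline entries assumed,
matching the non-CONFIG_ARCH_NO_SG_CHAIN case; q, req, sgt and
inline_sg are the caller's):

	/* map: build a chained table rooted at the inline entries */
	sgt->sgl = inline_sg;
	if (sg_alloc_table_chained(sgt, blk_rq_nr_phys_segments(req),
				   sgt->sgl, 2))
		return -ENOMEM;
	nents = blk_rq_map_sg(q, req, sgt->sgl);

	/* unmap: free only what was chained beyond the inline chunk */
	sg_free_table_chained(sgt, 2);

Requests without data (e.g. REQ_OP_FLUSH) skip both calls, which is why
both helpers test blk_rq_nr_phys_segments() first.
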
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 33eba3df4dd9..914587aabca0 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -98,7 +98,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
return;
}
- err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
+ err = sync_blockdev(blkif->vbd.bdev);
if (err) {
xenbus_dev_error(blkif->be->dev, err, "block flush");
return;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 72902104f111..8e3983e456f3 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -42,6 +42,7 @@
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
@@ -2385,7 +2386,13 @@ static void blkfront_connect(struct blkfront_info *info)
for_each_rinfo(info, rinfo, i)
kick_pending_request_queues(rinfo);
- device_add_disk(&info->xbdev->dev, info->gd, NULL);
+ err = device_add_disk(&info->xbdev->dev, info->gd, NULL);
+ if (err) {
+ blk_cleanup_disk(info->gd);
+ blk_mq_free_tag_set(&info->tag_set);
+ info->rq = NULL;
+ goto fail;
+ }
info->is_ready = 1;
return;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fcaf2750f68f..a68297fb51a2 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1598,22 +1598,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
/*
* Handler function for all zram I/O requests.
*/
-static blk_qc_t zram_submit_bio(struct bio *bio)
+static void zram_submit_bio(struct bio *bio)
{
struct zram *zram = bio->bi_bdev->bd_disk->private_data;
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
- goto error;
+ bio_io_error(bio);
+ return;
}
__zram_make_request(zram, bio);
- return BLK_QC_T_NONE;
-
-error:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static void zram_slot_free_notify(struct block_device *bdev,
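
pktcdvd, ps3vram, rsxx and the zram hunk just above all receive the
same mechanical conversion: ->submit_bio now returns void because
polling cookies are tracked in struct bio rather than returned by the
driver, so every "return BLK_QC_T_NONE" becomes a bare return. A
minimal sketch of the converted shape, with my_validate() and
my_queue_bio() as invented stand-ins for the per-driver logic:

static void my_submit_bio(struct bio *bio)
{
	struct my_dev *dev = bio->bi_bdev->bd_disk->private_data;

	if (!my_validate(dev, bio)) {
		bio_io_error(bio);	/* was: goto error / BLK_QC_T_NONE */
		return;
	}
	my_queue_bio(dev, bio);		/* was: return BLK_QC_T_NONE */
}
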
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index f1705b46fc88..9359bff47296 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -1037,8 +1037,9 @@ static bool btintel_firmware_version(struct hci_dev *hdev,
params = (void *)(fw_ptr + sizeof(*cmd));
- bt_dev_info(hdev, "Boot Address: 0x%x",
- le32_to_cpu(params->boot_addr));
+ *boot_addr = le32_to_cpu(params->boot_addr);
+
+ bt_dev_info(hdev, "Boot Address: 0x%x", *boot_addr);
bt_dev_info(hdev, "Firmware Version: %u-%u.%u",
params->fw_build_num, params->fw_build_ww,
@@ -1071,9 +1072,6 @@ int btintel_download_firmware(struct hci_dev *hdev,
/* Skip version checking */
break;
default:
- /* Skip reading firmware file version in bootloader mode */
- if (ver->fw_variant == 0x06)
- break;
/* Skip download if firmware has the same version */
if (btintel_firmware_version(hdev, ver->fw_build_num,
@@ -1114,19 +1112,16 @@ static int btintel_download_fw_tlv(struct hci_dev *hdev,
int err;
u32 css_header_ver;
- /* Skip reading firmware file version in bootloader mode */
- if (ver->img_type != 0x01) {
- /* Skip download if firmware has the same version */
- if (btintel_firmware_version(hdev, ver->min_fw_build_nn,
- ver->min_fw_build_cw,
- ver->min_fw_build_yy,
- fw, boot_param)) {
- bt_dev_info(hdev, "Firmware already loaded");
- /* Return -EALREADY to indicate that firmware has
- * already been loaded.
- */
- return -EALREADY;
- }
+ /* Skip download if firmware has the same version */
+ if (btintel_firmware_version(hdev, ver->min_fw_build_nn,
+ ver->min_fw_build_cw,
+ ver->min_fw_build_yy,
+ fw, boot_param)) {
+ bt_dev_info(hdev, "Firmware already loaded");
+ /* Return -EALREADY to indicate that firmware has
+ * already been loaded.
+ */
+ return -EALREADY;
}
/* The firmware variant determines if the device is in bootloader
@@ -1285,12 +1280,16 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
static int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
- u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
+ u8 mask[11] = { 0x0a, 0x92, 0x02, 0x7f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00 };
+ u8 period[5] = { 0x04, 0x91, 0x02, 0x05, 0x00 };
+ u8 trace_enable = 0x02;
struct sk_buff *skb;
- if (!features)
+ if (!features) {
+ bt_dev_warn(hdev, "Debug features not read");
return -EINVAL;
+ }
if (!(features->page1[0] & 0x3f)) {
bt_dev_info(hdev, "Telemetry exception format not supported");
@@ -1303,11 +1302,95 @@ static int btintel_set_debug_features(struct hci_dev *hdev,
PTR_ERR(skb));
return PTR_ERR(skb);
}
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfc8b, 5, period, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting periodicity for link statistics traces failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Enable tracing of link statistics events failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ bt_dev_info(hdev, "set debug features: trace_enable 0x%02x mask 0x%02x",
+ trace_enable, mask[3]);
+
+ return 0;
+}
+
+static int btintel_reset_debug_features(struct hci_dev *hdev,
+ const struct intel_debug_features *features)
+{
+ u8 mask[11] = { 0x0a, 0x92, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00 };
+ u8 trace_enable = 0x00;
+ struct sk_buff *skb;
+
+ if (!features) {
+ bt_dev_warn(hdev, "Debug features not read");
+ return -EINVAL;
+ }
+
+ if (!(features->page1[0] & 0x3f)) {
+ bt_dev_info(hdev, "Telemetry exception format not supported");
+ return 0;
+ }
+
+ /* Should stop the trace before writing ddc event mask. */
+ skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Stop tracing of link statistics events failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+ skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
kfree_skb(skb);
+
+ bt_dev_info(hdev, "reset debug features: trace_enable 0x%02x mask 0x%02x",
+ trace_enable, mask[3]);
+
return 0;
}
+int btintel_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ struct intel_debug_features features;
+ int err;
+
+ bt_dev_dbg(hdev, "enable %d", enable);
+
+	/* Read the Intel supported features and, if the new exception
+	 * formats are supported, load the additional DDC config to
+	 * enable them.
+	 */
+ err = btintel_read_debug_features(hdev, &features);
+ if (err)
+ return err;
+
+ /* Set or reset the debug features. */
+ if (enable)
+ err = btintel_set_debug_features(hdev, &features);
+ else
+ err = btintel_reset_debug_features(hdev, &features);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btintel_set_quality_report);
+
static const struct firmware *btintel_legacy_rom_get_fw(struct hci_dev *hdev,
struct intel_version *ver)
{
@@ -1893,7 +1976,6 @@ static int btintel_bootloader_setup(struct hci_dev *hdev,
u32 boot_param;
char ddcname[64];
int err;
- struct intel_debug_features features;
BT_DBG("%s", hdev->name);
@@ -1934,14 +2016,7 @@ static int btintel_bootloader_setup(struct hci_dev *hdev,
btintel_load_ddc_config(hdev, ddcname);
}
- /* Read the Intel supported features and if new exception formats
- * supported, need to load the additional DDC config to enable.
- */
- err = btintel_read_debug_features(hdev, &features);
- if (!err) {
- /* Set DDC mask for available debug features */
- btintel_set_debug_features(hdev, &features);
- }
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
/* Read the Intel version information after loading the FW */
err = btintel_read_version(hdev, &new_ver);
@@ -2083,13 +2158,102 @@ done:
return err;
}
+static int btintel_get_codec_config_data(struct hci_dev *hdev,
+ __u8 link, struct bt_codec *codec,
+ __u8 *ven_len, __u8 **ven_data)
+{
+ int err = 0;
+
+ if (!ven_data || !ven_len)
+ return -EINVAL;
+
+ *ven_len = 0;
+ *ven_data = NULL;
+
+ if (link != ESCO_LINK) {
+ bt_dev_err(hdev, "Invalid link type(%u)", link);
+ return -EINVAL;
+ }
+
+ *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL);
+ if (!*ven_data) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* supports only CVSD and mSBC offload codecs */
+ switch (codec->id) {
+ case 0x02:
+ **ven_data = 0x00;
+ break;
+ case 0x05:
+ **ven_data = 0x01;
+ break;
+ default:
+ err = -EINVAL;
+ bt_dev_err(hdev, "Invalid codec id(%u)", codec->id);
+ goto error;
+ }
+	/* The codec and its capabilities are pre-defined per preset id:
+ * preset id = 0x00 represents CVSD codec with sampling rate 8K
+ * preset id = 0x01 represents mSBC codec with sampling rate 16K
+ */
+ *ven_len = sizeof(__u8);
+ return err;
+
+error:
+ kfree(*ven_data);
+ *ven_data = NULL;
+ return err;
+}
+
+static int btintel_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
+{
+	/* Intel uses 1 as the data path id for all the use cases */
+ *data_path_id = 1;
+ return 0;
+}
+
+static int btintel_configure_offload(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int err = 0;
+ struct intel_offload_use_cases *use_cases;
+
+ skb = __hci_cmd_sync(hdev, 0xfc86, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading offload use cases failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len < sizeof(*use_cases)) {
+ err = -EIO;
+ goto error;
+ }
+
+ use_cases = (void *)skb->data;
+
+ if (use_cases->status) {
+ err = -bt_to_errno(skb->data[0]);
+ goto error;
+ }
+
+ if (use_cases->preset[0] & 0x03) {
+ hdev->get_data_path_id = btintel_get_data_path_id;
+ hdev->get_codec_config_data = btintel_get_codec_config_data;
+ }
+error:
+ kfree_skb(skb);
+ return err;
+}
+
static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
u32 boot_param;
char ddcname[64];
int err;
- struct intel_debug_features features;
struct intel_version_tlv new_ver;
bt_dev_dbg(hdev, "");
@@ -2125,14 +2289,10 @@ static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
*/
btintel_load_ddc_config(hdev, ddcname);
- /* Read the Intel supported features and if new exception formats
- * supported, need to load the additional DDC config to enable.
- */
- err = btintel_read_debug_features(hdev, &features);
- if (!err) {
- /* Set DDC mask for available debug features */
- btintel_set_debug_features(hdev, &features);
- }
+ /* Read supported use cases and set callbacks to fetch datapath id */
+ btintel_configure_offload(hdev);
+
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
/* Read the Intel version information after loading the FW */
err = btintel_read_version_tlv(hdev, &new_ver);
@@ -2232,6 +2392,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ /* Set up the quality report callback for Intel devices */
+ hdev->set_quality_report = btintel_set_quality_report;
+
/* For Legacy device, check the HW platform value and size */
if (skb->len == sizeof(ver) && skb->data[1] == 0x37) {
bt_dev_dbg(hdev, "Read the legacy Intel version information");
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index aa64072bbe68..e500c0d7a729 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -132,6 +132,11 @@ struct intel_debug_features {
__u8 page1[16];
} __packed;
+struct intel_offload_use_cases {
+ __u8 status;
+ __u8 preset[8];
+} __packed;
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
@@ -204,6 +209,7 @@ int btintel_configure_setup(struct hci_dev *hdev);
void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len);
void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len);
+int btintel_set_quality_report(struct hci_dev *hdev, bool enable);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -294,4 +300,9 @@ static inline void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len)
{
}
+
+static inline int btintel_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ return -ENODEV;
+}
#endif
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 8b9d78ce6bb2..5ccbe4d459d0 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -587,12 +587,12 @@ static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return 0;
}
-static bool btmrvl_prevent_wake(struct hci_dev *hdev)
+static bool btmrvl_wakeup(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
- return !device_may_wakeup(&card->func->dev);
+ return device_may_wakeup(&card->func->dev);
}
/*
@@ -696,7 +696,7 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->send = btmrvl_send_frame;
hdev->setup = btmrvl_setup;
hdev->set_bdaddr = btmrvl_set_bdaddr;
- hdev->prevent_wake = btmrvl_prevent_wake;
+ hdev->wakeup = btmrvl_wakeup;
SET_HCIDEV_DEV(hdev, &card->func->dev);
hdev->dev_type = priv->btmrvl_dev.dev_type;
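
This ->prevent_wake to ->wakeup conversion recurs below for btusb and
hci_qca: the HCI core now asks "may this device wake the system?"
instead of "should wakeup be prevented?", so each driver drops the
negation and renames the hook. The transform is purely mechanical:

	/* old: hdev->prevent_wake = foo_prevent_wake; */
	static bool foo_prevent_wake(struct hci_dev *hdev)
	{
		return !device_may_wakeup(transport_dev);
	}

	/* new: hdev->wakeup = foo_wakeup; */
	static bool foo_wakeup(struct hci_dev *hdev)
	{
		return device_may_wakeup(transport_dev);
	}

where transport_dev stands for whatever struct device the transport
sits on (the SDIO function here, the USB device for btusb).
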
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index e9d91d7c0db4..9ba22b13b4fa 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -158,8 +158,10 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
int err;
hlen = sizeof(*hdr) + wmt_params->dlen;
- if (hlen > 255)
- return -EINVAL;
+ if (hlen > 255) {
+ err = -EINVAL;
+ goto err_free_skb;
+ }
hdr = (struct mtk_wmt_hdr *)&wc;
hdr->dir = 1;
@@ -173,7 +175,7 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
if (err < 0) {
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_skb;
}
/* The vendor specific WMT commands are all answered by a vendor
@@ -190,13 +192,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_skb;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto err_free_skb;
}
/* Parse and handle the return WMT event */
diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c
index 8646b6dd11e9..634cf8f5ed2d 100644
--- a/drivers/bluetooth/btrsi.c
+++ b/drivers/bluetooth/btrsi.c
@@ -19,7 +19,6 @@
#include <net/bluetooth/hci_core.h>
#include <asm/unaligned.h>
#include <net/rsi_91x.h>
-#include <net/genetlink.h>
#define RSI_DMA_ALIGN 8
#define RSI_FRAME_DESC_SIZE 16
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 1f8afa0244d8..c2bdd1e6060e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -59,6 +59,7 @@ struct id_table {
__u8 hci_bus;
bool config_needed;
bool has_rom_version;
+ bool has_msft_ext;
char *fw_name;
char *cfg_name;
};
@@ -121,6 +122,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8821c_fw.bin",
.cfg_name = "rtl_bt/rtl8821c_config" },
@@ -135,6 +137,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_UART),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8761b_fw.bin",
.cfg_name = "rtl_bt/rtl8761b_config" },
@@ -149,6 +152,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART),
.config_needed = true,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822cs_fw.bin",
.cfg_name = "rtl_bt/rtl8822cs_config" },
@@ -156,6 +160,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822cu_fw.bin",
.cfg_name = "rtl_bt/rtl8822cu_config" },
@@ -163,6 +168,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xb, 0x7, HCI_USB),
.config_needed = true,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822b_fw.bin",
.cfg_name = "rtl_bt/rtl8822b_config" },
@@ -170,6 +176,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8852A, 0xa, 0xb, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8852au_fw.bin",
.cfg_name = "rtl_bt/rtl8852au_config" },
};
@@ -594,8 +601,10 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
- if (resp->hci_ver == 0x8 && le16_to_cpu(resp->hci_rev) == 0x826c &&
- resp->lmp_ver == 0x8 && le16_to_cpu(resp->lmp_subver) == 0xa99e)
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
+
+ if (!btrtl_dev->ic_info)
btrtl_dev->drop_fw = true;
if (btrtl_dev->drop_fw) {
@@ -634,13 +643,13 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
hci_ver = resp->hci_ver;
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
+
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
}
out_free:
kfree_skb(skb);
- btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
- hdev->bus);
-
if (!btrtl_dev->ic_info) {
rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
lmp_subver, hci_rev, hci_ver);
@@ -684,12 +693,8 @@ out_free:
	/* The following chips support the Microsoft vendor extension,
* therefore set the corresponding VsMsftOpCode.
*/
- switch (lmp_subver) {
- case RTL_ROM_LMP_8822B:
- case RTL_ROM_LMP_8852A:
+ if (btrtl_dev->ic_info->has_msft_ext)
hci_set_msft_opcode(hdev, 0xFCF0);
- break;
- }
return btrtl_dev;
@@ -746,6 +751,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8852A:
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_aosp_capable(hdev);
break;
default:
rtl_dev_dbg(hdev, "Central-peripheral role not enabled.");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 60d2fce59a71..75c83768c257 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -384,6 +384,12 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
@@ -410,6 +416,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -433,6 +442,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8761B Bluetooth devices */
+ { USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Additional Realtek 8761BU Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -451,10 +464,6 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
- /* Bluetooth component of Realtek 8852AE device */
- { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
- BTUSB_WIDEBAND_SPEECH },
-
{ USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK |
@@ -652,11 +661,33 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
+ struct gpio_desc *reset_gpio = data->reset_gpio;
int err;
if (++data->cmd_timeout_cnt < 5)
return;
+ if (reset_gpio) {
+ bt_dev_err(hdev, "Reset qca device via bt_en gpio");
+
+ /* Toggle the hard reset line. The qca bt device is going to
+ * yank itself off the USB and then replug. The cleanup is handled
+ * correctly on the way out (standard USB disconnect), and the new
+ * device is detected cleanly and bound to the driver again like
+ * it should be.
+ */
+ if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
+ bt_dev_err(hdev, "last reset failed? Not resetting again");
+ return;
+ }
+
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ msleep(200);
+ gpiod_set_value_cansleep(reset_gpio, 1);
+
+ return;
+ }
+
bt_dev_err(hdev, "Multiple cmd timeouts seen. Resetting usb device.");
/* This is not an unbalanced PM reference since the device will reset */
err = usb_autopm_get_interface(data->intf);
@@ -2200,6 +2231,23 @@ struct btmtk_section_map {
};
} __packed;
+static int btusb_set_bdaddr_mtk(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ long ret;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "changing Mediatek device address failed (%ld)",
+ ret);
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
static void btusb_mtk_wmt_recv(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
@@ -2804,6 +2852,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
case 0x7668:
fwname = FIRMWARE_MT7668;
break;
+ case 0x7922:
case 0x7961:
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
@@ -3591,11 +3640,11 @@ static void btusb_check_needs_reset_resume(struct usb_interface *intf)
interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
-static bool btusb_prevent_wake(struct hci_dev *hdev)
+static bool btusb_wakeup(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
- return !device_may_wakeup(&data->udev->dev);
+ return device_may_wakeup(&data->udev->dev);
}
static int btusb_shutdown_qca(struct hci_dev *hdev)
@@ -3752,7 +3801,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
- hdev->prevent_wake = btusb_prevent_wake;
+ hdev->wakeup = btusb_wakeup;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -3819,6 +3868,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
hdev->cmd_timeout = btusb_mtk_cmd_timeout;
+ hdev->set_bdaddr = btusb_set_bdaddr_mtk;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
data->recv_acl = btusb_recv_acl_mtk;
}
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 0c0dedece59c..34286ffe0568 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -587,9 +587,11 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
count -= processed;
}
- pm_runtime_get(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
- pm_runtime_put_autosuspend(&hu->serdev->dev);
+ if (hu->serdev) {
+ pm_runtime_get(&hu->serdev->dev);
+ pm_runtime_mark_last_busy(&hu->serdev->dev);
+ pm_runtime_put_autosuspend(&hu->serdev->dev);
+ }
return 0;
}
@@ -814,7 +816,6 @@ static int h5_serdev_probe(struct serdev_device *serdev)
struct device *dev = &serdev->dev;
struct h5 *h5;
const struct h5_device_data *data;
- int err;
h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
if (!h5)
@@ -846,6 +847,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
h5->vnd = data->vnd;
}
+ if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
+ set_bit(H5_WAKEUP_DISABLE, &h5->flags);
h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(h5->enable_gpio))
@@ -856,14 +859,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (IS_ERR(h5->device_wake_gpio))
return PTR_ERR(h5->device_wake_gpio);
- err = hci_uart_register_device(&h5->serdev_hu, &h5p);
- if (err)
- return err;
-
- if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
- set_bit(H5_WAKEUP_DISABLE, &h5->flags);
-
- return 0;
+ return hci_uart_register_device(&h5->serdev_hu, &h5p);
}
static void h5_serdev_remove(struct serdev_device *serdev)
@@ -962,11 +958,13 @@ static void h5_btrtl_open(struct h5 *h5)
serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
serdev_device_set_baudrate(h5->hu->serdev, 115200);
- pm_runtime_set_active(&h5->hu->serdev->dev);
- pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
- pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
- SUSPEND_TIMEOUT_MS);
- pm_runtime_enable(&h5->hu->serdev->dev);
+ if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
+ pm_runtime_set_active(&h5->hu->serdev->dev);
+ pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
+ pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
+ SUSPEND_TIMEOUT_MS);
+ pm_runtime_enable(&h5->hu->serdev->dev);
+ }
/* The controller needs up to 500ms to wakeup */
gpiod_set_value_cansleep(h5->enable_gpio, 1);
@@ -976,7 +974,8 @@ static void h5_btrtl_open(struct h5 *h5)
static void h5_btrtl_close(struct h5 *h5)
{
- pm_runtime_disable(&h5->hu->serdev->dev);
+ if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags))
+ pm_runtime_disable(&h5->hu->serdev->dev);
gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
gpiod_set_value_cansleep(h5->enable_gpio, 0);
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 824d1c7e3e53..ecdf8e034351 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -479,6 +479,9 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_DBG("tty %p", tty);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
/* Error if the tty has no write op instead of leaving an exploitable
* hole
*/
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 53deea2eb7b4..dd768a8ed7cb 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1577,7 +1577,7 @@ static void qca_cmd_timeout(struct hci_dev *hdev)
mutex_unlock(&qca->hci_memdump_lock);
}
-static bool qca_prevent_wake(struct hci_dev *hdev)
+static bool qca_wakeup(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
@@ -1730,6 +1730,7 @@ retry:
if (qca_is_wcn399x(soc_type) ||
qca_is_wcn6750(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_aosp_capable(hdev);
ret = qca_read_soc_version(hdev, &ver, soc_type);
if (ret)
@@ -1764,7 +1765,7 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
- hu->hdev->prevent_wake = qca_prevent_wake;
+ hu->hdev->wakeup = qca_wakeup;
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 8ab26dec5f6e..b45db0db347c 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -21,6 +21,7 @@
#include <linux/skbuff.h>
#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -37,6 +38,9 @@ struct vhci_data {
struct mutex open_mutex;
struct delayed_work open_timeout;
+
+ bool suspended;
+ bool wakeup;
};
static int vhci_open_dev(struct hci_dev *hdev)
@@ -73,6 +77,115 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id)
+{
+ *data_path_id = 0;
+ return 0;
+}
+
+static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type,
+ struct bt_codec *codec, __u8 *vnd_len,
+ __u8 **vnd_data)
+{
+ if (type != ESCO_LINK)
+ return -EINVAL;
+
+ *vnd_len = 0;
+ *vnd_data = NULL;
+ return 0;
+}
+
+static bool vhci_wakeup(struct hci_dev *hdev)
+{
+ struct vhci_data *data = hci_get_drvdata(hdev);
+
+ return data->wakeup;
+}
+
+static ssize_t force_suspend_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ char buf[3];
+
+ buf[0] = data->suspended ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_suspend_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ if (data->suspended == enable)
+ return -EALREADY;
+
+ if (enable)
+ err = hci_suspend_dev(data->hdev);
+ else
+ err = hci_resume_dev(data->hdev);
+
+ if (err)
+ return err;
+
+ data->suspended = enable;
+
+ return count;
+}
+
+static const struct file_operations force_suspend_fops = {
+ .open = simple_open,
+ .read = force_suspend_read,
+ .write = force_suspend_write,
+ .llseek = default_llseek,
+};
+
+static ssize_t force_wakeup_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ char buf[3];
+
+ buf[0] = data->wakeup ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_wakeup_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+	if (data->wakeup == enable)
+		return -EALREADY;
+
+	data->wakeup = enable;
+
+	return count;
+}
+
+static const struct file_operations force_wakeup_fops = {
+ .open = simple_open,
+ .read = force_wakeup_read,
+ .write = force_wakeup_write,
+ .llseek = default_llseek,
+};
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -112,6 +225,9 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
hdev->send = vhci_send_frame;
+ hdev->get_data_path_id = vhci_get_data_path_id;
+ hdev->get_codec_config_data = vhci_get_codec_config_data;
+ hdev->wakeup = vhci_wakeup;
/* bit 6 is for external configuration */
if (opcode & 0x40)
@@ -129,6 +245,12 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
return -EBUSY;
}
+ debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
+ &force_suspend_fops);
+
+ debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
+ &force_wakeup_fops);
+
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
skb_put_u8(skb, 0xff);
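
The force_suspend and force_wakeup entries above follow the stock
debugfs boolean-attribute recipe: simple_open for ->open,
simple_read_from_buffer to report Y/N, and kstrtobool_from_user to
parse writes before applying side effects. A minimal sketch of that
recipe with an invented my_flag (a plain flag with no side effects on
write could use debugfs_create_bool() instead):

static bool my_flag;

static ssize_t my_flag_read(struct file *file, char __user *user_buf,
			    size_t count, loff_t *ppos)
{
	char buf[3] = { my_flag ? 'Y' : 'N', '\n', '\0' };

	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t my_flag_write(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	my_flag = enable;	/* apply driver-specific side effects here */
	return count;
}

static const struct file_operations my_flag_fops = {
	.open	= simple_open,
	.read	= my_flag_read,
	.write	= my_flag_write,
	.llseek	= default_llseek,
};

Registration then mirrors the vhci hunk, with parent_dentry standing in
for the owning debugfs directory:

	debugfs_create_file("my_flag", 0644, parent_dentry, NULL,
			    &my_flag_fops);
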
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index a4cf3d692dc3..3c68e174a113 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -30,7 +30,7 @@ config ARM_INTEGRATOR_LM
found on the ARM Integrator AP (Application Platform)
config BRCMSTB_GISB_ARB
- bool "Broadcom STB GISB bus arbiter"
+ tristate "Broadcom STB GISB bus arbiter"
depends on ARM || ARM64 || MIPS
default ARCH_BRCMSTB || BMIPS_GENERIC
help
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 6551286a60cc..4c2f7d61cb9b 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2014-2017 Broadcom
+ * Copyright (C) 2014-2021 Broadcom
*/
#include <linux/init.h>
@@ -536,6 +536,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
+ .suppress_bind_attrs = true,
},
};
@@ -546,3 +547,7 @@ static int __init brcm_gisb_driver_init(void)
}
module_init(brcm_gisb_driver_init);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom STB GISB arbiter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/fsl-mc/Makefile b/drivers/bus/fsl-mc/Makefile
index 4ae292a30e53..892946245527 100644
--- a/drivers/bus/fsl-mc/Makefile
+++ b/drivers/bus/fsl-mc/Makefile
@@ -15,7 +15,8 @@ mc-bus-driver-objs := fsl-mc-bus.o \
dprc-driver.o \
fsl-mc-allocator.o \
fsl-mc-msi.o \
- dpmcp.o
+ dpmcp.o \
+ obj-api.o
# MC userspace support
obj-$(CONFIG_FSL_MC_UAPI_SUPPORT) += fsl-mc-uapi.o
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index 1958fa065360..b3520ea1b9f4 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -48,7 +48,6 @@ struct dpmng_rsp_get_version {
/* DPMCP command IDs */
#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
-#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
struct dpmcp_cmd_open {
@@ -91,7 +90,6 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
/* DPRC command IDs */
#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
-#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
@@ -453,7 +451,6 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
/* Command IDs */
#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
-#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
@@ -492,7 +489,6 @@ struct dpbp_rsp_get_attributes {
/* Command IDs */
#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
-#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
@@ -524,6 +520,41 @@ struct dpcon_cmd_set_notification {
__le64 user_ctx;
};
+/*
+ * Generic FSL MC API
+ */
+
+/* generic command versioning */
+#define OBJ_CMD_BASE_VERSION 1
+#define OBJ_CMD_ID_OFFSET 4
+
+#define OBJ_CMD(id) (((id) << OBJ_CMD_ID_OFFSET) | OBJ_CMD_BASE_VERSION)
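+/* e.g. DPBP_CMDID_OPEN = OBJ_CMD(0x804) = (0x804 << 4) | 1 = 0x8041 */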
+
+/* open command codes */
+#define DPRTC_CMDID_OPEN OBJ_CMD(0x810)
+#define DPNI_CMDID_OPEN OBJ_CMD(0x801)
+#define DPSW_CMDID_OPEN OBJ_CMD(0x802)
+#define DPIO_CMDID_OPEN OBJ_CMD(0x803)
+#define DPBP_CMDID_OPEN OBJ_CMD(0x804)
+#define DPRC_CMDID_OPEN OBJ_CMD(0x805)
+#define DPDMUX_CMDID_OPEN OBJ_CMD(0x806)
+#define DPCI_CMDID_OPEN OBJ_CMD(0x807)
+#define DPCON_CMDID_OPEN OBJ_CMD(0x808)
+#define DPSECI_CMDID_OPEN OBJ_CMD(0x809)
+#define DPAIOP_CMDID_OPEN OBJ_CMD(0x80a)
+#define DPMCP_CMDID_OPEN OBJ_CMD(0x80b)
+#define DPMAC_CMDID_OPEN OBJ_CMD(0x80c)
+#define DPDCEI_CMDID_OPEN OBJ_CMD(0x80d)
+#define DPDMAI_CMDID_OPEN OBJ_CMD(0x80e)
+#define DPDBG_CMDID_OPEN OBJ_CMD(0x80f)
+
+/* Generic object command IDs */
+#define OBJ_CMDID_CLOSE OBJ_CMD(0x800)
+#define OBJ_CMDID_RESET OBJ_CMD(0x005)
+
+struct fsl_mc_obj_cmd_open {
+ __le32 obj_id;
+};
/**
* struct fsl_mc_resource_pool - Pool of MC resources of a given
diff --git a/drivers/bus/fsl-mc/obj-api.c b/drivers/bus/fsl-mc/obj-api.c
new file mode 100644
index 000000000000..06c1dd84e38d
--- /dev/null
+++ b/drivers/bus/fsl-mc/obj-api.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2021 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static int fsl_mc_get_open_cmd_id(const char *type)
+{
+ static const struct {
+ int cmd_id;
+ const char *type;
+ } dev_ids[] = {
+ { DPRTC_CMDID_OPEN, "dprtc" },
+ { DPRC_CMDID_OPEN, "dprc" },
+ { DPNI_CMDID_OPEN, "dpni" },
+ { DPIO_CMDID_OPEN, "dpio" },
+ { DPSW_CMDID_OPEN, "dpsw" },
+ { DPBP_CMDID_OPEN, "dpbp" },
+ { DPCON_CMDID_OPEN, "dpcon" },
+ { DPMCP_CMDID_OPEN, "dpmcp" },
+ { DPMAC_CMDID_OPEN, "dpmac" },
+ { DPSECI_CMDID_OPEN, "dpseci" },
+ { DPDMUX_CMDID_OPEN, "dpdmux" },
+ { DPDCEI_CMDID_OPEN, "dpdcei" },
+ { DPAIOP_CMDID_OPEN, "dpaiop" },
+ { DPCI_CMDID_OPEN, "dpci" },
+ { DPDMAI_CMDID_OPEN, "dpdmai" },
+ { DPDBG_CMDID_OPEN, "dpdbg" },
+ { 0, NULL }
+ };
+ int i;
+
+ for (i = 0; dev_ids[i].type; i++)
+ if (!strcmp(dev_ids[i].type, type))
+ return dev_ids[i].cmd_id;
+
+ return -1;
+}
+
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct fsl_mc_obj_cmd_open *cmd_params;
+ int err = 0;
+ int cmd_id = fsl_mc_get_open_cmd_id(obj_type);
+
+ if (cmd_id == -1)
+ return -ENODEV;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(cmd_id, cmd_flags, 0);
+ cmd_params = (struct fsl_mc_obj_cmd_open *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_open);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(OBJ_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_close);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(OBJ_CMDID_RESET, cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_reset);
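
Taken together, these helpers replace the per-object open/close/reset wrappers
with one generic API keyed on the object type string. A consumer might use them
roughly like this (a sketch; mc_io, obj_id and the "dpbp" type are illustrative):

	u16 token;
	int err;

	err = fsl_mc_obj_open(mc_io, 0, obj_id, "dpbp", &token);
	if (err)
		return err;
	err = fsl_mc_obj_reset(mc_io, 0, token);
	fsl_mc_obj_close(mc_io, 0, token);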
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
index 672518741f86..414f29cdedf0 100644
--- a/drivers/bus/sun50i-de2.c
+++ b/drivers/bus/sun50i-de2.c
@@ -15,10 +15,9 @@ static int sun50i_de2_bus_probe(struct platform_device *pdev)
int ret;
ret = sunxi_sram_claim(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Couldn't map SRAM to device\n");
of_platform_populate(np, NULL, NULL, &pdev->dev);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 6a8b7fb5be58..54c0ee6dda30 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -6,6 +6,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -17,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
+#include <linux/timekeeping.h>
#include <linux/iopoll.h>
#include <linux/platform_data/ti-sysc.h>
@@ -51,11 +53,18 @@ struct sysc_address {
struct list_head node;
};
+struct sysc_module {
+ struct sysc *ddata;
+ struct list_head node;
+};
+
struct sysc_soc_info {
unsigned long general_purpose:1;
enum sysc_soc soc;
- struct mutex list_lock; /* disabled modules list lock */
+ struct mutex list_lock; /* disabled and restored modules list lock */
struct list_head disabled_modules;
+ struct list_head restored_modules;
+ struct notifier_block nb;
};
enum sysc_clocks {
@@ -131,6 +140,7 @@ struct sysc {
struct ti_sysc_cookie cookie;
const char *name;
u32 revision;
+ u32 sysconfig;
unsigned int reserved:1;
unsigned int enabled:1;
unsigned int needs_resume:1;
@@ -147,6 +157,7 @@ struct sysc {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child);
+static int sysc_reset(struct sysc *ddata);
static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
@@ -223,37 +234,77 @@ static u32 sysc_read_sysstatus(struct sysc *ddata)
return sysc_read(ddata, offset);
}
-/* Poll on reset status */
-static int sysc_wait_softreset(struct sysc *ddata)
+static int sysc_poll_reset_sysstatus(struct sysc *ddata)
{
- u32 sysc_mask, syss_done, rstval;
- int syss_offset, error = 0;
-
- if (ddata->cap->regbits->srst_shift < 0)
- return 0;
-
- syss_offset = ddata->offsets[SYSC_SYSSTATUS];
- sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+ int error, retries;
+ u32 syss_done, rstval;
if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
syss_done = 0;
else
syss_done = ddata->cfg.syss_mask;
- if (syss_offset >= 0) {
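+ /*
+ * readx_poll_timeout_atomic() relies on timekeeping, which may be
+ * suspended during system suspend; fall back to a udelay() loop then.
+ */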
+ if (likely(!timekeeping_suspended)) {
error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
rstval, (rstval & ddata->cfg.syss_mask) ==
syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysstatus(ddata);
+ if ((rstval & ddata->cfg.syss_mask) == syss_done)
+ return 0;
+ udelay(2); /* Account for udelay flakiness */
+ }
+ error = -ETIMEDOUT;
+ }
+
+ return error;
+}
+
+static int sysc_poll_reset_sysconfig(struct sysc *ddata)
+{
+ int error, retries;
+ u32 sysc_mask, rstval;
+
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
- } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
+ if (likely(!timekeeping_suspended)) {
error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
rstval, !(rstval & sysc_mask),
100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysconfig(ddata);
+ if (!(rstval & sysc_mask))
+ return 0;
+ udelay(2); /* Account for udelay flakiness */
+ }
+ error = -ETIMEDOUT;
}
return error;
}
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+ int syss_offset, error = 0;
+
+ if (ddata->cap->regbits->srst_shift < 0)
+ return 0;
+
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+
+ if (syss_offset >= 0)
+ error = sysc_poll_reset_sysstatus(ddata);
+ else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
+ error = sysc_poll_reset_sysconfig(ddata);
+
+ return error;
+}
+
static int sysc_add_named_clock_from_child(struct sysc *ddata,
const char *name,
const char *optfck_name)
@@ -1094,7 +1145,8 @@ set_midle:
best_mode = fls(ddata->cfg.midlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
dev_err(dev, "%s: invalid midlemode\n", __func__);
- return -EINVAL;
+ error = -EINVAL;
+ goto save_context;
}
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
@@ -1112,13 +1164,16 @@ set_autoidle:
sysc_write_sysconfig(ddata, reg);
}
- /* Flush posted write */
- sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ error = 0;
+
+save_context:
+ /* Save context and flush posted write */
+ ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
if (ddata->module_enable_quirk)
ddata->module_enable_quirk(ddata);
- return 0;
+ return error;
}
static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
@@ -1175,8 +1230,10 @@ static int sysc_disable_module(struct device *dev)
set_sidle:
/* Set SIDLE mode */
idlemodes = ddata->cfg.sidlemodes;
- if (!idlemodes || regbits->sidle_shift < 0)
- return 0;
+ if (!idlemodes || regbits->sidle_shift < 0) {
+ ret = 0;
+ goto save_context;
+ }
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
best_mode = SYSC_IDLE_FORCE;
@@ -1184,7 +1241,8 @@ set_sidle:
ret = sysc_best_idle_mode(idlemodes, &best_mode);
if (ret) {
dev_err(dev, "%s: invalid sidlemode\n", __func__);
- return ret;
+ ret = -EINVAL;
+ goto save_context;
}
}
@@ -1195,10 +1253,13 @@ set_sidle:
reg |= 1 << regbits->autoidle_shift;
sysc_write_sysconfig(ddata, reg);
- /* Flush posted write */
- sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ ret = 0;
- return 0;
+save_context:
+ /* Save context and flush posted write */
+ ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ return ret;
}
static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
@@ -1336,13 +1397,40 @@ err_allow_idle:
return error;
}
+/*
+ * Checks if device context was lost. Assumes the sysconfig register value
+ * after lost context is different from the configured value. Only works for
+ * enabled devices.
+ *
+ * Eventually we may also want to add support for the context-lost
+ * registers that some SoCs have.
+ */
+static int sysc_check_context(struct sysc *ddata)
+{
+ u32 reg;
+
+ if (!ddata->enabled)
+ return -ENODATA;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ if (reg == ddata->sysconfig)
+ return 0;
+
+ return -EACCES;
+}
+
static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
{
struct device *dev = ddata->dev;
int error;
- /* Disable target module if it is enabled */
if (ddata->enabled) {
+ /* Nothing to do if enabled and context not lost */
+ error = sysc_check_context(ddata);
+ if (!error)
+ return 0;
+
+ /* Disable target module if it is enabled */
error = sysc_runtime_suspend(dev);
if (error)
dev_warn(dev, "reinit suspend failed: %i\n", error);
@@ -1353,6 +1441,15 @@ static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
if (error)
dev_warn(dev, "reinit resume failed: %i\n", error);
+ /* Some modules like am335x gpmc need reset and restore of sysconfig */
+ if (ddata->cfg.quirks & SYSC_QUIRK_RESET_ON_CTX_LOST) {
+ error = sysc_reset(ddata);
+ if (error)
+ dev_warn(dev, "reinit reset failed: %i\n", error);
+
+ sysc_write_sysconfig(ddata, ddata->sysconfig);
+ }
+
if (leave_enabled)
return error;
@@ -1442,10 +1539,6 @@ struct sysc_revision_quirk {
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
- SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
- SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
- SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
@@ -1479,7 +1572,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET),
SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
+ SYSC_QUIRK_REINIT_ON_CTX_LOST | SYSC_QUIRK_RESET_ON_CTX_LOST |
SYSC_QUIRK_GPMC_DEBUG),
SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_NEEDED),
@@ -1515,10 +1611,11 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
- 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_MODULE_QUIRK_OTG),
SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
- SYSC_QUIRK_REINIT_ON_RESUME),
+ SYSC_QUIRK_REINIT_ON_CTX_LOST),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
/* PRUSS on am3, am4 and am5 */
@@ -1583,6 +1680,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
+ SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 0),
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
@@ -1874,6 +1972,22 @@ static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
sysc_quirk_rtc(ddata, true);
}
+/* OTG omap2430 glue layer up to omap4 needs OTG_FORCESTDBY configured */
+static void sysc_module_enable_quirk_otg(struct sysc *ddata)
+{
+ int offset = 0x414; /* OTG_FORCESTDBY */
+
+ sysc_write(ddata, offset, 0);
+}
+
+static void sysc_module_disable_quirk_otg(struct sysc *ddata)
+{
+ int offset = 0x414; /* OTG_FORCESTDBY */
+ u32 val = BIT(0); /* ENABLEFORCE */
+
+ sysc_write(ddata, offset, val);
+}
+
/* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */
static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
{
@@ -1956,6 +2070,11 @@ static void sysc_init_module_quirks(struct sysc *ddata)
return;
}
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_OTG) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_otg;
+ ddata->module_disable_quirk = sysc_module_disable_quirk_otg;
+ }
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
@@ -2401,6 +2520,78 @@ static struct dev_pm_domain sysc_child_pm_domain = {
}
};
+/* Caller needs to take list_lock if ever used outside of cpu_pm */
+static void sysc_reinit_modules(struct sysc_soc_info *soc)
+{
+ struct sysc_module *module;
+ struct list_head *pos;
+ struct sysc *ddata;
+
+ list_for_each(pos, &sysc_soc->restored_modules) {
+ module = list_entry(pos, struct sysc_module, node);
+ ddata = module->ddata;
+ sysc_reinit_module(ddata, ddata->enabled);
+ }
+}
+
+/**
+ * sysc_context_notifier - optionally reset and restore module after idle
+ * @nb: notifier block
+ * @cmd: unused
+ * @v: unused
+ *
+ * Some interconnect target modules need to be restored, or reset and restored,
+ * on the CPU_PM CPU_CLUSTER_PM_EXIT notifier. This is needed at least for the
+ * am335x OTG and GPMC target modules, even if the modules are unused.
+ */
+static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct sysc_soc_info *soc;
+
+ soc = container_of(nb, struct sysc_soc_info, nb);
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
+ case CPU_CLUSTER_PM_EXIT:
+ sysc_reinit_modules(soc);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * sysc_add_restored - optionally add reset and restore quirk handling
+ * @ddata: device data
+ */
+static void sysc_add_restored(struct sysc *ddata)
+{
+ struct sysc_module *restored_module;
+
+ restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
+ if (!restored_module)
+ return;
+
+ restored_module->ddata = ddata;
+
+ mutex_lock(&sysc_soc->list_lock);
+
+ list_add(&restored_module->node, &sysc_soc->restored_modules);
+
+ if (sysc_soc->nb.notifier_call)
+ goto out_unlock;
+
+ sysc_soc->nb.notifier_call = sysc_context_notifier;
+ cpu_pm_register_notifier(&sysc_soc->nb);
+
+out_unlock:
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
/**
* sysc_legacy_idle_quirk - handle children in omap_device compatible way
* @ddata: device driver data
@@ -2900,12 +3091,14 @@ static int sysc_add_disabled(unsigned long base)
}
/*
- * One time init to detect the booted SoC and disable unavailable features.
+ * One time init to detect the booted SoC, disable unavailable features,
+ * and initialize the list for the optional cpu_pm notifier.
+ *
* Note that we initialize static data shared across all ti-sysc instances
* so ddata is only used for SoC type. This can be called from module_init
* once we no longer need to rely on platform data.
*/
-static int sysc_init_soc(struct sysc *ddata)
+static int sysc_init_static_data(struct sysc *ddata)
{
const struct soc_device_attribute *match;
struct ti_sysc_platform_data *pdata;
@@ -2921,6 +3114,7 @@ static int sysc_init_soc(struct sysc *ddata)
mutex_init(&sysc_soc->list_lock);
INIT_LIST_HEAD(&sysc_soc->disabled_modules);
+ INIT_LIST_HEAD(&sysc_soc->restored_modules);
sysc_soc->general_purpose = true;
pdata = dev_get_platdata(ddata->dev);
@@ -2985,15 +3179,24 @@ static int sysc_init_soc(struct sysc *ddata)
return 0;
}
-static void sysc_cleanup_soc(void)
+static void sysc_cleanup_static_data(void)
{
+ struct sysc_module *restored_module;
struct sysc_address *disabled_module;
struct list_head *pos, *tmp;
if (!sysc_soc)
return;
+ if (sysc_soc->nb.notifier_call)
+ cpu_pm_unregister_notifier(&sysc_soc->nb);
+
mutex_lock(&sysc_soc->list_lock);
+ list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
+ restored_module = list_entry(pos, struct sysc_module, node);
+ list_del(pos);
+ kfree(restored_module);
+ }
list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
disabled_module = list_entry(pos, struct sysc_address, node);
list_del(pos);
@@ -3061,7 +3264,7 @@ static int sysc_probe(struct platform_device *pdev)
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
- error = sysc_init_soc(ddata);
+ error = sysc_init_static_data(ddata);
if (error)
return error;
@@ -3159,6 +3362,9 @@ static int sysc_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
}
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
+ sysc_add_restored(ddata);
+
return 0;
err:
@@ -3240,7 +3446,7 @@ static void __exit sysc_exit(void)
{
bus_unregister_notifier(&platform_bus_type, &sysc_nb);
platform_driver_unregister(&sysc_driver);
- sysc_cleanup_soc();
+ sysc_cleanup_static_data();
}
module_exit(sysc_exit);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index bd2e5b1560f5..9877e413fce3 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -344,6 +344,12 @@ static void cdrom_sysctl_register(void);
static LIST_HEAD(cdrom_list);
+static void signal_media_change(struct cdrom_device_info *cdi)
+{
+ cdi->mc_flags = 0x3; /* set media changed bits, on both queues */
+ cdi->last_media_change_ms = ktime_to_ms(ktime_get());
+}
+
int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
@@ -616,6 +622,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
ENSURE(cdo, generic_packet, CDC_GENERIC_PACKET);
cdi->mc_flags = 0;
cdi->options = CDO_USE_FFLAGS;
+ cdi->last_media_change_ms = ktime_to_ms(ktime_get());
if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
cdi->options |= (int) CDO_AUTO_CLOSE;
@@ -864,7 +871,7 @@ static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[32];
- int ret, mmc3_profile;
+ int mmc3_profile;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
@@ -874,7 +881,7 @@ static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
cgc.cmd[8] = sizeof(buffer); /* Allocation Length */
cgc.quiet = 1;
- if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
+ if (cdi->ops->generic_packet(cdi, &cgc))
mmc3_profile = 0xffff;
else
mmc3_profile = (buffer[6] << 8) | buffer[7];
@@ -1421,8 +1428,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
cdi->ops->check_events(cdi, 0, slot);
if (slot == CDSL_NONE) {
- /* set media changed bits, on both queues */
- cdi->mc_flags = 0x3;
+ signal_media_change(cdi);
return cdrom_load_unload(cdi, -1);
}
@@ -1455,7 +1461,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
slot = curslot;
/* set media changed bits on both queues */
- cdi->mc_flags = 0x3;
+ signal_media_change(cdi);
if ((ret = cdrom_load_unload(cdi, slot)))
return ret;
@@ -1521,7 +1527,7 @@ int media_changed(struct cdrom_device_info *cdi, int queue)
cdi->ioctl_events = 0;
if (changed) {
- cdi->mc_flags = 0x3; /* set bit on both queues */
+ signal_media_change(cdi);
ret |= 1;
cdi->media_written = 0;
}
@@ -2336,6 +2342,49 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
return ret;
}
+/*
+ * Media change detection with timing information.
+ *
+ * arg is a pointer to a cdrom_timed_media_change_info struct.
+ * arg->last_media_change may be set by the caller to the timestamp
+ * (in ms) of the last media change it knows about.
+ * Upon successful return, the ioctl will set arg->last_media_change
+ * to the latest media change timestamp known by the kernel/driver,
+ * and will set the MEDIA_CHANGED_FLAG bit in arg->media_flags if that
+ * timestamp is more recent than the one supplied by the caller. (A
+ * userspace usage sketch follows the function below.)
+ */
+static int cdrom_ioctl_timed_media_change(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ int ret;
+ struct cdrom_timed_media_change_info __user *info;
+ struct cdrom_timed_media_change_info tmp_info;
+
+ if (!CDROM_CAN(CDC_MEDIA_CHANGED))
+ return -ENOSYS;
+
+ info = (struct cdrom_timed_media_change_info __user *)arg;
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_TIMED_MEDIA_CHANGE\n");
+
+ ret = cdrom_ioctl_media_changed(cdi, CDSL_CURRENT);
+ if (ret < 0)
+ return ret;
+
+ if (copy_from_user(&tmp_info, info, sizeof(tmp_info)) != 0)
+ return -EFAULT;
+
+ tmp_info.media_flags = 0;
+ /* timestamps are unsigned, so the difference must be tested as signed */
+ if ((s64)(tmp_info.last_media_change - cdi->last_media_change_ms) < 0)
+ tmp_info.media_flags |= MEDIA_CHANGED_FLAG;
+
+ tmp_info.last_media_change = cdi->last_media_change_ms;
+
+ if (copy_to_user(info, &tmp_info, sizeof(*info)) != 0)
+ return -EFAULT;
+
+ return 0;
+}
+
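+/*
+ * A userspace caller might exercise this roughly as follows (a sketch;
+ * fd and last_seen_ms are illustrative):
+ *
+ *	struct cdrom_timed_media_change_info info = { 0 };
+ *
+ *	info.last_media_change = last_seen_ms;
+ *	if (!ioctl(fd, CDROM_TIMED_MEDIA_CHANGE, &info) &&
+ *	    (info.media_flags & MEDIA_CHANGED_FLAG))
+ *		last_seen_ms = info.last_media_change;
+ */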
static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi,
unsigned long arg)
{
@@ -3313,6 +3362,8 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
return cdrom_ioctl_eject_sw(cdi, arg);
case CDROM_MEDIA_CHANGED:
return cdrom_ioctl_media_changed(cdi, arg);
+ case CDROM_TIMED_MEDIA_CHANGE:
+ return cdrom_ioctl_timed_media_change(cdi, arg);
case CDROM_SET_OPTIONS:
return cdrom_ioctl_set_options(cdi, arg);
case CDROM_CLEAR_OPTIONS:
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 8e1fe75af93f..d50cc1fd34d5 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -805,9 +805,14 @@ static int probe_gdrom(struct platform_device *devptr)
err = -ENOMEM;
goto probe_fail_free_irqs;
}
- add_disk(gd.disk);
+ err = add_disk(gd.disk);
+ if (err)
+ goto probe_fail_add_disk;
+
return 0;
+probe_fail_add_disk:
+ kfree(gd.toc);
probe_fail_free_irqs:
free_irq(HW_EVENT_GDROM_DMA, &gd);
free_irq(HW_EVENT_GDROM_CMD, &gd);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 239eca4d6805..814b3d0ca7b7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -63,7 +63,7 @@ config HW_RANDOM_AMD
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
- depends on ARCH_AT91 && HAVE_CLK && OF
+ depends on (ARCH_AT91 || COMPILE_TEST) && HAVE_CLK && OF
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -87,7 +87,7 @@ config HW_RANDOM_BA431
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
- ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
+ ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -100,7 +100,7 @@ config HW_RANDOM_BCM2835
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the RNG200
@@ -165,7 +165,7 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -179,7 +179,7 @@ config HW_RANDOM_OMAP
config HW_RANDOM_OMAP3_ROM
tristate "OMAP3 ROM Random Number Generator support"
- depends on ARCH_OMAP3
+ depends on ARCH_OMAP3 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -298,7 +298,7 @@ config HW_RANDOM_INGENIC_TRNG
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
- depends on ARCH_NOMADIK
+ depends on ARCH_NOMADIK || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
index 188854dd16a9..7df5e9f7519d 100644
--- a/drivers/char/hw_random/ixp4xx-rng.c
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -42,13 +42,11 @@ static int ixp4xx_rng_probe(struct platform_device *pdev)
{
void __iomem * rng_base;
struct device *dev = &pdev->dev;
- struct resource *res;
if (!cpu_is_ixp46x()) /* includes IXP455 */
return -ENOSYS;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rng_base = devm_ioremap_resource(dev, res);
+ rng_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rng_base))
return PTR_ERR(rng_base);
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index e446236e81f2..8bb30282ca46 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -54,9 +54,10 @@ static int meson_rng_probe(struct platform_device *pdev)
if (IS_ERR(data->base))
return PTR_ERR(data->base);
- data->core_clk = devm_clk_get(dev, "core");
+ data->core_clk = devm_clk_get_optional(dev, "core");
if (IS_ERR(data->core_clk))
- data->core_clk = NULL;
+ return dev_err_probe(dev, PTR_ERR(data->core_clk),
+ "Failed to get core clock\n");
if (data->core_clk) {
ret = clk_prepare_enable(data->core_clk);
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 8ad7b515a51b..6c00ea008555 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -166,8 +166,13 @@ static int mtk_rng_runtime_resume(struct device *dev)
return mtk_rng_init(&priv->rng);
}
-static UNIVERSAL_DEV_PM_OPS(mtk_rng_pm_ops, mtk_rng_runtime_suspend,
- mtk_rng_runtime_resume, NULL);
+static const struct dev_pm_ops mtk_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_rng_runtime_suspend,
+ mtk_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
#define MTK_RNG_PM_OPS (&mtk_rng_pm_ops)
#else /* CONFIG_PM */
#define MTK_RNG_PM_OPS NULL
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index 7c673afd7241..2beaa35c0d74 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -111,7 +111,7 @@ static ssize_t trng_counter_show(struct device *dev,
#if IS_ENABLED(CONFIG_ARCH_RANDOM)
u64 arch_counter = atomic64_read(&s390_arch_random_counter);
- return snprintf(buf, PAGE_SIZE,
+ return sysfs_emit(buf,
"trng: %llu\n"
"hwrng: %llu\n"
"arch: %llu\n"
@@ -119,7 +119,7 @@ static ssize_t trng_counter_show(struct device *dev,
dev_counter, hwrng_counter, arch_counter,
dev_counter + hwrng_counter + arch_counter);
#else
- return snprintf(buf, PAGE_SIZE,
+ return sysfs_emit(buf,
"trng: %llu\n"
"hwrng: %llu\n"
"total: %llu\n",
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index a90001e02bf7..0a7dde135db1 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -18,13 +18,20 @@ static DEFINE_IDA(rng_index_ida);
struct virtrng_info {
struct hwrng hwrng;
struct virtqueue *vq;
- struct completion have_data;
char name[25];
- unsigned int data_avail;
int index;
- bool busy;
bool hwrng_register_done;
bool hwrng_removed;
+ /* data transfer */
+ struct completion have_data;
+ unsigned int data_avail;
+ unsigned int data_idx;
+ /* minimal size returned by rng_buffer_size() */
+#if SMP_CACHE_BYTES < 32
+ u8 data[32];
+#else
+ u8 data[SMP_CACHE_BYTES];
+#endif
};
static void random_recv_done(struct virtqueue *vq)
@@ -35,54 +42,88 @@ static void random_recv_done(struct virtqueue *vq)
if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
return;
+ vi->data_idx = 0;
+
complete(&vi->have_data);
}
-/* The host will fill any buffer we give it with sweet, sweet randomness. */
-static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
+static void request_entropy(struct virtrng_info *vi)
{
struct scatterlist sg;
- sg_init_one(&sg, buf, size);
+ reinit_completion(&vi->have_data);
+ vi->data_avail = 0;
+ vi->data_idx = 0;
+
+ sg_init_one(&sg, vi->data, sizeof(vi->data));
/* There should always be room for one buffer. */
- virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);
+ virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL);
virtqueue_kick(vi->vq);
}
+static unsigned int copy_data(struct virtrng_info *vi, void *buf,
+ unsigned int size)
+{
+ size = min_t(unsigned int, size, vi->data_avail);
+ memcpy(buf, vi->data + vi->data_idx, size);
+ vi->data_idx += size;
+ vi->data_avail -= size;
+ if (vi->data_avail == 0)
+ request_entropy(vi);
+ return size;
+}
+
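+/*
+ * Serve reads from the driver-owned buffer: hand out whatever is cached
+ * in vi->data and, once it is drained, queue a fresh request so that one
+ * is always in flight.
+ */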
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
int ret;
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
+ unsigned int chunk;
+ size_t read;
if (vi->hwrng_removed)
return -ENODEV;
- if (!vi->busy) {
- vi->busy = true;
- reinit_completion(&vi->have_data);
- register_buffer(vi, buf, size);
+ read = 0;
+
+ /* copy available data */
+ if (vi->data_avail) {
+ chunk = copy_data(vi, buf, size);
+ size -= chunk;
+ read += chunk;
}
if (!wait)
- return 0;
-
- ret = wait_for_completion_killable(&vi->have_data);
- if (ret < 0)
- return ret;
+ return read;
+
+ /* We have already copied available entropy,
+ * so either size is 0 or data_avail is 0
+ */
+ while (size != 0) {
+ /* data_avail is 0 but a request is pending */
+ ret = wait_for_completion_killable(&vi->have_data);
+ if (ret < 0)
+ return ret;
+ /* if vi->data_avail is 0, we have been interrupted
+ * by a cleanup, but buffer stays in the queue
+ */
+ if (vi->data_avail == 0)
+ return read;
- vi->busy = false;
+ chunk = copy_data(vi, buf + read, size);
+ size -= chunk;
+ read += chunk;
+ }
- return vi->data_avail;
+ return read;
}
static void virtio_cleanup(struct hwrng *rng)
{
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
- if (vi->busy)
- wait_for_completion(&vi->have_data);
+ complete(&vi->have_data);
}
static int probe_common(struct virtio_device *vdev)
@@ -118,6 +159,9 @@ static int probe_common(struct virtio_device *vdev)
goto err_find;
}
+ /* we always have a pending entropy request */
+ request_entropy(vi);
+
return 0;
err_find:
@@ -133,9 +177,9 @@ static void remove_common(struct virtio_device *vdev)
vi->hwrng_removed = true;
vi->data_avail = 0;
+ vi->data_idx = 0;
complete(&vi->have_data);
vdev->config->reset(vdev);
- vi->busy = false;
if (vi->hwrng_register_done)
hwrng_unregister(&vi->hwrng);
vdev->config->del_vqs(vdev);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 249b31197eea..b061e6b513ed 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -69,12 +69,21 @@ config IPMI_SI
config IPMI_SSIF
tristate 'IPMI SMBus handler (SSIF)'
- select I2C
+ depends on I2C
help
Provides a driver for a SMBus interface to a BMC, meaning that you
have a driver that must be accessed over an I2C bus instead of a
standard interface. This module requires I2C support.
+config IPMI_IPMB
+ tristate 'IPMI IPMB interface'
+ depends on I2C && I2C_SLAVE
+ help
+ Provides a driver for a system that sits directly on the IPMB bus.
+ It supports normal system interface messages to a BMC on the IPMB
+ bus, and it also supports direct messaging on the bus using
+ IPMB direct messages. This module requires I2C support.
+
config IPMI_POWERNV
depends on PPC_POWERNV
tristate 'POWERNV (OPAL firmware) IPMI interface'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 84f47d18007f..7ce790efad92 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IPMI_SI) += ipmi_si.o
obj-$(CONFIG_IPMI_DMI_DECODE) += ipmi_dmi.o
obj-$(CONFIG_IPMI_PLAT_DATA) += ipmi_plat_data.o
obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
+obj-$(CONFIG_IPMI_IPMB) += ipmi_ipmb.o
obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 6e3d247b55d1..7450904e330a 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -8,13 +8,11 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
-#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/timer.h>
@@ -59,8 +57,7 @@
struct bt_bmc {
struct device dev;
struct miscdevice miscdev;
- struct regmap *map;
- int offset;
+ void __iomem *base;
int irq;
wait_queue_head_t queue;
struct timer_list poll_timer;
@@ -69,29 +66,14 @@ struct bt_bmc {
static atomic_t open_count = ATOMIC_INIT(0);
-static const struct regmap_config bt_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
{
- uint32_t val = 0;
- int rc;
-
- rc = regmap_read(bt_bmc->map, bt_bmc->offset + reg, &val);
- WARN(rc != 0, "regmap_read() failed: %d\n", rc);
-
- return rc == 0 ? (u8) val : 0;
+ return readb(bt_bmc->base + reg);
}
static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
{
- int rc;
-
- rc = regmap_write(bt_bmc->map, bt_bmc->offset + reg, data);
- WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+ writeb(data, bt_bmc->base + reg);
}
static void clr_rd_ptr(struct bt_bmc *bt_bmc)
@@ -376,18 +358,15 @@ static irqreturn_t bt_bmc_irq(int irq, void *arg)
{
struct bt_bmc *bt_bmc = arg;
u32 reg;
- int rc;
- rc = regmap_read(bt_bmc->map, bt_bmc->offset + BT_CR2, &reg);
- if (rc)
- return IRQ_NONE;
+ reg = readl(bt_bmc->base + BT_CR2);
reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
if (!reg)
return IRQ_NONE;
/* ack pending IRQs */
- regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR2, reg);
+ writel(reg, bt_bmc->base + BT_CR2);
wake_up(&bt_bmc->queue);
return IRQ_HANDLED;
@@ -398,6 +377,7 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
{
struct device *dev = &pdev->dev;
int rc;
+ u32 reg;
bt_bmc->irq = platform_get_irq_optional(pdev, 0);
if (bt_bmc->irq < 0)
@@ -417,11 +397,11 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
* will be cleared (along with B2H) when we can write the next
* message to the BT buffer
*/
- rc = regmap_update_bits(bt_bmc->map, bt_bmc->offset + BT_CR1,
- (BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY),
- (BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY));
+ reg = readl(bt_bmc->base + BT_CR1);
+ reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+ writel(reg, bt_bmc->base + BT_CR1);
- return rc;
+ return 0;
}
static int bt_bmc_probe(struct platform_device *pdev)
@@ -439,25 +419,9 @@ static int bt_bmc_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, bt_bmc);
- bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
- if (IS_ERR(bt_bmc->map)) {
- void __iomem *base;
-
- /*
- * Assume it's not the MFD-based devicetree description, in
- * which case generate a regmap ourselves
- */
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- bt_bmc->map = devm_regmap_init_mmio(dev, base, &bt_regmap_cfg);
- bt_bmc->offset = 0;
- } else {
- rc = of_property_read_u32(dev->of_node, "reg", &bt_bmc->offset);
- if (rc)
- return rc;
- }
+ bt_bmc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bt_bmc->base))
+ return PTR_ERR(bt_bmc->base);
mutex_init(&bt_bmc->mutex);
init_waitqueue_head(&bt_bmc->queue);
@@ -483,12 +447,12 @@ static int bt_bmc_probe(struct platform_device *pdev)
add_timer(&bt_bmc->poll_timer);
}
- regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR0,
- (BT_IO_BASE << BT_CR0_IO_BASE) |
+ writel((BT_IO_BASE << BT_CR0_IO_BASE) |
(BT_IRQ << BT_CR0_IRQ) |
BT_CR0_EN_CLR_SLV_RDP |
BT_CR0_EN_CLR_SLV_WRP |
- BT_CR0_ENABLE_IBT);
+ BT_CR0_ENABLE_IBT,
+ bt_bmc->base + BT_CR0);
clr_b_busy(bt_bmc);
@@ -508,6 +472,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
static const struct of_device_id bt_bmc_match[] = {
{ .compatible = "aspeed,ast2400-ibt-bmc" },
{ .compatible = "aspeed,ast2500-ibt-bmc" },
+ { .compatible = "aspeed,ast2600-ibt-bmc" },
{ },
};
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 3dd1d5abb298..d160fa4c73fe 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -247,11 +247,13 @@ static int handle_recv(struct ipmi_file_private *priv,
if (msg->msg.data_len > 0) {
if (rsp->msg.data_len < msg->msg.data_len) {
- rv2 = -EMSGSIZE;
- if (trunc)
+ if (trunc) {
+ rv2 = -EMSGSIZE;
msg->msg.data_len = rsp->msg.data_len;
- else
+ } else {
+ rv = -EMSGSIZE;
goto recv_putback_on_err;
+ }
}
if (copy_to_user(rsp->msg.data,
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
new file mode 100644
index 000000000000..ba0c2d2c6bbe
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Driver to talk to a remote management controller on IPMB.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/ipmi_msgdefs.h>
+#include <linux/ipmi_smi.h>
+
+#define DEVICE_NAME "ipmi-ipmb"
+
+static int bmcaddr = 0x20;
+module_param(bmcaddr, int, 0644);
+MODULE_PARM_DESC(bmcaddr, "Address to use for BMC.");
+
+static unsigned int retry_time_ms = 250;
+module_param(retry_time_ms, uint, 0644);
+MODULE_PARM_DESC(retry_time_ms, "Time between retries, in milliseconds.");
+
+static unsigned int max_retries = 1;
+module_param(max_retries, uint, 0644);
+MODULE_PARM_DESC(max_retries, "Max resends of a command before timing out.");
+
+/* Add room for the two slave addresses, two checksums, and rqSeq. */
+#define IPMB_MAX_MSG_LEN (IPMI_MAX_MSG_LENGTH + 5)
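+/*
+ * Resulting frame layout (a sketch; rsSA = responder addr, rqSA = requester):
+ *   rsSA | NetFn/rsLUN | chk1 | rqSA | rqSeq/rqLUN | cmd | data... | chk2
+ * chk1 covers the first two bytes, chk2 covers everything after chk1, and
+ * summing a region together with its checksum yields zero (mod 256).
+ */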
+
+struct ipmi_ipmb_dev {
+ struct ipmi_smi *intf;
+ struct i2c_client *client;
+
+ struct ipmi_smi_handlers handlers;
+
+ bool ready;
+
+ u8 curr_seq;
+
+ u8 bmcaddr;
+ u32 retry_time_ms;
+ u32 max_retries;
+
+ struct ipmi_smi_msg *next_msg;
+ struct ipmi_smi_msg *working_msg;
+
+ /* Transmit thread. */
+ struct task_struct *thread;
+ struct semaphore wake_thread;
+ struct semaphore got_rsp;
+ spinlock_t lock;
+ bool stopping;
+
+ u8 xmitmsg[IPMB_MAX_MSG_LEN];
+ unsigned int xmitlen;
+
+ u8 rcvmsg[IPMB_MAX_MSG_LEN];
+ unsigned int rcvlen;
+ bool overrun;
+};
+
+static bool valid_ipmb(struct ipmi_ipmb_dev *iidev)
+{
+ u8 *msg = iidev->rcvmsg;
+ u8 netfn;
+
+ if (iidev->overrun)
+ return false;
+
+ /* Minimum message size. */
+ if (iidev->rcvlen < 7)
+ return false;
+
+ /* Is it a response? */
+ netfn = msg[1] >> 2;
+ if (netfn & 1) {
+ /* Response messages have an added completion code. */
+ if (iidev->rcvlen < 8)
+ return false;
+ }
+
+ if (ipmb_checksum(msg, 3) != 0)
+ return false;
+ if (ipmb_checksum(msg + 3, iidev->rcvlen - 3) != 0)
+ return false;
+
+ return true;
+}
+
+static void ipmi_ipmb_check_msg_done(struct ipmi_ipmb_dev *iidev)
+{
+ struct ipmi_smi_msg *imsg = NULL;
+ u8 *msg = iidev->rcvmsg;
+ bool is_cmd;
+ unsigned long flags;
+
+ if (iidev->rcvlen == 0)
+ return;
+ if (!valid_ipmb(iidev))
+ goto done;
+
+ is_cmd = ((msg[1] >> 2) & 1) == 0;
+
+ if (is_cmd) {
+ /* Ignore commands until we are up. */
+ if (!iidev->ready)
+ goto done;
+
+ /* It's a command, allocate a message for it. */
+ imsg = ipmi_alloc_smi_msg();
+ if (!imsg)
+ goto done;
+ imsg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
+ imsg->data_size = 0;
+ } else {
+ spin_lock_irqsave(&iidev->lock, flags);
+ if (iidev->working_msg) {
+ u8 seq = msg[4] >> 2;
+ bool xmit_rsp = (iidev->working_msg->data[0] >> 2) & 1;
+
+ /*
+ * Responses should carry the sequence we sent
+ * them with. Ignore the response if our
+ * outstanding message is itself a response
+ * being transmitted, or if the sequence
+ * doesn't match.
+ */
+ if (!xmit_rsp && seq == iidev->curr_seq) {
+ iidev->curr_seq = (iidev->curr_seq + 1) & 0x3f;
+
+ imsg = iidev->working_msg;
+ iidev->working_msg = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ }
+
+ if (!imsg)
+ goto done;
+
+ if (imsg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ imsg->rsp[0] = msg[1]; /* NetFn/LUN */
+ /*
+ * Keep the source address, rqSeq. Drop the trailing
+ * checksum.
+ */
+ memcpy(imsg->rsp + 1, msg + 3, iidev->rcvlen - 4);
+ imsg->rsp_size = iidev->rcvlen - 3;
+ } else {
+ imsg->rsp[0] = msg[1]; /* NetFn/LUN */
+ /*
+ * Skip the source address, rqSeq. Drop the trailing
+ * checksum.
+ */
+ memcpy(imsg->rsp + 1, msg + 5, iidev->rcvlen - 6);
+ imsg->rsp_size = iidev->rcvlen - 5;
+ }
+ ipmi_smi_msg_received(iidev->intf, imsg);
+ if (!is_cmd)
+ up(&iidev->got_rsp);
+
+done:
+ iidev->overrun = false;
+ iidev->rcvlen = 0;
+}
+
+/*
+ * The IPMB protocol only supports i2c writes so there is no need to
+ * support I2C_SLAVE_READ* events, except to know if the other end has
+ * issued a read without going to stop mode.
+ */
+static int ipmi_ipmb_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_REQUESTED:
+ ipmi_ipmb_check_msg_done(iidev);
+ /*
+ * First byte is the slave address, to ease the checksum
+ * calculation.
+ */
+ iidev->rcvmsg[0] = client->addr << 1;
+ iidev->rcvlen = 1;
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (iidev->rcvlen >= sizeof(iidev->rcvmsg))
+ iidev->overrun = true;
+ else
+ iidev->rcvmsg[iidev->rcvlen++] = *val;
+ break;
+
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_STOP:
+ ipmi_ipmb_check_msg_done(iidev);
+ break;
+
+ case I2C_SLAVE_READ_PROCESSED:
+ break;
+ }
+
+ return 0;
+}
+
+static void ipmi_ipmb_send_response(struct ipmi_ipmb_dev *iidev,
+ struct ipmi_smi_msg *msg, u8 cc)
+{
+ if ((msg->data[0] >> 2) & 1) {
+ /*
+ * It's a response being sent, so we need to return a
+ * response to that response. Fake a send msg command
+ * response with channel 0. This will always be ipmb
+ * direct.
+ */
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST | 1) << 2;
+ msg->data[3] = IPMI_SEND_MSG_CMD;
+ msg->data[4] = cc;
+ msg->data_size = 5;
+ }
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = msg->data[2];
+ msg->rsp[3] = msg->data[3];
+ msg->rsp[4] = cc;
+ msg->rsp_size = 5;
+ } else {
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = cc;
+ msg->rsp_size = 3;
+ }
+ ipmi_smi_msg_received(iidev->intf, msg);
+}
+
+static void ipmi_ipmb_format_for_xmit(struct ipmi_ipmb_dev *iidev,
+ struct ipmi_smi_msg *msg)
+{
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ iidev->xmitmsg[0] = msg->data[1];
+ iidev->xmitmsg[1] = msg->data[0];
+ memcpy(iidev->xmitmsg + 4, msg->data + 2, msg->data_size - 2);
+ iidev->xmitlen = msg->data_size + 2;
+ } else {
+ iidev->xmitmsg[0] = iidev->bmcaddr;
+ iidev->xmitmsg[1] = msg->data[0];
+ iidev->xmitmsg[4] = 0;
+ memcpy(iidev->xmitmsg + 5, msg->data + 1, msg->data_size - 1);
+ iidev->xmitlen = msg->data_size + 4;
+ }
+ iidev->xmitmsg[3] = iidev->client->addr << 1;
+ if (((msg->data[0] >> 2) & 1) == 0)
+ /* If it's a command, put in our own sequence number. */
+ iidev->xmitmsg[4] = ((iidev->xmitmsg[4] & 0x03) |
+ (iidev->curr_seq << 2));
+
+ /* Now add on the final checksums. */
+ iidev->xmitmsg[2] = ipmb_checksum(iidev->xmitmsg, 2);
+ iidev->xmitmsg[iidev->xmitlen] =
+ ipmb_checksum(iidev->xmitmsg + 3, iidev->xmitlen - 3);
+ iidev->xmitlen++;
+}
+
+static int ipmi_ipmb_thread(void *data)
+{
+ struct ipmi_ipmb_dev *iidev = data;
+
+ while (!kthread_should_stop()) {
+ long ret;
+ struct i2c_msg i2c_msg;
+ struct ipmi_smi_msg *msg = NULL;
+ unsigned long flags;
+ unsigned int retries = 0;
+
+ /* Wait for a message to send */
+ ret = down_interruptible(&iidev->wake_thread);
+ if (iidev->stopping)
+ break;
+ if (ret)
+ continue;
+
+ spin_lock_irqsave(&iidev->lock, flags);
+ if (iidev->next_msg) {
+ msg = iidev->next_msg;
+ iidev->next_msg = NULL;
+ }
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ if (!msg)
+ continue;
+
+ ipmi_ipmb_format_for_xmit(iidev, msg);
+
+retry:
+ i2c_msg.len = iidev->xmitlen - 1;
+ if (i2c_msg.len > 32) {
+ ipmi_ipmb_send_response(iidev, msg,
+ IPMI_REQ_LEN_EXCEEDED_ERR);
+ continue;
+ }
+
+ i2c_msg.addr = iidev->xmitmsg[0] >> 1;
+ i2c_msg.flags = 0;
+ i2c_msg.buf = iidev->xmitmsg + 1;
+
+ /* Rely on i2c_transfer for a barrier. */
+ iidev->working_msg = msg;
+
+ ret = i2c_transfer(iidev->client->adapter, &i2c_msg, 1);
+
+ if ((msg->data[0] >> 2) & 1) {
+ /*
+ * It's a response, nothing will be returned
+ * by the other end.
+ */
+
+ iidev->working_msg = NULL;
+ ipmi_ipmb_send_response(iidev, msg,
+ ret < 0 ? IPMI_BUS_ERR : 0);
+ continue;
+ }
+ if (ret < 0) {
+ iidev->working_msg = NULL;
+ ipmi_ipmb_send_response(iidev, msg, IPMI_BUS_ERR);
+ continue;
+ }
+
+ /* A command was sent, wait for its response. */
+ ret = down_timeout(&iidev->got_rsp,
+ msecs_to_jiffies(iidev->retry_time_ms));
+
+ /*
+ * Grab the message if we can. If the handler hasn't
+ * already handled it, the message will still be there.
+ */
+ spin_lock_irqsave(&iidev->lock, flags);
+ msg = iidev->working_msg;
+ iidev->working_msg = NULL;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+
+ if (!msg && ret) {
+ /*
+ * If working_msg is not set and we timed out,
+ * that means the message was grabbed by
+ * check_msg_done before we could grab it
+ * here. Wait again for check_msg_done to up
+ * the semaphore.
+ */
+ down(&iidev->got_rsp);
+ } else if (msg && ++retries <= iidev->max_retries) {
+ spin_lock_irqsave(&iidev->lock, flags);
+ iidev->working_msg = msg;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ goto retry;
+ }
+
+ if (msg)
+ ipmi_ipmb_send_response(iidev, msg, IPMI_TIMEOUT_ERR);
+ }
+
+ if (iidev->next_msg)
+ /* Return an unspecified error. */
+ ipmi_ipmb_send_response(iidev, iidev->next_msg, 0xff);
+
+ return 0;
+}
+
+static int ipmi_ipmb_start_processing(void *send_info,
+ struct ipmi_smi *new_intf)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+
+ iidev->intf = new_intf;
+ iidev->ready = true;
+ return 0;
+}
+
+static void ipmi_ipmb_stop_thread(struct ipmi_ipmb_dev *iidev)
+{
+ if (iidev->thread) {
+ struct task_struct *t = iidev->thread;
+
+ iidev->thread = NULL;
+ iidev->stopping = true;
+ up(&iidev->wake_thread);
+ up(&iidev->got_rsp);
+ kthread_stop(t);
+ }
+}
+
+static void ipmi_ipmb_shutdown(void *send_info)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+
+ ipmi_ipmb_stop_thread(iidev);
+}
+
+static void ipmi_ipmb_sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iidev->lock, flags);
+ BUG_ON(iidev->next_msg);
+
+ iidev->next_msg = msg;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+
+ up(&iidev->wake_thread);
+}
+
+static void ipmi_ipmb_request_events(void *send_info)
+{
+ /* We don't fetch events here. */
+}
+
+static int ipmi_ipmb_remove(struct i2c_client *client)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ if (iidev->client) {
+ iidev->client = NULL;
+ i2c_slave_unregister(client);
+ }
+ ipmi_ipmb_stop_thread(iidev);
+
+ return 0;
+}
+
+static int ipmi_ipmb_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ipmi_ipmb_dev *iidev;
+ int rv;
+
+ iidev = devm_kzalloc(&client->dev, sizeof(*iidev), GFP_KERNEL);
+ if (!iidev)
+ return -ENOMEM;
+
+ if (of_property_read_u8(dev->of_node, "bmcaddr", &iidev->bmcaddr) != 0)
+ iidev->bmcaddr = bmcaddr;
+ if (iidev->bmcaddr == 0 || iidev->bmcaddr & 1) {
+ /* Can't have the write bit set. */
+ dev_notice(&client->dev,
+ "Invalid bmc address value %2.2x\n", iidev->bmcaddr);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dev->of_node, "retry-time",
+ &iidev->retry_time_ms) != 0)
+ iidev->retry_time_ms = retry_time_ms;
+
+ if (of_property_read_u32(dev->of_node, "max-retries",
+ &iidev->max_retries) != 0)
+ iidev->max_retries = max_retries;
+
+ i2c_set_clientdata(client, iidev);
+ client->flags |= I2C_CLIENT_SLAVE;
+
+ rv = i2c_slave_register(client, ipmi_ipmb_slave_cb);
+ if (rv)
+ return rv;
+
+ iidev->client = client;
+
+ iidev->handlers.flags = IPMI_SMI_CAN_HANDLE_IPMB_DIRECT;
+ iidev->handlers.start_processing = ipmi_ipmb_start_processing;
+ iidev->handlers.shutdown = ipmi_ipmb_shutdown;
+ iidev->handlers.sender = ipmi_ipmb_sender;
+ iidev->handlers.request_events = ipmi_ipmb_request_events;
+
+ spin_lock_init(&iidev->lock);
+ sema_init(&iidev->wake_thread, 0);
+ sema_init(&iidev->got_rsp, 0);
+
+ iidev->thread = kthread_run(ipmi_ipmb_thread, iidev,
+ "kipmb%4.4x", client->addr);
+ if (IS_ERR(iidev->thread)) {
+ rv = PTR_ERR(iidev->thread);
+ dev_notice(&client->dev,
+ "Could not start kernel thread: error %d\n", rv);
+ goto out_err;
+ }
+
+ rv = ipmi_register_smi(&iidev->handlers,
+ iidev,
+ &client->dev,
+ iidev->bmcaddr);
+ if (rv)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ ipmi_ipmb_remove(client);
+ return rv;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_ipmb_match[] = {
+ { .type = "ipmi", .compatible = DEVICE_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_ipmb_match);
+#else
+#define of_ipmi_ipmb_match NULL
+#endif
+
+static const struct i2c_device_id ipmi_ipmb_id[] = {
+ { DEVICE_NAME, 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ipmi_ipmb_id);
+
+static struct i2c_driver ipmi_ipmb_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = of_ipmi_ipmb_match,
+ },
+ .probe = ipmi_ipmb_probe,
+ .remove = ipmi_ipmb_remove,
+ .id_table = ipmi_ipmb_id,
+};
+module_i2c_driver(ipmi_ipmb_driver);
+
+MODULE_AUTHOR("Corey Minyard");
+MODULE_DESCRIPTION("IPMI IPMB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e96cb5c4f97a..deed355422f4 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -653,6 +653,11 @@ static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}
+static int is_ipmb_direct_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
+}
+
static void free_recv_msg_list(struct list_head *q)
{
struct ipmi_recv_msg *msg, *msg2;
@@ -805,6 +810,17 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
&& (ipmb_addr1->lun == ipmb_addr2->lun));
}
+ if (is_ipmb_direct_addr(addr1)) {
+ struct ipmi_ipmb_direct_addr *daddr1
+ = (struct ipmi_ipmb_direct_addr *) addr1;
+ struct ipmi_ipmb_direct_addr *daddr2
+ = (struct ipmi_ipmb_direct_addr *) addr2;
+
+ return daddr1->slave_addr == daddr2->slave_addr &&
+ daddr1->rq_lun == daddr2->rq_lun &&
+ daddr1->rs_lun == daddr2->rs_lun;
+ }
+
if (is_lan_addr(addr1)) {
struct ipmi_lan_addr *lan_addr1
= (struct ipmi_lan_addr *) addr1;
@@ -843,6 +859,23 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len)
return 0;
}
+ if (is_ipmb_direct_addr(addr)) {
+ struct ipmi_ipmb_direct_addr *daddr = (void *) addr;
+
+ if (addr->channel != 0)
+ return -EINVAL;
+ if (len < sizeof(struct ipmi_ipmb_direct_addr))
+ return -EINVAL;
+
+ if (daddr->slave_addr & 0x01)
+ return -EINVAL;
+ if (daddr->rq_lun >= 4)
+ return -EINVAL;
+ if (daddr->rs_lun >= 4)
+ return -EINVAL;
+ return 0;
+ }
+
if (is_lan_addr(addr)) {
if (len < sizeof(struct ipmi_lan_addr))
return -EINVAL;
@@ -862,6 +895,9 @@ unsigned int ipmi_addr_length(int addr_type)
|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
return sizeof(struct ipmi_ipmb_addr);
+ if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
+ return sizeof(struct ipmi_ipmb_direct_addr);
+
if (addr_type == IPMI_LAN_ADDR_TYPE)
return sizeof(struct ipmi_lan_addr);
@@ -1710,7 +1746,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
-static unsigned char
+unsigned char
ipmb_checksum(unsigned char *data, int size)
{
unsigned char csum = 0;
@@ -1720,6 +1756,7 @@ ipmb_checksum(unsigned char *data, int size)
return -csum;
}
+EXPORT_SYMBOL(ipmb_checksum);
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
struct kernel_ipmi_msg *msg,
@@ -2051,6 +2088,58 @@ out_err:
return rv;
}
+static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned char source_lun)
+{
+ struct ipmi_ipmb_direct_addr *daddr;
+ bool is_cmd = !(recv_msg->msg.netfn & 0x1);
+
+ if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
+ return -EAFNOSUPPORT;
+
+ /* Responses must have a completion code. */
+ if (!is_cmd && msg->data_len < 1) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ daddr = (struct ipmi_ipmb_direct_addr *) addr;
+ if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
+ smi_msg->msgid = msgid;
+
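+ /*
+ * Direct message layout: data[0] = NetFn/LUN, data[1] = slave
+ * address, data[2] = rqSeq/LUN, data[3] = cmd, then the payload.
+ */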
+ if (is_cmd) {
+ smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
+ smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
+ } else {
+ smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
+ smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
+ }
+ smi_msg->data[1] = daddr->slave_addr;
+ smi_msg->data[3] = msg->cmd;
+
+ memcpy(smi_msg->data + 4, msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 4;
+
+ smi_msg->user_data = recv_msg;
+
+ return 0;
+}
+
static int i_ipmi_req_lan(struct ipmi_smi *intf,
struct ipmi_addr *addr,
long msgid,
@@ -2240,6 +2329,9 @@ static int i_ipmi_request(struct ipmi_user *user,
rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
source_address, source_lun,
retries, retry_time_ms);
+ } else if (is_ipmb_direct_addr(addr)) {
+ rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
+ recv_msg, source_lun);
} else if (is_lan_addr(addr)) {
rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
source_lun, retries, retry_time_ms);
@@ -2369,6 +2461,13 @@ static void bmc_device_id_handler(struct ipmi_smi *intf,
return;
}
+ if (msg->msg.data[0]) {
+ dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
+ msg->msg.data[0]);
+ intf->bmc->dyn_id_set = 0;
+ goto out;
+ }
+
rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
if (rv) {
@@ -2384,7 +2483,7 @@ static void bmc_device_id_handler(struct ipmi_smi *intf,
smp_wmb();
intf->bmc->dyn_id_set = 1;
}
-
+out:
wake_up(&intf->waitq);
}
@@ -2617,7 +2716,7 @@ static ssize_t device_id_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "%u\n", id.device_id);
+ return sysfs_emit(buf, "%u\n", id.device_id);
}
static DEVICE_ATTR_RO(device_id);
@@ -2633,7 +2732,7 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
+ return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR_RO(provides_device_sdrs);
@@ -2648,7 +2747,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
if (rv)
return rv;
- return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
+ return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR_RO(revision);
@@ -2664,7 +2763,7 @@ static ssize_t firmware_revision_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
+ return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
id.firmware_revision_2);
}
static DEVICE_ATTR_RO(firmware_revision);
@@ -2681,7 +2780,7 @@ static ssize_t ipmi_version_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 20, "%u.%u\n",
+ return sysfs_emit(buf, "%u.%u\n",
ipmi_version_major(&id),
ipmi_version_minor(&id));
}
@@ -2699,7 +2798,7 @@ static ssize_t add_dev_support_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
+ return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
NULL);
@@ -2716,7 +2815,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
+ return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR_RO(manufacturer_id);
@@ -2732,7 +2831,7 @@ static ssize_t product_id_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
+ return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR_RO(product_id);
@@ -2748,7 +2847,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
id.aux_firmware_revision[3],
id.aux_firmware_revision[2],
id.aux_firmware_revision[1],
@@ -2770,7 +2869,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
if (!guid_set)
return -ENOENT;
- return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
+ return sysfs_emit(buf, "%pUl\n", &guid);
}
static DEVICE_ATTR_RO(guid);
@@ -3794,6 +3893,123 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
return rv;
}
+static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ struct ipmi_user *user = NULL;
+ struct ipmi_ipmb_direct_addr *daddr;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned char netfn = msg->rsp[0] >> 2;
+ unsigned char cmd = msg->rsp[3];
+
+ rcu_read_lock();
+ /* We always use channel 0 for direct messages. */
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, deliver an error response. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
+ msg->data[1] = msg->rsp[2];
+ msg->data[2] = msg->rsp[4] & ~0x3;
+ msg->data[3] = cmd;
+ msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data_size = 5;
+
+ rcu_read_lock();
+ if (!intf->in_shutdown) {
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value
+ * that causes it to not be freed or
+ * queued.
+ */
+ rv = -1;
+ }
+ rcu_read_unlock();
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
+
+static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg;
+ struct ipmi_ipmb_direct_addr *daddr;
+
+ recv_msg = (struct ipmi_recv_msg *) msg->user_data;
+ if (recv_msg == NULL) {
+ dev_warn(intf->si_dev,
+ "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
+ return 0;
+ }
+
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ recv_msg->msgid = msg->msgid;
+ daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rq_lun = msg->rsp[0] & 3;
+ daddr->rs_lun = msg->rsp[2] & 3;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ deliver_local_response(intf, recv_msg);
+
+ return 0;
+}
+
static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
@@ -4219,18 +4435,40 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
- int requeue;
+ int requeue = 0;
int chan;
+ unsigned char cc;
+ bool is_cmd = !((msg->rsp[0] >> 2) & 1);
pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
- if ((msg->data_size >= 2)
+ if (msg->rsp_size < 2) {
+ /* Message is too small to be correct. */
+ dev_warn(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+
+return_unspecified:
+ /* Generate an error response for the message. */
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ /* Commands must have at least 3 bytes, responses 4. */
+ if (is_cmd && (msg->rsp_size < 3)) {
+ ipmi_inc_stat(intf, invalid_commands);
+ goto out;
+ }
+ if (!is_cmd && (msg->rsp_size < 4))
+ goto return_unspecified;
+ } else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) {
if (intf->in_shutdown)
- goto free_msg;
+ goto out;
/*
* This is the local response to a command send, start
@@ -4265,21 +4503,6 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
} else
/* The message was sent, start the timer. */
intf_start_seq_timer(intf, msg->msgid);
-free_msg:
- requeue = 0;
- goto out;
-
- } else if (msg->rsp_size < 2) {
- /* Message is too small to be correct. */
- dev_warn(intf->si_dev,
- "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
-
- /* Generate an error response for the message. */
- msg->rsp[0] = msg->data[0] | (1 << 2);
- msg->rsp[1] = msg->data[1];
- msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
- msg->rsp_size = 3;
} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
|| (msg->rsp[1] != msg->data[1])) {
/*
@@ -4291,39 +4514,46 @@ free_msg:
(msg->data[0] >> 2) | 1, msg->data[1],
msg->rsp[0] >> 2, msg->rsp[1]);
- /* Generate an error response for the message. */
- msg->rsp[0] = msg->data[0] | (1 << 2);
- msg->rsp[1] = msg->data[1];
- msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
- msg->rsp_size = 3;
+ goto return_unspecified;
}
- if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
- && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data != NULL)) {
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ if ((msg->data[0] >> 2) & 1) {
+ /* It's a response to a sent response. */
+ chan = 0;
+ cc = msg->rsp[4];
+ goto process_response_response;
+ }
+ if (is_cmd)
+ requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
+ else
+ requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data != NULL)) {
/*
* It's a response to a response we sent. For this we
* deliver a send message response to the user.
*/
- struct ipmi_recv_msg *recv_msg = msg->user_data;
-
- requeue = 0;
- if (msg->rsp_size < 2)
- /* Message is too small to be correct. */
- goto out;
+ struct ipmi_recv_msg *recv_msg;
chan = msg->data[2] & 0x0f;
if (chan >= IPMI_MAX_CHANNELS)
/* Invalid channel number */
goto out;
+ cc = msg->rsp[2];
+process_response_response:
+ recv_msg = msg->user_data;
+
+ requeue = 0;
if (!recv_msg)
goto out;
recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg_data[0] = cc;
recv_msg->msg.data_len = 1;
- recv_msg->msg_data[0] = msg->rsp[2];
deliver_local_response(intf, recv_msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
@@ -4789,7 +5019,9 @@ static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
atomic_dec(&smi_msg_inuse_count);
- kfree(msg);
+ /* Try to keep as much stuff out of the panic path as possible. */
+ if (!oops_in_progress)
+ kfree(msg);
}
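The guard added here trades a small leak for safety: once an oops/panic is underway, kfree() may contend on allocator locks held by the CPU that crashed. A condensed sketch of the pattern (function name hypothetical):

#include <linux/printk.h>
#include <linux/slab.h>

/* Leak rather than touch allocator locks once a panic is underway. */
static void demo_careful_free(void *obj)
{
        if (!oops_in_progress)
                kfree(obj);
}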
struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
@@ -4808,7 +5040,9 @@ EXPORT_SYMBOL(ipmi_alloc_smi_msg);
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
atomic_dec(&recv_msg_inuse_count);
- kfree(msg);
+ /* Try to keep as much stuff out of the panic path as possible. */
+ if (!oops_in_progress)
+ kfree(msg);
}
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
@@ -4826,7 +5060,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
- if (msg->user)
+ if (msg->user && !oops_in_progress)
kref_put(&msg->user->refcount, free_user);
msg->done(msg);
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 6f3272b58ced..64dedb3ef8ec 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1603,7 +1603,7 @@ static ssize_t name##_show(struct device *dev, \
{ \
struct smi_info *smi_info = dev_get_drvdata(dev); \
\
- return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
+ return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \
} \
static DEVICE_ATTR_RO(name)
@@ -1613,7 +1613,7 @@ static ssize_t type_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
}
static DEVICE_ATTR_RO(type);
@@ -1624,7 +1624,7 @@ static ssize_t interrupts_enabled_show(struct device *dev,
struct smi_info *smi_info = dev_get_drvdata(dev);
int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
- return snprintf(buf, 10, "%d\n", enabled);
+ return sysfs_emit(buf, "%d\n", enabled);
}
static DEVICE_ATTR_RO(interrupts_enabled);
@@ -1646,7 +1646,7 @@ static ssize_t params_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return snprintf(buf, 200,
+ return sysfs_emit(buf,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
si_to_str[smi_info->io.si_type],
addr_space_to_str[smi_info->io.addr_space],
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 20d5af92966d..0c62e578749e 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1190,7 +1190,7 @@ static ssize_t ipmi_##name##_show(struct device *dev, \
{ \
struct ssif_info *ssif_info = dev_get_drvdata(dev); \
\
- return snprintf(buf, 10, "%u\n", ssif_get_stat(ssif_info, name));\
+ return sysfs_emit(buf, "%u\n", ssif_get_stat(ssif_info, name));\
} \
static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
@@ -1198,7 +1198,7 @@ static ssize_t ipmi_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, 10, "ssif\n");
+ return sysfs_emit(buf, "ssif\n");
}
static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index e4ff3b50de7f..883b4a341012 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -342,13 +342,17 @@ static atomic_t msg_tofree = ATOMIC_INIT(0);
static DECLARE_COMPLETION(msg_wait);
static void msg_free_smi(struct ipmi_smi_msg *msg)
{
- if (atomic_dec_and_test(&msg_tofree))
- complete(&msg_wait);
+ if (atomic_dec_and_test(&msg_tofree)) {
+ if (!oops_in_progress)
+ complete(&msg_wait);
+ }
}
static void msg_free_recv(struct ipmi_recv_msg *msg)
{
- if (atomic_dec_and_test(&msg_tofree))
- complete(&msg_wait);
+ if (atomic_dec_and_test(&msg_tofree)) {
+ if (!oops_in_progress)
+ complete(&msg_wait);
+ }
}
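Each watchdog request accounts for two completions, one for the SMI message and one for the receive message, which is why the panic-path hunks below move panic_done_count in steps of two; the last free wakes the waiter unless a panic is in progress, since complete() takes a lock. A condensed sketch under those assumptions (names hypothetical):

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/printk.h>

static atomic_t demo_tofree = ATOMIC_INIT(0);
static DECLARE_COMPLETION(demo_wait);

/* Called once for the SMI message and once for the receive message. */
static void demo_msg_done(void)
{
        if (atomic_dec_and_test(&demo_tofree) && !oops_in_progress)
                complete(&demo_wait);
}

/* Sender: account for both completions up front, undo on failure so a
 * later wait_for_completion() cannot hang (the _ipmi_set_timeout fix). */
static int demo_send(int (*submit)(void))
{
        int rv;

        atomic_add(2, &demo_tofree);
        rv = submit();
        if (rv)
                atomic_set(&demo_tofree, 0);
        return rv;
}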
static struct ipmi_smi_msg smi_msg = {
.done = msg_free_smi
@@ -434,8 +438,10 @@ static int _ipmi_set_timeout(int do_heartbeat)
rv = __ipmi_set_timeout(&smi_msg,
&recv_msg,
&send_heartbeat_now);
- if (rv)
+ if (rv) {
+ atomic_set(&msg_tofree, 0);
return rv;
+ }
wait_for_completion(&msg_wait);
@@ -497,7 +503,7 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_inc(&panic_done_count);
+ atomic_add(2, &panic_done_count);
rv = ipmi_request_supply_msgs(watchdog_user,
(struct ipmi_addr *) &addr,
0,
@@ -507,7 +513,7 @@ static void panic_halt_ipmi_heartbeat(void)
&panic_halt_heartbeat_recv_msg,
1);
if (rv)
- atomic_dec(&panic_done_count);
+ atomic_sub(2, &panic_done_count);
}
static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -531,12 +537,12 @@ static void panic_halt_ipmi_set_timeout(void)
/* Wait for the messages to be free. */
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
- atomic_inc(&panic_done_count);
+ atomic_add(2, &panic_done_count);
rv = __ipmi_set_timeout(&panic_halt_smi_msg,
&panic_halt_recv_msg,
&send_heartbeat_now);
if (rv) {
- atomic_dec(&panic_done_count);
+ atomic_sub(2, &panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
@@ -580,6 +586,7 @@ restart:
&recv_msg,
1);
if (rv) {
+ atomic_set(&msg_tofree, 0);
pr_warn("heartbeat send failure: %d\n", rv);
return rv;
}
diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c
index 7948cabde50b..7e2067628a6c 100644
--- a/drivers/char/ipmi/kcs_bmc_serio.c
+++ b/drivers/char/ipmi/kcs_bmc_serio.c
@@ -73,10 +73,12 @@ static int kcs_bmc_serio_add_device(struct kcs_bmc_device *kcs_bmc)
struct serio *port;
priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
/* Use kzalloc() as the allocation is cleaned up with kfree() via serio_unregister_port() */
port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!(priv && port))
+ if (!port)
return -ENOMEM;
port->id.type = SERIO_8042;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 1c596b5cdb27..cc296f0823bd 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -495,6 +495,10 @@ static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
written += n;
if (signal_pending(current))
return written ? written : -ERESTARTSYS;
+ if (!need_resched())
+ continue;
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return written ? written : -EAGAIN;
cond_resched();
}
return written;
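The loop change honors IOCB_NOWAIT: keep zero-filling while no reschedule is pending, and if one is pending while the caller asked for non-blocking I/O, return short rather than sleep (this pairs with the FMODE_NOWAIT flags added to the memdev table below). A sketch of the same read_iter shape, assuming the usual iov_iter helpers:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>

static ssize_t demo_read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        ssize_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE; /* don't hold the CPU too long */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                if (!need_resched())
                        continue;
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return written ? written : -EAGAIN;
                cond_resched();
        }
        return written;
}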
@@ -696,11 +700,11 @@ static const struct memdev {
#ifdef CONFIG_DEVMEM
[DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
- [3] = { "null", 0666, &null_fops, 0 },
+ [3] = { "null", 0666, &null_fops, FMODE_NOWAIT },
#ifdef CONFIG_DEVPORT
[4] = { "port", 0, &port_fops, 0 },
#endif
- [5] = { "zero", 0666, &zero_fops, 0 },
+ [5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
[7] = { "full", 0666, &full_fops, 0 },
[8] = { "random", 0666, &random_fops, 0 },
[9] = { "urandom", 0666, &urandom_fops, 0 },
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 8f1bce0b4fe5..adaec8fd4b16 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -116,8 +116,9 @@ struct cm4000_dev {
wait_queue_head_t atrq; /* wait for ATR valid */
wait_queue_head_t readq; /* used by write to wake blk.read */
- /* warning: do not move this fields.
+ /* warning: do not move this struct group.
* initialising to zero depends on it - see ZERO_DEV below. */
+ struct_group(init,
unsigned char atr_csum;
unsigned char atr_len_retry;
unsigned short atr_len;
@@ -140,12 +141,10 @@ struct cm4000_dev {
struct timer_list timer; /* used to keep monitor running */
int monitor_running;
+ );
};
-#define ZERO_DEV(dev) \
- memset(&dev->atr_csum,0, \
- sizeof(struct cm4000_dev) - \
- offsetof(struct cm4000_dev, atr_csum))
+#define ZERO_DEV(dev) memset(&((dev)->init), 0, sizeof((dev)->init))
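struct_group() (from <linux/stddef.h>) wraps the members in a named mirror so the memset covers exactly those fields, replacing the fragile offsetof arithmetic the old ZERO_DEV used. A minimal sketch with a hypothetical group:

#include <linux/stddef.h>
#include <linux/string.h>

struct demo_dev {
        int id;                         /* not cleared by reset */
        struct_group(stats,             /* clearable group */
                unsigned long rx;
                unsigned long tx;
        );
};

/* Zeroing the group cannot silently drift if members are moved. */
static void demo_reset(struct demo_dev *d)
{
        memset(&d->stats, 0, sizeof(d->stats));
}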
static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
static struct class *cmm_class;
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index d6ba644f6b00..4a5516406c22 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -76,7 +76,7 @@ config TCG_TIS_SPI_CR50
config TCG_TIS_SYNQUACER
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
- depends on ARCH_SYNQUACER
+ depends on ARCH_SYNQUACER || COMPILE_TEST
select TCG_TIS_CORE
help
If you have a TPM security chip that is compliant with the
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 784b8b3cb903..97e916856cf3 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -455,6 +455,9 @@ static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp,
if (be32_to_cpu(data->capability) != TPM2_CAP_HANDLES)
return 0;
+ if (be32_to_cpu(data->count) > (UINT_MAX - TPM_HEADER_SIZE - 9) / 4)
+ return -EFAULT;
+
if (len != TPM_HEADER_SIZE + 9 + 4 * be32_to_cpu(data->count))
return -EFAULT;
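Without the new bound, 4 * count can wrap the 32-bit expression and make the length equality below it pass for a malformed response. A stand-alone sketch of the guard (HDR stands in for TPM_HEADER_SIZE):

#include <limits.h>
#include <stdint.h>
#include <stddef.h>

#define HDR 10  /* stand-in for TPM_HEADER_SIZE */

/* Reject counts that would overflow HDR + 9 + 4 * count before using
 * that expression in a length comparison. */
static int demo_body_len_ok(uint32_t count, size_t len)
{
        if (count > (UINT_MAX - HDR - 9) / 4)
                return 0;
        return len == HDR + 9 + (size_t)4 * count;
}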
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 69579efb247b..b2659a4c4016 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -48,6 +48,7 @@ static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
unsigned long timeout, wait_queue_head_t *queue,
bool check_cancel)
{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
unsigned long stop;
long rc;
u8 status;
@@ -80,8 +81,8 @@ again:
}
} else {
do {
- usleep_range(TPM_TIMEOUT_USECS_MIN,
- TPM_TIMEOUT_USECS_MAX);
+ usleep_range(priv->timeout_min,
+ priv->timeout_max);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
@@ -945,7 +946,22 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX);
chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX);
+ priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
+ priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
priv->phy_ops = phy_ops;
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
+ if (rc < 0)
+ goto out_err;
+
+ priv->manufacturer_id = vendor;
+
+ if (priv->manufacturer_id == TPM_VID_ATML &&
+ !(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+ priv->timeout_min = TIS_TIMEOUT_MIN_ATML;
+ priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
+ }
+
dev_set_drvdata(&chip->dev, priv);
if (is_bsw()) {
@@ -988,12 +1004,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (rc)
goto out_err;
- rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
- if (rc < 0)
- goto out_err;
-
- priv->manufacturer_id = vendor;
-
rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
if (rc < 0)
goto out_err;
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index b2a3c6c72882..3be24f221e32 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -54,6 +54,8 @@ enum tis_defaults {
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+ TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
+ TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
};
/* Some timeout values are needed before it is known whether the chip is
@@ -98,6 +100,8 @@ struct tpm_tis_data {
wait_queue_head_t read_queue;
const struct tpm_tis_phy_ops *phy_ops;
unsigned short rng_quality;
+ unsigned int timeout_min; /* usecs */
+ unsigned int timeout_max; /* usecs */
};
struct tpm_tis_phy_ops {
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index 54584b4b00d1..aaa59a00eeae 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -267,6 +267,7 @@ static const struct spi_device_id tpm_tis_spi_id[] = {
{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
{ "slb9670", (unsigned long)tpm_tis_spi_probe },
{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
+ { "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe },
{ "cr50", (unsigned long)cr50_spi_probe },
{}
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 7eaf303a7a86..660c5c388c29 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -28,6 +28,7 @@
#include "../tty/hvc/hvc_console.h"
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
+#define VIRTCONS_MAX_PORTS 0x8000
/*
* This is a global struct for storing common data for all the devices
@@ -2036,6 +2037,14 @@ static int virtcons_probe(struct virtio_device *vdev)
virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
struct virtio_console_config, max_nr_ports,
&portdev->max_nr_ports) == 0) {
+ if (portdev->max_nr_ports == 0 ||
+ portdev->max_nr_ports > VIRTCONS_MAX_PORTS) {
+ dev_err(&vdev->dev,
+ "Invalidate max_nr_ports %d",
+ portdev->max_nr_ports);
+ err = -EINVAL;
+ goto free;
+ }
multiport = true;
}
diff --git a/drivers/char/xillybus/xillybus.h b/drivers/char/xillybus/xillybus.h
index c63ffc56637c..51de7cbc579e 100644
--- a/drivers/char/xillybus/xillybus.h
+++ b/drivers/char/xillybus/xillybus.h
@@ -87,13 +87,8 @@ struct xilly_channel {
};
struct xilly_endpoint {
- /*
- * One of pdev and dev is always NULL, and the other is a valid
- * pointer, depending on the type of device
- */
- struct pci_dev *pdev;
struct device *dev;
- struct xilly_endpoint_hardware *ephw;
+ struct module *owner;
int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
__iomem void *registers;
@@ -113,25 +108,8 @@ struct xilly_endpoint {
unsigned int msg_buf_size;
};
-struct xilly_endpoint_hardware {
- struct module *owner;
- void (*hw_sync_sgl_for_cpu)(struct xilly_endpoint *,
- dma_addr_t,
- size_t,
- int);
- void (*hw_sync_sgl_for_device)(struct xilly_endpoint *,
- dma_addr_t,
- size_t,
- int);
- int (*map_single)(struct xilly_endpoint *,
- void *,
- size_t,
- int,
- dma_addr_t *);
-};
-
struct xilly_mapping {
- void *device;
+ struct device *device;
dma_addr_t dma_addr;
size_t size;
int direction;
@@ -139,10 +117,7 @@ struct xilly_mapping {
irqreturn_t xillybus_isr(int irq, void *data);
-struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
- struct device *dev,
- struct xilly_endpoint_hardware
- *ephw);
+struct xilly_endpoint *xillybus_init_endpoint(struct device *dev);
int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint);
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index 931d0bf4cec6..11b7c4749274 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -122,10 +122,8 @@ irqreturn_t xillybus_isr(int irq, void *data)
buf = ep->msgbuf_addr;
buf_size = ep->msg_buf_size/sizeof(u32);
- ep->ephw->hw_sync_sgl_for_cpu(ep,
- ep->msgbuf_dma_addr,
- ep->msg_buf_size,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(ep->dev, ep->msgbuf_dma_addr,
+ ep->msg_buf_size, DMA_FROM_DEVICE);
for (i = 0; i < buf_size; i += 2) {
if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
@@ -140,11 +138,10 @@ irqreturn_t xillybus_isr(int irq, void *data)
dev_err(ep->dev,
"Lost sync with interrupt messages. Stopping.\n");
} else {
- ep->ephw->hw_sync_sgl_for_device(
- ep,
- ep->msgbuf_dma_addr,
- ep->msg_buf_size,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_device(ep->dev,
+ ep->msgbuf_dma_addr,
+ ep->msg_buf_size,
+ DMA_FROM_DEVICE);
iowrite32(0x01, /* Message NACK */
ep->registers + fpga_msg_ctrl_reg);
@@ -275,10 +272,8 @@ irqreturn_t xillybus_isr(int irq, void *data)
}
}
- ep->ephw->hw_sync_sgl_for_device(ep,
- ep->msgbuf_dma_addr,
- ep->msg_buf_size,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_device(ep->dev, ep->msgbuf_dma_addr,
+ ep->msg_buf_size, DMA_FROM_DEVICE);
ep->msg_counter = (ep->msg_counter + 1) & 0xf;
ep->failed_messages = 0;
@@ -304,6 +299,47 @@ struct xilly_alloc_state {
u32 regdirection;
};
+static void xilly_unmap(void *ptr)
+{
+ struct xilly_mapping *data = ptr;
+
+ dma_unmap_single(data->device, data->dma_addr,
+ data->size, data->direction);
+
+ kfree(ptr);
+}
+
+static int xilly_map_single(struct xilly_endpoint *ep,
+ void *ptr,
+ size_t size,
+ int direction,
+ dma_addr_t *ret_dma_handle
+ )
+{
+ dma_addr_t addr;
+ struct xilly_mapping *this;
+
+ this = kzalloc(sizeof(*this), GFP_KERNEL);
+ if (!this)
+ return -ENOMEM;
+
+ addr = dma_map_single(ep->dev, ptr, size, direction);
+
+ if (dma_mapping_error(ep->dev, addr)) {
+ kfree(this);
+ return -ENODEV;
+ }
+
+ this->device = ep->dev;
+ this->dma_addr = addr;
+ this->size = size;
+ this->direction = direction;
+
+ *ret_dma_handle = addr;
+
+ return devm_add_action_or_reset(ep->dev, xilly_unmap, this);
+}
+
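xilly_map_single() relies on devm_add_action_or_reset(): the unmap-and-free action runs automatically on driver detach, and if registration itself fails the action runs immediately, so no error path needs a manual unmap. A condensed sketch of the idiom (names hypothetical):

#include <linux/device.h>
#include <linux/slab.h>

static void demo_release(void *res)
{
        kfree(res);     /* mirror: unmap + free in xilly_unmap() */
}

static int demo_acquire(struct device *dev)
{
        void *res = kzalloc(16, GFP_KERNEL);

        if (!res)
                return -ENOMEM;
        /* On failure this calls demo_release(res) before returning. */
        return devm_add_action_or_reset(dev, demo_release, res);
}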
static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
struct xilly_alloc_state *s,
struct xilly_buffer **buffers,
@@ -355,9 +391,9 @@ static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
s->left_of_salami = allocsize;
}
- rc = ep->ephw->map_single(ep, s->salami,
- bytebufsize, s->direction,
- &dma_addr);
+ rc = xilly_map_single(ep, s->salami,
+ bytebufsize, s->direction,
+ &dma_addr);
if (rc)
return rc;
@@ -620,11 +656,10 @@ static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
return -ENODEV;
}
- endpoint->ephw->hw_sync_sgl_for_cpu(
- channel->endpoint,
- channel->wr_buffers[0]->dma_addr,
- channel->wr_buf_size,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(channel->endpoint->dev,
+ channel->wr_buffers[0]->dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
dev_err(endpoint->dev,
@@ -735,11 +770,10 @@ static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
if (!empty) { /* Go on, now without the spinlock */
if (bufpos == 0) /* Position zero means it's virgin */
- channel->endpoint->ephw->hw_sync_sgl_for_cpu(
- channel->endpoint,
- channel->wr_buffers[bufidx]->dma_addr,
- channel->wr_buf_size,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(channel->endpoint->dev,
+ channel->wr_buffers[bufidx]->dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
if (copy_to_user(
userbuf,
@@ -751,11 +785,10 @@ static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
bytes_done += howmany;
if (bufferdone) {
- channel->endpoint->ephw->hw_sync_sgl_for_device(
- channel->endpoint,
- channel->wr_buffers[bufidx]->dma_addr,
- channel->wr_buf_size,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_device(channel->endpoint->dev,
+ channel->wr_buffers[bufidx]->dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
/*
* Tell FPGA the buffer is done with. It's an
@@ -1055,11 +1088,10 @@ static int xillybus_myflush(struct xilly_channel *channel, long timeout)
else
channel->rd_host_buf_idx++;
- channel->endpoint->ephw->hw_sync_sgl_for_device(
- channel->endpoint,
- channel->rd_buffers[bufidx]->dma_addr,
- channel->rd_buf_size,
- DMA_TO_DEVICE);
+ dma_sync_single_for_device(channel->endpoint->dev,
+ channel->rd_buffers[bufidx]->dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
mutex_lock(&channel->endpoint->register_mutex);
@@ -1275,11 +1307,10 @@ static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
if ((bufpos == 0) || /* Zero means it's virgin */
(channel->rd_leftovers[3] != 0)) {
- channel->endpoint->ephw->hw_sync_sgl_for_cpu(
- channel->endpoint,
- channel->rd_buffers[bufidx]->dma_addr,
- channel->rd_buf_size,
- DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(channel->endpoint->dev,
+ channel->rd_buffers[bufidx]->dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
/* Virgin, but leftovers are due */
for (i = 0; i < bufpos; i++)
@@ -1297,11 +1328,10 @@ static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
bytes_done += howmany;
if (bufferdone) {
- channel->endpoint->ephw->hw_sync_sgl_for_device(
- channel->endpoint,
- channel->rd_buffers[bufidx]->dma_addr,
- channel->rd_buf_size,
- DMA_TO_DEVICE);
+ dma_sync_single_for_device(channel->endpoint->dev,
+ channel->rd_buffers[bufidx]->dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
mutex_lock(&channel->endpoint->register_mutex);
@@ -1772,10 +1802,7 @@ static const struct file_operations xillybus_fops = {
.poll = xillybus_poll,
};
-struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
- struct device *dev,
- struct xilly_endpoint_hardware
- *ephw)
+struct xilly_endpoint *xillybus_init_endpoint(struct device *dev)
{
struct xilly_endpoint *endpoint;
@@ -1783,9 +1810,7 @@ struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
if (!endpoint)
return NULL;
- endpoint->pdev = pdev;
endpoint->dev = dev;
- endpoint->ephw = ephw;
endpoint->msg_counter = 0x0b;
endpoint->failed_messages = 0;
endpoint->fatal_error = 0;
@@ -1912,7 +1937,7 @@ int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
goto failed_idt;
rc = xillybus_init_chrdev(dev, &xillybus_fops,
- endpoint->ephw->owner, endpoint,
+ endpoint->owner, endpoint,
idt_handle.names,
idt_handle.names_len,
endpoint->num_channels,
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 1a20b286fd1d..e5372e45d211 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -31,102 +31,22 @@ static const struct of_device_id xillybus_of_match[] = {
MODULE_DEVICE_TABLE(of, xillybus_of_match);
-static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep,
- dma_addr_t dma_handle,
- size_t size,
- int direction)
-{
- dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction);
-}
-
-static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep,
- dma_addr_t dma_handle,
- size_t size,
- int direction)
-{
- dma_sync_single_for_device(ep->dev, dma_handle, size, direction);
-}
-
-static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep,
- dma_addr_t dma_handle,
- size_t size,
- int direction)
-{
-}
-
-static void xilly_of_unmap(void *ptr)
-{
- struct xilly_mapping *data = ptr;
-
- dma_unmap_single(data->device, data->dma_addr,
- data->size, data->direction);
-
- kfree(ptr);
-}
-
-static int xilly_map_single_of(struct xilly_endpoint *ep,
- void *ptr,
- size_t size,
- int direction,
- dma_addr_t *ret_dma_handle
- )
-{
- dma_addr_t addr;
- struct xilly_mapping *this;
-
- this = kzalloc(sizeof(*this), GFP_KERNEL);
- if (!this)
- return -ENOMEM;
-
- addr = dma_map_single(ep->dev, ptr, size, direction);
-
- if (dma_mapping_error(ep->dev, addr)) {
- kfree(this);
- return -ENODEV;
- }
-
- this->device = ep->dev;
- this->dma_addr = addr;
- this->size = size;
- this->direction = direction;
-
- *ret_dma_handle = addr;
-
- return devm_add_action_or_reset(ep->dev, xilly_of_unmap, this);
-}
-
-static struct xilly_endpoint_hardware of_hw = {
- .owner = THIS_MODULE,
- .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of,
- .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of,
- .map_single = xilly_map_single_of,
-};
-
-static struct xilly_endpoint_hardware of_hw_coherent = {
- .owner = THIS_MODULE,
- .hw_sync_sgl_for_cpu = xilly_dma_sync_single_nop,
- .hw_sync_sgl_for_device = xilly_dma_sync_single_nop,
- .map_single = xilly_map_single_of,
-};
-
static int xilly_drv_probe(struct platform_device *op)
{
struct device *dev = &op->dev;
struct xilly_endpoint *endpoint;
int rc;
int irq;
- struct xilly_endpoint_hardware *ephw = &of_hw;
- if (of_property_read_bool(dev->of_node, "dma-coherent"))
- ephw = &of_hw_coherent;
-
- endpoint = xillybus_init_endpoint(NULL, dev, ephw);
+ endpoint = xillybus_init_endpoint(dev);
if (!endpoint)
return -ENOMEM;
dev_set_drvdata(dev, endpoint);
+ endpoint->owner = THIS_MODULE;
+
endpoint->registers = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(endpoint->registers))
return PTR_ERR(endpoint->registers);
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
index bdf1c366b4fc..9858711e3e79 100644
--- a/drivers/char/xillybus/xillybus_pcie.c
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -32,110 +32,21 @@ static const struct pci_device_id xillyids[] = {
{ /* End: all zeroes */ }
};
-static int xilly_pci_direction(int direction)
-{
- switch (direction) {
- case DMA_TO_DEVICE:
- return PCI_DMA_TODEVICE;
- case DMA_FROM_DEVICE:
- return PCI_DMA_FROMDEVICE;
- default:
- return PCI_DMA_BIDIRECTIONAL;
- }
-}
-
-static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep,
- dma_addr_t dma_handle,
- size_t size,
- int direction)
-{
- pci_dma_sync_single_for_cpu(ep->pdev,
- dma_handle,
- size,
- xilly_pci_direction(direction));
-}
-
-static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep,
- dma_addr_t dma_handle,
- size_t size,
- int direction)
-{
- pci_dma_sync_single_for_device(ep->pdev,
- dma_handle,
- size,
- xilly_pci_direction(direction));
-}
-
-static void xilly_pci_unmap(void *ptr)
-{
- struct xilly_mapping *data = ptr;
-
- pci_unmap_single(data->device, data->dma_addr,
- data->size, data->direction);
-
- kfree(ptr);
-}
-
-/*
- * Map either through the PCI DMA mapper or the non_PCI one. Behind the
- * scenes exactly the same functions are called with the same parameters,
- * but that can change.
- */
-
-static int xilly_map_single_pci(struct xilly_endpoint *ep,
- void *ptr,
- size_t size,
- int direction,
- dma_addr_t *ret_dma_handle
- )
-{
- int pci_direction;
- dma_addr_t addr;
- struct xilly_mapping *this;
-
- this = kzalloc(sizeof(*this), GFP_KERNEL);
- if (!this)
- return -ENOMEM;
-
- pci_direction = xilly_pci_direction(direction);
-
- addr = pci_map_single(ep->pdev, ptr, size, pci_direction);
-
- if (pci_dma_mapping_error(ep->pdev, addr)) {
- kfree(this);
- return -ENODEV;
- }
-
- this->device = ep->pdev;
- this->dma_addr = addr;
- this->size = size;
- this->direction = pci_direction;
-
- *ret_dma_handle = addr;
-
- return devm_add_action_or_reset(ep->dev, xilly_pci_unmap, this);
-}
-
-static struct xilly_endpoint_hardware pci_hw = {
- .owner = THIS_MODULE,
- .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci,
- .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci,
- .map_single = xilly_map_single_pci,
-};
-
static int xilly_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct xilly_endpoint *endpoint;
int rc;
- endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw);
+ endpoint = xillybus_init_endpoint(&pdev->dev);
if (!endpoint)
return -ENOMEM;
pci_set_drvdata(pdev, endpoint);
+ endpoint->owner = THIS_MODULE;
+
rc = pcim_enable_device(pdev);
if (rc) {
dev_err(endpoint->dev,
@@ -185,9 +96,9 @@ static int xilly_probe(struct pci_dev *pdev,
* So go for the 64-bit mask only when failing is the other option.
*/
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
endpoint->dma_using_dac = 0;
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
endpoint->dma_using_dac = 1;
} else {
dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
index e7f88f35c702..dc3551796e5e 100644
--- a/drivers/char/xillybus/xillyusb.c
+++ b/drivers/char/xillybus/xillyusb.c
@@ -1912,6 +1912,7 @@ static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
dealloc:
endpoint_dealloc(xdev->msg_ep); /* Also frees FIFO mem if allocated */
+ xdev->msg_ep = NULL;
return -ENOMEM;
}
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index 428a6f4b9ebc..fff4fdda974f 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -152,7 +152,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
"masterck_pres",
&at91rm9200_master_layout,
&rm9200_mck_characteristics,
- &rm9200_mck_lock, CLK_SET_RATE_GATE);
+ &rm9200_mck_lock, CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index b29843bea278..79802f864ee5 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -429,7 +429,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
&at91rm9200_master_layout,
data->mck_characteristics,
&at91sam9260_mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index 15da0dfe3ef2..7ed984f8058c 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -164,7 +164,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
&at91rm9200_master_layout,
&mck_characteristics,
&at91sam9g45_mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index 7fe435f4b46b..63cc58944b00 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -191,7 +191,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
&at91sam9x5_master_layout,
&mck_characteristics,
&at91sam9n12_mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index ecbabf5162bd..4d4faf6c61d8 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -132,7 +132,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
"masterck_pres",
&at91rm9200_master_layout,
&sam9rl_mck_characteristics,
- &sam9rl_mck_lock, CLK_SET_RATE_GATE);
+ &sam9rl_mck_lock, CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 5cce48c64ea2..bd8007b4f3e0 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -210,7 +210,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
"masterck_pres",
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index b656d25a9767..23cc8297ec4c 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -27,6 +27,7 @@ struct clk_generated {
u32 id;
u32 gckdiv;
const struct clk_pcr_layout *layout;
+ struct at91_clk_pms pms;
u8 parent_id;
int chg_pid;
};
@@ -34,25 +35,35 @@ struct clk_generated {
#define to_clk_generated(hw) \
container_of(hw, struct clk_generated, hw)
-static int clk_generated_enable(struct clk_hw *hw)
+static int clk_generated_set(struct clk_generated *gck, int status)
{
- struct clk_generated *gck = to_clk_generated(hw);
unsigned long flags;
-
- pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
- __func__, gck->gckdiv, gck->parent_id);
+ unsigned int enable = status ? AT91_PMC_PCR_GCKEN : 0;
spin_lock_irqsave(gck->lock, flags);
regmap_write(gck->regmap, gck->layout->offset,
(gck->id & gck->layout->pid_mask));
regmap_update_bits(gck->regmap, gck->layout->offset,
AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask |
- gck->layout->cmd | AT91_PMC_PCR_GCKEN,
+ gck->layout->cmd | enable,
field_prep(gck->layout->gckcss_mask, gck->parent_id) |
gck->layout->cmd |
FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) |
- AT91_PMC_PCR_GCKEN);
+ enable);
spin_unlock_irqrestore(gck->lock, flags);
+
+ return 0;
+}
+
+static int clk_generated_enable(struct clk_hw *hw)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+
+ pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
+ __func__, gck->gckdiv, gck->parent_id);
+
+ clk_generated_set(gck, 1);
+
return 0;
}
@@ -245,6 +256,23 @@ static int clk_generated_set_rate(struct clk_hw *hw,
return 0;
}
+static int clk_generated_save_context(struct clk_hw *hw)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+
+ gck->pms.status = clk_generated_is_enabled(&gck->hw);
+
+ return 0;
+}
+
+static void clk_generated_restore_context(struct clk_hw *hw)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+
+ if (gck->pms.status)
+ clk_generated_set(gck, gck->pms.status);
+}
+
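The save_context/restore_context pair added throughout these clk drivers follows one pattern: snapshot the hardware state before suspend, replay it after resume. A skeletal sketch (demo names are hypothetical; the real enable path is clk_generated_set()):

#include <linux/clk-provider.h>
#include <linux/kernel.h>

struct demo_clk {
        struct clk_hw hw;
        int was_enabled;        /* snapshot taken at suspend */
};

static void demo_hw_enable(struct demo_clk *c)
{
        /* write the enable bit, as clk_generated_set() does */
}

static int demo_save_context(struct clk_hw *hw)
{
        struct demo_clk *c = container_of(hw, struct demo_clk, hw);

        c->was_enabled = clk_hw_is_enabled(hw);
        return 0;
}

static void demo_restore_context(struct clk_hw *hw)
{
        struct demo_clk *c = container_of(hw, struct demo_clk, hw);

        if (c->was_enabled)
                demo_hw_enable(c);
}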
static const struct clk_ops generated_ops = {
.enable = clk_generated_enable,
.disable = clk_generated_disable,
@@ -254,6 +282,8 @@ static const struct clk_ops generated_ops = {
.get_parent = clk_generated_get_parent,
.set_parent = clk_generated_set_parent,
.set_rate = clk_generated_set_rate,
+ .save_context = clk_generated_save_context,
+ .restore_context = clk_generated_restore_context,
};
/**
@@ -320,8 +350,6 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
if (ret) {
kfree(gck);
hw = ERR_PTR(ret);
- } else {
- pmc_register_id(id);
}
return hw;
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index cfae2f59df66..8601b27c1ae0 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -28,6 +28,7 @@
struct clk_main_osc {
struct clk_hw hw;
struct regmap *regmap;
+ struct at91_clk_pms pms;
};
#define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw)
@@ -37,6 +38,7 @@ struct clk_main_rc_osc {
struct regmap *regmap;
unsigned long frequency;
unsigned long accuracy;
+ struct at91_clk_pms pms;
};
#define to_clk_main_rc_osc(hw) container_of(hw, struct clk_main_rc_osc, hw)
@@ -51,6 +53,7 @@ struct clk_rm9200_main {
struct clk_sam9x5_main {
struct clk_hw hw;
struct regmap *regmap;
+ struct at91_clk_pms pms;
u8 parent;
};
@@ -120,10 +123,29 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
}
+static int clk_main_osc_save_context(struct clk_hw *hw)
+{
+ struct clk_main_osc *osc = to_clk_main_osc(hw);
+
+ osc->pms.status = clk_main_osc_is_prepared(hw);
+
+ return 0;
+}
+
+static void clk_main_osc_restore_context(struct clk_hw *hw)
+{
+ struct clk_main_osc *osc = to_clk_main_osc(hw);
+
+ if (osc->pms.status)
+ clk_main_osc_prepare(hw);
+}
+
static const struct clk_ops main_osc_ops = {
.prepare = clk_main_osc_prepare,
.unprepare = clk_main_osc_unprepare,
.is_prepared = clk_main_osc_is_prepared,
+ .save_context = clk_main_osc_save_context,
+ .restore_context = clk_main_osc_restore_context,
};
struct clk_hw * __init
@@ -240,12 +262,31 @@ static unsigned long clk_main_rc_osc_recalc_accuracy(struct clk_hw *hw,
return osc->accuracy;
}
+static int clk_main_rc_osc_save_context(struct clk_hw *hw)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+
+ osc->pms.status = clk_main_rc_osc_is_prepared(hw);
+
+ return 0;
+}
+
+static void clk_main_rc_osc_restore_context(struct clk_hw *hw)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+
+ if (osc->pms.status)
+ clk_main_rc_osc_prepare(hw);
+}
+
static const struct clk_ops main_rc_osc_ops = {
.prepare = clk_main_rc_osc_prepare,
.unprepare = clk_main_rc_osc_unprepare,
.is_prepared = clk_main_rc_osc_is_prepared,
.recalc_rate = clk_main_rc_osc_recalc_rate,
.recalc_accuracy = clk_main_rc_osc_recalc_accuracy,
+ .save_context = clk_main_rc_osc_save_context,
+ .restore_context = clk_main_rc_osc_restore_context,
};
struct clk_hw * __init
@@ -465,12 +506,37 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
return clk_main_parent_select(status);
}
+static int clk_sam9x5_main_save_context(struct clk_hw *hw)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+
+ clkmain->pms.status = clk_main_rc_osc_is_prepared(&clkmain->hw);
+ clkmain->pms.parent = clk_sam9x5_main_get_parent(&clkmain->hw);
+
+ return 0;
+}
+
+static void clk_sam9x5_main_restore_context(struct clk_hw *hw)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+ int ret;
+
+ ret = clk_sam9x5_main_set_parent(hw, clkmain->pms.parent);
+ if (ret)
+ return;
+
+ if (clkmain->pms.status)
+ clk_sam9x5_main_prepare(hw);
+}
+
static const struct clk_ops sam9x5_main_ops = {
.prepare = clk_sam9x5_main_prepare,
.is_prepared = clk_sam9x5_main_is_prepared,
.recalc_rate = clk_sam9x5_main_recalc_rate,
.set_parent = clk_sam9x5_main_set_parent,
.get_parent = clk_sam9x5_main_get_parent,
+ .save_context = clk_sam9x5_main_save_context,
+ .restore_context = clk_sam9x5_main_restore_context,
};
struct clk_hw * __init
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index a80427980bf7..b2d0a7f4f7f9 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -5,6 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/clk.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
@@ -17,15 +18,7 @@
#define MASTER_DIV_SHIFT 8
#define MASTER_DIV_MASK 0x7
-#define PMC_MCR 0x30
-#define PMC_MCR_ID_MSK GENMASK(3, 0)
-#define PMC_MCR_CMD BIT(7)
-#define PMC_MCR_DIV GENMASK(10, 8)
-#define PMC_MCR_CSS GENMASK(20, 16)
#define PMC_MCR_CSS_SHIFT (16)
-#define PMC_MCR_EN BIT(28)
-
-#define PMC_MCR_ID(x) ((x) & PMC_MCR_ID_MSK)
#define MASTER_MAX_ID 4
@@ -37,14 +30,19 @@ struct clk_master {
spinlock_t *lock;
const struct clk_master_layout *layout;
const struct clk_master_characteristics *characteristics;
+ struct at91_clk_pms pms;
u32 *mux_table;
u32 mckr;
int chg_pid;
u8 id;
u8 parent;
u8 div;
+ u32 safe_div;
};
+/* MCK div reference to be used by notifier. */
+static struct clk_master *master_div;
+
static inline bool clk_master_ready(struct clk_master *master)
{
unsigned int bit = master->id ? AT91_PMC_MCKXRDY : AT91_PMC_MCKRDY;
@@ -112,97 +110,244 @@ static unsigned long clk_master_div_recalc_rate(struct clk_hw *hw,
return rate;
}
+static int clk_master_div_save_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ unsigned long flags;
+ unsigned int mckr, div;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_read(master->regmap, master->layout->offset, &mckr);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ mckr &= master->layout->mask;
+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ div = master->characteristics->divisors[div];
+
+ master->pms.parent_rate = clk_hw_get_rate(parent_hw);
+ master->pms.rate = DIV_ROUND_CLOSEST(master->pms.parent_rate, div);
+
+ return 0;
+}
+
+static void clk_master_div_restore_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ unsigned int mckr;
+ u8 div;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_read(master->regmap, master->layout->offset, &mckr);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ mckr &= master->layout->mask;
+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ div = master->characteristics->divisors[div];
+
+ if (div != DIV_ROUND_CLOSEST(master->pms.parent_rate, master->pms.rate))
+ pr_warn("MCKR DIV not configured properly by firmware!\n");
+}
+
static const struct clk_ops master_div_ops = {
.prepare = clk_master_prepare,
.is_prepared = clk_master_is_prepared,
.recalc_rate = clk_master_div_recalc_rate,
+ .save_context = clk_master_div_save_context,
+ .restore_context = clk_master_div_restore_context,
};
-static int clk_master_div_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+/* This function must be called with lock acquired. */
+static int clk_master_div_set(struct clk_master *master,
+ unsigned long parent_rate, int div)
{
- struct clk_master *master = to_clk_master(hw);
const struct clk_master_characteristics *characteristics =
master->characteristics;
- unsigned long flags;
- int div, i;
-
- div = DIV_ROUND_CLOSEST(parent_rate, rate);
- if (div > ARRAY_SIZE(characteristics->divisors))
- return -EINVAL;
+ unsigned long rate = parent_rate;
+ unsigned int max_div = 0, div_index = 0, max_div_index = 0;
+ unsigned int i, mckr, tmp;
+ int ret;
for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
if (!characteristics->divisors[i])
break;
- if (div == characteristics->divisors[i]) {
- div = i;
- break;
+ if (div == characteristics->divisors[i])
+ div_index = i;
+
+ if (max_div < characteristics->divisors[i]) {
+ max_div = characteristics->divisors[i];
+ max_div_index = i;
}
}
- if (i == ARRAY_SIZE(characteristics->divisors))
- return -EINVAL;
+ if (div > max_div)
+ div_index = max_div_index;
+
+ ret = regmap_read(master->regmap, master->layout->offset, &mckr);
+ if (ret)
+ return ret;
+
+ mckr &= master->layout->mask;
+ tmp = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ if (tmp == div_index)
+ return 0;
+
+ rate /= characteristics->divisors[div_index];
+ if (rate < characteristics->output.min)
+ pr_warn("master clk div is underclocked");
+ else if (rate > characteristics->output.max)
+ pr_warn("master clk div is overclocked");
+
+ mckr &= ~(MASTER_DIV_MASK << MASTER_DIV_SHIFT);
+ mckr |= (div_index << MASTER_DIV_SHIFT);
+ ret = regmap_write(master->regmap, master->layout->offset, mckr);
+ if (ret)
+ return ret;
- spin_lock_irqsave(master->lock, flags);
- regmap_update_bits(master->regmap, master->layout->offset,
- (MASTER_DIV_MASK << MASTER_DIV_SHIFT),
- (div << MASTER_DIV_SHIFT));
while (!clk_master_ready(master))
cpu_relax();
- spin_unlock_irqrestore(master->lock, flags);
+
+ master->div = characteristics->divisors[div_index];
return 0;
}
-static int clk_master_div_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
+static unsigned long clk_master_div_recalc_rate_chg(struct clk_hw *hw,
+ unsigned long parent_rate)
{
struct clk_master *master = to_clk_master(hw);
+
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, master->div);
+}
+
+static void clk_master_div_restore_context_chg(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(master->lock, flags);
+ ret = clk_master_div_set(master, master->pms.parent_rate,
+ DIV_ROUND_CLOSEST(master->pms.parent_rate,
+ master->pms.rate));
+ spin_unlock_irqrestore(master->lock, flags);
+ if (ret)
+ pr_warn("Failed to restore MCK DIV clock\n");
+}
+
+static const struct clk_ops master_div_ops_chg = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .recalc_rate = clk_master_div_recalc_rate_chg,
+ .save_context = clk_master_div_save_context,
+ .restore_context = clk_master_div_restore_context_chg,
+};
+
+static int clk_master_div_notifier_fn(struct notifier_block *notifier,
+ unsigned long code, void *data)
+{
const struct clk_master_characteristics *characteristics =
- master->characteristics;
- struct clk_hw *parent;
- unsigned long parent_rate, tmp_rate, best_rate = 0;
- int i, best_diff = INT_MIN, tmp_diff;
+ master_div->characteristics;
+ struct clk_notifier_data *cnd = data;
+ unsigned long flags, new_parent_rate, new_rate;
+ unsigned int mckr, div, new_div = 0;
+ int ret, i;
+ long tmp_diff;
+ long best_diff = -1;
+
+ spin_lock_irqsave(master_div->lock, flags);
+ switch (code) {
+ case PRE_RATE_CHANGE:
+ /*
+ * We want to avoid any overclocking of the MCK DIV domain. To do
+ * this we set a safe divider (underclocking is not a concern, as
+ * we can go as low as 32KHz). The relation between this clock and
+ * its parents is as follows:
+ *
+ * FRAC PLL -> DIV PLL -> MCK DIV
+ *
+ * With the proper safe divider we should be good even with FRAC
+ * PLL at its maximum value.
+ */
+ ret = regmap_read(master_div->regmap, master_div->layout->offset,
+ &mckr);
+ if (ret) {
+ ret = NOTIFY_STOP_MASK;
+ goto unlock;
+ }
- parent = clk_hw_get_parent(hw);
- if (!parent)
- return -EINVAL;
+ mckr &= master_div->layout->mask;
+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+
+ /* Switch to safe divider. */
+ clk_master_div_set(master_div,
+ cnd->old_rate * characteristics->divisors[div],
+ master_div->safe_div);
+ break;
+
+ case POST_RATE_CHANGE:
+ /*
+ * At this point we want to restore MCK DIV domain to its maximum
+ * allowed rate.
+ */
+ ret = regmap_read(master_div->regmap, master_div->layout->offset,
+ &mckr);
+ if (ret) {
+ ret = NOTIFY_STOP_MASK;
+ goto unlock;
+ }
- parent_rate = clk_hw_get_rate(parent);
- if (!parent_rate)
- return -EINVAL;
+ mckr &= master_div->layout->mask;
+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ new_parent_rate = cnd->new_rate * characteristics->divisors[div];
- for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
- if (!characteristics->divisors[i])
- break;
+ for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
+ if (!characteristics->divisors[i])
+ break;
+
+ new_rate = DIV_ROUND_CLOSEST_ULL(new_parent_rate,
+ characteristics->divisors[i]);
- tmp_rate = DIV_ROUND_CLOSEST_ULL(parent_rate,
- characteristics->divisors[i]);
- tmp_diff = abs(tmp_rate - req->rate);
+ tmp_diff = characteristics->output.max - new_rate;
+ if (tmp_diff < 0)
+ continue;
- if (!best_rate || best_diff > tmp_diff) {
- best_diff = tmp_diff;
- best_rate = tmp_rate;
+ if (best_diff < 0 || best_diff > tmp_diff) {
+ new_div = characteristics->divisors[i];
+ best_diff = tmp_diff;
+ }
+
+ if (!tmp_diff)
+ break;
}
- if (!best_diff)
- break;
+ if (!new_div) {
+ ret = NOTIFY_STOP_MASK;
+ goto unlock;
+ }
+
+ /* Update the div to preserve MCK DIV clock rate. */
+ clk_master_div_set(master_div, new_parent_rate,
+ new_div);
+
+ ret = NOTIFY_OK;
+ break;
+
+ default:
+ ret = NOTIFY_DONE;
+ break;
}
- req->best_parent_rate = best_rate;
- req->best_parent_hw = parent;
- req->rate = best_rate;
+unlock:
+ spin_unlock_irqrestore(master_div->lock, flags);
- return 0;
+ return ret;
}
-static const struct clk_ops master_div_ops_chg = {
- .prepare = clk_master_prepare,
- .is_prepared = clk_master_is_prepared,
- .recalc_rate = clk_master_div_recalc_rate,
- .determine_rate = clk_master_div_determine_rate,
- .set_rate = clk_master_div_set_rate,
+static struct notifier_block clk_master_div_notifier = {
+ .notifier_call = clk_master_div_notifier_fn,
};
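The notifier is registered on the divided parent clock: PRE_RATE_CHANGE parks MCK DIV on a safe divider before the PLL moves, POST_RATE_CHANGE then picks the largest divider output that stays within output.max. A skeletal sketch of that flow (demo_* helpers are hypothetical stand-ins for clk_master_div_set() and the divisor search):

#include <linux/clk.h>
#include <linux/notifier.h>

#define DEMO_SAFE_DIV 8 /* hypothetical safe divider */

static void demo_set_div(unsigned long parent_rate, unsigned int div)
{
        /* program the divider register, as clk_master_div_set() does */
}

static unsigned int demo_best_div(unsigned long parent_rate)
{
        /* pick the largest output rate <= output.max, as the POST hook does */
        return 1;
}

static int demo_div_notifier_fn(struct notifier_block *nb,
                                unsigned long code, void *data)
{
        struct clk_notifier_data *cnd = data;

        switch (code) {
        case PRE_RATE_CHANGE:
                demo_set_div(cnd->old_rate, DEMO_SAFE_DIV);
                return NOTIFY_OK;
        case POST_RATE_CHANGE:
                demo_set_div(cnd->new_rate, demo_best_div(cnd->new_rate));
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block demo_div_notifier = {
        .notifier_call = demo_div_notifier_fn,
};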
static void clk_sama7g5_master_best_diff(struct clk_rate_request *req,
@@ -272,7 +417,8 @@ static int clk_master_pres_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
- unsigned int pres;
+ unsigned int pres, mckr, tmp;
+ int ret;
pres = DIV_ROUND_CLOSEST(parent_rate, rate);
if (pres > MASTER_PRES_MAX)
@@ -280,19 +426,31 @@ static int clk_master_pres_set_rate(struct clk_hw *hw, unsigned long rate,
else if (pres == 3)
pres = MASTER_PRES_MAX;
- else
+ else if (pres)
pres = ffs(pres) - 1;
spin_lock_irqsave(master->lock, flags);
- regmap_update_bits(master->regmap, master->layout->offset,
- (MASTER_PRES_MASK << master->layout->pres_shift),
- (pres << master->layout->pres_shift));
+ ret = regmap_read(master->regmap, master->layout->offset, &mckr);
+ if (ret)
+ goto unlock;
+
+ mckr &= master->layout->mask;
+ tmp = (mckr >> master->layout->pres_shift) & MASTER_PRES_MASK;
+ if (pres == tmp)
+ goto unlock;
+
+ mckr &= ~(MASTER_PRES_MASK << master->layout->pres_shift);
+ mckr |= (pres << master->layout->pres_shift);
+ ret = regmap_write(master->regmap, master->layout->offset, mckr);
+ if (ret)
+ goto unlock;
while (!clk_master_ready(master))
cpu_relax();
+unlock:
spin_unlock_irqrestore(master->lock, flags);
- return 0;
+ return ret;
}
static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw,
@@ -308,8 +466,9 @@ static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw,
regmap_read(master->regmap, master->layout->offset, &val);
spin_unlock_irqrestore(master->lock, flags);
+ val &= master->layout->mask;
pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
- if (pres == 3 && characteristics->have_div3_pres)
+ if (pres == MASTER_PRES_MAX && characteristics->have_div3_pres)
pres = 3;
else
pres = (1 << pres);
@@ -327,14 +486,73 @@ static u8 clk_master_pres_get_parent(struct clk_hw *hw)
regmap_read(master->regmap, master->layout->offset, &mckr);
spin_unlock_irqrestore(master->lock, flags);
+ mckr &= master->layout->mask;
+
return mckr & AT91_PMC_CSS;
}
+static int clk_master_pres_save_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ unsigned long flags;
+ unsigned int val, pres;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_read(master->regmap, master->layout->offset, &val);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ val &= master->layout->mask;
+ pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
+ if (pres == MASTER_PRES_MAX && master->characteristics->have_div3_pres)
+ pres = 3;
+ else
+ pres = (1 << pres);
+
+ master->pms.parent = val & AT91_PMC_CSS;
+ master->pms.parent_rate = clk_hw_get_rate(parent_hw);
+ master->pms.rate = DIV_ROUND_CLOSEST_ULL(master->pms.parent_rate, pres);
+
+ return 0;
+}
+
+static void clk_master_pres_restore_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ unsigned int val, pres;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_read(master->regmap, master->layout->offset, &val);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ val &= master->layout->mask;
+ pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
+ if (pres == MASTER_PRES_MAX && master->characteristics->have_div3_pres)
+ pres = 3;
+ else
+ pres = (1 << pres);
+
+ if (master->pms.rate !=
+ DIV_ROUND_CLOSEST_ULL(master->pms.parent_rate, pres) ||
+ (master->pms.parent != (val & AT91_PMC_CSS)))
+ pr_warn("MCKR PRES was not configured properly by firmware!\n");
+}
+
+static void clk_master_pres_restore_context_chg(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ clk_master_pres_set_rate(hw, master->pms.rate, master->pms.parent_rate);
+}
+
static const struct clk_ops master_pres_ops = {
.prepare = clk_master_prepare,
.is_prepared = clk_master_is_prepared,
.recalc_rate = clk_master_pres_recalc_rate,
.get_parent = clk_master_pres_get_parent,
+ .save_context = clk_master_pres_save_context,
+ .restore_context = clk_master_pres_restore_context,
};
static const struct clk_ops master_pres_ops_chg = {
@@ -344,6 +562,8 @@ static const struct clk_ops master_pres_ops_chg = {
.recalc_rate = clk_master_pres_recalc_rate,
.get_parent = clk_master_pres_get_parent,
.set_rate = clk_master_pres_set_rate,
+ .save_context = clk_master_pres_save_context,
+ .restore_context = clk_master_pres_restore_context_chg,
};
static struct clk_hw * __init
@@ -358,6 +578,8 @@ at91_clk_register_master_internal(struct regmap *regmap,
struct clk_master *master;
struct clk_init_data init;
struct clk_hw *hw;
+ unsigned int mckr;
+ unsigned long irqflags;
int ret;
if (!name || !num_parents || !parent_names || !lock)
@@ -380,6 +602,16 @@ at91_clk_register_master_internal(struct regmap *regmap,
master->chg_pid = chg_pid;
master->lock = lock;
+ if (ops == &master_div_ops_chg) {
+ spin_lock_irqsave(master->lock, irqflags);
+ regmap_read(master->regmap, master->layout->offset, &mckr);
+ spin_unlock_irqrestore(master->lock, irqflags);
+
+ mckr &= layout->mask;
+ mckr = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ master->div = characteristics->divisors[mckr];
+ }
+
hw = &master->hw;
ret = clk_hw_register(NULL, &master->hw);
if (ret) {
@@ -416,19 +648,29 @@ at91_clk_register_master_div(struct regmap *regmap,
const char *name, const char *parent_name,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
- spinlock_t *lock, u32 flags)
+ spinlock_t *lock, u32 flags, u32 safe_div)
{
const struct clk_ops *ops;
+ struct clk_hw *hw;
if (flags & CLK_SET_RATE_GATE)
ops = &master_div_ops;
else
ops = &master_div_ops_chg;
- return at91_clk_register_master_internal(regmap, name, 1,
- &parent_name, layout,
- characteristics, ops,
- lock, flags, -EINVAL);
+ hw = at91_clk_register_master_internal(regmap, name, 1,
+ &parent_name, layout,
+ characteristics, ops,
+ lock, flags, -EINVAL);
+
+ if (!IS_ERR(hw) && safe_div) {
+ master_div = to_clk_master(hw);
+ master_div->safe_div = safe_div;
+ clk_notifier_register(hw->clk,
+ &clk_master_div_notifier);
+ }
+
+ return hw;
}
static unsigned long
@@ -539,30 +781,40 @@ static int clk_sama7g5_master_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-static int clk_sama7g5_master_enable(struct clk_hw *hw)
+static void clk_sama7g5_master_set(struct clk_master *master,
+ unsigned int status)
{
- struct clk_master *master = to_clk_master(hw);
unsigned long flags;
unsigned int val, cparent;
+ unsigned int enable = status ? AT91_PMC_MCR_V2_EN : 0;
+ unsigned int parent = master->parent << PMC_MCR_CSS_SHIFT;
+ unsigned int div = master->div << MASTER_DIV_SHIFT;
spin_lock_irqsave(master->lock, flags);
- regmap_write(master->regmap, PMC_MCR, PMC_MCR_ID(master->id));
- regmap_read(master->regmap, PMC_MCR, &val);
- regmap_update_bits(master->regmap, PMC_MCR,
- PMC_MCR_EN | PMC_MCR_CSS | PMC_MCR_DIV |
- PMC_MCR_CMD | PMC_MCR_ID_MSK,
- PMC_MCR_EN | (master->parent << PMC_MCR_CSS_SHIFT) |
- (master->div << MASTER_DIV_SHIFT) |
- PMC_MCR_CMD | PMC_MCR_ID(master->id));
+ regmap_write(master->regmap, AT91_PMC_MCR_V2,
+ AT91_PMC_MCR_V2_ID(master->id));
+ regmap_read(master->regmap, AT91_PMC_MCR_V2, &val);
+ regmap_update_bits(master->regmap, AT91_PMC_MCR_V2,
+ enable | AT91_PMC_MCR_V2_CSS | AT91_PMC_MCR_V2_DIV |
+ AT91_PMC_MCR_V2_CMD | AT91_PMC_MCR_V2_ID_MSK,
+ enable | parent | div | AT91_PMC_MCR_V2_CMD |
+ AT91_PMC_MCR_V2_ID(master->id));
- cparent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT;
+ cparent = (val & AT91_PMC_MCR_V2_CSS) >> PMC_MCR_CSS_SHIFT;
/* Wait here only if parent is being changed. */
while ((cparent != master->parent) && !clk_master_ready(master))
cpu_relax();
spin_unlock_irqrestore(master->lock, flags);
+}
+
+static int clk_sama7g5_master_enable(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ clk_sama7g5_master_set(master, 1);
return 0;
}
@@ -574,10 +826,12 @@ static void clk_sama7g5_master_disable(struct clk_hw *hw)
spin_lock_irqsave(master->lock, flags);
- regmap_write(master->regmap, PMC_MCR, master->id);
- regmap_update_bits(master->regmap, PMC_MCR,
- PMC_MCR_EN | PMC_MCR_CMD | PMC_MCR_ID_MSK,
- PMC_MCR_CMD | PMC_MCR_ID(master->id));
+ regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id);
+ regmap_update_bits(master->regmap, AT91_PMC_MCR_V2,
+ AT91_PMC_MCR_V2_EN | AT91_PMC_MCR_V2_CMD |
+ AT91_PMC_MCR_V2_ID_MSK,
+ AT91_PMC_MCR_V2_CMD |
+ AT91_PMC_MCR_V2_ID(master->id));
spin_unlock_irqrestore(master->lock, flags);
}
@@ -590,12 +844,12 @@ static int clk_sama7g5_master_is_enabled(struct clk_hw *hw)
spin_lock_irqsave(master->lock, flags);
- regmap_write(master->regmap, PMC_MCR, master->id);
- regmap_read(master->regmap, PMC_MCR, &val);
+ regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id);
+ regmap_read(master->regmap, AT91_PMC_MCR_V2, &val);
spin_unlock_irqrestore(master->lock, flags);
- return !!(val & PMC_MCR_EN);
+ return !!(val & AT91_PMC_MCR_V2_EN);
}
static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -610,7 +864,7 @@ static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate,
if (div == 3)
div = MASTER_PRES_MAX;
- else
+ else if (div)
div = ffs(div) - 1;
spin_lock_irqsave(master->lock, flags);
@@ -620,6 +874,23 @@ static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int clk_sama7g5_master_save_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ master->pms.status = clk_sama7g5_master_is_enabled(hw);
+
+ return 0;
+}
+
+static void clk_sama7g5_master_restore_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ if (master->pms.status)
+ clk_sama7g5_master_set(master, master->pms.status);
+}
+
static const struct clk_ops sama7g5_master_ops = {
.enable = clk_sama7g5_master_enable,
.disable = clk_sama7g5_master_disable,
@@ -629,6 +900,8 @@ static const struct clk_ops sama7g5_master_ops = {
.set_rate = clk_sama7g5_master_set_rate,
.get_parent = clk_sama7g5_master_get_parent,
.set_parent = clk_sama7g5_master_set_parent,
+ .save_context = clk_sama7g5_master_save_context,
+ .restore_context = clk_sama7g5_master_restore_context,
};
struct clk_hw * __init
@@ -672,10 +945,10 @@ at91_clk_sama7g5_register_master(struct regmap *regmap,
master->mux_table = mux_table;
spin_lock_irqsave(master->lock, flags);
- regmap_write(master->regmap, PMC_MCR, master->id);
- regmap_read(master->regmap, PMC_MCR, &val);
- master->parent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT;
- master->div = (val & PMC_MCR_DIV) >> MASTER_DIV_SHIFT;
+ regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id);
+ regmap_read(master->regmap, AT91_PMC_MCR_V2, &val);
+ master->parent = (val & AT91_PMC_MCR_V2_CSS) >> PMC_MCR_CSS_SHIFT;
+ master->div = (val & AT91_PMC_MCR_V2_DIV) >> MASTER_DIV_SHIFT;
spin_unlock_irqrestore(master->lock, flags);
hw = &master->hw;
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index 7a27ba8e0577..e14fa5ac734c 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -37,6 +37,7 @@ struct clk_sam9x5_peripheral {
u32 id;
u32 div;
const struct clk_pcr_layout *layout;
+ struct at91_clk_pms pms;
bool auto_div;
int chg_pid;
};
@@ -155,10 +156,11 @@ static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
periph->div = shift;
}
-static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
+static int clk_sam9x5_peripheral_set(struct clk_sam9x5_peripheral *periph,
+ unsigned int status)
{
- struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
unsigned long flags;
+ unsigned int enable = status ? AT91_PMC_PCR_EN : 0;
if (periph->id < PERIPHERAL_ID_MIN)
return 0;
@@ -168,15 +170,21 @@ static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
(periph->id & periph->layout->pid_mask));
regmap_update_bits(periph->regmap, periph->layout->offset,
periph->layout->div_mask | periph->layout->cmd |
- AT91_PMC_PCR_EN,
+ enable,
field_prep(periph->layout->div_mask, periph->div) |
- periph->layout->cmd |
- AT91_PMC_PCR_EN);
+ periph->layout->cmd | enable);
spin_unlock_irqrestore(periph->lock, flags);
return 0;
}
+static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+
+ return clk_sam9x5_peripheral_set(periph, 1);
+}
+
static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
@@ -393,6 +401,23 @@ static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
return -EINVAL;
}
+static int clk_sam9x5_peripheral_save_context(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+
+ periph->pms.status = clk_sam9x5_peripheral_is_enabled(hw);
+
+ return 0;
+}
+
+static void clk_sam9x5_peripheral_restore_context(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+
+ if (periph->pms.status)
+ clk_sam9x5_peripheral_set(periph, periph->pms.status);
+}
+
static const struct clk_ops sam9x5_peripheral_ops = {
.enable = clk_sam9x5_peripheral_enable,
.disable = clk_sam9x5_peripheral_disable,
@@ -400,6 +425,8 @@ static const struct clk_ops sam9x5_peripheral_ops = {
.recalc_rate = clk_sam9x5_peripheral_recalc_rate,
.round_rate = clk_sam9x5_peripheral_round_rate,
.set_rate = clk_sam9x5_peripheral_set_rate,
+ .save_context = clk_sam9x5_peripheral_save_context,
+ .restore_context = clk_sam9x5_peripheral_restore_context,
};
static const struct clk_ops sam9x5_peripheral_chg_ops = {
@@ -409,6 +436,8 @@ static const struct clk_ops sam9x5_peripheral_chg_ops = {
.recalc_rate = clk_sam9x5_peripheral_recalc_rate,
.determine_rate = clk_sam9x5_peripheral_determine_rate,
.set_rate = clk_sam9x5_peripheral_set_rate,
+ .save_context = clk_sam9x5_peripheral_save_context,
+ .restore_context = clk_sam9x5_peripheral_restore_context,
};
struct clk_hw * __init
@@ -460,7 +489,6 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
hw = ERR_PTR(ret);
} else {
clk_sam9x5_peripheral_autodiv(periph);
- pmc_register_id(id);
}
return hw;
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 6ed986d3eee0..249d6a53cedf 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -40,6 +40,7 @@ struct clk_pll {
u16 mul;
const struct clk_pll_layout *layout;
const struct clk_pll_characteristics *characteristics;
+ struct at91_clk_pms pms;
};
static inline bool clk_pll_ready(struct regmap *regmap, int id)
@@ -260,6 +261,42 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int clk_pll_save_context(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+ pll->pms.parent_rate = clk_hw_get_rate(parent_hw);
+ pll->pms.rate = clk_pll_recalc_rate(&pll->hw, pll->pms.parent_rate);
+ pll->pms.status = clk_pll_ready(pll->regmap, PLL_REG(pll->id));
+
+ return 0;
+}
+
+static void clk_pll_restore_context(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ unsigned long calc_rate;
+ unsigned int pllr, pllr_out, pllr_count;
+ u8 out = 0;
+
+ if (pll->characteristics->out)
+ out = pll->characteristics->out[pll->range];
+
+ regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
+
+ calc_rate = (pll->pms.parent_rate / PLL_DIV(pllr)) *
+ (PLL_MUL(pllr, pll->layout) + 1);
+ pllr_count = (pllr >> PLL_COUNT_SHIFT) & PLL_MAX_COUNT;
+ pllr_out = (pllr >> PLL_OUT_SHIFT) & out;
+
+ if (pll->pms.rate != calc_rate ||
+ pll->pms.status != clk_pll_ready(pll->regmap, PLL_REG(pll->id)) ||
+ pllr_count != PLL_MAX_COUNT ||
+ (out && pllr_out != out))
+ pr_warn("PLLAR was not configured properly by firmware\n");
+}
+
static const struct clk_ops pll_ops = {
.prepare = clk_pll_prepare,
.unprepare = clk_pll_unprepare,
@@ -267,6 +304,8 @@ static const struct clk_ops pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.round_rate = clk_pll_round_rate,
.set_rate = clk_pll_set_rate,
+ .save_context = clk_pll_save_context,
+ .restore_context = clk_pll_restore_context,
};
struct clk_hw * __init
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index fcf8f6a1c2c6..6c4b259d31d3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -24,6 +24,7 @@ struct clk_programmable {
u32 *mux_table;
u8 id;
const struct clk_programmable_layout *layout;
+ struct at91_clk_pms pms;
};
#define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw)
@@ -177,12 +178,38 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int clk_programmable_save_context(struct clk_hw *hw)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+ prog->pms.parent = clk_programmable_get_parent(hw);
+ prog->pms.parent_rate = clk_hw_get_rate(parent_hw);
+ prog->pms.rate = clk_programmable_recalc_rate(hw, prog->pms.parent_rate);
+
+ return 0;
+}
+
+static void clk_programmable_restore_context(struct clk_hw *hw)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ int ret;
+
+ ret = clk_programmable_set_parent(hw, prog->pms.parent);
+ if (ret)
+ return;
+
+ clk_programmable_set_rate(hw, prog->pms.rate, prog->pms.parent_rate);
+}
+
static const struct clk_ops programmable_ops = {
.recalc_rate = clk_programmable_recalc_rate,
.determine_rate = clk_programmable_determine_rate,
.get_parent = clk_programmable_get_parent,
.set_parent = clk_programmable_set_parent,
.set_rate = clk_programmable_set_rate,
+ .save_context = clk_programmable_save_context,
+ .restore_context = clk_programmable_restore_context,
};
struct clk_hw * __init
@@ -221,8 +248,6 @@ at91_clk_register_programmable(struct regmap *regmap,
if (ret) {
kfree(prog);
hw = ERR_PTR(ret);
- } else {
- pmc_register_pck(id);
}
return hw;
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index 34e3ab13741a..d757003004cb 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -5,6 +5,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
@@ -38,19 +39,24 @@ struct sam9x60_pll_core {
struct sam9x60_frac {
struct sam9x60_pll_core core;
+ struct at91_clk_pms pms;
u32 frac;
u16 mul;
};
struct sam9x60_div {
struct sam9x60_pll_core core;
+ struct at91_clk_pms pms;
u8 div;
+ u8 safe_div;
};
#define to_sam9x60_pll_core(hw) container_of(hw, struct sam9x60_pll_core, hw)
#define to_sam9x60_frac(core) container_of(core, struct sam9x60_frac, core)
#define to_sam9x60_div(core) container_of(core, struct sam9x60_div, core)
+static struct sam9x60_div *notifier_div;
+
static inline bool sam9x60_pll_ready(struct regmap *regmap, int id)
{
unsigned int status;
@@ -71,13 +77,12 @@ static unsigned long sam9x60_frac_pll_recalc_rate(struct clk_hw *hw,
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
struct sam9x60_frac *frac = to_sam9x60_frac(core);
- return (parent_rate * (frac->mul + 1) +
- ((u64)parent_rate * frac->frac >> 22));
+ return parent_rate * (frac->mul + 1) +
+ DIV_ROUND_CLOSEST_ULL((u64)parent_rate * frac->frac, (1 << 22));
}
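As a quick check of the corrected formula, assume a hypothetical 24 MHz parent: mul = 49 and frac = 0 give 24 MHz * 50 = 1.2 GHz, and a fractional part of frac = 2^21 (half of the 2^22 scale) would add another 12 MHz. The old expression truncated the fractional term with a plain shift; DIV_ROUND_CLOSEST_ULL() rounds it instead.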
-static int sam9x60_frac_pll_prepare(struct clk_hw *hw)
+static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
{
- struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
struct sam9x60_frac *frac = to_sam9x60_frac(core);
struct regmap *regmap = core->regmap;
unsigned int val, cfrac, cmul;
@@ -141,6 +146,13 @@ unlock:
return 0;
}
+static int sam9x60_frac_pll_prepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+
+ return sam9x60_frac_pll_set(core);
+}
+
static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
@@ -280,6 +292,25 @@ unlock:
return ret;
}
+static int sam9x60_frac_pll_save_context(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_frac *frac = to_sam9x60_frac(core);
+
+ frac->pms.status = sam9x60_pll_ready(core->regmap, core->id);
+
+ return 0;
+}
+
+static void sam9x60_frac_pll_restore_context(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_frac *frac = to_sam9x60_frac(core);
+
+ if (frac->pms.status)
+ sam9x60_frac_pll_set(core);
+}
+
static const struct clk_ops sam9x60_frac_pll_ops = {
.prepare = sam9x60_frac_pll_prepare,
.unprepare = sam9x60_frac_pll_unprepare,
@@ -287,6 +318,8 @@ static const struct clk_ops sam9x60_frac_pll_ops = {
.recalc_rate = sam9x60_frac_pll_recalc_rate,
.round_rate = sam9x60_frac_pll_round_rate,
.set_rate = sam9x60_frac_pll_set_rate,
+ .save_context = sam9x60_frac_pll_save_context,
+ .restore_context = sam9x60_frac_pll_restore_context,
};
static const struct clk_ops sam9x60_frac_pll_ops_chg = {
@@ -296,11 +329,32 @@ static const struct clk_ops sam9x60_frac_pll_ops_chg = {
.recalc_rate = sam9x60_frac_pll_recalc_rate,
.round_rate = sam9x60_frac_pll_round_rate,
.set_rate = sam9x60_frac_pll_set_rate_chg,
+ .save_context = sam9x60_frac_pll_save_context,
+ .restore_context = sam9x60_frac_pll_restore_context,
};
-static int sam9x60_div_pll_prepare(struct clk_hw *hw)
+/* Must be called with the spinlock held. */
+static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
+ bool enable)
+{
+ struct regmap *regmap = core->regmap;
+ u32 ena_msk = enable ? core->layout->endiv_mask : 0;
+ u32 ena_val = enable ? (1 << core->layout->endiv_shift) : 0;
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+ core->layout->div_mask | ena_msk,
+ (div << core->layout->div_shift) | ena_val);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+ while (!sam9x60_pll_ready(regmap, core->id))
+ cpu_relax();
+}
+
+static int sam9x60_div_pll_set(struct sam9x60_pll_core *core)
{
- struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
struct sam9x60_div *div = to_sam9x60_div(core);
struct regmap *regmap = core->regmap;
unsigned long flags;
@@ -316,17 +370,7 @@ static int sam9x60_div_pll_prepare(struct clk_hw *hw)
if (!!(val & core->layout->endiv_mask) && cdiv == div->div)
goto unlock;
- regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
- core->layout->div_mask | core->layout->endiv_mask,
- (div->div << core->layout->div_shift) |
- (1 << core->layout->endiv_shift));
-
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
-
- while (!sam9x60_pll_ready(regmap, core->id))
- cpu_relax();
+ sam9x60_div_pll_set_div(core, div->div, 1);
unlock:
spin_unlock_irqrestore(core->lock, flags);
@@ -334,6 +378,13 @@ unlock:
return 0;
}
+static int sam9x60_div_pll_prepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+
+ return sam9x60_div_pll_set(core);
+}
+
static void sam9x60_div_pll_unprepare(struct clk_hw *hw)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
@@ -465,16 +516,7 @@ static int sam9x60_div_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
if (cdiv == div->div)
goto unlock;
- regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
- core->layout->div_mask,
- (div->div << core->layout->div_shift));
-
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
-
- while (!sam9x60_pll_ready(regmap, core->id))
- cpu_relax();
+ sam9x60_div_pll_set_div(core, div->div, 0);
unlock:
spin_unlock_irqrestore(core->lock, irqflags);
@@ -482,6 +524,67 @@ unlock:
return 0;
}
+static int sam9x60_div_pll_save_context(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_div *div = to_sam9x60_div(core);
+
+ div->pms.status = sam9x60_div_pll_is_prepared(hw);
+
+ return 0;
+}
+
+static void sam9x60_div_pll_restore_context(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_div *div = to_sam9x60_div(core);
+
+ if (div->pms.status)
+ sam9x60_div_pll_set(core);
+}
+
+static int sam9x60_div_pll_notifier_fn(struct notifier_block *notifier,
+ unsigned long code, void *data)
+{
+ struct sam9x60_div *div = notifier_div;
+ struct sam9x60_pll_core core = div->core;
+ struct regmap *regmap = core.regmap;
+ unsigned long irqflags;
+ u32 val, cdiv;
+ int ret = NOTIFY_DONE;
+
+ if (code != PRE_RATE_CHANGE)
+ return ret;
+
+ /*
+	 * We switch to the safe divider to avoid overclocking other
+	 * domains fed by us while the frac PLL (our parent) is changed.
+ */
+ div->div = div->safe_div;
+
+ spin_lock_irqsave(core.lock, irqflags);
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core.id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
+ cdiv = (val & core.layout->div_mask) >> core.layout->div_shift;
+
+ /* Stop if nothing changed. */
+ if (cdiv == div->safe_div)
+ goto unlock;
+
+ sam9x60_div_pll_set_div(&core, div->div, 0);
+ ret = NOTIFY_OK;
+
+unlock:
+ spin_unlock_irqrestore(core.lock, irqflags);
+
+ return ret;
+}
+
+static struct notifier_block sam9x60_div_pll_notifier = {
+ .notifier_call = sam9x60_div_pll_notifier_fn,
+};
+
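With the sama7g5 numbers further down in this patch (safe_div = 15, frac PLL up to 1.2 GHz), the divider output is capped at 1.2 GHz / 15 = 80 MHz for the duration of the parent change, so the domains it feeds cannot be overclocked mid-transition.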
static const struct clk_ops sam9x60_div_pll_ops = {
.prepare = sam9x60_div_pll_prepare,
.unprepare = sam9x60_div_pll_unprepare,
@@ -489,6 +592,8 @@ static const struct clk_ops sam9x60_div_pll_ops = {
.recalc_rate = sam9x60_div_pll_recalc_rate,
.round_rate = sam9x60_div_pll_round_rate,
.set_rate = sam9x60_div_pll_set_rate,
+ .save_context = sam9x60_div_pll_save_context,
+ .restore_context = sam9x60_div_pll_restore_context,
};
static const struct clk_ops sam9x60_div_pll_ops_chg = {
@@ -498,6 +603,8 @@ static const struct clk_ops sam9x60_div_pll_ops_chg = {
.recalc_rate = sam9x60_div_pll_recalc_rate,
.round_rate = sam9x60_div_pll_round_rate,
.set_rate = sam9x60_div_pll_set_rate_chg,
+ .save_context = sam9x60_div_pll_save_context,
+ .restore_context = sam9x60_div_pll_restore_context,
};
struct clk_hw * __init
@@ -587,7 +694,8 @@ struct clk_hw * __init
sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, u32 flags)
+ const struct clk_pll_layout *layout, u32 flags,
+ u32 safe_div)
{
struct sam9x60_div *div;
struct clk_hw *hw;
@@ -596,9 +704,13 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
unsigned int val;
int ret;
- if (id > PLL_MAX_ID || !lock)
+	/* Only one PLL may use the safe-divider (notifier) setup. */
+ if (id > PLL_MAX_ID || !lock || (safe_div && notifier_div))
return ERR_PTR(-EINVAL);
+ if (safe_div >= PLL_DIV_MAX)
+ safe_div = PLL_DIV_MAX - 1;
+
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
return ERR_PTR(-ENOMEM);
@@ -618,6 +730,7 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
div->core.layout = layout;
div->core.regmap = regmap;
div->core.lock = lock;
+ div->safe_div = safe_div;
spin_lock_irqsave(div->core.lock, irqflags);
@@ -633,6 +746,9 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
if (ret) {
kfree(div);
hw = ERR_PTR(ret);
+ } else if (div->safe_div) {
+ notifier_div = div;
+ clk_notifier_register(hw->clk, &sam9x60_div_pll_notifier);
}
return hw;
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index f83ec0de86c3..80720fd1a9cf 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -20,6 +20,7 @@
struct clk_system {
struct clk_hw hw;
struct regmap *regmap;
+ struct at91_clk_pms pms;
u8 id;
};
@@ -77,10 +78,29 @@ static int clk_system_is_prepared(struct clk_hw *hw)
return !!(status & (1 << sys->id));
}
+static int clk_system_save_context(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+
+ sys->pms.status = clk_system_is_prepared(hw);
+
+ return 0;
+}
+
+static void clk_system_restore_context(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+
+ if (sys->pms.status)
+ clk_system_prepare(&sys->hw);
+}
+
static const struct clk_ops system_ops = {
.prepare = clk_system_prepare,
.unprepare = clk_system_unprepare,
.is_prepared = clk_system_is_prepared,
+ .save_context = clk_system_save_context,
+ .restore_context = clk_system_restore_context,
};
struct clk_hw * __init
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 31d5c45e30d7..b0696a928aa9 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -24,6 +24,7 @@
struct at91sam9x5_clk_usb {
struct clk_hw hw;
struct regmap *regmap;
+ struct at91_clk_pms pms;
u32 usbs_mask;
u8 num_parents;
};
@@ -148,12 +149,38 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int at91sam9x5_usb_save_context(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+ usb->pms.parent = at91sam9x5_clk_usb_get_parent(hw);
+ usb->pms.parent_rate = clk_hw_get_rate(parent_hw);
+ usb->pms.rate = at91sam9x5_clk_usb_recalc_rate(hw, usb->pms.parent_rate);
+
+ return 0;
+}
+
+static void at91sam9x5_usb_restore_context(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ int ret;
+
+ ret = at91sam9x5_clk_usb_set_parent(hw, usb->pms.parent);
+ if (ret)
+ return;
+
+ at91sam9x5_clk_usb_set_rate(hw, usb->pms.rate, usb->pms.parent_rate);
+}
+
static const struct clk_ops at91sam9x5_usb_ops = {
.recalc_rate = at91sam9x5_clk_usb_recalc_rate,
.determine_rate = at91sam9x5_clk_usb_determine_rate,
.get_parent = at91sam9x5_clk_usb_get_parent,
.set_parent = at91sam9x5_clk_usb_set_parent,
.set_rate = at91sam9x5_clk_usb_set_rate,
+ .save_context = at91sam9x5_usb_save_context,
+ .restore_context = at91sam9x5_usb_restore_context,
};
static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index df9f3fc3b6a6..a22c10d9a1b9 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -23,6 +23,7 @@ struct clk_utmi {
struct clk_hw hw;
struct regmap *regmap_pmc;
struct regmap *regmap_sfr;
+ struct at91_clk_pms pms;
};
#define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
@@ -113,11 +114,30 @@ static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
return UTMI_RATE;
}
+static int clk_utmi_save_context(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+
+ utmi->pms.status = clk_utmi_is_prepared(hw);
+
+ return 0;
+}
+
+static void clk_utmi_restore_context(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+
+ if (utmi->pms.status)
+ clk_utmi_prepare(hw);
+}
+
static const struct clk_ops utmi_ops = {
.prepare = clk_utmi_prepare,
.unprepare = clk_utmi_unprepare,
.is_prepared = clk_utmi_is_prepared,
.recalc_rate = clk_utmi_recalc_rate,
+ .save_context = clk_utmi_save_context,
+ .restore_context = clk_utmi_restore_context,
};
static struct clk_hw * __init
@@ -232,10 +252,29 @@ static int clk_utmi_sama7g5_is_prepared(struct clk_hw *hw)
return 0;
}
+static int clk_utmi_sama7g5_save_context(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+
+ utmi->pms.status = clk_utmi_sama7g5_is_prepared(hw);
+
+ return 0;
+}
+
+static void clk_utmi_sama7g5_restore_context(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+
+ if (utmi->pms.status)
+ clk_utmi_sama7g5_prepare(hw);
+}
+
static const struct clk_ops sama7g5_utmi_ops = {
.prepare = clk_utmi_sama7g5_prepare,
.is_prepared = clk_utmi_sama7g5_is_prepared,
.recalc_rate = clk_utmi_recalc_rate,
+ .save_context = clk_utmi_sama7g5_save_context,
+ .restore_context = clk_utmi_sama7g5_restore_context,
};
struct clk_hw * __init
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index a97b99c2dc12..ca2dbb65b9df 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -399,7 +399,7 @@ of_at91_clk_master_setup(struct device_node *np,
hw = at91_clk_register_master_div(regmap, name, "masterck_pres",
layout, characteristics,
- &mck_lock, CLK_SET_RATE_GATE);
+ &mck_lock, CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto out_free_characteristics;
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 20ee9dccee78..5aa9c1f1c886 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -3,10 +3,12 @@
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -14,8 +16,6 @@
#include <asm/proc-fns.h>
-#include <dt-bindings/clock/at91.h>
-
#include "pmc.h"
#define PMC_MAX_IDS 128
@@ -111,151 +111,46 @@ struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
}
#ifdef CONFIG_PM
-static struct regmap *pmcreg;
-static u8 registered_ids[PMC_MAX_IDS];
-static u8 registered_pcks[PMC_MAX_PCKS];
+/* Address in SECURAM that says whether we suspend to backup mode. */
+static void __iomem *at91_pmc_backup_suspend;
-static struct
+static int at91_pmc_suspend(void)
{
- u32 scsr;
- u32 pcsr0;
- u32 uckr;
- u32 mor;
- u32 mcfr;
- u32 pllar;
- u32 mckr;
- u32 usb;
- u32 imr;
- u32 pcsr1;
- u32 pcr[PMC_MAX_IDS];
- u32 audio_pll0;
- u32 audio_pll1;
- u32 pckr[PMC_MAX_PCKS];
-} pmc_cache;
+ unsigned int backup;
-/*
- * As Peripheral ID 0 is invalid on AT91 chips, the identifier is stored
- * without alteration in the table, and 0 is for unused clocks.
- */
-void pmc_register_id(u8 id)
-{
- int i;
-
- for (i = 0; i < PMC_MAX_IDS; i++) {
- if (registered_ids[i] == 0) {
- registered_ids[i] = id;
- break;
- }
- if (registered_ids[i] == id)
- break;
- }
-}
+ if (!at91_pmc_backup_suspend)
+ return 0;
-/*
- * As Programmable Clock 0 is valid on AT91 chips, there is an offset
- * of 1 between the stored value and the real clock ID.
- */
-void pmc_register_pck(u8 pck)
-{
- int i;
-
- for (i = 0; i < PMC_MAX_PCKS; i++) {
- if (registered_pcks[i] == 0) {
- registered_pcks[i] = pck + 1;
- break;
- }
- if (registered_pcks[i] == (pck + 1))
- break;
- }
-}
-
-static int pmc_suspend(void)
-{
- int i;
- u8 num;
-
- regmap_read(pmcreg, AT91_PMC_SCSR, &pmc_cache.scsr);
- regmap_read(pmcreg, AT91_PMC_PCSR, &pmc_cache.pcsr0);
- regmap_read(pmcreg, AT91_CKGR_UCKR, &pmc_cache.uckr);
- regmap_read(pmcreg, AT91_CKGR_MOR, &pmc_cache.mor);
- regmap_read(pmcreg, AT91_CKGR_MCFR, &pmc_cache.mcfr);
- regmap_read(pmcreg, AT91_CKGR_PLLAR, &pmc_cache.pllar);
- regmap_read(pmcreg, AT91_PMC_MCKR, &pmc_cache.mckr);
- regmap_read(pmcreg, AT91_PMC_USB, &pmc_cache.usb);
- regmap_read(pmcreg, AT91_PMC_IMR, &pmc_cache.imr);
- regmap_read(pmcreg, AT91_PMC_PCSR1, &pmc_cache.pcsr1);
-
- for (i = 0; registered_ids[i]; i++) {
- regmap_write(pmcreg, AT91_PMC_PCR,
- (registered_ids[i] & AT91_PMC_PCR_PID_MASK));
- regmap_read(pmcreg, AT91_PMC_PCR,
- &pmc_cache.pcr[registered_ids[i]]);
- }
- for (i = 0; registered_pcks[i]; i++) {
- num = registered_pcks[i] - 1;
- regmap_read(pmcreg, AT91_PMC_PCKR(num), &pmc_cache.pckr[num]);
- }
+ backup = readl_relaxed(at91_pmc_backup_suspend);
+ if (!backup)
+ return 0;
- return 0;
+ return clk_save_context();
}
-static bool pmc_ready(unsigned int mask)
+static void at91_pmc_resume(void)
{
- unsigned int status;
+ unsigned int backup;
- regmap_read(pmcreg, AT91_PMC_SR, &status);
+ if (!at91_pmc_backup_suspend)
+ return;
- return ((status & mask) == mask) ? 1 : 0;
-}
+ backup = readl_relaxed(at91_pmc_backup_suspend);
+ if (!backup)
+ return;
-static void pmc_resume(void)
-{
- int i;
- u8 num;
- u32 tmp;
- u32 mask = AT91_PMC_MCKRDY | AT91_PMC_LOCKA;
-
- regmap_read(pmcreg, AT91_PMC_MCKR, &tmp);
- if (pmc_cache.mckr != tmp)
- pr_warn("MCKR was not configured properly by the firmware\n");
- regmap_read(pmcreg, AT91_CKGR_PLLAR, &tmp);
- if (pmc_cache.pllar != tmp)
- pr_warn("PLLAR was not configured properly by the firmware\n");
-
- regmap_write(pmcreg, AT91_PMC_SCER, pmc_cache.scsr);
- regmap_write(pmcreg, AT91_PMC_PCER, pmc_cache.pcsr0);
- regmap_write(pmcreg, AT91_CKGR_UCKR, pmc_cache.uckr);
- regmap_write(pmcreg, AT91_CKGR_MOR, pmc_cache.mor);
- regmap_write(pmcreg, AT91_CKGR_MCFR, pmc_cache.mcfr);
- regmap_write(pmcreg, AT91_PMC_USB, pmc_cache.usb);
- regmap_write(pmcreg, AT91_PMC_IMR, pmc_cache.imr);
- regmap_write(pmcreg, AT91_PMC_PCER1, pmc_cache.pcsr1);
-
- for (i = 0; registered_ids[i]; i++) {
- regmap_write(pmcreg, AT91_PMC_PCR,
- pmc_cache.pcr[registered_ids[i]] |
- AT91_PMC_PCR_CMD);
- }
- for (i = 0; registered_pcks[i]; i++) {
- num = registered_pcks[i] - 1;
- regmap_write(pmcreg, AT91_PMC_PCKR(num), pmc_cache.pckr[num]);
- }
-
- if (pmc_cache.uckr & AT91_PMC_UPLLEN)
- mask |= AT91_PMC_LOCKU;
-
- while (!pmc_ready(mask))
- cpu_relax();
+ clk_restore_context();
}
static struct syscore_ops pmc_syscore_ops = {
- .suspend = pmc_suspend,
- .resume = pmc_resume,
+ .suspend = at91_pmc_suspend,
+ .resume = at91_pmc_resume,
};
-static const struct of_device_id sama5d2_pmc_dt_ids[] = {
+static const struct of_device_id pmc_dt_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
+ { .compatible = "microchip,sama7g5-pmc", },
{ /* sentinel */ }
};
@@ -263,14 +158,31 @@ static int __init pmc_register_ops(void)
{
struct device_node *np;
- np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids);
+ np = of_find_matching_node(NULL, pmc_dt_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+ of_node_put(np);
+
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
if (!np)
return -ENODEV;
- pmcreg = device_node_to_regmap(np);
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return -ENODEV;
+ }
of_node_put(np);
- if (IS_ERR(pmcreg))
- return PTR_ERR(pmcreg);
+
+ at91_pmc_backup_suspend = of_iomap(np, 0);
+ if (!at91_pmc_backup_suspend) {
+ pr_warn("%s(): unable to map securam\n", __func__);
+ return -ENOMEM;
+ }
register_syscore_ops(&pmc_syscore_ops);
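clk_save_context() and clk_restore_context() are the generic clk framework helpers: they walk every registered clock and invoke the per-clock save_context/restore_context ops added throughout this patch, which replaces the hand-rolled PMC register dump deleted above.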
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index a49076c804a9..3a1bf6194c28 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -13,6 +13,8 @@
#include <linux/regmap.h>
#include <linux/spinlock.h>
+#include <dt-bindings/clock/at91.h>
+
extern spinlock_t pmc_pcr_lock;
struct pmc_data {
@@ -98,6 +100,20 @@ struct clk_pcr_layout {
u32 pid_mask;
};
+/**
+ * struct at91_clk_pms - Power management state for AT91 clock
+ * @rate: clock rate
+ * @parent_rate: clock parent rate
+ * @status: clock status (enabled or disabled)
+ * @parent: clock parent index
+ */
+struct at91_clk_pms {
+ unsigned long rate;
+ unsigned long parent_rate;
+ unsigned int status;
+ unsigned int parent;
+};
+
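Each clock type in this patch embeds one at91_clk_pms and fills only the fields it needs: gate-like clocks (system, UTMI, peripheral) record status, rate clocks record rate and parent_rate, and muxed clocks (programmable, USB) additionally record parent.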
#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
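For example, with _mask = 0x70 (bits 6:4, so ffs(0x70) - 1 = 4), field_prep(0x70, 3) yields (3 << 4) & 0x70 = 0x30, and field_get(0x70, 0x30) recovers 3.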
@@ -166,7 +182,7 @@ at91_clk_register_master_div(struct regmap *regmap, const char *name,
const char *parent_names,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
- spinlock_t *lock, u32 flags);
+ spinlock_t *lock, u32 flags, u32 safe_div);
struct clk_hw * __init
at91_clk_sama7g5_register_master(struct regmap *regmap,
@@ -198,7 +214,8 @@ struct clk_hw * __init
sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, u32 flags);
+ const struct clk_pll_layout *layout, u32 flags,
+ u32 safe_div);
struct clk_hw * __init
sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
@@ -248,12 +265,4 @@ struct clk_hw * __init
at91_clk_sama7g5_register_utmi(struct regmap *regmap, const char *name,
const char *parent_name);
-#ifdef CONFIG_PM
-void pmc_register_id(u8 id);
-void pmc_register_pck(u8 pck);
-#else
-static inline void pmc_register_id(u8 id) {}
-static inline void pmc_register_pck(u8 pck) {}
-#endif
-
#endif /* __PMC_H_ */
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 5f6fa89571b7..5c264185f261 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -242,7 +242,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
* This feeds CPU. It should not
* be disabled.
*/
- CLK_IS_CRITICAL | CLK_SET_RATE_GATE);
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
@@ -260,7 +260,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
&pll_div_layout,
CLK_SET_RATE_GATE |
CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT);
+ CLK_SET_RATE_PARENT, 0);
if (IS_ERR(hw))
goto err_free;
@@ -279,7 +279,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = at91_clk_register_master_div(regmap, "masterck_div",
"masterck_pres", &sam9x60_master_layout,
&mck_characteristics, &mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index 3d1f78176c3e..d027294a0089 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -249,7 +249,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
"masterck_pres",
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index d376257807d2..339d0f382ff0 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -184,7 +184,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
"masterck_pres",
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index 5cbaac68da44..4af75b1e39e9 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -199,7 +199,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
"masterck_pres",
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index cf8c079aa086..369dfafabbca 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -127,6 +127,8 @@ static const struct clk_pll_characteristics pll_characteristics = {
* @t: clock type
* @f: clock flags
* @eid: export index in sama7g5->chws[] array
+ * @safe_div: intermediate divider that needs to be set on the
+ *	      PRE_RATE_CHANGE notification
*/
static const struct {
const char *n;
@@ -136,6 +138,7 @@ static const struct {
unsigned long f;
u8 t;
u8 eid;
+ u8 safe_div;
} sama7g5_plls[][PLL_ID_MAX] = {
[PLL_ID_CPU] = {
{ .n = "cpupll_fracck",
@@ -156,7 +159,12 @@ static const struct {
.t = PLL_TYPE_DIV,
/* This feeds CPU. It should not be disabled. */
.f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .eid = PMC_CPUPLL, },
+ .eid = PMC_CPUPLL,
+ /*
+	 * A div of 15 is safe even when switching between 1 GHz and
+	 * 90 MHz (the frac PLL may go up to 1.2 GHz).
+ */
+ .safe_div = 15, },
},
[PLL_ID_SYS] = {
@@ -377,6 +385,7 @@ static const struct {
u8 id;
} sama7g5_periphck[] = {
{ .n = "pioA_clk", .p = "mck0", .id = 11, },
+ { .n = "securam_clk", .p = "mck0", .id = 18, },
{ .n = "sfr_clk", .p = "mck1", .id = 19, },
{ .n = "hsmc_clk", .p = "mck1", .id = 21, },
{ .n = "xdmac0_clk", .p = "mck1", .id = 22, },
@@ -841,7 +850,7 @@ static const struct {
/* MCK0 characteristics. */
static const struct clk_master_characteristics mck0_characteristics = {
- .output = { .min = 50000000, .max = 200000000 },
+ .output = { .min = 32768, .max = 200000000 },
.divisors = { 1, 2, 4, 3, 5 },
.have_div3_pres = 1,
};
@@ -966,7 +975,8 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
sama7g5_plls[i][j].p, i,
sama7g5_plls[i][j].c,
sama7g5_plls[i][j].l,
- sama7g5_plls[i][j].f);
+ sama7g5_plls[i][j].f,
+ sama7g5_plls[i][j].safe_div);
break;
default:
@@ -982,18 +992,9 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
}
parent_names[0] = "cpupll_divpmcck";
- hw = at91_clk_register_master_pres(regmap, "cpuck", 1, parent_names,
- &mck0_layout, &mck0_characteristics,
- &pmc_mck0_lock,
- CLK_SET_RATE_PARENT, 0);
- if (IS_ERR(hw))
- goto err_free;
-
- sama7g5_pmc->chws[PMC_CPU] = hw;
-
- hw = at91_clk_register_master_div(regmap, "mck0", "cpuck",
+ hw = at91_clk_register_master_div(regmap, "mck0", "cpupll_divpmcck",
&mck0_layout, &mck0_characteristics,
- &pmc_mck0_lock, 0);
+ &pmc_mck0_lock, CLK_GET_RATE_NOCACHE, 5);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 0506046a5f4b..c04ae0e7e4b4 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -42,6 +42,29 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
return rate_ops->recalc_rate(rate_hw, parent_rate);
}
+static int clk_composite_determine_rate_for_parent(struct clk_hw *rate_hw,
+ struct clk_rate_request *req,
+ struct clk_hw *parent_hw,
+ const struct clk_ops *rate_ops)
+{
+ long rate;
+
+ req->best_parent_hw = parent_hw;
+ req->best_parent_rate = clk_hw_get_rate(parent_hw);
+
+ if (rate_ops->determine_rate)
+ return rate_ops->determine_rate(rate_hw, req);
+
+ rate = rate_ops->round_rate(rate_hw, req->rate,
+ &req->best_parent_rate);
+ if (rate < 0)
+ return rate;
+
+ req->rate = rate;
+
+ return 0;
+}
+
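A hedged illustration of the determine_rate contract the new helper targets; the divide-by-2 op below is hypothetical. The op receives the requested rate in req->rate and must leave the achievable rate there (the helper has already filled in best_parent_hw and best_parent_rate):

#include <linux/clk-provider.h>

/* Hypothetical rate op for a fixed divide-by-2 clock. */
static int example_div2_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	/* The best we can achieve is half of the chosen parent rate. */
	req->rate = req->best_parent_rate / 2;

	return 0;
}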
static int clk_composite_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -51,54 +74,56 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
struct clk_hw *rate_hw = composite->rate_hw;
struct clk_hw *mux_hw = composite->mux_hw;
struct clk_hw *parent;
- unsigned long parent_rate;
- long tmp_rate, best_rate = 0;
unsigned long rate_diff;
unsigned long best_rate_diff = ULONG_MAX;
- long rate;
- int i;
+ unsigned long best_rate = 0;
+ int i, ret;
- if (rate_hw && rate_ops && rate_ops->determine_rate) {
- __clk_hw_set_clk(rate_hw, hw);
- return rate_ops->determine_rate(rate_hw, req);
- } else if (rate_hw && rate_ops && rate_ops->round_rate &&
- mux_hw && mux_ops && mux_ops->set_parent) {
+ if (rate_hw && rate_ops &&
+ (rate_ops->determine_rate || rate_ops->round_rate) &&
+ mux_hw && mux_ops && mux_ops->set_parent) {
req->best_parent_hw = NULL;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
+ struct clk_rate_request tmp_req = *req;
+
parent = clk_hw_get_parent(mux_hw);
- req->best_parent_hw = parent;
- req->best_parent_rate = clk_hw_get_rate(parent);
- rate = rate_ops->round_rate(rate_hw, req->rate,
- &req->best_parent_rate);
- if (rate < 0)
- return rate;
+ ret = clk_composite_determine_rate_for_parent(rate_hw,
+ &tmp_req,
+ parent,
+ rate_ops);
+ if (ret)
+ return ret;
+
+ req->rate = tmp_req.rate;
+ req->best_parent_rate = tmp_req.best_parent_rate;
- req->rate = rate;
return 0;
}
for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) {
+ struct clk_rate_request tmp_req = *req;
+
parent = clk_hw_get_parent_by_index(mux_hw, i);
if (!parent)
continue;
- parent_rate = clk_hw_get_rate(parent);
-
- tmp_rate = rate_ops->round_rate(rate_hw, req->rate,
- &parent_rate);
- if (tmp_rate < 0)
+ ret = clk_composite_determine_rate_for_parent(rate_hw,
+ &tmp_req,
+ parent,
+ rate_ops);
+ if (ret)
continue;
- rate_diff = abs(req->rate - tmp_rate);
+ rate_diff = abs(req->rate - tmp_req.rate);
if (!rate_diff || !req->best_parent_hw
|| best_rate_diff > rate_diff) {
req->best_parent_hw = parent;
- req->best_parent_rate = parent_rate;
+ req->best_parent_rate = tmp_req.best_parent_rate;
best_rate_diff = rate_diff;
- best_rate = tmp_rate;
+ best_rate = tmp_req.rate;
}
if (!rate_diff)
@@ -107,6 +132,9 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
req->rate = best_rate;
return 0;
+ } else if (rate_hw && rate_ops && rate_ops->determine_rate) {
+ __clk_hw_set_clk(rate_hw, hw);
+ return rate_ops->determine_rate(rate_hw, req);
} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
__clk_hw_set_clk(mux_hw, hw);
return mux_ops->determine_rate(mux_hw, req);
@@ -362,6 +390,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
return ERR_CAST(hw);
return hw->clk;
}
+EXPORT_SYMBOL_GPL(clk_register_composite);
struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
const struct clk_parent_data *parent_data,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 65508eb89ec9..f467d63bbf1e 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3108,7 +3108,10 @@ static int clk_rate_get(void *data, u64 *val)
{
struct clk_core *core = data;
- *val = core->rate;
+ clk_prepare_lock();
+ *val = clk_core_get_rate_recalc(core);
+ clk_prepare_unlock();
+
return 0;
}
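Reading the cached core->rate could report a stale value for clocks flagged CLK_GET_RATE_NOCACHE (such as the reworked mck0 above); clk_core_get_rate_recalc() re-reads the hardware for those, and holding the prepare lock keeps the readout consistent with concurrent rate changes.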
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 47d9ec3abd2f..c08edbd04d22 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -98,3 +98,10 @@ config CLK_IMX8QXP
select MXC_CLK_SCU
help
Build the driver for IMX8QXP SCU based clocks.
+
+config CLK_IMX8ULP
+ tristate "IMX8ULP CCM Clock Driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ select MXC_CLK
+ help
+	  Build the driver for the i.MX8ULP CCM clocks.
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index c24a2acbfa56..b5e040026dfb 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -31,6 +31,8 @@ clk-imx-scu-$(CONFIG_CLK_IMX8QXP) += clk-scu.o clk-imx8qxp.o \
clk-imx8qxp-rsrc.o clk-imx8qm-rsrc.o
clk-imx-lpcg-scu-$(CONFIG_CLK_IMX8QXP) += clk-lpcg-scu.o clk-imx8qxp-lpcg.o
+obj-$(CONFIG_CLK_IMX8ULP) += clk-imx8ulp.o
+
obj-$(CONFIG_CLK_IMX1) += clk-imx1.o
obj-$(CONFIG_CLK_IMX25) += clk-imx25.o
obj-$(CONFIG_CLK_IMX27) += clk-imx27.o
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index d85ba78abbb1..4eedd45dbaa8 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -8,6 +8,7 @@
#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include "../clk-fractional-divider.h"
@@ -23,17 +24,61 @@
#define PCG_PCD_WIDTH 3
#define PCG_PCD_MASK 0x7
-struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
+#define SW_RST BIT(28)
+
+static int pcc_gate_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = clk_gate_ops.enable(hw);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(gate->lock, flags);
+ /*
+ * release the sw reset for peripherals associated with
+ * with this pcc clock.
+ */
+ val = readl(gate->reg);
+ val |= SW_RST;
+ writel(val, gate->reg);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void pcc_gate_disable(struct clk_hw *hw)
+{
+ clk_gate_ops.disable(hw);
+}
+
+static int pcc_gate_is_enabled(struct clk_hw *hw)
+{
+ return clk_gate_ops.is_enabled(hw);
+}
+
+static const struct clk_ops pcc_gate_ops = {
+ .enable = pcc_gate_enable,
+ .disable = pcc_gate_disable,
+ .is_enabled = pcc_gate_is_enabled,
+};
+
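pcc_gate_disable() and pcc_gate_is_enabled() simply forward to clk_gate_ops; they exist so the whole ops table can be swapped in one piece when has_swrst is set, with only the enable path carrying the extra SW_RST handling.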
+static struct clk_hw *imx_ulp_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents, bool mux_present,
bool rate_present, bool gate_present,
- void __iomem *reg)
+ void __iomem *reg, bool has_swrst)
{
struct clk_hw *mux_hw = NULL, *fd_hw = NULL, *gate_hw = NULL;
struct clk_fractional_divider *fd = NULL;
struct clk_gate *gate = NULL;
struct clk_mux *mux = NULL;
struct clk_hw *hw;
+ u32 val;
if (mux_present) {
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
@@ -43,6 +88,8 @@ struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
mux->reg = reg;
mux->shift = PCG_PCS_SHIFT;
mux->mask = PCG_PCS_MASK;
+ if (has_swrst)
+ mux->lock = &imx_ccm_lock;
}
if (rate_present) {
@@ -60,6 +107,8 @@ struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
fd->nwidth = PCG_PCD_WIDTH;
fd->nmask = PCG_PCD_MASK;
fd->flags = CLK_FRAC_DIVIDER_ZERO_BASED;
+ if (has_swrst)
+ fd->lock = &imx_ccm_lock;
}
if (gate_present) {
@@ -72,13 +121,27 @@ struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
gate_hw = &gate->hw;
gate->reg = reg;
gate->bit_idx = PCG_CGC_SHIFT;
+ if (has_swrst)
+ gate->lock = &imx_ccm_lock;
+ /*
+		 * Make sure the clock is gated during clock tree
+		 * initialization: the HW only allows parent/rate changes
+		 * while the clock is gated. The bootloader may have left
+		 * clocks enabled, so the HW status would mismatch the
+		 * clock tree prepare count; the clock core would then
+		 * permit a parent/rate change (prepare count is zero)
+		 * that the HW actually rejects because the clock is
+		 * still enabled.
+ */
+ val = readl_relaxed(reg);
+ val &= ~(1 << PCG_CGC_SHIFT);
+ writel_relaxed(val, reg);
}
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &clk_mux_ops, fd_hw,
&clk_fractional_divider_ops, gate_hw,
- &clk_gate_ops, CLK_SET_RATE_GATE |
- CLK_SET_PARENT_GATE);
+ has_swrst ? &pcc_gate_ops : &clk_gate_ops, CLK_SET_RATE_GATE |
+ CLK_SET_PARENT_GATE | CLK_SET_RATE_NO_REPARENT);
if (IS_ERR(hw)) {
kfree(mux);
kfree(fd);
@@ -87,3 +150,20 @@ struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
return hw;
}
+
+struct clk_hw *imx7ulp_clk_hw_composite(const char *name, const char * const *parent_names,
+ int num_parents, bool mux_present, bool rate_present,
+ bool gate_present, void __iomem *reg)
+{
+ return imx_ulp_clk_hw_composite(name, parent_names, num_parents, mux_present, rate_present,
+ gate_present, reg, false);
+}
+
+struct clk_hw *imx8ulp_clk_hw_composite(const char *name, const char * const *parent_names,
+ int num_parents, bool mux_present, bool rate_present,
+ bool gate_present, void __iomem *reg, bool has_swrst)
+{
+ return imx_ulp_clk_hw_composite(name, parent_names, num_parents, mux_present, rate_present,
+ gate_present, reg, has_swrst);
+}
+EXPORT_SYMBOL_GPL(imx8ulp_clk_hw_composite);
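imx7ulp_clk_hw_composite() keeps its original behaviour (no SW_RST handling, no shared lock), while the new imx8ulp_clk_hw_composite() opts in via has_swrst.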
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 04e728538cef..2dfd6149e528 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -171,7 +171,7 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = {
.determine_rate = imx8m_clk_composite_mux_determine_rate,
};
-struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
+struct clk_hw *__imx8m_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents, void __iomem *reg,
u32 composite_flags,
@@ -246,4 +246,4 @@ fail:
kfree(mux);
return ERR_CAST(hw);
}
-EXPORT_SYMBOL_GPL(imx8m_clk_hw_composite_flags);
+EXPORT_SYMBOL_GPL(__imx8m_clk_hw_composite);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 5dbb6a937732..520b100bff4b 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -161,7 +161,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6UL_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6UL_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6UL_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX6UL_CLK_CSI_SEL] = imx_clk_hw_mux_flags("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels), CLK_SET_RATE_PARENT);
/* Do not bypass PLLs initially */
clk_set_parent(hws[IMX6UL_PLL1_BYPASS]->clk, hws[IMX6UL_CLK_PLL1]->clk);
@@ -270,6 +269,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6UL_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
hws[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_hw_mux_flags("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels), CLK_SET_RATE_PARENT);
hws[IMX6UL_CLK_LCDIF_SEL] = imx_clk_hw_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
+ hws[IMX6UL_CLK_CSI_SEL] = imx_clk_hw_mux("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels));
hws[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_hw_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
hws[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_hw_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
@@ -380,7 +380,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6ULL_CLK_ESAI_IPG] = imx_clk_hw_gate2_shared("esai_ipg", "ahb", base + 0x70, 0, &share_count_esai);
hws[IMX6ULL_CLK_ESAI_MEM] = imx_clk_hw_gate2_shared("esai_mem", "ahb", base + 0x70, 0, &share_count_esai);
}
- hws[IMX6UL_CLK_CSI] = imx_clk_hw_gate2("csi", "csi_podf", base + 0x70, 2);
hws[IMX6UL_CLK_I2C1] = imx_clk_hw_gate2("i2c1", "perclk", base + 0x70, 6);
hws[IMX6UL_CLK_I2C2] = imx_clk_hw_gate2("i2c2", "perclk", base + 0x70, 8);
hws[IMX6UL_CLK_I2C3] = imx_clk_hw_gate2("i2c3", "perclk", base + 0x70, 10);
@@ -391,6 +390,12 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6UL_CLK_PXP] = imx_clk_hw_gate2("pxp", "axi", base + 0x70, 30);
/* CCGR3 */
+ /*
+ * Although the imx6ull reference manual lists CCGR2 as the csi clk
+ * gate register, tests have shown that it is actually the CCGR3
+ * register bit 0/1, same as for the imx6ul.
+ */
+ hws[IMX6UL_CLK_CSI] = imx_clk_hw_gate2("csi", "csi_podf", base + 0x74, 0);
hws[IMX6UL_CLK_UART5_IPG] = imx_clk_hw_gate2("uart5_ipg", "ipg", base + 0x74, 2);
hws[IMX6UL_CLK_UART5_SERIAL] = imx_clk_hw_gate2("uart5_serial", "uart_podf", base + 0x74, 2);
if (clk_on_imx6ul()) {
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 779e09105da7..b6e45e77ee39 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -78,20 +78,20 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
hws[IMX7ULP_CLK_SPLL_PRE_DIV] = imx_clk_hw_divider_flags("spll_pre_div", "spll_pre_sel", base + 0x608, 8, 3, CLK_SET_RATE_GATE);
/* name parent_name base */
- hws[IMX7ULP_CLK_APLL] = imx_clk_hw_pllv4("apll", "apll_pre_div", base + 0x500);
- hws[IMX7ULP_CLK_SPLL] = imx_clk_hw_pllv4("spll", "spll_pre_div", base + 0x600);
+ hws[IMX7ULP_CLK_APLL] = imx_clk_hw_pllv4(IMX_PLLV4_IMX7ULP, "apll", "apll_pre_div", base + 0x500);
+ hws[IMX7ULP_CLK_SPLL] = imx_clk_hw_pllv4(IMX_PLLV4_IMX7ULP, "spll", "spll_pre_div", base + 0x600);
/* APLL PFDs */
- hws[IMX7ULP_CLK_APLL_PFD0] = imx_clk_hw_pfdv2("apll_pfd0", "apll", base + 0x50c, 0);
- hws[IMX7ULP_CLK_APLL_PFD1] = imx_clk_hw_pfdv2("apll_pfd1", "apll", base + 0x50c, 1);
- hws[IMX7ULP_CLK_APLL_PFD2] = imx_clk_hw_pfdv2("apll_pfd2", "apll", base + 0x50c, 2);
- hws[IMX7ULP_CLK_APLL_PFD3] = imx_clk_hw_pfdv2("apll_pfd3", "apll", base + 0x50c, 3);
+ hws[IMX7ULP_CLK_APLL_PFD0] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "apll_pfd0", "apll", base + 0x50c, 0);
+ hws[IMX7ULP_CLK_APLL_PFD1] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "apll_pfd1", "apll", base + 0x50c, 1);
+ hws[IMX7ULP_CLK_APLL_PFD2] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "apll_pfd2", "apll", base + 0x50c, 2);
+ hws[IMX7ULP_CLK_APLL_PFD3] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "apll_pfd3", "apll", base + 0x50c, 3);
/* SPLL PFDs */
- hws[IMX7ULP_CLK_SPLL_PFD0] = imx_clk_hw_pfdv2("spll_pfd0", "spll", base + 0x60C, 0);
- hws[IMX7ULP_CLK_SPLL_PFD1] = imx_clk_hw_pfdv2("spll_pfd1", "spll", base + 0x60C, 1);
- hws[IMX7ULP_CLK_SPLL_PFD2] = imx_clk_hw_pfdv2("spll_pfd2", "spll", base + 0x60C, 2);
- hws[IMX7ULP_CLK_SPLL_PFD3] = imx_clk_hw_pfdv2("spll_pfd3", "spll", base + 0x60C, 3);
+ hws[IMX7ULP_CLK_SPLL_PFD0] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "spll_pfd0", "spll", base + 0x60C, 0);
+ hws[IMX7ULP_CLK_SPLL_PFD1] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "spll_pfd1", "spll", base + 0x60C, 1);
+ hws[IMX7ULP_CLK_SPLL_PFD2] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "spll_pfd2", "spll", base + 0x60C, 2);
+ hws[IMX7ULP_CLK_SPLL_PFD3] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "spll_pfd3", "spll", base + 0x60C, 3);
/* PLL Mux */
hws[IMX7ULP_CLK_APLL_PFD_SEL] = imx_clk_hw_mux_flags("apll_pfd_sel", base + 0x508, 14, 2, apll_pfd_sels, ARRAY_SIZE(apll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
diff --git a/drivers/clk/imx/clk-imx8ulp.c b/drivers/clk/imx/clk-imx8ulp.c
new file mode 100644
index 000000000000..6699437e17b8
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8ulp.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 NXP
+ */
+
+#include <dt-bindings/clock/imx8ulp-clock.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+static const char * const pll_pre_sels[] = { "sosc", "frosc", };
+static const char * const a35_sels[] = { "frosc", "spll2", "sosc", "lvds", };
+static const char * const nic_sels[] = { "frosc", "spll3_pfd0", "sosc", "lvds", };
+static const char * const pcc3_periph_bus_sels[] = { "dummy", "lposc", "sosc_div2",
+ "frosc_div2", "xbar_divbus", "spll3_pfd1_div1",
+ "spll3_pfd0_div2", "spll3_pfd0_div1", };
+static const char * const pcc4_periph_bus_sels[] = { "dummy", "dummy", "lposc",
+ "sosc_div2", "frosc_div2", "xbar_divbus",
+ "spll3_vcodiv", "spll3_pfd0_div1", };
+static const char * const pcc4_periph_plat_sels[] = { "dummy", "sosc_div1", "frosc_div1",
+ "spll3_pfd3_div2", "spll3_pfd3_div1",
+ "spll3_pfd2_div2", "spll3_pfd2_div1",
+ "spll3_pfd1_div2", };
+static const char * const pcc5_periph_bus_sels[] = { "dummy", "dummy", "lposc",
+ "sosc_div2", "frosc_div2", "lpav_bus_clk",
+ "pll4_vcodiv", "pll4_pfd3_div1", };
+static const char * const pcc5_periph_plat_sels[] = { "dummy", "pll4_pfd3_div2", "pll4_pfd2_div2",
+ "pll4_pfd2_div1", "pll4_pfd1_div2",
+ "pll4_pfd1_div1", "pll4_pfd0_div2",
+ "pll4_pfd0_div1", };
+static const char * const hifi_sels[] = { "frosc", "pll4", "pll4_pfd0", "sosc",
+ "lvds", "dummy", "dummy", "dummy", };
+static const char * const ddr_sels[] = { "frosc", "pll4_pfd1", "sosc", "lvds",
+ "pll4", "pll4", "pll4", "pll4", };
+static const char * const lpav_sels[] = { "frosc", "pll4_pfd1", "sosc", "lvds", };
+static const char * const sai45_sels[] = { "spll3_pfd1_div1", "aud_clk1", "aud_clk2", "sosc", };
+static const char * const sai67_sels[] = { "spll1_pfd2_div", "spll3_pfd1_div1", "aud_clk0", "aud_clk1", "aud_clk2", "sosc", "dummy", "dummy", };
+static const char * const aud_clk1_sels[] = { "ext_aud_mclk2", "sai4_rx_bclk", "sai4_tx_bclk", "sai5_rx_bclk", "sai5_tx_bclk", "dummy", "dummy", "dummy", };
+static const char * const aud_clk2_sels[] = { "ext_aud_mclk3", "sai6_rx_bclk", "sai6_tx_bclk", "sai7_rx_bclk", "sai7_tx_bclk", "spdif_rx", "dummy", "dummy", };
+static const char * const enet_ts_sels[] = { "ext_rmii_clk", "ext_ts_clk", "rosc", "ext_aud_mclk", "sosc", "dummy", "dummy", "dummy"};
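+/*
+ * Single-entry parent tables: these clocks have a fixed parent, so the
+ * shared composite helper is reused with mux_present set to false.
+ */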
+static const char * const xbar_divbus[] = { "xbar_divbus" };
+static const char * const nic_per_divplat[] = { "nic_per_divplat" };
+static const char * const lpav_axi_div[] = { "lpav_axi_div" };
+static const char * const lpav_bus_div[] = { "lpav_bus_div" };
+
+struct pcc_reset_dev {
+ void __iomem *base;
+ struct reset_controller_dev rcdev;
+ const u32 *resets;
+	/* set to &imx_ccm_lock: the PCC registers are shared with the clock gate controls */
+ spinlock_t *lock;
+};
+
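+/* PCC bit 28 is the peripheral software reset; 0 holds the block in reset */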
+#define PCC_SW_RST BIT(28)
+#define to_pcc_reset_dev(_rcdev) container_of(_rcdev, struct pcc_reset_dev, rcdev)
+
+static const u32 pcc3_resets[] = {
+ 0xa8, 0xac, 0xc8, 0xcc, 0xd0,
+ 0xd4, 0xd8, 0xdc, 0xe0, 0xe4,
+ 0xe8, 0xec, 0xf0
+};
+
+static const u32 pcc4_resets[] = {
+ 0x4, 0x8, 0xc, 0x10, 0x14,
+ 0x18, 0x1c, 0x20, 0x24, 0x34,
+ 0x38, 0x3c, 0x40, 0x44, 0x48,
+ 0x4c, 0x54
+};
+
+static const u32 pcc5_resets[] = {
+ 0xa0, 0xa4, 0xa8, 0xac, 0xb0,
+ 0xb4, 0xbc, 0xc0, 0xc8, 0xcc,
+ 0xd0, 0xf0, 0xf4, 0xf8
+};
+
+static int imx8ulp_pcc_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct pcc_reset_dev *pcc_reset = to_pcc_reset_dev(rcdev);
+ u32 offset = pcc_reset->resets[id];
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(pcc_reset->lock, flags);
+
+ val = readl(pcc_reset->base + offset);
+ val &= ~PCC_SW_RST;
+ writel(val, pcc_reset->base + offset);
+
+ spin_unlock_irqrestore(pcc_reset->lock, flags);
+
+ return 0;
+}
+
+static int imx8ulp_pcc_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct pcc_reset_dev *pcc_reset = to_pcc_reset_dev(rcdev);
+ u32 offset = pcc_reset->resets[id];
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(pcc_reset->lock, flags);
+
+ val = readl(pcc_reset->base + offset);
+ val |= PCC_SW_RST;
+ writel(val, pcc_reset->base + offset);
+
+ spin_unlock_irqrestore(pcc_reset->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops imx8ulp_pcc_reset_ops = {
+ .assert = imx8ulp_pcc_assert,
+ .deassert = imx8ulp_pcc_deassert,
+};
+
+static int imx8ulp_pcc_reset_init(struct platform_device *pdev, void __iomem *base,
+ const u32 *resets, unsigned int nr_resets)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct pcc_reset_dev *pcc_reset;
+
+ pcc_reset = devm_kzalloc(dev, sizeof(*pcc_reset), GFP_KERNEL);
+ if (!pcc_reset)
+ return -ENOMEM;
+
+ pcc_reset->base = base;
+ pcc_reset->lock = &imx_ccm_lock;
+ pcc_reset->resets = resets;
+ pcc_reset->rcdev.owner = THIS_MODULE;
+ pcc_reset->rcdev.nr_resets = nr_resets;
+ pcc_reset->rcdev.ops = &imx8ulp_pcc_reset_ops;
+ pcc_reset->rcdev.of_node = np;
+
+ return devm_reset_controller_register(dev, &pcc_reset->rcdev);
+}
+
+static int imx8ulp_clk_cgc1_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, IMX8ULP_CLK_CGC1_END),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8ULP_CLK_CGC1_END;
+ clks = clk_data->hws;
+
+ clks[IMX8ULP_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+
+ /* CGC1 */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ clks[IMX8ULP_CLK_SPLL2_PRE_SEL] = imx_clk_hw_mux_flags("spll2_pre_sel", base + 0x510, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+ clks[IMX8ULP_CLK_SPLL3_PRE_SEL] = imx_clk_hw_mux_flags("spll3_pre_sel", base + 0x610, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+
+ clks[IMX8ULP_CLK_SPLL2] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP, "spll2", "spll2_pre_sel", base + 0x500);
+ clks[IMX8ULP_CLK_SPLL3] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP, "spll3", "spll3_pre_sel", base + 0x600);
+ clks[IMX8ULP_CLK_SPLL3_VCODIV] = imx_clk_hw_divider("spll3_vcodiv", "spll3", base + 0x604, 0, 6);
+
+ clks[IMX8ULP_CLK_SPLL3_PFD0] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "spll3_pfd0", "spll3_vcodiv", base + 0x614, 0);
+ clks[IMX8ULP_CLK_SPLL3_PFD1] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "spll3_pfd1", "spll3_vcodiv", base + 0x614, 1);
+ clks[IMX8ULP_CLK_SPLL3_PFD2] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "spll3_pfd2", "spll3_vcodiv", base + 0x614, 2);
+ clks[IMX8ULP_CLK_SPLL3_PFD3] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "spll3_pfd3", "spll3_vcodiv", base + 0x614, 3);
+
+ clks[IMX8ULP_CLK_SPLL3_PFD0_DIV1_GATE] = imx_clk_hw_gate_dis("spll3_pfd0_div1_gate", "spll3_pfd0", base + 0x608, 7);
+ clks[IMX8ULP_CLK_SPLL3_PFD0_DIV2_GATE] = imx_clk_hw_gate_dis("spll3_pfd0_div2_gate", "spll3_pfd0", base + 0x608, 15);
+ clks[IMX8ULP_CLK_SPLL3_PFD1_DIV1_GATE] = imx_clk_hw_gate_dis("spll3_pfd1_div1_gate", "spll3_pfd1", base + 0x608, 23);
+ clks[IMX8ULP_CLK_SPLL3_PFD1_DIV2_GATE] = imx_clk_hw_gate_dis("spll3_pfd1_div2_gate", "spll3_pfd1", base + 0x608, 31);
+ clks[IMX8ULP_CLK_SPLL3_PFD2_DIV1_GATE] = imx_clk_hw_gate_dis("spll3_pfd2_div1_gate", "spll3_pfd2", base + 0x60c, 7);
+ clks[IMX8ULP_CLK_SPLL3_PFD2_DIV2_GATE] = imx_clk_hw_gate_dis("spll3_pfd2_div2_gate", "spll3_pfd2", base + 0x60c, 15);
+ clks[IMX8ULP_CLK_SPLL3_PFD3_DIV1_GATE] = imx_clk_hw_gate_dis("spll3_pfd3_div1_gate", "spll3_pfd3", base + 0x60c, 23);
+ clks[IMX8ULP_CLK_SPLL3_PFD3_DIV2_GATE] = imx_clk_hw_gate_dis("spll3_pfd3_div2_gate", "spll3_pfd3", base + 0x60c, 31);
+ clks[IMX8ULP_CLK_SPLL3_PFD0_DIV1] = imx_clk_hw_divider("spll3_pfd0_div1", "spll3_pfd0_div1_gate", base + 0x608, 0, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD0_DIV2] = imx_clk_hw_divider("spll3_pfd0_div2", "spll3_pfd0_div2_gate", base + 0x608, 8, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD1_DIV1] = imx_clk_hw_divider("spll3_pfd1_div1", "spll3_pfd1_div1_gate", base + 0x608, 16, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD1_DIV2] = imx_clk_hw_divider("spll3_pfd1_div2", "spll3_pfd1_div2_gate", base + 0x608, 24, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD2_DIV1] = imx_clk_hw_divider("spll3_pfd2_div1", "spll3_pfd2_div1_gate", base + 0x60c, 0, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD2_DIV2] = imx_clk_hw_divider("spll3_pfd2_div2", "spll3_pfd2_div2_gate", base + 0x60c, 8, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD3_DIV1] = imx_clk_hw_divider("spll3_pfd3_div1", "spll3_pfd3_div1_gate", base + 0x60c, 16, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD3_DIV2] = imx_clk_hw_divider("spll3_pfd3_div2", "spll3_pfd3_div2_gate", base + 0x60c, 24, 6);
+
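+	/* CPU and interconnect dividers are marked critical: never gated */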
+ clks[IMX8ULP_CLK_A35_SEL] = imx_clk_hw_mux2("a35_sel", base + 0x14, 28, 2, a35_sels, ARRAY_SIZE(a35_sels));
+ clks[IMX8ULP_CLK_A35_DIV] = imx_clk_hw_divider_flags("a35_div", "a35_sel", base + 0x14, 21, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+ clks[IMX8ULP_CLK_NIC_SEL] = imx_clk_hw_mux2("nic_sel", base + 0x34, 28, 2, nic_sels, ARRAY_SIZE(nic_sels));
+ clks[IMX8ULP_CLK_NIC_AD_DIVPLAT] = imx_clk_hw_divider_flags("nic_ad_divplat", "nic_sel", base + 0x34, 21, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_NIC_PER_DIVPLAT] = imx_clk_hw_divider_flags("nic_per_divplat", "nic_ad_divplat", base + 0x34, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_XBAR_AD_DIVPLAT] = imx_clk_hw_divider_flags("xbar_ad_divplat", "nic_ad_divplat", base + 0x38, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_XBAR_DIVBUS] = imx_clk_hw_divider_flags("xbar_divbus", "nic_ad_divplat", base + 0x38, 7, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_XBAR_AD_SLOW] = imx_clk_hw_divider_flags("xbar_ad_slow", "nic_ad_divplat", base + 0x38, 0, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+ clks[IMX8ULP_CLK_SOSC_DIV1_GATE] = imx_clk_hw_gate_dis("sosc_div1_gate", "sosc", base + 0x108, 7);
+ clks[IMX8ULP_CLK_SOSC_DIV2_GATE] = imx_clk_hw_gate_dis("sosc_div2_gate", "sosc", base + 0x108, 15);
+ clks[IMX8ULP_CLK_SOSC_DIV3_GATE] = imx_clk_hw_gate_dis("sosc_div3_gate", "sosc", base + 0x108, 23);
+ clks[IMX8ULP_CLK_SOSC_DIV1] = imx_clk_hw_divider("sosc_div1", "sosc_div1_gate", base + 0x108, 0, 6);
+ clks[IMX8ULP_CLK_SOSC_DIV2] = imx_clk_hw_divider("sosc_div2", "sosc_div2_gate", base + 0x108, 8, 6);
+ clks[IMX8ULP_CLK_SOSC_DIV3] = imx_clk_hw_divider("sosc_div3", "sosc_div3_gate", base + 0x108, 16, 6);
+
+ clks[IMX8ULP_CLK_FROSC_DIV1_GATE] = imx_clk_hw_gate_dis("frosc_div1_gate", "frosc", base + 0x208, 7);
+ clks[IMX8ULP_CLK_FROSC_DIV2_GATE] = imx_clk_hw_gate_dis("frosc_div2_gate", "frosc", base + 0x208, 15);
+ clks[IMX8ULP_CLK_FROSC_DIV3_GATE] = imx_clk_hw_gate_dis("frosc_div3_gate", "frosc", base + 0x208, 23);
+ clks[IMX8ULP_CLK_FROSC_DIV1] = imx_clk_hw_divider("frosc_div1", "frosc_div1_gate", base + 0x208, 0, 6);
+ clks[IMX8ULP_CLK_FROSC_DIV2] = imx_clk_hw_divider("frosc_div2", "frosc_div2_gate", base + 0x208, 8, 6);
+ clks[IMX8ULP_CLK_FROSC_DIV3] = imx_clk_hw_divider("frosc_div3", "frosc_div3_gate", base + 0x208, 16, 6);
+ clks[IMX8ULP_CLK_AUD_CLK1] = imx_clk_hw_mux2("aud_clk1", base + 0x900, 0, 3, aud_clk1_sels, ARRAY_SIZE(aud_clk1_sels));
+ clks[IMX8ULP_CLK_SAI4_SEL] = imx_clk_hw_mux2("sai4_sel", base + 0x904, 0, 2, sai45_sels, ARRAY_SIZE(sai45_sels));
+ clks[IMX8ULP_CLK_SAI5_SEL] = imx_clk_hw_mux2("sai5_sel", base + 0x904, 8, 2, sai45_sels, ARRAY_SIZE(sai45_sels));
+ clks[IMX8ULP_CLK_ENET_TS_SEL] = imx_clk_hw_mux2("enet_ts", base + 0x700, 24, 3, enet_ts_sels, ARRAY_SIZE(enet_ts_sels));
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+}
+
+static int imx8ulp_clk_cgc2_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, IMX8ULP_CLK_CGC2_END),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8ULP_CLK_CGC2_END;
+ clks = clk_data->hws;
+
+ /* CGC2 */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ clks[IMX8ULP_CLK_PLL4_PRE_SEL] = imx_clk_hw_mux_flags("pll4_pre_sel", base + 0x610, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+
+ clks[IMX8ULP_CLK_PLL4] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP, "pll4", "pll4_pre_sel", base + 0x600);
+ clks[IMX8ULP_CLK_PLL4_VCODIV] = imx_clk_hw_divider("pll4_vcodiv", "pll4", base + 0x604, 0, 6);
+
+ clks[IMX8ULP_CLK_HIFI_SEL] = imx_clk_hw_mux_flags("hifi_sel", base + 0x14, 28, 3, hifi_sels, ARRAY_SIZE(hifi_sels), CLK_SET_PARENT_GATE);
+ clks[IMX8ULP_CLK_HIFI_DIVCORE] = imx_clk_hw_divider("hifi_core_div", "hifi_sel", base + 0x14, 21, 6);
+ clks[IMX8ULP_CLK_HIFI_DIVPLAT] = imx_clk_hw_divider("hifi_plat_div", "hifi_core_div", base + 0x14, 14, 6);
+
+ clks[IMX8ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x40, 28, 3, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_PARENT_GATE);
+ clks[IMX8ULP_CLK_DDR_DIV] = imx_clk_hw_divider_flags("ddr_div", "ddr_sel", base + 0x40, 21, 6, CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_LPAV_AXI_SEL] = imx_clk_hw_mux("lpav_sel", base + 0x3c, 28, 2, lpav_sels, ARRAY_SIZE(lpav_sels));
+ clks[IMX8ULP_CLK_LPAV_AXI_DIV] = imx_clk_hw_divider_flags("lpav_axi_div", "lpav_sel", base + 0x3c, 21, 6, CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_LPAV_AHB_DIV] = imx_clk_hw_divider_flags("lpav_ahb_div", "lpav_axi_div", base + 0x3c, 14, 6, CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_LPAV_BUS_DIV] = imx_clk_hw_divider_flags("lpav_bus_div", "lpav_axi_div", base + 0x3c, 7, 6, CLK_IS_CRITICAL);
+
+ clks[IMX8ULP_CLK_PLL4_PFD0] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "pll4_pfd0", "pll4_vcodiv", base + 0x614, 0);
+ clks[IMX8ULP_CLK_PLL4_PFD1] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "pll4_pfd1", "pll4_vcodiv", base + 0x614, 1);
+ clks[IMX8ULP_CLK_PLL4_PFD2] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "pll4_pfd2", "pll4_vcodiv", base + 0x614, 2);
+ clks[IMX8ULP_CLK_PLL4_PFD3] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "pll4_pfd3", "pll4_vcodiv", base + 0x614, 3);
+
+ clks[IMX8ULP_CLK_PLL4_PFD0_DIV1_GATE] = imx_clk_hw_gate_dis("pll4_pfd0_div1_gate", "pll4_pfd0", base + 0x608, 7);
+ clks[IMX8ULP_CLK_PLL4_PFD0_DIV2_GATE] = imx_clk_hw_gate_dis("pll4_pfd0_div2_gate", "pll4_pfd0", base + 0x608, 15);
+ clks[IMX8ULP_CLK_PLL4_PFD1_DIV1_GATE] = imx_clk_hw_gate_dis("pll4_pfd1_div1_gate", "pll4_pfd1", base + 0x608, 23);
+ clks[IMX8ULP_CLK_PLL4_PFD1_DIV2_GATE] = imx_clk_hw_gate_dis("pll4_pfd1_div2_gate", "pll4_pfd1", base + 0x608, 31);
+ clks[IMX8ULP_CLK_PLL4_PFD2_DIV1_GATE] = imx_clk_hw_gate_dis("pll4_pfd2_div1_gate", "pll4_pfd2", base + 0x60c, 7);
+ clks[IMX8ULP_CLK_PLL4_PFD2_DIV2_GATE] = imx_clk_hw_gate_dis("pll4_pfd2_div2_gate", "pll4_pfd2", base + 0x60c, 15);
+ clks[IMX8ULP_CLK_PLL4_PFD3_DIV1_GATE] = imx_clk_hw_gate_dis("pll4_pfd3_div1_gate", "pll4_pfd3", base + 0x60c, 23);
+ clks[IMX8ULP_CLK_PLL4_PFD3_DIV2_GATE] = imx_clk_hw_gate_dis("pll4_pfd3_div2_gate", "pll4_pfd3", base + 0x60c, 31);
+ clks[IMX8ULP_CLK_PLL4_PFD0_DIV1] = imx_clk_hw_divider("pll4_pfd0_div1", "pll4_pfd0_div1_gate", base + 0x608, 0, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD0_DIV2] = imx_clk_hw_divider("pll4_pfd0_div2", "pll4_pfd0_div2_gate", base + 0x608, 8, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD1_DIV1] = imx_clk_hw_divider("pll4_pfd1_div1", "pll4_pfd1_div1_gate", base + 0x608, 16, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD1_DIV2] = imx_clk_hw_divider("pll4_pfd1_div2", "pll4_pfd1_div2_gate", base + 0x608, 24, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD2_DIV1] = imx_clk_hw_divider("pll4_pfd2_div1", "pll4_pfd2_div1_gate", base + 0x60c, 0, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD2_DIV2] = imx_clk_hw_divider("pll4_pfd2_div2", "pll4_pfd2_div2_gate", base + 0x60c, 8, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD3_DIV1] = imx_clk_hw_divider("pll4_pfd3_div1", "pll4_pfd3_div1_gate", base + 0x60c, 16, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD3_DIV2] = imx_clk_hw_divider("pll4_pfd3_div2", "pll4_pfd3_div2_gate", base + 0x60c, 24, 6);
+
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV1_GATE] = imx_clk_hw_gate_dis("cgc2_sosc_div1_gate", "sosc", base + 0x108, 7);
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV2_GATE] = imx_clk_hw_gate_dis("cgc2_sosc_div2_gate", "sosc", base + 0x108, 15);
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV3_GATE] = imx_clk_hw_gate_dis("cgc2_sosc_div3_gate", "sosc", base + 0x108, 23);
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV1] = imx_clk_hw_divider("cgc2_sosc_div1", "cgc2_sosc_div1_gate", base + 0x108, 0, 6);
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV2] = imx_clk_hw_divider("cgc2_sosc_div2", "cgc2_sosc_div2_gate", base + 0x108, 8, 6);
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV3] = imx_clk_hw_divider("cgc2_sosc_div3", "cgc2_sosc_div3_gate", base + 0x108, 16, 6);
+
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV1_GATE] = imx_clk_hw_gate_dis("cgc2_frosc_div1_gate", "frosc", base + 0x208, 7);
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV2_GATE] = imx_clk_hw_gate_dis("cgc2_frosc_div2_gate", "frosc", base + 0x208, 15);
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV3_GATE] = imx_clk_hw_gate_dis("cgc2_frosc_div3_gate", "frosc", base + 0x208, 23);
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV1] = imx_clk_hw_divider("cgc2_frosc_div1", "cgc2_frosc_div1_gate", base + 0x208, 0, 6);
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV2] = imx_clk_hw_divider("cgc2_frosc_div2", "cgc2_frosc_div2_gate", base + 0x208, 8, 6);
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV3] = imx_clk_hw_divider("cgc2_frosc_div3", "cgc2_frosc_div3_gate", base + 0x208, 16, 6);
+ clks[IMX8ULP_CLK_AUD_CLK2] = imx_clk_hw_mux2("aud_clk2", base + 0x900, 0, 3, aud_clk2_sels, ARRAY_SIZE(aud_clk2_sels));
+ clks[IMX8ULP_CLK_SAI6_SEL] = imx_clk_hw_mux2("sai6_sel", base + 0x904, 0, 3, sai67_sels, ARRAY_SIZE(sai67_sels));
+ clks[IMX8ULP_CLK_SAI7_SEL] = imx_clk_hw_mux2("sai7_sel", base + 0x904, 8, 3, sai67_sels, ARRAY_SIZE(sai67_sels));
+ clks[IMX8ULP_CLK_SPDIF_SEL] = imx_clk_hw_mux2("spdif_sel", base + 0x910, 0, 3, sai67_sels, ARRAY_SIZE(sai67_sels));
+ clks[IMX8ULP_CLK_DSI_PHY_REF] = imx_clk_hw_fixed("dsi_phy_ref", 24000000);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+}
+
+static int imx8ulp_clk_pcc3_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+ int ret;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, IMX8ULP_CLK_PCC3_END),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8ULP_CLK_PCC3_END;
+ clks = clk_data->hws;
+
+ /* PCC3 */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ clks[IMX8ULP_CLK_WDOG3] = imx8ulp_clk_hw_composite("wdog3", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xa8, 1);
+ clks[IMX8ULP_CLK_WDOG4] = imx8ulp_clk_hw_composite("wdog4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xac, 1);
+ clks[IMX8ULP_CLK_LPIT1] = imx8ulp_clk_hw_composite("lpit1", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xc8, 1);
+ clks[IMX8ULP_CLK_TPM4] = imx8ulp_clk_hw_composite("tpm4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xcc, 1);
+ clks[IMX8ULP_CLK_TPM5] = imx8ulp_clk_hw_composite("tpm5", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xd0, 1);
+ clks[IMX8ULP_CLK_FLEXIO1] = imx8ulp_clk_hw_composite("flexio1", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xd4, 1);
+ clks[IMX8ULP_CLK_I3C2] = imx8ulp_clk_hw_composite("i3c2", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xd8, 1);
+ clks[IMX8ULP_CLK_LPI2C4] = imx8ulp_clk_hw_composite("lpi2c4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xdc, 1);
+ clks[IMX8ULP_CLK_LPI2C5] = imx8ulp_clk_hw_composite("lpi2c5", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xe0, 1);
+ clks[IMX8ULP_CLK_LPUART4] = imx8ulp_clk_hw_composite("lpuart4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xe4, 1);
+ clks[IMX8ULP_CLK_LPUART5] = imx8ulp_clk_hw_composite("lpuart5", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xe8, 1);
+ clks[IMX8ULP_CLK_LPSPI4] = imx8ulp_clk_hw_composite("lpspi4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xec, 1);
+ clks[IMX8ULP_CLK_LPSPI5] = imx8ulp_clk_hw_composite("lpspi5", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xf0, 1);
+
+ clks[IMX8ULP_CLK_DMA1_MP] = imx_clk_hw_gate("pcc_dma1_mp", "xbar_ad_divplat", base + 0x4, 30);
+ clks[IMX8ULP_CLK_DMA1_CH0] = imx_clk_hw_gate("pcc_dma1_ch0", "xbar_ad_divplat", base + 0x8, 30);
+ clks[IMX8ULP_CLK_DMA1_CH1] = imx_clk_hw_gate("pcc_dma1_ch1", "xbar_ad_divplat", base + 0xc, 30);
+ clks[IMX8ULP_CLK_DMA1_CH2] = imx_clk_hw_gate("pcc_dma1_ch2", "xbar_ad_divplat", base + 0x10, 30);
+ clks[IMX8ULP_CLK_DMA1_CH3] = imx_clk_hw_gate("pcc_dma1_ch3", "xbar_ad_divplat", base + 0x14, 30);
+ clks[IMX8ULP_CLK_DMA1_CH4] = imx_clk_hw_gate("pcc_dma1_ch4", "xbar_ad_divplat", base + 0x18, 30);
+ clks[IMX8ULP_CLK_DMA1_CH5] = imx_clk_hw_gate("pcc_dma1_ch5", "xbar_ad_divplat", base + 0x1c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH6] = imx_clk_hw_gate("pcc_dma1_ch6", "xbar_ad_divplat", base + 0x20, 30);
+ clks[IMX8ULP_CLK_DMA1_CH7] = imx_clk_hw_gate("pcc_dma1_ch7", "xbar_ad_divplat", base + 0x24, 30);
+ clks[IMX8ULP_CLK_DMA1_CH8] = imx_clk_hw_gate("pcc_dma1_ch8", "xbar_ad_divplat", base + 0x28, 30);
+ clks[IMX8ULP_CLK_DMA1_CH9] = imx_clk_hw_gate("pcc_dma1_ch9", "xbar_ad_divplat", base + 0x2c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH10] = imx_clk_hw_gate("pcc_dma1_ch10", "xbar_ad_divplat", base + 0x30, 30);
+ clks[IMX8ULP_CLK_DMA1_CH11] = imx_clk_hw_gate("pcc_dma1_ch11", "xbar_ad_divplat", base + 0x34, 30);
+ clks[IMX8ULP_CLK_DMA1_CH12] = imx_clk_hw_gate("pcc_dma1_ch12", "xbar_ad_divplat", base + 0x38, 30);
+ clks[IMX8ULP_CLK_DMA1_CH13] = imx_clk_hw_gate("pcc_dma1_ch13", "xbar_ad_divplat", base + 0x3c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH14] = imx_clk_hw_gate("pcc_dma1_ch14", "xbar_ad_divplat", base + 0x40, 30);
+ clks[IMX8ULP_CLK_DMA1_CH15] = imx_clk_hw_gate("pcc_dma1_ch15", "xbar_ad_divplat", base + 0x44, 30);
+ clks[IMX8ULP_CLK_DMA1_CH16] = imx_clk_hw_gate("pcc_dma1_ch16", "xbar_ad_divplat", base + 0x48, 30);
+ clks[IMX8ULP_CLK_DMA1_CH17] = imx_clk_hw_gate("pcc_dma1_ch17", "xbar_ad_divplat", base + 0x4c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH18] = imx_clk_hw_gate("pcc_dma1_ch18", "xbar_ad_divplat", base + 0x50, 30);
+ clks[IMX8ULP_CLK_DMA1_CH19] = imx_clk_hw_gate("pcc_dma1_ch19", "xbar_ad_divplat", base + 0x54, 30);
+ clks[IMX8ULP_CLK_DMA1_CH20] = imx_clk_hw_gate("pcc_dma1_ch20", "xbar_ad_divplat", base + 0x58, 30);
+ clks[IMX8ULP_CLK_DMA1_CH21] = imx_clk_hw_gate("pcc_dma1_ch21", "xbar_ad_divplat", base + 0x5c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH22] = imx_clk_hw_gate("pcc_dma1_ch22", "xbar_ad_divplat", base + 0x60, 30);
+ clks[IMX8ULP_CLK_DMA1_CH23] = imx_clk_hw_gate("pcc_dma1_ch23", "xbar_ad_divplat", base + 0x64, 30);
+ clks[IMX8ULP_CLK_DMA1_CH24] = imx_clk_hw_gate("pcc_dma1_ch24", "xbar_ad_divplat", base + 0x68, 30);
+ clks[IMX8ULP_CLK_DMA1_CH25] = imx_clk_hw_gate("pcc_dma1_ch25", "xbar_ad_divplat", base + 0x6c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH26] = imx_clk_hw_gate("pcc_dma1_ch26", "xbar_ad_divplat", base + 0x70, 30);
+ clks[IMX8ULP_CLK_DMA1_CH27] = imx_clk_hw_gate("pcc_dma1_ch27", "xbar_ad_divplat", base + 0x74, 30);
+ clks[IMX8ULP_CLK_DMA1_CH28] = imx_clk_hw_gate("pcc_dma1_ch28", "xbar_ad_divplat", base + 0x78, 30);
+ clks[IMX8ULP_CLK_DMA1_CH29] = imx_clk_hw_gate("pcc_dma1_ch29", "xbar_ad_divplat", base + 0x7c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH30] = imx_clk_hw_gate("pcc_dma1_ch30", "xbar_ad_divplat", base + 0x80, 30);
+ clks[IMX8ULP_CLK_DMA1_CH31] = imx_clk_hw_gate("pcc_dma1_ch31", "xbar_ad_divplat", base + 0x84, 30);
+ clks[IMX8ULP_CLK_MU0_B] = imx_clk_hw_gate("mu0_b", "xbar_ad_divplat", base + 0x88, 30);
+ clks[IMX8ULP_CLK_MU3_A] = imx_clk_hw_gate("mu3_a", "xbar_ad_divplat", base + 0x8c, 30);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ imx_register_uart_clocks(1);
+
+ /* register the pcc3 reset controller */
+ return imx8ulp_pcc_reset_init(pdev, base, pcc3_resets, ARRAY_SIZE(pcc3_resets));
+}
+
+static int imx8ulp_clk_pcc4_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+ int ret;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, IMX8ULP_CLK_PCC4_END),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8ULP_CLK_PCC4_END;
+ clks = clk_data->hws;
+
+ /* PCC4 */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ clks[IMX8ULP_CLK_FLEXSPI2] = imx8ulp_clk_hw_composite("flexspi2", pcc4_periph_plat_sels, ARRAY_SIZE(pcc4_periph_plat_sels), true, true, true, base + 0x4, 1);
+ clks[IMX8ULP_CLK_TPM6] = imx8ulp_clk_hw_composite("tpm6", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0x8, 1);
+ clks[IMX8ULP_CLK_TPM7] = imx8ulp_clk_hw_composite("tpm7", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0xc, 1);
+ clks[IMX8ULP_CLK_LPI2C6] = imx8ulp_clk_hw_composite("lpi2c6", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0x10, 1);
+ clks[IMX8ULP_CLK_LPI2C7] = imx8ulp_clk_hw_composite("lpi2c7", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0x14, 1);
+ clks[IMX8ULP_CLK_LPUART6] = imx8ulp_clk_hw_composite("lpuart6", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0x18, 1);
+ clks[IMX8ULP_CLK_LPUART7] = imx8ulp_clk_hw_composite("lpuart7", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0x1c, 1);
+ clks[IMX8ULP_CLK_SAI4] = imx8ulp_clk_hw_composite("sai4", xbar_divbus, 1, false, false, true, base + 0x20, 1); /* sai ipg, NOT from sai sel */
+ clks[IMX8ULP_CLK_SAI5] = imx8ulp_clk_hw_composite("sai5", xbar_divbus, 1, false, false, true, base + 0x24, 1); /* sai ipg */
+ clks[IMX8ULP_CLK_PCTLE] = imx_clk_hw_gate("pctle", "xbar_divbus", base + 0x28, 30);
+ clks[IMX8ULP_CLK_PCTLF] = imx_clk_hw_gate("pctlf", "xbar_divbus", base + 0x2c, 30);
+ clks[IMX8ULP_CLK_USDHC0] = imx8ulp_clk_hw_composite("usdhc0", pcc4_periph_plat_sels, ARRAY_SIZE(pcc4_periph_plat_sels), true, false, true, base + 0x34, 1);
+ clks[IMX8ULP_CLK_USDHC1] = imx8ulp_clk_hw_composite("usdhc1", pcc4_periph_plat_sels, ARRAY_SIZE(pcc4_periph_plat_sels), true, false, true, base + 0x38, 1);
+ clks[IMX8ULP_CLK_USDHC2] = imx8ulp_clk_hw_composite("usdhc2", pcc4_periph_plat_sels, ARRAY_SIZE(pcc4_periph_plat_sels), true, false, true, base + 0x3c, 1);
+ clks[IMX8ULP_CLK_USB0] = imx8ulp_clk_hw_composite("usb0", nic_per_divplat, 1, false, false, true, base + 0x40, 1);
+ clks[IMX8ULP_CLK_USB0_PHY] = imx8ulp_clk_hw_composite("usb0_phy", xbar_divbus, 1, false, false, true, base + 0x44, 1);
+ clks[IMX8ULP_CLK_USB1] = imx8ulp_clk_hw_composite("usb1", nic_per_divplat, 1, false, false, true, base + 0x48, 1);
+ clks[IMX8ULP_CLK_USB1_PHY] = imx8ulp_clk_hw_composite("usb1_phy", xbar_divbus, 1, false, false, true, base + 0x4c, 1);
+ clks[IMX8ULP_CLK_USB_XBAR] = imx_clk_hw_gate("usb_xbar", "xbar_divbus", base + 0x50, 30);
+ clks[IMX8ULP_CLK_ENET] = imx8ulp_clk_hw_composite("enet", nic_per_divplat, 1, false, false, true, base + 0x54, 1);
+ clks[IMX8ULP_CLK_RGPIOE] = imx_clk_hw_gate("rgpioe", "nic_per_divplat", base + 0x78, 30);
+ clks[IMX8ULP_CLK_RGPIOF] = imx_clk_hw_gate("rgpiof", "nic_per_divplat", base + 0x7c, 30);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ /* register the pcc4 reset controller */
+ return imx8ulp_pcc_reset_init(pdev, base, pcc4_resets, ARRAY_SIZE(pcc4_resets));
+}
+
+static int imx8ulp_clk_pcc5_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+ int ret;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, IMX8ULP_CLK_PCC5_END),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8ULP_CLK_PCC5_END;
+ clks = clk_data->hws;
+
+ /* PCC5 */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ clks[IMX8ULP_CLK_DMA2_MP] = imx_clk_hw_gate("pcc_dma2_mp", "lpav_axi_div", base + 0x0, 30);
+ clks[IMX8ULP_CLK_DMA2_CH0] = imx_clk_hw_gate("pcc_dma2_ch0", "lpav_axi_div", base + 0x4, 30);
+ clks[IMX8ULP_CLK_DMA2_CH1] = imx_clk_hw_gate("pcc_dma2_ch1", "lpav_axi_div", base + 0x8, 30);
+ clks[IMX8ULP_CLK_DMA2_CH2] = imx_clk_hw_gate("pcc_dma2_ch2", "lpav_axi_div", base + 0xc, 30);
+ clks[IMX8ULP_CLK_DMA2_CH3] = imx_clk_hw_gate("pcc_dma2_ch3", "lpav_axi_div", base + 0x10, 30);
+ clks[IMX8ULP_CLK_DMA2_CH4] = imx_clk_hw_gate("pcc_dma2_ch4", "lpav_axi_div", base + 0x14, 30);
+ clks[IMX8ULP_CLK_DMA2_CH5] = imx_clk_hw_gate("pcc_dma2_ch5", "lpav_axi_div", base + 0x18, 30);
+ clks[IMX8ULP_CLK_DMA2_CH6] = imx_clk_hw_gate("pcc_dma2_ch6", "lpav_axi_div", base + 0x1c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH7] = imx_clk_hw_gate("pcc_dma2_ch7", "lpav_axi_div", base + 0x20, 30);
+ clks[IMX8ULP_CLK_DMA2_CH8] = imx_clk_hw_gate("pcc_dma2_ch8", "lpav_axi_div", base + 0x24, 30);
+ clks[IMX8ULP_CLK_DMA2_CH9] = imx_clk_hw_gate("pcc_dma2_ch9", "lpav_axi_div", base + 0x28, 30);
+ clks[IMX8ULP_CLK_DMA2_CH10] = imx_clk_hw_gate("pcc_dma2_ch10", "lpav_axi_div", base + 0x2c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH11] = imx_clk_hw_gate("pcc_dma2_ch11", "lpav_axi_div", base + 0x30, 30);
+ clks[IMX8ULP_CLK_DMA2_CH12] = imx_clk_hw_gate("pcc_dma2_ch12", "lpav_axi_div", base + 0x34, 30);
+ clks[IMX8ULP_CLK_DMA2_CH13] = imx_clk_hw_gate("pcc_dma2_ch13", "lpav_axi_div", base + 0x38, 30);
+ clks[IMX8ULP_CLK_DMA2_CH14] = imx_clk_hw_gate("pcc_dma2_ch14", "lpav_axi_div", base + 0x3c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH15] = imx_clk_hw_gate("pcc_dma2_ch15", "lpav_axi_div", base + 0x40, 30);
+ clks[IMX8ULP_CLK_DMA2_CH16] = imx_clk_hw_gate("pcc_dma2_ch16", "lpav_axi_div", base + 0x44, 30);
+ clks[IMX8ULP_CLK_DMA2_CH17] = imx_clk_hw_gate("pcc_dma2_ch17", "lpav_axi_div", base + 0x48, 30);
+ clks[IMX8ULP_CLK_DMA2_CH18] = imx_clk_hw_gate("pcc_dma2_ch18", "lpav_axi_div", base + 0x4c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH19] = imx_clk_hw_gate("pcc_dma2_ch19", "lpav_axi_div", base + 0x50, 30);
+ clks[IMX8ULP_CLK_DMA2_CH20] = imx_clk_hw_gate("pcc_dma2_ch20", "lpav_axi_div", base + 0x54, 30);
+ clks[IMX8ULP_CLK_DMA2_CH21] = imx_clk_hw_gate("pcc_dma2_ch21", "lpav_axi_div", base + 0x58, 30);
+ clks[IMX8ULP_CLK_DMA2_CH22] = imx_clk_hw_gate("pcc_dma2_ch22", "lpav_axi_div", base + 0x5c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH23] = imx_clk_hw_gate("pcc_dma2_ch23", "lpav_axi_div", base + 0x60, 30);
+ clks[IMX8ULP_CLK_DMA2_CH24] = imx_clk_hw_gate("pcc_dma2_ch24", "lpav_axi_div", base + 0x64, 30);
+ clks[IMX8ULP_CLK_DMA2_CH25] = imx_clk_hw_gate("pcc_dma2_ch25", "lpav_axi_div", base + 0x68, 30);
+ clks[IMX8ULP_CLK_DMA2_CH26] = imx_clk_hw_gate("pcc_dma2_ch26", "lpav_axi_div", base + 0x6c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH27] = imx_clk_hw_gate("pcc_dma2_ch27", "lpav_axi_div", base + 0x70, 30);
+ clks[IMX8ULP_CLK_DMA2_CH28] = imx_clk_hw_gate("pcc_dma2_ch28", "lpav_axi_div", base + 0x74, 30);
+ clks[IMX8ULP_CLK_DMA2_CH29] = imx_clk_hw_gate("pcc_dma2_ch29", "lpav_axi_div", base + 0x78, 30);
+ clks[IMX8ULP_CLK_DMA2_CH30] = imx_clk_hw_gate("pcc_dma2_ch30", "lpav_axi_div", base + 0x7c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH31] = imx_clk_hw_gate("pcc_dma2_ch31", "lpav_axi_div", base + 0x80, 30);
+
+ clks[IMX8ULP_CLK_AVD_SIM] = imx_clk_hw_gate("avd_sim", "lpav_bus_div", base + 0x94, 30);
+ clks[IMX8ULP_CLK_TPM8] = imx8ulp_clk_hw_composite("tpm8", pcc5_periph_bus_sels, ARRAY_SIZE(pcc5_periph_bus_sels), true, true, true, base + 0xa0, 1);
+ clks[IMX8ULP_CLK_MU2_B] = imx_clk_hw_gate("mu2_b", "lpav_bus_div", base + 0x84, 30);
+ clks[IMX8ULP_CLK_MU3_B] = imx_clk_hw_gate("mu3_b", "lpav_bus_div", base + 0x88, 30);
+ clks[IMX8ULP_CLK_SAI6] = imx8ulp_clk_hw_composite("sai6", lpav_bus_div, 1, false, false, true, base + 0xa4, 1);
+ clks[IMX8ULP_CLK_SAI7] = imx8ulp_clk_hw_composite("sai7", lpav_bus_div, 1, false, false, true, base + 0xa8, 1);
+ clks[IMX8ULP_CLK_SPDIF] = imx8ulp_clk_hw_composite("spdif", lpav_bus_div, 1, false, false, true, base + 0xac, 1);
+ clks[IMX8ULP_CLK_ISI] = imx8ulp_clk_hw_composite("isi", lpav_axi_div, 1, false, false, true, base + 0xb0, 1);
+ clks[IMX8ULP_CLK_CSI_REGS] = imx8ulp_clk_hw_composite("csi_regs", lpav_bus_div, 1, false, false, true, base + 0xb4, 1);
+ clks[IMX8ULP_CLK_CSI] = imx8ulp_clk_hw_composite("csi", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xbc, 1);
+ clks[IMX8ULP_CLK_DSI] = imx8ulp_clk_hw_composite("dsi", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xc0, 1);
+ clks[IMX8ULP_CLK_WDOG5] = imx8ulp_clk_hw_composite("wdog5", pcc5_periph_bus_sels, ARRAY_SIZE(pcc5_periph_bus_sels), true, true, true, base + 0xc8, 1);
+ clks[IMX8ULP_CLK_EPDC] = imx8ulp_clk_hw_composite("epdc", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xcc, 1);
+ clks[IMX8ULP_CLK_PXP] = imx8ulp_clk_hw_composite("pxp", lpav_axi_div, 1, false, false, true, base + 0xd0, 1);
+ clks[IMX8ULP_CLK_GPU2D] = imx8ulp_clk_hw_composite("gpu2d", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xf0, 1);
+ clks[IMX8ULP_CLK_GPU3D] = imx8ulp_clk_hw_composite("gpu3d", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xf4, 1);
+ clks[IMX8ULP_CLK_DC_NANO] = imx8ulp_clk_hw_composite("dc_nano", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xf8, 1);
+ clks[IMX8ULP_CLK_CSI_CLK_UI] = imx8ulp_clk_hw_composite("csi_clk_ui", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0x10c, 1);
+ clks[IMX8ULP_CLK_CSI_CLK_ESC] = imx8ulp_clk_hw_composite("csi_clk_esc", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0x110, 1);
+ clks[IMX8ULP_CLK_RGPIOD] = imx_clk_hw_gate("rgpiod", "lpav_axi_div", base + 0x114, 30);
+ clks[IMX8ULP_CLK_DSI_TX_ESC] = imx_clk_hw_fixed_factor("mipi_dsi_tx_esc", "dsi", 1, 4);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ /* register the pcc5 reset controller */
+ return imx8ulp_pcc_reset_init(pdev, base, pcc5_resets, ARRAY_SIZE(pcc5_resets));
+}
+
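+/*
+ * CGC1/CGC2 and PCC3/4/5 probe as separate platform devices; each
+ * compatible carries its init routine as OF match data and the common
+ * probe below simply dispatches to it.
+ */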
+static int imx8ulp_clk_probe(struct platform_device *pdev)
+{
+ int (*probe)(struct platform_device *pdev);
+
+ probe = of_device_get_match_data(&pdev->dev);
+
+ if (probe)
+ return probe(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id imx8ulp_clk_dt_ids[] = {
+ { .compatible = "fsl,imx8ulp-pcc3", .data = imx8ulp_clk_pcc3_init },
+ { .compatible = "fsl,imx8ulp-pcc4", .data = imx8ulp_clk_pcc4_init },
+ { .compatible = "fsl,imx8ulp-pcc5", .data = imx8ulp_clk_pcc5_init },
+ { .compatible = "fsl,imx8ulp-cgc2", .data = imx8ulp_clk_cgc2_init },
+ { .compatible = "fsl,imx8ulp-cgc1", .data = imx8ulp_clk_cgc1_init },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx8ulp_clk_dt_ids);
+
+static struct platform_driver imx8ulp_clk_driver = {
+ .probe = imx8ulp_clk_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = imx8ulp_clk_dt_ids,
+ },
+};
+module_platform_driver(imx8ulp_clk_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8ULP clock driver");
+MODULE_LICENSE("GPL v2");
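
A peripheral driver consumes one of these PCC resets through the generic reset API. A minimal consumer sketch, assuming the driver's DT node points at one of the PCC reset controllers registered above (the function name, reset index and delays are illustrative):

	#include <linux/delay.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	static int example_periph_probe(struct device *dev)
	{
		struct reset_control *rst;

		rst = devm_reset_control_get_exclusive(dev, NULL);
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* imx8ulp_pcc_assert() clears PCC_SW_RST: block held in reset */
		reset_control_assert(rst);
		usleep_range(10, 20);
		/* imx8ulp_pcc_deassert() sets PCC_SW_RST: block released */
		reset_control_deassert(rst);

		return 0;
	}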
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index 6b744c84278e..6ca53a960eb7 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -161,8 +161,17 @@ static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned long rate,
if (!rate)
return -EINVAL;
- /* PFD can NOT change rate without gating */
- WARN_ON(clk_pfdv2_is_enabled(hw));
+ /*
+	 * The PFD can NOT change rate without gating.
+	 * The PFDs may be enabled in HW by default with no consumer
+	 * yet using them, so the enable count is 0 and
+	 * CLK_SET_RATE_GATE cannot block the set_rate op,
+	 * especially for 'assigned-clock-xxx' properties. To keep
+	 * things simple, just disable the PFD here if it is
+	 * enabled in HW but not in SW.
+ */
+ if (clk_pfdv2_is_enabled(hw))
+ clk_pfdv2_disable(hw);
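+	/* frac = round(parent_rate * 18 / rate): the PFD output is parent_rate * 18 / frac */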
tmp = tmp * 18 + rate / 2;
do_div(tmp, rate);
@@ -191,8 +200,8 @@ static const struct clk_ops clk_pfdv2_ops = {
.is_enabled = clk_pfdv2_is_enabled,
};
-struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
- void __iomem *reg, u8 idx)
+struct clk_hw *imx_clk_hw_pfdv2(enum imx_pfdv2_type type, const char *name,
+ const char *parent_name, void __iomem *reg, u8 idx)
{
struct clk_init_data init;
struct clk_pfdv2 *pfd;
@@ -214,7 +223,10 @@ struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
init.ops = &clk_pfdv2_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
+ if (type == IMX_PFDV2_IMX7ULP)
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
+ else
+ init.flags = CLK_SET_RATE_GATE;
pfd->hw.init = &init;
@@ -227,3 +239,4 @@ struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
return hw;
}
+EXPORT_SYMBOL_GPL(imx_clk_hw_pfdv2);
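
The disable-before-set_rate change above exists because clk_set_rate() can arrive while the framework enable count is zero even though the hardware gate is open (e.g. left enabled by the ROM), and CLK_SET_RATE_GATE only consults the software count. A minimal sketch of that sequence, with an assumed consumer device and an illustrative clock name and rate:

	struct clk *pfd = devm_clk_get(dev, "spll3_pfd0");

	if (IS_ERR(pfd))
		return PTR_ERR(pfd);

	/*
	 * No clk_prepare_enable() has run, so enable_count == 0 and the
	 * CLK_SET_RATE_GATE check passes; if the ROM left the PFD gate
	 * open, clk_pfdv2_set_rate() now closes it before reprogramming.
	 */
	clk_set_rate(pfd, 392727272);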
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index 8ec703f27417..6e7e34571fc8 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -23,14 +23,17 @@
/* PLL Configuration Register (xPLLCFG) */
#define PLL_CFG_OFFSET 0x08
+#define IMX8ULP_PLL_CFG_OFFSET 0x10
#define BP_PLL_MULT 16
#define BM_PLL_MULT (0x7f << 16)
/* PLL Numerator Register (xPLLNUM) */
#define PLL_NUM_OFFSET 0x10
+#define IMX8ULP_PLL_NUM_OFFSET 0x1c
/* PLL Denominator Register (xPLLDENOM) */
#define PLL_DENOM_OFFSET 0x14
+#define IMX8ULP_PLL_DENOM_OFFSET 0x18
#define MAX_MFD 0x3fffffff
#define DEFAULT_MFD 1000000
@@ -38,6 +41,9 @@
struct clk_pllv4 {
struct clk_hw hw;
void __iomem *base;
+ u32 cfg_offset;
+ u32 num_offset;
+ u32 denom_offset;
};
/* Valid PLL MULT Table */
@@ -72,12 +78,12 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
u32 mult, mfn, mfd;
u64 temp64;
- mult = readl_relaxed(pll->base + PLL_CFG_OFFSET);
+ mult = readl_relaxed(pll->base + pll->cfg_offset);
mult &= BM_PLL_MULT;
mult >>= BP_PLL_MULT;
- mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
- mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+ mfn = readl_relaxed(pll->base + pll->num_offset);
+ mfd = readl_relaxed(pll->base + pll->denom_offset);
temp64 = parent_rate;
temp64 *= mfn;
do_div(temp64, mfd);
@@ -165,13 +171,13 @@ static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate,
do_div(temp64, parent_rate);
mfn = temp64;
- val = readl_relaxed(pll->base + PLL_CFG_OFFSET);
+ val = readl_relaxed(pll->base + pll->cfg_offset);
val &= ~BM_PLL_MULT;
val |= mult << BP_PLL_MULT;
- writel_relaxed(val, pll->base + PLL_CFG_OFFSET);
+ writel_relaxed(val, pll->base + pll->cfg_offset);
- writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
- writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+ writel_relaxed(mfn, pll->base + pll->num_offset);
+ writel_relaxed(mfd, pll->base + pll->denom_offset);
return 0;
}
@@ -207,8 +213,8 @@ static const struct clk_ops clk_pllv4_ops = {
.is_prepared = clk_pllv4_is_prepared,
};
-struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
- void __iomem *base)
+struct clk_hw *imx_clk_hw_pllv4(enum imx_pllv4_type type, const char *name,
+ const char *parent_name, void __iomem *base)
{
struct clk_pllv4 *pll;
struct clk_hw *hw;
@@ -221,6 +227,16 @@ struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
pll->base = base;
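+	/* the i.MX8ULP variant relocated the CFG/NUM/DENOM registers; pick offsets per type */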
+ if (type == IMX_PLLV4_IMX8ULP) {
+ pll->cfg_offset = IMX8ULP_PLL_CFG_OFFSET;
+ pll->num_offset = IMX8ULP_PLL_NUM_OFFSET;
+ pll->denom_offset = IMX8ULP_PLL_DENOM_OFFSET;
+ } else {
+ pll->cfg_offset = PLL_CFG_OFFSET;
+ pll->num_offset = PLL_NUM_OFFSET;
+ pll->denom_offset = PLL_DENOM_OFFSET;
+ }
+
init.name = name;
init.ops = &clk_pllv4_ops;
init.parent_names = &parent_name;
@@ -238,3 +254,4 @@ struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
return hw;
}
+EXPORT_SYMBOL_GPL(imx_clk_hw_pllv4);
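
The rate computation itself is unchanged by the offset split: rate = parent * mult + parent * mfn / mfd. A standalone sketch of the arithmetic with illustrative register values (24 MHz parent, mult 22, zero fractional part), mirroring clk_pllv4_recalc_rate():

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t parent = 24000000, temp64;
		uint32_t mult = 22, mfn = 0, mfd = 1000000;

		temp64 = parent * (uint64_t)mfn;
		temp64 /= mfd;		/* fractional contribution */
		/* integer plus fractional contribution: 528000000 */
		printf("%llu\n", (unsigned long long)(parent * mult + temp64));
		return 0;
	}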
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index e144f983fd8c..819949973db1 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -42,6 +42,16 @@ enum imx_pll14xx_type {
PLL_1443X,
};
+enum imx_pllv4_type {
+ IMX_PLLV4_IMX7ULP,
+ IMX_PLLV4_IMX8ULP,
+};
+
+enum imx_pfdv2_type {
+ IMX_PFDV2_IMX7ULP,
+ IMX_PFDV2_IMX8ULP,
+};
+
/* NOTE: Rate table should be kept sorted in descending order. */
struct imx_pll14xx_rate_table {
unsigned int rate;
@@ -88,9 +98,6 @@ extern struct imx_pll14xx_clk imx_1443x_dram_pll;
#define imx_clk_divider(name, parent, reg, shift, width) \
to_clk(imx_clk_hw_divider(name, parent, reg, shift, width))
-#define imx_clk_divider2(name, parent, reg, shift, width) \
- to_clk(imx_clk_hw_divider2(name, parent, reg, shift, width))
-
#define imx_clk_divider_flags(name, parent, reg, shift, width, flags) \
to_clk(imx_clk_hw_divider_flags(name, parent, reg, shift, width, flags))
@@ -103,40 +110,93 @@ extern struct imx_pll14xx_clk imx_1443x_dram_pll;
#define imx_clk_gate2(name, parent, reg, shift) \
to_clk(imx_clk_hw_gate2(name, parent, reg, shift))
+#define imx_clk_gate2_cgr(name, parent, reg, shift, cgr_val) \
+ to_clk(__imx_clk_hw_gate2(name, parent, reg, shift, cgr_val, 0, NULL))
+
#define imx_clk_gate2_flags(name, parent, reg, shift, flags) \
to_clk(imx_clk_hw_gate2_flags(name, parent, reg, shift, flags))
-#define imx_clk_gate2_shared2(name, parent, reg, shift, share_count) \
- to_clk(imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count))
-
-#define imx_clk_gate3(name, parent, reg, shift) \
- to_clk(imx_clk_hw_gate3(name, parent, reg, shift))
-
-#define imx_clk_gate4(name, parent, reg, shift) \
- to_clk(imx_clk_hw_gate4(name, parent, reg, shift))
-
#define imx_clk_mux(name, reg, shift, width, parents, num_parents) \
to_clk(imx_clk_hw_mux(name, reg, shift, width, parents, num_parents))
+#define imx_clk_mux_flags(name, reg, shift, width, parents, num_parents, flags) \
+ to_clk(imx_clk_hw_mux_flags(name, reg, shift, width, parents, num_parents, flags))
+
+#define imx_clk_mux2_flags(name, reg, shift, width, parents, num_parents, flags) \
+ to_clk(imx_clk_hw_mux2_flags(name, reg, shift, width, parents, num_parents, flags))
+
#define imx_clk_pllv1(type, name, parent, base) \
to_clk(imx_clk_hw_pllv1(type, name, parent, base))
#define imx_clk_pllv2(name, parent, base) \
to_clk(imx_clk_hw_pllv2(name, parent, base))
-#define imx_clk_frac_pll(name, parent_name, base) \
- to_clk(imx_clk_hw_frac_pll(name, parent_name, base))
+
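+/*
+ * All gate wrappers below funnel into __imx_clk_hw_gate() or
+ * __imx_clk_hw_gate2(): the "2" variants use the 2-bit CGR-style gate,
+ * and the "3"/"4" variants add CLK_OPS_PARENT_ENABLE on top.
+ */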
+#define imx_clk_hw_gate(name, parent, reg, shift) \
+ imx_clk_hw_gate_flags(name, parent, reg, shift, 0)
+
+#define imx_clk_hw_gate2(name, parent, reg, shift) \
+ imx_clk_hw_gate2_flags(name, parent, reg, shift, 0)
+
+#define imx_clk_hw_gate_dis(name, parent, reg, shift) \
+ imx_clk_hw_gate_dis_flags(name, parent, reg, shift, 0)
+
+#define imx_clk_hw_gate_dis_flags(name, parent, reg, shift, flags) \
+ __imx_clk_hw_gate(name, parent, reg, shift, flags, CLK_GATE_SET_TO_DISABLE)
+
+#define imx_clk_hw_gate_flags(name, parent, reg, shift, flags) \
+ __imx_clk_hw_gate(name, parent, reg, shift, flags, 0)
+
+#define imx_clk_hw_gate2_flags(name, parent, reg, shift, flags) \
+ __imx_clk_hw_gate2(name, parent, reg, shift, 0x3, flags, NULL)
+
+#define imx_clk_hw_gate2_shared(name, parent, reg, shift, shared_count) \
+ __imx_clk_hw_gate2(name, parent, reg, shift, 0x3, 0, shared_count)
-#define imx_clk_sscg_pll(name, parent_names, num_parents, parent,\
- bypass1, bypass2, base, flags) \
- to_clk(imx_clk_hw_sscg_pll(name, parent_names, num_parents, parent,\
- bypass1, bypass2, base, flags))
+#define imx_clk_hw_gate2_shared2(name, parent, reg, shift, shared_count) \
+ __imx_clk_hw_gate2(name, parent, reg, shift, 0x3, CLK_OPS_PARENT_ENABLE, shared_count)
-struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
- void __iomem *base, const struct imx_pll14xx_clk *pll_clk);
+#define imx_clk_hw_gate3(name, parent, reg, shift) \
+ imx_clk_hw_gate3_flags(name, parent, reg, shift, 0)
-#define imx_clk_pll14xx(name, parent_name, base, pll_clk) \
- to_clk(imx_clk_hw_pll14xx(name, parent_name, base, pll_clk))
+#define imx_clk_hw_gate3_flags(name, parent, reg, shift, flags) \
+ __imx_clk_hw_gate(name, parent, reg, shift, flags | CLK_OPS_PARENT_ENABLE, 0)
+
+#define imx_clk_hw_gate4(name, parent, reg, shift) \
+ imx_clk_hw_gate4_flags(name, parent, reg, shift, 0)
+
+#define imx_clk_hw_gate4_flags(name, parent, reg, shift, flags) \
+ imx_clk_hw_gate2_flags(name, parent, reg, shift, flags | CLK_OPS_PARENT_ENABLE)
+
+#define imx_clk_hw_mux2(name, reg, shift, width, parents, num_parents) \
+ imx_clk_hw_mux2_flags(name, reg, shift, width, parents, num_parents, 0)
+
+#define imx_clk_hw_mux(name, reg, shift, width, parents, num_parents) \
+ __imx_clk_hw_mux(name, reg, shift, width, parents, num_parents, 0, 0)
+
+#define imx_clk_hw_mux_flags(name, reg, shift, width, parents, num_parents, flags) \
+ __imx_clk_hw_mux(name, reg, shift, width, parents, num_parents, flags, 0)
+
+#define imx_clk_hw_mux_ldb(name, reg, shift, width, parents, num_parents) \
+ __imx_clk_hw_mux(name, reg, shift, width, parents, num_parents, CLK_SET_RATE_PARENT, CLK_MUX_READ_ONLY)
+
+#define imx_clk_hw_mux2_flags(name, reg, shift, width, parents, num_parents, flags) \
+ __imx_clk_hw_mux(name, reg, shift, width, parents, num_parents, flags | CLK_OPS_PARENT_ENABLE, 0)
+
+#define imx_clk_hw_divider(name, parent, reg, shift, width) \
+ __imx_clk_hw_divider(name, parent, reg, shift, width, CLK_SET_RATE_PARENT)
+
+#define imx_clk_hw_divider2(name, parent, reg, shift, width) \
+ __imx_clk_hw_divider(name, parent, reg, shift, width, \
+ CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE)
+
+#define imx_clk_hw_divider_flags(name, parent, reg, shift, width, flags) \
+ __imx_clk_hw_divider(name, parent, reg, shift, width, flags)
+
+#define imx_clk_hw_pll14xx(name, parent_name, base, pll_clk) \
+ imx_dev_clk_hw_pll14xx(NULL, name, parent_name, base, pll_clk)
struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
const char *parent_name, void __iomem *base,
@@ -191,8 +251,8 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
.kdiv = (_k), \
}
-struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
- void __iomem *base);
+struct clk_hw *imx_clk_hw_pllv4(enum imx_pllv4_type type, const char *name,
+ const char *parent_name, void __iomem *base);
struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
@@ -215,8 +275,8 @@ struct clk_hw *imx_clk_hw_gate_exclusive(const char *name, const char *parent,
struct clk_hw *imx_clk_hw_pfd(const char *name, const char *parent_name,
void __iomem *reg, u8 idx);
-struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
- void __iomem *reg, u8 idx);
+struct clk_hw *imx_clk_hw_pfdv2(enum imx_pfdv2_type type, const char *name,
+ const char *parent_name, void __iomem *reg, u8 idx);
struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name,
void __iomem *reg, u8 shift, u8 width,
@@ -232,6 +292,12 @@ struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
bool rate_present, bool gate_present,
void __iomem *reg);
+struct clk_hw *imx8ulp_clk_hw_composite(const char *name,
+ const char * const *parent_names,
+ int num_parents, bool mux_present,
+ bool rate_present, bool gate_present,
+ void __iomem *reg, bool has_swrst);
+
struct clk_hw *imx_clk_hw_fixup_divider(const char *name, const char *parent,
void __iomem *reg, u8 shift, u8 width,
void (*fixup)(u32 *val));
@@ -247,27 +313,11 @@ static inline struct clk *to_clk(struct clk_hw *hw)
return hw->clk;
}
-static inline struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
- void __iomem *base,
- const struct imx_pll14xx_clk *pll_clk)
-{
- return imx_dev_clk_hw_pll14xx(NULL, name, parent_name, base, pll_clk);
-}
-
static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate)
{
return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate);
}
-static inline struct clk_hw *imx_clk_hw_mux_ldb(const char *name, void __iomem *reg,
- u8 shift, u8 width, const char * const *parents,
- int num_parents)
-{
- return clk_hw_register_mux(NULL, name, parents, num_parents,
- CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
- shift, width, CLK_MUX_READ_ONLY, &imx_ccm_lock);
-}
-
static inline struct clk_hw *imx_clk_hw_fixed_factor(const char *name,
const char *parent, unsigned int mult, unsigned int div)
{
@@ -275,16 +325,7 @@ static inline struct clk_hw *imx_clk_hw_fixed_factor(const char *name,
CLK_SET_RATE_PARENT, mult, div);
}
-static inline struct clk_hw *imx_clk_hw_divider(const char *name,
- const char *parent,
- void __iomem *reg, u8 shift,
- u8 width)
-{
- return clk_hw_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_divider_flags(const char *name,
+static inline struct clk_hw *__imx_clk_hw_divider(const char *name,
const char *parent,
void __iomem *reg, u8 shift,
u8 width, unsigned long flags)
@@ -293,237 +334,31 @@ static inline struct clk_hw *imx_clk_hw_divider_flags(const char *name,
reg, shift, width, 0, &imx_ccm_lock);
}
-static inline struct clk_hw *imx_clk_hw_divider2(const char *name, const char *parent,
- void __iomem *reg, u8 shift, u8 width)
-{
- return clk_hw_register_divider(NULL, name, parent,
- CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk *imx_clk_divider2_flags(const char *name,
- const char *parent, void __iomem *reg, u8 shift, u8 width,
- unsigned long flags)
-{
- return clk_register_divider(NULL, name, parent,
- flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate_flags(const char *name, const char *parent,
- void __iomem *reg, u8 shift, unsigned long flags)
-{
- return clk_hw_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
- shift, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_dev_clk_hw_gate(struct device *dev, const char *name,
- const char *parent, void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate(dev, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate_dis(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate_dis_flags(const char *name, const char *parent,
- void __iomem *reg, u8 shift, unsigned long flags)
+static inline struct clk_hw *__imx_clk_hw_gate(const char *name, const char *parent,
+ void __iomem *reg, u8 shift,
+ unsigned long flags,
+ unsigned long clk_gate_flags)
{
return clk_hw_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
- shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock);
+ shift, clk_gate_flags, &imx_ccm_lock);
}
-static inline struct clk_hw *imx_clk_hw_gate2(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate2_flags(const char *name, const char *parent,
- void __iomem *reg, u8 shift, unsigned long flags)
+static inline struct clk_hw *__imx_clk_hw_gate2(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, u8 cgr_val,
+ unsigned long flags,
+ unsigned int *share_count)
{
return clk_hw_register_gate2(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate2_shared(const char *name,
- const char *parent, void __iomem *reg, u8 shift,
- unsigned int *share_count)
-{
- return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0x3, 0, &imx_ccm_lock, share_count);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name,
- const char *parent, void __iomem *reg, u8 shift,
- unsigned int *share_count)
-{
- return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
- CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0x3, 0,
- &imx_ccm_lock, share_count);
-}
-
-static inline struct clk_hw *imx_dev_clk_hw_gate_shared(struct device *dev,
- const char *name, const char *parent,
- void __iomem *reg, u8 shift,
- unsigned int *share_count)
-{
- return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
- CLK_OPS_PARENT_ENABLE, reg, shift, 0x1,
- 0x1, 0, &imx_ccm_lock, share_count);
-}
-
-static inline struct clk *imx_clk_gate2_cgr(const char *name,
- const char *parent, void __iomem *reg, u8 shift, u8 cgr_val)
-{
- return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, cgr_val, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate3(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate(NULL, name, parent,
- CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate3_flags(const char *name,
- const char *parent, void __iomem *reg, u8 shift,
- unsigned long flags)
-{
- return clk_hw_register_gate(NULL, name, parent,
- flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0, &imx_ccm_lock);
-}
-
-#define imx_clk_gate3_flags(name, parent, reg, shift, flags) \
- to_clk(imx_clk_hw_gate3_flags(name, parent, reg, shift, flags))
-
-static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate2(NULL, name, parent,
- CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,
- const char *parent, void __iomem *reg, u8 shift,
- unsigned long flags)
-{
- return clk_hw_register_gate2(NULL, name, parent,
- flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-#define imx_clk_gate4_flags(name, parent, reg, shift, flags) \
- to_clk(imx_clk_hw_gate4_flags(name, parent, reg, shift, flags))
-
-static inline struct clk_hw *imx_clk_hw_mux(const char *name, void __iomem *reg,
- u8 shift, u8 width, const char * const *parents,
- int num_parents)
-{
- return clk_hw_register_mux(NULL, name, parents, num_parents,
- CLK_SET_RATE_NO_REPARENT, reg, shift,
- width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_dev_clk_hw_mux(struct device *dev,
- const char *name, void __iomem *reg, u8 shift,
- u8 width, const char * const *parents, int num_parents)
-{
- return clk_hw_register_mux(dev, name, parents, num_parents,
- CLK_SET_RATE_NO_REPARENT | CLK_SET_PARENT_GATE,
- reg, shift, width, 0, &imx_ccm_lock);
+ shift, cgr_val, 0x3, 0, &imx_ccm_lock, share_count);
}
-static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
+static inline struct clk_hw *__imx_clk_hw_mux(const char *name, void __iomem *reg,
u8 shift, u8 width, const char * const *parents,
- int num_parents)
-{
- return clk_register_mux(NULL, name, parents, num_parents,
- CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_mux2(const char *name, void __iomem *reg,
- u8 shift, u8 width,
- const char * const *parents,
- int num_parents)
-{
- return clk_hw_register_mux(NULL, name, parents, num_parents,
- CLK_SET_RATE_NO_REPARENT |
- CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk *imx_clk_mux_flags(const char *name,
- void __iomem *reg, u8 shift, u8 width,
- const char * const *parents, int num_parents,
- unsigned long flags)
-{
- return clk_register_mux(NULL, name, parents, num_parents,
- flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0,
- &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_mux2_flags(const char *name,
- void __iomem *reg, u8 shift, u8 width,
- const char * const *parents,
- int num_parents, unsigned long flags)
-{
- return clk_hw_register_mux(NULL, name, parents, num_parents,
- flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk *imx_clk_mux2_flags(const char *name,
- void __iomem *reg, u8 shift, u8 width,
- const char * const *parents,
- int num_parents, unsigned long flags)
-{
- return clk_register_mux(NULL, name, parents, num_parents,
- flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_mux_flags(const char *name,
- void __iomem *reg, u8 shift,
- u8 width,
- const char * const *parents,
- int num_parents,
- unsigned long flags)
+ int num_parents, unsigned long flags, unsigned long clk_mux_flags)
{
return clk_hw_register_mux(NULL, name, parents, num_parents,
- flags | CLK_SET_RATE_NO_REPARENT,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_dev_clk_hw_mux_flags(struct device *dev,
- const char *name,
- void __iomem *reg, u8 shift,
- u8 width,
- const char * const *parents,
- int num_parents,
- unsigned long flags)
-{
- return clk_hw_register_mux(dev, name, parents, num_parents,
- flags | CLK_SET_RATE_NO_REPARENT,
- reg, shift, width, 0, &imx_ccm_lock);
+ flags | CLK_SET_RATE_NO_REPARENT, reg, shift,
+ width, clk_mux_flags, &imx_ccm_lock);
}
struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
@@ -534,65 +369,55 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
#define IMX_COMPOSITE_BUS BIT(1)
#define IMX_COMPOSITE_FW_MANAGED BIT(2)
-struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
+#define IMX_COMPOSITE_CLK_FLAGS_DEFAULT \
+ (CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+#define IMX_COMPOSITE_CLK_FLAGS_CRITICAL \
+ (IMX_COMPOSITE_CLK_FLAGS_DEFAULT | CLK_IS_CRITICAL)
+#define IMX_COMPOSITE_CLK_FLAGS_GET_RATE_NO_CACHE \
+ (IMX_COMPOSITE_CLK_FLAGS_DEFAULT | CLK_GET_RATE_NOCACHE)
+#define IMX_COMPOSITE_CLK_FLAGS_CRITICAL_GET_RATE_NO_CACHE \
+ (IMX_COMPOSITE_CLK_FLAGS_GET_RATE_NO_CACHE | CLK_IS_CRITICAL)
+
+struct clk_hw *__imx8m_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents,
void __iomem *reg,
u32 composite_flags,
unsigned long flags);
+#define _imx8m_clk_hw_composite(name, parent_names, reg, composite_flags, flags) \
+ __imx8m_clk_hw_composite(name, parent_names, \
+ ARRAY_SIZE(parent_names), reg, composite_flags, flags)
+
+#define imx8m_clk_hw_composite(name, parent_names, reg) \
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_CORE, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
+
+#define imx8m_clk_hw_composite_critical(name, parent_names, reg) \
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_CORE, IMX_COMPOSITE_CLK_FLAGS_CRITICAL)
+
#define imx8m_clk_hw_composite_bus(name, parent_names, reg) \
- imx8m_clk_hw_composite_flags(name, parent_names, \
- ARRAY_SIZE(parent_names), reg, \
- IMX_COMPOSITE_BUS, \
- CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
#define imx8m_clk_hw_composite_bus_critical(name, parent_names, reg) \
- imx8m_clk_hw_composite_flags(name, parent_names, ARRAY_SIZE(parent_names), reg, \
- IMX_COMPOSITE_BUS, \
- CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE | CLK_IS_CRITICAL)
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_CRITICAL)
#define imx8m_clk_hw_composite_core(name, parent_names, reg) \
- imx8m_clk_hw_composite_flags(name, parent_names, \
- ARRAY_SIZE(parent_names), reg, \
- IMX_COMPOSITE_CORE, \
- CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
-
-#define imx8m_clk_composite_flags(name, parent_names, num_parents, reg, \
- flags) \
- to_clk(imx8m_clk_hw_composite_flags(name, parent_names, \
- num_parents, reg, 0, flags))
-
-#define __imx8m_clk_hw_composite(name, parent_names, reg, flags) \
- imx8m_clk_hw_composite_flags(name, parent_names, \
- ARRAY_SIZE(parent_names), reg, 0, \
- flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
-
-#define __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, flags) \
- imx8m_clk_hw_composite_flags(name, parent_names, \
- ARRAY_SIZE(parent_names), reg, IMX_COMPOSITE_FW_MANAGED, \
- flags | CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_CORE, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
#define imx8m_clk_hw_fw_managed_composite(name, parent_names, reg) \
- __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, 0)
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_FW_MANAGED, \
+ IMX_COMPOSITE_CLK_FLAGS_GET_RATE_NO_CACHE)
#define imx8m_clk_hw_fw_managed_composite_critical(name, parent_names, reg) \
- __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, CLK_IS_CRITICAL)
-
-#define __imx8m_clk_composite(name, parent_names, reg, flags) \
- to_clk(__imx8m_clk_hw_composite(name, parent_names, reg, flags))
-
-#define imx8m_clk_hw_composite(name, parent_names, reg) \
- __imx8m_clk_hw_composite(name, parent_names, reg, 0)
-
-#define imx8m_clk_composite(name, parent_names, reg) \
- __imx8m_clk_composite(name, parent_names, reg, 0)
-
-#define imx8m_clk_hw_composite_critical(name, parent_names, reg) \
- __imx8m_clk_hw_composite(name, parent_names, reg, CLK_IS_CRITICAL)
-
-#define imx8m_clk_composite_critical(name, parent_names, reg) \
- __imx8m_clk_composite(name, parent_names, reg, CLK_IS_CRITICAL)
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_FW_MANAGED, \
+ IMX_COMPOSITE_CLK_FLAGS_CRITICAL_GET_RATE_NO_CACHE)
struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg, u8 shift, u8 width,
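The refactor above collapses the many per-variant i.MX gate and mux helpers into __imx_clk_hw_gate2() and __imx_clk_hw_mux(), which take the CGR value, flags, and share count explicitly. A sketch of how the removed helpers can be re-expressed as thin macros over the consolidated functions; the exact macro set in the real clk.h may differ:

#define imx_clk_hw_gate2(name, parent, reg, shift) \
	__imx_clk_hw_gate2(name, parent, reg, shift, 0x3, 0, NULL)

#define imx_clk_hw_gate2_shared(name, parent, reg, shift, share_count) \
	__imx_clk_hw_gate2(name, parent, reg, shift, 0x3, 0, share_count)

#define imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count) \
	__imx_clk_hw_gate2(name, parent, reg, shift, 0x3, \
			   CLK_OPS_PARENT_ENABLE, share_count)

#define imx_clk_hw_mux(name, reg, shift, width, parents, num_parents) \
	__imx_clk_hw_mux(name, reg, shift, width, parents, num_parents, 0, 0)

#define imx_clk_hw_mux_flags(name, reg, shift, width, parents, num_parents, flags) \
	__imx_clk_hw_mux(name, reg, shift, width, parents, num_parents, flags, 0)
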
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 439b7c8d0d07..3ce6fb04d8ff 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -6,7 +6,7 @@ menu "Clock driver for MediaTek SoC"
depends on ARCH_MEDIATEK || COMPILE_TEST
config COMMON_CLK_MEDIATEK
- bool
+ tristate
select RESET_CONTROLLER
help
MediaTek SoCs' clock support.
@@ -204,7 +204,7 @@ config COMMON_CLK_MT6765_MIPI2BSYS
This driver supports MediaTek MT6765 mipi2bsys clocks.
config COMMON_CLK_MT6779
- bool "Clock driver for MediaTek MT6779"
+ tristate "Clock driver for MediaTek MT6779"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK && ARM64
@@ -212,49 +212,49 @@ config COMMON_CLK_MT6779
This driver supports MediaTek MT6779 basic clocks.
config COMMON_CLK_MT6779_MMSYS
- bool "Clock driver for MediaTek MT6779 mmsys"
+ tristate "Clock driver for MediaTek MT6779 mmsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 mmsys clocks.
config COMMON_CLK_MT6779_IMGSYS
- bool "Clock driver for MediaTek MT6779 imgsys"
+ tristate "Clock driver for MediaTek MT6779 imgsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 imgsys clocks.
config COMMON_CLK_MT6779_IPESYS
- bool "Clock driver for MediaTek MT6779 ipesys"
+ tristate "Clock driver for MediaTek MT6779 ipesys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 ipesys clocks.
config COMMON_CLK_MT6779_CAMSYS
- bool "Clock driver for MediaTek MT6779 camsys"
+ tristate "Clock driver for MediaTek MT6779 camsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 camsys clocks.
config COMMON_CLK_MT6779_VDECSYS
- bool "Clock driver for MediaTek MT6779 vdecsys"
+ tristate "Clock driver for MediaTek MT6779 vdecsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 vdecsys clocks.
config COMMON_CLK_MT6779_VENCSYS
- bool "Clock driver for MediaTek MT6779 vencsys"
+ tristate "Clock driver for MediaTek MT6779 vencsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 vencsys clocks.
config COMMON_CLK_MT6779_MFGCFG
- bool "Clock driver for MediaTek MT6779 mfgcfg"
+ tristate "Clock driver for MediaTek MT6779 mfgcfg"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 mfgcfg clocks.
config COMMON_CLK_MT6779_AUDSYS
- bool "Clock driver for Mediatek MT6779 audsys"
+ tristate "Clock driver for Mediatek MT6779 audsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 audsys clocks.
@@ -575,6 +575,14 @@ config COMMON_CLK_MT8192_VENCSYS
help
This driver supports MediaTek MT8192 vencsys clocks.
+config COMMON_CLK_MT8195
+ bool "Clock driver for MediaTek MT8195"
+ depends on ARM64 || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8195 clocks.
+
config COMMON_CLK_MT8516
bool "Clock driver for MediaTek MT8516"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 15bc045f0b71..dc96038a0155 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -80,5 +80,13 @@ obj-$(CONFIG_COMMON_CLK_MT8192_MSDC) += clk-mt8192-msdc.o
obj-$(CONFIG_COMMON_CLK_MT8192_SCP_ADSP) += clk-mt8192-scp_adsp.o
obj-$(CONFIG_COMMON_CLK_MT8192_VDECSYS) += clk-mt8192-vdec.o
obj-$(CONFIG_COMMON_CLK_MT8192_VENCSYS) += clk-mt8192-venc.o
+obj-$(CONFIG_COMMON_CLK_MT8195) += clk-mt8195-apmixedsys.o clk-mt8195-topckgen.o \
+ clk-mt8195-peri_ao.o clk-mt8195-infra_ao.o \
+ clk-mt8195-cam.o clk-mt8195-ccu.o clk-mt8195-img.o \
+ clk-mt8195-ipe.o clk-mt8195-mfg.o clk-mt8195-scp_adsp.o \
+ clk-mt8195-vdec.o clk-mt8195-vdo0.o clk-mt8195-vdo1.o \
+ clk-mt8195-venc.o clk-mt8195-vpp0.o clk-mt8195-vpp1.o \
+ clk-mt8195-wpe.o clk-mt8195-imp_iic_wrap.o \
+ clk-mt8195-apusys_pll.o
obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o
obj-$(CONFIG_COMMON_CLK_MT8516_AUDSYS) += clk-mt8516-aud.o
diff --git a/drivers/clk/mediatek/clk-apmixed.c b/drivers/clk/mediatek/clk-apmixed.c
index 258d128370f2..caa9119413f1 100644
--- a/drivers/clk/mediatek/clk-apmixed.c
+++ b/drivers/clk/mediatek/clk-apmixed.c
@@ -5,6 +5,7 @@
*/
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/slab.h>
@@ -97,3 +98,5 @@ struct clk * __init mtk_clk_register_ref2usb_tx(const char *name,
return clk;
}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c
index 61eeae4e60fb..e188018bc906 100644
--- a/drivers/clk/mediatek/clk-cpumux.c
+++ b/drivers/clk/mediatek/clk-cpumux.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include "clk-mtk.h"
@@ -106,3 +107,5 @@ int mtk_clk_register_cpumuxes(struct device_node *node,
return 0;
}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index a35cf0b22150..b02d2f74dd0d 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/clkdev.h>
+#include <linux/module.h>
#include "clk-mtk.h"
#include "clk-gate.h"
@@ -122,24 +123,28 @@ const struct clk_ops mtk_clk_gate_ops_setclr = {
.enable = mtk_cg_enable,
.disable = mtk_cg_disable,
};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_setclr);
const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
.is_enabled = mtk_cg_bit_is_set,
.enable = mtk_cg_enable_inv,
.disable = mtk_cg_disable_inv,
};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_setclr_inv);
const struct clk_ops mtk_clk_gate_ops_no_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable_no_setclr,
.disable = mtk_cg_disable_no_setclr,
};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr);
const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
.is_enabled = mtk_cg_bit_is_set,
.enable = mtk_cg_enable_inv_no_setclr,
.disable = mtk_cg_disable_inv_no_setclr,
};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
struct clk *mtk_clk_register_gate(
const char *name,
@@ -181,3 +186,6 @@ struct clk *mtk_clk_register_gate(
return clk;
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_gate);
+
+MODULE_LICENSE("GPL");
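The clk-mt6779-* changes below all follow the same builtin-to-module conversion: pull in <linux/module.h>, switch builtin_platform_driver() to module_platform_driver(), and add a MODULE_LICENSE() so the code can load once the Kconfig symbol is tristate. A minimal sketch of the resulting shape (driver and probe names are illustrative):

#include <linux/module.h>
#include <linux/platform_device.h>

static int clk_example_probe(struct platform_device *pdev)
{
	/* a real clock driver registers its clocks here */
	return 0;
}

static struct platform_driver clk_example_drv = {
	.probe = clk_example_probe,
	.driver = {
		.name = "clk-example",
	},
};
module_platform_driver(clk_example_drv);
MODULE_LICENSE("GPL");
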
diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c
index 11b209f95e25..9e889e4c361a 100644
--- a/drivers/clk/mediatek/clk-mt6779-aud.c
+++ b/drivers/clk/mediatek/clk-mt6779-aud.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -114,4 +115,5 @@ static struct platform_driver clk_mt6779_aud_drv = {
},
};
-builtin_platform_driver(clk_mt6779_aud_drv);
+module_platform_driver(clk_mt6779_aud_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c
index 244d4208b7fb..7f07a2a139ac 100644
--- a/drivers/clk/mediatek/clk-mt6779-cam.c
+++ b/drivers/clk/mediatek/clk-mt6779-cam.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt6779-clk.h>
@@ -63,4 +64,5 @@ static struct platform_driver clk_mt6779_cam_drv = {
},
};
-builtin_platform_driver(clk_mt6779_cam_drv);
+module_platform_driver(clk_mt6779_cam_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c
index 26292a45c613..f0961fa1a286 100644
--- a/drivers/clk/mediatek/clk-mt6779-img.c
+++ b/drivers/clk/mediatek/clk-mt6779-img.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt6779-clk.h>
@@ -55,4 +56,5 @@ static struct platform_driver clk_mt6779_img_drv = {
},
};
-builtin_platform_driver(clk_mt6779_img_drv);
+module_platform_driver(clk_mt6779_img_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c
index bb519075639c..8c6f3e154bf3 100644
--- a/drivers/clk/mediatek/clk-mt6779-ipe.c
+++ b/drivers/clk/mediatek/clk-mt6779-ipe.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt6779-clk.h>
@@ -57,4 +58,5 @@ static struct platform_driver clk_mt6779_ipe_drv = {
},
};
-builtin_platform_driver(clk_mt6779_ipe_drv);
+module_platform_driver(clk_mt6779_ipe_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c
index c6ee2a89c070..9f3372886e6b 100644
--- a/drivers/clk/mediatek/clk-mt6779-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6779-mfg.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
@@ -52,4 +53,5 @@ static struct platform_driver clk_mt6779_mfg_drv = {
},
};
-builtin_platform_driver(clk_mt6779_mfg_drv);
+module_platform_driver(clk_mt6779_mfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index 059c1a41ac7a..33946e647122 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt6779-clk.h>
@@ -105,4 +106,5 @@ static struct platform_driver clk_mt6779_mm_drv = {
},
};
-builtin_platform_driver(clk_mt6779_mm_drv);
+module_platform_driver(clk_mt6779_mm_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c
index 1900da2586a1..f4358844c2e0 100644
--- a/drivers/clk/mediatek/clk-mt6779-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6779-vdec.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
@@ -64,4 +65,5 @@ static struct platform_driver clk_mt6779_vdec_drv = {
},
};
-builtin_platform_driver(clk_mt6779_vdec_drv);
+module_platform_driver(clk_mt6779_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c
index b41d1f859edc..ff67084af5aa 100644
--- a/drivers/clk/mediatek/clk-mt6779-venc.c
+++ b/drivers/clk/mediatek/clk-mt6779-venc.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
@@ -55,4 +56,5 @@ static struct platform_driver clk_mt6779_venc_drv = {
},
};
-builtin_platform_driver(clk_mt6779_venc_drv);
+module_platform_driver(clk_mt6779_venc_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 6e0d3a166729..9825385c9f94 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -1314,3 +1315,4 @@ static int __init clk_mt6779_init(void)
}
arch_initcall(clk_mt6779_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
new file mode 100644
index 000000000000..6156ceeed71e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate apmixed_clks[] = {
+ GATE_APMIXED(CLK_APMIXED_PLL_SSUSB26M, "pll_ssusb26m", "clk26m", 1),
+};
+
+#define MT8195_PLL_FMAX (3800UL * MHZ)
+#define MT8195_PLL_FMIN (1500UL * MHZ)
+#define MT8195_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcw_chg_reg, \
+ _en_reg, _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8195_PLL_FMAX, \
+ .fmin = MT8195_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8195_INTEGER_BITS, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .en_reg = _en_reg, \
+ .pll_en_bit = _pll_en_bit, \
+ }
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_NNAPLL, "nnapll", 0x0390, 0x03a0, 0,
+ 0, 0, 22, 0x0398, 24, 0, 0, 0, 0x0398, 0, 0x0398, 0, 9),
+ PLL(CLK_APMIXED_RESPLL, "respll", 0x0190, 0x0320, 0,
+ 0, 0, 22, 0x0198, 24, 0, 0, 0, 0x0198, 0, 0x0198, 0, 9),
+ PLL(CLK_APMIXED_ETHPLL, "ethpll", 0x0360, 0x0370, 0,
+ 0, 0, 22, 0x0368, 24, 0, 0, 0, 0x0368, 0, 0x0368, 0, 9),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0710, 0x0720, 0,
+ 0, 0, 22, 0x0718, 24, 0, 0, 0, 0x0718, 0, 0x0718, 0, 9),
+ PLL(CLK_APMIXED_TVDPLL1, "tvdpll1", 0x00a0, 0x00b0, 0,
+ 0, 0, 22, 0x00a8, 24, 0, 0, 0, 0x00a8, 0, 0x00a8, 0, 9),
+ PLL(CLK_APMIXED_TVDPLL2, "tvdpll2", 0x00c0, 0x00d0, 0,
+ 0, 0, 22, 0x00c8, 24, 0, 0, 0, 0x00c8, 0, 0x00c8, 0, 9),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x00e0, 0x00f0, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x00e8, 24, 0, 0, 0, 0x00e8, 0, 0x00e8, 0, 9),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x01d0, 0x01e0, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x01d8, 24, 0, 0, 0, 0x01d8, 0, 0x01d8, 0, 9),
+ PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x0890, 0x08a0, 0,
+ 0, 0, 22, 0x0898, 24, 0, 0, 0, 0x0898, 0, 0x0898, 0, 9),
+ PLL(CLK_APMIXED_IMGPLL, "imgpll", 0x0100, 0x0110, 0,
+ 0, 0, 22, 0x0108, 24, 0, 0, 0, 0x0108, 0, 0x0108, 0, 9),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x01f0, 0x0700, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x01f8, 24, 0, 0, 0, 0x01f8, 0, 0x01f8, 0, 9),
+ PLL(CLK_APMIXED_HDMIPLL1, "hdmipll1", 0x08c0, 0x08d0, 0,
+ 0, 0, 22, 0x08c8, 24, 0, 0, 0, 0x08c8, 0, 0x08c8, 0, 9),
+ PLL(CLK_APMIXED_HDMIPLL2, "hdmipll2", 0x0870, 0x0880, 0,
+ 0, 0, 22, 0x0878, 24, 0, 0, 0, 0x0878, 0, 0x0878, 0, 9),
+ PLL(CLK_APMIXED_HDMIRX_APLL, "hdmirx_apll", 0x08e0, 0x0dd4, 0,
+ 0, 0, 32, 0x08e8, 24, 0, 0, 0, 0x08ec, 0, 0x08e8, 0, 9),
+ PLL(CLK_APMIXED_USB1PLL, "usb1pll", 0x01a0, 0x01b0, 0,
+ 0, 0, 22, 0x01a8, 24, 0, 0, 0, 0x01a8, 0, 0x01a8, 0, 9),
+ PLL(CLK_APMIXED_ADSPPLL, "adsppll", 0x07e0, 0x07f0, 0,
+ 0, 0, 22, 0x07e8, 24, 0, 0, 0, 0x07e8, 0, 0x07e8, 0, 9),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x07c0, 0x0dc0, 0,
+ 0, 0, 32, 0x07c8, 24, 0x0470, 0x0000, 12, 0x07cc, 0, 0x07c8, 0, 9),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0780, 0x0dc4, 0,
+ 0, 0, 32, 0x0788, 24, 0x0474, 0x0000, 13, 0x078c, 0, 0x0788, 0, 9),
+ PLL(CLK_APMIXED_APLL3, "apll3", 0x0760, 0x0dc8, 0,
+ 0, 0, 32, 0x0768, 24, 0x0478, 0x0000, 14, 0x076c, 0, 0x0768, 0, 9),
+ PLL(CLK_APMIXED_APLL4, "apll4", 0x0740, 0x0dcc, 0,
+ 0, 0, 32, 0x0748, 24, 0x047C, 0x0000, 15, 0x074c, 0, 0x0748, 0, 9),
+ PLL(CLK_APMIXED_APLL5, "apll5", 0x07a0, 0x0dd0, 0x100000,
+ 0, 0, 32, 0x07a8, 24, 0x0480, 0x0000, 16, 0x07ac, 0, 0x07a8, 0, 9),
+ PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0340, 0x0350, 0,
+ 0, 0, 22, 0x0348, 24, 0, 0, 0, 0x0348, 0, 0x0348, 0, 9),
+ PLL(CLK_APMIXED_DGIPLL, "dgipll", 0x0150, 0x0160, 0,
+ 0, 0, 22, 0x0158, 24, 0, 0, 0, 0x0158, 0, 0x0158, 0, 9),
+};
+
+static const struct of_device_id of_match_clk_mt8195_apmixed[] = {
+ { .compatible = "mediatek,mt8195-apmixedsys", },
+ {}
+};
+
+static int clk_mt8195_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ return r;
+
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt8195_apmixed_drv = {
+ .probe = clk_mt8195_apmixed_probe,
+ .driver = {
+ .name = "clk-mt8195-apmixed",
+ .of_match_table = of_match_clk_mt8195_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt8195_apmixed_drv);
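With the apmixedsys clocks exposed through of_clk_src_onecell_get(), consumers reference them by their dt-binding index. A hypothetical consumer sketch (names are illustrative, not from an in-tree driver), assuming a DT property such as clocks = <&apmixedsys CLK_APMIXED_MSDCPLL>:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);	/* first "clocks" entry */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "pll rate: %lu Hz\n", clk_get_rate(clk));
	clk_disable_unprepare(clk);
	return 0;
}
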
diff --git a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
new file mode 100644
index 000000000000..f1c84186346e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#define MT8195_PLL_FMAX (3800UL * MHZ)
+#define MT8195_PLL_FMIN (1500UL * MHZ)
+#define MT8195_INTEGER_BITS (8)
+#define MT8195_PCW_BITS (22)
+#define MT8195_POSDIV_SHIFT (24)
+#define MT8195_PLL_EN_BIT (0)
+#define MT8195_PCW_SHIFT (0)
+
+/*
+ * The "en_reg" and "pcw_chg_reg" fields sit at the standard offsets from the
+ * "reg" field, so they are left zero to select the defaults.
+ * The APU PLLs have no tuner control, so the "tuner_*" fields are zero.
+ * They also have no reset bar or post-divider enable, so "rst_bar_mask" and
+ * "en_mask" are zero.
+ */
+#define PLL(_id, _name, _reg, _pwr_reg, _pd_reg, _pcw_reg) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = 0, \
+ .flags = 0, \
+ .rst_bar_mask = 0, \
+ .fmax = MT8195_PLL_FMAX, \
+ .fmin = MT8195_PLL_FMIN, \
+ .pcwbits = MT8195_PCW_BITS, \
+ .pcwibits = MT8195_INTEGER_BITS, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = MT8195_POSDIV_SHIFT, \
+ .tuner_reg = 0, \
+ .tuner_en_reg = 0, \
+ .tuner_en_bit = 0, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = MT8195_PCW_SHIFT, \
+ .pcw_chg_reg = 0, \
+ .en_reg = 0, \
+ .pll_en_bit = MT8195_PLL_EN_BIT, \
+ }
+
+static const struct mtk_pll_data apusys_plls[] = {
+ PLL(CLK_APUSYS_PLL_APUPLL, "apusys_pll_apupll", 0x008, 0x014, 0x00c, 0x00c),
+ PLL(CLK_APUSYS_PLL_NPUPLL, "apusys_pll_npupll", 0x018, 0x024, 0x01c, 0x01c),
+ PLL(CLK_APUSYS_PLL_APUPLL1, "apusys_pll_apupll1", 0x028, 0x034, 0x02c, 0x02c),
+ PLL(CLK_APUSYS_PLL_APUPLL2, "apusys_pll_apupll2", 0x038, 0x044, 0x03c, 0x03c),
+};
+
+static int clk_mt8195_apusys_pll_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_APUSYS_PLL_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, apusys_plls, ARRAY_SIZE(apusys_plls), clk_data);
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ goto free_apusys_pll_data;
+
+ return r;
+
+free_apusys_pll_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt8195_apusys_pll[] = {
+ { .compatible = "mediatek,mt8195-apusys_pll", },
+ {}
+};
+
+static struct platform_driver clk_mt8195_apusys_pll_drv = {
+ .probe = clk_mt8195_apusys_pll_probe,
+ .driver = {
+ .name = "clk-mt8195-apusys_pll",
+ .of_match_table = of_match_clk_mt8195_apusys_pll,
+ },
+};
+builtin_platform_driver(clk_mt8195_apusys_pll_drv);
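The PLL tables above only describe register layout; the output rate comes from the PCW (feedback divider) and posdiv fields. A sketch of the usual MediaTek recalc arithmetic, assuming the fractional PCW is scaled by 2^(pcwbits - pcwibits) and the post-divider is 2^posdiv; the authoritative version is mtk_pll_recalc_rate() in clk-pll.c:

#include <linux/types.h>

static unsigned long mtk_pll_rate_sketch(u64 fin, u32 pcw,
					 int pcwbits, int pcwibits,
					 u32 posdiv)
{
	int frac_bits = pcwbits - pcwibits;	/* fractional PCW bits */
	u64 vco = (fin * pcw) >> frac_bits;	/* VCO output frequency */

	return (unsigned long)(vco >> posdiv);	/* post-divider is 2^posdiv */
}

For example, with the 26 MHz crystal, pcwbits = 22 and pcwibits = 8 give frac_bits = 14, so a PCW of 0x200000 multiplies the input by 128 for a 3.328 GHz VCO, which posdiv = 1 halves to 1.664 GHz.
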
diff --git a/drivers/clk/mediatek/clk-mt8195-cam.c b/drivers/clk/mediatek/clk-mt8195-cam.c
new file mode 100644
index 000000000000..3d261fc3848e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-cam.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate cam_clks[] = {
+ GATE_CAM(CLK_CAM_LARB13, "cam_larb13", "top_cam", 0),
+ GATE_CAM(CLK_CAM_LARB14, "cam_larb14", "top_cam", 1),
+ GATE_CAM(CLK_CAM_MAIN_CAM, "cam_main_cam", "top_cam", 3),
+ GATE_CAM(CLK_CAM_MAIN_CAMTG, "cam_main_camtg", "top_cam", 4),
+ GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "top_cam", 5),
+ GATE_CAM(CLK_CAM_GCAMSVA, "cam_gcamsva", "top_cam", 6),
+ GATE_CAM(CLK_CAM_GCAMSVB, "cam_gcamsvb", "top_cam", 7),
+ GATE_CAM(CLK_CAM_GCAMSVC, "cam_gcamsvc", "top_cam", 8),
+ GATE_CAM(CLK_CAM_SCAMSA, "cam_scamsa", "top_cam", 9),
+ GATE_CAM(CLK_CAM_SCAMSB, "cam_scamsb", "top_cam", 10),
+ GATE_CAM(CLK_CAM_CAMSV_TOP, "cam_camsv_top", "top_cam", 11),
+ GATE_CAM(CLK_CAM_CAMSV_CQ, "cam_camsv_cq", "top_cam", 12),
+ GATE_CAM(CLK_CAM_ADL, "cam_adl", "top_cam", 16),
+ GATE_CAM(CLK_CAM_ASG, "cam_asg", "top_cam", 17),
+ GATE_CAM(CLK_CAM_PDA, "cam_pda", "top_cam", 18),
+ GATE_CAM(CLK_CAM_FAKE_ENG, "cam_fake_eng", "top_cam", 19),
+ GATE_CAM(CLK_CAM_MAIN_MRAW0, "cam_main_mraw0", "top_cam", 20),
+ GATE_CAM(CLK_CAM_MAIN_MRAW1, "cam_main_mraw1", "top_cam", 21),
+ GATE_CAM(CLK_CAM_MAIN_MRAW2, "cam_main_mraw2", "top_cam", 22),
+ GATE_CAM(CLK_CAM_MAIN_MRAW3, "cam_main_mraw3", "top_cam", 23),
+ GATE_CAM(CLK_CAM_CAM2MM0_GALS, "cam_cam2mm0_gals", "top_cam", 24),
+ GATE_CAM(CLK_CAM_CAM2MM1_GALS, "cam_cam2mm1_gals", "top_cam", 25),
+ GATE_CAM(CLK_CAM_CAM2SYS_GALS, "cam_cam2sys_gals", "top_cam", 26),
+};
+
+static const struct mtk_gate cam_mraw_clks[] = {
+ GATE_CAM(CLK_CAM_MRAW_LARBX, "cam_mraw_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_MRAW_CAMTG, "cam_mraw_camtg", "top_cam", 2),
+ GATE_CAM(CLK_CAM_MRAW_MRAW0, "cam_mraw_mraw0", "top_cam", 3),
+ GATE_CAM(CLK_CAM_MRAW_MRAW1, "cam_mraw_mraw1", "top_cam", 4),
+ GATE_CAM(CLK_CAM_MRAW_MRAW2, "cam_mraw_mraw2", "top_cam", 5),
+ GATE_CAM(CLK_CAM_MRAW_MRAW3, "cam_mraw_mraw3", "top_cam", 6),
+};
+
+static const struct mtk_gate cam_rawa_clks[] = {
+ GATE_CAM(CLK_CAM_RAWA_LARBX, "cam_rawa_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_RAWA_CAM, "cam_rawa_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_RAWA_CAMTG, "cam_rawa_camtg", "top_cam", 2),
+};
+
+static const struct mtk_gate cam_rawb_clks[] = {
+ GATE_CAM(CLK_CAM_RAWB_LARBX, "cam_rawb_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_RAWB_CAM, "cam_rawb_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_RAWB_CAMTG, "cam_rawb_camtg", "top_cam", 2),
+};
+
+static const struct mtk_gate cam_yuva_clks[] = {
+ GATE_CAM(CLK_CAM_YUVA_LARBX, "cam_yuva_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_YUVA_CAM, "cam_yuva_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_YUVA_CAMTG, "cam_yuva_camtg", "top_cam", 2),
+};
+
+static const struct mtk_gate cam_yuvb_clks[] = {
+ GATE_CAM(CLK_CAM_YUVB_LARBX, "cam_yuvb_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_YUVB_CAM, "cam_yuvb_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_YUVB_CAMTG, "cam_yuvb_camtg", "top_cam", 2),
+};
+
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
+};
+
+static const struct mtk_clk_desc cam_mraw_desc = {
+ .clks = cam_mraw_clks,
+ .num_clks = ARRAY_SIZE(cam_mraw_clks),
+};
+
+static const struct mtk_clk_desc cam_rawa_desc = {
+ .clks = cam_rawa_clks,
+ .num_clks = ARRAY_SIZE(cam_rawa_clks),
+};
+
+static const struct mtk_clk_desc cam_rawb_desc = {
+ .clks = cam_rawb_clks,
+ .num_clks = ARRAY_SIZE(cam_rawb_clks),
+};
+
+static const struct mtk_clk_desc cam_yuva_desc = {
+ .clks = cam_yuva_clks,
+ .num_clks = ARRAY_SIZE(cam_yuva_clks),
+};
+
+static const struct mtk_clk_desc cam_yuvb_desc = {
+ .clks = cam_yuvb_clks,
+ .num_clks = ARRAY_SIZE(cam_yuvb_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_cam[] = {
+ {
+ .compatible = "mediatek,mt8195-camsys",
+ .data = &cam_desc,
+ }, {
+ .compatible = "mediatek,mt8195-camsys_mraw",
+ .data = &cam_mraw_desc,
+ }, {
+ .compatible = "mediatek,mt8195-camsys_rawa",
+ .data = &cam_rawa_desc,
+ }, {
+ .compatible = "mediatek,mt8195-camsys_rawb",
+ .data = &cam_rawb_desc,
+ }, {
+ .compatible = "mediatek,mt8195-camsys_yuva",
+ .data = &cam_yuva_desc,
+ }, {
+ .compatible = "mediatek,mt8195-camsys_yuvb",
+ .data = &cam_yuvb_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_cam_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-cam",
+ .of_match_table = of_match_clk_mt8195_cam,
+ },
+};
+builtin_platform_driver(clk_mt8195_cam_drv);
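Most of the MT8195 subsystem drivers in this series delegate to mtk_clk_simple_probe() with a per-compatible mtk_clk_desc. A sketch of that pattern, modeled on the apmixedsys probe above; the in-tree helper in clk-mtk.c may differ in details:

static int mtk_clk_simple_probe_sketch(struct platform_device *pdev)
{
	const struct mtk_clk_desc *mcd = of_device_get_match_data(&pdev->dev);
	struct device_node *node = pdev->dev.of_node;
	struct clk_onecell_data *clk_data;
	int r;

	clk_data = mtk_alloc_clk_data(mcd->num_clks);
	if (!clk_data)
		return -ENOMEM;

	/* register the gate table matched from the OF table ... */
	r = mtk_clk_register_gates(node, mcd->clks, mcd->num_clks, clk_data);
	if (r)
		goto free_data;

	/* ... and expose it through a onecell provider */
	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
	if (r)
		goto free_data;

	return 0;

free_data:
	mtk_free_clk_data(clk_data);
	return r;
}
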
diff --git a/drivers/clk/mediatek/clk-mt8195-ccu.c b/drivers/clk/mediatek/clk-mt8195-ccu.c
new file mode 100644
index 000000000000..f846f1d73605
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-ccu.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs ccu_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CCU(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ccu_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ccu_clks[] = {
+ GATE_CCU(CLK_CCU_LARB18, "ccu_larb18", "top_ccu", 0),
+ GATE_CCU(CLK_CCU_AHB, "ccu_ahb", "top_ccu", 1),
+ GATE_CCU(CLK_CCU_CCU0, "ccu_ccu0", "top_ccu", 2),
+ GATE_CCU(CLK_CCU_CCU1, "ccu_ccu1", "top_ccu", 3),
+};
+
+static const struct mtk_clk_desc ccu_desc = {
+ .clks = ccu_clks,
+ .num_clks = ARRAY_SIZE(ccu_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_ccu[] = {
+ {
+ .compatible = "mediatek,mt8195-ccusys",
+ .data = &ccu_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_ccu_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-ccu",
+ .of_match_table = of_match_clk_mt8195_ccu,
+ },
+};
+builtin_platform_driver(clk_mt8195_ccu_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-img.c b/drivers/clk/mediatek/clk-mt8195-img.c
new file mode 100644
index 000000000000..22b52a8f15fe
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-img.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_LARB9, "img_larb9", "top_img", 0),
+ GATE_IMG(CLK_IMG_TRAW0, "img_traw0", "top_img", 1),
+ GATE_IMG(CLK_IMG_TRAW1, "img_traw1", "top_img", 2),
+ GATE_IMG(CLK_IMG_TRAW2, "img_traw2", "top_img", 3),
+ GATE_IMG(CLK_IMG_TRAW3, "img_traw3", "top_img", 4),
+ GATE_IMG(CLK_IMG_DIP0, "img_dip0", "top_img", 8),
+ GATE_IMG(CLK_IMG_WPE0, "img_wpe0", "top_img", 9),
+ GATE_IMG(CLK_IMG_IPE, "img_ipe", "top_img", 10),
+ GATE_IMG(CLK_IMG_DIP1, "img_dip1", "top_img", 11),
+ GATE_IMG(CLK_IMG_WPE1, "img_wpe1", "top_img", 12),
+ GATE_IMG(CLK_IMG_GALS, "img_gals", "top_img", 31),
+};
+
+static const struct mtk_gate img1_dip_top_clks[] = {
+ GATE_IMG(CLK_IMG1_DIP_TOP_LARB10, "img1_dip_top_larb10", "top_img", 0),
+ GATE_IMG(CLK_IMG1_DIP_TOP_DIP_TOP, "img1_dip_top_dip_top", "top_img", 1),
+};
+
+static const struct mtk_gate img1_dip_nr_clks[] = {
+ GATE_IMG(CLK_IMG1_DIP_NR_RESERVE, "img1_dip_nr_reserve", "top_img", 0),
+ GATE_IMG(CLK_IMG1_DIP_NR_DIP_NR, "img1_dip_nr_dip_nr", "top_img", 1),
+};
+
+static const struct mtk_gate img1_wpe_clks[] = {
+ GATE_IMG(CLK_IMG1_WPE_LARB11, "img1_wpe_larb11", "top_img", 0),
+ GATE_IMG(CLK_IMG1_WPE_WPE, "img1_wpe_wpe", "top_img", 1),
+};
+
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
+
+static const struct mtk_clk_desc img1_dip_top_desc = {
+ .clks = img1_dip_top_clks,
+ .num_clks = ARRAY_SIZE(img1_dip_top_clks),
+};
+
+static const struct mtk_clk_desc img1_dip_nr_desc = {
+ .clks = img1_dip_nr_clks,
+ .num_clks = ARRAY_SIZE(img1_dip_nr_clks),
+};
+
+static const struct mtk_clk_desc img1_wpe_desc = {
+ .clks = img1_wpe_clks,
+ .num_clks = ARRAY_SIZE(img1_wpe_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_img[] = {
+ {
+ .compatible = "mediatek,mt8195-imgsys",
+ .data = &img_desc,
+ }, {
+ .compatible = "mediatek,mt8195-imgsys1_dip_top",
+ .data = &img1_dip_top_desc,
+ }, {
+ .compatible = "mediatek,mt8195-imgsys1_dip_nr",
+ .data = &img1_dip_nr_desc,
+ }, {
+ .compatible = "mediatek,mt8195-imgsys1_wpe",
+ .data = &img1_wpe_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_img_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-img",
+ .of_match_table = of_match_clk_mt8195_img,
+ },
+};
+builtin_platform_driver(clk_mt8195_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
new file mode 100644
index 000000000000..0e2ac0a30aa0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs imp_iic_wrap_cg_regs = {
+ .set_ofs = 0xe08,
+ .clr_ofs = 0xe04,
+ .sta_ofs = 0xe00,
+};
+
+#define GATE_IMP_IIC_WRAP(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &imp_iic_wrap_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_OPS_PARENT_ENABLE)
+
+static const struct mtk_gate imp_iic_wrap_s_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_S_I2C5, "imp_iic_wrap_s_i2c5", "top_i2c", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_S_I2C6, "imp_iic_wrap_s_i2c6", "top_i2c", 1),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_S_I2C7, "imp_iic_wrap_s_i2c7", "top_i2c", 2),
+};
+
+static const struct mtk_gate imp_iic_wrap_w_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C0, "imp_iic_wrap_w_i2c0", "top_i2c", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C1, "imp_iic_wrap_w_i2c1", "top_i2c", 1),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C2, "imp_iic_wrap_w_i2c2", "top_i2c", 2),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C3, "imp_iic_wrap_w_i2c3", "top_i2c", 3),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C4, "imp_iic_wrap_w_i2c4", "top_i2c", 4),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_s_desc = {
+ .clks = imp_iic_wrap_s_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_s_clks),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_w_desc = {
+ .clks = imp_iic_wrap_w_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_w_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_imp_iic_wrap[] = {
+ {
+ .compatible = "mediatek,mt8195-imp_iic_wrap_s",
+ .data = &imp_iic_wrap_s_desc,
+ }, {
+ .compatible = "mediatek,mt8195-imp_iic_wrap_w",
+ .data = &imp_iic_wrap_w_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_imp_iic_wrap_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-imp_iic_wrap",
+ .of_match_table = of_match_clk_mt8195_imp_iic_wrap,
+ },
+};
+builtin_platform_driver(clk_mt8195_imp_iic_wrap_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
new file mode 100644
index 000000000000..5f9b69967459
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs infra_ao0_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs infra_ao1_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs infra_ao2_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs infra_ao3_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+static const struct mtk_gate_regs infra_ao4_cg_regs = {
+ .set_ofs = 0xe0,
+ .clr_ofs = 0xe4,
+ .sta_ofs = 0xe8,
+};
+
+#define GATE_INFRA_AO0_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO0(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO0_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO1(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_ao2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao3_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO3(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO4_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao4_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO4(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO4_FLAGS(_id, _name, _parent, _shift, 0)
+
+static const struct mtk_gate infra_ao_clks[] = {
+ /* INFRA_AO0 */
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_TMR, "infra_ao_pmic_tmr", "top_pwrap_ulposc", 0),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_AP, "infra_ao_pmic_ap", "top_pwrap_ulposc", 1),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_MD, "infra_ao_pmic_md", "top_pwrap_ulposc", 2),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_CONN, "infra_ao_pmic_conn", "top_pwrap_ulposc", 3),
+ /* infra_ao_sej is the main clock for the secure engine with JTAG support */
+ GATE_INFRA_AO0_FLAGS(CLK_INFRA_AO_SEJ, "infra_ao_sej", "top_axi", 5, CLK_IS_CRITICAL),
+ GATE_INFRA_AO0(CLK_INFRA_AO_APXGPT, "infra_ao_apxgpt", "top_axi", 6),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE, "infra_ao_gce", "top_axi", 8),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE2, "infra_ao_gce2", "top_axi", 9),
+ GATE_INFRA_AO0(CLK_INFRA_AO_THERM, "infra_ao_therm", "top_axi", 10),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM_H, "infra_ao_pwm_h", "top_axi", 15),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM1, "infra_ao_pwm1", "top_pwm", 16),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM2, "infra_ao_pwm2", "top_pwm", 17),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM3, "infra_ao_pwm3", "top_pwm", 18),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM4, "infra_ao_pwm4", "top_pwm", 19),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM, "infra_ao_pwm", "top_pwm", 21),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART0, "infra_ao_uart0", "top_uart", 22),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART1, "infra_ao_uart1", "top_uart", 23),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART2, "infra_ao_uart2", "top_uart", 24),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART3, "infra_ao_uart3", "top_uart", 25),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART4, "infra_ao_uart4", "top_uart", 26),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE_26M, "infra_ao_gce_26m", "clk26m", 27),
+ GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29),
+ /* INFRA_AO1 */
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0),
+ GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CG1_MSDC2, "infra_ao_cg1_msdc2", "top_axi", 5),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0_SRC, "infra_ao_msdc0_src", "top_msdc50_0", 6),
+ GATE_INFRA_AO1(CLK_INFRA_AO_TRNG, "infra_ao_trng", "top_axi", 9),
+ GATE_INFRA_AO1(CLK_INFRA_AO_AUXADC, "infra_ao_auxadc", "clk26m", 10),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CPUM, "infra_ao_cpum", "top_axi", 11),
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_32K, "infra_ao_hdmi_32k", "clk32k", 12),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CEC_66M_H, "infra_ao_cec_66m_h", "top_axi", 13),
+ GATE_INFRA_AO1(CLK_INFRA_AO_IRRX, "infra_ao_irrx", "top_axi", 14),
+ GATE_INFRA_AO1(CLK_INFRA_AO_PCIE_TL_26M, "infra_ao_pcie_tl_26m", "clk26m", 15),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1_SRC, "infra_ao_msdc1_src", "top_msdc30_1", 16),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CEC_66M_B, "infra_ao_cec_66m_b", "top_axi", 17),
+ GATE_INFRA_AO1(CLK_INFRA_AO_PCIE_TL_96M, "infra_ao_pcie_tl_96m", "top_tl", 18),
+ /* infra_ao_device_apc is for the device access permission control module */
+ GATE_INFRA_AO1_FLAGS(CLK_INFRA_AO_DEVICE_APC, "infra_ao_device_apc", "top_axi", 20,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO1(CLK_INFRA_AO_ECC_66M_H, "infra_ao_ecc_66m_h", "top_axi", 23),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DEBUGSYS, "infra_ao_debugsys", "top_axi", 24),
+ GATE_INFRA_AO1(CLK_INFRA_AO_AUDIO, "infra_ao_audio", "top_axi", 25),
+ GATE_INFRA_AO1(CLK_INFRA_AO_PCIE_TL_32K, "infra_ao_pcie_tl_32k", "clk32k", 26),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DBG_TRACE, "infra_ao_dbg_trace", "top_axi", 29),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DRAMC_F26M, "infra_ao_dramc_f26m", "clk26m", 31),
+ /* INFRA_AO2 */
+ GATE_INFRA_AO2(CLK_INFRA_AO_IRTX, "infra_ao_irtx", "top_axi", 0),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB, "infra_ao_ssusb", "top_usb_top", 1),
+ GATE_INFRA_AO2(CLK_INFRA_AO_DISP_PWM, "infra_ao_disp_pwm", "top_disp_pwm0", 2),
+ GATE_INFRA_AO2(CLK_INFRA_AO_CLDMA_B, "infra_ao_cldma_b", "top_axi", 3),
+ GATE_INFRA_AO2(CLK_INFRA_AO_AUDIO_26M_B, "infra_ao_audio_26m_b", "clk26m", 4),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI1, "infra_ao_spi1", "top_spi", 6),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI2, "infra_ao_spi2", "top_spi", 9),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI3, "infra_ao_spi3", "top_spi", 10),
+ GATE_INFRA_AO2(CLK_INFRA_AO_UNIPRO_SYS, "infra_ao_unipro_sys", "top_ufs", 11),
+ GATE_INFRA_AO2(CLK_INFRA_AO_UNIPRO_TICK, "infra_ao_unipro_tick", "top_ufs_tick1us", 12),
+ GATE_INFRA_AO2(CLK_INFRA_AO_UFS_MP_SAP_B, "infra_ao_ufs_mp_sap_b", "top_ufs_mp_sap_cfg", 13),
+ GATE_INFRA_AO2(CLK_INFRA_AO_PWRMCU, "infra_ao_pwrmcu", "top_pwrmcu", 15),
+ GATE_INFRA_AO2(CLK_INFRA_AO_PWRMCU_BUS_H, "infra_ao_pwrmcu_bus_h", "top_axi", 17),
+ GATE_INFRA_AO2(CLK_INFRA_AO_APDMA_B, "infra_ao_apdma_b", "top_axi", 18),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI4, "infra_ao_spi4", "top_spi", 25),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI5, "infra_ao_spi5", "top_spi", 26),
+ GATE_INFRA_AO2(CLK_INFRA_AO_CQ_DMA, "infra_ao_cq_dma", "top_axi", 27),
+ GATE_INFRA_AO2(CLK_INFRA_AO_AES_UFSFDE, "infra_ao_aes_ufsfde", "top_ufs", 28),
+ GATE_INFRA_AO2(CLK_INFRA_AO_AES, "infra_ao_aes", "top_aes_ufsfde", 29),
+ GATE_INFRA_AO2(CLK_INFRA_AO_UFS_TICK, "infra_ao_ufs_tick", "top_ufs_tick1us", 30),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_XHCI, "infra_ao_ssusb_xhci", "top_ssusb_xhci", 31),
+ /* INFRA_AO3 */
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC0_SELF, "infra_ao_msdc0f", "top_msdc50_0", 0),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC1_SELF, "infra_ao_msdc1f", "top_msdc50_0", 1),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC2_SELF, "infra_ao_msdc2f", "top_msdc50_0", 2),
+ GATE_INFRA_AO3(CLK_INFRA_AO_I2S_DMA, "infra_ao_i2s_dma", "top_axi", 5),
+ GATE_INFRA_AO3(CLK_INFRA_AO_AP_MSDC0, "infra_ao_ap_msdc0", "top_msdc50_0", 7),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MD_MSDC0, "infra_ao_md_msdc0", "top_msdc50_0", 8),
+ GATE_INFRA_AO3(CLK_INFRA_AO_CG3_MSDC2, "infra_ao_cg3_msdc2", "top_msdc30_2", 9),
+ GATE_INFRA_AO3(CLK_INFRA_AO_GCPU, "infra_ao_gcpu", "top_gcpu", 10),
+ GATE_INFRA_AO3(CLK_INFRA_AO_PCIE_PERI_26M, "infra_ao_pcie_peri_26m", "clk26m", 15),
+ GATE_INFRA_AO3(CLK_INFRA_AO_GCPU_66M_B, "infra_ao_gcpu_66m_b", "top_axi", 16),
+ GATE_INFRA_AO3(CLK_INFRA_AO_GCPU_133M_B, "infra_ao_gcpu_133m_b", "top_axi", 17),
+ GATE_INFRA_AO3(CLK_INFRA_AO_DISP_PWM1, "infra_ao_disp_pwm1", "top_disp_pwm1", 20),
+ GATE_INFRA_AO3(CLK_INFRA_AO_FBIST2FPC, "infra_ao_fbist2fpc", "top_msdc50_0", 24),
+ /* infra_ao_device_apc_sync is for the device access permission control module */
+ GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_DEVICE_APC_SYNC, "infra_ao_device_apc_sync", "top_axi", 25,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO3(CLK_INFRA_AO_PCIE_P1_PERI_26M, "infra_ao_pcie_p1_peri_26m", "clk26m", 26),
+ GATE_INFRA_AO3(CLK_INFRA_AO_SPIS0, "infra_ao_spis0", "top_spis", 28),
+ GATE_INFRA_AO3(CLK_INFRA_AO_SPIS1, "infra_ao_spis1", "top_spis", 29),
+ /* INFRA_AO4 */
+ /* infra_ao_133m_m_peri and infra_ao_66m_m_peri are the main clocks of the peripheral subsystem */
+ GATE_INFRA_AO4_FLAGS(CLK_INFRA_AO_133M_M_PERI, "infra_ao_133m_m_peri", "top_axi", 0,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO4_FLAGS(CLK_INFRA_AO_66M_M_PERI, "infra_ao_66m_m_peri", "top_axi", 1,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO4(CLK_INFRA_AO_PCIE_PL_P_250M_P0, "infra_ao_pcie_pl_p_250m_p0", "pextp_pipe", 7),
+ GATE_INFRA_AO4(CLK_INFRA_AO_PCIE_PL_P_250M_P1, "infra_ao_pcie_pl_p_250m_p1",
+ "ssusb_u3phy_p1_p_p0", 8),
+ GATE_INFRA_AO4(CLK_INFRA_AO_PCIE_P1_TL_96M, "infra_ao_pcie_p1_tl_96m", "top_tl_p1", 17),
+ GATE_INFRA_AO4(CLK_INFRA_AO_AES_MSDCFDE_0P, "infra_ao_aes_msdcfde_0p", "top_aes_msdcfde", 18),
+ GATE_INFRA_AO4(CLK_INFRA_AO_UFS_TX_SYMBOL, "infra_ao_ufs_tx_symbol", "ufs_tx_symbol", 22),
+ GATE_INFRA_AO4(CLK_INFRA_AO_UFS_RX_SYMBOL, "infra_ao_ufs_rx_symbol", "ufs_rx_symbol", 23),
+ GATE_INFRA_AO4(CLK_INFRA_AO_UFS_RX_SYMBOL1, "infra_ao_ufs_rx_symbol1", "ufs_rx_symbol1", 24),
+ GATE_INFRA_AO4(CLK_INFRA_AO_PERI_UFS_MEM_SUB, "infra_ao_peri_ufs_mem_sub", "mem_466m", 31),
+};
+
+static const struct mtk_clk_desc infra_ao_desc = {
+ .clks = infra_ao_clks,
+ .num_clks = ARRAY_SIZE(infra_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_infra_ao[] = {
+ {
+ .compatible = "mediatek,mt8195-infracfg_ao",
+ .data = &infra_ao_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_infra_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-infra_ao",
+ .of_match_table = of_match_clk_mt8195_infra_ao,
+ },
+};
+builtin_platform_driver(clk_mt8195_infra_ao_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-ipe.c b/drivers/clk/mediatek/clk-mt8195-ipe.c
new file mode 100644
index 000000000000..fc1d42b6ac84
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-ipe.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs ipe_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IPE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipe_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate ipe_clks[] = {
+ GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "top_ipe", 0),
+ GATE_IPE(CLK_IPE_FDVT, "ipe_fdvt", "top_ipe", 1),
+ GATE_IPE(CLK_IPE_ME, "ipe_me", "top_ipe", 2),
+ GATE_IPE(CLK_IPE_TOP, "ipe_top", "top_ipe", 3),
+ GATE_IPE(CLK_IPE_SMI_LARB12, "ipe_smi_larb12", "top_ipe", 4),
+};
+
+static const struct mtk_clk_desc ipe_desc = {
+ .clks = ipe_clks,
+ .num_clks = ARRAY_SIZE(ipe_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_ipe[] = {
+ {
+ .compatible = "mediatek,mt8195-ipesys",
+ .data = &ipe_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_ipe_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-ipe",
+ .of_match_table = of_match_clk_mt8195_ipe,
+ },
+};
+builtin_platform_driver(clk_mt8195_ipe_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-mfg.c b/drivers/clk/mediatek/clk-mt8195-mfg.c
new file mode 100644
index 000000000000..aca6d9c0837c
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-mfg.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "top_mfg_core_tmp", 0),
+};
+
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_mfg[] = {
+ {
+ .compatible = "mediatek,mt8195-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_mfg_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-mfg",
+ .of_match_table = of_match_clk_mt8195_mfg,
+ },
+};
+builtin_platform_driver(clk_mt8195_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-peri_ao.c b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
new file mode 100644
index 000000000000..907a92b22de8
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs peri_ao_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x18,
+};
+
+#define GATE_PERI_AO(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri_ao_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate peri_ao_clks[] = {
+ GATE_PERI_AO(CLK_PERI_AO_ETHERNET, "peri_ao_ethernet", "top_axi", 0),
+ GATE_PERI_AO(CLK_PERI_AO_ETHERNET_BUS, "peri_ao_ethernet_bus", "top_axi", 1),
+ GATE_PERI_AO(CLK_PERI_AO_FLASHIF_BUS, "peri_ao_flashif_bus", "top_axi", 3),
+ GATE_PERI_AO(CLK_PERI_AO_FLASHIF_FLASH, "peri_ao_flashif_flash", "top_spinor", 5),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_1P_BUS, "peri_ao_ssusb_1p_bus", "top_usb_top_1p", 7),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_1P_XHCI, "peri_ao_ssusb_1p_xhci", "top_ssusb_xhci_1p", 8),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_2P_BUS, "peri_ao_ssusb_2p_bus", "top_usb_top_2p", 9),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_2P_XHCI, "peri_ao_ssusb_2p_xhci", "top_ssusb_xhci_2p", 10),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_3P_BUS, "peri_ao_ssusb_3p_bus", "top_usb_top_3p", 11),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_3P_XHCI, "peri_ao_ssusb_3p_xhci", "top_ssusb_xhci_3p", 12),
+ GATE_PERI_AO(CLK_PERI_AO_SPINFI, "peri_ao_spinfi", "top_spinfi_bclk", 15),
+ GATE_PERI_AO(CLK_PERI_AO_ETHERNET_MAC, "peri_ao_ethernet_mac", "top_snps_eth_250m", 16),
+ GATE_PERI_AO(CLK_PERI_AO_NFI_H, "peri_ao_nfi_h", "top_axi", 19),
+ GATE_PERI_AO(CLK_PERI_AO_FNFI1X, "peri_ao_fnfi1x", "top_nfi1x", 20),
+ GATE_PERI_AO(CLK_PERI_AO_PCIE_P0_MEM, "peri_ao_pcie_p0_mem", "mem_466m", 24),
+ GATE_PERI_AO(CLK_PERI_AO_PCIE_P1_MEM, "peri_ao_pcie_p1_mem", "mem_466m", 25),
+};
+
+static const struct mtk_clk_desc peri_ao_desc = {
+ .clks = peri_ao_clks,
+ .num_clks = ARRAY_SIZE(peri_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_peri_ao[] = {
+ {
+ .compatible = "mediatek,mt8195-pericfg_ao",
+ .data = &peri_ao_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_peri_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-peri_ao",
+ .of_match_table = of_match_clk_mt8195_peri_ao,
+ },
+};
+builtin_platform_driver(clk_mt8195_peri_ao_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
new file mode 100644
index 000000000000..26b4846c5894
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs scp_adsp_cg_regs = {
+ .set_ofs = 0x180,
+ .clr_ofs = 0x180,
+ .sta_ofs = 0x180,
+};
+
+#define GATE_SCP_ADSP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &scp_adsp_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate scp_adsp_clks[] = {
+ GATE_SCP_ADSP(CLK_SCP_ADSP_AUDIODSP, "scp_adsp_audiodsp", "top_adsp", 0),
+};
+
+static const struct mtk_clk_desc scp_adsp_desc = {
+ .clks = scp_adsp_clks,
+ .num_clks = ARRAY_SIZE(scp_adsp_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_scp_adsp[] = {
+ {
+ .compatible = "mediatek,mt8195-scp_adsp",
+ .data = &scp_adsp_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_scp_adsp_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-scp_adsp",
+ .of_match_table = of_match_clk_mt8195_scp_adsp,
+ },
+};
+builtin_platform_driver(clk_mt8195_scp_adsp_drv);
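
Note that scp_adsp's bank has no dedicated SET/CLR registers: all three offsets point at the same status register (0x180), so the driver selects mtk_clk_gate_ops_no_setclr, which falls back to a locked read-modify-write. The _inv variants used elsewhere in this patch flip the polarity (bit set = clock running). A hedged sketch of what enabling such a gate boils down to (regmap-based; names are illustrative, the real ops live in drivers/clk/mediatek/clk-gate.c):

#include <linux/bits.h>
#include <linux/regmap.h>

static void cg_enable_rmw(struct regmap *regmap, u32 sta_ofs, u8 bit,
			  bool inverted)
{
	/* Non-inverted banks gate the clock while the CG bit is 1, so
	 * enabling clears the bit; inverted banks do the opposite. */
	regmap_update_bits(regmap, sta_ofs, BIT(bit),
			   inverted ? BIT(bit) : 0);
}
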
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
new file mode 100644
index 000000000000..3e2aba9c40bb
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -0,0 +1,1273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+static DEFINE_SPINLOCK(mt8195_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_IN_DGI, "in_dgi", NULL, 165000000),
+ FIXED_CLK(CLK_TOP_ULPOSC1, "ulposc1", NULL, 248000000),
+ FIXED_CLK(CLK_TOP_ULPOSC2, "ulposc2", NULL, 326000000),
+ FIXED_CLK(CLK_TOP_MEM_466M, "mem_466m", NULL, 533000000),
+ FIXED_CLK(CLK_TOP_MPHONE_SLAVE_B, "mphone_slave_b", NULL, 49152000),
+ FIXED_CLK(CLK_TOP_PEXTP_PIPE, "pextp_pipe", NULL, 250000000),
+ FIXED_CLK(CLK_TOP_UFS_RX_SYMBOL, "ufs_rx_symbol", NULL, 166000000),
+ FIXED_CLK(CLK_TOP_UFS_TX_SYMBOL, "ufs_tx_symbol", NULL, 166000000),
+ FIXED_CLK(CLK_TOP_SSUSB_U3PHY_P1_P_P0, "ssusb_u3phy_p1_p_p0", NULL, 131000000),
+ FIXED_CLK(CLK_TOP_UFS_RX_SYMBOL1, "ufs_rx_symbol1", NULL, 166000000),
+ FIXED_CLK(CLK_TOP_FPC, "fpc", NULL, 50000000),
+ FIXED_CLK(CLK_TOP_HDMIRX_P, "hdmirx_p", NULL, 594000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_CLK26M_D52, "clk26m_d52", "clk26m", 1, 52),
+ FACTOR(CLK_TOP_IN_DGI_D2, "in_dgi_d2", "in_dgi", 1, 2),
+ FACTOR(CLK_TOP_IN_DGI_D4, "in_dgi_d4", "in_dgi", 1, 4),
+ FACTOR(CLK_TOP_IN_DGI_D6, "in_dgi_d6", "in_dgi", 1, 6),
+ FACTOR(CLK_TOP_IN_DGI_D8, "in_dgi_d8", "in_dgi", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll_d4", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll_d4", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll_d4", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll_d5", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll_d5", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll_d5", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll_d6", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D6_D4, "mainpll_d6_d4", "mainpll_d6", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D6_D8, "mainpll_d6_d8", "mainpll_d6", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll_d7", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll_d7", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll_d7", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D9, "mainpll_d9", "mainpll", 1, 9),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll_d4", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll_d4", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll_d4", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll_d6", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll_d6", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll_d6", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D16, "univpll_d6_d16", "univpll_d6", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll_192m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll_192m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll_192m", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll_192m", 1, 32),
+ FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1", 1, 3),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1, 4),
+ FACTOR(CLK_TOP_APLL2_D3, "apll2_d3", "apll2", 1, 3),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1, 4),
+ FACTOR(CLK_TOP_APLL3_D4, "apll3_d4", "apll3", 1, 4),
+ FACTOR(CLK_TOP_APLL4_D4, "apll4_d4", "apll4", 1, 4),
+ FACTOR(CLK_TOP_APLL5_D4, "apll5_d4", "apll5", 1, 4),
+ FACTOR(CLK_TOP_HDMIRX_APLL_D3, "hdmirx_apll_d3", "hdmirx_apll", 1, 3),
+ FACTOR(CLK_TOP_HDMIRX_APLL_D4, "hdmirx_apll_d4", "hdmirx_apll", 1, 4),
+ FACTOR(CLK_TOP_HDMIRX_APLL_D6, "hdmirx_apll_d6", "hdmirx_apll", 1, 6),
+ FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D4_D2, "mmpll_d4_d2", "mmpll_d4", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D4_D4, "mmpll_d4_d4", "mmpll_d4", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1, 5),
+ FACTOR(CLK_TOP_MMPLL_D5_D2, "mmpll_d5_d2", "mmpll_d5", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D5_D4, "mmpll_d5_d4", "mmpll_d5", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll", 1, 6),
+ FACTOR(CLK_TOP_MMPLL_D6_D2, "mmpll_d6_d2", "mmpll_d6", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1, 7),
+ FACTOR(CLK_TOP_MMPLL_D9, "mmpll_d9", "mmpll", 1, 9),
+ FACTOR(CLK_TOP_TVDPLL1_D2, "tvdpll1_d2", "tvdpll1", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL1_D4, "tvdpll1_d4", "tvdpll1", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL1_D8, "tvdpll1_d8", "tvdpll1", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL1_D16, "tvdpll1_d16", "tvdpll1", 1, 16),
+ FACTOR(CLK_TOP_TVDPLL2_D2, "tvdpll2_d2", "tvdpll2", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL2_D4, "tvdpll2_d4", "tvdpll2", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL2_D8, "tvdpll2_d8", "tvdpll2", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL2_D16, "tvdpll2_d16", "tvdpll2", 1, 16),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1, 16),
+ FACTOR(CLK_TOP_ETHPLL_D2, "ethpll_d2", "ethpll", 1, 2),
+ FACTOR(CLK_TOP_ETHPLL_D8, "ethpll_d8", "ethpll", 1, 8),
+ FACTOR(CLK_TOP_ETHPLL_D10, "ethpll_d10", "ethpll", 1, 10),
+ FACTOR(CLK_TOP_DGIPLL_D2, "dgipll_d2", "dgipll", 1, 2),
+ FACTOR(CLK_TOP_ULPOSC1_D2, "ulposc1_d2", "ulposc1", 1, 2),
+ FACTOR(CLK_TOP_ULPOSC1_D4, "ulposc1_d4", "ulposc1", 1, 4),
+ FACTOR(CLK_TOP_ULPOSC1_D7, "ulposc1_d7", "ulposc1", 1, 7),
+ FACTOR(CLK_TOP_ULPOSC1_D8, "ulposc1_d8", "ulposc1", 1, 8),
+ FACTOR(CLK_TOP_ULPOSC1_D10, "ulposc1_d10", "ulposc1", 1, 10),
+ FACTOR(CLK_TOP_ULPOSC1_D16, "ulposc1_d16", "ulposc1", 1, 16),
+ FACTOR(CLK_TOP_ADSPPLL_D2, "adsppll_d2", "adsppll", 1, 2),
+ FACTOR(CLK_TOP_ADSPPLL_D4, "adsppll_d4", "adsppll", 1, 4),
+ FACTOR(CLK_TOP_ADSPPLL_D8, "adsppll_d8", "adsppll", 1, 8),
+};
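
Each FACTOR entry above is a fixed-factor clock whose rate is parent_rate * mult / div, and the two-level names encode chained dividers: mainpll_d4_d2 is mainpll / 4 / 2. A quick sketch of the math (assuming <linux/math64.h>; the 2184 MHz mainpll rate is what the apmixedsys driver typically programs, used here purely for illustration):

/* Sketch only: the framework's clk-fixed-factor.c does this internally. */
static unsigned long factor_rate(unsigned long parent, unsigned int mult,
				 unsigned int div)
{
	return (unsigned long)div_u64((u64)parent * mult, div);
}

/*
 * factor_rate(2184000000UL, 1, 4) -> 546000000   ("mainpll_d4")
 * factor_rate(546000000UL, 1, 2)  -> 273000000   ("mainpll_d4_d2")
 */
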
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "mainpll_d6_d2",
+ "ulposc1_d4"
+};
+
+static const char * const spm_parents[] = {
+ "clk26m",
+ "ulposc1_d10",
+ "mainpll_d7_d4",
+ "clk32k"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "univpll_d4",
+ "mainpll_d6",
+ "univpll_d6",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "mainpll_d4",
+ "mainpll_d6_d2"
+};
+
+static const char * const bus_aximem_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "mainpll_d6"
+};
+
+static const char * const vpp_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d4",
+ "mmpll_d5",
+ "tvdpll1",
+ "tvdpll2",
+ "univpll_d4",
+ "mmpll_d4"
+};
+
+static const char * const ethdr_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d4",
+ "mmpll_d5_d4",
+ "tvdpll1",
+ "tvdpll2",
+ "univpll_d4",
+ "mmpll_d4"
+};
+
+static const char * const ipe_parents[] = {
+ "clk26m",
+ "imgpll",
+ "mainpll_d4",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "mmpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "mainpll_d4",
+ "mmpll_d4",
+ "univpll_d4",
+ "univpll_d5",
+ "univpll_d6",
+ "mmpll_d7",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "imgpll"
+};
+
+static const char * const ccu_parents[] = {
+ "clk26m",
+ "univpll_d6",
+ "mainpll_d4_d2",
+ "mainpll_d4",
+ "univpll_d5",
+ "mainpll_d6",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d4_d2",
+ "univpll_d7"
+};
+
+static const char * const img_parents[] = {
+ "clk26m",
+ "imgpll",
+ "univpll_d4",
+ "mainpll_d4",
+ "univpll_d5",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "mmpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "univpll_d5_d2"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "univpll_d6_d4"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "univpll_d5",
+ "univpll_d4",
+ "mmpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp1_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d5",
+ "mmpll_d5",
+ "univpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp2_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "mainpll_d4",
+ "univpll_d4",
+ "mmpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const ipu_if_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d5",
+ "univpll_d4",
+ "mmpll_d4"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "univpll_d6",
+ "univpll_d7"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_d6_d16",
+ "clk26m_d2",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d6_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "mainpll_d5_d4",
+ "mainpll_d6_d4",
+ "msdcpll_d4",
+ "univpll_d6_d2",
+ "mainpll_d6_d2",
+ "mainpll_d4_d4",
+ "univpll_d5_d4"
+};
+
+static const char * const spis_parents[] = {
+ "clk26m",
+ "univpll_d6",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "univpll_d6_d2",
+ "univpll_d4_d4",
+ "univpll_d6_d4",
+ "mainpll_d7_d4"
+};
+
+static const char * const msdc50_0_h_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d6_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll",
+ "msdcpll_d2",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const msdc30_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d6_d2",
+ "mainpll_d7_d2",
+ "msdcpll_d2"
+};
+
+static const char * const intdir_parents[] = {
+ "clk26m",
+ "univpll_d6",
+ "mainpll_d4",
+ "univpll_d4"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d7_d4"
+};
+
+static const char * const audio_h_parents[] = {
+ "clk26m",
+ "univpll_d7",
+ "apll1",
+ "apll2"
+};
+
+static const char * const pwrap_ulposc_parents[] = {
+ "ulposc1_d10",
+ "clk26m",
+ "ulposc1_d4",
+ "ulposc1_d7",
+ "ulposc1_d8",
+ "ulposc1_d16",
+ "mainpll_d4_d8",
+ "univpll_d5_d8"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const pwrmcu_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "mainpll_d5_d2",
+ "mainpll_d9",
+ "mainpll_d4_d2"
+};
+
+static const char * const dp_parents[] = {
+ "clk26m",
+ "tvdpll1_d2",
+ "tvdpll2_d2",
+ "tvdpll1_d4",
+ "tvdpll2_d4",
+ "tvdpll1_d8",
+ "tvdpll2_d8",
+ "tvdpll1_d16",
+ "tvdpll2_d16"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "ulposc1_d2",
+ "ulposc1_d4",
+ "ulposc1_d16"
+};
+
+static const char * const usb_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "univpll_d5_d4"
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "univpll_d7",
+ "univpll_d6",
+ "mmpll_d6",
+ "univpll_d5"
+};
+
+static const char * const gcpu_parents[] = {
+ "clk26m",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "mmpll_d5_d2",
+ "univpll_d5_d2"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d4_d4",
+ "mainpll_d4_d8"
+};
+
+static const char * const dpmaif_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "mainpll_d6",
+ "mainpll_d4_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const aes_fde_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d4_d4",
+ "univpll_d4_d2",
+ "univpll_d6"
+};
+
+static const char * const ufs_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d8",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "msdcpll_d2"
+};
+
+static const char * const ufs_tick1us_parents[] = {
+ "clk26m_d52",
+ "clk26m"
+};
+
+static const char * const ufs_mp_sap_parents[] = {
+ "clk26m",
+ "msdcpll_d16"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "mmpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mmpll_d6",
+ "mainpll_d5_d2",
+ "mainpll_d6_d2",
+ "mmpll_d9",
+ "univpll_d4_d4",
+ "mainpll_d4",
+ "univpll_d4",
+ "univpll_d5",
+ "univpll_d5_d2",
+ "mainpll_d5"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mainpll_d5",
+ "mmpll_d6",
+ "mmpll_d5",
+ "vdecpll",
+ "univpll_d4",
+ "mmpll_d4",
+ "univpll_d6_d2",
+ "mmpll_d9",
+ "univpll_d6",
+ "univpll_d5",
+ "mainpll_d4"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll_d4_d8"
+};
+
+static const char * const mcupm_parents[] = {
+ "clk26m",
+ "mainpll_d6_d2",
+ "mainpll_d7_d4",
+	"mainpll_d7_d4"
+
+static const char * const spmi_parents[] = {
+ "clk26m",
+ "clk26m_d2",
+ "ulposc1_d8",
+ "ulposc1_d10",
+ "ulposc1_d16",
+ "ulposc1_d7",
+ "clk32k",
+ "mainpll_d7_d8",
+ "mainpll_d6_d8",
+ "mainpll_d5_d8"
+};
+
+static const char * const dvfsrc_parents[] = {
+ "clk26m",
+ "ulposc1_d10",
+ "univpll_d6_d8",
+ "msdcpll_d16"
+};
+
+static const char * const tl_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "mainpll_d4_d4"
+};
+
+static const char * const dsi_occ_parents[] = {
+ "clk26m",
+ "mainpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const wpe_vpp_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mainpll_d6",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "tvdpll1",
+ "univpll_d4"
+};
+
+static const char * const hdcp_parents[] = {
+ "clk26m",
+ "univpll_d4_d8",
+ "mainpll_d5_d8",
+ "univpll_d6_d4"
+};
+
+static const char * const hdcp_24m_parents[] = {
+ "clk26m",
+ "univpll_192m_d4",
+ "univpll_192m_d8",
+ "univpll_d6_d8"
+};
+
+static const char * const hd20_dacr_ref_parents[] = {
+ "clk26m",
+ "univpll_d4_d2",
+ "univpll_d4_d4",
+ "univpll_d4_d8"
+};
+
+static const char * const hd20_hdcp_c_parents[] = {
+ "clk26m",
+ "msdcpll_d4",
+ "univpll_d4_d8",
+ "univpll_d6_d8"
+};
+
+static const char * const hdmi_xtal_parents[] = {
+ "clk26m",
+ "clk26m_d2"
+};
+
+static const char * const hdmi_apb_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "msdcpll_d2"
+};
+
+static const char * const snps_eth_250m_parents[] = {
+ "clk26m",
+ "ethpll_d2"
+};
+
+static const char * const snps_eth_62p4m_ptp_parents[] = {
+ "apll2_d3",
+ "apll1_d3",
+ "clk26m",
+ "ethpll_d8"
+};
+
+static const char * const snps_eth_50m_rmii_parents[] = {
+ "clk26m",
+ "ethpll_d10"
+};
+
+static const char * const dgi_out_parents[] = {
+ "clk26m",
+ "dgipll",
+ "dgipll_d2",
+ "in_dgi",
+ "in_dgi_d2",
+ "mmpll_d4_d4"
+};
+
+static const char * const nna_parents[] = {
+ "clk26m",
+ "nnapll",
+ "univpll_d4",
+ "mainpll_d4",
+ "univpll_d5",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "mmpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "mmpll_d6_d2"
+};
+
+static const char * const adsp_parents[] = {
+ "clk26m",
+ "clk26m_d2",
+ "mainpll_d6",
+ "mainpll_d5_d2",
+ "univpll_d4_d4",
+ "univpll_d4",
+ "univpll_d6",
+ "ulposc1",
+ "adsppll",
+ "adsppll_d2",
+ "adsppll_d4",
+ "adsppll_d8"
+};
+
+static const char * const asm_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const apll1_parents[] = {
+ "clk26m",
+ "apll1_d4"
+};
+
+static const char * const apll2_parents[] = {
+ "clk26m",
+ "apll2_d4"
+};
+
+static const char * const apll3_parents[] = {
+ "clk26m",
+ "apll3_d4"
+};
+
+static const char * const apll4_parents[] = {
+ "clk26m",
+ "apll4_d4"
+};
+
+static const char * const apll5_parents[] = {
+ "clk26m",
+ "apll5_d4"
+};
+
+static const char * const i2s_parents[] = {
+ "clk26m",
+ "apll1",
+ "apll2",
+ "apll3",
+ "apll4",
+ "apll5",
+ "hdmirx_apll"
+};
+
+static const char * const a1sys_hp_parents[] = {
+ "clk26m",
+ "apll1_d4"
+};
+
+static const char * const a2sys_parents[] = {
+ "clk26m",
+ "apll2_d4"
+};
+
+static const char * const a3sys_parents[] = {
+ "clk26m",
+ "apll3_d4",
+ "apll4_d4",
+ "apll5_d4",
+ "hdmirx_apll_d3",
+ "hdmirx_apll_d4",
+ "hdmirx_apll_d6"
+};
+
+static const char * const spinfi_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d8",
+ "univpll_d5_d8",
+ "mainpll_d4_d8",
+ "mainpll_d7_d4",
+ "mainpll_d6_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d4"
+};
+
+static const char * const nfi1x_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "mainpll_d7_d4",
+ "mainpll_d6_d4",
+ "univpll_d6_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2"
+};
+
+static const char * const ecc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d6"
+};
+
+static const char * const audio_local_bus_parents[] = {
+ "clk26m",
+ "clk26m_d2",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "mainpll_d6_d2",
+ "mainpll_d7",
+ "univpll_d6",
+ "ulposc1",
+ "ulposc1_d4",
+ "ulposc1_d2"
+};
+
+static const char * const spinor_parents[] = {
+ "clk26m",
+ "clk26m_d2",
+ "mainpll_d7_d8",
+ "univpll_d6_d8"
+};
+
+static const char * const dvio_dgi_ref_parents[] = {
+ "clk26m",
+ "in_dgi",
+ "in_dgi_d2",
+ "in_dgi_d4",
+ "in_dgi_d6",
+ "in_dgi_d8",
+ "mmpll_d4_d4"
+};
+
+static const char * const ulposc_parents[] = {
+ "ulposc1",
+ "ethpll_d2",
+ "mainpll_d4_d2",
+ "ethpll_d10"
+};
+
+static const char * const ulposc_core_parents[] = {
+ "ulposc2",
+ "univpll_d7",
+ "mainpll_d6",
+ "ethpll_d10"
+};
+
+static const char * const srck_parents[] = {
+ "ulposc1_d10",
+ "clk26m"
+};
+
+static const char * const mfg_fast_parents[] = {
+ "top_mfg_core_tmp",
+ "mfgpll"
+};
+
+static const struct mtk_mux top_mtk_muxes[] = {
+ /*
+ * CLK_CFG_0
+	 * top_axi and top_bus_aximem are bus clocks and must not be gated by Linux.
+	 * top_spm and top_scp are the main clocks of the always-on co-processor.
+ */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI, "top_axi",
+ axi_parents, 0x020, 0x024, 0x028, 0, 3, 7, 0x04, 0, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SPM, "top_spm",
+ spm_parents, 0x020, 0x024, 0x028, 8, 2, 15, 0x04, 1, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SCP, "top_scp",
+ scp_parents, 0x020, 0x024, 0x028, 16, 3, 23, 0x04, 2, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_BUS_AXIMEM, "top_bus_aximem",
+ bus_aximem_parents, 0x020, 0x024, 0x028, 24, 3, 31, 0x04, 3, CLK_IS_CRITICAL),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VPP, "top_vpp",
+ vpp_parents, 0x02C, 0x030, 0x034, 0, 4, 7, 0x04, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETHDR, "top_ethdr",
+ ethdr_parents, 0x02C, 0x030, 0x034, 8, 4, 15, 0x04, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IPE, "top_ipe",
+ ipe_parents, 0x02C, 0x030, 0x034, 16, 4, 23, 0x04, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAM, "top_cam",
+ cam_parents, 0x02C, 0x030, 0x034, 24, 4, 31, 0x04, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CCU, "top_ccu",
+ ccu_parents, 0x038, 0x03C, 0x040, 0, 4, 7, 0x04, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG, "top_img",
+ img_parents, 0x038, 0x03C, 0x040, 8, 4, 15, 0x04, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM, "top_camtm",
+ camtm_parents, 0x038, 0x03C, 0x040, 16, 2, 23, 0x04, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP, "top_dsp",
+ dsp_parents, 0x038, 0x03C, 0x040, 24, 3, 31, 0x04, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP1, "top_dsp1",
+ dsp1_parents, 0x044, 0x048, 0x04C, 0, 3, 7, 0x04, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP2, "top_dsp2",
+ dsp1_parents, 0x044, 0x048, 0x04C, 8, 3, 15, 0x04, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP3, "top_dsp3",
+ dsp1_parents, 0x044, 0x048, 0x04C, 16, 3, 23, 0x04, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP4, "top_dsp4",
+ dsp2_parents, 0x044, 0x048, 0x04C, 24, 3, 31, 0x04, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP5, "top_dsp5",
+ dsp2_parents, 0x050, 0x054, 0x058, 0, 3, 7, 0x04, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP6, "top_dsp6",
+ dsp2_parents, 0x050, 0x054, 0x058, 8, 3, 15, 0x04, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP7, "top_dsp7",
+ dsp_parents, 0x050, 0x054, 0x058, 16, 3, 23, 0x04, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IPU_IF, "top_ipu_if",
+ ipu_if_parents, 0x050, 0x054, 0x058, 24, 3, 31, 0x04, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_CORE_TMP, "top_mfg_core_tmp",
+ mfg_parents, 0x05C, 0x060, 0x064, 0, 2, 7, 0x04, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG, "top_camtg",
+ camtg_parents, 0x05C, 0x060, 0x064, 8, 3, 15, 0x04, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2, "top_camtg2",
+ camtg_parents, 0x05C, 0x060, 0x064, 16, 3, 23, 0x04, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3, "top_camtg3",
+ camtg_parents, 0x05C, 0x060, 0x064, 24, 3, 31, 0x04, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG4, "top_camtg4",
+ camtg_parents, 0x068, 0x06C, 0x070, 0, 3, 7, 0x04, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG5, "top_camtg5",
+ camtg_parents, 0x068, 0x06C, 0x070, 8, 3, 15, 0x04, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART, "top_uart",
+ uart_parents, 0x068, 0x06C, 0x070, 16, 1, 23, 0x04, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI, "top_spi",
+ spi_parents, 0x068, 0x06C, 0x070, 24, 3, 31, 0x04, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPIS, "top_spis",
+ spis_parents, 0x074, 0x078, 0x07C, 0, 3, 7, 0x04, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HCLK, "top_msdc50_0_hclk",
+ msdc50_0_h_parents, 0x074, 0x078, 0x07C, 8, 2, 15, 0x04, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0, "top_msdc50_0",
+ msdc50_0_parents, 0x074, 0x078, 0x07C, 16, 3, 23, 0x04, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1, "top_msdc30_1",
+ msdc30_parents, 0x074, 0x078, 0x07C, 24, 3, 31, 0x04, 31),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_2, "top_msdc30_2",
+ msdc30_parents, 0x080, 0x084, 0x088, 0, 3, 7, 0x08, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_INTDIR, "top_intdir",
+ intdir_parents, 0x080, 0x084, 0x088, 8, 2, 15, 0x08, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS, "top_aud_intbus",
+ aud_intbus_parents, 0x080, 0x084, 0x088, 16, 2, 23, 0x08, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_H, "top_audio_h",
+ audio_h_parents, 0x080, 0x084, 0x088, 24, 2, 31, 0x08, 3),
+ /*
+ * CLK_CFG_9
+	 * top_pwrmcu is the main clock of another co-processor and must not
+	 * be handled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWRAP_ULPOSC, "top_pwrap_ulposc",
+ pwrap_ulposc_parents, 0x08C, 0x090, 0x094, 0, 3, 7, 0x08, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB, "top_atb",
+ atb_parents, 0x08C, 0x090, 0x094, 8, 2, 15, 0x08, 5),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_PWRMCU, "top_pwrmcu",
+ pwrmcu_parents, 0x08C, 0x090, 0x094, 16, 3, 23, 0x08, 6, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DP, "top_dp",
+ dp_parents, 0x08C, 0x090, 0x094, 24, 4, 31, 0x08, 7),
+ /* CLK_CFG_10 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EDP, "top_edp",
+ dp_parents, 0x098, 0x09C, 0x0A0, 0, 4, 7, 0x08, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI, "top_dpi",
+ dp_parents, 0x098, 0x09C, 0x0A0, 8, 4, 15, 0x08, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM0, "top_disp_pwm0",
+ disp_pwm_parents, 0x098, 0x09C, 0x0A0, 16, 3, 23, 0x08, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM1, "top_disp_pwm1",
+ disp_pwm_parents, 0x098, 0x09C, 0x0A0, 24, 3, 31, 0x08, 11),
+ /* CLK_CFG_11 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP, "top_usb_top",
+ usb_parents, 0x0A4, 0x0A8, 0x0AC, 0, 2, 7, 0x08, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI, "top_ssusb_xhci",
+ usb_parents, 0x0A4, 0x0A8, 0x0AC, 8, 2, 15, 0x08, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_1P, "top_usb_top_1p",
+ usb_parents, 0x0A4, 0x0A8, 0x0AC, 16, 2, 23, 0x08, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_1P, "top_ssusb_xhci_1p",
+ usb_parents, 0x0A4, 0x0A8, 0x0AC, 24, 2, 31, 0x08, 15),
+ /* CLK_CFG_12 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_2P, "top_usb_top_2p",
+ usb_parents, 0x0B0, 0x0B4, 0x0B8, 0, 2, 7, 0x08, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_2P, "top_ssusb_xhci_2p",
+ usb_parents, 0x0B0, 0x0B4, 0x0B8, 8, 2, 15, 0x08, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_3P, "top_usb_top_3p",
+ usb_parents, 0x0B0, 0x0B4, 0x0B8, 16, 2, 23, 0x08, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_3P, "top_ssusb_xhci_3p",
+ usb_parents, 0x0B0, 0x0B4, 0x0B8, 24, 2, 31, 0x08, 19),
+ /* CLK_CFG_13 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C, "top_i2c",
+ i2c_parents, 0x0BC, 0x0C0, 0x0C4, 0, 2, 7, 0x08, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF, "top_seninf",
+ seninf_parents, 0x0BC, 0x0C0, 0x0C4, 8, 3, 15, 0x08, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF1, "top_seninf1",
+ seninf_parents, 0x0BC, 0x0C0, 0x0C4, 16, 3, 23, 0x08, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF2, "top_seninf2",
+ seninf_parents, 0x0BC, 0x0C0, 0x0C4, 24, 3, 31, 0x08, 23),
+ /* CLK_CFG_14 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF3, "top_seninf3",
+ seninf_parents, 0x0C8, 0x0CC, 0x0D0, 0, 3, 7, 0x08, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_GCPU, "top_gcpu",
+ gcpu_parents, 0x0C8, 0x0CC, 0x0D0, 8, 3, 15, 0x08, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC, "top_dxcc",
+ dxcc_parents, 0x0C8, 0x0CC, 0x0D0, 16, 2, 23, 0x08, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPMAIF_MAIN, "top_dpmaif_main",
+ dpmaif_parents, 0x0C8, 0x0CC, 0x0D0, 24, 3, 31, 0x08, 27),
+ /* CLK_CFG_15 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_UFSFDE, "top_aes_ufsfde",
+ aes_fde_parents, 0x0D4, 0x0D8, 0x0DC, 0, 3, 7, 0x08, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UFS, "top_ufs",
+ ufs_parents, 0x0D4, 0x0D8, 0x0DC, 8, 3, 15, 0x08, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UFS_TICK1US, "top_ufs_tick1us",
+ ufs_tick1us_parents, 0x0D4, 0x0D8, 0x0DC, 16, 1, 23, 0x08, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UFS_MP_SAP_CFG, "top_ufs_mp_sap_cfg",
+ ufs_mp_sap_parents, 0x0D4, 0x0D8, 0x0DC, 24, 1, 31, 0x08, 31),
+ /*
+ * CLK_CFG_16
+	 * top_mcupm is the main clock of another co-processor and must not
+	 * be handled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VENC, "top_venc",
+ venc_parents, 0x0E0, 0x0E4, 0x0E8, 0, 4, 7, 0x0C, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC, "top_vdec",
+ vdec_parents, 0x0E0, 0x0E4, 0x0E8, 8, 4, 15, 0x0C, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM, "top_pwm",
+ pwm_parents, 0x0E0, 0x0E4, 0x0E8, 16, 1, 23, 0x0C, 2),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MCUPM, "top_mcupm",
+ mcupm_parents, 0x0E0, 0x0E4, 0x0E8, 24, 2, 31, 0x0C, 3, CLK_IS_CRITICAL),
+ /*
+ * CLK_CFG_17
+	 * top_dvfsrc is used internally for DVFS and must not be handled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPMI_P_MST, "top_spmi_p_mst",
+ spmi_parents, 0x0EC, 0x0F0, 0x0F4, 0, 4, 7, 0x0C, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPMI_M_MST, "top_spmi_m_mst",
+ spmi_parents, 0x0EC, 0x0F0, 0x0F4, 8, 4, 15, 0x0C, 5),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DVFSRC, "top_dvfsrc",
+ dvfsrc_parents, 0x0EC, 0x0F0, 0x0F4, 16, 2, 23, 0x0C, 6, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_TL, "top_tl",
+ tl_parents, 0x0EC, 0x0F0, 0x0F4, 24, 2, 31, 0x0C, 7),
+ /* CLK_CFG_18 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_TL_P1, "top_tl_p1",
+ tl_parents, 0x0F8, 0x0FC, 0x0100, 0, 2, 7, 0x0C, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_MSDCFDE, "top_aes_msdcfde",
+ aes_fde_parents, 0x0F8, 0x0FC, 0x0100, 8, 3, 15, 0x0C, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSI_OCC, "top_dsi_occ",
+ dsi_occ_parents, 0x0F8, 0x0FC, 0x0100, 16, 2, 23, 0x0C, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_WPE_VPP, "top_wpe_vpp",
+ wpe_vpp_parents, 0x0F8, 0x0FC, 0x0100, 24, 4, 31, 0x0C, 11),
+ /* CLK_CFG_19 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDCP, "top_hdcp",
+ hdcp_parents, 0x0104, 0x0108, 0x010C, 0, 2, 7, 0x0C, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDCP_24M, "top_hdcp_24m",
+ hdcp_24m_parents, 0x0104, 0x0108, 0x010C, 8, 2, 15, 0x0C, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HD20_DACR_REF_CLK, "top_hd20_dacr_ref_clk",
+ hd20_dacr_ref_parents, 0x0104, 0x0108, 0x010C, 16, 2, 23, 0x0C, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HD20_HDCP_CCLK, "top_hd20_hdcp_cclk",
+ hd20_hdcp_c_parents, 0x0104, 0x0108, 0x010C, 24, 2, 31, 0x0C, 15),
+ /* CLK_CFG_20 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDMI_XTAL, "top_hdmi_xtal",
+ hdmi_xtal_parents, 0x0110, 0x0114, 0x0118, 0, 1, 7, 0x0C, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDMI_APB, "top_hdmi_apb",
+ hdmi_apb_parents, 0x0110, 0x0114, 0x0118, 8, 2, 15, 0x0C, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SNPS_ETH_250M, "top_snps_eth_250m",
+ snps_eth_250m_parents, 0x0110, 0x0114, 0x0118, 16, 1, 23, 0x0C, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SNPS_ETH_62P4M_PTP, "top_snps_eth_62p4m_ptp",
+ snps_eth_62p4m_ptp_parents, 0x0110, 0x0114, 0x0118, 24, 2, 31, 0x0C, 19),
+ /* CLK_CFG_21 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SNPS_ETH_50M_RMII, "snps_eth_50m_rmii",
+ snps_eth_50m_rmii_parents, 0x011C, 0x0120, 0x0124, 0, 1, 7, 0x0C, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DGI_OUT, "top_dgi_out",
+ dgi_out_parents, 0x011C, 0x0120, 0x0124, 8, 3, 15, 0x0C, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NNA0, "top_nna0",
+ nna_parents, 0x011C, 0x0120, 0x0124, 16, 4, 23, 0x0C, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NNA1, "top_nna1",
+ nna_parents, 0x011C, 0x0120, 0x0124, 24, 4, 31, 0x0C, 23),
+ /* CLK_CFG_22 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP, "top_adsp",
+ adsp_parents, 0x0128, 0x012C, 0x0130, 0, 4, 7, 0x0C, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ASM_H, "top_asm_h",
+ asm_parents, 0x0128, 0x012C, 0x0130, 8, 2, 15, 0x0C, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ASM_M, "top_asm_m",
+ asm_parents, 0x0128, 0x012C, 0x0130, 16, 2, 23, 0x0C, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ASM_L, "top_asm_l",
+ asm_parents, 0x0128, 0x012C, 0x0130, 24, 2, 31, 0x0C, 27),
+ /* CLK_CFG_23 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL1, "top_apll1",
+ apll1_parents, 0x0134, 0x0138, 0x013C, 0, 1, 7, 0x0C, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL2, "top_apll2",
+ apll2_parents, 0x0134, 0x0138, 0x013C, 8, 1, 15, 0x0C, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL3, "top_apll3",
+ apll3_parents, 0x0134, 0x0138, 0x013C, 16, 1, 23, 0x0C, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL4, "top_apll4",
+ apll4_parents, 0x0134, 0x0138, 0x013C, 24, 1, 31, 0x0C, 31),
+ /*
+ * CLK_CFG_24
+ * i2so4_mck is not used in MT8195.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL5, "top_apll5",
+ apll5_parents, 0x0140, 0x0144, 0x0148, 0, 1, 7, 0x010, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SO1_MCK, "top_i2so1_mck",
+ i2s_parents, 0x0140, 0x0144, 0x0148, 8, 3, 15, 0x010, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SO2_MCK, "top_i2so2_mck",
+ i2s_parents, 0x0140, 0x0144, 0x0148, 16, 3, 23, 0x010, 2),
+ /*
+ * CLK_CFG_25
+ * i2so5_mck and i2si4_mck are not used in MT8195.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SI1_MCK, "top_i2si1_mck",
+ i2s_parents, 0x014C, 0x0150, 0x0154, 8, 3, 15, 0x010, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SI2_MCK, "top_i2si2_mck",
+ i2s_parents, 0x014C, 0x0150, 0x0154, 16, 3, 23, 0x010, 6),
+ /*
+ * CLK_CFG_26
+ * i2si5_mck is not used in MT8195.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPTX_MCK, "top_dptx_mck",
+ i2s_parents, 0x0158, 0x015C, 0x0160, 8, 3, 15, 0x010, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_IEC_CLK, "top_aud_iec_clk",
+ i2s_parents, 0x0158, 0x015C, 0x0160, 16, 3, 23, 0x010, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A1SYS_HP, "top_a1sys_hp",
+ a1sys_hp_parents, 0x0158, 0x015C, 0x0160, 24, 1, 31, 0x010, 11),
+ /* CLK_CFG_27 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A2SYS_HF, "top_a2sys_hf",
+ a2sys_parents, 0x0164, 0x0168, 0x016C, 0, 1, 7, 0x010, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A3SYS_HF, "top_a3sys_hf",
+ a3sys_parents, 0x0164, 0x0168, 0x016C, 8, 3, 15, 0x010, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A4SYS_HF, "top_a4sys_hf",
+ a3sys_parents, 0x0164, 0x0168, 0x016C, 16, 3, 23, 0x010, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINFI_BCLK, "top_spinfi_bclk",
+ spinfi_b_parents, 0x0164, 0x0168, 0x016C, 24, 3, 31, 0x010, 15),
+ /* CLK_CFG_28 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NFI1X, "top_nfi1x",
+ nfi1x_parents, 0x0170, 0x0174, 0x0178, 0, 3, 7, 0x010, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ECC, "top_ecc",
+ ecc_parents, 0x0170, 0x0174, 0x0178, 8, 3, 15, 0x010, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_LOCAL_BUS, "top_audio_local_bus",
+ audio_local_bus_parents, 0x0170, 0x0174, 0x0178, 16, 4, 23, 0x010, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINOR, "top_spinor",
+ spinor_parents, 0x0170, 0x0174, 0x0178, 24, 2, 31, 0x010, 19),
+ /*
+ * CLK_CFG_29
+	 * top_ulposc/top_ulposc_core/top_srck are clock sources of the always-on
+	 * co-processor and must not be gated by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DVIO_DGI_REF, "top_dvio_dgi_ref",
+ dvio_dgi_ref_parents, 0x017C, 0x0180, 0x0184, 0, 3, 7, 0x010, 20),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_ULPOSC, "top_ulposc",
+ ulposc_parents, 0x017C, 0x0180, 0x0184, 8, 2, 15, 0x010, 21, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_ULPOSC_CORE, "top_ulposc_core",
+ ulposc_core_parents, 0x017C, 0x0180, 0x0184, 16, 2, 23, 0x010, 22, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SRCK, "top_srck",
+ srck_parents, 0x017C, 0x0180, 0x0184, 24, 1, 31, 0x010, 23, CLK_IS_CRITICAL),
+ /*
+	 * The clocks in CLK_CFG_30 ~ 37 are backup clock sources and do not
+	 * need to be handled by Linux.
+ */
+};
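
Each MUX_GATE_CLR_SET_UPD entry packs, in order: the mux, set and clear register offsets, the selector shift and width, the gate bit, and the update register offset and bit. Switching parents writes the old selector field to the CLR register and the new one to the SET register, then pulses the UPD bit, so the live mux register is never read-modify-written. A hedged sketch of that sequence (plain MMIO for brevity; the driver itself goes through regmap in clk-mux.c):

/* Sketch only: mirrors the CLR_SET_UPD parent-switch sequence. */
static void mux_set_parent_sketch(void __iomem *base, u32 clr_ofs, u32 set_ofs,
				  u32 upd_ofs, u8 shift, u8 width,
				  u8 upd_bit, u8 index)
{
	u32 mask = GENMASK(width - 1, 0);

	writel(mask << shift, base + clr_ofs);		/* clear old selector */
	writel((u32)index << shift, base + set_ofs);	/* set new parent */
	writel(BIT(upd_bit), base + upd_ofs);		/* latch the change */
}
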
+
+static const struct mtk_composite top_muxes[] = {
+ /* CLK_MISC_CFG_3 */
+ MUX(CLK_TOP_MFG_CK_FAST_REF, "mfg_ck_fast_ref", mfg_fast_parents, 0x0250, 8, 1),
+};
+
+static const struct mtk_composite top_adj_divs[] = {
+ DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "top_i2si1_mck", 0x0320, 0, 0x0328, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "top_i2si2_mck", 0x0320, 1, 0x0328, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "top_i2so1_mck", 0x0320, 2, 0x0328, 8, 16),
+ DIV_GATE(CLK_TOP_APLL12_DIV3, "apll12_div3", "top_i2so2_mck", 0x0320, 3, 0x0328, 8, 24),
+ DIV_GATE(CLK_TOP_APLL12_DIV4, "apll12_div4", "top_aud_iec_clk", 0x0320, 4, 0x0334, 8, 0),
+ /* apll12_div5 ~ 8 are not used in MT8195. */
+ DIV_GATE(CLK_TOP_APLL12_DIV9, "apll12_div9", "top_dptx_mck", 0x0320, 9, 0x0338, 8, 8),
+};
+
+static const struct mtk_gate_regs top0_cg_regs = {
+ .set_ofs = 0x238,
+ .clr_ofs = 0x238,
+ .sta_ofs = 0x238,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x250,
+ .clr_ofs = 0x250,
+ .sta_ofs = 0x250,
+};
+
+#define GATE_TOP0_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &top0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv, _flag)
+
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_TOP0_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate top_clks[] = {
+ /* TOP0 */
+ GATE_TOP0(CLK_TOP_CFG_VPP0, "cfg_vpp0", "top_vpp", 0),
+ GATE_TOP0(CLK_TOP_CFG_VPP1, "cfg_vpp1", "top_vpp", 1),
+ GATE_TOP0(CLK_TOP_CFG_VDO0, "cfg_vdo0", "top_vpp", 2),
+ GATE_TOP0(CLK_TOP_CFG_VDO1, "cfg_vdo1", "top_vpp", 3),
+ GATE_TOP0(CLK_TOP_CFG_UNIPLL_SES, "cfg_unipll_ses", "univpll_d2", 4),
+ GATE_TOP0(CLK_TOP_CFG_26M_VPP0, "cfg_26m_vpp0", "clk26m", 5),
+ GATE_TOP0(CLK_TOP_CFG_26M_VPP1, "cfg_26m_vpp1", "clk26m", 6),
+ GATE_TOP0(CLK_TOP_CFG_26M_AUD, "cfg_26m_aud", "clk26m", 9),
+ /*
+ * cfg_axi_east, cfg_axi_east_north, cfg_axi_north and cfg_axi_south
+ * are peripheral bus clock branches.
+ */
+ GATE_TOP0_FLAGS(CLK_TOP_CFG_AXI_EAST, "cfg_axi_east", "top_axi", 10, CLK_IS_CRITICAL),
+ GATE_TOP0_FLAGS(CLK_TOP_CFG_AXI_EAST_NORTH, "cfg_axi_east_north", "top_axi", 11,
+ CLK_IS_CRITICAL),
+ GATE_TOP0_FLAGS(CLK_TOP_CFG_AXI_NORTH, "cfg_axi_north", "top_axi", 12, CLK_IS_CRITICAL),
+ GATE_TOP0_FLAGS(CLK_TOP_CFG_AXI_SOUTH, "cfg_axi_south", "top_axi", 13, CLK_IS_CRITICAL),
+ GATE_TOP0(CLK_TOP_CFG_EXT_TEST, "cfg_ext_test", "msdcpll_d2", 15),
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_SSUSB_REF, "ssusb_ref", "clk26m", 0),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_REF, "ssusb_phy_ref", "clk26m", 1),
+ GATE_TOP1(CLK_TOP_SSUSB_P1_REF, "ssusb_p1_ref", "clk26m", 2),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_P1_REF, "ssusb_phy_p1_ref", "clk26m", 3),
+ GATE_TOP1(CLK_TOP_SSUSB_P2_REF, "ssusb_p2_ref", "clk26m", 4),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_P2_REF, "ssusb_phy_p2_ref", "clk26m", 5),
+ GATE_TOP1(CLK_TOP_SSUSB_P3_REF, "ssusb_p3_ref", "clk26m", 6),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_P3_REF, "ssusb_phy_p3_ref", "clk26m", 7),
+};
+
+static const struct of_device_id of_match_clk_mt8195_topck[] = {
+ { .compatible = "mediatek,mt8195-topckgen", },
+ {}
+};
+
+static int clk_mt8195_topck_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *top_clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+ void __iomem *base;
+
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!top_clk_data)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ r = PTR_ERR(base);
+ goto free_top_data;
+ }
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ top_clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+ mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
+ &mt8195_clk_lock, top_clk_data);
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ &mt8195_clk_lock, top_clk_data);
+ mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+ &mt8195_clk_lock, top_clk_data);
+ r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+ if (r)
+ goto free_top_data;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ if (r)
+ goto free_top_data;
+
+ return r;
+
+free_top_data:
+ mtk_free_clk_data(top_clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt8195_topck_drv = {
+ .probe = clk_mt8195_topck_probe,
+ .driver = {
+ .name = "clk-mt8195-topck",
+ .of_match_table = of_match_clk_mt8195_topck,
+ },
+};
+builtin_platform_driver(clk_mt8195_topck_drv);
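
Because the provider is registered through of_clk_src_onecell_get, consumers address these clocks by the dt-binding indices (the CLK_TOP_* values from mt8195-clk.h) in their clocks property and pick them up through the standard clk API. A hedged consumer-side sketch (the device and clock-names are illustrative):

static int consumer_probe_sketch(struct platform_device *pdev)
{
	struct clk *spi_clk;

	/* Resolved via the consumer's "clocks"/"clock-names" properties,
	 * e.g. clocks = <&topckgen CLK_TOP_SPI>; */
	spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(spi_clk))
		return PTR_ERR(spi_clk);

	return clk_prepare_enable(spi_clk);
}
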
diff --git a/drivers/clk/mediatek/clk-mt8195-vdec.c b/drivers/clk/mediatek/clk-mt8195-vdec.c
new file mode 100644
index 000000000000..a1df04f42a90
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-vdec.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vdec2_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate vdec_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_VDEC, "vdec_vdec", "top_vdec", 0),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_LAT, "vdec_lat", "top_vdec", 0),
+ /* VDEC2 */
+ GATE_VDEC2(CLK_VDEC_LARB1, "vdec_larb1", "top_vdec", 0),
+};
+
+static const struct mtk_gate vdec_core1_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_CORE1_VDEC, "vdec_core1_vdec", "top_vdec", 0),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_CORE1_LAT, "vdec_core1_lat", "top_vdec", 0),
+ /* VDEC2 */
+ GATE_VDEC2(CLK_VDEC_CORE1_LARB1, "vdec_core1_larb1", "top_vdec", 0),
+};
+
+static const struct mtk_gate vdec_soc_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_SOC_VDEC, "vdec_soc_vdec", "top_vdec", 0),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_SOC_LAT, "vdec_soc_lat", "top_vdec", 0),
+ /* VDEC2 */
+ GATE_VDEC2(CLK_VDEC_SOC_LARB1, "vdec_soc_larb1", "top_vdec", 0),
+};
+
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
+
+static const struct mtk_clk_desc vdec_core1_desc = {
+ .clks = vdec_core1_clks,
+ .num_clks = ARRAY_SIZE(vdec_core1_clks),
+};
+
+static const struct mtk_clk_desc vdec_soc_desc = {
+ .clks = vdec_soc_clks,
+ .num_clks = ARRAY_SIZE(vdec_soc_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_vdec[] = {
+ {
+ .compatible = "mediatek,mt8195-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ .compatible = "mediatek,mt8195-vdecsys_core1",
+ .data = &vdec_core1_desc,
+ }, {
+ .compatible = "mediatek,mt8195-vdecsys_soc",
+ .data = &vdec_soc_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-vdec",
+ .of_match_table = of_match_clk_mt8195_vdec,
+ },
+};
+builtin_platform_driver(clk_mt8195_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
new file mode 100644
index 000000000000..f7ff7618c714
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs vdo0_0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs vdo0_1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs vdo0_2_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_VDO0_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo0_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO0_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo0_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO0_2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo0_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vdo0_clks[] = {
+ /* VDO0_0 */
+ GATE_VDO0_0(CLK_VDO0_DISP_OVL0, "vdo0_disp_ovl0", "top_vpp", 0),
+ GATE_VDO0_0(CLK_VDO0_DISP_COLOR0, "vdo0_disp_color0", "top_vpp", 2),
+ GATE_VDO0_0(CLK_VDO0_DISP_COLOR1, "vdo0_disp_color1", "top_vpp", 3),
+ GATE_VDO0_0(CLK_VDO0_DISP_CCORR0, "vdo0_disp_ccorr0", "top_vpp", 4),
+ GATE_VDO0_0(CLK_VDO0_DISP_CCORR1, "vdo0_disp_ccorr1", "top_vpp", 5),
+ GATE_VDO0_0(CLK_VDO0_DISP_AAL0, "vdo0_disp_aal0", "top_vpp", 6),
+ GATE_VDO0_0(CLK_VDO0_DISP_AAL1, "vdo0_disp_aal1", "top_vpp", 7),
+ GATE_VDO0_0(CLK_VDO0_DISP_GAMMA0, "vdo0_disp_gamma0", "top_vpp", 8),
+ GATE_VDO0_0(CLK_VDO0_DISP_GAMMA1, "vdo0_disp_gamma1", "top_vpp", 9),
+ GATE_VDO0_0(CLK_VDO0_DISP_DITHER0, "vdo0_disp_dither0", "top_vpp", 10),
+ GATE_VDO0_0(CLK_VDO0_DISP_DITHER1, "vdo0_disp_dither1", "top_vpp", 11),
+ GATE_VDO0_0(CLK_VDO0_DISP_OVL1, "vdo0_disp_ovl1", "top_vpp", 16),
+ GATE_VDO0_0(CLK_VDO0_DISP_WDMA0, "vdo0_disp_wdma0", "top_vpp", 17),
+ GATE_VDO0_0(CLK_VDO0_DISP_WDMA1, "vdo0_disp_wdma1", "top_vpp", 18),
+ GATE_VDO0_0(CLK_VDO0_DISP_RDMA0, "vdo0_disp_rdma0", "top_vpp", 19),
+ GATE_VDO0_0(CLK_VDO0_DISP_RDMA1, "vdo0_disp_rdma1", "top_vpp", 20),
+ GATE_VDO0_0(CLK_VDO0_DSI0, "vdo0_dsi0", "top_vpp", 21),
+ GATE_VDO0_0(CLK_VDO0_DSI1, "vdo0_dsi1", "top_vpp", 22),
+ GATE_VDO0_0(CLK_VDO0_DSC_WRAP0, "vdo0_dsc_wrap0", "top_vpp", 23),
+ GATE_VDO0_0(CLK_VDO0_VPP_MERGE0, "vdo0_vpp_merge0", "top_vpp", 24),
+ GATE_VDO0_0(CLK_VDO0_DP_INTF0, "vdo0_dp_intf0", "top_vpp", 25),
+ GATE_VDO0_0(CLK_VDO0_DISP_MUTEX0, "vdo0_disp_mutex0", "top_vpp", 26),
+ GATE_VDO0_0(CLK_VDO0_DISP_IL_ROT0, "vdo0_disp_il_rot0", "top_vpp", 27),
+ GATE_VDO0_0(CLK_VDO0_APB_BUS, "vdo0_apb_bus", "top_vpp", 28),
+ GATE_VDO0_0(CLK_VDO0_FAKE_ENG0, "vdo0_fake_eng0", "top_vpp", 29),
+ GATE_VDO0_0(CLK_VDO0_FAKE_ENG1, "vdo0_fake_eng1", "top_vpp", 30),
+ /* VDO0_1 */
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC0, "vdo0_dl_async0", "top_vpp", 0),
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC1, "vdo0_dl_async1", "top_vpp", 1),
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC2, "vdo0_dl_async2", "top_vpp", 2),
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC3, "vdo0_dl_async3", "top_vpp", 3),
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC4, "vdo0_dl_async4", "top_vpp", 4),
+ GATE_VDO0_1(CLK_VDO0_DISP_MONITOR0, "vdo0_disp_monitor0", "top_vpp", 5),
+ GATE_VDO0_1(CLK_VDO0_DISP_MONITOR1, "vdo0_disp_monitor1", "top_vpp", 6),
+ GATE_VDO0_1(CLK_VDO0_DISP_MONITOR2, "vdo0_disp_monitor2", "top_vpp", 7),
+ GATE_VDO0_1(CLK_VDO0_DISP_MONITOR3, "vdo0_disp_monitor3", "top_vpp", 8),
+ GATE_VDO0_1(CLK_VDO0_DISP_MONITOR4, "vdo0_disp_monitor4", "top_vpp", 9),
+ GATE_VDO0_1(CLK_VDO0_SMI_GALS, "vdo0_smi_gals", "top_vpp", 10),
+ GATE_VDO0_1(CLK_VDO0_SMI_COMMON, "vdo0_smi_common", "top_vpp", 11),
+ GATE_VDO0_1(CLK_VDO0_SMI_EMI, "vdo0_smi_emi", "top_vpp", 12),
+ GATE_VDO0_1(CLK_VDO0_SMI_IOMMU, "vdo0_smi_iommu", "top_vpp", 13),
+ GATE_VDO0_1(CLK_VDO0_SMI_LARB, "vdo0_smi_larb", "top_vpp", 14),
+ GATE_VDO0_1(CLK_VDO0_SMI_RSI, "vdo0_smi_rsi", "top_vpp", 15),
+ /* VDO0_2 */
+ GATE_VDO0_2(CLK_VDO0_DSI0_DSI, "vdo0_dsi0_dsi", "top_dsi_occ", 0),
+ GATE_VDO0_2(CLK_VDO0_DSI1_DSI, "vdo0_dsi1_dsi", "top_dsi_occ", 8),
+ GATE_VDO0_2(CLK_VDO0_DP_INTF0_DP_INTF, "vdo0_dp_intf0_dp_intf", "top_edp", 16),
+};
+
+static int clk_mt8195_vdo0_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDO0_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_gates(node, vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data);
+ if (r)
+ goto free_vdo0_data;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ goto free_vdo0_data;
+
+ return r;
+
+free_vdo0_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt8195_vdo0_drv = {
+ .probe = clk_mt8195_vdo0_probe,
+ .driver = {
+ .name = "clk-mt8195-vdo0",
+ },
+};
+builtin_platform_driver(clk_mt8195_vdo0_drv);
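
Unlike the other providers in this patch, vdo0 (and vdo1 below) has no of_match_table and takes its of_node from dev->parent: the vdosys register space belongs to the mmsys device, which is expected to spawn these clock devices by driver name. A hedged sketch of that parent-side registration (the driver name comes from this file; the rest is illustrative of what mtk-mmsys does):

/* Sketch only: the parent mmsys driver registers the clock child. */
static struct platform_device *spawn_vdo0_clk_dev(struct platform_device *mmsys)
{
	return platform_device_register_data(&mmsys->dev, "clk-mt8195-vdo0",
					     PLATFORM_DEVID_AUTO, NULL, 0);
}
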
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
new file mode 100644
index 000000000000..03df8eae8838
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs vdo1_0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs vdo1_1_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+static const struct mtk_gate_regs vdo1_2_cg_regs = {
+ .set_ofs = 0x134,
+ .clr_ofs = 0x138,
+ .sta_ofs = 0x130,
+};
+
+static const struct mtk_gate_regs vdo1_3_cg_regs = {
+ .set_ofs = 0x144,
+ .clr_ofs = 0x148,
+ .sta_ofs = 0x140,
+};
+
+#define GATE_VDO1_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO1_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO1_2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO1_3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vdo1_clks[] = {
+ /* VDO1_0 */
+ GATE_VDO1_0(CLK_VDO1_SMI_LARB2, "vdo1_smi_larb2", "top_vpp", 0),
+ GATE_VDO1_0(CLK_VDO1_SMI_LARB3, "vdo1_smi_larb3", "top_vpp", 1),
+ GATE_VDO1_0(CLK_VDO1_GALS, "vdo1_gals", "top_vpp", 2),
+ GATE_VDO1_0(CLK_VDO1_FAKE_ENG0, "vdo1_fake_eng0", "top_vpp", 3),
+ GATE_VDO1_0(CLK_VDO1_FAKE_ENG, "vdo1_fake_eng", "top_vpp", 4),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA0, "vdo1_mdp_rdma0", "top_vpp", 5),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA1, "vdo1_mdp_rdma1", "top_vpp", 6),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA2, "vdo1_mdp_rdma2", "top_vpp", 7),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA3, "vdo1_mdp_rdma3", "top_vpp", 8),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE0, "vdo1_vpp_merge0", "top_vpp", 9),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE1, "vdo1_vpp_merge1", "top_vpp", 10),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE2, "vdo1_vpp_merge2", "top_vpp", 11),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE3, "vdo1_vpp_merge3", "top_vpp", 12),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE4, "vdo1_vpp_merge4", "top_vpp", 13),
+ GATE_VDO1_0(CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC, "vdo1_vpp2_to_vdo1_dl_async", "top_vpp", 14),
+ GATE_VDO1_0(CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC, "vdo1_vpp3_to_vdo1_dl_async", "top_vpp", 15),
+ GATE_VDO1_0(CLK_VDO1_DISP_MUTEX, "vdo1_disp_mutex", "top_vpp", 16),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA4, "vdo1_mdp_rdma4", "top_vpp", 17),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA5, "vdo1_mdp_rdma5", "top_vpp", 18),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA6, "vdo1_mdp_rdma6", "top_vpp", 19),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA7, "vdo1_mdp_rdma7", "top_vpp", 20),
+ GATE_VDO1_0(CLK_VDO1_DP_INTF0_MM, "vdo1_dp_intf0_mm", "top_vpp", 21),
+ GATE_VDO1_0(CLK_VDO1_DPI0_MM, "vdo1_dpi0_mm", "top_vpp", 22),
+ GATE_VDO1_0(CLK_VDO1_DPI1_MM, "vdo1_dpi1_mm", "top_vpp", 23),
+ GATE_VDO1_0(CLK_VDO1_DISP_MONITOR, "vdo1_disp_monitor", "top_vpp", 24),
+ GATE_VDO1_0(CLK_VDO1_MERGE0_DL_ASYNC, "vdo1_merge0_dl_async", "top_vpp", 25),
+ GATE_VDO1_0(CLK_VDO1_MERGE1_DL_ASYNC, "vdo1_merge1_dl_async", "top_vpp", 26),
+ GATE_VDO1_0(CLK_VDO1_MERGE2_DL_ASYNC, "vdo1_merge2_dl_async", "top_vpp", 27),
+ GATE_VDO1_0(CLK_VDO1_MERGE3_DL_ASYNC, "vdo1_merge3_dl_async", "top_vpp", 28),
+ GATE_VDO1_0(CLK_VDO1_MERGE4_DL_ASYNC, "vdo1_merge4_dl_async", "top_vpp", 29),
+ GATE_VDO1_0(CLK_VDO1_VDO0_DSC_TO_VDO1_DL_ASYNC, "vdo1_vdo0_dsc_to_vdo1_dl_async",
+ "top_vpp", 30),
+ GATE_VDO1_0(CLK_VDO1_VDO0_MERGE_TO_VDO1_DL_ASYNC, "vdo1_vdo0_merge_to_vdo1_dl_async",
+ "top_vpp", 31),
+ /* VDO1_1 */
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_FE0, "vdo1_hdr_vdo_fe0", "top_vpp", 0),
+ GATE_VDO1_1(CLK_VDO1_HDR_GFX_FE0, "vdo1_hdr_gfx_fe0", "top_vpp", 1),
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_BE, "vdo1_hdr_vdo_be", "top_vpp", 2),
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_FE1, "vdo1_hdr_vdo_fe1", "top_vpp", 16),
+ GATE_VDO1_1(CLK_VDO1_HDR_GFX_FE1, "vdo1_hdr_gfx_fe1", "top_vpp", 17),
+ GATE_VDO1_1(CLK_VDO1_DISP_MIXER, "vdo1_disp_mixer", "top_vpp", 18),
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_FE0_DL_ASYNC, "vdo1_hdr_vdo_fe0_dl_async", "top_vpp", 19),
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_FE1_DL_ASYNC, "vdo1_hdr_vdo_fe1_dl_async", "top_vpp", 20),
+ GATE_VDO1_1(CLK_VDO1_HDR_GFX_FE0_DL_ASYNC, "vdo1_hdr_gfx_fe0_dl_async", "top_vpp", 21),
+ GATE_VDO1_1(CLK_VDO1_HDR_GFX_FE1_DL_ASYNC, "vdo1_hdr_gfx_fe1_dl_async", "top_vpp", 22),
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_BE_DL_ASYNC, "vdo1_hdr_vdo_be_dl_async", "top_vpp", 23),
+ /* VDO1_2 */
+ GATE_VDO1_2(CLK_VDO1_DPI0, "vdo1_dpi0", "top_vpp", 0),
+ GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPI0, "vdo1_disp_monitor_dpi0", "top_vpp", 1),
+ GATE_VDO1_2(CLK_VDO1_DPI1, "vdo1_dpi1", "top_vpp", 8),
+ GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPI1, "vdo1_disp_monitor_dpi1", "top_vpp", 9),
+ GATE_VDO1_2(CLK_VDO1_DPINTF, "vdo1_dpintf", "top_vpp", 16),
+ GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPINTF, "vdo1_disp_monitor_dpintf", "top_vpp", 17),
+ /* VDO1_3 */
+ GATE_VDO1_3(CLK_VDO1_26M_SLOW, "vdo1_26m_slow", "clk26m", 8),
+};
+
+static int clk_mt8195_vdo1_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDO1_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_gates(node, vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data);
+ if (r)
+ goto free_vdo1_data;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ goto free_vdo1_data;
+
+ return r;
+
+free_vdo1_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt8195_vdo1_drv = {
+ .probe = clk_mt8195_vdo1_probe,
+ .driver = {
+ .name = "clk-mt8195-vdo1",
+ },
+};
+builtin_platform_driver(clk_mt8195_vdo1_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-venc.c b/drivers/clk/mediatek/clk-mt8195-venc.c
new file mode 100644
index 000000000000..7339851a0856
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-venc.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC(CLK_VENC_LARB, "venc_larb", "top_venc", 0),
+ GATE_VENC(CLK_VENC_VENC, "venc_venc", "top_venc", 4),
+ GATE_VENC(CLK_VENC_JPGENC, "venc_jpgenc", "top_venc", 8),
+ GATE_VENC(CLK_VENC_JPGDEC, "venc_jpgdec", "top_venc", 12),
+ GATE_VENC(CLK_VENC_JPGDEC_C1, "venc_jpgdec_c1", "top_venc", 16),
+ GATE_VENC(CLK_VENC_GALS, "venc_gals", "top_venc", 28),
+};
+
+static const struct mtk_gate venc_core1_clks[] = {
+ GATE_VENC(CLK_VENC_CORE1_LARB, "venc_core1_larb", "top_venc", 0),
+ GATE_VENC(CLK_VENC_CORE1_VENC, "venc_core1_venc", "top_venc", 4),
+ GATE_VENC(CLK_VENC_CORE1_JPGENC, "venc_core1_jpgenc", "top_venc", 8),
+ GATE_VENC(CLK_VENC_CORE1_JPGDEC, "venc_core1_jpgdec", "top_venc", 12),
+ GATE_VENC(CLK_VENC_CORE1_JPGDEC_C1, "venc_core1_jpgdec_c1", "top_venc", 16),
+ GATE_VENC(CLK_VENC_CORE1_GALS, "venc_core1_gals", "top_venc", 28),
+};
+
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
+
+static const struct mtk_clk_desc venc_core1_desc = {
+ .clks = venc_core1_clks,
+ .num_clks = ARRAY_SIZE(venc_core1_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_venc[] = {
+ {
+ .compatible = "mediatek,mt8195-vencsys",
+ .data = &venc_desc,
+ }, {
+ .compatible = "mediatek,mt8195-vencsys_core1",
+ .data = &venc_core1_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-venc",
+ .of_match_table = of_match_clk_mt8195_venc,
+ },
+};
+builtin_platform_driver(clk_mt8195_venc_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp0.c b/drivers/clk/mediatek/clk-mt8195-vpp0.c
new file mode 100644
index 000000000000..c3241466a8d0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-vpp0.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs vpp0_0_cg_regs = {
+ .set_ofs = 0x24,
+ .clr_ofs = 0x28,
+ .sta_ofs = 0x20,
+};
+
+static const struct mtk_gate_regs vpp0_1_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x34,
+ .sta_ofs = 0x2c,
+};
+
+static const struct mtk_gate_regs vpp0_2_cg_regs = {
+ .set_ofs = 0x3c,
+ .clr_ofs = 0x40,
+ .sta_ofs = 0x38,
+};
+
+#define GATE_VPP0_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp0_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VPP0_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp0_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VPP0_2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp0_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vpp0_clks[] = {
+ /* VPP0_0 */
+ GATE_VPP0_0(CLK_VPP0_MDP_FG, "vpp0_mdp_fg", "top_vpp", 1),
+ GATE_VPP0_0(CLK_VPP0_STITCH, "vpp0_stitch", "top_vpp", 2),
+ GATE_VPP0_0(CLK_VPP0_PADDING, "vpp0_padding", "top_vpp", 7),
+ GATE_VPP0_0(CLK_VPP0_MDP_TCC, "vpp0_mdp_tcc", "top_vpp", 8),
+ GATE_VPP0_0(CLK_VPP0_WARP0_ASYNC_TX, "vpp0_warp0_async_tx", "top_vpp", 10),
+ GATE_VPP0_0(CLK_VPP0_WARP1_ASYNC_TX, "vpp0_warp1_async_tx", "top_vpp", 11),
+ GATE_VPP0_0(CLK_VPP0_MUTEX, "vpp0_mutex", "top_vpp", 13),
+ GATE_VPP0_0(CLK_VPP0_VPP02VPP1_RELAY, "vpp0_vpp02vpp1_relay", "top_vpp", 14),
+ GATE_VPP0_0(CLK_VPP0_VPP12VPP0_ASYNC, "vpp0_vpp12vpp0_async", "top_vpp", 15),
+ GATE_VPP0_0(CLK_VPP0_MMSYSRAM_TOP, "vpp0_mmsysram_top", "top_vpp", 16),
+ GATE_VPP0_0(CLK_VPP0_MDP_AAL, "vpp0_mdp_aal", "top_vpp", 17),
+ GATE_VPP0_0(CLK_VPP0_MDP_RSZ, "vpp0_mdp_rsz", "top_vpp", 18),
+ /* VPP0_1 */
+ GATE_VPP0_1(CLK_VPP0_SMI_COMMON, "vpp0_smi_common", "top_vpp", 0),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDO0_LARB0, "vpp0_gals_vdo0_larb0", "top_vpp", 1),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDO0_LARB1, "vpp0_gals_vdo0_larb1", "top_vpp", 2),
+ GATE_VPP0_1(CLK_VPP0_GALS_VENCSYS, "vpp0_gals_vencsys", "top_vpp", 3),
+ GATE_VPP0_1(CLK_VPP0_GALS_VENCSYS_CORE1, "vpp0_gals_vencsys_core1", "top_vpp", 4),
+ GATE_VPP0_1(CLK_VPP0_GALS_INFRA, "vpp0_gals_infra", "top_vpp", 5),
+ GATE_VPP0_1(CLK_VPP0_GALS_CAMSYS, "vpp0_gals_camsys", "top_vpp", 6),
+ GATE_VPP0_1(CLK_VPP0_GALS_VPP1_LARB5, "vpp0_gals_vpp1_larb5", "top_vpp", 7),
+ GATE_VPP0_1(CLK_VPP0_GALS_VPP1_LARB6, "vpp0_gals_vpp1_larb6", "top_vpp", 8),
+ GATE_VPP0_1(CLK_VPP0_SMI_REORDER, "vpp0_smi_reorder", "top_vpp", 9),
+ GATE_VPP0_1(CLK_VPP0_SMI_IOMMU, "vpp0_smi_iommu", "top_vpp", 10),
+ GATE_VPP0_1(CLK_VPP0_GALS_IMGSYS_CAMSYS, "vpp0_gals_imgsys_camsys", "top_vpp", 11),
+ GATE_VPP0_1(CLK_VPP0_MDP_RDMA, "vpp0_mdp_rdma", "top_vpp", 12),
+ GATE_VPP0_1(CLK_VPP0_MDP_WROT, "vpp0_mdp_wrot", "top_vpp", 13),
+ GATE_VPP0_1(CLK_VPP0_GALS_EMI0_EMI1, "vpp0_gals_emi0_emi1", "top_vpp", 16),
+ GATE_VPP0_1(CLK_VPP0_SMI_SUB_COMMON_REORDER, "vpp0_smi_sub_common_reorder", "top_vpp", 17),
+ GATE_VPP0_1(CLK_VPP0_SMI_RSI, "vpp0_smi_rsi", "top_vpp", 18),
+ GATE_VPP0_1(CLK_VPP0_SMI_COMMON_LARB4, "vpp0_smi_common_larb4", "top_vpp", 19),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDEC_VDEC_CORE1, "vpp0_gals_vdec_vdec_core1", "top_vpp", 20),
+ GATE_VPP0_1(CLK_VPP0_GALS_VPP1_WPE, "vpp0_gals_vpp1_wpe", "top_vpp", 21),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1, "vpp0_gals_vdo0_vdo1_vencsys_core1",
+ "top_vpp", 22),
+ GATE_VPP0_1(CLK_VPP0_FAKE_ENG, "vpp0_fake_eng", "top_vpp", 23),
+ GATE_VPP0_1(CLK_VPP0_MDP_HDR, "vpp0_mdp_hdr", "top_vpp", 24),
+ GATE_VPP0_1(CLK_VPP0_MDP_TDSHP, "vpp0_mdp_tdshp", "top_vpp", 25),
+ GATE_VPP0_1(CLK_VPP0_MDP_COLOR, "vpp0_mdp_color", "top_vpp", 26),
+ GATE_VPP0_1(CLK_VPP0_MDP_OVL, "vpp0_mdp_ovl", "top_vpp", 27),
+ /* VPP0_2 */
+ GATE_VPP0_2(CLK_VPP0_WARP0_RELAY, "vpp0_warp0_relay", "top_wpe_vpp", 0),
+ GATE_VPP0_2(CLK_VPP0_WARP0_MDP_DL_ASYNC, "vpp0_warp0_mdp_dl_async", "top_wpe_vpp", 1),
+ GATE_VPP0_2(CLK_VPP0_WARP1_RELAY, "vpp0_warp1_relay", "top_wpe_vpp", 2),
+ GATE_VPP0_2(CLK_VPP0_WARP1_MDP_DL_ASYNC, "vpp0_warp1_mdp_dl_async", "top_wpe_vpp", 3),
+};
+
+static const struct mtk_clk_desc vpp0_desc = {
+ .clks = vpp0_clks,
+ .num_clks = ARRAY_SIZE(vpp0_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_vpp0[] = {
+ {
+ .compatible = "mediatek,mt8195-vppsys0",
+ .data = &vpp0_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_vpp0_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-vpp0",
+ .of_match_table = of_match_clk_mt8195_vpp0,
+ },
+};
+builtin_platform_driver(clk_mt8195_vpp0_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp1.c b/drivers/clk/mediatek/clk-mt8195-vpp1.c
new file mode 100644
index 000000000000..ce0b9a40a179
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-vpp1.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs vpp1_0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs vpp1_1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+#define GATE_VPP1_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VPP1_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp1_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vpp1_clks[] = {
+ /* VPP1_0 */
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_OVL, "vpp1_svpp1_mdp_ovl", "top_vpp", 0),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_TCC, "vpp1_svpp1_mdp_tcc", "top_vpp", 1),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_WROT, "vpp1_svpp1_mdp_wrot", "top_vpp", 2),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_VPP_PAD, "vpp1_svpp1_vpp_pad", "top_vpp", 3),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_WROT, "vpp1_svpp2_mdp_wrot", "top_vpp", 4),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_VPP_PAD, "vpp1_svpp2_vpp_pad", "top_vpp", 5),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_WROT, "vpp1_svpp3_mdp_wrot", "top_vpp", 6),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_VPP_PAD, "vpp1_svpp3_vpp_pad", "top_vpp", 7),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_RDMA, "vpp1_svpp1_mdp_rdma", "top_vpp", 8),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_FG, "vpp1_svpp1_mdp_fg", "top_vpp", 9),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_RDMA, "vpp1_svpp2_mdp_rdma", "top_vpp", 10),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_FG, "vpp1_svpp2_mdp_fg", "top_vpp", 11),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_RDMA, "vpp1_svpp3_mdp_rdma", "top_vpp", 12),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_FG, "vpp1_svpp3_mdp_fg", "top_vpp", 13),
+ GATE_VPP1_0(CLK_VPP1_VPP_SPLIT, "vpp1_vpp_split", "top_vpp", 14),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_VDO0_DL_RELAY, "vpp1_svpp2_vdo0_dl_relay", "top_vpp", 15),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_TDSHP, "vpp1_svpp1_mdp_tdshp", "top_vpp", 16),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_COLOR, "vpp1_svpp1_mdp_color", "top_vpp", 17),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_VDO1_DL_RELAY, "vpp1_svpp3_vdo1_dl_relay", "top_vpp", 18),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_VPP_MERGE, "vpp1_svpp2_vpp_merge", "top_vpp", 19),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_COLOR, "vpp1_svpp2_mdp_color", "top_vpp", 20),
+ GATE_VPP1_0(CLK_VPP1_VPPSYS1_GALS, "vpp1_vppsys1_gals", "top_vpp", 21),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_VPP_MERGE, "vpp1_svpp3_vpp_merge", "top_vpp", 22),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_COLOR, "vpp1_svpp3_mdp_color", "top_vpp", 23),
+ GATE_VPP1_0(CLK_VPP1_VPPSYS1_LARB, "vpp1_vppsys1_larb", "top_vpp", 24),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_RSZ, "vpp1_svpp1_mdp_rsz", "top_vpp", 25),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_HDR, "vpp1_svpp1_mdp_hdr", "top_vpp", 26),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_AAL, "vpp1_svpp1_mdp_aal", "top_vpp", 27),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_HDR, "vpp1_svpp2_mdp_hdr", "top_vpp", 28),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_AAL, "vpp1_svpp2_mdp_aal", "top_vpp", 29),
+ GATE_VPP1_0(CLK_VPP1_DL_ASYNC, "vpp1_dl_async", "top_vpp", 30),
+ GATE_VPP1_0(CLK_VPP1_LARB5_FAKE_ENG, "vpp1_larb5_fake_eng", "top_vpp", 31),
+ /* VPP1_1 */
+ GATE_VPP1_1(CLK_VPP1_SVPP3_MDP_HDR, "vpp1_svpp3_mdp_hdr", "top_vpp", 0),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_MDP_AAL, "vpp1_svpp3_mdp_aal", "top_vpp", 1),
+ GATE_VPP1_1(CLK_VPP1_SVPP2_VDO1_DL_RELAY, "vpp1_svpp2_vdo1_dl_relay", "top_vpp", 2),
+ GATE_VPP1_1(CLK_VPP1_LARB6_FAKE_ENG, "vpp1_larb6_fake_eng", "top_vpp", 3),
+ GATE_VPP1_1(CLK_VPP1_SVPP2_MDP_RSZ, "vpp1_svpp2_mdp_rsz", "top_vpp", 4),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_MDP_RSZ, "vpp1_svpp3_mdp_rsz", "top_vpp", 5),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_VDO0_DL_RELAY, "vpp1_svpp3_vdo0_dl_relay", "top_vpp", 6),
+ GATE_VPP1_1(CLK_VPP1_DISP_MUTEX, "vpp1_disp_mutex", "top_vpp", 7),
+ GATE_VPP1_1(CLK_VPP1_SVPP2_MDP_TDSHP, "vpp1_svpp2_mdp_tdshp", "top_vpp", 8),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_MDP_TDSHP, "vpp1_svpp3_mdp_tdshp", "top_vpp", 9),
+ GATE_VPP1_1(CLK_VPP1_VPP0_DL1_RELAY, "vpp1_vpp0_dl1_relay", "top_vpp", 10),
+ GATE_VPP1_1(CLK_VPP1_HDMI_META, "vpp1_hdmi_meta", "hdmirx_p", 11),
+ GATE_VPP1_1(CLK_VPP1_VPP_SPLIT_HDMI, "vpp1_vpp_split_hdmi", "hdmirx_p", 12),
+ GATE_VPP1_1(CLK_VPP1_DGI_IN, "vpp1_dgi_in", "in_dgi", 13),
+ GATE_VPP1_1(CLK_VPP1_DGI_OUT, "vpp1_dgi_out", "top_dgi_out", 14),
+ GATE_VPP1_1(CLK_VPP1_VPP_SPLIT_DGI, "vpp1_vpp_split_dgi", "top_dgi_out", 15),
+ GATE_VPP1_1(CLK_VPP1_VPP0_DL_ASYNC, "vpp1_vpp0_dl_async", "top_vpp", 16),
+ GATE_VPP1_1(CLK_VPP1_VPP0_DL_RELAY, "vpp1_vpp0_dl_relay", "top_vpp", 17),
+ GATE_VPP1_1(CLK_VPP1_VPP_SPLIT_26M, "vpp1_vpp_split_26m", "clk26m", 26),
+};
+
+static const struct mtk_clk_desc vpp1_desc = {
+ .clks = vpp1_clks,
+ .num_clks = ARRAY_SIZE(vpp1_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_vpp1[] = {
+ {
+ .compatible = "mediatek,mt8195-vppsys1",
+ .data = &vpp1_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_vpp1_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-vpp1",
+ .of_match_table = of_match_clk_mt8195_vpp1,
+ },
+};
+builtin_platform_driver(clk_mt8195_vpp1_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-wpe.c b/drivers/clk/mediatek/clk-mt8195-wpe.c
new file mode 100644
index 000000000000..274d60838d8e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-wpe.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs wpe_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs wpe_vpp0_cg_regs = {
+ .set_ofs = 0x58,
+ .clr_ofs = 0x58,
+ .sta_ofs = 0x58,
+};
+
+static const struct mtk_gate_regs wpe_vpp1_cg_regs = {
+ .set_ofs = 0x5c,
+ .clr_ofs = 0x5c,
+ .sta_ofs = 0x5c,
+};
+
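+/*
+ * set_ofs, clr_ofs and sta_ofs are identical within each register block
+ * above: these blocks have no separate set/clear registers, so the gate
+ * bits are updated with read-modify-write accesses via the "no_setclr" ops.
+ */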
+#define GATE_WPE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &wpe_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_WPE_VPP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &wpe_vpp0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_WPE_VPP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &wpe_vpp1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate wpe_clks[] = {
+ GATE_WPE(CLK_WPE_VPP0, "wpe_vpp0", "top_wpe_vpp", 16),
+ GATE_WPE(CLK_WPE_VPP1, "wpe_vpp1", "top_wpe_vpp", 17),
+ GATE_WPE(CLK_WPE_SMI_LARB7, "wpe_smi_larb7", "top_wpe_vpp", 18),
+ GATE_WPE(CLK_WPE_SMI_LARB8, "wpe_smi_larb8", "top_wpe_vpp", 19),
+ GATE_WPE(CLK_WPE_EVENT_TX, "wpe_event_tx", "top_wpe_vpp", 20),
+ GATE_WPE(CLK_WPE_SMI_LARB7_P, "wpe_smi_larb7_p", "top_wpe_vpp", 24),
+ GATE_WPE(CLK_WPE_SMI_LARB8_P, "wpe_smi_larb8_p", "top_wpe_vpp", 25),
+};
+
+static const struct mtk_gate wpe_vpp0_clks[] = {
+ /* WPE_VPP0 */
+ GATE_WPE_VPP0(CLK_WPE_VPP0_VGEN, "wpe_vpp0_vgen", "top_img", 0),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_EXT, "wpe_vpp0_ext", "top_img", 1),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_VFC, "wpe_vpp0_vfc", "top_img", 2),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH0_TOP, "wpe_vpp0_cach0_top", "top_img", 3),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH0_DMA, "wpe_vpp0_cach0_dma", "top_img", 4),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH1_TOP, "wpe_vpp0_cach1_top", "top_img", 5),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH1_DMA, "wpe_vpp0_cach1_dma", "top_img", 6),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH2_TOP, "wpe_vpp0_cach2_top", "top_img", 7),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH2_DMA, "wpe_vpp0_cach2_dma", "top_img", 8),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH3_TOP, "wpe_vpp0_cach3_top", "top_img", 9),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH3_DMA, "wpe_vpp0_cach3_dma", "top_img", 10),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_PSP, "wpe_vpp0_psp", "top_img", 11),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_PSP2, "wpe_vpp0_psp2", "top_img", 12),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_SYNC, "wpe_vpp0_sync", "top_img", 13),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_C24, "wpe_vpp0_c24", "top_img", 14),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_MDP_CROP, "wpe_vpp0_mdp_crop", "top_img", 15),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_ISP_CROP, "wpe_vpp0_isp_crop", "top_img", 16),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_TOP, "wpe_vpp0_top", "top_img", 17),
+ /* WPE_VPP1 */
+ GATE_WPE_VPP1(CLK_WPE_VPP0_VECI, "wpe_vpp0_veci", "top_img", 0),
+ GATE_WPE_VPP1(CLK_WPE_VPP0_VEC2I, "wpe_vpp0_vec2i", "top_img", 1),
+ GATE_WPE_VPP1(CLK_WPE_VPP0_VEC3I, "wpe_vpp0_vec3i", "top_img", 2),
+ GATE_WPE_VPP1(CLK_WPE_VPP0_WPEO, "wpe_vpp0_wpeo", "top_img", 3),
+ GATE_WPE_VPP1(CLK_WPE_VPP0_MSKO, "wpe_vpp0_msko", "top_img", 4),
+};
+
+static const struct mtk_gate wpe_vpp1_clks[] = {
+ /* WPE_VPP0 */
+ GATE_WPE_VPP0(CLK_WPE_VPP1_VGEN, "wpe_vpp1_vgen", "top_img", 0),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_EXT, "wpe_vpp1_ext", "top_img", 1),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_VFC, "wpe_vpp1_vfc", "top_img", 2),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH0_TOP, "wpe_vpp1_cach0_top", "top_img", 3),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH0_DMA, "wpe_vpp1_cach0_dma", "top_img", 4),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH1_TOP, "wpe_vpp1_cach1_top", "top_img", 5),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH1_DMA, "wpe_vpp1_cach1_dma", "top_img", 6),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH2_TOP, "wpe_vpp1_cach2_top", "top_img", 7),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH2_DMA, "wpe_vpp1_cach2_dma", "top_img", 8),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH3_TOP, "wpe_vpp1_cach3_top", "top_img", 9),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH3_DMA, "wpe_vpp1_cach3_dma", "top_img", 10),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_PSP, "wpe_vpp1_psp", "top_img", 11),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_PSP2, "wpe_vpp1_psp2", "top_img", 12),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_SYNC, "wpe_vpp1_sync", "top_img", 13),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_C24, "wpe_vpp1_c24", "top_img", 14),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_MDP_CROP, "wpe_vpp1_mdp_crop", "top_img", 15),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_ISP_CROP, "wpe_vpp1_isp_crop", "top_img", 16),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_TOP, "wpe_vpp1_top", "top_img", 17),
+ /* WPE_VPP1 */
+ GATE_WPE_VPP1(CLK_WPE_VPP1_VECI, "wpe_vpp1_veci", "top_img", 0),
+ GATE_WPE_VPP1(CLK_WPE_VPP1_VEC2I, "wpe_vpp1_vec2i", "top_img", 1),
+ GATE_WPE_VPP1(CLK_WPE_VPP1_VEC3I, "wpe_vpp1_vec3i", "top_img", 2),
+ GATE_WPE_VPP1(CLK_WPE_VPP1_WPEO, "wpe_vpp1_wpeo", "top_img", 3),
+ GATE_WPE_VPP1(CLK_WPE_VPP1_MSKO, "wpe_vpp1_msko", "top_img", 4),
+};
+
+static const struct mtk_clk_desc wpe_desc = {
+ .clks = wpe_clks,
+ .num_clks = ARRAY_SIZE(wpe_clks),
+};
+
+static const struct mtk_clk_desc wpe_vpp0_desc = {
+ .clks = wpe_vpp0_clks,
+ .num_clks = ARRAY_SIZE(wpe_vpp0_clks),
+};
+
+static const struct mtk_clk_desc wpe_vpp1_desc = {
+ .clks = wpe_vpp1_clks,
+ .num_clks = ARRAY_SIZE(wpe_vpp1_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_wpe[] = {
+ {
+ .compatible = "mediatek,mt8195-wpesys",
+ .data = &wpe_desc,
+ }, {
+ .compatible = "mediatek,mt8195-wpesys_vpp0",
+ .data = &wpe_vpp0_desc,
+ }, {
+ .compatible = "mediatek,mt8195-wpesys_vpp1",
+ .data = &wpe_vpp1_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_wpe_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-wpe",
+ .of_match_table = of_match_clk_mt8195_wpe,
+ },
+};
+builtin_platform_driver(clk_mt8195_wpe_drv);
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 4b6096c44d74..8d5791b3f460 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/clkdev.h>
+#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/device.h>
#include <linux/of_device.h>
@@ -42,6 +43,16 @@ err_out:
return NULL;
}
+EXPORT_SYMBOL_GPL(mtk_alloc_clk_data);
+
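+/* Counterpart of mtk_alloc_clk_data(), used to unwind the allocation on probe errors. */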
+void mtk_free_clk_data(struct clk_onecell_data *clk_data)
+{
+ if (!clk_data)
+ return;
+
+ kfree(clk_data->clks);
+ kfree(clk_data);
+}
void mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
int num, struct clk_onecell_data *clk_data)
@@ -68,6 +79,7 @@ void mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
clk_data->clks[rc->id] = clk;
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_fixed_clks);
void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
int num, struct clk_onecell_data *clk_data)
@@ -94,6 +106,7 @@ void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
clk_data->clks[ff->id] = clk;
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_factors);
int mtk_clk_register_gates_with_dev(struct device_node *node,
const struct mtk_gate *clks,
@@ -146,6 +159,7 @@ int mtk_clk_register_gates(struct device_node *node,
return mtk_clk_register_gates_with_dev(node,
clks, num, clk_data, NULL);
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_gates);
struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
void __iomem *base, spinlock_t *lock)
@@ -259,6 +273,7 @@ void mtk_clk_register_composites(const struct mtk_composite *mcs,
clk_data->clks[mc->id] = clk;
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_composites);
void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
int num, void __iomem *base, spinlock_t *lock,
@@ -305,7 +320,17 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
r = mtk_clk_register_gates(node, mcd->clks, mcd->num_clks, clk_data);
if (r)
- return r;
+ goto free_data;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ goto free_data;
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return r;
+
+free_data:
+ mtk_free_clk_data(clk_data);
+ return r;
}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 7de41c3b3206..0ff289d93452 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -202,6 +202,7 @@ void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
struct clk_onecell_data *clk_data);
struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
+void mtk_free_clk_data(struct clk_onecell_data *clk_data);
#define HAVE_RST_BAR BIT(0)
#define PLL_AO BIT(1)
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 855b0a1f7eb9..6d3a50eb7d6f 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -8,6 +8,7 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include "clk-mtk.h"
#include "clk-mux.h"
@@ -120,6 +121,7 @@ const struct clk_ops mtk_mux_clr_set_upd_ops = {
.get_parent = mtk_clk_mux_get_parent,
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
};
+EXPORT_SYMBOL_GPL(mtk_mux_clr_set_upd_ops);
const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
.enable = mtk_clk_mux_enable_setclr,
@@ -128,6 +130,7 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
.get_parent = mtk_clk_mux_get_parent,
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
struct regmap *regmap,
@@ -195,3 +198,6 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_muxes);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 7fb001a4e7d8..60d7ffa0b924 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -7,6 +7,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
@@ -332,7 +333,7 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
pll->pcw_chg_addr = pll->base_addr + REG_CON1;
if (data->tuner_reg)
pll->tuner_addr = base + data->tuner_reg;
- if (data->tuner_en_reg)
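+ /*
+ * A tuner enable register at offset 0 is valid on some SoCs, so the
+ * presence of the tuner enable is also inferred from a non-zero
+ * tuner_en_bit.
+ */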
+ if (data->tuner_en_reg || data->tuner_en_bit)
pll->tuner_en_addr = base + data->tuner_en_reg;
if (data->en_reg)
pll->en_addr = base + data->en_reg;
@@ -385,3 +386,6 @@ void mtk_clk_register_plls(struct device_node *node,
clk_data->clks[pll->id] = clk;
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_plls);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index e562dc3c10a4..ffe464ce7ff8 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -137,3 +137,5 @@ void mtk_register_reset_controller_set_clr(struct device_node *np,
mtk_register_reset_controller_common(np, num_regs, regofs,
&mtk_reset_ops_set_clr);
}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index a844d35b553a..cd0f5bae24d4 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -118,6 +118,56 @@ static struct clk_regmap meson8b_fixed_pll = {
},
};
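+/*
+ * The HDMI PLL DCO input is the XTAL rate doubled by the init sequence
+ * below, modelled here as a fixed x2 factor.
+ */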
+static struct clk_fixed_factor hdmi_pll_dco_in = {
+ .mult = 2,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_dco_in",
+ .ops = &clk_fixed_factor_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ .index = -1,
+ },
+ .num_parents = 1,
+ },
+};
+
+/*
+ * Taken from the vendor driver's 2970/2975 MHz settings (the two differ only
+ * in the FRAC part in HHI_VID_PLL_CNTL2); the values are identical for
+ * Meson8, Meson8b and Meson8m2. They double the input (or output - it is not
+ * clear which one, but the result is the same) clock. The vendor driver
+ * additionally has the following comment about them: "optimise HPLL VCO
+ * 2.97GHz performance".
+ */
+static const struct reg_sequence meson8b_hdmi_pll_init_regs[] = {
+ { .reg = HHI_VID_PLL_CNTL2, .def = 0x69c84000 },
+ { .reg = HHI_VID_PLL_CNTL3, .def = 0x8a46c023 },
+ { .reg = HHI_VID_PLL_CNTL4, .def = 0x4123b100 },
+ { .reg = HHI_VID_PLL_CNTL5, .def = 0x00012385 },
+ { .reg = HHI_VID2_PLL_CNTL2, .def = 0x0430a800 },
+};
+
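+/*
+ * PLL_PARAMS(m, n): DCO rate = input rate * m / n. With the usual 24 MHz
+ * XTAL doubled to 48 MHz, these entries cover DCO rates from 1.92 GHz
+ * (m = 40) up to 3.936 GHz (m = 82).
+ */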
+static const struct pll_params_table hdmi_pll_params_table[] = {
+ PLL_PARAMS(40, 1),
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ PLL_PARAMS(64, 1),
+ PLL_PARAMS(66, 1),
+ PLL_PARAMS(68, 1),
+ PLL_PARAMS(71, 1),
+ PLL_PARAMS(82, 1),
+ { /* sentinel */ }
+};
+
static struct clk_regmap meson8b_hdmi_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -150,15 +200,16 @@ static struct clk_regmap meson8b_hdmi_pll_dco = {
.shift = 29,
.width = 1,
},
+ .table = hdmi_pll_params_table,
+ .init_regs = meson8b_hdmi_pll_init_regs,
+ .init_count = ARRAY_SIZE(meson8b_hdmi_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
/* sometimes also called "HPLL" or "HPLL PLL" */
.name = "hdmi_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- .name = "xtal",
- .index = -1,
+ .ops = &meson_clk_pll_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &hdmi_pll_dco_in.hw
},
.num_parents = 1,
},
@@ -173,7 +224,7 @@ static struct clk_regmap meson8b_hdmi_pll_lvds_out = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_lvds_out",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_hdmi_pll_dco.hw
},
@@ -191,7 +242,7 @@ static struct clk_regmap meson8b_hdmi_pll_hdmi_out = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_hdmi_out",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_hdmi_pll_dco.hw
},
@@ -1045,6 +1096,23 @@ static struct clk_regmap meson8b_l2_dram_clk_gate = {
},
};
+/* also called LVDS_CLK_EN */
+static struct clk_regmap meson8b_vid_pll_lvds_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 11,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_lvds_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_hdmi_pll_lvds_out.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_regmap meson8b_vid_pll_in_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VID_DIVIDER_CNTL,
@@ -1053,7 +1121,7 @@ static struct clk_regmap meson8b_vid_pll_in_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll_in_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* TODO: depending on the SoC there is also a second parent:
* Meson8: unknown
@@ -1061,7 +1129,7 @@ static struct clk_regmap meson8b_vid_pll_in_sel = {
* Meson8m2: vid2_pll
*/
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_hdmi_pll_lvds_out.hw
+ &meson8b_vid_pll_lvds_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1075,7 +1143,7 @@ static struct clk_regmap meson8b_vid_pll_in_en = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll_in_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vid_pll_in_sel.hw
},
@@ -1092,7 +1160,7 @@ static struct clk_regmap meson8b_vid_pll_pre_div = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll_pre_div",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vid_pll_in_en.hw
},
@@ -1109,7 +1177,7 @@ static struct clk_regmap meson8b_vid_pll_post_div = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll_post_div",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vid_pll_pre_div.hw
},
@@ -1126,7 +1194,7 @@ static struct clk_regmap meson8b_vid_pll = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
/* TODO: parent 0x2 is vid_pll_pre_div_mult7_div2 */
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vid_pll_pre_div.hw,
@@ -1145,7 +1213,7 @@ static struct clk_regmap meson8b_vid_pll_final_div = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll_final_div",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vid_pll.hw
},
@@ -1172,10 +1240,10 @@ static struct clk_regmap meson8b_vclk_in_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_in_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1186,7 +1254,7 @@ static struct clk_regmap meson8b_vclk_in_en = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_in_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_in_sel.hw
},
@@ -1202,7 +1270,7 @@ static struct clk_regmap meson8b_vclk_en = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_in_en.hw
},
@@ -1218,7 +1286,7 @@ static struct clk_regmap meson8b_vclk_div1_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_div1_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
},
@@ -1248,7 +1316,7 @@ static struct clk_regmap meson8b_vclk_div2_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_div2_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div2_div.hw
},
@@ -1278,7 +1346,7 @@ static struct clk_regmap meson8b_vclk_div4_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_div4_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div4_div.hw
},
@@ -1308,7 +1376,7 @@ static struct clk_regmap meson8b_vclk_div6_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_div6_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div6_div.hw
},
@@ -1338,7 +1406,7 @@ static struct clk_regmap meson8b_vclk_div12_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_div12_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div12_div.hw
},
@@ -1355,10 +1423,10 @@ static struct clk_regmap meson8b_vclk2_in_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_in_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1369,7 +1437,7 @@ static struct clk_regmap meson8b_vclk2_clk_in_en = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_in_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_in_sel.hw
},
@@ -1385,7 +1453,7 @@ static struct clk_regmap meson8b_vclk2_clk_en = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_clk_in_en.hw
},
@@ -1401,7 +1469,7 @@ static struct clk_regmap meson8b_vclk2_div1_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div1_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_clk_en.hw
},
@@ -1431,7 +1499,7 @@ static struct clk_regmap meson8b_vclk2_div2_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div2_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div2_div.hw
},
@@ -1461,7 +1529,7 @@ static struct clk_regmap meson8b_vclk2_div4_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div4_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div4_div.hw
},
@@ -1491,7 +1559,7 @@ static struct clk_regmap meson8b_vclk2_div6_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div6_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div6_div.hw
},
@@ -1521,7 +1589,7 @@ static struct clk_regmap meson8b_vclk2_div12_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div12_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div12_div.hw
},
@@ -1546,7 +1614,7 @@ static struct clk_regmap meson8b_cts_enct_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_enct_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1560,7 +1628,7 @@ static struct clk_regmap meson8b_cts_enct = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_enct",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_cts_enct_sel.hw
},
@@ -1577,7 +1645,7 @@ static struct clk_regmap meson8b_cts_encp_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1591,7 +1659,7 @@ static struct clk_regmap meson8b_cts_encp = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_cts_encp_sel.hw
},
@@ -1608,7 +1676,7 @@ static struct clk_regmap meson8b_cts_enci_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1622,7 +1690,7 @@ static struct clk_regmap meson8b_cts_enci = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_cts_enci_sel.hw
},
@@ -1639,7 +1707,7 @@ static struct clk_regmap meson8b_hdmi_tx_pixel_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_pixel_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1653,7 +1721,7 @@ static struct clk_regmap meson8b_hdmi_tx_pixel = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_pixel",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_hdmi_tx_pixel_sel.hw
},
@@ -1678,7 +1746,7 @@ static struct clk_regmap meson8b_cts_encl_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk2_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1692,7 +1760,7 @@ static struct clk_regmap meson8b_cts_encl = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_cts_encl_sel.hw
},
@@ -1709,7 +1777,7 @@ static struct clk_regmap meson8b_cts_vdac0_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac0_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk2_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1723,7 +1791,7 @@ static struct clk_regmap meson8b_cts_vdac0 = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac0",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_cts_vdac0_sel.hw
},
@@ -2905,6 +2973,8 @@ static struct clk_hw_onecell_data meson8_hw_onecell_data = {
[CLKID_CTS_MCLK_I958_DIV] = &meson8b_cts_mclk_i958_div.hw,
[CLKID_CTS_MCLK_I958] = &meson8b_cts_mclk_i958.hw,
[CLKID_CTS_I958] = &meson8b_cts_i958.hw,
+ [CLKID_VID_PLL_LVDS_EN] = &meson8b_vid_pll_lvds_en.hw,
+ [CLKID_HDMI_PLL_DCO_IN] = &hdmi_pll_dco_in.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -3122,6 +3192,8 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_CTS_MCLK_I958_DIV] = &meson8b_cts_mclk_i958_div.hw,
[CLKID_CTS_MCLK_I958] = &meson8b_cts_mclk_i958.hw,
[CLKID_CTS_I958] = &meson8b_cts_i958.hw,
+ [CLKID_VID_PLL_LVDS_EN] = &meson8b_vid_pll_lvds_en.hw,
+ [CLKID_HDMI_PLL_DCO_IN] = &hdmi_pll_dco_in.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -3341,6 +3413,8 @@ static struct clk_hw_onecell_data meson8m2_hw_onecell_data = {
[CLKID_CTS_MCLK_I958_DIV] = &meson8b_cts_mclk_i958_div.hw,
[CLKID_CTS_MCLK_I958] = &meson8b_cts_mclk_i958.hw,
[CLKID_CTS_I958] = &meson8b_cts_i958.hw,
+ [CLKID_VID_PLL_LVDS_EN] = &meson8b_vid_pll_lvds_en.hw,
+ [CLKID_HDMI_PLL_DCO_IN] = &hdmi_pll_dco_in.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -3539,6 +3613,7 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_cts_mclk_i958_div,
&meson8b_cts_mclk_i958,
&meson8b_cts_i958,
+ &meson8b_vid_pll_lvds_en,
};
static const struct meson8b_clk_reset_line {
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index b1a5074cf148..ce62ed47cbfc 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -51,6 +51,16 @@
#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
#define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
#define HHI_VID_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
+#define HHI_VID_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
+#define HHI_VID_PLL_CNTL4 0x32c /* 0xcb offset in data sheet */
+#define HHI_VID_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
+#define HHI_VID_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
+#define HHI_VID2_PLL_CNTL 0x380 /* 0xe0 offset in data sheet */
+#define HHI_VID2_PLL_CNTL2 0x384 /* 0xe1 offset in data sheet */
+#define HHI_VID2_PLL_CNTL3 0x388 /* 0xe2 offset in data sheet */
+#define HHI_VID2_PLL_CNTL4 0x38c /* 0xe3 offset in data sheet */
+#define HHI_VID2_PLL_CNTL5 0x390 /* 0xe4 offset in data sheet */
+#define HHI_VID2_PLL_CNTL6 0x394 /* 0xe5 offset in data sheet */
/*
* MPLL register offsets taken from the S905 datasheet. Vendor kernel source
@@ -107,14 +117,11 @@
#define CLKID_PERIPH_SEL 125
#define CLKID_AXI_SEL 127
#define CLKID_L2_DRAM_SEL 129
-#define CLKID_HDMI_PLL_LVDS_OUT 131
-#define CLKID_HDMI_PLL_HDMI_OUT 132
+#define CLKID_HDMI_PLL_LVDS_OUT 131
#define CLKID_VID_PLL_IN_SEL 133
#define CLKID_VID_PLL_IN_EN 134
#define CLKID_VID_PLL_PRE_DIV 135
#define CLKID_VID_PLL_POST_DIV 136
-#define CLKID_VID_PLL_FINAL_DIV 137
-#define CLKID_VCLK_IN_SEL 138
#define CLKID_VCLK_IN_EN 139
#define CLKID_VCLK_DIV1 140
#define CLKID_VCLK_DIV2_DIV 141
@@ -125,7 +132,6 @@
#define CLKID_VCLK_DIV6 146
#define CLKID_VCLK_DIV12_DIV 147
#define CLKID_VCLK_DIV12 148
-#define CLKID_VCLK2_IN_SEL 149
#define CLKID_VCLK2_IN_EN 150
#define CLKID_VCLK2_DIV1 151
#define CLKID_VCLK2_DIV2_DIV 152
@@ -137,17 +143,11 @@
#define CLKID_VCLK2_DIV12_DIV 158
#define CLKID_VCLK2_DIV12 159
#define CLKID_CTS_ENCT_SEL 160
-#define CLKID_CTS_ENCT 161
#define CLKID_CTS_ENCP_SEL 162
-#define CLKID_CTS_ENCP 163
#define CLKID_CTS_ENCI_SEL 164
-#define CLKID_CTS_ENCI 165
#define CLKID_HDMI_TX_PIXEL_SEL 166
-#define CLKID_HDMI_TX_PIXEL 167
#define CLKID_CTS_ENCL_SEL 168
-#define CLKID_CTS_ENCL 169
#define CLKID_CTS_VDAC0_SEL 170
-#define CLKID_CTS_VDAC0 171
#define CLKID_HDMI_SYS_SEL 172
#define CLKID_HDMI_SYS_DIV 173
#define CLKID_MALI_0_SEL 175
@@ -182,8 +182,10 @@
#define CLKID_CTS_MCLK_I958_DIV 211
#define CLKID_VCLK_EN 214
#define CLKID_VCLK2_EN 215
+#define CLKID_VID_PLL_LVDS_EN 216
+#define CLKID_HDMI_PLL_DCO_IN 217
-#define CLK_NR_CLKS 216
+#define CLK_NR_CLKS 218
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index 08ba59ec3fb1..71bdd7c3ff03 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -256,12 +256,15 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
int cpu, err;
err = of_property_read_u32(dn, "reg", &cpu);
- if (WARN_ON(err))
+ if (WARN_ON(err)) {
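+ /* the OF node iterator holds a reference on dn; release it on every early exit */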
+ of_node_put(dn);
return err;
+ }
/* If cpu2 or cpu3 is enabled */
if (cpu & APN806_CLUSTER_NUM_MASK) {
nclusters = 2;
+ of_node_put(dn);
break;
}
}
@@ -288,8 +291,10 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
int cpu, err;
err = of_property_read_u32(dn, "reg", &cpu);
- if (WARN_ON(err))
+ if (WARN_ON(err)) {
+ of_node_put(dn);
return err;
+ }
cluster_index = cpu & APN806_CLUSTER_NUM_MASK;
cluster_index >>= APN806_CLUSTER_NUM_OFFSET;
@@ -301,6 +306,7 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
parent = of_clk_get(np, cluster_index);
if (IS_ERR(parent)) {
dev_err(dev, "Could not get the clock parent\n");
+ of_node_put(dn);
return -EINVAL;
}
parent_name = __clk_get_name(parent);
@@ -319,8 +325,10 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
init.parent_names = &parent_name;
ret = devm_clk_hw_register(dev, &ap_cpu_clk[cluster_index].hw);
- if (ret)
+ if (ret) {
+ of_node_put(dn);
return ret;
+ }
ap_cpu_data->hws[cluster_index] = &ap_cpu_clk[cluster_index].hw;
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 9ef007b3cf9b..74efc82127e1 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -324,6 +324,14 @@ config MSM_MMCC_8998
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config QCM_GCC_2290
+ tristate "QCM2290 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on QCM2290 devices.
+ Say Y if you want to use multimedia devices or peripheral
+ devices such as UART, SPI, I2C, USB, SD/eMMC, etc.
+
config QCS_GCC_404
tristate "QCS404 Global Clock Controller"
help
@@ -340,6 +348,15 @@ config SC_CAMCC_7180
Say Y if you want to support camera devices and functionality such as
capturing pictures.
+config SC_CAMCC_7280
+ tristate "SC7280 Camera Clock Controller"
+ select SC_GCC_7280
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc.
+ SC7280 devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config SC_DISPCC_7180
tristate "SC7180 Display Clock Controller"
select SC_GCC_7180
@@ -385,15 +402,6 @@ config SC_GCC_8180X
Say Y if you want to use peripheral devices such as UART, SPI,
I2C, USB, UFS, SDCC, etc.
-config SC_LPASS_CORECC_7180
- tristate "SC7180 LPASS Core Clock Controller"
- select SC_GCC_7180
- help
- Support for the LPASS(Low Power Audio Subsystem) core clock controller
- on SC7180 devices.
- Say Y if you want to use LPASS clocks and power domains of the LPASS
- core clock controller.
-
config SC_GPUCC_7180
tristate "SC7180 Graphics Clock Controller"
select SC_GCC_7180
@@ -410,6 +418,23 @@ config SC_GPUCC_7280
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SC_LPASSCC_7280
+ tristate "SC7280 Low Power Audio Subsystem (LPASS) Clock Controller"
+ select SC_GCC_7280
+ help
+ Support for the LPASS clock controller on SC7280 devices.
+ Say Y if you want to use the LPASS branch clocks of the LPASS clock
+ controller to reset the LPASS subsystem.
+
+config SC_LPASS_CORECC_7180
+ tristate "SC7180 LPASS Core Clock Controller"
+ select SC_GCC_7180
+ help
+ Support for the LPASS (Low Power Audio Subsystem) core clock controller
+ on SC7180 devices.
+ Say Y if you want to use LPASS clocks and power domains of the LPASS
+ core clock controller.
+
config SC_MSS_7180
tristate "SC7180 Modem Clock Controller"
select SC_GCC_7180
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 9825ef843f4a..1718c34d3551 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -54,10 +54,12 @@ obj-$(CONFIG_QCOM_CLK_APCS_SDX55) += apcs-sdx55.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+obj-$(CONFIG_QCM_GCC_2290) += gcc-qcm2290.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
+obj-$(CONFIG_SC_CAMCC_7280) += camcc-sc7280.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
@@ -65,6 +67,7 @@ obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o
obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7280) += gpucc-sc7280.o
+obj-$(CONFIG_SC_LPASSCC_7280) += lpasscc-sc7280.o
obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index 9e6decb9c26f..329d2c5356d8 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -90,7 +90,6 @@ static int qcom_a53pll_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct regmap *regmap;
- struct resource *res;
struct clk_pll *pll;
void __iomem *base;
struct clk_init_data init = { };
@@ -100,8 +99,7 @@ static int qcom_a53pll_probe(struct platform_device *pdev)
if (!pll)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c
new file mode 100644
index 000000000000..ec163ea769f5
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sc7280.c
@@ -0,0 +1,2484 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,camcc-sc7280.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_AUX2,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_MAIN,
+ P_CAM_CC_PLL6_OUT_ODD,
+ P_SLEEP_CLK,
+};
+
+static struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct pll_vco zonda_vco[] = {
+ { 595200000UL, 3600000000UL, 0 },
+};
+
+/* 1200MHz Configuration */
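+/*
+ * Assuming the usual 19.2 MHz bi_tcxo reference and 16-bit ALPHA:
+ * 19.2 MHz * (L + ALPHA / 2^16) = 19.2 MHz * (0x3E + 0x8000 / 2^16) = 1200 MHz
+ */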
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3E,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00003101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll0_out_odd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 600MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x1F,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll1_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 1440MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x4B,
+ .alpha = 0x0,
+ .config_ctl_val = 0x08200800,
+ .config_ctl_hi_val = 0x05022011,
+ .config_ctl_hi1_val = 0x08000000,
+ .user_ctl_val = 0x00000301,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = zonda_vco,
+ .num_vco = ARRAY_SIZE(zonda_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_zonda_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux[] = {
+ { 0x3, 4 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_aux,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2_out_aux",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_zonda_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux2[] = {
+ { 0x3, 4 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux2 = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux2),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2_out_aux2",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_zonda_ops,
+ },
+};
+
+/* 760MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x27,
+ .alpha = 0x9555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll3_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 760MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x27,
+ .alpha = 0x9555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll4_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 760MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x27,
+ .alpha = 0x9555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x5000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x5000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll5_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 960MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00003101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x6000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll6_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_odd = {
+ .offset = 0x6000,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_cam_cc_pll6_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll6_out_odd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+};
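+/*
+ * Each cam_cc_parent_data_N array must list parents in the same order as
+ * its cam_cc_parent_map_N: the map gives the mux select value for each
+ * P_* source, the data gives the clk_hw or fw_name resolved at
+ * registration.
+ */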
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL6_OUT_MAIN, 4 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll6.clkr.hw },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_AUX2, 3 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll2_out_aux2.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 5 },
+ { P_CAM_CC_PLL6_OUT_ODD, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+ { .hw = &cam_cc_pll6_out_odd.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_8[] = {
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_9_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_ODD, 4, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
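+/*
+ * F(rate, source, pre-divider, m, n). The rates here imply PLL0 runs at
+ * 1200 MHz (out_even 600 MHz, out_odd 400 MHz), e.g. 200 MHz = 400 / 2
+ * and 600 MHz = 1200 / 2; PLL6 is the 960 MHz source configured above.
+ * Half-integer dividers such as 1.5 work because F() encodes
+ * 2 * div - 1 into the register field.
+ */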
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(240000000, P_CAM_CC_PLL6_OUT_EVEN, 2, 0, 0),
+ F(320000000, P_CAM_CC_PLL6_OUT_ODD, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0xc124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0xc0e0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0xc0fc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0xa064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0xe0ac,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0xe0d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0xe0f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0xe11c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
+ .cmd_rcgr = 0xe140,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi4phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x703c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xc0b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(380000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(510000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
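+/*
+ * Every non-XO rate above comes from PLL3's even output at a divider of
+ * 1: with CLK_SET_RATE_PARENT the RCG retunes the PLL itself, which is
+ * why IFE 0/1/2 (and IPE with PLL1) each get a dedicated PLL.
+ */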
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(380000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(510000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xb010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0xa03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xb03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(380000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(510000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_2_clk_src = {
+ .cmd_rcgr = 0xb07c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_ife_2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_2_csid_clk_src = {
+ .cmd_rcgr = 0xb0a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(320000000, P_CAM_CC_PLL6_OUT_ODD, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_csid_clk_src = {
+ .cmd_rcgr = 0xc020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_clk_src = {
+ .cmd_rcgr = 0xc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_csid_clk_src = {
+ .cmd_rcgr = 0xc064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(430000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x8010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xc08c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL6_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(320000000, P_CAM_CC_PLL6_OUT_ODD, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xc150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_CAM_CC_PLL2_OUT_EARLY, 1, 1, 75),
+ F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 6),
+ F(34285714, P_CAM_CC_PLL2_OUT_EARLY, 2, 1, 21),
+ { }
+};
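+/*
+ * The mclk rates use the M/N counter: rate = source / pre_div * m / n.
+ * These entries are consistent with PLL2's early output running at
+ * 1440 MHz: 1440 * 1/75 = 19.2 MHz, 1440 / 10 * 1/6 = 24 MHz and
+ * 1440 / 2 * 1/21 is roughly 34.29 MHz.
+ */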
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0xe000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0xe01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0xe038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0xe054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0xe070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ .cmd_rcgr = 0xe08c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk5_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xc1c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_8,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
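+/*
+ * cam_cc_sleep_clk_src above and cam_cc_xo_clk_src below use plain
+ * clk_rcg2_ops rather than clk_rcg2_shared_ops: shared ops park a
+ * disabled RCG on the XO source, which is pointless for sources fed
+ * only by sleep_clk or the XO itself.
+ */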
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x7058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0xc1a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_9,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_9_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_9_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
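+/*
+ * The branch clocks below are simple gates: enable_mask BIT(0) switches
+ * the branch, halt_reg is polled (or just delayed, for BRANCH_HALT_DELAY)
+ * to confirm the state change, and CLK_SET_RATE_PARENT forwards rate
+ * requests to the parent RCG.
+ */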
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x7070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x7054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_areg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xc140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc140,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0xc148,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc148,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0xc0f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0xc114,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc114,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xc1a0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xc1a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_core_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xc11c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc11c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0xe0c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0xe0e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0xe10c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe10c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0xe134,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe134,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi4phytimer_clk = {
+ .halt_reg = 0xe158,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe158,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi4phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi4phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0xe0c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0xe0ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0xe110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0xe138,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe138,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy4_clk = {
+ .halt_reg = 0xe15c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe15c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_gdsc_clk = {
+ .halt_reg = 0xc1bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc1bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_gdsc_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0xc0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xc0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0xa080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0xa07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0xa038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xb068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xb064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xb054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_axi_clk = {
+ .halt_reg = 0xb0d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_clk = {
+ .halt_reg = 0xb094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_cphy_rx_clk = {
+ .halt_reg = 0xb0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_csid_clk = {
+ .halt_reg = 0xb0c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_dsp_clk = {
+ .halt_reg = 0xb0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_dsp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_cphy_rx_clk = {
+ .halt_reg = 0xc040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_0_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_csid_clk = {
+ .halt_reg = 0xc038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_0_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_clk = {
+ .halt_reg = 0xc060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_cphy_rx_clk = {
+ .halt_reg = 0xc084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_1_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_csid_clk = {
+ .halt_reg = 0xc07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_1_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xc0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_jpeg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xc168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc168,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_lrme_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0xe018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0xe034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0xe050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0xe06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0xe088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk5_clk = {
+ .halt_reg = 0xe0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sleep_clk = {
+ .halt_reg = 0xc1d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc1d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc cam_cc_titan_top_gdsc = {
+ .gdscr = 0xc194,
+ .pd = {
+ .name = "cam_cc_titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
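+/*
+ * GDSCs model the camera power domains. RETAIN_FF_ENABLE keeps register
+ * state across power collapse; HW_CTRL on the BPS and IPE domains also
+ * lets hardware collapse them autonomously.
+ */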
+
+static struct gdsc cam_cc_bps_gdsc = {
+ .gdscr = 0x7004,
+ .pd = {
+ .name = "cam_cc_bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_0_gdsc = {
+ .gdscr = 0xa004,
+ .pd = {
+ .name = "cam_cc_ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_1_gdsc = {
+ .gdscr = 0xb004,
+ .pd = {
+ .name = "cam_cc_ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_2_gdsc = {
+ .gdscr = 0xb070,
+ .pd = {
+ .name = "cam_cc_ife_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ipe_0_gdsc = {
+ .gdscr = 0x8004,
+ .pd = {
+ .name = "cam_cc_ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_sc7280_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK] = &cam_cc_csi4phytimer_clk.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK_SRC] = &cam_cc_csi4phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_2_AXI_CLK] = &cam_cc_ife_2_axi_clk.clkr,
+ [CAM_CC_IFE_2_CLK] = &cam_cc_ife_2_clk.clkr,
+ [CAM_CC_IFE_2_CLK_SRC] = &cam_cc_ife_2_clk_src.clkr,
+ [CAM_CC_IFE_2_CPHY_RX_CLK] = &cam_cc_ife_2_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_2_CSID_CLK] = &cam_cc_ife_2_csid_clk.clkr,
+ [CAM_CC_IFE_2_CSID_CLK_SRC] = &cam_cc_ife_2_csid_clk_src.clkr,
+ [CAM_CC_IFE_2_DSP_CLK] = &cam_cc_ife_2_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK] = &cam_cc_ife_lite_0_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK_SRC] = &cam_cc_ife_lite_0_clk_src.clkr,
+ [CAM_CC_IFE_LITE_0_CPHY_RX_CLK] = &cam_cc_ife_lite_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK] = &cam_cc_ife_lite_0_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK_SRC] = &cam_cc_ife_lite_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CLK] = &cam_cc_ife_lite_1_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CLK_SRC] = &cam_cc_ife_lite_1_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CPHY_RX_CLK] = &cam_cc_ife_lite_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK] = &cam_cc_ife_lite_1_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK_SRC] = &cam_cc_ife_lite_1_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr,
+ [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_AUX] = &cam_cc_pll2_out_aux.clkr,
+ [CAM_CC_PLL2_OUT_AUX2] = &cam_cc_pll2_out_aux2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr,
+ [CAM_CC_PLL6_OUT_ODD] = &cam_cc_pll6_out_odd.clkr,
+ [CAM_CC_SLEEP_CLK] = &cam_cc_sleep_clk.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sc7280_gdscs[] = {
+ [CAM_CC_TITAN_TOP_GDSC] = &cam_cc_titan_top_gdsc,
+ [CAM_CC_BPS_GDSC] = &cam_cc_bps_gdsc,
+ [CAM_CC_IFE_0_GDSC] = &cam_cc_ife_0_gdsc,
+ [CAM_CC_IFE_1_GDSC] = &cam_cc_ife_1_gdsc,
+ [CAM_CC_IFE_2_GDSC] = &cam_cc_ife_2_gdsc,
+ [CAM_CC_IPE_0_GDSC] = &cam_cc_ipe_0_gdsc,
+};
+
+static const struct regmap_config cam_cc_sc7280_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf00c,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_sc7280_desc = {
+ .config = &cam_cc_sc7280_regmap_config,
+ .clks = cam_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sc7280_clocks),
+ .gdscs = cam_cc_sc7280_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sc7280_gdscs),
+};
+
+static const struct of_device_id cam_cc_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sc7280_match_table);
+
+static int cam_cc_sc7280_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sc7280_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_lucid_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_zonda_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_lucid_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+ clk_lucid_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+
+ return qcom_cc_really_probe(pdev, &cam_cc_sc7280_desc, regmap);
+}
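+/*
+ * The PLLs are programmed before qcom_cc_really_probe() registers the
+ * clocks, so every source is fully configured before a consumer can
+ * enable it.
+ */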
+
+static struct platform_driver cam_cc_sc7280_driver = {
+ .probe = cam_cc_sc7280_probe,
+ .driver = {
+ .name = "cam_cc-sc7280",
+ .of_match_table = cam_cc_sc7280_match_table,
+ },
+};
+
+static int __init cam_cc_sc7280_init(void)
+{
+ return platform_driver_register(&cam_cc_sc7280_driver);
+}
+subsys_initcall(cam_cc_sc7280_init);
+
+static void __exit cam_cc_sc7280_exit(void)
+{
+ platform_driver_unregister(&cam_cc_sc7280_driver);
+}
+module_exit(cam_cc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI CAM_CC SC7280 Driver");
+MODULE_LICENSE("GPL v2");
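/*
 * Illustrative sketch, not part of this patch: how a camera subsystem
 * consumer could pick up one of the clocks registered above through the
 * common clock framework. The "mclk0" con-id and the 19.2 MHz rate are
 * assumptions for illustration; real names come from the consumer's DT
 * node.
 */
static int example_enable_sensor_mclk(struct device *dev)
{
	struct clk *mclk;
	int ret;

	mclk = devm_clk_get(dev, "mclk0");	/* resolves to CAM_CC_MCLK0_CLK */
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	ret = clk_set_rate(mclk, 19200000);
	if (ret)
		return ret;

	return clk_prepare_enable(mclk);
}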
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 66d7807ee38e..5776d85a1e5c 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -118,14 +118,15 @@
__DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
0, QCOM_RPM_SMD_KEY_STATE)
-#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id) \
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id, r) \
__DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
- QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, r, \
QCOM_RPM_KEY_SOFTWARE_ENABLE)
-#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, r_id) \
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, \
+ r_id, r) \
__DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
- QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, r, \
QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY)
#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw)
@@ -195,6 +196,10 @@ static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
};
+ /* Buffered clock needs a binary value */
+ if (r->rpm_res_type == QCOM_SMD_RPM_CLK_BUF_A)
+ req.value = cpu_to_le32(!!req.value);
+
return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
@@ -209,6 +214,10 @@ static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
};
+ /* Buffered clock needs a binary value */
+ if (r->rpm_res_type == QCOM_SMD_RPM_CLK_BUF_A)
+ req.value = cpu_to_le32(!!req.value);
+
return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
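/*
 * Illustrative sketch, not part of this patch: the effect of the two
 * hunks above. For QCOM_SMD_RPM_CLK_BUF_A resources the RPM expects a
 * 0/1 software-enable value rather than a rate in kHz, so any non-zero
 * request collapses to 1 before the write.
 */
static u32 example_req_value(unsigned long rate, bool is_xo_buffer)
{
	u32 value = DIV_ROUND_UP(rate, 1000);	/* generic clocks: kHz */

	return is_xo_buffer ? !!value : value;	/* buffers: enable bit */
}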
@@ -416,20 +425,21 @@ static const struct clk_ops clk_smd_rpm_ops = {
static const struct clk_ops clk_smd_rpm_branch_ops = {
.prepare = clk_smd_rpm_prepare,
.unprepare = clk_smd_rpm_unprepare,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
};
DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5, 19200000);
static struct clk_smd_rpm *msm8916_clks[] = {
[RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
@@ -503,19 +513,19 @@ DEFINE_CLK_SMD_RPM(msm8974, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
DEFINE_CLK_SMD_RPM(msm8974, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, QCOM_SMD_RPM_BUS_CLK, 3);
DEFINE_CLK_SMD_RPM(msm8974, gfx3d_clk_src, gfx3d_a_clk_src, QCOM_SMD_RPM_MEM_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8974, ocmemgx_clk, ocmemgx_a_clk, QCOM_SMD_RPM_MEM_CLK, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d0, cxo_d0_a, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d1, cxo_d1_a, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a0, cxo_a0_a, 4);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a1, cxo_a1_a, 5);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a2, cxo_a2_a, 6);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, diff_clk, diff_a_clk, 7);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk1, div_a_clk1, 11);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk2, div_a_clk2, 12);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d0_pin, cxo_d0_a_pin, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d1_pin, cxo_d1_a_pin, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a0_pin, cxo_a0_a_pin, 4);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a1_pin, cxo_a1_a_pin, 5);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a2_pin, cxo_a2_a_pin, 6);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d0, cxo_d0_a, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d1, cxo_d1_a, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a0, cxo_a0_a, 4, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a1, cxo_a1_a, 5, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a2, cxo_a2_a, 6, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, diff_clk, diff_a_clk, 7, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk1, div_a_clk1, 11, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk2, div_a_clk2, 12, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d0_pin, cxo_d0_a_pin, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d1_pin, cxo_d1_a_pin, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a0_pin, cxo_a0_a_pin, 4, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a1_pin, cxo_a1_a_pin, 5, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a2_pin, cxo_a2_a_pin, 6, 19200000);
static struct clk_smd_rpm *msm8974_clks[] = {
[RPM_SMD_PNOC_CLK] = &msm8916_pcnoc_clk,
@@ -603,8 +613,8 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8976 = {
.num_clks = ARRAY_SIZE(msm8976_clks),
};
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, div_clk3, div_clk3_a, 13);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, ln_bb_clk, ln_bb_a_clk, 8);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, div_clk3, div_clk3_a, 13, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, ln_bb_clk, ln_bb_a_clk, 8, 19200000);
DEFINE_CLK_SMD_RPM(msm8992, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8992, ce2_clk, ce2_a_clk, QCOM_SMD_RPM_CE_CLK, 1);
@@ -782,7 +792,7 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
DEFINE_CLK_SMD_RPM(qcs404, bimc_gpu_clk, bimc_gpu_a_clk, QCOM_SMD_RPM_MEM_CLK, 2);
DEFINE_CLK_SMD_RPM(qcs404, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs404, ln_bb_clk_pin, ln_bb_clk_a_pin, 8);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs404, ln_bb_clk_pin, ln_bb_clk_a_pin, 8, 19200000);
static struct clk_smd_rpm *qcs404_clks[] = {
[RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
@@ -811,13 +821,13 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
};
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, ln_bb_clk3_pin, ln_bb_clk3_a_pin,
- 3);
+ 3, 19200000);
DEFINE_CLK_SMD_RPM(msm8998, aggre1_noc_clk, aggre1_noc_a_clk,
QCOM_SMD_RPM_AGGR_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8998, aggre2_noc_clk, aggre2_noc_a_clk,
QCOM_SMD_RPM_AGGR_CLK, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6, 19200000);
static struct clk_smd_rpm *msm8998_clks[] = {
[RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
[RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
@@ -864,8 +874,8 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0,
19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk3, ln_bb_clk3_a, 3);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk3_pin, ln_bb_clk3_pin_a, 3);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk3, ln_bb_clk3_a, 3, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk3_pin, ln_bb_clk3_pin_a, 3, 19200000);
static struct clk_smd_rpm *sdm660_clks[] = {
[RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
@@ -1067,6 +1077,64 @@ static const struct rpm_smd_clk_desc rpm_clk_sm6115 = {
.num_clks = ARRAY_SIZE(sm6115_clks),
};
+/* QCM2290 */
+DEFINE_CLK_SMD_RPM_XO_BUFFER(qcm2290, ln_bb_clk2, ln_bb_clk2_a, 0x2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(qcm2290, rf_clk3, rf_clk3_a, 6, 38400000);
+
+DEFINE_CLK_SMD_RPM(qcm2290, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcm2290, hwkm_clk, hwkm_a_clk, QCOM_SMD_RPM_HWKM_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcm2290, pka_clk, pka_a_clk, QCOM_SMD_RPM_PKA_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcm2290, cpuss_gnoc_clk, cpuss_gnoc_a_clk,
+ QCOM_SMD_RPM_MEM_CLK, 1);
+DEFINE_CLK_SMD_RPM(qcm2290, bimc_gpu_clk, bimc_gpu_a_clk,
+ QCOM_SMD_RPM_MEM_CLK, 2);
+
+static struct clk_smd_rpm *qcm2290_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &sm6125_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &sm6125_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &sm6125_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &sm6125_qdss_a_clk,
+ [RPM_SMD_LN_BB_CLK2] = &qcm2290_ln_bb_clk2,
+ [RPM_SMD_LN_BB_CLK2_A] = &qcm2290_ln_bb_clk2_a,
+ [RPM_SMD_RF_CLK3] = &qcm2290_rf_clk3,
+ [RPM_SMD_RF_CLK3_A] = &qcm2290_rf_clk3_a,
+ [RPM_SMD_CNOC_CLK] = &sm6125_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &sm6125_cnoc_a_clk,
+ [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
+ [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
+ [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
+ [RPM_SMD_MMRT_CLK] = &sm6125_mmrt_clk,
+ [RPM_SMD_MMRT_A_CLK] = &sm6125_mmrt_a_clk,
+ [RPM_SMD_MMNRT_CLK] = &sm6125_mmnrt_clk,
+ [RPM_SMD_MMNRT_A_CLK] = &sm6125_mmnrt_a_clk,
+ [RPM_SMD_SNOC_PERIPH_CLK] = &sm6125_snoc_periph_clk,
+ [RPM_SMD_SNOC_PERIPH_A_CLK] = &sm6125_snoc_periph_a_clk,
+ [RPM_SMD_SNOC_LPASS_CLK] = &sm6125_snoc_lpass_clk,
+ [RPM_SMD_SNOC_LPASS_A_CLK] = &sm6125_snoc_lpass_a_clk,
+ [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
+ [RPM_SMD_QPIC_CLK] = &qcm2290_qpic_clk,
+ [RPM_SMD_QPIC_CLK_A] = &qcm2290_qpic_a_clk,
+ [RPM_SMD_HWKM_CLK] = &qcm2290_hwkm_clk,
+ [RPM_SMD_HWKM_A_CLK] = &qcm2290_hwkm_a_clk,
+ [RPM_SMD_PKA_CLK] = &qcm2290_pka_clk,
+ [RPM_SMD_PKA_A_CLK] = &qcm2290_pka_a_clk,
+ [RPM_SMD_BIMC_GPU_CLK] = &qcm2290_bimc_gpu_clk,
+ [RPM_SMD_BIMC_GPU_A_CLK] = &qcm2290_bimc_gpu_a_clk,
+ [RPM_SMD_CPUSS_GNOC_CLK] = &qcm2290_cpuss_gnoc_clk,
+ [RPM_SMD_CPUSS_GNOC_A_CLK] = &qcm2290_cpuss_gnoc_a_clk,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_qcm2290 = {
+ .clks = qcm2290_clks,
+ .num_clks = ARRAY_SIZE(qcm2290_clks),
+};
+
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-mdm9607", .data = &rpm_clk_mdm9607 },
{ .compatible = "qcom,rpmcc-msm8226", .data = &rpm_clk_msm8974 },
@@ -1079,6 +1147,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8994", .data = &rpm_clk_msm8994 },
{ .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
{ .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 },
+ { .compatible = "qcom,rpmcc-qcm2290", .data = &rpm_clk_qcm2290 },
{ .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
{ .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 },
{ .compatible = "qcom,rpmcc-sm6115", .data = &rpm_clk_sm6115 },
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 60d2a78d1395..0932e019dd12 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -73,11 +73,9 @@ struct regmap *
qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
{
void __iomem *base;
- struct resource *res;
struct device *dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return ERR_CAST(base);
@@ -313,11 +311,9 @@ int qcom_cc_probe_by_index(struct platform_device *pdev, int index,
const struct qcom_cc_desc *desc)
{
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
- res = platform_get_resource(pdev, IORESOURCE_MEM, index);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(base))
return -ENOMEM;
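/*
 * Illustrative sketch, not part of this patch: the helper adopted in
 * the two hunks above is shorthand for exactly the two-step lookup it
 * replaces.
 */
static void __iomem *example_map(struct platform_device *pdev, int index)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, index);

	return devm_ioremap_resource(&pdev->dev, res);
}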
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index bf9ffe1a1cf4..566fdfa0a15b 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
@@ -1228,13 +1229,31 @@ static const struct of_device_id disp_cc_sm8250_match_table[] = {
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8250_match_table);
+static void disp_cc_sm8250_pm_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
static int disp_cc_sm8250_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_add_action_or_reset(&pdev->dev, disp_cc_sm8250_pm_runtime_disable, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
regmap = qcom_cc_map(pdev, &disp_cc_sm8250_desc);
- if (IS_ERR(regmap))
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
return PTR_ERR(regmap);
+ }
/* note: trion == lucid, except for the prepare() op */
BUILD_BUG_ON(CLK_ALPHA_PLL_TYPE_TRION != CLK_ALPHA_PLL_TYPE_LUCID);
@@ -1259,7 +1278,11 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
/* DISP_CC_XO_CLK always-on */
regmap_update_bits(regmap, 0x605c, BIT(0), BIT(0));
- return qcom_cc_really_probe(pdev, &disp_cc_sm8250_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &disp_cc_sm8250_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
}
static struct platform_driver disp_cc_sm8250_driver = {
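/*
 * Illustrative sketch, not part of this patch: the devm action wired up
 * in the probe above ties pm_runtime_disable() to driver detach, so the
 * early returns after pm_runtime_enable() need no manual unwinding.
 */
static void example_rpm_disable(void *data)
{
	pm_runtime_disable(data);
}

static int example_enable_rpm(struct device *dev)
{
	pm_runtime_enable(dev);
	return devm_add_action_or_reset(dev, example_rpm_disable, dev);
}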
diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c
index 49513f1366ff..8aafa6591e84 100644
--- a/drivers/clk/qcom/gcc-msm8953.c
+++ b/drivers/clk/qcom/gcc-msm8953.c
@@ -4230,7 +4230,6 @@ static struct platform_driver gcc_msm8953_driver = {
.driver = {
.name = "gcc-msm8953",
.of_match_table = gcc_msm8953_match_table,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 144d2ba7a9be..702a9bdc0559 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -8,6 +8,7 @@
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -28,50 +29,17 @@ enum {
P_GPLL4,
};
-static const struct parent_map gcc_xo_gpll0_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
-};
-
-static const char * const gcc_xo_gpll0[] = {
- "xo",
- "gpll0",
-};
-
-static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
- { P_GPLL4, 5 },
-};
-
-static const char * const gcc_xo_gpll0_gpll4[] = {
- "xo",
- "gpll0",
- "gpll4",
-};
-
-static struct clk_fixed_factor xo = {
- .mult = 1,
- .div = 1,
- .hw.init = &(struct clk_init_data)
- {
- .name = "xo",
- .parent_names = (const char *[]) { "xo_board" },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
- },
-};
-
static struct clk_alpha_pll gpll0_early = {
- .offset = 0x00000,
+ .offset = 0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x1480,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gpll0_early",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -79,10 +47,9 @@ static struct clk_alpha_pll gpll0_early = {
};
static struct clk_alpha_pll_postdiv gpll0 = {
- .offset = 0x00000,
+ .offset = 0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "gpll0",
.parent_names = (const char *[]) { "gpll0_early" },
.num_parents = 1,
@@ -96,10 +63,11 @@ static struct clk_alpha_pll gpll4_early = {
.clkr = {
.enable_reg = 0x1480,
.enable_mask = BIT(4),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gpll4_early",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -109,8 +77,7 @@ static struct clk_alpha_pll gpll4_early = {
static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x1dc0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "gpll4",
.parent_names = (const char *[]) { "gpll4_early" },
.num_parents = 1,
@@ -118,6 +85,64 @@ static struct clk_alpha_pll_postdiv gpll4 = {
},
};
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static struct clk_rcg2 system_noc_clk_src = {
+ .cmd_rcgr = 0x0120,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_noc_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 config_noc_clk_src = {
+ .cmd_rcgr = 0x0150,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "config_noc_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 periph_noc_clk_src = {
+ .cmd_rcgr = 0x0190,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "periph_noc_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
@@ -134,11 +159,10 @@ static struct clk_rcg2 ufs_axi_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_ufs_axi_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "ufs_axi_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
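/*
 * Illustrative sketch, not part of this patch: the shape of the
 * conversion applied throughout this file. A struct clk_parent_data
 * entry names a parent either via the DT-visible handle (.fw_name,
 * resolved through the provider node's "clocks"/"clock-names") or by
 * pointing straight at another clk_hw in the same driver (.hw), which
 * replaces the old global parent_names string lookup.
 */
static const struct clk_parent_data example_parents[] = {
	{ .fw_name = "xo" },		/* resolved from the DT */
	{ .hw = &gpll0.clkr.hw },	/* in-driver parent, no string match */
};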
@@ -155,11 +179,10 @@ static struct clk_rcg2 usb30_master_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_usb30_master_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "usb30_master_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -175,16 +198,15 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
-static struct freq_tbl ftbl_blspqup_spi_apps_clk_src[] = {
+static struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -197,17 +219,27 @@ static struct freq_tbl ftbl_blspqup_spi_apps_clk_src[] = {
{ }
};
+static struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src_8992[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.cmd_rcgr = 0x064c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -217,26 +249,37 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ F(46150000, P_GPLL0, 13, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.cmd_rcgr = 0x06cc,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup2_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
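/*
 * Illustrative sketch, not part of this patch: decoding an F() entry
 * from the tables above. With GPLL0 at 600 MHz,
 * F(24000000, P_GPLL0, 12.5, 1, 2) selects a 12.5 pre-divider and
 * M/N = 1/2: 600 MHz / 12.5 * 1 / 2 = 24 MHz. The macro stores the
 * divider as 2 * h - 1, so half-integer dividers fit in an integer
 * register field.
 */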
@@ -246,26 +289,37 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp1_qup3_4_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ F(44440000, P_GPLL0, 13.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.cmd_rcgr = 0x074c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup3_4_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -275,11 +329,10 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -289,12 +342,11 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup3_4_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -304,26 +356,37 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp1_qup5_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
.cmd_rcgr = 0x084c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup5_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -333,26 +396,37 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp1_qup6_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(27906976, P_GPLL0, 1, 2, 43),
+ F(41380000, P_GPLL0, 15, 0, 0),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
.cmd_rcgr = 0x08cc,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup6_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -382,11 +456,10 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -397,11 +470,10 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -412,11 +484,10 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart3_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -427,11 +498,10 @@ static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart4_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -442,11 +512,10 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart5_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -457,11 +526,10 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart6_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -471,26 +539,37 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp2_qup1_2_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ F(44440000, P_GPLL0, 13.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
.cmd_rcgr = 0x098c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp2_qup1_2_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -500,11 +579,10 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -514,26 +592,37 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp2_qup1_2_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp2_qup3_4_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
.cmd_rcgr = 0x0aa0,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -543,12 +632,11 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp2_qup3_4_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -558,11 +646,10 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -572,12 +659,11 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp2_qup3_4_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -587,11 +673,10 @@ static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup5_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -601,12 +686,12 @@ static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ /* BLSP1 QUP1 and BLSP2 QUP5 use the same freqs */
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup5_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -616,26 +701,37 @@ static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup6_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp2_qup6_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(44440000, P_GPLL0, 13.5, 0, 0),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
.cmd_rcgr = 0x0c0c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp2_qup6_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup6_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -646,11 +742,10 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart1_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -661,11 +756,10 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -676,11 +770,10 @@ static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart3_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -691,11 +784,10 @@ static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart4_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -706,11 +798,10 @@ static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart5_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -721,11 +812,10 @@ static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart6_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -743,11 +833,10 @@ static struct clk_rcg2 gp1_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_gp1_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -765,11 +854,10 @@ static struct clk_rcg2 gp2_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_gp2_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -787,11 +875,10 @@ static struct clk_rcg2 gp3_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_gp3_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -806,10 +893,11 @@ static struct clk_rcg2 pcie_0_aux_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.freq_tbl = ftbl_pcie_0_aux_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "pcie_0_aux_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -824,10 +912,11 @@ static struct clk_rcg2 pcie_0_pipe_clk_src = {
.cmd_rcgr = 0x1adc,
.hid_width = 5,
.freq_tbl = ftbl_pcie_pipe_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "pcie_0_pipe_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -843,10 +932,11 @@ static struct clk_rcg2 pcie_1_aux_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.freq_tbl = ftbl_pcie_1_aux_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "pcie_1_aux_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -856,10 +946,11 @@ static struct clk_rcg2 pcie_1_pipe_clk_src = {
.cmd_rcgr = 0x1b5c,
.hid_width = 5,
.freq_tbl = ftbl_pcie_pipe_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "pcie_1_pipe_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -875,11 +966,10 @@ static struct clk_rcg2 pdm2_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_pdm2_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -896,17 +986,28 @@ static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
{ }
};
+static struct freq_tbl ftbl_sdcc1_apps_clk_src_8992[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(172000000, P_GPLL4, 2, 0, 0),
+ F(344000000, P_GPLL4, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 sdcc1_apps_clk_src = {
.cmd_rcgr = 0x04d0,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_map,
.freq_tbl = ftbl_sdcc1_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
- .parent_names = gcc_xo_gpll0_gpll4,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -928,11 +1029,10 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_sdcc2_4_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -943,11 +1043,10 @@ static struct clk_rcg2 sdcc3_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_sdcc2_4_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "sdcc3_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -958,11 +1057,10 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_sdcc2_4_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "sdcc4_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -977,10 +1075,11 @@ static struct clk_rcg2 tsif_ref_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.freq_tbl = ftbl_tsif_ref_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "tsif_ref_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -997,11 +1096,10 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_usb30_mock_utmi_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "usb30_mock_utmi_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1015,10 +1113,11 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
.cmd_rcgr = 0x1414,
.hid_width = 5,
.freq_tbl = ftbl_usb3_phy_aux_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "usb3_phy_aux_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -1034,11 +1133,10 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_usb_hs_system_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "usb_hs_system_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1049,9 +1147,10 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.clkr = {
.enable_reg = 0x1484,
.enable_mask = BIT(17),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
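/*
 * Illustrative sketch, not part of this patch: branch clocks such as
 * gcc_blsp1_ahb_clk now declare their parent through a parent_hws
 * array, a direct clk_hw pointer that lets the framework link parent
 * and child at registration time without a global name lookup.
 */
static const struct clk_hw *example_parent_hws[] = {
	&periph_noc_clk_src.clkr.hw,
};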
@@ -1062,12 +1161,9 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0648,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup1_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup1_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1080,12 +1176,9 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0644,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup1_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup1_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1098,12 +1191,9 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x06c8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup2_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup2_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1116,12 +1206,9 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.clkr = {
.enable_reg = 0x06c4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup2_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup2_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1134,12 +1221,9 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0748,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup3_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup3_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1152,12 +1236,9 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0744,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup3_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup3_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1170,12 +1251,9 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x07c8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup4_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup4_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1188,12 +1266,9 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.clkr = {
.enable_reg = 0x07c4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup4_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup4_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1206,12 +1281,9 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0848,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup5_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup5_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1224,12 +1296,9 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0844,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup5_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup5_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1242,12 +1311,9 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x08c8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup6_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup6_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1260,12 +1326,9 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
.clkr = {
.enable_reg = 0x08c4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup6_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup6_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1278,12 +1341,9 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.clkr = {
.enable_reg = 0x0684,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart1_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart1_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1296,12 +1356,9 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.clkr = {
.enable_reg = 0x0704,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart2_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart2_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1314,12 +1371,9 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
.clkr = {
.enable_reg = 0x0784,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart3_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart3_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart3_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1332,12 +1386,9 @@ static struct clk_branch gcc_blsp1_uart4_apps_clk = {
.clkr = {
.enable_reg = 0x0804,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart4_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart4_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart4_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1350,12 +1401,9 @@ static struct clk_branch gcc_blsp1_uart5_apps_clk = {
.clkr = {
.enable_reg = 0x0884,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart5_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart5_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart5_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1368,12 +1416,9 @@ static struct clk_branch gcc_blsp1_uart6_apps_clk = {
.clkr = {
.enable_reg = 0x0904,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart6_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart6_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart6_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1387,9 +1432,10 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
.clkr = {
.enable_reg = 0x1484,
.enable_mask = BIT(15),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1400,12 +1446,9 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0988,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup1_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup1_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1418,12 +1461,9 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0984,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup1_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup1_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1436,12 +1476,9 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0a08,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup2_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup2_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1454,12 +1491,9 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0a04,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup2_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup2_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1472,12 +1506,9 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0a88,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup3_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup3_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1490,12 +1521,9 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0a84,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup3_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup3_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1508,12 +1536,9 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0b08,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup4_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup4_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1526,12 +1551,9 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0b04,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup4_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup4_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1544,12 +1566,9 @@ static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0b88,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup5_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup5_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup5_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1562,12 +1581,9 @@ static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0b84,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup5_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup5_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup5_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1580,12 +1596,9 @@ static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0c08,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup6_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup6_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup6_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1598,12 +1611,9 @@ static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0c04,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup6_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup6_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup6_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1616,12 +1626,9 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
.clkr = {
.enable_reg = 0x09c4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart1_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart1_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart1_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1634,12 +1641,9 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
.clkr = {
.enable_reg = 0x0a44,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart2_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart2_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart2_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1652,12 +1656,9 @@ static struct clk_branch gcc_blsp2_uart3_apps_clk = {
.clkr = {
.enable_reg = 0x0ac4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart3_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart3_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart3_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1670,12 +1671,9 @@ static struct clk_branch gcc_blsp2_uart4_apps_clk = {
.clkr = {
.enable_reg = 0x0b44,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart4_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart4_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart4_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1688,12 +1686,9 @@ static struct clk_branch gcc_blsp2_uart5_apps_clk = {
.clkr = {
.enable_reg = 0x0bc4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart5_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart5_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart5_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1706,12 +1701,9 @@ static struct clk_branch gcc_blsp2_uart6_apps_clk = {
.clkr = {
.enable_reg = 0x0c44,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart6_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart6_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart6_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1724,12 +1716,9 @@ static struct clk_branch gcc_gp1_clk = {
.clkr = {
.enable_reg = 0x1900,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_names = (const char *[]) {
- "gp1_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &gp1_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1742,12 +1731,9 @@ static struct clk_branch gcc_gp2_clk = {
.clkr = {
.enable_reg = 0x1940,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_names = (const char *[]) {
- "gp2_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &gp2_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1760,12 +1746,9 @@ static struct clk_branch gcc_gp3_clk = {
.clkr = {
.enable_reg = 0x1980,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_names = (const char *[]) {
- "gp3_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &gp3_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1778,9 +1761,10 @@ static struct clk_branch gcc_lpass_q6_axi_clk = {
.clkr = {
.enable_reg = 0x0280,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_lpass_q6_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1791,9 +1775,10 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.clkr = {
.enable_reg = 0x0284,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1804,12 +1789,9 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
.clkr = {
.enable_reg = 0x1ad4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_aux_clk",
- .parent_names = (const char *[]) {
- "pcie_0_aux_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &pcie_0_aux_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1822,9 +1804,11 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
.clkr = {
.enable_reg = 0x1ad0,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1835,9 +1819,11 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
.clkr = {
.enable_reg = 0x1acc,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_mstr_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1849,12 +1835,9 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
.clkr = {
.enable_reg = 0x1ad8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
- .parent_names = (const char *[]) {
- "pcie_0_pipe_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &pcie_0_pipe_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1868,9 +1851,11 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
.clkr = {
.enable_reg = 0x1ac8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_slv_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1881,12 +1866,9 @@ static struct clk_branch gcc_pcie_1_aux_clk = {
.clkr = {
.enable_reg = 0x1b54,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_aux_clk",
- .parent_names = (const char *[]) {
- "pcie_1_aux_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &pcie_1_aux_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1899,9 +1881,11 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
.clkr = {
.enable_reg = 0x1b54,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1912,9 +1896,11 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
.clkr = {
.enable_reg = 0x1b50,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_mstr_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1926,12 +1912,9 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
.clkr = {
.enable_reg = 0x1b58,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk",
- .parent_names = (const char *[]) {
- "pcie_1_pipe_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &pcie_1_pipe_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1944,9 +1927,11 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
.clkr = {
.enable_reg = 0x1b48,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_slv_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1957,12 +1942,9 @@ static struct clk_branch gcc_pdm2_clk = {
.clkr = {
.enable_reg = 0x0ccc,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_names = (const char *[]) {
- "pdm2_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &pdm2_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1975,9 +1957,10 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.clkr = {
.enable_reg = 0x0cc4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1988,12 +1971,9 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.clkr = {
.enable_reg = 0x04c4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_names = (const char *[]) {
- "sdcc1_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &sdcc1_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2006,13 +1986,11 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.clkr = {
.enable_reg = 0x04c8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2023,13 +2001,11 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.clkr = {
.enable_reg = 0x0508,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2040,12 +2016,9 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.clkr = {
.enable_reg = 0x0504,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_names = (const char *[]) {
- "sdcc2_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &sdcc2_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2058,13 +2031,11 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
.clkr = {
.enable_reg = 0x0548,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc3_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2075,12 +2046,9 @@ static struct clk_branch gcc_sdcc3_apps_clk = {
.clkr = {
.enable_reg = 0x0544,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc3_apps_clk",
- .parent_names = (const char *[]) {
- "sdcc3_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &sdcc3_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2093,13 +2061,11 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
.clkr = {
.enable_reg = 0x0588,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2110,12 +2076,9 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
.clkr = {
.enable_reg = 0x0584,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_apps_clk",
- .parent_names = (const char *[]) {
- "sdcc4_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &sdcc4_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2128,12 +2091,9 @@ static struct clk_branch gcc_sys_noc_ufs_axi_clk = {
.clkr = {
.enable_reg = 0x1d7c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_ufs_axi_clk",
- .parent_names = (const char *[]) {
- "ufs_axi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &ufs_axi_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2146,12 +2106,9 @@ static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
.clkr = {
.enable_reg = 0x03fc,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_usb3_axi_clk",
- .parent_names = (const char *[]) {
- "usb30_master_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &usb30_master_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2164,9 +2121,10 @@ static struct clk_branch gcc_tsif_ahb_clk = {
.clkr = {
.enable_reg = 0x0d84,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2177,12 +2135,9 @@ static struct clk_branch gcc_tsif_ref_clk = {
.clkr = {
.enable_reg = 0x0d88,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ref_clk",
- .parent_names = (const char *[]) {
- "tsif_ref_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &tsif_ref_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2195,9 +2150,10 @@ static struct clk_branch gcc_ufs_ahb_clk = {
.clkr = {
.enable_reg = 0x1d4c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2208,12 +2164,9 @@ static struct clk_branch gcc_ufs_axi_clk = {
.clkr = {
.enable_reg = 0x1d48,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_axi_clk",
- .parent_names = (const char *[]) {
- "ufs_axi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &ufs_axi_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2226,12 +2179,9 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
.clkr = {
.enable_reg = 0x1d54,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_cfg_clk",
- .parent_names = (const char *[]) {
- "ufs_axi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &ufs_axi_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2245,9 +2195,10 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
.clkr = {
.enable_reg = 0x1d60,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2259,9 +2210,10 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
.clkr = {
.enable_reg = 0x1d64,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2272,12 +2224,9 @@ static struct clk_branch gcc_ufs_tx_cfg_clk = {
.clkr = {
.enable_reg = 0x1d50,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_cfg_clk",
- .parent_names = (const char *[]) {
- "ufs_axi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &ufs_axi_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2291,9 +2240,10 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.clkr = {
.enable_reg = 0x1d58,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2305,9 +2255,10 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
.clkr = {
.enable_reg = 0x1d5c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2318,9 +2269,13 @@ static struct clk_branch gcc_usb2_hs_phy_sleep_clk = {
.clkr = {
.enable_reg = 0x04ac,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb2_hs_phy_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep",
+ .name = "sleep"
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2331,12 +2286,9 @@ static struct clk_branch gcc_usb30_master_clk = {
.clkr = {
.enable_reg = 0x03c8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb30_master_clk",
- .parent_names = (const char *[]) {
- "usb30_master_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &usb30_master_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2349,12 +2301,9 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
.clkr = {
.enable_reg = 0x03d0,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb30_mock_utmi_clk",
- .parent_names = (const char *[]) {
- "usb30_mock_utmi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &usb30_mock_utmi_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2367,9 +2316,13 @@ static struct clk_branch gcc_usb30_sleep_clk = {
.clkr = {
.enable_reg = 0x03cc,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep",
+ .name = "sleep"
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2380,12 +2333,9 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
.clkr = {
.enable_reg = 0x1408,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_aux_clk",
- .parent_names = (const char *[]) {
- "usb3_phy_aux_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &usb3_phy_aux_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2393,14 +2343,28 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
},
};
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x140c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x140c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb_hs_ahb_clk = {
.halt_reg = 0x0488,
.clkr = {
.enable_reg = 0x0488,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2411,12 +2375,9 @@ static struct clk_branch gcc_usb_hs_system_clk = {
.clkr = {
.enable_reg = 0x0484,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_system_clk",
- .parent_names = (const char *[]) {
- "usb_hs_system_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &usb_hs_system_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2429,20 +2390,123 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
.clkr = {
.enable_reg = 0x1a84,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb_phy_cfg_ahb2phy_clk",
.ops = &clk_branch2_ops,
},
},
};
-static struct gdsc pcie_gdsc = {
- .gdscr = 0x1e18,
- .pd = {
- .name = "pcie",
+static struct clk_branch gpll0_out_mmsscc = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_mmsscc",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
},
- .pwrsts = PWRSTS_OFF_ON,
+ },
+};
+
+static struct clk_branch gpll0_out_msscc = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_msscc",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch pcie_0_phy_ldo = {
+ .halt_reg = 0x1e00,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1E00,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_0_phy_ldo",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch pcie_1_phy_ldo = {
+ .halt_reg = 0x1e04,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1E04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_1_phy_ldo",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ufs_phy_ldo = {
+ .halt_reg = 0x1e0c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1E0C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ufs_phy_ldo",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch usb_ss_phy_ldo = {
+ .halt_reg = 0x1e08,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1E08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_ss_phy_ldo",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x0e04,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x0e04,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x0d04,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
};
static struct gdsc pcie_0_gdsc = {
@@ -2482,6 +2546,9 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL4_EARLY] = &gpll4_early.clkr,
[GPLL4] = &gpll4.clkr,
+ [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+ [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+ [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
[UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
@@ -2616,13 +2683,23 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
[GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
[GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
[GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
[GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
[GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GPLL0_OUT_MMSSCC] = &gpll0_out_mmsscc.clkr,
+ [GPLL0_OUT_MSSCC] = &gpll0_out_msscc.clkr,
+ [PCIE_0_PHY_LDO] = &pcie_0_phy_ldo.clkr,
+ [PCIE_1_PHY_LDO] = &pcie_1_phy_ldo.clkr,
+ [UFS_PHY_LDO] = &ufs_phy_ldo.clkr,
+ [USB_SS_PHY_LDO] = &usb_ss_phy_ldo.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
};
static struct gdsc *gcc_msm8994_gdscs[] = {
- [PCIE_GDSC] = &pcie_gdsc,
+ /* This GDSC does not exist, but the ABI has to remain intact */
+ [PCIE_GDSC] = NULL,
[PCIE_0_GDSC] = &pcie_0_gdsc,
[PCIE_1_GDSC] = &pcie_1_gdsc,
[USB30_GDSC] = &usb30_gdsc,
@@ -2632,6 +2709,7 @@ static struct gdsc *gcc_msm8994_gdscs[] = {
static const struct qcom_reset_map gcc_msm8994_resets[] = {
[USB3_PHY_RESET] = { 0x1400 },
[USB3PHY_PHY_RESET] = { 0x1404 },
+ [MSS_RESET] = { 0x1680 },
[PCIE_PHY_0_RESET] = { 0x1b18 },
[PCIE_PHY_1_RESET] = { 0x1b98 },
[QUSB2_PHY_RESET] = { 0x04b8 },
@@ -2656,19 +2734,57 @@ static const struct qcom_cc_desc gcc_msm8994_desc = {
};
static const struct of_device_id gcc_msm8994_match_table[] = {
- { .compatible = "qcom,gcc-msm8994" },
+ { .compatible = "qcom,gcc-msm8992" },
+ { .compatible = "qcom,gcc-msm8994" }, /* V2 and V2.1 */
{}
};
MODULE_DEVICE_TABLE(of, gcc_msm8994_match_table);
static int gcc_msm8994_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct clk *clk;
-
- clk = devm_clk_register(dev, &xo.hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,gcc-msm8992")) {
+ /* MSM8992 has fewer clocks, and some have different freq tables */
+ gcc_msm8994_desc.clks[UFS_AXI_CLK_SRC] = NULL;
+ gcc_msm8994_desc.clks[GCC_LPASS_Q6_AXI_CLK] = NULL;
+ gcc_msm8994_desc.clks[UFS_PHY_LDO] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_AHB_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_AXI_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_RX_CFG_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_RX_SYMBOL_0_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_RX_SYMBOL_1_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_TX_CFG_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_TX_SYMBOL_0_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_TX_SYMBOL_1_CLK] = NULL;
+
+ sdcc1_apps_clk_src.freq_tbl = ftbl_sdcc1_apps_clk_src_8992;
+ blsp1_qup1_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp1_qup2_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp1_qup3_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp1_qup4_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp1_qup5_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp1_qup6_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup1_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup2_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup3_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup4_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup5_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup6_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+
+ /*
+ * Some MSM8992 boards may use the PCIe1 clocks and
+ * controller, but that is not standard, so they are
+ * disabled on MSM8992 by default.
+ */
+ gcc_msm8994_desc.clks[PCIE_1_AUX_CLK_SRC] = NULL;
+ gcc_msm8994_desc.clks[PCIE_1_PIPE_CLK_SRC] = NULL;
+ gcc_msm8994_desc.clks[PCIE_1_PHY_LDO] = NULL;
+ gcc_msm8994_desc.clks[GCC_PCIE_1_AUX_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_PCIE_1_CFG_AHB_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_PCIE_1_MSTR_AXI_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_PCIE_1_PIPE_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_PCIE_1_SLV_AXI_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_SYS_NOC_UFS_AXI_CLK] = NULL;
+ }
return qcom_cc_probe(pdev, &gcc_msm8994_desc);
}
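
The change above is mechanical across the whole file: every clock drops its parent_names string array in favour of either a parent_hws pointer (when the parent clock lives in the same driver) or a clk_parent_data entry (when the parent comes from the device tree), and the MSM8992 probe path then prunes clks[] entries instead of duplicating the driver. A minimal sketch of the three parent-description styles, assuming hypothetical "foo" names that are not part of this patch:

/*
 * Sketch only: "foo_clk_src" and "gcc_foo_*" are invented names. The
 * three styles below are the clk_init_data variants from
 * include/linux/clk-provider.h that this patch migrates between.
 */
#include <linux/clk-provider.h>

#include "clk-branch.h"
#include "clk-rcg.h"

static struct clk_rcg2 foo_clk_src;	/* stands in for a real RCG */

/* Legacy: parent resolved by global clock name at registration time. */
static const struct clk_init_data foo_init_legacy = {
	.name = "gcc_foo_clk",
	.parent_names = (const char *[]){ "foo_clk_src" },
	.num_parents = 1,
	.ops = &clk_branch2_ops,
};

/* Same-driver parent: link the clk_hw directly, no string lookup. */
static const struct clk_init_data foo_init_hw = {
	.name = "gcc_foo_clk",
	.parent_hws = (const struct clk_hw *[]){ &foo_clk_src.clkr.hw },
	.num_parents = 1,
	.ops = &clk_branch2_ops,
};

/*
 * External parent: resolved through the consumer's "clock-names" DT
 * property, with .name as a global-namespace fallback for old DTs.
 */
static const struct clk_init_data foo_init_fw = {
	.name = "gcc_foo_sleep_clk",
	.parent_data = &(const struct clk_parent_data){
		.fw_name = "sleep",
		.name = "sleep",
	},
	.num_parents = 1,
	.ops = &clk_branch2_ops,
};
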
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 050c91af888e..407e2c5caea4 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -25,109 +25,6 @@
#include "reset.h"
#include "gdsc.h"
-enum {
- P_AUD_REF_CLK,
- P_CORE_BI_PLL_TEST_SE,
- P_GPLL0_OUT_MAIN,
- P_GPLL4_OUT_MAIN,
- P_PLL0_EARLY_DIV_CLK_SRC,
- P_SLEEP_CLK,
- P_XO,
-};
-
-static const struct parent_map gcc_parent_map_0[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_0[] = {
- "xo",
- "gpll0_out_main",
- "gpll0_out_main",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_1[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_1[] = {
- "xo",
- "gpll0_out_main",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_2[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_SLEEP_CLK, 5 },
- { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_2[] = {
- "xo",
- "gpll0_out_main",
- "core_pi_sleep_clk",
- "gpll0_out_main",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_3[] = {
- { P_XO, 0 },
- { P_SLEEP_CLK, 5 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_3[] = {
- "xo",
- "core_pi_sleep_clk",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_4[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL4_OUT_MAIN, 5 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_4[] = {
- "xo",
- "gpll0_out_main",
- "gpll4_out_main",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_5[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_AUD_REF_CLK, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_5[] = {
- "xo",
- "gpll0_out_main",
- "aud_ref_clk",
- "core_bi_pll_test_se",
-};
-
-static struct clk_fixed_factor xo = {
- .mult = 1,
- .div = 1,
- .hw.init = &(struct clk_init_data){
- .name = "xo",
- .parent_names = (const char *[]){ "xo_board" },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
- },
-};
-
static struct pll_vco fabia_vco[] = {
{ 250000000, 2000000000, 0 },
{ 125000000, 1000000000, 1 },
@@ -143,7 +40,9 @@ static struct clk_alpha_pll gpll0 = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpll0",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
@@ -155,7 +54,9 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_even",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -166,7 +67,9 @@ static struct clk_alpha_pll_postdiv gpll0_out_main = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -177,7 +80,9 @@ static struct clk_alpha_pll_postdiv gpll0_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_odd",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -188,7 +93,9 @@ static struct clk_alpha_pll_postdiv gpll0_out_test = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_test",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -204,7 +111,9 @@ static struct clk_alpha_pll gpll1 = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gpll1",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
@@ -216,7 +125,9 @@ static struct clk_alpha_pll_postdiv gpll1_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_even",
- .parent_names = (const char *[]){ "gpll1" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -227,7 +138,9 @@ static struct clk_alpha_pll_postdiv gpll1_out_main = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_main",
- .parent_names = (const char *[]){ "gpll1" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -238,7 +151,9 @@ static struct clk_alpha_pll_postdiv gpll1_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_odd",
- .parent_names = (const char *[]){ "gpll1" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -249,7 +164,9 @@ static struct clk_alpha_pll_postdiv gpll1_out_test = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_test",
- .parent_names = (const char *[]){ "gpll1" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -265,7 +182,9 @@ static struct clk_alpha_pll gpll2 = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gpll2",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
@@ -277,7 +196,9 @@ static struct clk_alpha_pll_postdiv gpll2_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_even",
- .parent_names = (const char *[]){ "gpll2" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -288,7 +209,9 @@ static struct clk_alpha_pll_postdiv gpll2_out_main = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_main",
- .parent_names = (const char *[]){ "gpll2" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -299,7 +222,9 @@ static struct clk_alpha_pll_postdiv gpll2_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_odd",
- .parent_names = (const char *[]){ "gpll2" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -310,7 +235,9 @@ static struct clk_alpha_pll_postdiv gpll2_out_test = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_test",
- .parent_names = (const char *[]){ "gpll2" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -326,7 +253,9 @@ static struct clk_alpha_pll gpll3 = {
.enable_mask = BIT(3),
.hw.init = &(struct clk_init_data){
.name = "gpll3",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
@@ -338,7 +267,9 @@ static struct clk_alpha_pll_postdiv gpll3_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_even",
- .parent_names = (const char *[]){ "gpll3" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll3.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -349,7 +280,9 @@ static struct clk_alpha_pll_postdiv gpll3_out_main = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_main",
- .parent_names = (const char *[]){ "gpll3" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll3.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -360,7 +293,9 @@ static struct clk_alpha_pll_postdiv gpll3_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_odd",
- .parent_names = (const char *[]){ "gpll3" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll3.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -371,7 +306,9 @@ static struct clk_alpha_pll_postdiv gpll3_out_test = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_test",
- .parent_names = (const char *[]){ "gpll3" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll3.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -387,7 +324,9 @@ static struct clk_alpha_pll gpll4 = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gpll4",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
@@ -399,7 +338,9 @@ static struct clk_alpha_pll_postdiv gpll4_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_even",
- .parent_names = (const char *[]){ "gpll4" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll4.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -410,7 +351,9 @@ static struct clk_alpha_pll_postdiv gpll4_out_main = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_main",
- .parent_names = (const char *[]){ "gpll4" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll4.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -421,7 +364,9 @@ static struct clk_alpha_pll_postdiv gpll4_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_odd",
- .parent_names = (const char *[]){ "gpll4" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll4.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -432,12 +377,106 @@ static struct clk_alpha_pll_postdiv gpll4_out_test = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_test",
- .parent_names = (const char *[]){ "gpll4" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll4.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
+enum {
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_PLL0_EARLY_DIV_CLK_SRC,
+ P_SLEEP_CLK,
+ P_XO,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "sleep_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .hw = &gpll4_out_main.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .fw_name = "aud_ref_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
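
In the tables above, entry i of each parent_map pairs a P_* selector with its hardware mux field value, and entry i of the matching clk_parent_data array supplies that same parent; the RCG hunks below then replace the hand-counted num_parents with ARRAY_SIZE(). A hypothetical two-parent RCG wiring these pieces together (the "example_*" names and the 0x1000 offset are invented, not from this patch):

/*
 * Sketch, not part of the patch. Entry i of the clk_parent_data array
 * must describe the same parent as entry i of the parent_map.
 */
static const struct freq_tbl ftbl_example[] = {
	F(19200000, P_XO, 1, 0, 0),
	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
	{ }
};

static const struct parent_map example_map[] = {
	{ P_XO, 0 },			/* mux value 0 selects the XO */
	{ P_GPLL0_OUT_MAIN, 1 },	/* mux value 1 selects GPLL0 */
};

static const struct clk_parent_data example_data[] = {
	{ .fw_name = "xo" },			/* from DT clock-names */
	{ .hw = &gpll0_out_main.clkr.hw },	/* same-driver parent */
};

static struct clk_rcg2 example_clk_src = {
	.cmd_rcgr = 0x1000,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = example_map,
	.freq_tbl = ftbl_example,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_clk_src",
		.parent_data = example_data,
		/* stays correct if the map ever gains or loses an entry */
		.num_parents = ARRAY_SIZE(example_data),
		.ops = &clk_rcg2_ops,
	},
};
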
static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
@@ -452,8 +491,8 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -477,8 +516,8 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -491,8 +530,8 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -505,8 +544,8 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -519,8 +558,8 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -533,8 +572,8 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -547,8 +586,8 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -561,8 +600,8 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -575,8 +614,8 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -589,8 +628,8 @@ static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -603,8 +642,8 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -617,8 +656,8 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -650,8 +689,8 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -664,8 +703,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -678,8 +717,8 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart3_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -692,8 +731,8 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -706,8 +745,8 @@ static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -720,8 +759,8 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -734,8 +773,8 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -748,8 +787,8 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -762,8 +801,8 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -776,8 +815,8 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -790,8 +829,8 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -804,8 +843,8 @@ static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup5_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -818,8 +857,8 @@ static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup5_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -832,8 +871,8 @@ static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup6_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -846,8 +885,8 @@ static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup6_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -860,8 +899,8 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart1_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -874,8 +913,8 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart2_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -888,8 +927,8 @@ static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart3_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -909,8 +948,8 @@ static struct clk_rcg2 gp1_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
- .parent_names = gcc_parent_names_2,
- .num_parents = 5,
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
@@ -923,8 +962,8 @@ static struct clk_rcg2 gp2_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
- .parent_names = gcc_parent_names_2,
- .num_parents = 5,
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
@@ -937,8 +976,8 @@ static struct clk_rcg2 gp3_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
- .parent_names = gcc_parent_names_2,
- .num_parents = 5,
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
@@ -958,8 +997,8 @@ static struct clk_rcg2 hmss_ahb_clk_src = {
.freq_tbl = ftbl_hmss_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_ahb_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -977,8 +1016,8 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
.freq_tbl = ftbl_hmss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_rbcpr_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -996,8 +1035,8 @@ static struct clk_rcg2 pcie_aux_clk_src = {
.freq_tbl = ftbl_pcie_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie_aux_clk_src",
- .parent_names = gcc_parent_names_3,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -1015,8 +1054,8 @@ static struct clk_rcg2 pdm2_clk_src = {
.freq_tbl = ftbl_pdm2_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -1040,8 +1079,8 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.freq_tbl = ftbl_sdcc2_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
- .parent_names = gcc_parent_names_4,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1064,8 +1103,8 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.freq_tbl = ftbl_sdcc4_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc4_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1083,8 +1122,8 @@ static struct clk_rcg2 tsif_ref_clk_src = {
.freq_tbl = ftbl_tsif_ref_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "tsif_ref_clk_src",
- .parent_names = gcc_parent_names_5,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
.ops = &clk_rcg2_ops,
},
};
@@ -1104,8 +1143,8 @@ static struct clk_rcg2 ufs_axi_clk_src = {
.freq_tbl = ftbl_ufs_axi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_axi_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -1125,8 +1164,8 @@ static struct clk_rcg2 ufs_unipro_core_clk_src = {
.freq_tbl = ftbl_ufs_unipro_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_unipro_core_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -1147,8 +1186,8 @@ static struct clk_rcg2 usb30_master_clk_src = {
.freq_tbl = ftbl_usb30_master_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_master_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -1161,8 +1200,8 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
.freq_tbl = ftbl_hmss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_mock_utmi_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -1180,8 +1219,8 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
.freq_tbl = ftbl_usb3_phy_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb3_phy_aux_clk_src",
- .parent_names = gcc_parent_names_3,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -1207,8 +1246,8 @@ static struct clk_branch gcc_aggre1_ufs_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre1_ufs_axi_clk",
- .parent_names = (const char *[]){
- "ufs_axi_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &ufs_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1225,8 +1264,8 @@ static struct clk_branch gcc_aggre1_usb3_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre1_usb3_axi_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1348,8 +1387,8 @@ static struct clk_branch gcc_mmss_gpll0_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_gpll0_clk",
- .parent_names = (const char *[]){
- "gpll0_out_main",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_out_main.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1390,8 +1429,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1408,8 +1447,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1426,8 +1465,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1444,8 +1483,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1462,8 +1501,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1480,8 +1519,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1498,8 +1537,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1516,8 +1555,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1534,8 +1573,8 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup5_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1552,8 +1591,8 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup5_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1570,8 +1609,8 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup6_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1588,8 +1627,8 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup6_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1619,8 +1658,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1637,8 +1676,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1655,8 +1694,8 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart3_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart3_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart3_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1686,8 +1725,8 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1704,8 +1743,8 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1722,8 +1761,8 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1740,8 +1779,8 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1758,8 +1797,8 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1776,8 +1815,8 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1794,8 +1833,8 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1812,8 +1851,8 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1830,8 +1869,8 @@ static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup5_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup5_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup5_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1848,8 +1887,8 @@ static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup5_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup5_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup5_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1866,8 +1905,8 @@ static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup6_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup6_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup6_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1884,8 +1923,8 @@ static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup6_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup6_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup6_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1915,8 +1954,8 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1933,8 +1972,8 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1951,8 +1990,8 @@ static struct clk_branch gcc_blsp2_uart3_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart3_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart3_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_uart3_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1969,8 +2008,8 @@ static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_cfg_noc_usb3_axi_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1987,8 +2026,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_names = (const char *[]){
- "gp1_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2005,8 +2044,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_names = (const char *[]){
- "gp2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2023,8 +2062,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_names = (const char *[]){
- "gp3_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2112,8 +2151,8 @@ static struct clk_branch gcc_hmss_ahb_clk = {
.enable_mask = BIT(21),
.hw.init = &(struct clk_init_data){
.name = "gcc_hmss_ahb_clk",
- .parent_names = (const char *[]){
- "hmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &hmss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2143,8 +2182,8 @@ static struct clk_branch gcc_hmss_rbcpr_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_hmss_rbcpr_clk",
- .parent_names = (const char *[]){
- "hmss_rbcpr_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &hmss_rbcpr_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2179,8 +2218,8 @@ static struct clk_rcg2 hmss_gpll0_clk_src = {
.freq_tbl = ftbl_hmss_gpll0_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "hmss_gpll0_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = ARRAY_SIZE(gcc_parent_names_1),
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -2264,8 +2303,8 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_aux_clk",
- .parent_names = (const char *[]){
- "pcie_aux_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2334,8 +2373,8 @@ static struct clk_branch gcc_pcie_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_phy_aux_clk",
- .parent_names = (const char *[]){
- "pcie_aux_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2352,8 +2391,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_names = (const char *[]){
- "pdm2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2422,8 +2461,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_names = (const char *[]){
- "sdcc2_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2453,8 +2492,8 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_apps_clk",
- .parent_names = (const char *[]){
- "sdcc4_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc4_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2497,8 +2536,8 @@ static struct clk_branch gcc_tsif_ref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ref_clk",
- .parent_names = (const char *[]){
- "tsif_ref_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &tsif_ref_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2528,8 +2567,8 @@ static struct clk_branch gcc_ufs_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_axi_clk",
- .parent_names = (const char *[]){
- "ufs_axi_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &ufs_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2611,8 +2650,8 @@ static struct clk_branch gcc_ufs_unipro_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_unipro_core_clk",
- .parent_names = (const char *[]){
- "ufs_unipro_core_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &ufs_unipro_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2629,8 +2668,8 @@ static struct clk_branch gcc_usb30_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_master_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2647,8 +2686,8 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb30_mock_utmi_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb30_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2678,8 +2717,8 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_aux_clk",
- .parent_names = (const char *[]){
- "usb3_phy_aux_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb3_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2721,7 +2760,9 @@ static struct clk_branch gcc_hdmi_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_hdmi_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -2735,7 +2776,9 @@ static struct clk_branch gcc_ufs_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -2749,7 +2792,9 @@ static struct clk_branch gcc_usb3_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -2763,7 +2808,9 @@ static struct clk_branch gcc_pcie_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -2777,7 +2824,9 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_rx1_usb2_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -3115,10 +3164,6 @@ static const struct regmap_config gcc_msm8998_regmap_config = {
.fast_io = true,
};
-static struct clk_hw *gcc_msm8998_hws[] = {
- &xo.hw,
-};
-
static const struct qcom_cc_desc gcc_msm8998_desc = {
.config = &gcc_msm8998_regmap_config,
.clks = gcc_msm8998_clocks,
@@ -3127,8 +3172,6 @@ static const struct qcom_cc_desc gcc_msm8998_desc = {
.num_resets = ARRAY_SIZE(gcc_msm8998_resets),
.gdscs = gcc_msm8998_gdscs,
.num_gdscs = ARRAY_SIZE(gcc_msm8998_gdscs),
- .clk_hws = gcc_msm8998_hws,
- .num_clk_hws = ARRAY_SIZE(gcc_msm8998_hws),
};
static int gcc_msm8998_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
new file mode 100644
index 000000000000..b6fa7b8e8006
--- /dev/null
+++ b/drivers/clk/qcom/gcc-qcm2290.c
@@ -0,0 +1,3044 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-qcm2290.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_AUX2,
+ P_GPLL0_OUT_EARLY,
+ P_GPLL10_OUT_MAIN,
+ P_GPLL11_OUT_AUX,
+ P_GPLL11_OUT_AUX2,
+ P_GPLL11_OUT_MAIN,
+ P_GPLL3_OUT_EARLY,
+ P_GPLL3_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL5_OUT_MAIN,
+ P_GPLL6_OUT_EARLY,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_GPLL8_OUT_EARLY,
+ P_GPLL8_OUT_MAIN,
+ P_GPLL9_OUT_EARLY,
+ P_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
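+/*
+ * VCO tables: each entry is { min_freq, max_freq, val } in Hz, where val is
+ * the selector programmed into the PLL's VCO field (see the vco_val/vco_mask
+ * pairs in the alpha_pll_config structs below).
+ */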
+static const struct pll_vco brammo_vco[] = {
+ { 500000000, 1250000000, 0 },
+};
+
+static const struct pll_vco default_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+static const struct pll_vco spark_vco[] = {
+ { 750000000, 1500000000, 1 },
+};
+
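+/*
+ * Register layout differs between the alpha PLL variants on this SoC, so
+ * each PLL type carries its own offset map: DEFAULT places CONFIG_CTL at
+ * 0x20 and STATUS at 0x24, while BRAMMO has no USER_CTL_U and packs
+ * CONFIG_CTL/STATUS at 0x1C/0x20.
+ */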
+static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1C,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1C,
+ [PLL_OFF_STATUS] = 0x20,
+ },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
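+/* Post-divider tables are { register value, divisor } pairs: 0x1 selects /2. */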
+static const struct clk_div_table post_div_table_gpll0_out_aux2[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_aux2),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_aux2",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 1152MHz configuration */
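+/*
+ * With a 19.2 MHz bi_tcxo reference the output rate works out as
+ * 19.2 MHz * (L + alpha / 2^40); L = 0x3c (60) with alpha = 0 gives 1152 MHz.
+ */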
+static const struct alpha_pll_config gpll10_config = {
+ .l = 0x3c,
+ .alpha = 0x0,
+ .vco_val = 0x1 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055B,
+ .test_ctl_hi1_val = 0x1,
+};
+
+static struct clk_alpha_pll gpll10 = {
+ .offset = 0xa000,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll10",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 532MHz configuration */
+static const struct alpha_pll_config gpll11_config = {
+ .l = 0x1B,
+ .alpha = 0x55555555,
+ .alpha_hi = 0xB5,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055B,
+ .test_ctl_hi1_val = 0x1,
+};
+
+static struct clk_alpha_pll gpll11 = {
+ .offset = 0xb000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll11",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll3_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_main = {
+ .offset = 0x3000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll3_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll3_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll3.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll5",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_main = {
+ .offset = 0x6000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll6.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 533.2MHz configuration */
+static const struct alpha_pll_config gpll8_config = {
+ .l = 0x1B,
+ .alpha = 0x55555555,
+ .alpha_hi = 0xC5,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(11, 8),
+ .config_ctl_val = 0x4001055B,
+ .test_ctl_hi1_val = 0x1,
+};
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x8000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll8",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_main = {
+ .offset = 0x8000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll8_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll8_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll8.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+/* 1152MHz configuration */
+static const struct alpha_pll_config gpll9_config = {
+ .l = 0x3C,
+ .alpha = 0x0,
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(9, 8),
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+ .config_ctl_val = 0x00004289,
+ .test_ctl_val = 0x08000000,
+};
+
+static struct clk_alpha_pll gpll9 = {
+ .offset = 0x9000,
+ .vco_table = brammo_vco,
+ .num_vco = ARRAY_SIZE(brammo_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll9_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll9_out_main = {
+ .offset = 0x9000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll9_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll9_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll9.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+};
+
+static const struct clk_parent_data gcc_parents_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL6_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data gcc_parents_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll6_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL9_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll9.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll3_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_7[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL6_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_8[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll6_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_9[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL6_OUT_EARLY, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_10[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll3_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_12[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_13[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL11_OUT_MAIN, 1 },
+ { P_GPLL11_OUT_AUX, 2 },
+ { P_GPLL11_OUT_AUX2, 3 },
+};
+
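+/*
+ * The AUX and AUX2 taps of GPLL11 are not modelled as separate clocks here,
+ * so all three selector indices resolve to the main gpll11 output.
+ */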
+static const struct clk_parent_data gcc_parents_14[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll11.clkr.hw },
+ { .hw = &gpll11.clkr.hw },
+ { .hw = &gpll11.clkr.hw },
+};
+
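+/*
+ * Frequency table rows are F(rate, parent, pre_div, m, n); pre_div may be
+ * fractional (e.g. 1.5) since the F() macro encodes it as 2 * div - 1, and
+ * m/n of 0 means the MND counter is bypassed.
+ */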
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1a034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv = {
+ .reg = 0x1a04c,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_axi_clk_src = {
+ .cmd_rcgr = 0x5802c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cci_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_cci_clk_src = {
+ .cmd_rcgr = 0x56000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_camss_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_clk_src",
+ .parent_data = gcc_parents_9,
+ .num_parents = ARRAY_SIZE(gcc_parents_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ F(268800000, P_GPLL4_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x45000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = ARRAY_SIZE(gcc_parents_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x4501c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = ARRAY_SIZE(gcc_parents_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL9_OUT_MAIN, 1, 1, 24),
+ F(64000000, P_GPLL9_OUT_EARLY, 9, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
+ .cmd_rcgr = 0x5101c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
+ .cmd_rcgr = 0x51038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
+ .cmd_rcgr = 0x51054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(171428571, P_GPLL0_OUT_EARLY, 3.5, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
+ .cmd_rcgr = 0x55024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_ope_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
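+/*
+ * 266.6, 465 and 580 MHz all come from GPLL8 at a fixed divider, so hitting
+ * those rates presumably relies on reprogramming the PLL itself between
+ * operating points.
+ */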
+static const struct freq_tbl ftbl_gcc_camss_ope_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL8_OUT_MAIN, 2, 0, 0),
+ F(266600000, P_GPLL8_OUT_MAIN, 1, 0, 0),
+ F(465000000, P_GPLL8_OUT_MAIN, 1, 0, 0),
+ F(580000000, P_GPLL8_OUT_EARLY, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_clk_src = {
+ .cmd_rcgr = 0x55004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_ope_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(128000000, P_GPLL10_OUT_MAIN, 9, 0, 0),
+ F(135529412, P_GPLL10_OUT_MAIN, 8.5, 0, 0),
+ F(144000000, P_GPLL10_OUT_MAIN, 8, 0, 0),
+ F(153600000, P_GPLL10_OUT_MAIN, 7.5, 0, 0),
+ F(164571429, P_GPLL10_OUT_MAIN, 7, 0, 0),
+ F(177230769, P_GPLL10_OUT_MAIN, 6.5, 0, 0),
+ F(192000000, P_GPLL10_OUT_MAIN, 6, 0, 0),
+ F(209454545, P_GPLL10_OUT_MAIN, 5.5, 0, 0),
+ F(230400000, P_GPLL10_OUT_MAIN, 5, 0, 0),
+ F(256000000, P_GPLL10_OUT_MAIN, 4.5, 0, 0),
+ F(288000000, P_GPLL10_OUT_MAIN, 4, 0, 0),
+ F(329142857, P_GPLL10_OUT_MAIN, 3.5, 0, 0),
+ F(384000000, P_GPLL10_OUT_MAIN, 3, 0, 0),
+ F(460800000, P_GPLL10_OUT_MAIN, 2.5, 0, 0),
+ F(576000000, P_GPLL10_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
+ .cmd_rcgr = 0x52004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk_src",
+ .parent_data = gcc_parents_7,
+ .num_parents = ARRAY_SIZE(gcc_parents_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(120000000, P_GPLL0_OUT_EARLY, 5, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ F(426400000, P_GPLL3_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0x52094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk_src",
+ .parent_data = gcc_parents_8,
+ .num_parents = ARRAY_SIZE(gcc_parents_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
+ .cmd_rcgr = 0x52024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk_src",
+ .parent_data = gcc_parents_7,
+ .num_parents = ARRAY_SIZE(gcc_parents_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0x520b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk_src",
+ .parent_data = gcc_parents_8,
+ .num_parents = ARRAY_SIZE(gcc_parents_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ F(341333333, P_GPLL6_OUT_EARLY, 1, 4, 9),
+ F(384000000, P_GPLL6_OUT_EARLY, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x52064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_camss_tfe_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_cphy_rx_clk_src",
+ .parent_data = gcc_parents_10,
+ .num_parents = ARRAY_SIZE(gcc_parents_10),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_top_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(40000000, P_GPLL0_OUT_AUX2, 7.5, 0, 0),
+ F(80000000, P_GPLL0_OUT_EARLY, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x58010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_top_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x4d004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x4e004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x4f004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_AUX2, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x20010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_AUX2, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_AUX2, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_AUX2, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_AUX2, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_AUX2, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_AUX2, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_AUX2, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_AUX2, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_AUX2, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_AUX2, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_AUX2, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_AUX2, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x1f148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
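+/*
+ * The QUP init data is a named struct rather than an anonymous literal
+ * because DEFINE_RCG_DFS() at the bottom of the file references
+ * <clock>_init by name, letting the DFS setup code rewrite the ops and
+ * frequency table at probe time.
+ */
+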
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x1f278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x1f3a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x1f4d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x1f608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x1f738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_AUX2, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_AUX2, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x38028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
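+/*
+ * clk_rcg2_floor_ops rounds a requested rate down to the nearest table
+ * entry rather than up, so the SDCC card clock never runs faster than
+ * the rate the MMC core asked for.
+ */
+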
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x38010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_12,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parents_12,
+ .num_parents = ARRAY_SIZE(gcc_parents_12),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_AUX2, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_EARLY, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x1a01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1a060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parents_13,
+ .num_parents = ARRAY_SIZE(gcc_parents_13),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_video_venus_clk_src[] = {
+ F(133333333, P_GPLL11_OUT_MAIN, 4.5, 0, 0),
+ F(240000000, P_GPLL11_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_video_venus_clk_src = {
+ .cmd_rcgr = 0x58060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_14,
+ .freq_tbl = ftbl_gcc_video_venus_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_clk_src",
+ .parent_data = gcc_parents_14,
+ .num_parents = ARRAY_SIZE(gcc_parents_14),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_csi_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x1d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_csi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
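+/*
+ * Branch clocks: enable_mask in enable_reg gates the branch, halt_reg
+ * is polled according to halt_check (BRANCH_HALT_DELAY means the
+ * status bit cannot be trusted, so the framework only delays), and
+ * hwcg_reg/hwcg_bit opt the branch into hardware clock gating.
+ */
+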
+static struct clk_branch gcc_ahb2phy_usb_clk = {
+ .halt_reg = 0x1d008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1d008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_usb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
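+/*
+ * Voted branches are enabled through the shared vote registers at
+ * 0x79004/0x7900c instead of a local CBCR. With BRANCH_HALT_VOTED the
+ * halt bit is only polled on enable; on disable the framework just
+ * delays, since other masters may still be holding a vote.
+ */
+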
+static struct clk_branch gcc_cam_throttle_nrt_clk = {
+ .halt_reg = 0x17070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_nrt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cam_throttle_rt_clk = {
+ .halt_reg = 0x1706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_rt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_axi_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
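+/*
+ * Leaf branches point at their RCG directly through parent_hws, and
+ * CLK_SET_RATE_PARENT forwards clk_set_rate() on the branch up to that
+ * RCG, so consumers only ever deal with the branch clock.
+ */
+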
+static struct clk_branch gcc_camss_camnoc_atb_clk = {
+ .halt_reg = 0x5804c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x5804c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_camnoc_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_camnoc_nts_xo_clk = {
+ .halt_reg = 0x58050,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x58050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_camnoc_nts_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_0_clk = {
+ .halt_reg = 0x56018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x56018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_0_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_cci_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_0_clk = {
+ .halt_reg = 0x52088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_0_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_1_clk = {
+ .halt_reg = 0x5208c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5208c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_1_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x45018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x45018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_csi0phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x45034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x45034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_csi1phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x51018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_mclk0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x51034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_mclk1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+ .halt_reg = 0x51050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_mclk2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk3_clk = {
+ .halt_reg = 0x5106c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5106c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_mclk3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_nrt_axi_clk = {
+ .halt_reg = 0x58054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_nrt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_ahb_clk = {
+ .halt_reg = 0x5503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_ope_ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_clk = {
+ .halt_reg = 0x5501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_ope_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_rt_axi_clk = {
+ .halt_reg = 0x5805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_rt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_clk = {
+ .halt_reg = 0x5201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0x5207c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5207c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_csid_clk = {
+ .halt_reg = 0x520ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_0_csid_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_clk = {
+ .halt_reg = 0x5203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0x52080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_csid_clk = {
+ .halt_reg = 0x520cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_1_csid_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x58028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_top_ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a084,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1a084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb30_prim_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div gcc_disp_gpll0_clk_src = {
+ .reg = 0x17058,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_disp_gpll0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_core_clk = {
+ .halt_reg = 0x17064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0x1702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x4d000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_gp1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x4e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_gp2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x4f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_gp3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x36004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[])
+ { &gpll0.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[])
+ { &gpll0_out_aux2.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x36100,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x36100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x3600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x36018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_throttle_core_clk = {
+ .halt_reg = 0x36048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(31),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_throttle_core_clk",
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_pdm2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x20004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pwm0_xo512_clk = {
+ .halt_reg = 0x2002c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2002c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pwm0_xo512_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x17060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17060,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x17010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x1f014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1f00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1f144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1f274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1f3a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1f4d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x1f604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s4_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x1f734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s5_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x1f008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x38008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_sdcc1_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x3800c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_sdcc1_ice_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_sdcc2_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x2b06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a080,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1a080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb30_prim_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x1a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb30_prim_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x1a018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb30_prim_mock_utmi_postdiv.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x1a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x9f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x1a054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb3_prim_phy_aux_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x1a058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1a058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vcodec0_axi_clk = {
+ .halt_reg = 0x6e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ahb_clk = {
+ .halt_reg = 0x6e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ctl_axi_clk = {
+ .halt_reg = 0x6e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x1701c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_throttle_core_clk = {
+ .halt_reg = 0x17068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_vcodec0_sys_clk = {
+ .halt_reg = 0x580a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x580a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x580a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_vcodec0_sys_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_video_venus_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_venus_ctl_clk = {
+ .halt_reg = 0x5808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_ctl_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_video_venus_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0x17024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gcc_camss_top_gdsc = {
+ .gdscr = 0x58004,
+ .pd = {
+ .name = "gcc_camss_top",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x1a004,
+ .pd = {
+ .name = "gcc_usb30_prim",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gcc_vcodec0_gdsc = {
+ .gdscr = 0x58098,
+ .pd = {
+ .name = "gcc_vcodec0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gcc_venus_gdsc = {
+ .gdscr = 0x5807c,
+ .pd = {
+ .name = "gcc_venus",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d060,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
+ .gdscr = 0x7d07c,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc = {
+ .gdscr = 0x7d074,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_rt",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc = {
+ .gdscr = 0x7d078,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_nrt",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
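+/*
+ * GDSCs (global distributed switch controllers) are exposed as generic
+ * power domains. VOTABLE marks domains whose control register is a
+ * shared vote, so powering off only drops this master's vote rather
+ * than forcing the domain down.
+ */
+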
+static struct clk_regmap *gcc_qcm2290_clocks[] = {
+ [GCC_AHB2PHY_CSI_CLK] = &gcc_ahb2phy_csi_clk.clkr,
+ [GCC_AHB2PHY_USB_CLK] = &gcc_ahb2phy_usb_clk.clkr,
+ [GCC_BIMC_GPU_AXI_CLK] = &gcc_bimc_gpu_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAM_THROTTLE_NRT_CLK] = &gcc_cam_throttle_nrt_clk.clkr,
+ [GCC_CAM_THROTTLE_RT_CLK] = &gcc_cam_throttle_rt_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CAMSS_AXI_CLK] = &gcc_camss_axi_clk.clkr,
+ [GCC_CAMSS_AXI_CLK_SRC] = &gcc_camss_axi_clk_src.clkr,
+ [GCC_CAMSS_CAMNOC_ATB_CLK] = &gcc_camss_camnoc_atb_clk.clkr,
+ [GCC_CAMSS_CAMNOC_NTS_XO_CLK] = &gcc_camss_camnoc_nts_xo_clk.clkr,
+ [GCC_CAMSS_CCI_0_CLK] = &gcc_camss_cci_0_clk.clkr,
+ [GCC_CAMSS_CCI_CLK_SRC] = &gcc_camss_cci_clk_src.clkr,
+ [GCC_CAMSS_CPHY_0_CLK] = &gcc_camss_cphy_0_clk.clkr,
+ [GCC_CAMSS_CPHY_1_CLK] = &gcc_camss_cphy_1_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK_SRC] = &gcc_camss_csi0phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK_SRC] = &gcc_camss_csi1phytimer_clk_src.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK_SRC] = &gcc_camss_mclk0_clk_src.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK_SRC] = &gcc_camss_mclk1_clk_src.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK_SRC] = &gcc_camss_mclk2_clk_src.clkr,
+ [GCC_CAMSS_MCLK3_CLK] = &gcc_camss_mclk3_clk.clkr,
+ [GCC_CAMSS_MCLK3_CLK_SRC] = &gcc_camss_mclk3_clk_src.clkr,
+ [GCC_CAMSS_NRT_AXI_CLK] = &gcc_camss_nrt_axi_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK] = &gcc_camss_ope_ahb_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK_SRC] = &gcc_camss_ope_ahb_clk_src.clkr,
+ [GCC_CAMSS_OPE_CLK] = &gcc_camss_ope_clk.clkr,
+ [GCC_CAMSS_OPE_CLK_SRC] = &gcc_camss_ope_clk_src.clkr,
+ [GCC_CAMSS_RT_AXI_CLK] = &gcc_camss_rt_axi_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK] = &gcc_camss_tfe_0_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK_SRC] = &gcc_camss_tfe_0_clk_src.clkr,
+ [GCC_CAMSS_TFE_0_CPHY_RX_CLK] = &gcc_camss_tfe_0_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK] = &gcc_camss_tfe_0_csid_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK_SRC] = &gcc_camss_tfe_0_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CLK] = &gcc_camss_tfe_1_clk.clkr,
+ [GCC_CAMSS_TFE_1_CLK_SRC] = &gcc_camss_tfe_1_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CPHY_RX_CLK] = &gcc_camss_tfe_1_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK] = &gcc_camss_tfe_1_csid_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK_SRC] = &gcc_camss_tfe_1_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_CPHY_RX_CLK_SRC] = &gcc_camss_tfe_cphy_rx_clk_src.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK_SRC] = &gcc_camss_top_ahb_clk_src.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_THROTTLE_CORE_CLK] = &gcc_disp_throttle_core_clk.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_THROTTLE_CORE_CLK] = &gcc_gpu_throttle_core_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PWM0_XO512_CLK] = &gcc_pwm0_xo512_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_CFG_AHB_CLK] = &gcc_qmip_gpu_cfg_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_SYS_NOC_USB3_PRIM_AXI_CLK] = &gcc_sys_noc_usb3_prim_axi_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV] =
+ &gcc_usb30_prim_mock_utmi_postdiv.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_VCODEC0_AXI_CLK] = &gcc_vcodec0_axi_clk.clkr,
+ [GCC_VENUS_AHB_CLK] = &gcc_venus_ahb_clk.clkr,
+ [GCC_VENUS_CTL_AXI_CLK] = &gcc_venus_ctl_axi_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_THROTTLE_CORE_CLK] = &gcc_video_throttle_core_clk.clkr,
+ [GCC_VIDEO_VCODEC0_SYS_CLK] = &gcc_video_vcodec0_sys_clk.clkr,
+ [GCC_VIDEO_VENUS_CLK_SRC] = &gcc_video_venus_clk_src.clkr,
+ [GCC_VIDEO_VENUS_CTL_CLK] = &gcc_video_venus_ctl_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_AUX2] = &gpll0_out_aux2.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL10] = &gpll10.clkr,
+ [GPLL11] = &gpll11.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_OUT_MAIN] = &gpll3_out_main.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL5] = &gpll5.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_MAIN] = &gpll6_out_main.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL8] = &gpll8.clkr,
+ [GPLL8_OUT_MAIN] = &gpll8_out_main.clkr,
+ [GPLL9] = &gpll9.clkr,
+ [GPLL9_OUT_MAIN] = &gpll9_out_main.clkr,
+};
+
+static const struct qcom_reset_map gcc_qcm2290_resets[] = {
+ [GCC_CAMSS_OPE_BCR] = { 0x55000 },
+ [GCC_CAMSS_TFE_BCR] = { 0x52000 },
+ [GCC_CAMSS_TOP_BCR] = { 0x58000 },
+ [GCC_GPU_BCR] = { 0x36000 },
+ [GCC_MMSS_BCR] = { 0x17000 },
+ [GCC_PDM_BCR] = { 0x20000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x1f000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x1c000 },
+ [GCC_SDCC1_BCR] = { 0x38000 },
+ [GCC_SDCC2_BCR] = { 0x1e000 },
+ [GCC_USB30_PRIM_BCR] = { 0x1a000 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x1b000 },
+ [GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x1b008 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x1d000 },
+ [GCC_VCODEC0_BCR] = { 0x58094 },
+ [GCC_VENUS_BCR] = { 0x58078 },
+ [GCC_VIDEO_INTERFACE_BCR] = { 0x6e000 },
+};
+
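+/*
+ * Each entry maps a reset index to the offset of the block's BCR
+ * (block control register); the qcom reset controller asserts the
+ * reset by setting a single bit there (bit 0 unless overridden).
+ */
+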
+static struct gdsc *gcc_qcm2290_gdscs[] = {
+ [GCC_CAMSS_TOP_GDSC] = &gcc_camss_top_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_VCODEC0_GDSC] = &gcc_vcodec0_gdsc,
+ [GCC_VENUS_GDSC] = &gcc_venus_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc,
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+};
+
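+/*
+ * A sketch of the DFS flow, assuming the usual qcom behaviour:
+ * qcom_cc_register_rcg_dfs() checks whether boot firmware enabled
+ * dynamic frequency switching on each QUP RCG and, if it did, swaps in
+ * DFS-aware ops and drops the static table so rates are read from the
+ * per-level DFS registers instead.
+ */
+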
+static const struct regmap_config gcc_qcm2290_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc7000,
+ .fast_io = true,
+};
+
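+/*
+ * fast_io makes the regmap take a spinlock instead of a mutex, which
+ * is required because clk_enable()/clk_disable() may be called from
+ * atomic context.
+ */
+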
+static const struct qcom_cc_desc gcc_qcm2290_desc = {
+ .config = &gcc_qcm2290_regmap_config,
+ .clks = gcc_qcm2290_clocks,
+ .num_clks = ARRAY_SIZE(gcc_qcm2290_clocks),
+ .resets = gcc_qcm2290_resets,
+ .num_resets = ARRAY_SIZE(gcc_qcm2290_resets),
+ .gdscs = gcc_qcm2290_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_qcm2290_gdscs),
+};
+
+static const struct of_device_id gcc_qcm2290_match_table[] = {
+ { .compatible = "qcom,gcc-qcm2290" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_qcm2290_match_table);
+
+static int gcc_qcm2290_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_qcm2290_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
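+ /*
+ * Program the L value and alpha configuration of the PLLs that are
+ * presumably left untouched by the boot chain before registering
+ * them with the clk framework.
+ */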
+ clk_alpha_pll_configure(&gpll10, regmap, &gpll10_config);
+ clk_alpha_pll_configure(&gpll11, regmap, &gpll11_config);
+ clk_alpha_pll_configure(&gpll8, regmap, &gpll8_config);
+ clk_alpha_pll_configure(&gpll9, regmap, &gpll9_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_qcm2290_desc, regmap);
+}
+
+static struct platform_driver gcc_qcm2290_driver = {
+ .probe = gcc_qcm2290_probe,
+ .driver = {
+ .name = "gcc-qcm2290",
+ .of_match_table = gcc_qcm2290_match_table,
+ },
+};
+
+static int __init gcc_qcm2290_init(void)
+{
+ return platform_driver_register(&gcc_qcm2290_driver);
+}
+subsys_initcall(gcc_qcm2290_init);
+
+static void __exit gcc_qcm2290_exit(void)
+{
+ platform_driver_unregister(&gcc_qcm2290_driver);
+}
+module_exit(gcc_qcm2290_exit);
+
+MODULE_DESCRIPTION("QTI GCC QCM2290 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 6cefcdc86990..8fb6bd69f240 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -197,12 +197,6 @@ static const struct clk_parent_data gcc_parent_data_0[] = {
{ .hw = &gcc_gpll0_out_even.clkr.hw },
};
-static const struct clk_parent_data gcc_parent_data_0_ao[] = {
- { .fw_name = "bi_tcxo_ao" },
- { .hw = &gcc_gpll0.clkr.hw },
- { .hw = &gcc_gpll0_out_even.clkr.hw },
-};
-
static const struct parent_map gcc_parent_map_1[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -479,24 +473,6 @@ static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
},
},
};
-static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- { }
-};
-
-static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
- .cmd_rcgr = 0x4800c,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = gcc_parent_map_0,
- .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_cpuss_ahb_clk_src",
- .parent_data = gcc_parent_data_0_ao,
- .num_parents = ARRAY_SIZE(gcc_parent_data_0_ao),
- .ops = &clk_rcg2_ops,
- },
-};
static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
@@ -1239,21 +1215,6 @@ static struct clk_rcg2 gcc_sec_ctrl_clk_src = {
},
};
-static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
- .reg = 0x48024,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
- .name = "gcc_cpuss_ahb_postdiv_clk_src",
- .parent_hws = (const struct clk_hw*[]){
- &gcc_cpuss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_regmap_div_ro_ops,
- },
-};
-
static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
.reg = 0xf050,
.shift = 0,
@@ -1500,27 +1461,6 @@ static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
},
};
-/* For CPUSS functionality the AHB clock needs to be left enabled */
-static struct clk_branch gcc_cpuss_ahb_clk = {
- .halt_reg = 0x48000,
- .halt_check = BRANCH_HALT_VOTED,
- .hwcg_reg = 0x48000,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0x52000,
- .enable_mask = BIT(21),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_cpuss_ahb_clk",
- .parent_hws = (const struct clk_hw*[]){
- &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_ddrss_gpu_axi_clk = {
.halt_reg = 0x71154,
.halt_check = BRANCH_HALT_SKIP,
@@ -2608,27 +2548,6 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
},
};
-/* For CPUSS functionality the AHB clock needs to be left enabled */
-static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
- .halt_reg = 0x48178,
- .halt_check = BRANCH_HALT_VOTED,
- .hwcg_reg = 0x48178,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0x52000,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sys_noc_cpuss_ahb_clk",
- .parent_hws = (const struct clk_hw*[]){
- &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_throttle_pcie_ahb_clk = {
.halt_reg = 0x9001c,
.halt_check = BRANCH_HALT,
@@ -3294,9 +3213,6 @@ static struct clk_regmap *gcc_sc7280_clocks[] = {
[GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
[GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
[GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
- [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
- [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
- [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr,
[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
[GCC_DDRSS_PCIE_SF_CLK] = &gcc_ddrss_pcie_sf_clk.clkr,
[GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
@@ -3403,7 +3319,6 @@ static struct clk_regmap *gcc_sc7280_clocks[] = {
[GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
[GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
[GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
- [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
[GCC_THROTTLE_PCIE_AHB_CLK] = &gcc_throttle_pcie_ahb_clk.clkr,
[GCC_TITAN_NRT_THROTTLE_CORE_CLK] =
&gcc_titan_nrt_throttle_core_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 4d36f96e9ae2..9b97425008ce 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -284,7 +284,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -309,7 +309,7 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -323,7 +323,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -337,7 +337,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -351,7 +351,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -365,7 +365,7 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -379,7 +379,7 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -393,7 +393,7 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -426,7 +426,7 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -440,7 +440,7 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -454,7 +454,7 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -468,7 +468,7 @@ static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -482,7 +482,7 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -496,7 +496,7 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -510,7 +510,7 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -524,7 +524,7 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -538,7 +538,7 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -552,7 +552,7 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -566,7 +566,7 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart1_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -580,7 +580,7 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart2_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -601,7 +601,7 @@ static struct clk_rcg2 gp1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -615,7 +615,7 @@ static struct clk_rcg2 gp2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -629,7 +629,7 @@ static struct clk_rcg2 gp3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -649,7 +649,7 @@ static struct clk_rcg2 hmss_gpll0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_gpll0_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -670,7 +670,7 @@ static struct clk_rcg2 hmss_gpll4_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_gpll4_clk_src",
.parent_data = gcc_parent_data_xo_gpll4,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll4),
.ops = &clk_rcg2_ops,
},
};
@@ -689,7 +689,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_rbcpr_clk_src",
.parent_data = gcc_parent_data_xo_gpll0,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -708,7 +708,7 @@ static struct clk_rcg2 pdm2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -730,7 +730,7 @@ static struct clk_rcg2 qspi_ser_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "qspi_ser_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -756,7 +756,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -778,7 +778,7 @@ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_ice_core_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -804,7 +804,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -827,7 +827,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_axi_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -848,7 +848,7 @@ static struct clk_rcg2 ufs_ice_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_ice_core_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -862,7 +862,7 @@ static struct clk_rcg2 ufs_phy_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_phy_aux_clk_src",
.parent_data = gcc_parent_data_xo_sleep_clk,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_sleep_clk),
.ops = &clk_rcg2_ops,
},
};
@@ -883,7 +883,7 @@ static struct clk_rcg2 ufs_unipro_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_unipro_core_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -904,7 +904,7 @@ static struct clk_rcg2 usb20_master_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb20_master_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -924,7 +924,7 @@ static struct clk_rcg2 usb20_mock_utmi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb20_mock_utmi_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -949,7 +949,7 @@ static struct clk_rcg2 usb30_master_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_master_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -970,7 +970,7 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_mock_utmi_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -990,7 +990,7 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb3_phy_aux_clk_src",
.parent_data = gcc_parent_data_xo_sleep_clk,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_sleep_clk),
.ops = &clk_rcg2_ops,
},
};
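
Note: every hunk in this file makes the same substitution — a hard-coded parent count becomes ARRAY_SIZE(<table>) — so the count can no longer drift when entries are added to or removed from the table it describes. A standalone illustration of the idiom (a userspace toy with a simplified struct, not the kernel definitions):

#include <stdio.h>

/* Kernel-style element count of a statically sized array, minus the
 * kernel's additional compile-time type checks. */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct clk_parent_data { const char *fw_name; };

static const struct clk_parent_data parents[] = {
	{ .fw_name = "xo" },
	{ .fw_name = "gpll0" },
	{ .fw_name = "gpll0_early_div" },
};

int main(void)
{
	/* Stays correct if entries are added or removed above,
	 * unlike a hard-coded ".num_parents = 3". */
	printf("num_parents = %zu\n", ARRAY_SIZE(parents));
	return 0;
}
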
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 4ece326ea233..7e1dd8ccfa38 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
@@ -50,6 +51,22 @@ enum gdsc_status {
GDSC_ON
};
+static int gdsc_pm_runtime_get(struct gdsc *sc)
+{
+ if (!sc->dev)
+ return 0;
+
+ return pm_runtime_resume_and_get(sc->dev);
+}
+
+static int gdsc_pm_runtime_put(struct gdsc *sc)
+{
+ if (!sc->dev)
+ return 0;
+
+ return pm_runtime_put_sync(sc->dev);
+}
+
/* Returns 1 if the GDSC is in the queried status, 0 if not, and < 0 on error */
static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
@@ -232,9 +249,8 @@ static void gdsc_retain_ff_on(struct gdsc *sc)
regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
}
-static int gdsc_enable(struct generic_pm_domain *domain)
+static int _gdsc_enable(struct gdsc *sc)
{
- struct gdsc *sc = domain_to_gdsc(domain);
int ret;
if (sc->pwrsts == PWRSTS_ON)
@@ -290,11 +306,22 @@ static int gdsc_enable(struct generic_pm_domain *domain)
return 0;
}
-static int gdsc_disable(struct generic_pm_domain *domain)
+static int gdsc_enable(struct generic_pm_domain *domain)
{
struct gdsc *sc = domain_to_gdsc(domain);
int ret;
+ ret = gdsc_pm_runtime_get(sc);
+ if (ret)
+ return ret;
+
+ return _gdsc_enable(sc);
+}
+
+static int _gdsc_disable(struct gdsc *sc)
+{
+ int ret;
+
if (sc->pwrsts == PWRSTS_ON)
return gdsc_assert_reset(sc);
@@ -329,6 +356,18 @@ static int gdsc_disable(struct generic_pm_domain *domain)
return 0;
}
+static int gdsc_disable(struct generic_pm_domain *domain)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+ int ret;
+
+ ret = _gdsc_disable(sc);
+
+ gdsc_pm_runtime_put(sc);
+
+ return ret;
+}
+
static int gdsc_init(struct gdsc *sc)
{
u32 mask, val;
@@ -443,6 +482,8 @@ int gdsc_register(struct gdsc_desc *desc,
for (i = 0; i < num; i++) {
if (!scs[i])
continue;
+ if (pm_runtime_enabled(dev))
+ scs[i]->dev = dev;
scs[i]->regmap = regmap;
scs[i]->rcdev = rcdev;
ret = gdsc_init(scs[i]);
@@ -457,6 +498,8 @@ int gdsc_register(struct gdsc_desc *desc,
continue;
if (scs[i]->parent)
pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
+ else if (!IS_ERR_OR_NULL(dev->pm_domain))
+ pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
}
return of_genpd_add_provider_onecell(dev->of_node, data);
@@ -475,6 +518,8 @@ void gdsc_unregister(struct gdsc_desc *desc)
continue;
if (scs[i]->parent)
pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
+ else if (!IS_ERR_OR_NULL(dev->pm_domain))
+ pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
}
of_genpd_del_provider(dev->of_node);
}
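
Note: the gdsc.c hunks split each genpd callback into a bare worker (_gdsc_enable()/_gdsc_disable()) plus a wrapper that brackets the register accesses with runtime-PM calls on the providing device; the reference taken in gdsc_enable() is held until the matching gdsc_disable(). Because gdsc_register() only records sc->dev when runtime PM is enabled on the registering device, existing providers keep the old behaviour. A minimal sketch of how a clock-controller driver would opt in (illustrative names, not from this patch):

static int example_cc_probe(struct platform_device *pdev)
{
	/* Enabling runtime PM here makes gdsc_register() set sc->dev,
	 * so GDSC power on/off will take and drop runtime-PM references
	 * on this device. */
	pm_runtime_enable(&pdev->dev);

	return qcom_cc_probe(pdev, &example_cc_desc);
}
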
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 5bb396b344d1..d7cc4c21a9d4 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -25,6 +25,7 @@ struct reset_controller_dev;
* @resets: ids of resets associated with this gdsc
* @reset_count: number of @resets
* @rcdev: reset controller
+ * @dev: the device holding the GDSC, used for pm_runtime calls
*/
struct gdsc {
struct generic_pm_domain pd;
@@ -58,6 +59,7 @@ struct gdsc {
const char *supply;
struct regulator *rsupply;
+ struct device *dev;
};
struct gdsc_desc {
diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
index fedfffaf0a8d..a925ac90018d 100644
--- a/drivers/clk/qcom/gpucc-msm8998.c
+++ b/drivers/clk/qcom/gpucc-msm8998.c
@@ -40,8 +40,7 @@ static struct clk_branch gpucc_cxo_clk = {
.hw.init = &(struct clk_init_data){
.name = "gpucc_cxo_clk",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -99,7 +98,7 @@ static const struct parent_map gpu_xo_gpll0_map[] = {
static const struct clk_parent_data gpu_xo_gpll0[] = {
{ .hw = &gpucc_cxo_clk.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
};
static const struct parent_map gpu_xo_gpupll0_map[] = {
@@ -126,7 +125,7 @@ static struct clk_rcg2 rbcpr_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rbcpr_clk_src",
.parent_data = gpu_xo_gpll0,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gpu_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -144,7 +143,7 @@ static struct clk_rcg2 gfx3d_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
.parent_data = gpu_xo_gpupll0,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gpu_xo_gpupll0),
.ops = &clk_rcg2_ops,
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
},
@@ -163,7 +162,7 @@ static struct clk_rcg2 rbbmtimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rbbmtimer_clk_src",
.parent_data = gpu_xo_gpll0,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gpu_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -184,7 +183,7 @@ static struct clk_rcg2 gfx3d_isense_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_isense_clk_src",
.parent_data = gpu_xo_gpll0,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gpu_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
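
Note: dropping the legacy .name strings leaves the lookup order as follows (roughly): a .hw pointer wins if set; otherwise .fw_name is matched against the consumer's clocks/clock-names properties in DT; the global .name string is only a fallback when the DT lookup finds nothing. Removing .name therefore makes the DT binding authoritative for these parents. The two remaining forms, side by side (illustrative):

static const struct clk_parent_data example_parents[] = {
	{ .hw = &gpucc_cxo_clk.clkr.hw },	/* direct in-driver reference */
	{ .fw_name = "gpll0" },			/* resolved via DT clock-names */
};
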
diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c
index 1ebcceb3a50d..41bba96a08b3 100644
--- a/drivers/clk/qcom/gpucc-sdm660.c
+++ b/drivers/clk/qcom/gpucc-sdm660.c
@@ -44,8 +44,7 @@ static struct clk_branch gpucc_cxo_clk = {
.hw.init = &(struct clk_init_data){
.name = "gpucc_cxo_clk",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -101,7 +100,7 @@ static const struct clk_parent_data gpucc_parent_data_1[] = {
{ .hw = &gpucc_cxo_clk.clkr.hw },
{ .hw = &gpu_pll0_pll_out_main.clkr.hw },
{ .hw = &gpu_pll1_pll_out_main.clkr.hw },
- { .fw_name = "gcc_gpu_gpll0_clk", .name = "gcc_gpu_gpll0_clk" },
+ { .fw_name = "gcc_gpu_gpll0_clk" },
};
static struct clk_rcg2_gfx3d gfx3d_clk_src = {
@@ -114,7 +113,7 @@ static struct clk_rcg2_gfx3d gfx3d_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
.parent_data = gpucc_parent_data_1,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gpucc_parent_data_1),
.ops = &clk_gfx3d_ops,
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
},
@@ -154,8 +153,8 @@ static const struct parent_map gpucc_parent_map_0[] = {
static const struct clk_parent_data gpucc_parent_data_0[] = {
{ .hw = &gpucc_cxo_clk.clkr.hw },
- { .fw_name = "gcc_gpu_gpll0_clk", .name = "gcc_gpu_gpll0_clk" },
- { .fw_name = "gcc_gpu_gpll0_div_clk", .name = "gcc_gpu_gpll0_div_clk" },
+ { .fw_name = "gcc_gpu_gpll0_clk" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk" },
};
static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
@@ -172,7 +171,7 @@ static struct clk_rcg2 rbbmtimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rbbmtimer_clk_src",
.parent_data = gpucc_parent_data_0,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gpucc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -192,7 +191,7 @@ static struct clk_rcg2 rbcpr_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rbcpr_clk_src",
.parent_data = gpucc_parent_data_0,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gpucc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
index 8590b5edd19d..4fec1f9142b8 100644
--- a/drivers/clk/qcom/kpss-xcc.c
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -33,7 +33,6 @@ static int kpss_xcc_driver_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
struct clk *clk;
- struct resource *res;
void __iomem *base;
const char *name;
@@ -41,8 +40,7 @@ static int kpss_xcc_driver_probe(struct platform_device *pdev)
if (!id)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
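
Note: devm_platform_ioremap_resource() folds the removed two-step lookup into a single managed call; its implementation in drivers/base/platform.c is essentially:

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
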
diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
new file mode 100644
index 000000000000..89f1ad6631da
--- /dev/null
+++ b/drivers/clk/qcom/lpasscc-sc7280.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lpass-sc7280.h>
+
+#include "clk-regmap.h"
+#include "clk-branch.h"
+#include "common.h"
+
+static struct clk_branch lpass_q6ss_ahbm_clk = {
+ .halt_reg = 0x1c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_q6ss_ahbm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_q6ss_ahbs_clk = {
+ .halt_reg = 0x20,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x20,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_q6ss_ahbs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_top_cc_lpi_q6_axim_hs_clk = {
+ .halt_reg = 0x0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_top_cc_lpi_q6_axim_hs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_qdsp6ss_core_clk = {
+ .halt_reg = 0x20,
+ /* CLK_OFF would not toggle until LPASS is out of reset */
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x20,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_qdsp6ss_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_qdsp6ss_xo_clk = {
+ .halt_reg = 0x38,
+ /* CLK_OFF would not toggle until LPASS is out of reset */
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x38,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_qdsp6ss_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_qdsp6ss_sleep_clk = {
+ .halt_reg = 0x3c,
+ /* CLK_OFF would not toggle until LPASS is out of reset */
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x3c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_qdsp6ss_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct regmap_config lpass_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static struct clk_regmap *lpass_cc_sc7280_clocks[] = {
+ [LPASS_Q6SS_AHBM_CLK] = &lpass_q6ss_ahbm_clk.clkr,
+ [LPASS_Q6SS_AHBS_CLK] = &lpass_q6ss_ahbs_clk.clkr,
+};
+
+static const struct qcom_cc_desc lpass_cc_sc7280_desc = {
+ .config = &lpass_regmap_config,
+ .clks = lpass_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_cc_sc7280_clocks),
+};
+
+static struct clk_regmap *lpass_cc_top_sc7280_clocks[] = {
+ [LPASS_TOP_CC_LPI_Q6_AXIM_HS_CLK] =
+ &lpass_top_cc_lpi_q6_axim_hs_clk.clkr,
+};
+
+static const struct qcom_cc_desc lpass_cc_top_sc7280_desc = {
+ .config = &lpass_regmap_config,
+ .clks = lpass_cc_top_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_cc_top_sc7280_clocks),
+};
+
+static struct clk_regmap *lpass_qdsp6ss_sc7280_clocks[] = {
+ [LPASS_QDSP6SS_XO_CLK] = &lpass_qdsp6ss_xo_clk.clkr,
+ [LPASS_QDSP6SS_SLEEP_CLK] = &lpass_qdsp6ss_sleep_clk.clkr,
+ [LPASS_QDSP6SS_CORE_CLK] = &lpass_qdsp6ss_core_clk.clkr,
+};
+
+static const struct qcom_cc_desc lpass_qdsp6ss_sc7280_desc = {
+ .config = &lpass_regmap_config,
+ .clks = lpass_qdsp6ss_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_qdsp6ss_sc7280_clocks),
+};
+
+static int lpass_cc_sc7280_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, "iface");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ goto destroy_pm_clk;
+ }
+
+ lpass_regmap_config.name = "qdsp6ss";
+ desc = &lpass_qdsp6ss_sc7280_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 0, desc);
+ if (ret)
+ goto destroy_pm_clk;
+
+ lpass_regmap_config.name = "top_cc";
+ desc = &lpass_cc_top_sc7280_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ if (ret)
+ goto destroy_pm_clk;
+
+ lpass_regmap_config.name = "cc";
+ desc = &lpass_cc_sc7280_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 2, desc);
+ if (ret)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static const struct of_device_id lpass_cc_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-lpasscc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_cc_sc7280_match_table);
+
+static struct platform_driver lpass_cc_sc7280_driver = {
+ .probe = lpass_cc_sc7280_probe,
+ .driver = {
+ .name = "sc7280-lpasscc",
+ .of_match_table = lpass_cc_sc7280_match_table,
+ },
+};
+
+static int __init lpass_cc_sc7280_init(void)
+{
+ return platform_driver_register(&lpass_cc_sc7280_driver);
+}
+subsys_initcall(lpass_cc_sc7280_init);
+
+static void __exit lpass_cc_sc7280_exit(void)
+{
+ platform_driver_unregister(&lpass_cc_sc7280_driver);
+}
+module_exit(lpass_cc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI LPASS_CC SC7280 Driver");
+MODULE_LICENSE("GPL v2");
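
Note: with pm_clk, the "iface" clock acquired in the probe above is handed to the runtime-PM core rather than enabled by hand; for the gating to actually happen, the driver's dev_pm_ops would route runtime suspend/resume through the pm_clk helpers. A sketch of that wiring (an assumption — the pm_ops are not part of the hunk shown above):

static const struct dev_pm_ops lpass_cc_sc7280_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
};

/* Referenced from the platform_driver as .driver.pm = &lpass_cc_sc7280_pm_ops.
 * pm_clk_suspend()/pm_clk_resume() gate and ungate every clock previously
 * added with pm_clk_add(). */
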
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
index 467dadccde02..c421b1291651 100644
--- a/drivers/clk/qcom/mmcc-msm8998.c
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -53,8 +53,7 @@ static struct clk_fixed_factor gpll0_div = {
.hw.init = &(struct clk_init_data){
.name = "mmss_gpll0_div",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "gpll0",
- .name = "gpll0"
+ .fw_name = "gpll0"
},
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
@@ -78,8 +77,7 @@ static struct clk_alpha_pll mmpll0 = {
.hw.init = &(struct clk_init_data){
.name = "mmpll0",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -111,8 +109,7 @@ static struct clk_alpha_pll mmpll1 = {
.hw.init = &(struct clk_init_data){
.name = "mmpll1",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -141,8 +138,7 @@ static struct clk_alpha_pll mmpll3 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll3",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -170,8 +166,7 @@ static struct clk_alpha_pll mmpll4 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll4",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -199,8 +194,7 @@ static struct clk_alpha_pll mmpll5 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll5",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -228,8 +222,7 @@ static struct clk_alpha_pll mmpll6 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll6",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -257,8 +250,7 @@ static struct clk_alpha_pll mmpll7 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll7",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -286,8 +278,7 @@ static struct clk_alpha_pll mmpll10 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll10",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -316,9 +307,9 @@ static const struct parent_map mmss_xo_hdmi_map[] = {
};
static const struct clk_parent_data mmss_xo_hdmi[] = {
- { .fw_name = "xo", .name = "xo" },
- { .fw_name = "hdmipll", .name = "hdmipll" },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "xo" },
+ { .fw_name = "hdmipll" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
@@ -329,10 +320,10 @@ static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
};
static const struct clk_parent_data mmss_xo_dsi0pll_dsi1pll[] = {
- { .fw_name = "xo", .name = "xo" },
- { .fw_name = "dsi0dsi", .name = "dsi0dsi" },
- { .fw_name = "dsi1dsi", .name = "dsi1dsi" },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0dsi" },
+ { .fw_name = "dsi1dsi" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_dsibyte_map[] = {
@@ -343,10 +334,10 @@ static const struct parent_map mmss_xo_dsibyte_map[] = {
};
static const struct clk_parent_data mmss_xo_dsibyte[] = {
- { .fw_name = "xo", .name = "xo" },
- { .fw_name = "dsi0byte", .name = "dsi0byte" },
- { .fw_name = "dsi1byte", .name = "dsi1byte" },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0byte" },
+ { .fw_name = "dsi1byte" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_dp_map[] = {
@@ -357,10 +348,10 @@ static const struct parent_map mmss_xo_dp_map[] = {
};
static const struct clk_parent_data mmss_xo_dp[] = {
- { .fw_name = "xo", .name = "xo" },
- { .fw_name = "dplink", .name = "dplink" },
- { .fw_name = "dpvco", .name = "dpvco" },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "xo" },
+ { .fw_name = "dplink" },
+ { .fw_name = "dpvco" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
@@ -371,10 +362,10 @@ static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
};
static const struct clk_parent_data mmss_xo_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "xo" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
@@ -386,11 +377,11 @@ static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
};
static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
@@ -403,12 +394,12 @@ static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
{ .hw = &mmpll1_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
@@ -421,12 +412,12 @@ static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
{ .hw = &mmpll5_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map[] = {
@@ -440,13 +431,13 @@ static const struct parent_map mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map[
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
{ .hw = &mmpll3_out_even.clkr.hw },
{ .hw = &mmpll6_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
@@ -460,13 +451,13 @@ static const struct parent_map mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map
};
static const struct clk_parent_data mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll4_out_even.clkr.hw },
{ .hw = &mmpll7_out_even.clkr.hw },
{ .hw = &mmpll10_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
@@ -480,13 +471,13 @@ static const struct parent_map mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
{ .hw = &mmpll7_out_even.clkr.hw },
{ .hw = &mmpll10_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
@@ -501,14 +492,14 @@ static const struct parent_map mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
{ .hw = &mmpll4_out_even.clkr.hw },
{ .hw = &mmpll7_out_even.clkr.hw },
{ .hw = &mmpll10_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static struct clk_rcg2 byte0_clk_src = {
@@ -518,7 +509,7 @@ static struct clk_rcg2 byte0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_data = mmss_xo_dsibyte,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -531,7 +522,7 @@ static struct clk_rcg2 byte1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
.parent_data = mmss_xo_dsibyte,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -552,7 +543,7 @@ static struct clk_rcg2 cci_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -576,7 +567,7 @@ static struct clk_rcg2 cpp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -599,7 +590,7 @@ static struct clk_rcg2 csi0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -612,7 +603,7 @@ static struct clk_rcg2 csi1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -625,7 +616,7 @@ static struct clk_rcg2 csi2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -638,7 +629,7 @@ static struct clk_rcg2 csi3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi3_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -660,7 +651,7 @@ static struct clk_rcg2 csiphy_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -679,7 +670,7 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -692,7 +683,7 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -705,7 +696,7 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2phytimer_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -723,7 +714,7 @@ static struct clk_rcg2 dp_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_aux_clk_src",
.parent_data = mmss_xo_gpll0_gpll0_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -743,7 +734,7 @@ static struct clk_rcg2 dp_crypto_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_crypto_clk_src",
.parent_data = mmss_xo_dp,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dp),
.ops = &clk_rcg2_ops,
},
};
@@ -763,7 +754,7 @@ static struct clk_rcg2 dp_link_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_link_clk_src",
.parent_data = mmss_xo_dp,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dp),
.ops = &clk_rcg2_ops,
},
};
@@ -783,7 +774,7 @@ static struct clk_rcg2 dp_pixel_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_pixel_clk_src",
.parent_data = mmss_xo_dp,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dp),
.ops = &clk_rcg2_ops,
},
};
@@ -801,7 +792,7 @@ static struct clk_rcg2 esc0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
.parent_data = mmss_xo_dsibyte,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -814,7 +805,7 @@ static struct clk_rcg2 esc1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
.parent_data = mmss_xo_dsibyte,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -832,7 +823,7 @@ static struct clk_rcg2 extpclk_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "extpclk_clk_src",
.parent_data = mmss_xo_hdmi,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmss_xo_hdmi),
.ops = &clk_byte_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -855,7 +846,7 @@ static struct clk_rcg2 fd_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "fd_core_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -873,7 +864,7 @@ static struct clk_rcg2 hdmi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "hdmi_clk_src",
.parent_data = mmss_xo_gpll0_gpll0_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -894,7 +885,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -916,7 +907,7 @@ static struct clk_rcg2 maxi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "maxi_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -943,7 +934,7 @@ static struct clk_rcg2 mclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -956,7 +947,7 @@ static struct clk_rcg2 mclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -969,7 +960,7 @@ static struct clk_rcg2 mclk2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk2_clk_src",
.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -982,7 +973,7 @@ static struct clk_rcg2 mclk3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk3_clk_src",
.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1008,7 +999,7 @@ static struct clk_rcg2 mdp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1026,7 +1017,7 @@ static struct clk_rcg2 vsync_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
.parent_data = mmss_xo_gpll0_gpll0_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1046,7 +1037,7 @@ static struct clk_rcg2 ahb_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ahb_clk_src",
.parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1069,7 +1060,7 @@ static struct clk_rcg2 axi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "axi_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1082,7 +1073,7 @@ static struct clk_rcg2 pclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_data = mmss_xo_dsi0pll_dsi1pll,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1096,7 +1087,7 @@ static struct clk_rcg2 pclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_data = mmss_xo_dsi0pll_dsi1pll,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1118,7 +1109,7 @@ static struct clk_rcg2 rot_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rot_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1140,7 +1131,7 @@ static struct clk_rcg2 video_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "video_core_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1153,7 +1144,7 @@ static struct clk_rcg2 video_subcore0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "video_subcore0_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1166,7 +1157,7 @@ static struct clk_rcg2 video_subcore1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "video_subcore1_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1191,7 +1182,7 @@ static struct clk_rcg2 vfe0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1204,7 +1195,7 @@ static struct clk_rcg2 vfe1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe1_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index 941993bc610d..bc19a23e13f8 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -483,7 +483,7 @@ static struct clk_rcg2 ahb_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ahb_clk_src",
.parent_data = mmcc_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -496,7 +496,7 @@ static struct clk_rcg2 byte0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_data = mmcc_xo_dsibyte,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
},
@@ -510,7 +510,7 @@ static struct clk_rcg2 byte1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
.parent_data = mmcc_xo_dsibyte,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
},
@@ -538,7 +538,7 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -552,7 +552,7 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -573,7 +573,7 @@ static struct clk_rcg2 cci_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -597,7 +597,7 @@ static struct clk_rcg2 cpp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6),
.ops = &clk_rcg2_ops,
},
};
@@ -620,7 +620,7 @@ static struct clk_rcg2 csi0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -641,7 +641,7 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -655,7 +655,7 @@ static struct clk_rcg2 csi1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -669,7 +669,7 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -683,7 +683,7 @@ static struct clk_rcg2 csi2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -697,7 +697,7 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2phytimer_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -711,7 +711,7 @@ static struct clk_rcg2 csi3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi3_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -733,7 +733,7 @@ static struct clk_rcg2 csiphy_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -752,7 +752,7 @@ static struct clk_rcg2 dp_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_aux_clk_src",
.parent_data = mmcc_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -773,7 +773,7 @@ static struct clk_rcg2 dp_crypto_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_crypto_clk_src",
.parent_data = mmcc_xo_dplink_dpvco,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dplink_dpvco),
.ops = &clk_rcg2_ops,
},
};
@@ -793,7 +793,7 @@ static struct clk_rcg2 dp_gtc_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_gtc_clk_src",
.parent_data = mmcc_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -814,7 +814,7 @@ static struct clk_rcg2 dp_link_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_link_clk_src",
.parent_data = mmcc_xo_dplink_dpvco,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dplink_dpvco),
.ops = &clk_rcg2_ops,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -828,7 +828,7 @@ static struct clk_rcg2 dp_pixel_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_pixel_clk_src",
.parent_data = mmcc_xo_dplink_dpvco,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dplink_dpvco),
.ops = &clk_dp_ops,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -842,7 +842,7 @@ static struct clk_rcg2 esc0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
.parent_data = mmcc_xo_dsibyte,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -855,7 +855,7 @@ static struct clk_rcg2 esc1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
.parent_data = mmcc_xo_dsibyte,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -878,7 +878,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -906,7 +906,7 @@ static struct clk_rcg2 mclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
.parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -920,7 +920,7 @@ static struct clk_rcg2 mclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
.parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -934,7 +934,7 @@ static struct clk_rcg2 mclk2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk2_clk_src",
.parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -948,7 +948,7 @@ static struct clk_rcg2 mclk3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk3_clk_src",
.parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -974,7 +974,7 @@ static struct clk_rcg2 mdp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -987,7 +987,7 @@ static struct clk_rcg2 pclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_data = mmcc_xo_dsi0pll_dsi1pll,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
},
@@ -1001,7 +1001,7 @@ static struct clk_rcg2 pclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_data = mmcc_xo_dsi0pll_dsi1pll,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
},
@@ -1025,7 +1025,7 @@ static struct clk_rcg2 rot_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rot_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1051,7 +1051,7 @@ static struct clk_rcg2 vfe0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
.parent_data = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1065,7 +1065,7 @@ static struct clk_rcg2 vfe1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe1_clk_src",
.parent_data = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1089,7 +1089,7 @@ static struct clk_rcg2 video_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "video_core_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7),
.ops = &clk_rcg2_ops,
.flags = CLK_IS_CRITICAL,
},
@@ -1104,7 +1104,7 @@ static struct clk_rcg2 vsync_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
.parent_data = mmcc_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -2055,7 +2055,7 @@ static struct clk_rcg2 axi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "axi_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -2560,6 +2560,8 @@ static struct clk_branch video_subcore0_clk = {
static struct gdsc venus_gdsc = {
.gdscr = 0x1024,
+ .cxcs = (unsigned int[]){ 0x1028, 0x1034, 0x1048 },
+ .cxc_count = 3,
.pd = {
.name = "venus",
},
@@ -2573,6 +2575,7 @@ static struct gdsc venus_core0_gdsc = {
},
.parent = &venus_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
};
static struct gdsc mdss_gdsc = {
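Throughout the mmcc-msm8998 hunks above, hard-coded num_parents values are replaced with ARRAY_SIZE() over the corresponding parent_data table, so the count can never drift out of sync when a parent is added or removed. A minimal sketch of the idiom (the parent names here are illustrative, not taken from the driver):

#include <linux/clk-provider.h>
#include <linux/kernel.h>

static const struct clk_parent_data example_parents[] = {
	{ .fw_name = "xo" },
	{ .fw_name = "gpll0" },
	{ .fw_name = "gpll0_div" },
};

static const struct clk_init_data example_init = {
	.name = "example_clk_src",
	.parent_data = example_parents,
	/* Derived from the table itself, not maintained by hand. */
	.num_parents = ARRAY_SIZE(example_parents),
};
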
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
index 7b435a1c2c4b..8617454e4a77 100644
--- a/drivers/clk/qcom/videocc-sm8250.c
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,videocc-sm8250.h>
@@ -364,13 +365,31 @@ static const struct of_device_id video_cc_sm8250_match_table[] = {
};
MODULE_DEVICE_TABLE(of, video_cc_sm8250_match_table);
+static void video_cc_sm8250_pm_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
static int video_cc_sm8250_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_add_action_or_reset(&pdev->dev, video_cc_sm8250_pm_runtime_disable, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
regmap = qcom_cc_map(pdev, &video_cc_sm8250_desc);
- if (IS_ERR(regmap))
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
return PTR_ERR(regmap);
+ }
clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
clk_lucid_pll_configure(&video_pll1, regmap, &video_pll1_config);
@@ -379,7 +398,11 @@ static int video_cc_sm8250_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0xe58, BIT(0), BIT(0));
regmap_update_bits(regmap, 0xeec, BIT(0), BIT(0));
- return qcom_cc_really_probe(pdev, &video_cc_sm8250_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &video_cc_sm8250_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
}
static struct platform_driver video_cc_sm8250_driver = {
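The videocc-sm8250 change brackets the whole probe with runtime PM and uses devm_add_action_or_reset() so that pm_runtime_disable() runs automatically on any later probe failure or on unbind. A condensed sketch of that pattern (names hypothetical, clock registration elided):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static void example_rpm_disable(void *data)
{
	pm_runtime_disable(data);	/* undo pm_runtime_enable() */
}

static int example_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	/* Guarantees pm_runtime_disable() on failure or driver unbind. */
	ret = devm_add_action_or_reset(&pdev->dev, example_rpm_disable,
				       &pdev->dev);
	if (ret)
		return ret;

	/* Power the block up only for the duration of probe. */
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		return ret;

	/* ... map registers and register clocks here ... */

	pm_runtime_put(&pdev->dev);
	return 0;
}
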
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index c32d2c678046..d6b1d0148bfd 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -229,6 +229,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("lvds", 727, R8A7795_CLK_S0D4),
DEF_MOD("hdmi1", 728, R8A7795_CLK_HDMI),
DEF_MOD("hdmi0", 729, R8A7795_CLK_HDMI),
+ DEF_MOD("mlp", 802, R8A7795_CLK_S2D1),
DEF_MOD("vin7", 804, R8A7795_CLK_S0D2),
DEF_MOD("vin6", 805, R8A7795_CLK_S0D2),
DEF_MOD("vin5", 806, R8A7795_CLK_S0D2),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 41593c126faf..9c22977e42c2 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -207,6 +207,7 @@ static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
DEF_MOD("du0", 724, R8A7796_CLK_S2D1),
DEF_MOD("lvds", 727, R8A7796_CLK_S2D1),
DEF_MOD("hdmi0", 729, R8A7796_CLK_HDMI),
+ DEF_MOD("mlp", 802, R8A7796_CLK_S2D1),
DEF_MOD("vin7", 804, R8A7796_CLK_S0D2),
DEF_MOD("vin6", 805, R8A7796_CLK_S0D2),
DEF_MOD("vin5", 806, R8A7796_CLK_S0D2),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index bc1be8bcbbe4..7eee45a31b2a 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -205,6 +205,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("lvds", 727, R8A77965_CLK_S2D1),
DEF_MOD("hdmi0", 729, R8A77965_CLK_HDMI),
+ DEF_MOD("mlp", 802, R8A77965_CLK_S2D1),
DEF_MOD("vin7", 804, R8A77965_CLK_S0D2),
DEF_MOD("vin6", 805, R8A77965_CLK_S0D2),
DEF_MOD("vin5", 806, R8A77965_CLK_S0D2),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index f16d125ca009..fbd7454f2beb 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -33,9 +33,13 @@ enum rcar_r8a779a0_clk_types {
CLK_TYPE_R8A779A0_PLL1,
CLK_TYPE_R8A779A0_PLL2X_3X, /* PLL[23][01] */
CLK_TYPE_R8A779A0_PLL5,
+ CLK_TYPE_R8A779A0_Z,
CLK_TYPE_R8A779A0_SD,
CLK_TYPE_R8A779A0_MDSEL, /* Select parent/divider using mode pin */
CLK_TYPE_R8A779A0_OSC, /* OSC EXTAL predivider and fixed divider */
+ CLK_TYPE_R8A779A0_RPCSRC,
+ CLK_TYPE_R8A779A0_RPC,
+ CLK_TYPE_R8A779A0_RPCD2,
};
struct rcar_r8a779a0_cpg_pll_config {
@@ -84,6 +88,10 @@ enum clk_ids {
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \
.offset = _offset)
+#define DEF_Z(_name, _id, _parent, _div, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_Z, _parent, .div = _div, \
+ .offset = _offset)
+
#define DEF_SD(_name, _id, _parent, _offset) \
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_SD, _parent, .offset = _offset)
@@ -120,8 +128,14 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL5_DIV4, 1, 1),
DEF_RATE(".oco", CLK_OCO, 32768),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_R8A779A0_RPCSRC, CLK_PLL5),
+ DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_R8A779A0_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_R8A779A0_RPCD2,
+ R8A779A0_CLK_RPC),
/* Core Clock Outputs */
+ DEF_Z("z0", R8A779A0_CLK_Z0, CLK_PLL20, 2, 0),
+ DEF_Z("z1", R8A779A0_CLK_Z1, CLK_PLL21, 2, 8),
DEF_FIXED("zx", R8A779A0_CLK_ZX, CLK_PLL20_DIV2, 2, 1),
DEF_FIXED("s1d1", R8A779A0_CLK_S1D1, CLK_S1, 1, 1),
DEF_FIXED("s1d2", R8A779A0_CLK_S1D2, CLK_S1, 2, 1),
@@ -193,6 +207,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("msi3", 621, R8A779A0_CLK_MSO),
DEF_MOD("msi4", 622, R8A779A0_CLK_MSO),
DEF_MOD("msi5", 623, R8A779A0_CLK_MSO),
+ DEF_MOD("rpc-if", 629, R8A779A0_CLK_RPCD2),
DEF_MOD("scif0", 702, R8A779A0_CLK_S1D8),
DEF_MOD("scif1", 703, R8A779A0_CLK_S1D8),
DEF_MOD("scif3", 704, R8A779A0_CLK_S1D8),
@@ -205,6 +220,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("tmu2", 715, R8A779A0_CLK_S1D4),
DEF_MOD("tmu3", 716, R8A779A0_CLK_S1D4),
DEF_MOD("tmu4", 717, R8A779A0_CLK_S1D4),
+ DEF_MOD("tpu0", 718, R8A779A0_CLK_S1D8),
DEF_MOD("vin00", 730, R8A779A0_CLK_S1D1),
DEF_MOD("vin01", 731, R8A779A0_CLK_S1D1),
DEF_MOD("vin02", 800, R8A779A0_CLK_S1D1),
@@ -259,6 +275,162 @@ static const struct rcar_r8a779a0_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
+/*
+ * Z0 Clock & Z1 Clock
+ */
+#define CPG_FRQCRB 0x00000804
+#define CPG_FRQCRB_KICK BIT(31)
+#define CPG_FRQCRC 0x00000808
+
+struct cpg_z_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+ void __iomem *kick_reg;
+ unsigned long max_rate; /* Maximum rate for normal mode */
+ unsigned int fixed_div;
+ u32 mask;
+};
+
+#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
+
+static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ u32 val;
+
+ val = readl(zclk->reg) & zclk->mask;
+ mult = 32 - (val >> __ffs(zclk->mask));
+
+ return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
+ 32 * zclk->fixed_div);
+}
+
+static int cpg_z_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int min_mult, max_mult, mult;
+ unsigned long rate, prate;
+
+ rate = min(req->rate, req->max_rate);
+ if (rate <= zclk->max_rate) {
+ /* Set parent rate to initial value for normal modes */
+ prate = zclk->max_rate;
+ } else {
+ /* Set increased parent rate for boost modes */
+ prate = rate;
+ }
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ prate * zclk->fixed_div);
+
+ prate = req->best_parent_rate / zclk->fixed_div;
+ min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
+ max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
+ if (max_mult < min_mult)
+ return -EINVAL;
+
+ mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL, prate);
+ mult = clamp(mult, min_mult, max_mult);
+
+ req->rate = DIV_ROUND_CLOSEST_ULL((u64)prate * mult, 32);
+ return 0;
+}
+
+static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ unsigned int i;
+
+ mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
+ parent_rate);
+ mult = clamp(mult, 1U, 32U);
+
+ if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
+ cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~10 iterations are needed, independently of the CPU rate.
+ * Since this value might depend on the external xtal rate, the pll1
+ * rate, or even other emulation clock rates, use 1000 as a
+ * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct clk_ops cpg_z_clk_ops = {
+ .recalc_rate = cpg_z_clk_recalc_rate,
+ .determine_rate = cpg_z_clk_determine_rate,
+ .set_rate = cpg_z_clk_set_rate,
+};
+
+static struct clk * __init cpg_z_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ unsigned int div,
+ unsigned int offset)
+{
+ struct clk_init_data init = {};
+ struct cpg_z_clk *zclk;
+ struct clk *clk;
+
+ zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
+ if (!zclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_z_clk_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ zclk->reg = reg + CPG_FRQCRC;
+ zclk->kick_reg = reg + CPG_FRQCRB;
+ zclk->hw.init = &init;
+ zclk->mask = GENMASK(offset + 4, offset);
+ zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */
+
+ clk = clk_register(NULL, &zclk->hw);
+ if (IS_ERR(clk)) {
+ kfree(zclk);
+ return clk;
+ }
+
+ zclk->max_rate = clk_hw_get_rate(clk_hw_get_parent(&zclk->hw)) /
+ zclk->fixed_div;
+ return clk;
+}
+
+/*
+ * RPC Clocks
+ */
+#define CPG_RPCCKCR 0x874
+
+static const struct clk_div_table cpg_rpcsrc_div_table[] = {
+ { 0, 4 }, { 1, 6 }, { 2, 5 }, { 3, 6 }, { 0, 0 },
+};
+
static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
struct clk **clks, void __iomem *base,
@@ -293,6 +465,10 @@ static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
div = cpg_pll_config->pll5_div;
break;
+ case CLK_TYPE_R8A779A0_Z:
+ return cpg_z_clk_register(core->name, __clk_get_name(parent),
+ base, core->div, core->offset);
+
case CLK_TYPE_R8A779A0_SD:
return cpg_sd_clk_register(core->name, base, core->offset,
__clk_get_name(parent), notifiers,
@@ -322,6 +498,21 @@ static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
div = cpg_pll_config->osc_prediv * core->div;
break;
+ case CLK_TYPE_R8A779A0_RPCSRC:
+ return clk_register_divider_table(NULL, core->name,
+ __clk_get_name(parent), 0,
+ base + CPG_RPCCKCR, 3, 2, 0,
+ cpg_rpcsrc_div_table,
+ &cpg_lock);
+
+ case CLK_TYPE_R8A779A0_RPC:
+ return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR,
+ __clk_get_name(parent), notifiers);
+
+ case CLK_TYPE_R8A779A0_RPCD2:
+ return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR,
+ __clk_get_name(parent));
+
default:
return ERR_PTR(-EINVAL);
}
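The new Z0/Z1 clocks scale the CPU rate in 1/32 steps of the post-divider parent rate: a 5-bit FRQCRC field v encodes mult = 32 - v, and the resulting rate is parent_rate * mult / (32 * fixed_div). A hedged helper repeating that arithmetic for a hypothetical field at bit 0 (mask 0x1f) with fixed_div = 2:

#include <linux/math64.h>
#include <linux/types.h>

/* Mirrors cpg_z_clk_recalc_rate() above; illustrative values only. */
static unsigned long example_z_rate(unsigned long parent_rate, u32 frqcrc)
{
	unsigned int mult = 32 - (frqcrc & 0x1f);	/* v = 0 means x32/32 */

	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, 32 * 2);
}
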
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 1490446985e2..47c16265fca9 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -29,15 +29,27 @@ enum clk_ids {
CLK_PLL2_DIV16,
CLK_PLL2_DIV20,
CLK_PLL3,
+ CLK_PLL3_400,
+ CLK_PLL3_533,
CLK_PLL3_DIV2,
CLK_PLL3_DIV2_4,
CLK_PLL3_DIV2_4_2,
CLK_PLL3_DIV4,
+ CLK_SEL_PLL3_3,
+ CLK_DIV_PLL3_C,
CLK_PLL4,
CLK_PLL5,
- CLK_PLL5_DIV2,
+ CLK_PLL5_FOUT3,
+ CLK_PLL5_250,
CLK_PLL6,
+ CLK_PLL6_250,
CLK_P1_DIV2,
+ CLK_PLL2_800,
+ CLK_PLL2_SDHI_533,
+ CLK_PLL2_SDHI_400,
+ CLK_PLL2_SDHI_266,
+ CLK_SD0_DIV4,
+ CLK_SD1_DIV4,
/* Module Clocks */
MOD_CLK_BASE,
@@ -53,6 +65,11 @@ static const struct clk_div_table dtable_1_32[] = {
{0, 0},
};
+/* Mux clock tables */
+static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
+static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
+static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
+
static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
@@ -63,8 +80,20 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)),
DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 133, 2),
DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 133, 2),
+ DEF_FIXED(".pll3_400", CLK_PLL3_400, CLK_PLL3, 1, 4),
+ DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3),
+
+ DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1),
+ DEF_FIXED(".pll5_fout3", CLK_PLL5_FOUT3, CLK_PLL5, 1, 6),
+
+ DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6),
DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 1, 2),
+ DEF_FIXED(".clk_800", CLK_PLL2_800, CLK_PLL2, 1, 2),
+ DEF_FIXED(".clk_533", CLK_PLL2_SDHI_533, CLK_PLL2, 1, 3),
+ DEF_FIXED(".clk_400", CLK_PLL2_SDHI_400, CLK_PLL2_800, 1, 2),
+ DEF_FIXED(".clk_266", CLK_PLL2_SDHI_266, CLK_PLL2_SDHI_533, 1, 2),
+
DEF_FIXED(".pll2_div16", CLK_PLL2_DIV16, CLK_PLL2, 1, 16),
DEF_FIXED(".pll2_div20", CLK_PLL2_DIV20, CLK_PLL2, 1, 20),
@@ -72,6 +101,13 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4),
+ DEF_MUX(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3,
+ sel_pll3_3, ARRAY_SIZE(sel_pll3_3), 0, CLK_MUX_READ_ONLY),
+ DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3,
+ DIVPL3C, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+
+ DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_FOUT3, 1, 2),
+ DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
/* Core output clk */
DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1),
@@ -84,6 +120,18 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G044_CLK_P1, 1, 2),
DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2,
DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+ DEF_FIXED("M0", R9A07G044_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
+ DEF_FIXED("ZT", R9A07G044_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
+ DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2,
+ sel_pll6_2, ARRAY_SIZE(sel_pll6_2), 0, CLK_MUX_HIWORD_MASK),
+ DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
+ DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
+ DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0,
+ sel_shdi, ARRAY_SIZE(sel_shdi)),
+ DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1,
+ sel_shdi, ARRAY_SIZE(sel_shdi)),
+ DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
+ DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
};
static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
@@ -97,6 +145,26 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x52c, 0),
DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
0x52c, 1),
+ DEF_MOD("spi_clk2", R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
+ 0x550, 0),
+ DEF_MOD("spi_clk", R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
+ 0x550, 1),
+ DEF_MOD("sdhi0_imclk", R9A07G044_SDHI0_IMCLK, CLK_SD0_DIV4,
+ 0x554, 0),
+ DEF_MOD("sdhi0_imclk2", R9A07G044_SDHI0_IMCLK2, CLK_SD0_DIV4,
+ 0x554, 1),
+ DEF_MOD("sdhi0_clk_hs", R9A07G044_SDHI0_CLK_HS, R9A07G044_CLK_SD0,
+ 0x554, 2),
+ DEF_MOD("sdhi0_aclk", R9A07G044_SDHI0_ACLK, R9A07G044_CLK_P1,
+ 0x554, 3),
+ DEF_MOD("sdhi1_imclk", R9A07G044_SDHI1_IMCLK, CLK_SD1_DIV4,
+ 0x554, 4),
+ DEF_MOD("sdhi1_imclk2", R9A07G044_SDHI1_IMCLK2, CLK_SD1_DIV4,
+ 0x554, 5),
+ DEF_MOD("sdhi1_clk_hs", R9A07G044_SDHI1_CLK_HS, R9A07G044_CLK_SD1,
+ 0x554, 6),
+ DEF_MOD("sdhi1_aclk", R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
+ 0x554, 7),
DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
0x570, 0),
DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
@@ -121,6 +189,14 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x578, 2),
DEF_MOD("usb_pclk", R9A07G044_USB_PCLK, R9A07G044_CLK_P1,
0x578, 3),
+ DEF_COUPLED("eth0_axi", R9A07G044_ETH0_CLK_AXI, R9A07G044_CLK_M0,
+ 0x57c, 0),
+ DEF_COUPLED("eth0_chi", R9A07G044_ETH0_CLK_CHI, R9A07G044_CLK_ZT,
+ 0x57c, 0),
+ DEF_COUPLED("eth1_axi", R9A07G044_ETH1_CLK_AXI, R9A07G044_CLK_M0,
+ 0x57c, 1),
+ DEF_COUPLED("eth1_chi", R9A07G044_ETH1_CLK_CHI, R9A07G044_CLK_ZT,
+ 0x57c, 1),
DEF_MOD("i2c0", R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0,
0x580, 0),
DEF_MOD("i2c1", R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0,
@@ -157,6 +233,9 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0),
DEF_RST(R9A07G044_DMAC_ARESETN, 0x82c, 0),
DEF_RST(R9A07G044_DMAC_RST_ASYNC, 0x82c, 1),
+ DEF_RST(R9A07G044_SPI_RST, 0x850, 0),
+ DEF_RST(R9A07G044_SDHI0_IXRST, 0x854, 0),
+ DEF_RST(R9A07G044_SDHI1_IXRST, 0x854, 1),
DEF_RST(R9A07G044_SSI0_RST_M2_REG, 0x870, 0),
DEF_RST(R9A07G044_SSI1_RST_M2_REG, 0x870, 1),
DEF_RST(R9A07G044_SSI2_RST_M2_REG, 0x870, 2),
@@ -165,6 +244,8 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_USB_U2H1_HRESETN, 0x878, 1),
DEF_RST(R9A07G044_USB_U2P_EXL_SYSRST, 0x878, 2),
DEF_RST(R9A07G044_USB_PRESETN, 0x878, 3),
+ DEF_RST(R9A07G044_ETH0_RST_HW_N, 0x87c, 0),
+ DEF_RST(R9A07G044_ETH1_RST_HW_N, 0x87c, 1),
DEF_RST(R9A07G044_I2C0_MRST, 0x880, 0),
DEF_RST(R9A07G044_I2C1_MRST, 0x880, 1),
DEF_RST(R9A07G044_I2C2_MRST, 0x880, 2),
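The SDHI source clocks added above are fixed-factor chains off PLL2. Assuming the usual 24 MHz EXTAL on RZ/G2L boards (an assumption; the rate is not stated in this patch), the DEF_FIXED() factors work out as:

	PLL2     = 24 MHz * 133 / 2 = 1596 MHz
	.clk_800 = PLL2 / 2         =  798 MHz
	.clk_533 = PLL2 / 3         =  532 MHz
	.clk_400 = .clk_800 / 2     =  399 MHz
	.clk_266 = .clk_533 / 2     =  266 MHz

so the 533/400/266 names on the sel_shdi mux inputs are nominal labels for 532/399/266 MHz sources.
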
diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
index 5678768ee1f2..e93f0011eb07 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.c
+++ b/drivers/clk/renesas/rcar-cpg-lib.c
@@ -267,4 +267,87 @@ free_clock:
return clk;
}
+struct rpc_clock {
+ struct clk_divider div;
+ struct clk_gate gate;
+ /*
+ * One notifier covers both RPC and RPCD2 clocks as they are both
+ * controlled by the same RPCCKCR register...
+ */
+ struct cpg_simple_notifier csn;
+};
+
+static const struct clk_div_table cpg_rpc_div_table[] = {
+ { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
+};
+
+struct clk * __init cpg_rpc_clk_register(const char *name,
+ void __iomem *rpcckcr, const char *parent_name,
+ struct raw_notifier_head *notifiers)
+{
+ struct rpc_clock *rpc;
+ struct clk *clk;
+
+ rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
+ if (!rpc)
+ return ERR_PTR(-ENOMEM);
+
+ rpc->div.reg = rpcckcr;
+ rpc->div.width = 3;
+ rpc->div.table = cpg_rpc_div_table;
+ rpc->div.lock = &cpg_lock;
+
+ rpc->gate.reg = rpcckcr;
+ rpc->gate.bit_idx = 8;
+ rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
+ rpc->gate.lock = &cpg_lock;
+
+ rpc->csn.reg = rpcckcr;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &rpc->div.hw, &clk_divider_ops,
+ &rpc->gate.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ if (IS_ERR(clk)) {
+ kfree(rpc);
+ return clk;
+ }
+
+ cpg_simple_notifier_register(notifiers, &rpc->csn);
+ return clk;
+}
+
+struct rpcd2_clock {
+ struct clk_fixed_factor fixed;
+ struct clk_gate gate;
+};
+
+struct clk * __init cpg_rpcd2_clk_register(const char *name,
+ void __iomem *rpcckcr,
+ const char *parent_name)
+{
+ struct rpcd2_clock *rpcd2;
+ struct clk *clk;
+
+ rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
+ if (!rpcd2)
+ return ERR_PTR(-ENOMEM);
+
+ rpcd2->fixed.mult = 1;
+ rpcd2->fixed.div = 2;
+
+ rpcd2->gate.reg = rpcckcr;
+ rpcd2->gate.bit_idx = 9;
+ rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
+ rpcd2->gate.lock = &cpg_lock;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &rpcd2->fixed.hw, &clk_fixed_factor_ops,
+ &rpcd2->gate.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ if (IS_ERR(clk))
+ kfree(rpcd2);
+
+ return clk;
+}
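cpg_rpc_clk_register() and cpg_rpcd2_clk_register() move into this shared library essentially verbatim, now taking the RPCCKCR address directly instead of a CPG base plus hard-coded offset. The pattern they rely on, one divider and one gate in the same register exposed as a single composite clock, looks roughly like this in miniature (names hypothetical, partial-allocation unwinding abbreviated):

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static struct clk *example_composite(const char *name,
				     const char *parent_name,
				     void __iomem *reg, spinlock_t *lock)
{
	struct clk_divider *div;
	struct clk_gate *gate;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!div || !gate)
		return ERR_PTR(-ENOMEM);	/* partial-alloc cleanup omitted */

	div->reg = reg;
	div->width = 3;			/* bits [2:0]: divider select */
	div->lock = lock;

	gate->reg = reg;
	gate->bit_idx = 8;		/* bit 8 stops the clock */
	gate->flags = CLK_GATE_SET_TO_DISABLE;
	gate->lock = lock;

	return clk_register_composite(NULL, name, &parent_name, 1,
				      NULL, NULL,	/* no mux component */
				      &div->hw, &clk_divider_ops,
				      &gate->hw, &clk_gate_ops,
				      CLK_SET_RATE_PARENT);
}
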
diff --git a/drivers/clk/renesas/rcar-cpg-lib.h b/drivers/clk/renesas/rcar-cpg-lib.h
index d00c91b116ca..35c0217c2f8b 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.h
+++ b/drivers/clk/renesas/rcar-cpg-lib.h
@@ -30,4 +30,11 @@ struct clk * __init cpg_sd_clk_register(const char *name,
void __iomem *base, unsigned int offset, const char *parent_name,
struct raw_notifier_head *notifiers, bool skip_first);
+struct clk * __init cpg_rpc_clk_register(const char *name,
+ void __iomem *rpcckcr, const char *parent_name,
+ struct raw_notifier_head *notifiers);
+
+struct clk * __init cpg_rpcd2_clk_register(const char *name,
+ void __iomem *rpcckcr,
+ const char *parent_name);
#endif
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 558191c99b48..741f6e74bbcf 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -301,95 +301,10 @@ static struct clk * __init cpg_z_clk_register(const char *name,
return clk;
}
-struct rpc_clock {
- struct clk_divider div;
- struct clk_gate gate;
- /*
- * One notifier covers both RPC and RPCD2 clocks as they are both
- * controlled by the same RPCCKCR register...
- */
- struct cpg_simple_notifier csn;
-};
-
static const struct clk_div_table cpg_rpcsrc_div_table[] = {
{ 2, 5 }, { 3, 6 }, { 0, 0 },
};
-static const struct clk_div_table cpg_rpc_div_table[] = {
- { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
-};
-
-static struct clk * __init cpg_rpc_clk_register(const char *name,
- void __iomem *base, const char *parent_name,
- struct raw_notifier_head *notifiers)
-{
- struct rpc_clock *rpc;
- struct clk *clk;
-
- rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
- if (!rpc)
- return ERR_PTR(-ENOMEM);
-
- rpc->div.reg = base + CPG_RPCCKCR;
- rpc->div.width = 3;
- rpc->div.table = cpg_rpc_div_table;
- rpc->div.lock = &cpg_lock;
-
- rpc->gate.reg = base + CPG_RPCCKCR;
- rpc->gate.bit_idx = 8;
- rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
- rpc->gate.lock = &cpg_lock;
-
- rpc->csn.reg = base + CPG_RPCCKCR;
-
- clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
- &rpc->div.hw, &clk_divider_ops,
- &rpc->gate.hw, &clk_gate_ops,
- CLK_SET_RATE_PARENT);
- if (IS_ERR(clk)) {
- kfree(rpc);
- return clk;
- }
-
- cpg_simple_notifier_register(notifiers, &rpc->csn);
- return clk;
-}
-
-struct rpcd2_clock {
- struct clk_fixed_factor fixed;
- struct clk_gate gate;
-};
-
-static struct clk * __init cpg_rpcd2_clk_register(const char *name,
- void __iomem *base,
- const char *parent_name)
-{
- struct rpcd2_clock *rpcd2;
- struct clk *clk;
-
- rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
- if (!rpcd2)
- return ERR_PTR(-ENOMEM);
-
- rpcd2->fixed.mult = 1;
- rpcd2->fixed.div = 2;
-
- rpcd2->gate.reg = base + CPG_RPCCKCR;
- rpcd2->gate.bit_idx = 9;
- rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
- rpcd2->gate.lock = &cpg_lock;
-
- clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
- &rpcd2->fixed.hw, &clk_fixed_factor_ops,
- &rpcd2->gate.hw, &clk_gate_ops,
- CLK_SET_RATE_PARENT);
- if (IS_ERR(clk))
- kfree(rpcd2);
-
- return clk;
-}
-
-
static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
@@ -600,11 +515,11 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
break;
case CLK_TYPE_GEN3_RPC:
- return cpg_rpc_clk_register(core->name, base,
+ return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR,
__clk_get_name(parent), notifiers);
case CLK_TYPE_GEN3_RPCD2:
- return cpg_rpcd2_clk_register(core->name, base,
+ return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR,
__clk_get_name(parent));
default:
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 761922ea5db7..4021f6cabda4 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -55,6 +56,14 @@
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)
+struct sd_hw_data {
+ struct clk_hw hw;
+ u32 conf;
+ struct rzg2l_cpg_priv *priv;
+};
+
+#define to_sd_hw_data(_hw) container_of(_hw, struct sd_hw_data, hw)
+
/**
* struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
*
@@ -130,6 +139,132 @@ rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
return clk_hw->clk;
}
+static struct clk * __init
+rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
+ void __iomem *base,
+ struct rzg2l_cpg_priv *priv)
+{
+ const struct clk_hw *clk_hw;
+
+ clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
+ core->parent_names, core->num_parents,
+ core->flag,
+ base + GET_REG_OFFSET(core->conf),
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->mux_flags, &priv->rmw_lock);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
+static int rzg2l_cpg_sd_clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_mux_determine_rate_flags(hw, req, 0);
+}
+
+static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct sd_hw_data *hwdata = to_sd_hw_data(hw);
+ struct rzg2l_cpg_priv *priv = hwdata->priv;
+ u32 off = GET_REG_OFFSET(hwdata->conf);
+ u32 shift = GET_SHIFT(hwdata->conf);
+ const u32 clk_src_266 = 2;
+ u32 bitmask;
+
+ /*
+ * As per the HW manual, we should not directly switch from 533 MHz to
+ * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
+ * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
+ * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
+ * (400 MHz)).
+ * Setting a value of '0' in the SEL_SDHI0_SET or SEL_SDHI1_SET clock
+ * switching register is prohibited.
+ * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz), and
+ * the index-to-value mapping is done by adding 1 to the index.
+ */
+ bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
+ if (index != clk_src_266) {
+ u32 msk, val;
+ int ret;
+
+ writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
+
+ msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
+
+ ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
+ !(val & msk), 100,
+ CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(priv->dev, "failed to switch clk source\n");
+ return ret;
+ }
+ }
+
+ writel(bitmask | ((index + 1) << shift), priv->base + off);
+
+ return 0;
+}
+
+static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct sd_hw_data *hwdata = to_sd_hw_data(hw);
+ struct rzg2l_cpg_priv *priv = hwdata->priv;
+ u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));
+
+ val >>= GET_SHIFT(hwdata->conf);
+ val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
+ if (val) {
+ val--;
+ } else {
+ /* Prohibited clk source, change it to 533 MHz (reset value) */
+ rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
+ }
+
+ return val;
+}
+
+static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
+ .determine_rate = rzg2l_cpg_sd_clk_mux_determine_rate,
+ .set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
+ .get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
+};
+
+static struct clk * __init
+rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
+ void __iomem *base,
+ struct rzg2l_cpg_priv *priv)
+{
+ struct sd_hw_data *clk_hw_data;
+ struct clk_init_data init;
+ struct clk_hw *clk_hw;
+ int ret;
+
+ clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
+ if (!clk_hw_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw_data->priv = priv;
+ clk_hw_data->conf = core->conf;
+
+ init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
+ init.ops = &rzg2l_cpg_sd_clk_mux_ops;
+ init.flags = 0;
+ init.num_parents = core->num_parents;
+ init.parent_names = core->parent_names;
+
+ clk_hw = &clk_hw_data->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(priv->dev, clk_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw->clk;
+}
+
struct pll_clk {
struct clk_hw hw;
unsigned int conf;
@@ -288,6 +423,12 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
clk = rzg2l_cpg_div_clk_register(core, priv->clks,
priv->base, priv);
break;
+ case CLK_TYPE_MUX:
+ clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
+ break;
+ case CLK_TYPE_SD_MUX:
+ clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
+ break;
default:
goto fail;
}
@@ -310,13 +451,17 @@ fail:
* @hw: handle between common and hardware-specific interfaces
* @off: register offset
* @bit: ON/MON bit
+ * @enabled: soft state of the clock, if it is coupled with another clock
* @priv: CPG/MSTP private data
+ * @sibling: pointer to the other coupled clock
*/
struct mstp_clock {
struct clk_hw hw;
u16 off;
u8 bit;
+ bool enabled;
struct rzg2l_cpg_priv *priv;
+ struct mstp_clock *sibling;
};
#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
@@ -369,11 +514,41 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
+ struct mstp_clock *clock = to_mod_clock(hw);
+
+ if (clock->sibling) {
+ struct rzg2l_cpg_priv *priv = clock->priv;
+ unsigned long flags;
+ bool enabled;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ enabled = clock->sibling->enabled;
+ clock->enabled = true;
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ if (enabled)
+ return 0;
+ }
+
return rzg2l_mod_clock_endisable(hw, true);
}
static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
+ struct mstp_clock *clock = to_mod_clock(hw);
+
+ if (clock->sibling) {
+ struct rzg2l_cpg_priv *priv = clock->priv;
+ unsigned long flags;
+ bool enabled;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ enabled = clock->sibling->enabled;
+ clock->enabled = false;
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ if (enabled)
+ return;
+ }
+
rzg2l_mod_clock_endisable(hw, false);
}
@@ -389,6 +564,9 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
return 1;
}
+ if (clock->sibling)
+ return clock->enabled;
+
value = readl(priv->base + CLK_MON_R(clock->off));
return value & bitmask;
@@ -400,6 +578,28 @@ static const struct clk_ops rzg2l_mod_clock_ops = {
.is_enabled = rzg2l_mod_clock_is_enabled,
};
+static struct mstp_clock
+*rzg2l_mod_clock__get_sibling(struct mstp_clock *clock,
+ struct rzg2l_cpg_priv *priv)
+{
+ struct clk_hw *hw;
+ unsigned int i;
+
+ for (i = 0; i < priv->num_mod_clks; i++) {
+ struct mstp_clock *clk;
+
+ if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
+ continue;
+
+ hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
+ clk = to_mod_clock(hw);
+ if (clock->off == clk->off && clock->bit == clk->bit)
+ return clk;
+ }
+
+ return NULL;
+}
+
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
const struct rzg2l_cpg_info *info,
@@ -461,6 +661,18 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
priv->clks[id] = clk;
+
+ if (mod->is_coupled) {
+ struct mstp_clock *sibling;
+
+ clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
+ sibling = rzg2l_mod_clock__get_sibling(clock, priv);
+ if (sibling) {
+ clock->sibling = sibling;
+ sibling->sibling = clock;
+ }
+ }
+
return;
fail:
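rzg2l_cpg_sd_clk_mux_set_parent() above enforces the "detour through 266 MHz" rule by writing the intermediate selector and then polling CPG_CLKSTATUS until the hardware reports the switch complete. The polling primitive is readl_poll_timeout() from <linux/iopoll.h>; reduced to its core (the 100 us / 20 ms figures match the constants used in the patch):

#include <linux/io.h>
#include <linux/iopoll.h>

/* Wait for a status bit to clear, sleeping ~100 us between reads. */
static int example_wait_switch(void __iomem *clkstatus, u32 sts_bit)
{
	return readl_poll_timeout(clkstatus, /* polls this register */
				  ({ u32 val; val; }) = 0 ? 0 : 0, 0, 0, 0);
}

/*
 * The macro form actually used, spelled out: declare the scratch
 * variable yourself and pass the exit condition as an expression.
 */
static int example_wait_switch2(void __iomem *clkstatus, u32 sts_bit)
{
	u32 val;

	return readl_poll_timeout(clkstatus, val, !(val & sts_bit),
				  100, 20000);
}
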
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index 63695280ce8b..7fb6b4030f72 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -11,6 +11,15 @@
#define CPG_PL2_DDIV (0x204)
#define CPG_PL3A_DDIV (0x208)
+#define CPG_PL2SDHI_DSEL (0x218)
+#define CPG_CLKSTATUS (0x280)
+#define CPG_PL3_SSEL (0x408)
+#define CPG_PL6_ETH_SSEL (0x418)
+
+#define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
+#define CPG_CLKSTATUS_SELSDHI1_STS BIT(29)
+
+#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 20000
/* n = 0/1/2 for PLL1/4/6 */
#define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n))
@@ -23,6 +32,16 @@
#define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3)
#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3)
#define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3)
+#define DIVPL3C DDIV_PACK(CPG_PL3A_DDIV, 8, 3)
+
+#define SEL_PLL_PACK(offset, bitpos, size) \
+ (((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
+
+#define SEL_PLL3_3 SEL_PLL_PACK(CPG_PL3_SSEL, 8, 1)
+#define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1)
+
+#define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2)
+#define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2)
/**
* Definitions of CPG Core Clocks
@@ -43,6 +62,7 @@ struct cpg_core_clk {
const struct clk_div_table *dtable;
const char * const *parent_names;
int flag;
+ int mux_flags;
int num_parents;
};
@@ -54,6 +74,12 @@ enum clk_types {
/* Clock with divider */
CLK_TYPE_DIV,
+
+ /* Clock with clock source selector */
+ CLK_TYPE_MUX,
+
+ /* Clock with SD clock source selector */
+ CLK_TYPE_SD_MUX,
};
#define DEF_TYPE(_name, _id, _type...) \
@@ -69,6 +95,14 @@ enum clk_types {
#define DEF_DIV(_name, _id, _parent, _conf, _dtable, _flag) \
DEF_TYPE(_name, _id, CLK_TYPE_DIV, .conf = _conf, \
.parent = _parent, .dtable = _dtable, .flag = _flag)
+#define DEF_MUX(_name, _id, _conf, _parent_names, _num_parents, _flag, \
+ _mux_flags) \
+ DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = _conf, \
+ .parent_names = _parent_names, .num_parents = _num_parents, \
+ .flag = _flag, .mux_flags = _mux_flags)
+#define DEF_SD_MUX(_name, _id, _conf, _parent_names, _num_parents) \
+ DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, \
+ .parent_names = _parent_names, .num_parents = _num_parents)
/**
* struct rzg2l_mod_clk - Module Clocks definitions
@@ -78,6 +112,7 @@ enum clk_types {
* @parent: id of parent clock
* @off: register offset
* @bit: ON/MON bit
+ * @is_coupled: flag to indicate coupled clock
*/
struct rzg2l_mod_clk {
const char *name;
@@ -85,17 +120,25 @@ struct rzg2l_mod_clk {
unsigned int parent;
u16 off;
u8 bit;
+ bool is_coupled;
};
-#define DEF_MOD(_name, _id, _parent, _off, _bit) \
+#define DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _is_coupled) \
{ \
.name = _name, \
.id = MOD_CLK_BASE + (_id), \
.parent = (_parent), \
.off = (_off), \
.bit = (_bit), \
+ .is_coupled = (_is_coupled), \
}
+#define DEF_MOD(_name, _id, _parent, _off, _bit) \
+ DEF_MOD_BASE(_name, _id, _parent, _off, _bit, false)
+
+#define DEF_COUPLED(_name, _id, _parent, _off, _bit) \
+ DEF_MOD_BASE(_name, _id, _parent, _off, _bit, true)
+
/**
* struct rzg2l_reset - Reset definitions
*
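DEF_COUPLED() is simply DEF_MOD() with is_coupled = true: two entries sharing the same register offset and bit become siblings, and the enable/disable paths in rzg2l-cpg.c above only touch the hardware bit when both soft states agree. The ETH0 pair from r9a07g044-cpg.c, for instance, expands roughly to:

/* DEF_COUPLED("eth0_axi", ...) and DEF_COUPLED("eth0_chi", ...) */
{ .name = "eth0_axi", .id = MOD_CLK_BASE + R9A07G044_ETH0_CLK_AXI,
  .parent = R9A07G044_CLK_M0, .off = 0x57c, .bit = 0, .is_coupled = true },
{ .name = "eth0_chi", .id = MOD_CLK_BASE + R9A07G044_ETH0_CLK_CHI,
  .parent = R9A07G044_CLK_ZT, .off = 0x57c, .bit = 0, .is_coupled = true },
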
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 62a4f2543960..7924598747b6 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -481,7 +481,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "atclk_core_l", "armclkl", CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(1), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3399_CLKGATE_CON(0), 5, GFLAGS),
- COMPOSITE_NOMUX(0, "pclk_dbg_core_l", "armclkl", CLK_IGNORE_UNUSED,
+ COMPOSITE_NOMUX(PCLK_COREDBG_L, "pclk_dbg_core_l", "armclkl", CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(1), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3399_CLKGATE_CON(0), 6, GFLAGS),
@@ -531,7 +531,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(ACLK_GIC_ADB400_CORE_B_2_GIC, "aclk_core_adb400_core_b_2_gic", "armclkb", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(14), 4, GFLAGS),
- DIV(0, "pclken_dbg_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED,
+ DIV(PCLK_COREDBG_B, "pclken_dbg_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(3), 13, 2, DFLAGS | CLK_DIVIDER_READ_ONLY),
GATE(0, "pclk_dbg_cxcs_pd_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED,
@@ -1514,7 +1514,10 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = {
"aclk_vio_noc",
/* ddrc */
- "sclk_ddrc"
+ "sclk_ddrc",
+
+ "armclkl",
+ "armclkb",
};
static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
@@ -1549,9 +1552,6 @@ static void __init rk3399_clk_init(struct device_node *np)
rockchip_clk_register_branches(ctx, rk3399_clk_branches,
ARRAY_SIZE(rk3399_clk_branches));
- rockchip_clk_protect_critical(rk3399_cru_critical_clocks,
- ARRAY_SIZE(rk3399_cru_critical_clocks));
-
rockchip_clk_register_armclk(ctx, ARMCLKL, "armclkl",
mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p),
&rk3399_cpuclkl_data, rk3399_cpuclkl_rates,
@@ -1562,6 +1562,9 @@ static void __init rk3399_clk_init(struct device_node *np)
&rk3399_cpuclkb_data, rk3399_cpuclkb_rates,
ARRAY_SIZE(rk3399_cpuclkb_rates));
+ rockchip_clk_protect_critical(rk3399_cru_critical_clocks,
+ ARRAY_SIZE(rk3399_cru_critical_clocks));
+
rockchip_register_softrst(np, 21, reg_base + RK3399_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
@@ -1653,7 +1656,7 @@ static struct platform_driver clk_rk3399_driver = {
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe);
+module_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe);
MODULE_DESCRIPTION("Rockchip RK3399 Clock Driver");
MODULE_LICENSE("GPL");
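Besides exporting the pclk_dbg IDs, the rk3399 change reorders init so that rockchip_clk_protect_critical() runs after the armclk registration; the protection helper looks clocks up by name, so "armclkl" and "armclkb" must already exist when it is called. A simplified sketch of what such protection amounts to (the real rockchip helper differs in detail):

#include <linux/clk.h>
#include <linux/clk-provider.h>

/* Look each named clock up and keep it enabled for the system lifetime. */
static void example_protect_critical(const char *const clocks[], int nclocks)
{
	int i;

	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (clk)
			clk_prepare_enable(clk);
	}
}
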
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 75ca855e720d..939e7079c334 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -1719,7 +1719,7 @@ static struct platform_driver clk_rk3568_driver = {
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver_probe(clk_rk3568_driver, clk_rk3568_probe);
+module_platform_driver_probe(clk_rk3568_driver, clk_rk3568_probe);
MODULE_DESCRIPTION("Rockchip RK3568 Clock Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 0441c4f73ac9..0e18d6ff2916 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -67,7 +67,8 @@ config EXYNOS_5420_COMMON_CLK
depends on COMMON_CLK_SAMSUNG
help
Support for the clock controller present on the Samsung
- Exynos5420 SoCs. Choose Y here only if you build for this SoC.
+ Exynos5420/Exynos5422/Exynos5800 SoCs. Choose Y here only if you
+ build for this SoC.
config EXYNOS_ARM64_COMMON_CLK
bool "Samsung Exynos ARMv8-family clock controller support" if COMPILE_TEST
@@ -79,38 +80,47 @@ config EXYNOS_AUDSS_CLK_CON
default y if ARCH_EXYNOS
help
Support for the Audio Subsystem CLKCON clock controller present
- on some Exynos SoC variants. Choose M or Y here if you want to
- use audio devices such as I2S, PCM, etc.
+ on some Samsung Exynos SoC variants. Choose M or Y here if you want
+ to use audio devices such as I2S, PCM, etc.
config EXYNOS_CLKOUT
tristate "Samsung Exynos clock output driver"
depends on COMMON_CLK_SAMSUNG
default y if ARCH_EXYNOS
help
- Support for the clock output (XCLKOUT) present on some of Exynos SoC
- variants. Usually the XCLKOUT is used to monitor the status of the
- certains clocks from SoC, but it could also be tied to other devices
- as an input clock.
+ Support for the clock output (XCLKOUT) present on some of Samsung
+ Exynos SoC variants. Usually XCLKOUT is used to monitor the status
+ of certain clocks from the SoC, but it can also be tied to other
+ devices as an input clock.
# For S3C24XX platforms, select following symbols:
config S3C2410_COMMON_CLK
bool "Samsung S3C2410 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
help
- Build the s3c2410 clock driver based on the common clock framework.
+ Support for the clock controller present on the Samsung
+ S3C2410/S3C2440/S3C2442 SoCs. Choose Y here only if you build for
+ this SoC.
config S3C2410_COMMON_DCLK
bool
select COMMON_CLK_SAMSUNG
select REGMAP_MMIO
help
- Temporary symbol to build the dclk driver based on the common clock
- framework.
+ Support for the dclk clock controller present on the Samsung
+ S3C2410/S3C2412/S3C2440/S3C2443 SoCs. Choose Y here only if you build
+ for this SoC.
config S3C2412_COMMON_CLK
bool "Samsung S3C2412 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung S3C2412 SoCs.
+ Choose Y here only if you build for this SoC.
config S3C2443_COMMON_CLK
bool "Samsung S3C2443 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ S3C2416/S3C2443 SoCs. Choose Y here only if you build for this SoC.
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 028b2e27a37e..c46cf11e4d0b 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 00ef4d1b0888..7f20d9aedaa9 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -469,3 +469,21 @@ free_cpuclk:
kfree(cpuclk);
return ret;
}
+
+void __init samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
+ const struct samsung_cpu_clock *list, unsigned int nr_clk)
+{
+ unsigned int idx;
+ unsigned int num_cfgs;
+ struct clk_hw **hws = ctx->clk_data.hws;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ /* find count of configuration rates in cfg */
+ for (num_cfgs = 0; list->cfg[num_cfgs].prate != 0; )
+ num_cfgs++;
+
+ exynos_register_cpu_clock(ctx, list->id, list->name, hws[list->parent_id],
+ hws[list->alt_parent_id], list->offset, list->cfg, num_cfgs,
+ list->flags);
+ }
+}
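samsung_clk_register_cpu() sizes each rate table by walking cfg[] until the prate == 0 sentinel, matching the { 0 } terminators in the exynos5433 tables below. The counting loop, written out more explicitly (assumes the exynos_cpuclk_cfg_data layout from the driver's clk-cpu.h):

/* Count entries in a table terminated by a { 0 } sentinel. */
static unsigned int example_count_cfgs(const struct exynos_cpuclk_cfg_data *cfg)
{
	unsigned int n = 0;

	while (cfg[n].prate != 0)
		n++;

	return n;
}
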
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 42b5d32c6cc7..9cc127a162ad 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -129,7 +129,6 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
struct clk *pll_ref, *pll_in, *cdclk, *sclk_audio, *sclk_pcm_in;
const struct exynos_audss_clk_drvdata *variant;
struct clk_hw **clk_table;
- struct resource *res;
struct device *dev = &pdev->dev;
int i, ret = 0;
@@ -137,8 +136,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
if (!variant)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
index b69e381b8c0c..471a6fb82670 100644
--- a/drivers/clk/samsung/clk-exynos4412-isp.c
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -110,11 +110,9 @@ static int __init exynos4x12_isp_clk_probe(struct platform_device *pdev)
struct samsung_clk_provider *ctx;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *res;
void __iomem *reg_base;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
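Several Samsung probes in this series collapse the platform_get_resource() + devm_ioremap_resource() pair into devm_platform_ioremap_resource(), the same operation with the struct resource lookup folded in. The shape of the conversion (minimal probe, names hypothetical):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *reg_base;

	/*
	 * Old form:
	 *   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   reg_base = devm_ioremap_resource(&pdev->dev, res);
	 */
	reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg_base))
		return PTR_ERR(reg_base);

	return 0;
}
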
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index f203074d858b..f9daae20f393 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -3675,44 +3675,32 @@ static const struct exynos_cpuclk_cfg_data exynos5433_apolloclk_d[] __initconst
{ 0 },
};
+static const struct samsung_cpu_clock apollo_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_SCLK_APOLLO, "apolloclk", CLK_MOUT_APOLLO_PLL,
+ CLK_MOUT_BUS_PLL_APOLLO_USER,
+ CLK_CPU_HAS_E5433_REGS_LAYOUT, 0x200,
+ exynos5433_apolloclk_d),
+};
+
+static const struct samsung_cmu_info apollo_cmu_info __initconst = {
+ .pll_clks = apollo_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(apollo_pll_clks),
+ .mux_clks = apollo_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(apollo_mux_clks),
+ .div_clks = apollo_div_clks,
+ .nr_div_clks = ARRAY_SIZE(apollo_div_clks),
+ .gate_clks = apollo_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(apollo_gate_clks),
+ .cpu_clks = apollo_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(apollo_cpu_clks),
+ .nr_clk_ids = APOLLO_NR_CLK,
+ .clk_regs = apollo_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(apollo_clk_regs),
+};
+
static void __init exynos5433_cmu_apollo_init(struct device_node *np)
{
- void __iomem *reg_base;
- struct samsung_clk_provider *ctx;
- struct clk_hw **hws;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- panic("%s: failed to map registers\n", __func__);
- return;
- }
-
- ctx = samsung_clk_init(np, reg_base, APOLLO_NR_CLK);
- if (!ctx) {
- panic("%s: unable to allocate ctx\n", __func__);
- return;
- }
-
- samsung_clk_register_pll(ctx, apollo_pll_clks,
- ARRAY_SIZE(apollo_pll_clks), reg_base);
- samsung_clk_register_mux(ctx, apollo_mux_clks,
- ARRAY_SIZE(apollo_mux_clks));
- samsung_clk_register_div(ctx, apollo_div_clks,
- ARRAY_SIZE(apollo_div_clks));
- samsung_clk_register_gate(ctx, apollo_gate_clks,
- ARRAY_SIZE(apollo_gate_clks));
-
- hws = ctx->clk_data.hws;
-
- exynos_register_cpu_clock(ctx, CLK_SCLK_APOLLO, "apolloclk",
- hws[CLK_MOUT_APOLLO_PLL], hws[CLK_MOUT_BUS_PLL_APOLLO_USER], 0x200,
- exynos5433_apolloclk_d, ARRAY_SIZE(exynos5433_apolloclk_d),
- CLK_CPU_HAS_E5433_REGS_LAYOUT);
-
- samsung_clk_sleep_init(reg_base, apollo_clk_regs,
- ARRAY_SIZE(apollo_clk_regs));
-
- samsung_clk_of_add_provider(np, ctx);
+ samsung_cmu_register_one(np, &apollo_cmu_info);
}
CLK_OF_DECLARE(exynos5433_cmu_apollo, "samsung,exynos5433-cmu-apollo",
exynos5433_cmu_apollo_init);
@@ -3932,44 +3920,32 @@ static const struct exynos_cpuclk_cfg_data exynos5433_atlasclk_d[] __initconst =
{ 0 },
};
-static void __init exynos5433_cmu_atlas_init(struct device_node *np)
-{
- void __iomem *reg_base;
- struct samsung_clk_provider *ctx;
- struct clk_hw **hws;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- panic("%s: failed to map registers\n", __func__);
- return;
- }
-
- ctx = samsung_clk_init(np, reg_base, ATLAS_NR_CLK);
- if (!ctx) {
- panic("%s: unable to allocate ctx\n", __func__);
- return;
- }
-
- samsung_clk_register_pll(ctx, atlas_pll_clks,
- ARRAY_SIZE(atlas_pll_clks), reg_base);
- samsung_clk_register_mux(ctx, atlas_mux_clks,
- ARRAY_SIZE(atlas_mux_clks));
- samsung_clk_register_div(ctx, atlas_div_clks,
- ARRAY_SIZE(atlas_div_clks));
- samsung_clk_register_gate(ctx, atlas_gate_clks,
- ARRAY_SIZE(atlas_gate_clks));
-
- hws = ctx->clk_data.hws;
-
- exynos_register_cpu_clock(ctx, CLK_SCLK_ATLAS, "atlasclk",
- hws[CLK_MOUT_ATLAS_PLL], hws[CLK_MOUT_BUS_PLL_ATLAS_USER], 0x200,
- exynos5433_atlasclk_d, ARRAY_SIZE(exynos5433_atlasclk_d),
- CLK_CPU_HAS_E5433_REGS_LAYOUT);
+static const struct samsung_cpu_clock atlas_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_SCLK_ATLAS, "atlasclk", CLK_MOUT_ATLAS_PLL,
+ CLK_MOUT_BUS_PLL_ATLAS_USER,
+ CLK_CPU_HAS_E5433_REGS_LAYOUT, 0x200,
+ exynos5433_atlasclk_d),
+};
- samsung_clk_sleep_init(reg_base, atlas_clk_regs,
- ARRAY_SIZE(atlas_clk_regs));
+static const struct samsung_cmu_info atlas_cmu_info __initconst = {
+ .pll_clks = atlas_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(atlas_pll_clks),
+ .mux_clks = atlas_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(atlas_mux_clks),
+ .div_clks = atlas_div_clks,
+ .nr_div_clks = ARRAY_SIZE(atlas_div_clks),
+ .gate_clks = atlas_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(atlas_gate_clks),
+ .cpu_clks = atlas_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(atlas_cpu_clks),
+ .nr_clk_ids = ATLAS_NR_CLK,
+ .clk_regs = atlas_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(atlas_clk_regs),
+};
- samsung_clk_of_add_provider(np, ctx);
+static void __init exynos5433_cmu_atlas_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &atlas_cmu_info);
}
CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas",
exynos5433_cmu_atlas_init);
@@ -5564,7 +5540,6 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
struct exynos5433_cmu_data *data;
struct samsung_clk_provider *ctx;
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *reg_base;
int i;
@@ -5577,8 +5552,7 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
return -ENOMEM;
ctx = &data->ctx;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
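The apollo and atlas init paths below shrink to a samsung_cmu_info descriptor plus a single samsung_cmu_register_one() call, which performs the map/allocate/register/sleep-init/add-provider sequence the open-coded versions did by hand. A skeleton of the data-driven form (field names as used in the patch; everything prefixed "example_" is hypothetical):

static const struct samsung_cmu_info example_cmu_info __initconst = {
	.pll_clks	= example_pll_clks,
	.nr_pll_clks	= ARRAY_SIZE(example_pll_clks),
	.gate_clks	= example_gate_clks,
	.nr_gate_clks	= ARRAY_SIZE(example_gate_clks),
	.nr_clk_ids	= EXAMPLE_NR_CLK,
	.clk_regs	= example_clk_regs,	/* saved/restored over sleep */
	.nr_clk_regs	= ARRAY_SIZE(example_clk_regs),
};

static void __init example_cmu_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &example_cmu_info);
}
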
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
new file mode 100644
index 000000000000..2294989e244c
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -0,0 +1,835 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Common Clock Framework support for Exynos850 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/exynos850.h>
+
+#include "clk.h"
+
+/* Gate register bits */
+#define GATE_MANUAL BIT(20)
+#define GATE_ENABLE_HWACG BIT(28)
+
+/* Gate register offsets range */
+#define GATE_OFF_START 0x2000
+#define GATE_OFF_END 0x2fff
+
+/**
+ * exynos850_init_clocks - Set initial clock configuration
+ * @np: CMU device tree node with "reg" property (CMU addr)
+ * @reg_offs: Register offsets array for clocks to init
+ * @reg_offs_len: Number of register offsets in reg_offs array
+ *
+ * Set manual control mode for all gate clocks.
+ */
+static void __init exynos850_init_clocks(struct device_node *np,
+ const unsigned long *reg_offs, size_t reg_offs_len)
+{
+ void __iomem *reg_base;
+ size_t i;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ for (i = 0; i < reg_offs_len; ++i) {
+ void __iomem *reg = reg_base + reg_offs[i];
+ u32 val;
+
+ /* Modify only gate clock registers */
+ if (reg_offs[i] < GATE_OFF_START || reg_offs[i] > GATE_OFF_END)
+ continue;
+
+ val = readl(reg);
+ val |= GATE_MANUAL;
+ val &= ~GATE_ENABLE_HWACG;
+ writel(val, reg);
+ }
+
+ iounmap(reg_base);
+}
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x120e0000) */
+#define PLL_LOCKTIME_PLL_MMC 0x0000
+#define PLL_LOCKTIME_PLL_SHARED0 0x0004
+#define PLL_LOCKTIME_PLL_SHARED1 0x0008
+#define PLL_CON0_PLL_MMC 0x0100
+#define PLL_CON3_PLL_MMC 0x010c
+#define PLL_CON0_PLL_SHARED0 0x0140
+#define PLL_CON3_PLL_SHARED0 0x014c
+#define PLL_CON0_PLL_SHARED1 0x0180
+#define PLL_CON3_PLL_SHARED1 0x018c
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_SSS 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_DPU 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_HSI_BUS 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_IP 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART 0x1078
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1820
+#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1824
+#define CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD 0x1828
+#define CLK_CON_DIV_CLKCMU_CORE_SSS 0x182c
+#define CLK_CON_DIV_CLKCMU_DPU 0x1840
+#define CLK_CON_DIV_CLKCMU_HSI_BUS 0x1848
+#define CLK_CON_DIV_CLKCMU_HSI_MMC_CARD 0x184c
+#define CLK_CON_DIV_CLKCMU_HSI_USB20DRD 0x1850
+#define CLK_CON_DIV_CLKCMU_PERI_BUS 0x187c
+#define CLK_CON_DIV_CLKCMU_PERI_IP 0x1880
+#define CLK_CON_DIV_CLKCMU_PERI_UART 0x1884
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x188c
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x1890
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x1894
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x1898
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x189c
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18a0
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_SSS 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_DPU 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI_BUS 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_BUS 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_IP 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_UART 0x2088
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_MMC,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_CON0_PLL_MMC,
+ PLL_CON3_PLL_MMC,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED1,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_SSS,
+ CLK_CON_MUX_MUX_CLKCMU_DPU,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CORE_CCI,
+ CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD,
+ CLK_CON_DIV_CLKCMU_CORE_SSS,
+ CLK_CON_DIV_CLKCMU_DPU,
+ CLK_CON_DIV_CLKCMU_HSI_BUS,
+ CLK_CON_DIV_CLKCMU_HSI_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_HSI_USB20DRD,
+ CLK_CON_DIV_CLKCMU_PERI_BUS,
+ CLK_CON_DIV_CLKCMU_PERI_IP,
+ CLK_CON_DIV_CLKCMU_PERI_UART,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_SSS,
+ CLK_CON_GAT_GATE_CLKCMU_DPU,
+ CLK_CON_GAT_GATE_CLKCMU_HSI_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART,
+};
+
+/*
+ * Do not provide PLL rate tables to these PLLs: the MANUAL_PLL_CTRL bit is
+ * not set for them by default, so the set_rate operation would fail.
+ */
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ PLL(pll_0822x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0,
+ NULL),
+ PLL(pll_0822x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1,
+ NULL),
+ PLL(pll_0831x, CLK_FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+};
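+
+/*
+ * Hypothetical sketch (divider values are illustrative, not taken from a
+ * datasheet): if MANUAL_PLL_CTRL were enabled for these PLLs, a rate table
+ * could be passed in place of NULL above, along the lines of:
+ *
+ *	static const struct samsung_pll_rate_table shared0_pll_rates[] = {
+ *		PLL_RATE(26000000, 400, 13, 1, 0, 16),
+ *	};
+ */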
+
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
+PNAME(mout_core_bus_p) = { "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "dout_shared0_div4" };
+PNAME(mout_core_cci_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+PNAME(mout_core_mmc_embd_p) = { "oscclk", "dout_shared0_div2",
+ "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "mout_mmc_pll",
+ "oscclk", "oscclk" };
+PNAME(mout_core_sss_p) = { "dout_shared0_div3", "dout_shared1_div3",
+ "dout_shared0_div4", "dout_shared1_div4" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_HSI */
+PNAME(mout_hsi_bus_p) = { "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_hsi_mmc_card_p) = { "oscclk", "dout_shared0_div2",
+ "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "mout_mmc_pll",
+ "oscclk", "oscclk" };
+PNAME(mout_hsi_usb20drd_p) = { "oscclk", "dout_shared0_div4",
+ "dout_shared1_div4", "oscclk" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_PERI */
+PNAME(mout_peri_bus_p) = { "dout_shared0_div4", "dout_shared1_div4" };
+PNAME(mout_peri_uart_p) = { "oscclk", "dout_shared0_div4",
+ "dout_shared1_div4", "oscclk" };
+PNAME(mout_peri_ip_p) = { "oscclk", "dout_shared0_div4",
+ "dout_shared1_div4", "oscclk" };
+
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_DPU */
+PNAME(mout_dpu_p) = { "dout_shared0_div3", "dout_shared1_div3",
+ "dout_shared0_div4", "dout_shared1_div4" };
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ MUX(CLK_MOUT_SHARED0_PLL, "mout_shared0_pll", mout_shared0_pll_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(CLK_MOUT_MMC_PLL, "mout_mmc_pll", mout_mmc_pll_p,
+ PLL_CON0_PLL_MMC, 4, 1),
+
+ /* CORE */
+ MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
+ MUX(CLK_MOUT_CORE_CCI, "mout_core_cci", mout_core_cci_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_CCI, 0, 2),
+ MUX(CLK_MOUT_CORE_MMC_EMBD, "mout_core_mmc_embd", mout_core_mmc_embd_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD, 0, 3),
+ MUX(CLK_MOUT_CORE_SSS, "mout_core_sss", mout_core_sss_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_SSS, 0, 2),
+
+ /* DPU */
+ MUX(CLK_MOUT_DPU, "mout_dpu", mout_dpu_p,
+ CLK_CON_MUX_MUX_CLKCMU_DPU, 0, 2),
+
+ /* HSI */
+ MUX(CLK_MOUT_HSI_BUS, "mout_hsi_bus", mout_hsi_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_BUS, 0, 1),
+ MUX(CLK_MOUT_HSI_MMC_CARD, "mout_hsi_mmc_card", mout_hsi_mmc_card_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD, 0, 3),
+ MUX(CLK_MOUT_HSI_USB20DRD, "mout_hsi_usb20drd", mout_hsi_usb20drd_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD, 0, 2),
+
+ /* PERI */
+ MUX(CLK_MOUT_PERI_BUS, "mout_peri_bus", mout_peri_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_BUS, 0, 1),
+ MUX(CLK_MOUT_PERI_UART, "mout_peri_uart", mout_peri_uart_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART, 0, 2),
+ MUX(CLK_MOUT_PERI_IP, "mout_peri_ip", mout_peri_ip_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_IP, 0, 2),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "mout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "mout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "mout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "mout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "dout_shared0_div2",
+ CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2",
+ CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+
+ /* CORE */
+ DIV(CLK_DOUT_CORE_BUS, "dout_core_bus", "gout_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+ DIV(CLK_DOUT_CORE_CCI, "dout_core_cci", "gout_core_cci",
+ CLK_CON_DIV_CLKCMU_CORE_CCI, 0, 4),
+ DIV(CLK_DOUT_CORE_MMC_EMBD, "dout_core_mmc_embd", "gout_core_mmc_embd",
+ CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD, 0, 9),
+ DIV(CLK_DOUT_CORE_SSS, "dout_core_sss", "gout_core_sss",
+ CLK_CON_DIV_CLKCMU_CORE_SSS, 0, 4),
+
+ /* DPU */
+ DIV(CLK_DOUT_DPU, "dout_dpu", "gout_dpu",
+ CLK_CON_DIV_CLKCMU_DPU, 0, 4),
+
+ /* HSI */
+ DIV(CLK_DOUT_HSI_BUS, "dout_hsi_bus", "gout_hsi_bus",
+ CLK_CON_DIV_CLKCMU_HSI_BUS, 0, 4),
+ DIV(CLK_DOUT_HSI_MMC_CARD, "dout_hsi_mmc_card", "gout_hsi_mmc_card",
+ CLK_CON_DIV_CLKCMU_HSI_MMC_CARD, 0, 9),
+ DIV(CLK_DOUT_HSI_USB20DRD, "dout_hsi_usb20drd", "gout_hsi_usb20drd",
+ CLK_CON_DIV_CLKCMU_HSI_USB20DRD, 0, 4),
+
+ /* PERI */
+ DIV(CLK_DOUT_PERI_BUS, "dout_peri_bus", "gout_peri_bus",
+ CLK_CON_DIV_CLKCMU_PERI_BUS, 0, 4),
+ DIV(CLK_DOUT_PERI_UART, "dout_peri_uart", "gout_peri_uart",
+ CLK_CON_DIV_CLKCMU_PERI_UART, 0, 4),
+ DIV(CLK_DOUT_PERI_IP, "dout_peri_ip", "gout_peri_ip",
+ CLK_CON_DIV_CLKCMU_PERI_IP, 0, 4),
+};
+
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ /* CORE */
+ GATE(CLK_GOUT_CORE_BUS, "gout_core_bus", "mout_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_CCI, "gout_core_cci", "mout_core_cci",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_CCI, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_MMC_EMBD, "gout_core_mmc_embd", "mout_core_mmc_embd",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_SSS, "gout_core_sss", "mout_core_sss",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_SSS, 21, 0, 0),
+
+ /* DPU */
+ GATE(CLK_GOUT_DPU, "gout_dpu", "mout_dpu",
+ CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
+
+ /* HSI */
+ GATE(CLK_GOUT_HSI_BUS, "gout_hsi_bus", "mout_hsi_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_HSI_MMC_CARD, "gout_hsi_mmc_card", "mout_hsi_mmc_card",
+ CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD, 21, 0, 0),
+ GATE(CLK_GOUT_HSI_USB20DRD, "gout_hsi_usb20drd", "mout_hsi_usb20drd",
+ CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD, 21, 0, 0),
+
+ /* PERI */
+ GATE(CLK_GOUT_PERI_BUS, "gout_peri_bus", "mout_peri_bus",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_UART, "gout_peri_uart", "mout_peri_uart",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_IP, "gout_peri_ip", "mout_peri_ip",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_IP, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = TOP_NR_CLK,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos850_cmu_top_init(struct device_node *np)
+{
+ exynos850_init_clocks(np, top_clk_regs, ARRAY_SIZE(top_clk_regs));
+ samsung_cmu_register_one(np, &top_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos850_cmu_top, "samsung,exynos850-cmu-top",
+ exynos850_cmu_top_init);
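+
+/*
+ * CMU_TOP is registered early through CLK_OF_DECLARE since it provides the
+ * parent clocks ("dout_*") consumed by the other CMUs; CMUs that sit in
+ * power domains are instead handled by the platform driver at the end of
+ * this file.
+ */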
+
+/* ---- CMU_HSI ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI (0x13400000) */
+#define PLL_CON0_MUX_CLKCMU_HSI_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER 0x0620
+#define CLK_CON_MUX_MUX_CLK_HSI_RTC 0x1000
+#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV 0x2008
+#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50 0x200c
+#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26 0x2010
+#define CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK 0x2024
+#define CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN 0x2028
+#define CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20 0x203c
+#define CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY 0x2040
+
+static const unsigned long hsi_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER,
+ CLK_CON_MUX_MUX_CLK_HSI_RTC,
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV,
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50,
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26,
+ CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK,
+ CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK,
+ CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN,
+ CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK,
+ CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20,
+ CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY,
+};
+
+/* List of parent clocks for Muxes in CMU_HSI */
+PNAME(mout_hsi_bus_user_p) = { "oscclk", "dout_hsi_bus" };
+PNAME(mout_hsi_mmc_card_user_p) = { "oscclk", "dout_hsi_mmc_card" };
+PNAME(mout_hsi_usb20drd_user_p) = { "oscclk", "dout_hsi_usb20drd" };
+PNAME(mout_hsi_rtc_p) = { "rtcclk", "oscclk" };
+
+static const struct samsung_mux_clock hsi_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI_BUS_USER, "mout_hsi_bus_user", mout_hsi_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI_BUS_USER, 4, 1),
+ MUX_F(CLK_MOUT_HSI_MMC_CARD_USER, "mout_hsi_mmc_card_user",
+ mout_hsi_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX(CLK_MOUT_HSI_USB20DRD_USER, "mout_hsi_usb20drd_user",
+ mout_hsi_usb20drd_user_p, PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI_RTC, "mout_hsi_rtc", mout_hsi_rtc_p,
+ CLK_CON_MUX_MUX_CLK_HSI_RTC, 0, 1),
+};
+
+static const struct samsung_gate_clock hsi_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_USB_RTC_CLK, "gout_usb_rtc", "mout_hsi_rtc",
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV, 21, 0, 0),
+ GATE(CLK_GOUT_USB_REF_CLK, "gout_usb_ref", "mout_hsi_usb20drd_user",
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50, 21, 0, 0),
+ GATE(CLK_GOUT_USB_PHY_REF_CLK, "gout_usb_phy_ref", "oscclk",
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26, 21, 0, 0),
+ GATE(CLK_GOUT_GPIO_HSI_PCLK, "gout_gpio_hsi_pclk", "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_CARD_ACLK, "gout_mmc_card_aclk", "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin",
+ "mout_hsi_mmc_card_user",
+ CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN, 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_SYSREG_HSI_PCLK, "gout_sysreg_hsi_pclk",
+ "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USB_PHY_ACLK, "gout_usb_phy_aclk", "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20, 21, 0, 0),
+ GATE(CLK_GOUT_USB_BUS_EARLY_CLK, "gout_usb_bus_early",
+ "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info hsi_cmu_info __initconst = {
+ .mux_clks = hsi_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi_mux_clks),
+ .gate_clks = hsi_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(hsi_gate_clks),
+ .nr_clk_ids = HSI_NR_CLK,
+ .clk_regs = hsi_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi_clk_regs),
+ .clk_name = "dout_hsi_bus",
+};
+
+/* ---- CMU_PERI ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_PERI (0x10030000) */
+#define PLL_CON0_MUX_CLKCMU_PERI_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_PERI_SPI_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_PERI_UART_USER 0x0630
+#define CLK_CON_DIV_DIV_CLK_PERI_HSI2C_0 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERI_SPI_0 0x180c
+#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0 0x200c
+#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1 0x2010
+#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2 0x2014
+#define CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK 0x2024
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_1_IPCLK 0x202c
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK 0x2030
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_2_IPCLK 0x2034
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK 0x2044
+#define CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK 0x2048
+#define CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK 0x2054
+#define CLK_CON_GAT_GOUT_PERI_MCT_PCLK 0x205c
+#define CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK 0x2064
+#define CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK 0x209c
+#define CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK 0x20a0
+#define CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK 0x20a4
+#define CLK_CON_GAT_GOUT_PERI_UART_IPCLK 0x20a8
+#define CLK_CON_GAT_GOUT_PERI_UART_PCLK 0x20ac
+#define CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK 0x20b0
+#define CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK 0x20b4
+
+static const unsigned long peri_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERI_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_UART_USER,
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_0,
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1,
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2,
+ CLK_CON_DIV_DIV_CLK_PERI_SPI_0,
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0,
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1,
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2,
+ CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_IPCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_IPCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK,
+ CLK_CON_GAT_GOUT_PERI_MCT_PCLK,
+ CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_IPCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_PCLK,
+ CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_PERI */
+PNAME(mout_peri_bus_user_p) = { "oscclk", "dout_peri_bus" };
+PNAME(mout_peri_uart_user_p) = { "oscclk", "dout_peri_uart" };
+PNAME(mout_peri_hsi2c_user_p) = { "oscclk", "dout_peri_ip" };
+PNAME(mout_peri_spi_user_p) = { "oscclk", "dout_peri_ip" };
+
+static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERI_BUS_USER, "mout_peri_bus_user", mout_peri_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_PERI_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_UART_USER, "mout_peri_uart_user",
+ mout_peri_uart_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_HSI2C_USER, "mout_peri_hsi2c_user",
+ mout_peri_hsi2c_user_p, PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user", mout_peri_spi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1),
+};
+
+static const struct samsung_div_clock peri_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERI_HSI2C0, "dout_peri_hsi2c0", "gout_peri_hsi2c0",
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_0, 0, 5),
+ DIV(CLK_DOUT_PERI_HSI2C1, "dout_peri_hsi2c1", "gout_peri_hsi2c1",
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1, 0, 5),
+ DIV(CLK_DOUT_PERI_HSI2C2, "dout_peri_hsi2c2", "gout_peri_hsi2c2",
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2, 0, 5),
+ DIV(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
+ CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5),
+};
+
+static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERI_HSI2C0, "gout_peri_hsi2c0", "mout_peri_hsi2c_user",
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_HSI2C1, "gout_peri_hsi2c1", "mout_peri_hsi2c_user",
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_HSI2C2, "gout_peri_hsi2c2", "mout_peri_hsi2c_user",
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C0_IPCLK, "gout_hsi2c0_ipclk", "dout_peri_hsi2c0",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C0_PCLK, "gout_hsi2c0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C1_IPCLK, "gout_hsi2c1_ipclk", "dout_peri_hsi2c1",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C1_PCLK, "gout_hsi2c1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C2_IPCLK, "gout_hsi2c2_ipclk", "dout_peri_hsi2c2",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C2_PCLK, "gout_hsi2c2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C0_PCLK, "gout_i2c0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C1_PCLK, "gout_i2c1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C2_PCLK, "gout_i2c2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C3_PCLK, "gout_i2c3_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C4_PCLK, "gout_i2c4_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C5_PCLK, "gout_i2c5_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C6_PCLK, "gout_i2c6_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_MCT_PCLK, "gout_mct_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_MCT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PWM_MOTOR_PCLK, "gout_pwm_motor_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI0_IPCLK, "gout_spi0_ipclk", "dout_peri_spi0",
+ CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART_IPCLK, "gout_uart_ipclk", "mout_peri_uart_user",
+ CLK_CON_GAT_GOUT_PERI_UART_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART_PCLK, "gout_uart_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_UART_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_WDT0_PCLK, "gout_wdt0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_WDT1_PCLK, "gout_wdt1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_GPIO_PERI_PCLK, "gout_gpio_peri_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peri_cmu_info __initconst = {
+ .mux_clks = peri_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peri_mux_clks),
+ .div_clks = peri_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peri_div_clks),
+ .gate_clks = peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peri_gate_clks),
+ .nr_clk_ids = PERI_NR_CLK,
+ .clk_regs = peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peri_clk_regs),
+ .clk_name = "dout_peri_bus",
+};
+
+/* ---- CMU_CORE ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_CORE (0x12000000) */
+#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CORE_CCI_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_CORE_MMC_EMBD_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_CORE_SSS_USER 0x0630
+#define CLK_CON_MUX_MUX_CLK_CORE_GIC 0x1000
+#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
+#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2038
+#define CLK_CON_GAT_GOUT_CORE_GIC_CLK 0x2040
+#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK 0x20e8
+#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN 0x20ec
+#define CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK 0x2128
+#define CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK 0x212c
+
+static const unsigned long core_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_CORE_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_CORE_CCI_USER,
+ PLL_CON0_MUX_CLKCMU_CORE_MMC_EMBD_USER,
+ PLL_CON0_MUX_CLKCMU_CORE_SSS_USER,
+ CLK_CON_MUX_MUX_CLK_CORE_GIC,
+ CLK_CON_DIV_DIV_CLK_CORE_BUSP,
+ CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK,
+ CLK_CON_GAT_GOUT_CORE_GIC_CLK,
+ CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK,
+ CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN,
+ CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK,
+ CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_CORE */
+PNAME(mout_core_bus_user_p) = { "oscclk", "dout_core_bus" };
+PNAME(mout_core_cci_user_p) = { "oscclk", "dout_core_cci" };
+PNAME(mout_core_mmc_embd_user_p) = { "oscclk", "dout_core_mmc_embd" };
+PNAME(mout_core_sss_user_p) = { "oscclk", "dout_core_sss" };
+PNAME(mout_core_gic_p) = { "dout_core_busp", "oscclk" };
+
+static const struct samsung_mux_clock core_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CORE_BUS_USER, "mout_core_bus_user", mout_core_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_CCI_USER, "mout_core_cci_user", mout_core_cci_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_CCI_USER, 4, 1),
+ MUX_F(CLK_MOUT_CORE_MMC_EMBD_USER, "mout_core_mmc_embd_user",
+ mout_core_mmc_embd_user_p, PLL_CON0_MUX_CLKCMU_CORE_MMC_EMBD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX(CLK_MOUT_CORE_SSS_USER, "mout_core_sss_user", mout_core_sss_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_SSS_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_GIC, "mout_core_gic", mout_core_gic_p,
+ CLK_CON_MUX_MUX_CLK_CORE_GIC, 0, 1),
+};
+
+static const struct samsung_div_clock core_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CORE_BUSP, "dout_core_busp", "mout_core_bus_user",
+ CLK_CON_DIV_DIV_CLK_CORE_BUSP, 0, 2),
+};
+
+static const struct samsung_gate_clock core_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CCI_ACLK, "gout_cci_aclk", "mout_core_cci_user",
+ CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_GIC_CLK, "gout_gic_clk", "mout_core_gic",
+ CLK_CON_GAT_GOUT_CORE_GIC_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_EMBD_ACLK, "gout_mmc_embd_aclk", "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_EMBD_SDCLKIN, "gout_mmc_embd_sdclkin",
+ "mout_core_mmc_embd_user", CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_SSS_ACLK, "gout_sss_aclk", "mout_core_sss_user",
+ CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_SSS_PCLK, "gout_sss_pclk", "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info core_cmu_info __initconst = {
+ .mux_clks = core_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(core_mux_clks),
+ .div_clks = core_div_clks,
+ .nr_div_clks = ARRAY_SIZE(core_div_clks),
+ .gate_clks = core_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(core_gate_clks),
+ .nr_clk_ids = CORE_NR_CLK,
+ .clk_regs = core_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(core_clk_regs),
+ .clk_name = "dout_core_bus",
+};
+
+/* ---- CMU_DPU ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_DPU (0x13000000) */
+#define PLL_CON0_MUX_CLKCMU_DPU_USER 0x0600
+#define CLK_CON_DIV_DIV_CLK_DPU_BUSP 0x1800
+#define CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK 0x2004
+#define CLK_CON_GAT_GOUT_DPU_ACLK_DECON0 0x2010
+#define CLK_CON_GAT_GOUT_DPU_ACLK_DMA 0x2014
+#define CLK_CON_GAT_GOUT_DPU_ACLK_DPP 0x2018
+#define CLK_CON_GAT_GOUT_DPU_PPMU_ACLK 0x2028
+#define CLK_CON_GAT_GOUT_DPU_PPMU_PCLK 0x202c
+#define CLK_CON_GAT_GOUT_DPU_SMMU_CLK 0x2038
+#define CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK 0x203c
+
+static const unsigned long dpu_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_DPU_USER,
+ CLK_CON_DIV_DIV_CLK_DPU_BUSP,
+ CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK,
+ CLK_CON_GAT_GOUT_DPU_ACLK_DECON0,
+ CLK_CON_GAT_GOUT_DPU_ACLK_DMA,
+ CLK_CON_GAT_GOUT_DPU_ACLK_DPP,
+ CLK_CON_GAT_GOUT_DPU_PPMU_ACLK,
+ CLK_CON_GAT_GOUT_DPU_PPMU_PCLK,
+ CLK_CON_GAT_GOUT_DPU_SMMU_CLK,
+ CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_DPU */
+PNAME(mout_dpu_user_p) = { "oscclk", "dout_dpu" };
+
+static const struct samsung_mux_clock dpu_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_DPU_USER, "mout_dpu_user", mout_dpu_user_p,
+ PLL_CON0_MUX_CLKCMU_DPU_USER, 4, 1),
+};
+
+static const struct samsung_div_clock dpu_div_clks[] __initconst = {
+ DIV(CLK_DOUT_DPU_BUSP, "dout_dpu_busp", "mout_dpu_user",
+ CLK_CON_DIV_DIV_CLK_DPU_BUSP, 0, 3),
+};
+
+static const struct samsung_gate_clock dpu_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_DPU_CMU_DPU_PCLK, "gout_dpu_cmu_dpu_pclk",
+ "dout_dpu_busp", CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_DECON0_ACLK, "gout_dpu_decon0_aclk", "mout_dpu_user",
+ CLK_CON_GAT_GOUT_DPU_ACLK_DECON0, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_DMA_ACLK, "gout_dpu_dma_aclk", "mout_dpu_user",
+ CLK_CON_GAT_GOUT_DPU_ACLK_DMA, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_DPP_ACLK, "gout_dpu_dpp_aclk", "mout_dpu_user",
+ CLK_CON_GAT_GOUT_DPU_ACLK_DPP, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_PPMU_ACLK, "gout_dpu_ppmu_aclk", "mout_dpu_user",
+ CLK_CON_GAT_GOUT_DPU_PPMU_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_PPMU_PCLK, "gout_dpu_ppmu_pclk", "dout_dpu_busp",
+ CLK_CON_GAT_GOUT_DPU_PPMU_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_SMMU_CLK, "gout_dpu_smmu_clk", "mout_dpu_user",
+ CLK_CON_GAT_GOUT_DPU_SMMU_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_SYSREG_PCLK, "gout_dpu_sysreg_pclk", "dout_dpu_busp",
+ CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info dpu_cmu_info __initconst = {
+ .mux_clks = dpu_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(dpu_mux_clks),
+ .div_clks = dpu_div_clks,
+ .nr_div_clks = ARRAY_SIZE(dpu_div_clks),
+ .gate_clks = dpu_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(dpu_gate_clks),
+ .nr_clk_ids = DPU_NR_CLK,
+ .clk_regs = dpu_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(dpu_clk_regs),
+ .clk_name = "dout_dpu",
+};
+
+/* ---- platform_driver ----------------------------------------------------- */
+
+static int __init exynos850_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ info = of_device_get_match_data(dev);
+ exynos850_init_clocks(np, info->clk_regs, info->nr_clk_regs);
+ samsung_cmu_register_one(np, info);
+
+ /* Keep bus clock running, so it's possible to access CMU registers */
+ if (info->clk_name) {
+ struct clk *bus_clk;
+
+ bus_clk = clk_get(dev, info->clk_name);
+ if (IS_ERR(bus_clk)) {
+ pr_err("%s: could not find bus clock %s; err = %ld\n",
+ __func__, info->clk_name, PTR_ERR(bus_clk));
+ } else {
+ clk_prepare_enable(bus_clk);
+ }
+ }
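+ /*
+ * Note: the bus clock is deliberately left enabled and its reference is
+ * not put; as stated above, the CMU registers must remain accessible
+ * for the lifetime of the system.
+ */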
+
+ return 0;
+}
+
+/* CMUs which belong to power domains and thus need runtime PM to be implemented */
+static const struct of_device_id exynos850_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos850-cmu-hsi",
+ .data = &hsi_cmu_info,
+ }, {
+ .compatible = "samsung,exynos850-cmu-peri",
+ .data = &peri_cmu_info,
+ }, {
+ .compatible = "samsung,exynos850-cmu-core",
+ .data = &core_cmu_info,
+ }, {
+ .compatible = "samsung,exynos850-cmu-dpu",
+ .data = &dpu_cmu_info,
+ }, {
+ },
+};
+
+static struct platform_driver exynos850_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos850-cmu",
+ .of_match_table = exynos850_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos850_cmu_probe,
+};
+
+static int __init exynos850_cmu_init(void)
+{
+ return platform_driver_register(&exynos850_cmu_driver);
+}
+core_initcall(exynos850_cmu_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 5873a9354b50..83d1b03647db 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -416,6 +416,186 @@ static const struct clk_ops samsung_pll36xx_clk_min_ops = {
};
/*
+ * PLL0822x Clock Type
+ */
+/* Maximum lock time can be 150 * PDIV cycles */
+#define PLL0822X_LOCK_FACTOR (150)
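+/*
+ * Illustrative arithmetic (PDIV value assumed, not from a datasheet): with
+ * PDIV = 3, the lock register is programmed below to 150 * 3 = 450 input
+ * clock cycles, i.e. roughly 17 us at a 26 MHz FIN.
+ */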
+
+#define PLL0822X_MDIV_MASK (0x3FF)
+#define PLL0822X_PDIV_MASK (0x3F)
+#define PLL0822X_SDIV_MASK (0x7)
+#define PLL0822X_MDIV_SHIFT (16)
+#define PLL0822X_PDIV_SHIFT (8)
+#define PLL0822X_SDIV_SHIFT (0)
+#define PLL0822X_LOCK_STAT_SHIFT (29)
+#define PLL0822X_ENABLE_SHIFT (31)
+
+static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, pll_con3;
+ u64 fvco = parent_rate;
+
+ pll_con3 = readl_relaxed(pll->con_reg);
+ mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK;
+ pdiv = (pll_con3 >> PLL0822X_PDIV_SHIFT) & PLL0822X_PDIV_MASK;
+ sdiv = (pll_con3 >> PLL0822X_SDIV_SHIFT) & PLL0822X_SDIV_MASK;
+
+ fvco *= mdiv;
+ do_div(fvco, (pdiv << sdiv));
+
+ return (unsigned long)fvco;
+}
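+
+/*
+ * The function above implements FOUT = FIN * MDIV / (PDIV * 2^SDIV).
+ * Worked example with assumed divider values: FIN = 26 MHz, MDIV = 400,
+ * PDIV = 13, SDIV = 1 gives 26 * 400 / (13 * 2) = 400 MHz.
+ */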
+
+static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ const struct samsung_pll_rate_table *rate;
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pll_con3;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ /* Change PLL PMS values */
+ pll_con3 = readl_relaxed(pll->con_reg);
+ pll_con3 &= ~((PLL0822X_MDIV_MASK << PLL0822X_MDIV_SHIFT) |
+ (PLL0822X_PDIV_MASK << PLL0822X_PDIV_SHIFT) |
+ (PLL0822X_SDIV_MASK << PLL0822X_SDIV_SHIFT));
+ pll_con3 |= (rate->mdiv << PLL0822X_MDIV_SHIFT) |
+ (rate->pdiv << PLL0822X_PDIV_SHIFT) |
+ (rate->sdiv << PLL0822X_SDIV_SHIFT);
+
+ /* Set PLL lock time */
+ writel_relaxed(rate->pdiv * PLL0822X_LOCK_FACTOR,
+ pll->lock_reg);
+
+ /* Write PMS values */
+ writel_relaxed(pll_con3, pll->con_reg);
+
+ /* Wait for PLL lock if the PLL is enabled */
+ if (pll_con3 & BIT(pll->enable_offs))
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
+
+ return 0;
+}
+
+static const struct clk_ops samsung_pll0822x_clk_ops = {
+ .recalc_rate = samsung_pll0822x_recalc_rate,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_pll0822x_set_rate,
+ .enable = samsung_pll3xxx_enable,
+ .disable = samsung_pll3xxx_disable,
+};
+
+static const struct clk_ops samsung_pll0822x_clk_min_ops = {
+ .recalc_rate = samsung_pll0822x_recalc_rate,
+};
+
+/*
+ * PLL0831x Clock Type
+ */
+/* Maximum lock time can be 500 * PDIV cycles */
+#define PLL0831X_LOCK_FACTOR (500)
+
+#define PLL0831X_KDIV_MASK (0xFFFF)
+#define PLL0831X_MDIV_MASK (0x1FF)
+#define PLL0831X_PDIV_MASK (0x3F)
+#define PLL0831X_SDIV_MASK (0x7)
+#define PLL0831X_MDIV_SHIFT (16)
+#define PLL0831X_PDIV_SHIFT (8)
+#define PLL0831X_SDIV_SHIFT (0)
+#define PLL0831X_KDIV_SHIFT (0)
+#define PLL0831X_LOCK_STAT_SHIFT (29)
+#define PLL0831X_ENABLE_SHIFT (31)
+
+static unsigned long samsung_pll0831x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, pll_con3, pll_con5;
+ s16 kdiv;
+ u64 fvco = parent_rate;
+
+ pll_con3 = readl_relaxed(pll->con_reg);
+ pll_con5 = readl_relaxed(pll->con_reg + 8);
+ mdiv = (pll_con3 >> PLL0831X_MDIV_SHIFT) & PLL0831X_MDIV_MASK;
+ pdiv = (pll_con3 >> PLL0831X_PDIV_SHIFT) & PLL0831X_PDIV_MASK;
+ sdiv = (pll_con3 >> PLL0831X_SDIV_SHIFT) & PLL0831X_SDIV_MASK;
+ kdiv = (s16)((pll_con5 >> PLL0831X_KDIV_SHIFT) & PLL0831X_KDIV_MASK);
+
+ fvco *= (mdiv << 16) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= 16;
+
+ return (unsigned long)fvco;
+}
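+
+/*
+ * The function above implements the fractional formula
+ * FOUT = FIN * (MDIV + KDIV / 65536) / (PDIV * 2^SDIV), with KDIV taken as
+ * a signed 16-bit value. Worked example with assumed dividers: FIN = 26 MHz,
+ * MDIV = 100, KDIV = 16384 (i.e. +0.25), PDIV = 2, SDIV = 1 gives
+ * 26 * 100.25 / 4 = 651.625 MHz; a negative KDIV tunes the rate just below
+ * the integer-MDIV value instead.
+ */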
+
+static int samsung_pll0831x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate)
+{
+ const struct samsung_pll_rate_table *rate;
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pll_con3, pll_con5;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ pll_con3 = readl_relaxed(pll->con_reg);
+ pll_con5 = readl_relaxed(pll->con_reg + 8);
+
+ /* Change PLL PMSK values */
+ pll_con3 &= ~((PLL0831X_MDIV_MASK << PLL0831X_MDIV_SHIFT) |
+ (PLL0831X_PDIV_MASK << PLL0831X_PDIV_SHIFT) |
+ (PLL0831X_SDIV_MASK << PLL0831X_SDIV_SHIFT));
+ pll_con3 |= (rate->mdiv << PLL0831X_MDIV_SHIFT) |
+ (rate->pdiv << PLL0831X_PDIV_SHIFT) |
+ (rate->sdiv << PLL0831X_SDIV_SHIFT);
+ pll_con5 &= ~(PLL0831X_KDIV_MASK << PLL0831X_KDIV_SHIFT);
+ /*
+ * kdiv is 16-bit 2's complement (s16), but stored as unsigned int.
+ * Cast it to u16 to avoid sign-extension (a leading 0xffff) when the
+ * value is negative.
+ */
+ pll_con5 |= ((u16)rate->kdiv << PLL0831X_KDIV_SHIFT);
+
+ /* Set PLL lock time */
+ writel_relaxed(rate->pdiv * PLL0831X_LOCK_FACTOR, pll->lock_reg);
+
+ /* Write PMSK values */
+ writel_relaxed(pll_con3, pll->con_reg);
+ writel_relaxed(pll_con5, pll->con_reg + 8);
+
+ /* Wait for PLL lock if the PLL is enabled */
+ if (pll_con3 & BIT(pll->enable_offs))
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
+
+ return 0;
+}
+
+static const struct clk_ops samsung_pll0831x_clk_ops = {
+ .recalc_rate = samsung_pll0831x_recalc_rate,
+ .set_rate = samsung_pll0831x_set_rate,
+ .round_rate = samsung_pll_round_rate,
+ .enable = samsung_pll3xxx_enable,
+ .disable = samsung_pll3xxx_disable,
+};
+
+static const struct clk_ops samsung_pll0831x_clk_min_ops = {
+ .recalc_rate = samsung_pll0831x_recalc_rate,
+};
+
+/*
* PLL45xx Clock Type
*/
#define PLL4502_LOCK_FACTOR 400
@@ -1296,6 +1476,14 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_pll35xx_clk_ops;
break;
+ case pll_0822x:
+ pll->enable_offs = PLL0822X_ENABLE_SHIFT;
+ pll->lock_offs = PLL0822X_LOCK_STAT_SHIFT;
+ if (!pll->rate_table)
+ init.ops = &samsung_pll0822x_clk_min_ops;
+ else
+ init.ops = &samsung_pll0822x_clk_ops;
+ break;
case pll_4500:
init.ops = &samsung_pll45xx_clk_min_ops;
break;
@@ -1316,6 +1504,14 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_pll36xx_clk_ops;
break;
+ case pll_0831x:
+ pll->enable_offs = PLL0831X_ENABLE_SHIFT;
+ pll->lock_offs = PLL0831X_LOCK_STAT_SHIFT;
+ if (!pll->rate_table)
+ init.ops = &samsung_pll0831x_clk_min_ops;
+ else
+ init.ops = &samsung_pll0831x_clk_ops;
+ break;
case pll_6552:
case pll_6552_s3c2416:
init.ops = &samsung_pll6552_clk_ops;
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 79e41c226b90..a739f2b7ae80 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -36,6 +36,8 @@ enum samsung_pll_type {
pll_1451x,
pll_1452x,
pll_1460x,
+ pll_0822x,
+ pll_0831x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index a7827a120695..b31c00ea331f 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -63,15 +63,13 @@ static struct syscore_ops s5pv210_audss_clk_syscore_ops = {
static int s5pv210_audss_clk_probe(struct platform_device *pdev)
{
int i, ret = 0;
- struct resource *res;
const char *mout_audss_p[2];
const char *mout_i2s_p[3];
const char *hclk_p;
struct clk_hw **clk_table;
struct clk *hclk, *pll_ref, *pll_in, *cdclk, *sclk_audio;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(&pdev->dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 1949ae7851b2..336243c6f120 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -378,6 +378,8 @@ struct samsung_clk_provider * __init samsung_cmu_register_one(
samsung_clk_extended_sleep_init(reg_base,
cmu->clk_regs, cmu->nr_clk_regs,
cmu->suspend_regs, cmu->nr_suspend_regs);
+ if (cmu->cpu_clks)
+ samsung_clk_register_cpu(ctx, cmu->cpu_clks, cmu->nr_cpu_clks);
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index c1e1a6b2f499..26499e97275b 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -271,6 +271,27 @@ struct samsung_pll_clock {
__PLL(_typ, _id, _name, _pname, CLK_GET_RATE_NOCACHE, _lock, \
_con, _rtable)
+struct samsung_cpu_clock {
+ unsigned int id;
+ const char *name;
+ unsigned int parent_id;
+ unsigned int alt_parent_id;
+ unsigned long flags;
+ int offset;
+ const struct exynos_cpuclk_cfg_data *cfg;
+};
+
+#define CPU_CLK(_id, _name, _pid, _apid, _flags, _offset, _cfg) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_id = _pid, \
+ .alt_parent_id = _apid, \
+ .flags = _flags, \
+ .offset = _offset, \
+ .cfg = _cfg, \
+ }
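+
+/*
+ * Example invocation (identifiers, flags and offset are illustrative only):
+ *
+ *	CPU_CLK(CLK_CPUCL0, "cpucl0", CLK_MOUT_PLL_CPUCL0, CLK_MOUT_BUS_USER,
+ *		0, 0x0, cpucl0_cfg_data)
+ */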
+
struct samsung_clock_reg_cache {
struct list_head node;
void __iomem *reg_base;
@@ -301,6 +322,9 @@ struct samsung_cmu_info {
unsigned int nr_fixed_factor_clks;
/* total number of clocks with IDs assigned*/
unsigned int nr_clk_ids;
+ /* list of cpu clocks and respective count */
+ const struct samsung_cpu_clock *cpu_clks;
+ unsigned int nr_cpu_clks;
/* list and number of clocks registers */
const unsigned long *clk_regs;
@@ -350,6 +374,8 @@ extern void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_list,
unsigned int nr_clk, void __iomem *base);
+extern void samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
+ const struct samsung_cpu_clock *list, unsigned int nr_clk);
extern struct samsung_clk_provider __init *samsung_cmu_register_one(
struct device_node *,
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index cd46d8853876..e76e1676f0f0 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -71,6 +71,7 @@ config SUN8I_A33_CCU
config SUN8I_A83T_CCU
bool "Support for the Allwinner A83T CCU"
default MACH_SUN8I
+ depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_H3_CCU
bool "Support for the Allwinner H3 CCU"
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index f32366d9336e..bd9a8782fec3 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -1464,7 +1464,7 @@ static void __init sun4i_ccu_init(struct device_node *node,
val &= ~GENMASK(7, 6);
writel(val | (2 << 6), reg + SUN4I_AHB_REG);
- sunxi_ccu_probe(node, reg, desc);
+ of_sunxi_ccu_probe(node, reg, desc);
}
static void __init sun4i_a10_ccu_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
index a56142b90993..804729e0a208 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
@@ -196,7 +196,7 @@ static int sun50i_a100_r_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a100_r_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a100_r_ccu_desc);
}
static const struct of_device_id sun50i_a100_r_ccu_ids[] = {
@@ -208,6 +208,7 @@ static struct platform_driver sun50i_a100_r_ccu_driver = {
.probe = sun50i_a100_r_ccu_probe,
.driver = {
.name = "sun50i-a100-r-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun50i_a100_r_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
index 81b48c73d389..1d475d5a3d91 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
@@ -1247,7 +1247,7 @@ static int sun50i_a100_ccu_probe(struct platform_device *pdev)
writel(val, reg + sun50i_a100_usb2_clk_regs[i]);
}
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a100_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a100_ccu_desc);
if (ret)
return ret;
@@ -1270,6 +1270,7 @@ static struct platform_driver sun50i_a100_ccu_driver = {
.probe = sun50i_a100_ccu_probe,
.driver = {
.name = "sun50i-a100-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun50i_a100_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 149cfde817cb..a8c5a92b7d0c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -938,13 +938,11 @@ static struct ccu_mux_nb sun50i_a64_cpu_nb = {
static int sun50i_a64_ccu_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *reg;
u32 val;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -955,7 +953,7 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a64_ccu_desc);
if (ret)
return ret;
@@ -978,6 +976,7 @@ static struct platform_driver sun50i_a64_ccu_driver = {
.probe = sun50i_a64_ccu_probe,
.driver = {
.name = "sun50i-a64-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun50i_a64_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index f8909a7ed553..f30d7eb5424d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -232,7 +232,7 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
return;
}
- sunxi_ccu_probe(node, reg, desc);
+ of_sunxi_ccu_probe(node, reg, desc);
}
static void __init sun50i_h6_r_ccu_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index bff446b78290..e5672c10d065 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -1183,13 +1183,11 @@ static const u32 usb2_clk_regs[] = {
static int sun50i_h6_ccu_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *reg;
u32 val;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -1240,7 +1238,7 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
val |= BIT(24);
writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG);
- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_h6_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc);
}
static const struct of_device_id sun50i_h6_ccu_ids[] = {
@@ -1252,6 +1250,7 @@ static struct platform_driver sun50i_h6_ccu_driver = {
.probe = sun50i_h6_ccu_probe,
.driver = {
.name = "sun50i-h6-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun50i_h6_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 225307305880..22eb18079a15 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -1141,9 +1141,7 @@ static void __init sun50i_h616_ccu_setup(struct device_node *node)
val |= BIT(24);
writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG);
- i = sunxi_ccu_probe(node, reg, &sun50i_h616_ccu_desc);
- if (i)
- pr_err("%pOF: probing clocks fails: %d\n", node, i);
+ of_sunxi_ccu_probe(node, reg, &sun50i_h616_ccu_desc);
}
CLK_OF_DECLARE(sun50i_h616_ccu, "allwinner,sun50i-h616-ccu",
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index b78e9b507c1c..1f4bc0e773a7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -1012,7 +1012,7 @@ static void __init sun5i_ccu_init(struct device_node *node,
val &= ~GENMASK(7, 6);
writel(val | (2 << 6), reg + SUN5I_AHB_REG);
- sunxi_ccu_probe(node, reg, desc);
+ of_sunxi_ccu_probe(node, reg, desc);
}
static void __init sun5i_a10s_ccu_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 9b40d53266a3..3df5c0b41580 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -1257,7 +1257,7 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
val |= 0x3 << 12;
writel(val, reg + SUN6I_A31_AHB1_REG);
- sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
+ of_sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
&sun6i_a31_cpu_nb);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 103aa504f6c8..577bb235d658 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -745,7 +745,7 @@ static void __init sun8i_a23_ccu_setup(struct device_node *node)
val &= ~BIT(16);
writel(val, reg + SUN8I_A23_PLL_MIPI_REG);
- sunxi_ccu_probe(node, reg, &sun8i_a23_ccu_desc);
+ of_sunxi_ccu_probe(node, reg, &sun8i_a23_ccu_desc);
}
CLK_OF_DECLARE(sun8i_a23_ccu, "allwinner,sun8i-a23-ccu",
sun8i_a23_ccu_setup);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 91838cd11037..8f65cd03f5ac 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -805,7 +805,7 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
val &= ~BIT(16);
writel(val, reg + SUN8I_A33_PLL_MIPI_REG);
- sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
+ of_sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 2b434521c5cc..3c310aea8cfa 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -887,12 +887,10 @@ static void sun8i_a83t_cpu_pll_fixup(void __iomem *reg)
static int sun8i_a83t_ccu_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *reg;
u32 val;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -906,7 +904,7 @@ static int sun8i_a83t_ccu_probe(struct platform_device *pdev)
sun8i_a83t_cpu_pll_fixup(reg + SUN8I_A83T_PLL_C0CPUX_REG);
sun8i_a83t_cpu_pll_fixup(reg + SUN8I_A83T_PLL_C1CPUX_REG);
- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun8i_a83t_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun8i_a83t_ccu_desc);
}
static const struct of_device_id sun8i_a83t_ccu_ids[] = {
@@ -918,6 +916,7 @@ static struct platform_driver sun8i_a83t_ccu_driver = {
.probe = sun8i_a83t_ccu_probe,
.driver = {
.name = "sun8i-a83t-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun8i_a83t_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 524f33275bc7..573b5051d305 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -280,7 +280,6 @@ static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
static int sunxi_de2_clk_probe(struct platform_device *pdev)
{
- struct resource *res;
struct clk *bus_clk, *mod_clk;
struct reset_control *rstc;
void __iomem *reg;
@@ -291,8 +290,7 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
if (!ccu_desc)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -342,7 +340,7 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
goto err_disable_mod_clk;
}
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, ccu_desc);
if (ret)
goto err_assert_reset;
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 7e629a4493af..d2fc2903787d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -1154,7 +1154,7 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
val &= ~GENMASK(19, 16);
writel(val | (0 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
- sunxi_ccu_probe(node, reg, desc);
+ of_sunxi_ccu_probe(node, reg, desc);
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun8i_h3_pll_cpu_nb);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 4c8c491b87c2..9e754d1f754a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -265,7 +265,7 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
return;
}
- sunxi_ccu_probe(node, reg, desc);
+ of_sunxi_ccu_probe(node, reg, desc);
}
static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 84153418453f..8bb18d9add05 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -1307,14 +1307,12 @@ static struct regmap_config sun8i_r40_ccu_regmap_config = {
static int sun8i_r40_ccu_probe(struct platform_device *pdev)
{
- struct resource *res;
struct regmap *regmap;
void __iomem *reg;
u32 val;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -1346,7 +1344,7 @@ static int sun8i_r40_ccu_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun8i_r40_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun8i_r40_ccu_desc);
if (ret)
return ret;
@@ -1369,6 +1367,7 @@ static struct platform_driver sun8i_r40_ccu_driver = {
.probe = sun8i_r40_ccu_probe,
.driver = {
.name = "sun8i-r40-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun8i_r40_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index f49724a22540..ce150f83ab54 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -822,7 +822,7 @@ static void __init sun8i_v3_v3s_ccu_init(struct device_node *node,
val &= ~GENMASK(19, 16);
writel(val, reg + SUN8I_V3S_PLL_AUDIO_REG);
- sunxi_ccu_probe(node, reg, ccu_desc);
+ of_sunxi_ccu_probe(node, reg, ccu_desc);
}
static void __init sun8i_v3s_ccu_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
index 6616e8114f62..3cde2610f467 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
@@ -203,14 +203,12 @@ static const struct sunxi_ccu_desc sun9i_a80_de_clk_desc = {
static int sun9i_a80_de_clk_probe(struct platform_device *pdev)
{
- struct resource *res;
struct clk *bus_clk;
struct reset_control *rstc;
void __iomem *reg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -246,8 +244,7 @@ static int sun9i_a80_de_clk_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg,
- &sun9i_a80_de_clk_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun9i_a80_de_clk_desc);
if (ret)
goto err_assert_reset;
@@ -269,6 +266,7 @@ static struct platform_driver sun9i_a80_de_clk_driver = {
.probe = sun9i_a80_de_clk_probe,
.driver = {
.name = "sun9i-a80-de-clks",
+ .suppress_bind_attrs = true,
.of_match_table = sun9i_a80_de_clk_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
index 4b4a507d04ed..0740e8978ae8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
@@ -92,13 +92,11 @@ static const struct sunxi_ccu_desc sun9i_a80_usb_clk_desc = {
static int sun9i_a80_usb_clk_probe(struct platform_device *pdev)
{
- struct resource *res;
struct clk *bus_clk;
void __iomem *reg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -117,8 +115,7 @@ static int sun9i_a80_usb_clk_probe(struct platform_device *pdev)
return ret;
}
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg,
- &sun9i_a80_usb_clk_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun9i_a80_usb_clk_desc);
if (ret)
goto err_disable_clk;
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index ef29582676f6..d416af29e0d3 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -1213,12 +1213,10 @@ static void sun9i_a80_cpu_pll_fixup(void __iomem *reg)
static int sun9i_a80_ccu_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *reg;
u32 val;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -1231,7 +1229,7 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev)
sun9i_a80_cpu_pll_fixup(reg + SUN9I_A80_PLL_C0CPUX_REG);
sun9i_a80_cpu_pll_fixup(reg + SUN9I_A80_PLL_C1CPUX_REG);
- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun9i_a80_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun9i_a80_ccu_desc);
}
static const struct of_device_id sun9i_a80_ccu_ids[] = {
@@ -1243,6 +1241,7 @@ static struct platform_driver sun9i_a80_ccu_driver = {
.probe = sun9i_a80_ccu_probe,
.driver = {
.name = "sun9i-a80-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun9i_a80_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index 7ecc3a5a5b5e..61ad7ee91c11 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -538,7 +538,7 @@ static void __init suniv_f1c100s_ccu_setup(struct device_node *node)
val &= ~GENMASK(19, 16);
writel(val | (3 << 16), reg + SUNIV_PLL_AUDIO_REG);
- sunxi_ccu_probe(node, reg, &suniv_ccu_desc);
+ of_sunxi_ccu_probe(node, reg, &suniv_ccu_desc);
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&suniv_pll_cpu_nb);
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 2e20e650b6c0..31af8b6b5286 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
@@ -14,7 +15,11 @@
#include "ccu_gate.h"
#include "ccu_reset.h"
-static DEFINE_SPINLOCK(ccu_lock);
+struct sunxi_ccu {
+ const struct sunxi_ccu_desc *desc;
+ spinlock_t lock;
+ struct ccu_reset reset;
+};
void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
{
@@ -79,12 +84,17 @@ int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
&pll_nb->clk_nb);
}
-int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
- const struct sunxi_ccu_desc *desc)
+static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
+ struct device_node *node, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc)
{
struct ccu_reset *reset;
int i, ret;
+ ccu->desc = desc;
+
+ spin_lock_init(&ccu->lock);
+
for (i = 0; i < desc->num_ccu_clks; i++) {
struct ccu_common *cclk = desc->ccu_clks[i];
@@ -92,7 +102,7 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
continue;
cclk->base = reg;
- cclk->lock = &ccu_lock;
+ cclk->lock = &ccu->lock;
}
for (i = 0; i < desc->hw_clks->num ; i++) {
@@ -103,7 +113,10 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
continue;
name = hw->init->name;
- ret = of_clk_hw_register(node, hw);
+ if (dev)
+ ret = clk_hw_register(dev, hw);
+ else
+ ret = of_clk_hw_register(node, hw);
if (ret) {
pr_err("Couldn't register clock %d - %s\n", i, name);
goto err_clk_unreg;
@@ -115,29 +128,22 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
if (ret)
goto err_clk_unreg;
- reset = kzalloc(sizeof(*reset), GFP_KERNEL);
- if (!reset) {
- ret = -ENOMEM;
- goto err_alloc_reset;
- }
-
+ reset = &ccu->reset;
reset->rcdev.of_node = node;
reset->rcdev.ops = &ccu_reset_ops;
- reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.owner = dev ? dev->driver->owner : THIS_MODULE;
reset->rcdev.nr_resets = desc->num_resets;
reset->base = reg;
- reset->lock = &ccu_lock;
+ reset->lock = &ccu->lock;
reset->reset_map = desc->resets;
ret = reset_controller_register(&reset->rcdev);
if (ret)
- goto err_of_clk_unreg;
+ goto err_del_provider;
return 0;
-err_of_clk_unreg:
- kfree(reset);
-err_alloc_reset:
+err_del_provider:
of_clk_del_provider(node);
err_clk_unreg:
while (--i >= 0) {
@@ -149,3 +155,59 @@ err_clk_unreg:
}
return ret;
}
+
+static void devm_sunxi_ccu_release(struct device *dev, void *res)
+{
+ struct sunxi_ccu *ccu = res;
+ const struct sunxi_ccu_desc *desc = ccu->desc;
+ int i;
+
+ reset_controller_unregister(&ccu->reset.rcdev);
+ of_clk_del_provider(dev->of_node);
+
+ for (i = 0; i < desc->hw_clks->num; i++) {
+ struct clk_hw *hw = desc->hw_clks->hws[i];
+
+ if (!hw)
+ continue;
+ clk_hw_unregister(hw);
+ }
+}
+
+int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc)
+{
+ struct sunxi_ccu *ccu;
+ int ret;
+
+ ccu = devres_alloc(devm_sunxi_ccu_release, sizeof(*ccu), GFP_KERNEL);
+ if (!ccu)
+ return -ENOMEM;
+
+ ret = sunxi_ccu_probe(ccu, dev, dev->of_node, reg, desc);
+ if (ret) {
+ devres_free(ccu);
+ return ret;
+ }
+
+ devres_add(dev, ccu);
+
+ return 0;
+}
+
+void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc)
+{
+ struct sunxi_ccu *ccu;
+ int ret;
+
+ ccu = kzalloc(sizeof(*ccu), GFP_KERNEL);
+ if (!ccu)
+ return;
+
+ ret = sunxi_ccu_probe(ccu, NULL, node, reg, desc);
+ if (ret) {
+ pr_err("%pOF: probing clocks failed: %d\n", node, ret);
+ kfree(ccu);
+ }
+}
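
devm_sunxi_ccu_probe() above is a textbook devres wrapper: devres_alloc() returns a zeroed container whose release callback runs automatically when the device unbinds, devres_free() is the escape hatch when setup fails before anything was registered, and devres_add() arms the callback. The pattern in general form, with hypothetical foo_setup()/foo_teardown() helpers standing in for the CCU specifics:

	static void devm_foo_release(struct device *dev, void *res)
	{
		struct foo *f = res;

		foo_teardown(f);	/* undo the setup; devres frees the memory */
	}

	int devm_foo_probe(struct device *dev, void __iomem *reg)
	{
		struct foo *f;
		int ret;

		f = devres_alloc(devm_foo_release, sizeof(*f), GFP_KERNEL);
		if (!f)
			return -ENOMEM;

		ret = foo_setup(f, reg);
		if (ret) {
			devres_free(f);	/* nothing registered yet: plain free */
			return ret;
		}

		devres_add(dev, f);	/* release callback fires on unbind */
		return 0;
	}

The of_sunxi_ccu_probe() variant stays void because its CLK_OF_DECLARE-era callers (such as the suniv setup above) run before any struct device exists and have nowhere to propagate an error; it logs and frees on failure instead.
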
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index 04e7a12200a2..98a1834b58bb 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -63,7 +63,9 @@ struct ccu_pll_nb {
int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb);
-int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
- const struct sunxi_ccu_desc *desc);
+int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc);
+void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc);
#endif /* _COMMON_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mux.h b/drivers/clk/sunxi-ng/ccu_mux.h
index f165395effb5..e31efc509b3d 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.h
+++ b/drivers/clk/sunxi-ng/ccu_mux.h
@@ -40,7 +40,6 @@ struct ccu_mux_internal {
_SUNXI_CCU_MUX_TABLE(_shift, _width, NULL)
struct ccu_mux {
- u16 reg;
u32 enable;
struct ccu_mux_internal mux;
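
The u16 reg dropped from struct ccu_mux appears to have been redundant: going by the mainline layout, the embedded struct ccu_common already carries the register offset that the mux helpers actually use. Rough shape after the change (field list abridged):

	struct ccu_mux {
		u32			enable;

		struct ccu_mux_internal	mux;
		struct ccu_common	common;	/* provides ->reg, ->base, ->lock */
	};
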
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index f9d715ec9908..51800289ada9 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -88,14 +88,12 @@ CLK_OF_DECLARE_DRIVER(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk",
static int sun4i_a10_mod0_clk_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *r;
void __iomem *reg;
if (!np)
return -ENODEV;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, r);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index 4c75b0770c74..e4cf1180b088 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -40,7 +40,6 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
const struct gates_data *data;
const char *clk_parent;
const char *clk_name;
- struct resource *r;
void __iomem *reg;
int ngates;
int i;
@@ -53,8 +52,7 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
if (!data)
return -ENODEV;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, r);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0.c b/drivers/clk/sunxi/clk-sun6i-apb0.c
index 10f70c35c265..f80c67bafe38 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0.c
@@ -32,12 +32,10 @@ static int sun6i_a31_apb0_clk_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const char *clk_name = np->name;
const char *clk_parent;
- struct resource *r;
void __iomem *reg;
struct clk *clk;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, r);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 54babc2b4b9e..9f9a2cf54f41 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -71,12 +71,10 @@ static DEFINE_SPINLOCK(sun6i_ar100_lock);
static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *r;
void __iomem *reg;
struct clk *clk;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, r);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index fc5d6e3b77d1..f605ecca879f 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -87,12 +87,10 @@ CLK_OF_DECLARE_DRIVER(sun8i_a23_apb0, "allwinner,sun8i-a23-apb0-clk",
static int sun8i_a23_apb0_clk_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *r;
void __iomem *reg;
struct clk *clk;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, r);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile
index 53fd29002401..c29b83df403e 100644
--- a/drivers/clk/ux500/Makefile
+++ b/drivers/clk/ux500/Makefile
@@ -8,6 +8,9 @@ obj-y += clk-prcc.o
obj-y += clk-prcmu.o
obj-y += clk-sysctrl.o
+# Reset control
+obj-y += reset-prcc.o
+
# Clock definitions
obj-y += u8500_of_clk.o
diff --git a/drivers/clk/ux500/prcc.h b/drivers/clk/ux500/prcc.h
new file mode 100644
index 000000000000..5b6774d79506
--- /dev/null
+++ b/drivers/clk/ux500/prcc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PRCC_H
+#define __PRCC_H
+
+#define PRCC_NUM_PERIPH_CLUSTERS 6
+#define PRCC_PERIPHS_PER_CLUSTER 32
+
+/* CLKRST4 is missing, making it hard to index things */
+enum clkrst_index {
+ CLKRST1_INDEX = 0,
+ CLKRST2_INDEX,
+ CLKRST3_INDEX,
+ CLKRST5_INDEX,
+ CLKRST6_INDEX,
+ CLKRST_MAX,
+};
+
+#endif
diff --git a/drivers/clk/ux500/reset-prcc.c b/drivers/clk/ux500/reset-prcc.c
new file mode 100644
index 000000000000..fcd5d042806a
--- /dev/null
+++ b/drivers/clk/ux500/reset-prcc.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Reset controller portions for the U8500 PRCC
+ * Copyright (C) 2021 Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/reset-controller.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+
+#include "prcc.h"
+#include "reset-prcc.h"
+
+#define to_u8500_prcc_reset(p) container_of((p), struct u8500_prcc_reset, rcdev)
+
+/* This macro flattens the 2-dimensional PRCC numberspace */
+#define PRCC_RESET_LINE(prcc_num, bit) \
+ (((prcc_num) * PRCC_PERIPHS_PER_CLUSTER) + (bit))
+
+/*
+ * Reset registers in each PRCC - the reset lines are active low.
+ * To put a peripheral into reset, write its bit to the CLEAR
+ * register; this asserts the reset by pulling the line low. SET
+ * takes the device out of reset. The status register reflects the
+ * actual state of the line.
+ */
+#define PRCC_K_SOFTRST_SET 0x018
+#define PRCC_K_SOFTRST_CLEAR 0x01c
+#define PRCC_K_RST_STATUS 0x020
+
+static int prcc_num_to_index(unsigned int num)
+{
+ switch (num) {
+ case 1:
+ return CLKRST1_INDEX;
+ case 2:
+ return CLKRST2_INDEX;
+ case 3:
+ return CLKRST3_INDEX;
+ case 5:
+ return CLKRST5_INDEX;
+ case 6:
+ return CLKRST6_INDEX;
+ }
+ return -EINVAL;
+}
+
+static void __iomem *u8500_prcc_reset_base(struct u8500_prcc_reset *ur,
+ unsigned long id)
+{
+ unsigned int prcc_num, index;
+
+ prcc_num = id / PRCC_PERIPHS_PER_CLUSTER;
+ index = prcc_num_to_index(prcc_num);
+
+ if (index >= ARRAY_SIZE(ur->base))
+ return NULL;
+
+ return ur->base[index];
+}
+
+static int u8500_prcc_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct u8500_prcc_reset *ur = to_u8500_prcc_reset(rcdev);
+ void __iomem *base = u8500_prcc_reset_base(ur, id);
+ unsigned int bit = id % PRCC_PERIPHS_PER_CLUSTER;
+
+ pr_debug("PRCC cycle reset id %lu, bit %u\n", id, bit);
+
+ /*
+ * Assert reset and then release it. The one microsecond
+ * delay is found in the vendor reference code.
+ */
+ writel(BIT(bit), base + PRCC_K_SOFTRST_CLEAR);
+ udelay(1);
+ writel(BIT(bit), base + PRCC_K_SOFTRST_SET);
+ udelay(1);
+
+ return 0;
+}
+
+static int u8500_prcc_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct u8500_prcc_reset *ur = to_u8500_prcc_reset(rcdev);
+ void __iomem *base = u8500_prcc_reset_base(ur, id);
+ unsigned int bit = id % PRCC_PERIPHS_PER_CLUSTER;
+
+ pr_debug("PRCC assert reset id %lu, bit %u\n", id, bit);
+ writel(BIT(bit), base + PRCC_K_SOFTRST_CLEAR);
+
+ return 0;
+}
+
+static int u8500_prcc_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct u8500_prcc_reset *ur = to_u8500_prcc_reset(rcdev);
+ void __iomem *base = u8500_prcc_reset_base(ur, id);
+ unsigned int bit = id % PRCC_PERIPHS_PER_CLUSTER;
+
+ pr_debug("PRCC deassert reset id %lu, bit %u\n", id, bit);
+ writel(BIT(bit), base + PRCC_K_SOFTRST_SET);
+
+ return 0;
+}
+
+static int u8500_prcc_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct u8500_prcc_reset *ur = to_u8500_prcc_reset(rcdev);
+ void __iomem *base = u8500_prcc_reset_base(ur, id);
+ unsigned int bit = id % PRCC_PERIPHS_PER_CLUSTER;
+ u32 val;
+
+ pr_debug("PRCC check status on reset line id %lu, bit %u\n", id, bit);
+ val = readl(base + PRCC_K_RST_STATUS);
+
+ /* Active low so return the inverse value of the bit */
+ return !(val & BIT(bit));
+}
+
+static const struct reset_control_ops u8500_prcc_reset_ops = {
+ .reset = u8500_prcc_reset,
+ .assert = u8500_prcc_reset_assert,
+ .deassert = u8500_prcc_reset_deassert,
+ .status = u8500_prcc_reset_status,
+};
+
+static int u8500_prcc_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ unsigned int prcc_num, bit;
+
+ if (reset_spec->args_count != 2)
+ return -EINVAL;
+
+ prcc_num = reset_spec->args[0];
+ bit = reset_spec->args[1];
+
+ if (prcc_num != 1 && prcc_num != 2 && prcc_num != 3 &&
+ prcc_num != 5 && prcc_num != 6) {
+ pr_err("%s: invalid PRCC %d\n", __func__, prcc_num);
+ return -EINVAL;
+ }
+
+ pr_debug("located reset line %d at PRCC %d bit %d\n",
+ PRCC_RESET_LINE(prcc_num, bit), prcc_num, bit);
+
+ return PRCC_RESET_LINE(prcc_num, bit);
+}
+
+void u8500_prcc_reset_init(struct device_node *np, struct u8500_prcc_reset *ur)
+{
+ struct reset_controller_dev *rcdev = &ur->rcdev;
+ int ret;
+ int i;
+
+ for (i = 0; i < CLKRST_MAX; i++) {
+ ur->base[i] = ioremap(ur->phy_base[i], SZ_4K);
+ if (!ur->base[i])
+ pr_err("PRCC failed to remap for reset base %d (%08x)\n",
+ i, ur->phy_base[i]);
+ }
+
+ rcdev->owner = THIS_MODULE;
+ rcdev->ops = &u8500_prcc_reset_ops;
+ rcdev->of_node = np;
+ rcdev->of_reset_n_cells = 2;
+ rcdev->of_xlate = u8500_prcc_reset_xlate;
+
+ ret = reset_controller_register(rcdev);
+ if (ret)
+ pr_err("PRCC failed to register reset controller\n");
+}
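
The xlate callback flattens the two-cell specifier into one line number, id = prcc_num * 32 + bit, and the runtime helpers invert that mapping. A worked example with the macros above:

	unsigned long id = PRCC_RESET_LINE(3, 5);		/* 3 * 32 + 5 = 101 */
	unsigned int prcc_num = id / PRCC_PERIPHS_PER_CLUSTER;	/* back to 3 */
	unsigned int bit = id % PRCC_PERIPHS_PER_CLUSTER;	/* back to 5 */

prcc_num_to_index() then compresses the sparse cluster numbers {1, 2, 3, 5, 6} into the dense 0..4 range used to index base[], since CLKRST4 does not exist on this SoC.
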
diff --git a/drivers/clk/ux500/reset-prcc.h b/drivers/clk/ux500/reset-prcc.h
new file mode 100644
index 000000000000..353c9719f2e6
--- /dev/null
+++ b/drivers/clk/ux500/reset-prcc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __RESET_PRCC_H
+#define __RESET_PRCC_H
+
+#include <linux/reset-controller.h>
+#include <linux/io.h>
+
+/**
+ * struct u8500_prcc_reset - U8500 PRCC reset controller state
+ * @rcdev: reset controller device
+ * @phy_base: the physical base address for each PRCC block
+ * @base: the remapped PRCC bases
+ */
+struct u8500_prcc_reset {
+ struct reset_controller_dev rcdev;
+ u32 phy_base[CLKRST_MAX];
+ void __iomem *base[CLKRST_MAX];
+};
+
+void u8500_prcc_reset_init(struct device_node *np, struct u8500_prcc_reset *ur);
+
+#endif
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index 528c5bb397cc..e86ed2eec3fd 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -10,10 +10,10 @@
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
-#include "clk.h"
-#define PRCC_NUM_PERIPH_CLUSTERS 6
-#define PRCC_PERIPHS_PER_CLUSTER 32
+#include "clk.h"
+#include "prcc.h"
+#include "reset-prcc.h"
static struct clk *prcmu_clk[PRCMU_NUM_CLKS];
static struct clk *prcc_pclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_CLUSTER];
@@ -46,16 +46,6 @@ static struct clk *ux500_twocell_get(struct of_phandle_args *clkspec,
return PRCC_SHOW(clk_data, base, bit);
}
-/* CLKRST4 is missing making it hard to index things */
-enum clkrst_index {
- CLKRST1_INDEX = 0,
- CLKRST2_INDEX,
- CLKRST3_INDEX,
- CLKRST5_INDEX,
- CLKRST6_INDEX,
- CLKRST_MAX,
-};
-
static void u8500_clk_init(struct device_node *np)
{
struct prcmu_fw_version *fw_version;
@@ -63,8 +53,18 @@ static void u8500_clk_init(struct device_node *np)
const char *sgaclk_parent = NULL;
struct clk *clk, *rtc_clk, *twd_clk;
u32 bases[CLKRST_MAX];
+ struct u8500_prcc_reset *rstc;
int i;
+ /*
+ * We allocate the reset controller here so that we can fill in the
+ * base addresses properly and pass it to the reset controller init
+ * function later on.
+ */
+ rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
+ if (!rstc)
+ return;
+
for (i = 0; i < ARRAY_SIZE(bases); i++) {
struct resource r;
@@ -73,6 +73,7 @@ static void u8500_clk_init(struct device_node *np)
pr_err("failed to get CLKRST %d base address\n",
i + 1);
bases[i] = r.start;
+ rstc->phy_base[i] = r.start;
}
/* Clock sources */
@@ -563,6 +564,9 @@ static void u8500_clk_init(struct device_node *np)
if (of_node_name_eq(child, "smp-twd-clock"))
of_clk_add_provider(child, of_clk_src_simple_get, twd_clk);
+
+ if (of_node_name_eq(child, "prcc-reset-controller"))
+ u8500_prcc_reset_init(child, rstc);
}
}
CLK_OF_DECLARE(u8500_clks, "stericsson,u8500-clks", u8500_clk_init);
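
From a consumer's point of view all of this surfaces through the generic reset API: the two-cell (PRCC number, bit) specifier comes from the device tree and lands in u8500_prcc_reset_xlate(). A hedged sketch of a hypothetical peripheral driver using one of these lines:

	#include <linux/reset.h>

	static int foo_periph_probe(struct platform_device *pdev)
	{
		struct reset_control *rst;

		rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* pulses the active-low PRCC soft reset: CLEAR, 1 us, SET */
		return reset_control_reset(rst);
	}
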
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
index 481de5657d85..403f164954c2 100644
--- a/drivers/clk/versatile/Kconfig
+++ b/drivers/clk/versatile/Kconfig
@@ -2,8 +2,9 @@
menu "Clock driver for ARM Reference designs"
depends on HAS_IOMEM
+ depends on ARM || ARM64 || COMPILE_TEST
-config ICST
+config CLK_ICST
bool "Clock driver for ARM Reference designs ICST"
select REGMAP_MMIO
help
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
index 4ff563e6e3a0..e7d05308e4f0 100644
--- a/drivers/clk/versatile/Makefile
+++ b/drivers/clk/versatile/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
# Makefile for Versatile-specific clocks
-obj-$(CONFIG_ICST) += icst.o clk-icst.o clk-versatile.o
+obj-$(CONFIG_CLK_ICST) += icst.o clk-icst.o clk-versatile.o
obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o
obj-$(CONFIG_CLK_SP810) += clk-sp810.o
obj-$(CONFIG_CLK_VEXPRESS_OSC) += clk-vexpress-osc.o
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index fdd6aa3cb1fc..77fd0ecaf155 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -501,7 +501,8 @@ static void __init of_syscon_icst_setup(struct device_node *np)
return;
}
- if (of_property_read_u32(np, "vco-offset", &icst_desc.vco_offset)) {
+ if (of_property_read_u32(np, "reg", &icst_desc.vco_offset) &&
+ of_property_read_u32(np, "vco-offset", &icst_desc.vco_offset)) {
pr_err("no VCO register offset for ICST clock\n");
return;
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 0f5e3983951a..f65e31bab9ae 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -24,6 +24,7 @@ config I8253_LOCK
config OMAP_DM_TIMER
bool
+ select TIMER_OF
config CLKBLD_I8253
def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
@@ -418,12 +419,14 @@ config ATMEL_TCB_CLKSRC
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
+ depends on ARCH_EXYNOS || COMPILE_TEST
help
Support for Multi Core Timer controller on Exynos SoCs.
config CLKSRC_SAMSUNG_PWM
bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
depends on HAS_IOMEM
+ depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 || COMPILE_TEST
help
This is a new clocksource driver for the PWM timer found in
Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index de93dd1a8c7b..cb18524cc13d 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -225,7 +225,7 @@ static int __init arc_cs_setup_timer1(struct device_node *node)
write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
- write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+ write_aux_reg(ARC_REG_TIMER1_CTRL, ARC_TIMER_CTRL_NH);
sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
@@ -245,7 +245,7 @@ static void arc_timer_event_setup(unsigned int cycles)
write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
- write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+ write_aux_reg(ARC_REG_TIMER0_CTRL, ARC_TIMER_CTRL_IE | ARC_TIMER_CTRL_NH);
}
@@ -294,7 +294,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
* explicitly clears IP bit
* 2. Re-arm interrupt if periodic by writing to IE bit [0]
*/
- write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+ write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | ARC_TIMER_CTRL_NH);
evt->event_handler(evt);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index be6d741d404c..9a04eacc4412 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -44,23 +44,29 @@
#define CNTACR_RWVT BIT(4)
#define CNTACR_RWPT BIT(5)
-#define CNTVCT_LO 0x08
-#define CNTVCT_HI 0x0c
+#define CNTVCT_LO 0x00
+#define CNTPCT_LO 0x08
#define CNTFRQ 0x10
-#define CNTP_TVAL 0x28
+#define CNTP_CVAL_LO 0x20
#define CNTP_CTL 0x2c
-#define CNTV_TVAL 0x38
+#define CNTV_CVAL_LO 0x30
#define CNTV_CTL 0x3c
-static unsigned arch_timers_present __initdata;
+/*
+ * The minimum amount of time a generic counter is guaranteed to not roll over
+ * (40 years)
+ */
+#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)
-static void __iomem *arch_counter_base __ro_after_init;
+static unsigned arch_timers_present __initdata;
struct arch_timer {
void __iomem *base;
struct clock_event_device evt;
};
+static struct arch_timer *arch_timer_mem __ro_after_init;
+
#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
static u32 arch_timer_rate __ro_after_init;
@@ -96,32 +102,57 @@ static int __init early_evtstrm_cfg(char *buf)
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
/*
+ * Makes an educated guess at a valid counter width based on the Generic Timer
+ * specification. Of note:
+ * 1) the system counter is at least 56 bits wide
+ * 2) a roll-over time of not less than 40 years
+ *
+ * See 'ARM DDI 0487G.a D11.1.2 ("The system counter")' for more details.
+ */
+static int arch_counter_get_width(void)
+{
+ u64 min_cycles = MIN_ROLLOVER_SECS * arch_timer_rate;
+
+ /* guarantee the returned width is within the valid range */
+ return clamp_val(ilog2(min_cycles - 1) + 1, 56, 64);
+}
+
+/*
* Architected system timer support.
*/
static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
+void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
struct clock_event_device *clk)
{
if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- writel_relaxed(val, timer->base + CNTP_CTL);
+ writel_relaxed((u32)val, timer->base + CNTP_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- writel_relaxed(val, timer->base + CNTP_TVAL);
+ case ARCH_TIMER_REG_CVAL:
+ /*
+ * Not guaranteed to be atomic, so the timer
+ * must be disabled at this point.
+ */
+ writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- writel_relaxed(val, timer->base + CNTV_CTL);
+ writel_relaxed((u32)val, timer->base + CNTV_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- writel_relaxed(val, timer->base + CNTV_TVAL);
+ case ARCH_TIMER_REG_CVAL:
+ /* Same restriction as above */
+ writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
break;
+ default:
+ BUILD_BUG();
}
} else {
arch_timer_reg_write_cp15(access, reg, val);
@@ -140,9 +171,8 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
case ARCH_TIMER_REG_CTRL:
val = readl_relaxed(timer->base + CNTP_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- val = readl_relaxed(timer->base + CNTP_TVAL);
- break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
@@ -150,9 +180,8 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
case ARCH_TIMER_REG_CTRL:
val = readl_relaxed(timer->base + CNTV_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- val = readl_relaxed(timer->base + CNTV_TVAL);
- break;
+ default:
+ BUILD_BUG();
}
} else {
val = arch_timer_reg_read_cp15(access, reg);
@@ -205,13 +234,11 @@ static struct clocksource clocksource_counter = {
.id = CSID_ARM_ARCH_COUNTER,
.rating = 400,
.read = arch_counter_read,
- .mask = CLOCKSOURCE_MASK(56),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct cyclecounter cyclecounter __ro_after_init = {
.read = arch_counter_read_cc,
- .mask = CLOCKSOURCE_MASK(56),
};
struct ate_acpi_oem_info {
@@ -239,16 +266,6 @@ struct ate_acpi_oem_info {
_new; \
})
-static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
-{
- return __fsl_a008585_read_reg(cntp_tval_el0);
-}
-
-static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
-{
- return __fsl_a008585_read_reg(cntv_tval_el0);
-}
-
static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
return __fsl_a008585_read_reg(cntpct_el0);
@@ -285,16 +302,6 @@ static u64 notrace fsl_a008585_read_cntvct_el0(void)
_new; \
})
-static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
-{
- return __hisi_161010101_read_reg(cntp_tval_el0);
-}
-
-static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
-{
- return __hisi_161010101_read_reg(cntv_tval_el0);
-}
-
static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
return __hisi_161010101_read_reg(cntpct_el0);
@@ -379,16 +386,6 @@ static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
return __sun50i_a64_read_reg(cntvct_el0);
}
-
-static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
-{
- return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
-}
-
-static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
-{
- return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
-}
#endif
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
@@ -397,7 +394,7 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
-static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
+static void erratum_set_next_event_generic(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
@@ -418,17 +415,17 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
-static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
+static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
- erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
+ erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
-static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
+static __maybe_unused int erratum_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
- erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
+ erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
@@ -438,12 +435,10 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_dt,
.id = "fsl,erratum-a008585",
.desc = "Freescale erratum a005858",
- .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
- .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
@@ -451,23 +446,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_dt,
.id = "hisilicon,erratum-161010101",
.desc = "HiSilicon erratum 161010101",
- .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
- .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
{
.match_type = ate_match_acpi_oem_info,
.id = hisi_161010101_oem_info,
.desc = "HiSilicon erratum 161010101",
- .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
- .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
@@ -484,12 +475,10 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_dt,
.id = "allwinner,erratum-unknown1",
.desc = "Allwinner erratum UNKNOWN1",
- .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
- .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1418040
@@ -727,10 +716,18 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
+ u64 cnt;
+
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
+
+ if (access == ARCH_TIMER_PHYS_ACCESS)
+ cnt = __arch_counter_get_cntpct();
+ else
+ cnt = __arch_counter_get_cntvct();
+
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
@@ -748,23 +745,79 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
return 0;
}
+static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
+{
+ u32 cnt_lo, cnt_hi, tmp_hi;
+
+ do {
+ cnt_hi = readl_relaxed(t->base + offset_lo + 4);
+ cnt_lo = readl_relaxed(t->base + offset_lo);
+ tmp_hi = readl_relaxed(t->base + offset_lo + 4);
+ } while (cnt_hi != tmp_hi);
+
+ return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static __always_inline void set_next_event_mem(const int access, unsigned long evt,
+ struct clock_event_device *clk)
+{
+ struct arch_timer *timer = to_arch_timer(clk);
+ unsigned long ctrl;
+ u64 cnt;
+
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+ if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
+ cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
+ else
+ cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
+
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+}
+
static int arch_timer_set_next_event_virt_mem(unsigned long evt,
struct clock_event_device *clk)
{
- set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
+ set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
return 0;
}
static int arch_timer_set_next_event_phys_mem(unsigned long evt,
struct clock_event_device *clk)
{
- set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
+ set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
return 0;
}
+static u64 __arch_timer_check_delta(void)
+{
+#ifdef CONFIG_ARM64
+ const struct midr_range broken_cval_midrs[] = {
+ /*
+ * XGene-1 implements CVAL in terms of TVAL, meaning
+ * that the maximum timer range is 32bit. Shame on them.
+ */
+ MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
+ APM_CPU_PART_POTENZA)),
+ {},
+ };
+
+ if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+ pr_warn_once("Broken CNTx_CVAL_EL1, limiting width to 32bits");
+ return CLOCKSOURCE_MASK(32);
+ }
+#endif
+ return CLOCKSOURCE_MASK(arch_counter_get_width());
+}
+
static void __arch_timer_setup(unsigned type,
struct clock_event_device *clk)
{
+ u64 max_delta;
+
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_TIMER_TYPE_CP15) {
@@ -796,6 +849,7 @@ static void __arch_timer_setup(unsigned type,
}
clk->set_next_event = sne;
+ max_delta = __arch_timer_check_delta();
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
@@ -812,11 +866,13 @@ static void __arch_timer_setup(unsigned type,
clk->set_next_event =
arch_timer_set_next_event_phys_mem;
}
+
+ max_delta = CLOCKSOURCE_MASK(56);
}
clk->set_state_shutdown(clk);
- clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
+ clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
}
static void arch_timer_evtstrm_enable(int divider)
@@ -986,15 +1042,7 @@ bool arch_timer_evtstrm_available(void)
static u64 arch_counter_get_cntvct_mem(void)
{
- u32 vct_lo, vct_hi, tmp_hi;
-
- do {
- vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
- vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
- tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
- } while (vct_hi != tmp_hi);
-
- return ((u64) vct_hi << 32) | vct_lo;
+ return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
}
static struct arch_timer_kvm_info arch_timer_kvm_info;
@@ -1007,6 +1055,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
static void __init arch_counter_register(unsigned type)
{
u64 start_count;
+ int width;
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
@@ -1031,6 +1080,10 @@ static void __init arch_counter_register(unsigned type)
arch_timer_read_counter = arch_counter_get_cntvct_mem;
}
+ width = arch_counter_get_width();
+ clocksource_counter.mask = CLOCKSOURCE_MASK(width);
+ cyclecounter.mask = CLOCKSOURCE_MASK(width);
+
if (!arch_counter_suspend_stop)
clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
start_count = arch_timer_read_counter();
@@ -1040,8 +1093,7 @@ static void __init arch_counter_register(unsigned type)
timecounter_init(&arch_timer_kvm_info.timecounter,
&cyclecounter, start_count);
- /* 56 bits minimum, so we assume worst case rollover */
- sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
+ sched_clock_register(arch_timer_read_counter, width, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)
@@ -1182,25 +1234,25 @@ static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
int ret;
irq_handler_t func;
- struct arch_timer *t;
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
+ arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
+ if (!arch_timer_mem)
return -ENOMEM;
- t->base = base;
- t->evt.irq = irq;
- __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
+ arch_timer_mem->base = base;
+ arch_timer_mem->evt.irq = irq;
+ __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);
if (arch_timer_mem_use_virtual)
func = arch_timer_handler_virt_mem;
else
func = arch_timer_handler_phys_mem;
- ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
+ ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
if (ret) {
pr_err("Failed to request mem timer irq\n");
- kfree(t);
+ kfree(arch_timer_mem);
+ arch_timer_mem = NULL;
}
return ret;
@@ -1458,7 +1510,6 @@ arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
return ret;
}
- arch_counter_base = base;
arch_timers_present |= ARCH_TIMER_TYPE_MEM;
return 0;
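
arch_counter_get_width() deserves a quick sanity check. At a 1 GHz counter rate, 40 years is 40 * 365 * 24 * 3600 * 10^9, roughly 1.26 * 10^18 cycles, so ilog2(min_cycles - 1) + 1 = 61, comfortably inside the [56, 64] clamp; the clocksource mask, the cyclecounter mask and the sched_clock registration all follow that computed width instead of the old hard-coded 56 bits. A standalone userspace check of the arithmetic (assuming the 1 GHz rate; not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t rate = 1000000000ULL;	/* assumed 1 GHz */
		uint64_t min_cycles = 40ULL * 365 * 24 * 3600 * rate;
		/* ilog2(x - 1) + 1, via count-leading-zeros */
		int width = 64 - __builtin_clzll(min_cycles - 1);

		if (width < 56)
			width = 56;
		if (width > 64)
			width = 64;

		printf("counter width: %d bits\n", width);	/* prints 61 */
		return 0;
	}
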
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index c51c5ed15aa7..1767f8bf2013 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -13,10 +13,12 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/module.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
+#include <clocksource/timer-riscv.h>
#include <asm/smp.h>
#include <asm/sbi.h>
#include <asm/timex.h>
@@ -79,6 +81,13 @@ static int riscv_timer_dying_cpu(unsigned int cpu)
return 0;
}
+void riscv_cs_get_mult_shift(u32 *mult, u32 *shift)
+{
+ *mult = riscv_clocksource.mult;
+ *shift = riscv_clocksource.shift;
+}
+EXPORT_SYMBOL_GPL(riscv_cs_get_mult_shift);
+
/* called directly from the low-level interrupt handler */
static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
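
riscv_cs_get_mult_shift() exports the clocksource's fixed-point scaling pair, and the conversion those two values encode is ns = (cycles * mult) >> shift, the same formula as the kernel's clocksource_cyc2ns() helper. An illustrative caller (variable names are hypothetical):

	u32 mult, shift;
	u64 ns;

	riscv_cs_get_mult_shift(&mult, &shift);
	ns = ((u64)delta_cycles * mult) >> shift;	/* cycles -> nanoseconds */
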
diff --git a/drivers/comedi/drivers/dt9812.c b/drivers/comedi/drivers/dt9812.c
index 634f57730c1e..704b04d2980d 100644
--- a/drivers/comedi/drivers/dt9812.c
+++ b/drivers/comedi/drivers/dt9812.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include "../comedi_usb.h"
@@ -237,22 +238,42 @@ static int dt9812_read_info(struct comedi_device *dev,
{
struct usb_device *usb = comedi_to_usb_dev(dev);
struct dt9812_private *devpriv = dev->private;
- struct dt9812_usb_cmd cmd;
+ struct dt9812_usb_cmd *cmd;
+ size_t tbuf_size;
int count, ret;
+ void *tbuf;
- cmd.cmd = cpu_to_le32(DT9812_R_FLASH_DATA);
- cmd.u.flash_data_info.address =
+ tbuf_size = max(sizeof(*cmd), buf_size);
+
+ tbuf = kzalloc(tbuf_size, GFP_KERNEL);
+ if (!tbuf)
+ return -ENOMEM;
+
+ cmd = tbuf;
+
+ cmd->cmd = cpu_to_le32(DT9812_R_FLASH_DATA);
+ cmd->u.flash_data_info.address =
cpu_to_le16(DT9812_DIAGS_BOARD_INFO_ADDR + offset);
- cmd.u.flash_data_info.numbytes = cpu_to_le16(buf_size);
+ cmd->u.flash_data_info.numbytes = cpu_to_le16(buf_size);
/* DT9812 only responds to 32 byte writes!! */
ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
- &cmd, 32, &count, DT9812_USB_TIMEOUT);
+ cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT);
if (ret)
- return ret;
+ goto out;
+
+ ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr),
+ tbuf, buf_size, &count, DT9812_USB_TIMEOUT);
+ if (!ret) {
+ if (count == buf_size)
+ memcpy(buf, tbuf, buf_size);
+ else
+ ret = -EREMOTEIO;
+ }
+out:
+ kfree(tbuf);
- return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr),
- buf, buf_size, &count, DT9812_USB_TIMEOUT);
+ return ret;
}
static int dt9812_read_multiple_registers(struct comedi_device *dev,
@@ -261,22 +282,42 @@ static int dt9812_read_multiple_registers(struct comedi_device *dev,
{
struct usb_device *usb = comedi_to_usb_dev(dev);
struct dt9812_private *devpriv = dev->private;
- struct dt9812_usb_cmd cmd;
+ struct dt9812_usb_cmd *cmd;
int i, count, ret;
+ size_t buf_size;
+ void *buf;
- cmd.cmd = cpu_to_le32(DT9812_R_MULTI_BYTE_REG);
- cmd.u.read_multi_info.count = reg_count;
+ buf_size = max_t(size_t, sizeof(*cmd), reg_count);
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ cmd = buf;
+
+ cmd->cmd = cpu_to_le32(DT9812_R_MULTI_BYTE_REG);
+ cmd->u.read_multi_info.count = reg_count;
for (i = 0; i < reg_count; i++)
- cmd.u.read_multi_info.address[i] = address[i];
+ cmd->u.read_multi_info.address[i] = address[i];
/* DT9812 only responds to 32 byte writes!! */
ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
- &cmd, 32, &count, DT9812_USB_TIMEOUT);
+ cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT);
if (ret)
- return ret;
+ goto out;
+
+ ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr),
+ buf, reg_count, &count, DT9812_USB_TIMEOUT);
+ if (!ret) {
+ if (count == reg_count)
+ memcpy(value, buf, reg_count);
+ else
+ ret = -EREMOTEIO;
+ }
+out:
+ kfree(buf);
- return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr),
- value, reg_count, &count, DT9812_USB_TIMEOUT);
+ return ret;
}
static int dt9812_write_multiple_registers(struct comedi_device *dev,
@@ -285,19 +326,27 @@ static int dt9812_write_multiple_registers(struct comedi_device *dev,
{
struct usb_device *usb = comedi_to_usb_dev(dev);
struct dt9812_private *devpriv = dev->private;
- struct dt9812_usb_cmd cmd;
+ struct dt9812_usb_cmd *cmd;
int i, count;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
- cmd.cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG);
- cmd.u.read_multi_info.count = reg_count;
+ cmd->cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG);
+ cmd->u.read_multi_info.count = reg_count;
for (i = 0; i < reg_count; i++) {
- cmd.u.write_multi_info.write[i].address = address[i];
- cmd.u.write_multi_info.write[i].value = value[i];
+ cmd->u.write_multi_info.write[i].address = address[i];
+ cmd->u.write_multi_info.write[i].value = value[i];
}
/* DT9812 only responds to 32 byte writes!! */
- return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
- &cmd, 32, &count, DT9812_USB_TIMEOUT);
+ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
+ cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT);
+ kfree(cmd);
+
+ return ret;
}
static int dt9812_rmw_multiple_registers(struct comedi_device *dev,
@@ -306,17 +355,25 @@ static int dt9812_rmw_multiple_registers(struct comedi_device *dev,
{
struct usb_device *usb = comedi_to_usb_dev(dev);
struct dt9812_private *devpriv = dev->private;
- struct dt9812_usb_cmd cmd;
+ struct dt9812_usb_cmd *cmd;
int i, count;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
- cmd.cmd = cpu_to_le32(DT9812_RMW_MULTI_BYTE_REG);
- cmd.u.rmw_multi_info.count = reg_count;
+ cmd->cmd = cpu_to_le32(DT9812_RMW_MULTI_BYTE_REG);
+ cmd->u.rmw_multi_info.count = reg_count;
for (i = 0; i < reg_count; i++)
- cmd.u.rmw_multi_info.rmw[i] = rmw[i];
+ cmd->u.rmw_multi_info.rmw[i] = rmw[i];
/* DT9812 only responds to 32 byte writes!! */
- return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
- &cmd, 32, &count, DT9812_USB_TIMEOUT);
+ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
+ cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT);
+ kfree(cmd);
+
+ return ret;
}
static int dt9812_digital_in(struct comedi_device *dev, u8 *bits)
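
The dt9812 rework applies a rule that recurs in the following two drivers: USB transfer buffers must be heap-allocated, since usb_bulk_msg() DMA-maps the buffer and an on-stack struct is not DMA-able, and a short read is reported as -EREMOTEIO instead of silently accepting a partial response. The send-only case reduces to this shape (command struct, endpoint and timeout names are illustrative):

	struct foo_cmd *cmd;
	int count, ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);	/* DMA-able, unlike the stack */
	if (!cmd)
		return -ENOMEM;

	ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, wr_addr),
			   cmd, sizeof(*cmd), &count, TIMEOUT_MS);
	kfree(cmd);

	return ret;
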
diff --git a/drivers/comedi/drivers/ni_usb6501.c b/drivers/comedi/drivers/ni_usb6501.c
index 5b6d9d783b2f..c42987b74b1d 100644
--- a/drivers/comedi/drivers/ni_usb6501.c
+++ b/drivers/comedi/drivers/ni_usb6501.c
@@ -144,6 +144,10 @@ static const u8 READ_COUNTER_RESPONSE[] = {0x00, 0x01, 0x00, 0x10,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00};
+/* Largest supported packets */
+static const size_t TX_MAX_SIZE = sizeof(SET_PORT_DIR_REQUEST);
+static const size_t RX_MAX_SIZE = sizeof(READ_PORT_RESPONSE);
+
enum commands {
READ_PORT,
WRITE_PORT,
@@ -501,6 +505,12 @@ static int ni6501_find_endpoints(struct comedi_device *dev)
if (!devpriv->ep_rx || !devpriv->ep_tx)
return -ENODEV;
+ if (usb_endpoint_maxp(devpriv->ep_rx) < RX_MAX_SIZE)
+ return -ENODEV;
+
+ if (usb_endpoint_maxp(devpriv->ep_tx) < TX_MAX_SIZE)
+ return -ENODEV;
+
return 0;
}
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 9f920819cd74..4b00a9ea611a 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -90,6 +90,9 @@ enum {
#define IC3_VERSION BIT(0)
#define IC6_VERSION BIT(1)
+#define MIN_BUF_SIZE 64
+#define PACKET_TIMEOUT 10000 /* ms */
+
enum vmk80xx_model {
VMK8055_MODEL,
VMK8061_MODEL
@@ -157,22 +160,21 @@ static void vmk80xx_do_bulk_msg(struct comedi_device *dev)
__u8 rx_addr;
unsigned int tx_pipe;
unsigned int rx_pipe;
- size_t size;
+ size_t tx_size;
+ size_t rx_size;
tx_addr = devpriv->ep_tx->bEndpointAddress;
rx_addr = devpriv->ep_rx->bEndpointAddress;
tx_pipe = usb_sndbulkpipe(usb, tx_addr);
rx_pipe = usb_rcvbulkpipe(usb, rx_addr);
+ tx_size = usb_endpoint_maxp(devpriv->ep_tx);
+ rx_size = usb_endpoint_maxp(devpriv->ep_rx);
- /*
- * The max packet size attributes of the K8061
- * input/output endpoints are identical
- */
- size = usb_endpoint_maxp(devpriv->ep_tx);
+ usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf, tx_size, NULL,
+ PACKET_TIMEOUT);
- usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf,
- size, NULL, devpriv->ep_tx->bInterval);
- usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, size, NULL, HZ * 10);
+ usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, rx_size, NULL,
+ PACKET_TIMEOUT);
}
static int vmk80xx_read_packet(struct comedi_device *dev)
@@ -191,7 +193,7 @@ static int vmk80xx_read_packet(struct comedi_device *dev)
pipe = usb_rcvintpipe(usb, ep->bEndpointAddress);
return usb_interrupt_msg(usb, pipe, devpriv->usb_rx_buf,
usb_endpoint_maxp(ep), NULL,
- HZ * 10);
+ PACKET_TIMEOUT);
}
static int vmk80xx_write_packet(struct comedi_device *dev, int cmd)
@@ -212,7 +214,7 @@ static int vmk80xx_write_packet(struct comedi_device *dev, int cmd)
pipe = usb_sndintpipe(usb, ep->bEndpointAddress);
return usb_interrupt_msg(usb, pipe, devpriv->usb_tx_buf,
usb_endpoint_maxp(ep), NULL,
- HZ * 10);
+ PACKET_TIMEOUT);
}
static int vmk80xx_reset_device(struct comedi_device *dev)
@@ -678,12 +680,12 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
size_t size;
- size = usb_endpoint_maxp(devpriv->ep_rx);
+ size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_rx_buf)
return -ENOMEM;
- size = usb_endpoint_maxp(devpriv->ep_tx);
+ size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_tx_buf)
return -ENOMEM;
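
Two fixes ride along in the vmk80xx patch. First, usb_bulk_msg() and usb_interrupt_msg() take their timeout in milliseconds, not jiffies, so the old HZ * 10 only meant ten seconds where HZ happened to be 1000 (with HZ=100 it reads as a 1 second timeout), and bInterval was never a timeout at all; PACKET_TIMEOUT pins the value to an unambiguous 10000 ms. Second, both transfer buffers are now sized to at least MIN_BUF_SIZE, so a device advertising a tiny wMaxPacketSize can no longer make the fixed-size protocol transfers overflow them. The unit trap in miniature:

	/* WRONG on HZ=100: HZ * 10 == 1000 is read as 1000 ms, i.e. 1 second */
	usb_bulk_msg(usb, pipe, buf, len, NULL, HZ * 10);

	/* RIGHT: usb_*_msg() timeouts are plain milliseconds */
	usb_bulk_msg(usb, pipe, buf, len, NULL, PACKET_TIMEOUT);
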
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 0caa60537b14..1cbd60aaed69 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -11,11 +11,13 @@
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/interrupt.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
+#include <linux/spinlock.h>
#define QUAD8_EXTENT 32
@@ -24,6 +26,10 @@ static unsigned int num_quad8;
module_param_hw_array(base, uint, ioport, &num_quad8, 0);
MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
+static unsigned int irq[max_num_isa_dev(QUAD8_EXTENT)];
+module_param_hw_array(irq, uint, irq, NULL, 0);
+MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
+
#define QUAD8_NUM_COUNTERS 8
/**
@@ -37,13 +43,15 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
* @quadrature_scale: array of quadrature mode scale configurations
* @ab_enable: array of A and B inputs enable configurations
* @preset_enable: array of set_to_preset_on_index attribute configurations
+ * @irq_trigger: array of current IRQ trigger function configurations
+ * @next_irq_trigger: array of next IRQ trigger function configurations
* @synchronous_mode: array of index function synchronous mode configurations
* @index_polarity: array of index function polarity configurations
* @cable_fault_enable: differential encoder cable status enable configurations
* @base: base port address of the device
*/
struct quad8 {
- struct mutex lock;
+ spinlock_t lock;
struct counter_device counter;
unsigned int fck_prescaler[QUAD8_NUM_COUNTERS];
unsigned int preset[QUAD8_NUM_COUNTERS];
@@ -52,13 +60,17 @@ struct quad8 {
unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
unsigned int ab_enable[QUAD8_NUM_COUNTERS];
unsigned int preset_enable[QUAD8_NUM_COUNTERS];
+ unsigned int irq_trigger[QUAD8_NUM_COUNTERS];
+ unsigned int next_irq_trigger[QUAD8_NUM_COUNTERS];
unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
unsigned int index_polarity[QUAD8_NUM_COUNTERS];
unsigned int cable_fault_enable;
unsigned int base;
};
+#define QUAD8_REG_INTERRUPT_STATUS 0x10
#define QUAD8_REG_CHAN_OP 0x11
+#define QUAD8_REG_INDEX_INTERRUPT 0x12
#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
#define QUAD8_DIFF_ENCODER_CABLE_STATUS 0x17
/* Borrow Toggle flip-flop */
@@ -91,8 +103,8 @@ struct quad8 {
#define QUAD8_RLD_CNTR_OUT 0x10
/* Transfer Preset Register LSB to FCK Prescaler */
#define QUAD8_RLD_PRESET_PSC 0x18
-#define QUAD8_CHAN_OP_ENABLE_COUNTERS 0x00
#define QUAD8_CHAN_OP_RESET_COUNTERS 0x01
+#define QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC 0x04
#define QUAD8_CMR_QUADRATURE_X1 0x08
#define QUAD8_CMR_QUADRATURE_X2 0x10
#define QUAD8_CMR_QUADRATURE_X4 0x18
@@ -117,13 +129,14 @@ static int quad8_signal_read(struct counter_device *counter,
}
static int quad8_count_read(struct counter_device *counter,
- struct counter_count *count, unsigned long *val)
+ struct counter_count *count, u64 *val)
{
struct quad8 *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
unsigned int flags;
unsigned int borrow;
unsigned int carry;
+ unsigned long irqflags;
int i;
flags = inb(base_offset + 1);
@@ -133,7 +146,7 @@ static int quad8_count_read(struct counter_device *counter,
/* Borrow XOR Carry effectively doubles count range */
*val = (unsigned long)(borrow ^ carry) << 24;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
/* Reset Byte Pointer; transfer Counter to Output Latch */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
@@ -142,23 +155,24 @@ static int quad8_count_read(struct counter_device *counter,
for (i = 0; i < 3; i++)
*val |= (unsigned long)inb(base_offset) << (8 * i);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
static int quad8_count_write(struct counter_device *counter,
- struct counter_count *count, unsigned long val)
+ struct counter_count *count, u64 val)
{
struct quad8 *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
+ unsigned long irqflags;
int i;
/* Only 24-bit values are supported */
if (val > 0xFFFFFF)
return -ERANGE;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
/* Reset Byte Pointer */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
@@ -183,55 +197,51 @@ static int quad8_count_write(struct counter_device *counter,
/* Reset Error flag */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-enum quad8_count_function {
- QUAD8_COUNT_FUNCTION_PULSE_DIRECTION = 0,
- QUAD8_COUNT_FUNCTION_QUADRATURE_X1,
- QUAD8_COUNT_FUNCTION_QUADRATURE_X2,
- QUAD8_COUNT_FUNCTION_QUADRATURE_X4
-};
-
static const enum counter_function quad8_count_functions_list[] = {
- [QUAD8_COUNT_FUNCTION_PULSE_DIRECTION] = COUNTER_FUNCTION_PULSE_DIRECTION,
- [QUAD8_COUNT_FUNCTION_QUADRATURE_X1] = COUNTER_FUNCTION_QUADRATURE_X1_A,
- [QUAD8_COUNT_FUNCTION_QUADRATURE_X2] = COUNTER_FUNCTION_QUADRATURE_X2_A,
- [QUAD8_COUNT_FUNCTION_QUADRATURE_X4] = COUNTER_FUNCTION_QUADRATURE_X4
+ COUNTER_FUNCTION_PULSE_DIRECTION,
+ COUNTER_FUNCTION_QUADRATURE_X1_A,
+ COUNTER_FUNCTION_QUADRATURE_X2_A,
+ COUNTER_FUNCTION_QUADRATURE_X4,
};
-static int quad8_function_get(struct counter_device *counter,
- struct counter_count *count, size_t *function)
+static int quad8_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
struct quad8 *const priv = counter->priv;
const int id = count->id;
+ unsigned long irqflags;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
if (priv->quadrature_mode[id])
switch (priv->quadrature_scale[id]) {
case 0:
- *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1;
+ *function = COUNTER_FUNCTION_QUADRATURE_X1_A;
break;
case 1:
- *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X2;
+ *function = COUNTER_FUNCTION_QUADRATURE_X2_A;
break;
case 2:
- *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X4;
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
break;
}
else
- *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION;
+ *function = COUNTER_FUNCTION_PULSE_DIRECTION;
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-static int quad8_function_set(struct counter_device *counter,
- struct counter_count *count, size_t function)
+static int quad8_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
{
struct quad8 *const priv = counter->priv;
const int id = count->id;
@@ -239,15 +249,16 @@ static int quad8_function_set(struct counter_device *counter,
unsigned int *const scale = priv->quadrature_scale + id;
unsigned int *const synchronous_mode = priv->synchronous_mode + id;
const int base_offset = priv->base + 2 * id + 1;
+ unsigned long irqflags;
unsigned int mode_cfg;
unsigned int idr_cfg;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
mode_cfg = priv->count_mode[id] << 1;
idr_cfg = priv->index_polarity[id] << 1;
- if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) {
+ if (function == COUNTER_FUNCTION_PULSE_DIRECTION) {
*quadrature_mode = 0;
/* Quadrature scaling only available in quadrature mode */
@@ -263,21 +274,21 @@ static int quad8_function_set(struct counter_device *counter,
*quadrature_mode = 1;
switch (function) {
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X1:
+ case COUNTER_FUNCTION_QUADRATURE_X1_A:
*scale = 0;
mode_cfg |= QUAD8_CMR_QUADRATURE_X1;
break;
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X2:
+ case COUNTER_FUNCTION_QUADRATURE_X2_A:
*scale = 1;
mode_cfg |= QUAD8_CMR_QUADRATURE_X2;
break;
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X4:
+ case COUNTER_FUNCTION_QUADRATURE_X4:
*scale = 2;
mode_cfg |= QUAD8_CMR_QUADRATURE_X4;
break;
default:
/* should never reach this path */
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return -EINVAL;
}
}
@@ -285,13 +296,14 @@ static int quad8_function_set(struct counter_device *counter,
/* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-static void quad8_direction_get(struct counter_device *counter,
- struct counter_count *count, enum counter_count_direction *direction)
+static int quad8_direction_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_count_direction *direction)
{
const struct quad8 *const priv = counter->priv;
unsigned int ud_flag;
@@ -302,76 +314,74 @@ static void quad8_direction_get(struct counter_device *counter,
*direction = (ud_flag) ? COUNTER_COUNT_DIRECTION_FORWARD :
COUNTER_COUNT_DIRECTION_BACKWARD;
-}
-enum quad8_synapse_action {
- QUAD8_SYNAPSE_ACTION_NONE = 0,
- QUAD8_SYNAPSE_ACTION_RISING_EDGE,
- QUAD8_SYNAPSE_ACTION_FALLING_EDGE,
- QUAD8_SYNAPSE_ACTION_BOTH_EDGES
-};
+ return 0;
+}
static const enum counter_synapse_action quad8_index_actions_list[] = {
- [QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
- [QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
};
static const enum counter_synapse_action quad8_synapse_actions_list[] = {
- [QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
- [QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- [QUAD8_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
- [QUAD8_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
-static int quad8_action_get(struct counter_device *counter,
- struct counter_count *count, struct counter_synapse *synapse,
- size_t *action)
+static int quad8_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
struct quad8 *const priv = counter->priv;
int err;
- size_t function = 0;
+ enum counter_function function;
const size_t signal_a_id = count->synapses[0].signal->id;
enum counter_count_direction direction;
/* Handle Index signals */
if (synapse->signal->id >= 16) {
if (priv->preset_enable[count->id])
- *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else
- *action = QUAD8_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
}
- err = quad8_function_get(counter, count, &function);
+ err = quad8_function_read(counter, count, &function);
if (err)
return err;
/* Default action mode */
- *action = QUAD8_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
/* Determine action mode based on current count function mode */
switch (function) {
- case QUAD8_COUNT_FUNCTION_PULSE_DIRECTION:
+ case COUNTER_FUNCTION_PULSE_DIRECTION:
if (synapse->signal->id == signal_a_id)
- *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X1:
+ case COUNTER_FUNCTION_QUADRATURE_X1_A:
if (synapse->signal->id == signal_a_id) {
- quad8_direction_get(counter, count, &direction);
+ err = quad8_direction_read(counter, count, &direction);
+ if (err)
+ return err;
if (direction == COUNTER_COUNT_DIRECTION_FORWARD)
- *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else
- *action = QUAD8_SYNAPSE_ACTION_FALLING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
}
return 0;
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X2:
+ case COUNTER_FUNCTION_QUADRATURE_X2_A:
if (synapse->signal->id == signal_a_id)
- *action = QUAD8_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X4:
- *action = QUAD8_SYNAPSE_ACTION_BOTH_EDGES;
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
default:
/* should never reach this path */
@@ -379,13 +389,103 @@ static int quad8_action_get(struct counter_device *counter,
}
}
+enum {
+ QUAD8_EVENT_NONE = -1,
+ QUAD8_EVENT_CARRY = 0,
+ QUAD8_EVENT_COMPARE = 1,
+ QUAD8_EVENT_CARRY_BORROW = 2,
+ QUAD8_EVENT_INDEX = 3,
+};
+
+static int quad8_events_configure(struct counter_device *counter)
+{
+ struct quad8 *const priv = counter->priv;
+ unsigned long irq_enabled = 0;
+ unsigned long irqflags;
+ size_t channel;
+ unsigned long ior_cfg;
+ unsigned long base_offset;
+
+ spin_lock_irqsave(&priv->lock, irqflags);
+
+ /* Enable interrupts for the requested channels, disable for the rest */
+ for (channel = 0; channel < QUAD8_NUM_COUNTERS; channel++) {
+ if (priv->next_irq_trigger[channel] == QUAD8_EVENT_NONE)
+ continue;
+
+ if (priv->irq_trigger[channel] != priv->next_irq_trigger[channel]) {
+ /* Save new IRQ function configuration */
+ priv->irq_trigger[channel] = priv->next_irq_trigger[channel];
+
+ /* Load configuration to I/O Control Register */
+ ior_cfg = priv->ab_enable[channel] |
+ priv->preset_enable[channel] << 1 |
+ priv->irq_trigger[channel] << 3;
+ base_offset = priv->base + 2 * channel + 1;
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+ }
+
+ /* Reset next IRQ trigger function configuration */
+ priv->next_irq_trigger[channel] = QUAD8_EVENT_NONE;
+
+ /* Enable IRQ line */
+ irq_enabled |= BIT(channel);
+ }
+
+ outb(irq_enabled, priv->base + QUAD8_REG_INDEX_INTERRUPT);
+
+ spin_unlock_irqrestore(&priv->lock, irqflags);
+
+ return 0;
+}
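For reference, the ior_cfg byte assembled above packs three settings into the I/O Control Register. The layout below is inferred solely from the shifts used here and in quad8_count_enable_write; the macro names are illustrative and not part of the driver:

/* Inferred IOR layout (hypothetical names; shifts taken from the code) */
#define EX_IOR_AB_ENABLE(x)	((x) << 0)	/* A/B count enable */
#define EX_IOR_PRESET_ENABLE(x)	((x) << 1)	/* preset enable */
#define EX_IOR_IRQ_TRIGGER(x)	((x) << 3)	/* 2-bit trigger function */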
+
+static int quad8_watch_validate(struct counter_device *counter,
+ const struct counter_watch *watch)
+{
+ struct quad8 *const priv = counter->priv;
+
+ if (watch->channel >= QUAD8_NUM_COUNTERS)
+ return -EINVAL;
+
+ switch (watch->event) {
+ case COUNTER_EVENT_OVERFLOW:
+ if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
+ priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_CARRY;
+ else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_CARRY)
+ return -EINVAL;
+ return 0;
+ case COUNTER_EVENT_THRESHOLD:
+ if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
+ priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_COMPARE;
+ else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_COMPARE)
+ return -EINVAL;
+ return 0;
+ case COUNTER_EVENT_OVERFLOW_UNDERFLOW:
+ if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
+ priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_CARRY_BORROW;
+ else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_CARRY_BORROW)
+ return -EINVAL;
+ return 0;
+ case COUNTER_EVENT_INDEX:
+ if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
+ priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_INDEX;
+ else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_INDEX)
+ return -EINVAL;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
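Each arm of the switch above applies the same claim-or-match rule: the first watch on a channel claims its pending trigger slot, and later watches must request the same trigger or be rejected, since the hardware supports only one trigger function per channel. A hedged sketch of the shared pattern, factored into a hypothetical helper:

static int quad8_claim_trigger(struct quad8 *const priv,
			       const size_t channel, const int trigger)
{
	/* First watch on this channel claims the trigger slot */
	if (priv->next_irq_trigger[channel] == QUAD8_EVENT_NONE)
		priv->next_irq_trigger[channel] = trigger;
	/* Subsequent watches must agree on the trigger function */
	else if (priv->next_irq_trigger[channel] != trigger)
		return -EINVAL;
	return 0;
}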
+
static const struct counter_ops quad8_ops = {
.signal_read = quad8_signal_read,
.count_read = quad8_count_read,
.count_write = quad8_count_write,
- .function_get = quad8_function_get,
- .function_set = quad8_function_set,
- .action_get = quad8_action_get
+ .function_read = quad8_function_read,
+ .function_write = quad8_function_write,
+ .action_read = quad8_action_read,
+ .events_configure = quad8_events_configure,
+ .watch_validate = quad8_watch_validate,
};
static const char *const quad8_index_polarity_modes[] = {
@@ -394,7 +494,8 @@ static const char *const quad8_index_polarity_modes[] = {
};
static int quad8_index_polarity_get(struct counter_device *counter,
- struct counter_signal *signal, size_t *index_polarity)
+ struct counter_signal *signal,
+ u32 *index_polarity)
{
const struct quad8 *const priv = counter->priv;
const size_t channel_id = signal->id - 16;
@@ -405,14 +506,16 @@ static int quad8_index_polarity_get(struct counter_device *counter,
}
static int quad8_index_polarity_set(struct counter_device *counter,
- struct counter_signal *signal, size_t index_polarity)
+ struct counter_signal *signal,
+ u32 index_polarity)
{
struct quad8 *const priv = counter->priv;
const size_t channel_id = signal->id - 16;
const int base_offset = priv->base + 2 * channel_id + 1;
+ unsigned long irqflags;
unsigned int idr_cfg = index_polarity << 1;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
idr_cfg |= priv->synchronous_mode[channel_id];
@@ -421,25 +524,19 @@ static int quad8_index_polarity_set(struct counter_device *counter,
/* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-static struct counter_signal_enum_ext quad8_index_pol_enum = {
- .items = quad8_index_polarity_modes,
- .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
- .get = quad8_index_polarity_get,
- .set = quad8_index_polarity_set
-};
-
static const char *const quad8_synchronous_modes[] = {
"non-synchronous",
"synchronous"
};
static int quad8_synchronous_mode_get(struct counter_device *counter,
- struct counter_signal *signal, size_t *synchronous_mode)
+ struct counter_signal *signal,
+ u32 *synchronous_mode)
{
const struct quad8 *const priv = counter->priv;
const size_t channel_id = signal->id - 16;
@@ -450,20 +547,22 @@ static int quad8_synchronous_mode_get(struct counter_device *counter,
}
static int quad8_synchronous_mode_set(struct counter_device *counter,
- struct counter_signal *signal, size_t synchronous_mode)
+ struct counter_signal *signal,
+ u32 synchronous_mode)
{
struct quad8 *const priv = counter->priv;
const size_t channel_id = signal->id - 16;
const int base_offset = priv->base + 2 * channel_id + 1;
+ unsigned long irqflags;
unsigned int idr_cfg = synchronous_mode;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
idr_cfg |= priv->index_polarity[channel_id] << 1;
/* Index function must be non-synchronous in non-quadrature mode */
if (synchronous_mode && !priv->quadrature_mode[channel_id]) {
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return -EINVAL;
}
@@ -472,27 +571,23 @@ static int quad8_synchronous_mode_set(struct counter_device *counter,
/* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-static struct counter_signal_enum_ext quad8_syn_mode_enum = {
- .items = quad8_synchronous_modes,
- .num_items = ARRAY_SIZE(quad8_synchronous_modes),
- .get = quad8_synchronous_mode_get,
- .set = quad8_synchronous_mode_set
-};
-
-static ssize_t quad8_count_floor_read(struct counter_device *counter,
- struct counter_count *count, void *private, char *buf)
+static int quad8_count_floor_read(struct counter_device *counter,
+ struct counter_count *count, u64 *floor)
{
/* Only a floor of 0 is supported */
- return sprintf(buf, "0\n");
+ *floor = 0;
+
+ return 0;
}
-static int quad8_count_mode_get(struct counter_device *counter,
- struct counter_count *count, size_t *cnt_mode)
+static int quad8_count_mode_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_count_mode *cnt_mode)
{
const struct quad8 *const priv = counter->priv;
@@ -515,38 +610,41 @@ static int quad8_count_mode_get(struct counter_device *counter,
return 0;
}
-static int quad8_count_mode_set(struct counter_device *counter,
- struct counter_count *count, size_t cnt_mode)
+static int quad8_count_mode_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_count_mode cnt_mode)
{
struct quad8 *const priv = counter->priv;
+ unsigned int count_mode;
unsigned int mode_cfg;
const int base_offset = priv->base + 2 * count->id + 1;
+ unsigned long irqflags;
/* Map Generic Counter count mode to 104-QUAD-8 count mode */
switch (cnt_mode) {
case COUNTER_COUNT_MODE_NORMAL:
- cnt_mode = 0;
+ count_mode = 0;
break;
case COUNTER_COUNT_MODE_RANGE_LIMIT:
- cnt_mode = 1;
+ count_mode = 1;
break;
case COUNTER_COUNT_MODE_NON_RECYCLE:
- cnt_mode = 2;
+ count_mode = 2;
break;
case COUNTER_COUNT_MODE_MODULO_N:
- cnt_mode = 3;
+ count_mode = 3;
break;
default:
/* should never reach this path */
return -EINVAL;
}
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
- priv->count_mode[count->id] = cnt_mode;
+ priv->count_mode[count->id] = count_mode;
/* Set count mode configuration value */
- mode_cfg = cnt_mode << 1;
+ mode_cfg = count_mode << 1;
/* Add quadrature mode configuration */
if (priv->quadrature_mode[count->id])
@@ -555,61 +653,42 @@ static int quad8_count_mode_set(struct counter_device *counter,
/* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-static struct counter_count_enum_ext quad8_cnt_mode_enum = {
- .items = counter_count_mode_str,
- .num_items = ARRAY_SIZE(counter_count_mode_str),
- .get = quad8_count_mode_get,
- .set = quad8_count_mode_set
-};
-
-static ssize_t quad8_count_direction_read(struct counter_device *counter,
- struct counter_count *count, void *priv, char *buf)
-{
- enum counter_count_direction dir;
-
- quad8_direction_get(counter, count, &dir);
-
- return sprintf(buf, "%s\n", counter_count_direction_str[dir]);
-}
-
-static ssize_t quad8_count_enable_read(struct counter_device *counter,
- struct counter_count *count, void *private, char *buf)
+static int quad8_count_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
{
const struct quad8 *const priv = counter->priv;
- return sprintf(buf, "%u\n", priv->ab_enable[count->id]);
+ *enable = priv->ab_enable[count->id];
+
+ return 0;
}
-static ssize_t quad8_count_enable_write(struct counter_device *counter,
- struct counter_count *count, void *private, const char *buf, size_t len)
+static int quad8_count_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
{
struct quad8 *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
- int err;
- bool ab_enable;
+ unsigned long irqflags;
unsigned int ior_cfg;
- err = kstrtobool(buf, &ab_enable);
- if (err)
- return err;
-
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
- priv->ab_enable[count->id] = ab_enable;
+ priv->ab_enable[count->id] = enable;
- ior_cfg = ab_enable | priv->preset_enable[count->id] << 1;
+ ior_cfg = enable | priv->preset_enable[count->id] << 1 |
+ priv->irq_trigger[count->id] << 3;
/* Load I/O control configuration */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- return len;
+ return 0;
}
static const char *const quad8_noise_error_states[] = {
@@ -618,7 +697,7 @@ static const char *const quad8_noise_error_states[] = {
};
static int quad8_error_noise_get(struct counter_device *counter,
- struct counter_count *count, size_t *noise_error)
+ struct counter_count *count, u32 *noise_error)
{
const struct quad8 *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id + 1;
@@ -628,18 +707,14 @@ static int quad8_error_noise_get(struct counter_device *counter,
return 0;
}
-static struct counter_count_enum_ext quad8_error_noise_enum = {
- .items = quad8_noise_error_states,
- .num_items = ARRAY_SIZE(quad8_noise_error_states),
- .get = quad8_error_noise_get
-};
-
-static ssize_t quad8_count_preset_read(struct counter_device *counter,
- struct counter_count *count, void *private, char *buf)
+static int quad8_count_preset_read(struct counter_device *counter,
+ struct counter_count *count, u64 *preset)
{
const struct quad8 *const priv = counter->priv;
- return sprintf(buf, "%u\n", priv->preset[count->id]);
+ *preset = priv->preset[count->id];
+
+ return 0;
}
static void quad8_preset_register_set(struct quad8 *const priv, const int id,
@@ -658,176 +733,166 @@ static void quad8_preset_register_set(struct quad8 *const priv, const int id,
outb(preset >> (8 * i), base_offset);
}
-static ssize_t quad8_count_preset_write(struct counter_device *counter,
- struct counter_count *count, void *private, const char *buf, size_t len)
+static int quad8_count_preset_write(struct counter_device *counter,
+ struct counter_count *count, u64 preset)
{
struct quad8 *const priv = counter->priv;
- unsigned int preset;
- int ret;
-
- ret = kstrtouint(buf, 0, &preset);
- if (ret)
- return ret;
+ unsigned long irqflags;
/* Only 24-bit values are supported */
if (preset > 0xFFFFFF)
return -ERANGE;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
quad8_preset_register_set(priv, count->id, preset);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- return len;
+ return 0;
}
-static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
- struct counter_count *count, void *private, char *buf)
+static int quad8_count_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, u64 *ceiling)
{
struct quad8 *const priv = counter->priv;
+ unsigned long irqflags;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (priv->count_mode[count->id]) {
case 1:
case 3:
- mutex_unlock(&priv->lock);
- return sprintf(buf, "%u\n", priv->preset[count->id]);
+ *ceiling = priv->preset[count->id];
+ break;
+ default:
+ /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
+ *ceiling = 0x1FFFFFF;
+ break;
}
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
- return sprintf(buf, "33554431\n");
+ return 0;
}
-static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
- struct counter_count *count, void *private, const char *buf, size_t len)
+static int quad8_count_ceiling_write(struct counter_device *counter,
+ struct counter_count *count, u64 ceiling)
{
struct quad8 *const priv = counter->priv;
- unsigned int ceiling;
- int ret;
-
- ret = kstrtouint(buf, 0, &ceiling);
- if (ret)
- return ret;
+ unsigned long irqflags;
/* Only 24-bit values are supported */
if (ceiling > 0xFFFFFF)
return -ERANGE;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (priv->count_mode[count->id]) {
case 1:
case 3:
quad8_preset_register_set(priv, count->id, ceiling);
- mutex_unlock(&priv->lock);
- return len;
+ spin_unlock_irqrestore(&priv->lock, irqflags);
+ return 0;
}
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return -EINVAL;
}
-static ssize_t quad8_count_preset_enable_read(struct counter_device *counter,
- struct counter_count *count, void *private, char *buf)
+static int quad8_count_preset_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ u8 *preset_enable)
{
const struct quad8 *const priv = counter->priv;
- return sprintf(buf, "%u\n", !priv->preset_enable[count->id]);
+ *preset_enable = !priv->preset_enable[count->id];
+
+ return 0;
}
-static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
- struct counter_count *count, void *private, const char *buf, size_t len)
+static int quad8_count_preset_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ u8 preset_enable)
{
struct quad8 *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id + 1;
- bool preset_enable;
- int ret;
+ unsigned long irqflags;
unsigned int ior_cfg;
- ret = kstrtobool(buf, &preset_enable);
- if (ret)
- return ret;
-
/* Preset enable is active low in Input/Output Control register */
preset_enable = !preset_enable;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
priv->preset_enable[count->id] = preset_enable;
- ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1;
+ ior_cfg = priv->ab_enable[count->id] | preset_enable << 1 |
+ priv->irq_trigger[count->id] << 3;
/* Load I/O control configuration to Input / Output Control Register */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- return len;
+ return 0;
}
-static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter,
- struct counter_signal *signal,
- void *private, char *buf)
+static int quad8_signal_cable_fault_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ u8 *cable_fault)
{
struct quad8 *const priv = counter->priv;
const size_t channel_id = signal->id / 2;
+ unsigned long irqflags;
bool disabled;
unsigned int status;
- unsigned int fault;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
disabled = !(priv->cable_fault_enable & BIT(channel_id));
if (disabled) {
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return -EINVAL;
}
/* Logic 0 = cable fault */
status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
/* Mask respective channel and invert logic */
- fault = !(status & BIT(channel_id));
+ *cable_fault = !(status & BIT(channel_id));
- return sprintf(buf, "%u\n", fault);
+ return 0;
}
-static ssize_t quad8_signal_cable_fault_enable_read(
- struct counter_device *counter, struct counter_signal *signal,
- void *private, char *buf)
+static int quad8_signal_cable_fault_enable_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ u8 *enable)
{
const struct quad8 *const priv = counter->priv;
const size_t channel_id = signal->id / 2;
- const unsigned int enb = !!(priv->cable_fault_enable & BIT(channel_id));
- return sprintf(buf, "%u\n", enb);
+ *enable = !!(priv->cable_fault_enable & BIT(channel_id));
+
+ return 0;
}
-static ssize_t quad8_signal_cable_fault_enable_write(
- struct counter_device *counter, struct counter_signal *signal,
- void *private, const char *buf, size_t len)
+static int quad8_signal_cable_fault_enable_write(struct counter_device *counter,
+ struct counter_signal *signal,
+ u8 enable)
{
struct quad8 *const priv = counter->priv;
const size_t channel_id = signal->id / 2;
- bool enable;
- int ret;
+ unsigned long irqflags;
unsigned int cable_fault_enable;
- ret = kstrtobool(buf, &enable);
- if (ret)
- return ret;
-
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
if (enable)
priv->cable_fault_enable |= BIT(channel_id);
@@ -839,35 +904,32 @@ static ssize_t quad8_signal_cable_fault_enable_write(
outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- return len;
+ return 0;
}
-static ssize_t quad8_signal_fck_prescaler_read(struct counter_device *counter,
- struct counter_signal *signal, void *private, char *buf)
+static int quad8_signal_fck_prescaler_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ u8 *prescaler)
{
const struct quad8 *const priv = counter->priv;
- const size_t channel_id = signal->id / 2;
- return sprintf(buf, "%u\n", priv->fck_prescaler[channel_id]);
+ *prescaler = priv->fck_prescaler[signal->id / 2];
+
+ return 0;
}
-static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter,
- struct counter_signal *signal, void *private, const char *buf,
- size_t len)
+static int quad8_signal_fck_prescaler_write(struct counter_device *counter,
+ struct counter_signal *signal,
+ u8 prescaler)
{
struct quad8 *const priv = counter->priv;
const size_t channel_id = signal->id / 2;
const int base_offset = priv->base + 2 * channel_id;
- u8 prescaler;
- int ret;
-
- ret = kstrtou8(buf, 0, &prescaler);
- if (ret)
- return ret;
+ unsigned long irqflags;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
priv->fck_prescaler[channel_id] = prescaler;
@@ -879,33 +941,32 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter,
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
base_offset + 1);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- return len;
+ return 0;
}
-static const struct counter_signal_ext quad8_signal_ext[] = {
- {
- .name = "cable_fault",
- .read = quad8_signal_cable_fault_read
- },
- {
- .name = "cable_fault_enable",
- .read = quad8_signal_cable_fault_enable_read,
- .write = quad8_signal_cable_fault_enable_write
- },
- {
- .name = "filter_clock_prescaler",
- .read = quad8_signal_fck_prescaler_read,
- .write = quad8_signal_fck_prescaler_write
- }
+static struct counter_comp quad8_signal_ext[] = {
+ COUNTER_COMP_SIGNAL_BOOL("cable_fault", quad8_signal_cable_fault_read,
+ NULL),
+ COUNTER_COMP_SIGNAL_BOOL("cable_fault_enable",
+ quad8_signal_cable_fault_enable_read,
+ quad8_signal_cable_fault_enable_write),
+ COUNTER_COMP_SIGNAL_U8("filter_clock_prescaler",
+ quad8_signal_fck_prescaler_read,
+ quad8_signal_fck_prescaler_write)
};
-static const struct counter_signal_ext quad8_index_ext[] = {
- COUNTER_SIGNAL_ENUM("index_polarity", &quad8_index_pol_enum),
- COUNTER_SIGNAL_ENUM_AVAILABLE("index_polarity", &quad8_index_pol_enum),
- COUNTER_SIGNAL_ENUM("synchronous_mode", &quad8_syn_mode_enum),
- COUNTER_SIGNAL_ENUM_AVAILABLE("synchronous_mode", &quad8_syn_mode_enum)
+static DEFINE_COUNTER_ENUM(quad8_index_pol_enum, quad8_index_polarity_modes);
+static DEFINE_COUNTER_ENUM(quad8_synch_mode_enum, quad8_synchronous_modes);
+
+static struct counter_comp quad8_index_ext[] = {
+ COUNTER_COMP_SIGNAL_ENUM("index_polarity", quad8_index_polarity_get,
+ quad8_index_polarity_set,
+ quad8_index_pol_enum),
+ COUNTER_COMP_SIGNAL_ENUM("synchronous_mode", quad8_synchronous_mode_get,
+ quad8_synchronous_mode_set,
+ quad8_synch_mode_enum),
};
#define QUAD8_QUAD_SIGNAL(_id, _name) { \
@@ -974,39 +1035,30 @@ static struct counter_synapse quad8_count_synapses[][3] = {
QUAD8_COUNT_SYNAPSES(6), QUAD8_COUNT_SYNAPSES(7)
};
-static const struct counter_count_ext quad8_count_ext[] = {
- {
- .name = "ceiling",
- .read = quad8_count_ceiling_read,
- .write = quad8_count_ceiling_write
- },
- {
- .name = "floor",
- .read = quad8_count_floor_read
- },
- COUNTER_COUNT_ENUM("count_mode", &quad8_cnt_mode_enum),
- COUNTER_COUNT_ENUM_AVAILABLE("count_mode", &quad8_cnt_mode_enum),
- {
- .name = "direction",
- .read = quad8_count_direction_read
- },
- {
- .name = "enable",
- .read = quad8_count_enable_read,
- .write = quad8_count_enable_write
- },
- COUNTER_COUNT_ENUM("error_noise", &quad8_error_noise_enum),
- COUNTER_COUNT_ENUM_AVAILABLE("error_noise", &quad8_error_noise_enum),
- {
- .name = "preset",
- .read = quad8_count_preset_read,
- .write = quad8_count_preset_write
- },
- {
- .name = "preset_enable",
- .read = quad8_count_preset_enable_read,
- .write = quad8_count_preset_enable_write
- }
+static const enum counter_count_mode quad8_cnt_modes[] = {
+ COUNTER_COUNT_MODE_NORMAL,
+ COUNTER_COUNT_MODE_RANGE_LIMIT,
+ COUNTER_COUNT_MODE_NON_RECYCLE,
+ COUNTER_COUNT_MODE_MODULO_N,
+};
+
+static DEFINE_COUNTER_AVAILABLE(quad8_count_mode_available, quad8_cnt_modes);
+
+static DEFINE_COUNTER_ENUM(quad8_error_noise_enum, quad8_noise_error_states);
+
+static struct counter_comp quad8_count_ext[] = {
+ COUNTER_COMP_CEILING(quad8_count_ceiling_read,
+ quad8_count_ceiling_write),
+ COUNTER_COMP_FLOOR(quad8_count_floor_read, NULL),
+ COUNTER_COMP_COUNT_MODE(quad8_count_mode_read, quad8_count_mode_write,
+ quad8_count_mode_available),
+ COUNTER_COMP_DIRECTION(quad8_direction_read),
+ COUNTER_COMP_ENABLE(quad8_count_enable_read, quad8_count_enable_write),
+ COUNTER_COMP_COUNT_ENUM("error_noise", quad8_error_noise_get, NULL,
+ quad8_error_noise_enum),
+ COUNTER_COMP_PRESET(quad8_count_preset_read, quad8_count_preset_write),
+ COUNTER_COMP_PRESET_ENABLE(quad8_count_preset_enable_read,
+ quad8_count_preset_enable_write),
};
#define QUAD8_COUNT(_id, _cntname) { \
@@ -1031,11 +1083,54 @@ static struct counter_count quad8_counts[] = {
QUAD8_COUNT(7, "Channel 8 Count")
};
+static irqreturn_t quad8_irq_handler(int irq, void *private)
+{
+ struct quad8 *const priv = private;
+ const unsigned long base = priv->base;
+ unsigned long irq_status;
+ unsigned long channel;
+ u8 event;
+
+ irq_status = inb(base + QUAD8_REG_INTERRUPT_STATUS);
+ if (!irq_status)
+ return IRQ_NONE;
+
+ for_each_set_bit(channel, &irq_status, QUAD8_NUM_COUNTERS) {
+ switch (priv->irq_trigger[channel]) {
+ case QUAD8_EVENT_CARRY:
+ event = COUNTER_EVENT_OVERFLOW;
+ break;
+ case QUAD8_EVENT_COMPARE:
+ event = COUNTER_EVENT_THRESHOLD;
+ break;
+ case QUAD8_EVENT_CARRY_BORROW:
+ event = COUNTER_EVENT_OVERFLOW_UNDERFLOW;
+ break;
+ case QUAD8_EVENT_INDEX:
+ event = COUNTER_EVENT_INDEX;
+ break;
+ default:
+ /* should never reach this path */
+ WARN_ONCE(true, "invalid interrupt trigger function %u configured for channel %lu\n",
+ priv->irq_trigger[channel], channel);
+ continue;
+ }
+
+ counter_push_event(&priv->counter, event, channel);
+ }
+
+ /* Clear pending interrupts on device */
+ outb(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, base + QUAD8_REG_CHAN_OP);
+
+ return IRQ_HANDLED;
+}
+
static int quad8_probe(struct device *dev, unsigned int id)
{
struct quad8 *priv;
int i, j;
unsigned int base_offset;
+ int err;
if (!devm_request_region(dev, base[id], QUAD8_EXTENT, dev_name(dev))) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -1058,9 +1153,10 @@ static int quad8_probe(struct device *dev, unsigned int id)
priv->counter.priv = priv;
priv->base = base[id];
- /* Initialize mutex */
- mutex_init(&priv->lock);
+ spin_lock_init(&priv->lock);
+ /* Reset Index/Interrupt Register */
+ outb(0x00, base[id] + QUAD8_REG_INDEX_INTERRUPT);
/* Reset all counters and disable interrupt function */
outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
/* Set initial configuration for all counters */
@@ -1087,11 +1183,18 @@ static int quad8_probe(struct device *dev, unsigned int id)
outb(QUAD8_CTR_IOR, base_offset + 1);
/* Disable index function; negative index polarity */
outb(QUAD8_CTR_IDR, base_offset + 1);
+ /* Initialize next IRQ trigger function configuration */
+ priv->next_irq_trigger[i] = QUAD8_EVENT_NONE;
}
/* Disable Differential Encoder Cable Status for all channels */
outb(0xFF, base[id] + QUAD8_DIFF_ENCODER_CABLE_STATUS);
- /* Enable all counters */
- outb(QUAD8_CHAN_OP_ENABLE_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
+ /* Enable all counters and enable interrupt function */
+ outb(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, base[id] + QUAD8_REG_CHAN_OP);
+
+ err = devm_request_irq(dev, irq[id], quad8_irq_handler, IRQF_SHARED,
+ priv->counter.name, priv);
+ if (err)
+ return err;
return devm_counter_register(dev, &priv->counter);
}
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index d5d2540b30c2..3dcdb681c4e4 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -23,11 +23,11 @@ config 104_QUAD_8
A counter's error flag may be cleared by writing to its count value
attribute. Although the
104-QUAD-8 counters have a 25-bit range, only the lower 24 bits may be
- set, either directly or via the counter's preset attribute. Interrupts
- are not supported by this driver.
+ set, either directly or via the counter's preset attribute.
The base port addresses for the devices may be configured via the base
- array module parameter.
+ array module parameter. The interrupt line numbers for the devices may
+ be configured via the irq array module parameter.
config INTERRUPT_CNT
tristate "Interrupt counter driver"
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
index 19742e6f5e3e..8fde6c100ebc 100644
--- a/drivers/counter/Makefile
+++ b/drivers/counter/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_COUNTER) += counter.o
+counter-y := counter-core.o counter-sysfs.o counter-chrdev.o
obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
obj-$(CONFIG_INTERRUPT_CNT) += interrupt-cnt.o
diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
new file mode 100644
index 000000000000..b7c62f957a6a
--- /dev/null
+++ b/drivers/counter/counter-chrdev.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Counter character device interface
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#include <linux/cdev.h>
+#include <linux/counter.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/nospec.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timekeeping.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "counter-chrdev.h"
+
+struct counter_comp_node {
+ struct list_head l;
+ struct counter_component component;
+ struct counter_comp comp;
+ void *parent;
+};
+
+#define counter_comp_read_is_equal(a, b) \
+ (a.action_read == b.action_read || \
+ a.device_u8_read == b.device_u8_read || \
+ a.count_u8_read == b.count_u8_read || \
+ a.signal_u8_read == b.signal_u8_read || \
+ a.device_u32_read == b.device_u32_read || \
+ a.count_u32_read == b.count_u32_read || \
+ a.signal_u32_read == b.signal_u32_read || \
+ a.device_u64_read == b.device_u64_read || \
+ a.count_u64_read == b.count_u64_read || \
+ a.signal_u64_read == b.signal_u64_read)
+
+#define counter_comp_read_is_set(comp) \
+ (comp.action_read || \
+ comp.device_u8_read || \
+ comp.count_u8_read || \
+ comp.signal_u8_read || \
+ comp.device_u32_read || \
+ comp.count_u32_read || \
+ comp.signal_u32_read || \
+ comp.device_u64_read || \
+ comp.count_u64_read || \
+ comp.signal_u64_read)
+
+static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
+ size_t len, loff_t *f_ps)
+{
+ struct counter_device *const counter = filp->private_data;
+ int err;
+ unsigned int copied;
+
+ if (!counter->ops)
+ return -ENODEV;
+
+ if (len < sizeof(struct counter_event))
+ return -EINVAL;
+
+ do {
+ if (kfifo_is_empty(&counter->events)) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ err = wait_event_interruptible(counter->events_wait,
+ !kfifo_is_empty(&counter->events) ||
+ !counter->ops);
+ if (err < 0)
+ return err;
+ if (!counter->ops)
+ return -ENODEV;
+ }
+
+ if (mutex_lock_interruptible(&counter->events_out_lock))
+ return -ERESTARTSYS;
+ err = kfifo_to_user(&counter->events, buf, len, &copied);
+ mutex_unlock(&counter->events_out_lock);
+ if (err < 0)
+ return err;
+ } while (!copied);
+
+ return copied;
+}
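Because the fifo holds fixed-size struct counter_event records, userspace can consume them with plain read() calls of at least one record. A hedged sketch, assuming the uapi definitions introduced by this series and an example device node path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/counter.h>

int main(void)
{
	struct counter_event event;
	const int fd = open("/dev/counter0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Blocks until a whole counter_event record is available */
	while (read(fd, &event, sizeof(event)) == sizeof(event))
		printf("event %u, channel %u, value %llu\n",
		       event.watch.event, event.watch.channel,
		       (unsigned long long)event.value);

	close(fd);
	return 0;
}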
+
+static __poll_t counter_chrdev_poll(struct file *filp,
+ struct poll_table_struct *pollt)
+{
+ struct counter_device *const counter = filp->private_data;
+ __poll_t events = 0;
+
+ if (!counter->ops)
+ return events;
+
+ poll_wait(filp, &counter->events_wait, pollt);
+
+ if (!kfifo_is_empty(&counter->events))
+ events = EPOLLIN | EPOLLRDNORM;
+
+ return events;
+}
+
+static void counter_events_list_free(struct list_head *const events_list)
+{
+ struct counter_event_node *p, *n;
+ struct counter_comp_node *q, *o;
+
+ list_for_each_entry_safe(p, n, events_list, l) {
+ /* Free associated component nodes */
+ list_for_each_entry_safe(q, o, &p->comp_list, l) {
+ list_del(&q->l);
+ kfree(q);
+ }
+
+ /* Free event node */
+ list_del(&p->l);
+ kfree(p);
+ }
+}
+
+static int counter_set_event_node(struct counter_device *const counter,
+ struct counter_watch *const watch,
+ const struct counter_comp_node *const cfg)
+{
+ struct counter_event_node *event_node;
+ int err = 0;
+ struct counter_comp_node *comp_node;
+
+ /* Search for event in the list */
+ list_for_each_entry(event_node, &counter->next_events_list, l)
+ if (event_node->event == watch->event &&
+ event_node->channel == watch->channel)
+ break;
+
+ /* If event is not already in the list */
+ if (&event_node->l == &counter->next_events_list) {
+ /* Allocate new event node */
+ event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
+ if (!event_node)
+ return -ENOMEM;
+
+ /* Configure event node and add to the list */
+ event_node->event = watch->event;
+ event_node->channel = watch->channel;
+ INIT_LIST_HEAD(&event_node->comp_list);
+ list_add(&event_node->l, &counter->next_events_list);
+ }
+
+ /* Check if component watch has already been set before */
+ list_for_each_entry(comp_node, &event_node->comp_list, l)
+ if (comp_node->parent == cfg->parent &&
+ counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
+ err = -EINVAL;
+ goto exit_free_event_node;
+ }
+
+ /* Allocate component node */
+ comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
+ if (!comp_node) {
+ err = -ENOMEM;
+ goto exit_free_event_node;
+ }
+ *comp_node = *cfg;
+
+ /* Add component node to event node */
+ list_add_tail(&comp_node->l, &event_node->comp_list);
+
+exit_free_event_node:
+ /* Free event node if no one else is watching */
+ if (list_empty(&event_node->comp_list)) {
+ list_del(&event_node->l);
+ kfree(event_node);
+ }
+
+ return err;
+}
+
+static int counter_enable_events(struct counter_device *const counter)
+{
+ unsigned long flags;
+ int err = 0;
+
+ mutex_lock(&counter->n_events_list_lock);
+ spin_lock_irqsave(&counter->events_list_lock, flags);
+
+ counter_events_list_free(&counter->events_list);
+ list_replace_init(&counter->next_events_list,
+ &counter->events_list);
+
+ if (counter->ops->events_configure)
+ err = counter->ops->events_configure(counter);
+
+ spin_unlock_irqrestore(&counter->events_list_lock, flags);
+ mutex_unlock(&counter->n_events_list_lock);
+
+ return err;
+}
+
+static int counter_disable_events(struct counter_device *const counter)
+{
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&counter->events_list_lock, flags);
+
+ counter_events_list_free(&counter->events_list);
+
+ if (counter->ops->events_configure)
+ err = counter->ops->events_configure(counter);
+
+ spin_unlock_irqrestore(&counter->events_list_lock, flags);
+
+ mutex_lock(&counter->n_events_list_lock);
+
+ counter_events_list_free(&counter->next_events_list);
+
+ mutex_unlock(&counter->n_events_list_lock);
+
+ return err;
+}
+
+static int counter_add_watch(struct counter_device *const counter,
+ const unsigned long arg)
+{
+ void __user *const uwatch = (void __user *)arg;
+ struct counter_watch watch;
+ struct counter_comp_node comp_node = {};
+ size_t parent, id;
+ struct counter_comp *ext;
+ size_t num_ext;
+ int err = 0;
+
+ if (copy_from_user(&watch, uwatch, sizeof(watch)))
+ return -EFAULT;
+
+ if (watch.component.type == COUNTER_COMPONENT_NONE)
+ goto no_component;
+
+ parent = watch.component.parent;
+
+ /* Configure parent component info for comp node */
+ switch (watch.component.scope) {
+ case COUNTER_SCOPE_DEVICE:
+ ext = counter->ext;
+ num_ext = counter->num_ext;
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ if (parent >= counter->num_signals)
+ return -EINVAL;
+ parent = array_index_nospec(parent, counter->num_signals);
+
+ comp_node.parent = counter->signals + parent;
+
+ ext = counter->signals[parent].ext;
+ num_ext = counter->signals[parent].num_ext;
+ break;
+ case COUNTER_SCOPE_COUNT:
+ if (parent >= counter->num_counts)
+ return -EINVAL;
+ parent = array_index_nospec(parent, counter->num_counts);
+
+ comp_node.parent = counter->counts + parent;
+
+ ext = counter->counts[parent].ext;
+ num_ext = counter->counts[parent].num_ext;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ id = watch.component.id;
+
+ /* Configure component info for comp node */
+ switch (watch.component.type) {
+ case COUNTER_COMPONENT_SIGNAL:
+ if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
+ return -EINVAL;
+
+ comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
+ comp_node.comp.signal_u32_read = counter->ops->signal_read;
+ break;
+ case COUNTER_COMPONENT_COUNT:
+ if (watch.component.scope != COUNTER_SCOPE_COUNT)
+ return -EINVAL;
+
+ comp_node.comp.type = COUNTER_COMP_U64;
+ comp_node.comp.count_u64_read = counter->ops->count_read;
+ break;
+ case COUNTER_COMPONENT_FUNCTION:
+ if (watch.component.scope != COUNTER_SCOPE_COUNT)
+ return -EINVAL;
+
+ comp_node.comp.type = COUNTER_COMP_FUNCTION;
+ comp_node.comp.count_u32_read = counter->ops->function_read;
+ break;
+ case COUNTER_COMPONENT_SYNAPSE_ACTION:
+ if (watch.component.scope != COUNTER_SCOPE_COUNT)
+ return -EINVAL;
+ if (id >= counter->counts[parent].num_synapses)
+ return -EINVAL;
+ id = array_index_nospec(id, counter->counts[parent].num_synapses);
+
+ comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
+ comp_node.comp.action_read = counter->ops->action_read;
+ comp_node.comp.priv = counter->counts[parent].synapses + id;
+ break;
+ case COUNTER_COMPONENT_EXTENSION:
+ if (id >= num_ext)
+ return -EINVAL;
+ id = array_index_nospec(id, num_ext);
+
+ comp_node.comp = ext[id];
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!counter_comp_read_is_set(comp_node.comp))
+ return -EOPNOTSUPP;
+
+no_component:
+ mutex_lock(&counter->n_events_list_lock);
+
+ if (counter->ops->watch_validate) {
+ err = counter->ops->watch_validate(counter, &watch);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ comp_node.component = watch.component;
+
+ err = counter_set_event_node(counter, &watch, &comp_node);
+
+err_exit:
+ mutex_unlock(&counter->n_events_list_lock);
+
+ return err;
+}
+
+static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct counter_device *const counter = filp->private_data;
+ int ret = -ENODEV;
+
+ mutex_lock(&counter->ops_exist_lock);
+
+ if (!counter->ops)
+ goto out_unlock;
+
+ switch (cmd) {
+ case COUNTER_ADD_WATCH_IOCTL:
+ ret = counter_add_watch(counter, arg);
+ break;
+ case COUNTER_ENABLE_EVENTS_IOCTL:
+ ret = counter_enable_events(counter);
+ break;
+ case COUNTER_DISABLE_EVENTS_IOCTL:
+ ret = counter_disable_events(counter);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+out_unlock:
+ mutex_unlock(&counter->ops_exist_lock);
+
+ return ret;
+}
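The intended calling sequence follows from these commands: stage one or more watches with COUNTER_ADD_WATCH_IOCTL, then atomically activate them with COUNTER_ENABLE_EVENTS_IOCTL. A hedged userspace sketch, assuming the uapi structures from this series and an already-open character device descriptor:

#include <sys/ioctl.h>
#include <linux/counter.h>

/* Watch channel 0 overflow events; no component data is requested */
static int arm_overflow_watch(const int fd)
{
	struct counter_watch watch = {
		.component.type = COUNTER_COMPONENT_NONE,
		.event = COUNTER_EVENT_OVERFLOW,
		.channel = 0,
	};

	if (ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch) < 0)
		return -1;

	/* Swap the staged list in and program the hardware */
	return ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL);
}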
+
+static int counter_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct counter_device *const counter = container_of(inode->i_cdev,
+ typeof(*counter),
+ chrdev);
+
+ get_device(&counter->dev);
+ filp->private_data = counter;
+
+ return nonseekable_open(inode, filp);
+}
+
+static int counter_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct counter_device *const counter = filp->private_data;
+ int ret = 0;
+
+ mutex_lock(&counter->ops_exist_lock);
+
+ if (!counter->ops) {
+ /* Free any lingering held memory */
+ counter_events_list_free(&counter->events_list);
+ counter_events_list_free(&counter->next_events_list);
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = counter_disable_events(counter);
+ if (ret < 0) {
+ mutex_unlock(&counter->ops_exist_lock);
+ return ret;
+ }
+
+out_unlock:
+ mutex_unlock(&counter->ops_exist_lock);
+
+ put_device(&counter->dev);
+
+ return ret;
+}
+
+static const struct file_operations counter_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = counter_chrdev_read,
+ .poll = counter_chrdev_poll,
+ .unlocked_ioctl = counter_chrdev_ioctl,
+ .open = counter_chrdev_open,
+ .release = counter_chrdev_release,
+};
+
+int counter_chrdev_add(struct counter_device *const counter)
+{
+ /* Initialize Counter events lists */
+ INIT_LIST_HEAD(&counter->events_list);
+ INIT_LIST_HEAD(&counter->next_events_list);
+ spin_lock_init(&counter->events_list_lock);
+ mutex_init(&counter->n_events_list_lock);
+ init_waitqueue_head(&counter->events_wait);
+ spin_lock_init(&counter->events_in_lock);
+ mutex_init(&counter->events_out_lock);
+
+ /* Initialize character device */
+ cdev_init(&counter->chrdev, &counter_fops);
+
+ /* Allocate Counter events queue */
+ return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
+}
+
+void counter_chrdev_remove(struct counter_device *const counter)
+{
+ kfifo_free(&counter->events);
+}
+
+static int counter_get_data(struct counter_device *const counter,
+ const struct counter_comp_node *const comp_node,
+ u64 *const value)
+{
+ const struct counter_comp *const comp = &comp_node->comp;
+ void *const parent = comp_node->parent;
+ u8 value_u8 = 0;
+ u32 value_u32 = 0;
+ int ret;
+
+ if (comp_node->component.type == COUNTER_COMPONENT_NONE)
+ return 0;
+
+ switch (comp->type) {
+ case COUNTER_COMP_U8:
+ case COUNTER_COMP_BOOL:
+ switch (comp_node->component.scope) {
+ case COUNTER_SCOPE_DEVICE:
+ ret = comp->device_u8_read(counter, &value_u8);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ ret = comp->signal_u8_read(counter, parent, &value_u8);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ ret = comp->count_u8_read(counter, parent, &value_u8);
+ break;
+ }
+ *value = value_u8;
+ return ret;
+ case COUNTER_COMP_SIGNAL_LEVEL:
+ case COUNTER_COMP_FUNCTION:
+ case COUNTER_COMP_ENUM:
+ case COUNTER_COMP_COUNT_DIRECTION:
+ case COUNTER_COMP_COUNT_MODE:
+ switch (comp_node->component.scope) {
+ case COUNTER_SCOPE_DEVICE:
+ ret = comp->device_u32_read(counter, &value_u32);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ ret = comp->signal_u32_read(counter, parent,
+ &value_u32);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ ret = comp->count_u32_read(counter, parent, &value_u32);
+ break;
+ }
+ *value = value_u32;
+ return ret;
+ case COUNTER_COMP_U64:
+ switch (comp_node->component.scope) {
+ case COUNTER_SCOPE_DEVICE:
+ return comp->device_u64_read(counter, value);
+ case COUNTER_SCOPE_SIGNAL:
+ return comp->signal_u64_read(counter, parent, value);
+ case COUNTER_SCOPE_COUNT:
+ return comp->count_u64_read(counter, parent, value);
+ default:
+ return -EINVAL;
+ }
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ ret = comp->action_read(counter, parent, comp->priv,
+ &value_u32);
+ *value = value_u32;
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * counter_push_event - queue event for userspace reading
+ * @counter: pointer to Counter structure
+ * @event: triggered event
+ * @channel: event channel
+ *
+ * Note: If no one is watching for the respective event, it is silently
+ * discarded.
+ */
+void counter_push_event(struct counter_device *const counter, const u8 event,
+ const u8 channel)
+{
+ struct counter_event ev;
+ unsigned int copied = 0;
+ unsigned long flags;
+ struct counter_event_node *event_node;
+ struct counter_comp_node *comp_node;
+
+ ev.timestamp = ktime_get_ns();
+ ev.watch.event = event;
+ ev.watch.channel = channel;
+
+ /* Could be in an interrupt context, so use a spin lock */
+ spin_lock_irqsave(&counter->events_list_lock, flags);
+
+ /* Search for event in the list */
+ list_for_each_entry(event_node, &counter->events_list, l)
+ if (event_node->event == event &&
+ event_node->channel == channel)
+ break;
+
+ /* If event is not in the list */
+ if (&event_node->l == &counter->events_list)
+ goto exit_early;
+
+ /* Read and queue relevant comp for userspace */
+ list_for_each_entry(comp_node, &event_node->comp_list, l) {
+ ev.watch.component = comp_node->component;
+ ev.status = -counter_get_data(counter, comp_node, &ev.value);
+
+ copied += kfifo_in_spinlocked_noirqsave(&counter->events, &ev,
+ 1, &counter->events_in_lock);
+ }
+
+exit_early:
+ spin_unlock_irqrestore(&counter->events_list_lock, flags);
+
+ if (copied)
+ wake_up_poll(&counter->events_wait, EPOLLIN);
+}
+EXPORT_SYMBOL_GPL(counter_push_event);
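For driver authors, the call pattern mirrors the 104-QUAD-8 handler earlier in this patch: map the hardware status to a COUNTER_EVENT_* code and hand it to the core, which snapshots every watched component and queues one record per watcher. A minimal sketch (the handler name is illustrative):

static irqreturn_t example_overflow_irq(int irq, void *private)
{
	struct counter_device *const counter = private;

	/* A cheap no-op beyond a list walk if no one watches this event */
	counter_push_event(counter, COUNTER_EVENT_OVERFLOW, 0);

	return IRQ_HANDLED;
}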
diff --git a/drivers/counter/counter-chrdev.h b/drivers/counter/counter-chrdev.h
new file mode 100644
index 000000000000..5529d16703c4
--- /dev/null
+++ b/drivers/counter/counter-chrdev.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Counter character device interface
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#ifndef _COUNTER_CHRDEV_H_
+#define _COUNTER_CHRDEV_H_
+
+#include <linux/counter.h>
+
+int counter_chrdev_add(struct counter_device *const counter);
+void counter_chrdev_remove(struct counter_device *const counter);
+
+#endif /* _COUNTER_CHRDEV_H_ */
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
new file mode 100644
index 000000000000..5acc54539623
--- /dev/null
+++ b/drivers/counter/counter-core.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Counter interface
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#include <linux/cdev.h>
+#include <linux/counter.h>
+#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "counter-chrdev.h"
+#include "counter-sysfs.h"
+
+/* Provides a unique ID for each counter device */
+static DEFINE_IDA(counter_ida);
+
+static void counter_device_release(struct device *dev)
+{
+ struct counter_device *const counter = dev_get_drvdata(dev);
+
+ counter_chrdev_remove(counter);
+ ida_free(&counter_ida, dev->id);
+}
+
+static struct device_type counter_device_type = {
+ .name = "counter_device",
+ .release = counter_device_release,
+};
+
+static struct bus_type counter_bus_type = {
+ .name = "counter",
+ .dev_name = "counter",
+};
+
+static dev_t counter_devt;
+
+/**
+ * counter_register - register Counter to the system
+ * @counter: pointer to Counter to register
+ *
+ * This function registers a Counter to the system. A sysfs "counter" directory
+ * will be created and populated with sysfs attributes correlating with the
+ * Counter Signals, Synapses, and Counts.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int counter_register(struct counter_device *const counter)
+{
+ struct device *const dev = &counter->dev;
+ int id;
+ int err;
+
+ /* Acquire unique ID */
+ id = ida_alloc(&counter_ida, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ mutex_init(&counter->ops_exist_lock);
+
+ /* Configure device structure for Counter */
+ dev->id = id;
+ dev->type = &counter_device_type;
+ dev->bus = &counter_bus_type;
+ dev->devt = MKDEV(MAJOR(counter_devt), id);
+ if (counter->parent) {
+ dev->parent = counter->parent;
+ dev->of_node = counter->parent->of_node;
+ }
+ device_initialize(dev);
+ dev_set_drvdata(dev, counter);
+
+ err = counter_sysfs_add(counter);
+ if (err < 0)
+ goto err_free_id;
+
+ err = counter_chrdev_add(counter);
+ if (err < 0)
+ goto err_free_id;
+
+ err = cdev_device_add(&counter->chrdev, dev);
+ if (err < 0)
+ goto err_remove_chrdev;
+
+ return 0;
+
+err_remove_chrdev:
+ counter_chrdev_remove(counter);
+err_free_id:
+ put_device(dev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(counter_register);
+
+/**
+ * counter_unregister - unregister Counter from the system
+ * @counter: pointer to Counter to unregister
+ *
+ * The Counter is unregistered from the system.
+ */
+void counter_unregister(struct counter_device *const counter)
+{
+ if (!counter)
+ return;
+
+ cdev_device_del(&counter->chrdev, &counter->dev);
+
+ mutex_lock(&counter->ops_exist_lock);
+
+ counter->ops = NULL;
+ wake_up(&counter->events_wait);
+
+ mutex_unlock(&counter->ops_exist_lock);
+
+ put_device(&counter->dev);
+}
+EXPORT_SYMBOL_GPL(counter_unregister);
+
+static void devm_counter_release(void *counter)
+{
+ counter_unregister(counter);
+}
+
+/**
+ * devm_counter_register - Resource-managed counter_register
+ * @dev: device to allocate counter_device for
+ * @counter: pointer to Counter to register
+ *
+ * Managed counter_register. The Counter registered with this function is
+ * automatically unregistered on driver detach. This function calls
+ * counter_register internally. Refer to that function for more information.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_counter_register(struct device *dev,
+ struct counter_device *const counter)
+{
+ int err;
+
+ err = counter_register(counter);
+ if (err < 0)
+ return err;
+
+ return devm_add_action_or_reset(dev, devm_counter_release, counter);
+}
+EXPORT_SYMBOL_GPL(devm_counter_register);
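A typical probe sequence at this point in the series embeds the counter_device in driver private data and lets devm handle teardown, as the 104-QUAD-8 conversion above does. A hedged sketch; example_ops, example_counts, and struct example_priv are hypothetical:

static int example_probe(struct device *dev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->counter.name = dev_name(dev);
	priv->counter.parent = dev;
	priv->counter.ops = &example_ops;
	priv->counter.counts = example_counts;
	priv->counter.num_counts = ARRAY_SIZE(example_counts);
	priv->counter.priv = priv;

	/* Unregisters automatically on driver detach */
	return devm_counter_register(dev, &priv->counter);
}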
+
+#define COUNTER_DEV_MAX 256
+
+static int __init counter_init(void)
+{
+ int err;
+
+ err = bus_register(&counter_bus_type);
+ if (err < 0)
+ return err;
+
+ err = alloc_chrdev_region(&counter_devt, 0, COUNTER_DEV_MAX, "counter");
+ if (err < 0)
+ goto err_unregister_bus;
+
+ return 0;
+
+err_unregister_bus:
+ bus_unregister(&counter_bus_type);
+ return err;
+}
+
+static void __exit counter_exit(void)
+{
+ unregister_chrdev_region(counter_devt, COUNTER_DEV_MAX);
+ bus_unregister(&counter_bus_type);
+}
+
+subsys_initcall(counter_init);
+module_exit(counter_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("Generic Counter interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/counter/counter-sysfs.c b/drivers/counter/counter-sysfs.c
new file mode 100644
index 000000000000..7cc4d1d523ea
--- /dev/null
+++ b/drivers/counter/counter-sysfs.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Counter sysfs interface
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#include <linux/counter.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include "counter-sysfs.h"
+
+/**
+ * struct counter_attribute - Counter sysfs attribute
+ * @dev_attr: device attribute for sysfs
+ * @l: node to add Counter attribute to attribute group list
+ * @comp: Counter component callbacks and data
+ * @scope: Counter scope of the attribute
+ * @parent: pointer to the parent component
+ */
+struct counter_attribute {
+ struct device_attribute dev_attr;
+ struct list_head l;
+
+ struct counter_comp comp;
+ enum counter_scope scope;
+ void *parent;
+};
+
+#define to_counter_attribute(_dev_attr) \
+ container_of(_dev_attr, struct counter_attribute, dev_attr)
+
+/**
+ * struct counter_attribute_group - container for attribute group
+ * @name: name of the attribute group
+ * @attr_list: list to keep track of created attributes
+ * @num_attr: number of attributes
+ */
+struct counter_attribute_group {
+ const char *name;
+ struct list_head attr_list;
+ size_t num_attr;
+};
+
+static const char *const counter_function_str[] = {
+ [COUNTER_FUNCTION_INCREASE] = "increase",
+ [COUNTER_FUNCTION_DECREASE] = "decrease",
+ [COUNTER_FUNCTION_PULSE_DIRECTION] = "pulse-direction",
+ [COUNTER_FUNCTION_QUADRATURE_X1_A] = "quadrature x1 a",
+ [COUNTER_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b",
+ [COUNTER_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a",
+ [COUNTER_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b",
+ [COUNTER_FUNCTION_QUADRATURE_X4] = "quadrature x4"
+};
+
+static const char *const counter_signal_value_str[] = {
+ [COUNTER_SIGNAL_LEVEL_LOW] = "low",
+ [COUNTER_SIGNAL_LEVEL_HIGH] = "high"
+};
+
+static const char *const counter_synapse_action_str[] = {
+ [COUNTER_SYNAPSE_ACTION_NONE] = "none",
+ [COUNTER_SYNAPSE_ACTION_RISING_EDGE] = "rising edge",
+ [COUNTER_SYNAPSE_ACTION_FALLING_EDGE] = "falling edge",
+ [COUNTER_SYNAPSE_ACTION_BOTH_EDGES] = "both edges"
+};
+
+static const char *const counter_count_direction_str[] = {
+ [COUNTER_COUNT_DIRECTION_FORWARD] = "forward",
+ [COUNTER_COUNT_DIRECTION_BACKWARD] = "backward"
+};
+
+static const char *const counter_count_mode_str[] = {
+ [COUNTER_COUNT_MODE_NORMAL] = "normal",
+ [COUNTER_COUNT_MODE_RANGE_LIMIT] = "range limit",
+ [COUNTER_COUNT_MODE_NON_RECYCLE] = "non-recycle",
+ [COUNTER_COUNT_MODE_MODULO_N] = "modulo-n"
+};
+
+static ssize_t counter_comp_u8_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ int err;
+ u8 data = 0;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u8_read(counter, &data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u8_read(counter, a->parent, &data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_u8_read(counter, a->parent, &data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ if (a->comp.type == COUNTER_COMP_BOOL)
+ /* data should already be boolean, but normalize it just to be safe */
+ data = !!data;
+
+ return sysfs_emit(buf, "%u\n", (unsigned int)data);
+}
+
+static ssize_t counter_comp_u8_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ int err;
+ bool bool_data = 0;
+ u8 data = 0;
+
+ if (a->comp.type == COUNTER_COMP_BOOL) {
+ err = kstrtobool(buf, &bool_data);
+ data = bool_data;
+ } else
+ err = kstrtou8(buf, 0, &data);
+ if (err < 0)
+ return err;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u8_write(counter, data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u8_write(counter, a->parent, data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_u8_write(counter, a->parent, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return len;
+}
+
+static ssize_t counter_comp_u32_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_available *const avail = a->comp.priv;
+ int err;
+ u32 data = 0;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u32_read(counter, &data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u32_read(counter, a->parent, &data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ if (a->comp.type == COUNTER_COMP_SYNAPSE_ACTION)
+ err = a->comp.action_read(counter, a->parent,
+ a->comp.priv, &data);
+ else
+ err = a->comp.count_u32_read(counter, a->parent, &data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ switch (a->comp.type) {
+ case COUNTER_COMP_FUNCTION:
+ return sysfs_emit(buf, "%s\n", counter_function_str[data]);
+ case COUNTER_COMP_SIGNAL_LEVEL:
+ return sysfs_emit(buf, "%s\n", counter_signal_value_str[data]);
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ return sysfs_emit(buf, "%s\n", counter_synapse_action_str[data]);
+ case COUNTER_COMP_ENUM:
+ return sysfs_emit(buf, "%s\n", avail->strs[data]);
+ case COUNTER_COMP_COUNT_DIRECTION:
+ return sysfs_emit(buf, "%s\n", counter_count_direction_str[data]);
+ case COUNTER_COMP_COUNT_MODE:
+ return sysfs_emit(buf, "%s\n", counter_count_mode_str[data]);
+ default:
+ return sysfs_emit(buf, "%u\n", (unsigned int)data);
+ }
+}
+
+static int counter_find_enum(u32 *const enum_item, const u32 *const enums,
+ const size_t num_enums, const char *const buf,
+ const char *const string_array[])
+{
+ size_t index;
+
+ for (index = 0; index < num_enums; index++) {
+ *enum_item = enums[index];
+ if (sysfs_streq(buf, string_array[*enum_item]))
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t counter_comp_u32_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_count *const count = a->parent;
+ struct counter_synapse *const synapse = a->comp.priv;
+ const struct counter_available *const avail = a->comp.priv;
+ int err;
+ u32 data = 0;
+
+ switch (a->comp.type) {
+ case COUNTER_COMP_FUNCTION:
+ err = counter_find_enum(&data, count->functions_list,
+ count->num_functions, buf,
+ counter_function_str);
+ break;
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ err = counter_find_enum(&data, synapse->actions_list,
+ synapse->num_actions, buf,
+ counter_synapse_action_str);
+ break;
+ case COUNTER_COMP_ENUM:
+ err = __sysfs_match_string(avail->strs, avail->num_items, buf);
+ data = err;
+ break;
+ case COUNTER_COMP_COUNT_MODE:
+ err = counter_find_enum(&data, avail->enums, avail->num_items,
+ buf, counter_count_mode_str);
+ break;
+ default:
+ err = kstrtou32(buf, 0, &data);
+ break;
+ }
+ if (err < 0)
+ return err;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u32_write(counter, data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u32_write(counter, a->parent, data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ if (a->comp.type == COUNTER_COMP_SYNAPSE_ACTION)
+ err = a->comp.action_write(counter, count, synapse,
+ data);
+ else
+ err = a->comp.count_u32_write(counter, count, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return len;
+}
+
+static ssize_t counter_comp_u64_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ int err;
+ u64 data = 0;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u64_read(counter, &data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u64_read(counter, a->parent, &data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_u64_read(counter, a->parent, &data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)data);
+}
+
+static ssize_t counter_comp_u64_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ int err;
+ u64 data = 0;
+
+ err = kstrtou64(buf, 0, &data);
+ if (err < 0)
+ return err;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u64_write(counter, data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u64_write(counter, a->parent, data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_u64_write(counter, a->parent, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return len;
+}
+
+static ssize_t enums_available_show(const u32 *const enums,
+ const size_t num_enums,
+ const char *const strs[], char *buf)
+{
+ size_t len = 0;
+ size_t index;
+
+ for (index = 0; index < num_enums; index++)
+ len += sysfs_emit_at(buf, len, "%s\n", strs[enums[index]]);
+
+ return len;
+}
+
+static ssize_t strs_available_show(const struct counter_available *const avail,
+ char *buf)
+{
+ size_t len = 0;
+ size_t index;
+
+ for (index = 0; index < avail->num_items; index++)
+ len += sysfs_emit_at(buf, len, "%s\n", avail->strs[index]);
+
+ return len;
+}
+
+static ssize_t counter_comp_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ const struct counter_count *const count = a->parent;
+ const struct counter_synapse *const synapse = a->comp.priv;
+ const struct counter_available *const avail = a->comp.priv;
+
+ switch (a->comp.type) {
+ case COUNTER_COMP_FUNCTION:
+ return enums_available_show(count->functions_list,
+ count->num_functions,
+ counter_function_str, buf);
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ return enums_available_show(synapse->actions_list,
+ synapse->num_actions,
+ counter_synapse_action_str, buf);
+ case COUNTER_COMP_ENUM:
+ return strs_available_show(avail, buf);
+ case COUNTER_COMP_COUNT_MODE:
+ return enums_available_show(avail->enums, avail->num_items,
+ counter_count_mode_str, buf);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int counter_avail_attr_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const struct counter_comp *const comp, void *const parent)
+{
+ struct counter_attribute *counter_attr;
+ struct device_attribute *dev_attr;
+
+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
+ if (!counter_attr)
+ return -ENOMEM;
+
+ /* Configure Counter attribute */
+ counter_attr->comp.type = comp->type;
+ counter_attr->comp.priv = comp->priv;
+ counter_attr->parent = parent;
+
+ /* Initialize sysfs attribute */
+ dev_attr = &counter_attr->dev_attr;
+ sysfs_attr_init(&dev_attr->attr);
+
+ /* Configure device attribute */
+ dev_attr->attr.name = devm_kasprintf(dev, GFP_KERNEL, "%s_available",
+ comp->name);
+ if (!dev_attr->attr.name)
+ return -ENOMEM;
+ dev_attr->attr.mode = 0444;
+ dev_attr->show = counter_comp_available_show;
+
+ /* Store list node */
+ list_add(&counter_attr->l, &group->attr_list);
+ group->num_attr++;
+
+ return 0;
+}
+
+static int counter_attr_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const struct counter_comp *const comp,
+ const enum counter_scope scope,
+ void *const parent)
+{
+ struct counter_attribute *counter_attr;
+ struct device_attribute *dev_attr;
+
+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
+ if (!counter_attr)
+ return -ENOMEM;
+
+ /* Configure Counter attribute */
+ counter_attr->comp = *comp;
+ counter_attr->scope = scope;
+ counter_attr->parent = parent;
+
+ /* Configure device attribute */
+ dev_attr = &counter_attr->dev_attr;
+ sysfs_attr_init(&dev_attr->attr);
+ dev_attr->attr.name = comp->name;
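+	/*
+	 * Attribute permissions follow the callbacks the driver supplies: a
+	 * read callback adds 0444 (world-readable), a write callback adds
+	 * 0200 (owner-writable). The per-scope callback pointers occupy a
+	 * union in struct counter_comp, so testing the device_* members
+	 * works for any scope.
+	 */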
+ switch (comp->type) {
+ case COUNTER_COMP_U8:
+ case COUNTER_COMP_BOOL:
+ if (comp->device_u8_read) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = counter_comp_u8_show;
+ }
+ if (comp->device_u8_write) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = counter_comp_u8_store;
+ }
+ break;
+ case COUNTER_COMP_SIGNAL_LEVEL:
+ case COUNTER_COMP_FUNCTION:
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ case COUNTER_COMP_ENUM:
+ case COUNTER_COMP_COUNT_DIRECTION:
+ case COUNTER_COMP_COUNT_MODE:
+ if (comp->device_u32_read) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = counter_comp_u32_show;
+ }
+ if (comp->device_u32_write) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = counter_comp_u32_store;
+ }
+ break;
+ case COUNTER_COMP_U64:
+ if (comp->device_u64_read) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = counter_comp_u64_show;
+ }
+ if (comp->device_u64_write) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = counter_comp_u64_store;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Store list node */
+ list_add(&counter_attr->l, &group->attr_list);
+ group->num_attr++;
+
+ /* Create "*_available" attribute if needed */
+ switch (comp->type) {
+ case COUNTER_COMP_FUNCTION:
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ case COUNTER_COMP_ENUM:
+ case COUNTER_COMP_COUNT_MODE:
+ return counter_avail_attr_create(dev, group, comp, parent);
+ default:
+ return 0;
+ }
+}
+
+static ssize_t counter_comp_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", to_counter_attribute(attr)->comp.name);
+}
+
+static int counter_name_attr_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const char *const name)
+{
+ struct counter_attribute *counter_attr;
+
+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
+ if (!counter_attr)
+ return -ENOMEM;
+
+ /* Configure Counter attribute */
+ counter_attr->comp.name = name;
+
+ /* Configure device attribute */
+ sysfs_attr_init(&counter_attr->dev_attr.attr);
+ counter_attr->dev_attr.attr.name = "name";
+ counter_attr->dev_attr.attr.mode = 0444;
+ counter_attr->dev_attr.show = counter_comp_name_show;
+
+ /* Store list node */
+ list_add(&counter_attr->l, &group->attr_list);
+ group->num_attr++;
+
+ return 0;
+}
+
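+/*
+ * Every extension attribute is paired with a "<name>_component_id" attribute
+ * that reports the extension's index (stashed in comp.priv), giving userspace
+ * a stable numeric handle for the component.
+ */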
+static ssize_t counter_comp_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const size_t id = (size_t)to_counter_attribute(attr)->comp.priv;
+
+ return sysfs_emit(buf, "%zu\n", id);
+}
+
+static int counter_comp_id_attr_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const char *name, const size_t id)
+{
+ struct counter_attribute *counter_attr;
+
+ /* Allocate Counter attribute */
+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
+ if (!counter_attr)
+ return -ENOMEM;
+
+ /* Generate component ID name */
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s_component_id", name);
+ if (!name)
+ return -ENOMEM;
+
+ /* Configure Counter attribute */
+ counter_attr->comp.priv = (void *)id;
+
+ /* Configure device attribute */
+ sysfs_attr_init(&counter_attr->dev_attr.attr);
+ counter_attr->dev_attr.attr.name = name;
+ counter_attr->dev_attr.attr.mode = 0444;
+ counter_attr->dev_attr.show = counter_comp_id_show;
+
+ /* Store list node */
+ list_add(&counter_attr->l, &group->attr_list);
+ group->num_attr++;
+
+ return 0;
+}
+
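+/*
+ * Template for the main Signal attribute; counter_signal_attrs_create() copies
+ * it and fills in the driver's signal_read callback before creating the
+ * attribute.
+ */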
+static struct counter_comp counter_signal_comp = {
+ .type = COUNTER_COMP_SIGNAL_LEVEL,
+ .name = "signal",
+};
+
+static int counter_signal_attrs_create(struct counter_device *const counter,
+ struct counter_attribute_group *const cattr_group,
+ struct counter_signal *const signal)
+{
+ const enum counter_scope scope = COUNTER_SCOPE_SIGNAL;
+ struct device *const dev = &counter->dev;
+ int err;
+ struct counter_comp comp;
+ size_t i;
+ struct counter_comp *ext;
+
+ /* Create main Signal attribute */
+ comp = counter_signal_comp;
+ comp.signal_u32_read = counter->ops->signal_read;
+ err = counter_attr_create(dev, cattr_group, &comp, scope, signal);
+ if (err < 0)
+ return err;
+
+ /* Create Signal name attribute */
+ err = counter_name_attr_create(dev, cattr_group, signal->name);
+ if (err < 0)
+ return err;
+
+ /* Create an attribute for each extension */
+ for (i = 0; i < signal->num_ext; i++) {
+ ext = &signal->ext[i];
+
+ err = counter_attr_create(dev, cattr_group, ext, scope, signal);
+ if (err < 0)
+ return err;
+
+ err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
+ i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int counter_sysfs_signals_add(struct counter_device *const counter,
+ struct counter_attribute_group *const groups)
+{
+ size_t i;
+ int err;
+
+ /* Add each Signal */
+ for (i = 0; i < counter->num_signals; i++) {
+ /* Generate Signal attribute directory name */
+ groups[i].name = devm_kasprintf(&counter->dev, GFP_KERNEL,
+ "signal%zu", i);
+ if (!groups[i].name)
+ return -ENOMEM;
+
+ /* Create all attributes associated with Signal */
+ err = counter_signal_attrs_create(counter, groups + i,
+ counter->signals + i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
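+/*
+ * Synapse attributes are named after the Signal they connect to: the Signal's
+ * index is recovered by pointer arithmetic against the counter->signals array
+ * and used to build the "signal<N>_action" attribute name.
+ */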
+
+static int counter_sysfs_synapses_add(struct counter_device *const counter,
+ struct counter_attribute_group *const group,
+ struct counter_count *const count)
+{
+ size_t i;
+
+ /* Add each Synapse */
+ for (i = 0; i < count->num_synapses; i++) {
+ struct device *const dev = &counter->dev;
+ struct counter_synapse *synapse;
+ size_t id;
+ struct counter_comp comp;
+ int err;
+
+ synapse = count->synapses + i;
+
+ /* Generate Synapse action name */
+ id = synapse->signal - counter->signals;
+ comp.name = devm_kasprintf(dev, GFP_KERNEL, "signal%zu_action",
+ id);
+ if (!comp.name)
+ return -ENOMEM;
+
+ /* Create action attribute */
+ comp.type = COUNTER_COMP_SYNAPSE_ACTION;
+ comp.action_read = counter->ops->action_read;
+ comp.action_write = counter->ops->action_write;
+ comp.priv = synapse;
+ err = counter_attr_create(dev, group, &comp,
+ COUNTER_SCOPE_COUNT, count);
+ if (err < 0)
+ return err;
+
+ /* Create Synapse component ID attribute */
+ err = counter_comp_id_attr_create(dev, group, comp.name, i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
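+/*
+ * Templates for the main Count attributes; counter_count_attrs_create() copies
+ * them and wires in the driver's count_read/count_write and
+ * function_read/function_write callbacks.
+ */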
+static struct counter_comp counter_count_comp =
+ COUNTER_COMP_COUNT_U64("count", NULL, NULL);
+
+static struct counter_comp counter_function_comp = {
+ .type = COUNTER_COMP_FUNCTION,
+ .name = "function",
+};
+
+static int counter_count_attrs_create(struct counter_device *const counter,
+ struct counter_attribute_group *const cattr_group,
+ struct counter_count *const count)
+{
+ const enum counter_scope scope = COUNTER_SCOPE_COUNT;
+ struct device *const dev = &counter->dev;
+ int err;
+ struct counter_comp comp;
+ size_t i;
+ struct counter_comp *ext;
+
+ /* Create main Count attribute */
+ comp = counter_count_comp;
+ comp.count_u64_read = counter->ops->count_read;
+ comp.count_u64_write = counter->ops->count_write;
+ err = counter_attr_create(dev, cattr_group, &comp, scope, count);
+ if (err < 0)
+ return err;
+
+ /* Create Count name attribute */
+ err = counter_name_attr_create(dev, cattr_group, count->name);
+ if (err < 0)
+ return err;
+
+ /* Create Count function attribute */
+ comp = counter_function_comp;
+ comp.count_u32_read = counter->ops->function_read;
+ comp.count_u32_write = counter->ops->function_write;
+ err = counter_attr_create(dev, cattr_group, &comp, scope, count);
+ if (err < 0)
+ return err;
+
+ /* Create an attribute for each extension */
+ for (i = 0; i < count->num_ext; i++) {
+ ext = &count->ext[i];
+
+ err = counter_attr_create(dev, cattr_group, ext, scope, count);
+ if (err < 0)
+ return err;
+
+ err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
+ i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int counter_sysfs_counts_add(struct counter_device *const counter,
+ struct counter_attribute_group *const groups)
+{
+ size_t i;
+ struct counter_count *count;
+ int err;
+
+ /* Add each Count */
+ for (i = 0; i < counter->num_counts; i++) {
+ count = counter->counts + i;
+
+ /* Generate Count attribute directory name */
+ groups[i].name = devm_kasprintf(&counter->dev, GFP_KERNEL,
+ "count%zu", i);
+ if (!groups[i].name)
+ return -ENOMEM;
+
+ /* Add sysfs attributes of the Synapses */
+ err = counter_sysfs_synapses_add(counter, groups + i, count);
+ if (err < 0)
+ return err;
+
+ /* Create all attributes associated with Count */
+ err = counter_count_attrs_create(counter, groups + i, count);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int counter_num_signals_read(struct counter_device *counter, u8 *val)
+{
+ *val = counter->num_signals;
+ return 0;
+}
+
+static int counter_num_counts_read(struct counter_device *counter, u8 *val)
+{
+ *val = counter->num_counts;
+ return 0;
+}
+
+static int counter_events_queue_size_read(struct counter_device *counter,
+ u64 *val)
+{
+ *val = kfifo_size(&counter->events);
+ return 0;
+}
+
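+/*
+ * Resizing the events queue allocates a replacement kfifo and swaps it in
+ * while holding both the reader mutex and the writer spinlock, so no producer
+ * or consumer can observe the queue mid-swap. Events still pending in the old
+ * kfifo are discarded by kfifo_free().
+ */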
+static int counter_events_queue_size_write(struct counter_device *counter,
+ u64 val)
+{
+ DECLARE_KFIFO_PTR(events, struct counter_event);
+ int err;
+ unsigned long flags;
+
+ /* Allocate new events queue */
+ err = kfifo_alloc(&events, val, GFP_KERNEL);
+ if (err)
+ return err;
+
+ /* Swap in new events queue */
+ mutex_lock(&counter->events_out_lock);
+ spin_lock_irqsave(&counter->events_in_lock, flags);
+ kfifo_free(&counter->events);
+ counter->events.kfifo = events.kfifo;
+ spin_unlock_irqrestore(&counter->events_in_lock, flags);
+ mutex_unlock(&counter->events_out_lock);
+
+ return 0;
+}
+
+static struct counter_comp counter_num_signals_comp =
+ COUNTER_COMP_DEVICE_U8("num_signals", counter_num_signals_read, NULL);
+
+static struct counter_comp counter_num_counts_comp =
+ COUNTER_COMP_DEVICE_U8("num_counts", counter_num_counts_read, NULL);
+
+static struct counter_comp counter_events_queue_size_comp =
+ COUNTER_COMP_DEVICE_U64("events_queue_size",
+ counter_events_queue_size_read,
+ counter_events_queue_size_write);
+
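+/*
+ * Device attribute groups are laid out as one group per Signal, then one per
+ * Count, with a final group holding the device-scope attributes (name,
+ * num_signals, num_counts, events_queue_size, and any device extensions).
+ */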
+static int counter_sysfs_attr_add(struct counter_device *const counter,
+ struct counter_attribute_group *cattr_group)
+{
+ const enum counter_scope scope = COUNTER_SCOPE_DEVICE;
+ struct device *const dev = &counter->dev;
+ int err;
+ size_t i;
+ struct counter_comp *ext;
+
+ /* Add Signals sysfs attributes */
+ err = counter_sysfs_signals_add(counter, cattr_group);
+ if (err < 0)
+ return err;
+ cattr_group += counter->num_signals;
+
+ /* Add Counts sysfs attributes */
+ err = counter_sysfs_counts_add(counter, cattr_group);
+ if (err < 0)
+ return err;
+ cattr_group += counter->num_counts;
+
+ /* Create name attribute */
+ err = counter_name_attr_create(dev, cattr_group, counter->name);
+ if (err < 0)
+ return err;
+
+ /* Create num_signals attribute */
+ err = counter_attr_create(dev, cattr_group, &counter_num_signals_comp,
+ scope, NULL);
+ if (err < 0)
+ return err;
+
+ /* Create num_counts attribute */
+ err = counter_attr_create(dev, cattr_group, &counter_num_counts_comp,
+ scope, NULL);
+ if (err < 0)
+ return err;
+
+ /* Create events_queue_size attribute */
+ err = counter_attr_create(dev, cattr_group,
+ &counter_events_queue_size_comp, scope, NULL);
+ if (err < 0)
+ return err;
+
+ /* Create an attribute for each extension */
+ for (i = 0; i < counter->num_ext; i++) {
+ ext = &counter->ext[i];
+
+ err = counter_attr_create(dev, cattr_group, ext, scope, NULL);
+ if (err < 0)
+ return err;
+
+ err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
+ i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * counter_sysfs_add - Adds Counter sysfs attributes to the device structure
+ * @counter: Pointer to the Counter device structure
+ *
+ * Counter sysfs attributes are created and added to the respective device
+ * structure for later registration to the system. All memory allocated by
+ * this function is resource-managed: it is freed automatically on
+ * device_unregister, or can be released earlier with devres_release_all.
+ */
+int counter_sysfs_add(struct counter_device *const counter)
+{
+ struct device *const dev = &counter->dev;
+ const size_t num_groups = counter->num_signals + counter->num_counts + 1;
+ struct counter_attribute_group *cattr_groups;
+ size_t i, j;
+ int err;
+ struct attribute_group *groups;
+ struct counter_attribute *p;
+
+ /* Allocate space for attribute groups (signals, counts, and ext) */
+ cattr_groups = devm_kcalloc(dev, num_groups, sizeof(*cattr_groups),
+ GFP_KERNEL);
+ if (!cattr_groups)
+ return -ENOMEM;
+
+ /* Initialize attribute lists */
+ for (i = 0; i < num_groups; i++)
+ INIT_LIST_HEAD(&cattr_groups[i].attr_list);
+
+ /* Add Counter device sysfs attributes */
+ err = counter_sysfs_attr_add(counter, cattr_groups);
+ if (err < 0)
+ return err;
+
+ /* Allocate attribute group pointers for association with device */
+ dev->groups = devm_kcalloc(dev, num_groups + 1, sizeof(*dev->groups),
+ GFP_KERNEL);
+ if (!dev->groups)
+ return -ENOMEM;
+
+ /* Allocate space for attribute groups */
+ groups = devm_kcalloc(dev, num_groups, sizeof(*groups), GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ /* Prepare each group of attributes for association */
+ for (i = 0; i < num_groups; i++) {
+ groups[i].name = cattr_groups[i].name;
+
+ /* Allocate space for attribute pointers */
+ groups[i].attrs = devm_kcalloc(dev,
+ cattr_groups[i].num_attr + 1,
+ sizeof(*groups[i].attrs),
+ GFP_KERNEL);
+ if (!groups[i].attrs)
+ return -ENOMEM;
+
+ /* Add attribute pointers to attribute group */
+ j = 0;
+ list_for_each_entry(p, &cattr_groups[i].attr_list, l)
+ groups[i].attrs[j++] = &p->dev_attr.attr;
+
+ /* Associate attribute group */
+ dev->groups[i] = &groups[i];
+ }
+
+ return 0;
+}
diff --git a/drivers/counter/counter-sysfs.h b/drivers/counter/counter-sysfs.h
new file mode 100644
index 000000000000..14fe566aca0e
--- /dev/null
+++ b/drivers/counter/counter-sysfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Counter sysfs interface
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#ifndef _COUNTER_SYSFS_H_
+#define _COUNTER_SYSFS_H_
+
+#include <linux/counter.h>
+
+int counter_sysfs_add(struct counter_device *const counter);
+
+#endif /* _COUNTER_SYSFS_H_ */
diff --git a/drivers/counter/counter.c b/drivers/counter/counter.c
deleted file mode 100644
index de921e8a3f72..000000000000
--- a/drivers/counter/counter.c
+++ /dev/null
@@ -1,1496 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Generic Counter interface
- * Copyright (C) 2018 William Breathitt Gray
- */
-#include <linux/counter.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/gfp.h>
-#include <linux/idr.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/printk.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-
-const char *const counter_count_direction_str[2] = {
- [COUNTER_COUNT_DIRECTION_FORWARD] = "forward",
- [COUNTER_COUNT_DIRECTION_BACKWARD] = "backward"
-};
-EXPORT_SYMBOL_GPL(counter_count_direction_str);
-
-const char *const counter_count_mode_str[4] = {
- [COUNTER_COUNT_MODE_NORMAL] = "normal",
- [COUNTER_COUNT_MODE_RANGE_LIMIT] = "range limit",
- [COUNTER_COUNT_MODE_NON_RECYCLE] = "non-recycle",
- [COUNTER_COUNT_MODE_MODULO_N] = "modulo-n"
-};
-EXPORT_SYMBOL_GPL(counter_count_mode_str);
-
-ssize_t counter_signal_enum_read(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- char *buf)
-{
- const struct counter_signal_enum_ext *const e = priv;
- int err;
- size_t index;
-
- if (!e->get)
- return -EINVAL;
-
- err = e->get(counter, signal, &index);
- if (err)
- return err;
-
- if (index >= e->num_items)
- return -EINVAL;
-
- return sprintf(buf, "%s\n", e->items[index]);
-}
-EXPORT_SYMBOL_GPL(counter_signal_enum_read);
-
-ssize_t counter_signal_enum_write(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- const char *buf, size_t len)
-{
- const struct counter_signal_enum_ext *const e = priv;
- ssize_t index;
- int err;
-
- if (!e->set)
- return -EINVAL;
-
- index = __sysfs_match_string(e->items, e->num_items, buf);
- if (index < 0)
- return index;
-
- err = e->set(counter, signal, index);
- if (err)
- return err;
-
- return len;
-}
-EXPORT_SYMBOL_GPL(counter_signal_enum_write);
-
-ssize_t counter_signal_enum_available_read(struct counter_device *counter,
- struct counter_signal *signal,
- void *priv, char *buf)
-{
- const struct counter_signal_enum_ext *const e = priv;
- size_t i;
- size_t len = 0;
-
- if (!e->num_items)
- return 0;
-
- for (i = 0; i < e->num_items; i++)
- len += sprintf(buf + len, "%s\n", e->items[i]);
-
- return len;
-}
-EXPORT_SYMBOL_GPL(counter_signal_enum_available_read);
-
-ssize_t counter_count_enum_read(struct counter_device *counter,
- struct counter_count *count, void *priv,
- char *buf)
-{
- const struct counter_count_enum_ext *const e = priv;
- int err;
- size_t index;
-
- if (!e->get)
- return -EINVAL;
-
- err = e->get(counter, count, &index);
- if (err)
- return err;
-
- if (index >= e->num_items)
- return -EINVAL;
-
- return sprintf(buf, "%s\n", e->items[index]);
-}
-EXPORT_SYMBOL_GPL(counter_count_enum_read);
-
-ssize_t counter_count_enum_write(struct counter_device *counter,
- struct counter_count *count, void *priv,
- const char *buf, size_t len)
-{
- const struct counter_count_enum_ext *const e = priv;
- ssize_t index;
- int err;
-
- if (!e->set)
- return -EINVAL;
-
- index = __sysfs_match_string(e->items, e->num_items, buf);
- if (index < 0)
- return index;
-
- err = e->set(counter, count, index);
- if (err)
- return err;
-
- return len;
-}
-EXPORT_SYMBOL_GPL(counter_count_enum_write);
-
-ssize_t counter_count_enum_available_read(struct counter_device *counter,
- struct counter_count *count,
- void *priv, char *buf)
-{
- const struct counter_count_enum_ext *const e = priv;
- size_t i;
- size_t len = 0;
-
- if (!e->num_items)
- return 0;
-
- for (i = 0; i < e->num_items; i++)
- len += sprintf(buf + len, "%s\n", e->items[i]);
-
- return len;
-}
-EXPORT_SYMBOL_GPL(counter_count_enum_available_read);
-
-ssize_t counter_device_enum_read(struct counter_device *counter, void *priv,
- char *buf)
-{
- const struct counter_device_enum_ext *const e = priv;
- int err;
- size_t index;
-
- if (!e->get)
- return -EINVAL;
-
- err = e->get(counter, &index);
- if (err)
- return err;
-
- if (index >= e->num_items)
- return -EINVAL;
-
- return sprintf(buf, "%s\n", e->items[index]);
-}
-EXPORT_SYMBOL_GPL(counter_device_enum_read);
-
-ssize_t counter_device_enum_write(struct counter_device *counter, void *priv,
- const char *buf, size_t len)
-{
- const struct counter_device_enum_ext *const e = priv;
- ssize_t index;
- int err;
-
- if (!e->set)
- return -EINVAL;
-
- index = __sysfs_match_string(e->items, e->num_items, buf);
- if (index < 0)
- return index;
-
- err = e->set(counter, index);
- if (err)
- return err;
-
- return len;
-}
-EXPORT_SYMBOL_GPL(counter_device_enum_write);
-
-ssize_t counter_device_enum_available_read(struct counter_device *counter,
- void *priv, char *buf)
-{
- const struct counter_device_enum_ext *const e = priv;
- size_t i;
- size_t len = 0;
-
- if (!e->num_items)
- return 0;
-
- for (i = 0; i < e->num_items; i++)
- len += sprintf(buf + len, "%s\n", e->items[i]);
-
- return len;
-}
-EXPORT_SYMBOL_GPL(counter_device_enum_available_read);
-
-struct counter_attr_parm {
- struct counter_device_attr_group *group;
- const char *prefix;
- const char *name;
- ssize_t (*show)(struct device *dev, struct device_attribute *attr,
- char *buf);
- ssize_t (*store)(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len);
- void *component;
-};
-
-struct counter_device_attr {
- struct device_attribute dev_attr;
- struct list_head l;
- void *component;
-};
-
-static int counter_attribute_create(const struct counter_attr_parm *const parm)
-{
- struct counter_device_attr *counter_attr;
- struct device_attribute *dev_attr;
- int err;
- struct list_head *const attr_list = &parm->group->attr_list;
-
- /* Allocate a Counter device attribute */
- counter_attr = kzalloc(sizeof(*counter_attr), GFP_KERNEL);
- if (!counter_attr)
- return -ENOMEM;
- dev_attr = &counter_attr->dev_attr;
-
- sysfs_attr_init(&dev_attr->attr);
-
- /* Configure device attribute */
- dev_attr->attr.name = kasprintf(GFP_KERNEL, "%s%s", parm->prefix,
- parm->name);
- if (!dev_attr->attr.name) {
- err = -ENOMEM;
- goto err_free_counter_attr;
- }
- if (parm->show) {
- dev_attr->attr.mode |= 0444;
- dev_attr->show = parm->show;
- }
- if (parm->store) {
- dev_attr->attr.mode |= 0200;
- dev_attr->store = parm->store;
- }
-
- /* Store associated Counter component with attribute */
- counter_attr->component = parm->component;
-
- /* Keep track of the attribute for later cleanup */
- list_add(&counter_attr->l, attr_list);
- parm->group->num_attr++;
-
- return 0;
-
-err_free_counter_attr:
- kfree(counter_attr);
- return err;
-}
-
-#define to_counter_attr(_dev_attr) \
- container_of(_dev_attr, struct counter_device_attr, dev_attr)
-
-struct counter_signal_unit {
- struct counter_signal *signal;
-};
-
-static const char *const counter_signal_level_str[] = {
- [COUNTER_SIGNAL_LEVEL_LOW] = "low",
- [COUNTER_SIGNAL_LEVEL_HIGH] = "high"
-};
-
-static ssize_t counter_signal_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct counter_device *const counter = dev_get_drvdata(dev);
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_signal_unit *const component = devattr->component;
- struct counter_signal *const signal = component->signal;
- int err;
- enum counter_signal_level level;
-
- err = counter->ops->signal_read(counter, signal, &level);
- if (err)
- return err;
-
- return sprintf(buf, "%s\n", counter_signal_level_str[level]);
-}
-
-struct counter_name_unit {
- const char *name;
-};
-
-static ssize_t counter_device_attr_name_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- const struct counter_name_unit *const comp = to_counter_attr(attr)->component;
-
- return sprintf(buf, "%s\n", comp->name);
-}
-
-static int counter_name_attribute_create(
- struct counter_device_attr_group *const group,
- const char *const name)
-{
- struct counter_name_unit *name_comp;
- struct counter_attr_parm parm;
- int err;
-
- /* Skip if no name */
- if (!name)
- return 0;
-
- /* Allocate name attribute component */
- name_comp = kmalloc(sizeof(*name_comp), GFP_KERNEL);
- if (!name_comp)
- return -ENOMEM;
- name_comp->name = name;
-
- /* Allocate Signal name attribute */
- parm.group = group;
- parm.prefix = "";
- parm.name = "name";
- parm.show = counter_device_attr_name_show;
- parm.store = NULL;
- parm.component = name_comp;
- err = counter_attribute_create(&parm);
- if (err)
- goto err_free_name_comp;
-
- return 0;
-
-err_free_name_comp:
- kfree(name_comp);
- return err;
-}
-
-struct counter_signal_ext_unit {
- struct counter_signal *signal;
- const struct counter_signal_ext *ext;
-};
-
-static ssize_t counter_signal_ext_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_signal_ext_unit *const comp = devattr->component;
- const struct counter_signal_ext *const ext = comp->ext;
-
- return ext->read(dev_get_drvdata(dev), comp->signal, ext->priv, buf);
-}
-
-static ssize_t counter_signal_ext_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_signal_ext_unit *const comp = devattr->component;
- const struct counter_signal_ext *const ext = comp->ext;
-
- return ext->write(dev_get_drvdata(dev), comp->signal, ext->priv, buf,
- len);
-}
-
-static void counter_device_attr_list_free(struct list_head *attr_list)
-{
- struct counter_device_attr *p, *n;
-
- list_for_each_entry_safe(p, n, attr_list, l) {
- /* free attribute name and associated component memory */
- kfree(p->dev_attr.attr.name);
- kfree(p->component);
- list_del(&p->l);
- kfree(p);
- }
-}
-
-static int counter_signal_ext_register(
- struct counter_device_attr_group *const group,
- struct counter_signal *const signal)
-{
- const size_t num_ext = signal->num_ext;
- size_t i;
- const struct counter_signal_ext *ext;
- struct counter_signal_ext_unit *signal_ext_comp;
- struct counter_attr_parm parm;
- int err;
-
- /* Create an attribute for each extension */
- for (i = 0 ; i < num_ext; i++) {
- ext = signal->ext + i;
-
- /* Allocate signal_ext attribute component */
- signal_ext_comp = kmalloc(sizeof(*signal_ext_comp), GFP_KERNEL);
- if (!signal_ext_comp) {
- err = -ENOMEM;
- goto err_free_attr_list;
- }
- signal_ext_comp->signal = signal;
- signal_ext_comp->ext = ext;
-
- /* Allocate a Counter device attribute */
- parm.group = group;
- parm.prefix = "";
- parm.name = ext->name;
- parm.show = (ext->read) ? counter_signal_ext_show : NULL;
- parm.store = (ext->write) ? counter_signal_ext_store : NULL;
- parm.component = signal_ext_comp;
- err = counter_attribute_create(&parm);
- if (err) {
- kfree(signal_ext_comp);
- goto err_free_attr_list;
- }
- }
-
- return 0;
-
-err_free_attr_list:
- counter_device_attr_list_free(&group->attr_list);
- return err;
-}
-
-static int counter_signal_attributes_create(
- struct counter_device_attr_group *const group,
- const struct counter_device *const counter,
- struct counter_signal *const signal)
-{
- struct counter_signal_unit *signal_comp;
- struct counter_attr_parm parm;
- int err;
-
- /* Allocate Signal attribute component */
- signal_comp = kmalloc(sizeof(*signal_comp), GFP_KERNEL);
- if (!signal_comp)
- return -ENOMEM;
- signal_comp->signal = signal;
-
- /* Create main Signal attribute */
- parm.group = group;
- parm.prefix = "";
- parm.name = "signal";
- parm.show = (counter->ops->signal_read) ? counter_signal_show : NULL;
- parm.store = NULL;
- parm.component = signal_comp;
- err = counter_attribute_create(&parm);
- if (err) {
- kfree(signal_comp);
- return err;
- }
-
- /* Create Signal name attribute */
- err = counter_name_attribute_create(group, signal->name);
- if (err)
- goto err_free_attr_list;
-
- /* Register Signal extension attributes */
- err = counter_signal_ext_register(group, signal);
- if (err)
- goto err_free_attr_list;
-
- return 0;
-
-err_free_attr_list:
- counter_device_attr_list_free(&group->attr_list);
- return err;
-}
-
-static int counter_signals_register(
- struct counter_device_attr_group *const groups_list,
- const struct counter_device *const counter)
-{
- const size_t num_signals = counter->num_signals;
- size_t i;
- struct counter_signal *signal;
- const char *name;
- int err;
-
- /* Register each Signal */
- for (i = 0; i < num_signals; i++) {
- signal = counter->signals + i;
-
- /* Generate Signal attribute directory name */
- name = kasprintf(GFP_KERNEL, "signal%d", signal->id);
- if (!name) {
- err = -ENOMEM;
- goto err_free_attr_groups;
- }
- groups_list[i].attr_group.name = name;
-
- /* Create all attributes associated with Signal */
- err = counter_signal_attributes_create(groups_list + i, counter,
- signal);
- if (err)
- goto err_free_attr_groups;
- }
-
- return 0;
-
-err_free_attr_groups:
- do {
- kfree(groups_list[i].attr_group.name);
- counter_device_attr_list_free(&groups_list[i].attr_list);
- } while (i--);
- return err;
-}
-
-static const char *const counter_synapse_action_str[] = {
- [COUNTER_SYNAPSE_ACTION_NONE] = "none",
- [COUNTER_SYNAPSE_ACTION_RISING_EDGE] = "rising edge",
- [COUNTER_SYNAPSE_ACTION_FALLING_EDGE] = "falling edge",
- [COUNTER_SYNAPSE_ACTION_BOTH_EDGES] = "both edges"
-};
-
-struct counter_action_unit {
- struct counter_synapse *synapse;
- struct counter_count *count;
-};
-
-static ssize_t counter_action_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- int err;
- struct counter_device *const counter = dev_get_drvdata(dev);
- const struct counter_action_unit *const component = devattr->component;
- struct counter_count *const count = component->count;
- struct counter_synapse *const synapse = component->synapse;
- size_t action_index;
- enum counter_synapse_action action;
-
- err = counter->ops->action_get(counter, count, synapse, &action_index);
- if (err)
- return err;
-
- synapse->action = action_index;
-
- action = synapse->actions_list[action_index];
- return sprintf(buf, "%s\n", counter_synapse_action_str[action]);
-}
-
-static ssize_t counter_action_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_action_unit *const component = devattr->component;
- struct counter_synapse *const synapse = component->synapse;
- size_t action_index;
- const size_t num_actions = synapse->num_actions;
- enum counter_synapse_action action;
- int err;
- struct counter_device *const counter = dev_get_drvdata(dev);
- struct counter_count *const count = component->count;
-
- /* Find requested action mode */
- for (action_index = 0; action_index < num_actions; action_index++) {
- action = synapse->actions_list[action_index];
- if (sysfs_streq(buf, counter_synapse_action_str[action]))
- break;
- }
- /* If requested action mode not found */
- if (action_index >= num_actions)
- return -EINVAL;
-
- err = counter->ops->action_set(counter, count, synapse, action_index);
- if (err)
- return err;
-
- synapse->action = action_index;
-
- return len;
-}
-
-struct counter_action_avail_unit {
- const enum counter_synapse_action *actions_list;
- size_t num_actions;
-};
-
-static ssize_t counter_synapse_action_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_action_avail_unit *const component = devattr->component;
- size_t i;
- enum counter_synapse_action action;
- ssize_t len = 0;
-
- for (i = 0; i < component->num_actions; i++) {
- action = component->actions_list[i];
- len += sprintf(buf + len, "%s\n",
- counter_synapse_action_str[action]);
- }
-
- return len;
-}
-
-static int counter_synapses_register(
- struct counter_device_attr_group *const group,
- const struct counter_device *const counter,
- struct counter_count *const count, const char *const count_attr_name)
-{
- size_t i;
- struct counter_synapse *synapse;
- const char *prefix;
- struct counter_action_unit *action_comp;
- struct counter_attr_parm parm;
- int err;
- struct counter_action_avail_unit *avail_comp;
-
- /* Register each Synapse */
- for (i = 0; i < count->num_synapses; i++) {
- synapse = count->synapses + i;
-
- /* Generate attribute prefix */
- prefix = kasprintf(GFP_KERNEL, "signal%d_",
- synapse->signal->id);
- if (!prefix) {
- err = -ENOMEM;
- goto err_free_attr_list;
- }
-
- /* Allocate action attribute component */
- action_comp = kmalloc(sizeof(*action_comp), GFP_KERNEL);
- if (!action_comp) {
- err = -ENOMEM;
- goto err_free_prefix;
- }
- action_comp->synapse = synapse;
- action_comp->count = count;
-
- /* Create action attribute */
- parm.group = group;
- parm.prefix = prefix;
- parm.name = "action";
- parm.show = (counter->ops->action_get) ? counter_action_show : NULL;
- parm.store = (counter->ops->action_set) ? counter_action_store : NULL;
- parm.component = action_comp;
- err = counter_attribute_create(&parm);
- if (err) {
- kfree(action_comp);
- goto err_free_prefix;
- }
-
- /* Allocate action available attribute component */
- avail_comp = kmalloc(sizeof(*avail_comp), GFP_KERNEL);
- if (!avail_comp) {
- err = -ENOMEM;
- goto err_free_prefix;
- }
- avail_comp->actions_list = synapse->actions_list;
- avail_comp->num_actions = synapse->num_actions;
-
- /* Create action_available attribute */
- parm.group = group;
- parm.prefix = prefix;
- parm.name = "action_available";
- parm.show = counter_synapse_action_available_show;
- parm.store = NULL;
- parm.component = avail_comp;
- err = counter_attribute_create(&parm);
- if (err) {
- kfree(avail_comp);
- goto err_free_prefix;
- }
-
- kfree(prefix);
- }
-
- return 0;
-
-err_free_prefix:
- kfree(prefix);
-err_free_attr_list:
- counter_device_attr_list_free(&group->attr_list);
- return err;
-}
-
-struct counter_count_unit {
- struct counter_count *count;
-};
-
-static ssize_t counter_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct counter_device *const counter = dev_get_drvdata(dev);
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_count_unit *const component = devattr->component;
- struct counter_count *const count = component->count;
- int err;
- unsigned long val;
-
- err = counter->ops->count_read(counter, count, &val);
- if (err)
- return err;
-
- return sprintf(buf, "%lu\n", val);
-}
-
-static ssize_t counter_count_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct counter_device *const counter = dev_get_drvdata(dev);
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_count_unit *const component = devattr->component;
- struct counter_count *const count = component->count;
- int err;
- unsigned long val;
-
- err = kstrtoul(buf, 0, &val);
- if (err)
- return err;
-
- err = counter->ops->count_write(counter, count, val);
- if (err)
- return err;
-
- return len;
-}
-
-static const char *const counter_function_str[] = {
- [COUNTER_FUNCTION_INCREASE] = "increase",
- [COUNTER_FUNCTION_DECREASE] = "decrease",
- [COUNTER_FUNCTION_PULSE_DIRECTION] = "pulse-direction",
- [COUNTER_FUNCTION_QUADRATURE_X1_A] = "quadrature x1 a",
- [COUNTER_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b",
- [COUNTER_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a",
- [COUNTER_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b",
- [COUNTER_FUNCTION_QUADRATURE_X4] = "quadrature x4"
-};
-
-static ssize_t counter_function_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int err;
- struct counter_device *const counter = dev_get_drvdata(dev);
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_count_unit *const component = devattr->component;
- struct counter_count *const count = component->count;
- size_t func_index;
- enum counter_function function;
-
- err = counter->ops->function_get(counter, count, &func_index);
- if (err)
- return err;
-
- count->function = func_index;
-
- function = count->functions_list[func_index];
- return sprintf(buf, "%s\n", counter_function_str[function]);
-}
-
-static ssize_t counter_function_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_count_unit *const component = devattr->component;
- struct counter_count *const count = component->count;
- const size_t num_functions = count->num_functions;
- size_t func_index;
- enum counter_function function;
- int err;
- struct counter_device *const counter = dev_get_drvdata(dev);
-
- /* Find requested Count function mode */
- for (func_index = 0; func_index < num_functions; func_index++) {
- function = count->functions_list[func_index];
- if (sysfs_streq(buf, counter_function_str[function]))
- break;
- }
- /* Return error if requested Count function mode not found */
- if (func_index >= num_functions)
- return -EINVAL;
-
- err = counter->ops->function_set(counter, count, func_index);
- if (err)
- return err;
-
- count->function = func_index;
-
- return len;
-}
-
-struct counter_count_ext_unit {
- struct counter_count *count;
- const struct counter_count_ext *ext;
-};
-
-static ssize_t counter_count_ext_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_count_ext_unit *const comp = devattr->component;
- const struct counter_count_ext *const ext = comp->ext;
-
- return ext->read(dev_get_drvdata(dev), comp->count, ext->priv, buf);
-}
-
-static ssize_t counter_count_ext_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_count_ext_unit *const comp = devattr->component;
- const struct counter_count_ext *const ext = comp->ext;
-
- return ext->write(dev_get_drvdata(dev), comp->count, ext->priv, buf,
- len);
-}
-
-static int counter_count_ext_register(
- struct counter_device_attr_group *const group,
- struct counter_count *const count)
-{
- size_t i;
- const struct counter_count_ext *ext;
- struct counter_count_ext_unit *count_ext_comp;
- struct counter_attr_parm parm;
- int err;
-
- /* Create an attribute for each extension */
- for (i = 0 ; i < count->num_ext; i++) {
- ext = count->ext + i;
-
- /* Allocate count_ext attribute component */
- count_ext_comp = kmalloc(sizeof(*count_ext_comp), GFP_KERNEL);
- if (!count_ext_comp) {
- err = -ENOMEM;
- goto err_free_attr_list;
- }
- count_ext_comp->count = count;
- count_ext_comp->ext = ext;
-
- /* Allocate count_ext attribute */
- parm.group = group;
- parm.prefix = "";
- parm.name = ext->name;
- parm.show = (ext->read) ? counter_count_ext_show : NULL;
- parm.store = (ext->write) ? counter_count_ext_store : NULL;
- parm.component = count_ext_comp;
- err = counter_attribute_create(&parm);
- if (err) {
- kfree(count_ext_comp);
- goto err_free_attr_list;
- }
- }
-
- return 0;
-
-err_free_attr_list:
- counter_device_attr_list_free(&group->attr_list);
- return err;
-}
-
-struct counter_func_avail_unit {
- const enum counter_function *functions_list;
- size_t num_functions;
-};
-
-static ssize_t counter_function_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_func_avail_unit *const component = devattr->component;
- const enum counter_function *const func_list = component->functions_list;
- const size_t num_functions = component->num_functions;
- size_t i;
- enum counter_function function;
- ssize_t len = 0;
-
- for (i = 0; i < num_functions; i++) {
- function = func_list[i];
- len += sprintf(buf + len, "%s\n",
- counter_function_str[function]);
- }
-
- return len;
-}
-
-static int counter_count_attributes_create(
- struct counter_device_attr_group *const group,
- const struct counter_device *const counter,
- struct counter_count *const count)
-{
- struct counter_count_unit *count_comp;
- struct counter_attr_parm parm;
- int err;
- struct counter_count_unit *func_comp;
- struct counter_func_avail_unit *avail_comp;
-
- /* Allocate count attribute component */
- count_comp = kmalloc(sizeof(*count_comp), GFP_KERNEL);
- if (!count_comp)
- return -ENOMEM;
- count_comp->count = count;
-
- /* Create main Count attribute */
- parm.group = group;
- parm.prefix = "";
- parm.name = "count";
- parm.show = (counter->ops->count_read) ? counter_count_show : NULL;
- parm.store = (counter->ops->count_write) ? counter_count_store : NULL;
- parm.component = count_comp;
- err = counter_attribute_create(&parm);
- if (err) {
- kfree(count_comp);
- return err;
- }
-
- /* Allocate function attribute component */
- func_comp = kmalloc(sizeof(*func_comp), GFP_KERNEL);
- if (!func_comp) {
- err = -ENOMEM;
- goto err_free_attr_list;
- }
- func_comp->count = count;
-
- /* Create Count function attribute */
- parm.group = group;
- parm.prefix = "";
- parm.name = "function";
- parm.show = (counter->ops->function_get) ? counter_function_show : NULL;
- parm.store = (counter->ops->function_set) ? counter_function_store : NULL;
- parm.component = func_comp;
- err = counter_attribute_create(&parm);
- if (err) {
- kfree(func_comp);
- goto err_free_attr_list;
- }
-
- /* Allocate function available attribute component */
- avail_comp = kmalloc(sizeof(*avail_comp), GFP_KERNEL);
- if (!avail_comp) {
- err = -ENOMEM;
- goto err_free_attr_list;
- }
- avail_comp->functions_list = count->functions_list;
- avail_comp->num_functions = count->num_functions;
-
- /* Create Count function_available attribute */
- parm.group = group;
- parm.prefix = "";
- parm.name = "function_available";
- parm.show = counter_function_available_show;
- parm.store = NULL;
- parm.component = avail_comp;
- err = counter_attribute_create(&parm);
- if (err) {
- kfree(avail_comp);
- goto err_free_attr_list;
- }
-
- /* Create Count name attribute */
- err = counter_name_attribute_create(group, count->name);
- if (err)
- goto err_free_attr_list;
-
- /* Register Count extension attributes */
- err = counter_count_ext_register(group, count);
- if (err)
- goto err_free_attr_list;
-
- return 0;
-
-err_free_attr_list:
- counter_device_attr_list_free(&group->attr_list);
- return err;
-}
-
-static int counter_counts_register(
- struct counter_device_attr_group *const groups_list,
- const struct counter_device *const counter)
-{
- size_t i;
- struct counter_count *count;
- const char *name;
- int err;
-
- /* Register each Count */
- for (i = 0; i < counter->num_counts; i++) {
- count = counter->counts + i;
-
- /* Generate Count attribute directory name */
- name = kasprintf(GFP_KERNEL, "count%d", count->id);
- if (!name) {
- err = -ENOMEM;
- goto err_free_attr_groups;
- }
- groups_list[i].attr_group.name = name;
-
- /* Register the Synapses associated with each Count */
- err = counter_synapses_register(groups_list + i, counter, count,
- name);
- if (err)
- goto err_free_attr_groups;
-
- /* Create all attributes associated with Count */
- err = counter_count_attributes_create(groups_list + i, counter,
- count);
- if (err)
- goto err_free_attr_groups;
- }
-
- return 0;
-
-err_free_attr_groups:
- do {
- kfree(groups_list[i].attr_group.name);
- counter_device_attr_list_free(&groups_list[i].attr_list);
- } while (i--);
- return err;
-}
-
-struct counter_size_unit {
- size_t size;
-};
-
-static ssize_t counter_device_attr_size_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- const struct counter_size_unit *const comp = to_counter_attr(attr)->component;
-
- return sprintf(buf, "%zu\n", comp->size);
-}
-
-static int counter_size_attribute_create(
- struct counter_device_attr_group *const group,
- const size_t size, const char *const name)
-{
- struct counter_size_unit *size_comp;
- struct counter_attr_parm parm;
- int err;
-
- /* Allocate size attribute component */
- size_comp = kmalloc(sizeof(*size_comp), GFP_KERNEL);
- if (!size_comp)
- return -ENOMEM;
- size_comp->size = size;
-
- parm.group = group;
- parm.prefix = "";
- parm.name = name;
- parm.show = counter_device_attr_size_show;
- parm.store = NULL;
- parm.component = size_comp;
- err = counter_attribute_create(&parm);
- if (err)
- goto err_free_size_comp;
-
- return 0;
-
-err_free_size_comp:
- kfree(size_comp);
- return err;
-}
-
-struct counter_ext_unit {
- const struct counter_device_ext *ext;
-};
-
-static ssize_t counter_device_ext_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_ext_unit *const component = devattr->component;
- const struct counter_device_ext *const ext = component->ext;
-
- return ext->read(dev_get_drvdata(dev), ext->priv, buf);
-}
-
-static ssize_t counter_device_ext_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- const struct counter_device_attr *const devattr = to_counter_attr(attr);
- const struct counter_ext_unit *const component = devattr->component;
- const struct counter_device_ext *const ext = component->ext;
-
- return ext->write(dev_get_drvdata(dev), ext->priv, buf, len);
-}
-
-static int counter_device_ext_register(
- struct counter_device_attr_group *const group,
- struct counter_device *const counter)
-{
- size_t i;
- struct counter_ext_unit *ext_comp;
- struct counter_attr_parm parm;
- int err;
-
- /* Create an attribute for each extension */
- for (i = 0 ; i < counter->num_ext; i++) {
- /* Allocate extension attribute component */
- ext_comp = kmalloc(sizeof(*ext_comp), GFP_KERNEL);
- if (!ext_comp) {
- err = -ENOMEM;
- goto err_free_attr_list;
- }
-
- ext_comp->ext = counter->ext + i;
-
- /* Allocate extension attribute */
- parm.group = group;
- parm.prefix = "";
- parm.name = counter->ext[i].name;
- parm.show = (counter->ext[i].read) ? counter_device_ext_show : NULL;
- parm.store = (counter->ext[i].write) ? counter_device_ext_store : NULL;
- parm.component = ext_comp;
- err = counter_attribute_create(&parm);
- if (err) {
- kfree(ext_comp);
- goto err_free_attr_list;
- }
- }
-
- return 0;
-
-err_free_attr_list:
- counter_device_attr_list_free(&group->attr_list);
- return err;
-}
-
-static int counter_global_attr_register(
- struct counter_device_attr_group *const group,
- struct counter_device *const counter)
-{
- int err;
-
- /* Create name attribute */
- err = counter_name_attribute_create(group, counter->name);
- if (err)
- return err;
-
- /* Create num_counts attribute */
- err = counter_size_attribute_create(group, counter->num_counts,
- "num_counts");
- if (err)
- goto err_free_attr_list;
-
- /* Create num_signals attribute */
- err = counter_size_attribute_create(group, counter->num_signals,
- "num_signals");
- if (err)
- goto err_free_attr_list;
-
- /* Register Counter device extension attributes */
- err = counter_device_ext_register(group, counter);
- if (err)
- goto err_free_attr_list;
-
- return 0;
-
-err_free_attr_list:
- counter_device_attr_list_free(&group->attr_list);
- return err;
-}
-
-static void counter_device_groups_list_free(
- struct counter_device_attr_group *const groups_list,
- const size_t num_groups)
-{
- struct counter_device_attr_group *group;
- size_t i;
-
- /* loop through all attribute groups (signals, counts, global, etc.) */
- for (i = 0; i < num_groups; i++) {
- group = groups_list + i;
-
- /* free all attribute group and associated attributes memory */
- kfree(group->attr_group.name);
- kfree(group->attr_group.attrs);
- counter_device_attr_list_free(&group->attr_list);
- }
-
- kfree(groups_list);
-}
-
-static int counter_device_groups_list_prepare(
- struct counter_device *const counter)
-{
- const size_t total_num_groups =
- counter->num_signals + counter->num_counts + 1;
- struct counter_device_attr_group *groups_list;
- size_t i;
- int err;
- size_t num_groups = 0;
-
- /* Allocate space for attribute groups (signals, counts, and ext) */
- groups_list = kcalloc(total_num_groups, sizeof(*groups_list),
- GFP_KERNEL);
- if (!groups_list)
- return -ENOMEM;
-
- /* Initialize attribute lists */
- for (i = 0; i < total_num_groups; i++)
- INIT_LIST_HEAD(&groups_list[i].attr_list);
-
- /* Register Signals */
- err = counter_signals_register(groups_list, counter);
- if (err)
- goto err_free_groups_list;
- num_groups += counter->num_signals;
-
- /* Register Counts and respective Synapses */
- err = counter_counts_register(groups_list + num_groups, counter);
- if (err)
- goto err_free_groups_list;
- num_groups += counter->num_counts;
-
- /* Register Counter global attributes */
- err = counter_global_attr_register(groups_list + num_groups, counter);
- if (err)
- goto err_free_groups_list;
- num_groups++;
-
- /* Store groups_list in device_state */
- counter->device_state->groups_list = groups_list;
- counter->device_state->num_groups = num_groups;
-
- return 0;
-
-err_free_groups_list:
- counter_device_groups_list_free(groups_list, num_groups);
- return err;
-}
-
-static int counter_device_groups_prepare(
- struct counter_device_state *const device_state)
-{
- size_t i, j;
- struct counter_device_attr_group *group;
- int err;
- struct counter_device_attr *p;
-
- /* Allocate attribute groups for association with device */
- device_state->groups = kcalloc(device_state->num_groups + 1,
- sizeof(*device_state->groups),
- GFP_KERNEL);
- if (!device_state->groups)
- return -ENOMEM;
-
- /* Prepare each group of attributes for association */
- for (i = 0; i < device_state->num_groups; i++) {
- group = device_state->groups_list + i;
-
- /* Allocate space for attribute pointers in attribute group */
- group->attr_group.attrs = kcalloc(group->num_attr + 1,
- sizeof(*group->attr_group.attrs), GFP_KERNEL);
- if (!group->attr_group.attrs) {
- err = -ENOMEM;
- goto err_free_groups;
- }
-
- /* Add attribute pointers to attribute group */
- j = 0;
- list_for_each_entry(p, &group->attr_list, l)
- group->attr_group.attrs[j++] = &p->dev_attr.attr;
-
- /* Group attributes in attribute group */
- device_state->groups[i] = &group->attr_group;
- }
- /* Associate attributes with device */
- device_state->dev.groups = device_state->groups;
-
- return 0;
-
-err_free_groups:
- do {
- group = device_state->groups_list + i;
- kfree(group->attr_group.attrs);
- group->attr_group.attrs = NULL;
- } while (i--);
- kfree(device_state->groups);
- return err;
-}
-
-/* Provides a unique ID for each counter device */
-static DEFINE_IDA(counter_ida);
-
-static void counter_device_release(struct device *dev)
-{
- struct counter_device *const counter = dev_get_drvdata(dev);
- struct counter_device_state *const device_state = counter->device_state;
-
- kfree(device_state->groups);
- counter_device_groups_list_free(device_state->groups_list,
- device_state->num_groups);
- ida_simple_remove(&counter_ida, device_state->id);
- kfree(device_state);
-}
-
-static struct device_type counter_device_type = {
- .name = "counter_device",
- .release = counter_device_release
-};
-
-static struct bus_type counter_bus_type = {
- .name = "counter"
-};
-
-/**
- * counter_register - register Counter to the system
- * @counter: pointer to Counter to register
- *
- * This function registers a Counter to the system. A sysfs "counter" directory
- * is created and populated with sysfs attributes corresponding to the
- * Counter's Signals, Synapses, and Counts.
- */
-int counter_register(struct counter_device *const counter)
-{
- struct counter_device_state *device_state;
- int err;
-
- /* Allocate internal state container for Counter device */
- device_state = kzalloc(sizeof(*device_state), GFP_KERNEL);
- if (!device_state)
- return -ENOMEM;
- counter->device_state = device_state;
-
- /* Acquire unique ID */
- device_state->id = ida_simple_get(&counter_ida, 0, 0, GFP_KERNEL);
- if (device_state->id < 0) {
- err = device_state->id;
- goto err_free_device_state;
- }
-
- /* Configure device structure for Counter */
- device_state->dev.type = &counter_device_type;
- device_state->dev.bus = &counter_bus_type;
- if (counter->parent) {
- device_state->dev.parent = counter->parent;
- device_state->dev.of_node = counter->parent->of_node;
- }
- dev_set_name(&device_state->dev, "counter%d", device_state->id);
- device_initialize(&device_state->dev);
- dev_set_drvdata(&device_state->dev, counter);
-
- /* Prepare device attributes */
- err = counter_device_groups_list_prepare(counter);
- if (err)
- goto err_free_id;
-
- /* Organize device attributes to groups and match to device */
- err = counter_device_groups_prepare(device_state);
- if (err)
- goto err_free_groups_list;
-
- /* Add device to system */
- err = device_add(&device_state->dev);
- if (err)
- goto err_free_groups;
-
- return 0;
-
-err_free_groups:
- kfree(device_state->groups);
-err_free_groups_list:
- counter_device_groups_list_free(device_state->groups_list,
- device_state->num_groups);
-err_free_id:
- ida_simple_remove(&counter_ida, device_state->id);
-err_free_device_state:
- kfree(device_state);
- return err;
-}
-EXPORT_SYMBOL_GPL(counter_register);
-
-/**
- * counter_unregister - unregister Counter from the system
- * @counter: pointer to Counter to unregister
- *
- * The Counter is unregistered from the system; all allocated memory is freed.
- */
-void counter_unregister(struct counter_device *const counter)
-{
- if (counter)
- device_del(&counter->device_state->dev);
-}
-EXPORT_SYMBOL_GPL(counter_unregister);
-
-static void devm_counter_unreg(struct device *dev, void *res)
-{
- counter_unregister(*(struct counter_device **)res);
-}
-
-/**
- * devm_counter_register - Resource-managed counter_register
- * @dev: device to allocate counter_device for
- * @counter: pointer to Counter to register
- *
- * Managed counter_register. The Counter registered with this function is
- * automatically unregistered on driver detach. This function calls
- * counter_register internally. Refer to that function for more information.
- *
- * If a Counter registered with this function needs to be unregistered
- * separately, devm_counter_unregister must be used.
- *
- * RETURNS:
- * 0 on success, negative error number on failure.
- */
-int devm_counter_register(struct device *dev,
- struct counter_device *const counter)
-{
- struct counter_device **ptr;
- int ret;
-
- ptr = devres_alloc(devm_counter_unreg, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = counter_register(counter);
- if (!ret) {
- *ptr = counter;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_counter_register);
-
-static int devm_counter_match(struct device *dev, void *res, void *data)
-{
- struct counter_device **r = res;
-
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
-
- return *r == data;
-}
-
-/**
- * devm_counter_unregister - Resource-managed counter_unregister
- * @dev: device this counter_device belongs to
- * @counter: pointer to Counter associated with the device
- *
- * Unregister Counter registered with devm_counter_register.
- */
-void devm_counter_unregister(struct device *dev,
- struct counter_device *const counter)
-{
- int rc;
-
- rc = devres_release(dev, devm_counter_unreg, devm_counter_match,
- counter);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_counter_unregister);
-
-static int __init counter_init(void)
-{
- return bus_register(&counter_bus_type);
-}
-
-static void __exit counter_exit(void)
-{
- bus_unregister(&counter_bus_type);
-}
-
-subsys_initcall(counter_init);
-module_exit(counter_exit);
-
-MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
-MODULE_DESCRIPTION("Generic Counter interface");
-MODULE_LICENSE("GPL v2");
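For reference, a driver consumes the interface above roughly as follows; this
is a minimal probe sketch, assuming hypothetical example_* ops, counts, and
signals defined elsewhere in the driver:

/* Minimal sketch of managed registration; all example_* names are
 * hypothetical and not part of this patch.
 */
static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->counter.name = dev_name(&pdev->dev);
	priv->counter.parent = &pdev->dev;
	priv->counter.ops = &example_counter_ops;
	priv->counter.counts = example_counts;
	priv->counter.num_counts = ARRAY_SIZE(example_counts);
	priv->counter.signals = example_signals;
	priv->counter.num_signals = ARRAY_SIZE(example_signals);
	priv->counter.priv = priv;

	/* unregistered automatically on driver detach */
	return devm_counter_register(&pdev->dev, &priv->counter);
}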
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 53c15f84909b..5ef0478709cd 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/counter.h>
#include <linux/bitfield.h>
+#include <linux/types.h>
#define FTM_FIELD_UPDATE(ftm, offset, mask, val) \
({ \
@@ -115,8 +116,7 @@ static void ftm_quaddec_disable(void *ftm)
}
static int ftm_quaddec_get_prescaler(struct counter_device *counter,
- struct counter_count *count,
- size_t *cnt_mode)
+ struct counter_count *count, u32 *cnt_mode)
{
struct ftm_quaddec *ftm = counter->priv;
uint32_t scflags;
@@ -129,8 +129,7 @@ static int ftm_quaddec_get_prescaler(struct counter_device *counter,
}
static int ftm_quaddec_set_prescaler(struct counter_device *counter,
- struct counter_count *count,
- size_t cnt_mode)
+ struct counter_count *count, u32 cnt_mode)
{
struct ftm_quaddec *ftm = counter->priv;
@@ -151,33 +150,17 @@ static const char * const ftm_quaddec_prescaler[] = {
"1", "2", "4", "8", "16", "32", "64", "128"
};
-static struct counter_count_enum_ext ftm_quaddec_prescaler_enum = {
- .items = ftm_quaddec_prescaler,
- .num_items = ARRAY_SIZE(ftm_quaddec_prescaler),
- .get = ftm_quaddec_get_prescaler,
- .set = ftm_quaddec_set_prescaler
-};
-
-enum ftm_quaddec_synapse_action {
- FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES,
-};
-
static const enum counter_synapse_action ftm_quaddec_synapse_actions[] = {
- [FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES] =
COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
-enum ftm_quaddec_count_function {
- FTM_QUADDEC_COUNT_ENCODER_MODE_1,
-};
-
static const enum counter_function ftm_quaddec_count_functions[] = {
- [FTM_QUADDEC_COUNT_ENCODER_MODE_1] = COUNTER_FUNCTION_QUADRATURE_X4
+ COUNTER_FUNCTION_QUADRATURE_X4
};
static int ftm_quaddec_count_read(struct counter_device *counter,
struct counter_count *count,
- unsigned long *val)
+ u64 *val)
{
struct ftm_quaddec *const ftm = counter->priv;
uint32_t cntval;
@@ -191,7 +174,7 @@ static int ftm_quaddec_count_read(struct counter_device *counter,
static int ftm_quaddec_count_write(struct counter_device *counter,
struct counter_count *count,
- const unsigned long val)
+ const u64 val)
{
struct ftm_quaddec *const ftm = counter->priv;
@@ -205,21 +188,21 @@ static int ftm_quaddec_count_write(struct counter_device *counter,
return 0;
}
-static int ftm_quaddec_count_function_get(struct counter_device *counter,
- struct counter_count *count,
- size_t *function)
+static int ftm_quaddec_count_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
- *function = FTM_QUADDEC_COUNT_ENCODER_MODE_1;
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
}
-static int ftm_quaddec_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t *action)
+static int ftm_quaddec_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
- *action = FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
}
@@ -227,8 +210,8 @@ static int ftm_quaddec_action_get(struct counter_device *counter,
static const struct counter_ops ftm_quaddec_cnt_ops = {
.count_read = ftm_quaddec_count_read,
.count_write = ftm_quaddec_count_write,
- .function_get = ftm_quaddec_count_function_get,
- .action_get = ftm_quaddec_action_get,
+ .function_read = ftm_quaddec_count_function_read,
+ .action_read = ftm_quaddec_action_read,
};
static struct counter_signal ftm_quaddec_signals[] = {
@@ -255,9 +238,12 @@ static struct counter_synapse ftm_quaddec_count_synapses[] = {
}
};
-static const struct counter_count_ext ftm_quaddec_count_ext[] = {
- COUNTER_COUNT_ENUM("prescaler", &ftm_quaddec_prescaler_enum),
- COUNTER_COUNT_ENUM_AVAILABLE("prescaler", &ftm_quaddec_prescaler_enum),
+static DEFINE_COUNTER_ENUM(ftm_quaddec_prescaler_enum, ftm_quaddec_prescaler);
+
+static struct counter_comp ftm_quaddec_count_ext[] = {
+ COUNTER_COMP_COUNT_ENUM("prescaler", ftm_quaddec_get_prescaler,
+ ftm_quaddec_set_prescaler,
+ ftm_quaddec_prescaler_enum),
};
static struct counter_count ftm_quaddec_counts = {
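The ftm-quaddec conversion above illustrates the new enum component pattern. A
generalized sketch, with hypothetical example_* names and the macros exactly
as used in this patch:

static const char *const example_prescalers[] = { "1", "2", "4", "8" };

/* Bundles the items array and its size for the component below. */
static DEFINE_COUNTER_ENUM(example_prescaler_enum, example_prescalers);

/* The callbacks now exchange a u32 index into example_prescalers. */
static int example_get_prescaler(struct counter_device *counter,
				 struct counter_count *count, u32 *prescaler);
static int example_set_prescaler(struct counter_device *counter,
				 struct counter_count *count, u32 prescaler);

static struct counter_comp example_count_ext[] = {
	COUNTER_COMP_COUNT_ENUM("prescaler", example_get_prescaler,
				example_set_prescaler,
				example_prescaler_enum),
};

Note that the separate COUNTER_COUNT_ENUM_AVAILABLE entry is gone; judging by
its removal above, the matching "prescaler_available" attribute is now created
by the core.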
diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c
index 8a6847d5fb2b..0924d16de6e2 100644
--- a/drivers/counter/intel-qep.c
+++ b/drivers/counter/intel-qep.c
@@ -62,13 +62,6 @@
#define INTEL_QEP_CLK_PERIOD_NS 10
-#define INTEL_QEP_COUNTER_EXT_RW(_name) \
-{ \
- .name = #_name, \
- .read = _name##_read, \
- .write = _name##_write, \
-}
-
struct intel_qep {
struct counter_device counter;
struct mutex lock;
@@ -114,8 +107,7 @@ static void intel_qep_init(struct intel_qep *qep)
}
static int intel_qep_count_read(struct counter_device *counter,
- struct counter_count *count,
- unsigned long *val)
+ struct counter_count *count, u64 *val)
{
struct intel_qep *const qep = counter->priv;
@@ -130,11 +122,11 @@ static const enum counter_function intel_qep_count_functions[] = {
COUNTER_FUNCTION_QUADRATURE_X4,
};
-static int intel_qep_function_get(struct counter_device *counter,
- struct counter_count *count,
- size_t *function)
+static int intel_qep_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
- *function = 0;
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
}
@@ -143,19 +135,19 @@ static const enum counter_synapse_action intel_qep_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
-static int intel_qep_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t *action)
+static int intel_qep_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
- *action = 0;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
}
static const struct counter_ops intel_qep_counter_ops = {
.count_read = intel_qep_count_read,
- .function_get = intel_qep_function_get,
- .action_get = intel_qep_action_get,
+ .function_read = intel_qep_function_read,
+ .action_read = intel_qep_action_read,
};
#define INTEL_QEP_SIGNAL(_id, _name) { \
@@ -181,31 +173,27 @@ static struct counter_synapse intel_qep_count_synapses[] = {
INTEL_QEP_SYNAPSE(2),
};
-static ssize_t ceiling_read(struct counter_device *counter,
- struct counter_count *count,
- void *priv, char *buf)
+static int intel_qep_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, u64 *ceiling)
{
struct intel_qep *qep = counter->priv;
- u32 reg;
pm_runtime_get_sync(qep->dev);
- reg = intel_qep_readl(qep, INTEL_QEPMAX);
+ *ceiling = intel_qep_readl(qep, INTEL_QEPMAX);
pm_runtime_put(qep->dev);
- return sysfs_emit(buf, "%u\n", reg);
+ return 0;
}
-static ssize_t ceiling_write(struct counter_device *counter,
- struct counter_count *count,
- void *priv, const char *buf, size_t len)
+static int intel_qep_ceiling_write(struct counter_device *counter,
+ struct counter_count *count, u64 max)
{
struct intel_qep *qep = counter->priv;
- u32 max;
- int ret;
+ int ret = 0;
- ret = kstrtou32(buf, 0, &max);
- if (ret < 0)
- return ret;
+ /* Intel QEP ceiling configuration only supports 32-bit values */
+ if (max != (u32)max)
+ return -ERANGE;
mutex_lock(&qep->lock);
if (qep->enabled) {
@@ -216,34 +204,28 @@ static ssize_t ceiling_write(struct counter_device *counter,
pm_runtime_get_sync(qep->dev);
intel_qep_writel(qep, INTEL_QEPMAX, max);
pm_runtime_put(qep->dev);
- ret = len;
out:
mutex_unlock(&qep->lock);
return ret;
}
-static ssize_t enable_read(struct counter_device *counter,
- struct counter_count *count,
- void *priv, char *buf)
+static int intel_qep_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
{
struct intel_qep *qep = counter->priv;
- return sysfs_emit(buf, "%u\n", qep->enabled);
+ *enable = qep->enabled;
+
+ return 0;
}
-static ssize_t enable_write(struct counter_device *counter,
- struct counter_count *count,
- void *priv, const char *buf, size_t len)
+static int intel_qep_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 val)
{
struct intel_qep *qep = counter->priv;
u32 reg;
- bool val, changed;
- int ret;
-
- ret = kstrtobool(buf, &val);
- if (ret)
- return ret;
+ bool changed;
mutex_lock(&qep->lock);
changed = val ^ qep->enabled;
@@ -267,12 +249,12 @@ static ssize_t enable_write(struct counter_device *counter,
out:
mutex_unlock(&qep->lock);
- return len;
+ return 0;
}
-static ssize_t spike_filter_ns_read(struct counter_device *counter,
- struct counter_count *count,
- void *priv, char *buf)
+static int intel_qep_spike_filter_ns_read(struct counter_device *counter,
+ struct counter_count *count,
+ u64 *length)
{
struct intel_qep *qep = counter->priv;
u32 reg;
@@ -281,33 +263,31 @@ static ssize_t spike_filter_ns_read(struct counter_device *counter,
reg = intel_qep_readl(qep, INTEL_QEPCON);
if (!(reg & INTEL_QEPCON_FLT_EN)) {
pm_runtime_put(qep->dev);
- return sysfs_emit(buf, "0\n");
+ return 0;
}
reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
pm_runtime_put(qep->dev);
- return sysfs_emit(buf, "%u\n", (reg + 2) * INTEL_QEP_CLK_PERIOD_NS);
+ *length = (reg + 2) * INTEL_QEP_CLK_PERIOD_NS;
+
+ return 0;
}
-static ssize_t spike_filter_ns_write(struct counter_device *counter,
- struct counter_count *count,
- void *priv, const char *buf, size_t len)
+static int intel_qep_spike_filter_ns_write(struct counter_device *counter,
+ struct counter_count *count,
+ u64 length)
{
struct intel_qep *qep = counter->priv;
- u32 reg, length;
+ u32 reg;
bool enable;
- int ret;
-
- ret = kstrtou32(buf, 0, &length);
- if (ret < 0)
- return ret;
+ int ret = 0;
/*
* Spike filter length is (MAX_COUNT + 2) clock periods.
* Disable filter when userspace writes 0, enable for valid
* nanosecond values and error out otherwise.
*/
- length /= INTEL_QEP_CLK_PERIOD_NS;
+ do_div(length, INTEL_QEP_CLK_PERIOD_NS);
if (length == 0) {
enable = false;
length = 0;
@@ -336,16 +316,15 @@ static ssize_t spike_filter_ns_write(struct counter_device *counter,
intel_qep_writel(qep, INTEL_QEPFLT, length);
intel_qep_writel(qep, INTEL_QEPCON, reg);
pm_runtime_put(qep->dev);
- ret = len;
out:
mutex_unlock(&qep->lock);
return ret;
}
-static ssize_t preset_enable_read(struct counter_device *counter,
- struct counter_count *count,
- void *priv, char *buf)
+static int intel_qep_preset_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ u8 *preset_enable)
{
struct intel_qep *qep = counter->priv;
u32 reg;
@@ -353,21 +332,18 @@ static ssize_t preset_enable_read(struct counter_device *counter,
pm_runtime_get_sync(qep->dev);
reg = intel_qep_readl(qep, INTEL_QEPCON);
pm_runtime_put(qep->dev);
- return sysfs_emit(buf, "%u\n", !(reg & INTEL_QEPCON_COUNT_RST_MODE));
+
+ *preset_enable = !(reg & INTEL_QEPCON_COUNT_RST_MODE);
+
+ return 0;
}
-static ssize_t preset_enable_write(struct counter_device *counter,
- struct counter_count *count,
- void *priv, const char *buf, size_t len)
+static int intel_qep_preset_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 val)
{
struct intel_qep *qep = counter->priv;
u32 reg;
- bool val;
- int ret;
-
- ret = kstrtobool(buf, &val);
- if (ret)
- return ret;
+ int ret = 0;
mutex_lock(&qep->lock);
if (qep->enabled) {
@@ -384,7 +360,6 @@ static ssize_t preset_enable_write(struct counter_device *counter,
intel_qep_writel(qep, INTEL_QEPCON, reg);
pm_runtime_put(qep->dev);
- ret = len;
out:
mutex_unlock(&qep->lock);
@@ -392,11 +367,14 @@ out:
return ret;
}
-static const struct counter_count_ext intel_qep_count_ext[] = {
- INTEL_QEP_COUNTER_EXT_RW(ceiling),
- INTEL_QEP_COUNTER_EXT_RW(enable),
- INTEL_QEP_COUNTER_EXT_RW(spike_filter_ns),
- INTEL_QEP_COUNTER_EXT_RW(preset_enable)
+static struct counter_comp intel_qep_count_ext[] = {
+ COUNTER_COMP_ENABLE(intel_qep_enable_read, intel_qep_enable_write),
+ COUNTER_COMP_CEILING(intel_qep_ceiling_read, intel_qep_ceiling_write),
+ COUNTER_COMP_PRESET_ENABLE(intel_qep_preset_enable_read,
+ intel_qep_preset_enable_write),
+ COUNTER_COMP_COUNT_U64("spike_filter_ns",
+ intel_qep_spike_filter_ns_read,
+ intel_qep_spike_filter_ns_write),
};
static struct counter_count intel_qep_counter_count[] = {
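One detail of the spike_filter_ns conversion is worth spelling out: the value
is now a u64, so the plain division becomes do_div(), which divides a u64 in
place by a 32-bit divisor (as required for 64-bit division on 32-bit kernels)
and returns the remainder. A small sketch:

	u64 length = 250;	/* nanoseconds, as passed in by the core */
	u32 remainder;

	/* do_div() rewrites 'length' with the quotient */
	remainder = do_div(length, INTEL_QEP_CLK_PERIOD_NS);
	/* with the 10 ns clock period above: length == 25, remainder == 0 */

The ceiling write uses a related idiom, max != (u32)max, to reject any value
that does not survive a round trip through u32, since the QEPMAX register is
only 32 bits wide.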
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
index 1de4243db488..8514a87fcbee 100644
--- a/drivers/counter/interrupt-cnt.c
+++ b/drivers/counter/interrupt-cnt.c
@@ -10,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
#define INTERRUPT_CNT_NAME "interrupt-cnt"
@@ -33,30 +34,23 @@ static irqreturn_t interrupt_cnt_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static ssize_t interrupt_cnt_enable_read(struct counter_device *counter,
- struct counter_count *count,
- void *private, char *buf)
+static int interrupt_cnt_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
{
struct interrupt_cnt_priv *priv = counter->priv;
- return sysfs_emit(buf, "%d\n", priv->enabled);
+ *enable = priv->enabled;
+
+ return 0;
}
-static ssize_t interrupt_cnt_enable_write(struct counter_device *counter,
- struct counter_count *count,
- void *private, const char *buf,
- size_t len)
+static int interrupt_cnt_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
{
struct interrupt_cnt_priv *priv = counter->priv;
- bool enable;
- ssize_t ret;
-
- ret = kstrtobool(buf, &enable);
- if (ret)
- return ret;
if (priv->enabled == enable)
- return len;
+ return 0;
if (enable) {
priv->enabled = true;
@@ -66,33 +60,30 @@ static ssize_t interrupt_cnt_enable_write(struct counter_device *counter,
priv->enabled = false;
}
- return len;
+ return 0;
}
-static const struct counter_count_ext interrupt_cnt_ext[] = {
- {
- .name = "enable",
- .read = interrupt_cnt_enable_read,
- .write = interrupt_cnt_enable_write,
- },
+static struct counter_comp interrupt_cnt_ext[] = {
+ COUNTER_COMP_ENABLE(interrupt_cnt_enable_read,
+ interrupt_cnt_enable_write),
};
static const enum counter_synapse_action interrupt_cnt_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
};
-static int interrupt_cnt_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t *action)
+static int interrupt_cnt_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
- *action = 0;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
}
static int interrupt_cnt_read(struct counter_device *counter,
- struct counter_count *count, unsigned long *val)
+ struct counter_count *count, u64 *val)
{
struct interrupt_cnt_priv *priv = counter->priv;
@@ -102,8 +93,7 @@ static int interrupt_cnt_read(struct counter_device *counter,
}
static int interrupt_cnt_write(struct counter_device *counter,
- struct counter_count *count,
- const unsigned long val)
+ struct counter_count *count, const u64 val)
{
struct interrupt_cnt_priv *priv = counter->priv;
@@ -119,11 +109,11 @@ static const enum counter_function interrupt_cnt_functions[] = {
COUNTER_FUNCTION_INCREASE,
};
-static int interrupt_cnt_function_get(struct counter_device *counter,
- struct counter_count *count,
- size_t *function)
+static int interrupt_cnt_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
- *function = 0;
+ *function = COUNTER_FUNCTION_INCREASE;
return 0;
}
@@ -148,10 +138,10 @@ static int interrupt_cnt_signal_read(struct counter_device *counter,
}
static const struct counter_ops interrupt_cnt_ops = {
- .action_get = interrupt_cnt_action_get,
+ .action_read = interrupt_cnt_action_read,
.count_read = interrupt_cnt_read,
.count_write = interrupt_cnt_write,
- .function_get = interrupt_cnt_function_get,
+ .function_read = interrupt_cnt_function_read,
.signal_read = interrupt_cnt_signal_read,
};
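Only the in-kernel callback types change here; the sysfs ABI is untouched, and
"enable" remains a boolean attribute under the count directory. A hedged
userspace sketch (the counter0/count0 numbering depends on probe order):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/counter/devices/counter0/count0/enable";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* the core parses the bool; the driver sees a u8 */
	fclose(f);
	return 0;
}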
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 1aa70b9c4833..0ab1b2716784 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) 2020 Microchip
*
* Author: Kamel Bouhara <kamel.bouhara@bootlin.com>
@@ -32,28 +32,16 @@ struct mchp_tc_data {
bool trig_inverted;
};
-enum mchp_tc_count_function {
- MCHP_TC_FUNCTION_INCREASE,
- MCHP_TC_FUNCTION_QUADRATURE,
-};
-
static const enum counter_function mchp_tc_count_functions[] = {
- [MCHP_TC_FUNCTION_INCREASE] = COUNTER_FUNCTION_INCREASE,
- [MCHP_TC_FUNCTION_QUADRATURE] = COUNTER_FUNCTION_QUADRATURE_X4,
-};
-
-enum mchp_tc_synapse_action {
- MCHP_TC_SYNAPSE_ACTION_NONE = 0,
- MCHP_TC_SYNAPSE_ACTION_RISING_EDGE,
- MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE,
- MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_QUADRATURE_X4,
};
static const enum counter_synapse_action mchp_tc_synapse_actions[] = {
- [MCHP_TC_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
- [MCHP_TC_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- [MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
- [MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
static struct counter_signal mchp_tc_count_signals[] = {
@@ -80,23 +68,23 @@ static struct counter_synapse mchp_tc_count_synapses[] = {
}
};
-static int mchp_tc_count_function_get(struct counter_device *counter,
- struct counter_count *count,
- size_t *function)
+static int mchp_tc_count_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
struct mchp_tc_data *const priv = counter->priv;
if (priv->qdec_mode)
- *function = MCHP_TC_FUNCTION_QUADRATURE;
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
else
- *function = MCHP_TC_FUNCTION_INCREASE;
+ *function = COUNTER_FUNCTION_INCREASE;
return 0;
}
-static int mchp_tc_count_function_set(struct counter_device *counter,
- struct counter_count *count,
- size_t function)
+static int mchp_tc_count_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
{
struct mchp_tc_data *const priv = counter->priv;
u32 bmr, cmr;
@@ -108,7 +96,7 @@ static int mchp_tc_count_function_set(struct counter_device *counter,
cmr &= ~ATMEL_TC_WAVE;
switch (function) {
- case MCHP_TC_FUNCTION_INCREASE:
+ case COUNTER_FUNCTION_INCREASE:
priv->qdec_mode = 0;
/* Set highest rate based on whether soc has gclk or not */
bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN);
@@ -120,7 +108,7 @@ static int mchp_tc_count_function_set(struct counter_device *counter,
cmr |= ATMEL_TC_CMR_MASK;
cmr &= ~(ATMEL_TC_ABETRG | ATMEL_TC_XC0);
break;
- case MCHP_TC_FUNCTION_QUADRATURE:
+ case COUNTER_FUNCTION_QUADRATURE_X4:
if (!priv->tc_cfg->has_qdec)
return -EINVAL;
/* In QDEC mode, configuring both channels 0 and 1 is required */
@@ -176,10 +164,10 @@ static int mchp_tc_count_signal_read(struct counter_device *counter,
return 0;
}
-static int mchp_tc_count_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t *action)
+static int mchp_tc_count_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
struct mchp_tc_data *const priv = counter->priv;
u32 cmr;
@@ -188,26 +176,26 @@ static int mchp_tc_count_action_get(struct counter_device *counter,
switch (cmr & ATMEL_TC_ETRGEDG) {
default:
- *action = MCHP_TC_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
break;
case ATMEL_TC_ETRGEDG_RISING:
- *action = MCHP_TC_SYNAPSE_ACTION_RISING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
break;
case ATMEL_TC_ETRGEDG_FALLING:
- *action = MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
break;
case ATMEL_TC_ETRGEDG_BOTH:
- *action = MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
break;
}
return 0;
}
-static int mchp_tc_count_action_set(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t action)
+static int mchp_tc_count_action_write(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action)
{
struct mchp_tc_data *const priv = counter->priv;
u32 edge = ATMEL_TC_ETRGEDG_NONE;
@@ -217,16 +205,16 @@ static int mchp_tc_count_action_set(struct counter_device *counter,
return -EINVAL;
switch (action) {
- case MCHP_TC_SYNAPSE_ACTION_NONE:
+ case COUNTER_SYNAPSE_ACTION_NONE:
edge = ATMEL_TC_ETRGEDG_NONE;
break;
- case MCHP_TC_SYNAPSE_ACTION_RISING_EDGE:
+ case COUNTER_SYNAPSE_ACTION_RISING_EDGE:
edge = ATMEL_TC_ETRGEDG_RISING;
break;
- case MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE:
+ case COUNTER_SYNAPSE_ACTION_FALLING_EDGE:
edge = ATMEL_TC_ETRGEDG_FALLING;
break;
- case MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE:
+ case COUNTER_SYNAPSE_ACTION_BOTH_EDGES:
edge = ATMEL_TC_ETRGEDG_BOTH;
break;
default:
@@ -240,8 +228,7 @@ static int mchp_tc_count_action_set(struct counter_device *counter,
}
static int mchp_tc_count_read(struct counter_device *counter,
- struct counter_count *count,
- unsigned long *val)
+ struct counter_count *count, u64 *val)
{
struct mchp_tc_data *const priv = counter->priv;
u32 cnt;
@@ -264,12 +251,12 @@ static struct counter_count mchp_tc_counts[] = {
};
static const struct counter_ops mchp_tc_ops = {
- .signal_read = mchp_tc_count_signal_read,
- .count_read = mchp_tc_count_read,
- .function_get = mchp_tc_count_function_get,
- .function_set = mchp_tc_count_function_set,
- .action_get = mchp_tc_count_action_get,
- .action_set = mchp_tc_count_action_set
+ .signal_read = mchp_tc_count_signal_read,
+ .count_read = mchp_tc_count_read,
+ .function_read = mchp_tc_count_function_read,
+ .function_write = mchp_tc_count_function_write,
+ .action_read = mchp_tc_count_action_read,
+ .action_write = mchp_tc_count_action_write
};
static const struct atmel_tcb_config tcb_rm9200_config = {
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index 13656957c45f..5168833b1fdf 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
struct stm32_lptim_cnt {
struct counter_device counter;
@@ -107,11 +108,7 @@ static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val);
}
-/**
- * enum stm32_lptim_cnt_function - enumerates LPTimer counter & encoder modes
- * @STM32_LPTIM_COUNTER_INCREASE: up count on IN1 rising, falling or both edges
- * @STM32_LPTIM_ENCODER_BOTH_EDGE: count on both edges (IN1 & IN2 quadrature)
- *
+/*
* In non-quadrature mode, the device counts up on the active edge.
* In quadrature mode, encoder counting scenarios are as follows:
* +---------+----------+--------------------+--------------------+
@@ -129,33 +126,20 @@ static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
* | edges | Low -> | Up | Down | Down | Up |
* +---------+----------+----------+---------+----------+---------+
*/
-enum stm32_lptim_cnt_function {
- STM32_LPTIM_COUNTER_INCREASE,
- STM32_LPTIM_ENCODER_BOTH_EDGE,
-};
-
static const enum counter_function stm32_lptim_cnt_functions[] = {
- [STM32_LPTIM_COUNTER_INCREASE] = COUNTER_FUNCTION_INCREASE,
- [STM32_LPTIM_ENCODER_BOTH_EDGE] = COUNTER_FUNCTION_QUADRATURE_X4,
-};
-
-enum stm32_lptim_synapse_action {
- STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE,
- STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE,
- STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES,
- STM32_LPTIM_SYNAPSE_ACTION_NONE,
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_QUADRATURE_X4,
};
static const enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
- /* Index must match with stm32_lptim_cnt_polarity[] (priv->polarity) */
- [STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- [STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
- [STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
- [STM32_LPTIM_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ COUNTER_SYNAPSE_ACTION_NONE,
};
static int stm32_lptim_cnt_read(struct counter_device *counter,
- struct counter_count *count, unsigned long *val)
+ struct counter_count *count, u64 *val)
{
struct stm32_lptim_cnt *const priv = counter->priv;
u32 cnt;
@@ -170,28 +154,28 @@ static int stm32_lptim_cnt_read(struct counter_device *counter,
return 0;
}
-static int stm32_lptim_cnt_function_get(struct counter_device *counter,
- struct counter_count *count,
- size_t *function)
+static int stm32_lptim_cnt_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
struct stm32_lptim_cnt *const priv = counter->priv;
if (!priv->quadrature_mode) {
- *function = STM32_LPTIM_COUNTER_INCREASE;
+ *function = COUNTER_FUNCTION_INCREASE;
return 0;
}
- if (priv->polarity == STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES) {
- *function = STM32_LPTIM_ENCODER_BOTH_EDGE;
+ if (priv->polarity == STM32_LPTIM_CKPOL_BOTH_EDGES) {
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
}
return -EINVAL;
}
-static int stm32_lptim_cnt_function_set(struct counter_device *counter,
- struct counter_count *count,
- size_t function)
+static int stm32_lptim_cnt_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
{
struct stm32_lptim_cnt *const priv = counter->priv;
@@ -199,12 +183,12 @@ static int stm32_lptim_cnt_function_set(struct counter_device *counter,
return -EBUSY;
switch (function) {
- case STM32_LPTIM_COUNTER_INCREASE:
+ case COUNTER_FUNCTION_INCREASE:
priv->quadrature_mode = 0;
return 0;
- case STM32_LPTIM_ENCODER_BOTH_EDGE:
+ case COUNTER_FUNCTION_QUADRATURE_X4:
priv->quadrature_mode = 1;
- priv->polarity = STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES;
+ priv->polarity = STM32_LPTIM_CKPOL_BOTH_EDGES;
return 0;
default:
/* should never reach this path */
@@ -212,9 +196,9 @@ static int stm32_lptim_cnt_function_set(struct counter_device *counter,
}
}
-static ssize_t stm32_lptim_cnt_enable_read(struct counter_device *counter,
- struct counter_count *count,
- void *private, char *buf)
+static int stm32_lptim_cnt_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ u8 *enable)
{
struct stm32_lptim_cnt *const priv = counter->priv;
int ret;
@@ -223,22 +207,18 @@ static ssize_t stm32_lptim_cnt_enable_read(struct counter_device *counter,
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%u\n", ret);
+ *enable = ret;
+
+ return 0;
}
-static ssize_t stm32_lptim_cnt_enable_write(struct counter_device *counter,
- struct counter_count *count,
- void *private,
- const char *buf, size_t len)
+static int stm32_lptim_cnt_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ u8 enable)
{
struct stm32_lptim_cnt *const priv = counter->priv;
- bool enable;
int ret;
- ret = kstrtobool(buf, &enable);
- if (ret)
- return ret;
-
/* Check nobody uses the timer, or already disabled/enabled */
ret = stm32_lptim_is_enabled(priv);
if ((ret < 0) || (!ret && !enable))
@@ -254,78 +234,81 @@ static ssize_t stm32_lptim_cnt_enable_write(struct counter_device *counter,
if (ret)
return ret;
- return len;
+ return 0;
}
-static ssize_t stm32_lptim_cnt_ceiling_read(struct counter_device *counter,
- struct counter_count *count,
- void *private, char *buf)
+static int stm32_lptim_cnt_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ u64 *ceiling)
{
struct stm32_lptim_cnt *const priv = counter->priv;
- return snprintf(buf, PAGE_SIZE, "%u\n", priv->ceiling);
+ *ceiling = priv->ceiling;
+
+ return 0;
}
-static ssize_t stm32_lptim_cnt_ceiling_write(struct counter_device *counter,
- struct counter_count *count,
- void *private,
- const char *buf, size_t len)
+static int stm32_lptim_cnt_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ u64 ceiling)
{
struct stm32_lptim_cnt *const priv = counter->priv;
- unsigned int ceiling;
- int ret;
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
- ret = kstrtouint(buf, 0, &ceiling);
- if (ret)
- return ret;
-
if (ceiling > STM32_LPTIM_MAX_ARR)
return -ERANGE;
priv->ceiling = ceiling;
- return len;
+ return 0;
}
-static const struct counter_count_ext stm32_lptim_cnt_ext[] = {
- {
- .name = "enable",
- .read = stm32_lptim_cnt_enable_read,
- .write = stm32_lptim_cnt_enable_write
- },
- {
- .name = "ceiling",
- .read = stm32_lptim_cnt_ceiling_read,
- .write = stm32_lptim_cnt_ceiling_write
- },
+static struct counter_comp stm32_lptim_cnt_ext[] = {
+ COUNTER_COMP_ENABLE(stm32_lptim_cnt_enable_read,
+ stm32_lptim_cnt_enable_write),
+ COUNTER_COMP_CEILING(stm32_lptim_cnt_ceiling_read,
+ stm32_lptim_cnt_ceiling_write),
};
-static int stm32_lptim_cnt_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t *action)
+static int stm32_lptim_cnt_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
struct stm32_lptim_cnt *const priv = counter->priv;
- size_t function;
+ enum counter_function function;
int err;
- err = stm32_lptim_cnt_function_get(counter, count, &function);
+ err = stm32_lptim_cnt_function_read(counter, count, &function);
if (err)
return err;
switch (function) {
- case STM32_LPTIM_COUNTER_INCREASE:
+ case COUNTER_FUNCTION_INCREASE:
/* LP Timer acts as up-counter on input 1 */
- if (synapse->signal->id == count->synapses[0].signal->id)
- *action = priv->polarity;
- else
- *action = STM32_LPTIM_SYNAPSE_ACTION_NONE;
- return 0;
- case STM32_LPTIM_ENCODER_BOTH_EDGE:
- *action = priv->polarity;
+ if (synapse->signal->id != count->synapses[0].signal->id) {
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
+ return 0;
+ }
+
+ switch (priv->polarity) {
+ case STM32_LPTIM_CKPOL_RISING_EDGE:
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+ return 0;
+ case STM32_LPTIM_CKPOL_FALLING_EDGE:
+ *action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
+ return 0;
+ case STM32_LPTIM_CKPOL_BOTH_EDGES:
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
+ return 0;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
+ }
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
default:
/* should never reach this path */
@@ -333,43 +316,48 @@ static int stm32_lptim_cnt_action_get(struct counter_device *counter,
}
}
-static int stm32_lptim_cnt_action_set(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t action)
+static int stm32_lptim_cnt_action_write(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action)
{
struct stm32_lptim_cnt *const priv = counter->priv;
- size_t function;
+ enum counter_function function;
int err;
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
- err = stm32_lptim_cnt_function_get(counter, count, &function);
+ err = stm32_lptim_cnt_function_read(counter, count, &function);
if (err)
return err;
/* only set polarity when in counter mode (on input 1) */
- if (function == STM32_LPTIM_COUNTER_INCREASE
- && synapse->signal->id == count->synapses[0].signal->id) {
- switch (action) {
- case STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE:
- case STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE:
- case STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES:
- priv->polarity = action;
- return 0;
- }
- }
+ if (function != COUNTER_FUNCTION_INCREASE
+ || synapse->signal->id != count->synapses[0].signal->id)
+ return -EINVAL;
- return -EINVAL;
+ switch (action) {
+ case COUNTER_SYNAPSE_ACTION_RISING_EDGE:
+ priv->polarity = STM32_LPTIM_CKPOL_RISING_EDGE;
+ return 0;
+ case COUNTER_SYNAPSE_ACTION_FALLING_EDGE:
+ priv->polarity = STM32_LPTIM_CKPOL_FALLING_EDGE;
+ return 0;
+ case COUNTER_SYNAPSE_ACTION_BOTH_EDGES:
+ priv->polarity = STM32_LPTIM_CKPOL_BOTH_EDGES;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static const struct counter_ops stm32_lptim_cnt_ops = {
.count_read = stm32_lptim_cnt_read,
- .function_get = stm32_lptim_cnt_function_get,
- .function_set = stm32_lptim_cnt_function_set,
- .action_get = stm32_lptim_cnt_action_get,
- .action_set = stm32_lptim_cnt_action_set,
+ .function_read = stm32_lptim_cnt_function_read,
+ .function_write = stm32_lptim_cnt_function_write,
+ .action_read = stm32_lptim_cnt_action_read,
+ .action_write = stm32_lptim_cnt_action_write,
};
static struct counter_signal stm32_lptim_cnt_signals[] = {
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 3fb0debd7425..0546e932db0c 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
#define TIM_CCMR_CCXS (BIT(8) | BIT(0))
#define TIM_CCMR_MASK (TIM_CCMR_CC1S | TIM_CCMR_CC2S | \
@@ -36,29 +37,15 @@ struct stm32_timer_cnt {
struct stm32_timer_regs bak;
};
-/**
- * enum stm32_count_function - enumerates stm32 timer counter encoder modes
- * @STM32_COUNT_SLAVE_MODE_DISABLED: counts on internal clock when CEN=1
- * @STM32_COUNT_ENCODER_MODE_1: counts TI1FP1 edges, depending on TI2FP2 level
- * @STM32_COUNT_ENCODER_MODE_2: counts TI2FP2 edges, depending on TI1FP1 level
- * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges
- */
-enum stm32_count_function {
- STM32_COUNT_SLAVE_MODE_DISABLED,
- STM32_COUNT_ENCODER_MODE_1,
- STM32_COUNT_ENCODER_MODE_2,
- STM32_COUNT_ENCODER_MODE_3,
-};
-
static const enum counter_function stm32_count_functions[] = {
- [STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_FUNCTION_INCREASE,
- [STM32_COUNT_ENCODER_MODE_1] = COUNTER_FUNCTION_QUADRATURE_X2_A,
- [STM32_COUNT_ENCODER_MODE_2] = COUNTER_FUNCTION_QUADRATURE_X2_B,
- [STM32_COUNT_ENCODER_MODE_3] = COUNTER_FUNCTION_QUADRATURE_X4,
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_QUADRATURE_X2_A,
+ COUNTER_FUNCTION_QUADRATURE_X2_B,
+ COUNTER_FUNCTION_QUADRATURE_X4,
};
static int stm32_count_read(struct counter_device *counter,
- struct counter_count *count, unsigned long *val)
+ struct counter_count *count, u64 *val)
{
struct stm32_timer_cnt *const priv = counter->priv;
u32 cnt;
@@ -70,8 +57,7 @@ static int stm32_count_read(struct counter_device *counter,
}
static int stm32_count_write(struct counter_device *counter,
- struct counter_count *count,
- const unsigned long val)
+ struct counter_count *count, const u64 val)
{
struct stm32_timer_cnt *const priv = counter->priv;
u32 ceiling;
@@ -83,9 +69,9 @@ static int stm32_count_write(struct counter_device *counter,
return regmap_write(priv->regmap, TIM_CNT, val);
}
-static int stm32_count_function_get(struct counter_device *counter,
- struct counter_count *count,
- size_t *function)
+static int stm32_count_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
struct stm32_timer_cnt *const priv = counter->priv;
u32 smcr;
@@ -93,42 +79,42 @@ static int stm32_count_function_get(struct counter_device *counter,
regmap_read(priv->regmap, TIM_SMCR, &smcr);
switch (smcr & TIM_SMCR_SMS) {
- case 0:
- *function = STM32_COUNT_SLAVE_MODE_DISABLED;
+ case TIM_SMCR_SMS_SLAVE_MODE_DISABLED:
+ *function = COUNTER_FUNCTION_INCREASE;
return 0;
- case 1:
- *function = STM32_COUNT_ENCODER_MODE_1;
+ case TIM_SMCR_SMS_ENCODER_MODE_1:
+ *function = COUNTER_FUNCTION_QUADRATURE_X2_A;
return 0;
- case 2:
- *function = STM32_COUNT_ENCODER_MODE_2;
+ case TIM_SMCR_SMS_ENCODER_MODE_2:
+ *function = COUNTER_FUNCTION_QUADRATURE_X2_B;
return 0;
- case 3:
- *function = STM32_COUNT_ENCODER_MODE_3;
+ case TIM_SMCR_SMS_ENCODER_MODE_3:
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
default:
return -EINVAL;
}
}
-static int stm32_count_function_set(struct counter_device *counter,
- struct counter_count *count,
- size_t function)
+static int stm32_count_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
{
struct stm32_timer_cnt *const priv = counter->priv;
u32 cr1, sms;
switch (function) {
- case STM32_COUNT_SLAVE_MODE_DISABLED:
- sms = 0;
+ case COUNTER_FUNCTION_INCREASE:
+ sms = TIM_SMCR_SMS_SLAVE_MODE_DISABLED;
break;
- case STM32_COUNT_ENCODER_MODE_1:
- sms = 1;
+ case COUNTER_FUNCTION_QUADRATURE_X2_A:
+ sms = TIM_SMCR_SMS_ENCODER_MODE_1;
break;
- case STM32_COUNT_ENCODER_MODE_2:
- sms = 2;
+ case COUNTER_FUNCTION_QUADRATURE_X2_B:
+ sms = TIM_SMCR_SMS_ENCODER_MODE_2;
break;
- case STM32_COUNT_ENCODER_MODE_3:
- sms = 3;
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+ sms = TIM_SMCR_SMS_ENCODER_MODE_3;
break;
default:
return -EINVAL;
@@ -150,44 +136,37 @@ static int stm32_count_function_set(struct counter_device *counter,
return 0;
}
-static ssize_t stm32_count_direction_read(struct counter_device *counter,
+static int stm32_count_direction_read(struct counter_device *counter,
struct counter_count *count,
- void *private, char *buf)
+ enum counter_count_direction *direction)
{
struct stm32_timer_cnt *const priv = counter->priv;
- const char *direction;
u32 cr1;
regmap_read(priv->regmap, TIM_CR1, &cr1);
- direction = (cr1 & TIM_CR1_DIR) ? "backward" : "forward";
+ *direction = (cr1 & TIM_CR1_DIR) ? COUNTER_COUNT_DIRECTION_BACKWARD :
+ COUNTER_COUNT_DIRECTION_FORWARD;
- return scnprintf(buf, PAGE_SIZE, "%s\n", direction);
+ return 0;
}
-static ssize_t stm32_count_ceiling_read(struct counter_device *counter,
- struct counter_count *count,
- void *private, char *buf)
+static int stm32_count_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, u64 *ceiling)
{
struct stm32_timer_cnt *const priv = counter->priv;
u32 arr;
regmap_read(priv->regmap, TIM_ARR, &arr);
- return snprintf(buf, PAGE_SIZE, "%u\n", arr);
+ *ceiling = arr;
+
+ return 0;
}
-static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
- struct counter_count *count,
- void *private,
- const char *buf, size_t len)
+static int stm32_count_ceiling_write(struct counter_device *counter,
+ struct counter_count *count, u64 ceiling)
{
struct stm32_timer_cnt *const priv = counter->priv;
- unsigned int ceiling;
- int ret;
-
- ret = kstrtouint(buf, 0, &ceiling);
- if (ret)
- return ret;
if (ceiling > priv->max_arr)
return -ERANGE;
@@ -196,34 +175,27 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
regmap_write(priv->regmap, TIM_ARR, ceiling);
- return len;
+ return 0;
}
-static ssize_t stm32_count_enable_read(struct counter_device *counter,
- struct counter_count *count,
- void *private, char *buf)
+static int stm32_count_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
{
struct stm32_timer_cnt *const priv = counter->priv;
u32 cr1;
regmap_read(priv->regmap, TIM_CR1, &cr1);
- return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)(cr1 & TIM_CR1_CEN));
+ *enable = cr1 & TIM_CR1_CEN;
+
+ return 0;
}
-static ssize_t stm32_count_enable_write(struct counter_device *counter,
- struct counter_count *count,
- void *private,
- const char *buf, size_t len)
+static int stm32_count_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
{
struct stm32_timer_cnt *const priv = counter->priv;
- int err;
u32 cr1;
- bool enable;
-
- err = kstrtobool(buf, &enable);
- if (err)
- return err;
if (enable) {
regmap_read(priv->regmap, TIM_CR1, &cr1);
@@ -242,70 +214,55 @@ static ssize_t stm32_count_enable_write(struct counter_device *counter,
/* Keep enabled state to properly handle low power states */
priv->enabled = enable;
- return len;
+ return 0;
}
-static const struct counter_count_ext stm32_count_ext[] = {
- {
- .name = "direction",
- .read = stm32_count_direction_read,
- },
- {
- .name = "enable",
- .read = stm32_count_enable_read,
- .write = stm32_count_enable_write
- },
- {
- .name = "ceiling",
- .read = stm32_count_ceiling_read,
- .write = stm32_count_ceiling_write
- },
-};
-
-enum stm32_synapse_action {
- STM32_SYNAPSE_ACTION_NONE,
- STM32_SYNAPSE_ACTION_BOTH_EDGES
+static struct counter_comp stm32_count_ext[] = {
+ COUNTER_COMP_DIRECTION(stm32_count_direction_read),
+ COUNTER_COMP_ENABLE(stm32_count_enable_read, stm32_count_enable_write),
+ COUNTER_COMP_CEILING(stm32_count_ceiling_read,
+ stm32_count_ceiling_write),
};
static const enum counter_synapse_action stm32_synapse_actions[] = {
- [STM32_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
- [STM32_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
-static int stm32_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t *action)
+static int stm32_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
- size_t function;
+ enum counter_function function;
int err;
- err = stm32_count_function_get(counter, count, &function);
+ err = stm32_count_function_read(counter, count, &function);
if (err)
return err;
switch (function) {
- case STM32_COUNT_SLAVE_MODE_DISABLED:
+ case COUNTER_FUNCTION_INCREASE:
/* counts on internal clock when CEN=1 */
- *action = STM32_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
- case STM32_COUNT_ENCODER_MODE_1:
+ case COUNTER_FUNCTION_QUADRATURE_X2_A:
/* counts up/down on TI1FP1 edge depending on TI2FP2 level */
if (synapse->signal->id == count->synapses[0].signal->id)
- *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
else
- *action = STM32_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
- case STM32_COUNT_ENCODER_MODE_2:
+ case COUNTER_FUNCTION_QUADRATURE_X2_B:
/* counts up/down on TI2FP2 edge depending on TI1FP1 level */
if (synapse->signal->id == count->synapses[1].signal->id)
- *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
else
- *action = STM32_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
- case STM32_COUNT_ENCODER_MODE_3:
+ case COUNTER_FUNCTION_QUADRATURE_X4:
/* counts up/down on both TI1FP1 and TI2FP2 edges */
- *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
default:
return -EINVAL;
@@ -315,9 +272,9 @@ static int stm32_action_get(struct counter_device *counter,
static const struct counter_ops stm32_timer_cnt_ops = {
.count_read = stm32_count_read,
.count_write = stm32_count_write,
- .function_get = stm32_count_function_get,
- .function_set = stm32_count_function_set,
- .action_get = stm32_action_get,
+ .function_read = stm32_count_function_read,
+ .function_write = stm32_count_function_write,
+ .action_read = stm32_action_read,
};
static struct counter_signal stm32_signals[] = {
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 94fe58bb3eab..09817c953f9a 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/types.h>
/* 32-bit registers */
#define QPOSCNT 0x0
@@ -73,19 +74,13 @@ enum {
};
/* Position Counter Input Modes */
-enum {
+enum ti_eqep_count_func {
TI_EQEP_COUNT_FUNC_QUAD_COUNT,
TI_EQEP_COUNT_FUNC_DIR_COUNT,
TI_EQEP_COUNT_FUNC_UP_COUNT,
TI_EQEP_COUNT_FUNC_DOWN_COUNT,
};
-enum {
- TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES,
- TI_EQEP_SYNAPSE_ACTION_RISING_EDGE,
- TI_EQEP_SYNAPSE_ACTION_NONE,
-};
-
struct ti_eqep_cnt {
struct counter_device counter;
struct regmap *regmap32;
@@ -93,7 +88,7 @@ struct ti_eqep_cnt {
};
static int ti_eqep_count_read(struct counter_device *counter,
- struct counter_count *count, unsigned long *val)
+ struct counter_count *count, u64 *val)
{
struct ti_eqep_cnt *priv = counter->priv;
u32 cnt;
@@ -105,7 +100,7 @@ static int ti_eqep_count_read(struct counter_device *counter,
}
static int ti_eqep_count_write(struct counter_device *counter,
- struct counter_count *count, unsigned long val)
+ struct counter_count *count, u64 val)
{
struct ti_eqep_cnt *priv = counter->priv;
u32 max;
@@ -117,64 +112,100 @@ static int ti_eqep_count_write(struct counter_device *counter,
return regmap_write(priv->regmap32, QPOSCNT, val);
}
-static int ti_eqep_function_get(struct counter_device *counter,
- struct counter_count *count, size_t *function)
+static int ti_eqep_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
struct ti_eqep_cnt *priv = counter->priv;
u32 qdecctl;
regmap_read(priv->regmap16, QDECCTL, &qdecctl);
- *function = (qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT;
+
+ switch ((qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT) {
+ case TI_EQEP_COUNT_FUNC_QUAD_COUNT:
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
+ break;
+ case TI_EQEP_COUNT_FUNC_DIR_COUNT:
+ *function = COUNTER_FUNCTION_PULSE_DIRECTION;
+ break;
+ case TI_EQEP_COUNT_FUNC_UP_COUNT:
+ *function = COUNTER_FUNCTION_INCREASE;
+ break;
+ case TI_EQEP_COUNT_FUNC_DOWN_COUNT:
+ *function = COUNTER_FUNCTION_DECREASE;
+ break;
+ }
return 0;
}
-static int ti_eqep_function_set(struct counter_device *counter,
- struct counter_count *count, size_t function)
+static int ti_eqep_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
{
struct ti_eqep_cnt *priv = counter->priv;
+ enum ti_eqep_count_func qsrc;
+
+ switch (function) {
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+ qsrc = TI_EQEP_COUNT_FUNC_QUAD_COUNT;
+ break;
+ case COUNTER_FUNCTION_PULSE_DIRECTION:
+ qsrc = TI_EQEP_COUNT_FUNC_DIR_COUNT;
+ break;
+ case COUNTER_FUNCTION_INCREASE:
+ qsrc = TI_EQEP_COUNT_FUNC_UP_COUNT;
+ break;
+ case COUNTER_FUNCTION_DECREASE:
+ qsrc = TI_EQEP_COUNT_FUNC_DOWN_COUNT;
+ break;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
+ }
return regmap_write_bits(priv->regmap16, QDECCTL, QDECCTL_QSRC,
- function << QDECCTL_QSRC_SHIFT);
+ qsrc << QDECCTL_QSRC_SHIFT);
}
-static int ti_eqep_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse, size_t *action)
+static int ti_eqep_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
struct ti_eqep_cnt *priv = counter->priv;
- size_t function;
+ enum counter_function function;
u32 qdecctl;
int err;
- err = ti_eqep_function_get(counter, count, &function);
+ err = ti_eqep_function_read(counter, count, &function);
if (err)
return err;
switch (function) {
- case TI_EQEP_COUNT_FUNC_QUAD_COUNT:
+ case COUNTER_FUNCTION_QUADRATURE_X4:
/* In quadrature mode, the rising and falling edge of both
* QEPA and QEPB trigger QCLK.
*/
- *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
- case TI_EQEP_COUNT_FUNC_DIR_COUNT:
+ case COUNTER_FUNCTION_PULSE_DIRECTION:
/* In direction-count mode only rising edge of QEPA is counted
* and QEPB gives direction.
*/
switch (synapse->signal->id) {
case TI_EQEP_SIGNAL_QEPA:
- *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
case TI_EQEP_SIGNAL_QEPB:
- *action = TI_EQEP_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
default:
/* should never reach this path */
return -EINVAL;
}
- case TI_EQEP_COUNT_FUNC_UP_COUNT:
- case TI_EQEP_COUNT_FUNC_DOWN_COUNT:
+ case COUNTER_FUNCTION_INCREASE:
+ case COUNTER_FUNCTION_DECREASE:
/* In up/down-count modes only QEPA is counted and QEPB is not
* used.
*/
@@ -185,12 +216,12 @@ static int ti_eqep_action_get(struct counter_device *counter,
return err;
if (qdecctl & QDECCTL_XCR)
- *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
else
- *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
case TI_EQEP_SIGNAL_QEPB:
- *action = TI_EQEP_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
default:
/* should never reach this path */
@@ -205,82 +236,67 @@ static int ti_eqep_action_get(struct counter_device *counter,
static const struct counter_ops ti_eqep_counter_ops = {
.count_read = ti_eqep_count_read,
.count_write = ti_eqep_count_write,
- .function_get = ti_eqep_function_get,
- .function_set = ti_eqep_function_set,
- .action_get = ti_eqep_action_get,
+ .function_read = ti_eqep_function_read,
+ .function_write = ti_eqep_function_write,
+ .action_read = ti_eqep_action_read,
};
-static ssize_t ti_eqep_position_ceiling_read(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, char *buf)
+static int ti_eqep_position_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ u64 *ceiling)
{
struct ti_eqep_cnt *priv = counter->priv;
u32 qposmax;
regmap_read(priv->regmap32, QPOSMAX, &qposmax);
- return sprintf(buf, "%u\n", qposmax);
+ *ceiling = qposmax;
+
+ return 0;
}
-static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, const char *buf,
- size_t len)
+static int ti_eqep_position_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ u64 ceiling)
{
struct ti_eqep_cnt *priv = counter->priv;
- int err;
- u32 res;
- err = kstrtouint(buf, 0, &res);
- if (err < 0)
- return err;
+ if (ceiling != (u32)ceiling)
+ return -ERANGE;
- regmap_write(priv->regmap32, QPOSMAX, res);
+ regmap_write(priv->regmap32, QPOSMAX, ceiling);
- return len;
+ return 0;
}
-static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, char *buf)
+static int ti_eqep_position_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
{
struct ti_eqep_cnt *priv = counter->priv;
u32 qepctl;
regmap_read(priv->regmap16, QEPCTL, &qepctl);
- return sprintf(buf, "%u\n", !!(qepctl & QEPCTL_PHEN));
+ *enable = !!(qepctl & QEPCTL_PHEN);
+
+ return 0;
}
-static ssize_t ti_eqep_position_enable_write(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, const char *buf,
- size_t len)
+static int ti_eqep_position_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
{
struct ti_eqep_cnt *priv = counter->priv;
- int err;
- bool res;
-
- err = kstrtobool(buf, &res);
- if (err < 0)
- return err;
- regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, res ? -1 : 0);
+ regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, enable ? -1 : 0);
- return len;
+ return 0;
}
-static struct counter_count_ext ti_eqep_position_ext[] = {
- {
- .name = "ceiling",
- .read = ti_eqep_position_ceiling_read,
- .write = ti_eqep_position_ceiling_write,
- },
- {
- .name = "enable",
- .read = ti_eqep_position_enable_read,
- .write = ti_eqep_position_enable_write,
- },
+static struct counter_comp ti_eqep_position_ext[] = {
+ COUNTER_COMP_CEILING(ti_eqep_position_ceiling_read,
+ ti_eqep_position_ceiling_write),
+ COUNTER_COMP_ENABLE(ti_eqep_position_enable_read,
+ ti_eqep_position_enable_write),
};
static struct counter_signal ti_eqep_signals[] = {
@@ -295,16 +311,16 @@ static struct counter_signal ti_eqep_signals[] = {
};
static const enum counter_function ti_eqep_position_functions[] = {
- [TI_EQEP_COUNT_FUNC_QUAD_COUNT] = COUNTER_FUNCTION_QUADRATURE_X4,
- [TI_EQEP_COUNT_FUNC_DIR_COUNT] = COUNTER_FUNCTION_PULSE_DIRECTION,
- [TI_EQEP_COUNT_FUNC_UP_COUNT] = COUNTER_FUNCTION_INCREASE,
- [TI_EQEP_COUNT_FUNC_DOWN_COUNT] = COUNTER_FUNCTION_DECREASE,
+ COUNTER_FUNCTION_QUADRATURE_X4,
+ COUNTER_FUNCTION_PULSE_DIRECTION,
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_DECREASE,
};
static const enum counter_synapse_action ti_eqep_position_synapse_actions[] = {
- [TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
- [TI_EQEP_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- [TI_EQEP_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_NONE,
};
static struct counter_synapse ti_eqep_position_synapses[] = {
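The enable write above passes "enable ? -1 : 0" to regmap_write_bits(); since
the value is masked with QEPCTL_PHEN, -1 (all bits set) merely asserts the
masked bit. An equivalent, more explicit form would be:

	/* equivalent to the 'enable ? -1 : 0' idiom above */
	regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN,
			  enable ? QEPCTL_PHEN : 0);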
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 28467d83c745..3d514b82d055 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -470,7 +470,8 @@ static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
if (policy->cached_target_freq == target_freq)
index = policy->cached_resolved_idx;
else
- index = cpufreq_table_find_index_dl(policy, target_freq);
+ index = cpufreq_table_find_index_dl(policy, target_freq,
+ false);
entry = &policy->freq_table[index];
next_freq = entry->frequency;
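The new third argument comes from the same series; the assumed shape of the
updated helper is roughly:

/* Assumed signature of the updated helper (include/linux/cpufreq.h): when
 * 'efficiencies' is true, frequencies marked inefficient are skipped
 * where possible.
 */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies);

Fast switching passes false here, so it keeps considering every entry in the
frequency table.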
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index d0b10baf039a..6448e03bcf48 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -91,7 +91,8 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
unsigned int index;
index = cpufreq_table_find_index_h(policy,
- policy->cur - 1);
+ policy->cur - 1,
+ relation & CPUFREQ_RELATION_E);
freq_next = policy->freq_table[index].frequency;
}
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index d4c27022b9c9..db17196266e4 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -741,8 +741,6 @@ static int __init cppc_cpufreq_init(void)
if ((acpi_disabled) || !acpi_cpc_valid())
return -ENODEV;
- INIT_LIST_HEAD(&cpu_data_list);
-
cppc_check_hisi_workaround();
cppc_freq_invariance_init();
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5782b15a8caa..e338d2f010fe 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -554,7 +554,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_L);
+ return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -2260,8 +2260,16 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
!(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
return 0;
- if (cpufreq_driver->target)
+ if (cpufreq_driver->target) {
+ /*
+ * If the driver hasn't set up a single inefficient frequency,
+ * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
+ */
+ if (!policy->efficiencies_available)
+ relation &= ~CPUFREQ_RELATION_E;
+
return cpufreq_driver->target(policy, target_freq, relation);
+ }
if (!cpufreq_driver->target_index)
return -EINVAL;
@@ -2523,8 +2531,15 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
if (ret)
return ret;
+ /*
+ * Resolve policy min/max to available frequencies. This ensures that
+ * the resolved frequencies neither overshoot the requested maximum
+ * nor undershoot the requested minimum.
+ */
policy->min = new_data.min;
policy->max = new_data.max;
+ policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
+ policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
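The *_E relation variants introduced throughout this series are the base relations with an "efficient" modifier bit OR'd in, which is why __cpufreq_driver_target() above can simply strip the bit for drivers that registered no inefficient frequencies. A minimal sketch of how such flags compose (values illustrative; the real definitions live in include/linux/cpufreq.h):

#define RELATION_L      0               /* lowest frequency at or above target */
#define RELATION_H      1               /* highest frequency at or below target */
#define RELATION_C      2               /* closest frequency to target */
#define RELATION_E      BIT(2)          /* prefer an efficient frequency */
#define RELATION_LE     (RELATION_L | RELATION_E)
#define RELATION_HE     (RELATION_H | RELATION_E)

/* Drivers unaware of efficiencies only ever see the base relation. */
static unsigned int sanitize_relation(unsigned int relation, bool efficiencies)
{
        return efficiencies ? relation : relation & ~RELATION_E;
}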
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index aa39ff31ec9f..0879ec3c170c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -111,7 +111,8 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
if (requested_freq > policy->max)
requested_freq = policy->max;
- __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, requested_freq,
+ CPUFREQ_RELATION_HE);
dbs_info->requested_freq = requested_freq;
goto out;
}
@@ -134,7 +135,8 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
else
requested_freq = policy->min;
- __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, requested_freq,
+ CPUFREQ_RELATION_LE);
dbs_info->requested_freq = requested_freq;
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index eb4320b619c9..3b8f924771b4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -83,9 +83,11 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
freq_avg = freq_req - freq_reduc;
/* Find freq bounds for freq_avg in freq_table */
- index = cpufreq_table_find_index_h(policy, freq_avg);
+ index = cpufreq_table_find_index_h(policy, freq_avg,
+ relation & CPUFREQ_RELATION_E);
freq_lo = freq_table[index].frequency;
- index = cpufreq_table_find_index_l(policy, freq_avg);
+ index = cpufreq_table_find_index_l(policy, freq_avg,
+ relation & CPUFREQ_RELATION_E);
freq_hi = freq_table[index].frequency;
/* Find out how long we have to be in hi and lo freqs */
@@ -118,12 +120,12 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
if (od_tuners->powersave_bias)
freq = od_ops.powersave_bias_target(policy, freq,
- CPUFREQ_RELATION_H);
+ CPUFREQ_RELATION_HE);
else if (policy->cur == policy->max)
return;
__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
- CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+ CPUFREQ_RELATION_LE : CPUFREQ_RELATION_HE);
}
/*
@@ -161,9 +163,9 @@ static void od_update(struct cpufreq_policy *policy)
if (od_tuners->powersave_bias)
freq_next = od_ops.powersave_bias_target(policy,
freq_next,
- CPUFREQ_RELATION_L);
+ CPUFREQ_RELATION_LE);
- __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
+ __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_CE);
}
}
@@ -182,7 +184,7 @@ static unsigned int od_dbs_update(struct cpufreq_policy *policy)
*/
if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
__cpufreq_driver_target(policy, dbs_info->freq_lo,
- CPUFREQ_RELATION_H);
+ CPUFREQ_RELATION_HE);
return dbs_info->freq_lo_delay_us;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8c176b7dae41..349ddbaef796 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -32,6 +32,7 @@
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
+#include "../drivers/thermal/intel/thermal_interrupt.h"
#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
@@ -219,6 +220,7 @@ struct global_params {
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
+ * @hwp_notify_work: Delayed work for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -257,6 +259,7 @@ struct cpudata {
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
+ struct delayed_work hwp_notify_work;
};
static struct cpudata **all_cpu_data;
@@ -537,7 +540,8 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
* scaling factor is too high, so recompute it to make the HWP_CAP
* highest performance correspond to the maximum turbo frequency.
*/
- if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
+ if (turbo_freq < cpu->pstate.turbo_freq) {
cpu->pstate.turbo_freq = turbo_freq;
scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
cpu->pstate.scaling = scaling;
@@ -985,11 +989,15 @@ skip_epp:
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
+static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
+
static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
u64 value = READ_ONCE(cpu->hwp_req_cached);
int min_perf;
+ intel_pstate_disable_hwp_interrupt(cpu);
+
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
* In case the EPP has been set to "performance" by the
@@ -1053,6 +1061,9 @@ static int intel_pstate_suspend(struct cpufreq_policy *policy)
cpu->suspended = true;
+ /* disable HWP interrupt and cancel any pending work */
+ intel_pstate_disable_hwp_interrupt(cpu);
+
return 0;
}
@@ -1546,15 +1557,105 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
/************************** sysfs end ************************/
+static void intel_pstate_notify_work(struct work_struct *work)
+{
+ struct cpudata *cpudata =
+ container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
+
+ cpufreq_update_policy(cpudata->cpu);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+}
+
+static DEFINE_SPINLOCK(hwp_notify_lock);
+static cpumask_t hwp_intr_enable_mask;
+
+void notify_hwp_interrupt(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ struct cpudata *cpudata;
+ unsigned long flags;
+ u64 value;
+
+ if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ return;
+
+ rdmsrl_safe(MSR_HWP_STATUS, &value);
+ if (!(value & 0x01))
+ return;
+
+ spin_lock_irqsave(&hwp_notify_lock, flags);
+
+ if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
+ goto ack_intr;
+
+ /*
+ * all_cpu_data is currently never freed, and we cannot get here
+ * unless it has been allocated. Check it anyway, for safety against
+ * future changes.
+ */
+ if (unlikely(!READ_ONCE(all_cpu_data)))
+ goto ack_intr;
+
+ /*
+ * The per-CPU data is only freed during cleanup, when cpufreq
+ * registration fails, and we cannot get here in that case. Check it
+ * anyway, for safety against future changes.
+ */
+ cpudata = READ_ONCE(all_cpu_data[this_cpu]);
+ if (unlikely(!cpudata))
+ goto ack_intr;
+
+ schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+ return;
+
+ack_intr:
+ wrmsrl_safe(MSR_HWP_STATUS, 0);
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+}
+
+static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
+{
+ unsigned long flags;
+
+ /* wrmsrl_on_cpu() has to be outside the spinlock as it can result in an IPI */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+
+ spin_lock_irqsave(&hwp_notify_lock, flags);
+ if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
+ cancel_delayed_work(&cpudata->hwp_notify_work);
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+}
+
+static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
+{
+ /* Enable HWP notification interrupt for guaranteed performance change */
+ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwp_notify_lock, flags);
+ INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
+ cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+ /* wrmsrl_on_cpu() has to be outside the spinlock as it can result in an IPI */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+ }
+}
+
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
- /* First disable HWP notification interrupt as we don't process them */
+ /* First disable the HWP notification interrupt until we re-enable it below */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
if (cpudata->epp_default == -EINVAL)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+
+ intel_pstate_enable_hwp_interrupt(cpudata);
}
static int atom_get_min_pstate(void)
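notify_hwp_interrupt() above only acknowledges MSR_HWP_STATUS and defers the heavy lifting; the delayed-work handler then runs in process context, where cpufreq_update_policy() is allowed to sleep. A generic sketch of this top-half/bottom-half split (names hypothetical):

#include <linux/workqueue.h>

struct example_cpu {
        int cpu;
        struct delayed_work notify_work;
};

/* Bottom half: process context, may sleep. */
static void example_notify_work(struct work_struct *work)
{
        struct example_cpu *c = container_of(to_delayed_work(work),
                                             struct example_cpu, notify_work);

        handle_event(c->cpu);   /* hypothetical placeholder */
}

/* Top half: interrupt context, defer and return quickly. */
static void example_irq(struct example_cpu *c)
{
        schedule_delayed_work(&c->notify_work, msecs_to_jiffies(10));
}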
@@ -2266,7 +2367,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
if (!cpu)
return -ENOMEM;
- all_cpu_data[cpunum] = cpu;
+ WRITE_ONCE(all_cpu_data[cpunum], cpu);
cpu->cpu = cpunum;
@@ -2929,8 +3030,10 @@ static void intel_pstate_driver_cleanup(void)
if (intel_pstate_driver == &intel_pstate)
intel_pstate_clear_update_util_hook(cpu);
+ spin_lock(&hwp_notify_lock);
kfree(all_cpu_data[cpu]);
- all_cpu_data[cpu] = NULL;
+ WRITE_ONCE(all_cpu_data[cpu], NULL);
+ spin_unlock(&hwp_notify_lock);
}
}
cpus_read_unlock();
@@ -3199,6 +3302,7 @@ static bool intel_pstate_hwp_is_enabled(void)
static int __init intel_pstate_init(void)
{
+ static struct cpudata **_all_cpu_data;
const struct x86_cpu_id *id;
int rc;
@@ -3224,7 +3328,7 @@ static int __init intel_pstate_init(void)
* deal with it.
*/
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
- hwp_active++;
+ WRITE_ONCE(hwp_active, 1);
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
@@ -3275,10 +3379,12 @@ hwp_cpu_matched:
pr_info("Intel P-state driver initializing\n");
- all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
- if (!all_cpu_data)
+ _all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
+ if (!_all_cpu_data)
return -ENOMEM;
+ WRITE_ONCE(all_cpu_data, _all_cpu_data);
+
intel_pstate_request_control_from_smm();
intel_pstate_sysfs_expose_params();
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 0cf18dd46b92..8ddbd0c5ce37 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -109,7 +109,7 @@ static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
struct mtk_cpufreq_data *data = policy->driver_data;
unsigned int index;
- index = cpufreq_table_find_index_dl(policy, target_freq);
+ index = cpufreq_table_find_index_dl(policy, target_freq, false);
writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 5a2cf5f91ccb..fddbd1ea1635 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -934,7 +934,7 @@ static void powernv_cpufreq_work_fn(struct work_struct *work)
policy = cpufreq_cpu_get(cpu);
if (!policy)
continue;
- index = cpufreq_table_find_index_c(policy, policy->cur);
+ index = cpufreq_table_find_index_c(policy, policy->cur, false);
powernv_cpufreq_target_index(policy, index);
cpumask_andnot(&mask, &mask, policy->cpus);
cpufreq_cpu_put(policy);
@@ -1022,7 +1022,7 @@ static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
int index;
struct powernv_smp_call_data freq_data;
- index = cpufreq_table_find_index_dl(policy, target_freq);
+ index = cpufreq_table_find_index_dl(policy, target_freq, false);
freq_data.pstate_id = powernv_freqs[index].driver_data;
freq_data.gpstate_id = powernv_freqs[index].driver_data;
set_pstate(&freq_data);
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index 148e8aedefa9..2011fb9c03a4 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -173,12 +173,14 @@ static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
case 6:
camdiv |= S3C2440_CAMDIVN_HCLK3_HALF;
+ fallthrough;
case 3:
clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6;
break;
case 8:
camdiv |= S3C2440_CAMDIVN_HCLK4_HALF;
+ fallthrough;
case 4:
clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8;
break;
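The added fallthrough statements document that the case 6 and case 8 bodies intentionally continue into case 3 and case 4; with -Wimplicit-fallthrough enabled the compiler would otherwise warn here. The pseudo-keyword comes from include/linux/compiler_attributes.h and is roughly:

#if __has_attribute(__fallthrough__)
# define fallthrough    __attribute__((__fallthrough__))
#else
# define fallthrough    do {} while (0) /* fallthrough */
#endif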
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index ad7d4f272ddc..76c888ed8d16 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -243,7 +243,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
- priv_index = cpufreq_table_find_index_h(policy, old_freq);
+ priv_index = cpufreq_table_find_index_h(policy, old_freq, false);
arm_volt = dvs_conf[index].arm_volt;
int_volt = dvs_conf[index].int_volt;
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 5d1943e787b0..6c88827f4e62 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -159,6 +159,10 @@ static struct cpufreq_frequency_table *init_vhint_table(
table = ERR_PTR(err);
goto free;
}
+ if (msg.rx.ret) {
+ table = ERR_PTR(-EINVAL);
+ goto free;
+ }
for (i = data->vfloor; i <= data->vceil; i++) {
u16 ndiv = data->ndiv[i];
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index a9620e4489ae..ac381db25dbe 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -242,7 +242,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true);
- if (cl >= data->num_clusters)
+ if (cl >= data->num_clusters || !data->tables[cl])
return -EINVAL;
/* set same policy for all cpus in a cluster */
@@ -310,6 +310,12 @@ init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp,
err = tegra_bpmp_transfer(bpmp, &msg);
if (err)
return ERR_PTR(err);
+ if (msg.rx.ret == -BPMP_EINVAL) {
+ /* Cluster not available */
+ return NULL;
+ }
+ if (msg.rx.ret)
+ return ERR_PTR(-EINVAL);
/*
* Make sure frequency table step is a multiple of mdiv to match
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 334f83e56120..15d6c46c0a47 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -99,7 +99,7 @@ config ARM_MVEBU_V7_CPUIDLE
config ARM_TEGRA_CPUIDLE
bool "CPU Idle Driver for NVIDIA Tegra SoCs"
- depends on ARCH_TEGRA && !ARM64
+ depends on (ARCH_TEGRA || COMPILE_TEST) && !ARM64 && MMU
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
select ARM_CPU_SUSPEND
help
@@ -112,6 +112,7 @@ config ARM_QCOM_SPM_CPUIDLE
select CPU_IDLE_MULTIPLE_DRIVERS
select DT_IDLE_STATES
select QCOM_SCM
+ select QCOM_SPM
help
Select this to enable cpuidle for Qualcomm processors.
The Subsystem Power Manager (SPM) controls low power modes for the
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index c0e7971da2da..01e77913a414 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -18,158 +18,18 @@
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/qcom_scm.h>
+#include <soc/qcom/spm.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include "dt_idle_states.h"
-#define MAX_PMIC_DATA 2
-#define MAX_SEQ_DATA 64
-#define SPM_CTL_INDEX 0x7f
-#define SPM_CTL_INDEX_SHIFT 4
-#define SPM_CTL_EN BIT(0)
-
-enum pm_sleep_mode {
- PM_SLEEP_MODE_STBY,
- PM_SLEEP_MODE_RET,
- PM_SLEEP_MODE_SPC,
- PM_SLEEP_MODE_PC,
- PM_SLEEP_MODE_NR,
-};
-
-enum spm_reg {
- SPM_REG_CFG,
- SPM_REG_SPM_CTL,
- SPM_REG_DLY,
- SPM_REG_PMIC_DLY,
- SPM_REG_PMIC_DATA_0,
- SPM_REG_PMIC_DATA_1,
- SPM_REG_VCTL,
- SPM_REG_SEQ_ENTRY,
- SPM_REG_SPM_STS,
- SPM_REG_PMIC_STS,
- SPM_REG_NR,
-};
-
-struct spm_reg_data {
- const u8 *reg_offset;
- u32 spm_cfg;
- u32 spm_dly;
- u32 pmic_dly;
- u32 pmic_data[MAX_PMIC_DATA];
- u8 seq[MAX_SEQ_DATA];
- u8 start_index[PM_SLEEP_MODE_NR];
-};
-
-struct spm_driver_data {
+struct cpuidle_qcom_spm_data {
struct cpuidle_driver cpuidle_driver;
- void __iomem *reg_base;
- const struct spm_reg_data *reg_data;
-};
-
-static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
- [SPM_REG_CFG] = 0x08,
- [SPM_REG_SPM_CTL] = 0x30,
- [SPM_REG_DLY] = 0x34,
- [SPM_REG_SEQ_ENTRY] = 0x80,
-};
-
-/* SPM register data for 8974, 8084 */
-static const struct spm_reg_data spm_reg_8974_8084_cpu = {
- .reg_offset = spm_reg_offset_v2_1,
- .spm_cfg = 0x1,
- .spm_dly = 0x3C102800,
- .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
- 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
- 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 3,
+ struct spm_driver_data *spm;
};
-/* SPM register data for 8226 */
-static const struct spm_reg_data spm_reg_8226_cpu = {
- .reg_offset = spm_reg_offset_v2_1,
- .spm_cfg = 0x0,
- .spm_dly = 0x3C102800,
- .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
- 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
- 0x80, 0x10, 0x26, 0x30, 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 5,
-};
-
-static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
- [SPM_REG_CFG] = 0x08,
- [SPM_REG_SPM_CTL] = 0x20,
- [SPM_REG_PMIC_DLY] = 0x24,
- [SPM_REG_PMIC_DATA_0] = 0x28,
- [SPM_REG_PMIC_DATA_1] = 0x2C,
- [SPM_REG_SEQ_ENTRY] = 0x80,
-};
-
-/* SPM register data for 8064 */
-static const struct spm_reg_data spm_reg_8064_cpu = {
- .reg_offset = spm_reg_offset_v1_1,
- .spm_cfg = 0x1F,
- .pmic_dly = 0x02020004,
- .pmic_data[0] = 0x0084009C,
- .pmic_data[1] = 0x00A4001C,
- .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
- 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 2,
-};
-
-static inline void spm_register_write(struct spm_driver_data *drv,
- enum spm_reg reg, u32 val)
-{
- if (drv->reg_data->reg_offset[reg])
- writel_relaxed(val, drv->reg_base +
- drv->reg_data->reg_offset[reg]);
-}
-
-/* Ensure a guaranteed write, before return */
-static inline void spm_register_write_sync(struct spm_driver_data *drv,
- enum spm_reg reg, u32 val)
-{
- u32 ret;
-
- if (!drv->reg_data->reg_offset[reg])
- return;
-
- do {
- writel_relaxed(val, drv->reg_base +
- drv->reg_data->reg_offset[reg]);
- ret = readl_relaxed(drv->reg_base +
- drv->reg_data->reg_offset[reg]);
- if (ret == val)
- break;
- cpu_relax();
- } while (1);
-}
-
-static inline u32 spm_register_read(struct spm_driver_data *drv,
- enum spm_reg reg)
-{
- return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
-}
-
-static void spm_set_low_power_mode(struct spm_driver_data *drv,
- enum pm_sleep_mode mode)
-{
- u32 start_index;
- u32 ctl_val;
-
- start_index = drv->reg_data->start_index[mode];
-
- ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
- ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
- ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
- ctl_val |= SPM_CTL_EN;
- spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
-}
-
static int qcom_pm_collapse(unsigned long int unused)
{
qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
@@ -201,10 +61,10 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
static int spm_enter_idle_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
- struct spm_driver_data *data = container_of(drv, struct spm_driver_data,
- cpuidle_driver);
+ struct cpuidle_qcom_spm_data *data = container_of(drv, struct cpuidle_qcom_spm_data,
+ cpuidle_driver);
- return CPU_PM_CPU_IDLE_ENTER_PARAM(qcom_cpu_spc, idx, data);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(qcom_cpu_spc, idx, data->spm);
}
static struct cpuidle_driver qcom_spm_idle_driver = {
@@ -225,134 +85,92 @@ static const struct of_device_id qcom_idle_state_match[] = {
{ },
};
-static int spm_cpuidle_init(struct cpuidle_driver *drv, int cpu)
+static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
{
+ struct platform_device *pdev = NULL;
+ struct device_node *cpu_node, *saw_node;
+ struct cpuidle_qcom_spm_data *data = NULL;
int ret;
- memcpy(drv, &qcom_spm_idle_driver, sizeof(*drv));
- drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ return -ENODEV;
- /* Parse idle states from device tree */
- ret = dt_init_idle_driver(drv, qcom_idle_state_match, 1);
- if (ret <= 0)
- return ret ? : -ENODEV;
+ saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ if (!saw_node)
+ return -ENODEV;
- /* We have atleast one power down mode */
- return qcom_scm_set_warm_boot_addr(cpu_resume_arm, drv->cpumask);
-}
+ pdev = of_find_device_by_node(saw_node);
+ of_node_put(saw_node);
+ of_node_put(cpu_node);
+ if (!pdev)
+ return -ENODEV;
-static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
- int *spm_cpu)
-{
- struct spm_driver_data *drv = NULL;
- struct device_node *cpu_node, *saw_node;
- int cpu;
- bool found = 0;
+ data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_node = of_cpu_device_node_get(cpu);
- if (!cpu_node)
- continue;
- saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
- found = (saw_node == pdev->dev.of_node);
- of_node_put(saw_node);
- of_node_put(cpu_node);
- if (found)
- break;
- }
+ data->spm = dev_get_drvdata(&pdev->dev);
+ if (!data->spm)
+ return -EINVAL;
- if (found) {
- drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
- if (drv)
- *spm_cpu = cpu;
- }
+ data->cpuidle_driver = qcom_spm_idle_driver;
+ data->cpuidle_driver.cpumask = (struct cpumask *)cpumask_of(cpu);
- return drv;
-}
+ ret = dt_init_idle_driver(&data->cpuidle_driver,
+ qcom_idle_state_match, 1);
+ if (ret <= 0)
+ return ret ? : -ENODEV;
-static const struct of_device_id spm_match_table[] = {
- { .compatible = "qcom,msm8226-saw2-v2.1-cpu",
- .data = &spm_reg_8226_cpu },
- { .compatible = "qcom,msm8974-saw2-v2.1-cpu",
- .data = &spm_reg_8974_8084_cpu },
- { .compatible = "qcom,apq8084-saw2-v2.1-cpu",
- .data = &spm_reg_8974_8084_cpu },
- { .compatible = "qcom,apq8064-saw2-v1.1-cpu",
- .data = &spm_reg_8064_cpu },
- { },
-};
+ ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm, cpumask_of(cpu));
+ if (ret)
+ return ret;
+
+ return cpuidle_register(&data->cpuidle_driver, NULL);
+}
-static int spm_dev_probe(struct platform_device *pdev)
+static int spm_cpuidle_drv_probe(struct platform_device *pdev)
{
- struct spm_driver_data *drv;
- struct resource *res;
- const struct of_device_id *match_id;
- void __iomem *addr;
int cpu, ret;
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
- drv = spm_get_drv(pdev, &cpu);
- if (!drv)
- return -EINVAL;
- platform_set_drvdata(pdev, drv);
+ for_each_possible_cpu(cpu) {
+ ret = spm_cpuidle_register(&pdev->dev, cpu);
+ if (ret && ret != -ENODEV) {
+ dev_err(&pdev->dev,
+ "Cannot register for CPU%d: %d\n", cpu, ret);
+ }
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(drv->reg_base))
- return PTR_ERR(drv->reg_base);
+ return 0;
+}
- match_id = of_match_node(spm_match_table, pdev->dev.of_node);
- if (!match_id)
- return -ENODEV;
+static struct platform_driver spm_cpuidle_driver = {
+ .probe = spm_cpuidle_drv_probe,
+ .driver = {
+ .name = "qcom-spm-cpuidle",
+ .suppress_bind_attrs = true,
+ },
+};
- drv->reg_data = match_id->data;
+static int __init qcom_spm_cpuidle_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
- ret = spm_cpuidle_init(&drv->cpuidle_driver, cpu);
+ ret = platform_driver_register(&spm_cpuidle_driver);
if (ret)
return ret;
- /* Write the SPM sequences first.. */
- addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
- __iowrite32_copy(addr, drv->reg_data->seq,
- ARRAY_SIZE(drv->reg_data->seq) / 4);
-
- /*
- * ..and then the control registers.
- * On some SoC if the control registers are written first and if the
- * CPU was held in reset, the reset signal could trigger the SPM state
- * machine, before the sequences are completely written.
- */
- spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
- spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
- spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
- spm_register_write(drv, SPM_REG_PMIC_DATA_0,
- drv->reg_data->pmic_data[0]);
- spm_register_write(drv, SPM_REG_PMIC_DATA_1,
- drv->reg_data->pmic_data[1]);
-
- /* Set up Standby as the default low power mode */
- spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
-
- return cpuidle_register(&drv->cpuidle_driver, NULL);
-}
-
-static int spm_dev_remove(struct platform_device *pdev)
-{
- struct spm_driver_data *drv = platform_get_drvdata(pdev);
+ pdev = platform_device_register_simple("qcom-spm-cpuidle",
+ -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&spm_cpuidle_driver);
+ return PTR_ERR(pdev);
+ }
- cpuidle_unregister(&drv->cpuidle_driver);
return 0;
}
-
-static struct platform_driver spm_driver = {
- .probe = spm_dev_probe,
- .remove = spm_dev_remove,
- .driver = {
- .name = "saw",
- .of_match_table = spm_match_table,
- },
-};
-
-builtin_platform_driver(spm_driver);
+device_initcall(qcom_spm_cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 508bd9f23792..9845629aeb6d 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -337,6 +337,9 @@ static void tegra_cpuidle_setup_tegra114_c7_state(void)
static int tegra_cpuidle_probe(struct platform_device *pdev)
{
+ if (tegra_pmc_get_suspend_mode() == TEGRA_SUSPEND_NOT_READY)
+ return -EPROBE_DEFER;
+
/* LP2 could be disabled in device-tree */
if (tegra_pmc_get_suspend_mode() < TEGRA_SUSPEND_LP2)
tegra_cpuidle_disable_state(TEGRA_CC6);
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 53ec9585ccd4..469e18547d06 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -488,6 +488,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
&kdev->kobj, "state%d", i);
if (ret) {
kobject_put(&kobj->kobj);
+ kfree(kobj);
goto error_state;
}
cpuidle_add_s2idle_attr_group(kobj);
@@ -619,6 +620,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
&kdev->kobj, "driver");
if (ret) {
kobject_put(&kdrv->kobj);
+ kfree(kdrv);
return ret;
}
@@ -705,7 +707,6 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
if (!kdev)
return -ENOMEM;
kdev->dev = dev;
- dev->kobj_dev = kdev;
init_completion(&kdev->kobj_unregister);
@@ -713,9 +714,11 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
"cpuidle");
if (error) {
kobject_put(&kdev->kobj);
+ kfree(kdev);
return error;
}
+ dev->kobj_dev = kdev;
kobject_uevent(&kdev->kobj, KOBJ_ADD);
return 0;
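All three fixes follow one pattern: these cpuidle kobject types use release callbacks that only signal a completion, leaving the final kfree() to the normal unregister path, so when kobject_init_and_add() fails the caller must drop the reference and also free the containing object itself. A minimal sketch, assuming a ktype whose release does not free the object:

ret = kobject_init_and_add(&obj->kobj, &example_ktype, parent, "name");
if (ret) {
        kobject_put(&obj->kobj);  /* runs ->release(), which only completes */
        kfree(obj);               /* free explicitly, as unregister would */
        return ret;
}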
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index e313233ec6de..bf6275ffc4aa 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -1153,16 +1153,27 @@ static struct caam_akcipher_alg caam_rsa = {
int caam_pkc_init(struct device *ctrldev)
{
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
- u32 pk_inst;
+ u32 pk_inst, pkha;
int err;
init_done = false;
/* Determine public key hardware accelerator presence. */
- if (priv->era < 10)
+ if (priv->era < 10) {
pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
- else
- pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
+ } else {
+ pkha = rd_reg32(&priv->ctrl->vreg.pkha);
+ pk_inst = pkha & CHA_VER_NUM_MASK;
+
+ /*
+ * Newer CAAMs support partially disabled functionality. If this is the
+ * case, the number is non-zero, but this bit is set to indicate that
+ * no encryption or decryption is supported. Only signing and verifying
+ * is supported.
+ */
+ if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
+ pk_inst = 0;
+ }
/* Do not register algorithms if PKHA is not present. */
if (!pk_inst)
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index af61f3a2c0d4..3738625c0250 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -322,6 +322,9 @@ struct version_regs {
/* CHA Miscellaneous Information - AESA_MISC specific */
#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)
+/* CHA Miscellaneous Information - PKHA_MISC specific */
+#define CHA_VER_MISC_PKHA_NO_CRYPT BIT(7 + CHA_VER_MISC_SHIFT)
+
/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 0d5576f6ad21..fe69053b2394 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -467,8 +467,8 @@ static int ccp_init(struct ccp_device *ccp)
cmd_q = &ccp->cmd_q[i];
- kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
- "%s-q%u", ccp->name, cmd_q->id);
+ kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
+ "%s-q%u", ccp->name, cmd_q->id);
if (IS_ERR(kthread)) {
dev_err(dev, "error creating queue thread (%ld)\n",
PTR_ERR(kthread));
@@ -477,7 +477,6 @@ static int ccp_init(struct ccp_device *ccp)
}
cmd_q->kthread = kthread;
- wake_up_process(kthread);
}
dev_dbg(dev, "Enabling interrupts...\n");
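kthread_run() is kthread_create() plus wake_up_process() on success, so the two-call sequence collapses into one call with identical behaviour. The helper in include/linux/kthread.h is essentially:

#define kthread_run(threadfn, data, namefmt, ...)                          \
({                                                                         \
        struct task_struct *__k                                            \
                = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
        if (!IS_ERR(__k))                                                  \
                wake_up_process(__k);                                      \
        __k;                                                               \
})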
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 7838f63bab32..7b73332d6aa1 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -950,8 +950,8 @@ static int ccp5_init(struct ccp_device *ccp)
cmd_q = &ccp->cmd_q[i];
- kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
- "%s-q%u", ccp->name, cmd_q->id);
+ kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
+ "%s-q%u", ccp->name, cmd_q->id);
if (IS_ERR(kthread)) {
dev_err(dev, "error creating queue thread (%ld)\n",
PTR_ERR(kthread));
@@ -960,7 +960,6 @@ static int ccp5_init(struct ccp_device *ccp)
}
cmd_q->kthread = kthread;
- wake_up_process(kthread);
}
dev_dbg(dev, "Enabling interrupts...\n");
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 2ecb0e1f65d8..e09925d86bf3 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -134,7 +134,7 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
- case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
+ case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
default: return 0;
}
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index e599ac6dc162..790fa9058a36 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -103,7 +103,8 @@ MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
static void init_cc_cache_params(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
- u32 cache_params, ace_const, val, mask;
+ u32 cache_params, ace_const, val;
+ u64 mask;
/* compute CC_AXIM_CACHE_PARAMS */
cache_params = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index e89f9e0094b4..c7816c83e324 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -222,8 +222,10 @@ struct chcr_authenc_ctx {
};
struct __aead_ctx {
- struct chcr_gcm_ctx gcm[0];
- struct chcr_authenc_ctx authenc[];
+ union {
+ DECLARE_FLEX_ARRAY(struct chcr_gcm_ctx, gcm);
+ DECLARE_FLEX_ARRAY(struct chcr_authenc_ctx, authenc);
+ };
};
struct chcr_aead_ctx {
@@ -245,9 +247,11 @@ struct hmac_ctx {
};
struct __crypto_ctx {
- struct hmac_ctx hmacctx[0];
- struct ablk_ctx ablkctx[0];
- struct chcr_aead_ctx aeadctx[];
+ union {
+ DECLARE_FLEX_ARRAY(struct hmac_ctx, hmacctx);
+ DECLARE_FLEX_ARRAY(struct ablk_ctx, ablkctx);
+ DECLARE_FLEX_ARRAY(struct chcr_aead_ctx, aeadctx);
+ };
};
struct chcr_context {
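Background for the conversion: C99 forbids a flexible array member as the sole member of a struct and disallows one in a union entirely, which is why the old code used GNU zero-length ([0]) arrays. DECLARE_FLEX_ARRAY() wraps the array in an anonymous struct alongside an empty member, making a true flexible array legal inside a union; the helper is approximately:

/* Approximation of the stddef.h helper. */
#define DECLARE_FLEX_ARRAY(TYPE, NAME)          \
        struct {                                \
                struct { } __empty_ ## NAME;    \
                TYPE NAME[];                    \
        }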
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 369562d34d66..fed52ae516ba 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -233,6 +233,8 @@
#define QM_DBG_WRITE_LEN 1024
#define QM_DBG_TMP_BUF_LEN 22
#define QM_PCI_COMMAND_INVALID ~0
+#define QM_RESET_STOP_TX_OFFSET 1
+#define QM_RESET_STOP_RX_OFFSET 2
#define WAIT_PERIOD 20
#define REMOVE_WAIT_DELAY 10
@@ -883,6 +885,20 @@ static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
+{
+ u32 *addr;
+
+ if (qp->is_in_kernel)
+ return;
+
+ addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
+ *addr = 1;
+
+ /* make sure setup is completed */
+ mb();
+}
+
static irqreturn_t qm_aeq_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
@@ -2467,6 +2483,15 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
return qp->sqe + sq_tail * qp->qm->sqe_size;
}
+static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
+{
+ u64 *addr;
+
+ /* Use the last 64 bits of the DUS region for the reset status. */
+ addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
+ *addr = 0;
+}
+
static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
struct device *dev = &qm->pdev->dev;
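The stop flags live in the extra status page appended to the DUS (device user share) region. Counting 32-bit words back from the end, offset 1 is the TX flag and offset 2 the RX flag, so together they occupy the last 64 bits, which is why hisi_qm_unset_hw_reset() can clear both with a single u64 store. As address arithmetic:

void *end = qp->qdma.va + qp->qdma.size;
u32 *tx_stop = (u32 *)end - QM_RESET_STOP_TX_OFFSET;  /* last u32 */
u32 *rx_stop = (u32 *)end - QM_RESET_STOP_RX_OFFSET;  /* second-to-last u32 */
u64 *both    = (u64 *)end - 1;                        /* covers both flags */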
@@ -2492,7 +2517,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
}
qp = &qm->qp_array[qp_id];
-
+ hisi_qm_unset_hw_reset(qp);
memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
qp->event_cb = NULL;
@@ -2912,6 +2937,14 @@ static int hisi_qm_get_available_instances(struct uacce_device *uacce)
return hisi_qm_get_free_qp_num(uacce->priv);
}
+static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
+{
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++)
+ qm_set_qp_disable(&qm->qp_array[i], offset);
+}
+
static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
unsigned long arg,
struct uacce_queue *q)
@@ -3094,7 +3127,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
if (IS_ERR(uacce))
return PTR_ERR(uacce);
- if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) {
+ if (uacce->flags & UACCE_DEV_SVA) {
qm->use_sva = true;
} else {
/* only consider sva case */
@@ -3122,8 +3155,10 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
else
mmio_page_nr = qm->db_interval / PAGE_SIZE;
+ /* Add one more page for device or qp status */
dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
+ sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
+ PAGE_SHIFT;
uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
@@ -3367,8 +3402,10 @@ void hisi_qm_uninit(struct hisi_qm *qm)
qm_irq_unregister(qm);
hisi_qm_pci_uninit(qm);
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
+ if (qm->use_sva) {
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
+ }
up_write(&qm->qps_lock);
}
@@ -3682,11 +3719,13 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
if (qm->status.stop_reason == QM_SOFT_RESET ||
qm->status.stop_reason == QM_FLR) {
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
ret = qm_stop_started_qp(qm);
if (ret < 0) {
dev_err(dev, "Failed to stop started qp!\n");
goto err_unlock;
}
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
}
/* Mask eq and aeq irq */
@@ -4185,7 +4224,7 @@ static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
return -EINVAL;
}
- ret = sscanf(buf, "%ld", val);
+ ret = sscanf(buf, "%lu", val);
if (ret != QM_QOS_VAL_NUM)
return -EINVAL;
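The %ld to %lu change matters because val is an unsigned long: a scanf conversion whose signedness does not match the destination pointer is undefined behaviour. Minimal shape:

unsigned long val;

/* "%ld" would pair a signed conversion with an unsigned long pointer;
 * "%lu" matches the destination type. */
if (sscanf(buf, "%lu", &val) != 1)
        return -EINVAL;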
@@ -5045,6 +5084,8 @@ static int qm_controller_reset(struct hisi_qm *qm)
ret = qm_controller_reset_prepare(qm);
if (ret) {
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
clear_bit(QM_RST_SCHED, &qm->misc_ctl);
return ret;
}
@@ -5131,6 +5172,8 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
ret = hisi_qm_stop(qm, QM_FLR);
if (ret) {
pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
return;
}
@@ -5314,9 +5357,14 @@ static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
atomic_set(&qm->status.flags, QM_STOP);
cmd = QM_VF_PREPARE_FAIL;
goto err_prepare;
+ } else {
+ goto out;
}
err_prepare:
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
+out:
pci_save_state(pdev);
ret = qm->ops->ping_pf(qm, cmd);
if (ret)
@@ -5777,9 +5825,11 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_irq_register;
}
- ret = qm_alloc_uacce(qm);
- if (ret < 0)
- dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
+ if (qm->mode == UACCE_MODE_SVA) {
+ ret = qm_alloc_uacce(qm);
+ if (ret < 0)
+ dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
+ }
ret = hisi_qm_memory_init(qm);
if (ret)
@@ -5792,8 +5842,10 @@ int hisi_qm_init(struct hisi_qm *qm)
return 0;
err_alloc_uacce:
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
+ if (qm->use_sva) {
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
+ }
err_irq_register:
qm_irq_unregister(qm);
err_pci_init:
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 7148201ce76e..873971ef9aee 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -218,7 +218,7 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_AVG_DELAY ", 0x28ull},
{"HZIP_MEM_VISIBLE_DATA ", 0x30ull},
{"HZIP_MEM_VISIBLE_ADDR ", 0x34ull},
- {"HZIP_COMSUMED_BYTE ", 0x38ull},
+ {"HZIP_CONSUMED_BYTE ", 0x38ull},
{"HZIP_PRODUCED_BYTE ", 0x40ull},
{"HZIP_COMP_INF ", 0x70ull},
{"HZIP_PRE_OUT ", 0x78ull},
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index aa4c7b2af3e2..d8e82d69745d 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -674,14 +674,12 @@ static int img_hash_digest(struct ahash_request *req)
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- int err = -ENOMEM;
ctx->fallback = crypto_alloc_ahash(alg_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("img_hash: Could not load fallback driver.\n");
- err = PTR_ERR(ctx->fallback);
- goto err;
+ return PTR_ERR(ctx->fallback);
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct img_hash_request_ctx) +
@@ -689,9 +687,6 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
IMG_HASH_DMA_THRESHOLD);
return 0;
-
-err:
- return err;
}
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index 00cf8f028cb9..7942b48dd55a 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -39,6 +39,25 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
Intel does not recommend use of CTS mode with AES/SM4.
+config CRYPTO_DEV_KEEMBAY_OCS_ECC
+ tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
+ select CRYPTO_ECDH
+ select CRYPTO_ENGINE
+ help
+ Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
+ Elliptic Curve Cryptography (ECC) hardware acceleration for use with
+ Crypto API.
+
+ Provides OCS acceleration for ECDH-256 and ECDH-384.
+
+ Say Y or M if you are compiling for the Intel Keem Bay SoC. The
+ module will be called keembay-ocs-ecc.
+
+ If unsure, say N.
+
config CRYPTO_DEV_KEEMBAY_OCS_HCU
tristate "Support for Intel Keem Bay OCS HCU HW acceleration"
select CRYPTO_HASH
diff --git a/drivers/crypto/keembay/Makefile b/drivers/crypto/keembay/Makefile
index aea03d4432c4..7c12c3c138bd 100644
--- a/drivers/crypto/keembay/Makefile
+++ b/drivers/crypto/keembay/Makefile
@@ -4,5 +4,7 @@
obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4) += keembay-ocs-aes.o
keembay-ocs-aes-objs := keembay-ocs-aes-core.o ocs-aes.o
+obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_ECC) += keembay-ocs-ecc.o
+
obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU) += keembay-ocs-hcu.o
keembay-ocs-hcu-objs := keembay-ocs-hcu-core.o ocs-hcu.o
diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/keembay/keembay-ocs-ecc.c
new file mode 100644
index 000000000000..679e6ae295e0
--- /dev/null
+++ b/drivers/crypto/keembay/keembay-ocs-ecc.c
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS ECC Crypto Driver.
+ *
+ * Copyright (C) 2019-2021 Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/fips.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
+#include <crypto/engine.h>
+#include <crypto/kpp.h>
+#include <crypto/rng.h>
+
+#include <crypto/internal/ecc.h>
+#include <crypto/internal/kpp.h>
+
+#define DRV_NAME "keembay-ocs-ecc"
+
+#define KMB_OCS_ECC_PRIORITY 350
+
+#define HW_OFFS_OCS_ECC_COMMAND 0x00000000
+#define HW_OFFS_OCS_ECC_STATUS 0x00000004
+#define HW_OFFS_OCS_ECC_DATA_IN 0x00000080
+#define HW_OFFS_OCS_ECC_CX_DATA_OUT 0x00000100
+#define HW_OFFS_OCS_ECC_CY_DATA_OUT 0x00000180
+#define HW_OFFS_OCS_ECC_ISR 0x00000400
+#define HW_OFFS_OCS_ECC_IER 0x00000404
+
+#define HW_OCS_ECC_ISR_INT_STATUS_DONE BIT(0)
+#define HW_OCS_ECC_COMMAND_INS_BP BIT(0)
+
+#define HW_OCS_ECC_COMMAND_START_VAL BIT(0)
+
+#define OCS_ECC_OP_SIZE_384 BIT(8)
+#define OCS_ECC_OP_SIZE_256 0
+
+/* ECC Instruction : for ECC_COMMAND */
+#define OCS_ECC_INST_WRITE_AX (0x1 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_AY (0x2 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_BX_D (0x3 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_BY_L (0x4 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_P (0x5 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_A (0x6 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_D_IDX_A (0x8 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_POW_B_MODP (0xB << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_MUL_B_MODP (0xC << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_ADD_B_MODP (0xD << HW_OCS_ECC_COMMAND_INS_BP)
+
+#define ECC_ENABLE_INTR 1
+
+#define POLL_USEC 100
+#define TIMEOUT_USEC 10000
+
+#define KMB_ECC_VLI_MAX_DIGITS ECC_CURVE_NIST_P384_DIGITS
+#define KMB_ECC_VLI_MAX_BYTES (KMB_ECC_VLI_MAX_DIGITS \
+ << ECC_DIGITS_TO_BYTES_SHIFT)
+
+#define POW_CUBE 3
+
+/**
+ * struct ocs_ecc_dev - ECC device context
+ * @list: List of device contexts
+ * @dev: OCS ECC device
+ * @base_reg: IO base address of OCS ECC
+ * @engine: Crypto engine for the device
+ * @irq_done: IRQ done completion.
+ * @irq: IRQ number
+ */
+struct ocs_ecc_dev {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *base_reg;
+ struct crypto_engine *engine;
+ struct completion irq_done;
+ int irq;
+};
+
+/**
+ * struct ocs_ecc_ctx - Transformation context.
+ * @engine_ctx: Crypto engine ctx.
+ * @ecc_dev: The ECC driver associated with this context.
+ * @curve: The elliptic curve used by this transformation.
+ * @private_key: The private key.
+ */
+struct ocs_ecc_ctx {
+ struct crypto_engine_ctx engine_ctx;
+ struct ocs_ecc_dev *ecc_dev;
+ const struct ecc_curve *curve;
+ u64 private_key[KMB_ECC_VLI_MAX_DIGITS];
+};
+
+/* Driver data. */
+struct ocs_ecc_drv {
+ struct list_head dev_list;
+ spinlock_t lock; /* Protects dev_list. */
+};
+
+/* Global variable holding the list of OCS ECC devices (only one expected). */
+static struct ocs_ecc_drv ocs_ecc = {
+ .dev_list = LIST_HEAD_INIT(ocs_ecc.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(ocs_ecc.lock),
+};
+
+/* Get OCS ECC tfm context from kpp_request. */
+static inline struct ocs_ecc_ctx *kmb_ocs_ecc_tctx(struct kpp_request *req)
+{
+ return kpp_tfm_ctx(crypto_kpp_reqtfm(req));
+}
+
+/* Converts number of digits to number of bytes. */
+static inline unsigned int digits_to_bytes(unsigned int n)
+{
+ return n << ECC_DIGITS_TO_BYTES_SHIFT;
+}
+
+/*
+ * Wait for the ECC engine to become idle, i.e., for an operation
+ * (other than a write) to complete.
+ */
+static inline int ocs_ecc_wait_idle(struct ocs_ecc_dev *dev)
+{
+ u32 value;
+
+ return readl_poll_timeout((dev->base_reg + HW_OFFS_OCS_ECC_STATUS),
+ value,
+ !(value & HW_OCS_ECC_ISR_INT_STATUS_DONE),
+ POLL_USEC, TIMEOUT_USEC);
+}
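readl_poll_timeout() (include/linux/iopoll.h) re-reads the register into its value argument, sleeping between polls, until the condition expression is true or the timeout expires; it returns 0 on success and -ETIMEDOUT otherwise. Here the condition waits for the DONE bit to clear:

u32 sts;
int rc;

/* readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) */
rc = readl_poll_timeout(base + HW_OFFS_OCS_ECC_STATUS, sts,
                        !(sts & HW_OCS_ECC_ISR_INT_STATUS_DONE),
                        POLL_USEC, TIMEOUT_USEC);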
+
+static void ocs_ecc_cmd_start(struct ocs_ecc_dev *ecc_dev, u32 op_size)
+{
+ iowrite32(op_size | HW_OCS_ECC_COMMAND_START_VAL,
+ ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+}
+
+/* Direct write of u32 buffer to ECC engine with associated instruction. */
+static void ocs_ecc_write_cmd_and_data(struct ocs_ecc_dev *dev,
+ u32 op_size,
+ u32 inst,
+ const void *data_in,
+ size_t data_size)
+{
+ iowrite32(op_size | inst, dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+
+ /* MMIO Write src uint32 to dst. */
+ memcpy_toio(dev->base_reg + HW_OFFS_OCS_ECC_DATA_IN, data_in,
+ data_size);
+}
+
+/* Start OCS ECC operation and wait for its completion. */
+static int ocs_ecc_trigger_op(struct ocs_ecc_dev *ecc_dev, u32 op_size,
+ u32 inst)
+{
+ reinit_completion(&ecc_dev->irq_done);
+
+ iowrite32(ECC_ENABLE_INTR, ecc_dev->base_reg + HW_OFFS_OCS_ECC_IER);
+ iowrite32(op_size | inst, ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+
+ return wait_for_completion_interruptible(&ecc_dev->irq_done);
+}
+
+/**
+ * ocs_ecc_read_cx_out() - Read the CX data output buffer.
+ * @dev: The OCS ECC device to read from.
+ * @cx_out: The buffer where to store the CX value. Must be at least
+ * @byte_count byte long.
+ * @byte_count: The amount of data to read.
+ */
+static inline void ocs_ecc_read_cx_out(struct ocs_ecc_dev *dev, void *cx_out,
+ size_t byte_count)
+{
+ memcpy_fromio(cx_out, dev->base_reg + HW_OFFS_OCS_ECC_CX_DATA_OUT,
+ byte_count);
+}
+
+/**
+ * ocs_ecc_read_cy_out() - Read the CY data output buffer.
+ * @dev: The OCS ECC device to read from.
+ * @cy_out: The buffer where to store the CY value. Must be at least
+ * @byte_count byte long.
+ * @byte_count: The amount of data to read.
+ */
+static inline void ocs_ecc_read_cy_out(struct ocs_ecc_dev *dev, void *cy_out,
+ size_t byte_count)
+{
+ memcpy_fromio(cy_out, dev->base_reg + HW_OFFS_OCS_ECC_CY_DATA_OUT,
+ byte_count);
+}
+
+static struct ocs_ecc_dev *kmb_ocs_ecc_find_dev(struct ocs_ecc_ctx *tctx)
+{
+ if (tctx->ecc_dev)
+ return tctx->ecc_dev;
+
+ spin_lock(&ocs_ecc.lock);
+
+ /* Only a single OCS device available. */
+ tctx->ecc_dev = list_first_entry(&ocs_ecc.dev_list, struct ocs_ecc_dev,
+ list);
+
+ spin_unlock(&ocs_ecc.lock);
+
+ return tctx->ecc_dev;
+}
+
+/* Do point multiplication using OCS ECC HW. */
+static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev,
+ struct ecc_point *result,
+ const struct ecc_point *point,
+ u64 *scalar,
+ const struct ecc_curve *curve)
+{
+ u8 sca[KMB_ECC_VLI_MAX_BYTES]; /* Use the maximum data size. */
+ u32 op_size = (curve->g.ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
+ OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
+ size_t nbytes = digits_to_bytes(curve->g.ndigits);
+ int rc = 0;
+
+ /* Generate nbytes of random data for Simple and Differential SCA protection. */
+ rc = crypto_get_default_rng();
+ if (rc)
+ return rc;
+
+ rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes);
+ crypto_put_default_rng();
+ if (rc)
+ return rc;
+
+ /* Wait for the engine to be idle before starting a new operation. */
+ rc = ocs_ecc_wait_idle(ecc_dev);
+ if (rc)
+ return rc;
+
+ /* Send the ecc_start pulse and indicate the operation size. */
+ ocs_ecc_cmd_start(ecc_dev, op_size);
+
+ /* Write ax param; Base point (Gx). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
+ point->x, nbytes);
+
+ /* Write ay param; Base point (Gy). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
+ point->y, nbytes);
+
+ /*
+ * Write the private key into DATA_IN reg.
+ *
+ * Since the DATA_IN register is used to write different values during
+ * the computation, the private key value ends up overwritten by the
+ * side-channel-resistance value.
+ */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BX_D,
+ scalar, nbytes);
+
+ /* Write operand by/l. */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BY_L,
+ sca, nbytes);
+ memzero_explicit(sca, sizeof(sca));
+
+ /* Write p = curve prime (GF modulus). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
+ curve->p, nbytes);
+
+ /* Write a = curve coefficient. */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_A,
+ curve->a, nbytes);
+
+ /* Make hardware perform the multiplication. */
+ rc = ocs_ecc_trigger_op(ecc_dev, op_size, OCS_ECC_INST_CALC_D_IDX_A);
+ if (rc)
+ return rc;
+
+ /* Read result. */
+ ocs_ecc_read_cx_out(ecc_dev, result->x, nbytes);
+ ocs_ecc_read_cy_out(ecc_dev, result->y, nbytes);
+
+ return 0;
+}
+
+/**
+ * kmb_ecc_do_scalar_op() - Perform Scalar operation using OCS ECC HW.
+ * @ecc_dev: The OCS ECC device to use.
+ * @scalar_out: Where to store the output scalar.
+ * @scalar_a: Input scalar operand 'a'.
+ * @scalar_b: Input scalar operand 'b'.
+ * @curve: The curve on which the operation is performed.
+ * @ndigits: The size of the operands (in digits).
+ * @inst: The operation to perform (as an OCS ECC instruction).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int kmb_ecc_do_scalar_op(struct ocs_ecc_dev *ecc_dev, u64 *scalar_out,
+ const u64 *scalar_a, const u64 *scalar_b,
+ const struct ecc_curve *curve,
+ unsigned int ndigits, const u32 inst)
+{
+ u32 op_size = (ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
+ OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
+ size_t nbytes = digits_to_bytes(ndigits);
+ int rc;
+
+ /* Wait for the engine to be idle before starting a new operation. */
+ rc = ocs_ecc_wait_idle(ecc_dev);
+ if (rc)
+ return rc;
+
+ /* Send the ecc_start pulse and indicate the operation size. */
+ ocs_ecc_cmd_start(ecc_dev, op_size);
+
+ /* Write ax param; Base point (Gx). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
+ scalar_a, nbytes);
+
+ /* Write ay param; Base point (Gy). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
+ scalar_b, nbytes);
+
+ /* Write p = curve prime (GF modulus). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
+ curve->p, nbytes);
+
+ /* Issue the A*B or A+B instruction to the ECC engine. */
+ rc = ocs_ecc_trigger_op(ecc_dev, op_size, inst);
+ if (rc)
+ return rc;
+
+ ocs_ecc_read_cx_out(ecc_dev, scalar_out, nbytes);
+
+ if (vli_is_zero(scalar_out, ndigits))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
+static int kmb_ocs_ecc_is_pubkey_valid_partial(struct ocs_ecc_dev *ecc_dev,
+ const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ u64 xxx[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+ u64 yy[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+ u64 w[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+ int rc;
+
+ if (WARN_ON(pk->ndigits != curve->g.ndigits))
+ return -EINVAL;
+
+ /* Check 1: Verify key is not the zero point. */
+ if (ecc_point_is_zero(pk))
+ return -EINVAL;
+
+ /* Check 2: Verify key is in the range [0, p-1]. */
+ if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
+ return -EINVAL;
+
+ if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
+ return -EINVAL;
+
+ /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
+
+ /* y^2 */
+ /* Compute y^2 -> store in yy */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, yy, pk->y, pk->y, curve, pk->ndigits,
+ OCS_ECC_INST_CALC_A_MUL_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* x^3 */
+ /* Assigning w = 3, used for calculating x^3. */
+ w[0] = POW_CUBE;
+ /* Load the next stage.*/
+ rc = kmb_ecc_do_scalar_op(ecc_dev, xxx, pk->x, w, curve, pk->ndigits,
+ OCS_ECC_INST_CALC_A_POW_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* Do a*x -> store in w. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, w, curve->a, pk->x, curve,
+ pk->ndigits,
+ OCS_ECC_INST_CALC_A_MUL_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* Do ax + b == w + b; store in w. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, w, w, curve->b, curve,
+ pk->ndigits,
+ OCS_ECC_INST_CALC_A_ADD_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* x^3 + ax + b == x^3 + w -> store in w. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, w, xxx, w, curve, pk->ndigits,
+ OCS_ECC_INST_CALC_A_ADD_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* Compare y^2 == x^3 + a·x + b. */
+ rc = vli_cmp(yy, w, pk->ndigits);
+ if (rc)
+ rc = -EINVAL;
+
+exit:
+ memzero_explicit(xxx, sizeof(xxx));
+ memzero_explicit(yy, sizeof(yy));
+ memzero_explicit(w, sizeof(w));
+
+ return rc;
+}
+
+/* SP800-56A section 5.6.2.3.3 full verification */
+static int kmb_ocs_ecc_is_pubkey_valid_full(struct ocs_ecc_dev *ecc_dev,
+ const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ struct ecc_point *nQ;
+ int rc;
+
+ /* Checks 1 through 3 */
+ rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
+ if (rc)
+ return rc;
+
+ /* Check 4: Verify that nQ is the zero point. */
+ nQ = ecc_alloc_point(pk->ndigits);
+ if (!nQ)
+ return -ENOMEM;
+
+ rc = kmb_ecc_point_mult(ecc_dev, nQ, pk, curve->n, curve);
+ if (rc)
+ goto exit;
+
+ if (!ecc_point_is_zero(nQ))
+ rc = -EINVAL;
+
+exit:
+ ecc_free_point(nQ);
+
+ return rc;
+}
+
+static int kmb_ecc_is_key_valid(const struct ecc_curve *curve,
+ const u64 *private_key, size_t private_key_len)
+{
+ size_t ndigits = curve->g.ndigits;
+ u64 one[KMB_ECC_VLI_MAX_DIGITS] = {1};
+ u64 res[KMB_ECC_VLI_MAX_DIGITS];
+
+ if (private_key_len != digits_to_bytes(ndigits))
+ return -EINVAL;
+
+ if (!private_key)
+ return -EINVAL;
+
+ /* Make sure the private key is in the range [2, n-3]. */
+ if (vli_cmp(one, private_key, ndigits) != -1)
+ return -EINVAL;
+
+ vli_sub(res, curve->n, one, ndigits);
+ vli_sub(res, res, one, ndigits);
+ if (vli_cmp(res, private_key, ndigits) != 1)
+ return -EINVAL;
+
+ return 0;
+}
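Spelled out, the two vli_cmp() tests pin the key to [2, n-3], given that vli_cmp(a, b) returns -1, 0 or 1 for a < b, a == b, a > b:

require one < key   (vli_cmp(one, key) == -1)  =>  key >= 2
res = n - 1 - 1 = n - 2
require res > key   (vli_cmp(res, key) == 1)   =>  key <= n - 3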
+
+/*
+ * ECC private keys are generated using the method of extra random bits,
+ * equivalent to that described in FIPS 186-4, Appendix B.4.1.
+ *
+ * d = (c mod (n-1)) + 1 where c is a string of random bits, 64 bits longer
+ * than requested
+ * 0 <= c mod (n-1) <= n-2, which implies that
+ * 1 <= d <= n-1
+ *
+ * This method generates a private key uniformly distributed in the range
+ * [1, n-1].
+ */
+static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey)
+{
+ size_t nbytes = digits_to_bytes(curve->g.ndigits);
+ u64 priv[KMB_ECC_VLI_MAX_DIGITS];
+ size_t nbits;
+ int rc;
+
+ nbits = vli_num_bits(curve->n, curve->g.ndigits);
+
+ /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
+ if (nbits < 160 || curve->g.ndigits > ARRAY_SIZE(priv))
+ return -EINVAL;
+
+ /*
+ * FIPS 186-4 recommends that the private key should be obtained from a
+ * RBG with a security strength equal to or greater than the security
+ * strength associated with N.
+ *
+ * The maximum security strength identified by NIST SP800-57pt1r4 for
+ * ECC is 256 (N >= 512).
+ *
+ * This condition is met by the default RNG because it selects a favored
+ * DRBG with a security strength of 256.
+ */
+ if (crypto_get_default_rng())
+ return -EFAULT;
+
+ rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
+ crypto_put_default_rng();
+ if (rc)
+ goto cleanup;
+
+ rc = kmb_ecc_is_key_valid(curve, priv, nbytes);
+ if (rc)
+ goto cleanup;
+
+ ecc_swap_digits(priv, privkey, curve->g.ndigits);
+
+cleanup:
+ memzero_explicit(&priv, sizeof(priv));
+
+ return rc;
+}
+
+static int kmb_ocs_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+ struct ecdh params;
+ int rc = 0;
+
+ rc = crypto_ecdh_decode_key(buf, len, &params);
+ if (rc)
+ goto cleanup;
+
+ /* Ensure the key size is not bigger than expected. */
+ if (params.key_size > digits_to_bytes(tctx->curve->g.ndigits)) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Auto-generate the private key if none is provided. */
+ if (!params.key || !params.key_size) {
+ rc = kmb_ecc_gen_privkey(tctx->curve, tctx->private_key);
+ goto cleanup;
+ }
+
+ rc = kmb_ecc_is_key_valid(tctx->curve, (const u64 *)params.key,
+ params.key_size);
+ if (rc)
+ goto cleanup;
+
+ ecc_swap_digits((const u64 *)params.key, tctx->private_key,
+ tctx->curve->g.ndigits);
+cleanup:
+ memzero_explicit(&params, sizeof(params));
+
+ if (rc)
+ tctx->curve = NULL;
+
+ return rc;
+}
+
+/* Compute shared secret. */
+static int kmb_ecc_do_shared_secret(struct ocs_ecc_ctx *tctx,
+ struct kpp_request *req)
+{
+ struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
+ const struct ecc_curve *curve = tctx->curve;
+ u64 shared_secret[KMB_ECC_VLI_MAX_DIGITS];
+ u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
+ size_t copied, nbytes, pubk_len;
+ struct ecc_point *pk, *result;
+ int rc;
+
+ nbytes = digits_to_bytes(curve->g.ndigits);
+
+ /* Public key is a point, thus it has two coordinates */
+ pubk_len = 2 * nbytes;
+
+ /* Copy public key from SG list to pubk_buf. */
+ copied = sg_copy_to_buffer(req->src,
+ sg_nents_for_len(req->src, pubk_len),
+ pubk_buf, pubk_len);
+ if (copied != pubk_len)
+ return -EINVAL;
+
+ /* Allocate and initialize public key point. */
+ pk = ecc_alloc_point(curve->g.ndigits);
+ if (!pk)
+ return -ENOMEM;
+
+ ecc_swap_digits(pubk_buf, pk->x, curve->g.ndigits);
+ ecc_swap_digits(&pubk_buf[curve->g.ndigits], pk->y, curve->g.ndigits);
+
+ /*
+ * Check the public key for the following:
+ * Check 1: Verify key is not the zero point.
+ * Check 2: Verify key is in the range [1, p-1].
+ * Check 3: Verify that y^2 == (x^3 + a·x + b) mod p.
+ */
+ rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
+ if (rc)
+ goto exit_free_pk;
+
+ /* Allocate point for storing computed shared secret. */
+ result = ecc_alloc_point(pk->ndigits);
+ if (!result) {
+ rc = -ENOMEM;
+ goto exit_free_pk;
+ }
+
+ /* Calculate the shared secret. */
+ rc = kmb_ecc_point_mult(ecc_dev, result, pk, tctx->private_key, curve);
+ if (rc)
+ goto exit_free_result;
+
+ if (ecc_point_is_zero(result)) {
+ rc = -EFAULT;
+ goto exit_free_result;
+ }
+
+ /* Copy shared secret from point to buffer. */
+ ecc_swap_digits(result->x, shared_secret, result->ndigits);
+
+ /* The request might ask for fewer bytes than we have. */
+ nbytes = min_t(size_t, nbytes, req->dst_len);
+
+ copied = sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, nbytes),
+ shared_secret, nbytes);
+
+ if (copied != nbytes)
+ rc = -EINVAL;
+
+ memzero_explicit(shared_secret, sizeof(shared_secret));
+
+exit_free_result:
+ ecc_free_point(result);
+
+exit_free_pk:
+ ecc_free_point(pk);
+
+ return rc;
+}
+
+/* Compute public key. */
+static int kmb_ecc_do_public_key(struct ocs_ecc_ctx *tctx,
+ struct kpp_request *req)
+{
+ const struct ecc_curve *curve = tctx->curve;
+ u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
+ struct ecc_point *pk;
+ size_t pubk_len;
+ size_t copied;
+ int rc;
+
+ /* Public key is a point, so it has double the digits. */
+ pubk_len = 2 * digits_to_bytes(curve->g.ndigits);
+
+ pk = ecc_alloc_point(curve->g.ndigits);
+ if (!pk)
+ return -ENOMEM;
+
+ /* Public Key(pk) = priv * G. */
+ rc = kmb_ecc_point_mult(tctx->ecc_dev, pk, &curve->g, tctx->private_key,
+ curve);
+ if (rc)
+ goto exit;
+
+ /* SP800-56A rev 3 5.6.2.1.3 key check */
+ if (kmb_ocs_ecc_is_pubkey_valid_full(tctx->ecc_dev, curve, pk)) {
+ rc = -EAGAIN;
+ goto exit;
+ }
+
+ /* Copy public key from point to buffer. */
+ ecc_swap_digits(pk->x, pubk_buf, pk->ndigits);
+ ecc_swap_digits(pk->y, &pubk_buf[pk->ndigits], pk->ndigits);
+
+ /* Copy public key to req->dst. */
+ copied = sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, pubk_len),
+ pubk_buf, pubk_len);
+
+ if (copied != pubk_len)
+ rc = -EINVAL;
+
+exit:
+ ecc_free_point(pk);
+
+ return rc;
+}
+
+static int kmb_ocs_ecc_do_one_request(struct crypto_engine *engine,
+ void *areq)
+{
+ struct kpp_request *req = container_of(areq, struct kpp_request, base);
+ struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+ struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
+ int rc;
+
+ if (req->src)
+ rc = kmb_ecc_do_shared_secret(tctx, req);
+ else
+ rc = kmb_ecc_do_public_key(tctx, req);
+
+ crypto_finalize_kpp_request(ecc_dev->engine, req, rc);
+
+ return 0;
+}
+
+static int kmb_ocs_ecdh_generate_public_key(struct kpp_request *req)
+{
+ struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+ const struct ecc_curve *curve = tctx->curve;
+
+ /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
+ if (!tctx->curve)
+ return -EINVAL;
+
+ /* Ensure dst is present. */
+ if (!req->dst)
+ return -EINVAL;
+
+ /* Check that the request's dst is big enough to hold the public key. */
+ if (req->dst_len < (2 * digits_to_bytes(curve->g.ndigits)))
+ return -EINVAL;
+
+ /* 'src' must not be present when generating the public key. */
+ if (req->src)
+ return -EINVAL;
+
+ return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
+ req);
+}
+
+static int kmb_ocs_ecdh_compute_shared_secret(struct kpp_request *req)
+{
+ struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+ const struct ecc_curve *curve = tctx->curve;
+
+ /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
+ if (!tctx->curve)
+ return -EINVAL;
+
+ /* Ensure dst is present. */
+ if (!req->dst)
+ return -EINVAL;
+
+ /* Ensure src is present. */
+ if (!req->src)
+ return -EINVAL;
+
+ /*
+ * req->src is expected to be the (other side's) public key, so its length
+ * must be 2 * coordinate size (in bytes).
+ */
+ if (req->src_len != 2 * digits_to_bytes(curve->g.ndigits))
+ return -EINVAL;
+
+ return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
+ req);
+}
+
+static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id)
+{
+ memset(tctx, 0, sizeof(*tctx));
+
+ tctx->ecc_dev = kmb_ocs_ecc_find_dev(tctx);
+
+ if (IS_ERR(tctx->ecc_dev)) {
+ pr_err("Failed to find the device : %ld\n",
+ PTR_ERR(tctx->ecc_dev));
+ return PTR_ERR(tctx->ecc_dev);
+ }
+
+ tctx->curve = ecc_get_curve(curve_id);
+ if (!tctx->curve)
+ return -EOPNOTSUPP;
+
+ tctx->engine_ctx.op.prepare_request = NULL;
+ tctx->engine_ctx.op.do_one_request = kmb_ocs_ecc_do_one_request;
+ tctx->engine_ctx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static int kmb_ocs_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P256);
+}
+
+static int kmb_ocs_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P384);
+}
+
+static void kmb_ocs_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ memzero_explicit(tctx->private_key, sizeof(tctx->private_key));
+}
+
+static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ /* Public key is made of two coordinates, so double the digits. */
+ return digits_to_bytes(tctx->curve->g.ndigits) * 2;
+}
+
+static struct kpp_alg ocs_ecdh_p256 = {
+ .set_secret = kmb_ocs_ecdh_set_secret,
+ .generate_public_key = kmb_ocs_ecdh_generate_public_key,
+ .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+ .init = kmb_ocs_ecdh_nist_p256_init_tfm,
+ .exit = kmb_ocs_ecdh_exit_tfm,
+ .max_size = kmb_ocs_ecdh_max_size,
+ .base = {
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "ecdh-nist-p256-keembay-ocs",
+ .cra_priority = KMB_OCS_ECC_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
+ },
+};
+
+static struct kpp_alg ocs_ecdh_p384 = {
+ .set_secret = kmb_ocs_ecdh_set_secret,
+ .generate_public_key = kmb_ocs_ecdh_generate_public_key,
+ .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+ .init = kmb_ocs_ecdh_nist_p384_init_tfm,
+ .exit = kmb_ocs_ecdh_exit_tfm,
+ .max_size = kmb_ocs_ecdh_max_size,
+ .base = {
+ .cra_name = "ecdh-nist-p384",
+ .cra_driver_name = "ecdh-nist-p384-keembay-ocs",
+ .cra_priority = KMB_OCS_ECC_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
+ },
+};
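+
+/*
+ * Consumer-side sketch (exposition only, not part of this driver): one
+ * plausible way a kernel user could drive the "ecdh-nist-p256" KPP
+ * algorithm registered above. Passing an empty struct ecdh makes
+ * set_secret auto-generate a private key, as implemented in
+ * kmb_ocs_ecdh_set_secret(). The function name is hypothetical.
+ */
+static int __maybe_unused ocs_ecdh_usage_sketch(void *pubkey_out,
+ unsigned int out_len)
+{
+ DECLARE_CRYPTO_WAIT(wait);
+ struct kpp_request *req = NULL;
+ struct crypto_kpp *tfm;
+ struct scatterlist dst;
+ struct ecdh p = {}; /* No key material, so a key is auto-generated. */
+ unsigned int buf_len;
+ char *buf = NULL;
+ int err = -ENOMEM;
+
+ tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ buf_len = crypto_ecdh_key_len(&p);
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ req = kpp_request_alloc(tfm, GFP_KERNEL);
+ if (!buf || !req)
+ goto exit;
+
+ err = crypto_ecdh_encode_key(buf, buf_len, &p);
+ err = err ?: crypto_kpp_set_secret(tfm, buf, buf_len);
+ if (err)
+ goto exit;
+
+ /* No src: kmb_ocs_ecc_do_one_request() then computes the public key. */
+ sg_init_one(&dst, pubkey_out, out_len);
+ kpp_request_set_input(req, NULL, 0);
+ kpp_request_set_output(req, &dst, out_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
+exit:
+ kpp_request_free(req);
+ kfree(buf);
+ crypto_free_kpp(tfm);
+ return err;
+}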
+
+static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id)
+{
+ struct ocs_ecc_dev *ecc_dev = dev_id;
+ u32 status;
+
+ /*
+ * Read the status register and write it back to clear the
+ * DONE_INT_STATUS bit.
+ */
+ status = ioread32(ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
+ iowrite32(status, ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
+
+ if (!(status & HW_OCS_ECC_ISR_INT_STATUS_DONE))
+ return IRQ_NONE;
+
+ complete(&ecc_dev->irq_done);
+
+ return IRQ_HANDLED;
+}
+
+static int kmb_ocs_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocs_ecc_dev *ecc_dev;
+ int rc;
+
+ ecc_dev = devm_kzalloc(dev, sizeof(*ecc_dev), GFP_KERNEL);
+ if (!ecc_dev)
+ return -ENOMEM;
+
+ ecc_dev->dev = dev;
+
+ platform_set_drvdata(pdev, ecc_dev);
+
+ INIT_LIST_HEAD(&ecc_dev->list);
+ init_completion(&ecc_dev->irq_done);
+
+ /* Get base register address. */
+ ecc_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ecc_dev->base_reg)) {
+ dev_err(dev, "Failed to get base address\n");
+ rc = PTR_ERR(ecc_dev->base_reg);
+ goto list_del;
+ }
+
+ /* Get and request IRQ */
+ ecc_dev->irq = platform_get_irq(pdev, 0);
+ if (ecc_dev->irq < 0) {
+ rc = ecc_dev->irq;
+ goto list_del;
+ }
+
+ rc = devm_request_threaded_irq(dev, ecc_dev->irq, ocs_ecc_irq_handler,
+ NULL, 0, "keembay-ocs-ecc", ecc_dev);
+ if (rc < 0) {
+ dev_err(dev, "Could not request IRQ\n");
+ goto list_del;
+ }
+
+ /* Add device to the list of OCS ECC devices. */
+ spin_lock(&ocs_ecc.lock);
+ list_add_tail(&ecc_dev->list, &ocs_ecc.dev_list);
+ spin_unlock(&ocs_ecc.lock);
+
+ /* Initialize crypto engine. */
+ ecc_dev->engine = crypto_engine_alloc_init(dev, 1);
+ if (!ecc_dev->engine) {
+ dev_err(dev, "Could not allocate crypto engine\n");
+ goto list_del;
+ }
+
+ rc = crypto_engine_start(ecc_dev->engine);
+ if (rc) {
+ dev_err(dev, "Could not start crypto engine\n");
+ goto cleanup;
+ }
+
+ /* Register the KPP algo. */
+ rc = crypto_register_kpp(&ocs_ecdh_p256);
+ if (rc) {
+ dev_err(dev,
+ "Could not register OCS algorithms with Crypto API\n");
+ goto cleanup;
+ }
+
+ rc = crypto_register_kpp(&ocs_ecdh_p384);
+ if (rc) {
+ dev_err(dev,
+ "Could not register OCS algorithms with Crypto API\n");
+ goto ocs_ecdh_p384_error;
+ }
+
+ return 0;
+
+ocs_ecdh_p384_error:
+ crypto_unregister_kpp(&ocs_ecdh_p256);
+
+cleanup:
+ crypto_engine_exit(ecc_dev->engine);
+
+list_del:
+ spin_lock(&ocs_ecc.lock);
+ list_del(&ecc_dev->list);
+ spin_unlock(&ocs_ecc.lock);
+
+ return rc;
+}
+
+static int kmb_ocs_ecc_remove(struct platform_device *pdev)
+{
+ struct ocs_ecc_dev *ecc_dev;
+
+ ecc_dev = platform_get_drvdata(pdev);
+ if (!ecc_dev)
+ return -ENODEV;
+
+ crypto_unregister_kpp(&ocs_ecdh_p384);
+ crypto_unregister_kpp(&ocs_ecdh_p256);
+
+ spin_lock(&ocs_ecc.lock);
+ list_del(&ecc_dev->list);
+ spin_unlock(&ocs_ecc.lock);
+
+ crypto_engine_exit(ecc_dev->engine);
+
+ return 0;
+}
+
+/* Device tree driver match. */
+static const struct of_device_id kmb_ocs_ecc_of_match[] = {
+ {
+ .compatible = "intel,keembay-ocs-ecc",
+ },
+ {}
+};
+
+/* The OCS driver is a platform device. */
+static struct platform_driver kmb_ocs_ecc_driver = {
+ .probe = kmb_ocs_ecc_probe,
+ .remove = kmb_ocs_ecc_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = kmb_ocs_ecc_of_match,
+ },
+};
+module_platform_driver(kmb_ocs_ecc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel Keem Bay OCS ECC Driver");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p256");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p384");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p256-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p384-keembay-ocs");
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index f14aac532f53..5cd332880653 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -615,7 +615,6 @@ static struct platform_driver marvell_cesa = {
};
module_platform_driver(marvell_cesa);
-MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index a72723455df7..877a948469bd 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -1274,6 +1274,7 @@ static int aead_do_fallback(struct aead_request *req, bool is_enc)
req->base.complete, req->base.data);
aead_request_set_crypt(&rctx->fbk_req, req->src,
req->dst, req->cryptlen, req->iv);
+ aead_request_set_ad(&rctx->fbk_req, req->assoclen);
ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
crypto_aead_decrypt(&rctx->fbk_req);
} else {
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 33d8e50dcbda..fa768f10635f 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
@@ -161,7 +162,36 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
}
-static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+static int adf_init_device(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+ u32 status;
+ u32 csr;
+ int ret;
+
+ addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+
+ /* Temporarily mask PM interrupt */
+ csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2);
+ csr |= ADF_4XXX_PM_SOU;
+ ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr);
+
+ /* Set DRV_ACTIVE bit to power up the device */
+ ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE);
+
+ /* Poll status register to make sure the device is powered up */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_4XXX_PM_INIT_STATE,
+ ADF_4XXX_PM_POLL_DELAY_US,
+ ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_4XXX_PM_STATUS);
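+ /*
+ * Note: read_poll_timeout() (from <linux/iopoll.h>) re-reads
+ * ADF_4XXX_PM_STATUS every ADF_4XXX_PM_POLL_DELAY_US (20 us),
+ * sleeping before the first read, until ADF_4XXX_PM_INIT_STATE is
+ * set or ADF_4XXX_PM_POLL_TIMEOUT_US (1 s) elapses, in which case
+ * it returns -ETIMEDOUT.
+ */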
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+
+ return ret;
+}
+
+static int pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
return 0;
}
@@ -215,6 +245,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
+ hw_data->init_device = adf_init_device;
hw_data->reset_device = adf_reset_flr;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->uof_get_num_objs = uof_get_num_objs;
@@ -222,7 +253,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
- hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+ hw_data->enable_pfvf_comms = pfvf_comms_disabled;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
index 4fe2a776293c..924bac6feb37 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -62,6 +62,16 @@
#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)
+/* Power management */
+#define ADF_4XXX_PM_POLL_DELAY_US 20
+#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC
+#define ADF_4XXX_PM_STATUS (0x50A00C)
+#define ADF_4XXX_PM_INTERRUPT (0x50A028)
+#define ADF_4XXX_PM_DRV_ACTIVE BIT(20)
+#define ADF_4XXX_PM_INIT_STATE BIT(21)
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_4XXX_PM_SOU BIT(18)
+
/* Firmware Binaries */
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 3027c01bc89e..1fa690219d92 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -48,34 +48,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
}
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->accel_mask)
- return 0;
-
- for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) {
- if (self->accel_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->ae_mask)
- return 0;
-
- for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) {
- if (self->ae_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXX_PMISC_BAR;
@@ -88,12 +60,12 @@ static u32 get_etr_bar_id(struct adf_hw_device_data *self)
static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
- return 0;
+ return ADF_C3XXX_SRAM_BAR;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
- int aes = get_num_aes(self);
+ int aes = self->get_num_aes(self);
if (aes == 6)
return DEV_SKU_4;
@@ -106,41 +78,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_C3XXX_PF2VF_OFFSET(i);
-}
-
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR];
- unsigned long accel_mask = hw_device->accel_mask;
- unsigned long ae_mask = hw_device->ae_mask;
- void __iomem *csr = misc_bar->virt_addr;
- unsigned int val, i;
-
- /* Enable Accel Engine error detection & correction */
- for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
- val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i));
- val |= ADF_C3XXX_ENABLE_AE_ECC_ERR;
- ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val);
- val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i));
- val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR;
- ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val);
- }
-
- /* Enable shared memory error detection & correction */
- for_each_set_bit(i, &accel_mask, ADF_C3XXX_MAX_ACCELERATORS) {
- val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i));
- val |= ADF_C3XXX_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val);
- val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i));
- val |= ADF_C3XXX_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val);
- }
-}
-
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
@@ -154,13 +91,6 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_C3XXX_SMIA1_MASK);
}
-static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
-{
- spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
-
- return 0;
-}
-
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
adf_gen2_cfg_iov_thds(accel_dev, enable,
@@ -177,16 +107,16 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
- hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET;
- hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK;
+ hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
- hw_data->enable_error_correction = adf_enable_error_correction;
+ hw_data->enable_error_correction = adf_gen2_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_accel_cap = adf_gen2_get_accel_cap;
- hw_data->get_num_accels = get_num_accels;
- hw_data->get_num_aes = get_num_aes;
+ hw_data->get_num_accels = adf_gen2_get_num_accels;
+ hw_data->get_num_aes = adf_gen2_get_num_aes;
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
@@ -205,7 +135,10 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
+ hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
+ hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+ hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index 86ee02a86789..1b86f828725c 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -6,8 +6,7 @@
/* PCIe configuration space */
#define ADF_C3XXX_PMISC_BAR 0
#define ADF_C3XXX_ETR_BAR 1
-#define ADF_C3XXX_RX_RINGS_OFFSET 8
-#define ADF_C3XXX_TX_RINGS_MASK 0xFF
+#define ADF_C3XXX_SRAM_BAR 0
#define ADF_C3XXX_MAX_ACCELERATORS 3
#define ADF_C3XXX_MAX_ACCELENGINES 6
#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
@@ -19,16 +18,6 @@
#define ADF_C3XXX_SMIA0_MASK 0xFFFF
#define ADF_C3XXX_SMIA1_MASK 0x1
#define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC
-/* Error detection and correction */
-#define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
-#define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
-#define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28)
-#define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
-#define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18)
-#define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10)
-#define ADF_C3XXX_ERRSSMSH_EN BIT(3)
-
-#define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
/* AE to function mapping */
#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index b023c80873bb..0613db077689 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -48,34 +48,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
}
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->accel_mask)
- return 0;
-
- for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) {
- if (self->accel_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->ae_mask)
- return 0;
-
- for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) {
- if (self->ae_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_PMISC_BAR;
@@ -93,7 +65,7 @@ static u32 get_sram_bar_id(struct adf_hw_device_data *self)
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
- int aes = get_num_aes(self);
+ int aes = self->get_num_aes(self);
if (aes == 8)
return DEV_SKU_2;
@@ -108,41 +80,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_C62X_PF2VF_OFFSET(i);
-}
-
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR];
- unsigned long accel_mask = hw_device->accel_mask;
- unsigned long ae_mask = hw_device->ae_mask;
- void __iomem *csr = misc_bar->virt_addr;
- unsigned int val, i;
-
- /* Enable Accel Engine error detection & correction */
- for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
- val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i));
- val |= ADF_C62X_ENABLE_AE_ECC_ERR;
- ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val);
- val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i));
- val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR;
- ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val);
- }
-
- /* Enable shared memory error detection & correction */
- for_each_set_bit(i, &accel_mask, ADF_C62X_MAX_ACCELERATORS) {
- val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i));
- val |= ADF_C62X_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val);
- val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i));
- val |= ADF_C62X_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val);
- }
-}
-
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
@@ -156,13 +93,6 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_C62X_SMIA1_MASK);
}
-static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
-{
- spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
-
- return 0;
-}
-
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
adf_gen2_cfg_iov_thds(accel_dev, enable,
@@ -179,16 +109,16 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
- hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET;
- hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK;
+ hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
- hw_data->enable_error_correction = adf_enable_error_correction;
+ hw_data->enable_error_correction = adf_gen2_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_accel_cap = adf_gen2_get_accel_cap;
- hw_data->get_num_accels = get_num_accels;
- hw_data->get_num_aes = get_num_aes;
+ hw_data->get_num_accels = adf_gen2_get_num_accels;
+ hw_data->get_num_aes = adf_gen2_get_num_aes;
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
@@ -207,7 +137,10 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
+ hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
+ hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+ hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
index e6664bd20c91..68c3436bd3aa 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
@@ -7,8 +7,6 @@
#define ADF_C62X_SRAM_BAR 0
#define ADF_C62X_PMISC_BAR 1
#define ADF_C62X_ETR_BAR 2
-#define ADF_C62X_RX_RINGS_OFFSET 8
-#define ADF_C62X_TX_RINGS_MASK 0xFF
#define ADF_C62X_MAX_ACCELERATORS 5
#define ADF_C62X_MAX_ACCELENGINES 10
#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
@@ -20,16 +18,6 @@
#define ADF_C62X_SMIA0_MASK 0xFFFF
#define ADF_C62X_SMIA1_MASK 0x1
#define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC
-/* Error detection and correction */
-#define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
-#define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
-#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28)
-#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
-#define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18)
-#define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10)
-#define ADF_C62X_ERRSSMSH_EN BIT(3)
-
-#define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
/* AE to function mapping */
#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 38c0af6d4e43..57d9ca08e611 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -42,13 +42,17 @@ struct adf_bar {
resource_size_t base_addr;
void __iomem *virt_addr;
resource_size_t size;
-} __packed;
+};
+
+struct adf_irq {
+ bool enabled;
+ char name[ADF_MAX_MSIX_VECTOR_NAME];
+};
struct adf_accel_msix {
- struct msix_entry *entries;
- char **names;
+ struct adf_irq *irqs;
u32 num_entries;
-} __packed;
+};
struct adf_accel_pci {
struct pci_dev *pci_dev;
@@ -56,7 +60,7 @@ struct adf_accel_pci {
struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
u8 revid;
u8 sku;
-} __packed;
+};
enum dev_state {
DEV_DOWN = 0,
@@ -96,7 +100,7 @@ struct adf_hw_device_class {
const char *name;
const enum adf_device_type type;
u32 instances;
-} __packed;
+};
struct arb_info {
u32 arb_cfg;
@@ -166,12 +170,18 @@ struct adf_hw_device_data {
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
const u32 *(*get_arb_mapping)(void);
+ int (*init_device)(struct adf_accel_dev *accel_dev);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*enable_pfvf_comms)(struct adf_accel_dev *accel_dev);
+ u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
+ void (*enable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
+ u32 vf_mask);
+ void (*disable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
+ u32 vf_mask);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
char *(*uof_get_name)(u32 obj_num);
@@ -195,7 +205,7 @@ struct adf_hw_device_data {
u8 num_logical_accel;
u8 num_engines;
u8 min_iov_compat_ver;
-} __packed;
+};
/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
@@ -251,7 +261,8 @@ struct adf_accel_dev {
struct adf_accel_vf_info *vf_info;
} pf;
struct {
- char *irq_name;
+ bool irq_enabled;
+ char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
struct tasklet_struct pf2vf_bh_tasklet;
struct mutex vf2pf_lock; /* protect CSR access */
struct completion iov_msg_completion;
@@ -261,5 +272,5 @@ struct adf_accel_dev {
};
bool is_vf;
u32 accel_id;
-} __packed;
+};
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 4261749fae8d..2cc6622833c4 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -62,7 +62,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
-int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
@@ -197,10 +196,11 @@ void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
u32 vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
u32 vf_mask);
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
-
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg);
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
@@ -211,6 +211,11 @@ void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
#else
#define adf_sriov_configure NULL
+static inline int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 9e560c7d4163..262bdc05dab4 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -4,6 +4,104 @@
#include "icp_qat_hw.h"
#include <linux/pci.h>
+#define ADF_GEN2_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+
+u32 adf_gen2_get_pf2vf_offset(u32 i)
+{
+ return ADF_GEN2_PF2VF_OFFSET(i);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_pf2vf_offset);
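+
+/*
+ * Worked example (arithmetic only): VF 0's PFVF CSR sits at
+ * 0x3A000 + 0x280 = 0x3A280, and each subsequent VF's CSR is another
+ * 4 bytes up.
+ */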
+
+u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+ u32 errsou3, errmsk3, vf_int_mask;
+
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+ vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+
+ /* To avoid adding duplicate entries to the work queue, clear the
+ * vf_int_mask bits that are already masked in the ERRMSK register.
+ */
+ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+ vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+
+ return vf_int_mask;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_vf2pf_sources);
+
+void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+ /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_vf2pf_interrupts);
+
+void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+ /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_disable_vf2pf_interrupts);
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
+{
+ if (!self || !self->accel_mask)
+ return 0;
+
+ return hweight16(self->accel_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);
+
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
+{
+ if (!self || !self->ae_mask)
+ return 0;
+
+ return hweight32(self->ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
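+
+/*
+ * Note for exposition: hweight16()/hweight32() count set bits, so the
+ * two helpers above match the per-device loops they replace; e.g. an
+ * accel_mask of 0x5 (accelerators 0 and 2) yields two accelerators.
+ */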
+
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *misc_bar = &GET_BARS(accel_dev)
+ [hw_data->get_misc_bar_id(hw_data)];
+ unsigned long accel_mask = hw_data->accel_mask;
+ unsigned long ae_mask = hw_data->ae_mask;
+ void __iomem *csr = misc_bar->virt_addr;
+ unsigned int val, i;
+
+ /* Enable Accel Engine error detection & correction */
+ for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
+ val = ADF_CSR_RD(csr, ADF_GEN2_AE_CTX_ENABLES(i));
+ val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
+ ADF_CSR_WR(csr, ADF_GEN2_AE_CTX_ENABLES(i), val);
+ val = ADF_CSR_RD(csr, ADF_GEN2_AE_MISC_CONTROL(i));
+ val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
+ ADF_CSR_WR(csr, ADF_GEN2_AE_MISC_CONTROL(i), val);
+ }
+
+ /* Enable shared memory error detection & correction */
+ for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+ val = ADF_CSR_RD(csr, ADF_GEN2_UERRSSMSH(i));
+ val |= ADF_GEN2_ERRSSMSH_EN;
+ ADF_CSR_WR(csr, ADF_GEN2_UERRSSMSH(i), val);
+ val = ADF_CSR_RD(csr, ADF_GEN2_CERRSSMSH(i));
+ val |= ADF_GEN2_ERRSSMSH_EN;
+ ADF_CSR_WR(csr, ADF_GEN2_CERRSSMSH(i), val);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
+
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs)
{
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index 756b0ddfac5e..c169d704097d 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -22,6 +22,8 @@
#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_GEN2_RX_RINGS_OFFSET 8
+#define ADF_GEN2_TX_RINGS_MASK 0xFF
#define BUILD_RING_BASE_ADDR(addr, size) \
(((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
@@ -125,6 +127,31 @@ do { \
#define ADF_SSMWDT(i) (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
#define ADF_SSMWDTPKE(i) (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
+/* Error detection and correction */
+#define ADF_GEN2_AE_CTX_ENABLES(i) ((i) * 0x1000 + 0x20818)
+#define ADF_GEN2_AE_MISC_CONTROL(i) ((i) * 0x1000 + 0x20960)
+#define ADF_GEN2_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_GEN2_UERRSSMSH(i) ((i) * 0x4000 + 0x18)
+#define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
+#define ADF_GEN2_ERRSSMSH_EN BIT(3)
+
+ /* VF2PF interrupts */
+#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
+#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
+#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9)
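+/*
+ * Worked example: the 16 VF2PF sources occupy bits 24:9 of ERRSOU3 and
+ * ERRMSK3, so a vf_mask of 0x0003 (VFs 0 and 1) maps to register bits
+ * 0x600, and ADF_GEN2_ERR_REG_VF2PF() maps those bits back to 0x0003.
+ */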
+
+u32 adf_gen2_get_pf2vf_offset(u32 i);
+u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_bar);
+void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
+void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 60bc7b991d35..e3749e5817d9 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -79,6 +79,11 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ if (hw_data->init_device && hw_data->init_device(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
+ return -EFAULT;
+ }
+
if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
return -EFAULT;
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index c678d5c531aa..40593c9449a2 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -16,46 +16,31 @@
#include "adf_transport_internal.h"
#define ADF_MAX_NUM_VFS 32
-#define ADF_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_ERRSOU5 (0x3A000 + 0xD8)
-#define ADF_ERRMSK3 (0x3A000 + 0x1C)
-#define ADF_ERRMSK5 (0x3A000 + 0xDC)
-#define ADF_ERR_REG_VF2PF_L(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 msix_num_entries = 1;
+ u32 msix_num_entries = hw_data->num_banks + 1;
+ int ret;
if (hw_data->set_msix_rttable)
hw_data->set_msix_rttable(accel_dev);
- /* If SR-IOV is disabled, add entries for each bank */
- if (!accel_dev->pf.vf_info) {
- int i;
-
- msix_num_entries += hw_data->num_banks;
- for (i = 0; i < msix_num_entries; i++)
- pci_dev_info->msix_entries.entries[i].entry = i;
- } else {
- pci_dev_info->msix_entries.entries[0].entry =
- hw_data->num_banks;
- }
-
- if (pci_enable_msix_exact(pci_dev_info->pci_dev,
- pci_dev_info->msix_entries.entries,
- msix_num_entries)) {
- dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
- return -EFAULT;
+ ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
+ msix_num_entries, PCI_IRQ_MSIX);
+ if (unlikely(ret < 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to allocate %d MSI-X vectors\n",
+ msix_num_entries);
+ return ret;
}
return 0;
}
static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
- pci_disable_msix(pci_dev_info->pci_dev);
+ pci_free_irq_vectors(pci_dev_info->pci_dev);
}
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
@@ -80,22 +65,10 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_addr = pmisc->virt_addr;
- u32 errsou3, errsou5, errmsk3, errmsk5;
unsigned long vf_mask;
/* Get the interrupt sources triggered by VFs */
- errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
- errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
- vf_mask = ADF_ERR_REG_VF2PF_L(errsou3);
- vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);
-
- /* To avoid adding duplicate entries to work queue, clear
- * vf_int_mask_sets bits that are already masked in ERRMSK register.
- */
- errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
- errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
- vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
- vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);
+ vf_mask = hw_data->get_vf2pf_sources(pmisc_addr);
if (vf_mask) {
struct adf_accel_vf_info *vf_info;
@@ -135,13 +108,39 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
return IRQ_NONE;
}
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ int clust_irq = hw_data->num_banks;
+ int irq, i = 0;
+
+ if (pci_dev_info->msix_entries.num_entries > 1) {
+ for (i = 0; i < hw_data->num_banks; i++) {
+ if (irqs[i].enabled) {
+ irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, &etr_data->banks[i]);
+ }
+ }
+ }
+
+ if (irqs[i].enabled) {
+ irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+ free_irq(irq, accel_dev);
+ }
+}
+
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+ struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
struct adf_etr_data *etr_data = accel_dev->transport;
- int ret, i = 0;
+ int clust_irq = hw_data->num_banks;
+ int ret, irq, i = 0;
char *name;
/* Request msix irq for all banks unless SR-IOV enabled */
@@ -150,105 +149,82 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
struct adf_etr_bank_data *bank = &etr_data->banks[i];
unsigned int cpu, cpus = num_online_cpus();
- name = *(pci_dev_info->msix_entries.names + i);
+ name = irqs[i].name;
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
"qat%d-bundle%d", accel_dev->accel_id, i);
- ret = request_irq(msixe[i].vector,
- adf_msix_isr_bundle, 0, name, bank);
+ irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+ if (unlikely(irq < 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get IRQ number of device vector %d - %s\n",
+ i, name);
+ ret = irq;
+ goto err;
+ }
+ ret = request_irq(irq, adf_msix_isr_bundle, 0,
+ &name[0], bank);
if (ret) {
dev_err(&GET_DEV(accel_dev),
- "failed to enable irq %d for %s\n",
- msixe[i].vector, name);
- return ret;
+ "Failed to allocate IRQ %d for %s\n",
+ irq, name);
+ goto err;
}
cpu = ((accel_dev->accel_id * hw_data->num_banks) +
i) % cpus;
- irq_set_affinity_hint(msixe[i].vector,
- get_cpu_mask(cpu));
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+ irqs[i].enabled = true;
}
}
/* Request msix irq for AE */
- name = *(pci_dev_info->msix_entries.names + i);
+ name = irqs[i].name;
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
"qat%d-ae-cluster", accel_dev->accel_id);
- ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
+ irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+ if (unlikely(irq < 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get IRQ number of device vector %d - %s\n",
+ i, name);
+ ret = irq;
+ goto err;
+ }
+ ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
if (ret) {
dev_err(&GET_DEV(accel_dev),
- "failed to enable irq %d, for %s\n",
- msixe[i].vector, name);
- return ret;
+ "Failed to allocate IRQ %d for %s\n", irq, name);
+ goto err;
}
+ irqs[i].enabled = true;
+ return ret;
+err:
+ adf_free_irqs(accel_dev);
return ret;
}
-static void adf_free_irqs(struct adf_accel_dev *accel_dev)
-{
- struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
- struct adf_etr_data *etr_data = accel_dev->transport;
- int i = 0;
-
- if (pci_dev_info->msix_entries.num_entries > 1) {
- for (i = 0; i < hw_data->num_banks; i++) {
- irq_set_affinity_hint(msixe[i].vector, NULL);
- free_irq(msixe[i].vector, &etr_data->banks[i]);
- }
- }
- irq_set_affinity_hint(msixe[i].vector, NULL);
- free_irq(msixe[i].vector, accel_dev);
-}
-
-static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
+static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
- int i;
- char **names;
- struct msix_entry *entries;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 msix_num_entries = 1;
+ struct adf_irq *irqs;
/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
if (!accel_dev->pf.vf_info)
msix_num_entries += hw_data->num_banks;
- entries = kcalloc_node(msix_num_entries, sizeof(*entries),
- GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
- if (!entries)
+ irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
+ GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
+ if (!irqs)
return -ENOMEM;
- names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
- if (!names) {
- kfree(entries);
- return -ENOMEM;
- }
- for (i = 0; i < msix_num_entries; i++) {
- *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
- if (!(*(names + i)))
- goto err;
- }
accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
- accel_dev->accel_pci_dev.msix_entries.entries = entries;
- accel_dev->accel_pci_dev.msix_entries.names = names;
+ accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
return 0;
-err:
- for (i = 0; i < msix_num_entries; i++)
- kfree(*(names + i));
- kfree(entries);
- kfree(names);
- return -ENOMEM;
}
-static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
+static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
- char **names = accel_dev->accel_pci_dev.msix_entries.names;
- int i;
-
- kfree(accel_dev->accel_pci_dev.msix_entries.entries);
- for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
- kfree(*(names + i));
- kfree(names);
+ kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
+ accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
}
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
@@ -287,7 +263,7 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
adf_free_irqs(accel_dev);
adf_cleanup_bh(accel_dev);
adf_disable_msix(&accel_dev->accel_pci_dev);
- adf_isr_free_msix_entry_table(accel_dev);
+ adf_isr_free_msix_vectors_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);
@@ -303,7 +279,7 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
int ret;
- ret = adf_isr_alloc_msix_entry_table(accel_dev);
+ ret = adf_isr_alloc_msix_vectors_data(accel_dev);
if (ret)
goto err_out;
@@ -328,7 +304,7 @@ err_disable_msix:
adf_disable_msix(&accel_dev->accel_pci_dev);
err_free_msix_table:
- adf_isr_free_msix_entry_table(accel_dev);
+ adf_isr_free_msix_vectors_data(accel_dev);
err_out:
return ret;
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index 976b9ab7617c..59860bdaedb6 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -5,82 +5,51 @@
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"
-#define ADF_DH895XCC_EP_OFFSET 0x3A000
-#define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C)
-#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
-#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
-#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
-
-static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- u32 vf_mask)
-{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *pmisc =
- &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
- void __iomem *pmisc_addr = pmisc->virt_addr;
- u32 reg;
-
- /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
- reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
- }
-
- /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
- if (vf_mask >> 16) {
- reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
- reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
- }
-}
+#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
+#define ADF_PFVF_MSG_ACK_DELAY 2
+#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
+#define ADF_PFVF_MSG_RETRY_DELAY 5
+#define ADF_PFVF_MSG_MAX_RETRIES 3
+#define ADF_PFVF_MSG_RESP_TIMEOUT (ADF_PFVF_MSG_ACK_DELAY * \
+ ADF_PFVF_MSG_ACK_MAX_RETRY + \
+ ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
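+
+/* With the values above, ADF_PFVF_MSG_RESP_TIMEOUT is 2 * 100 + 10 = 210 ms. */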
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+ struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
unsigned long flags;
spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
- __adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
+ hw_data->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}
-static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- u32 vf_mask)
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *pmisc =
- &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+ struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
void __iomem *pmisc_addr = pmisc->virt_addr;
- u32 reg;
-
- /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
- ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
- }
-
- /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
- if (vf_mask >> 16) {
- reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
- ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
- }
-}
-
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
-{
unsigned long flags;
spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
- __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+ hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}
-void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+ u32 vf_mask)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+ struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+
spin_lock(&accel_dev->pf.vf2pf_ints_lock);
- __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+ hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
}
@@ -117,44 +86,33 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
mutex_lock(lock);
- /* Check if PF2VF CSR is in use by remote function */
+ /* Check if the PFVF CSR is in use by remote function */
val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
if ((val & remote_in_use_mask) == remote_in_use_pattern) {
dev_dbg(&GET_DEV(accel_dev),
- "PF2VF CSR in use by remote function\n");
+ "PFVF CSR in use by remote function\n");
ret = -EBUSY;
goto out;
}
- /* Attempt to get ownership of PF2VF CSR */
msg &= ~local_in_use_mask;
msg |= local_in_use_pattern;
- ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
-
- /* Wait in case remote func also attempting to get ownership */
- msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
-
- val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
- if ((val & local_in_use_mask) != local_in_use_pattern) {
- dev_dbg(&GET_DEV(accel_dev),
- "PF2VF CSR in use by remote - collision detected\n");
- ret = -EBUSY;
- goto out;
- }
- /*
- * This function now owns the PV2VF CSR. The IN_USE_BY pattern must
- * remain in the PF2VF CSR for all writes including ACK from remote
- * until this local function relinquishes the CSR. Send the message
- * by interrupting the remote.
- */
+ /* Attempt to get ownership of the PFVF CSR */
ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
/* Wait for confirmation from remote func it received the message */
do {
- msleep(ADF_IOV_MSG_ACK_DELAY);
+ msleep(ADF_PFVF_MSG_ACK_DELAY);
val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
- } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+ } while ((val & int_bit) && (count++ < ADF_PFVF_MSG_ACK_MAX_RETRY));
+
+ if (val != msg) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Collision - PFVF CSR overwritten by remote function\n");
+ ret = -EIO;
+ goto out;
+ }
if (val & int_bit) {
dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
@@ -162,7 +120,7 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
ret = -EIO;
}
- /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+ /* Finished with the PFVF CSR; relinquish it and leave msg in CSR */
ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
mutex_unlock(lock);
@@ -170,16 +128,17 @@ out:
}
/**
- * adf_iov_putmsg() - send PF2VF message
+ * adf_iov_putmsg() - send PFVF message
* @accel_dev: Pointer to acceleration device.
* @msg: Message to send
- * @vf_nr: VF number to which the message will be sent
+ * @vf_nr: VF number to which the message will be sent if on PF, ignored
+ * otherwise
*
- * Function sends a message from the PF to a VF
+ * Function sends a message through the PFVF channel
*
* Return: 0 on success, error code otherwise.
*/
-int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+static int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
u32 count = 0;
int ret;
@@ -187,12 +146,77 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
do {
ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
if (ret)
- msleep(ADF_IOV_MSG_RETRY_DELAY);
- } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+ msleep(ADF_PFVF_MSG_RETRY_DELAY);
+ } while (ret && (count++ < ADF_PFVF_MSG_MAX_RETRIES));
return ret;
}
+/**
+ * adf_send_pf2vf_msg() - send PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr: VF number to which the message will be sent
+ * @msg: Message to send
+ *
+ * This function allows the PF to send a message to a specific VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, u32 msg)
+{
+ return adf_iov_putmsg(accel_dev, msg, vf_nr);
+}
+
+/**
+ * adf_send_vf2pf_msg() - send VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @msg: Message to send
+ *
+ * This function allows the VF to send a message to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg)
+{
+ return adf_iov_putmsg(accel_dev, msg, 0);
+}
+
+/**
+ * adf_send_vf2pf_req() - send VF2PF request message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg: Request message to send
+ *
+ * This function sends a message that requires a response from the VF to the PF
+ * and waits for a reply.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, u32 msg)
+{
+ unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
+ int ret;
+
+ reinit_completion(&accel_dev->vf.iov_msg_completion);
+
+ /* Send request from VF to PF */
+ ret = adf_send_vf2pf_msg(accel_dev, msg);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send request msg to PF\n");
+ return ret;
+ }
+
+ /* Wait for response */
+ if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+ timeout)) {
+ dev_err(&GET_DEV(accel_dev),
+ "PFVF request/response message timeout expired\n");
+ return -EIO;
+ }
+
+ return 0;
+}
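For context, adf_send_vf2pf_req() pairs with a completion signalled on the interrupt path: whichever handler receives the PF's reply is expected to call complete() on vf.iov_msg_completion. A minimal sketch of that responder side, with a hypothetical handler name (the real handler also decodes and stores the response before completing):

	static void my_handle_pf_reply(struct adf_accel_dev *accel_dev, u32 msg)
	{
		/* decode the reply (e.g. the compatibility result) from msg ... */
		complete(&accel_dev->vf.iov_msg_completion);
	}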
+
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
struct adf_accel_dev *accel_dev = vf_info->accel_dev;
@@ -204,6 +228,11 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
/* Read message from the VF */
msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+ if (!(msg & ADF_VF2PF_INT)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Spurious VF2PF interrupt, msg %X. Ignored\n", msg);
+ goto out;
+ }
/* To ACK, clear the VF2PFINT bit */
msg &= ~ADF_VF2PF_INT;
@@ -284,9 +313,10 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
goto err;
}
- if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+ if (resp && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
+out:
/* re-enable interrupt on PF from this VF */
adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
@@ -304,7 +334,7 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
- if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+ if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send restarting msg to VF%d\n", i);
}
@@ -312,7 +342,6 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
- unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 msg = 0;
int ret;
@@ -322,24 +351,13 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
- reinit_completion(&accel_dev->vf.iov_msg_completion);
-
- /* Send request from VF to PF */
- ret = adf_iov_putmsg(accel_dev, msg, 0);
+ ret = adf_send_vf2pf_req(accel_dev, msg);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to send Compatibility Version Request.\n");
return ret;
}
- /* Wait for response */
- if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
- timeout)) {
- dev_err(&GET_DEV(accel_dev),
- "IOV request/response message timeout expired\n");
- return -EIO;
- }
-
/* Response from PF received, check compatibility */
switch (accel_dev->vf.compatible) {
case ADF_PF2VF_VF_COMPATIBLE:
@@ -378,3 +396,21 @@ int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
+
+/**
+ * adf_enable_pf2vf_comms() - Function enables communication from PF to VF
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function carries out the necessary steps to set up and start the PFVF
+ * communication channel, if any.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+ spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
index ffd43aa50b57..a7d8f8367345 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -90,13 +90,4 @@
/* VF->PF Compatible Version Request */
#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
-/* Collision detection */
-#define ADF_IOV_MSG_COLLISION_DETECT_DELAY 10
-#define ADF_IOV_MSG_ACK_DELAY 2
-#define ADF_IOV_MSG_ACK_MAX_RETRY 100
-#define ADF_IOV_MSG_RETRY_DELAY 5
-#define ADF_IOV_MSG_MAX_RETRIES 3
-#define ADF_IOV_MSG_RESP_TIMEOUT (ADF_IOV_MSG_ACK_DELAY * \
- ADF_IOV_MSG_ACK_MAX_RETRY + \
- ADF_IOV_MSG_COLLISION_DETECT_DELAY)
#endif /* ADF_IOV_MSG_H */
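For reference, the removed response timeout worked out to ADF_IOV_MSG_ACK_DELAY * ADF_IOV_MSG_ACK_MAX_RETRY + ADF_IOV_MSG_COLLISION_DETECT_DELAY = 2 ms * 100 + 10 ms = 210 ms; the replacement ADF_PFVF_MSG_RESP_TIMEOUT used above is presumably derived the same way from the new ADF_PFVF_MSG_* constants.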
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
index 3e25fac051b2..8d11bb24cea0 100644
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -17,7 +17,7 @@ int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
- if (adf_iov_putmsg(accel_dev, msg, 0)) {
+ if (adf_send_vf2pf_msg(accel_dev, msg)) {
dev_err(&GET_DEV(accel_dev),
"Failed to send Init event to PF\n");
return -EFAULT;
@@ -41,7 +41,7 @@ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
- if (adf_iov_putmsg(accel_dev, msg, 0))
+ if (adf_send_vf2pf_msg(accel_dev, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send Shutdown event to PF\n");
}
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 7828a6573f3e..db5e7abbe5f3 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -53,27 +53,22 @@ EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
- int stat = pci_enable_msi(pci_dev_info->pci_dev);
-
- if (stat) {
+ int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
+ PCI_IRQ_MSI);
+ if (unlikely(stat < 0)) {
dev_err(&GET_DEV(accel_dev),
- "Failed to enable MSI interrupts\n");
+ "Failed to enable MSI interrupt: %d\n", stat);
return stat;
}
- accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
- if (!accel_dev->vf.irq_name)
- return -ENOMEM;
-
- return stat;
+ return 0;
}
static void adf_disable_msi(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
- kfree(accel_dev->vf.irq_name);
- pci_disable_msi(pdev);
+ pci_free_irq_vectors(pdev);
}
static void adf_dev_stop_async(struct work_struct *work)
@@ -101,6 +96,11 @@ static void adf_pf2vf_bh_handler(void *data)
/* Read the message from PF */
msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
+ if (!(msg & ADF_PF2VF_INT)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Spurious PF2VF interrupt, msg %X. Ignored\n", msg);
+ goto out;
+ }
if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
/* Ignore legacy non-system (non-kernel) PF2VF messages */
@@ -149,6 +149,7 @@ static void adf_pf2vf_bh_handler(void *data)
msg &= ~ADF_PF2VF_INT;
ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+out:
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
return;
@@ -240,6 +241,7 @@ static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
}
cpu = accel_dev->accel_id % num_online_cpus();
irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+ accel_dev->vf.irq_enabled = true;
return ret;
}
@@ -271,8 +273,10 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
- irq_set_affinity_hint(pdev->irq, NULL);
- free_irq(pdev->irq, (void *)accel_dev);
+ if (accel_dev->vf.irq_enabled) {
+ irq_set_affinity_hint(pdev->irq, NULL);
+ free_irq(pdev->irq, accel_dev);
+ }
adf_cleanup_bh(accel_dev);
adf_cleanup_pf2vf_bh(accel_dev);
adf_disable_msi(accel_dev);
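The MSI rework above switches to the generic pci_alloc_irq_vectors() API, which also removes the need for a separately allocated irq name buffer. A minimal sketch of the idiom, with my_isr and the "my-dev" label as hypothetical stand-ins:

	static int my_setup_irq(struct pci_dev *pdev, void *ctx,
				irqreturn_t (*my_isr)(int, void *))
	{
		int ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);

		if (ret < 0)
			return ret;

		/* with a single vector, pci_irq_vector(pdev, 0) == pdev->irq */
		ret = request_irq(pci_irq_vector(pdev, 0), my_isr, 0, "my-dev", ctx);
		if (ret)
			pci_free_irq_vectors(pdev);	/* replaces pci_disable_msi() */
		return ret;
	}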
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 0a9ce365a544..8e2e1554dcf6 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -35,34 +35,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
}
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->accel_mask)
- return 0;
-
- for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
- if (self->accel_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->ae_mask)
- return 0;
-
- for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
- if (self->ae_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCC_PMISC_BAR;
@@ -126,41 +98,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_DH895XCC_PF2VF_OFFSET(i);
-}
-
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
- unsigned long accel_mask = hw_device->accel_mask;
- unsigned long ae_mask = hw_device->ae_mask;
- void __iomem *csr = misc_bar->virt_addr;
- unsigned int val, i;
-
- /* Enable Accel Engine error detection & correction */
- for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
- val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
- val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
- ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
- val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
- val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
- ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
- }
-
- /* Enable shared memory error detection & correction */
- for_each_set_bit(i, &accel_mask, ADF_DH895XCC_MAX_ACCELERATORS) {
- val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
- val |= ADF_DH895XCC_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
- val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
- val |= ADF_DH895XCC_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
- }
-}
-
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
@@ -175,11 +112,50 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_DH895XCC_SMIA1_MASK);
}
-static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
+{
+ u32 errsou5, errmsk5, vf_int_mask;
+
+ vf_int_mask = adf_gen2_get_vf2pf_sources(pmisc_bar);
+
+ /* Get the interrupt sources triggered by VFs, but to avoid duplicates
+ * in the work queue, clear vf_int_mask bits that are already
+ * masked in the ERRMSK register.
+ */
+ errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
+ errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
+ vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
+ vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
+
+ return vf_int_mask;
+}
+
+static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
- spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+ /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+ adf_gen2_enable_vf2pf_interrupts(pmisc_addr, vf_mask);
- return 0;
+ /* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+ & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+ }
+}
+
+static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+ /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+ adf_gen2_disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+
+ /* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+ | ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+ }
}
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
@@ -198,16 +174,16 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
- hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
- hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
+ hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
- hw_data->enable_error_correction = adf_enable_error_correction;
+ hw_data->enable_error_correction = adf_gen2_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_accel_cap = get_accel_cap;
- hw_data->get_num_accels = get_num_accels;
- hw_data->get_num_aes = get_num_aes;
+ hw_data->get_num_accels = adf_gen2_get_num_accels;
+ hw_data->get_num_aes = adf_gen2_get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_admin_info = adf_gen2_get_admin_info;
@@ -225,7 +201,10 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_sbr;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
+ hw_data->get_vf2pf_sources = get_vf2pf_sources;
+ hw_data->enable_vf2pf_interrupts = enable_vf2pf_interrupts;
+ hw_data->disable_vf2pf_interrupts = disable_vf2pf_interrupts;
hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
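As a worked example of the upper-half handling: for vf_mask = BIT(20) (VF 20), ADF_DH895XCC_ERR_MSK_VF2PF_U() shifts the mask down to BIT(4) in ERRMSK5, and when ERRSOU5 later reports bit 4, ADF_DH895XCC_ERR_REG_VF2PF_U() shifts it back up so the combined vf_int_mask again shows BIT(20).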
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index f99319cd4543..0af34dd8708a 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -7,8 +7,6 @@
#define ADF_DH895XCC_SRAM_BAR 0
#define ADF_DH895XCC_PMISC_BAR 1
#define ADF_DH895XCC_ETR_BAR 2
-#define ADF_DH895XCC_RX_RINGS_OFFSET 8
-#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
@@ -25,16 +23,10 @@
#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
#define ADF_DH895XCC_SMIA1_MASK 0x1
-/* Error detection and correction */
-#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
-#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
-#define ADF_DH895XCC_ENABLE_AE_ECC_ERR BIT(28)
-#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
-#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18)
-#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
-#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
-#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+/* Masks for VF2PF interrupts */
+#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask) ((vf_mask) >> 16)
/* AE to function mapping */
#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 55aa3a71169b..7717e9e5977b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -2171,6 +2171,8 @@ static int s5p_aes_probe(struct platform_device *pdev)
variant = find_s5p_sss_version(pdev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
/*
* Note: HASH and PRNG uses the same registers in secss, avoid
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 544d7040cfc5..bcbc38dc6ae8 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -2412,8 +2412,7 @@ static int sa_ul_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
- ret);
+ dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
pm_runtime_disable(dev);
return ret;
}
@@ -2435,16 +2434,16 @@ static int sa_ul_probe(struct platform_device *pdev)
sa_register_algos(dev_data);
- ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ ret = of_platform_populate(node, NULL, NULL, dev);
if (ret)
goto release_dma;
- device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
+ device_for_each_child(dev, dev, sa_link_child);
return 0;
release_dma:
- sa_unregister_algos(&pdev->dev);
+ sa_unregister_algos(dev);
dma_release_channel(dev_data->dma_rx2);
dma_release_channel(dev_data->dma_rx1);
@@ -2453,8 +2452,8 @@ release_dma:
destroy_dma_pool:
dma_pool_destroy(dev_data->sc_pool);
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 53927f9fa77e..9db0c402c9ce 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -75,52 +75,27 @@ static inline int cxl_hdm_decoder_count(u32 cap_hdr)
#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
-#define CXL_COMPONENT_REGS() \
- void __iomem *hdm_decoder
-
-#define CXL_DEVICE_REGS() \
- void __iomem *status; \
- void __iomem *mbox; \
- void __iomem *memdev
-
-/* See note for 'struct cxl_regs' for the rationale of this organization */
-/*
- * CXL_COMPONENT_REGS - Common set of CXL Component register block base pointers
- * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
- */
-struct cxl_component_regs {
- CXL_COMPONENT_REGS();
-};
-
-/* See note for 'struct cxl_regs' for the rationale of this organization */
-/*
- * CXL_DEVICE_REGS - Common set of CXL Device register block base pointers
- * @status: CXL 2.0 8.2.8.3 Device Status Registers
- * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
- * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
- */
-struct cxl_device_regs {
- CXL_DEVICE_REGS();
-};
-
/*
- * Note, the anonymous union organization allows for per
- * register-block-type helper routines, without requiring block-type
- * agnostic code to include the prefix.
+ * Using struct_group() allows for per register-block-type helper routines,
+ * without requiring block-type agnostic code to include the prefix.
*/
struct cxl_regs {
- union {
- struct {
- CXL_COMPONENT_REGS();
- };
- struct cxl_component_regs component;
- };
- union {
- struct {
- CXL_DEVICE_REGS();
- };
- struct cxl_device_regs device_regs;
- };
+ /*
+ * Common set of CXL Component register block base pointers
+ * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
+ */
+ struct_group_tagged(cxl_component_regs, component,
+ void __iomem *hdm_decoder;
+ );
+ /*
+ * Common set of CXL Device register block base pointers
+ * @status: CXL 2.0 8.2.8.3 Device Status Registers
+ * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
+ * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
+ */
+ struct_group_tagged(cxl_device_regs, device_regs,
+ void __iomem *status, *mbox, *memdev;
+ );
};
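For readers unfamiliar with struct_group_tagged(): it wraps the members in an anonymous union so they stay addressable both directly and through the tagged sub-struct. A minimal sketch with hypothetical names:

	struct my_regs {
		struct_group_tagged(dev_regs, device,
			void __iomem *status;
		);
	};

	/* helpers can take the tagged type... */
	static void __iomem *my_status(struct dev_regs *r)
	{
		return r->status;
	}
	/* ...while block-type agnostic code keeps the short spelling:
	 * regs.status aliases regs.device.status. */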
struct cxl_reg_map {
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 85faa7a5c7d1..06333d430382 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -827,7 +827,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_dev;
}
- if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
+ if (!devfreq->profile->max_state || !devfreq->profile->freq_table) {
mutex_unlock(&devfreq->lock);
err = set_freq_table(devfreq);
if (err < 0)
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 17ed980d9099..9b849d781116 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -94,11 +94,16 @@ static struct __exynos_ppmu_events {
PPMU_EVENT(d1-general),
PPMU_EVENT(d1-rt),
- /* For Exynos5422 SoC */
+ /* For Exynos5422 SoC, deprecated (backwards compatible) */
PPMU_EVENT(dmc0_0),
PPMU_EVENT(dmc0_1),
PPMU_EVENT(dmc1_0),
PPMU_EVENT(dmc1_1),
+ /* For Exynos5422 SoC */
+ PPMU_EVENT(dmc0-0),
+ PPMU_EVENT(dmc0-1),
+ PPMU_EVENT(dmc1-0),
+ PPMU_EVENT(dmc1-1),
};
static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
@@ -561,13 +566,10 @@ static int of_get_devfreq_events(struct device_node *np,
* use default if not.
*/
if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
- int id;
/* Not all registers take the same value for
* read+write data count.
*/
- id = __exynos_ppmu_find_ppmu_id(desc[j].name);
-
- switch (id) {
+ switch (ppmu_events[i].id) {
case PPMU_PMNCNT0:
case PPMU_PMNCNT1:
case PPMU_PMNCNT2:
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 40d81f23cacf..1ef021273a06 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
- dma-resv.o seqno-fence.o
+ dma-resv.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 63d32261b63f..2ea59cb8edcd 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -74,7 +74,7 @@ static void dma_buf_release(struct dentry *dentry)
* If you hit this BUG() it means someone dropped their ref to the
* dma-buf while still having pending operation to the buffer.
*/
- BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+ BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
dma_buf_stats_teardown(dmabuf);
dmabuf->ops->release(dmabuf);
@@ -82,6 +82,7 @@ static void dma_buf_release(struct dentry *dentry)
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
+ WARN_ON(!list_empty(&dmabuf->attachments));
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);
@@ -205,16 +206,55 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
wake_up_locked_poll(dcb->poll, dcb->active);
dcb->active = 0;
spin_unlock_irqrestore(&dcb->poll->lock, flags);
+ dma_fence_put(fence);
+}
+
+static bool dma_buf_poll_shared(struct dma_resv *resv,
+ struct dma_buf_poll_cb_t *dcb)
+{
+ struct dma_resv_list *fobj = dma_resv_shared_list(resv);
+ struct dma_fence *fence;
+ int i, r;
+
+ if (!fobj)
+ return false;
+
+ for (i = 0; i < fobj->shared_count; ++i) {
+ fence = rcu_dereference_protected(fobj->shared[i],
+ dma_resv_held(resv));
+ dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+ if (!r)
+ return true;
+ dma_fence_put(fence);
+ }
+
+ return false;
+}
+
+static bool dma_buf_poll_excl(struct dma_resv *resv,
+ struct dma_buf_poll_cb_t *dcb)
+{
+ struct dma_fence *fence = dma_resv_excl_fence(resv);
+ int r;
+
+ if (!fence)
+ return false;
+
+ dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+ if (!r)
+ return true;
+ dma_fence_put(fence);
+
+ return false;
}
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct dma_resv *resv;
- struct dma_resv_list *fobj;
- struct dma_fence *fence_excl;
__poll_t events;
- unsigned shared_count, seq;
dmabuf = file->private_data;
if (!dmabuf || !dmabuf->resv)
@@ -228,101 +268,50 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
if (!events)
return 0;
-retry:
- seq = read_seqcount_begin(&resv->seq);
- rcu_read_lock();
-
- fobj = rcu_dereference(resv->fence);
- if (fobj)
- shared_count = fobj->shared_count;
- else
- shared_count = 0;
- fence_excl = dma_resv_excl_fence(resv);
- if (read_seqcount_retry(&resv->seq, seq)) {
- rcu_read_unlock();
- goto retry;
- }
-
- if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
- __poll_t pevents = EPOLLIN;
+ dma_resv_lock(resv, NULL);
- if (shared_count == 0)
- pevents |= EPOLLOUT;
+ if (events & EPOLLOUT) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
+ /* Check that callback isn't busy */
spin_lock_irq(&dmabuf->poll.lock);
- if (dcb->active) {
- dcb->active |= pevents;
- events &= ~pevents;
- } else
- dcb->active = pevents;
+ if (dcb->active)
+ events &= ~EPOLLOUT;
+ else
+ dcb->active = EPOLLOUT;
spin_unlock_irq(&dmabuf->poll.lock);
- if (events & pevents) {
- if (!dma_fence_get_rcu(fence_excl)) {
- /* force a recheck */
- events &= ~pevents;
- dma_buf_poll_cb(NULL, &dcb->cb);
- } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
- dma_buf_poll_cb)) {
- events &= ~pevents;
- dma_fence_put(fence_excl);
- } else {
- /*
- * No callback queued, wake up any additional
- * waiters.
- */
- dma_fence_put(fence_excl);
+ if (events & EPOLLOUT) {
+ if (!dma_buf_poll_shared(resv, dcb) &&
+ !dma_buf_poll_excl(resv, dcb))
+ /* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
- }
+ else
+ events &= ~EPOLLOUT;
}
}
- if ((events & EPOLLOUT) && shared_count > 0) {
- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
- int i;
+ if (events & EPOLLIN) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
- /* Only queue a new callback if no event has fired yet */
+ /* Check that callback isn't busy */
spin_lock_irq(&dmabuf->poll.lock);
if (dcb->active)
- events &= ~EPOLLOUT;
+ events &= ~EPOLLIN;
else
- dcb->active = EPOLLOUT;
+ dcb->active = EPOLLIN;
spin_unlock_irq(&dmabuf->poll.lock);
- if (!(events & EPOLLOUT))
- goto out;
-
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
- if (!dma_fence_get_rcu(fence)) {
- /*
- * fence refcount dropped to zero, this means
- * that fobj has been freed
- *
- * call dma_buf_poll_cb and force a recheck!
- */
- events &= ~EPOLLOUT;
+ if (events & EPOLLIN) {
+ if (!dma_buf_poll_excl(resv, dcb))
+ /* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
- break;
- }
- if (!dma_fence_add_callback(fence, &dcb->cb,
- dma_buf_poll_cb)) {
- dma_fence_put(fence);
- events &= ~EPOLLOUT;
- break;
- }
- dma_fence_put(fence);
+ else
+ events &= ~EPOLLIN;
}
-
- /* No callback queued, wake up any additional waiters. */
- if (i == shared_count)
- dma_buf_poll_cb(NULL, &dcb->cb);
}
-out:
- rcu_read_unlock();
+ dma_resv_unlock(resv);
return events;
}
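The rework above hinges on dma_fence_add_callback() returning 0 when the callback was queued and -ENOENT when the fence already signaled. A condensed sketch of the decision both new helpers make (my_wake is a hypothetical callback):

	dma_fence_get(fence);
	if (!dma_fence_add_callback(fence, &dcb->cb, my_wake))
		return true;	/* queued: the wake-up will come later */
	dma_fence_put(fence);	/* already signaled: wake pollers now */
	return false;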
@@ -565,8 +554,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->owner = exp_info->owner;
spin_lock_init(&dmabuf->name_lock);
init_waitqueue_head(&dmabuf->poll);
- dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
- dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+ dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+ dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
if (!resv) {
resv = (struct dma_resv *)&dmabuf[1];
@@ -610,7 +599,7 @@ err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dma_buf_export);
+EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
/**
* dma_buf_fd - returns a file descriptor for the given struct dma_buf
@@ -634,7 +623,7 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
return fd;
}
-EXPORT_SYMBOL_GPL(dma_buf_fd);
+EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
/**
* dma_buf_get - returns the struct dma_buf related to an fd
@@ -660,7 +649,7 @@ struct dma_buf *dma_buf_get(int fd)
return file->private_data;
}
-EXPORT_SYMBOL_GPL(dma_buf_get);
+EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
/**
* dma_buf_put - decreases refcount of the buffer
@@ -679,7 +668,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
fput(dmabuf->file);
}
-EXPORT_SYMBOL_GPL(dma_buf_put);
+EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
static void mangle_sg_table(struct sg_table *sg_table)
{
@@ -810,7 +799,7 @@ err_unlock:
dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
/**
* dma_buf_attach - Wrapper for dma_buf_dynamic_attach
@@ -825,7 +814,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
{
return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
-EXPORT_SYMBOL_GPL(dma_buf_attach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
static void __unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sg_table,
@@ -871,7 +860,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
kfree(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_detach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
/**
* dma_buf_pin - Lock down the DMA-buf
@@ -901,7 +890,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_pin);
+EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
/**
* dma_buf_unpin - Unpin a DMA-buf
@@ -922,7 +911,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
if (dmabuf->ops->unpin)
dmabuf->ops->unpin(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_unpin);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
/**
* dma_buf_map_attachment - Returns the scatterlist table of the attachment;
@@ -1012,7 +1001,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
}
-EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
/**
* dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
@@ -1048,7 +1037,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
dma_buf_unpin(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
/**
* dma_buf_move_notify - notify attachments that DMA-buf is moving
@@ -1068,7 +1057,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf)
if (attach->importer_ops)
attach->importer_ops->move_notify(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_move_notify);
+EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
/**
* DOC: cpu access
@@ -1212,7 +1201,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
/**
* dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
@@ -1240,7 +1229,7 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
/**
@@ -1282,7 +1271,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return dmabuf->ops->mmap(dmabuf, vma);
}
-EXPORT_SYMBOL_GPL(dma_buf_mmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
/**
* dma_buf_vmap - Create virtual mapping for the buffer object into kernel
@@ -1336,7 +1325,7 @@ out_unlock:
mutex_unlock(&dmabuf->lock);
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_vmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
@@ -1360,7 +1349,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
}
mutex_unlock(&dmabuf->lock);
}
-EXPORT_SYMBOL_GPL(dma_buf_vunmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index ce0f5eff575d..1e82ecd443fa 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -616,20 +616,17 @@ EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
* @cb: the callback to register
* @func: the function to call
*
+ * Add a software callback to the fence. The caller should keep a reference to
+ * the fence.
+ *
* @cb will be initialized by dma_fence_add_callback(), no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
- * Note that the callback can be called from an atomic context. If
- * fence is already signaled, this function will return -ENOENT (and
+ * If fence is already signaled, this function will return -ENOENT (and
* *not* call the callback).
*
- * Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to dma_fence_wait(), however the caller doesn't need to
- * keep a refcount to fence afterward dma_fence_add_callback() has returned:
- * when software access is enabled, the creator of the fence is required to keep
- * the fence alive until after it signals with dma_fence_signal(). The callback
- * itself can be called from irq context.
+ * Note that the callback can be called from an atomic context or irq context.
*
* Returns 0 in case of success, -ENOENT if the fence is already signaled
* and -EINVAL in case of error.
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index e744fd87c63c..a480af9581bd 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -48,6 +48,8 @@
* write operations) or N shared fences (read operations). The RCU
* mechanism is used to protect read access to fences from locked
* write-side updates.
+ *
+ * See struct dma_resv for more details.
*/
DEFINE_WD_CLASS(reservation_ww_class);
@@ -137,7 +139,11 @@ EXPORT_SYMBOL(dma_resv_fini);
* @num_fences: number of fences we want to add
*
* Should be called before dma_resv_add_shared_fence(). Must
- * be called with obj->lock held.
+ * be called with @obj locked through dma_resv_lock().
+ *
+ * Note that the preallocated slots need to be re-reserved if @obj is unlocked
+ * at any time before calling dma_resv_add_shared_fence(). This is validated
+ * when CONFIG_DEBUG_MUTEXES is enabled.
*
* RETURNS
* Zero for success, or -errno
@@ -234,8 +240,10 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max);
* @obj: the reservation object
* @fence: the shared fence to add
*
- * Add a fence to a shared slot, obj->lock must be held, and
+ * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
* dma_resv_reserve_shared() has been called.
+ *
+ * See also &dma_resv.fence for a discussion of the semantics.
*/
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
@@ -278,9 +286,11 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
/**
* dma_resv_add_excl_fence - Add an exclusive fence.
* @obj: the reservation object
- * @fence: the shared fence to add
+ * @fence: the exclusive fence to add
*
- * Add a fence to the exclusive slot. The obj->lock must be held.
+ * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
+ * Note that this function replaces all fences attached to @obj, see also
+ * &dma_resv.fence_excl for a discussion of the semantics.
*/
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
@@ -314,6 +324,106 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
EXPORT_SYMBOL(dma_resv_add_excl_fence);
/**
+ * dma_resv_iter_restart_unlocked - restart the unlocked iterator
+ * @cursor: The dma_resv_iter object to restart
+ *
+ * Restart the unlocked iteration by initializing the cursor object.
+ */
+static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
+{
+ cursor->seq = read_seqcount_begin(&cursor->obj->seq);
+ cursor->index = -1;
+ if (cursor->all_fences)
+ cursor->fences = dma_resv_shared_list(cursor->obj);
+ else
+ cursor->fences = NULL;
+ cursor->is_restarted = true;
+}
+
+/**
+ * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
+ * @cursor: cursor to record the current position
+ *
+ * Return all the fences in the dma_resv object which are not yet signaled.
+ * The returned fence has an extra local reference so it will stay alive.
+ * If a concurrent modify is detected the whole iteration is started over again.
+ */
+static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
+{
+ struct dma_resv *obj = cursor->obj;
+
+ do {
+ /* Drop the reference from the previous round */
+ dma_fence_put(cursor->fence);
+
+ if (cursor->index == -1) {
+ cursor->fence = dma_resv_excl_fence(obj);
+ cursor->index++;
+ if (!cursor->fence)
+ continue;
+
+ } else if (!cursor->fences ||
+ cursor->index >= cursor->fences->shared_count) {
+ cursor->fence = NULL;
+ break;
+
+ } else {
+ struct dma_resv_list *fences = cursor->fences;
+ unsigned int idx = cursor->index++;
+
+ cursor->fence = rcu_dereference(fences->shared[idx]);
+ }
+ cursor->fence = dma_fence_get_rcu(cursor->fence);
+ if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
+ break;
+ } while (true);
+}
+
+/**
+ * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
+ * @cursor: the cursor with the current position
+ *
+ * Returns the first fence from an unlocked dma_resv obj.
+ */
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
+{
+ rcu_read_lock();
+ do {
+ dma_resv_iter_restart_unlocked(cursor);
+ dma_resv_iter_walk_unlocked(cursor);
+ } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+ rcu_read_unlock();
+
+ return cursor->fence;
+}
+EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
+
+/**
+ * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
+ * @cursor: the cursor with the current position
+ *
+ * Returns the next fence from an unlocked dma_resv obj.
+ */
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
+{
+ bool restart;
+
+ rcu_read_lock();
+ cursor->is_restarted = false;
+ restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
+ do {
+ if (restart)
+ dma_resv_iter_restart_unlocked(cursor);
+ dma_resv_iter_walk_unlocked(cursor);
+ restart = true;
+ } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+ rcu_read_unlock();
+
+ return cursor->fence;
+}
+EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
+
+/**
* dma_resv_copy_fences - Copy all fences from src to dst.
* @dst: the destination reservation object
* @src: the source reservation object
@@ -322,74 +432,54 @@ EXPORT_SYMBOL(dma_resv_add_excl_fence);
*/
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
- struct dma_resv_list *src_list, *dst_list;
- struct dma_fence *old, *new;
- unsigned int i;
+ struct dma_resv_iter cursor;
+ struct dma_resv_list *list;
+ struct dma_fence *f, *excl;
dma_resv_assert_held(dst);
- rcu_read_lock();
- src_list = dma_resv_shared_list(src);
-
-retry:
- if (src_list) {
- unsigned int shared_count = src_list->shared_count;
-
- rcu_read_unlock();
+ list = NULL;
+ excl = NULL;
- dst_list = dma_resv_list_alloc(shared_count);
- if (!dst_list)
- return -ENOMEM;
+ dma_resv_iter_begin(&cursor, src, true);
+ dma_resv_for_each_fence_unlocked(&cursor, f) {
- rcu_read_lock();
- src_list = dma_resv_shared_list(src);
- if (!src_list || src_list->shared_count > shared_count) {
- kfree(dst_list);
- goto retry;
- }
+ if (dma_resv_iter_is_restarted(&cursor)) {
+ dma_resv_list_free(list);
+ dma_fence_put(excl);
- dst_list->shared_count = 0;
- for (i = 0; i < src_list->shared_count; ++i) {
- struct dma_fence __rcu **dst;
- struct dma_fence *fence;
+ if (cursor.fences) {
+ unsigned int cnt = cursor.fences->shared_count;
- fence = rcu_dereference(src_list->shared[i]);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &fence->flags))
- continue;
+ list = dma_resv_list_alloc(cnt);
+ if (!list) {
+ dma_resv_iter_end(&cursor);
+ return -ENOMEM;
+ }
- if (!dma_fence_get_rcu(fence)) {
- dma_resv_list_free(dst_list);
- src_list = dma_resv_shared_list(src);
- goto retry;
- }
+ list->shared_count = 0;
- if (dma_fence_is_signaled(fence)) {
- dma_fence_put(fence);
- continue;
+ } else {
+ list = NULL;
}
-
- dst = &dst_list->shared[dst_list->shared_count++];
- rcu_assign_pointer(*dst, fence);
+ excl = NULL;
}
- } else {
- dst_list = NULL;
- }
- new = dma_fence_get_rcu_safe(&src->fence_excl);
- rcu_read_unlock();
-
- src_list = dma_resv_shared_list(dst);
- old = dma_resv_excl_fence(dst);
+ dma_fence_get(f);
+ if (dma_resv_iter_is_exclusive(&cursor))
+ excl = f;
+ else
+ RCU_INIT_POINTER(list->shared[list->shared_count++], f);
+ }
+ dma_resv_iter_end(&cursor);
write_seqcount_begin(&dst->seq);
- /* write_seqcount_begin provides the necessary memory barrier */
- RCU_INIT_POINTER(dst->fence_excl, new);
- RCU_INIT_POINTER(dst->fence, dst_list);
+ excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
+ list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
write_seqcount_end(&dst->seq);
- dma_resv_list_free(src_list);
- dma_fence_put(old);
+ dma_resv_list_free(list);
+ dma_fence_put(excl);
return 0;
}
@@ -399,99 +489,61 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
* dma_resv_get_fences - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
- * @pfence_excl: the returned exclusive fence (or NULL)
- * @pshared_count: the number of shared fences returned
- * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * @fence_excl: the returned exclusive fence (or NULL)
+ * @shared_count: the number of shared fences returned
+ * @shared: the array of shared fence ptrs returned (array is krealloc'd to
* the required size, and must be freed by caller)
*
* Retrieve all fences from the reservation object. If the pointer for the
* exclusive fence is not specified the fence is put into the array of the
* shared fences as well. Returns either zero or -ENOMEM.
*/
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
- unsigned int *pshared_count,
- struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
+ unsigned int *shared_count, struct dma_fence ***shared)
{
- struct dma_fence **shared = NULL;
- struct dma_fence *fence_excl;
- unsigned int shared_count;
- int ret = 1;
-
- do {
- struct dma_resv_list *fobj;
- unsigned int i, seq;
- size_t sz = 0;
-
- shared_count = i = 0;
-
- rcu_read_lock();
- seq = read_seqcount_begin(&obj->seq);
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
- fence_excl = dma_resv_excl_fence(obj);
- if (fence_excl && !dma_fence_get_rcu(fence_excl))
- goto unlock;
+ *shared_count = 0;
+ *shared = NULL;
- fobj = dma_resv_shared_list(obj);
- if (fobj)
- sz += sizeof(*shared) * fobj->shared_max;
+ if (fence_excl)
+ *fence_excl = NULL;
- if (!pfence_excl && fence_excl)
- sz += sizeof(*shared);
+ dma_resv_iter_begin(&cursor, obj, true);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
- if (sz) {
- struct dma_fence **nshared;
+ if (dma_resv_iter_is_restarted(&cursor)) {
+ unsigned int count;
- nshared = krealloc(shared, sz,
- GFP_NOWAIT | __GFP_NOWARN);
- if (!nshared) {
- rcu_read_unlock();
+ while (*shared_count)
+ dma_fence_put((*shared)[--(*shared_count)]);
- dma_fence_put(fence_excl);
- fence_excl = NULL;
+ if (fence_excl)
+ dma_fence_put(*fence_excl);
- nshared = krealloc(shared, sz, GFP_KERNEL);
- if (nshared) {
- shared = nshared;
- continue;
- }
+ count = cursor.fences ? cursor.fences->shared_count : 0;
+ count += fence_excl ? 0 : 1;
- ret = -ENOMEM;
- break;
+ /* Re-allocate the array if needed */
+ *shared = krealloc_array(*shared, count,
+ sizeof(void *),
+ GFP_KERNEL);
+ if (count && !*shared) {
+ dma_resv_iter_end(&cursor);
+ return -ENOMEM;
}
- shared = nshared;
- shared_count = fobj ? fobj->shared_count : 0;
- for (i = 0; i < shared_count; ++i) {
- shared[i] = rcu_dereference(fobj->shared[i]);
- if (!dma_fence_get_rcu(shared[i]))
- break;
- }
- }
-
- if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
- while (i--)
- dma_fence_put(shared[i]);
- dma_fence_put(fence_excl);
- goto unlock;
}
- ret = 0;
-unlock:
- rcu_read_unlock();
- } while (ret);
-
- if (pfence_excl)
- *pfence_excl = fence_excl;
- else if (fence_excl)
- shared[shared_count++] = fence_excl;
-
- if (!shared_count) {
- kfree(shared);
- shared = NULL;
+ dma_fence_get(fence);
+ if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
+ *fence_excl = fence;
+ else
+ (*shared)[(*shared_count)++] = fence;
}
+ dma_resv_iter_end(&cursor);
- *pshared_count = shared_count;
- *pshared = shared;
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
@@ -513,94 +565,25 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
unsigned long timeout)
{
long ret = timeout ? timeout : 1;
- unsigned int seq, shared_count;
+ struct dma_resv_iter cursor;
struct dma_fence *fence;
- int i;
-
-retry:
- shared_count = 0;
- seq = read_seqcount_begin(&obj->seq);
- rcu_read_lock();
- i = -1;
-
- fence = dma_resv_excl_fence(obj);
- if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- if (!dma_fence_get_rcu(fence))
- goto unlock_retry;
-
- if (dma_fence_is_signaled(fence)) {
- dma_fence_put(fence);
- fence = NULL;
- }
-
- } else {
- fence = NULL;
- }
-
- if (wait_all) {
- struct dma_resv_list *fobj = dma_resv_shared_list(obj);
-
- if (fobj)
- shared_count = fobj->shared_count;
-
- for (i = 0; !fence && i < shared_count; ++i) {
- struct dma_fence *lfence;
- lfence = rcu_dereference(fobj->shared[i]);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &lfence->flags))
- continue;
-
- if (!dma_fence_get_rcu(lfence))
- goto unlock_retry;
-
- if (dma_fence_is_signaled(lfence)) {
- dma_fence_put(lfence);
- continue;
- }
+ dma_resv_iter_begin(&cursor, obj, wait_all);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
- fence = lfence;
- break;
+ ret = dma_fence_wait_timeout(fence, intr, ret);
+ if (ret <= 0) {
+ dma_resv_iter_end(&cursor);
+ return ret;
}
}
+ dma_resv_iter_end(&cursor);
- rcu_read_unlock();
- if (fence) {
- if (read_seqcount_retry(&obj->seq, seq)) {
- dma_fence_put(fence);
- goto retry;
- }
-
- ret = dma_fence_wait_timeout(fence, intr, ret);
- dma_fence_put(fence);
- if (ret > 0 && wait_all && (i + 1 < shared_count))
- goto retry;
- }
return ret;
-
-unlock_retry:
- rcu_read_unlock();
- goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
-static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
-{
- struct dma_fence *fence, *lfence = passed_fence;
- int ret = 1;
-
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
- fence = dma_fence_get_rcu(lfence);
- if (!fence)
- return -1;
-
- ret = !!dma_fence_is_signaled(fence);
- dma_fence_put(fence);
- }
- return ret;
-}
-
/**
* dma_resv_test_signaled - Test if a reservation object's fences have been
* signaled.
@@ -609,49 +592,24 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
* fence
*
* Callers are not required to hold specific locks, but may hold
- * dma_resv_lock() already
+ * dma_resv_lock() already.
+ *
* RETURNS
- * true if all fences signaled, else false
+ *
+ * True if all fences signaled, else false.
*/
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
+ struct dma_resv_iter cursor;
struct dma_fence *fence;
- unsigned int seq;
- int ret;
-
- rcu_read_lock();
-retry:
- ret = true;
- seq = read_seqcount_begin(&obj->seq);
-
- if (test_all) {
- struct dma_resv_list *fobj = dma_resv_shared_list(obj);
- unsigned int i, shared_count;
-
- shared_count = fobj ? fobj->shared_count : 0;
- for (i = 0; i < shared_count; ++i) {
- fence = rcu_dereference(fobj->shared[i]);
- ret = dma_resv_test_signaled_single(fence);
- if (ret < 0)
- goto retry;
- else if (!ret)
- break;
- }
- }
-
- fence = dma_resv_excl_fence(obj);
- if (ret && fence) {
- ret = dma_resv_test_signaled_single(fence);
- if (ret < 0)
- goto retry;
+ dma_resv_iter_begin(&cursor, obj, test_all);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ dma_resv_iter_end(&cursor);
+ return false;
}
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto retry;
-
- rcu_read_unlock();
- return ret;
+ dma_resv_iter_end(&cursor);
+ return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
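Taken together, the three conversions above all follow the same new iterator pattern. A condensed sketch using only names introduced by this patch (obj is a struct dma_resv *):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, obj, true /* include shared fences */);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;	/* concurrent update: restart state */
		count++;		/* fence holds a reference here */
	}
	dma_resv_iter_end(&cursor);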
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 23a7e74ef966..f57a39ddd063 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -40,11 +40,12 @@ struct dma_heap_attachment {
bool mapped;
};
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
+#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
| __GFP_NORETRY) & ~__GFP_RECLAIM) \
| __GFP_COMP)
-#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
-static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
/*
* The selection of the orders used for allocation (1MB, 64K, 4K) is designed
* to match with the sizes often found in IOMMUs. Using order 4 pages instead
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
deleted file mode 100644
index bfe14e94c488..000000000000
--- a/drivers/dma-buf/seqno-fence.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * seqno-fence, using a dma-buf to synchronize fencing
- *
- * Copyright (C) 2012 Texas Instruments
- * Copyright (C) 2012-2014 Canonical Ltd
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- */
-
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/seqno-fence.h>
-
-static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
-{
- struct seqno_fence *seqno_fence = to_seqno_fence(fence);
-
- return seqno_fence->ops->get_driver_name(fence);
-}
-
-static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
-{
- struct seqno_fence *seqno_fence = to_seqno_fence(fence);
-
- return seqno_fence->ops->get_timeline_name(fence);
-}
-
-static bool seqno_enable_signaling(struct dma_fence *fence)
-{
- struct seqno_fence *seqno_fence = to_seqno_fence(fence);
-
- return seqno_fence->ops->enable_signaling(fence);
-}
-
-static bool seqno_signaled(struct dma_fence *fence)
-{
- struct seqno_fence *seqno_fence = to_seqno_fence(fence);
-
- return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
-}
-
-static void seqno_release(struct dma_fence *fence)
-{
- struct seqno_fence *f = to_seqno_fence(fence);
-
- dma_buf_put(f->sync_buf);
- if (f->ops->release)
- f->ops->release(fence);
- else
- dma_fence_free(&f->base);
-}
-
-static signed long seqno_wait(struct dma_fence *fence, bool intr,
- signed long timeout)
-{
- struct seqno_fence *f = to_seqno_fence(fence);
-
- return f->ops->wait(fence, intr, timeout);
-}
-
-const struct dma_fence_ops seqno_fence_ops = {
- .get_driver_name = seqno_fence_get_driver_name,
- .get_timeline_name = seqno_fence_get_timeline_name,
- .enable_signaling = seqno_enable_signaling,
- .signaled = seqno_signaled,
- .wait = seqno_wait,
- .release = seqno_release,
-};
-EXPORT_SYMBOL(seqno_fence_ops);
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 4a2a796e348c..52d04641e361 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -742,8 +742,7 @@ pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
dma_addr_t dma;
int i;
- sw_desc = kzalloc(sizeof(*sw_desc) +
- nb_hw_desc * sizeof(struct pxad_desc_hw *),
+ sw_desc = kzalloc(struct_size(sw_desc, hw_desc, nb_hw_desc),
GFP_NOWAIT);
if (!sw_desc)
return NULL;
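struct_size() computes the size of a structure with a trailing flexible array while saturating on overflow, replacing the open-coded sizeof(*p) + n * sizeof(elem) arithmetic. A hypothetical sketch:

	struct my_desc {
		unsigned int n;
		struct item *items[];	/* flexible array member */
	};

	static struct my_desc *my_alloc(unsigned int n)
	{
		struct my_desc *p;

		p = kzalloc(struct_size(p, items, n), GFP_NOWAIT);
		if (p)
			p->n = n;
		return p;
	}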
diff --git a/drivers/edac/al_mc_edac.c b/drivers/edac/al_mc_edac.c
index 7d4f396c27b5..178b9e581a72 100644
--- a/drivers/edac/al_mc_edac.c
+++ b/drivers/edac/al_mc_edac.c
@@ -238,11 +238,9 @@ static int al_mc_edac_probe(struct platform_device *pdev)
if (!mci)
return -ENOMEM;
- ret = devm_add_action(&pdev->dev, devm_al_mc_edac_free, mci);
- if (ret) {
- edac_mc_free(mci);
+ ret = devm_add_action_or_reset(&pdev->dev, devm_al_mc_edac_free, mci);
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, mci);
al_mc = mci->pvt_info;
@@ -293,11 +291,9 @@ static int al_mc_edac_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_add_action(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
- if (ret) {
- edac_mc_del_mc(&pdev->dev);
+ ret = devm_add_action_or_reset(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
+ if (ret)
return ret;
- }
if (al_mc->irq_ue > 0) {
ret = devm_request_irq(&pdev->dev,
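devm_add_action_or_reset() runs the release action itself when the devres registration fails, which is what makes the manual edac_mc_free()/edac_mc_del_mc() calls on the error paths above redundant. Sketch with hypothetical names:

	ret = devm_add_action_or_reset(dev, my_release, obj);
	if (ret)
		return ret;	/* my_release(obj) has already run */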
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 99b06a3e8fb1..4fce75013674 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1065,12 +1065,14 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
#define CS_ODD_PRIMARY BIT(1)
#define CS_EVEN_SECONDARY BIT(2)
#define CS_ODD_SECONDARY BIT(3)
+#define CS_3R_INTERLEAVE BIT(4)
#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
+ u8 base, count = 0;
int cs_mode = 0;
if (csrow_enabled(2 * dimm, ctrl, pvt))
@@ -1083,6 +1085,20 @@ static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_SECONDARY;
+ /*
+ * 3 Rank interleaving support.
+ * There should be only three bases enabled and their two masks should
+ * be equal.
+ */
+ for_each_chip_select(base, ctrl, pvt)
+ count += csrow_enabled(base, ctrl, pvt);
+
+ if (count == 3 &&
+ pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
+ edac_dbg(1, "3R interleaving in use.\n");
+ cs_mode |= CS_3R_INTERLEAVE;
+ }
+
return cs_mode;
}
@@ -1891,10 +1907,14 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
*
* The MSB is the number of bits in the full mask because BIT[0] is
* always 0.
+ *
+ * In the special 3 Rank interleaving case, a single bit is flipped
+ * without swapping with the most significant bit. This can be handled
+ * by keeping the MSB where it is and ignoring the single zero bit.
*/
msb = fls(addr_mask_orig) - 1;
weight = hweight_long(addr_mask_orig);
- num_zero_bits = msb - weight;
+ num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
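A worked example of the mask math (values hypothetical): for addr_mask_orig = 0x77e (bits 10..8 and 6..1 set, bit 7 being the interleave hole), fls() returns 11, so msb = 10 and weight = 9. In the normal case num_zero_bits = 10 - 9 = 1 and the mask becomes GENMASK_ULL(9, 1) = 0x3fe; with CS_3R_INTERLEAVE set, num_zero_bits drops to 0 and the MSB is kept: GENMASK_ULL(10, 1) = 0x7fe.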
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 2c5975674723..9f82ca295353 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -66,14 +66,12 @@ unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
char *p = buf;
for (i = 0; i < mci->n_layers; i++) {
- n = snprintf(p, len, "%s %d ",
+ n = scnprintf(p, len, "%s %d ",
edac_layer_name[mci->layers[i].type],
dimm->location[i]);
p += n;
len -= n;
count += n;
- if (!len)
- break;
}
return count;
@@ -341,19 +339,16 @@ static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
*/
len = sizeof(dimm->label);
p = dimm->label;
- n = snprintf(p, len, "mc#%u", mci->mc_idx);
+ n = scnprintf(p, len, "mc#%u", mci->mc_idx);
p += n;
len -= n;
for (layer = 0; layer < mci->n_layers; layer++) {
- n = snprintf(p, len, "%s#%u",
- edac_layer_name[mci->layers[layer].type],
- pos[layer]);
+ n = scnprintf(p, len, "%s#%u",
+ edac_layer_name[mci->layers[layer].type],
+ pos[layer]);
p += n;
len -= n;
dimm->location[layer] = pos[layer];
-
- if (len <= 0)
- break;
}
/* Link it to the csrows old API data */
@@ -1027,12 +1022,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
const char *other_detail)
{
struct dimm_info *dimm;
- char *p;
+ char *p, *end;
int row = -1, chan = -1;
int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
int i, n_labels = 0;
struct edac_raw_error_desc *e = &mci->error_desc;
bool any_memory = true;
+ const char *prefix;
edac_dbg(3, "MC%d\n", mci->mc_idx);
@@ -1087,6 +1083,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
*/
p = e->label;
*p = '\0';
+ end = p + sizeof(e->label);
+ prefix = "";
mci_for_each_dimm(mci, dimm) {
if (top_layer >= 0 && top_layer != dimm->location[0])
@@ -1114,12 +1112,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
p = e->label;
*p = '\0';
} else {
- if (p != e->label) {
- strcpy(p, OTHER_LABEL);
- p += strlen(OTHER_LABEL);
- }
- strcpy(p, dimm->label);
- p += strlen(p);
+ p += scnprintf(p, end - p, "%s%s", prefix, dimm->label);
+ prefix = OTHER_LABEL;
}
/*
@@ -1141,25 +1135,25 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
}
if (any_memory)
- strcpy(e->label, "any memory");
+ strscpy(e->label, "any memory", sizeof(e->label));
else if (!*e->label)
- strcpy(e->label, "unknown memory");
+ strscpy(e->label, "unknown memory", sizeof(e->label));
edac_inc_csrow(e, row, chan);
/* Fill the RAM location data */
p = e->location;
+ end = p + sizeof(e->location);
+ prefix = "";
for (i = 0; i < mci->n_layers; i++) {
if (pos[i] < 0)
continue;
- p += sprintf(p, "%s:%d ",
- edac_layer_name[mci->layers[i].type],
- pos[i]);
+ p += scnprintf(p, end - p, "%s%s:%d", prefix,
+ edac_layer_name[mci->layers[i].type], pos[i]);
+ prefix = " ";
}
- if (p > e->location)
- *(p - 1) = '\0';
edac_raw_mc_handle_error(e);
}
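Unlike snprintf(), scnprintf() returns the number of bytes actually written (excluding the trailing NUL), so the pointer/length bookkeeping can never step past the buffer and the manual truncation checks above become unnecessary. For example:

	char buf[8];
	int n = snprintf(buf, sizeof(buf), "0123456789");  /* n == 10 */
	int m = scnprintf(buf, sizeof(buf), "0123456789"); /* m == 7  */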
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 2f9f1e74bb35..0a638c97702a 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -744,7 +744,7 @@ static ssize_t mci_ue_count_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ue_mc);
+ return sprintf(data, "%u\n", mci->ue_mc);
}
static ssize_t mci_ce_count_show(struct device *dev,
@@ -753,7 +753,7 @@ static ssize_t mci_ce_count_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ce_mc);
+ return sprintf(data, "%u\n", mci->ce_mc);
}
static ssize_t mci_ce_noinfo_show(struct device *dev,
@@ -762,7 +762,7 @@ static ssize_t mci_ce_noinfo_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ce_noinfo_count);
+ return sprintf(data, "%u\n", mci->ce_noinfo_count);
}
static ssize_t mci_ue_noinfo_show(struct device *dev,
@@ -771,7 +771,7 @@ static ssize_t mci_ue_noinfo_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ue_noinfo_count);
+ return sprintf(data, "%u\n", mci->ue_noinfo_count);
}
static ssize_t mci_seconds_show(struct device *dev,
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4c626fcd4dcb..1522d4aa2ca6 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1052,7 +1052,7 @@ static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
rc = ((reg << 6) | rc) << 26;
- return rc | 0x1ffffff;
+ return rc | 0x3ffffff;
}
static u64 knl_get_tolm(struct sbridge_pvt *pvt)
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 169f96e51c29..6971ded598de 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -245,11 +245,8 @@ static int ti_edac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg)) {
- edac_printk(KERN_ERR, EDAC_MOD_NAME,
- "EMIF controller regs not defined\n");
+ if (IS_ERR(reg))
return PTR_ERR(reg);
- }
layers[0].type = EDAC_MC_LAYER_ALL_MEM;
layers[0].size = 1;
@@ -281,8 +278,6 @@ static int ti_edac_probe(struct platform_device *pdev)
error_irq = platform_get_irq(pdev, 0);
if (error_irq < 0) {
ret = error_irq;
- edac_printk(KERN_ERR, EDAC_MOD_NAME,
- "EMIF irq number not defined.\n");
goto err;
}
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index c69d40ae5619..aab87c9b35c8 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -23,7 +23,7 @@ config EXTCON_ADC_JACK
config EXTCON_AXP288
tristate "X-Power AXP288 EXTCON support"
- depends on MFD_AXP20X && USB_SUPPORT && X86 && ACPI
+ depends on MFD_AXP20X && USB_SUPPORT && X86 && ACPI && IOSF_MBI
select USB_ROLE_SWITCH
help
Say Y here to enable support for USB peripheral detection
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index fdb31954cf2b..7c6d5857ff25 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -24,6 +24,7 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/iosf_mbi.h>
/* Power source status register */
#define PS_STAT_VBUS_TRIGGER BIT(0)
@@ -215,6 +216,10 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
unsigned int cable = info->previous_cable;
bool vbus_attach = false;
+ ret = iosf_mbi_block_punit_i2c_access();
+ if (ret < 0)
+ return ret;
+
vbus_attach = axp288_get_vbus_attach(info);
if (!vbus_attach)
goto no_vbus;
@@ -253,6 +258,8 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
}
no_vbus:
+ iosf_mbi_unblock_punit_i2c_access();
+
extcon_set_state_sync(info->edev, info->previous_cable, false);
if (info->previous_cable == EXTCON_CHG_USB_SDP)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
@@ -275,6 +282,8 @@ no_vbus:
return 0;
dev_det_ret:
+ iosf_mbi_unblock_punit_i2c_access();
+
if (ret < 0)
dev_err(info->dev, "failed to detect BC Mod\n");
@@ -305,13 +314,23 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static void axp288_extcon_enable(struct axp288_extcon_info *info)
+static int axp288_extcon_enable(struct axp288_extcon_info *info)
{
+ int ret = 0;
+
+ ret = iosf_mbi_block_punit_i2c_access();
+ if (ret < 0)
+ return ret;
+
regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
BC_GLOBAL_RUN, 0);
/* Enable the charger detection logic */
regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
BC_GLOBAL_RUN, BC_GLOBAL_RUN);
+
+ iosf_mbi_unblock_punit_i2c_access();
+
+ return ret;
}
static void axp288_put_role_sw(void *data)
@@ -384,10 +403,16 @@ static int axp288_extcon_probe(struct platform_device *pdev)
}
}
+ ret = iosf_mbi_block_punit_i2c_access();
+ if (ret < 0)
+ return ret;
+
info->vbus_attach = axp288_get_vbus_attach(info);
axp288_extcon_log_rsi(info);
+ iosf_mbi_unblock_punit_i2c_access();
+
/* Initialize extcon device */
info->edev = devm_extcon_dev_allocate(&pdev->dev,
axp288_extcon_cables);
@@ -441,7 +466,9 @@ static int axp288_extcon_probe(struct platform_device *pdev)
}
/* Start charger cable type detection */
- axp288_extcon_enable(info);
+ ret = axp288_extcon_enable(info);
+ if (ret < 0)
+ return ret;
device_init_wakeup(dev, true);
platform_set_drvdata(pdev, info);
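Every hunk in this file follows the same pattern: on these platforms the AXP288 PMIC shares its I2C bus with the P-Unit, so the driver must take the P-Unit semaphore with iosf_mbi_block_punit_i2c_access() before any burst of register accesses and release it on every exit path. A minimal sketch of the bracketing, where do_detect() is a hypothetical stand-in for the register work:

	/* Sketch of the P-Unit semaphore bracketing applied above;
	 * do_detect() is a hypothetical stand-in for code that touches
	 * AXP288 registers over the shared I2C bus. */
	#include <asm/iosf_mbi.h>

	static int run_locked(void *priv, int (*do_detect)(void *priv))
	{
		int ret;

		ret = iosf_mbi_block_punit_i2c_access();  /* take the semaphore */
		if (ret < 0)
			return ret;

		ret = do_detect(priv);                    /* registers safe now */

		iosf_mbi_unblock_punit_i2c_access();      /* release on all paths */
		return ret;
	}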
diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c
index fa01926c09f1..d7795607f693 100644
--- a/drivers/extcon/extcon-max3355.c
+++ b/drivers/extcon/extcon-max3355.c
@@ -7,7 +7,6 @@
*/
#include <linux/extcon-provider.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index f06be6d4e2a9..0cb440bdd5cb 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -7,18 +7,17 @@
*/
#include <linux/extcon-provider.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/mod_devicetable.h>
#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
index 805af73b4152..6ba3d89b106d 100644
--- a/drivers/extcon/extcon-usbc-tusb320.c
+++ b/drivers/extcon/extcon-usbc-tusb320.c
@@ -19,15 +19,42 @@
#define TUSB320_REG9_ATTACHED_STATE_MASK 0x3
#define TUSB320_REG9_CABLE_DIRECTION BIT(5)
#define TUSB320_REG9_INTERRUPT_STATUS BIT(4)
-#define TUSB320_ATTACHED_STATE_NONE 0x0
-#define TUSB320_ATTACHED_STATE_DFP 0x1
-#define TUSB320_ATTACHED_STATE_UFP 0x2
-#define TUSB320_ATTACHED_STATE_ACC 0x3
+
+#define TUSB320_REGA 0xa
+#define TUSB320L_REGA_DISABLE_TERM BIT(0)
+#define TUSB320_REGA_I2C_SOFT_RESET BIT(3)
+#define TUSB320_REGA_MODE_SELECT_SHIFT 4
+#define TUSB320_REGA_MODE_SELECT_MASK 0x3
+
+#define TUSB320L_REGA0_REVISION 0xa0
+
+enum tusb320_attached_state {
+ TUSB320_ATTACHED_STATE_NONE,
+ TUSB320_ATTACHED_STATE_DFP,
+ TUSB320_ATTACHED_STATE_UFP,
+ TUSB320_ATTACHED_STATE_ACC,
+};
+
+enum tusb320_mode {
+ TUSB320_MODE_PORT,
+ TUSB320_MODE_UFP,
+ TUSB320_MODE_DFP,
+ TUSB320_MODE_DRP,
+};
+
+struct tusb320_priv;
+
+struct tusb320_ops {
+ int (*set_mode)(struct tusb320_priv *priv, enum tusb320_mode mode);
+ int (*get_revision)(struct tusb320_priv *priv, unsigned int *revision);
+};
struct tusb320_priv {
struct device *dev;
struct regmap *regmap;
struct extcon_dev *edev;
+ struct tusb320_ops *ops;
+ enum tusb320_attached_state state;
};
static const char * const tusb_attached_states[] = {
@@ -62,6 +89,101 @@ static int tusb320_check_signature(struct tusb320_priv *priv)
return 0;
}
+static int tusb320_set_mode(struct tusb320_priv *priv, enum tusb320_mode mode)
+{
+ int ret;
+
+ /* Mode cannot be changed while cable is attached */
+ if (priv->state != TUSB320_ATTACHED_STATE_NONE)
+ return -EBUSY;
+
+ /* Write mode */
+ ret = regmap_write_bits(priv->regmap, TUSB320_REGA,
+ TUSB320_REGA_MODE_SELECT_MASK << TUSB320_REGA_MODE_SELECT_SHIFT,
+ mode << TUSB320_REGA_MODE_SELECT_SHIFT);
+ if (ret) {
+ dev_err(priv->dev, "failed to write mode: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tusb320l_set_mode(struct tusb320_priv *priv, enum tusb320_mode mode)
+{
+ int ret;
+
+ /* Disable CC state machine */
+ ret = regmap_write_bits(priv->regmap, TUSB320_REGA,
+ TUSB320L_REGA_DISABLE_TERM, 1);
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to disable CC state machine: %d\n", ret);
+ return ret;
+ }
+
+ /* Write mode */
+ ret = regmap_write_bits(priv->regmap, TUSB320_REGA,
+ TUSB320_REGA_MODE_SELECT_MASK << TUSB320_REGA_MODE_SELECT_SHIFT,
+ mode << TUSB320_REGA_MODE_SELECT_SHIFT);
+ if (ret) {
+ dev_err(priv->dev, "failed to write mode: %d\n", ret);
+ goto err;
+ }
+
+ msleep(5);
+err:
+ /* Re-enable CC state machine */
+ ret = regmap_write_bits(priv->regmap, TUSB320_REGA,
+ TUSB320L_REGA_DISABLE_TERM, 0);
+ if (ret)
+ dev_err(priv->dev,
+ "failed to re-enable CC state machine: %d\n", ret);
+
+ return ret;
+}
+
+static int tusb320_reset(struct tusb320_priv *priv)
+{
+ int ret;
+
+ /* Set mode to default (follow PORT pin) */
+ ret = priv->ops->set_mode(priv, TUSB320_MODE_PORT);
+ if (ret && ret != -EBUSY) {
+ dev_err(priv->dev,
+ "failed to set mode to PORT: %d\n", ret);
+ return ret;
+ }
+
+ /* Perform soft reset */
+ ret = regmap_write_bits(priv->regmap, TUSB320_REGA,
+ TUSB320_REGA_I2C_SOFT_RESET, 1);
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to write soft reset bit: %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for chip to go through reset */
+ msleep(95);
+
+ return 0;
+}
+
+static int tusb320l_get_revision(struct tusb320_priv *priv, unsigned int *revision)
+{
+ return regmap_read(priv->regmap, TUSB320L_REGA0_REVISION, revision);
+}
+
+static struct tusb320_ops tusb320_ops = {
+ .set_mode = tusb320_set_mode,
+};
+
+static struct tusb320_ops tusb320l_ops = {
+ .set_mode = tusb320l_set_mode,
+ .get_revision = tusb320l_get_revision,
+};
+
static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
{
struct tusb320_priv *priv = dev_id;
@@ -96,6 +218,8 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
extcon_sync(priv->edev, EXTCON_USB);
extcon_sync(priv->edev, EXTCON_USB_HOST);
+ priv->state = state;
+
regmap_write(priv->regmap, TUSB320_REG9, reg);
return IRQ_HANDLED;
@@ -110,6 +234,8 @@ static int tusb320_extcon_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tusb320_priv *priv;
+ const void *match_data;
+ unsigned int revision;
int ret;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
@@ -125,12 +251,27 @@ static int tusb320_extcon_probe(struct i2c_client *client,
if (ret)
return ret;
+ match_data = device_get_match_data(&client->dev);
+ if (!match_data)
+ return -EINVAL;
+
+ priv->ops = (struct tusb320_ops *)match_data;
+
priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable);
if (IS_ERR(priv->edev)) {
dev_err(priv->dev, "failed to allocate extcon device\n");
return PTR_ERR(priv->edev);
}
+ if (priv->ops->get_revision) {
+ ret = priv->ops->get_revision(priv, &revision);
+ if (ret)
+ dev_warn(priv->dev,
+ "failed to read revision register: %d\n", ret);
+ else
+ dev_info(priv->dev, "chip revision %d\n", revision);
+ }
+
ret = devm_extcon_dev_register(priv->dev, priv->edev);
if (ret < 0) {
dev_err(priv->dev, "failed to register extcon device\n");
@@ -145,6 +286,17 @@ static int tusb320_extcon_probe(struct i2c_client *client,
/* update initial state */
tusb320_irq_handler(client->irq, priv);
+ /* Reset chip to its default state */
+ ret = tusb320_reset(priv);
+ if (ret)
+ dev_warn(priv->dev, "failed to reset chip: %d\n", ret);
+ else
+ /*
+ * State and polarity might change after a reset, so update
+ * them again and make sure the interrupt status bit is cleared.
+ */
+ tusb320_irq_handler(client->irq, priv);
+
ret = devm_request_threaded_irq(priv->dev, client->irq, NULL,
tusb320_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
@@ -154,7 +306,8 @@ static int tusb320_extcon_probe(struct i2c_client *client,
}
static const struct of_device_id tusb320_extcon_dt_match[] = {
- { .compatible = "ti,tusb320", },
+ { .compatible = "ti,tusb320", .data = &tusb320_ops, },
+ { .compatible = "ti,tusb320l", .data = &tusb320l_ops, },
{ }
};
MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match);
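The dispatch pattern above — each compatible string carries a pointer to its variant's ops table, which probe recovers via device_get_match_data() — generalizes to any multi-variant driver. A reduced sketch with hypothetical names:

	/* Per-compatible ops dispatch; all names here are hypothetical. */
	#include <linux/i2c.h>
	#include <linux/mod_devicetable.h>
	#include <linux/property.h>

	struct chip_ops {
		int (*set_mode)(struct i2c_client *client, int mode);
	};

	static int v1_set_mode(struct i2c_client *client, int mode) { return 0; }
	static int v2_set_mode(struct i2c_client *client, int mode) { return 0; }

	static const struct chip_ops chip_v1_ops = { .set_mode = v1_set_mode };
	static const struct chip_ops chip_v2_ops = { .set_mode = v2_set_mode };

	static const struct of_device_id chip_dt_match[] = {
		{ .compatible = "vendor,chip",   .data = &chip_v1_ops },
		{ .compatible = "vendor,chip-l", .data = &chip_v2_ops },
		{ }
	};

	static int chip_probe(struct i2c_client *client)
	{
		const struct chip_ops *ops = device_get_match_data(&client->dev);

		if (!ops)
			return -EINVAL;
		return ops->set_mode(client, 0);	/* dispatch per variant */
	}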
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index fb6c651214f3..9f89c17730b1 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
@@ -953,11 +954,25 @@ static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
return DMA_FROM_DEVICE;
}
+static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
+ fw_iso_mc_callback_t callback,
+ void *callback_data)
+{
+ struct fw_iso_context *ctx;
+
+ ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
+ 0, 0, 0, NULL, callback_data);
+ if (!IS_ERR(ctx))
+ ctx->callback.mc = callback;
+
+ return ctx;
+}
+
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
- fw_iso_callback_t cb;
+ union fw_iso_callback cb;
int ret;
BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
@@ -970,7 +985,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
if (a->speed > SCODE_3200 || a->channel > 63)
return -EINVAL;
- cb = iso_callback;
+ cb.sc = iso_callback;
break;
case FW_ISO_CONTEXT_RECEIVE:
@@ -978,19 +993,24 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
a->channel > 63)
return -EINVAL;
- cb = iso_callback;
+ cb.sc = iso_callback;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- cb = (fw_iso_callback_t)iso_mc_callback;
+ cb.mc = iso_mc_callback;
break;
default:
return -EINVAL;
}
- context = fw_iso_context_create(client->device->card, a->type,
- a->channel, a->speed, a->header_size, cb, client);
+ if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+ context = fw_iso_mc_context_create(client->device->card, cb.mc,
+ client);
+ else
+ context = fw_iso_context_create(client->device->card, a->type,
+ a->channel, a->speed,
+ a->header_size, cb.sc, client);
if (IS_ERR(context))
return PTR_ERR(context);
if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
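The single-channel and multichannel ISO callbacks have different prototypes, and the old cast from one function-pointer type to the other is exactly the kind of indirect-call type mismatch that Control Flow Integrity rejects; keeping both pointers in a union lets each call go through the pointer's true type. A reduced userspace sketch of the idiom (types hypothetical):

	/* A union of correctly-typed function pointers replaces a cast that
	 * would trip CFI. The callback types here are hypothetical. */
	#include <stdio.h>

	typedef void (*cb_single_t)(int channel, void *data);
	typedef void (*cb_multi_t)(unsigned long long channel_mask, void *data);

	union iso_cb {
		cb_single_t sc;
		cb_multi_t mc;
	};

	static void on_single(int channel, void *data) { printf("ch %d\n", channel); }
	static void on_multi(unsigned long long mask, void *data) { printf("mask 0x%llx\n", mask); }

	static void dispatch(int multichannel, union iso_cb cb, void *data)
	{
		if (multichannel)
			cb.mc(~0ULL, data);	/* called through its real type */
		else
			cb.sc(0, data);
	}

	int main(void)
	{
		union iso_cb cb = { .sc = on_single };

		dispatch(0, cb, NULL);
		cb.mc = on_multi;
		dispatch(1, cb, NULL);
		return 0;
	}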
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 4c3fd2eed1da..dcc141068128 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1443,8 +1443,8 @@ static int fwnet_probe(struct fw_unit *unit,
struct net_device *net;
bool allocated_netdev = false;
struct fwnet_device *dev;
+ union fwnet_hwaddr ha;
int ret;
- union fwnet_hwaddr *ha;
mutex_lock(&fwnet_device_mutex);
@@ -1491,12 +1491,12 @@ static int fwnet_probe(struct fw_unit *unit,
net->max_mtu = 4096U;
/* Set our hardware address while we're at it */
- ha = (union fwnet_hwaddr *)net->dev_addr;
- put_unaligned_be64(card->guid, &ha->uc.uniq_id);
- ha->uc.max_rec = dev->card->max_receive;
- ha->uc.sspd = dev->card->link_speed;
- put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi);
- put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo);
+ ha.uc.uniq_id = cpu_to_be64(card->guid);
+ ha.uc.max_rec = dev->card->max_receive;
+ ha.uc.sspd = dev->card->link_speed;
+ ha.uc.fifo_hi = cpu_to_be16(dev->local_fifo >> 32);
+ ha.uc.fifo_lo = cpu_to_be32(dev->local_fifo & 0xffffffff);
+ dev_addr_set(net, ha.u);
memset(net->broadcast, -1, net->addr_len);
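This hunk adapts to net_device->dev_addr becoming const: instead of casting the pointer and filling fields in place, the address is assembled in a local union and installed with dev_addr_set(). A reduced sketch of the pattern (the 16-byte layout below is hypothetical):

	/* Assemble the hardware address locally, then install it via
	 * dev_addr_set() rather than writing through net->dev_addr.
	 * The field layout below is hypothetical. */
	#include <linux/netdevice.h>

	struct hwaddr {
		__be64 uniq_id;
		u8 max_rec;
		u8 sspd;
		__be16 fifo_hi;
		__be32 fifo_lo;
	} __packed;

	static void install_addr(struct net_device *net, u64 guid, u64 fifo)
	{
		struct hwaddr ha = {
			.uniq_id = cpu_to_be64(guid),
			.fifo_hi = cpu_to_be16(fifo >> 32),
			.fifo_lo = cpu_to_be32(fifo & 0xffffffff),
		};

		dev_addr_set(net, (const u8 *)&ha);
	}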
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index cda7d7162cbb..75cb91055c17 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -295,6 +295,7 @@ config TURRIS_MOX_RWTM
source "drivers/firmware/arm_ffa/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
+source "drivers/firmware/cirrus/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
source "drivers/firmware/imx/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 5ced0673d94b..4e58cb474a68 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
obj-y += arm_ffa/
obj-y += arm_scmi/
obj-y += broadcom/
+obj-y += cirrus/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index c9fb56afbcb4..14f900047ac0 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -167,6 +167,27 @@ struct ffa_drv_info {
static struct ffa_drv_info *drv_info;
+/*
+ * The driver must be able to support all the versions from the earliest
+ * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
+ * The specification states that if firmware supports an FFA implementation
+ * that is incompatible with and at a greater version number than specified
+ * by the caller (FFA_DRIVER_VERSION passed as parameter to FFA_VERSION),
+ * it must return the NOT_SUPPORTED error code.
+ */
+static u32 ffa_compatible_version_find(u32 version)
+{
+ u16 major = MAJOR_VERSION(version), minor = MINOR_VERSION(version);
+ u16 drv_major = MAJOR_VERSION(FFA_DRIVER_VERSION);
+ u16 drv_minor = MINOR_VERSION(FFA_DRIVER_VERSION);
+
+ if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
+ return version;
+
+ pr_info("Firmware version higher than driver version, downgrading\n");
+ return FFA_DRIVER_VERSION;
+}
+
static int ffa_version_check(u32 *version)
{
ffa_value_t ver;
@@ -180,15 +201,20 @@ static int ffa_version_check(u32 *version)
return -EOPNOTSUPP;
}
- if (ver.a0 < FFA_MIN_VERSION || ver.a0 > FFA_DRIVER_VERSION) {
- pr_err("Incompatible version %d.%d found\n",
- MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0));
+ if (ver.a0 < FFA_MIN_VERSION) {
+ pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
+ MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0),
+ MAJOR_VERSION(FFA_MIN_VERSION),
+ MINOR_VERSION(FFA_MIN_VERSION));
return -EINVAL;
}
- *version = ver.a0;
- pr_info("Version %d.%d found\n", MAJOR_VERSION(ver.a0),
+ pr_info("Driver version %d.%d\n", MAJOR_VERSION(FFA_DRIVER_VERSION),
+ MINOR_VERSION(FFA_DRIVER_VERSION));
+ pr_info("Firmware version %d.%d found\n", MAJOR_VERSION(ver.a0),
MINOR_VERSION(ver.a0));
+ *version = ffa_compatible_version_find(ver.a0);
+
return 0;
}
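FFA_VERSION packs the major number in bits [30:16] and the minor in bits [15:0], so ffa_compatible_version_find() keeps the firmware's version whenever it is less than or equal to the driver's and otherwise downgrades to FFA_DRIVER_VERSION. A worked userspace example, assuming that packing:

	/* Version negotiation, assuming the FF-A packing of major in
	 * bits [30:16] and minor in bits [15:0]. */
	#include <stdio.h>

	#define MAKE_VER(maj, min)	(((maj) << 16) | (min))
	#define MAJOR(v)		(((v) >> 16) & 0x7fff)
	#define MINOR(v)		((v) & 0xffff)

	static unsigned int compatible(unsigned int fw, unsigned int drv)
	{
		if (MAJOR(fw) < MAJOR(drv) ||
		    (MAJOR(fw) == MAJOR(drv) && MINOR(fw) <= MINOR(drv)))
			return fw;	/* firmware version usable as-is */
		return drv;		/* downgrade to the driver version */
	}

	int main(void)
	{
		/* v1.1 firmware, v1.0 driver -> stick with 1.0 */
		printf("0x%x\n", compatible(MAKE_VER(1, 1), MAKE_VER(1, 0)));
		/* v1.0 firmware, v1.1 driver -> firmware's 1.0 is fine */
		printf("0x%x\n", compatible(MAKE_VER(1, 0), MAKE_VER(1, 1)));
		return 0;
	}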
@@ -586,6 +612,22 @@ ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args)
return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
}
+static int
+ffa_memory_lend(struct ffa_device *dev, struct ffa_mem_ops_args *args)
+{
+ /* Note that upon a successful MEM_LEND request the caller
+ * must ensure that the memory region specified is not accessed
+ * until a successful MEM_RECLAIM call has been made.
+ * On systems with a hypervisor present this will be enforced;
+ * however, on systems without a hypervisor the responsibility
+ * falls to the calling kernel driver to prevent access.
+ */
+ if (dev->mode_32bit)
+ return ffa_memory_ops(FFA_MEM_LEND, args);
+
+ return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
+}
+
static const struct ffa_dev_ops ffa_ops = {
.api_version_get = ffa_api_version_get,
.partition_info_get = ffa_partition_info_get,
@@ -593,6 +635,7 @@ static const struct ffa_dev_ops ffa_ops = {
.sync_send_receive = ffa_sync_send_receive,
.memory_reclaim = ffa_memory_reclaim,
.memory_share = ffa_memory_share,
+ .memory_lend = ffa_memory_lend,
};
const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
new file mode 100644
index 000000000000..f9503cb481d2
--- /dev/null
+++ b/drivers/firmware/cirrus/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CS_DSP
+ tristate
+ default n
diff --git a/drivers/firmware/cirrus/Makefile b/drivers/firmware/cirrus/Makefile
new file mode 100644
index 000000000000..f074e2638c9c
--- /dev/null
+++ b/drivers/firmware/cirrus/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_CS_DSP) += cs_dsp.o
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
new file mode 100644
index 000000000000..948dd8382686
--- /dev/null
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -0,0 +1,3109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * cs_dsp.c -- Cirrus Logic DSP firmware support
+ *
+ * Based on sound/soc/codecs/wm_adsp.c
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ * Copyright (C) 2015-2021 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/wmfw.h>
+
+#define cs_dsp_err(_dsp, fmt, ...) \
+ dev_err(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+#define cs_dsp_warn(_dsp, fmt, ...) \
+ dev_warn(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+#define cs_dsp_info(_dsp, fmt, ...) \
+ dev_info(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+#define cs_dsp_dbg(_dsp, fmt, ...) \
+ dev_dbg(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+
+#define ADSP1_CONTROL_1 0x00
+#define ADSP1_CONTROL_2 0x02
+#define ADSP1_CONTROL_3 0x03
+#define ADSP1_CONTROL_4 0x04
+#define ADSP1_CONTROL_5 0x06
+#define ADSP1_CONTROL_6 0x07
+#define ADSP1_CONTROL_7 0x08
+#define ADSP1_CONTROL_8 0x09
+#define ADSP1_CONTROL_9 0x0A
+#define ADSP1_CONTROL_10 0x0B
+#define ADSP1_CONTROL_11 0x0C
+#define ADSP1_CONTROL_12 0x0D
+#define ADSP1_CONTROL_13 0x0F
+#define ADSP1_CONTROL_14 0x10
+#define ADSP1_CONTROL_15 0x11
+#define ADSP1_CONTROL_16 0x12
+#define ADSP1_CONTROL_17 0x13
+#define ADSP1_CONTROL_18 0x14
+#define ADSP1_CONTROL_19 0x16
+#define ADSP1_CONTROL_20 0x17
+#define ADSP1_CONTROL_21 0x18
+#define ADSP1_CONTROL_22 0x1A
+#define ADSP1_CONTROL_23 0x1B
+#define ADSP1_CONTROL_24 0x1C
+#define ADSP1_CONTROL_25 0x1E
+#define ADSP1_CONTROL_26 0x20
+#define ADSP1_CONTROL_27 0x21
+#define ADSP1_CONTROL_28 0x22
+#define ADSP1_CONTROL_29 0x23
+#define ADSP1_CONTROL_30 0x24
+#define ADSP1_CONTROL_31 0x26
+
+/*
+ * ADSP1 Control 19
+ */
+#define ADSP1_WDMA_BUFFER_LENGTH_MASK 0x00FF /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+#define ADSP1_WDMA_BUFFER_LENGTH_SHIFT 0 /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+#define ADSP1_WDMA_BUFFER_LENGTH_WIDTH 8 /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+
+/*
+ * ADSP1 Control 30
+ */
+#define ADSP1_DBG_CLK_ENA 0x0008 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_MASK 0x0008 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_SHIFT 3 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_WIDTH 1 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
+#define ADSP1_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
+#define ADSP1_START 0x0001 /* DSP1_START */
+#define ADSP1_START_MASK 0x0001 /* DSP1_START */
+#define ADSP1_START_SHIFT 0 /* DSP1_START */
+#define ADSP1_START_WIDTH 1 /* DSP1_START */
+
+/*
+ * ADSP1 Control 31
+ */
+#define ADSP1_CLK_SEL_MASK 0x0007 /* CLK_SEL_ENA */
+#define ADSP1_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */
+#define ADSP1_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
+
+#define ADSP2_CONTROL 0x0
+#define ADSP2_CLOCKING 0x1
+#define ADSP2V2_CLOCKING 0x2
+#define ADSP2_STATUS1 0x4
+#define ADSP2_WDMA_CONFIG_1 0x30
+#define ADSP2_WDMA_CONFIG_2 0x31
+#define ADSP2V2_WDMA_CONFIG_2 0x32
+#define ADSP2_RDMA_CONFIG_1 0x34
+
+#define ADSP2_SCRATCH0 0x40
+#define ADSP2_SCRATCH1 0x41
+#define ADSP2_SCRATCH2 0x42
+#define ADSP2_SCRATCH3 0x43
+
+#define ADSP2V2_SCRATCH0_1 0x40
+#define ADSP2V2_SCRATCH2_3 0x42
+
+/*
+ * ADSP2 Control
+ */
+#define ADSP2_MEM_ENA 0x0010 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_MASK 0x0010 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_SHIFT 4 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_WIDTH 1 /* DSP1_MEM_ENA */
+#define ADSP2_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
+#define ADSP2_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
+#define ADSP2_START 0x0001 /* DSP1_START */
+#define ADSP2_START_MASK 0x0001 /* DSP1_START */
+#define ADSP2_START_SHIFT 0 /* DSP1_START */
+#define ADSP2_START_WIDTH 1 /* DSP1_START */
+
+/*
+ * ADSP2 clocking
+ */
+#define ADSP2_CLK_SEL_MASK 0x0007 /* CLK_SEL_ENA */
+#define ADSP2_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */
+#define ADSP2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
+
+/*
+ * ADSP2V2 clocking
+ */
+#define ADSP2V2_CLK_SEL_MASK 0x70000 /* CLK_SEL_ENA */
+#define ADSP2V2_CLK_SEL_SHIFT 16 /* CLK_SEL_ENA */
+#define ADSP2V2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
+
+#define ADSP2V2_RATE_MASK 0x7800 /* DSP_RATE */
+#define ADSP2V2_RATE_SHIFT 11 /* DSP_RATE */
+#define ADSP2V2_RATE_WIDTH 4 /* DSP_RATE */
+
+/*
+ * ADSP2 Status 1
+ */
+#define ADSP2_RAM_RDY 0x0001
+#define ADSP2_RAM_RDY_MASK 0x0001
+#define ADSP2_RAM_RDY_SHIFT 0
+#define ADSP2_RAM_RDY_WIDTH 1
+
+/*
+ * ADSP2 Lock support
+ */
+#define ADSP2_LOCK_CODE_0 0x5555
+#define ADSP2_LOCK_CODE_1 0xAAAA
+
+#define ADSP2_WATCHDOG 0x0A
+#define ADSP2_BUS_ERR_ADDR 0x52
+#define ADSP2_REGION_LOCK_STATUS 0x64
+#define ADSP2_LOCK_REGION_1_LOCK_REGION_0 0x66
+#define ADSP2_LOCK_REGION_3_LOCK_REGION_2 0x68
+#define ADSP2_LOCK_REGION_5_LOCK_REGION_4 0x6A
+#define ADSP2_LOCK_REGION_7_LOCK_REGION_6 0x6C
+#define ADSP2_LOCK_REGION_9_LOCK_REGION_8 0x6E
+#define ADSP2_LOCK_REGION_CTRL 0x7A
+#define ADSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR 0x7C
+
+#define ADSP2_REGION_LOCK_ERR_MASK 0x8000
+#define ADSP2_ADDR_ERR_MASK 0x4000
+#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
+#define ADSP2_CTRL_ERR_PAUSE_ENA 0x0002
+#define ADSP2_CTRL_ERR_EINT 0x0001
+
+#define ADSP2_BUS_ERR_ADDR_MASK 0x00FFFFFF
+#define ADSP2_XMEM_ERR_ADDR_MASK 0x0000FFFF
+#define ADSP2_PMEM_ERR_ADDR_MASK 0x7FFF0000
+#define ADSP2_PMEM_ERR_ADDR_SHIFT 16
+#define ADSP2_WDT_ENA_MASK 0xFFFFFFFD
+
+#define ADSP2_LOCK_REGION_SHIFT 16
+
+/*
+ * Event control messages
+ */
+#define CS_DSP_FW_EVENT_SHUTDOWN 0x000001
+
+/*
+ * HALO system info
+ */
+#define HALO_AHBM_WINDOW_DEBUG_0 0x02040
+#define HALO_AHBM_WINDOW_DEBUG_1 0x02044
+
+/*
+ * HALO core
+ */
+#define HALO_SCRATCH1 0x005c0
+#define HALO_SCRATCH2 0x005c8
+#define HALO_SCRATCH3 0x005d0
+#define HALO_SCRATCH4 0x005d8
+#define HALO_CCM_CORE_CONTROL 0x41000
+#define HALO_CORE_SOFT_RESET 0x00010
+#define HALO_WDT_CONTROL 0x47000
+
+/*
+ * HALO MPU banks
+ */
+#define HALO_MPU_XMEM_ACCESS_0 0x43000
+#define HALO_MPU_YMEM_ACCESS_0 0x43004
+#define HALO_MPU_WINDOW_ACCESS_0 0x43008
+#define HALO_MPU_XREG_ACCESS_0 0x4300C
+#define HALO_MPU_YREG_ACCESS_0 0x43014
+#define HALO_MPU_XMEM_ACCESS_1 0x43018
+#define HALO_MPU_YMEM_ACCESS_1 0x4301C
+#define HALO_MPU_WINDOW_ACCESS_1 0x43020
+#define HALO_MPU_XREG_ACCESS_1 0x43024
+#define HALO_MPU_YREG_ACCESS_1 0x4302C
+#define HALO_MPU_XMEM_ACCESS_2 0x43030
+#define HALO_MPU_YMEM_ACCESS_2 0x43034
+#define HALO_MPU_WINDOW_ACCESS_2 0x43038
+#define HALO_MPU_XREG_ACCESS_2 0x4303C
+#define HALO_MPU_YREG_ACCESS_2 0x43044
+#define HALO_MPU_XMEM_ACCESS_3 0x43048
+#define HALO_MPU_YMEM_ACCESS_3 0x4304C
+#define HALO_MPU_WINDOW_ACCESS_3 0x43050
+#define HALO_MPU_XREG_ACCESS_3 0x43054
+#define HALO_MPU_YREG_ACCESS_3 0x4305C
+#define HALO_MPU_XM_VIO_ADDR 0x43100
+#define HALO_MPU_XM_VIO_STATUS 0x43104
+#define HALO_MPU_YM_VIO_ADDR 0x43108
+#define HALO_MPU_YM_VIO_STATUS 0x4310C
+#define HALO_MPU_PM_VIO_ADDR 0x43110
+#define HALO_MPU_PM_VIO_STATUS 0x43114
+#define HALO_MPU_LOCK_CONFIG 0x43140
+
+/*
+ * HALO_AHBM_WINDOW_DEBUG_1
+ */
+#define HALO_AHBM_CORE_ERR_ADDR_MASK 0x0fffff00
+#define HALO_AHBM_CORE_ERR_ADDR_SHIFT 8
+#define HALO_AHBM_FLAGS_ERR_MASK 0x000000ff
+
+/*
+ * HALO_CCM_CORE_CONTROL
+ */
+#define HALO_CORE_RESET 0x00000200
+#define HALO_CORE_EN 0x00000001
+
+/*
+ * HALO_CORE_SOFT_RESET
+ */
+#define HALO_CORE_SOFT_RESET_MASK 0x00000001
+
+/*
+ * HALO_WDT_CONTROL
+ */
+#define HALO_WDT_EN_MASK 0x00000001
+
+/*
+ * HALO_MPU_?M_VIO_STATUS
+ */
+#define HALO_MPU_VIO_STS_MASK 0x007e0000
+#define HALO_MPU_VIO_STS_SHIFT 17
+#define HALO_MPU_VIO_ERR_WR_MASK 0x00008000
+#define HALO_MPU_VIO_ERR_SRC_MASK 0x00007fff
+#define HALO_MPU_VIO_ERR_SRC_SHIFT 0
+
+struct cs_dsp_ops {
+ bool (*validate_version)(struct cs_dsp *dsp, unsigned int version);
+ unsigned int (*parse_sizes)(struct cs_dsp *dsp,
+ const char * const file,
+ unsigned int pos,
+ const struct firmware *firmware);
+ int (*setup_algs)(struct cs_dsp *dsp);
+ unsigned int (*region_to_reg)(struct cs_dsp_region const *mem,
+ unsigned int offset);
+
+ void (*show_fw_status)(struct cs_dsp *dsp);
+ void (*stop_watchdog)(struct cs_dsp *dsp);
+
+ int (*enable_memory)(struct cs_dsp *dsp);
+ void (*disable_memory)(struct cs_dsp *dsp);
+ int (*lock_memory)(struct cs_dsp *dsp, unsigned int lock_regions);
+
+ int (*enable_core)(struct cs_dsp *dsp);
+ void (*disable_core)(struct cs_dsp *dsp);
+
+ int (*start_core)(struct cs_dsp *dsp);
+ void (*stop_core)(struct cs_dsp *dsp);
+};
+
+static const struct cs_dsp_ops cs_dsp_adsp1_ops;
+static const struct cs_dsp_ops cs_dsp_adsp2_ops[];
+static const struct cs_dsp_ops cs_dsp_halo_ops;
+
+struct cs_dsp_buf {
+ struct list_head list;
+ void *buf;
+};
+
+static struct cs_dsp_buf *cs_dsp_buf_alloc(const void *src, size_t len,
+ struct list_head *list)
+{
+ struct cs_dsp_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+
+ if (buf == NULL)
+ return NULL;
+
+ buf->buf = vmalloc(len);
+ if (!buf->buf) {
+ kfree(buf);
+ return NULL;
+ }
+ memcpy(buf->buf, src, len);
+
+ if (list)
+ list_add_tail(&buf->list, list);
+
+ return buf;
+}
+
+static void cs_dsp_buf_free(struct list_head *list)
+{
+ while (!list_empty(list)) {
+ struct cs_dsp_buf *buf = list_first_entry(list,
+ struct cs_dsp_buf,
+ list);
+ list_del(&buf->list);
+ vfree(buf->buf);
+ kfree(buf);
+ }
+}
+
+/**
+ * cs_dsp_mem_region_name() - Return a name string for a memory type
+ * @type: the memory type to match
+ *
+ * Return: A const string identifying the memory region.
+ */
+const char *cs_dsp_mem_region_name(unsigned int type)
+{
+ switch (type) {
+ case WMFW_ADSP1_PM:
+ return "PM";
+ case WMFW_HALO_PM_PACKED:
+ return "PM_PACKED";
+ case WMFW_ADSP1_DM:
+ return "DM";
+ case WMFW_ADSP2_XM:
+ return "XM";
+ case WMFW_HALO_XM_PACKED:
+ return "XM_PACKED";
+ case WMFW_ADSP2_YM:
+ return "YM";
+ case WMFW_HALO_YM_PACKED:
+ return "YM_PACKED";
+ case WMFW_ADSP1_ZM:
+ return "ZM";
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(cs_dsp_mem_region_name);
+
+#ifdef CONFIG_DEBUG_FS
+static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s)
+{
+ char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
+
+ kfree(dsp->wmfw_file_name);
+ dsp->wmfw_file_name = tmp;
+}
+
+static void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp, const char *s)
+{
+ char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
+
+ kfree(dsp->bin_file_name);
+ dsp->bin_file_name = tmp;
+}
+
+static void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
+{
+ kfree(dsp->wmfw_file_name);
+ kfree(dsp->bin_file_name);
+ dsp->wmfw_file_name = NULL;
+ dsp->bin_file_name = NULL;
+}
+
+static ssize_t cs_dsp_debugfs_wmfw_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct cs_dsp *dsp = file->private_data;
+ ssize_t ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ if (!dsp->wmfw_file_name || !dsp->booted)
+ ret = 0;
+ else
+ ret = simple_read_from_buffer(user_buf, count, ppos,
+ dsp->wmfw_file_name,
+ strlen(dsp->wmfw_file_name));
+
+ mutex_unlock(&dsp->pwr_lock);
+ return ret;
+}
+
+static ssize_t cs_dsp_debugfs_bin_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct cs_dsp *dsp = file->private_data;
+ ssize_t ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ if (!dsp->bin_file_name || !dsp->booted)
+ ret = 0;
+ else
+ ret = simple_read_from_buffer(user_buf, count, ppos,
+ dsp->bin_file_name,
+ strlen(dsp->bin_file_name));
+
+ mutex_unlock(&dsp->pwr_lock);
+ return ret;
+}
+
+static const struct {
+ const char *name;
+ const struct file_operations fops;
+} cs_dsp_debugfs_fops[] = {
+ {
+ .name = "wmfw_file_name",
+ .fops = {
+ .open = simple_open,
+ .read = cs_dsp_debugfs_wmfw_read,
+ },
+ },
+ {
+ .name = "bin_file_name",
+ .fops = {
+ .open = simple_open,
+ .read = cs_dsp_debugfs_bin_read,
+ },
+ },
+};
+
+/**
+ * cs_dsp_init_debugfs() - Create and populate DSP representation in debugfs
+ * @dsp: pointer to DSP structure
+ * @debugfs_root: pointer to debugfs directory in which to create this DSP
+ * representation
+ */
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
+{
+ struct dentry *root = NULL;
+ int i;
+
+ root = debugfs_create_dir(dsp->name, debugfs_root);
+
+ debugfs_create_bool("booted", 0444, root, &dsp->booted);
+ debugfs_create_bool("running", 0444, root, &dsp->running);
+ debugfs_create_x32("fw_id", 0444, root, &dsp->fw_id);
+ debugfs_create_x32("fw_version", 0444, root, &dsp->fw_id_version);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_debugfs_fops); ++i)
+ debugfs_create_file(cs_dsp_debugfs_fops[i].name, 0444, root,
+ dsp, &cs_dsp_debugfs_fops[i].fops);
+
+ dsp->debugfs_root = root;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_init_debugfs);
+
+/**
+ * cs_dsp_cleanup_debugfs() - Removes DSP representation from debugfs
+ * @dsp: pointer to DSP structure
+ */
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
+{
+ cs_dsp_debugfs_clear(dsp);
+ debugfs_remove_recursive(dsp->debugfs_root);
+ dsp->debugfs_root = NULL;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_cleanup_debugfs);
+#else
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
+{
+}
+EXPORT_SYMBOL_GPL(cs_dsp_init_debugfs);
+
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
+{
+}
+EXPORT_SYMBOL_GPL(cs_dsp_cleanup_debugfs);
+
+static inline void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp,
+ const char *s)
+{
+}
+
+static inline void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp,
+ const char *s)
+{
+}
+
+static inline void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
+{
+}
+#endif
+
+static const struct cs_dsp_region *cs_dsp_find_region(struct cs_dsp *dsp,
+ int type)
+{
+ int i;
+
+ for (i = 0; i < dsp->num_mems; i++)
+ if (dsp->mem[i].type == type)
+ return &dsp->mem[i];
+
+ return NULL;
+}
+
+static unsigned int cs_dsp_region_to_reg(struct cs_dsp_region const *mem,
+ unsigned int offset)
+{
+ switch (mem->type) {
+ case WMFW_ADSP1_PM:
+ return mem->base + (offset * 3);
+ case WMFW_ADSP1_DM:
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP1_ZM:
+ return mem->base + (offset * 2);
+ default:
+ WARN(1, "Unknown memory region type");
+ return offset;
+ }
+}
+
+static unsigned int cs_dsp_halo_region_to_reg(struct cs_dsp_region const *mem,
+ unsigned int offset)
+{
+ switch (mem->type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return mem->base + (offset * 4);
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return (mem->base + (offset * 3)) & ~0x3;
+ case WMFW_HALO_PM_PACKED:
+ return mem->base + (offset * 5);
+ default:
+ WARN(1, "Unknown memory region type");
+ return offset;
+ }
+}
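+/*
+ * Worked example of the packed mapping above: packed 24-bit DSP words sit
+ * three bytes apart and the "& ~0x3" snaps to the containing 32-bit
+ * register, so with a hypothetical mem->base of 0x2000, XM_PACKED word
+ * offsets 0..4 map to 0x2000, 0x2000, 0x2004, 0x2008, 0x200c; four
+ * packed words occupy each group of three registers.
+ */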
+
+static void cs_dsp_read_fw_status(struct cs_dsp *dsp,
+ int noffs, unsigned int *offs)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < noffs; ++i) {
+ ret = regmap_read(dsp->regmap, dsp->base + offs[i], &offs[i]);
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
+ return;
+ }
+ }
+}
+
+static void cs_dsp_adsp2_show_fw_status(struct cs_dsp *dsp)
+{
+ unsigned int offs[] = {
+ ADSP2_SCRATCH0, ADSP2_SCRATCH1, ADSP2_SCRATCH2, ADSP2_SCRATCH3,
+ };
+
+ cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
+
+ cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+ offs[0], offs[1], offs[2], offs[3]);
+}
+
+static void cs_dsp_adsp2v2_show_fw_status(struct cs_dsp *dsp)
+{
+ unsigned int offs[] = { ADSP2V2_SCRATCH0_1, ADSP2V2_SCRATCH2_3 };
+
+ cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
+
+ cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+ offs[0] & 0xFFFF, offs[0] >> 16,
+ offs[1] & 0xFFFF, offs[1] >> 16);
+}
+
+static void cs_dsp_halo_show_fw_status(struct cs_dsp *dsp)
+{
+ unsigned int offs[] = {
+ HALO_SCRATCH1, HALO_SCRATCH2, HALO_SCRATCH3, HALO_SCRATCH4,
+ };
+
+ cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
+
+ cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+ offs[0], offs[1], offs[2], offs[3]);
+}
+
+static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg)
+{
+ const struct cs_dsp_alg_region *alg_region = &ctl->alg_region;
+ struct cs_dsp *dsp = ctl->dsp;
+ const struct cs_dsp_region *mem;
+
+ mem = cs_dsp_find_region(dsp, alg_region->type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No base for region %x\n",
+ alg_region->type);
+ return -EINVAL;
+ }
+
+ *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_coeff_write_acked_control() - Sends event_id to the acked control
+ * @ctl: pointer to acked coefficient control
+ * @event_id: the value to write to the given acked control
+ *
+ * Once the value has been written to the control the function shall block
+ * until the running firmware acknowledges the write or the timeout is exceeded.
+ *
+ * Must be called with pwr_lock held.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ __be32 val = cpu_to_be32(event_id);
+ unsigned int reg;
+ int i, ret;
+
+ if (!dsp->running)
+ return -EPERM;
+
+ ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ if (ret)
+ return ret;
+
+ cs_dsp_dbg(dsp, "Sending 0x%x to acked control alg 0x%x %s:0x%x\n",
+ event_id, ctl->alg_region.alg,
+ cs_dsp_mem_region_name(ctl->alg_region.type), ctl->offset);
+
+ ret = regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to write %x: %d\n", reg, ret);
+ return ret;
+ }
+
+ /*
+ * Poll for ack; we initially poll at ~1ms intervals for firmwares
+ * that respond quickly, then go to ~10ms polls. A firmware is unlikely
+ * to ack instantly so we do the first 1ms delay before reading the
+ * control to avoid a pointless bus transaction
+ */
+ for (i = 0; i < CS_DSP_ACKED_CTL_TIMEOUT_MS;) {
+ switch (i) {
+ case 0 ... CS_DSP_ACKED_CTL_N_QUICKPOLLS - 1:
+ usleep_range(1000, 2000);
+ i++;
+ break;
+ default:
+ usleep_range(10000, 20000);
+ i += 10;
+ break;
+ }
+
+ ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to read %x: %d\n", reg, ret);
+ return ret;
+ }
+
+ if (val == 0) {
+ cs_dsp_dbg(dsp, "Acked control ACKED at poll %u\n", i);
+ return 0;
+ }
+ }
+
+ cs_dsp_warn(dsp, "Acked control @0x%x alg:0x%x %s:0x%x timed out\n",
+ reg, ctl->alg_region.alg,
+ cs_dsp_mem_region_name(ctl->alg_region.type),
+ ctl->offset);
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_acked_control);
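+/*
+ * Worked example of the cadence above: assuming, say,
+ * CS_DSP_ACKED_CTL_N_QUICKPOLLS == 10 and CS_DSP_ACKED_CTL_TIMEOUT_MS == 100,
+ * the loop reads the control ten times at ~1ms spacing (i = 0..9) and then
+ * nine times at ~10ms spacing (i = 10, 20, ... 90), so the timeout bounds
+ * elapsed milliseconds rather than a fixed number of polls.
+ */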
+
+static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
+ const void *buf, size_t len)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ void *scratch;
+ int ret;
+ unsigned int reg;
+
+ ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ if (ret)
+ return ret;
+
+ scratch = kmemdup(buf, len, GFP_KERNEL | GFP_DMA);
+ if (!scratch)
+ return -ENOMEM;
+
+ ret = regmap_raw_write(dsp->regmap, reg, scratch,
+ len);
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to write %zu bytes to %x: %d\n",
+ len, reg, ret);
+ kfree(scratch);
+ return ret;
+ }
+ cs_dsp_dbg(dsp, "Wrote %zu bytes to %x\n", len, reg);
+
+ kfree(scratch);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_coeff_write_ctrl() - Writes the given buffer to the given coefficient control
+ * @ctl: pointer to coefficient control
+ * @buf: the buffer to write to the given control
+ * @len: the length of the buffer
+ *
+ * Must be called with pwr_lock held.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, const void *buf, size_t len)
+{
+ int ret = 0;
+
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+ ret = -EPERM;
+ else if (buf != ctl->cache)
+ memcpy(ctl->cache, buf, len);
+
+ ctl->set = 1;
+ if (ctl->enabled && ctl->dsp->running)
+ ret = cs_dsp_coeff_write_ctrl_raw(ctl, buf, len);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_ctrl);
+
+static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ void *scratch;
+ int ret;
+ unsigned int reg;
+
+ ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ if (ret)
+ return ret;
+
+ scratch = kmalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!scratch)
+ return -ENOMEM;
+
+ ret = regmap_raw_read(dsp->regmap, reg, scratch, len);
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to read %zu bytes from %x: %d\n",
+ len, reg, ret);
+ kfree(scratch);
+ return ret;
+ }
+ cs_dsp_dbg(dsp, "Read %zu bytes from %x\n", len, reg);
+
+ memcpy(buf, scratch, len);
+ kfree(scratch);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_coeff_read_ctrl() - Reads the given coefficient control into the given buffer
+ * @ctl: pointer to coefficient control
+ * @buf: the buffer to store to the given control
+ * @len: the length of the buffer
+ *
+ * Must be called with pwr_lock held.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len)
+{
+ int ret = 0;
+
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
+ if (ctl->enabled && ctl->dsp->running)
+ return cs_dsp_coeff_read_ctrl_raw(ctl, buf, len);
+ else
+ return -EPERM;
+ } else {
+ if (!ctl->flags && ctl->enabled && ctl->dsp->running)
+ ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len);
+
+ if (buf != ctl->cache)
+ memcpy(buf, ctl->cache, len);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_coeff_read_ctrl);
+
+static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (!ctl->enabled || ctl->set)
+ continue;
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+ continue;
+
+ /*
+ * For readable controls populate the cache from the DSP memory.
+ * For non-readable controls the cache was zero-filled when
+ * created so we don't need to do anything.
+ */
+ if (!ctl->flags || (ctl->flags & WMFW_CTL_FLAG_READABLE)) {
+ ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cs_dsp_coeff_sync_controls(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (!ctl->enabled)
+ continue;
+ if (ctl->set && !(ctl->flags & WMFW_CTL_FLAG_VOLATILE)) {
+ ret = cs_dsp_coeff_write_ctrl_raw(ctl, ctl->cache,
+ ctl->len);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void cs_dsp_signal_event_controls(struct cs_dsp *dsp,
+ unsigned int event)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->type != WMFW_CTL_TYPE_HOSTEVENT)
+ continue;
+
+ if (!ctl->enabled)
+ continue;
+
+ ret = cs_dsp_coeff_write_acked_control(ctl, event);
+ if (ret)
+ cs_dsp_warn(dsp,
+ "Failed to send 0x%x event to alg 0x%x (%d)\n",
+ event, ctl->alg_region.alg, ret);
+ }
+}
+
+static void cs_dsp_free_ctl_blk(struct cs_dsp_coeff_ctl *ctl)
+{
+ kfree(ctl->cache);
+ kfree(ctl->subname);
+ kfree(ctl);
+}
+
+static int cs_dsp_create_control(struct cs_dsp *dsp,
+ const struct cs_dsp_alg_region *alg_region,
+ unsigned int offset, unsigned int len,
+ const char *subname, unsigned int subname_len,
+ unsigned int flags, unsigned int type)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->fw_name == dsp->fw_name &&
+ ctl->alg_region.alg == alg_region->alg &&
+ ctl->alg_region.type == alg_region->type) {
+ if ((!subname && !ctl->subname) ||
+ (subname && !strncmp(ctl->subname, subname, ctl->subname_len))) {
+ if (!ctl->enabled)
+ ctl->enabled = 1;
+ return 0;
+ }
+ }
+ }
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ ctl->fw_name = dsp->fw_name;
+ ctl->alg_region = *alg_region;
+ if (subname && dsp->fw_ver >= 2) {
+ ctl->subname_len = subname_len;
+ ctl->subname = kmemdup(subname,
+ strlen(subname) + 1, GFP_KERNEL);
+ if (!ctl->subname) {
+ ret = -ENOMEM;
+ goto err_ctl;
+ }
+ }
+ ctl->enabled = 1;
+ ctl->set = 0;
+ ctl->dsp = dsp;
+
+ ctl->flags = flags;
+ ctl->type = type;
+ ctl->offset = offset;
+ ctl->len = len;
+ ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
+ if (!ctl->cache) {
+ ret = -ENOMEM;
+ goto err_ctl_subname;
+ }
+
+ list_add(&ctl->list, &dsp->ctl_list);
+
+ if (dsp->client_ops->control_add) {
+ ret = dsp->client_ops->control_add(ctl);
+ if (ret)
+ goto err_list_del;
+ }
+
+ return 0;
+
+err_list_del:
+ list_del(&ctl->list);
+ kfree(ctl->cache);
+err_ctl_subname:
+ kfree(ctl->subname);
+err_ctl:
+ kfree(ctl);
+
+ return ret;
+}
+
+struct cs_dsp_coeff_parsed_alg {
+ int id;
+ const u8 *name;
+ int name_len;
+ int ncoeff;
+};
+
+struct cs_dsp_coeff_parsed_coeff {
+ int offset;
+ int mem_type;
+ const u8 *name;
+ int name_len;
+ unsigned int ctl_type;
+ int flags;
+ int len;
+};
+
+static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
+{
+ int length;
+
+ switch (bytes) {
+ case 1:
+ length = **pos;
+ break;
+ case 2:
+ length = le16_to_cpu(*((__le16 *)*pos));
+ break;
+ default:
+ return 0;
+ }
+
+ if (str)
+ *str = *pos + bytes;
+
+ *pos += ((length + bytes) + 3) & ~0x03;
+
+ return length;
+}
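+/*
+ * Example of the rounding above: wmfw string fields are padded to a 4-byte
+ * boundary, so with a one-byte length prefix (bytes == 1) and length == 6
+ * the cursor advances ((6 + 1) + 3) & ~0x03 = 8 bytes.
+ */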
+
+static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos)
+{
+ int val = 0;
+
+ switch (bytes) {
+ case 2:
+ val = le16_to_cpu(*((__le16 *)*pos));
+ break;
+ case 4:
+ val = le32_to_cpu(*((__le32 *)*pos));
+ break;
+ default:
+ break;
+ }
+
+ *pos += bytes;
+
+ return val;
+}
+
+static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data,
+ struct cs_dsp_coeff_parsed_alg *blk)
+{
+ const struct wmfw_adsp_alg_data *raw;
+
+ switch (dsp->fw_ver) {
+ case 0:
+ case 1:
+ raw = (const struct wmfw_adsp_alg_data *)*data;
+ *data = raw->data;
+
+ blk->id = le32_to_cpu(raw->id);
+ blk->name = raw->name;
+ blk->name_len = strlen(raw->name);
+ blk->ncoeff = le32_to_cpu(raw->ncoeff);
+ break;
+ default:
+ blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data);
+ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data,
+ &blk->name);
+ cs_dsp_coeff_parse_string(sizeof(u16), data, NULL);
+ blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data);
+ break;
+ }
+
+ cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id);
+ cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name);
+ cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff);
+}
+
+static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
+ struct cs_dsp_coeff_parsed_coeff *blk)
+{
+ const struct wmfw_adsp_coeff_data *raw;
+ const u8 *tmp;
+ int length;
+
+ switch (dsp->fw_ver) {
+ case 0:
+ case 1:
+ raw = (const struct wmfw_adsp_coeff_data *)*data;
+ *data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size);
+
+ blk->offset = le16_to_cpu(raw->hdr.offset);
+ blk->mem_type = le16_to_cpu(raw->hdr.type);
+ blk->name = raw->name;
+ blk->name_len = strlen(raw->name);
+ blk->ctl_type = le16_to_cpu(raw->ctl_type);
+ blk->flags = le16_to_cpu(raw->flags);
+ blk->len = le32_to_cpu(raw->len);
+ break;
+ default:
+ tmp = *data;
+ blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp);
+ blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp);
+ length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp);
+ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp,
+ &blk->name);
+ cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL);
+ cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL);
+ blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp);
+ blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp);
+ blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp);
+
+ *data = *data + sizeof(raw->hdr) + length;
+ break;
+ }
+
+ cs_dsp_dbg(dsp, "\tCoefficient type: %#x\n", blk->mem_type);
+ cs_dsp_dbg(dsp, "\tCoefficient offset: %#x\n", blk->offset);
+ cs_dsp_dbg(dsp, "\tCoefficient name: %.*s\n", blk->name_len, blk->name);
+ cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags);
+ cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type);
+ cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len);
+}
+
+static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp,
+ const struct cs_dsp_coeff_parsed_coeff *coeff_blk,
+ unsigned int f_required,
+ unsigned int f_illegal)
+{
+ if ((coeff_blk->flags & f_illegal) ||
+ ((coeff_blk->flags & f_required) != f_required)) {
+ cs_dsp_err(dsp, "Illegal flags 0x%x for control type 0x%x\n",
+ coeff_blk->flags, coeff_blk->ctl_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cs_dsp_parse_coeff(struct cs_dsp *dsp,
+ const struct wmfw_region *region)
+{
+ struct cs_dsp_alg_region alg_region = {};
+ struct cs_dsp_coeff_parsed_alg alg_blk;
+ struct cs_dsp_coeff_parsed_coeff coeff_blk;
+ const u8 *data = region->data;
+ int i, ret;
+
+ cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk);
+ for (i = 0; i < alg_blk.ncoeff; i++) {
+ cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk);
+
+ switch (coeff_blk.ctl_type) {
+ case WMFW_CTL_TYPE_BYTES:
+ break;
+ case WMFW_CTL_TYPE_ACKED:
+ if (coeff_blk.flags & WMFW_CTL_FLAG_SYS)
+ continue; /* ignore */
+
+ ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
+ case WMFW_CTL_TYPE_HOSTEVENT:
+ ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
+ case WMFW_CTL_TYPE_HOST_BUFFER:
+ ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
+ default:
+ cs_dsp_err(dsp, "Unknown control type: %d\n",
+ coeff_blk.ctl_type);
+ return -EINVAL;
+ }
+
+ alg_region.type = coeff_blk.mem_type;
+ alg_region.alg = alg_blk.id;
+
+ ret = cs_dsp_create_control(dsp, &alg_region,
+ coeff_blk.offset,
+ coeff_blk.len,
+ coeff_blk.name,
+ coeff_blk.name_len,
+ coeff_blk.flags,
+ coeff_blk.ctl_type);
+ if (ret < 0)
+ cs_dsp_err(dsp, "Failed to create control: %.*s, %d\n",
+ coeff_blk.name_len, coeff_blk.name, ret);
+ }
+
+ return 0;
+}
+
+static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp,
+ const char * const file,
+ unsigned int pos,
+ const struct firmware *firmware)
+{
+ const struct wmfw_adsp1_sizes *adsp1_sizes;
+
+ adsp1_sizes = (void *)&firmware->data[pos];
+
+ cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file,
+ le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm),
+ le32_to_cpu(adsp1_sizes->zm));
+
+ return pos + sizeof(*adsp1_sizes);
+}
+
+static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp,
+ const char * const file,
+ unsigned int pos,
+ const struct firmware *firmware)
+{
+ const struct wmfw_adsp2_sizes *adsp2_sizes;
+
+ adsp2_sizes = (void *)&firmware->data[pos];
+
+ cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file,
+ le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym),
+ le32_to_cpu(adsp2_sizes->pm), le32_to_cpu(adsp2_sizes->zm));
+
+ return pos + sizeof(*adsp2_sizes);
+}
+
+static bool cs_dsp_validate_version(struct cs_dsp *dsp, unsigned int version)
+{
+ switch (version) {
+ case 0:
+ cs_dsp_warn(dsp, "Deprecated file format %d\n", version);
+ return true;
+ case 1:
+ case 2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs_dsp_halo_validate_version(struct cs_dsp *dsp, unsigned int version)
+{
+ switch (version) {
+ case 3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ const char *file)
+{
+ LIST_HEAD(buf_list);
+ struct regmap *regmap = dsp->regmap;
+ unsigned int pos = 0;
+ const struct wmfw_header *header;
+ const struct wmfw_adsp1_sizes *adsp1_sizes;
+ const struct wmfw_footer *footer;
+ const struct wmfw_region *region;
+ const struct cs_dsp_region *mem;
+ const char *region_name;
+ char *text = NULL;
+ struct cs_dsp_buf *buf;
+ unsigned int reg;
+ int regions = 0;
+ int ret, offset, type;
+
+ ret = -EINVAL;
+
+ pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
+ if (pos >= firmware->size) {
+ cs_dsp_err(dsp, "%s: file too short, %zu bytes\n",
+ file, firmware->size);
+ goto out_fw;
+ }
+
+ header = (void *)&firmware->data[0];
+
+ if (memcmp(&header->magic[0], "WMFW", 4) != 0) {
+ cs_dsp_err(dsp, "%s: invalid magic\n", file);
+ goto out_fw;
+ }
+
+ if (!dsp->ops->validate_version(dsp, header->ver)) {
+ cs_dsp_err(dsp, "%s: unknown file format %d\n",
+ file, header->ver);
+ goto out_fw;
+ }
+
+ cs_dsp_info(dsp, "Firmware version: %d\n", header->ver);
+ dsp->fw_ver = header->ver;
+
+ if (header->core != dsp->type) {
+ cs_dsp_err(dsp, "%s: invalid core %d != %d\n",
+ file, header->core, dsp->type);
+ goto out_fw;
+ }
+
+ pos = sizeof(*header);
+ pos = dsp->ops->parse_sizes(dsp, file, pos, firmware);
+
+ footer = (void *)&firmware->data[pos];
+ pos += sizeof(*footer);
+
+ if (le32_to_cpu(header->len) != pos) {
+ cs_dsp_err(dsp, "%s: unexpected header length %d\n",
+ file, le32_to_cpu(header->len));
+ goto out_fw;
+ }
+
+ cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file,
+ le64_to_cpu(footer->timestamp));
+
+ while (pos < firmware->size &&
+ sizeof(*region) < firmware->size - pos) {
+ region = (void *)&(firmware->data[pos]);
+ region_name = "Unknown";
+ reg = 0;
+ text = NULL;
+ offset = le32_to_cpu(region->offset) & 0xffffff;
+ type = be32_to_cpu(region->type) & 0xff;
+
+ switch (type) {
+ case WMFW_NAME_TEXT:
+ region_name = "Firmware name";
+ text = kzalloc(le32_to_cpu(region->len) + 1,
+ GFP_KERNEL);
+ break;
+ case WMFW_ALGORITHM_DATA:
+ region_name = "Algorithm";
+ ret = cs_dsp_parse_coeff(dsp, region);
+ if (ret != 0)
+ goto out_fw;
+ break;
+ case WMFW_INFO_TEXT:
+ region_name = "Information";
+ text = kzalloc(le32_to_cpu(region->len) + 1,
+ GFP_KERNEL);
+ break;
+ case WMFW_ABSOLUTE:
+ region_name = "Absolute";
+ reg = offset;
+ break;
+ case WMFW_ADSP1_PM:
+ case WMFW_ADSP1_DM:
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP1_ZM:
+ case WMFW_HALO_PM_PACKED:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ mem = cs_dsp_find_region(dsp, type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No region of type: %x\n", type);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ region_name = cs_dsp_mem_region_name(type);
+ reg = dsp->ops->region_to_reg(mem, offset);
+ break;
+ default:
+ cs_dsp_warn(dsp,
+ "%s.%d: Unknown region type %x at %d(%x)\n",
+ file, regions, type, pos, pos);
+ break;
+ }
+
+ cs_dsp_dbg(dsp, "%s.%d: %d bytes at %d in %s\n", file,
+ regions, le32_to_cpu(region->len), offset,
+ region_name);
+
+ if (le32_to_cpu(region->len) >
+ firmware->size - pos - sizeof(*region)) {
+ cs_dsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, regions, region_name,
+ le32_to_cpu(region->len), firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ if (text) {
+ memcpy(text, region->data, le32_to_cpu(region->len));
+ cs_dsp_info(dsp, "%s: %s\n", file, text);
+ kfree(text);
+ text = NULL;
+ }
+
+ if (reg) {
+ buf = cs_dsp_buf_alloc(region->data,
+ le32_to_cpu(region->len),
+ &buf_list);
+ if (!buf) {
+ cs_dsp_err(dsp, "Out of memory\n");
+ ret = -ENOMEM;
+ goto out_fw;
+ }
+
+ ret = regmap_raw_write_async(regmap, reg, buf->buf,
+ le32_to_cpu(region->len));
+ if (ret != 0) {
+ cs_dsp_err(dsp,
+ "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
+ file, regions,
+ le32_to_cpu(region->len), offset,
+ region_name, ret);
+ goto out_fw;
+ }
+ }
+
+ pos += le32_to_cpu(region->len) + sizeof(*region);
+ regions++;
+ }
+
+ ret = regmap_async_complete(regmap);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+ goto out_fw;
+ }
+
+ if (pos > firmware->size)
+ cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ file, regions, pos - firmware->size);
+
+ cs_dsp_debugfs_save_wmfwname(dsp, file);
+
+out_fw:
+ regmap_async_complete(regmap);
+ cs_dsp_buf_free(&buf_list);
+ kfree(text);
+
+ return ret;
+}
+
+/**
+ * cs_dsp_get_ctl() - Finds a matching coefficient control
+ * @dsp: pointer to DSP structure
+ * @name: pointer to string to match with a control's subname
+ * @type: the algorithm type to match
+ * @alg: the algorithm id to match
+ *
+ * Finds the cs_dsp_coeff_ctl whose subname matches the given name
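+ *
+ * Example (an illustrative sketch; the control name, memory type and
+ * algorithm id are placeholders):
+ *
+ *   ctl = cs_dsp_get_ctl(dsp, "CAL_R", WMFW_ADSP2_YM, 0xf00d);
+ *   if (!ctl)
+ *     return -ENOENT;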
+ *
+ * Return: pointer to the control on success, NULL if not found
+ */
+struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg)
+{
+ struct cs_dsp_coeff_ctl *pos, *rslt = NULL;
+
+ list_for_each_entry(pos, &dsp->ctl_list, list) {
+ if (!pos->subname)
+ continue;
+ if (strncmp(pos->subname, name, pos->subname_len) == 0 &&
+ pos->fw_name == dsp->fw_name &&
+ pos->alg_region.alg == alg &&
+ pos->alg_region.type == type) {
+ rslt = pos;
+ break;
+ }
+ }
+
+ return rslt;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_get_ctl);
+
+static void cs_dsp_ctl_fixup_base(struct cs_dsp *dsp,
+ const struct cs_dsp_alg_region *alg_region)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->fw_name == dsp->fw_name &&
+ alg_region->alg == ctl->alg_region.alg &&
+ alg_region->type == ctl->alg_region.type) {
+ ctl->alg_region.base = alg_region->base;
+ }
+ }
+}
+
+static void *cs_dsp_read_algs(struct cs_dsp *dsp, size_t n_algs,
+ const struct cs_dsp_region *mem,
+ unsigned int pos, unsigned int len)
+{
+ void *alg;
+ unsigned int reg;
+ int ret;
+ __be32 val;
+
+ if (n_algs == 0) {
+ cs_dsp_err(dsp, "No algorithms\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (n_algs > 1024) {
+ cs_dsp_err(dsp, "Algorithm count %zx excessive\n", n_algs);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Read the terminator first to validate the length */
+ reg = dsp->ops->region_to_reg(mem, pos + len);
+
+ ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm list end: %d\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ if (be32_to_cpu(val) != 0xbedead)
+ cs_dsp_warn(dsp, "Algorithm list end %x 0x%x != 0xbedead\n",
+ reg, be32_to_cpu(val));
+
+ /* Convert length from DSP words to bytes */
+ len *= sizeof(u32);
+
+ alg = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!alg)
+ return ERR_PTR(-ENOMEM);
+
+ reg = dsp->ops->region_to_reg(mem, pos);
+
+ ret = regmap_raw_read(dsp->regmap, reg, alg, len);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm list: %d\n", ret);
+ kfree(alg);
+ return ERR_PTR(ret);
+ }
+
+ return alg;
+}
+
+/**
+ * cs_dsp_find_alg_region() - Finds a matching algorithm region
+ * @dsp: pointer to DSP structure
+ * @type: the algorithm type to match
+ * @id: the algorithm id to match
+ *
+ * Return: Pointer to matching algorithm region, or NULL if not found.
+ */
+struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
+ int type, unsigned int id)
+{
+ struct cs_dsp_alg_region *alg_region;
+
+ list_for_each_entry(alg_region, &dsp->alg_regions, list) {
+ if (id == alg_region->alg && type == alg_region->type)
+ return alg_region;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_find_alg_region);
+
+static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
+ int type, __be32 id,
+ __be32 base)
+{
+ struct cs_dsp_alg_region *alg_region;
+
+ alg_region = kzalloc(sizeof(*alg_region), GFP_KERNEL);
+ if (!alg_region)
+ return ERR_PTR(-ENOMEM);
+
+ alg_region->type = type;
+ alg_region->alg = be32_to_cpu(id);
+ alg_region->base = be32_to_cpu(base);
+
+ list_add_tail(&alg_region->list, &dsp->alg_regions);
+
+ if (dsp->fw_ver > 0)
+ cs_dsp_ctl_fixup_base(dsp, alg_region);
+
+ return alg_region;
+}
+
+static void cs_dsp_free_alg_regions(struct cs_dsp *dsp)
+{
+ struct cs_dsp_alg_region *alg_region;
+
+ while (!list_empty(&dsp->alg_regions)) {
+ alg_region = list_first_entry(&dsp->alg_regions,
+ struct cs_dsp_alg_region,
+ list);
+ list_del(&alg_region->list);
+ kfree(alg_region);
+ }
+}
+
+static void cs_dsp_parse_wmfw_id_header(struct cs_dsp *dsp,
+ struct wmfw_id_hdr *fw, int nalgs)
+{
+ dsp->fw_id = be32_to_cpu(fw->id);
+ dsp->fw_id_version = be32_to_cpu(fw->ver);
+
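+ /* fw->ver packs major/minor/patch a byte each: 0x00030201 is v3.2.1 */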
+ cs_dsp_info(dsp, "Firmware: %x v%d.%d.%d, %d algorithms\n",
+ dsp->fw_id, (dsp->fw_id_version & 0xff0000) >> 16,
+ (dsp->fw_id_version & 0xff00) >> 8, dsp->fw_id_version & 0xff,
+ nalgs);
+}
+
+static void cs_dsp_parse_wmfw_v3_id_header(struct cs_dsp *dsp,
+ struct wmfw_v3_id_hdr *fw, int nalgs)
+{
+ dsp->fw_id = be32_to_cpu(fw->id);
+ dsp->fw_id_version = be32_to_cpu(fw->ver);
+ dsp->fw_vendor_id = be32_to_cpu(fw->vendor_id);
+
+ cs_dsp_info(dsp, "Firmware: %x vendor: 0x%x v%d.%d.%d, %d algorithms\n",
+ dsp->fw_id, dsp->fw_vendor_id,
+ (dsp->fw_id_version & 0xff0000) >> 16,
+ (dsp->fw_id_version & 0xff00) >> 8, dsp->fw_id_version & 0xff,
+ nalgs);
+}
+
+static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, int nregions,
+ const int *type, __be32 *base)
+{
+ struct cs_dsp_alg_region *alg_region;
+ int i;
+
+ for (i = 0; i < nregions; i++) {
+ alg_region = cs_dsp_create_region(dsp, type[i], id, base[i]);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+ }
+
+ return 0;
+}
+
+static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp)
+{
+ struct wmfw_adsp1_id_hdr adsp1_id;
+ struct wmfw_adsp1_alg_hdr *adsp1_alg;
+ struct cs_dsp_alg_region *alg_region;
+ const struct cs_dsp_region *mem;
+ unsigned int pos, len;
+ size_t n_algs;
+ int i, ret;
+
+ mem = cs_dsp_find_region(dsp, WMFW_ADSP1_DM);
+ if (WARN_ON(!mem))
+ return -EINVAL;
+
+ ret = regmap_raw_read(dsp->regmap, mem->base, &adsp1_id,
+ sizeof(adsp1_id));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
+ ret);
+ return ret;
+ }
+
+ n_algs = be32_to_cpu(adsp1_id.n_algs);
+
+ cs_dsp_parse_wmfw_id_header(dsp, &adsp1_id.fw, n_algs);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
+ adsp1_id.fw.id, adsp1_id.zm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
+ adsp1_id.fw.id, adsp1_id.dm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(adsp1_id) / sizeof(u32);
+ len = (sizeof(*adsp1_alg) * n_algs) / sizeof(u32);
+
+ adsp1_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
+ if (IS_ERR(adsp1_alg))
+ return PTR_ERR(adsp1_alg);
+
+ for (i = 0; i < n_algs; i++) {
+ cs_dsp_info(dsp, "%d: ID %x v%d.%d.%d DM@%x ZM@%x\n",
+ i, be32_to_cpu(adsp1_alg[i].alg.id),
+ (be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(adsp1_alg[i].dm),
+ be32_to_cpu(adsp1_alg[i].zm));
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
+ adsp1_alg[i].alg.id,
+ adsp1_alg[i].dm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp1_alg[i + 1].dm);
+ len -= be32_to_cpu(adsp1_alg[i].dm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region DM with ID %x\n",
+ be32_to_cpu(adsp1_alg[i].alg.id));
+ }
+ }
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
+ adsp1_alg[i].alg.id,
+ adsp1_alg[i].zm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp1_alg[i + 1].zm);
+ len -= be32_to_cpu(adsp1_alg[i].zm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
+ be32_to_cpu(adsp1_alg[i].alg.id));
+ }
+ }
+ }
+
+out:
+ kfree(adsp1_alg);
+ return ret;
+}
+
+static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
+{
+ struct wmfw_adsp2_id_hdr adsp2_id;
+ struct wmfw_adsp2_alg_hdr *adsp2_alg;
+ struct cs_dsp_alg_region *alg_region;
+ const struct cs_dsp_region *mem;
+ unsigned int pos, len;
+ size_t n_algs;
+ int i, ret;
+
+ mem = cs_dsp_find_region(dsp, WMFW_ADSP2_XM);
+ if (WARN_ON(!mem))
+ return -EINVAL;
+
+ ret = regmap_raw_read(dsp->regmap, mem->base, &adsp2_id,
+ sizeof(adsp2_id));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
+ ret);
+ return ret;
+ }
+
+ n_algs = be32_to_cpu(adsp2_id.n_algs);
+
+ cs_dsp_parse_wmfw_id_header(dsp, &adsp2_id.fw, n_algs);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
+ adsp2_id.fw.id, adsp2_id.xm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
+ adsp2_id.fw.id, adsp2_id.ym);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
+ adsp2_id.fw.id, adsp2_id.zm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(adsp2_id) / sizeof(u32);
+ len = (sizeof(*adsp2_alg) * n_algs) / sizeof(u32);
+
+ adsp2_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
+ if (IS_ERR(adsp2_alg))
+ return PTR_ERR(adsp2_alg);
+
+ for (i = 0; i < n_algs; i++) {
+ cs_dsp_info(dsp,
+ "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
+ i, be32_to_cpu(adsp2_alg[i].alg.id),
+ (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(adsp2_alg[i].xm),
+ be32_to_cpu(adsp2_alg[i].ym),
+ be32_to_cpu(adsp2_alg[i].zm));
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
+ adsp2_alg[i].alg.id,
+ adsp2_alg[i].xm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp2_alg[i + 1].xm);
+ len -= be32_to_cpu(adsp2_alg[i].xm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region XM with ID %x\n",
+ be32_to_cpu(adsp2_alg[i].alg.id));
+ }
+ }
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
+ adsp2_alg[i].alg.id,
+ adsp2_alg[i].ym);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp2_alg[i + 1].ym);
+ len -= be32_to_cpu(adsp2_alg[i].ym);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region YM with ID %x\n",
+ be32_to_cpu(adsp2_alg[i].alg.id));
+ }
+ }
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
+ adsp2_alg[i].alg.id,
+ adsp2_alg[i].zm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp2_alg[i + 1].zm);
+ len -= be32_to_cpu(adsp2_alg[i].zm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
+ be32_to_cpu(adsp2_alg[i].alg.id));
+ }
+ }
+ }
+
+out:
+ kfree(adsp2_alg);
+ return ret;
+}
+
+static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id,
+ __be32 xm_base, __be32 ym_base)
+{
+ static const int types[] = {
+ WMFW_ADSP2_XM, WMFW_HALO_XM_PACKED,
+ WMFW_ADSP2_YM, WMFW_HALO_YM_PACKED
+ };
+ __be32 bases[] = { xm_base, xm_base, ym_base, ym_base };
+
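+ /*
+ * Each base is used twice so that both the unpacked and the packed
+ * view of the same memory get an algorithm region.
+ */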
+ return cs_dsp_create_regions(dsp, id, ARRAY_SIZE(types), types, bases);
+}
+
+static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
+{
+ struct wmfw_halo_id_hdr halo_id;
+ struct wmfw_halo_alg_hdr *halo_alg;
+ const struct cs_dsp_region *mem;
+ unsigned int pos, len;
+ size_t n_algs;
+ int i, ret;
+
+ mem = cs_dsp_find_region(dsp, WMFW_ADSP2_XM);
+ if (WARN_ON(!mem))
+ return -EINVAL;
+
+ ret = regmap_raw_read(dsp->regmap, mem->base, &halo_id,
+ sizeof(halo_id));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
+ ret);
+ return ret;
+ }
+
+ n_algs = be32_to_cpu(halo_id.n_algs);
+
+ cs_dsp_parse_wmfw_v3_id_header(dsp, &halo_id.fw, n_algs);
+
+ ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id,
+ halo_id.xm_base, halo_id.ym_base);
+ if (ret)
+ return ret;
+
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(halo_id) / sizeof(u32);
+ len = (sizeof(*halo_alg) * n_algs) / sizeof(u32);
+
+ halo_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
+ if (IS_ERR(halo_alg))
+ return PTR_ERR(halo_alg);
+
+ for (i = 0; i < n_algs; i++) {
+ cs_dsp_info(dsp,
+ "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
+ i, be32_to_cpu(halo_alg[i].alg.id),
+ (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(halo_alg[i].xm_base),
+ be32_to_cpu(halo_alg[i].ym_base));
+
+ ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
+ halo_alg[i].xm_base,
+ halo_alg[i].ym_base);
+ if (ret)
+ goto out;
+ }
+
+out:
+ kfree(halo_alg);
+ return ret;
+}
+
+static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware,
+ const char *file)
+{
+ LIST_HEAD(buf_list);
+ struct regmap *regmap = dsp->regmap;
+ struct wmfw_coeff_hdr *hdr;
+ struct wmfw_coeff_item *blk;
+ const struct cs_dsp_region *mem;
+ struct cs_dsp_alg_region *alg_region;
+ const char *region_name;
+ int ret, pos, blocks, type, offset, reg;
+ struct cs_dsp_buf *buf;
+
+ if (!firmware)
+ return 0;
+
+ ret = -EINVAL;
+
+ if (sizeof(*hdr) >= firmware->size) {
+ cs_dsp_err(dsp, "%s: coefficient file too short, %zu bytes\n",
+ file, firmware->size);
+ goto out_fw;
+ }
+
+ hdr = (void *)&firmware->data[0];
+ if (memcmp(hdr->magic, "WMDR", 4) != 0) {
+ cs_dsp_err(dsp, "%s: invalid coefficient magic\n", file);
+ goto out_fw;
+ }
+
+ switch (be32_to_cpu(hdr->rev) & 0xff) {
+ case 1:
+ break;
+ default:
+ cs_dsp_err(dsp, "%s: Unsupported coefficient file format %d\n",
+ file, be32_to_cpu(hdr->rev) & 0xff);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ cs_dsp_dbg(dsp, "%s: v%d.%d.%d\n", file,
+ (le32_to_cpu(hdr->ver) >> 16) & 0xff,
+ (le32_to_cpu(hdr->ver) >> 8) & 0xff,
+ le32_to_cpu(hdr->ver) & 0xff);
+
+ pos = le32_to_cpu(hdr->len);
+
+ blocks = 0;
+ while (pos < firmware->size &&
+ sizeof(*blk) < firmware->size - pos) {
+ blk = (void *)(&firmware->data[pos]);
+
+ type = le16_to_cpu(blk->type);
+ offset = le16_to_cpu(blk->offset);
+
+ cs_dsp_dbg(dsp, "%s.%d: %x v%d.%d.%d\n",
+ file, blocks, le32_to_cpu(blk->id),
+ (le32_to_cpu(blk->ver) >> 16) & 0xff,
+ (le32_to_cpu(blk->ver) >> 8) & 0xff,
+ le32_to_cpu(blk->ver) & 0xff);
+ cs_dsp_dbg(dsp, "%s.%d: %d bytes at 0x%x in %x\n",
+ file, blocks, le32_to_cpu(blk->len), offset, type);
+
+ reg = 0;
+ region_name = "Unknown";
+ switch (type) {
+ case (WMFW_NAME_TEXT << 8):
+ case (WMFW_INFO_TEXT << 8):
+ case (WMFW_METADATA << 8):
+ break;
+ case (WMFW_ABSOLUTE << 8):
+ /*
+ * Old files may use this for global
+ * coefficients.
+ */
+ if (le32_to_cpu(blk->id) == dsp->fw_id &&
+ offset == 0) {
+ region_name = "global coefficients";
+ mem = cs_dsp_find_region(dsp, type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No ZM\n");
+ break;
+ }
+ reg = dsp->ops->region_to_reg(mem, 0);
+
+ } else {
+ region_name = "register";
+ reg = offset;
+ }
+ break;
+
+ case WMFW_ADSP1_DM:
+ case WMFW_ADSP1_ZM:
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ case WMFW_HALO_PM_PACKED:
+ cs_dsp_dbg(dsp, "%s.%d: %d bytes in %x for %x\n",
+ file, blocks, le32_to_cpu(blk->len),
+ type, le32_to_cpu(blk->id));
+
+ mem = cs_dsp_find_region(dsp, type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No base for region %x\n", type);
+ break;
+ }
+
+ alg_region = cs_dsp_find_alg_region(dsp, type,
+ le32_to_cpu(blk->id));
+ if (alg_region) {
+ reg = alg_region->base;
+ reg = dsp->ops->region_to_reg(mem, reg);
+ reg += offset;
+ } else {
+ cs_dsp_err(dsp, "No %x for algorithm %x\n",
+ type, le32_to_cpu(blk->id));
+ }
+ break;
+
+ default:
+ cs_dsp_err(dsp, "%s.%d: Unknown region type %x at %d\n",
+ file, blocks, type, pos);
+ break;
+ }
+
+ if (reg) {
+ if (le32_to_cpu(blk->len) >
+ firmware->size - pos - sizeof(*blk)) {
+ cs_dsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, blocks, region_name,
+ le32_to_cpu(blk->len),
+ firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ buf = cs_dsp_buf_alloc(blk->data,
+ le32_to_cpu(blk->len),
+ &buf_list);
+ if (!buf) {
+ cs_dsp_err(dsp, "Out of memory\n");
+ ret = -ENOMEM;
+ goto out_fw;
+ }
+
+ cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
+ file, blocks, le32_to_cpu(blk->len),
+ reg);
+ ret = regmap_raw_write_async(regmap, reg, buf->buf,
+ le32_to_cpu(blk->len));
+ if (ret != 0) {
+ cs_dsp_err(dsp,
+ "%s.%d: Failed to write to %x in %s: %d\n",
+ file, blocks, reg, region_name, ret);
+ }
+ }
+
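+ /* Coefficient blocks are padded to a 4-byte boundary in the file */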
+ pos += (le32_to_cpu(blk->len) + sizeof(*blk) + 3) & ~0x03;
+ blocks++;
+ }
+
+ ret = regmap_async_complete(regmap);
+ if (ret != 0)
+ cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+
+ if (pos > firmware->size)
+ cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ file, blocks, pos - firmware->size);
+
+ cs_dsp_debugfs_save_binname(dsp, file);
+
+out_fw:
+ regmap_async_complete(regmap);
+ cs_dsp_buf_free(&buf_list);
+ return ret;
+}
+
+static int cs_dsp_create_name(struct cs_dsp *dsp)
+{
+ if (!dsp->name) {
+ dsp->name = devm_kasprintf(dsp->dev, GFP_KERNEL, "DSP%d",
+ dsp->num);
+ if (!dsp->name)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int cs_dsp_common_init(struct cs_dsp *dsp)
+{
+ int ret;
+
+ ret = cs_dsp_create_name(dsp);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&dsp->alg_regions);
+ INIT_LIST_HEAD(&dsp->ctl_list);
+
+ mutex_init(&dsp->pwr_lock);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_adsp1_init() - Initialise a cs_dsp structure representing an ADSP1 device
+ * @dsp: pointer to DSP structure
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_adsp1_init(struct cs_dsp *dsp)
+{
+ dsp->ops = &cs_dsp_adsp1_ops;
+
+ return cs_dsp_common_init(dsp);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_adsp1_init);
+
+/**
+ * cs_dsp_adsp1_power_up() - Load and start the named firmware
+ * @dsp: pointer to DSP structure
+ * @wmfw_firmware: the firmware to be sent
+ * @wmfw_filename: file name of firmware to be sent
+ * @coeff_firmware: the coefficient data to be sent
+ * @coeff_filename: file name of coefficient data to be sent
+ * @fw_name: the user-friendly firmware name
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_adsp1_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name)
+{
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->fw_name = fw_name;
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, ADSP1_SYS_ENA);
+
+ /*
+ * For simplicity set the DSP clock rate to be the
+ * SYSCLK rate rather than making it configurable.
+ */
+ if (dsp->sysclk_reg) {
+ ret = regmap_read(dsp->regmap, dsp->sysclk_reg, &val);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read SYSCLK state: %d\n", ret);
+ goto err_mutex;
+ }
+
+ val = (val & dsp->sysclk_mask) >> dsp->sysclk_shift;
+
+ ret = regmap_update_bits(dsp->regmap,
+ dsp->base + ADSP1_CONTROL_31,
+ ADSP1_CLK_SEL_MASK, val);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to set clock rate: %d\n", ret);
+ goto err_mutex;
+ }
+ }
+
+ ret = cs_dsp_load(dsp, wmfw_firmware, wmfw_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = cs_dsp_adsp1_setup_algs(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = cs_dsp_load_coeff(dsp, coeff_firmware, coeff_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ /* Initialize caches for enabled and unset controls */
+ ret = cs_dsp_coeff_init_control_caches(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ /* Sync set controls */
+ ret = cs_dsp_coeff_sync_controls(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ dsp->booted = true;
+
+ /* Start the core running */
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_CORE_ENA | ADSP1_START,
+ ADSP1_CORE_ENA | ADSP1_START);
+
+ dsp->running = true;
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ return 0;
+
+err_ena:
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, 0);
+err_mutex:
+ mutex_unlock(&dsp->pwr_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_adsp1_power_up);
+
+/**
+ * cs_dsp_adsp1_power_down() - Halts the DSP
+ * @dsp: pointer to DSP structure
+ */
+void cs_dsp_adsp1_power_down(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->running = false;
+ dsp->booted = false;
+
+ /* Halt the core */
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_CORE_ENA | ADSP1_START, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_19,
+ ADSP1_WDMA_BUFFER_LENGTH_MASK, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, 0);
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ ctl->enabled = 0;
+
+ cs_dsp_free_alg_regions(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_adsp1_power_down);
+
+static int cs_dsp_adsp2v2_enable_core(struct cs_dsp *dsp)
+{
+ unsigned int val;
+ int ret, count;
+
+ /* Wait for the RAM to start, should be near instantaneous */
+ for (count = 0; count < 10; ++count) {
+ ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, &val);
+ if (ret != 0)
+ return ret;
+
+ if (val & ADSP2_RAM_RDY)
+ break;
+
+ usleep_range(250, 500);
+ }
+
+ if (!(val & ADSP2_RAM_RDY)) {
+ cs_dsp_err(dsp, "Failed to start DSP RAM\n");
+ return -EBUSY;
+ }
+
+ cs_dsp_dbg(dsp, "RAM ready after %d polls\n", count);
+
+ return 0;
+}
+
+static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
+{
+ int ret;
+
+ ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ if (ret != 0)
+ return ret;
+
+ return cs_dsp_adsp2v2_enable_core(dsp);
+}
+
+static int cs_dsp_adsp2_lock(struct cs_dsp *dsp, unsigned int lock_regions)
+{
+ struct regmap *regmap = dsp->regmap;
+ unsigned int code0, code1, lock_reg;
+
+ if (!(lock_regions & CS_ADSP2_REGION_ALL))
+ return 0;
+
+ lock_regions &= CS_ADSP2_REGION_ALL;
+ lock_reg = dsp->base + ADSP2_LOCK_REGION_1_LOCK_REGION_0;
+
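+ /*
+ * Each lock register covers a pair of regions; writing the two key
+ * words in sequence locks whichever of the pair is selected, so
+ * consume lock_regions two bits at a time.
+ */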
+ while (lock_regions) {
+ code0 = code1 = 0;
+ if (lock_regions & BIT(0)) {
+ code0 = ADSP2_LOCK_CODE_0;
+ code1 = ADSP2_LOCK_CODE_1;
+ }
+ if (lock_regions & BIT(1)) {
+ code0 |= ADSP2_LOCK_CODE_0 << ADSP2_LOCK_REGION_SHIFT;
+ code1 |= ADSP2_LOCK_CODE_1 << ADSP2_LOCK_REGION_SHIFT;
+ }
+ regmap_write(regmap, lock_reg, code0);
+ regmap_write(regmap, lock_reg, code1);
+ lock_regions >>= 2;
+ lock_reg += 2;
+ }
+
+ return 0;
+}
+
+static int cs_dsp_adsp2_enable_memory(struct cs_dsp *dsp)
+{
+ return regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, ADSP2_MEM_ENA);
+}
+
+static void cs_dsp_adsp2_disable_memory(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, 0);
+}
+
+static void cs_dsp_adsp2_disable_core(struct cs_dsp *dsp)
+{
+ regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_2, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, 0);
+}
+
+static void cs_dsp_adsp2v2_disable_core(struct cs_dsp *dsp)
+{
+ regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2V2_WDMA_CONFIG_2, 0);
+}
+
+static int cs_dsp_halo_configure_mpu(struct cs_dsp *dsp, unsigned int lock_regions)
+{
+ struct reg_sequence config[] = {
+ { dsp->base + HALO_MPU_LOCK_CONFIG, 0x5555 },
+ { dsp->base + HALO_MPU_LOCK_CONFIG, 0xAAAA },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_0, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_0, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_0, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_0, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_0, lock_regions },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_1, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_1, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_1, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_1, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_1, lock_regions },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_2, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_2, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_2, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_2, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_2, lock_regions },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_3, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_3, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_3, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_3, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_3, lock_regions },
+ { dsp->base + HALO_MPU_LOCK_CONFIG, 0 },
+ };
+
+ return regmap_multi_reg_write(dsp->regmap, config, ARRAY_SIZE(config));
+}
+
+/**
+ * cs_dsp_set_dspclk() - Applies the given frequency to the given cs_dsp
+ * @dsp: pointer to DSP structure
+ * @freq: clock rate to set
+ *
+ * This is only for use on ADSP2 cores.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq)
+{
+ int ret;
+
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CLOCKING,
+ ADSP2_CLK_SEL_MASK,
+ freq << ADSP2_CLK_SEL_SHIFT);
+ if (ret)
+ cs_dsp_err(dsp, "Failed to set clock rate: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_set_dspclk);
+
+static void cs_dsp_stop_watchdog(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_WATCHDOG,
+ ADSP2_WDT_ENA_MASK, 0);
+}
+
+static void cs_dsp_halo_stop_watchdog(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + HALO_WDT_CONTROL,
+ HALO_WDT_EN_MASK, 0);
+}
+
+/**
+ * cs_dsp_power_up() - Downloads firmware to the DSP
+ * @dsp: pointer to DSP structure
+ * @wmfw_firmware: the firmware to be sent
+ * @wmfw_filename: file name of firmware to be sent
+ * @coeff_firmware: the coefficient data to be sent
+ * @coeff_filename: file name of coefficient data to be sent
+ * @fw_name: the user-friendly firmware name
+ *
+ * This function is used on ADSP2 and Halo DSP cores; it powers up the DSP
+ * core and downloads the firmware but does not start it running. The cs_dsp
+ * booted flag will be set once complete, and if the core has a low-power
+ * memory retention mode it will be put into this state after the firmware is
+ * downloaded.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name)
+{
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->fw_name = fw_name;
+
+ if (dsp->ops->enable_memory) {
+ ret = dsp->ops->enable_memory(dsp);
+ if (ret != 0)
+ goto err_mutex;
+ }
+
+ if (dsp->ops->enable_core) {
+ ret = dsp->ops->enable_core(dsp);
+ if (ret != 0)
+ goto err_mem;
+ }
+
+ ret = cs_dsp_load(dsp, wmfw_firmware, wmfw_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = dsp->ops->setup_algs(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = cs_dsp_load_coeff(dsp, coeff_firmware, coeff_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ /* Initialize caches for enabled and unset controls */
+ ret = cs_dsp_coeff_init_control_caches(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+
+ dsp->booted = true;
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ return 0;
+err_ena:
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+err_mem:
+ if (dsp->ops->disable_memory)
+ dsp->ops->disable_memory(dsp);
+err_mutex:
+ mutex_unlock(&dsp->pwr_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_power_up);
+
+/**
+ * cs_dsp_power_down() - Powers-down the DSP
+ * @dsp: pointer to DSP structure
+ *
+ * cs_dsp_stop() must have been called before this function. The core will be
+ * fully powered down and so the memory will not be retained.
+ */
+void cs_dsp_power_down(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ cs_dsp_debugfs_clear(dsp);
+
+ dsp->fw_id = 0;
+ dsp->fw_id_version = 0;
+
+ dsp->booted = false;
+
+ if (dsp->ops->disable_memory)
+ dsp->ops->disable_memory(dsp);
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ ctl->enabled = 0;
+
+ cs_dsp_free_alg_regions(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ cs_dsp_dbg(dsp, "Shutdown complete\n");
+}
+EXPORT_SYMBOL_GPL(cs_dsp_power_down);
+
+static int cs_dsp_adsp2_start_core(struct cs_dsp *dsp)
+{
+ return regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_CORE_ENA | ADSP2_START,
+ ADSP2_CORE_ENA | ADSP2_START);
+}
+
+static void cs_dsp_adsp2_stop_core(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_CORE_ENA | ADSP2_START, 0);
+}
+
+/**
+ * cs_dsp_run() - Starts the firmware running
+ * @dsp: pointer to DSP structure
+ *
+ * cs_dsp_power_up() must have previously been called successfully.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_run(struct cs_dsp *dsp)
+{
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ if (!dsp->booted) {
+ ret = -EIO;
+ goto err;
+ }
+
+ if (dsp->ops->enable_core) {
+ ret = dsp->ops->enable_core(dsp);
+ if (ret != 0)
+ goto err;
+ }
+
+ /* Sync set controls */
+ ret = cs_dsp_coeff_sync_controls(dsp);
+ if (ret != 0)
+ goto err;
+
+ if (dsp->ops->lock_memory) {
+ ret = dsp->ops->lock_memory(dsp, dsp->lock_regions);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Error configuring MPU: %d\n", ret);
+ goto err;
+ }
+ }
+
+ if (dsp->ops->start_core) {
+ ret = dsp->ops->start_core(dsp);
+ if (ret != 0)
+ goto err;
+ }
+
+ dsp->running = true;
+
+ if (dsp->client_ops->post_run) {
+ ret = dsp->client_ops->post_run(dsp);
+ if (ret)
+ goto err;
+ }
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ return 0;
+
+err:
+ if (dsp->ops->stop_core)
+ dsp->ops->stop_core(dsp);
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+ mutex_unlock(&dsp->pwr_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_run);
+
+/**
+ * cs_dsp_stop() - Stops the firmware
+ * @dsp: pointer to DSP structure
+ *
+ * Memory will not be disabled so firmware will remain loaded.
+ */
+void cs_dsp_stop(struct cs_dsp *dsp)
+{
+ /* Tell the firmware to clean up */
+ cs_dsp_signal_event_controls(dsp, CS_DSP_FW_EVENT_SHUTDOWN);
+
+ if (dsp->ops->stop_watchdog)
+ dsp->ops->stop_watchdog(dsp);
+
+ /* Log firmware state, it can be useful for analysis */
+ if (dsp->ops->show_fw_status)
+ dsp->ops->show_fw_status(dsp);
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->running = false;
+
+ if (dsp->ops->stop_core)
+ dsp->ops->stop_core(dsp);
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+
+ if (dsp->client_ops->post_stop)
+ dsp->client_ops->post_stop(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ cs_dsp_dbg(dsp, "Execution stopped\n");
+}
+EXPORT_SYMBOL_GPL(cs_dsp_stop);
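+
+/*
+ * Typical client lifecycle (an illustrative sketch; the firmware blobs,
+ * file names and error handling are the caller's responsibility):
+ *
+ *   cs_dsp_halo_init(dsp);
+ *   cs_dsp_power_up(dsp, wmfw, "fw.wmfw", coeff, "fw.bin", "fw");
+ *   cs_dsp_run(dsp);
+ *   ...
+ *   cs_dsp_stop(dsp);
+ *   cs_dsp_power_down(dsp);
+ */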
+
+static int cs_dsp_halo_start_core(struct cs_dsp *dsp)
+{
+ return regmap_update_bits(dsp->regmap,
+ dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_RESET | HALO_CORE_EN,
+ HALO_CORE_RESET | HALO_CORE_EN);
+}
+
+static void cs_dsp_halo_stop_core(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_EN, 0);
+
+ /* reset halo core with CORE_SOFT_RESET */
+ regmap_update_bits(dsp->regmap, dsp->base + HALO_CORE_SOFT_RESET,
+ HALO_CORE_SOFT_RESET_MASK, 1);
+}
+
+/**
+ * cs_dsp_adsp2_init() - Initialise a cs_dsp structure representing an ADSP2 core
+ * @dsp: pointer to DSP structure
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_adsp2_init(struct cs_dsp *dsp)
+{
+ int ret;
+
+ switch (dsp->rev) {
+ case 0:
+ /*
+ * Disable the DSP memory by default when in reset for a small
+ * power saving.
+ */
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, 0);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to clear memory retention: %d\n", ret);
+ return ret;
+ }
+
+ dsp->ops = &cs_dsp_adsp2_ops[0];
+ break;
+ case 1:
+ dsp->ops = &cs_dsp_adsp2_ops[1];
+ break;
+ default:
+ dsp->ops = &cs_dsp_adsp2_ops[2];
+ break;
+ }
+
+ return cs_dsp_common_init(dsp);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_adsp2_init);
+
+/**
+ * cs_dsp_halo_init() - Initialise a cs_dsp structure representing a HALO Core DSP
+ * @dsp: pointer to DSP structure
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_halo_init(struct cs_dsp *dsp)
+{
+ dsp->ops = &cs_dsp_halo_ops;
+
+ return cs_dsp_common_init(dsp);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_halo_init);
+
+/**
+ * cs_dsp_remove() - Clean a cs_dsp before deletion
+ * @dsp: pointer to DSP structure
+ */
+void cs_dsp_remove(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ while (!list_empty(&dsp->ctl_list)) {
+ ctl = list_first_entry(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+
+ if (dsp->client_ops->control_remove)
+ dsp->client_ops->control_remove(ctl);
+
+ list_del(&ctl->list);
+ cs_dsp_free_ctl_blk(ctl);
+ }
+}
+EXPORT_SYMBOL_GPL(cs_dsp_remove);
+
+/**
+ * cs_dsp_read_raw_data_block() - Reads a block of data from DSP memory
+ * @dsp: pointer to DSP structure
+ * @mem_type: the type of DSP memory containing the data to be read
+ * @mem_addr: the address of the data within the memory region
+ * @num_words: the length of the data to read, in DSP words
+ * @data: a buffer to store the fetched data
+ *
+ * If this is used to read unpacked 24-bit memory, each 24-bit DSP word will
+ * occupy 32 bits in data (the most-significant byte will be 0). This padding
+ * can be removed using cs_dsp_remove_padding().
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr,
+ unsigned int num_words, __be32 *data)
+{
+ struct cs_dsp_region const *mem = cs_dsp_find_region(dsp, mem_type);
+ unsigned int reg;
+ int ret;
+
+ if (!mem)
+ return -EINVAL;
+
+ reg = dsp->ops->region_to_reg(mem, mem_addr);
+
+ ret = regmap_raw_read(dsp->regmap, reg, data,
+ sizeof(*data) * num_words);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_read_raw_data_block);
+
+/**
+ * cs_dsp_read_data_word() - Reads a word from DSP memory
+ * @dsp: pointer to DSP structure
+ * @mem_type: the type of DSP memory containing the data to be read
+ * @mem_addr: the address of the data within the memory region
+ * @data: a buffer to store the fetched data
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 *data)
+{
+ __be32 raw;
+ int ret;
+
+ ret = cs_dsp_read_raw_data_block(dsp, mem_type, mem_addr, 1, &raw);
+ if (ret < 0)
+ return ret;
+
+ *data = be32_to_cpu(raw) & 0x00ffffffu;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_read_data_word);
+
+/**
+ * cs_dsp_write_data_word() - Writes a word to DSP memory
+ * @dsp: pointer to DSP structure
+ * @mem_type: the type of DSP memory containing the data to be written
+ * @mem_addr: the address of the data within the memory region
+ * @data: the data to be written
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 data)
+{
+ struct cs_dsp_region const *mem = cs_dsp_find_region(dsp, mem_type);
+ __be32 val = cpu_to_be32(data & 0x00ffffffu);
+ unsigned int reg;
+
+ if (!mem)
+ return -EINVAL;
+
+ reg = dsp->ops->region_to_reg(mem, mem_addr);
+
+ return regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
+}
+EXPORT_SYMBOL_GPL(cs_dsp_write_data_word);
+
+/**
+ * cs_dsp_remove_padding() - Convert unpacked words to packed bytes
+ * @buf: buffer containing DSP words read from DSP memory
+ * @nwords: number of words to convert
+ *
+ * DSP words from the register map have pad bytes and the data bytes
+ * are in swapped order. This swaps to the native endian order and
+ * strips the pad bytes.
+ */
+void cs_dsp_remove_padding(u32 *buf, int nwords)
+{
+ const __be32 *pack_in = (__be32 *)buf;
+ u8 *pack_out = (u8 *)buf;
+ int i;
+
+ for (i = 0; i < nwords; i++) {
+ u32 word = be32_to_cpu(*pack_in++);
+ *pack_out++ = (u8)word;
+ *pack_out++ = (u8)(word >> 8);
+ *pack_out++ = (u8)(word >> 16);
+ }
+}
+EXPORT_SYMBOL_GPL(cs_dsp_remove_padding);
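+
+/*
+ * Example pairing with cs_dsp_read_raw_data_block() (an illustrative
+ * sketch; the memory type, address and word count are placeholders):
+ *
+ *   ret = cs_dsp_read_raw_data_block(dsp, WMFW_ADSP2_XM, addr, nwords,
+ *                                    (__be32 *)buf);
+ *   if (!ret)
+ *     cs_dsp_remove_padding(buf, nwords);
+ */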
+
+/**
+ * cs_dsp_adsp2_bus_error() - Handle a DSP bus error interrupt
+ * @dsp: pointer to DSP structure
+ *
+ * The firmware and DSP state will be logged for future analysis.
+ */
+void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp)
+{
+ unsigned int val;
+ struct regmap *regmap = dsp->regmap;
+ int ret = 0;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to read Region Lock Ctrl register: %d\n", ret);
+ goto error;
+ }
+
+ if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
+ cs_dsp_err(dsp, "watchdog timeout error\n");
+ dsp->ops->stop_watchdog(dsp);
+ if (dsp->client_ops->watchdog_expired)
+ dsp->client_ops->watchdog_expired(dsp);
+ }
+
+ if (val & (ADSP2_ADDR_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) {
+ if (val & ADSP2_ADDR_ERR_MASK)
+ cs_dsp_err(dsp, "bus error: address error\n");
+ else
+ cs_dsp_err(dsp, "bus error: region lock error\n");
+
+ ret = regmap_read(regmap, dsp->base + ADSP2_BUS_ERR_ADDR, &val);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to read Bus Err Addr register: %d\n",
+ ret);
+ goto error;
+ }
+
+ cs_dsp_err(dsp, "bus error address = 0x%x\n",
+ val & ADSP2_BUS_ERR_ADDR_MASK);
+
+ ret = regmap_read(regmap,
+ dsp->base + ADSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR,
+ &val);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to read Pmem Xmem Err Addr register: %d\n",
+ ret);
+ goto error;
+ }
+
+ cs_dsp_err(dsp, "xmem error address = 0x%x\n",
+ val & ADSP2_XMEM_ERR_ADDR_MASK);
+ cs_dsp_err(dsp, "pmem error address = 0x%x\n",
+ (val & ADSP2_PMEM_ERR_ADDR_MASK) >>
+ ADSP2_PMEM_ERR_ADDR_SHIFT);
+ }
+
+ regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
+
+error:
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_adsp2_bus_error);
+
+/**
+ * cs_dsp_halo_bus_error() - Handle a DSP bus error interrupt
+ * @dsp: pointer to DSP structure
+ *
+ * The firmware and DSP state will be logged for future analysis.
+ */
+void cs_dsp_halo_bus_error(struct cs_dsp *dsp)
+{
+ struct regmap *regmap = dsp->regmap;
+ unsigned int fault[6];
+ struct reg_sequence clear[] = {
+ { dsp->base + HALO_MPU_XM_VIO_STATUS, 0x0 },
+ { dsp->base + HALO_MPU_YM_VIO_STATUS, 0x0 },
+ { dsp->base + HALO_MPU_PM_VIO_STATUS, 0x0 },
+ };
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ ret = regmap_read(regmap, dsp->base_sysinfo + HALO_AHBM_WINDOW_DEBUG_1,
+ fault);
+ if (ret) {
+ cs_dsp_warn(dsp, "Failed to read AHB DEBUG_1: %d\n", ret);
+ goto exit_unlock;
+ }
+
+ cs_dsp_warn(dsp, "AHB: STATUS: 0x%x ADDR: 0x%x\n",
+ *fault & HALO_AHBM_FLAGS_ERR_MASK,
+ (*fault & HALO_AHBM_CORE_ERR_ADDR_MASK) >>
+ HALO_AHBM_CORE_ERR_ADDR_SHIFT);
+
+ ret = regmap_read(regmap, dsp->base_sysinfo + HALO_AHBM_WINDOW_DEBUG_0,
+ fault);
+ if (ret) {
+ cs_dsp_warn(dsp, "Failed to read AHB DEBUG_0: %d\n", ret);
+ goto exit_unlock;
+ }
+
+ cs_dsp_warn(dsp, "AHB: SYS_ADDR: 0x%x\n", *fault);
+
+ ret = regmap_bulk_read(regmap, dsp->base + HALO_MPU_XM_VIO_ADDR,
+ fault, ARRAY_SIZE(fault));
+ if (ret) {
+ cs_dsp_warn(dsp, "Failed to read MPU fault info: %d\n", ret);
+ goto exit_unlock;
+ }
+
+ cs_dsp_warn(dsp, "XM: STATUS:0x%x ADDR:0x%x\n", fault[1], fault[0]);
+ cs_dsp_warn(dsp, "YM: STATUS:0x%x ADDR:0x%x\n", fault[3], fault[2]);
+ cs_dsp_warn(dsp, "PM: STATUS:0x%x ADDR:0x%x\n", fault[5], fault[4]);
+
+ ret = regmap_multi_reg_write(dsp->regmap, clear, ARRAY_SIZE(clear));
+ if (ret)
+ cs_dsp_warn(dsp, "Failed to clear MPU status: %d\n", ret);
+
+exit_unlock:
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_halo_bus_error);
+
+/**
+ * cs_dsp_halo_wdt_expire() - Handle DSP watchdog expiry
+ * @dsp: pointer to DSP structure
+ *
+ * This is logged for future analysis.
+ */
+void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp)
+{
+ mutex_lock(&dsp->pwr_lock);
+
+ cs_dsp_warn(dsp, "WDT Expiry Fault\n");
+
+ dsp->ops->stop_watchdog(dsp);
+ if (dsp->client_ops->watchdog_expired)
+ dsp->client_ops->watchdog_expired(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_halo_wdt_expire);
+
+static const struct cs_dsp_ops cs_dsp_adsp1_ops = {
+ .validate_version = cs_dsp_validate_version,
+ .parse_sizes = cs_dsp_adsp1_parse_sizes,
+ .region_to_reg = cs_dsp_region_to_reg,
+};
+
+static const struct cs_dsp_ops cs_dsp_adsp2_ops[] = {
+ {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_validate_version,
+ .setup_algs = cs_dsp_adsp2_setup_algs,
+ .region_to_reg = cs_dsp_region_to_reg,
+
+ .show_fw_status = cs_dsp_adsp2_show_fw_status,
+
+ .enable_memory = cs_dsp_adsp2_enable_memory,
+ .disable_memory = cs_dsp_adsp2_disable_memory,
+
+ .enable_core = cs_dsp_adsp2_enable_core,
+ .disable_core = cs_dsp_adsp2_disable_core,
+
+ .start_core = cs_dsp_adsp2_start_core,
+ .stop_core = cs_dsp_adsp2_stop_core,
+
+ },
+ {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_validate_version,
+ .setup_algs = cs_dsp_adsp2_setup_algs,
+ .region_to_reg = cs_dsp_region_to_reg,
+
+ .show_fw_status = cs_dsp_adsp2v2_show_fw_status,
+
+ .enable_memory = cs_dsp_adsp2_enable_memory,
+ .disable_memory = cs_dsp_adsp2_disable_memory,
+ .lock_memory = cs_dsp_adsp2_lock,
+
+ .enable_core = cs_dsp_adsp2v2_enable_core,
+ .disable_core = cs_dsp_adsp2v2_disable_core,
+
+ .start_core = cs_dsp_adsp2_start_core,
+ .stop_core = cs_dsp_adsp2_stop_core,
+ },
+ {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_validate_version,
+ .setup_algs = cs_dsp_adsp2_setup_algs,
+ .region_to_reg = cs_dsp_region_to_reg,
+
+ .show_fw_status = cs_dsp_adsp2v2_show_fw_status,
+ .stop_watchdog = cs_dsp_stop_watchdog,
+
+ .enable_memory = cs_dsp_adsp2_enable_memory,
+ .disable_memory = cs_dsp_adsp2_disable_memory,
+ .lock_memory = cs_dsp_adsp2_lock,
+
+ .enable_core = cs_dsp_adsp2v2_enable_core,
+ .disable_core = cs_dsp_adsp2v2_disable_core,
+
+ .start_core = cs_dsp_adsp2_start_core,
+ .stop_core = cs_dsp_adsp2_stop_core,
+ },
+};
+
+static const struct cs_dsp_ops cs_dsp_halo_ops = {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_halo_validate_version,
+ .setup_algs = cs_dsp_halo_setup_algs,
+ .region_to_reg = cs_dsp_halo_region_to_reg,
+
+ .show_fw_status = cs_dsp_halo_show_fw_status,
+ .stop_watchdog = cs_dsp_halo_stop_watchdog,
+
+ .lock_memory = cs_dsp_halo_configure_mpu,
+
+ .start_core = cs_dsp_halo_start_core,
+ .stop_core = cs_dsp_halo_stop_core,
+};
+
+MODULE_DESCRIPTION("Cirrus Logic DSP Support");
+MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 847f33ffc4ae..ae79c3300129 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -66,7 +66,7 @@ struct mm_struct efi_mm = {
struct workqueue_struct *efi_rts_wq;
-static bool disable_runtime;
+static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT);
static int __init setup_noefi(char *arg)
{
disable_runtime = true;
@@ -97,6 +97,9 @@ static int __init parse_efi_cmdline(char *str)
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
+ if (parse_option_str(str, "runtime"))
+ disable_runtime = false;
+
if (parse_option_str(str, "nosoftreserve"))
set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 9a369a2eda71..116eb465cdb4 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -155,7 +155,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return -ENOMEM;
- cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
+ cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
GFP_KERNEL);
if (!cpu_groups) {
free_cpumask_var(tmp);
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 2ee97bab7440..7db8066b19fd 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -252,7 +252,7 @@ static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
break;
default:
pr_err("Unknown SMC convention being used\n");
- return -EINVAL;
+ return false;
}
ret = qcom_scm_call(dev, &desc, &res);
@@ -1348,6 +1348,10 @@ static const struct of_device_id qcom_scm_dt_match[] = {
SCM_HAS_IFACE_CLK |
SCM_HAS_BUS_CLK)
},
+ { .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK |
+ SCM_HAS_IFACE_CLK |
+ SCM_HAS_BUS_CLK)
+ },
{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
SCM_HAS_IFACE_CLK |
SCM_HAS_BUS_CLK)
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 3e9fa4b54358..6d66fe03fb6a 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -74,28 +74,36 @@ static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset)
static const char *get_filename(struct tegra_bpmp *bpmp,
const struct file *file, char *buf, int size)
{
- char root_path_buf[512];
- const char *root_path;
- const char *filename;
+ const char *root_path, *filename = NULL;
+ char *root_path_buf;
size_t root_len;
+ root_path_buf = kzalloc(512, GFP_KERNEL);
+ if (!root_path_buf)
+ goto out;
+
- root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
- sizeof(root_path_buf));
+ root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf, 512);
if (IS_ERR(root_path))
- return NULL;
+ goto out;
root_len = strlen(root_path);
filename = dentry_path(file->f_path.dentry, buf, size);
- if (IS_ERR(filename))
- return NULL;
+ if (IS_ERR(filename)) {
+ filename = NULL;
+ goto out;
+ }
- if (strlen(filename) < root_len ||
- strncmp(filename, root_path, root_len))
- return NULL;
+ if (strlen(filename) < root_len || strncmp(filename, root_path, root_len)) {
+ filename = NULL;
+ goto out;
+ }
filename += root_len;
+out:
+ kfree(root_path_buf);
return filename;
}
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
index c32754055c60..c9c830f658c3 100644
--- a/drivers/firmware/tegra/bpmp-tegra210.c
+++ b/drivers/firmware/tegra/bpmp-tegra210.c
@@ -162,7 +162,6 @@ static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
{
struct platform_device *pdev = to_platform_device(bpmp->dev);
struct tegra210_bpmp *priv;
- struct resource *res;
unsigned int i;
int err;
@@ -172,13 +171,11 @@ static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
bpmp->priv = priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->atomics = devm_ioremap_resource(&pdev->dev, res);
+ priv->atomics = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->atomics))
return PTR_ERR(priv->atomics);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->arb_sema = devm_ioremap_resource(&pdev->dev, res);
+ priv->arb_sema = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->arb_sema))
return PTR_ERR(priv->arb_sema);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index a3cadbaf3cba..1436e03ff4f7 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -648,6 +648,23 @@ int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
/**
+ * zynqmp_pm_ospi_mux_select() - OSPI Mux selection
+ *
+ * @dev_id: Device Id of the OSPI device.
+ * @select: OSPI Mux select value.
+ *
+ * This function selects the OSPI Mux.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, dev_id, IOCTL_OSPI_MUX_SELECT,
+ select, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select);
+
+/**
* zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
* @index: GGS register index
* @value: Register value to be written
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index b223f0ef337b..7eaab1be0aa4 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/fsi-occ.h>
@@ -33,13 +34,6 @@
#define OCC_P10_SRAM_MODE 0x58 /* Normal mode, OCB channel 2 */
-/*
- * Assume we don't have much FFDC, if we do we'll overflow and
- * fail the command. This needs to be big enough for simple
- * commands as well.
- */
-#define OCC_SBE_STATUS_WORDS 32
-
#define OCC_TIMEOUT_MS 1000
#define OCC_CMD_IN_PRG_WAIT_MS 50
@@ -50,6 +44,11 @@ struct occ {
struct device *sbefifo;
char name[32];
int idx;
+ u8 sequence_number;
+ void *buffer;
+ void *client_buffer;
+ size_t client_buffer_size;
+ size_t client_response_size;
enum versions version;
struct miscdevice mdev;
struct mutex occ_lock;
@@ -141,8 +140,7 @@ static ssize_t occ_write(struct file *file, const char __user *buf,
{
struct occ_client *client = file->private_data;
size_t rlen, data_length;
- u16 checksum = 0;
- ssize_t rc, i;
+ ssize_t rc;
u8 *cmd;
if (!client)
@@ -156,9 +154,6 @@ static ssize_t occ_write(struct file *file, const char __user *buf,
/* Construct the command */
cmd = client->buffer;
- /* Sequence number (we could increment and compare with response) */
- cmd[0] = 1;
-
/*
* Copy the user command (assume user data follows the occ command
* format)
@@ -178,14 +173,7 @@ static ssize_t occ_write(struct file *file, const char __user *buf,
goto done;
}
- /* Calculate checksum */
- for (i = 0; i < data_length + 4; ++i)
- checksum += cmd[i];
-
- cmd[data_length + 4] = checksum >> 8;
- cmd[data_length + 5] = checksum & 0xFF;
-
- /* Submit command */
+ /* Submit command; 4 bytes before the data and 2 bytes after */
rlen = PAGE_SIZE;
rc = fsi_occ_submit(client->occ->dev, cmd, data_length + 6, cmd,
&rlen);
@@ -223,6 +211,22 @@ static const struct file_operations occ_fops = {
.release = occ_release,
};
+static void occ_save_ffdc(struct occ *occ, __be32 *resp, size_t parsed_len,
+ size_t resp_len)
+{
+ if (resp_len > parsed_len) {
+ size_t dh = resp_len - parsed_len;
+ size_t ffdc_len = (dh - 1) * 4; /* SBE words are four bytes */
+ __be32 *ffdc = &resp[parsed_len];
+
+ if (ffdc_len > occ->client_buffer_size)
+ ffdc_len = occ->client_buffer_size;
+
+ memcpy(occ->client_buffer, ffdc, ffdc_len);
+ occ->client_response_size = ffdc_len;
+ }
+}
+
static int occ_verify_checksum(struct occ *occ, struct occ_response *resp,
u16 data_length)
{
@@ -251,8 +255,10 @@ static int occ_verify_checksum(struct occ *occ, struct occ_response *resp,
static int occ_getsram(struct occ *occ, u32 offset, void *data, ssize_t len)
{
u32 data_len = ((len + 7) / 8) * 8; /* must be multiples of 8 B */
- size_t cmd_len, resp_len, resp_data_len;
- __be32 *resp, cmd[6];
+ size_t cmd_len, parsed_len, resp_data_len;
+ size_t resp_len = OCC_MAX_RESP_WORDS;
+ __be32 *resp = occ->buffer;
+ __be32 cmd[6];
int idx = 0, rc;
/*
@@ -279,21 +285,22 @@ static int occ_getsram(struct occ *occ, u32 offset, void *data, ssize_t len)
cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_OCC_SRAM);
cmd[4 + idx] = cpu_to_be32(data_len);
- resp_len = (data_len >> 2) + OCC_SBE_STATUS_WORDS;
- resp = kzalloc(resp_len << 2, GFP_KERNEL);
- if (!resp)
- return -ENOMEM;
-
rc = sbefifo_submit(occ->sbefifo, cmd, cmd_len, resp, &resp_len);
if (rc)
- goto free;
+ return rc;
rc = sbefifo_parse_status(occ->sbefifo, SBEFIFO_CMD_GET_OCC_SRAM,
- resp, resp_len, &resp_len);
- if (rc)
- goto free;
+ resp, resp_len, &parsed_len);
+ if (rc > 0) {
+ dev_err(occ->dev, "SRAM read returned failure status: %08x\n",
+ rc);
+ occ_save_ffdc(occ, resp, parsed_len, resp_len);
+ return -ECOMM;
+ } else if (rc) {
+ return rc;
+ }
- resp_data_len = be32_to_cpu(resp[resp_len - 1]);
+ resp_data_len = be32_to_cpu(resp[parsed_len - 1]);
if (resp_data_len != data_len) {
dev_err(occ->dev, "SRAM read expected %d bytes got %zd\n",
data_len, resp_data_len);
@@ -302,37 +309,21 @@ static int occ_getsram(struct occ *occ, u32 offset, void *data, ssize_t len)
memcpy(data, resp, len);
}
-free:
- /* Convert positive SBEI status */
- if (rc > 0) {
- dev_err(occ->dev, "SRAM read returned failure status: %08x\n",
- rc);
- rc = -EBADMSG;
- }
-
- kfree(resp);
return rc;
}
-static int occ_putsram(struct occ *occ, const void *data, ssize_t len)
+static int occ_putsram(struct occ *occ, const void *data, ssize_t len,
+ u8 seq_no, u16 checksum)
{
- size_t cmd_len, buf_len, resp_len, resp_data_len;
u32 data_len = ((len + 7) / 8) * 8; /* must be multiples of 8 B */
- __be32 *buf;
+ size_t cmd_len, parsed_len, resp_data_len;
+ size_t resp_len = OCC_MAX_RESP_WORDS;
+ __be32 *buf = occ->buffer;
+ u8 *byte_buf;
int idx = 0, rc;
cmd_len = (occ->version == occ_p10) ? 6 : 5;
-
- /*
- * We use the same buffer for command and response, make
- * sure it's big enough
- */
- resp_len = OCC_SBE_STATUS_WORDS;
cmd_len += data_len >> 2;
- buf_len = max(cmd_len, resp_len);
- buf = kzalloc(buf_len << 2, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
/*
* Magic sequence to do SBE putsram command. SBE will transfer
@@ -358,18 +349,33 @@ static int occ_putsram(struct occ *occ, const void *data, ssize_t len)
buf[4 + idx] = cpu_to_be32(data_len);
memcpy(&buf[5 + idx], data, len);
+ byte_buf = (u8 *)&buf[5 + idx];
+ /*
+ * Overwrite the first byte with our sequence number and the last two
+ * bytes with the checksum.
+ */
+ byte_buf[0] = seq_no;
+ byte_buf[len - 2] = checksum >> 8;
+ byte_buf[len - 1] = checksum & 0xff;
+
rc = sbefifo_submit(occ->sbefifo, buf, cmd_len, buf, &resp_len);
if (rc)
- goto free;
+ return rc;
rc = sbefifo_parse_status(occ->sbefifo, SBEFIFO_CMD_PUT_OCC_SRAM,
- buf, resp_len, &resp_len);
- if (rc)
- goto free;
+ buf, resp_len, &parsed_len);
+ if (rc > 0) {
+ dev_err(occ->dev, "SRAM write returned failure status: %08x\n",
+ rc);
+ occ_save_ffdc(occ, buf, parsed_len, resp_len);
+ return -ECOMM;
+ } else if (rc) {
+ return rc;
+ }
- if (resp_len != 1) {
+ if (parsed_len != 1) {
dev_err(occ->dev, "SRAM write response length invalid: %zd\n",
- resp_len);
+ parsed_len);
rc = -EBADMSG;
} else {
resp_data_len = be32_to_cpu(buf[0]);
@@ -381,27 +387,16 @@ static int occ_putsram(struct occ *occ, const void *data, ssize_t len)
}
}
-free:
- /* Convert positive SBEI status */
- if (rc > 0) {
- dev_err(occ->dev, "SRAM write returned failure status: %08x\n",
- rc);
- rc = -EBADMSG;
- }
-
- kfree(buf);
return rc;
}
static int occ_trigger_attn(struct occ *occ)
{
- __be32 buf[OCC_SBE_STATUS_WORDS];
- size_t cmd_len, resp_len, resp_data_len;
+ __be32 *buf = occ->buffer;
+ size_t cmd_len, parsed_len, resp_data_len;
+ size_t resp_len = OCC_MAX_RESP_WORDS;
int idx = 0, rc;
- BUILD_BUG_ON(OCC_SBE_STATUS_WORDS < 8);
- resp_len = OCC_SBE_STATUS_WORDS;
-
switch (occ->version) {
default:
case occ_p9:
@@ -426,16 +421,22 @@ static int occ_trigger_attn(struct occ *occ)
rc = sbefifo_submit(occ->sbefifo, buf, cmd_len, buf, &resp_len);
if (rc)
- goto error;
+ return rc;
rc = sbefifo_parse_status(occ->sbefifo, SBEFIFO_CMD_PUT_OCC_SRAM,
- buf, resp_len, &resp_len);
- if (rc)
- goto error;
+ buf, resp_len, &parsed_len);
+ if (rc > 0) {
+ dev_err(occ->dev, "SRAM attn returned failure status: %08x\n",
+ rc);
+ occ_save_ffdc(occ, buf, parsed_len, resp_len);
+ return -ECOMM;
+ } else if (rc) {
+ return rc;
+ }
- if (resp_len != 1) {
+ if (parsed_len != 1) {
dev_err(occ->dev, "SRAM attn response length invalid: %zd\n",
- resp_len);
+ parsed_len);
rc = -EBADMSG;
} else {
resp_data_len = be32_to_cpu(buf[0]);
@@ -447,14 +448,6 @@ static int occ_trigger_attn(struct occ *occ)
}
}
- error:
- /* Convert positive SBEI status */
- if (rc > 0) {
- dev_err(occ->dev, "SRAM attn returned failure status: %08x\n",
- rc);
- rc = -EBADMSG;
- }
-
return rc;
}
@@ -466,24 +459,49 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
msecs_to_jiffies(OCC_CMD_IN_PRG_WAIT_MS);
struct occ *occ = dev_get_drvdata(dev);
struct occ_response *resp = response;
+ size_t user_resp_len = *resp_len;
u8 seq_no;
+ u16 checksum = 0;
u16 resp_data_length;
+ const u8 *byte_request = (const u8 *)request;
unsigned long start;
int rc;
+ size_t i;
+
+ *resp_len = 0;
if (!occ)
return -ENODEV;
- if (*resp_len < 7) {
- dev_dbg(dev, "Bad resplen %zd\n", *resp_len);
+ if (user_resp_len < 7) {
+ dev_dbg(dev, "Bad resplen %zd\n", user_resp_len);
return -EINVAL;
}
+ /*
+ * Checksum the request, skipping the first byte (sequence number,
+ * which is added below) and the trailing two checksum bytes.
+ */
+ for (i = 1; i < req_len - 2; ++i)
+ checksum += byte_request[i];
+
mutex_lock(&occ->occ_lock);
- /* Extract the seq_no from the command (first byte) */
- seq_no = *(const u8 *)request;
- rc = occ_putsram(occ, request, req_len);
+ occ->client_buffer = response;
+ occ->client_buffer_size = user_resp_len;
+ occ->client_response_size = 0;
+
+ /*
+ * Get a sequence number and update the counter. Avoid a sequence
+ * number of 0 which would pass the response check below even if the
+ * OCC response is uninitialized. Any sequence number the user is
+ * trying to send is overwritten since this function is the only common
+ * interface to the OCC and therefore the only place we can guarantee
+ * unique sequence numbers.
+ */
+ seq_no = occ->sequence_number++;
+ if (!occ->sequence_number)
+ occ->sequence_number = 1;
+ checksum += seq_no;
+
+ rc = occ_putsram(occ, request, req_len, seq_no, checksum);
if (rc)
goto done;
@@ -520,7 +538,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
resp_data_length = get_unaligned_be16(&resp->data_length);
/* Message size is data length + 5 bytes header + 2 bytes checksum */
- if ((resp_data_length + 7) > *resp_len) {
+ if ((resp_data_length + 7) > user_resp_len) {
rc = -EMSGSIZE;
goto done;
}
@@ -536,10 +554,11 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
goto done;
}
- *resp_len = resp_data_length + 7;
+ occ->client_response_size = resp_data_length + 7;
rc = occ_verify_checksum(occ, resp, resp_data_length);
done:
+ *resp_len = occ->client_response_size;
mutex_unlock(&occ->occ_lock);
return rc;
@@ -571,9 +590,15 @@ static int occ_probe(struct platform_device *pdev)
if (!occ)
return -ENOMEM;
+ /* SBE words are always four bytes */
+ occ->buffer = kvmalloc(OCC_MAX_RESP_WORDS * 4, GFP_KERNEL);
+ if (!occ->buffer)
+ return -ENOMEM;
+
occ->version = (uintptr_t)of_device_get_match_data(dev);
occ->dev = dev;
occ->sbefifo = dev->parent;
+ occ->sequence_number = 1;
mutex_init(&occ->occ_lock);
if (dev->of_node) {
@@ -605,6 +630,7 @@ static int occ_probe(struct platform_device *pdev)
if (rc) {
dev_err(dev, "failed to register miscdevice: %d\n", rc);
ida_simple_remove(&occ_ida, occ->idx);
+ kvfree(occ->buffer);
return rc;
}
@@ -620,6 +646,8 @@ static int occ_remove(struct platform_device *pdev)
{
struct occ *occ = platform_get_drvdata(pdev);
+ kvfree(occ->buffer);
+
misc_deregister(&occ->mdev);
device_for_each_child(&pdev->dev, NULL, occ_unregister_child);
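
The sequence-number and checksum handling above moves entirely into the driver: byte 0 of the request carries a driver-assigned sequence number, and the last two bytes carry a big-endian sum of everything in between plus the sequence number. A minimal userspace model of that scheme (hypothetical helper, not the kernel code):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Seal an OCC request the way fsi_occ_submit()/occ_putsram() do:
 * overwrite byte 0 with the sequence number and the last two bytes
 * with a big-endian checksum of the payload plus the sequence number. */
static void occ_seal_request(uint8_t *req, size_t len, uint8_t seq_no)
{
	uint16_t checksum = 0;
	size_t i;

	for (i = 1; i < len - 2; ++i)
		checksum += req[i];
	checksum += seq_no;

	req[0] = seq_no;
	req[len - 2] = checksum >> 8;
	req[len - 1] = checksum & 0xff;
}

int main(void)
{
	uint8_t req[8] = { 0, 0x40, 0x00, 0x01, 0x20, 0x00, 0x00, 0x00 };

	occ_seal_request(req, sizeof(req), 1);
	printf("seq=%02x csum=%02x%02x\n", req[0], req[6], req[7]);
	return 0;
}
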
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index 84cb965bfed5..52328adef643 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -124,6 +124,7 @@ struct sbefifo {
bool broken;
bool dead;
bool async_ffdc;
+ bool timed_out;
};
struct sbefifo_user {
@@ -136,6 +137,14 @@ struct sbefifo_user {
static DEFINE_MUTEX(sbefifo_ffdc_mutex);
+static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sbefifo *sbefifo = container_of(dev, struct sbefifo, dev);
+
+ return sysfs_emit(buf, "%d\n", sbefifo->timed_out ? 1 : 0);
+}
+static DEVICE_ATTR_RO(timeout);
static void __sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
size_t ffdc_sz, bool internal)
@@ -462,11 +471,14 @@ static int sbefifo_wait(struct sbefifo *sbefifo, bool up,
break;
}
if (!ready) {
+ sysfs_notify(&sbefifo->dev.kobj, NULL, dev_attr_timeout.attr.name);
+ sbefifo->timed_out = true;
dev_err(dev, "%s FIFO Timeout ! status=%08x\n", up ? "UP" : "DOWN", sts);
return -ETIMEDOUT;
}
dev_vdbg(dev, "End of wait status: %08x\n", sts);
+ sbefifo->timed_out = false;
*status = sts;
return 0;
@@ -740,7 +752,9 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes);
/* Perform the command */
- mutex_lock(&sbefifo->lock);
+ rc = mutex_lock_interruptible(&sbefifo->lock);
+ if (rc)
+ return rc;
rc = __sbefifo_submit(sbefifo, command, cmd_len, &resp_iter);
mutex_unlock(&sbefifo->lock);
@@ -820,7 +834,9 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len);
/* Perform the command */
- mutex_lock(&sbefifo->lock);
+ rc = mutex_lock_interruptible(&sbefifo->lock);
+ if (rc)
+ goto bail;
rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter);
mutex_unlock(&sbefifo->lock);
if (rc < 0)
@@ -875,7 +891,9 @@ static ssize_t sbefifo_user_write(struct file *file, const char __user *buf,
user->pending_len = 0;
/* Trigger reset request */
- mutex_lock(&sbefifo->lock);
+ rc = mutex_lock_interruptible(&sbefifo->lock);
+ if (rc)
+ goto bail;
rc = sbefifo_request_reset(user->sbefifo);
mutex_unlock(&sbefifo->lock);
if (rc == 0)
@@ -993,6 +1011,8 @@ static int sbefifo_probe(struct device *dev)
child_name);
}
+ device_create_file(&sbefifo->dev, &dev_attr_timeout);
+
return 0;
err_free_minor:
fsi_free_minor(sbefifo->dev.devt);
@@ -1018,6 +1038,8 @@ static int sbefifo_remove(struct device *dev)
dev_dbg(dev, "Removing sbefifo device...\n");
+ device_remove_file(&sbefifo->dev, &dev_attr_timeout);
+
mutex_lock(&sbefifo->lock);
sbefifo->dead = true;
mutex_unlock(&sbefifo->lock);
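
The new "timeout" attribute is poll()-able: sysfs_notify() wakes waiters with POLLPRI when a FIFO timeout is recorded. A sketch of a userspace watcher; the sysfs path is an assumption and depends on how the sbefifo device is enumerated on a given platform:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

/* Path is an assumption; locate the sbefifo device node on your
 * platform and point this at its "timeout" attribute. */
#define TIMEOUT_ATTR "/sys/bus/fsi/devices/0:0:0:0/timeout"

int main(void)
{
	char val[4];
	struct pollfd pfd = { .events = POLLPRI | POLLERR };

	pfd.fd = open(TIMEOUT_ATTR, O_RDONLY);
	if (pfd.fd < 0)
		return 1;

	read(pfd.fd, val, sizeof(val));		/* arm the notification */
	while (poll(&pfd, 1, -1) > 0) {
		lseek(pfd.fd, 0, SEEK_SET);	/* re-read after notify */
		if (read(pfd.fd, val, sizeof(val)) > 0)
			printf("timed_out = %c\n", val[0]);
	}
	close(pfd.fd);
	return 0;
}
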
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index 44398992ae15..bbf53e289141 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -72,12 +72,10 @@ static void pt_gpio_free(struct gpio_chip *gc, unsigned offset)
static int pt_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_dev;
- acpi_handle handle = ACPI_HANDLE(dev);
struct pt_gpio_chip *pt_gpio;
int ret = 0;
- if (acpi_bus_get_device(handle, &acpi_dev)) {
+ if (!ACPI_COMPANION(dev)) {
dev_err(dev, "PT GPIO device node not found\n");
return -ENODEV;
}
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 177d03ef4529..40a052bc6784 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -256,6 +256,11 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
NULL,
0);
+ if (ret) {
+ dev_err(dev, "bgpio_init failed\n");
+ return ret;
+ }
+
gc->direction_input = mlxbf2_gpio_direction_input;
gc->direction_output = mlxbf2_gpio_direction_output;
gc->ngpio = npins;
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index fa9b4d8c3ff5..43ca52fa6f9a 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -224,7 +224,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
}
chip->gc.label = dev_name(dev);
- if (of_property_read_u32(dn, "ngpios", &num_gpios))
+ if (!of_property_read_u32(dn, "ngpios", &num_gpios))
chip->gc.ngpio = num_gpios;
irq = platform_get_irq(pdev, 0);
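
The xgs-iproc fix above hinges on the kernel's 0-on-success convention: the old test applied "ngpios" only when the property read *failed*. A standalone model of the corrected check (stubbed getter, not the real OF API):

#include <stdio.h>

/* Stub with the kernel convention: 0 on success, negative on error. */
static int read_u32_stub(const char *prop, unsigned int *out)
{
	(void)prop;
	*out = 32;		/* pretend the DT property exists */
	return 0;
}

int main(void)
{
	unsigned int ngpio = 0;

	/* Fixed form: apply the value only when the read succeeds. */
	if (!read_u32_stub("ngpios", &ngpio))
		printf("ngpio = %u\n", ngpio);

	return 0;
}
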
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 47712b6903b5..985e8589c58b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -95,10 +95,7 @@ static bool acpi_gpio_deferred_req_irqs_done;
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
- if (!gc->parent)
- return false;
-
- return ACPI_HANDLE(gc->parent) == data;
+ return gc->parent && device_match_acpi_handle(gc->parent, data);
}
/**
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cea777ae7fb9..2a926d0de423 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -103,7 +103,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
- depends on FB
+ depends on FB=y || FB=DRM
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -211,7 +211,7 @@ config DRM_KMS_CMA_HELPER
config DRM_GEM_SHMEM_HELPER
bool
- depends on DRM
+ depends on DRM && MMU
help
Choose this if you need the GEM shmem helper functions
@@ -271,7 +271,8 @@ source "drivers/gpu/drm/kmb/Kconfig"
config DRM_VGEM
tristate "Virtual GEM provider"
- depends on DRM
+ depends on DRM && MMU
+ select DRM_GEM_SHMEM_HELPER
help
Choose this option to get a virtual graphics memory manager,
as used by Mesa's software renderer for enhanced performance.
@@ -279,7 +280,7 @@ config DRM_VGEM
config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
- depends on DRM
+ depends on DRM && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select CRC32
@@ -351,8 +352,6 @@ source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
-source "drivers/gpu/drm/zte/Kconfig"
-
source "drivers/gpu/drm/mxsfb/Kconfig"
source "drivers/gpu/drm/meson/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ad1112154898..0dff40bb863c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -113,7 +113,6 @@ obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
-obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 8d0748184a14..653726588956 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -73,10 +73,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
- vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
- arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
- nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
- beige_goby_reg_init.o yellow_carp_reg_init.o cyan_skillfish_reg_init.o
+ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
+ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
# add DF block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 148f6c3343ab..bcfdb63b1d42 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -307,6 +307,8 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = true;
}
+ amdgpu_ras_set_error_query_ready(adev, true);
+
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 269437b01328..b85b67a88a3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -205,6 +205,7 @@ extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
extern int amdgpu_bad_page_threshold;
+extern bool amdgpu_ignore_bad_page_threshold;
extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
@@ -744,6 +745,7 @@ enum amd_hw_ip_block_type {
UVD_HWIP,
VCN_HWIP = UVD_HWIP,
JPEG_HWIP = VCN_HWIP,
+ VCN1_HWIP,
VCE_HWIP,
DF_HWIP,
DCE_HWIP,
@@ -755,11 +757,16 @@ enum amd_hw_ip_block_type {
CLK_HWIP,
UMC_HWIP,
RSMU_HWIP,
+ XGMI_HWIP,
+ DCI_HWIP,
MAX_HWIP
};
#define HWIP_MAX_INSTANCE 10
+#define HW_ID_MAX 300
+#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
@@ -830,6 +837,7 @@ struct amdgpu_device {
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct debugfs_blob_wrapper debugfs_vbios_blob;
+ struct debugfs_blob_wrapper debugfs_discovery_blob;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
@@ -1078,8 +1086,6 @@ struct amdgpu_device {
char product_name[32];
char serial[20];
- struct amdgpu_autodump autodump;
-
atomic_t throttling_logging_enabled;
struct ratelimit_state throttling_logging_rs;
uint32_t ras_hw_enabled;
@@ -1090,6 +1096,7 @@ struct amdgpu_device {
pci_channel_state_t pci_channel_state;
struct amdgpu_reset_control *reset_cntl;
+ uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
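
The IP_VERSION() triple added above packs into a single integer: major in bits 23:16, minor in 15:8, revision in 7:0, which keeps comparisons and switch cases cheap. A quick check of the encoding:

#include <stdio.h>

#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

int main(void)
{
	/* 10.3.0 packs to 0x000A0300. */
	printf("GC 10.3.0 = 0x%08X\n", IP_VERSION(10, 3, 0));
	return 0;
}
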
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1d41c2c00623..7077f21f0021 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -31,6 +31,8 @@
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
/* Total memory size in system memory and all GPU VRAM. Used to
* estimate worst case amount of memory to reserve for page tables
@@ -70,8 +72,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
if (!kfd_initialized)
return;
- adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
- adev->pdev, adev->asic_type, vf);
+ adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, vf);
if (adev->kfd.dev)
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -780,3 +781,15 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
return adev->have_atomics_support;
}
+
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+
+ /* CPU MCA will handle page retirement if connected_to_cpu is 1 */
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL);
+ else
+ amdgpu_amdkfd_gpu_reset(kgd);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3bc52b2c604f..751557af09bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -279,6 +279,8 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
struct kgd_mem *mem, void **kptr, uint64_t *size);
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem);
+
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
@@ -290,6 +292,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
uint64_t *mmap_offset);
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
struct tile_config *config);
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd);
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
@@ -321,8 +324,7 @@ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- unsigned int asic_type, bool vf);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
@@ -346,8 +348,7 @@ static inline void kgd2kfd_exit(void)
}
static inline
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- unsigned int asic_type, bool vf)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
{
return NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 054c1a224def..0e9cfe99ae9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1503,7 +1503,7 @@ allocate_init_user_pages_failed:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
- amdgpu_bo_unref(&bo);
+ drm_gem_object_put(gobj);
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
@@ -1871,6 +1871,16 @@ bo_reserve_failed:
return ret;
}
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem)
+{
+ struct amdgpu_bo *bo = mem->bo;
+
+ amdgpu_bo_reserve(bo, true);
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+}
+
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
struct kfd_vm_fault_info *mem)
{
@@ -2041,19 +2051,26 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
/* Get updated user pages */
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
if (ret) {
- pr_debug("%s: Failed to get user pages: %d\n",
- __func__, ret);
+ pr_debug("Failed %d to get user pages\n", ret);
+
+ /* Return -EFAULT bad address error as success. It will
+ * fail later with a VM fault if the GPU tries to access
+ * it. Better than hanging indefinitely with stalled
+ * user mode queues.
+ *
+ * Return other error -EBUSY or -ENOMEM to retry restore
+ */
+ if (ret != -EFAULT)
+ return ret;
+ } else {
- /* Return error -EBUSY or -ENOMEM, retry restore */
- return ret;
+ /*
+ * FIXME: Cannot ignore the return code, must hold
+ * notifier_lock
+ */
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
}
- /*
- * FIXME: Cannot ignore the return code, must hold
- * notifier_lock
- */
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
-
/* Mark the BO as valid unless it was invalidated
* again concurrently.
*/
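
The retry policy above treats -EFAULT as deferrable (the GPU will take a VM fault later rather than stalling user queues), while -EBUSY and -ENOMEM still abort so the restore can be retried. The triage, modeled in isolation:

#include <errno.h>
#include <stdio.h>

/* Returns 0 when the restore may proceed, a negative errno when the
 * caller should bail out and retry the restore later. */
static int triage_get_user_pages(int ret)
{
	if (ret == -EFAULT) {
		/* Bad address: proceed anyway; the GPU will VM-fault
		 * instead of hanging indefinitely. */
		fprintf(stderr, "deferring to VM fault\n");
		return 0;
	}
	return ret;	/* 0 proceeds; -EBUSY/-ENOMEM trigger a retry */
}

int main(void)
{
	printf("%d %d %d\n", triage_get_user_pages(0),
	       triage_get_user_pages(-EFAULT),
	       triage_get_user_pages(-EBUSY));
	return 0;
}
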
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 15c45b2a3983..714178f1b6c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -61,7 +61,7 @@ static void amdgpu_bo_list_free(struct kref *ref)
int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries, struct amdgpu_bo_list **result)
+ size_t num_entries, struct amdgpu_bo_list **result)
{
unsigned last_entry = 0, first_userptr = num_entries;
struct amdgpu_bo_list_entry *array;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index c905a4cfc173..044b41f0bfd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -61,7 +61,7 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
int amdgpu_bo_list_create(struct amdgpu_device *adev,
struct drm_file *filp,
struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries,
+ size_t num_entries,
struct amdgpu_bo_list **list);
static inline struct amdgpu_bo_list_entry *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 913f9eaa9cd6..0311d799a010 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1222,6 +1222,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
if (r)
goto error_unlock;
+ drm_sched_job_arm(&job->base);
+
/* No memory allocation is allowed while holding the notifier lock.
* The lock is held until amdgpu_cs_submit is finished and fence is
* added to BOs.
@@ -1259,7 +1261,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
- drm_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base);
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e7a010b7ca1f..468003583b2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,14 +43,61 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_VCN_JPEG] = 1,
};
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+{
+ switch (ctx_prio) {
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ case AMDGPU_CTX_PRIORITY_LOW:
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+ switch (ctx_prio) {
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ return DRM_SCHED_PRIORITY_UNSET;
+
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ return DRM_SCHED_PRIORITY_MIN;
+
+ case AMDGPU_CTX_PRIORITY_LOW:
+ return DRM_SCHED_PRIORITY_MIN;
+
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+ return DRM_SCHED_PRIORITY_NORMAL;
+
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ return DRM_SCHED_PRIORITY_HIGH;
+
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return DRM_SCHED_PRIORITY_HIGH;
+
+	/* This should not happen, as we already sanitized the
+	 * userspace-provided priority; WARN if it does.
+	 */
+ default:
+ WARN(1, "Invalid context priority %d\n", ctx_prio);
+ return DRM_SCHED_PRIORITY_NORMAL;
+ }
+
+}
+
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
- enum drm_sched_priority priority)
+ int32_t priority)
{
- if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+ if (!amdgpu_ctx_priority_is_valid(priority))
return -EINVAL;
/* NORMAL and below are accessible by everyone */
- if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+ if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
return 0;
if (capable(CAP_SYS_NICE))
@@ -62,26 +109,51 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
return -EACCES;
}
-static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
{
switch (prio) {
- case DRM_SCHED_PRIORITY_HIGH:
- case DRM_SCHED_PRIORITY_KERNEL:
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return AMDGPU_GFX_PIPE_PRIO_HIGH;
default:
return AMDGPU_GFX_PIPE_PRIO_NORMAL;
}
}
-static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
- enum drm_sched_priority prio,
- u32 hw_ip)
+static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
{
+ switch (prio) {
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ return AMDGPU_RING_PRIO_1;
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return AMDGPU_RING_PRIO_2;
+ default:
+ return AMDGPU_RING_PRIO_0;
+ }
+}
+
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ int32_t ctx_prio;
unsigned int hw_prio;
- hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
- amdgpu_ctx_sched_prio_to_compute_prio(prio) :
- AMDGPU_RING_PRIO_DEFAULT;
+ ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+
+ switch (hw_ip) {
+ case AMDGPU_HW_IP_COMPUTE:
+ hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+ break;
+ case AMDGPU_HW_IP_VCE:
+ case AMDGPU_HW_IP_VCN_ENC:
+ hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
+ break;
+ default:
+ hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+ break;
+ }
+
hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,15 +161,17 @@ static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
return hw_prio;
}
+
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
- const u32 ring)
+ const u32 ring)
{
struct amdgpu_device *adev = ctx->adev;
struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
unsigned num_scheds = 0;
+ int32_t ctx_prio;
unsigned int hw_prio;
- enum drm_sched_priority priority;
+ enum drm_sched_priority drm_prio;
int r;
entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
@@ -105,10 +179,11 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
if (!entity)
return -ENOMEM;
+ ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
entity->sequence = 1;
- priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
- ctx->init_priority : ctx->override_priority;
- hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+ hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
+ drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
@@ -124,7 +199,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
num_scheds = 1;
}
- r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+ r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
&ctx->guilty);
if (r)
goto error_free_entity;
@@ -139,7 +214,7 @@ error_free_entity:
}
static int amdgpu_ctx_init(struct amdgpu_device *adev,
- enum drm_sched_priority priority,
+ int32_t priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
@@ -161,7 +236,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->reset_counter_query = ctx->reset_counter;
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
- ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
+ ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
return 0;
}
@@ -234,7 +309,7 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
struct drm_file *filp,
- enum drm_sched_priority priority,
+ int32_t priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -397,19 +472,19 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{
int r;
uint32_t id;
- enum drm_sched_priority priority;
+ int32_t priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
id = args->in.ctx_id;
- r = amdgpu_to_sched_priority(args->in.priority, &priority);
+ priority = args->in.priority;
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
- if (r == -EINVAL)
- priority = DRM_SCHED_PRIORITY_NORMAL;
+ if (!amdgpu_ctx_priority_is_valid(priority))
+ priority = AMDGPU_CTX_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
@@ -515,9 +590,9 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
}
static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
- struct amdgpu_ctx_entity *aentity,
- int hw_ip,
- enum drm_sched_priority priority)
+ struct amdgpu_ctx_entity *aentity,
+ int hw_ip,
+ int32_t priority)
{
struct amdgpu_device *adev = ctx->adev;
unsigned int hw_prio;
@@ -525,12 +600,12 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
unsigned num_scheds;
/* set sw priority */
- drm_sched_entity_set_priority(&aentity->entity, priority);
+ drm_sched_entity_set_priority(&aentity->entity,
+ amdgpu_ctx_to_drm_sched_prio(priority));
/* set hw priority */
if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
- hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
- AMDGPU_HW_IP_COMPUTE);
+ hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
@@ -540,14 +615,14 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
}
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum drm_sched_priority priority)
+ int32_t priority)
{
- enum drm_sched_priority ctx_prio;
+ int32_t ctx_prio;
unsigned i, j;
ctx->override_priority = priority;
- ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+ ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 14db16bc3322..a44b8b8ed39c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -47,8 +47,8 @@ struct amdgpu_ctx {
spinlock_t ring_lock;
struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
bool preamble_presented;
- enum drm_sched_priority init_priority;
- enum drm_sched_priority override_priority;
+ int32_t init_priority;
+ int32_t override_priority;
struct mutex lock;
atomic_t guilty;
unsigned long ras_counter_ce;
@@ -75,8 +75,8 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity,
uint64_t seq);
-void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum drm_sched_priority priority);
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int32_t ctx_prio);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
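
The ioctl now carries the raw int32_t priority end to end and degrades garbage values to NORMAL for backwards compatibility. A userspace-side model of the validation; the priority constants are reproduced here as assumptions matching the amdgpu uapi (see amdgpu_drm.h):

#include <stdint.h>
#include <stdio.h>

enum {
	CTX_PRIO_UNSET     = -2048,
	CTX_PRIO_VERY_LOW  = -1023,
	CTX_PRIO_LOW       = -512,
	CTX_PRIO_NORMAL    = 0,
	CTX_PRIO_HIGH      = 512,
	CTX_PRIO_VERY_HIGH = 1023,
};

static int ctx_priority_is_valid(int32_t p)
{
	switch (p) {
	case CTX_PRIO_UNSET:
	case CTX_PRIO_VERY_LOW:
	case CTX_PRIO_LOW:
	case CTX_PRIO_NORMAL:
	case CTX_PRIO_HIGH:
	case CTX_PRIO_VERY_HIGH:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	int32_t p = 12345;	/* garbage from an old userspace */

	if (!ctx_priority_is_valid(p))
		p = CTX_PRIO_NORMAL;	/* backwards-compat fallback */
	printf("effective priority: %d\n", (int)p);
	return 0;
}
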
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 463b9c0283f7..164d6a9e9fbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -27,7 +27,6 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
-#include <linux/poll.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
@@ -36,87 +35,10 @@
#include "amdgpu_rap.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_fw_attestation.h"
-
-int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- unsigned long timeout = 600 * HZ;
- int ret;
-
- wake_up_interruptible(&adev->autodump.gpu_hang);
-
- ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
- if (ret == 0) {
- pr_err("autodump: timeout, move on to gpu recovery\n");
- return -ETIMEDOUT;
- }
-#endif
- return 0;
-}
+#include "amdgpu_umr.h"
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
-{
- struct amdgpu_device *adev = inode->i_private;
- int ret;
-
- file->private_data = adev;
-
- ret = down_read_killable(&adev->reset_sem);
- if (ret)
- return ret;
-
- if (adev->autodump.dumping.done) {
- reinit_completion(&adev->autodump.dumping);
- ret = 0;
- } else {
- ret = -EBUSY;
- }
-
- up_read(&adev->reset_sem);
-
- return ret;
-}
-
-static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
-{
- struct amdgpu_device *adev = file->private_data;
-
- complete_all(&adev->autodump.dumping);
- return 0;
-}
-
-static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
-{
- struct amdgpu_device *adev = file->private_data;
-
- poll_wait(file, &adev->autodump.gpu_hang, poll_table);
-
- if (amdgpu_in_reset(adev))
- return POLLIN | POLLRDNORM | POLLWRNORM;
-
- return 0;
-}
-
-static const struct file_operations autodump_debug_fops = {
- .owner = THIS_MODULE,
- .open = amdgpu_debugfs_autodump_open,
- .poll = amdgpu_debugfs_autodump_poll,
- .release = amdgpu_debugfs_autodump_release,
-};
-
-static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
-{
- init_completion(&adev->autodump.dumping);
- complete_all(&adev->autodump.dumping);
- init_waitqueue_head(&adev->autodump.gpu_hang);
-
- debugfs_create_file("amdgpu_autodump", 0600,
- adev_to_drm(adev)->primary->debugfs_root,
- adev, &autodump_debug_fops);
-}
-
/**
* amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
*
@@ -279,6 +201,145 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
+static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
+{
+ struct amdgpu_debugfs_regs2_data *rd;
+
+ rd = kzalloc(sizeof *rd, GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+ rd->adev = file_inode(file)->i_private;
+ file->private_data = rd;
+ mutex_init(&rd->lock);
+
+ return 0;
+}
+
+static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
+{
+ struct amdgpu_debugfs_regs2_data *rd = file->private_data;
+ mutex_destroy(&rd->lock);
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
+{
+ struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+ struct amdgpu_device *adev = rd->adev;
+ ssize_t result = 0;
+ int r;
+ uint32_t value;
+
+ if (size & 0x3 || offset & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ mutex_lock(&rd->lock);
+
+ if (rd->id.use_grbm) {
+ if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
+ (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ mutex_unlock(&rd->lock);
+ return -EINVAL;
+ }
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
+ rd->id.grbm.sh,
+ rd->id.grbm.instance);
+ }
+
+ if (rd->id.use_srbm) {
+ mutex_lock(&adev->srbm_mutex);
+ amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
+ rd->id.srbm.queue, rd->id.srbm.vmid);
+ }
+
+ if (rd->id.pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ if (!write_en) {
+ value = RREG32(offset >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ } else {
+ r = get_user(value, (uint32_t *)buf);
+ if (!r)
+ amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value);
+ }
+ if (r) {
+ result = r;
+ goto end;
+ }
+ offset += 4;
+ size -= 4;
+ result += 4;
+ buf += 4;
+ }
+end:
+ if (rd->id.use_grbm) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (rd->id.use_srbm) {
+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ if (rd->id.pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ mutex_unlock(&rd->lock);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ amdgpu_virt_disable_access_debugfs(adev);
+ return result;
+}
+
+static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
+{
+ struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+ int r;
+
+ switch (cmd) {
+ case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
+ mutex_lock(&rd->lock);
+ r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id);
+ mutex_unlock(&rd->lock);
+ return r ? -EINVAL : 0;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
+{
+ return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
+}
+
+static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
+{
+ return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
+}
+
/**
* amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
@@ -1091,6 +1152,16 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
return result;
}
+static const struct file_operations amdgpu_debugfs_regs2_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
+ .read = amdgpu_debugfs_regs2_read,
+ .write = amdgpu_debugfs_regs2_write,
+ .open = amdgpu_debugfs_regs2_open,
+ .release = amdgpu_debugfs_regs2_release,
+ .llseek = default_llseek
+};
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -1148,6 +1219,7 @@ static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
+ &amdgpu_debugfs_regs2_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
@@ -1160,6 +1232,7 @@ static const struct file_operations *debugfs_regs[] = {
static const char *debugfs_regs_names[] = {
"amdgpu_regs",
+ "amdgpu_regs2",
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
@@ -1206,7 +1279,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
}
/* Avoid accidentally unparking the sched thread during GPU reset */
- r = down_read_killable(&adev->reset_sem);
+ r = down_write_killable(&adev->reset_sem);
if (r)
return r;
@@ -1235,7 +1308,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
kthread_unpark(ring->sched.thread);
}
- up_read(&adev->reset_sem);
+ up_write(&adev->reset_sem);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1255,7 +1328,7 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
return r;
}
- *val = amdgpu_bo_evict_vram(adev);
+ *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1268,17 +1341,15 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
{
struct amdgpu_device *adev = (struct amdgpu_device *)data;
struct drm_device *dev = adev_to_drm(adev);
- struct ttm_resource_manager *man;
int r;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return r;
}
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- *val = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+ *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1544,6 +1615,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
struct dentry *ent;
int r, i;
+ if (!debugfs_initialized())
+ return 0;
+
ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
&fops_ib_preempt);
if (IS_ERR(ent)) {
@@ -1582,13 +1656,10 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
if (!ring)
continue;
- if (amdgpu_debugfs_ring_init(adev, ring)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
- }
+ amdgpu_debugfs_ring_init(adev, ring);
}
amdgpu_ras_debugfs_create_all(adev);
- amdgpu_debugfs_autodump_init(adev);
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
@@ -1607,6 +1678,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
debugfs_create_blob("amdgpu_vbios", 0444, root,
&adev->debugfs_vbios_blob);
+ adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
+ adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
+ debugfs_create_blob("amdgpu_discovery", 0444, root,
+ &adev->debugfs_discovery_blob);
+
return 0;
}
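
A tool like umr would drive the new amdgpu_regs2 node by setting the GRBM/SRBM bank state once via ioctl and then pread()ing registers at their byte offset. An outline only: the iocdata layout and ioctl number live in amdgpu_umr.h (not shown here), and the debugfs path is the usual assumption for the first DRM device:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "amdgpu_umr.h"	/* iocdata layout + ioctl number (assumed) */

int main(void)
{
	struct amdgpu_debugfs_regs2_iocdata id = { 0 };	/* no banking */
	uint32_t value;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs2", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE, &id) < 0)
		return 1;
	/* Offsets are byte offsets and must be 4-byte aligned. */
	if (pread(fd, &value, sizeof(value), 0x1234) == sizeof(value))
		printf("reg = 0x%08x\n", value);
	close(fd);
	return 0;
}
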
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 141a8474e24f..371a6f0deb29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -22,14 +22,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
/*
* Debugfs
*/
-struct amdgpu_autodump {
- struct completion dumping;
- struct wait_queue_head gpu_hang;
-};
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
@@ -37,4 +32,3 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev);
void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
-int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index af9bdf16eefd..6e40cc1bc6dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -125,6 +125,7 @@ const char *amdgpu_asic_name[] = {
"DIMGREY_CAVEFISH",
"BEIGE_GOBY",
"YELLOW_CARP",
+ "IP DISCOVERY",
"LAST",
};
@@ -305,7 +306,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
uint64_t last;
int idx;
- if (!drm_dev_enter(&adev->ddev, &idx))
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
return;
BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
@@ -2126,46 +2127,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r)
return r;
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- case CHIP_ALDEBARAN:
- if (adev->flags & AMD_IS_APU)
- adev->family = AMDGPU_FAMILY_RV;
- else
- adev->family = AMDGPU_FAMILY_AI;
-
- r = soc15_set_ip_blocks(adev);
- if (r)
- return r;
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- if (adev->asic_type == CHIP_VANGOGH)
- adev->family = AMDGPU_FAMILY_VGH;
- else if (adev->asic_type == CHIP_YELLOW_CARP)
- adev->family = AMDGPU_FAMILY_YC;
- else
- adev->family = AMDGPU_FAMILY_NV;
-
- r = nv_set_ip_blocks(adev);
+ default:
+ r = amdgpu_discovery_set_ip_blocks(adev);
if (r)
return r;
break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
}
amdgpu_amdkfd_device_probe(adev);
@@ -2745,6 +2711,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_release_full_gpu(adev, false))
+ DRM_ERROR("failed to release exclusive mode on fini\n");
+ }
+
return 0;
}
@@ -2805,10 +2776,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_ras_fini(adev);
- if (amdgpu_sriov_vf(adev))
- if (amdgpu_virt_release_full_gpu(adev, false))
- DRM_ERROR("failed to release exclusive mode on fini\n");
-
return 0;
}
@@ -3240,6 +3207,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_RENOIR:
+ case CHIP_CYAN_SKILLFISH:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
@@ -3247,13 +3215,15 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_VANGOGH:
case CHIP_YELLOW_CARP:
#endif
+ default:
return amdgpu_dc != 0;
-#endif
+#else
default:
if (amdgpu_dc > 0)
DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
"but isn't supported by ASIC, ignoring\n");
return false;
+#endif
}
}
@@ -3354,6 +3324,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
continue;
} else if (timeout < 0) {
timeout = MAX_SCHEDULE_TIMEOUT;
+ dev_warn(adev->dev, "lockup timeout disabled");
+ add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
} else {
timeout = msecs_to_jiffies(timeout);
}
@@ -3538,17 +3510,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* enable PCIE atomic ops */
- r = pci_enable_atomic_ops_to_root(adev->pdev,
- PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
- PCI_EXP_DEVCAP2_ATOMIC_COMP64);
- if (r) {
- adev->have_atomics_support = false;
- DRM_INFO("PCIE atomic ops is not supported\n");
- } else {
- adev->have_atomics_support = true;
- }
-
amdgpu_device_get_pcie_info(adev);
if (amdgpu_mcbp)
@@ -3571,6 +3532,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* enable PCIE atomic ops */
+ if (amdgpu_sriov_vf(adev))
+ adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
+ adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
+ (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ else
+ adev->have_atomics_support =
+ !pci_enable_atomic_ops_to_root(adev->pdev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ if (!adev->have_atomics_support)
+ dev_info(adev->dev, "PCIE atomic ops is not supported\n");
+
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -3865,9 +3839,11 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_fbdev_fini(adev);
+ amdgpu_device_ip_fini_early(adev);
+
amdgpu_irq_fini_hw(adev);
- amdgpu_device_ip_fini_early(adev);
+ ttm_device_clear_dma_mappings(&adev->mman.bdev);
amdgpu_gart_dummy_page_fini(adev);
@@ -3876,8 +3852,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
- amdgpu_device_ip_fini(adev);
amdgpu_fence_driver_sw_fini(adev);
+ amdgpu_device_ip_fini(adev);
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
@@ -3909,6 +3885,25 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
}
+/**
+ * amdgpu_device_evict_resources - evict device resources
+ * @adev: amdgpu device object
+ *
+ * Evicts all ttm device resources (vram BOs, gart table) from the lru list
+ * of the vram memory type. Mainly used for evicting device resources
+ * at suspend time.
+ *
+ */
+static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
+{
+ /* No need to evict vram on APUs for suspend to ram */
+ if (adev->in_s3 && (adev->flags & AMD_IS_APU))
+ return;
+
+ if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
+ DRM_WARN("evicting device resources failed\n");
+
+}
/*
* Suspend & resume.
@@ -3949,17 +3944,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (!adev->in_s0ix)
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
- /* evict vram memory */
- amdgpu_bo_evict_vram(adev);
+ /* First evict vram memory */
+ amdgpu_device_evict_resources(adev);
amdgpu_fence_driver_hw_fini(adev);
amdgpu_device_ip_suspend_phase2(adev);
- /* evict remaining vram memory
- * This second call to evict vram is to evict the gart page table
- * using the CPU.
+ /* This second call to evict device resources is to evict
+ * the gart page table using the CPU.
*/
- amdgpu_bo_evict_vram(adev);
+ amdgpu_device_evict_resources(adev);
return 0;
}
@@ -4466,10 +4460,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (reset_context->reset_req_dev == adev)
job = reset_context->job;
- /* no need to dump if device is not in good state during probe period */
- if (!adev->gmc.xgmi.pending_reset)
- amdgpu_debugfs_wait_dump(adev);
-
if (amdgpu_sriov_vf(adev)) {
/* stop the data exchange thread */
amdgpu_virt_fini_data_exchange(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 52488bb45112..6b25837955c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -52,6 +52,7 @@ struct amdgpu_df_funcs {
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
uint32_t ficadl_val, uint32_t ficadh_val);
+ bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
};
struct amdgpu_df {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index ada7bc19118a..d7c8d9e3c203 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -21,16 +21,58 @@
*
*/
+#include <linux/firmware.h>
+
#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
+#include "soc15.h"
+#include "gfx_v9_0.h"
+#include "gmc_v9_0.h"
+#include "df_v1_7.h"
+#include "df_v3_6.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
+#include "hdp_v4_0.h"
+#include "vega10_ih.h"
+#include "vega20_ih.h"
+#include "sdma_v4_0.h"
+#include "uvd_v7_0.h"
+#include "vce_v4_0.h"
+#include "vcn_v1_0.h"
+#include "vcn_v2_5.h"
+#include "jpeg_v2_5.h"
+#include "smuio_v9_0.h"
+#include "gmc_v10_0.h"
+#include "gfxhub_v2_0.h"
+#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
+#include "nbio_v7_2.h"
+#include "hdp_v5_0.h"
+#include "nv.h"
+#include "navi10_ih.h"
+#include "gfx_v10_0.h"
+#include "sdma_v5_0.h"
+#include "sdma_v5_2.h"
+#include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
+#include "vcn_v3_0.h"
+#include "jpeg_v3_0.h"
+#include "amdgpu_vkms.h"
+#include "mes_v10_1.h"
+#include "smuio_v11_0.h"
+#include "smuio_v11_0_6.h"
+#include "smuio_v13_0.h"
+
+MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
+
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
-#define HW_ID_MAX 300
static const char *hw_id_names[HW_ID_MAX] = {
[MP1_HWID] = "MP1",
@@ -66,6 +108,8 @@ static const char *hw_id_names[HW_ID_MAX] = {
[HDP_HWID] = "HDP",
[SDMA0_HWID] = "SDMA0",
[SDMA1_HWID] = "SDMA1",
+ [SDMA2_HWID] = "SDMA2",
+ [SDMA3_HWID] = "SDMA3",
[ISP_HWID] = "ISP",
[DBGU_IO_HWID] = "DBGU_IO",
[DF_HWID] = "DF",
@@ -129,6 +173,8 @@ static int hw_id_map[MAX_HWIP] = {
[THM_HWIP] = THM_HWID,
[CLK_HWIP] = CLKA_HWID,
[UMC_HWIP] = UMC_HWID,
+ [XGMI_HWIP] = XGMI_HWID,
+ [DCI_HWIP] = DCI_HWID,
};
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
@@ -164,6 +210,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct gpu_info_header *ghdr;
+ const struct firmware *fw;
uint16_t offset;
uint16_t size;
uint16_t checksum;
@@ -174,10 +221,21 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (!adev->mman.discovery_bin)
return -ENOMEM;
- r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
- if (r) {
- DRM_ERROR("failed to read ip discovery binary\n");
- goto out;
+ if (amdgpu_discovery == 2) {
+ r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev);
+ if (r)
+ goto get_from_vram;
+ dev_info(adev->dev, "Using IP discovery from file\n");
+ memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data,
+ adev->mman.discovery_tmr_size);
+ release_firmware(fw);
+ } else {
+get_from_vram:
+ r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
+ if (r) {
+ DRM_ERROR("failed to read ip discovery binary\n");
+ goto out;
+ }
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
@@ -245,6 +303,22 @@ void amdgpu_discovery_fini(struct amdgpu_device *adev)
adev->mman.discovery_bin = NULL;
}
+static int amdgpu_discovery_validate_ip(const struct ip *ip)
+{
+ if (ip->number_instance >= HWIP_MAX_INSTANCE) {
+ DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
+ ip->number_instance);
+ return -EINVAL;
+ }
+ if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
+ DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
+ le16_to_cpu(ip->hw_id));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
@@ -290,6 +364,10 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (j = 0; j < num_ips; j++) {
ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
+
+ if (amdgpu_discovery_validate_ip(ip))
+ goto next_ip;
+
num_base_address = ip->num_base_address;
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -301,6 +379,11 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
if (le16_to_cpu(ip->hw_id) == VCN_HWID)
adev->vcn.num_vcn_inst++;
+ if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA3_HWID)
+ adev->sdma.num_instances++;
for (k = 0; k < num_base_address; k++) {
/*
@@ -317,10 +400,21 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
hw_id_names[le16_to_cpu(ip->hw_id)]);
adev->reg_offset[hw_ip][ip->number_instance] =
ip->base_address;
+ /* Instance support is somewhat inconsistent.
+ * SDMA is a good example. Sienna cichlid has 4 total
+ * SDMA instances, each enumerated separately (HWIDs
+ * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
+ * but they are enumerated as multiple instances of the
+ * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
+ * example. On most chips there are multiple instances
+ * with the same HWID.
+ */
+ adev->ip_versions[hw_ip][ip->number_instance] =
+ IP_VERSION(ip->major, ip->minor, ip->revision);
}
-
}
+next_ip:
ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
}
}
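
Each discovery record inlines num_base_address u32 base addresses, so the walk above strides by sizeof(*ip) + 4 * (num_base_address - 1). A toy layout demonstrating the arithmetic; field widths are illustrative only, the real struct ip lives in discovery.h:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ip {				/* illustrative layout only */
	uint16_t hw_id;
	uint8_t number_instance;
	uint8_t num_base_address;
	uint32_t base_address[1];	/* first of N inline entries */
};

int main(void)
{
	struct ip rec = { .hw_id = 42, .num_base_address = 3 };
	size_t stride = sizeof(rec) + 4 * (rec.num_base_address - 1);

	/* 8 bytes of header + first entry, plus two more u32s = 16. */
	printf("stride = %zu\n", stride);
	return 0;
}
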
@@ -401,6 +495,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID:
vcn_harvest_count++;
+ if (harvest_info->list[i].number_instance == 0)
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
+ else
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
break;
case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -409,10 +507,21 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
break;
}
}
+ /* some IP discovery tables on Navy Flounder don't have this set correctly */
+ if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)))
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
}
+ if ((adev->pdev->device == 0x731E &&
+ (adev->pdev->revision == 0xC6 || adev->pdev->revision == 0xC7)) ||
+ (adev->pdev->device == 0x7340 && adev->pdev->revision == 0xC9) ||
+ (adev->pdev->device == 0x7360 && adev->pdev->revision == 0xC7)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ }
}
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
@@ -450,3 +559,753 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
return 0;
}
+
+static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
+{
+ /* what IP to use for this? */
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
+{
+ /* use GC or MMHUB IP version */
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[OSSSYS_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 3, 0):
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ break;
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 1):
+ amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ break;
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 8):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
+ break;
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_ARCTURUS)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
+{
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ } else if (adev->ip_versions[DCE_HWIP][0]) {
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (adev->ip_versions[DCI_HWIP][0]) {
+ switch (adev->ip_versions[DCI_HWIP][0]) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ case IP_VERSION(12, 1, 0):
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+#endif
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
+ amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 2):
+ case IP_VERSION(4, 4, 0):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
+ break;
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 1):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
+{
+ if (adev->ip_versions[VCE_HWIP][0]) {
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 2, 0):
+ /* UVD is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (adev->ip_versions[VCE_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 1, 0):
+ /* VCE is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 2, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 3):
+ break;
+ case IP_VERSION(2, 5, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
+ break;
+ case IP_VERSION(2, 6, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
+ break;
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 16):
+ case IP_VERSION(3, 1, 1):
+ case IP_VERSION(3, 0, 2):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
+ break;
+ case IP_VERSION(3, 0, 33):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
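
The outer test above keys off a non-zero VCE IP version: ASICs that report one use the legacy split UVD/VCE engines, while newer parts route video through the unified VCN (plus JPEG) blocks. A condensed sketch of that decision, not part of the patch:

	if (adev->ip_versions[VCE_HWIP][0]) {
		/* Vega-class: separate UVD decode and VCE encode engines */
	} else {
		/* Raven and newer: unified VCN, with JPEG added per version */
	}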
+
+static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+{
+ int r;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
+ break;
+ case CHIP_VEGA12:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
+ break;
+ case CHIP_RAVEN:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 1;
+ adev->vcn.num_vcn_inst = 1;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
+ } else {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
+ }
+ break;
+ case CHIP_VEGA20:
+ vega20_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
+ break;
+ case CHIP_ARCTURUS:
+ arct_reg_base_init(adev);
+ adev->sdma.num_instances = 8;
+ adev->vcn.num_vcn_inst = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
+ break;
+ case CHIP_ALDEBARAN:
+ aldebaran_reg_base_init(adev);
+ adev->sdma.num_instances = 5;
+ adev->vcn.num_vcn_inst = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
+ break;
+ default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ adev->family = AMDGPU_FAMILY_AI;
+ break;
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ adev->family = AMDGPU_FAMILY_RV;
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ adev->family = AMDGPU_FAMILY_NV;
+ break;
+ case IP_VERSION(10, 3, 1):
+ adev->family = AMDGPU_FAMILY_VGH;
+ break;
+ case IP_VERSION(10, 3, 3):
+ adev->family = AMDGPU_FAMILY_YC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
+ adev->gmc.xgmi.supported = true;
+
+ /* set NBIO version */
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 2, 0):
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ case IP_VERSION(2, 5, 0):
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 4, 0):
+ case IP_VERSION(7, 4, 1):
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 4, 4):
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald;
+ break;
+ case IP_VERSION(7, 2, 0):
+ case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 5, 0):
+ adev->nbio.funcs = &nbio_v7_2_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
+ break;
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 3, 1):
+ case IP_VERSION(2, 3, 2):
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+ break;
+ case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
+ case IP_VERSION(3, 3, 3):
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc;
+ break;
+ default:
+ break;
+ }
+
+ switch (adev->ip_versions[HDP_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ adev->hdp.funcs = &hdp_v4_0_funcs;
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 0, 4):
+ case IP_VERSION(5, 2, 0):
+ adev->hdp.funcs = &hdp_v5_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (adev->ip_versions[DF_HWIP][0]) {
+ case IP_VERSION(3, 6, 0):
+ case IP_VERSION(3, 6, 1):
+ case IP_VERSION(3, 6, 2):
+ adev->df.funcs = &df_v3_6_funcs;
+ break;
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 5, 2):
+ adev->df.funcs = &df_v1_7_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (adev->ip_versions[SMUIO_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(10, 0, 2):
+ adev->smuio.funcs = &smuio_v9_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ adev->smuio.funcs = &smuio_v11_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 6):
+ case IP_VERSION(11, 0, 10):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(13, 0, 1):
+ adev->smuio.funcs = &smuio_v11_0_6_funcs;
+ break;
+ case IP_VERSION(13, 0, 2):
+ adev->smuio.funcs = &smuio_v13_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ r = amdgpu_discovery_set_common_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gmc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ /* For SR-IOV, PSP needs to be initialized before IH */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+ } else {
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_display_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_sdma_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
+ !amdgpu_sriov_vf(adev)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_mm_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if (adev->enable_mes) {
+ r = amdgpu_discovery_set_mes_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
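
Registration order matters here: common and GMC blocks come first, PSP moves ahead of IH under SR-IOV, and SMU placement depends on the firmware load type. A hedged sketch of how a caller might consume this entry point; the real call site in amdgpu_device.c may differ:

	/* Sketch only; assumes the early-init path runs discovery first. */
	static int early_init_example(struct amdgpu_device *adev)
	{
		int r = amdgpu_discovery_set_ip_blocks(adev);

		if (r)
			return r; /* unknown ASIC or unreadable discovery table */

		/* ... proceed to each registered block's early_init ... */
		return 0;
	}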
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 48e6b88cfdfe..0ea029e3b850 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -36,5 +36,6 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n
int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
+int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f18240f87387..ad95de6399af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -38,6 +38,7 @@
#include <drm/drm_probe_helper.h>
#include <linux/mmu_notifier.h>
#include <linux/suspend.h>
+#include <linux/cc_platform.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -96,9 +97,11 @@
* - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
* - 3.41.0 - Add video codec query
* - 3.42.0 - Add 16bpc fixed point display support
+ * - 3.43.0 - Add device hot plug/unplug support
+ * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 42
+#define KMS_DRIVER_MINOR 44
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -627,7 +630,7 @@ module_param_named(mcbp, amdgpu_mcbp, int, 0444);
/**
* DOC: discovery (int)
* Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
- * (-1 = auto (default), 0 = disabled, 1 = enabled)
+ * (-1 = auto (default), 0 = disabled, 1 = enabled, 2 = use ip_discovery table from file)
*/
MODULE_PARM_DESC(discovery,
"Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
@@ -875,7 +878,7 @@ module_param_named(reset_method, amdgpu_reset_method, int, 0444);
* result in the GPU entering bad status when the number of total
* faulty pages by ECC exceeds the threshold value.
*/
-MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement)");
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement, -2 = ignore bad page threshold)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
@@ -890,6 +893,636 @@ MODULE_PARM_DESC(smu_pptable_id,
"specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)");
module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);
+/* These devices are not supported by amdgpu.
+ * They are supported by the mach64, r128, and radeon drivers.
+ */
+static const u16 amdgpu_unsupported_pciidlist[] = {
+ /* mach64 */
+ 0x4354,
+ 0x4358,
+ 0x4554,
+ 0x4742,
+ 0x4744,
+ 0x4749,
+ 0x474C,
+ 0x474D,
+ 0x474E,
+ 0x474F,
+ 0x4750,
+ 0x4751,
+ 0x4752,
+ 0x4753,
+ 0x4754,
+ 0x4755,
+ 0x4756,
+ 0x4757,
+ 0x4758,
+ 0x4759,
+ 0x475A,
+ 0x4C42,
+ 0x4C44,
+ 0x4C47,
+ 0x4C49,
+ 0x4C4D,
+ 0x4C4E,
+ 0x4C50,
+ 0x4C51,
+ 0x4C52,
+ 0x4C53,
+ 0x5654,
+ 0x5655,
+ 0x5656,
+ /* r128 */
+ 0x4c45,
+ 0x4c46,
+ 0x4d46,
+ 0x4d4c,
+ 0x5041,
+ 0x5042,
+ 0x5043,
+ 0x5044,
+ 0x5045,
+ 0x5046,
+ 0x5047,
+ 0x5048,
+ 0x5049,
+ 0x504A,
+ 0x504B,
+ 0x504C,
+ 0x504D,
+ 0x504E,
+ 0x504F,
+ 0x5050,
+ 0x5051,
+ 0x5052,
+ 0x5053,
+ 0x5054,
+ 0x5055,
+ 0x5056,
+ 0x5057,
+ 0x5058,
+ 0x5245,
+ 0x5246,
+ 0x5247,
+ 0x524b,
+ 0x524c,
+ 0x534d,
+ 0x5446,
+ 0x544C,
+ 0x5452,
+ /* radeon */
+ 0x3150,
+ 0x3151,
+ 0x3152,
+ 0x3154,
+ 0x3155,
+ 0x3E50,
+ 0x3E54,
+ 0x4136,
+ 0x4137,
+ 0x4144,
+ 0x4145,
+ 0x4146,
+ 0x4147,
+ 0x4148,
+ 0x4149,
+ 0x414A,
+ 0x414B,
+ 0x4150,
+ 0x4151,
+ 0x4152,
+ 0x4153,
+ 0x4154,
+ 0x4155,
+ 0x4156,
+ 0x4237,
+ 0x4242,
+ 0x4336,
+ 0x4337,
+ 0x4437,
+ 0x4966,
+ 0x4967,
+ 0x4A48,
+ 0x4A49,
+ 0x4A4A,
+ 0x4A4B,
+ 0x4A4C,
+ 0x4A4D,
+ 0x4A4E,
+ 0x4A4F,
+ 0x4A50,
+ 0x4A54,
+ 0x4B48,
+ 0x4B49,
+ 0x4B4A,
+ 0x4B4B,
+ 0x4B4C,
+ 0x4C57,
+ 0x4C58,
+ 0x4C59,
+ 0x4C5A,
+ 0x4C64,
+ 0x4C66,
+ 0x4C67,
+ 0x4E44,
+ 0x4E45,
+ 0x4E46,
+ 0x4E47,
+ 0x4E48,
+ 0x4E49,
+ 0x4E4A,
+ 0x4E4B,
+ 0x4E50,
+ 0x4E51,
+ 0x4E52,
+ 0x4E53,
+ 0x4E54,
+ 0x4E56,
+ 0x5144,
+ 0x5145,
+ 0x5146,
+ 0x5147,
+ 0x5148,
+ 0x514C,
+ 0x514D,
+ 0x5157,
+ 0x5158,
+ 0x5159,
+ 0x515A,
+ 0x515E,
+ 0x5460,
+ 0x5462,
+ 0x5464,
+ 0x5548,
+ 0x5549,
+ 0x554A,
+ 0x554B,
+ 0x554C,
+ 0x554D,
+ 0x554E,
+ 0x554F,
+ 0x5550,
+ 0x5551,
+ 0x5552,
+ 0x5554,
+ 0x564A,
+ 0x564B,
+ 0x564F,
+ 0x5652,
+ 0x5653,
+ 0x5657,
+ 0x5834,
+ 0x5835,
+ 0x5954,
+ 0x5955,
+ 0x5974,
+ 0x5975,
+ 0x5960,
+ 0x5961,
+ 0x5962,
+ 0x5964,
+ 0x5965,
+ 0x5969,
+ 0x5a41,
+ 0x5a42,
+ 0x5a61,
+ 0x5a62,
+ 0x5b60,
+ 0x5b62,
+ 0x5b63,
+ 0x5b64,
+ 0x5b65,
+ 0x5c61,
+ 0x5c63,
+ 0x5d48,
+ 0x5d49,
+ 0x5d4a,
+ 0x5d4c,
+ 0x5d4d,
+ 0x5d4e,
+ 0x5d4f,
+ 0x5d50,
+ 0x5d52,
+ 0x5d57,
+ 0x5e48,
+ 0x5e4a,
+ 0x5e4b,
+ 0x5e4c,
+ 0x5e4d,
+ 0x5e4f,
+ 0x6700,
+ 0x6701,
+ 0x6702,
+ 0x6703,
+ 0x6704,
+ 0x6705,
+ 0x6706,
+ 0x6707,
+ 0x6708,
+ 0x6709,
+ 0x6718,
+ 0x6719,
+ 0x671c,
+ 0x671d,
+ 0x671f,
+ 0x6720,
+ 0x6721,
+ 0x6722,
+ 0x6723,
+ 0x6724,
+ 0x6725,
+ 0x6726,
+ 0x6727,
+ 0x6728,
+ 0x6729,
+ 0x6738,
+ 0x6739,
+ 0x673e,
+ 0x6740,
+ 0x6741,
+ 0x6742,
+ 0x6743,
+ 0x6744,
+ 0x6745,
+ 0x6746,
+ 0x6747,
+ 0x6748,
+ 0x6749,
+ 0x674A,
+ 0x6750,
+ 0x6751,
+ 0x6758,
+ 0x6759,
+ 0x675B,
+ 0x675D,
+ 0x675F,
+ 0x6760,
+ 0x6761,
+ 0x6762,
+ 0x6763,
+ 0x6764,
+ 0x6765,
+ 0x6766,
+ 0x6767,
+ 0x6768,
+ 0x6770,
+ 0x6771,
+ 0x6772,
+ 0x6778,
+ 0x6779,
+ 0x677B,
+ 0x6840,
+ 0x6841,
+ 0x6842,
+ 0x6843,
+ 0x6849,
+ 0x684C,
+ 0x6850,
+ 0x6858,
+ 0x6859,
+ 0x6880,
+ 0x6888,
+ 0x6889,
+ 0x688A,
+ 0x688C,
+ 0x688D,
+ 0x6898,
+ 0x6899,
+ 0x689b,
+ 0x689c,
+ 0x689d,
+ 0x689e,
+ 0x68a0,
+ 0x68a1,
+ 0x68a8,
+ 0x68a9,
+ 0x68b0,
+ 0x68b8,
+ 0x68b9,
+ 0x68ba,
+ 0x68be,
+ 0x68bf,
+ 0x68c0,
+ 0x68c1,
+ 0x68c7,
+ 0x68c8,
+ 0x68c9,
+ 0x68d8,
+ 0x68d9,
+ 0x68da,
+ 0x68de,
+ 0x68e0,
+ 0x68e1,
+ 0x68e4,
+ 0x68e5,
+ 0x68e8,
+ 0x68e9,
+ 0x68f1,
+ 0x68f2,
+ 0x68f8,
+ 0x68f9,
+ 0x68fa,
+ 0x68fe,
+ 0x7100,
+ 0x7101,
+ 0x7102,
+ 0x7103,
+ 0x7104,
+ 0x7105,
+ 0x7106,
+ 0x7108,
+ 0x7109,
+ 0x710A,
+ 0x710B,
+ 0x710C,
+ 0x710E,
+ 0x710F,
+ 0x7140,
+ 0x7141,
+ 0x7142,
+ 0x7143,
+ 0x7144,
+ 0x7145,
+ 0x7146,
+ 0x7147,
+ 0x7149,
+ 0x714A,
+ 0x714B,
+ 0x714C,
+ 0x714D,
+ 0x714E,
+ 0x714F,
+ 0x7151,
+ 0x7152,
+ 0x7153,
+ 0x715E,
+ 0x715F,
+ 0x7180,
+ 0x7181,
+ 0x7183,
+ 0x7186,
+ 0x7187,
+ 0x7188,
+ 0x718A,
+ 0x718B,
+ 0x718C,
+ 0x718D,
+ 0x718F,
+ 0x7193,
+ 0x7196,
+ 0x719B,
+ 0x719F,
+ 0x71C0,
+ 0x71C1,
+ 0x71C2,
+ 0x71C3,
+ 0x71C4,
+ 0x71C5,
+ 0x71C6,
+ 0x71C7,
+ 0x71CD,
+ 0x71CE,
+ 0x71D2,
+ 0x71D4,
+ 0x71D5,
+ 0x71D6,
+ 0x71DA,
+ 0x71DE,
+ 0x7200,
+ 0x7210,
+ 0x7211,
+ 0x7240,
+ 0x7243,
+ 0x7244,
+ 0x7245,
+ 0x7246,
+ 0x7247,
+ 0x7248,
+ 0x7249,
+ 0x724A,
+ 0x724B,
+ 0x724C,
+ 0x724D,
+ 0x724E,
+ 0x724F,
+ 0x7280,
+ 0x7281,
+ 0x7283,
+ 0x7284,
+ 0x7287,
+ 0x7288,
+ 0x7289,
+ 0x728B,
+ 0x728C,
+ 0x7290,
+ 0x7291,
+ 0x7293,
+ 0x7297,
+ 0x7834,
+ 0x7835,
+ 0x791e,
+ 0x791f,
+ 0x793f,
+ 0x7941,
+ 0x7942,
+ 0x796c,
+ 0x796d,
+ 0x796e,
+ 0x796f,
+ 0x9400,
+ 0x9401,
+ 0x9402,
+ 0x9403,
+ 0x9405,
+ 0x940A,
+ 0x940B,
+ 0x940F,
+ 0x94A0,
+ 0x94A1,
+ 0x94A3,
+ 0x94B1,
+ 0x94B3,
+ 0x94B4,
+ 0x94B5,
+ 0x94B9,
+ 0x9440,
+ 0x9441,
+ 0x9442,
+ 0x9443,
+ 0x9444,
+ 0x9446,
+ 0x944A,
+ 0x944B,
+ 0x944C,
+ 0x944E,
+ 0x9450,
+ 0x9452,
+ 0x9456,
+ 0x945A,
+ 0x945B,
+ 0x945E,
+ 0x9460,
+ 0x9462,
+ 0x946A,
+ 0x946B,
+ 0x947A,
+ 0x947B,
+ 0x9480,
+ 0x9487,
+ 0x9488,
+ 0x9489,
+ 0x948A,
+ 0x948F,
+ 0x9490,
+ 0x9491,
+ 0x9495,
+ 0x9498,
+ 0x949C,
+ 0x949E,
+ 0x949F,
+ 0x94C0,
+ 0x94C1,
+ 0x94C3,
+ 0x94C4,
+ 0x94C5,
+ 0x94C6,
+ 0x94C7,
+ 0x94C8,
+ 0x94C9,
+ 0x94CB,
+ 0x94CC,
+ 0x94CD,
+ 0x9500,
+ 0x9501,
+ 0x9504,
+ 0x9505,
+ 0x9506,
+ 0x9507,
+ 0x9508,
+ 0x9509,
+ 0x950F,
+ 0x9511,
+ 0x9515,
+ 0x9517,
+ 0x9519,
+ 0x9540,
+ 0x9541,
+ 0x9542,
+ 0x954E,
+ 0x954F,
+ 0x9552,
+ 0x9553,
+ 0x9555,
+ 0x9557,
+ 0x955f,
+ 0x9580,
+ 0x9581,
+ 0x9583,
+ 0x9586,
+ 0x9587,
+ 0x9588,
+ 0x9589,
+ 0x958A,
+ 0x958B,
+ 0x958C,
+ 0x958D,
+ 0x958E,
+ 0x958F,
+ 0x9590,
+ 0x9591,
+ 0x9593,
+ 0x9595,
+ 0x9596,
+ 0x9597,
+ 0x9598,
+ 0x9599,
+ 0x959B,
+ 0x95C0,
+ 0x95C2,
+ 0x95C4,
+ 0x95C5,
+ 0x95C6,
+ 0x95C7,
+ 0x95C9,
+ 0x95CC,
+ 0x95CD,
+ 0x95CE,
+ 0x95CF,
+ 0x9610,
+ 0x9611,
+ 0x9612,
+ 0x9613,
+ 0x9614,
+ 0x9615,
+ 0x9616,
+ 0x9640,
+ 0x9641,
+ 0x9642,
+ 0x9643,
+ 0x9644,
+ 0x9645,
+ 0x9647,
+ 0x9648,
+ 0x9649,
+ 0x964a,
+ 0x964b,
+ 0x964c,
+ 0x964e,
+ 0x964f,
+ 0x9710,
+ 0x9711,
+ 0x9712,
+ 0x9713,
+ 0x9714,
+ 0x9715,
+ 0x9802,
+ 0x9803,
+ 0x9804,
+ 0x9805,
+ 0x9806,
+ 0x9807,
+ 0x9808,
+ 0x9809,
+ 0x980A,
+ 0x9900,
+ 0x9901,
+ 0x9903,
+ 0x9904,
+ 0x9905,
+ 0x9906,
+ 0x9907,
+ 0x9908,
+ 0x9909,
+ 0x990A,
+ 0x990B,
+ 0x990C,
+ 0x990D,
+ 0x990E,
+ 0x990F,
+ 0x9910,
+ 0x9913,
+ 0x9917,
+ 0x9918,
+ 0x9919,
+ 0x9990,
+ 0x9991,
+ 0x9992,
+ 0x9993,
+ 0x9994,
+ 0x9995,
+ 0x9996,
+ 0x9997,
+ 0x9998,
+ 0x9999,
+ 0x999A,
+ 0x999B,
+ 0x999C,
+ 0x999D,
+ 0x99A0,
+ 0x99A2,
+ 0x99A4,
+};
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1239,6 +1872,16 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ { PCI_DEVICE(0x1002, PCI_ANY_ID),
+ .class = PCI_CLASS_DISPLAY_VGA << 8,
+ .class_mask = 0xffffff,
+ .driver_data = CHIP_IP_DISCOVERY },
+
+ { PCI_DEVICE(0x1002, PCI_ANY_ID),
+ .class = PCI_CLASS_DISPLAY_OTHER << 8,
+ .class_mask = 0xffffff,
+ .driver_data = CHIP_IP_DISCOVERY },
+
{0, 0, 0}
};
@@ -1252,9 +1895,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
struct drm_device *ddev;
struct amdgpu_device *adev;
unsigned long flags = ent->driver_data;
- int ret, retry = 0;
+ int ret, retry = 0, i;
bool supports_atomic = false;
+ /* skip devices which are owned by radeon */
+ for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
+ if (amdgpu_unsupported_pciidlist[i] == pdev->device)
+ return -ENODEV;
+ }
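
The scan above is a linear pass over a few hundred device IDs, which is cheap for a once-per-device probe. If the table were instead kept globally sorted (it is grouped by legacy driver here), the lookup could use the kernel's bsearch(); a sketch under that assumption:

	#include <linux/bsearch.h>

	/* Sketch, assuming a sorted table; the patch scans linearly. */
	static int cmp_u16(const void *a, const void *b)
	{
		return *(const u16 *)a - *(const u16 *)b;
	}

	/* found = bsearch(&pdev->device, amdgpu_unsupported_pciidlist,
	 *                 ARRAY_SIZE(amdgpu_unsupported_pciidlist),
	 *                 sizeof(u16), cmp_u16); */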
+
+ if (flags == 0) {
+ DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n");
+ return -ENODEV;
+ }
+
if (amdgpu_virtual_display ||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
@@ -1269,7 +1923,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
* however, SME requires an indirect IOMMU mapping because the encryption
* bit is beyond the DMA mask of the chip.
*/
- if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+ ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
dev_info(&pdev->dev,
"SME is not compatible with RAVEN\n");
return -ENOTSUPP;
@@ -1508,6 +2163,10 @@ static int amdgpu_pmops_resume(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
+ /* Avoid register access if the device is physically gone */
+ if (!pci_device_is_present(adev->pdev))
+ adev->no_hw_access = true;
+
r = amdgpu_device_resume(drm_dev, true);
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8d682befe0d6..3b7e86ea7167 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -266,7 +266,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
struct amdgpu_fence_driver *drv = &ring->fence_drv;
struct amdgpu_device *adev = ring->adev;
uint32_t seq, last_seq;
- int r;
do {
last_seq = atomic_read(&ring->fence_drv.last_seq);
@@ -298,12 +297,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
- r = dma_fence_signal(fence);
- if (!r)
- DMA_FENCE_TRACE(fence, "signaled from irq context\n");
- else
- BUG();
-
+ dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -556,7 +550,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
drm_sched_stop(&ring->sched, NULL);
/* You can't wait for HW to signal if it's gone */
- if (!drm_dev_is_unplugged(&adev->ddev))
+ if (!drm_dev_is_unplugged(adev_to_drm(adev)))
r = amdgpu_fence_wait_empty(ring);
else
r = -ENODEV;
@@ -684,8 +678,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
- DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
-
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 76efd5f8950f..d3e4203f6217 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -34,6 +34,7 @@
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"
+#include <drm/drm_drv.h>
/*
* GART
@@ -230,12 +231,16 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
u64 page_base;
/* Starting from VEGA10, system bit must be 0 to mean invalid. */
uint64_t flags = 0;
+ int idx;
if (!adev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
return -EINVAL;
}
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return 0;
+
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++) {
@@ -254,6 +259,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
for (i = 0; i < adev->num_vmhubs; i++)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
+ drm_dev_exit(idx);
return 0;
}
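
The drm_dev_enter()/drm_dev_exit() pairs added in this and the following hunks are the standard DRM guard against touching hardware after hot-unplug; a minimal sketch of the pattern:

	/* Generic unplug guard, as applied to the GART paths above. */
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return 0; /* device gone: skip MMIO and report success */

	/* ... register writes that are unsafe after unplug ... */

	drm_dev_exit(idx);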
@@ -276,12 +282,16 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
{
uint64_t page_base;
unsigned i, j, t;
+ int idx;
if (!adev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
}
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return 0;
+
t = offset / AMDGPU_GPU_PAGE_SIZE;
for (i = 0; i < pages; i++) {
@@ -291,6 +301,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
+ drm_dev_exit(idx);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d6aa032890ee..a573424a6e0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -60,10 +60,9 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
goto unlock;
}
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT, 1);
-
- drm_dev_exit(idx);
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT, 1);
+ drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index d43fe2ed8116..f851196c83a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -42,10 +42,9 @@
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
-enum gfx_pipe_priority {
- AMDGPU_GFX_PIPE_PRIO_NORMAL = 1,
- AMDGPU_GFX_PIPE_PRIO_HIGH,
- AMDGPU_GFX_PIPE_PRIO_MAX
+enum amdgpu_gfx_pipe_priority {
+ AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1,
+ AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2
};
/* Argument for PPSMC_MSG_GpuChangeState */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 9ff600a38559..08478fce00f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -153,10 +153,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
{
void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value;
- int idx;
-
- if (!drm_dev_enter(&adev->ddev, &idx))
- return 0;
/*
* The following is for PTE only. GART does not have PDEs.
@@ -165,8 +161,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
value |= flags;
writeq(value, ptr + (gpu_page_idx * 8));
- drm_dev_exit(idx);
-
return 0;
}
@@ -749,6 +743,10 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
u64 vram_end = vram_addr + vram_size;
u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
+ int idx;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return;
flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
flags |= AMDGPU_PTE_WRITEABLE;
@@ -770,6 +768,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
/* Requires gart_ptb_gpu_pa to be 4K aligned */
amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
+ drm_dev_exit(idx);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index c076a6b9a5a2..bc1297dcdf97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -300,20 +300,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
*/
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
- unsigned size;
int r, i;
if (adev->ib_pool_ready)
return 0;
for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
- if (i == AMDGPU_IB_POOL_DIRECT)
- size = PAGE_SIZE * 6;
- else
- size = AMDGPU_IB_POOL_SIZE;
-
r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
- size, AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_IB_POOL_SIZE,
+ AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index de29518673dd..bfc47bea23db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -38,7 +38,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
struct amdgpu_device *adev = ring->adev;
int idx;
- if (!drm_dev_enter(&adev->ddev, &idx)) {
+ if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
__func__, s_job->sched->name);
@@ -182,9 +182,11 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (r)
return r;
+ drm_sched_job_arm(&job->base);
+
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- drm_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base);
return 0;
}
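
The reworked DRM scheduler API splits job arming out of submission: the finished fence only becomes valid once drm_sched_job_arm() runs, and the entity is no longer passed to push, having been bound at drm_sched_job_init() time. The required sequence, as the hunk above applies it:

	drm_sched_job_arm(&job->base);                    /* init scheduler fences */
	*f = dma_fence_get(&job->base.s_fence->finished); /* now safe to reference */
	drm_sched_entity_push_job(&job->base);            /* entity bound at init */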
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7e45640fbee0..dfe667ea8b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -340,28 +340,35 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
case AMDGPU_INFO_FW_TA:
switch (query_fw->index) {
case TA_FW_TYPE_PSP_XGMI:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.xgmi.feature_version;
+ fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.xgmi_context.context
+ .bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_RAS:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.ras.feature_version;
+ fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.ras_context.context
+ .bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_HDCP:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.hdcp.feature_version;
+ fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.hdcp_context.context
+ .bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_DTM:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.dtm.feature_version;
+ fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.dtm_context.context
+ .bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_RAP:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.rap.feature_version;
+ fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.rap_context.context
+ .bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.securedisplay.feature_version;
+ fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version;
+ fw_info->feature =
+ adev->psp.securedisplay_context.context.bin_desc
+ .feature_version;
break;
default:
return -EINVAL;
@@ -378,8 +385,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = adev->psp.sos.feature_version;
break;
case AMDGPU_INFO_FW_ASD:
- fw_info->ver = adev->psp.asd.fw_version;
- fw_info->feature = adev->psp.asd.feature_version;
+ fw_info->ver = adev->psp.asd_context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.asd_context.bin_desc.feature_version;
break;
case AMDGPU_INFO_FW_DMCU:
fw_info->ver = adev->dm.dmcu_fw_version;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index a2d3dbbf7d25..ce538f4819f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -31,7 +31,7 @@ void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
unsigned long *error_count)
{
- uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+ uint64_t mc_status = RREG64_PCIE(mc_status_addr);
if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
@@ -42,7 +42,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
unsigned long *error_count)
{
- uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+ uint64_t mc_status = RREG64_PCIE(mc_status_addr);
if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
@@ -56,7 +56,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr)
{
- WREG64_PCIE(mc_status_addr * 4, 0x0ULL);
+ WREG64_PCIE(mc_status_addr, 0x0ULL);
}
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
@@ -87,8 +87,8 @@ int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
if (!mca_dev->ras_if)
return -ENOMEM;
mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
+ mca_dev->ras_if->sub_block_index = mca_dev->ras_funcs->ras_sub_block;
mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- mca_dev->ras_if->sub_block_index = 0;
}
ih_info.head = fs_info.head = *mca_dev->ras_if;
r = amdgpu_ras_late_init(adev, mca_dev->ras_if,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index f860f2f0e296..c74bc7177066 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -29,6 +29,7 @@ struct amdgpu_mca_ras_funcs {
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
uint32_t ras_block;
+ uint32_t ras_sub_block;
const char* sysfs_name;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 01a78c786536..aeb92e5677ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -695,40 +695,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
}
/**
- * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
- * @bo: pointer to the buffer object
- *
- * Sets placement according to domain; and changes placement and caching
- * policy of the buffer object according to the placement.
- * This is used for validating shadow bos. It calls ttm_bo_validate() to
- * make sure the buffer is resident where it needs to be.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_validate(struct amdgpu_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- uint32_t domain;
- int r;
-
- if (bo->tbo.pin_count)
- return 0;
-
- domain = bo->preferred_domains;
-
-retry:
- amdgpu_bo_placement_from_domain(bo, domain);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
- domain = bo->allowed_domains;
- goto retry;
- }
-
- return r;
-}
-
-/**
* amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
*
* @vmbo: BO that will be inserted into the shadow list
@@ -1038,29 +1004,6 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
}
-/**
- * amdgpu_bo_evict_vram - evict VRAM buffers
- * @adev: amdgpu device object
- *
- * Evicts all VRAM buffers on the lru list of the memory type.
- * Mainly used for evicting vram at suspend time.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
-{
- struct ttm_resource_manager *man;
-
- if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
- /* No need to evict vram on APUs for suspend to ram */
- return 0;
- }
-
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
-}
-
static const char *amdgpu_vram_names[] = {
"UNKNOWN",
"GDDR1",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 9d6c001c15f8..4c9cbdc66995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -304,7 +304,6 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
@@ -327,7 +326,6 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-int amdgpu_bo_validate(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
uint64_t *gtt_mem, uint64_t *cpu_mem);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9b41cb8c3de5..c641f84649d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -46,6 +46,10 @@ static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);
static int psp_load_smu_fw(struct psp_context *psp);
+static int psp_ta_unload(struct psp_context *psp, struct ta_context *context);
+static int psp_ta_load(struct psp_context *psp, struct ta_context *context);
+static int psp_rap_terminate(struct psp_context *psp);
+static int psp_securedisplay_terminate(struct psp_context *psp);
/*
* Due to DF Cstate management centralized to PMFW, the firmware
@@ -61,23 +65,32 @@ static int psp_load_smu_fw(struct psp_context *psp);
*
* This new sequence is required for
* - Arcturus and onwards
- * - Navi12 and onwards
*/
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- psp->pmfw_centralized_cstate_management = false;
-
- if (amdgpu_sriov_vf(adev))
- return;
-
- if (adev->flags & AMD_IS_APU)
+ if (amdgpu_sriov_vf(adev)) {
+ psp->pmfw_centralized_cstate_management = false;
return;
+ }
- if ((adev->asic_type >= CHIP_ARCTURUS) ||
- (adev->asic_type >= CHIP_NAVI12))
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(13, 0, 2):
psp->pmfw_centralized_cstate_management = true;
+ break;
+ default:
+ psp->pmfw_centralized_cstate_management = false;
+ break;
+ }
}
static int psp_early_init(void *handle)
@@ -85,43 +98,45 @@ static int psp_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
psp_v3_1_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
psp_v10_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 1):
psp_v12_0_set_psp_funcs(psp);
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(13, 0, 2):
psp_v13_0_set_psp_funcs(psp);
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(11, 0, 8):
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
psp_v11_0_8_set_psp_funcs(psp);
psp->autoload_supported = false;
@@ -264,7 +279,8 @@ static int psp_sw_init(void *handle)
DRM_ERROR("Failed to load psp firmware!\n");
return ret;
}
- } else if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_ALDEBARAN) {
+ } else if (amdgpu_sriov_vf(adev) &&
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2)) {
ret = psp_init_ta_microcode(psp, "aldebaran");
if (ret) {
DRM_ERROR("Failed to initialize ta microcode!\n");
@@ -307,7 +323,8 @@ static int psp_sw_init(void *handle)
}
}
- if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) {
ret = psp_sysfs_init(adev);
if (ret) {
return ret;
@@ -337,8 +354,8 @@ static int psp_sw_fini(void *handle)
psp->ta_fw = NULL;
}
- if (adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
psp_sysfs_fini(adev);
kfree(cmd);
@@ -424,7 +441,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
if (psp->adev->no_hw_access)
return 0;
- if (!drm_dev_enter(&psp->adev->ddev, &idx))
+ if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
return 0;
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -597,10 +614,10 @@ static int psp_tmr_init(struct psp_context *psp)
static bool psp_skip_tmr(struct psp_context *psp)
{
- switch (psp->adev->asic_type) {
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_ALDEBARAN:
+ switch (psp->adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(13, 0, 2):
return true;
default:
return false;
@@ -778,46 +795,29 @@ static int psp_rl_load(struct amdgpu_device *adev)
return ret;
}
-static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t asd_mc, uint32_t size)
+static int psp_asd_load(struct psp_context *psp)
{
- cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
- cmd->cmd.cmd_load_ta.app_len = size;
-
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
- cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
+ return psp_ta_load(psp, &psp->asd_context);
}
-static int psp_asd_load(struct psp_context *psp)
+static int psp_asd_initialize(struct psp_context *psp)
{
int ret;
- struct psp_gfx_cmd_resp *cmd;
/* If PSP version doesn't match ASD version, asd loading will fail.
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) || !psp->asd.size_bytes)
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
return 0;
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_copy_fw(psp, psp->asd.start_addr, psp->asd.size_bytes);
-
- psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->asd.size_bytes);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
- if (!ret) {
- psp->asd_context.asd_initialized = true;
- psp->asd_context.session_id = cmd->resp.session_id;
- }
+ psp->asd_context.mem_context.shared_mc_addr = 0;
+ psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
+ psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
- release_psp_cmd_buf(psp);
+ ret = psp_asd_load(psp);
+ if (!ret)
+ psp->asd_context.initialized = true;
return ret;
}
@@ -829,27 +829,39 @@ static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
cmd->cmd.cmd_unload_ta.session_id = session_id;
}
+static int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+
+ psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
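
With load, unload, and invoke now taking a generic ta_context, each trusted application reduces to a thin wrapper over the shared helpers, as psp_asd_unload() below shows. A sketch of the same pattern for another TA:

	/* Sketch: the RAS TA would wrap the generic helper identically. */
	static int psp_ras_unload(struct psp_context *psp)
	{
		return psp_ta_unload(psp, &psp->ras_context.context);
	}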
+
static int psp_asd_unload(struct psp_context *psp)
{
+ return psp_ta_unload(psp, &psp->asd_context);
+}
+
+static int psp_asd_terminate(struct psp_context *psp)
+{
int ret;
- struct psp_gfx_cmd_resp *cmd;
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->asd_context.asd_initialized)
+ if (!psp->asd_context.initialized)
return 0;
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
+ ret = psp_asd_unload(psp);
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
if (!ret)
- psp->asd_context.asd_initialized = false;
-
- release_psp_cmd_buf(psp);
+ psp->asd_context.initialized = false;
return ret;
}
@@ -885,23 +897,22 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint64_t ta_bin_mc,
- uint32_t ta_bin_size,
- uint64_t ta_shared_mc,
- uint32_t ta_shared_size)
+ struct ta_context *context)
{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd_id = context->ta_load_type;
cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
- cmd->cmd.cmd_load_ta.app_len = ta_bin_size;
+ cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
- cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
+ lower_32_bits(context->mem_context.shared_mc_addr);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
+ upper_32_bits(context->mem_context.shared_mc_addr);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}
static int psp_ta_init_shared_buf(struct psp_context *psp,
- struct ta_mem_context *mem_ctx,
- uint32_t shared_mem_size)
+ struct ta_mem_context *mem_ctx)
{
int ret;
@@ -909,8 +920,8 @@ static int psp_ta_init_shared_buf(struct psp_context *psp,
* Allocate 16k memory aligned to 4k from Frame Buffer (local
* physical) for ta to host memory
*/
- ret = amdgpu_bo_create_kernel(psp->adev, shared_mem_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ ret = amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&mem_ctx->shared_bo,
&mem_ctx->shared_mc_addr,
&mem_ctx->shared_buf);
@@ -926,8 +937,7 @@ static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
static int psp_xgmi_init_shared_buf(struct psp_context *psp)
{
- return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context,
- PSP_XGMI_SHARED_MEM_SIZE);
+ return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
}
static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
@@ -941,12 +951,12 @@ static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
static int psp_ta_invoke(struct psp_context *psp,
uint32_t ta_cmd_id,
- uint32_t session_id)
+ struct ta_context *context)
{
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
- psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
+ psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -956,31 +966,23 @@ static int psp_ta_invoke(struct psp_context *psp,
return ret;
}
-static int psp_xgmi_load(struct psp_context *psp)
+static int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
- /*
- * TODO: bypass the loading in sriov for now
- */
-
cmd = acquire_psp_cmd_buf(psp);
- psp_copy_fw(psp, psp->xgmi.start_addr, psp->xgmi.size_bytes);
+ psp_copy_fw(psp, context->bin_desc.start_addr,
+ context->bin_desc.size_bytes);
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->xgmi.size_bytes,
- psp->xgmi_context.context.mem_context.shared_mc_addr,
- PSP_XGMI_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
if (!ret) {
- psp->xgmi_context.context.initialized = true;
- psp->xgmi_context.context.session_id = cmd->resp.session_id;
+ context->session_id = cmd->resp.session_id;
}
release_psp_cmd_buf(psp);
@@ -988,41 +990,31 @@ static int psp_xgmi_load(struct psp_context *psp)
return ret;
}
-static int psp_xgmi_unload(struct psp_context *psp)
+static int psp_xgmi_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
- struct amdgpu_device *adev = psp->adev;
-
- /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
- if (adev->asic_type == CHIP_ARCTURUS ||
- (adev->asic_type == CHIP_ALDEBARAN && adev->gmc.xgmi.connected_to_cpu))
- return 0;
-
- /*
- * TODO: bypass the unloading in sriov for now
- */
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
-
- release_psp_cmd_buf(psp);
+ return psp_ta_load(psp, &psp->xgmi_context.context);
+}
- return ret;
+static int psp_xgmi_unload(struct psp_context *psp)
+{
+ return psp_ta_unload(psp, &psp->xgmi_context.context);
}
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.context.session_id);
+ return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}
int psp_xgmi_terminate(struct psp_context *psp)
{
int ret;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
+ (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
+ adev->gmc.xgmi.connected_to_cpu))
+ return 0;
if (!psp->xgmi_context.context.initialized)
return 0;
@@ -1045,13 +1037,16 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo
int ret;
if (!psp->ta_fw ||
- !psp->xgmi.size_bytes ||
- !psp->xgmi.start_addr)
+ !psp->xgmi_context.context.bin_desc.size_bytes ||
+ !psp->xgmi_context.context.bin_desc.start_addr)
return -ENOENT;
if (!load_ta)
goto invoke;
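+ /* Fill in the generic TA context consumed by psp_ta_load(). */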
+ psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
+ psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->xgmi_context.context.initialized) {
ret = psp_xgmi_init_shared_buf(psp);
if (ret)
@@ -1060,7 +1055,9 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo
/* Load XGMI TA */
ret = psp_xgmi_load(psp);
- if (ret)
+ if (!ret)
+ psp->xgmi_context.context.initialized = true;
+ else
return ret;
invoke:
@@ -1117,8 +1114,8 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
- return psp->adev->asic_type == CHIP_ALDEBARAN &&
- psp->xgmi.feature_version >= 0x2000000b;
+ return psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
+ psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b;
}
/*
@@ -1282,80 +1279,40 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
- return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context,
- PSP_RAS_SHARED_MEM_SIZE);
+ return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
}
static int psp_ras_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
- struct ta_ras_shared_memory *ras_cmd;
-
- /*
- * TODO: bypass the loading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- psp_copy_fw(psp, psp->ras.start_addr, psp->ras.size_bytes);
-
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
-
- if (psp->adev->gmc.xgmi.connected_to_cpu)
- ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
- else
- ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->ras.size_bytes,
- psp->ras_context.context.mem_context.shared_mc_addr,
- PSP_RAS_SHARED_MEM_SIZE);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
-
- if (!ret) {
- psp->ras_context.context.session_id = cmd->resp.session_id;
-
- if (!ras_cmd->ras_status)
- psp->ras_context.context.initialized = true;
- else
- dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
- }
-
- release_psp_cmd_buf(psp);
-
- if (ret || ras_cmd->ras_status)
- amdgpu_ras_fini(psp->adev);
-
- return ret;
+ return psp_ta_load(psp, &psp->ras_context.context);
}
static int psp_ras_unload(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the unloading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->ras_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
+ return psp_ta_unload(psp, &psp->ras_context.context);
+}
- release_psp_cmd_buf(psp);
+static void psp_ras_ta_check_status(struct psp_context *psp)
+{
+ struct ta_ras_shared_memory *ras_cmd =
+ (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
- return ret;
+ switch (ras_cmd->ras_status) {
+ case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: cmd failed due to unsupported ip\n");
+ break;
+ case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: cmd failed due to unsupported error injection\n");
+ break;
+ case TA_RAS_STATUS__SUCCESS:
+ break;
+ default:
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+ break;
+ }
}
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
@@ -1371,7 +1328,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras_context.context.session_id);
+ ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
if (amdgpu_ras_intr_triggered())
return ret;
@@ -1391,31 +1348,8 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
dev_warn(psp->adev->dev,
"RAS internal register access blocked\n");
- }
-
- return ret;
-}
-static int psp_ras_status_to_errno(struct amdgpu_device *adev,
- enum ta_ras_status ras_status)
-{
- int ret = -EINVAL;
-
- switch (ras_status) {
- case TA_RAS_STATUS__SUCCESS:
- ret = 0;
- break;
- case TA_RAS_STATUS__RESET_NEEDED:
- ret = -EAGAIN;
- break;
- case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
- dev_warn(adev->dev, "RAS WARN: ras function unavailable\n");
- break;
- case TA_RAS_STATUS__ERROR_ASD_READ_WRITE:
- dev_warn(adev->dev, "RAS WARN: asd read or write failed\n");
- break;
- default:
- dev_err(adev->dev, "RAS ERROR: ras function failed ret 0x%X\n", ret);
+ psp_ras_ta_check_status(psp);
}
return ret;
@@ -1444,7 +1378,7 @@ int psp_ras_enable_features(struct psp_context *psp,
if (ret)
return -EINVAL;
- return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status);
+ return 0;
}
static int psp_ras_terminate(struct psp_context *psp)
@@ -1477,6 +1411,7 @@ static int psp_ras_initialize(struct psp_context *psp)
int ret;
uint32_t boot_cfg = 0xFF;
struct amdgpu_device *adev = psp->adev;
+ struct ta_ras_shared_memory *ras_cmd;
/*
* TODO: bypass the initialize in sriov for now
@@ -1484,8 +1419,8 @@ static int psp_ras_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(adev))
return 0;
- if (!adev->psp.ras.size_bytes ||
- !adev->psp.ras.start_addr) {
+ if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
+ !adev->psp.ras_context.context.bin_desc.start_addr) {
dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
return 0;
}
@@ -1531,17 +1466,34 @@ static int psp_ras_initialize(struct psp_context *psp)
}
}
+ psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
+ psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->ras_context.context.initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)
return ret;
}
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ if (amdgpu_ras_is_poison_mode_supported(adev))
+ ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
+
ret = psp_ras_load(psp);
- if (ret)
- return ret;
- return 0;
+ if (!ret && !ras_cmd->ras_status)
+ psp->ras_context.context.initialized = true;
+ else {
+ if (ras_cmd->ras_status)
+ dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
+ amdgpu_ras_fini(psp->adev);
+ }
+
+ return ret;
}
int psp_ras_trigger_error(struct psp_context *psp,
@@ -1568,51 +1520,24 @@ int psp_ras_trigger_error(struct psp_context *psp,
if (amdgpu_ras_intr_triggered())
return 0;
- return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status);
+ if (ras_cmd->ras_status)
+ return -EINVAL;
+
+ return 0;
}
// ras end
// HDCP start
static int psp_hdcp_init_shared_buf(struct psp_context *psp)
{
- return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context,
- PSP_HDCP_SHARED_MEM_SIZE);
+ return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
}
static int psp_hdcp_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the loading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- psp_copy_fw(psp, psp->hdcp.start_addr,
- psp->hdcp.size_bytes);
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->hdcp.size_bytes,
- psp->hdcp_context.context.mem_context.shared_mc_addr,
- PSP_HDCP_SHARED_MEM_SIZE);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- if (!ret) {
- psp->hdcp_context.context.initialized = true;
- psp->hdcp_context.context.session_id = cmd->resp.session_id;
- mutex_init(&psp->hdcp_context.mutex);
- }
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_load(psp, &psp->hdcp_context.context);
}
+
static int psp_hdcp_initialize(struct psp_context *psp)
{
int ret;
@@ -1623,12 +1548,15 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->hdcp.size_bytes ||
- !psp->hdcp.start_addr) {
+ if (!psp->hdcp_context.context.bin_desc.size_bytes ||
+ !psp->hdcp_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
return 0;
}
+ psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
+ psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->hdcp_context.context.initialized) {
ret = psp_hdcp_init_shared_buf(psp);
if (ret)
@@ -1636,32 +1564,17 @@ static int psp_hdcp_initialize(struct psp_context *psp)
}
ret = psp_hdcp_load(psp);
- if (ret)
- return ret;
+ if (!ret) {
+ psp->hdcp_context.context.initialized = true;
+ mutex_init(&psp->hdcp_context.mutex);
+ }
- return 0;
+ return ret;
}
static int psp_hdcp_unload(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the unloading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_unload(psp, &psp->hdcp_context.context);
}
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
@@ -1672,7 +1585,7 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.context.session_id);
+ return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}
static int psp_hdcp_terminate(struct psp_context *psp)
@@ -1709,42 +1622,12 @@ out:
// DTM start
static int psp_dtm_init_shared_buf(struct psp_context *psp)
{
- return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context,
- PSP_DTM_SHARED_MEM_SIZE);
+ return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
}
static int psp_dtm_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the loading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- psp_copy_fw(psp, psp->dtm.start_addr, psp->dtm.size_bytes);
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->dtm.size_bytes,
- psp->dtm_context.context.mem_context.shared_mc_addr,
- PSP_DTM_SHARED_MEM_SIZE);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- if (!ret) {
- psp->dtm_context.context.initialized = true;
- psp->dtm_context.context.session_id = cmd->resp.session_id;
- mutex_init(&psp->dtm_context.mutex);
- }
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_load(psp, &psp->dtm_context.context);
}
static int psp_dtm_initialize(struct psp_context *psp)
@@ -1757,12 +1640,15 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->dtm.size_bytes ||
- !psp->dtm.start_addr) {
+ if (!psp->dtm_context.context.bin_desc.size_bytes ||
+ !psp->dtm_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
return 0;
}
+ psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
+ psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->dtm_context.context.initialized) {
ret = psp_dtm_init_shared_buf(psp);
if (ret)
@@ -1770,32 +1656,17 @@ static int psp_dtm_initialize(struct psp_context *psp)
}
ret = psp_dtm_load(psp);
- if (ret)
- return ret;
+ if (!ret) {
+ psp->dtm_context.context.initialized = true;
+ mutex_init(&psp->dtm_context.mutex);
+ }
- return 0;
+ return ret;
}
static int psp_dtm_unload(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the unloading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_unload(psp, &psp->dtm_context.context);
}
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
@@ -1806,7 +1677,7 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.context.session_id);
+ return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
}
static int psp_dtm_terminate(struct psp_context *psp)
@@ -1843,50 +1714,17 @@ out:
// RAP start
static int psp_rap_init_shared_buf(struct psp_context *psp)
{
- return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context,
- PSP_RAP_SHARED_MEM_SIZE);
+ return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
}
static int psp_rap_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- psp_copy_fw(psp, psp->rap.start_addr, psp->rap.size_bytes);
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->rap.size_bytes,
- psp->rap_context.context.mem_context.shared_mc_addr,
- PSP_RAP_SHARED_MEM_SIZE);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- if (!ret) {
- psp->rap_context.context.initialized = true;
- psp->rap_context.context.session_id = cmd->resp.session_id;
- mutex_init(&psp->rap_context.mutex);
- }
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_load(psp, &psp->rap_context.context);
}
static int psp_rap_unload(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_unload(psp, &psp->rap_context.context);
}
static int psp_rap_initialize(struct psp_context *psp)
@@ -1900,12 +1738,15 @@ static int psp_rap_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->rap.size_bytes ||
- !psp->rap.start_addr) {
+ if (!psp->rap_context.context.bin_desc.size_bytes ||
+ !psp->rap_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
return 0;
}
+ psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
+ psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->rap_context.context.initialized) {
ret = psp_rap_init_shared_buf(psp);
if (ret)
@@ -1913,16 +1754,15 @@ static int psp_rap_initialize(struct psp_context *psp)
}
ret = psp_rap_load(psp);
- if (ret)
+ if (!ret) {
+ psp->rap_context.context.initialized = true;
+ mutex_init(&psp->rap_context.mutex);
+ } else
return ret;
ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
if (ret || status != TA_RAP_STATUS__SUCCESS) {
- psp_rap_unload(psp);
-
- psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
-
- psp->rap_context.context.initialized = false;
+ psp_rap_terminate(psp);
dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
ret, status);
@@ -1971,7 +1811,7 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat
rap_cmd->cmd_id = ta_cmd_id;
rap_cmd->validation_method_id = METHOD_A;
- ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.context.session_id);
+ ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
if (ret)
goto out_unlock;
@@ -1989,49 +1829,17 @@ out_unlock:
static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
{
return psp_ta_init_shared_buf(
- psp, &psp->securedisplay_context.context.mem_context,
- PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
+ psp, &psp->securedisplay_context.context.mem_context);
}
static int psp_securedisplay_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
-
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->securedisplay.start_addr, psp->securedisplay.size_bytes);
-
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->securedisplay.size_bytes,
- psp->securedisplay_context.context.mem_context.shared_mc_addr,
- PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- if (!ret) {
- psp->securedisplay_context.context.initialized = true;
- psp->securedisplay_context.context.session_id = cmd->resp.session_id;
- mutex_init(&psp->securedisplay_context.mutex);
- }
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_load(psp, &psp->securedisplay_context.context);
}
static int psp_securedisplay_unload(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_unload(psp, &psp->securedisplay_context.context);
}
static int psp_securedisplay_initialize(struct psp_context *psp)
@@ -2045,12 +1853,16 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->securedisplay.size_bytes ||
- !psp->securedisplay.start_addr) {
+ if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
+ !psp->securedisplay_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
return 0;
}
+ psp->securedisplay_context.context.mem_context.shared_mem_size =
+ PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
+ psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->securedisplay_context.context.initialized) {
ret = psp_securedisplay_init_shared_buf(psp);
if (ret)
@@ -2058,7 +1870,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
}
ret = psp_securedisplay_load(psp);
- if (ret)
+ if (!ret) {
+ psp->securedisplay_context.context.initialized = true;
+ mutex_init(&psp->securedisplay_context.mutex);
+ } else
return ret;
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
@@ -2066,12 +1881,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
if (ret) {
- psp_securedisplay_unload(psp);
-
- psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
-
- psp->securedisplay_context.context.initialized = false;
-
+ psp_securedisplay_terminate(psp);
dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
return -EINVAL;
}
@@ -2123,7 +1933,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
mutex_lock(&psp->securedisplay_context.mutex);
- ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.context.session_id);
+ ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
mutex_unlock(&psp->securedisplay_context.mutex);
@@ -2443,8 +2253,8 @@ static int psp_load_smu_fw(struct psp_context *psp)
if ((amdgpu_in_reset(adev) &&
ras && adev->ras_enabled &&
- (adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_VEGA20))) {
+ (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret) {
DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -2541,8 +2351,9 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
continue;
if (psp->autoload_supported &&
- (adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_DIMGREY_CAVEFISH) &&
+ (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) ||
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) ||
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) &&
(ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
@@ -2629,7 +2440,7 @@ skip_memalloc:
if (ret)
goto failed;
- ret = psp_asd_load(psp);
+ ret = psp_asd_initialize(psp);
if (ret) {
DRM_ERROR("PSP load asd failed!\n");
return ret;
@@ -2721,7 +2532,7 @@ static int psp_hw_fini(void *handle)
psp_hdcp_terminate(psp);
}
- psp_asd_unload(psp);
+ psp_asd_terminate(psp);
psp_tmr_terminate(psp);
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
@@ -2779,9 +2590,9 @@ static int psp_suspend(void *handle)
}
}
- ret = psp_asd_unload(psp);
+ ret = psp_asd_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to unload asd\n");
+ DRM_ERROR("Failed to terminate asd\n");
return ret;
}
@@ -2826,12 +2637,18 @@ static int psp_resume(void *handle)
if (ret)
goto failed;
- ret = psp_asd_load(psp);
+ ret = psp_asd_initialize(psp);
if (ret) {
DRM_ERROR("PSP load asd failed!\n");
goto failed;
}
+ ret = psp_rl_load(adev);
+ if (ret) {
+ dev_err(adev->dev, "PSP load RL failed!\n");
+ goto failed;
+ }
+
if (adev->gmc.xgmi.num_physical_nodes > 1) {
ret = psp_xgmi_initialize(psp, false, true);
/* Warn on XGMI session initialization failure
@@ -2994,10 +2811,10 @@ int psp_init_asd_microcode(struct psp_context *psp,
goto out;
asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
- adev->psp.asd.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
- adev->psp.asd.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
- adev->psp.asd.start_addr = (uint8_t *)asd_hdr +
+ adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
+ adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
return 0;
out:
@@ -3129,7 +2946,8 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
ucode_array_start_addr = (uint8_t *)sos_hdr +
le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
- if (adev->gmc.xgmi.connected_to_cpu || (adev->asic_type != CHIP_ALDEBARAN)) {
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) {
adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
@@ -3284,40 +3102,43 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
- psp->asd.fw_version = le32_to_cpu(desc->fw_version);
- psp->asd.feature_version = le32_to_cpu(desc->fw_version);
- psp->asd.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->asd.start_addr = ucode_start_addr;
+ psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
+ psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->asd_context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_XGMI:
- psp->xgmi.feature_version = le32_to_cpu(desc->fw_version);
- psp->xgmi.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->xgmi.start_addr = ucode_start_addr;
+ psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_RAS:
- psp->ras.feature_version = le32_to_cpu(desc->fw_version);
- psp->ras.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->ras.start_addr = ucode_start_addr;
+ psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_HDCP:
- psp->hdcp.feature_version = le32_to_cpu(desc->fw_version);
- psp->hdcp.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->hdcp.start_addr = ucode_start_addr;
+ psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_DTM:
- psp->dtm.feature_version = le32_to_cpu(desc->fw_version);
- psp->dtm.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->dtm.start_addr = ucode_start_addr;
+ psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_RAP:
- psp->rap.feature_version = le32_to_cpu(desc->fw_version);
- psp->rap.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->rap.start_addr = ucode_start_addr;
+ psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
- psp->securedisplay.feature_version = le32_to_cpu(desc->fw_version);
- psp->securedisplay.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->securedisplay.start_addr = ucode_start_addr;
+ psp->securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(desc->fw_version);
+ psp->securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(desc->size_bytes);
+ psp->securedisplay_context.context.bin_desc.start_addr =
+ ucode_start_addr;
break;
default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
@@ -3478,7 +3299,7 @@ void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size
{
int idx;
- if (!drm_dev_enter(&psp->adev->ddev, &idx))
+ if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
return;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8ef2d28af92a..f29afabbff1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -34,17 +34,20 @@
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
-#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
-#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
-#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
-#define PSP_DTM_SHARED_MEM_SIZE 0x4000
-#define PSP_RAP_SHARED_MEM_SIZE 0x4000
-#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE 0x4000
-#define PSP_SHARED_MEM_SIZE 0x4000
#define PSP_FW_NAME_LEN 0x24
+enum psp_shared_mem_size {
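+ /* the ASD runs without a TA/host shared buffer, hence its size is 0 */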
+ PSP_ASD_SHARED_MEM_SIZE = 0x0,
+ PSP_XGMI_SHARED_MEM_SIZE = 0x4000,
+ PSP_RAS_SHARED_MEM_SIZE = 0x4000,
+ PSP_HDCP_SHARED_MEM_SIZE = 0x4000,
+ PSP_DTM_SHARED_MEM_SIZE = 0x4000,
+ PSP_RAP_SHARED_MEM_SIZE = 0x4000,
+ PSP_SECUREDISPLAY_SHARED_MEM_SIZE = 0x4000,
+};
+
struct psp_context;
struct psp_xgmi_node_info;
struct psp_xgmi_topology_info;
@@ -131,21 +134,26 @@ struct psp_xgmi_topology_info {
struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
};
-struct psp_asd_context {
- bool asd_initialized;
- uint32_t session_id;
+struct psp_bin_desc {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t size_bytes;
+ uint8_t *start_addr;
};
struct ta_mem_context {
struct amdgpu_bo *shared_bo;
uint64_t shared_mc_addr;
void *shared_buf;
+ enum psp_shared_mem_size shared_mem_size;
};
struct ta_context {
bool initialized;
uint32_t session_id;
struct ta_mem_context mem_context;
+ struct psp_bin_desc bin_desc;
+ enum psp_gfx_cmd_id ta_load_type;
};
struct ta_cp_context {
@@ -263,13 +271,6 @@ struct psp_runtime_boot_cfg_entry {
uint32_t reserved;
};
-struct psp_bin_desc {
- uint32_t fw_version;
- uint32_t feature_version;
- uint32_t size_bytes;
- uint8_t *start_addr;
-};
-
struct psp_context
{
struct amdgpu_device *adev;
@@ -301,7 +302,6 @@ struct psp_context
/* asd firmware */
const struct firmware *asd_fw;
- struct psp_bin_desc asd;
/* toc firmware */
const struct firmware *toc_fw;
@@ -326,14 +326,8 @@ struct psp_context
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
uint32_t ta_fw_version;
- struct psp_bin_desc xgmi;
- struct psp_bin_desc ras;
- struct psp_bin_desc hdcp;
- struct psp_bin_desc dtm;
- struct psp_bin_desc rap;
- struct psp_bin_desc securedisplay;
-
- struct psp_asd_context asd_context;
+
+ struct ta_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras_context;
struct ta_cp_context hdcp_context;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 96a8fd0ca1df..08133de21fdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -35,7 +35,11 @@
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "atom.h"
+#ifdef CONFIG_X86_MCE_AMD
+#include <asm/mce.h>
+static bool notifier_registered;
+#endif
static const char *RAS_FS_NAME = "ras";
const char *ras_error_string[] = {
@@ -61,8 +65,30 @@ const char *ras_block_string[] = {
"mp0",
"mp1",
"fuse",
+ "mca",
};
+const char *ras_mca_block_string[] = {
+ "mca_mp0",
+ "mca_mp1",
+ "mca_mpio",
+ "mca_iohc",
+};
+
+const char *get_ras_block_str(struct ras_common_if *ras_block)
+{
+ if (!ras_block)
+ return "NULL";
+
+ if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
+ return "OUT OF RANGE";
+
+ if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
+ return ras_mca_block_string[ras_block->sub_block_index];
+
+ return ras_block_string[ras_block->block];
+}
+
#define ras_err_str(i) (ras_error_string[ffs(i)])
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
@@ -85,6 +111,14 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+#ifdef CONFIG_X86_MCE_AMD
+static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
+struct mce_notifier_adev_list {
+ struct amdgpu_device *devs[MAX_GPU_INSTANCE];
+ int num_gpu;
+};
+static struct mce_notifier_adev_list mce_adev_list;
+#endif
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
@@ -187,7 +221,7 @@ static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
*block_id = i;
- if (strcmp(name, ras_block_str(i)) == 0)
+ if (strcmp(name, ras_block_string[i]) == 0)
return 0;
}
return -EINVAL;
@@ -509,7 +543,6 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
-
if (obj->adev->asic_type == CHIP_ALDEBARAN) {
if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
DRM_WARN("Failed to reset error counter and error status");
@@ -529,7 +562,7 @@ static inline void put_obj(struct ras_manager *obj)
if (obj && (--obj->use == 0))
list_del(&obj->node);
if (obj && (obj->use < 0))
- DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", ras_block_str(obj->head.block));
+ DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
}
/* make one obj and return it. */
@@ -545,7 +578,14 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
return NULL;
- obj = &con->objs[head->block];
+ if (head->block == AMDGPU_RAS_BLOCK__MCA) {
+ if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
+ return NULL;
+
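+ /* MCA sub-block managers are stored after the per-block managers
+ * in con->objs; see the enlarged allocation in amdgpu_ras_init().
+ */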
+ obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
+ } else
+ obj = &con->objs[head->block];
+
/* already exist. return obj? */
if (alive_obj(obj))
return NULL;
@@ -573,19 +613,21 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
return NULL;
- obj = &con->objs[head->block];
+ if (head->block == AMDGPU_RAS_BLOCK__MCA) {
+ if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
+ return NULL;
+
+ obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
+ } else
+ obj = &con->objs[head->block];
- if (alive_obj(obj)) {
- WARN_ON(head->block != obj->head.block);
+ if (alive_obj(obj))
return obj;
- }
} else {
- for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
+ for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
obj = &con->objs[i];
- if (alive_obj(obj)) {
- WARN_ON(i != obj->head.block);
+ if (alive_obj(obj))
return obj;
- }
}
}
@@ -626,8 +668,6 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
*/
if (!amdgpu_ras_is_feature_allowed(adev, head))
return 0;
- if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
- return 0;
if (enable) {
if (!obj) {
@@ -678,19 +718,14 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* Do not enable if it is not allowed. */
WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
- /* Are we alerady in that state we are going to set? */
- if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
- ret = 0;
- goto out;
- }
if (!amdgpu_ras_intr_triggered()) {
ret = psp_ras_enable_features(&adev->psp, info, enable);
if (ret) {
- dev_err(adev->dev, "ras %s %s failed %d\n",
+ dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
enable ? "enable":"disable",
- ras_block_str(head->block),
- ret);
+ get_ras_block_str(head),
+ amdgpu_ras_is_poison_mode_supported(adev), ret);
goto out;
}
}
@@ -731,7 +766,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
if (!ret)
dev_info(adev->dev,
"RAS INFO: %s setup object\n",
- ras_block_str(head->block));
+ get_ras_block_str(head));
}
} else {
/* setup the object then issue a ras TA disable cmd.*/
@@ -781,17 +816,39 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
bool bypass)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
int i;
- const enum amdgpu_ras_error_type default_ras_type =
- AMDGPU_RAS_ERROR__NONE;
+ const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
- for (i = 0; i < ras_block_count; i++) {
+ for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
struct ras_common_if head = {
.block = i,
.type = default_ras_type,
.sub_block_index = 0,
};
+
+ if (i == AMDGPU_RAS_BLOCK__MCA)
+ continue;
+
+ if (bypass) {
+ /*
+ * bypass psp; the vbios enables ras for us,
+ * so just create the obj
+ */
+ if (__amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ }
+ }
+
+ for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__MCA,
+ .type = default_ras_type,
+ .sub_block_index = i,
+ };
+
if (bypass) {
/*
* bypass psp. vbios enable ras for us.
@@ -809,6 +866,32 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
}
/* feature ctl end */
+
+void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_err_data *err_data)
+{
+ switch (ras_block->sub_block_index) {
+ case AMDGPU_RAS_MCA_BLOCK__MP0:
+ if (adev->mca.mp0.ras_funcs &&
+ adev->mca.mp0.ras_funcs->query_ras_error_count)
+ adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data);
+ break;
+ case AMDGPU_RAS_MCA_BLOCK__MP1:
+ if (adev->mca.mp1.ras_funcs &&
+ adev->mca.mp1.ras_funcs->query_ras_error_count)
+ adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data);
+ break;
+ case AMDGPU_RAS_MCA_BLOCK__MPIO:
+ if (adev->mca.mpio.ras_funcs &&
+ adev->mca.mpio.ras_funcs->query_ras_error_count)
+ adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data);
+ break;
+ default:
+ break;
+ }
+}
+
/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
struct ras_query_if *info)
@@ -872,6 +955,9 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
adev->hdp.ras_funcs->query_ras_error_count)
adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data);
break;
+ case AMDGPU_RAS_BLOCK__MCA:
+ amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data);
+ break;
default:
break;
}
@@ -893,13 +979,13 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
adev->smuio.funcs->get_socket_id(adev),
adev->smuio.funcs->get_die_id(adev),
obj->err_data.ce_count,
- ras_block_str(info->head.block));
+ get_ras_block_str(&info->head));
} else {
dev_info(adev->dev, "%ld correctable hardware errors "
"detected in %s block, no user "
"action is needed.\n",
obj->err_data.ce_count,
- ras_block_str(info->head.block));
+ get_ras_block_str(&info->head));
}
}
if (err_data.ue_count) {
@@ -912,15 +998,18 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
adev->smuio.funcs->get_socket_id(adev),
adev->smuio.funcs->get_die_id(adev),
obj->err_data.ue_count,
- ras_block_str(info->head.block));
+ get_ras_block_str(&info->head));
} else {
dev_info(adev->dev, "%ld uncorrectable hardware errors "
"detected in %s block\n",
obj->err_data.ue_count,
- ras_block_str(info->head.block));
+ get_ras_block_str(&info->head));
}
}
+ if (!amdgpu_persistent_edc_harvesting_supported(adev))
+ amdgpu_ras_reset_error_status(adev, info->head.block);
+
return 0;
}
@@ -1027,6 +1116,7 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
case AMDGPU_RAS_BLOCK__SDMA:
case AMDGPU_RAS_BLOCK__MMHUB:
case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ case AMDGPU_RAS_BLOCK__MCA:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
break;
case AMDGPU_RAS_BLOCK__XGMI_WAFL:
@@ -1034,13 +1124,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
break;
default:
dev_info(adev->dev, "%s error injection is not supported yet\n",
- ras_block_str(info->head.block));
+ get_ras_block_str(&info->head));
ret = -EINVAL;
}
if (ret)
dev_err(adev->dev, "ras inject %s failed %d\n",
- ras_block_str(info->head.block), ret);
+ get_ras_block_str(&info->head), ret);
return ret;
}
@@ -1383,7 +1473,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
if (amdgpu_ras_is_supported(adev, obj->head.block) &&
(obj->attr_inuse == 1)) {
sprintf(fs_info.debugfs_name, "%s_err_inject",
- ras_block_str(obj->head.block));
+ get_ras_block_str(&obj->head));
fs_info.head = obj->head;
amdgpu_ras_debugfs_create(adev, &fs_info, dir);
}
@@ -1469,22 +1559,28 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
data->rptr = (data->aligned_element_size +
data->rptr) % data->ring_size;
- /* Let IP handle its data, maybe we need get the output
- * from the callback to udpate the error type/count, etc
- */
if (data->cb) {
- ret = data->cb(obj->adev, &err_data, &entry);
- /* ue will trigger an interrupt, and in that case
- * we need do a reset to recovery the whole system.
- * But leave IP do that recovery, here we just dispatch
- * the error.
- */
- if (ret == AMDGPU_RAS_SUCCESS) {
- /* these counts could be left as 0 if
- * some blocks do not count error number
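+ /* In poison mode a UMC interrupt only reports that poison was
+ * created; handling is deferred until the poisoned data is
+ * consumed, so skip the per-IP callback here.
+ */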
+ if (amdgpu_ras_is_poison_mode_supported(obj->adev) &&
+ obj->head.block == AMDGPU_RAS_BLOCK__UMC)
+ dev_info(obj->adev->dev,
+ "Poison is created, no user action is needed.\n");
+ else {
+ /* Let the IP handle its data; we may need the callback's
+ * output to update the error type/count, etc.
+ */
+ ret = data->cb(obj->adev, &err_data, &entry);
+ /* A UE will trigger an interrupt, and in that case
+ * we need a reset to recover the whole system.
+ * But leave that recovery to the IP; here we just
+ * dispatch the error.
*/
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
+ if (ret == AMDGPU_RAS_SUCCESS) {
+ /* these counts could be left as 0 if
+ * some blocks do not count error number
+ */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ }
}
}
}
@@ -2014,6 +2110,11 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
}
+#ifdef CONFIG_X86_MCE_AMD
+ if ((adev->asic_type == CHIP_ALDEBARAN) &&
+ (adev->gmc.xgmi.connected_to_cpu))
+ amdgpu_register_bad_pages_mca_notifier(adev);
+#endif
return 0;
free:
@@ -2056,19 +2157,6 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
}
/* recovery end */
-/* return 0 if ras will reset gpu and repost.*/
-int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
- unsigned int block)
-{
- struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-
- if (!ras)
- return -EINVAL;
-
- ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
- return 0;
-}
-
static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
return adev->asic_type == CHIP_VEGA10 ||
@@ -2176,12 +2264,14 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int r;
+ bool df_poison, umc_poison;
if (con)
return 0;
con = kmalloc(sizeof(struct amdgpu_ras) +
- sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
+ sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
+ sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
GFP_KERNEL|__GFP_ZERO);
if (!con)
return -ENOMEM;
@@ -2245,6 +2335,23 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ /* Init poison supported flag, the default value is false */
+ if (adev->df.funcs &&
+ adev->df.funcs->query_ras_poison_mode &&
+ adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_poison_mode) {
+ df_poison =
+ adev->df.funcs->query_ras_poison_mode(adev);
+ umc_poison =
+ adev->umc.ras_funcs->query_ras_poison_mode(adev);
+ /* Only if poison is set in both DF and UMC can we support it */
+ if (df_poison && umc_poison)
+ con->poison_supported = true;
+ else if (df_poison != umc_poison)
+ dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+ df_poison, umc_poison);
+ }
+
if (amdgpu_ras_fs_init(adev)) {
r = -EINVAL;
goto release_con;
@@ -2288,6 +2395,16 @@ static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
return 0;
}
+bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return false;
+
+ return con->poison_supported;
+}
+
/* helper function to handle common stuff in ip late init phase */
int amdgpu_ras_late_init(struct amdgpu_device *adev,
struct ras_common_if *ras_block,
@@ -2306,12 +2423,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
if (r) {
- if (r == -EAGAIN) {
- /* request gpu reset. will run again */
- amdgpu_ras_request_reset_on_boot(adev,
- ras_block->block);
- return 0;
- } else if (adev->in_suspend || amdgpu_in_reset(adev)) {
+ if (adev->in_suspend || amdgpu_in_reset(adev)) {
/* in resume phase, if fail to enable ras,
* clean up all ras fs nodes, and disable ras */
goto cleanup;
@@ -2403,19 +2515,6 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
}
}
}
-
- if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
- con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
- /* setup ras obj state as disabled.
- * for init_by_vbios case.
- * if we want to enable ras, just enable it in a normal way.
- * If we want do disable it, need setup ras obj as enabled,
- * then issue another TA disable cmd.
- * See feature_enable_on_boot
- */
- amdgpu_ras_disable_all_features(adev, 1);
- amdgpu_ras_reset_gpu(adev);
- }
}
void amdgpu_ras_suspend(struct amdgpu_device *adev)
@@ -2507,3 +2606,136 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev)
kfree(con);
}
}
+
+#ifdef CONFIG_X86_MCE_AMD
+static struct amdgpu_device *find_adev(uint32_t node_id)
+{
+ int i;
+ struct amdgpu_device *adev = NULL;
+
+ for (i = 0; i < mce_adev_list.num_gpu; i++) {
+ adev = mce_adev_list.devs[i];
+
+ if (adev && adev->gmc.xgmi.connected_to_cpu &&
+ adev->gmc.xgmi.physical_node_id == node_id)
+ break;
+ adev = NULL;
+ }
+
+ return adev;
+}
+
+#define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
+#define GET_UMC_INST(m) (((m) >> 21) & 0x7)
+#define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
+#define GPU_ID_OFFSET 8
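+/*
+ * MCA_IPID field layout used by the helpers above: bits [47:44]
+ * carry the GPU id, bits [23:21] the UMC instance, and the channel
+ * index is bits [13:12] with bit 20 supplying its third bit.
+ */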
+
+static int amdgpu_bad_page_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct mce *m = (struct mce *)data;
+ struct amdgpu_device *adev = NULL;
+ uint32_t gpu_id = 0;
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst, channel_index = 0;
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct eeprom_table_record err_rec;
+ uint64_t retired_page;
+
+ /*
+ * Only process the error if it was generated in UMC_V2, which
+ * belongs to the GPU UMCs, and occurred in DramECC (extended
+ * error code = 0); otherwise bail out.
+ */
+ if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) &&
+ (XEC(m->status, 0x3f) == 0x0)))
+ return NOTIFY_DONE;
+
+ /*
+ * If it is a correctable error, return.
+ */
+ if (mce_is_correctable(m))
+ return NOTIFY_OK;
+
+ /*
+ * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
+ */
+ gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
+
+ adev = find_adev(gpu_id);
+ if (!adev) {
+ DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
+ gpu_id);
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * It is an uncorrectable error; find out the UMC instance and
+ * channel index.
+ */
+ umc_inst = GET_UMC_INST(m->ipid);
+ ch_inst = GET_CHAN_INDEX(m->ipid);
+
+ dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
+ umc_inst, ch_inst);
+
+ memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
+
+ /*
+ * Translate UMC channel address to Physical address
+ */
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num
+ + ch_inst];
+
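+ /* Rebuild the physical address of the retired page: the 8KB
+ * block and the in-block offset come from the reported address,
+ * with the translated channel index folded into the 256B block
+ * field.
+ */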
+ retired_page = ADDR_OF_8KB_BLOCK(m->addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(m->addr);
+
+ err_rec.address = m->addr;
+ err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec.ts = (uint64_t)ktime_get_real_seconds();
+ err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec.cu = 0;
+ err_rec.mem_channel = channel_index;
+ err_rec.mcumc_id = umc_inst;
+
+ err_data.err_addr = &err_rec;
+ err_data.err_addr_cnt = 1;
+
+ if (amdgpu_bad_page_threshold != 0) {
+ amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+ err_data.err_addr_cnt);
+ amdgpu_ras_save_bad_pages(adev);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block amdgpu_bad_page_nb = {
+ .notifier_call = amdgpu_bad_page_notifier,
+ .priority = MCE_PRIO_UC,
+};
+
+static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
+{
+ /*
+ * Add the adev to the mce_adev_list.
+ * During mode2 reset, amdgpu device is temporarily
+ * removed from the mgpu_info list which can cause
+ * page retirement to fail.
+ * Use this list instead of mgpu_info to find the amdgpu
+ * device on which the UMC error was reported.
+ */
+ mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
+
+ /*
+ * Register the x86 notifier with the
+ * MCE subsystem only once.
+ */
+ if (!notifier_registered) {
+ mce_register_decode_chain(&amdgpu_bad_page_nb);
+ notifier_registered = true;
+ }
+}
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index eae604fd90b8..e36f4de9fa55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -32,7 +32,6 @@
#include "amdgpu_ras_eeprom.h"
#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0)
-#define AMDGPU_RAS_FLAG_INIT_NEED_RESET (0x1 << 1)
enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
@@ -49,15 +48,22 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__MP0,
AMDGPU_RAS_BLOCK__MP1,
AMDGPU_RAS_BLOCK__FUSE,
- AMDGPU_RAS_BLOCK__MPIO,
+ AMDGPU_RAS_BLOCK__MCA,
AMDGPU_RAS_BLOCK__LAST
};
-extern const char *ras_block_string[];
+enum amdgpu_ras_mca_block {
+ AMDGPU_RAS_MCA_BLOCK__MP0 = 0,
+ AMDGPU_RAS_MCA_BLOCK__MP1,
+ AMDGPU_RAS_MCA_BLOCK__MPIO,
+ AMDGPU_RAS_MCA_BLOCK__IOHC,
+
+ AMDGPU_RAS_MCA_BLOCK__LAST
+};
-#define ras_block_str(i) (ras_block_string[i])
#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
+#define AMDGPU_RAS_MCA_BLOCK_COUNT AMDGPU_RAS_MCA_BLOCK__LAST
#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
enum amdgpu_ras_gfx_subblock {
@@ -345,6 +351,9 @@ struct amdgpu_ras {
/* disable ras error count harvest in recovery */
bool disable_ras_err_cnt_harvest;
+ /* is poison mode supported */
+ bool poison_supported;
+
/* RAS count errors delayed work */
struct delayed_work ras_counte_delay_work;
atomic_t ras_ue_count;
@@ -488,8 +497,6 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
-int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
- unsigned int block);
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
@@ -544,6 +551,8 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
return TA_RAS_BLOCK__MP1;
case AMDGPU_RAS_BLOCK__FUSE:
return TA_RAS_BLOCK__FUSE;
+ case AMDGPU_RAS_BLOCK__MCA:
+ return TA_RAS_BLOCK__MCA;
default:
WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
return TA_RAS_BLOCK__UMC;
@@ -638,4 +647,8 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev);
int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev);
+const char *get_ras_block_str(struct ras_common_if *ras_block);
+
+bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 98732518543e..05117eda105b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -1077,6 +1077,13 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
if (res)
DRM_ERROR("RAS table incorrect checksum or error:%d\n",
res);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
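+ /* written as 10 * recs >= 9 * threshold to stay in integer math */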
+ if (10 * control->ras_num_recs >= 9 * ras->bad_page_cnt_threshold)
+ dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
+ control->ras_num_recs,
+ ras->bad_page_cnt_threshold);
} else if (hdr->header == RAS_TABLE_HDR_BAD &&
amdgpu_bad_page_threshold != 0) {
res = __verify_ras_table_checksum(control);
@@ -1098,11 +1105,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
res = amdgpu_ras_eeprom_correct_header_tag(control,
RAS_TABLE_HDR_VAL);
} else {
- *exceed_err_limit = true;
- dev_err(adev->dev,
- "RAS records:%d exceed threshold:%d, "
- "maybe retire this GPU?",
+ dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
control->ras_num_recs, ras->bad_page_cnt_threshold);
+ if (amdgpu_bad_page_threshold == -2) {
+ dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -2.");
+ res = 0;
+ } else {
+ *exceed_err_limit = true;
+ dev_err(adev->dev,
+ "RAS records:%d exceed threshold:%d, "
+ "GPU will not be initialized. Replace this GPU or increase the threshold",
+ control->ras_num_recs, ras->bad_page_cnt_threshold);
+ }
}
} else {
DRM_INFO("Creating a new EEPROM table");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 0554576d3695..ab2351ba9574 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -415,26 +415,20 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
#endif
-int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
+void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
- struct dentry *ent, *root = minor->debugfs_root;
+ struct dentry *root = minor->debugfs_root;
char name[32];
sprintf(name, "amdgpu_ring_%s", ring->name);
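+ /* The file is 12 bytes larger than the ring: the read handler
+ * prepends rptr, wptr and the driver's wptr copy (3 DWORDs).
+ */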
+ debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring,
+ &amdgpu_debugfs_ring_fops,
+ ring->ring_size + 12);
- ent = debugfs_create_file(name,
- S_IFREG | S_IRUGO, root,
- ring, &amdgpu_debugfs_ring_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
-
- i_size_write(ent->d_inode, ring->ring_size + 12);
- ring->ent = ent;
#endif
- return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index e713d31619fe..4d380e79752c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -36,8 +36,13 @@
#define AMDGPU_MAX_VCE_RINGS 3
#define AMDGPU_MAX_UVD_ENC_RINGS 2
-#define AMDGPU_RING_PRIO_DEFAULT 1
-#define AMDGPU_RING_PRIO_MAX AMDGPU_GFX_PIPE_PRIO_MAX
+enum amdgpu_ring_priority_level {
+ AMDGPU_RING_PRIO_0,
+ AMDGPU_RING_PRIO_1,
+ AMDGPU_RING_PRIO_DEFAULT = 1,
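+ /* DEFAULT aliases PRIO_1, so PRIO_2 and PRIO_MAX evaluate to 2 and 3 */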
+ AMDGPU_RING_PRIO_2,
+ AMDGPU_RING_PRIO_MAX
+};
/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul)
@@ -248,10 +253,6 @@ struct amdgpu_ring {
bool has_compute_vm_bug;
bool no_scheduler;
int hw_prio;
-
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *ent;
-#endif
};
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
@@ -351,8 +352,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
-int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
-void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
-
+void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index b7d861ed5284..e9b45089a28a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -32,37 +32,9 @@
#include "amdgpu_sched.h"
#include "amdgpu_vm.h"
-int amdgpu_to_sched_priority(int amdgpu_priority,
- enum drm_sched_priority *prio)
-{
- switch (amdgpu_priority) {
- case AMDGPU_CTX_PRIORITY_VERY_HIGH:
- *prio = DRM_SCHED_PRIORITY_HIGH;
- break;
- case AMDGPU_CTX_PRIORITY_HIGH:
- *prio = DRM_SCHED_PRIORITY_HIGH;
- break;
- case AMDGPU_CTX_PRIORITY_NORMAL:
- *prio = DRM_SCHED_PRIORITY_NORMAL;
- break;
- case AMDGPU_CTX_PRIORITY_LOW:
- case AMDGPU_CTX_PRIORITY_VERY_LOW:
- *prio = DRM_SCHED_PRIORITY_MIN;
- break;
- case AMDGPU_CTX_PRIORITY_UNSET:
- *prio = DRM_SCHED_PRIORITY_UNSET;
- break;
- default:
- WARN(1, "Invalid context priority %d\n", amdgpu_priority);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
- enum drm_sched_priority priority)
+ int32_t priority)
{
struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
@@ -89,7 +61,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
int fd,
unsigned ctx_id,
- enum drm_sched_priority priority)
+ int32_t priority)
{
struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
@@ -124,7 +96,6 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_sched *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
- enum drm_sched_priority priority;
int r;
/* First check the op, then the op's argument.
@@ -138,21 +109,22 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- r = amdgpu_to_sched_priority(args->in.priority, &priority);
- if (r)
- return r;
+ if (!amdgpu_ctx_priority_is_valid(args->in.priority)) {
+ WARN(1, "Invalid context priority %d\n", args->in.priority);
+ return -EINVAL;
+ }
switch (args->in.op) {
case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
r = amdgpu_sched_process_priority_override(adev,
args->in.fd,
- priority);
+ args->in.priority);
break;
case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
r = amdgpu_sched_context_priority_override(adev,
args->in.fd,
args->in.ctx_id,
- priority);
+ args->in.priority);
break;
default:
/* Impossible.
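
[Annotation] The removed translation helper gives way to a plain validity check on the raw UAPI value, which is then passed through untranslated. amdgpu_ctx_priority_is_valid() is defined elsewhere in the series; one plausible shape, assuming the usual AMDGPU_CTX_PRIORITY_* levels, is:

	static bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
	{
		switch (ctx_prio) {
		case AMDGPU_CTX_PRIORITY_UNSET:
		case AMDGPU_CTX_PRIORITY_VERY_LOW:
		case AMDGPU_CTX_PRIORITY_LOW:
		case AMDGPU_CTX_PRIORITY_NORMAL:
		case AMDGPU_CTX_PRIORITY_HIGH:
		case AMDGPU_CTX_PRIORITY_VERY_HIGH:
			return true;
		default:
			return false;
		}
	}
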
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 94126dc39688..c875f1cdd2af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -41,6 +41,7 @@
#include <linux/swiotlb.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>
+#include <linux/module.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -59,6 +60,8 @@
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
@@ -696,6 +699,9 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
true, NULL);
out_unlock:
mmap_read_unlock(mm);
+ if (r)
+ pr_debug("failed %d to get user pages 0x%lx\n", r, start);
+
mmput(mm);
return r;
@@ -894,7 +900,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
DRM_ERROR("failed to pin userptr\n");
return r;
}
- } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+ } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
if (!ttm->sg) {
struct dma_buf_attachment *attach;
struct sg_table *sgt;
@@ -1066,8 +1072,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- amdgpu_ttm_backend_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1121,6 +1125,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ pgoff_t i;
+ int ret;
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt->userptr) {
@@ -1130,10 +1136,17 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
return 0;
}
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
return 0;
- return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
+ ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i]->mapping = bdev->dev_mapping;
+
+ return 0;
}
/*
@@ -1147,6 +1160,9 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
+ pgoff_t i;
+
+ amdgpu_ttm_backend_unbind(bdev, ttm);
if (gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
@@ -1155,9 +1171,12 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
return;
}
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
return;
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i]->mapping = NULL;
+
adev = amdgpu_ttm_adev(bdev);
return ttm_pool_free(&adev->mman.bdev.pool, ttm);
}
@@ -1185,8 +1204,8 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
return -ENOMEM;
}
- /* Set TTM_PAGE_FLAG_SG before populate but after create. */
- bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;
+ /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
+ bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
gtt = (void *)bo->ttm;
gtt->userptr = addr;
@@ -1222,7 +1241,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
*
*/
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
- unsigned long end)
+ unsigned long end, unsigned long *userptr)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned long size;
@@ -1237,6 +1256,8 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
+ if (userptr)
+ *userptr = gtt->userptr;
return true;
}
@@ -2036,6 +2057,36 @@ error_free:
return r;
}
+/**
+ * amdgpu_ttm_evict_resources - evict memory buffers
+ * @adev: amdgpu device object
+ * @mem_type: evicted BO's memory type
+ *
+ * Evicts all @mem_type buffers on the lru list of the memory type.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
+{
+ struct ttm_resource_manager *man;
+
+ switch (mem_type) {
+ case TTM_PL_VRAM:
+ case TTM_PL_TT:
+ case AMDGPU_PL_GWS:
+ case AMDGPU_PL_GDS:
+ case AMDGPU_PL_OA:
+ man = ttm_manager_type(&adev->mman.bdev, mem_type);
+ break;
+ default:
+ DRM_ERROR("Trying to evict invalid memory type\n");
+ return -EINVAL;
+ }
+
+ return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+}
+
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
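
[Annotation] A hedged usage sketch for the new amdgpu_ttm_evict_resources() helper, e.g. flushing VRAM ahead of suspend; the call site is illustrative:

	static int demo_evict_vram(struct amdgpu_device *adev)
	{
		int r;

		r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
		if (r)
			dev_warn(adev->dev, "VRAM eviction failed (%d)\n", r);
		return r;
	}
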
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 3205fd520060..7346ecff4438 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -182,7 +182,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
- unsigned long end);
+ unsigned long end, unsigned long *userptr);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
@@ -190,6 +190,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_resource *mem);
+int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type);
void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
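
[Annotation] Callers of the extended amdgpu_ttm_tt_affect_userptr() can now learn where the overlapping user range starts; passing NULL keeps the old behavior. An assumed caller might look like:

	static void demo_check_range(struct amdgpu_bo *bo,
				     unsigned long start, unsigned long end)
	{
		unsigned long userptr;

		/* pass NULL instead of &userptr if the base address is not needed */
		if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end, &userptr))
			pr_debug("[%lx, %lx) overlaps userptr at %lx\n",
				 start, end, userptr);
	}
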
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index abd8469380e5..ca3350502618 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -416,10 +416,11 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
else
return AMDGPU_FW_LOAD_PSP;
default:
- DRM_ERROR("Unknown firmware load type\n");
+ if (!load_type)
+ return AMDGPU_FW_LOAD_DIRECT;
+ else
+ return AMDGPU_FW_LOAD_PSP;
}
-
- return AMDGPU_FW_LOAD_DIRECT;
}
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
@@ -508,7 +509,7 @@ static ssize_t show_##name(struct device *dev, \
struct drm_device *ddev = dev_get_drvdata(dev); \
struct amdgpu_device *adev = drm_to_adev(ddev); \
\
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field); \
+ return sysfs_emit(buf, "0x%08x\n", adev->field); \
} \
static DEVICE_ATTR(name, mode, show_##name, NULL)
@@ -525,9 +526,9 @@ FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
-FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd.fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras.feature_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version);
+FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.fw_version);
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
@@ -572,6 +573,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
const struct mes_firmware_header_v1_0 *mes_hdr = NULL;
+ u8 *ucode_addr;
if (NULL == ucode->fw)
return 0;
@@ -588,94 +590,83 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
- (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MES &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MES_DATA &&
- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_IRAM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_DRAM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV &&
- ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) {
- ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 ||
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) {
- ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4;
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) {
- ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes) +
- le32_to_cpu(cp_hdr->jt_offset) * 4),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) {
- ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes) +
+ le32_to_cpu(cp_hdr->jt_offset) * 4;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
+ ucode_addr = adev->gfx.rlc.save_restore_list_cntl;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
+ ucode_addr = adev->gfx.rlc.save_restore_list_gpm;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
+ ucode_addr = adev->gfx.rlc.save_restore_list_srm;
+ break;
+ case AMDGPU_UCODE_ID_RLC_IRAM:
+ ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.rlc_iram_ucode;
+ break;
+ case AMDGPU_UCODE_ID_RLC_DRAM:
+ ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.rlc_dram_ucode;
+ break;
+ case AMDGPU_UCODE_ID_CP_MES:
+ ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(mes_hdr->mes_ucode_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MES_DATA:
+ ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_DMCU_ERAM:
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
le32_to_cpu(dmcu_hdr->intv_size_bytes);
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) {
- ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes) +
- le32_to_cpu(dmcu_hdr->intv_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) {
- ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
- memcpy(ucode->kaddr,
- (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
- ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
- memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) {
- ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
- memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm,
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
- ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
- memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_IRAM) {
- ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
- memcpy(ucode->kaddr, adev->gfx.rlc.rlc_iram_ucode,
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_DRAM) {
- ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
- memcpy(ucode->kaddr, adev->gfx.rlc.rlc_dram_ucode,
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES) {
- ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
- memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data +
- le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA) {
- ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
- memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data +
- le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)),
- ucode->ucode_size);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_DMCU_INTV:
+ ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes) +
+ le32_to_cpu(dmcu_hdr->intv_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_DMCUB:
+ ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ default:
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ }
+ } else {
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
}
+ memcpy(ucode->kaddr, ucode_addr, ucode->ucode_size);
+
return 0;
}
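
[Annotation] The rewrite above collapses a long if/else chain into one switch that only computes ucode_size and the source address, leaving a single memcpy() at the end. The shape of the pattern, reduced to a sketch (not the driver function):

	static void demo_copy_fw(void *dst, const struct firmware *fw, int id)
	{
		const u8 *src;
		size_t size;

		switch (id) {
		/* per-ID cases set size and src only ... */
		default:
			size = fw->size;
			src = fw->data;
			break;
		}
		memcpy(dst, src, size);	/* one copy site instead of ten */
	}
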
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index e5a75fb788dd..1f5fe2315236 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -48,6 +48,7 @@ struct amdgpu_umc_ras_funcs {
void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
+ bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
};
struct amdgpu_umc_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
new file mode 100644
index 000000000000..919d9d401750
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/ioctl.h>
+
+/*
+ * MMIO debugfs IOCTL structure
+ */
+struct amdgpu_debugfs_regs2_iocdata {
+ __u32 use_srbm, use_grbm, pg_lock;
+ struct {
+ __u32 se, sh, instance;
+ } grbm;
+ struct {
+ __u32 me, pipe, queue, vmid;
+ } srbm;
+};
+
+/*
+ * MMIO debugfs state data (per file* handle)
+ */
+struct amdgpu_debugfs_regs2_data {
+ struct amdgpu_device *adev;
+ struct mutex lock;
+ struct amdgpu_debugfs_regs2_iocdata id;
+};
+
+enum AMDGPU_DEBUGFS_REGS2_CMDS {
+	AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE = 0,
+};
+
+#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata)
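
[Annotation] A hedged userspace sketch of driving the new MMIO debugfs interface; the debugfs path is an assumption based on the usual dri/<minor> layout, and the struct/ioctl definitions from the header above are assumed available:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		/* path is illustrative; the file is created elsewhere in the series */
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs2", O_RDWR);
		struct amdgpu_debugfs_regs2_iocdata id = {
			.use_grbm = 1,
			.grbm = { .se = 0, .sh = 0, .instance = 0 },
		};

		if (fd < 0)
			return 1;
		if (ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE, &id) < 0)
			perror("set state");
		close(fd);
		return 0;
	}
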
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d451c359606a..6f8de11a17f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -134,6 +134,51 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);
+
+static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
+ uint32_t size,
+ struct amdgpu_bo **bo_ptr)
+{
+ struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_bo *bo = NULL;
+ void *addr;
+ int r;
+
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &bo, NULL, &addr);
+ if (r)
+ return r;
+
+ if (adev->uvd.address_64_bit)
+ goto succ;
+
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_uvd_force_into_uvd_segment(bo);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ goto err;
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto err_pin;
+ r = amdgpu_bo_kmap(bo, &addr);
+ if (r)
+ goto err_kmap;
+succ:
+ amdgpu_bo_unreserve(bo);
+ *bo_ptr = bo;
+ return 0;
+err_kmap:
+ amdgpu_bo_unpin(bo);
+err_pin:
+err:
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+ return r;
+}
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
@@ -302,6 +347,10 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
+ r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
+ if (r)
+ return r;
+
switch (adev->asic_type) {
case CHIP_TONGA:
adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
@@ -324,6 +373,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
+ void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
int i, j;
drm_sched_entity_destroy(&adev->uvd.entity);
@@ -342,6 +392,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
+ amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
release_firmware(adev->uvd.fw);
return 0;
@@ -403,7 +454,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
/* re-write 0 since err_event_athub will corrupt VCPU buffer */
if (in_ras_intr)
memset(adev->uvd.inst[j].saved_bo, 0, size);
@@ -436,7 +487,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
ptr = adev->uvd.inst[i].cpu_addr;
if (adev->uvd.inst[i].saved_bo != NULL) {
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
drm_dev_exit(idx);
}
@@ -449,7 +500,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
@@ -1080,23 +1131,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
unsigned offset_idx = 0;
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unpin(bo);
-
- if (!ring->adev->uvd.address_64_bit) {
- struct ttm_operation_ctx ctx = { true, false };
-
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_uvd_force_into_uvd_segment(bo);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r)
- goto err;
- }
-
r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
- goto err;
+ return r;
if (adev->asic_type >= CHIP_VEGA10) {
offset_idx = 1 + ring->me;
@@ -1147,9 +1185,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err_free;
}
+ amdgpu_bo_reserve(bo, true);
amdgpu_bo_fence(bo, f, false);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
if (fence)
*fence = dma_fence_get(f);
@@ -1159,10 +1197,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
err_free:
amdgpu_job_free(job);
-
-err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
return r;
}
@@ -1173,16 +1207,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo *bo = adev->uvd.ib_bo;
uint32_t *msg;
- int r, i;
-
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &bo, NULL, (void **)&msg);
- if (r)
- return r;
+ int i;
+ msg = amdgpu_bo_kptr(bo);
/* stitch together an UVD create msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000000);
@@ -1199,6 +1228,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
msg[i] = cpu_to_le32(0x0);
return amdgpu_uvd_send_msg(ring, bo, true, fence);
+
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
@@ -1209,12 +1239,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &bo, NULL, (void **)&msg);
- if (r)
- return r;
+ if (direct) {
+ bo = adev->uvd.ib_bo;
+ } else {
+ r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
+ if (r)
+ return r;
+ }
+ msg = amdgpu_bo_kptr(bo);
/* stitch together an UVD destroy msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000002);
@@ -1223,7 +1256,12 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = 4; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
- return amdgpu_uvd_send_msg(ring, bo, direct, fence);
+ r = amdgpu_uvd_send_msg(ring, bo, direct, fence);
+
+ if (!direct)
+ amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+
+ return r;
}
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
@@ -1298,10 +1336,17 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *fence;
long r;
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+ r = amdgpu_uvd_get_create_msg(ring, 1, &fence);
if (r)
goto error;
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ dma_fence_put(fence);
+ if (r == 0)
+ r = -ETIMEDOUT;
+ if (r < 0)
+ goto error;
+
r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
if (r)
goto error;
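
[Annotation] Because the test now reuses one preallocated message BO, the create message must be fenced and fully waited on before the destroy message reuses the buffer. The dma_fence_wait_timeout() return convention used in the hunk above (0 on timeout, negative on error, remaining jiffies on success) in isolation:

	/* sketch of the idiom; mirrors the hunk above */
	static long demo_wait(struct dma_fence *fence, long timeout)
	{
		long r = dma_fence_wait_timeout(fence, false, timeout);

		dma_fence_put(fence);
		if (r == 0)
			return -ETIMEDOUT;	/* 0 means the wait timed out */
		return r < 0 ? r : 0;		/* >0 is jiffies remaining */
	}
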
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index edbb8194ee81..76ac9699885d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -68,6 +68,7 @@ struct amdgpu_uvd {
/* store image width to adjust nb memory state */
unsigned decode_image_width;
uint32_t keyselect;
+ struct amdgpu_bo *ib_bo;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 8e8dee9fac9f..688bef1649b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -82,7 +82,6 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
@@ -314,7 +313,7 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
adev->vce.fw->size - offset);
drm_dev_exit(idx);
@@ -441,12 +440,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
+ struct amdgpu_ib ib_msg;
struct dma_fence *f = NULL;
uint64_t addr;
int i, r;
@@ -456,9 +455,17 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
if (r)
return r;
- ib = &job->ibs[0];
+ memset(&ib_msg, 0, sizeof(ib_msg));
+ /* only one gpu page is needed, alloc +1 page to make addr aligned. */
+ r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+ AMDGPU_IB_POOL_DIRECT,
+ &ib_msg);
+ if (r)
+ goto err;
- addr = amdgpu_bo_gpu_offset(bo);
+ ib = &job->ibs[0];
+ /* let addr point to page boundary */
+ addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);
/* stitch together an VCE create msg */
ib->length_dw = 0;
@@ -498,6 +505,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_job_submit_direct(job, ring, &f);
+ amdgpu_ib_free(ring->adev, &ib_msg, f);
if (r)
goto err;
@@ -1134,20 +1142,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;
- r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, NULL);
- if (r)
- return r;
-
- r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
+ r = amdgpu_vce_get_create_msg(ring, 1, NULL);
if (r)
goto error;
@@ -1163,7 +1164,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_free_kernel(&bo, NULL, NULL);
return r;
}
+
+enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
+{
+	switch (ring) {
+ case 0:
+ return AMDGPU_RING_PRIO_0;
+ case 1:
+ return AMDGPU_RING_PRIO_1;
+ case 2:
+ return AMDGPU_RING_PRIO_2;
+ default:
+ return AMDGPU_RING_PRIO_0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index d6d83a3ec803..be4a6e773c5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -71,5 +71,6 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
+enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 008a308a4eca..2658414c503d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -86,8 +86,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
- switch (adev->asic_type) {
- case CHIP_RAVEN:
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
fw_name = FIRMWARE_RAVEN2;
else if (adev->apu_flags & AMD_APU_IS_PICASSO)
@@ -95,13 +96,13 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
else
fw_name = FIRMWARE_RAVEN;
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(2, 5, 0):
fw_name = FIRMWARE_ARCTURUS;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(2, 2, 0):
if (adev->apu_flags & AMD_APU_IS_RENOIR)
fw_name = FIRMWARE_RENOIR;
else
@@ -111,58 +112,52 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(2, 6, 0):
fw_name = FIRMWARE_ALDEBARAN;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_NAVI10:
+ case IP_VERSION(2, 0, 0):
fw_name = FIRMWARE_NAVI10;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_NAVI14:
- fw_name = FIRMWARE_NAVI14;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case CHIP_NAVI12:
- fw_name = FIRMWARE_NAVI12;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case CHIP_SIENNA_CICHLID:
- fw_name = FIRMWARE_SIENNA_CICHLID;
+ case IP_VERSION(2, 0, 2):
+ if (adev->asic_type == CHIP_NAVI12)
+ fw_name = FIRMWARE_NAVI12;
+ else
+ fw_name = FIRMWARE_NAVI14;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_NAVY_FLOUNDER:
- fw_name = FIRMWARE_NAVY_FLOUNDER;
+ case IP_VERSION(3, 0, 0):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+ fw_name = FIRMWARE_SIENNA_CICHLID;
+ else
+ fw_name = FIRMWARE_NAVY_FLOUNDER;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(3, 0, 2):
fw_name = FIRMWARE_VANGOGH;
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(3, 0, 16):
fw_name = FIRMWARE_DIMGREY_CAVEFISH;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(3, 0, 33):
fw_name = FIRMWARE_BEIGE_GOBY;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(3, 1, 1):
fw_name = FIRMWARE_YELLOW_CARP;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
@@ -330,7 +325,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
if (!adev->vcn.inst[i].saved_bo)
return -ENOMEM;
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
drm_dev_exit(idx);
}
@@ -354,7 +349,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
ptr = adev->vcn.inst[i].cpu_addr;
if (adev->vcn.inst[i].saved_bo != NULL) {
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
drm_dev_exit(idx);
}
@@ -367,7 +362,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
@@ -541,15 +536,14 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
}
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
+ struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- uint64_t addr;
- void *msg = NULL;
+ uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
int i, r;
r = amdgpu_job_alloc_with_ib(adev, 64,
@@ -558,8 +552,6 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
goto err;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
- msg = amdgpu_bo_kptr(bo);
ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
ib->ptr[1] = addr;
ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -576,9 +568,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_bo_fence(bo, f, false);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+ amdgpu_ib_free(adev, ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -588,27 +578,26 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
-
err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+ amdgpu_ib_free(adev, ib_msg, f);
return r;
}
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo **bo)
+ struct amdgpu_ib *ib)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *msg;
int r, i;
- *bo = NULL;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- bo, NULL, (void **)&msg);
+ memset(ib, 0, sizeof(*ib));
+ r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+ AMDGPU_IB_POOL_DIRECT,
+ ib);
if (r)
return r;
+ msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000038);
msg[2] = cpu_to_le32(0x00000001);
@@ -630,19 +619,20 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
}
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo **bo)
+ struct amdgpu_ib *ib)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *msg;
int r, i;
- *bo = NULL;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- bo, NULL, (void **)&msg);
+ memset(ib, 0, sizeof(*ib));
+ r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+ AMDGPU_IB_POOL_DIRECT,
+ ib);
if (r)
return r;
+ msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000018);
msg[2] = cpu_to_le32(0x00000000);
@@ -658,21 +648,21 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo;
+ struct amdgpu_ib ib;
long r;
- r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
+ r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
if (r)
goto error;
- r = amdgpu_vcn_dec_send_msg(ring, bo, NULL);
+ r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
if (r)
goto error;
- r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
+ r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
if (r)
goto error;
- r = amdgpu_vcn_dec_send_msg(ring, bo, &fence);
+ r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
if (r)
goto error;
@@ -688,8 +678,8 @@ error:
}
static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
+ struct amdgpu_ib *ib_msg,
+ struct dma_fence **fence)
{
struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
const unsigned int ib_size_dw = 64;
@@ -697,7 +687,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- uint64_t addr;
+ uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
int i, r;
r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
@@ -706,7 +696,6 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
goto err;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
@@ -726,9 +715,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_bo_fence(bo, f, false);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_ib_free(adev, ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -738,31 +725,29 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
-
err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_ib_free(adev, ib_msg, f);
return r;
}
int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo;
+ struct amdgpu_ib ib;
long r;
- r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
+ r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
if (r)
goto error;
- r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL);
+ r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
if (r)
goto error;
- r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
+ r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
if (r)
goto error;
- r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence);
+ r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
if (r)
goto error;
@@ -809,7 +794,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
}
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
+ struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
@@ -825,7 +810,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
return r;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -863,7 +848,7 @@ err:
}
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
+ struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
@@ -879,7 +864,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
return r;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -918,21 +903,23 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_ib ib;
long r;
- r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, NULL);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_IB_POOL_DIRECT,
+ &ib);
if (r)
return r;
- r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
+ r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
if (r)
goto error;
- r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
+ r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
if (r)
goto error;
@@ -943,9 +930,49 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
error:
+ amdgpu_ib_free(adev, &ib, fence);
dma_fence_put(fence);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_free_kernel(&bo, NULL, NULL);
return r;
}
+
+enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
+{
+	switch (ring) {
+ case 0:
+ return AMDGPU_RING_PRIO_0;
+ case 1:
+ return AMDGPU_RING_PRIO_1;
+ case 2:
+ return AMDGPU_RING_PRIO_2;
+ default:
+ return AMDGPU_RING_PRIO_0;
+ }
+}
+
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
+{
+ int i;
+ unsigned int idx;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ const struct common_firmware_header *hdr;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ /* currently only support 2 FW instances */
+ if (i >= 2) {
+				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
+ break;
+ }
+ idx = AMDGPU_UCODE_ID_VCN + i;
+ adev->firmware.ucode[idx].ucode_id = idx;
+ adev->firmware.ucode[idx].fw = adev->vcn.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
+ }
+}
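
[Annotation] The CHIP_* checks above give way to discovered IP versions. IP_VERSION() packs major/minor/revision into one comparable integer; to the best of my knowledge the amdgpu headers define it as:

	/* believed definition from the amdgpu headers */
	#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

	/*
	 * The packing keeps versions totally ordered, which is why range
	 * checks elsewhere in this patch can use >=, e.g.:
	 *
	 *	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
	 */
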
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d74c62b49795..bfa27ea94804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -308,4 +308,8 @@ int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);
+
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ca058fbcccd4..04cf9b207e62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -532,9 +532,12 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
- POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd.fw_version);
- POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ras.feature_version);
- POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.xgmi.feature_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
+ adev->psp.asd_context.bin_desc.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
+ adev->psp.ras_context.context.bin_desc.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
+ adev->psp.xgmi_context.context.bin_desc.fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
@@ -581,6 +584,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->encode_usage = 0;
vf2pf_info->decode_usage = 0;
+ vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
vf2pf_info->checksum =
amd_sriov_msg_checksum(
vf2pf_info, vf2pf_info->header.size, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6b15cad78de9..0e7dc23f78e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -800,7 +800,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_bo *bo = &vmbo->bo;
unsigned entries, ats_entries;
uint64_t addr;
- int r;
+ int r, idx;
/* Figure out our place in the hierarchy */
if (ancestor->parent) {
@@ -845,9 +845,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
return r;
}
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return -ENODEV;
+
r = vm->update_funcs->map_table(vmbo);
if (r)
- return r;
+ goto exit;
memset(&params, 0, sizeof(params));
params.adev = adev;
@@ -856,7 +859,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
- return r;
+ goto exit;
addr = 0;
if (ats_entries) {
@@ -872,7 +875,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
value, flags);
if (r)
- return r;
+ goto exit;
addr += ats_entries * 8;
}
@@ -895,10 +898,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
value, flags);
if (r)
- return r;
+ goto exit;
}
- return vm->update_funcs->commit(&params, NULL);
+ r = vm->update_funcs->commit(&params, NULL);
+exit:
+ drm_dev_exit(idx);
+ return r;
}
/**
@@ -1384,11 +1390,14 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
- int r;
+ int r, idx;
if (list_empty(&vm->relocated))
return 0;
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return -ENODEV;
+
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
@@ -1396,7 +1405,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
- return r;
+ goto exit;
while (!list_empty(&vm->relocated)) {
struct amdgpu_vm_bo_base *entry;
@@ -1414,10 +1423,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
r = vm->update_funcs->commit(&params, &vm->last_update);
if (r)
goto error;
+ drm_dev_exit(idx);
return 0;
error:
amdgpu_vm_invalidate_pds(adev, vm);
+exit:
+ drm_dev_exit(idx);
return r;
}
@@ -1706,7 +1718,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
enum amdgpu_sync_mode sync_mode;
int r, idx;
- if (!drm_dev_enter(&adev->ddev, &idx))
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
return -ENODEV;
memset(&params, 0, sizeof(params));
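
[Annotation] The drm_dev_enter()/drm_dev_exit() pairs added throughout amdgpu_vm.c guard against hotplug removal: the enter call fails once the device is unplugged, and the index token pairs each exit with its enter. The idiom in isolation:

	static int demo_hw_access(struct amdgpu_device *adev)
	{
		int idx;

		if (!drm_dev_enter(adev_to_drm(adev), &idx))
			return -ENODEV;	/* device already unplugged */

		/* ... touch registers or GPU mappings safely ... */

		drm_dev_exit(idx);
		return 0;
	}
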
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index a434c71fde8e..7326b6c1b71c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -204,8 +204,10 @@ struct amd_sriov_msg_pf2vf_info {
} mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST];
/* UUID info */
struct amd_sriov_msg_uuid_info uuid_info;
+ /* pcie atomic Ops info */
+ uint32_t pcie_atomic_ops_enabled_flags;
/* reserved */
- uint32_t reserved[256 - 47];
+ uint32_t reserved[256 - 48];
};
struct amd_sriov_msg_vf2pf_info_header {
@@ -259,9 +261,10 @@ struct amd_sriov_msg_vf2pf_info {
uint8_t id;
uint32_t version;
} ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
+ uint64_t dummy_page_addr;
/* reserved */
- uint32_t reserved[256-68];
+ uint32_t reserved[256-70];
};
/* mailbox message send from guest to host */
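
[Annotation] Both message structs shrink their reserved tail by exactly the number of dwords the new fields consume (one for pcie_atomic_ops_enabled_flags, two for the 64-bit dummy_page_addr), keeping the pf/vf exchange format at a fixed size. A standalone sketch of the bookkeeping, with a compile-time guard that is my addition, not necessarily in the header:

	#include <assert.h>
	#include <stdint.h>

	struct demo_msg {
		uint32_t header[4];
		uint64_t dummy_page_addr;	/* new field: 2 dwords */
		uint32_t reserved[256 - 6];	/* tail shrinks to compensate */
	};

	/* the struct must stay exactly 1 KiB for pf/vf compatibility */
	static_assert(sizeof(struct demo_msg) == 1024, "demo_msg must be 1 KiB");
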
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index 3ac505d954c4..ab6a07e5e8c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -77,10 +77,9 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
athub_v2_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
athub_v2_0_update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
index c12c2900732b..2edefd10e56c 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -70,11 +70,10 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE);
athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c b/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c
deleted file mode 100644
index 608a113ce354..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "beige_goby_ip_offset.h"
-
-int beige_goby_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialize the block needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN0_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
deleted file mode 100644
index 58808814d8fb..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "cyan_skillfish_ip_offset.h"
-
-int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialized the blocke needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- }
- return 0;
-}
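
Both deleted files followed the same hand-maintained pattern: fill adev->reg_offset[HWIP][instance] with pointers into static per-IP base tables generated from the ASIC's ip_offset headers. With IP discovery those offsets come from the discovery binary instead, which is why the per-ASIC init files can go. A minimal sketch of the pattern, assuming a simplified struct layout; MAX_INSTANCE, GC_BASE and the HWIP names mirror the deleted code:

#include <stdint.h>

#define MAX_INSTANCE 7
enum { GC_HWIP, SDMA0_HWIP, HWIP_MAX };

struct ip_base { uint32_t segment[6]; };
struct ip_base_table { struct ip_base instance[MAX_INSTANCE]; };

/* Per-segment MMIO bases; the real tables are generated headers. */
static struct ip_base_table GC_BASE;

static uint32_t *reg_offset[HWIP_MAX][MAX_INSTANCE];

static int reg_base_init(void)
{
	uint32_t i;

	for (i = 0; i < MAX_INSTANCE; ++i) {
		/* Each entry points at a table of segment base offsets;
		 * note SDMA aliases the GC aperture on these parts. */
		reg_offset[GC_HWIP][i] = (uint32_t *)&GC_BASE.instance[i];
		reg_offset[SDMA0_HWIP][i] = (uint32_t *)&GC_BASE.instance[i];
	}
	return 0;
}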
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 14514a145c17..43c5e3ec9a39 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -637,6 +637,36 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
}
}
+static bool df_v3_6_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t hw_assert_msklo, hw_assert_mskhi;
+ uint32_t v0, v1, v28, v31;
+
+ hw_assert_msklo = RREG32_SOC15(DF, 0,
+ mmDF_CS_UMC_AON0_HardwareAssertMaskLow);
+ hw_assert_mskhi = RREG32_SOC15(DF, 0,
+ mmDF_NCS_PG0_HardwareAssertMaskHigh);
+
+ v0 = REG_GET_FIELD(hw_assert_msklo,
+ DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0);
+ v1 = REG_GET_FIELD(hw_assert_msklo,
+ DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk1);
+ v28 = REG_GET_FIELD(hw_assert_mskhi,
+ DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28);
+ v31 = REG_GET_FIELD(hw_assert_mskhi,
+ DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31);
+
+ if (v0 && v1 && v28 && v31)
+ return true;
+ else if (!v0 && !v1 && !v28 && !v31)
+ return false;
+ else {
+ dev_warn(adev->dev, "DF poison setting is inconsistent(%d:%d:%d:%d)!\n",
+ v0, v1, v28, v31);
+ return false;
+ }
+}
+
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
.sw_fini = df_v3_6_sw_fini,
@@ -651,4 +681,5 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
.pmc_get_count = df_v3_6_pmc_get_count,
.get_fica = df_v3_6_get_fica,
.set_fica = df_v3_6_set_fica,
+ .query_ras_poison_mode = df_v3_6_query_ras_poison_mode,
};
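
The new query_ras_poison_mode callback treats the four HWAssertMsk bits across the two mask registers as a single switch: poison mode is reported on only when all four agree, and a mixed configuration warns and fails safe to false. A standalone sketch of that agreement check, with the register reads stubbed out:

#include <stdbool.h>
#include <stdio.h>

static bool poison_mode_from_bits(unsigned v0, unsigned v1,
				  unsigned v28, unsigned v31)
{
	if (v0 && v1 && v28 && v31)
		return true;		/* all masks set: poison mode on */
	if (!v0 && !v1 && !v28 && !v31)
		return false;		/* all clear: poison mode off */

	/* Mixed settings mean a misconfigured DF; warn, report off. */
	fprintf(stderr, "DF poison setting is inconsistent(%u:%u:%u:%u)!\n",
		v0, v1, v28, v31);
	return false;
}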
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 16dbe593cba2..90a834dc4008 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -270,25 +270,6 @@ MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec2.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_rlc.bin");
-static const struct soc15_reg_golden golden_settings_gc_10_0[] =
-{
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
- /* TA_GRAD_ADJ_UCONFIG -> TA_GRAD_ADJ */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382),
- /* VGT_TF_RING_SIZE_UMD -> VGT_TF_RING_SIZE */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2262c24e),
- /* VGT_HS_OFFCHIP_PARAM_UMD -> VGT_HS_OFFCHIP_PARAM */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226cc24f),
- /* VGT_TF_MEMORY_BASE_UMD -> VGT_TF_MEMORY_BASE */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226ec250),
- /* VGT_TF_MEMORY_BASE_HI_UMD -> VGT_TF_MEMORY_BASE_HI */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2278c261),
- /* VGT_ESGS_RING_SIZE_UMD -> VGT_ESGS_RING_SIZE */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2232c240),
- /* VGT_GSVS_RING_SIZE_UMD -> VGT_GSVS_RING_SIZE */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2233c241),
-};
-
static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
@@ -1537,7 +1518,7 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
scratch_reg3 = adev->rmmio +
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
- if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
spare_int = adev->rmmio +
(adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
+ mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
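
This is the recurring shape of the whole series: enum checks such as asic_type >= CHIP_SIENNA_CICHLID become comparisons on the discovered GC IP version. That works because IP_VERSION packs major/minor/revision into one integer (the macro below reproduces the packing from the amdgpu headers, for illustration), so plain integer comparison orders versions. The caveat is that range checks can now catch unintended family members, which is why some conversions below turn into explicit equality lists:

#include <assert.h>
#include <stdint.h>

/* Packing reproduced from the amdgpu headers for illustration. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

int main(void)
{
	uint32_t gc = IP_VERSION(10, 3, 2);

	/* Every GFX 10.3.x block sorts at or above 10.3.0, so one >=
	 * test covers the whole Sienna Cichlid family. */
	assert(gc >= IP_VERSION(10, 3, 0));

	/* But 10.1.3 (Cyan Skillfish) sits inside 10.1.1..10.1.10, so
	 * the old NAVI10..NAVI12 range check later in this file has to
	 * become three equality tests instead of a bounded compare. */
	assert(IP_VERSION(10, 1, 1) < IP_VERSION(10, 1, 3));
	assert(IP_VERSION(10, 1, 3) < IP_VERSION(10, 1, 10));
	return 0;
}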
@@ -3727,18 +3708,18 @@ static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
soc15_program_register_sequence(adev,
golden_settings_gc_rlc_spm_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
break;
- case CHIP_NAVI14:
+ case IP_VERSION(10, 1, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_rlc_spm_10_1_nv14,
(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
break;
- case CHIP_NAVI12:
+ case IP_VERSION(10, 1, 2):
soc15_program_register_sequence(adev,
golden_settings_gc_rlc_spm_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
@@ -3750,8 +3731,8 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
soc15_program_register_sequence(adev,
golden_settings_gc_10_1,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1));
@@ -3759,7 +3740,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
break;
- case CHIP_NAVI14:
+ case IP_VERSION(10, 1, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_1,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_1));
@@ -3767,7 +3748,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_1_nv14,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
break;
- case CHIP_NAVI12:
+ case IP_VERSION(10, 1, 2):
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_2,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2));
@@ -3775,7 +3756,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(10, 3, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3));
@@ -3783,35 +3764,32 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_3_sienna_cichlid,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_sienna_cichlid));
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(10, 3, 2):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_2,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_2));
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(10, 3, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_vangogh,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_vangogh));
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 3):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_3,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_3));
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(10, 3, 4):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_4,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_4));
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(10, 3, 5):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_5,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_5));
break;
- case CHIP_CYAN_SKILLFISH:
- soc15_program_register_sequence(adev,
- golden_settings_gc_10_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_10_0));
+ case IP_VERSION(10, 1, 3):
soc15_program_register_sequence(adev,
golden_settings_gc_10_0_cyan_skillfish,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_cyan_skillfish));
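
Each case above pairs a family-wide golden sequence with an ASIC-specific one, applied through soc15_program_register_sequence(). The sketch below shows the usual and/or semantics of a sequence entry; the fast path and the masking follow my reading of soc15.c, so treat the details as illustrative rather than authoritative:

#include <stdint.h>

struct reg_golden {
	uint32_t offset;
	uint32_t and_mask;	/* field being retargeted */
	uint32_t or_mask;	/* golden value for that field */
};

/* Shadow-register MMIO stubs; offsets assumed < 65536 in this sketch. */
static uint32_t shadow[1 << 16];
static uint32_t mmio_read(uint32_t off) { return shadow[off]; }
static void mmio_write(uint32_t off, uint32_t v) { shadow[off] = v; }

static void program_register_sequence(const struct reg_golden *regs,
				      uint32_t count)
{
	uint32_t i, tmp;

	for (i = 0; i < count; i++) {
		if (regs[i].and_mask == 0xffffffffu) {
			/* whole register replaced outright */
			tmp = regs[i].or_mask;
		} else {
			/* read-modify-write: clear the field, then OR
			 * in the golden bits for that field only */
			tmp = mmio_read(regs[i].offset);
			tmp &= ~regs[i].and_mask;
			tmp |= regs[i].or_mask & regs[i].and_mask;
		}
		mmio_write(regs[i].offset, tmp);
	}
}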
@@ -3985,11 +3963,11 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
{
adev->gfx.cp_fw_write_wait = false;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- case CHIP_CYAN_SKILLFISH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 3):
if ((adev->gfx.me_fw_version >= 0x00000046) &&
(adev->gfx.me_feature_version >= 27) &&
(adev->gfx.pfp_fw_version >= 0x00000068) &&
@@ -3998,12 +3976,12 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 27))
adev->gfx.cp_fw_write_wait = true;
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfx.cp_fw_write_wait = true;
break;
default:
@@ -4066,8 +4044,8 @@ static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
break;
@@ -4093,38 +4071,38 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
chip_name = "navi10";
break;
- case CHIP_NAVI14:
+ case IP_VERSION(10, 1, 1):
chip_name = "navi14";
if (!(adev->pdev->device == 0x7340 &&
adev->pdev->revision != 0x00))
wks = "_wks";
break;
- case CHIP_NAVI12:
+ case IP_VERSION(10, 1, 2):
chip_name = "navi12";
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(10, 3, 0):
chip_name = "sienna_cichlid";
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(10, 3, 2):
chip_name = "navy_flounder";
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(10, 3, 1):
chip_name = "vangogh";
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(10, 3, 4):
chip_name = "dimgrey_cavefish";
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(10, 3, 5):
chip_name = "beige_goby";
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 3):
chip_name = "yellow_carp";
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(10, 1, 3):
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
chip_name = "cyan_skillfish2";
else
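
chip_name only selects a firmware file prefix; the navi14 branch additionally appends "_wks" except on the one device/revision pair that ships the base firmware, and Cyan Skillfish picks between two sets by APU flag. A hypothetical helper showing how prefix and suffix become a request path of the form the MODULE_FIRMWARE lines declare (build_fw_name is mine, not the driver's):

#include <stdint.h>
#include <stdio.h>

static void build_fw_name(char *buf, size_t len, const char *chip,
			  uint16_t device, uint8_t revision)
{
	const char *wks = "";

	/* Mirrors the navi14 special case: everything except device
	 * 0x7340 at a non-zero revision gets the "_wks" firmware. */
	if (!(device == 0x7340 && revision != 0x00))
		wks = "_wks";

	snprintf(buf, len, "amdgpu/%s%s_pfp.bin", chip, wks);
}

int main(void)
{
	char name[64];

	build_fw_name(name, sizeof(name), "navi14", 0x7340, 0x00);
	printf("%s\n", name);	/* amdgpu/navi14_wks_pfp.bin */
	return 0;
}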
@@ -4684,10 +4662,10 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.funcs = &gfx_v10_0_gfx_funcs;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4695,12 +4673,12 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4710,7 +4688,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config_fields.num_pkrs =
1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(10, 1, 3):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4818,11 +4796,11 @@ static int gfx_v10_0_sw_init(void *handle)
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_CYAN_SKILLFISH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -4830,12 +4808,12 @@ static int gfx_v10_0_sw_init(void *handle)
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -5068,8 +5046,8 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
bitmap = i * adev->gfx.config.max_sh_per_se + j;
- if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
- (adev->asic_type == CHIP_YELLOW_CARP)) &&
+ if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3))) &&
((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
continue;
gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
@@ -5096,7 +5074,7 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade
/* for ASICs that integrates GFX v10.3
* pa_sc_tile_steering_override should be set to 0 */
- if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
return 0;
/* init num_sc */
@@ -5249,7 +5227,7 @@ static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
/* TCCs are global (not instanced). */
uint32_t tcc_disable;
- if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) |
RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3);
} else {
@@ -5326,7 +5304,7 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
- if (adev->asic_type == CHIP_NAVI12) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) {
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
@@ -5948,7 +5926,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- if (adev->asic_type == CHIP_NAVI12) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) {
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
} else {
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
@@ -6337,13 +6315,13 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
}
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
}
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index);
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
@@ -6474,13 +6452,13 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
if (enable) {
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0);
break;
default:
@@ -6488,13 +6466,13 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
break;
}
} else {
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK |
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
@@ -6586,13 +6564,13 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
/* tell RLC which is KIQ queue */
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
@@ -7303,11 +7281,11 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
/* check if mmVGT_ESGS_RING_SIZE_UMD
* has been remapped to mmVGT_ESGS_RING_SIZE */
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid);
WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, 0);
WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
@@ -7320,8 +7298,8 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
return false;
}
break;
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
return true;
default:
data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
@@ -7350,13 +7328,13 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
* index will auto-inc after each data writting */
WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
/* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
@@ -7520,19 +7498,19 @@ static int gfx_v10_0_hw_init(void *handle)
* init golden registers and rlc resume may override some registers,
* reconfig them here
*/
- if (adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_NAVI14 ||
- adev->asic_type == CHIP_NAVI12)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2))
gfx_v10_0_tcp_harvest(adev);
r = gfx_v10_0_cp_resume(adev);
if (r)
return r;
- if (adev->asic_type == CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
gfx_v10_3_program_pbb_mode(adev);
- if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
gfx_v10_3_set_power_brake_sequence(adev);
return r;
@@ -7584,7 +7562,7 @@ static int gfx_v10_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
- if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
tmp &= 0xffffff00;
WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
@@ -7670,13 +7648,13 @@ static int gfx_v10_0_soft_reset(void *handle)
/* GRBM_STATUS2 */
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid))
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET,
@@ -7726,9 +7704,9 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock, clock_lo, clock_hi, hi_check;
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
break;
@@ -7784,19 +7762,19 @@ static int gfx_v10_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_CYAN_SKILLFISH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid;
break;
default:
@@ -7848,13 +7826,13 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
data = RLC_SAFE_MODE__CMD_MASK;
data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
/* wait for RLC_SAFE_MODE */
@@ -7884,13 +7862,13 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
uint32_t data;
data = RLC_SAFE_MODE__CMD_MASK;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
break;
default:
@@ -8193,7 +8171,7 @@ static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_d
mmCGTS_SA1_QUAD1_SM_CTRL_REG
};
- if (adev->asic_type == CHIP_NAVI12) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) {
for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs_nv12); i++) {
reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] +
tcp_ctrl_regs_nv12[i];
@@ -8238,8 +8216,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === CGCG + CGLS === */
gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
- if ((adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_NAVI12))
+ if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)))
gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev);
} else {
/* CGCG/CGLS should be disabled before MGCG/MGLS
@@ -8335,12 +8314,12 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
* Power/performance team will optimize it and might give a new value later.
*/
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 1):
data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 3):
data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
break;
@@ -8399,18 +8378,18 @@ static int gfx_v10_0_set_powergating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
amdgpu_gfx_off_ctrl(adev, enable);
break;
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
gfx_v10_cntl_pg(adev, enable);
amdgpu_gfx_off_ctrl(adev, enable);
break;
@@ -8428,16 +8407,16 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
gfx_v10_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -9541,19 +9520,19 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 3, 0):
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov;
break;
default:
@@ -9641,8 +9620,8 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
bitmap = i * adev->gfx.config.max_sh_per_se + j;
- if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
- (adev->asic_type == CHIP_YELLOW_CARP)) &&
+ if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3))) &&
((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
continue;
mask = 1;
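
The get_cu_info and setup_rb loops share one harvesting idiom on GC 10.3.0/10.3.3: flatten (shader engine, shader array) into a bit index and skip entries whose SA is fused off. Distilled below, with the disabled-SA query stubbed as a plain bitmap:

#include <stdbool.h>
#include <stdint.h>

static uint32_t disabled_sa_bitmap;	/* bit n set => SA n fused off */

static bool sa_is_disabled(uint32_t se, uint32_t sh, uint32_t sh_per_se)
{
	return (disabled_sa_bitmap >> (se * sh_per_se + sh)) & 1;
}

static void for_each_active_sa(uint32_t max_se, uint32_t sh_per_se)
{
	uint32_t i, j;

	for (i = 0; i < max_se; i++)
		for (j = 0; j < sh_per_se; j++) {
			if (sa_is_disabled(i, j, sh_per_se))
				continue;	/* harvested: no CU data */
			/* select_se_sh(i, j) and read CU bitmaps here */
		}
}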
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 025184a556ee..7f944bb11298 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -953,8 +953,8 @@ static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
@@ -962,7 +962,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_0_vg10,
ARRAY_SIZE(golden_settings_gc_9_0_vg10));
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_2_1,
ARRAY_SIZE(golden_settings_gc_9_2_1));
@@ -970,7 +970,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_2_1_vg12,
ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
@@ -978,12 +978,13 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_0_vg20,
ARRAY_SIZE(golden_settings_gc_9_0_vg20));
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_4_1_arct,
ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
soc15_program_register_sequence(adev, golden_settings_gc_9_1,
ARRAY_SIZE(golden_settings_gc_9_1));
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
@@ -995,12 +996,12 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_1_rv1,
ARRAY_SIZE(golden_settings_gc_9_1_rv1));
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rn,
ARRAY_SIZE(golden_settings_gc_9_1_rn));
return; /* for renoir, don't need common goldensetting */
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
gfx_v9_4_2_init_golden_registers(adev,
adev->smuio.funcs->get_die_id(adev));
break;
@@ -1008,8 +1009,8 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
break;
}
- if ((adev->asic_type != CHIP_ARCTURUS) &&
- (adev->asic_type != CHIP_ALDEBARAN))
+ if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&
+ (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2)))
soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
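
Note the Raven handling: a single CHIP_RAVEN case fans out into two GC versions, 9.2.2 and 9.1.0, which fall through to the same sequence, with apu_flags still choosing the Raven2 versus rv1 extras. A compact model of that two-level dispatch (flag bit and strings are placeholders):

#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
#define APU_IS_RAVEN2 (1u << 0)	/* placeholder flag bit */

static const char *raven_golden_choice(unsigned int gc_ver,
				       unsigned int apu_flags)
{
	switch (gc_ver) {
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 1, 0):
		/* both Raven-family versions share one case; the APU
		 * flag still selects the extra sequence */
		return (apu_flags & APU_IS_RAVEN2) ?
			"gc_9_1 + raven2 extras" : "gc_9_1 + rv1 extras";
	default:
		return "common settings only";
	}
}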
@@ -1193,15 +1194,15 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false;
- if ((adev->asic_type != CHIP_ARCTURUS) &&
+ if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&
((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
(adev->gfx.pfp_feature_version < 46)))
DRM_WARN_ONCE("CP firmware version too old, please update!");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 42) &&
(adev->gfx.pfp_fw_version >= 0x000000b1) &&
@@ -1212,7 +1213,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 42))
adev->gfx.mec_fw_write_wait = true;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 44) &&
(adev->gfx.pfp_fw_version >= 0x000000b2) &&
@@ -1223,7 +1224,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 44))
adev->gfx.mec_fw_write_wait = true;
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 44) &&
(adev->gfx.pfp_fw_version >= 0x000000b2) &&
@@ -1234,7 +1235,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 44))
adev->gfx.mec_fw_write_wait = true;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 42) &&
(adev->gfx.pfp_fw_version >= 0x000000b1) &&
@@ -1297,7 +1299,7 @@ static bool is_raven_kicker(struct amdgpu_device *adev)
static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
{
- if ((adev->asic_type == CHIP_RENOIR) &&
+ if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) &&
(adev->gfx.me_fw_version >= 0x000000a5) &&
(adev->gfx.me_feature_version >= 52))
return true;
@@ -1310,12 +1312,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
(adev->apu_flags & AMD_APU_IS_PICASSO)) &&
((!is_raven_kicker(adev) &&
@@ -1329,7 +1332,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_RLC_SMU_HS;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_CP |
@@ -1553,9 +1556,9 @@ out:
static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_ALDEBARAN ||
- adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_RENOIR)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0))
return false;
return true;
@@ -1663,17 +1666,18 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
chip_name = "vega10";
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
chip_name = "vega12";
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
chip_name = "vega20";
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
else if (adev->apu_flags & AMD_APU_IS_PICASSO)
@@ -1681,16 +1685,16 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
else
chip_name = "raven";
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
chip_name = "arcturus";
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
if (adev->apu_flags & AMD_APU_IS_RENOIR)
chip_name = "renoir";
else
chip_name = "green_sardine";
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
chip_name = "aldebaran";
break;
default:
@@ -1794,7 +1798,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
always_on_cu_num = 4;
- else if (adev->asic_type == CHIP_VEGA12)
+ else if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1))
always_on_cu_num = 8;
else
always_on_cu_num = 12;
@@ -1963,11 +1967,12 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return r;
}
- switch (adev->asic_type) {
- case CHIP_RAVEN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
gfx_v9_0_init_lbpw(adev);
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
gfx_v9_4_init_lbpw(adev);
break;
default:
@@ -2142,8 +2147,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2151,7 +2156,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2160,7 +2165,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
DRM_INFO("fix gfx.config for vega12\n");
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@@ -2175,7 +2180,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
if (err)
return err;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2186,7 +2192,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
else
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@@ -2197,7 +2203,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22014042;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2207,7 +2213,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22010042;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@@ -2305,14 +2311,15 @@ static int gfx_v9_0_sw_init(void *handle)
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
adev->gfx.mec.num_mec = 2;
break;
default:
@@ -2596,8 +2603,8 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
{
uint32_t tmp;
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 1):
tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
DISABLE_BARRIER_WAITCNT, 1);
@@ -2932,7 +2939,7 @@ static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
- if (adev->asic_type != CHIP_RENOIR)
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0))
pwr_10_0_gfxip_control_over_cgpg(adev, true);
}
}
@@ -3044,7 +3051,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
* And it's needed by gfxoff feature.
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
- if (adev->asic_type == CHIP_VEGA12 ||
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1) ||
(adev->apu_flags & AMD_APU_IS_RAVEN2))
gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
@@ -3157,14 +3164,15 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- switch (adev->asic_type) {
- case CHIP_RAVEN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
if (amdgpu_lbpw == 0)
gfx_v9_0_enable_lbpw(adev, false);
else
gfx_v9_0_enable_lbpw(adev, true);
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
if (amdgpu_lbpw > 0)
gfx_v9_0_enable_lbpw(adev, true);
else
@@ -3959,8 +3967,8 @@ static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
{
u32 tmp;
- if (adev->asic_type != CHIP_ARCTURUS &&
- adev->asic_type != CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1) &&
+ adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2))
return;
tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
@@ -4000,7 +4008,7 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- if (adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
gfx_v9_4_2_set_power_brake_sequence(adev);
return r;
@@ -4232,7 +4240,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
clock = gfx_v9_0_kiq_read_clock(adev);
} else {
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
@@ -4582,7 +4590,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!ring->sched.ready)
return 0;
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
@@ -4732,8 +4740,8 @@ static int gfx_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
adev->gfx.num_gfx_rings = 0;
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
@@ -4767,7 +4775,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)
}
/* requires IBs so do in late init after IB pool is initialized */
- if (adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
else
r = gfx_v9_0_do_edc_gpr_workarounds(adev);
@@ -4895,7 +4903,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- if (adev->asic_type != CHIP_VEGA12)
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))
data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
@@ -4929,7 +4937,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* 1 - MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- if (adev->asic_type != CHIP_VEGA12)
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))
data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
@@ -5035,7 +5043,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
/* enable cgcg FSM(0x0000363F) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))
data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
else
@@ -5161,9 +5169,10 @@ static int gfx_v9_0_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 3, 0):
if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
@@ -5189,7 +5198,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
if (enable)
amdgpu_gfx_off_ctrl(adev, true);
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
@@ -5207,14 +5216,15 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
gfx_v9_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -5256,7 +5266,7 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) {
/* AMD_CG_SUPPORT_GFX_3D_CGCG */
data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
@@ -7027,14 +7037,15 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
break;
default:
@@ -7045,17 +7056,18 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
/* init asci gds info */
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
adev->gds.gds_size = 0x10000;
break;
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
adev->gds.gds_size = 0x1000;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
/* aldebaran removed all the GDS internal memory,
* only support GWS opcode in kernel, like barrier
* semaphore.etc */
@@ -7066,24 +7078,25 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
break;
}
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA20:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 4, 0):
adev->gds.gds_compute_max_wave_id = 0x7ff;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
adev->gds.gds_compute_max_wave_id = 0x27f;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
else
adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->gds.gds_compute_max_wave_id = 0xfff;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
/* deprecated for Aldebaran, no usage at all */
adev->gds.gds_compute_max_wave_id = 0;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 00a2b36a24b3..c4f37a161875 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -706,6 +706,11 @@ int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return 0;
+ /* Workaround for ALDEBARAN, skip GPRs init in GPU reset.
+    Will remove it once GPRs init algorithm works for all CU settings. */
+ if (amdgpu_in_reset(adev))
+ return 0;
+
gfx_v9_4_2_do_sgprs_init(adev);
gfx_v9_4_2_do_vgprs_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 1a374ec0514a..e80d1dc43079 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -506,8 +506,8 @@ static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
u32 max_num_physical_nodes = 0;
u32 max_physical_node_id = 0;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
+ switch (adev->ip_versions[XGMI_HWIP][0]) {
+ case IP_VERSION(4, 8, 0):
max_num_physical_nodes = 4;
max_physical_node_id = 3;
break;
@@ -544,7 +544,7 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se *
adev->gfx.config.max_shader_engines);
- if (adev->asic_type == CHIP_YELLOW_CARP) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
/* Get SA disabled bitmap from eFuse setting */
efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
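
On GC 10.3.3 the disabled-SA mask now comes straight from an eFuse register field. REG_GET_FIELD is just a mask-and-shift; the sketch below uses made-up mask and shift values, not the real CC_GC_SA_UNIT_DISABLE layout:

#include <stdint.h>

#define SA_DISABLE_MASK  0x0000ff00u	/* illustrative field mask */
#define SA_DISABLE_SHIFT 8		/* illustrative field shift */

static uint32_t sa_disable_from_efuse(uint32_t efuse)
{
	return (efuse & SA_DISABLE_MASK) >> SA_DISABLE_SHIFT;
}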
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index e47104a1f559..3ec5ff5a6dbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -133,7 +133,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
* the new fast GRBM interface.
*/
if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
- (adev->asic_type < CHIP_SIENNA_CICHLID))
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
@@ -268,7 +268,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
* to avoid a false ACK due to the new fast GRBM interface.
*/
if ((vmhub == AMDGPU_GFXHUB_0) &&
- (adev->asic_type < CHIP_SIENNA_CICHLID))
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng, hub_ip);
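
Both hunks above keep the same workaround but re-key it: GC parts below 10.3.0 need a throwaway register read before the one whose value is used, so a stale acknowledgement from the fast GRBM interface gets consumed first. The shape of it, with MMIO stubbed:

#include <stdint.h>

static uint32_t mmio_read(uint32_t off) { (void)off; return 0; }

static uint32_t read_with_grbm_workaround(uint32_t off, int pre_gc_10_3)
{
	if (pre_gc_10_3)
		(void)mmio_read(off);	/* discard possibly stale ack */
	return mmio_read(off);		/* the read that counts */
}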
@@ -657,8 +657,8 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
+ switch (adev->ip_versions[UMC_HWIP][0]) {
+ case IP_VERSION(8, 7, 0):
adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
@@ -674,9 +674,9 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 4, 0):
adev->mmhub.funcs = &mmhub_v2_3_funcs;
break;
default:
@@ -687,13 +687,13 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
break;
default:
@@ -800,23 +800,9 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
- if (amdgpu_gart_size == -1) {
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- default:
- adev->gmc.gart_size = 512ULL << 20;
- break;
- }
- } else
+ if (amdgpu_gart_size == -1)
+ adev->gmc.gart_size = 512ULL << 20;
+ else
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
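
The old switch listed every GFX10 ASIC only to fall into a shared default, so collapsing it to one assignment is behavior-preserving. A quick check of the constant it keeps:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Every GFX10 part defaults to the same GART aperture unless
	 * the amdgpu_gart_size module parameter overrides it. */
	uint64_t gart_size = 512ULL << 20;

	assert(gart_size == 512ull * 1024 * 1024);	/* 512 MiB */
	return 0;
}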
@@ -871,17 +857,17 @@ static int gmc_v10_0_sw_init(void *handle)
adev->gmc.vram_vendor = vram_vendor;
}
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->num_vmhubs = 2;
/*
* To fulfill 4-level page support,
@@ -989,21 +975,6 @@ static int gmc_v10_0_sw_fini(void *handle)
static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- break;
- default:
- break;
- }
}
/**
@@ -1162,8 +1133,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
if (r)
return r;
- if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_YELLOW_CARP)
+ if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
return athub_v2_1_set_clockgating(adev, state);
else
return athub_v2_0_set_clockgating(adev, state);
@@ -1175,8 +1145,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
adev->mmhub.funcs->get_clockgating(adev, flags);
- if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_YELLOW_CARP)
+ if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
athub_v2_1_get_clockgating(adev, flags);
else
athub_v2_0_get_clockgating(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 0e81e03e9b49..0fe714f54cca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -841,12 +841,12 @@ static int gmc_v6_0_sw_init(void *handle)
adev->gmc.mc_mask = 0xffffffffffULL;
- r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
+ r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
dev_warn(adev->dev, "No suitable DMA available.\n");
return r;
}
- adev->need_swiotlb = drm_need_swiotlb(44);
+ adev->need_swiotlb = drm_need_swiotlb(40);
r = gmc_v6_0_init_microcode(adev);
if (r) {
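
The gmc_v6_0 hunk is the one functional fix in this stretch: the DMA mask is narrowed from 44 to 40 bits, matching the 40-bit mc_mask (0xffffffffff) set a few lines earlier, and drm_need_swiotlb() is given the same width so the bounce-buffer decision agrees with the mask. The arithmetic, using the kernel's DMA_BIT_MASK definition reproduced for illustration:

#include <assert.h>
#include <stdint.h>

/* Reproduced from the kernel for illustration. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* 40 bits addresses 1 TiB and equals the GMC v6 mc_mask; the
	 * previous 44-bit mask (16 TiB) overstated the reachable range. */
	assert(DMA_BIT_MASK(40) == 0xffffffffffULL);
	assert(DMA_BIT_MASK(40) < DMA_BIT_MASK(44));
	return 0;
}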
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 5551359d5dfd..cb82404df534 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -579,7 +579,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* the new fast GRBM interface.
*/
if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
- (adev->asic_type < CHIP_ALDEBARAN))
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
@@ -597,26 +597,28 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
gfxhub_client_ids[cid],
cid);
} else {
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
mmhub_cid = mmhub_client_ids_vega10[cid][rw];
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 3, 0):
mmhub_cid = mmhub_client_ids_vega12[cid][rw];
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
mmhub_cid = mmhub_client_ids_vega20[cid][rw];
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 0):
mmhub_cid = mmhub_client_ids_raven[cid][rw];
break;
- case CHIP_RENOIR:
+ case IP_VERSION(1, 5, 0):
+ case IP_VERSION(2, 4, 0):
mmhub_cid = mmhub_client_ids_renoir[cid][rw];
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
break;
default:
@@ -694,7 +696,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
- if (adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
return false;
return ((vmhub == AMDGPU_MMHUB_0 ||
@@ -745,7 +747,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
hub = &adev->vmhub[vmhub];
if (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20) {
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
/* Vega20+XGMI caches PTEs in TC and TLB. Add a
* heavy-weight TLB flush (type 2), which flushes
* both. Due to a race condition with concurrent
@@ -808,7 +810,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* GRBM interface.
*/
if ((vmhub == AMDGPU_GFXHUB_0) &&
- (adev->asic_type < CHIP_ALDEBARAN))
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
RREG32_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng);
@@ -874,7 +876,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
* still need a second TLB flush after this.
*/
bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20);
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
/* 2 dwords flush + 8 dwords fence */
unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
@@ -1088,13 +1090,13 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if ((adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN) &&
+ if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) &&
!(*flags & AMDGPU_PTE_SYSTEM) &&
mapping->bo_va->is_xgmi)
*flags |= AMDGPU_PTE_SNOOPED;
- if (adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
*flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
}
@@ -1108,9 +1110,10 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
} else {
u32 viewport;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 1, 0):
viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
size = (REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
@@ -1118,9 +1121,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
4);
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
default:
viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
@@ -1151,11 +1151,11 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[UMC_HWIP][0]) {
+ case IP_VERSION(6, 0, 0):
adev->umc.funcs = &umc_v6_0_funcs;
break;
- case CHIP_VEGA20:
+ case IP_VERSION(6, 1, 1):
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
@@ -1163,7 +1163,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(6, 1, 2):
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
@@ -1171,7 +1171,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(6, 7, 0):
adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
@@ -1190,11 +1190,11 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(9, 4, 1):
adev->mmhub.funcs = &mmhub_v9_4_funcs;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
adev->mmhub.funcs = &mmhub_v1_7_funcs;
break;
default:
@@ -1205,14 +1205,14 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA20:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(9, 4, 0):
adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
break;
default:
@@ -1233,8 +1233,9 @@ static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_ALDEBARAN:
+ /* is UMC the right IP to check for MCA? Maybe DF? */
+ switch (adev->ip_versions[UMC_HWIP][0]) {
+ case IP_VERSION(6, 7, 0):
if (!adev->gmc.xgmi.connected_to_cpu)
adev->mca.funcs = &mca_v3_0_funcs;
break;
@@ -1247,11 +1248,12 @@ static int gmc_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
if (adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_ARCTURUS)
adev->gmc.xgmi.supported = true;
- if (adev->asic_type == CHIP_ALDEBARAN) {
+ if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
adev->gmc.xgmi.supported = true;
adev->gmc.xgmi.connected_to_cpu =
adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
@@ -1289,7 +1291,8 @@ static int gmc_v9_0_late_init(void *handle)
* Workaround for a performance drop issue when the VBIOS enables partial
* writes while disabling HBM ECC for vega10.
*/
- if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
+ if (!amdgpu_sriov_vf(adev) &&
+ (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
@@ -1393,17 +1396,18 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
/* set the gart size */
if (amdgpu_gart_size == -1) {
- switch (adev->asic_type) {
- case CHIP_VEGA10: /* all engines support GPUVM */
- case CHIP_VEGA12: /* all engines support GPUVM */
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1): /* all engines support GPUVM */
+ case IP_VERSION(9, 2, 1): /* all engines support GPUVM */
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
default:
adev->gmc.gart_size = 512ULL << 20;
break;
- case CHIP_RAVEN: /* DCE SG support */
- case CHIP_RENOIR:
+ case IP_VERSION(9, 1, 0): /* DCE SG support */
+ case IP_VERSION(9, 2, 2): /* DCE SG support */
+ case IP_VERSION(9, 3, 0):
adev->gmc.gart_size = 1024ULL << 20;
break;
}
@@ -1464,7 +1468,8 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
*/
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_RAVEN)
+ if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
+ (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
@@ -1507,8 +1512,9 @@ static int gmc_v9_0_sw_init(void *handle)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
adev->num_vmhubs = 2;
if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
@@ -1520,11 +1526,11 @@ static int gmc_v9_0_sw_init(void *handle)
adev->vm_manager.num_level > 1;
}
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RENOIR:
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
adev->num_vmhubs = 2;
@@ -1539,7 +1545,7 @@ static int gmc_v9_0_sw_init(void *handle)
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->num_vmhubs = 3;
/* Keep the vm size the same as Vega20 */
@@ -1555,7 +1561,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
if (r)
@@ -1622,8 +1628,8 @@ static int gmc_v9_0_sw_init(void *handle)
* for video processing.
*/
adev->vm_manager.first_kfd_vmid =
- (adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN) ? 3 : 8;
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8;
amdgpu_vm_manager_init(adev);
@@ -1649,12 +1655,12 @@ static int gmc_v9_0_sw_fini(void *handle)
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
if (amdgpu_sriov_vf(adev))
break;
fallthrough;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
ARRAY_SIZE(golden_settings_mmhub_1_0_0));
@@ -1662,9 +1668,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_athub_1_0_0,
ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
- case CHIP_VEGA12:
- break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 0):
/* TODO for renoir */
soc15_program_register_sequence(adev,
golden_settings_athub_1_0_0,
@@ -1684,7 +1689,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
*/
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_RAVEN) {
+ if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
+ (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
WARN_ON(adev->gmc.sdpif_register !=
RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
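The save/restore pair above now keys off the two DCE/DCN 1.0.x versions rather than CHIP_RAVEN. A hypothetical helper (not in the patch) that would collapse the duplicated check:

static bool gmc_v9_0_is_dcn10(struct amdgpu_device *adev)
{
	return adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0) ||
	       adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1);
}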
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 74b90cc2bf48..eecfb1545c1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -49,7 +49,7 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))
return;
if (!ring || !ring->funcs->emit_wreg)
@@ -79,7 +79,7 @@ static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
return;
- if (adev->asic_type >= CHIP_ALDEBARAN)
+ if (adev->ip_versions[HDP_HWIP][0] >= IP_VERSION(4, 4, 0))
WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0);
else
/* read back hdp ras counter to reset it to 0 */
@@ -91,9 +91,10 @@ static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data;
- if (adev->asic_type == CHIP_VEGA10 ||
- adev->asic_type == CHIP_VEGA12 ||
- adev->asic_type == CHIP_RAVEN) {
+ if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 0) ||
+ adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 1) ||
+ adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 1) ||
+ adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 0)) {
def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
@@ -135,8 +136,8 @@ static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
+ switch (adev->ip_versions[HDP_HWIP][0]) {
+ case IP_VERSION(4, 2, 1):
WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
break;
default:
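hdp_v4_0_update_clock_gating above now enumerates four HDP versions where the old code listed three chips, since one chip name covers two HDP revisions. A hypothetical predicate (not in the patch) expressing the same gate:

static bool hdp_v4_0_supports_mem_power_ls(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[HDP_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
		return true;
	default:
		return false;
	}
}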
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 85967a5570cb..299de1d131d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -32,26 +32,6 @@
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
-#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
-#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029
-#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
-#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb
-#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
-#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9
-#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed
-#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
-#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
-#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
-#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
-
-#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v2_0_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 15a344ed340f..1a03baa59755 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -24,6 +24,26 @@
#ifndef __JPEG_V2_0_H__
#define __JPEG_V2_0_H__
+#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
+#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029
+#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
+#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb
+#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
+#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9
+#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed
+#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
+#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
+#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
+#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+
+#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 46096ad7f0d9..a29c86617fb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -423,6 +423,42 @@ static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+/**
+ * jpeg_v2_6_dec_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void jpeg_v2_6_dec_ring_insert_start(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80000000 | (1 << (ring->me * 2 + 14)));
+}
+
+/**
+ * jpeg_v2_6_dec_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14)));
+}
+
static bool jpeg_v2_5_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -633,8 +669,8 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v2_0_dec_ring_nop,
- .insert_start = jpeg_v2_0_dec_ring_insert_start,
- .insert_end = jpeg_v2_0_dec_ring_insert_end,
+ .insert_start = jpeg_v2_6_dec_ring_insert_start,
+ .insert_end = jpeg_v2_6_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_jpeg_ring_begin_use,
.end_use = amdgpu_jpeg_ring_end_use,
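jpeg_v2_6_dec_ring_insert_start/_end above differ from the jpeg_v2_0 versions only in the value written to JRBC_DEC_EXTERNAL_REG_WRITE_ADDR: each JPEG instance owns its own bit, selected from ring->me. The bit math, as an illustration of what the two writes encode:

/* illustration only, derived from the hunk above */
static inline u32 jpeg_v2_6_jrbc_bit(u32 me)
{
	return 1u << (me * 2 + 14);	/* instance 0 -> bit 14, instance 1 -> bit 16 */
}
/* insert_start writes 0x80000000 | jpeg_v2_6_jrbc_bit(ring->me);
 * insert_end writes jpeg_v2_6_jrbc_bit(ring->me) alone. */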
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
index 058b65730a84..8f7107d392af 100644
--- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
@@ -52,7 +52,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = {
.ras_fini = mca_v3_0_mp0_ras_fini,
.query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
.query_ras_error_address = NULL,
- .ras_block = AMDGPU_RAS_BLOCK__MP0,
+ .ras_block = AMDGPU_RAS_BLOCK__MCA,
+ .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP0,
.sysfs_name = "mp0_err_count",
};
@@ -79,7 +80,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = {
.ras_fini = mca_v3_0_mp1_ras_fini,
.query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
.query_ras_error_address = NULL,
- .ras_block = AMDGPU_RAS_BLOCK__MP1,
+ .ras_block = AMDGPU_RAS_BLOCK__MCA,
+ .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP1,
.sysfs_name = "mp1_err_count",
};
@@ -106,7 +108,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = {
.ras_fini = mca_v3_0_mpio_ras_fini,
.query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
.query_ras_error_address = NULL,
- .ras_block = AMDGPU_RAS_BLOCK__MPIO,
+ .ras_block = AMDGPU_RAS_BLOCK__MCA,
+ .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MPIO,
.sysfs_name = "mpio_err_count",
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 7ded6b2f058e..25f8e93e5ec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -153,18 +153,16 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
dev_err(adev->dev,
"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
mmhub_cid = mmhub_client_ids_navi1x[cid][rw];
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(2, 1, 2):
mmhub_cid = mmhub_client_ids_beige_goby[cid][rw];
break;
default:
@@ -571,11 +569,10 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
return;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
break;
@@ -606,11 +603,10 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
}
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
if (def != data)
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
if (def1 != data1)
@@ -633,11 +629,10 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
return;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
break;
default:
@@ -651,11 +646,10 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
if (def != data) {
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
break;
default:
@@ -671,14 +665,12 @@ static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
mmhub_v2_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
mmhub_v2_0_update_medium_grain_light_sleep(adev,
@@ -698,11 +690,10 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 88e457a150e0..a11d60ec6321 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -90,9 +90,9 @@ mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
dev_err(adev->dev,
"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 4, 0):
mmhub_cid = mmhub_client_ids_vangogh[cid][rw];
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 530011622801..1d8414c3fadb 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -107,7 +107,7 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
{
u32 ih_cntl, ih_rb_cntl;
- if (adev->asic_type < CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[OSSSYS_HWIP][0] < IP_VERSION(5, 0, 3))
return;
ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2);
@@ -332,13 +332,10 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
if (ih[0]->use_bus_addr) {
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[OSSSYS_HWIP][0]) {
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 1):
ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid);
ih_chicken = REG_SET_FIELD(ih_chicken,
IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
deleted file mode 100644
index 88efaecf9f70..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "navi10_ip_offset.h"
-
-int navi10_reg_base_init(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
- }
-
- return 0;
-}
-
-
diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
deleted file mode 100644
index a786d159e5e9..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "navi12_ip_offset.h"
-
-int navi12_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialized the blocks needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
deleted file mode 100644
index 4ea1e8fbb601..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "navi14_ip_offset.h"
-
-int navi14_reg_base_init(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b184b656b9b6..4ecd2b5808ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -53,6 +53,16 @@
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. Firmware uses this bit internally */
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+
static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
@@ -318,6 +328,27 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
+const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc = {
+ .ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
+ .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
+ .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
+ .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
+ .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
+ .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK,
+ .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK,
+ .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK,
+};
+
static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
index a43b60acf7f6..6074dd3a1ed8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
@@ -27,6 +27,7 @@
#include "soc15_common.h"
extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
+extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc;
extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index f50045cebd44..b8bd03d16dba 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -56,12 +56,15 @@
* These are nbio v7_4_1 register masks. Temporarily define these here since
* nbio v7_4_1 header is incomplete.
*/
-#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. Firmware uses this bit internally */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2
@@ -334,12 +337,27 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
- .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
- .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
- .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
- .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
- .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
- .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
+};
+
+const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = {
+ .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
+ .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
+ .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
+ .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
+ .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
+ .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK,
+ .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK,
+ .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK,
};
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
@@ -387,13 +405,13 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
"errors detected in %s block, "
"no user action is needed.\n",
obj->err_data.ce_count,
- ras_block_str(adev->nbio.ras_if->block));
+ get_ras_block_str(adev->nbio.ras_if));
if (err_data.ue_count)
dev_info(adev->dev, "%ld uncorrectable hardware "
"errors detected in %s block\n",
obj->err_data.ue_count,
- ras_block_str(adev->nbio.ras_if->block));
+ get_ras_block_str(adev->nbio.ras_if));
}
dev_info(adev->dev, "RAS controller interrupt triggered "
@@ -566,7 +584,9 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a
return r;
}
-#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030
+#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030
+#define smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE 0x13b20030
+#define smnRAS_GLOBAL_STATUS_LO_ALDE 0x13b20020
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
@@ -575,12 +595,20 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
uint32_t corr, fatal, non_fatal;
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE);
+ else
+ global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+
corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
ParityErrNonFatal);
- parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE);
+ else
+ parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);
if (corr)
err_data->ce_count++;
@@ -589,13 +617,21 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
if (corr || fatal || non_fatal) {
central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
+
/* clear error status register */
- WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE, global_sts);
+ else
+ WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
- if (fatal)
+ if (fatal) {
/* clear parity fatal error indication field */
- WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2,
- parity_sts);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE, parity_sts);
+ else
+ WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, parity_sts);
+ }
if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
BIFL_RasContller_Intr_Recv)) {
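The new nbio_v7_4_hdp_flush_reg_ald table above shifts the eight SDMA ref/mask bits up by one reserved engine, keeping RSVD_ENG0 free because firmware uses that bit internally. The reserved-engine masks follow a simple progression, shown here as an illustrative macro (not in the patch):

/* RSVD_ENGn occupies bit 12 + n, i.e. 0x00001000L << n for n = 0..8 */
#define NBIO_RSVD_ENG_MASK(n)	(0x00001000UL << (n))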
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index b8216581ec8d..cc5692db6f98 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
@@ -27,6 +27,7 @@
#include "soc15_common.h"
extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
+extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index ff80786e3918..febc903adf58 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -180,8 +180,8 @@ static const struct amdgpu_video_codecs yc_video_codecs_decode = {
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(3, 0, 0):
if (amdgpu_sriov_vf(adev)) {
if (encode)
*codecs = &sriov_sc_video_codecs_encode;
@@ -194,29 +194,27 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
*codecs = &sc_video_codecs_decode;
}
return 0;
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_VANGOGH:
+ case IP_VERSION(3, 0, 16):
+ case IP_VERSION(3, 0, 2):
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode;
return 0;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(3, 1, 1):
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &yc_video_codecs_decode;
return 0;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(3, 0, 33):
if (encode)
*codecs = &bg_video_codecs_encode;
else
*codecs = &bg_video_codecs_decode;
return 0;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
if (encode)
*codecs = &nv_video_codecs_encode;
else
@@ -511,14 +509,15 @@ nv_asic_reset_method(struct amdgpu_device *adev)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
return AMD_RESET_METHOD_MODE2;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
return AMD_RESET_METHOD_MODE1;
default:
if (amdgpu_dpm_is_baco_supported(adev))
@@ -599,7 +598,7 @@ static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
-static const struct amdgpu_ip_block_version nv_common_ip_block =
+const struct amdgpu_ip_block_version nv_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 1,
@@ -608,314 +607,11 @@ static const struct amdgpu_ip_block_version nv_common_ip_block =
.funcs = &nv_common_ip_funcs,
};
-static bool nv_is_headless_sku(struct pci_dev *pdev)
-{
- if ((pdev->device == 0x731E &&
- (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
- (pdev->device == 0x7340 && pdev->revision == 0xC9) ||
- (pdev->device == 0x7360 && pdev->revision == 0xC7))
- return true;
- return false;
-}
-
-static int nv_reg_base_init(struct amdgpu_device *adev)
-{
- int r;
-
- if (amdgpu_discovery) {
- r = amdgpu_discovery_reg_base_init(adev);
- if (r) {
- DRM_WARN("failed to init reg base from ip discovery table, "
- "fallback to legacy init method\n");
- goto legacy_init;
- }
-
- amdgpu_discovery_harvest_ip(adev);
- if (nv_is_headless_sku(adev->pdev)) {
- adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
- adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
- }
-
- return 0;
- }
-
-legacy_init:
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- navi10_reg_base_init(adev);
- break;
- case CHIP_NAVI14:
- navi14_reg_base_init(adev);
- break;
- case CHIP_NAVI12:
- navi12_reg_base_init(adev);
- break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- sienna_cichlid_reg_base_init(adev);
- break;
- case CHIP_VANGOGH:
- vangogh_reg_base_init(adev);
- break;
- case CHIP_DIMGREY_CAVEFISH:
- dimgrey_cavefish_reg_base_init(adev);
- break;
- case CHIP_BEIGE_GOBY:
- beige_goby_reg_base_init(adev);
- break;
- case CHIP_YELLOW_CARP:
- yellow_carp_reg_base_init(adev);
- break;
- case CHIP_CYAN_SKILLFISH:
- cyan_skillfish_reg_base_init(adev);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
void nv_set_virt_ops(struct amdgpu_device *adev)
{
adev->virt.ops = &xgpu_nv_virt_ops;
}
-int nv_set_ip_blocks(struct amdgpu_device *adev)
-{
- int r;
-
- if (adev->asic_type == CHIP_CYAN_SKILLFISH) {
- adev->nbio.funcs = &nbio_v2_3_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- } else if (adev->flags & AMD_IS_APU) {
- adev->nbio.funcs = &nbio_v7_2_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
- } else {
- adev->nbio.funcs = &nbio_v2_3_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- }
- adev->hdp.funcs = &hdp_v5_0_funcs;
-
- if (adev->asic_type >= CHIP_SIENNA_CICHLID)
- adev->smuio.funcs = &smuio_v11_0_6_funcs;
- else
- adev->smuio.funcs = &smuio_v11_0_funcs;
-
- if (adev->asic_type == CHIP_SIENNA_CICHLID)
- adev->gmc.xgmi.supported = true;
-
- /* Set IP register base before any HW register access */
- r = nv_reg_base_init(adev);
- if (r)
- return r;
-
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- !amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- !amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
- if (adev->enable_mes)
- amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
- break;
- case CHIP_NAVI12:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- if (!amdgpu_sriov_vf(adev)) {
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- } else {
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- }
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- !amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
- break;
- case CHIP_SIENNA_CICHLID:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- if (!amdgpu_sriov_vf(adev)) {
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- } else {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- }
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
- if (adev->enable_mes)
- amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
- break;
- case CHIP_NAVY_FLOUNDER:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- break;
- case CHIP_VANGOGH:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
- break;
- case CHIP_DIMGREY_CAVEFISH:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
- break;
- case CHIP_BEIGE_GOBY:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- break;
- case CHIP_YELLOW_CARP:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
- break;
- case CHIP_CYAN_SKILLFISH:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- }
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
return adev->nbio.funcs->get_rev_id(adev);
@@ -1056,8 +752,11 @@ static int nv_common_early_init(void *handle)
adev->rev_id = nv_get_rev_id(adev);
adev->external_rev_id = 0xff;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+	/* TODO: split the CG and PG flags based on the IP versions to
+	 * which they apply.
+	 */
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_IH_CG |
@@ -1079,7 +778,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_ATHUB;
adev->external_rev_id = adev->rev_id + 0x1;
break;
- case CHIP_NAVI14:
+ case IP_VERSION(10, 1, 1):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_IH_CG |
@@ -1100,7 +799,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 20;
break;
- case CHIP_NAVI12:
+ case IP_VERSION(10, 1, 2):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -1129,7 +828,7 @@ static int nv_common_early_init(void *handle)
adev->rev_id = 0;
adev->external_rev_id = adev->rev_id + 0xa;
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(10, 3, 0):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1153,7 +852,7 @@ static int nv_common_early_init(void *handle)
}
adev->external_rev_id = adev->rev_id + 0x28;
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(10, 3, 2):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1172,8 +871,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_MMHUB;
adev->external_rev_id = adev->rev_id + 0x32;
break;
-
- case CHIP_VANGOGH:
+ case IP_VERSION(10, 3, 1):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1196,7 +894,7 @@ static int nv_common_early_init(void *handle)
if (adev->apu_flags & AMD_APU_IS_VANGOGH)
adev->external_rev_id = adev->rev_id + 0x01;
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(10, 3, 4):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1215,7 +913,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_MMHUB;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(10, 3, 5):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1232,7 +930,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_MMHUB;
adev->external_rev_id = adev->rev_id + 0x46;
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 3):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -1257,11 +955,11 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
if (adev->pdev->device == 0x1681)
- adev->external_rev_id = adev->rev_id + 0x19;
+ adev->external_rev_id = 0x20;
else
adev->external_rev_id = adev->rev_id + 0x01;
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(10, 1, 3):
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x82;
@@ -1388,14 +1086,14 @@ static int nv_common_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 3, 1):
+ case IP_VERSION(2, 3, 2):
+ case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
+ case IP_VERSION(3, 3, 3):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
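
The nv.c hunks above replace asic_type switches with lookups keyed on adev->ip_versions[HWIP][0]. The IP_VERSION(major, minor, rev) macro packs the three components of a discovered IP block revision into one integer, so several hardware revisions can share case labels. A minimal standalone sketch of that dispatch style; the bit layout (major << 16 | minor << 8 | rev) is assumed to match the amdgpu convention:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit layout; follows the amdgpu IP_VERSION() convention. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

static const char *sdma_fw_name(uint32_t ip)
{
	/* Dispatch on the discovered IP version, not an ASIC enum. */
	switch (ip) {
	case IP_VERSION(5, 0, 0): return "navi10";
	case IP_VERSION(5, 0, 2): return "navi14";
	case IP_VERSION(5, 0, 5): return "navi12";
	default:                  return "unknown";
	}
}

int main(void)
{
	uint32_t ip = IP_VERSION(5, 0, 2);

	printf("0x%06x -> %s\n", (unsigned)ip, sdma_fw_name(ip));
	return 0;
}

Built this way, supporting a new revision usually means adding one case label instead of introducing a new ASIC enum value and threading it through every switch.
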
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h
index 1f40ba3b0460..83e9782aef39 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/nv.h
@@ -26,18 +26,10 @@
#include "nbio_v2_3.h"
+extern const struct amdgpu_ip_block_version nv_common_ip_block;
+
void nv_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void nv_set_virt_ops(struct amdgpu_device *adev);
-int nv_set_ip_blocks(struct amdgpu_device *adev);
-int navi10_reg_base_init(struct amdgpu_device *adev);
-int navi14_reg_base_init(struct amdgpu_device *adev);
-int navi12_reg_base_init(struct amdgpu_device *adev);
-int sienna_cichlid_reg_base_init(struct amdgpu_device *adev);
-void vangogh_reg_base_init(struct amdgpu_device *adev);
-int dimgrey_cavefish_reg_base_init(struct amdgpu_device *adev);
-int beige_goby_reg_base_init(struct amdgpu_device *adev);
-int yellow_carp_reg_base_init(struct amdgpu_device *adev);
-int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);
#endif
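
With the per-chip reg_base_init() prototypes gone, nv.h only exports the common IP block descriptor, which the IP discovery code registers directly. A hedged, simplified mirror of the descriptor's shape (field names follow amdgpu, but this is a sketch, not the exact kernel definition):

/* Simplified mirror of the descriptor; a sketch, not the kernel layout. */
struct amd_ip_funcs;	/* hook table: early_init, hw_init, suspend, ... */

struct amdgpu_ip_block_version {
	int type;			/* AMD_IP_BLOCK_TYPE_* */
	unsigned int major, minor, rev;
	const struct amd_ip_funcs *funcs;
};

/* Exporting the instance lets the IP discovery code register it with
 * amdgpu_device_ip_block_add() directly, replacing the deleted
 * nv_set_ip_blocks()/<chip>_reg_base_init() helpers. */
extern const struct amdgpu_ip_block_version nv_common_ip_block;
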
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 5872d68ed13d..ed2293686f0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -84,28 +84,28 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
- adev->psp.hdcp.feature_version =
+ adev->psp.hdcp_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp.size_bytes =
+ adev->psp.hdcp_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp.start_addr =
+ adev->psp.hdcp_context.context.bin_desc.start_addr =
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
- adev->psp.dtm.feature_version =
+ adev->psp.dtm_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm.size_bytes =
+ adev->psp.dtm_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm.start_addr =
- (uint8_t *)adev->psp.hdcp.start_addr +
+ adev->psp.dtm_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
- adev->psp.securedisplay.feature_version =
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->securedisplay.fw_version);
- adev->psp.securedisplay.size_bytes =
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->securedisplay.size_bytes);
- adev->psp.securedisplay.start_addr =
- (uint8_t *)adev->psp.hdcp.start_addr +
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
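
The psp_v10_0.c hunk moves each trusted application's firmware metadata (hdcp, dtm, securedisplay) into a common bin_desc embedded in its context, so parsing the TA header becomes uniform across TAs. A hedged sketch with hypothetical, simplified structures; psp_fill_bin_desc() is an illustrative helper, not a function in the driver:

#include <stdint.h>

/* Hypothetical, simplified mirrors of the structures involved. */
struct psp_bin_desc {
	uint32_t fw_version;
	uint32_t size_bytes;
	uint8_t *start_addr;
};

struct ta_entry {			/* one per-TA record in the header */
	uint32_t fw_version;
	uint32_t size_bytes;
	uint32_t offset_bytes;		/* offset from the first TA image */
};

/* Illustrative helper: fill one common descriptor from a header entry;
 * "base" is the start of the first TA image in the firmware blob. */
static void psp_fill_bin_desc(struct psp_bin_desc *d,
			      const struct ta_entry *e, uint8_t *base)
{
	d->fw_version = e->fw_version;
	d->size_bytes = e->size_bytes;
	d->start_addr = base + e->offset_bytes;
}

With every TA sharing one descriptor type, the hdcp/dtm/securedisplay assignments above differ only in which header entry and base offset they pass.
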
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 29bf9f09944b..2176ef85f137 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -93,35 +93,35 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_VEGA20:
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(11, 0, 2):
chip_name = "vega20";
break;
- case CHIP_NAVI10:
+ case IP_VERSION(11, 0, 0):
chip_name = "navi10";
break;
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 5):
chip_name = "navi14";
break;
- case CHIP_NAVI12:
+ case IP_VERSION(11, 0, 9):
chip_name = "navi12";
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(11, 0, 4):
chip_name = "arcturus";
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(11, 0, 7):
chip_name = "sienna_cichlid";
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(11, 0, 11):
chip_name = "navy_flounder";
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(11, 5, 0):
chip_name = "vangogh";
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(11, 0, 12):
chip_name = "dimgrey_cavefish";
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 13):
chip_name = "beige_goby";
break;
default:
@@ -129,9 +129,9 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
}
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -151,20 +151,26 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version);
- adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes);
- adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr +
+ adev->psp.xgmi_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->xgmi.fw_version);
+ adev->psp.xgmi_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->xgmi.size_bytes);
+ adev->psp.xgmi_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.ras.feature_version = le32_to_cpu(ta_hdr->ras.fw_version);
- adev->psp.ras.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes);
- adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr +
+ adev->psp.ras_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->ras.fw_version);
+ adev->psp.ras_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->ras.size_bytes);
+ adev->psp.ras_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->ras.offset_bytes);
}
break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -186,30 +192,31 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.hdcp.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp.start_addr = (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+ adev->psp.hdcp_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->hdcp.fw_version);
+ adev->psp.hdcp_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->hdcp.size_bytes);
+ adev->psp.hdcp_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
+			le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.dtm.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm.start_addr = (uint8_t *)adev->psp.hdcp.start_addr +
+ adev->psp.dtm_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->dtm.fw_version);
+ adev->psp.dtm_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->dtm.size_bytes);
+ adev->psp.dtm_context.context.bin_desc.start_addr =
+			(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
}
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- err = psp_init_sos_microcode(psp, chip_name);
- if (err)
- return err;
- err = psp_init_ta_microcode(psp, chip_name);
- if (err)
- return err;
- break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -217,7 +224,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
if (err)
return err;
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(11, 5, 0):
err = psp_init_asd_microcode(psp, chip_name);
if (err)
return err;
@@ -691,7 +698,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
return -ENOMEM;
}
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
if (ret) {
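
The memory-training hunk also switches from &adev->ddev to the adev_to_drm() accessor when taking the drm_dev_enter()/drm_dev_exit() guard, which skips device-memory accesses if the device has been unplugged concurrently. A hedged sketch of the guard pattern; the declarations stand in for the real DRM helpers:

#include <errno.h>

struct drm_device;

/* Stand-ins for the real DRM helpers (sketch only). */
int  drm_dev_enter(struct drm_device *dev, int *idx);	/* nonzero if alive */
void drm_dev_exit(int idx);

static int touch_vram(struct drm_device *ddev)
{
	int idx;

	if (!drm_dev_enter(ddev, &idx))
		return -ENODEV;		/* device already unplugged */

	/* ... safe to touch the BAR/VRAM mapping here ... */

	drm_dev_exit(idx);
	return 0;
}
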
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index cc649406234b..a2588200ea58 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -84,22 +84,22 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
- adev->psp.hdcp.feature_version =
+ adev->psp.hdcp_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp.size_bytes =
+ adev->psp.hdcp_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp.start_addr =
+ adev->psp.hdcp_context.context.bin_desc.start_addr =
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.dtm.feature_version =
+ adev->psp.dtm_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm.size_bytes =
+ adev->psp.dtm_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm.start_addr =
- (uint8_t *)adev->psp.hdcp.start_addr +
+ adev->psp.dtm_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 47a500f64db2..17655bc6d2f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -47,18 +47,19 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
const char *chip_name;
int err = 0;
- switch (adev->asic_type) {
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
chip_name = "aldebaran";
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
chip_name = "yellow_carp";
break;
default:
BUG();
}
- switch (adev->asic_type) {
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -66,7 +67,8 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
if (err)
return err;
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
err = psp_init_asd_microcode(psp, chip_name);
if (err)
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 8931000dcd41..e8e4749e9c79 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -469,8 +469,8 @@ static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma_4,
ARRAY_SIZE(golden_settings_sdma_4));
@@ -478,7 +478,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_vg10,
ARRAY_SIZE(golden_settings_sdma_vg10));
break;
- case CHIP_VEGA12:
+ case IP_VERSION(4, 0, 1):
soc15_program_register_sequence(adev,
golden_settings_sdma_4,
ARRAY_SIZE(golden_settings_sdma_4));
@@ -486,7 +486,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_vg12,
ARRAY_SIZE(golden_settings_sdma_vg12));
break;
- case CHIP_VEGA20:
+ case IP_VERSION(4, 2, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma0_4_2_init,
ARRAY_SIZE(golden_settings_sdma0_4_2_init));
@@ -497,17 +497,18 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma1_4_2,
ARRAY_SIZE(golden_settings_sdma1_4_2));
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(4, 2, 2):
soc15_program_register_sequence(adev,
golden_settings_sdma_arct,
ARRAY_SIZE(golden_settings_sdma_arct));
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(4, 4, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma_aldebaran,
ARRAY_SIZE(golden_settings_sdma_aldebaran));
break;
- case CHIP_RAVEN:
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
ARRAY_SIZE(golden_settings_sdma_4_1));
@@ -520,7 +521,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_rv1,
ARRAY_SIZE(golden_settings_sdma_rv1));
break;
- case CHIP_RENOIR:
+ case IP_VERSION(4, 1, 2):
soc15_program_register_sequence(adev,
golden_settings_sdma_4_3,
ARRAY_SIZE(golden_settings_sdma_4_3));
@@ -538,12 +539,12 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
* The only chips with SDMAv4 and ULV are VG10 and VG20.
* Server SKUs take a different hysteresis setting from other SKUs.
*/
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
if (adev->pdev->device == 0x6860)
break;
return;
- case CHIP_VEGA20:
+ case IP_VERSION(4, 2, 0):
if (adev->pdev->device == 0x66a1)
break;
return;
@@ -589,8 +590,8 @@ static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
	/* Arcturus shares the same FW memory across
	   all SDMA instances */
- if (adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
+ adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
break;
}
@@ -620,17 +621,18 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
chip_name = "vega10";
break;
- case CHIP_VEGA12:
+ case IP_VERSION(4, 0, 1):
chip_name = "vega12";
break;
- case CHIP_VEGA20:
+ case IP_VERSION(4, 2, 0):
chip_name = "vega20";
break;
- case CHIP_RAVEN:
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
else if (adev->apu_flags & AMD_APU_IS_PICASSO)
@@ -638,16 +640,16 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
else
chip_name = "raven";
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(4, 2, 2):
chip_name = "arcturus";
break;
- case CHIP_RENOIR:
+ case IP_VERSION(4, 1, 2):
if (adev->apu_flags & AMD_APU_IS_RENOIR)
chip_name = "renoir";
else
chip_name = "green_sardine";
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(4, 4, 0):
chip_name = "aldebaran";
break;
default:
@@ -665,8 +667,8 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
goto out;
for (i = 1; i < adev->sdma.num_instances; i++) {
- if (adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN) {
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
+ adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
			/* Arcturus & Aldebaran will leverage the same FW memory
			   for every SDMA instance */
memcpy((void *)&adev->sdma.instance[i],
@@ -1106,7 +1108,7 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
* Arcturus for the moment and firmware version 14
* and above.
*/
- if (adev->asic_type == CHIP_ARCTURUS &&
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) &&
adev->sdma.instance[i].fw_version >= 14)
WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
/* Extend page fault timeout to avoid interrupt storm */
@@ -1393,9 +1395,10 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
return;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
sdma_v4_1_init_power_gating(adev);
sdma_v4_1_update_power_gating(adev, true);
break;
@@ -1835,13 +1838,13 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
{
uint fw_version = adev->sdma.instance[0].fw_version;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
return fw_version >= 430;
- case CHIP_VEGA12:
+ case IP_VERSION(4, 0, 1):
/*return fw_version >= 31;*/
return false;
- case CHIP_VEGA20:
+ case IP_VERSION(4, 2, 0):
return fw_version >= 123;
default:
return false;
@@ -1853,15 +1856,6 @@ static int sdma_v4_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- if (adev->flags & AMD_IS_APU)
- adev->sdma.num_instances = 1;
- else if (adev->asic_type == CHIP_ARCTURUS)
- adev->sdma.num_instances = 8;
- else if (adev->asic_type == CHIP_ALDEBARAN)
- adev->sdma.num_instances = 5;
- else
- adev->sdma.num_instances = 2;
-
r = sdma_v4_0_init_microcode(adev);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
@@ -1869,7 +1863,8 @@ static int sdma_v4_0_early_init(void *handle)
}
/* TODO: Page queue breaks driver reload under SRIOV */
- if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev)))
+ if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) &&
+	    amdgpu_sriov_vf(adev))
adev->sdma.has_page_queue = false;
else if (sdma_v4_0_fw_support_paging_queue(adev))
adev->sdma.has_page_queue = true;
@@ -2141,14 +2136,14 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
break;
case 1:
- if (adev->asic_type == CHIP_VEGA20)
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0))
amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
case 2:
/* XXX compute */
break;
case 3:
- if (adev->asic_type != CHIP_VEGA20)
+ if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0))
amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
@@ -2364,9 +2359,10 @@ static int sdma_v4_0_set_powergating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
sdma_v4_1_update_power_gating(adev,
state == AMD_PG_STATE_GATE);
break;
@@ -2551,7 +2547,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->asic_type == CHIP_ARCTURUS && i >= 5)
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
adev->sdma.instance[i].ring.funcs =
&sdma_v4_0_ring_funcs_2nd_mmhub;
else
@@ -2559,7 +2555,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
&sdma_v4_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
if (adev->sdma.has_page_queue) {
- if (adev->asic_type == CHIP_ARCTURUS && i >= 5)
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
adev->sdma.instance[i].page.funcs =
&sdma_v4_0_page_ring_funcs_2nd_mmhub;
else
@@ -2786,12 +2782,12 @@ static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 2):
adev->sdma.funcs = &sdma_v4_0_ras_funcs;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(4, 4, 0):
adev->sdma.funcs = &sdma_v4_4_ras_funcs;
break;
default:
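
Several sdma_v4_0.c hunks keep the rule that Arcturus (IP 4.2.2) and Aldebaran (IP 4.4.0) load a single firmware image and point every SDMA instance at it. A minimal sketch of that replication step, assuming a simplified instance record:

#include <string.h>

#define SDMA_INSTANCES 8		/* Arcturus has eight engines */

struct sdma_instance {
	const void *fw;			/* shared firmware image */
	unsigned int fw_version;
};

/* Replicate instance 0's firmware bookkeeping so every engine shares
 * the single loaded image, as the IP 4.2.2/4.4.0 paths above do. */
static void sdma_share_fw(struct sdma_instance inst[SDMA_INSTANCES])
{
	for (int i = 1; i < SDMA_INSTANCES; i++)
		memcpy(&inst[i], &inst[0], sizeof(inst[0]));
}
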
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 50bf3b71bc93..853d1511b889 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -187,8 +187,8 @@ static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 0, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma_5,
(const u32)ARRAY_SIZE(golden_settings_sdma_5));
@@ -196,7 +196,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_nv10,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
break;
- case CHIP_NAVI14:
+ case IP_VERSION(5, 0, 2):
soc15_program_register_sequence(adev,
golden_settings_sdma_5,
(const u32)ARRAY_SIZE(golden_settings_sdma_5));
@@ -204,7 +204,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_nv14,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
break;
- case CHIP_NAVI12:
+ case IP_VERSION(5, 0, 5):
if (amdgpu_sriov_vf(adev))
soc15_program_register_sequence(adev,
golden_settings_sdma_5_sriov,
@@ -217,7 +217,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_nv12,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(5, 0, 1):
soc15_program_register_sequence(adev,
golden_settings_sdma_cyan_skillfish,
(const u32)ARRAY_SIZE(golden_settings_sdma_cyan_skillfish));
@@ -248,22 +248,22 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
- if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_NAVI12))
+ if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
return 0;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 0, 0):
chip_name = "navi10";
break;
- case CHIP_NAVI14:
+ case IP_VERSION(5, 0, 2):
chip_name = "navi14";
break;
- case CHIP_NAVI12:
+ case IP_VERSION(5, 0, 5):
chip_name = "navi12";
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(5, 0, 1):
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
chip_name = "cyan_skillfish2";
else
@@ -1295,8 +1295,6 @@ static int sdma_v5_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->sdma.num_instances = 2;
-
sdma_v5_0_set_ring_funcs(adev);
sdma_v5_0_set_buffer_funcs(adev);
sdma_v5_0_set_vm_pte_funcs(adev);
@@ -1636,10 +1634,10 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
sdma_v5_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
sdma_v5_0_update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index e32efcfb0c8b..4d4d1aa51b8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -136,23 +136,23 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 2, 0):
chip_name = "sienna_cichlid";
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(5, 2, 2):
chip_name = "navy_flounder";
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(5, 2, 1):
chip_name = "vangogh";
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(5, 2, 4):
chip_name = "dimgrey_cavefish";
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(5, 2, 5):
chip_name = "beige_goby";
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(5, 2, 3):
chip_name = "yellow_carp";
break;
default:
@@ -174,7 +174,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
(void *)&adev->sdma.instance[0],
sizeof(struct amdgpu_sdma_instance));
- if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
+ if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 0)))
return 0;
DRM_DEBUG("psp_load == '%s'\n",
@@ -375,10 +375,10 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
*/
static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
{
- uint32_t gcr_cntl =
- SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
- SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
- SDMA_GCR_GLI_INV(1);
+ uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
+ SDMA_GCR_GLM_INV | SDMA_GCR_GL1_INV |
+ SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
+ SDMA_GCR_GLI_INV(1);
	/* flush the entire L0/L1/L2 cache; this can be tuned to the performance requirements */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
@@ -1217,23 +1217,6 @@ static int sdma_v5_2_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- adev->sdma.num_instances = 4;
- break;
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- adev->sdma.num_instances = 2;
- break;
- case CHIP_VANGOGH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- adev->sdma.num_instances = 1;
- break;
- default:
- break;
- }
-
sdma_v5_2_set_ring_funcs(adev);
sdma_v5_2_set_buffer_funcs(adev);
sdma_v5_2_set_vm_pte_funcs(adev);
@@ -1555,7 +1538,7 @@ static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *ade
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->sdma.instance[i].fw_version < 70 && adev->asic_type == CHIP_VANGOGH)
+ if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1))
adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
@@ -1592,7 +1575,7 @@ static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->sdma.instance[i].fw_version < 70 && adev->asic_type == CHIP_VANGOGH)
+ if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1))
adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
@@ -1621,13 +1604,13 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 1):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 3):
sdma_v5_2_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
sdma_v5_2_update_medium_grain_light_sleep(adev,
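
Both sdma_v5_0.c and sdma_v5_2.c drop the hardcoded num_instances switch from early_init: with IP discovery, the engine count is read from the parsed discovery table before early_init runs. A hypothetical sketch of where the count now originates:

/* Hypothetical sketch: the count comes from the parsed discovery
 * table instead of a per-ASIC switch statement. */
struct ip_discovery_info {
	unsigned int sdma_instance_count;
};

static unsigned int sdma_num_instances(const struct ip_discovery_info *d)
{
	return d->sdma_instance_count;	/* was: switch (adev->asic_type) */
}
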
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c
deleted file mode 100644
index 5ee69f70c49b..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "sienna_cichlid_ip_offset.h"
-
-int sienna_cichlid_reg_base_init(struct amdgpu_device *adev)
-{
-	/* HW has more IP blocks, only initialize the blocks needed by the driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 0fc97c364fd7..0c316a2d42ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -85,6 +85,8 @@
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
+static const struct amd_ip_funcs soc15_common_ip_funcs;
+
/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
@@ -154,31 +156,38 @@ static const struct amdgpu_video_codecs rn_video_codecs_decode =
static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- if (encode)
- *codecs = &vega_video_codecs_encode;
- else
- *codecs = &vega_video_codecs_decode;
- return 0;
- case CHIP_RAVEN:
- if (encode)
- *codecs = &vega_video_codecs_encode;
- else
- *codecs = &rv_video_codecs_decode;
- return 0;
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- case CHIP_RENOIR:
- if (encode)
- *codecs = &vega_video_codecs_encode;
- else
- *codecs = &rn_video_codecs_decode;
- return 0;
- default:
- return -EINVAL;
+ if (adev->ip_versions[VCE_HWIP][0]) {
+ switch (adev->ip_versions[VCE_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 1, 0):
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &vega_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &rv_video_codecs_decode;
+ return 0;
+ case IP_VERSION(2, 5, 0):
+ case IP_VERSION(2, 6, 0):
+ case IP_VERSION(2, 2, 0):
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &rn_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
}
@@ -332,9 +341,11 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
u32 reference_clock = adev->clock.spll.reference_freq;
- if (adev->asic_type == CHIP_RENOIR)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
return 10000;
- if (adev->asic_type == CHIP_RAVEN)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
return reference_clock / 4;
return reference_clock;
@@ -565,28 +576,29 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
return AMD_RESET_METHOD_MODE2;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_ARCTURUS:
- baco_reset = amdgpu_dpm_is_baco_supported(adev);
- break;
- case CHIP_VEGA20:
- if (adev->psp.sos.fw_version >= 0x80067)
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->psp.sos.fw_version >= 0x80067)
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
+ /*
+ * 1. PMFW version > 0x284300: all cases use baco
+ * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
+ */
+ if (ras && adev->ras_enabled &&
+ adev->pm.fw_version <= 0x283400)
+ baco_reset = false;
+ } else {
baco_reset = amdgpu_dpm_is_baco_supported(adev);
-
- /*
- * 1. PMFW version > 0x284300: all cases use baco
- * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
- */
- if (ras && adev->ras_enabled &&
- adev->pm.fw_version <= 0x283400)
- baco_reset = false;
+ }
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(13, 0, 2):
/*
	 * 1. connected to CPU: driver issues mode2 reset
	 * 2. discrete GPU: driver issues mode1 reset
@@ -629,15 +641,17 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
static bool soc15_supports_baco(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_ARCTURUS:
- return amdgpu_dpm_is_baco_supported(adev);
- case CHIP_VEGA20:
- if (adev->psp.sos.fw_version >= 0x80067)
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->psp.sos.fw_version >= 0x80067)
+ return amdgpu_dpm_is_baco_supported(adev);
+ return false;
+ } else {
return amdgpu_dpm_is_baco_supported(adev);
- return false;
+ }
+ break;
default:
return false;
}
@@ -704,7 +718,7 @@ static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
-static const struct amdgpu_ip_block_version vega10_common_ip_block =
+const struct amdgpu_ip_block_version vega10_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 2,
@@ -766,185 +780,6 @@ void soc15_set_virt_ops(struct amdgpu_device *adev)
soc15_reg_base_init(adev);
}
-int soc15_set_ip_blocks(struct amdgpu_device *adev)
-{
- /* for bare metal case */
- if (!amdgpu_sriov_vf(adev))
- soc15_reg_base_init(adev);
-
- if (adev->flags & AMD_IS_APU) {
- adev->nbio.funcs = &nbio_v7_0_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
- } else if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN) {
- adev->nbio.funcs = &nbio_v7_4_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
- } else {
- adev->nbio.funcs = &nbio_v6_1_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
- }
- adev->hdp.funcs = &hdp_v4_0_funcs;
-
- if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN)
- adev->df.funcs = &df_v3_6_funcs;
- else
- adev->df.funcs = &df_v1_7_funcs;
-
- if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS)
- adev->smuio.funcs = &smuio_v11_0_funcs;
- else if (adev->asic_type == CHIP_ALDEBARAN)
- adev->smuio.funcs = &smuio_v13_0_funcs;
- else
- adev->smuio.funcs = &smuio_v9_0_funcs;
-
- adev->rev_id = soc15_get_rev_id(adev);
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
-
-		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
- if (amdgpu_sriov_vf(adev)) {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
- if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
- }
- if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- } else {
- if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
- if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
- }
- }
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (is_support_sw_smu(adev)) {
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- } else {
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
- }
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
- amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
- }
- break;
- case CHIP_RAVEN:
- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
- break;
- case CHIP_ARCTURUS:
- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
-
- if (amdgpu_sriov_vf(adev)) {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- } else {
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- }
-
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
-
- if (amdgpu_sriov_vf(adev)) {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
- } else {
- amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
- }
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
- break;
- case CHIP_RENOIR:
- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
- break;
- case CHIP_ALDEBARAN:
- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
-
- if (amdgpu_sriov_vf(adev)) {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- } else {
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
- }
-
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
-
- amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
/* change this when we implement soft reset */
@@ -1153,10 +988,13 @@ static int soc15_common_early_init(void *handle)
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
-
+ adev->rev_id = soc15_get_rev_id(adev);
adev->external_rev_id = 0xFF;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+	/* TODO: split the CG and PG flags based on the IP versions to
+	 * which they apply.
+	 */
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1180,7 +1018,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = 0x1;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1203,7 +1041,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1226,7 +1064,8 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x28;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
@@ -1299,7 +1138,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1318,7 +1157,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x32;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
adev->asic_funcs = &soc15_asic_funcs;
if (adev->apu_flags & AMD_APU_IS_RENOIR)
@@ -1349,7 +1188,7 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1564,10 +1403,10 @@ static int soc15_common_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 2, 0):
+ case IP_VERSION(7, 4, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
@@ -1583,8 +1422,9 @@ static int soc15_common_set_clockgating_state(void *handle,
adev->df.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ case IP_VERSION(2, 5, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
@@ -1596,8 +1436,8 @@ static int soc15_common_set_clockgating_state(void *handle,
soc15_update_drm_light_sleep(adev,
state == AMD_CG_STATE_GATE);
break;
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
+ case IP_VERSION(7, 4, 1):
+ case IP_VERSION(7, 4, 4):
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -1619,7 +1459,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
adev->hdp.funcs->get_clock_gating_state(adev, flags);
- if (adev->asic_type != CHIP_ALDEBARAN) {
+ if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) {
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
@@ -1645,7 +1485,7 @@ static int soc15_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs soc15_common_ip_funcs = {
+static const struct amd_ip_funcs soc15_common_ip_funcs = {
.name = "soc15_common",
.early_init = soc15_common_early_init,
.late_init = soc15_common_late_init,
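
soc15_query_video_codecs() now branches on whether a VCE IP version was discovered at all: a nonzero VCE entry marks a UVD/VCE design, while VCN-based parts report their version through the UVD slot. A hedged sketch of the two-level decode-table selection; the table pointers are opaque placeholders:

#include <stddef.h>
#include <stdint.h>

#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

struct amdgpu_video_codecs;		/* opaque placeholder */

static const struct amdgpu_video_codecs *
pick_decode(uint32_t vce_ip, uint32_t uvd_ip,
	    const struct amdgpu_video_codecs *vega_dec,
	    const struct amdgpu_video_codecs *rv_dec,
	    const struct amdgpu_video_codecs *rn_dec)
{
	if (vce_ip)			/* UVD/VCE design: Vega-class tables */
		return vega_dec;

	switch (uvd_ip) {		/* VCN versions live in the UVD slot */
	case IP_VERSION(1, 0, 0):
	case IP_VERSION(1, 0, 1):
		return rv_dec;
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(2, 6, 0):
	case IP_VERSION(2, 2, 0):
		return rn_dec;
	default:
		return NULL;
	}
}
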
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 034cfdfc4dbe..efc2a253e8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -28,11 +28,11 @@
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
+extern const struct amdgpu_ip_block_version vega10_common_ip_block;
+
#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6
#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3
-extern const struct amd_ip_funcs soc15_common_ip_funcs;
-
struct soc15_reg_golden {
u32 hwip;
u32 instance;
@@ -102,7 +102,6 @@ struct soc15_ras_field_entry {
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void soc15_set_virt_ops(struct amdgpu_device *adev);
-int soc15_set_ip_blocks(struct amdgpu_device *adev);
void soc15_program_register_sequence(struct amdgpu_device *adev,
const struct soc15_reg_golden *registers,
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 0f214a398dd8..5093826a43d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -38,9 +38,8 @@ enum ras_command {
TA_RAS_COMMAND__TRIGGER_ERROR,
};
-enum ta_ras_status
-{
- TA_RAS_STATUS__SUCCESS = 0x00,
+enum ta_ras_status {
+ TA_RAS_STATUS__SUCCESS = 0x0000,
TA_RAS_STATUS__RESET_NEEDED = 0xA001,
TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0xA002,
TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003,
@@ -55,7 +54,17 @@ enum ta_ras_status
TA_RAS_STATUS__ERROR_GET_DEV_INFO = 0xA00C,
TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D,
TA_RAS_STATUS__ERROR_NOT_INITIALIZED = 0xA00E,
- TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F
+ TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F,
+ TA_RAS_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010,
+ TA_RAS_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011,
+ TA_RAS_STATUS__ERROR_RAS_READ_WRITE = 0xA012,
+ TA_RAS_STATUS__ERROR_NULL_PTR = 0xA013,
+ TA_RAS_STATUS__ERROR_UNSUPPORTED_IP = 0xA014,
+ TA_RAS_STATUS__ERROR_PCS_STATE_QUIET = 0xA015,
+ TA_RAS_STATUS__ERROR_PCS_STATE_ERROR = 0xA016,
+ TA_RAS_STATUS__ERROR_PCS_STATE_HANG = 0xA017,
+ TA_RAS_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018,
+ TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019
};
enum ta_ras_block {
@@ -73,9 +82,18 @@ enum ta_ras_block {
TA_RAS_BLOCK__MP0,
TA_RAS_BLOCK__MP1,
TA_RAS_BLOCK__FUSE,
+ TA_RAS_BLOCK__MCA,
TA_NUM_BLOCK_MAX
};
+enum ta_ras_mca_block {
+ TA_RAS_MCA_BLOCK__MP0 = 0,
+ TA_RAS_MCA_BLOCK__MP1 = 1,
+ TA_RAS_MCA_BLOCK__MPIO = 2,
+ TA_RAS_MCA_BLOCK__IOHC = 3,
+ TA_MCA_NUM_BLOCK_MAX
+};
+
enum ta_ras_error_type {
TA_RAS_ERROR__NONE = 0,
TA_RAS_ERROR__PARITY = 1,
@@ -105,17 +123,15 @@ struct ta_ras_trigger_error_input {
	uint64_t value;		// method of error injection, i.e. persistent, coherent, etc.
};
-struct ta_ras_init_flags
-{
- uint8_t poison_mode_en;
- uint8_t dgpu_mode;
+struct ta_ras_init_flags {
+ uint8_t poison_mode_en;
+ uint8_t dgpu_mode;
};
-struct ta_ras_output_flags
-{
- uint8_t ras_init_success_flag;
- uint8_t err_inject_switch_disable_flag;
- uint8_t reg_access_failure_flag;
+struct ta_ras_output_flags {
+ uint8_t ras_init_success_flag;
+ uint8_t err_inject_switch_disable_flag;
+ uint8_t reg_access_failure_flag;
};
/* Common input structure for RAS callbacks */
@@ -126,14 +142,13 @@ union ta_ras_cmd_input {
struct ta_ras_disable_features_input disable_features;
struct ta_ras_trigger_error_input trigger_error;
- uint32_t reserve_pad[256];
+ uint32_t reserve_pad[256];
};
-union ta_ras_cmd_output
-{
- struct ta_ras_output_flags flags;
+union ta_ras_cmd_output {
+ struct ta_ras_output_flags flags;
- uint32_t reserve_pad[256];
+ uint32_t reserve_pad[256];
};
/* Shared Memory structures */
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index bb30336b1e8d..f7ec3fe134e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -288,9 +288,43 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
}
}
+static uint32_t umc_v6_7_query_ras_poison_mode_per_channel(
+ struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_ctrl_addr, ecc_ctrl;
+
+ ecc_ctrl_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccCtrl);
+ ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
+ umc_reg_offset) * 4);
+
+ return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_EccCtrl, UCFatalEn);
+}
+
+static bool umc_v6_7_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+		/* Enabling fatal errors in any one channel puts the whole
+		 * device in fatal-error (non-poison) mode */
+ if (umc_v6_7_query_ras_poison_mode_per_channel(adev, umc_reg_offset))
+ return false;
+ }
+
+ return true;
+}
+
const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
.ras_late_init = amdgpu_umc_ras_late_init,
.ras_fini = amdgpu_umc_ras_fini,
.query_ras_error_count = umc_v6_7_query_ras_error_count,
.query_ras_error_address = umc_v6_7_query_ras_error_address,
+ .query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
};
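
umc_v6_7_query_ras_poison_mode() reports poison mode only when no UMC channel has UCFatalEn set; a single fatal-enabled channel flips the whole device into fatal-error mode. A minimal sketch of that all-channels policy, assuming the per-channel enable bits have already been read out:

#include <stdbool.h>
#include <stdint.h>

#define UMC_CHANNELS 8			/* placeholder channel count */

/* One UCFatalEn bit per channel, already read from UMCCH0_0_EccCtrl. */
static bool umc_poison_mode(const uint32_t fatal_en[UMC_CHANNELS])
{
	for (int i = 0; i < UMC_CHANNELS; i++)
		if (fatal_en[i])
			return false;	/* any fatal channel => fatal mode */
	return true;			/* no fatal channels => poison mode */
}
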
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 7232241e3bfb..0fef925b6602 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -698,6 +698,19 @@ static int uvd_v3_1_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v3_1_stop(adev);
+
+ return 0;
+}
+
+static int uvd_v3_1_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -722,17 +735,6 @@ static int uvd_v3_1_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- if (RREG32(mmUVD_STATUS) != 0)
- uvd_v3_1_stop(adev);
-
- return 0;
-}
-
-static int uvd_v3_1_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = uvd_v3_1_hw_fini(adev);
if (r)
return r;
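
The uvd_v3_1.c reorder (repeated below for v4.2 and v5.0) leaves hw_fini() with only the quiesce steps, cancelling the idle work and stopping the engine, while the clock/power-gating teardown moves into suspend(), which then calls hw_fini(). A hedged sketch of the resulting split; the stubs stand in for the real driver hooks:

/* Stubs stand in for the real driver hooks (sketch only). */
struct uvd_dev;
void uvd_cancel_idle_work(struct uvd_dev *d);
int  uvd_engine_busy(struct uvd_dev *d);
void uvd_engine_stop(struct uvd_dev *d);
void uvd_ungate_clocks_and_power(struct uvd_dev *d);

static int uvd_hw_fini(struct uvd_dev *d)
{
	uvd_cancel_idle_work(d);	/* no deferred work past this point */
	if (uvd_engine_busy(d))
		uvd_engine_stop(d);
	return 0;
}

static int uvd_suspend(struct uvd_dev *d)
{
	uvd_ungate_clocks_and_power(d);	/* proper cleanup before halting */
	return uvd_hw_fini(d);
}
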
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 52d6de969f46..c108b8381795 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -212,6 +212,19 @@ static int uvd_v4_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v4_2_stop(adev);
+
+ return 0;
+}
+
+static int uvd_v4_2_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -236,17 +249,6 @@ static int uvd_v4_2_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- if (RREG32(mmUVD_STATUS) != 0)
- uvd_v4_2_stop(adev);
-
- return 0;
-}
-
-static int uvd_v4_2_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = uvd_v4_2_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index db6d06758e4d..563493d1f830 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -210,6 +210,19 @@ static int uvd_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v5_0_stop(adev);
+
+ return 0;
+}
+
+static int uvd_v5_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -234,17 +247,6 @@ static int uvd_v5_0_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- if (RREG32(mmUVD_STATUS) != 0)
- uvd_v5_0_stop(adev);
-
- return 0;
-}
-
-static int uvd_v5_0_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index bc571833632e..d5d023a24269 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -332,15 +332,9 @@ err:
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
long r;
- r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, NULL);
- if (r)
- return r;
-
r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
@@ -357,9 +351,6 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
- amdgpu_bo_unpin(bo);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
return r;
}
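The per-test allocation of a 128K VRAM BO disappears in favour of a long-lived ring->adev->uvd.ib_bo (uvd_v7_0 below gets the identical change). The one-time allocation this relies on is not in these hunks; judging by the removed call it presumably amounts to:

/* done once in the common UVD init path (assumed location) */
r = amdgpu_bo_create_reserved(adev, 128 * 1024, PAGE_SIZE,
                              AMDGPU_GEM_DOMAIN_VRAM,
                              &adev->uvd.ib_bo, NULL, NULL);
if (r)
        return r;

with a matching single free at teardown, so the IB test no longer needs the unpin/unreserve/unref dance on every invocation.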
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index b6e82d75561f..b483f03b4591 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -338,15 +338,9 @@ err:
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
long r;
- r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, NULL);
- if (r)
- return r;
-
r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
@@ -363,9 +357,6 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
- amdgpu_bo_unpin(bo);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
return r;
}
@@ -606,6 +597,23 @@ static int uvd_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (!amdgpu_sriov_vf(adev))
+ uvd_v7_0_stop(adev);
+ else {
+ /* full access mode, so don't touch any UVD register */
+ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+ }
+
+ return 0;
+}
+
+static int uvd_v7_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -630,21 +638,6 @@ static int uvd_v7_0_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- if (!amdgpu_sriov_vf(adev))
- uvd_v7_0_stop(adev);
- else {
- /* full access mode, so don't touch any UVD register */
- DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
- }
-
- return 0;
-}
-
-static int uvd_v7_0_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = uvd_v7_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c
deleted file mode 100644
index d64d681a05dc..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "vangogh_ip_offset.h"
-
-void vangogh_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialize the blocks needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- }
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index b70c17f0c52e..67eb01fef789 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -431,10 +431,12 @@ static int vce_v2_0_sw_init(void *handle)
return r;
for (i = 0; i < adev->vce.num_rings; i++) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
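amdgpu_vce_get_ring_prio() replaces the blanket AMDGPU_RING_PRIO_DEFAULT so each VCE ring can be created at its own hardware priority level (vce_v3_0 and vce_v4_0 below receive the same treatment, as do the VCN encode rings further down). The helper body is outside this diff; a plausible sketch of its shape:

/* hypothetical sketch -- the real mapping lives in amdgpu_vce.c */
enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
        switch (ring) {
        case 0:
                return AMDGPU_RING_PRIO_0;
        case 1:
                return AMDGPU_RING_PRIO_1;
        case 2:
                return AMDGPU_RING_PRIO_2;
        default:
                return AMDGPU_RING_PRIO_0;
        }
}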
@@ -479,6 +481,17 @@ static int vce_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ return 0;
+}
+
+static int vce_v2_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -502,14 +515,6 @@ static int vce_v2_0_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- return 0;
-}
-
-static int vce_v2_0_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = vce_v2_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 9de66893ccd6..142e291983b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -440,10 +440,12 @@ static int vce_v3_0_sw_init(void *handle)
return r;
for (i = 0; i < adev->vce.num_rings; i++) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
@@ -490,6 +492,21 @@ static int vce_v3_0_hw_fini(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ r = vce_v3_0_wait_for_idle(handle);
+ if (r)
+ return r;
+
+ vce_v3_0_stop(adev);
+ return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+}
+
+static int vce_v3_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -513,19 +530,6 @@ static int vce_v3_0_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- r = vce_v3_0_wait_for_idle(handle);
- if (r)
- return r;
-
- vce_v3_0_stop(adev);
- return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
-}
-
-static int vce_v3_0_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = vce_v3_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index fec902b800c2..d1fc4e0b8265 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -463,6 +463,8 @@ static int vce_v4_0_sw_init(void *handle)
}
for (i = 0; i < adev->vce.num_rings; i++) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
if (amdgpu_sriov_vf(adev)) {
@@ -478,7 +480,7 @@ static int vce_v4_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
@@ -542,29 +544,8 @@ static int vce_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /*
- * Proper cleanups before halting the HW engine:
- * - cancel the delayed idle work
- * - enable powergating
- * - enable clockgating
- * - disable dpm
- *
- * TODO: to align with the VCN implementation, move the
- * jobs for clockgating/powergating/dpm setting to
- * ->set_powergating_state().
- */
cancel_delayed_work_sync(&adev->vce.idle_work);
- if (adev->pm.dpm_enabled) {
- amdgpu_dpm_enable_vce(adev, false);
- } else {
- amdgpu_asic_set_vce_clocks(adev, 0, 0);
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
- }
-
if (!amdgpu_sriov_vf(adev)) {
/* vce_v4_0_wait_for_idle(handle); */
vce_v4_0_stop(adev);
@@ -584,7 +565,7 @@ static int vce_v4_0_suspend(void *handle)
if (adev->vce.vcpu_bo == NULL)
return 0;
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
void *ptr = adev->vce.cpu_addr;
@@ -594,6 +575,29 @@ static int vce_v4_0_suspend(void *handle)
drm_dev_exit(idx);
}
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_vce(adev, false);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ }
+
r = vce_v4_0_hw_fini(adev);
if (r)
return r;
@@ -611,7 +615,7 @@ static int vce_v4_0_resume(void *handle)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
void *ptr = adev->vce.cpu_addr;
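The &adev->ddev -> adev_to_drm(adev) conversions route the firmware save/restore through the hot-unplug guard: drm_dev_enter() fails once the device has been unplugged, so the copy is skipped rather than touching stale mappings. The guard pattern in isolation:

int idx;

if (drm_dev_enter(adev_to_drm(adev), &idx)) {
        /* device is guaranteed to stay around until the matching exit */
        drm_dev_exit(idx);
}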
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 121ee9f2b8d1..d54d720b3cf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -66,7 +66,6 @@ static int vcn_v1_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->vcn.num_vcn_inst = 1;
adev->vcn.num_enc_rings = 2;
vcn_v1_0_set_dec_ring_funcs(adev);
@@ -112,15 +111,7 @@ static int vcn_v1_0_sw_init(void *handle)
/* Override the work func */
adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
- dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
- }
+ amdgpu_vcn_setup_ucode(adev);
r = amdgpu_vcn_resume(adev);
if (r)
@@ -145,10 +136,12 @@ static int vcn_v1_0_sw_init(void *handle)
SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
+
ring = &adev->vcn.inst->ring_enc[i];
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
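Four open-coded copies of the PSP ucode bookkeeping (here and in vcn_v2_0/v2_5/v3_0 below) collapse into amdgpu_vcn_setup_ucode(). Its body is not part of this diff; reassembled from the removed blocks it presumably amounts to:

void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
{
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                const struct common_firmware_header *hdr;
                int i;

                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                /* one ucode slot per VCN instance, all backed by the same fw */
                for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                        adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].ucode_id =
                                        AMDGPU_UCODE_ID_VCN + i;
                        adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].fw = adev->vcn.fw;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
                }
                dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
        }
}

AMDGPU_UCODE_ID_VCN + i works because AMDGPU_UCODE_ID_VCN1 directly follows AMDGPU_UCODE_ID_VCN; the vcn_v3_0 start_sriov hunk below leans on the same arithmetic.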
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index f4686e918e0d..313fc1b53999 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -22,6 +22,7 @@
*/
#include <linux/firmware.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
@@ -68,7 +69,6 @@ static int vcn_v2_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->vcn.num_vcn_inst = 1;
if (amdgpu_sriov_vf(adev))
adev->vcn.num_enc_rings = 1;
else
@@ -115,15 +115,7 @@ static int vcn_v2_0_sw_init(void *handle)
if (r)
return r;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
- dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
- }
+ amdgpu_vcn_setup_ucode(adev);
r = amdgpu_vcn_resume(adev);
if (r)
@@ -159,6 +151,8 @@ static int vcn_v2_0_sw_init(void *handle)
adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
+
ring = &adev->vcn.inst->ring_enc[i];
ring->use_doorbell = true;
if (!amdgpu_sriov_vf(adev))
@@ -167,7 +161,7 @@ static int vcn_v2_0_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
@@ -192,11 +186,14 @@ static int vcn_v2_0_sw_init(void *handle)
*/
static int vcn_v2_0_sw_fini(void *handle)
{
- int r;
+ int r, idx;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
- fw_shared->present_flag_0 = 0;
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ fw_shared->present_flag_0 = 0;
+ drm_dev_exit(idx);
+ }
amdgpu_virt_free_mm_table(adev);
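Writing fw_shared->present_flag_0 dereferences a BO mapping that is gone once the device has been unplugged, hence the drm_dev_enter() bracket (vcn_v2_5 and vcn_v3_0 below get the same fix). The side that arms this guard is the PCI remove path, roughly:

/* sketch of the unplug side; amdgpu's PCI .remove callback */
static void amdgpu_pci_remove_sketch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_dev_unplug(dev);    /* from here on, drm_dev_enter() fails */
        /* ... actual device teardown ... */
}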
@@ -1879,15 +1876,14 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- tmp = AMDGPU_UCODE_ID_VCN;
MMSCH_V2_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
- adev->firmware.ucode[tmp].tmr_mc_addr_lo);
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo);
MMSCH_V2_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
- adev->firmware.ucode[tmp].tmr_mc_addr_hi);
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi);
offset = 0;
} else {
MMSCH_V2_0_INSERT_DIRECT_WT(
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index e0c0c3734432..44fc4c218433 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -22,6 +22,7 @@
*/
#include <linux/firmware.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
@@ -82,7 +83,7 @@ static int vcn_v2_5_early_init(void *handle)
} else {
u32 harvest;
int i;
- adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
@@ -138,22 +139,7 @@ static int vcn_v2_5_sw_init(void *handle)
if (r)
return r;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
-
- if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
- }
- dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
- }
+ amdgpu_vcn_setup_ucode(adev);
r = amdgpu_vcn_resume(adev);
if (r)
@@ -194,6 +180,8 @@ static int vcn_v2_5_sw_init(void *handle)
return r;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
+
ring = &adev->vcn.inst[j].ring_enc[i];
ring->use_doorbell = true;
@@ -203,7 +191,7 @@ static int vcn_v2_5_sw_init(void *handle)
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
r = amdgpu_ring_init(adev, ring, 512,
&adev->vcn.inst[j].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
@@ -233,17 +221,21 @@ static int vcn_v2_5_sw_init(void *handle)
*/
static int vcn_v2_5_sw_fini(void *handle)
{
- int i, r;
+ int i, r, idx;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
volatile struct amdgpu_fw_shared *fw_shared;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
- fw_shared->present_flag_0 = 0;
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ }
+ drm_dev_exit(idx);
}
+
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
@@ -1713,7 +1705,7 @@ static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
else /* CHIP_ALDEBARAN */
adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
@@ -1730,7 +1722,7 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << j))
continue;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
else /* CHIP_ALDEBARAN */
adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
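The CHIP_ARCTURUS checks give way to the per-IP version table that IP discovery fills in. IP_VERSION() packs major/minor/revision into one comparable value; from the amdgpu headers it is, in essence:

#define IP_VERSION(mj, mn, rv)  (((mj) << 16) | ((mn) << 8) | (rv))

/* so the test above asks: is the UVD/VCN IP block at version 2.5.0? */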
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 3d18aab88b4e..da11ceba0698 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -60,11 +60,6 @@ static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN1
};
-static int amdgpu_ucode_id_vcns[] = {
- AMDGPU_UCODE_ID_VCN,
- AMDGPU_UCODE_ID_VCN1
-};
-
static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -87,7 +82,6 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
@@ -95,24 +89,12 @@ static int vcn_v3_0_early_init(void *handle)
adev->vcn.num_enc_rings = 1;
} else {
- if (adev->asic_type == CHIP_SIENNA_CICHLID) {
- u32 harvest;
-
- adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- adev->vcn.harvest_config |= 1 << i;
- }
-
- if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
- /* both instances are harvested, disable the block */
- return -ENOENT;
- } else
- adev->vcn.num_vcn_inst = 1;
+ if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
+ AMDGPU_VCN_HARVEST_VCN1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
- if (adev->asic_type == CHIP_BEIGE_GOBY)
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 33))
adev->vcn.num_enc_rings = 0;
else
adev->vcn.num_enc_rings = 2;
@@ -143,22 +125,7 @@ static int vcn_v3_0_sw_init(void *handle)
if (r)
return r;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
-
- if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) {
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
- }
- dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
- }
+ amdgpu_vcn_setup_ucode(adev);
r = amdgpu_vcn_resume(adev);
if (r)
@@ -224,6 +191,8 @@ static int vcn_v3_0_sw_init(void *handle)
return r;
for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);
+
/* VCN ENC TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
@@ -239,8 +208,7 @@ static int vcn_v3_0_sw_init(void *handle)
}
sprintf(ring->name, "vcn_enc_%d.%d", i, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT,
- &adev->vcn.inst[i].sched_score);
+ hw_prio, &adev->vcn.inst[i].sched_score);
if (r)
return r;
}
@@ -275,7 +243,7 @@ static int vcn_v3_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r, idx;
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_fw_shared *fw_shared;
@@ -1271,7 +1239,7 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
fw_shared->rb.wptr = lower_32_bits(ring->wptr);
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
- if (adev->asic_type != CHIP_BEIGE_GOBY) {
+ if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
ring = &adev->vcn.inst[i].ring_enc[0];
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
@@ -1305,7 +1273,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
uint32_t param, resp, expected;
uint32_t offset, cache_size;
uint32_t tmp, timeout;
- uint32_t id;
struct amdgpu_mm_table *table = &adev->virt.mm_table;
uint32_t *table_loc;
@@ -1349,13 +1316,12 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- id = amdgpu_ucode_id_vcns[i];
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
- adev->firmware.ucode[id].tmr_mc_addr_lo);
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
- adev->firmware.ucode[id].tmr_mc_addr_hi);
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
offset = 0;
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
mmUVD_VCPU_CACHE_OFFSET0),
@@ -1643,7 +1609,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
- if (adev->asic_type != CHIP_BEIGE_GOBY) {
+ if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {
/* Restore */
fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
diff --git a/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c b/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c
deleted file mode 100644
index 3d89421275ed..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "yellow_carp_offset.h"
-
-int yellow_carp_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialize the blocks needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 86afd37b098d..24ebd61395d8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -405,7 +405,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
mutex_lock(&p->mutex);
- retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
+ retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
mutex_unlock(&p->mutex);
@@ -418,7 +418,7 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
int retval;
const int max_num_cus = 1024;
struct kfd_ioctl_set_cu_mask_args *args = data;
- struct queue_properties properties;
+ struct mqd_update_info minfo = {0};
uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
@@ -428,8 +428,8 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
return -EINVAL;
}
- properties.cu_mask_count = args->num_cu_mask;
- if (properties.cu_mask_count == 0) {
+ minfo.cu_mask.count = args->num_cu_mask;
+ if (minfo.cu_mask.count == 0) {
pr_debug("CU mask cannot be 0");
return -EINVAL;
}
@@ -438,32 +438,33 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
* limit of max_num_cus bits. We can then just drop any CU mask bits
* past max_num_cus bits and just use the first max_num_cus bits.
*/
- if (properties.cu_mask_count > max_num_cus) {
+ if (minfo.cu_mask.count > max_num_cus) {
pr_debug("CU mask cannot be greater than 1024 bits");
- properties.cu_mask_count = max_num_cus;
+ minfo.cu_mask.count = max_num_cus;
cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
}
- properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
- if (!properties.cu_mask)
+ minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
+ if (!minfo.cu_mask.ptr)
return -ENOMEM;
- retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
+ retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
if (retval) {
pr_debug("Could not copy CU mask from userspace");
- kfree(properties.cu_mask);
- return -EFAULT;
+ retval = -EFAULT;
+ goto out;
}
+ minfo.update_flag = UPDATE_FLAG_CU_MASK;
+
mutex_lock(&p->mutex);
- retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
+ retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
mutex_unlock(&p->mutex);
- if (retval)
- kfree(properties.cu_mask);
-
+out:
+ kfree(minfo.cu_mask.ptr);
return retval;
}
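The CU mask no longer rides in struct queue_properties (where it had to be freed on every error path); it travels in a dedicated mqd_update_info. Its shape, as implied by the usage above (the real definition sits in the KFD headers):

struct mqd_update_info {
        union {
                struct {
                        uint32_t count; /* number of mask bits */
                        uint32_t *ptr;  /* the mask words */
                } cu_mask;
        };
        enum mqd_update_flag update_flag;       /* e.g. UPDATE_FLAG_CU_MASK */
};

The single out: label can free minfo.cu_mask.ptr unconditionally because the mask is apparently copied into the MQD during the update rather than retained by reference.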
@@ -1011,11 +1012,6 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
void *mem, *kern_addr;
uint64_t size;
- if (p->signal_page) {
- pr_err("Event page is already set\n");
- return -EINVAL;
- }
-
kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
if (!kfd) {
pr_err("Getting device by id failed in %s\n", __func__);
@@ -1023,6 +1019,13 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
}
mutex_lock(&p->mutex);
+
+ if (p->signal_page) {
+ pr_err("Event page is already set\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
pdd = kfd_bind_process_to_device(kfd, p);
if (IS_ERR(pdd)) {
err = PTR_ERR(pdd);
@@ -1037,20 +1040,24 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
err = -EINVAL;
goto out_unlock;
}
- mutex_unlock(&p->mutex);
err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
mem, &kern_addr, &size);
if (err) {
pr_err("Failed to map event page to kernel\n");
- return err;
+ goto out_unlock;
}
err = kfd_event_page_set(p, kern_addr, size);
if (err) {
pr_err("Failed to set event page\n");
- return err;
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->kgd, mem);
+ goto out_unlock;
}
+
+ p->signal_handle = args->event_page_offset;
+
+ mutex_unlock(&p->mutex);
}
err = kfd_event_create(filp, p, args->event_type,
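This reordering closes two holes: the p->signal_page check now runs under p->mutex (two racing ioctls could previously both get past it), and a failed kfd_event_page_set() unmaps the BO instead of leaking the kernel mapping. Condensed flow after the patch:

mutex_lock(&p->mutex);
if (p->signal_page)
        goto out_unlock;                        /* -EINVAL, checked under the lock */
/* bind process to device, look up the BO ... */
err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd, mem,
                                               &kern_addr, &size);
if (err)
        goto out_unlock;
err = kfd_event_page_set(p, kern_addr, size);
if (err) {
        amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->kgd, mem);
        goto out_unlock;                        /* undo the mapping on failure */
}
p->signal_handle = args->event_page_offset;     /* consumed by the free guard below */
mutex_unlock(&p->mutex);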
@@ -1259,6 +1266,23 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
if (args->size == 0)
return -EINVAL;
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+ /* Flush pending deferred work to avoid racing with deferred actions
+ * from previous memory map changes (e.g. munmap).
+ */
+ svm_range_list_lock_and_flush_work(&p->svms, current->mm);
+ mutex_lock(&p->svms.lock);
+ mmap_write_unlock(current->mm);
+ if (interval_tree_iter_first(&p->svms.objects,
+ args->va_addr >> PAGE_SHIFT,
+ (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
+ pr_err("Address: 0x%llx already allocated by SVM\n",
+ args->va_addr);
+ mutex_unlock(&p->svms.lock);
+ return -EADDRINUSE;
+ }
+ mutex_unlock(&p->svms.lock);
+#endif
dev = kfd_device_by_id(args->gpu_id);
if (!dev)
return -EINVAL;
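The lock choreography is the subtle part: svm_range_list_lock_and_flush_work() is assumed to return with mmap_lock held for write after flushing deferred work, svms.lock is taken before the mmap lock is dropped, and only then does the interval-tree probe run. The overlap test itself, in isolation:

/* busy if any registered SVM range intersects the requested VA span */
if (interval_tree_iter_first(&p->svms.objects,
                             args->va_addr >> PAGE_SHIFT,
                             (args->va_addr + args->size - 1) >> PAGE_SHIFT))
        return -EADDRINUSE;     /* after dropping svms.lock, as above */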
@@ -1351,6 +1375,15 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
return -EINVAL;
mutex_lock(&p->mutex);
+ /*
+ * Safeguard to prevent user space from freeing signal BO.
+ * It will be freed at process termination.
+ */
+ if (p->signal_handle && (p->signal_handle == args->handle)) {
+ pr_err("Free signal BO is not allowed\n");
+ ret = -EPERM;
+ goto err_unlock;
+ }
pdd = kfd_get_process_device_data(dev, p);
if (!pdd) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 4a416231b24c..0fffaf859c59 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -32,6 +32,7 @@
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_migrate.h"
+#include "amdgpu.h"
#define MQD_SIZE_ALIGNED 768
@@ -52,41 +53,6 @@ extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
-static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
-#ifdef KFD_SUPPORT_IOMMU_V2
-#ifdef CONFIG_DRM_AMDGPU_CIK
- [CHIP_KAVERI] = &gfx_v7_kfd2kgd,
-#endif
- [CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
- [CHIP_RAVEN] = &gfx_v9_kfd2kgd,
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- [CHIP_HAWAII] = &gfx_v7_kfd2kgd,
-#endif
- [CHIP_TONGA] = &gfx_v8_kfd2kgd,
- [CHIP_FIJI] = &gfx_v8_kfd2kgd,
- [CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
- [CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
- [CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
- [CHIP_VEGAM] = &gfx_v8_kfd2kgd,
- [CHIP_VEGA10] = &gfx_v9_kfd2kgd,
- [CHIP_VEGA12] = &gfx_v9_kfd2kgd,
- [CHIP_VEGA20] = &gfx_v9_kfd2kgd,
- [CHIP_RENOIR] = &gfx_v9_kfd2kgd,
- [CHIP_ARCTURUS] = &arcturus_kfd2kgd,
- [CHIP_ALDEBARAN] = &aldebaran_kfd2kgd,
- [CHIP_NAVI10] = &gfx_v10_kfd2kgd,
- [CHIP_NAVI12] = &gfx_v10_kfd2kgd,
- [CHIP_NAVI14] = &gfx_v10_kfd2kgd,
- [CHIP_SIENNA_CICHLID] = &gfx_v10_3_kfd2kgd,
- [CHIP_NAVY_FLOUNDER] = &gfx_v10_3_kfd2kgd,
- [CHIP_VANGOGH] = &gfx_v10_3_kfd2kgd,
- [CHIP_DIMGREY_CAVEFISH] = &gfx_v10_3_kfd2kgd,
- [CHIP_BEIGE_GOBY] = &gfx_v10_3_kfd2kgd,
- [CHIP_YELLOW_CARP] = &gfx_v10_3_kfd2kgd,
- [CHIP_CYAN_SKILLFISH] = &gfx_v10_kfd2kgd,
-};
-
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
@@ -127,7 +93,6 @@ static const struct kfd_device_info carrizo_device_info = {
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
-#endif
static const struct kfd_device_info raven_device_info = {
.asic_family = CHIP_RAVEN,
@@ -147,7 +112,9 @@ static const struct kfd_device_info raven_device_info = {
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
static const struct kfd_device_info hawaii_device_info = {
.asic_family = CHIP_HAWAII,
.asic_name = "hawaii",
@@ -167,6 +134,7 @@ static const struct kfd_device_info hawaii_device_info = {
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
+#endif
static const struct kfd_device_info tonga_device_info = {
.asic_family = CHIP_TONGA,
@@ -653,63 +621,202 @@ static const struct kfd_device_info cyan_skillfish_device_info = {
.num_sdma_queues_per_engine = 8,
};
-/* For each entry, [0] is regular and [1] is virtualisation device. */
-static const struct kfd_device_info *kfd_supported_devices[][2] = {
-#ifdef KFD_SUPPORT_IOMMU_V2
- [CHIP_KAVERI] = {&kaveri_device_info, NULL},
- [CHIP_CARRIZO] = {&carrizo_device_info, NULL},
-#endif
- [CHIP_RAVEN] = {&raven_device_info, NULL},
- [CHIP_HAWAII] = {&hawaii_device_info, NULL},
- [CHIP_TONGA] = {&tonga_device_info, NULL},
- [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
- [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
- [CHIP_POLARIS11] = {&polaris11_device_info, NULL},
- [CHIP_POLARIS12] = {&polaris12_device_info, NULL},
- [CHIP_VEGAM] = {&vegam_device_info, NULL},
- [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
- [CHIP_VEGA12] = {&vega12_device_info, NULL},
- [CHIP_VEGA20] = {&vega20_device_info, NULL},
- [CHIP_RENOIR] = {&renoir_device_info, NULL},
- [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
- [CHIP_ALDEBARAN] = {&aldebaran_device_info, &aldebaran_device_info},
- [CHIP_NAVI10] = {&navi10_device_info, NULL},
- [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
- [CHIP_NAVI14] = {&navi14_device_info, NULL},
- [CHIP_SIENNA_CICHLID] = {&sienna_cichlid_device_info, &sienna_cichlid_device_info},
- [CHIP_NAVY_FLOUNDER] = {&navy_flounder_device_info, &navy_flounder_device_info},
- [CHIP_VANGOGH] = {&vangogh_device_info, NULL},
- [CHIP_DIMGREY_CAVEFISH] = {&dimgrey_cavefish_device_info, &dimgrey_cavefish_device_info},
- [CHIP_BEIGE_GOBY] = {&beige_goby_device_info, &beige_goby_device_info},
- [CHIP_YELLOW_CARP] = {&yellow_carp_device_info, NULL},
- [CHIP_CYAN_SKILLFISH] = {&cyan_skillfish_device_info, NULL},
-};
-
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
- struct pci_dev *pdev, unsigned int asic_type, bool vf)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
{
struct kfd_dev *kfd;
const struct kfd_device_info *device_info;
const struct kfd2kgd_calls *f2g;
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct pci_dev *pdev = adev->pdev;
- if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2)
- || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) {
- dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
- return NULL; /* asic_type out of range */
+ switch (adev->asic_type) {
+#ifdef KFD_SUPPORT_IOMMU_V2
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_KAVERI:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &kaveri_device_info;
+ f2g = &gfx_v7_kfd2kgd;
+ break;
+#endif
+ case CHIP_CARRIZO:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &carrizo_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_HAWAII:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &hawaii_device_info;
+ f2g = &gfx_v7_kfd2kgd;
+ break;
+#endif
+ case CHIP_TONGA:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &tonga_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ case CHIP_FIJI:
+ if (vf)
+ device_info = &fiji_vf_device_info;
+ else
+ device_info = &fiji_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ case CHIP_POLARIS10:
+ if (vf)
+ device_info = &polaris10_vf_device_info;
+ else
+ device_info = &polaris10_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ case CHIP_POLARIS11:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &polaris11_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ case CHIP_POLARIS12:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &polaris12_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ case CHIP_VEGAM:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &vegam_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ default:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ if (vf)
+ device_info = &vega10_vf_device_info;
+ else
+ device_info = &vega10_device_info;
+ f2g = &gfx_v9_kfd2kgd;
+ break;
+#ifdef KFD_SUPPORT_IOMMU_V2
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &raven_device_info;
+ f2g = &gfx_v9_kfd2kgd;
+ break;
+#endif
+ case IP_VERSION(9, 2, 1):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &vega12_device_info;
+ f2g = &gfx_v9_kfd2kgd;
+ break;
+ case IP_VERSION(9, 3, 0):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &renoir_device_info;
+ f2g = &gfx_v9_kfd2kgd;
+ break;
+ case IP_VERSION(9, 4, 0):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &vega20_device_info;
+ f2g = &gfx_v9_kfd2kgd;
+ break;
+ case IP_VERSION(9, 4, 1):
+ device_info = &arcturus_device_info;
+ f2g = &arcturus_kfd2kgd;
+ break;
+ case IP_VERSION(9, 4, 2):
+ device_info = &aldebaran_device_info;
+ f2g = &aldebaran_kfd2kgd;
+ break;
+ case IP_VERSION(10, 1, 10):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &navi10_device_info;
+ f2g = &gfx_v10_kfd2kgd;
+ break;
+ case IP_VERSION(10, 1, 2):
+ device_info = &navi12_device_info;
+ f2g = &gfx_v10_kfd2kgd;
+ break;
+ case IP_VERSION(10, 1, 1):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &navi14_device_info;
+ f2g = &gfx_v10_kfd2kgd;
+ break;
+ case IP_VERSION(10, 1, 3):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &cyan_skillfish_device_info;
+ f2g = &gfx_v10_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 0):
+ device_info = &sienna_cichlid_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 2):
+ device_info = &navy_flounder_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 1):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &vangogh_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 4):
+ device_info = &dimgrey_cavefish_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 5):
+ device_info = &beige_goby_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 3):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &yellow_carp_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ default:
+ return NULL;
+ }
+ break;
}
- device_info = kfd_supported_devices[asic_type][vf];
- f2g = kfd2kgd_funcs[asic_type];
-
if (!device_info || !f2g) {
dev_err(kfd_device, "%s %s not supported in kfd\n",
- amdgpu_asic_name[asic_type], vf ? "VF" : "");
+ amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
return NULL;
}
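The two parallel lookup tables become one switch: pre-GFX9 parts still key off adev->asic_type, everything newer keys off the GC IP version, and virtualisation support is expressed inline (device_info = NULL when a part has no VF variant, which trips the !device_info check below). Adding a part under the new scheme is a single case label, e.g. (hypothetical IP version and names):

case IP_VERSION(10, 3, 6):
        if (vf)
                device_info = NULL;     /* no virtualisation support */
        else
                device_info = &new_part_device_info;
        f2g = &gfx_v10_3_kfd2kgd;
        break;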
@@ -916,6 +1023,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd_double_confirm_iommu_support(kfd);
if (kfd_iommu_device_init(kfd)) {
+ kfd->use_iommu_v2 = false;
dev_err(kfd_device, "Error initializing iommuv2\n");
goto device_iommu_error;
}
@@ -924,6 +1032,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
svm_migrate_init((struct amdgpu_device *)kfd->kgd);
+ if (kgd2kfd_resume_iommu(kfd))
+ goto device_iommu_error;
+
if (kfd_resume(kfd))
goto kfd_resume_error;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index f8fce9d05f50..533b27b35fc9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -557,7 +557,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
return retval;
}
-static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+static int update_queue(struct device_queue_manager *dqm, struct queue *q,
+ struct mqd_update_info *minfo)
{
int retval = 0;
struct mqd_manager *mqd_mgr;
@@ -605,7 +606,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
}
}
- mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
+ mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);
/*
* check active state vs. the previous state and modify
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c8719682c4da..499fc0ea387f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -93,7 +93,7 @@ struct device_queue_manager_ops {
struct queue *q);
int (*update_queue)(struct device_queue_manager *dqm,
- struct queue *q);
+ struct queue *q, struct mqd_update_info *minfo);
int (*register_process)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 12d91e53556c..543e7ea75593 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -231,7 +231,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
kfd_signal_poison_consumed_event(dev, pasid);
- amdgpu_amdkfd_gpu_reset(dev->kgd);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd);
return;
}
break;
@@ -253,7 +253,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
kfd_signal_poison_consumed_event(dev, pasid);
- amdgpu_amdkfd_gpu_reset(dev->kgd);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd);
return;
}
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
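Both the SQ and SDMA poison-consumption paths stop hard-resetting the GPU directly and defer to amdgpu_amdkfd_ras_poison_consumption_handler(). Its body is outside this diff; presumably it consults the poison mode reported by the new UMC ->query_ras_poison_mode hook (see the umc_v6_7 hunk earlier) and only falls back to a full reset in fatal error mode. A hypothetical sketch:

/* hypothetical body -- everything here beyond the two amdgpu_amdkfd_*
 * symbols used in the hunks is an assumption */
void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        if (ras_in_poison_mode(adev))   /* sketch helper from earlier */
                return;                 /* poison contained, no reset needed */

        amdgpu_amdkfd_gpu_reset(kgd);
}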
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index a2b77d1df854..64b4ac339904 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -136,7 +136,6 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
- prop.cu_mask = NULL;
if (init_queue(&kq->queue, &prop) != 0)
goto err_init_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4a16e3c257b9..6d8634e40b3b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -20,7 +20,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
@@ -34,6 +33,11 @@
#include "kfd_svm.h"
#include "kfd_migrate.h"
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__
+
static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
@@ -151,14 +155,14 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
}
if (r) {
- pr_debug("failed %d to create gart mapping\n", r);
+ dev_err(adev->dev, "fail %d create gart mapping\n", r);
goto out_unlock;
}
r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
NULL, &next, false, true, false);
if (r) {
- pr_debug("failed %d to copy memory\n", r);
+ dev_err(adev->dev, "fail %d to copy memory\n", r);
goto out_unlock;
}
@@ -264,6 +268,19 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
+static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
+{
+ unsigned long cpages = 0;
+ unsigned long i;
+
+ for (i = 0; i < migrate->npages; i++) {
+ if (migrate->src[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ cpages++;
+ }
+ return cpages;
+}
+
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
@@ -285,7 +302,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
r = svm_range_vram_node_new(adev, prange, true);
if (r) {
- pr_debug("failed %d get 0x%llx pages from vram\n", r, npages);
+ dev_err(adev->dev, "fail %d to alloc vram\n", r);
goto out;
}
@@ -305,7 +322,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
if (r) {
- pr_debug("failed %d dma_map_page\n", r);
+ dev_err(adev->dev, "fail %d dma_map_page\n", r);
goto out_free_vram_pages;
}
} else {
@@ -325,8 +342,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
continue;
}
- pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
- src[i] >> PAGE_SHIFT, page_to_pfn(spage));
+ pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
+ src[i] >> PAGE_SHIFT, page_to_pfn(spage));
if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
r = svm_migrate_copy_memory_gart(adev, src + i - j,
@@ -372,7 +389,7 @@ out:
return r;
}
-static int
+static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
uint64_t end)
@@ -381,6 +398,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate;
+ unsigned long cpages = 0;
dma_addr_t *scratch;
size_t size;
void *buf;
@@ -405,23 +423,31 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
r = migrate_vma_setup(&migrate);
if (r) {
- pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
- r, prange->svms, prange->start, prange->last);
+ dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
+ prange->start, prange->last);
goto out_free;
}
- if (migrate.cpages != npages) {
- pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
- migrate.cpages,
- npages);
- }
- if (migrate.cpages) {
- r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence,
- scratch);
- migrate_vma_pages(&migrate);
- svm_migrate_copy_done(adev, mfence);
- migrate_vma_finalize(&migrate);
+ cpages = migrate.cpages;
+ if (!cpages) {
+ pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
+ prange->start, prange->last);
+ goto out_free;
}
+ if (cpages != npages)
+ pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ cpages, npages);
+ else
+ pr_debug("0x%lx pages migrated\n", cpages);
+
+ r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+ migrate_vma_pages(&migrate);
+
+ pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+ svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
+
+ svm_migrate_copy_done(adev, mfence);
+ migrate_vma_finalize(&migrate);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
svm_range_free_dma_mappings(prange);
@@ -429,12 +455,13 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
out_free:
kvfree(buf);
out:
- if (!r) {
+ if (!r && cpages) {
pdd = svm_range_get_pdd_by_adev(prange, adev);
if (pdd)
- WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
- }
+ WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
+ return cpages;
+ }
return r;
}
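svm_migrate_vma_to_vram() (and svm_migrate_vma_to_ram() further down) changes its contract from "0 or -errno" to "-errno or the number of pages collected for migration", so callers walking several VMAs can tell "nothing migrated" apart from "hard error". Caller side, condensed:

long n;
unsigned long cpages = 0;

n = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
if (n < 0)
        return n;       /* real error */
cpages += n;            /* may legitimately be 0 for this VMA */
/* ... next VMA ... */
if (cpages)             /* commit only if something actually moved */
        prange->actual_loc = best_loc;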
@@ -456,7 +483,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long addr, start, end;
struct vm_area_struct *vma;
struct amdgpu_device *adev;
- int r = 0;
+ unsigned long cpages = 0;
+ long r = 0;
if (prange->actual_loc == best_loc) {
pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
@@ -488,17 +516,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
next = min(vma->vm_end, end);
r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
- if (r) {
- pr_debug("failed to migrate\n");
+ if (r < 0) {
+ pr_debug("failed %ld to migrate\n", r);
break;
+ } else {
+ cpages += r;
}
addr = next;
}
- if (!r)
+ if (cpages)
prange->actual_loc = best_loc;
- return r;
+ return r < 0 ? r : 0;
}
static void svm_migrate_page_free(struct page *page)
@@ -506,7 +536,7 @@ static void svm_migrate_page_free(struct page *page)
struct svm_range_bo *svm_bo = page->zone_device_data;
if (svm_bo) {
- pr_debug("svm_bo ref left: %d\n", kref_read(&svm_bo->kref));
+ pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
svm_range_bo_unref(svm_bo);
}
}
@@ -572,12 +602,12 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
r = dma_mapping_error(dev, dst[i]);
if (r) {
- pr_debug("failed %d dma_map_page\n", r);
+ dev_err(adev->dev, "fail %d dma_map_page\n", r);
goto out_oom;
}
- pr_debug("dma mapping dst to 0x%llx, page_to_pfn 0x%lx\n",
- dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
+ pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
+ dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
migrate->dst[i] |= MIGRATE_PFN_LOCKED;
@@ -599,7 +629,7 @@ out_oom:
return r;
}
-static int
+static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
@@ -607,6 +637,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate;
+ unsigned long cpages = 0;
dma_addr_t *scratch;
size_t size;
void *buf;
@@ -631,34 +662,43 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
r = migrate_vma_setup(&migrate);
if (r) {
- pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
- r, prange->svms, prange->start, prange->last);
+ dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
+ prange->start, prange->last);
goto out_free;
}
- pr_debug("cpages %ld\n", migrate.cpages);
-
- if (migrate.cpages) {
- r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
- scratch, npages);
- migrate_vma_pages(&migrate);
- svm_migrate_copy_done(adev, mfence);
- migrate_vma_finalize(&migrate);
- } else {
+ cpages = migrate.cpages;
+ if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
+ goto out_free;
}
+ if (cpages != npages)
+ pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ cpages, npages);
+ else
+ pr_debug("0x%lx pages migrated\n", cpages);
+ r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
+ scratch, npages);
+ migrate_vma_pages(&migrate);
+
+ pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+ svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
+
+ svm_migrate_copy_done(adev, mfence);
+ migrate_vma_finalize(&migrate);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
out:
- if (!r) {
+ if (!r && cpages) {
pdd = svm_range_get_pdd_by_adev(prange, adev);
if (pdd)
- WRITE_ONCE(pdd->page_out,
- pdd->page_out + migrate.cpages);
+ WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
+
+ return cpages;
}
return r;
}
@@ -680,7 +720,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
unsigned long addr;
unsigned long start;
unsigned long end;
- int r = 0;
+ unsigned long cpages = 0;
+ long r = 0;
if (!prange->actual_loc) {
pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
@@ -711,18 +752,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
next = min(vma->vm_end, end);
r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
- if (r) {
- pr_debug("failed %d to migrate\n", r);
+ if (r < 0) {
+ pr_debug("failed %ld to migrate\n", r);
break;
+ } else {
+ cpages += r;
}
addr = next;
}
- if (!r) {
+ if (cpages) {
svm_range_vram_node_free(prange);
prange->actual_loc = 0;
}
- return r;
+
+ return r < 0 ? r : 0;
}
/**
@@ -901,8 +945,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
/* Disable SVM support capability */
pgmap->type = 0;
- devm_release_mem_region(adev->dev, res->start,
- res->end - res->start + 1);
+ devm_release_mem_region(adev->dev, res->start, resource_size(res));
return PTR_ERR(r);
}
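The resource_size() conversion is behavior-neutral: struct resource bounds are inclusive, and the helper encodes exactly the arithmetic it replaces. For reference, its definition from include/linux/ioport.h:

    /* A resource spans [start, end] inclusive, so its size is
     * end - start + 1 -- the open-coded expression removed above.
     */
    static inline resource_size_t resource_size(const struct resource *res)
    {
            return res->end - res->start + 1;
    }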
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 6e6918ccedfd..965e17c5dbb4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -80,7 +80,8 @@ struct mqd_manager {
struct mm_struct *mms);
void (*update_mqd)(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q);
+ struct queue_properties *q,
+ struct mqd_update_info *minfo);
int (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 064914e1e8d6..8128f4d312f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -42,16 +42,17 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct mqd_update_info *minfo)
{
struct cik_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (q->cu_mask_count == 0)
+ if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
+ !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
- q->cu_mask, q->cu_mask_count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -135,7 +136,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
*mqd = m;
if (gart_addr)
*gart_addr = addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
@@ -152,7 +153,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static void free_mqd(struct mqd_manager *mm, void *mqd,
@@ -185,7 +186,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
}
static void __update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q, unsigned int atc_bit)
+ struct queue_properties *q, struct mqd_update_info *minfo,
+ unsigned int atc_bit)
{
struct cik_mqd *m;
@@ -214,16 +216,17 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
- update_cu_mask(mm, mqd, q);
+ update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
- __update_mqd(mm, mqd, q, 1);
+ __update_mqd(mm, mqd, q, minfo, 1);
}
static uint32_t read_doorbell_id(void *mqd)
@@ -234,13 +237,15 @@ static uint32_t read_doorbell_id(void *mqd)
}
static void update_mqd_hawaii(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
- __update_mqd(mm, mqd, q, 0);
+ __update_mqd(mm, mqd, q, minfo, 0);
}
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct cik_sdma_rlc_registers *m;
@@ -318,7 +323,8 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
}
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct cik_mqd *m;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index c7fb59ca597f..270160fc401b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -42,16 +42,17 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct mqd_update_info *minfo)
{
struct v10_compute_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (q->cu_mask_count == 0)
+ if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
+ !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
- q->cu_mask, q->cu_mask_count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -136,7 +137,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
*mqd = m;
if (gart_addr)
*gart_addr = addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
@@ -162,7 +163,8 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct v10_compute_mqd *m;
@@ -218,7 +220,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
if (mm->dev->cwsr_enabled)
m->cp_hqd_ctx_save_control = 0;
- update_cu_mask(mm, mqd, q);
+ update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
@@ -311,7 +313,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -326,7 +328,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
#define SDMA_RLC_DUMMY_DEFAULT 0xf
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct v10_sdma_mqd *m;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 7f4e102ff4bd..4e5932f54b5a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -43,16 +43,17 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct mqd_update_info *minfo)
{
struct v9_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
- if (q->cu_mask_count == 0)
+ if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
+ !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
- q->cu_mask, q->cu_mask_count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -188,7 +189,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
*mqd = m;
if (gart_addr)
*gart_addr = addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
@@ -212,7 +213,8 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct v9_mqd *m;
@@ -269,7 +271,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
- update_cu_mask(mm, mqd, q);
+ update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
@@ -366,7 +368,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -381,7 +383,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
#define SDMA_RLC_DUMMY_DEFAULT 0xf
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct v9_sdma_mqd *m;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 33dbd22d290f..cd9220eb8a7a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -45,16 +45,17 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct mqd_update_info *minfo)
{
struct vi_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (q->cu_mask_count == 0)
+ if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
+ !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
- q->cu_mask, q->cu_mask_count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -150,7 +151,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
*mqd = m;
if (gart_addr)
*gart_addr = addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
@@ -167,8 +168,8 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
}
static void __update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q, unsigned int mtype,
- unsigned int atc_bit)
+ struct queue_properties *q, struct mqd_update_info *minfo,
+ unsigned int mtype, unsigned int atc_bit)
{
struct vi_mqd *m;
@@ -230,7 +231,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
- update_cu_mask(mm, mqd, q);
+ update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
@@ -238,9 +239,10 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
static void update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
- __update_mqd(mm, mqd, q, MTYPE_CC, 1);
+ __update_mqd(mm, mqd, q, minfo, MTYPE_CC, 1);
}
static uint32_t read_doorbell_id(void *mqd)
@@ -251,9 +253,10 @@ static uint32_t read_doorbell_id(void *mqd)
}
static void update_mqd_tonga(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
- __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+ __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
}
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
@@ -317,9 +320,10 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
}
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
- __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+ __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
@@ -336,7 +340,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -349,7 +353,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
}
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct vi_sdma_mqd *m;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6d8f9bb2d905..4104b167e721 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -472,9 +472,6 @@ struct queue_properties {
uint32_t ctl_stack_size;
uint64_t tba_addr;
uint64_t tma_addr;
- /* Relevant for CU */
- uint32_t cu_mask_count; /* Must be a multiple of 32 */
- uint32_t *cu_mask;
};
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \
@@ -482,6 +479,20 @@ struct queue_properties {
(q).queue_percent > 0 && \
!(q).is_evicted)
+enum mqd_update_flag {
+ UPDATE_FLAG_CU_MASK = 0,
+};
+
+struct mqd_update_info {
+ union {
+ struct {
+ uint32_t count; /* Must be a multiple of 32 */
+ uint32_t *ptr;
+ } cu_mask;
+ };
+ enum mqd_update_flag update_flag;
+};
+
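With the CU mask moved out of queue_properties, per-MQD updates travel in this container instead. A hypothetical caller sketch (cu_mask_ptr, cu_mask_count and queue_id stand in for values validated at the ioctl layer), showing how a CU-mask update flows through the pqm_update_mqd() path declared below:

    /* Hypothetical sketch: package a CU mask for pqm_update_mqd().
     * UPDATE_FLAG_CU_MASK is 0, so update_cu_mask() additionally
     * requires a non-NULL cu_mask.ptr before acting; a zeroed minfo
     * is therefore a no-op.
     */
    struct mqd_update_info minfo = {0};

    minfo.update_flag = UPDATE_FLAG_CU_MASK;
    minfo.cu_mask.count = cu_mask_count;   /* must be a multiple of 32 */
    minfo.cu_mask.ptr = cu_mask_ptr;       /* kernel copy of the user mask */
    retval = pqm_update_mqd(&p->pqm, queue_id, &minfo);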
/**
* struct queue
*
@@ -608,12 +619,14 @@ struct qcm_process_device {
uint32_t sh_hidden_private_base;
/* CWSR memory */
+ struct kgd_mem *cwsr_mem;
void *cwsr_kaddr;
uint64_t cwsr_base;
uint64_t tba_addr;
uint64_t tma_addr;
/* IB memory */
+ struct kgd_mem *ib_mem;
uint64_t ib_base;
void *ib_kaddr;
@@ -808,6 +821,7 @@ struct kfd_process {
/* Event ID allocator and lookup */
struct idr event_idr;
/* Event page */
+ u64 signal_handle;
struct kfd_signal_page *signal_page;
size_t signal_mapped_size;
size_t signal_event_count;
@@ -1031,10 +1045,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
unsigned int *qid,
uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
-int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
- struct queue_properties *p);
-int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
+int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
+ struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 21ec8a18cad2..457863861d6f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -72,6 +72,8 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
+static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
+
struct kfd_procfs_tree {
struct kobject *kobj;
};
@@ -685,10 +687,15 @@ void kfd_process_destroy_wq(void)
}
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
- struct kfd_process_device *pdd)
+ struct kfd_process_device *pdd, void *kptr)
{
struct kfd_dev *dev = pdd->dev;
+ if (kptr)
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->kgd, mem);
+
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv,
NULL);
@@ -702,63 +709,46 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
*/
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
uint64_t gpu_va, uint32_t size,
- uint32_t flags, void **kptr)
+ uint32_t flags, struct kgd_mem **mem, void **kptr)
{
struct kfd_dev *kdev = pdd->dev;
- struct kgd_mem *mem = NULL;
- int handle;
int err;
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
- pdd->drm_priv, &mem, NULL, flags);
+ pdd->drm_priv, mem, NULL, flags);
if (err)
goto err_alloc_mem;
- err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, *mem,
pdd->drm_priv, NULL);
if (err)
goto err_map_mem;
- err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
+ err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, *mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
- /* Create an obj handle so kfd_process_device_remove_obj_handle
- * will take care of the bo removal when the process finishes.
- * We do not need to take p->mutex, because the process is just
- * created and the ioctls have not had the chance to run.
- */
- handle = kfd_process_device_create_obj_handle(pdd, mem);
-
- if (handle < 0) {
- err = handle;
- goto free_gpuvm;
- }
-
if (kptr) {
err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
- (struct kgd_mem *)mem, kptr, NULL);
+ (struct kgd_mem *)*mem, kptr, NULL);
if (err) {
pr_debug("Map GTT BO to kernel failed\n");
- goto free_obj_handle;
+ goto sync_memory_failed;
}
}
return err;
-free_obj_handle:
- kfd_process_device_remove_obj_handle(pdd, handle);
-free_gpuvm:
sync_memory_failed:
- kfd_process_free_gpuvm(mem, pdd);
- return err;
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->kgd, *mem, pdd->drm_priv);
err_map_mem:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, pdd->drm_priv,
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, *mem, pdd->drm_priv,
NULL);
err_alloc_mem:
+ *mem = NULL;
+ if (kptr)
+ *kptr = NULL;
return err;
}
@@ -776,6 +766,7 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
+ struct kgd_mem *mem;
void *kaddr;
int ret;
@@ -784,15 +775,26 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
/* ib_base is only set for dGPU */
ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
- &kaddr);
+ &mem, &kaddr);
if (ret)
return ret;
+ qpd->ib_mem = mem;
qpd->ib_kaddr = kaddr;
return 0;
}
+static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
+{
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ if (!qpd->ib_kaddr || !qpd->ib_base)
+ return;
+
+ kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr);
+}
+
struct kfd_process *kfd_create_process(struct file *filep)
{
struct kfd_process *process;
@@ -947,6 +949,37 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
}
}
+/*
+ * Just kunmap and unpin signal BO here. It will be freed in
+ * kfd_process_free_outstanding_kfd_bos()
+ */
+static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_dev *kdev;
+ void *mem;
+
+ kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
+ if (!kdev)
+ return;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_get_process_device_data(kdev, p);
+ if (!pdd)
+ goto out;
+
+ mem = kfd_process_device_translate_handle(
+ pdd, GET_IDR_HANDLE(p->signal_handle));
+ if (!mem)
+ goto out;
+
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->kgd, mem);
+
+out:
+ mutex_unlock(&p->mutex);
+}
+
static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
int i;
@@ -965,6 +998,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
+ kfd_process_device_destroy_cwsr_dgpu(pdd);
+ kfd_process_device_destroy_ib_mem(pdd);
+
if (pdd->drm_file) {
amdgpu_amdkfd_gpuvm_release_process_vm(
pdd->dev->kgd, pdd->drm_priv);
@@ -1049,9 +1085,11 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
+
kfd_process_remove_sysfs(p);
kfd_iommu_unbind_process(p);
+ kfd_process_kunmap_signal_bo(p);
kfd_process_free_outstanding_kfd_bos(p);
svm_range_list_fini(p);
@@ -1198,6 +1236,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
+ struct kgd_mem *mem;
void *kaddr;
int ret;
@@ -1206,10 +1245,11 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
/* cwsr_base is only set for dGPU */
ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
- KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
+ KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
if (ret)
return ret;
+ qpd->cwsr_mem = mem;
qpd->cwsr_kaddr = kaddr;
qpd->tba_addr = qpd->cwsr_base;
@@ -1222,6 +1262,17 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
return 0;
}
+static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
+{
+ struct kfd_dev *dev = pdd->dev;
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
+ return;
+
+ kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr);
+}
+
void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
uint64_t tba_addr,
uint64_t tma_addr)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 243dd1efcdbf..3627e7ac161b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -121,7 +121,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
- pqn->q);
+ pqn->q, NULL);
}
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
@@ -394,8 +394,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
pdd->qpd.num_gws = 0;
}
- kfree(pqn->q->properties.cu_mask);
- pqn->q->properties.cu_mask = NULL;
uninit_queue(pqn->q);
}
@@ -411,8 +409,8 @@ err_destroy_queue:
return retval;
}
-int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
- struct queue_properties *p)
+int pqm_update_queue_properties(struct process_queue_manager *pqm,
+ unsigned int qid, struct queue_properties *p)
{
int retval;
struct process_queue_node *pqn;
@@ -429,15 +427,15 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
pqn->q->properties.priority = p->priority;
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
- pqn->q);
+ pqn->q, NULL);
if (retval != 0)
return retval;
return 0;
}
-int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
- struct queue_properties *p)
+int pqm_update_mqd(struct process_queue_manager *pqm,
+ unsigned int qid, struct mqd_update_info *minfo)
{
int retval;
struct process_queue_node *pqn;
@@ -448,16 +446,8 @@ int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
return -EFAULT;
}
- /* Free the old CU mask memory if it is already allocated, then
- * allocate memory for the new CU mask.
- */
- kfree(pqn->q->properties.cu_mask);
-
- pqn->q->properties.cu_mask_count = p->cu_mask_count;
- pqn->q->properties.cu_mask = p->cu_mask;
-
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
- pqn->q);
+ pqn->q, minfo);
if (retval != 0)
return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9d0f65a90002..b691c8495d66 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -33,6 +33,11 @@
#include "kfd_svm.h"
#include "kfd_migrate.h"
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
+
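Overriding dev_fmt() gives every dev_err()/dev_dbg() in this file a uniform prefix including the function name. An illustrative log line (the PCI address and errno are hypothetical):

    /* With the dev_fmt() above, a call such as */
    dev_err(adev->dev, "failed %d dma_map_page\n", r);
    /* is printed roughly as:
     *   amdgpu 0000:03:00.0: kfd_svm: svm_range_dma_map_dev: failed -12 dma_map_page
     */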
#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
/* Long enough to ensure no retry fault comes after svm range is restored and
@@ -45,7 +50,9 @@ static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq);
-
+static int
+svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
+ uint64_t *bo_s, uint64_t *bo_l);
static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
.invalidate = svm_range_cpu_invalidate_pagetables,
};
@@ -158,17 +165,17 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
bo_adev->vm_manager.vram_base_offset -
bo_adev->kfd.dev->pgmap.range.start;
addr[i] |= SVM_RANGE_VRAM_DOMAIN;
- pr_debug("vram address detected: 0x%llx\n", addr[i]);
+ pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
continue;
}
addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
r = dma_mapping_error(dev, addr[i]);
if (r) {
- pr_debug("failed %d dma_map_page\n", r);
+ dev_err(dev, "failed %d dma_map_page\n", r);
return r;
}
- pr_debug("dma mapping 0x%llx for page addr 0x%lx\n",
- addr[i] >> PAGE_SHIFT, page_to_pfn(page));
+ pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
+ addr[i] >> PAGE_SHIFT, page_to_pfn(page));
}
return 0;
}
@@ -217,7 +224,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
for (i = offset; i < offset + npages; i++) {
if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
continue;
- pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
+ pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
dma_addr[i] = 0;
}
@@ -1307,7 +1314,7 @@ struct svm_validate_context {
struct svm_range *prange;
bool intr;
unsigned long bitmap[MAX_GPU_INSTANCE];
- struct ttm_validate_buffer tv[MAX_GPU_INSTANCE+1];
+ struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
struct list_head validate_list;
struct ww_acquire_ctx ticket;
};
@@ -1334,11 +1341,6 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
ctx->tv[gpuidx].num_shared = 4;
list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
}
- if (ctx->prange->svm_bo && ctx->prange->ttm_res) {
- ctx->tv[MAX_GPU_INSTANCE].bo = &ctx->prange->svm_bo->bo->tbo;
- ctx->tv[MAX_GPU_INSTANCE].num_shared = 1;
- list_add(&ctx->tv[MAX_GPU_INSTANCE].head, &ctx->validate_list);
- }
r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
ctx->intr, NULL);
@@ -1459,7 +1461,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
/* This should never happen. actual_loc gets set by
* svm_migrate_ram_to_vram after allocating a BO.
*/
- WARN(1, "VRAM BO missing during validation\n");
+ WARN_ONCE(1, "VRAM BO missing during validation\n");
return -EINVAL;
}
@@ -1552,7 +1554,7 @@ unreserve_out:
* Context: Returns with mmap write lock held, pending deferred work flushed
*
*/
-static void
+void
svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
struct mm_struct *mm)
{
@@ -2308,6 +2310,7 @@ svm_range_best_restore_location(struct svm_range *prange,
return -1;
}
+
static int
svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
unsigned long *start, unsigned long *last)
@@ -2355,8 +2358,59 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
vma->vm_end >> PAGE_SHIFT, *last);
return 0;
+}
+
+static int
+svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
+ uint64_t *bo_s, uint64_t *bo_l)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct interval_tree_node *node;
+ struct amdgpu_bo *bo = NULL;
+ unsigned long userptr;
+ uint32_t i;
+ int r;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct amdgpu_vm *vm;
+
+ if (!p->pdds[i]->drm_priv)
+ continue;
+
+ vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ /* Check userptr by searching entire vm->va interval tree */
+ node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
+ while (node) {
+ mapping = container_of((struct rb_node *)node,
+ struct amdgpu_bo_va_mapping, rb);
+ bo = mapping->bo_va->base.bo;
+
+ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
+ start << PAGE_SHIFT,
+ last << PAGE_SHIFT,
+ &userptr)) {
+ node = interval_tree_iter_next(node, 0, ~0ULL);
+ continue;
+ }
+
+ pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
+ start, last);
+ if (bo_s && bo_l) {
+ *bo_s = userptr >> PAGE_SHIFT;
+ *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
+ }
+ amdgpu_bo_unreserve(vm->root.bo);
+ return -EADDRINUSE;
+ }
+ amdgpu_bo_unreserve(vm->root.bo);
+ }
+ return 0;
}
+
static struct
svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
struct kfd_process *p,
@@ -2366,10 +2420,26 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
struct svm_range *prange = NULL;
unsigned long start, last;
uint32_t gpuid, gpuidx;
+ uint64_t bo_s = 0;
+ uint64_t bo_l = 0;
+ int r;
if (svm_range_get_range_boundaries(p, addr, &start, &last))
return NULL;
+ r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
+ if (r != -EADDRINUSE)
+ r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
+
+ if (r == -EADDRINUSE) {
+ if (addr >= bo_s && addr <= bo_l)
+ return NULL;
+
+ /* Create a one-page svm range if the 2MB range overlaps an existing mapping */
+ start = addr;
+ last = addr;
+ }
+
prange = svm_range_new(&p->svms, start, last);
if (!prange) {
pr_debug("Failed to create prange in address [0x%llx]\n", addr);
@@ -2668,8 +2738,67 @@ int svm_range_list_init(struct kfd_process *p)
}
/**
+ * svm_range_check_vm - check if the virtual address range is already mapped
+ * @p: current kfd_process
+ * @start: range start address, in pages
+ * @last: range last address, in pages
+ * @bo_s: mapping start address in pages if address range already mapped
+ * @bo_l: mapping last address in pages if address range already mapped
+ *
+ * The purpose is to avoid clashing with virtual address ranges already
+ * allocated by the kfd_ioctl_alloc_memory_of_gpu ioctl.
+ * It iterates over each pdd in the kfd_process.
+ *
+ * Context: Process context
+ *
+ * Return: 0 - OK, if the range is not mapped.
+ * Otherwise error code:
+ * -EADDRINUSE - if the address is already mapped by kfd_ioctl_alloc_memory_of_gpu
+ * -ERESTARTSYS - a wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+static int
+svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
+ uint64_t *bo_s, uint64_t *bo_l)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct interval_tree_node *node;
+ uint32_t i;
+ int r;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct amdgpu_vm *vm;
+
+ if (!p->pdds[i]->drm_priv)
+ continue;
+
+ vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ node = interval_tree_iter_first(&vm->va, start, last);
+ if (node) {
+ pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
+ start, last);
+ mapping = container_of((struct rb_node *)node,
+ struct amdgpu_bo_va_mapping, rb);
+ if (bo_s && bo_l) {
+ *bo_s = mapping->start;
+ *bo_l = mapping->last;
+ }
+ amdgpu_bo_unreserve(vm->root.bo);
+ return -EADDRINUSE;
+ }
+ amdgpu_bo_unreserve(vm->root.bo);
+ }
+
+ return 0;
+}
+
+/**
* svm_range_is_valid - check if virtual address range is valid
- * @mm: current process mm_struct
+ * @p: current kfd_process
* @start: range start address, in pages
* @size: range size, in pages
*
@@ -2678,28 +2807,28 @@ int svm_range_list_init(struct kfd_process *p)
* Context: Process context
*
* Return:
- * true - valid svm range
- * false - invalid svm range
+ * 0 - OK, otherwise error code
*/
-static bool
-svm_range_is_valid(struct mm_struct *mm, uint64_t start, uint64_t size)
+static int
+svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
{
const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
struct vm_area_struct *vma;
unsigned long end;
+ unsigned long start_unchg = start;
start <<= PAGE_SHIFT;
end = start + (size << PAGE_SHIFT);
-
do {
- vma = find_vma(mm, start);
+ vma = find_vma(p->mm, start);
if (!vma || start < vma->vm_start ||
(vma->vm_flags & device_vma))
- return false;
+ return -EFAULT;
start = min(end, vma->vm_end);
} while (start < end);
- return true;
+ return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
+ NULL);
}
/**
@@ -3002,9 +3131,9 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
svm_range_list_lock_and_flush_work(svms, mm);
- if (!svm_range_is_valid(mm, start, size)) {
- pr_debug("invalid range\n");
- r = -EFAULT;
+ r = svm_range_is_valid(p, start, size);
+ if (r) {
+ pr_debug("invalid range r=%d\n", r);
mmap_write_unlock(mm);
goto out;
}
@@ -3106,6 +3235,7 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
uint32_t flags_or = 0;
int gpuidx;
uint32_t i;
+ int r = 0;
pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
start + size - 1, nattr);
@@ -3119,12 +3249,12 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
flush_work(&p->svms.deferred_list_work);
mmap_read_lock(mm);
- if (!svm_range_is_valid(mm, start, size)) {
- pr_debug("invalid range\n");
- mmap_read_unlock(mm);
- return -EINVAL;
- }
+ r = svm_range_is_valid(p, start, size);
mmap_read_unlock(mm);
+ if (r) {
+ pr_debug("invalid range r=%d\n", r);
+ return r;
+ }
for (i = 0; i < nattr; i++) {
switch (attrs[i].type) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index c6ec55354c7b..6dc91c33e80f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -188,6 +188,7 @@ void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
void *owner);
struct kfd_process_device *
svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
+void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);
/* SVM API and HMM page migration work together, device memory type
* is initialized to not 0 when page migration register device memory.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 98cca5f2b27f..dd593ad0614a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1296,6 +1296,24 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
+ adev = (struct amdgpu_device *)(gpu->kgd);
+
+ /* Include the CPU in the xGMI hive if it is xGMI-connected, by assigning it the hive ID. */
+ if (gpu->hive_id && adev->gmc.xgmi.connected_to_cpu) {
+ struct kfd_topology_device *top_dev;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list) {
+ if (top_dev->gpu)
+ break;
+
+ top_dev->node_props.hive_id = gpu->hive_id;
+ }
+
+ up_read(&topology_lock);
+ }
+
/* Check to see if this gpu device exists in the topology_device_list.
* If so, assign the gpu to that device,
* else create a Virtual CRAT for this gpu device and then parse that
@@ -1457,7 +1475,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.max_waves_per_simd = 10;
}
- adev = (struct amdgpu_device *)(dev->gpu->kgd);
/* kfd only concerns sram ecc on GFX and HBM ecc on UMC */
dev->node_props.capability |=
((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1ea31dcc7a8b..43e983e42c0f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -29,6 +29,7 @@
#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
+#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
@@ -215,6 +216,8 @@ static void handle_cursor_update(struct drm_plane *plane,
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -618,6 +621,121 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
}
#endif
+/**
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * Dmub AUX or SET_CONFIG command completion processing callback.
+ * Copies the dmub notification to DM, which is to be read by the AUX
+ * command issuing thread, and also signals the event to wake up that thread.
+ */
+void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+{
+ if (adev->dm.dmub_notify)
+ memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
+ if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
+ complete(&adev->dm.dmub_aux_transfer_done);
+}
+
+/**
+ * dmub_hpd_callback - DMUB HPD interrupt processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * Dmub Hpd interrupt processing callback. Gets the display index through
+ * the link index and calls the helper to do the processing.
+ */
+void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *hpd_aconnector = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct dc_link *link;
+ uint8_t link_index = 0;
+ struct drm_device *dev = adev->dm.ddev;
+
+ if (adev == NULL)
+ return;
+
+ if (notify == NULL) {
+ DRM_ERROR("DMUB HPD callback notification was NULL");
+ return;
+ }
+
+ if (notify->link_index >= adev->dm.dc->link_count) {
+ DRM_ERROR("DMUB HPD index (%u) is out of range", notify->link_index);
+ return;
+ }
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ link_index = notify->link_index;
+
+ link = adev->dm.dc->links[link_index];
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (link && aconnector->dc_link == link) {
+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ hpd_aconnector = aconnector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ if (hpd_aconnector)
+ handle_hpd_irq_helper(hpd_aconnector);
+}
+
+/**
+ * register_dmub_notify_callback - Sets callback for DMUB notify
+ * @adev: amdgpu_device pointer
+ * @type: Type of dmub notification
+ * @callback: Dmub interrupt callback function
+ * @dmub_int_thread_offload: offload indicator
+ *
+ * API to register a dmub callback handler for a dmub notification.
+ * Also sets an indicator of whether the callback processing is to be
+ * offloaded to the dmub interrupt handling thread.
+ * Return: true if successfully registered, false if the callback is NULL
+ * or the notification type is out of range
+ */
+bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
+dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
+{
+ if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
+ adev->dm.dmub_callback[type] = callback;
+ adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
+ } else
+ return false;
+
+ return true;
+}
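A usage sketch matching the registrations made later in amdgpu_dm_init() within this same patch: AUX replies are handled inline in the interrupt handler, while HPD work is offloaded to a thread:

    /* Sketch of the two registrations this patch performs below. */
    register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
                                  dmub_aux_setconfig_callback, false);
    register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
                                  dmub_hpd_callback, true);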
+
+static void dm_handle_hpd_work(struct work_struct *work)
+{
+ struct dmub_hpd_work *dmub_hpd_wrk;
+
+ dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
+
+ if (!dmub_hpd_wrk->dmub_notify) {
+ DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ return;
+ }
+
+ if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
+ dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
+ dmub_hpd_wrk->dmub_notify);
+ }
+
+ kfree(dmub_hpd_wrk->dmub_notify);
+ kfree(dmub_hpd_wrk);
+}
+
#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -634,22 +752,47 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct amdgpu_display_manager *dm = &adev->dm;
struct dmcub_trace_buf_entry entry = { 0 };
uint32_t count = 0;
+ struct dmub_hpd_work *dmub_hpd_wrk;
+ struct dc_link *plink = NULL;
- if (dc_enable_dmub_notifications(adev->dm.dc)) {
- if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
- do {
- dc_stat_get_dmub_notification(adev->dm.dc, &notify);
- } while (notify.pending_notification);
-
- if (adev->dm.dmub_notify)
- memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
- if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
- complete(&adev->dm.dmub_aux_transfer_done);
- // TODO : HPD Implementation
+ if (dc_enable_dmub_notifications(adev->dm.dc) &&
+ irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
- } else {
- DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
- }
+ do {
+ dc_stat_get_dmub_notification(adev->dm.dc, &notify);
+ if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
+ DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ continue;
+ }
+ if (dm->dmub_thread_offload[notify.type]) {
+ dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
+ if (!dmub_hpd_wrk) {
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ return;
+ }
+ dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
+ if (!dmub_hpd_wrk->dmub_notify) {
+ kfree(dmub_hpd_wrk);
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ return;
+ }
+ INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
+ memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
+ dmub_hpd_wrk->adev = adev;
+ if (notify.type == DMUB_NOTIFICATION_HPD) {
+ plink = adev->dm.dc->links[notify.link_index];
+ if (plink) {
+ plink->hpd_status =
+ notify.hpd_status == DP_HPD_PLUG;
+ }
+ }
+ queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
+ } else {
+ dm->dmub_callback[notify.type](adev, &notify);
+ }
+ } while (notify.pending_notification);
}
@@ -667,7 +810,8 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
} while (count <= DMUB_TRACE_MAX_READ);
- ASSERT(count <= DMUB_TRACE_MAX_READ);
+ if (count > DMUB_TRACE_MAX_READ)
+ DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif
@@ -873,6 +1017,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
const unsigned char *fw_inst_const, *fw_bss_data;
uint32_t i, fw_inst_const_size, fw_bss_data_size;
bool has_hw_support;
+ struct dc *dc = adev->dm.dc;
if (!dmub_srv)
/* DMUB isn't supported on the ASIC. */
@@ -959,6 +1104,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
for (i = 0; i < fb_info->num_fb; ++i)
hw_params.fb[i] = &fb_info->fb[i];
+ switch (adev->asic_type) {
+ case CHIP_YELLOW_CARP:
+ if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
+ hw_params.dpia_supported = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
+#endif
+ }
+ break;
+ default:
+ break;
+ }
+
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error initializing DMUB HW: %d\n", status);
@@ -1083,6 +1241,114 @@ static void vblank_control_worker(struct work_struct *work)
}
#endif
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+ struct hpd_rx_irq_offload_work *offload_work;
+ struct amdgpu_dm_connector *aconnector;
+ struct dc_link *dc_link;
+ struct amdgpu_device *adev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ unsigned long flags;
+
+ offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+ aconnector = offload_work->offload_wq->aconnector;
+
+ if (!aconnector) {
+ DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ goto skip;
+ }
+
+ adev = drm_to_adev(aconnector->base.dev);
+ dc_link = aconnector->dc_link;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_sink(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+ mutex_unlock(&aconnector->hpd_lock);
+
+ if (new_connection_type == dc_connection_none)
+ goto skip;
+
+ if (amdgpu_in_reset(adev))
+ goto skip;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+ dc_link_dp_handle_automated_test(dc_link);
+ else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+ dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ dc_link_dp_handle_link_loss(dc_link);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_link_loss = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+ kfree(offload_work);
+}
+
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ int i = 0;
+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+ hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+ if (!hpd_rx_offload_wq)
+ return NULL;
+
+ for (i = 0; i < max_caps; i++) {
+ hpd_rx_offload_wq[i].wq =
+ create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+ if (hpd_rx_offload_wq[i].wq == NULL) {
+ DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq failed!");
+ while (--i >= 0)
+ destroy_workqueue(hpd_rx_offload_wq[i].wq);
+ kfree(hpd_rx_offload_wq);
+ return NULL;
+ }
+
+ spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
+ }
+
+ return hpd_rx_offload_wq;
+}
+
+struct amdgpu_stutter_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ { 0, 0, 0, 0, 0 },
+};
+
+static bool dm_should_disable_stutter(struct pci_dev *pdev)
+{
+ const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1138,17 +1404,27 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- init_data.flags.gpu_vm_support = true;
- if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
- init_data.flags.disable_dmcu = true;
- break;
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
init_data.flags.gpu_vm_support = true;
break;
default:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ init_data.flags.gpu_vm_support = true;
+ init_data.flags.disable_dmcu = true;
+ break;
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ init_data.flags.gpu_vm_support = true;
+ break;
+ case IP_VERSION(2, 0, 3):
+ init_data.flags.disable_dmcu = true;
+ break;
+ default:
+ break;
+ }
break;
}
@@ -1184,6 +1460,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ if (dm_should_disable_stutter(adev->pdev))
+ adev->dm.dc->debug.disable_stutter = true;
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
@@ -1202,6 +1480,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ if (!adev->dm.hpd_rx_offload_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ goto error;
+ }
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
struct dc_phy_addr_space_config pa_config;
@@ -1233,7 +1517,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
+ if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
@@ -1254,7 +1538,25 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
goto error;
}
+
+ adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
+ if (!adev->dm.delayed_hpd_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ goto error;
+ }
+
amdgpu_dm_outbox_init(adev);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
+ dmub_aux_setconfig_callback, false)) {
+ DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ goto error;
+ }
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ goto error;
+ }
+#endif
}
if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -1336,6 +1638,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
if (dc_enable_dmub_notifications(adev->dm.dc)) {
kfree(adev->dm.dmub_notify);
adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
}
if (adev->dm.dmub_bo)
@@ -1343,6 +1647,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
+ if (adev->dm.hpd_rx_offload_wq) {
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ if (adev->dm.hpd_rx_offload_wq[i].wq) {
+ destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+ adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+ }
+ }
+
+ kfree(adev->dm.hpd_rx_offload_wq);
+ adev->dm.hpd_rx_offload_wq = NULL;
+ }
+
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
@@ -1396,15 +1712,6 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_RENOIR:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
return 0;
case CHIP_NAVI12:
fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
@@ -1418,6 +1725,21 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
break;
default:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ return 0;
+ default:
+ break;
+ }
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
@@ -1496,35 +1818,37 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
enum dmub_status status;
int r;
- switch (adev->asic_type) {
- case CHIP_RENOIR:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
dmub_asic = DMUB_ASIC_DCN21;
fw_name_dmub = FIRMWARE_RENOIR_DMUB;
if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
break;
- case CHIP_SIENNA_CICHLID:
- dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
- break;
- case CHIP_NAVY_FLOUNDER:
- dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ case IP_VERSION(3, 0, 0):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
+ dmub_asic = DMUB_ASIC_DCN30;
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ } else {
+ dmub_asic = DMUB_ASIC_DCN30;
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ }
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(3, 0, 1):
dmub_asic = DMUB_ASIC_DCN301;
fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(3, 0, 2):
dmub_asic = DMUB_ASIC_DCN302;
fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(3, 0, 3):
dmub_asic = DMUB_ASIC_DCN303;
fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
break;
- case CHIP_YELLOW_CARP:
- dmub_asic = DMUB_ASIC_DCN31;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
break;
@@ -1823,10 +2147,9 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
* therefore, this function applies to navi10/12/14 but not Renoir
*
*/
- switch(adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
break;
default:
return 0;
@@ -1980,6 +2303,16 @@ context_alloc_fail:
return res;
}
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+ int i;
+
+ if (dm->hpd_rx_offload_wq) {
+ for (i = 0; i < dm->dc->caps.max_links; i++)
+ flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+ }
+}
+
static int dm_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -2001,6 +2334,8 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev);
+ hpd_rx_irq_work_suspend(dm);
+
return ret;
}
@@ -2011,6 +2346,8 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev);
+ hpd_rx_irq_work_suspend(dm);
+
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
return 0;
@@ -2157,7 +2494,7 @@ cleanup:
return;
}
-static void dm_set_dpms_off(struct dc_link *link)
+static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
{
struct dc_stream_state *stream_state;
struct amdgpu_dm_connector *aconnector = link->priv;
@@ -2178,6 +2515,7 @@ static void dm_set_dpms_off(struct dc_link *link)
}
stream_update.stream = stream_state;
+ acrtc_state->force_dpms_off = true;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
stream_state, &stream_update,
stream_state->ctx->dc->current_state);
@@ -2615,20 +2953,22 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_release(sink);
}
-static void handle_hpd_irq(void *param)
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
- struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
-#endif
+ struct dm_crtc_state *dm_crtc_state = NULL;
if (adev->dm.disable_hpd_irq)
return;
+ if (dm_con_state->base.state && dm_con_state->base.crtc)
+ dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
+ dm_con_state->base.state,
+ dm_con_state->base.crtc));
/*
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
@@ -2650,7 +2990,6 @@ static void handle_hpd_irq(void *param)
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
-
drm_modeset_lock_all(dev);
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
@@ -2660,8 +2999,9 @@ static void handle_hpd_irq(void *param)
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
if (new_connection_type == dc_connection_none &&
- aconnector->dc_link->type == dc_connection_none)
- dm_set_dpms_off(aconnector->dc_link);
+ aconnector->dc_link->type == dc_connection_none &&
+ dm_crtc_state)
+ dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
amdgpu_dm_update_connector_after_detect(aconnector);
@@ -2676,7 +3016,15 @@ static void handle_hpd_irq(void *param)
}
-static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+
+ handle_hpd_irq_helper(aconnector);
+}
+
+static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
uint8_t dret;
@@ -2754,6 +3102,25 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+ union hpd_irq_data hpd_irq_data)
+{
+ struct hpd_rx_irq_offload_work *offload_work =
+ kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+ if (!offload_work) {
+ DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ return;
+ }
+
+ INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+ offload_work->data = hpd_irq_data;
+ offload_work->offload_wq = offload_wq;
+
+ queue_work(offload_wq->wq, &offload_work->work);
+ DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
+}
+
static void handle_hpd_rx_irq(void *param)
{
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
@@ -2765,14 +3132,16 @@ static void handle_hpd_rx_irq(void *param)
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
union hpd_irq_data hpd_irq_data;
- bool lock_flag = 0;
+ bool link_loss = false;
+ bool has_left_work = false;
+ int idx = aconnector->base.index;
+ struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
if (adev->dm.disable_hpd_irq)
return;
-
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
* conflict, after implement i2c helper, this mutex should be
@@ -2780,43 +3149,41 @@ static void handle_hpd_rx_irq(void *param)
*/
mutex_lock(&aconnector->hpd_lock);
- read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
+ result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+ &link_loss, true, &has_left_work);
- if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
- (dc_link->type == dc_connection_mst_branch)) {
- if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
- result = true;
- dm_handle_hpd_rx_irq(aconnector);
- goto out;
- } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
- result = false;
- dm_handle_hpd_rx_irq(aconnector);
+ if (!has_left_work)
+ goto out;
+
+ if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ goto out;
+ }
+
+ if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg(aconnector);
goto out;
}
- }
- /*
- * TODO: We need the lock to avoid touching DC state while it's being
- * modified during automated compliance testing, or when link loss
- * happens. While this should be split into subhandlers and proper
- * interfaces to avoid having to conditionally lock like this in the
- * outer layer, we need this workaround temporarily to allow MST
- * lightup in some scenarios to avoid timeout.
- */
- if (!amdgpu_in_reset(adev) &&
- (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
- hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
- mutex_lock(&adev->dm.dc_lock);
- lock_flag = 1;
- }
+ if (link_loss) {
+ bool skip = false;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
- result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
-#else
- result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
-#endif
- if (!amdgpu_in_reset(adev) && lock_flag)
- mutex_unlock(&adev->dm.dc_lock);
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_link_loss;
+
+ if (!skip)
+ offload_wq->is_handling_link_loss = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+ }
out:
if (result && !is_mst_root_connector) {
@@ -2901,6 +3268,10 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);
+
+ if (adev->dm.hpd_rx_offload_wq)
+ adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+ aconnector;
}
}
}
@@ -2998,7 +3369,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
int i;
unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
- if (adev->asic_type >= CHIP_VEGA10)
+ if (adev->family >= AMDGPU_FAMILY_AI)
client_id = SOC15_IH_CLIENTID_DCE;
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -3715,6 +4086,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
int32_t primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
+ bool psr_feature_enabled = false;
dm->display_indexes_num = dm->dc->caps.max_streams;
/* Update the actual used number of crtc */
@@ -3783,18 +4155,32 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Use Outbox interrupt */
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_YELLOW_CARP:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(2, 1, 0):
if (register_outbox_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
break;
default:
- DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
+ DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
+ adev->ip_versions[DCE_HWIP][0]);
+ }
+
+ /* Determine whether to enable PSR support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ psr_feature_enabled = true;
+ break;
+ default:
+ psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
+ break;
+ }
}
#endif
@@ -3839,7 +4225,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
- if (amdgpu_dc_feature_mask & DC_PSR_MASK)
+
+ if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
}
@@ -3880,27 +4267,33 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
break;
+ default:
#if defined(CONFIG_DRM_AMD_DC_DCN)
- case CHIP_RAVEN:
- case CHIP_NAVI12:
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_RENOIR:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
- if (dcn10_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ adev->ip_versions[DCE_HWIP][0]);
goto fail;
}
- break;
#endif
- default:
- DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
- goto fail;
+ break;
}
return 0;
@@ -4047,42 +4440,44 @@ static int dm_early_init(void *handle)
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
+ default:
#if defined(CONFIG_DRM_AMD_DC_DCN)
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_VANGOGH:
- adev->mode_info.num_crtc = 4;
- adev->mode_info.num_hpd = 4;
- adev->mode_info.num_dig = 4;
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- adev->mode_info.num_crtc = 6;
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 6;
- break;
- case CHIP_YELLOW_CARP:
- adev->mode_info.num_crtc = 4;
- adev->mode_info.num_hpd = 4;
- adev->mode_info.num_dig = 4;
- break;
- case CHIP_NAVI14:
- case CHIP_DIMGREY_CAVEFISH:
- adev->mode_info.num_crtc = 5;
- adev->mode_info.num_hpd = 5;
- adev->mode_info.num_dig = 5;
- break;
- case CHIP_BEIGE_GOBY:
- adev->mode_info.num_crtc = 2;
- adev->mode_info.num_hpd = 2;
- adev->mode_info.num_dig = 2;
- break;
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(3, 0, 0):
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(3, 0, 3):
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ break;
+ default:
+ DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ adev->ip_versions[DCE_HWIP][0]);
+ return -EINVAL;
+ }
#endif
- default:
- DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
- return -EINVAL;
+ break;
}
amdgpu_dm_set_irq_funcs(adev);
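[Editor's note: the IP_VERSION() comparisons introduced throughout this patch rely on the version triplet being packed into a single integer so that ordinary integer ordering matches version ordering. A sketch of the assumed encoding follows; the real macro lives in the amdgpu IP-discovery headers.]

/* Assumed packing: major above minor above revision, so e.g.
 * IP_VERSION(10, 3, 0) > IP_VERSION(10, 1, 2) holds numerically.
 */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))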
@@ -4301,12 +4696,7 @@ fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
tiling_info->gfx9.num_rb_per_se =
adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
tiling_info->gfx9.shaderEnable = 1;
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER ||
- adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
- adev->asic_type == CHIP_BEIGE_GOBY ||
- adev->asic_type == CHIP_YELLOW_CARP ||
- adev->asic_type == CHIP_VANGOGH)
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
@@ -4672,6 +5062,16 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs) |
+ AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
@@ -4682,6 +5082,17 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_RETILE, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
@@ -4726,7 +5137,7 @@ get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, u
case AMDGPU_FAMILY_NV:
case AMDGPU_FAMILY_VGH:
case AMDGPU_FAMILY_YC:
- if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
add_gfx10_3_modifiers(adev, mods, &size, &capacity);
else
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
@@ -4763,10 +5174,27 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
if (modifier_has_dcc(modifier) && !force_disable_dcc) {
uint64_t dcc_address = afb->address + afb->base.offsets[1];
+ bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+ bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
dcc->enable = 1;
dcc->meta_pitch = afb->base.pitches[1];
- dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+ dcc->independent_64b_blks = independent_64b_blks;
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
+ if (independent_64b_blks && independent_128b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
+ else if (independent_128b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_128b;
+ else if (independent_64b_blks && !independent_128b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_64b;
+ else
+ dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
+ } else {
+ if (independent_64b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_64b;
+ else
+ dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
+ }
address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
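[Editor's note: a compact restatement of the dcc_ind_blk selection above, for readability.]

/* GFX10_RBPLUS tile version:
 *   64B && 128B independent blocks -> hubp_ind_block_64b_no_128bcl
 *   128B only                      -> hubp_ind_block_128b
 *   64B only                       -> hubp_ind_block_64b
 *   neither                        -> hubp_ind_block_unconstrained
 * Older tile versions:
 *   64B                            -> hubp_ind_block_64b
 *   otherwise                      -> hubp_ind_block_unconstrained
 */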
@@ -5602,9 +6030,15 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
{
struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps;
+ uint32_t max_dsc_target_bpp_limit_override = 0;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
+
+ if (stream->link && stream->link->local_sink)
+ max_dsc_target_bpp_limit_override =
+ stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
+
/* Set DSC policy according to dsc_clock_en */
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
@@ -5614,7 +6048,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
- 0,
+ max_dsc_target_bpp_limit_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
@@ -5654,7 +6088,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
* - Cinema HFR (48 FPS)
* - TV/PAL (50 FPS)
* - Commonly used (60 FPS)
- * - Multiples of 24 (48,72,96 FPS)
+ * - Multiples of 24 (48,72,96,120 FPS)
*
* The list of standard video formats is not huge and can be added to the
* connector modeset list beforehand. With that, userspace can leverage
@@ -5965,6 +6399,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+ state->force_dpms_off = cur->force_dpms_off;
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
@@ -7615,19 +8050,19 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
/* Standard FPS values
*
- * 23.976 - TV/NTSC
- * 24 - Cinema
- * 25 - TV/PAL
- * 29.97 - TV/NTSC
- * 30 - TV/NTSC
- * 48 - Cinema HFR
- * 50 - TV/PAL
- * 60 - Commonly used
- * 48,72,96 - Multiples of 24
+ * 23.976 - TV/NTSC
+ * 24 - Cinema
+ * 25 - TV/PAL
+ * 29.97 - TV/NTSC
+ * 30 - TV/NTSC
+ * 48 - Cinema HFR
+ * 50 - TV/PAL
+ * 60 - Commonly used
+ * 48,72,96,120 - Multiples of 24
*/
static const uint32_t common_rates[] = {
23976, 24000, 25000, 29970, 30000,
- 48000, 50000, 60000, 72000, 96000
+ 48000, 50000, 60000, 72000, 96000, 120000
};
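[Editor's note: a sketch of how a table like this is typically consumed when synthesizing fixed-refresh freesync modes — each candidate is kept only if it fits inside the panel's VRR window. Names follow the min_vfreq/max_vfreq fields used elsewhere in this file; this is an illustration, not the exact add_fs_modes body.]

/* common_rates[] entries are in millihertz (23976 == 23.976 Hz);
 * min_vfreq/max_vfreq are in Hz.
 */
uint32_t i, num_modes = 0;

for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
        if (common_rates[i] >= aconnector->min_vfreq * 1000 &&
            common_rates[i] <= aconnector->max_vfreq * 1000)
                num_modes++; /* candidate refresh rate is reachable */
}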
/*
@@ -7755,7 +8190,17 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
- aconnector->base.ycbcr_420_allowed =
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link->link_enc =
+ link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ if (!link->link_enc)
+ link->link_enc =
+ link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
+ }
+
+ if (link->link_enc)
+ aconnector->base.ycbcr_420_allowed =
link->link_enc->features.dp_ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DVID:
@@ -7870,7 +8315,8 @@ create_i2c(struct ddc_service *ddc_service,
snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
i2c_set_adapdata(&i2c->base, i2c);
i2c->ddc_service = ddc_service;
- i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
+ if (i2c->ddc_service->ddc_pin)
+ i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
return i2c;
}
@@ -8681,7 +9127,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* and rely on sending it from software.
*/
if (acrtc_attach->base.state->event &&
- acrtc_state->active_planes > 0) {
+ acrtc_state->active_planes > 0 &&
+ !acrtc_state->force_dpms_off) {
drm_crtc_vblank_get(pcrtc);
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
@@ -10158,18 +10605,18 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state)
{
- struct drm_plane_state *new_cursor_state, *new_primary_state;
- int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
+ struct drm_plane *cursor = crtc->cursor, *underlying;
+ struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ int i;
+ int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
* cursor per pipe but it's going to inherit the scaling and
* positioning from the underlying pipe. Check the cursor plane's
- * blending properties match the primary plane's. */
+ * blending properties match the underlying planes'. */
- new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
- new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
- if (!new_cursor_state || !new_primary_state ||
- !new_cursor_state->fb || !new_primary_state->fb) {
+ new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
+ if (!new_cursor_state || !new_cursor_state->fb) {
return 0;
}
@@ -10178,15 +10625,34 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
cursor_scale_h = new_cursor_state->crtc_h * 1000 /
(new_cursor_state->src_h >> 16);
- primary_scale_w = new_primary_state->crtc_w * 1000 /
- (new_primary_state->src_w >> 16);
- primary_scale_h = new_primary_state->crtc_h * 1000 /
- (new_primary_state->src_h >> 16);
+ for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ /* Narrow down to non-cursor planes on the same CRTC as the cursor */
+ if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
+ continue;
+
+ /* Ignore disabled planes */
+ if (!new_underlying_state->fb)
+ continue;
- if (cursor_scale_w != primary_scale_w ||
- cursor_scale_h != primary_scale_h) {
- drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
- return -EINVAL;
+ underlying_scale_w = new_underlying_state->crtc_w * 1000 /
+ (new_underlying_state->src_w >> 16);
+ underlying_scale_h = new_underlying_state->crtc_h * 1000 /
+ (new_underlying_state->src_h >> 16);
+
+ if (cursor_scale_w != underlying_scale_w ||
+ cursor_scale_h != underlying_scale_h) {
+ drm_dbg_atomic(crtc->dev,
+ "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
+ cursor->base.id, cursor->name, underlying->base.id, underlying->name);
+ return -EINVAL;
+ }
+
+ /* If this plane covers the whole CRTC, no need to check planes underneath */
+ if (new_underlying_state->crtc_x <= 0 &&
+ new_underlying_state->crtc_y <= 0 &&
+ new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
+ new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
+ break;
}
return 0;
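[Editor's note: a worked example of the scale arithmetic above. src_w/src_h are 16.16 fixed point, so the >> 16 yields whole pixels, and each ratio is kept in units of 1/1000.]

/* Illustrative values:
 *   cursor: crtc_w = 64,   src_w = 64 << 16   -> scale = 64 * 1000 / 64     = 1000
 *   plane : crtc_w = 3840, src_w = 1920 << 16 -> scale = 3840 * 1000 / 1920 = 2000
 * 1000 != 2000, so the check returns -EINVAL: the cursor inherits the
 * underlying pipe's scaling and cannot be scaled independently.
 */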
@@ -10217,53 +10683,6 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
}
#endif
-static int validate_overlay(struct drm_atomic_state *state)
-{
- int i;
- struct drm_plane *plane;
- struct drm_plane_state *new_plane_state;
- struct drm_plane_state *primary_state, *overlay_state = NULL;
-
- /* Check if primary plane is contained inside overlay */
- for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
- if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
- if (drm_atomic_plane_disabling(plane->state, new_plane_state))
- return 0;
-
- overlay_state = new_plane_state;
- continue;
- }
- }
-
- /* check if we're making changes to the overlay plane */
- if (!overlay_state)
- return 0;
-
- /* check if overlay plane is enabled */
- if (!overlay_state->crtc)
- return 0;
-
- /* find the primary plane for the CRTC that the overlay is enabled on */
- primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
- if (IS_ERR(primary_state))
- return PTR_ERR(primary_state);
-
- /* check if primary plane is enabled */
- if (!primary_state->crtc)
- return 0;
-
- /* Perform the bounds check to ensure the overlay plane covers the primary */
- if (primary_state->crtc_x < overlay_state->crtc_x ||
- primary_state->crtc_y < overlay_state->crtc_y ||
- primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
- primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
- DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device
@@ -10306,6 +10725,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_crtc_state *dm_old_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_mst_topology_mgr *mgr;
#endif
trace_amdgpu_dm_atomic_check_begin(state);
@@ -10445,10 +10866,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = validate_overlay(state);
- if (ret)
- goto fail;
-
/* Add new/modified planes */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
@@ -10514,6 +10931,33 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* set the slot info for each mst_state based on the link encoding format */
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ u8 link_coding_cap;
+
+ if (!mgr->mst_state)
+ continue;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ int id = connector->index;
+
+ if (id == mst_state->mgr->conn_base_id) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+ drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ }
+#endif
/**
* Streams and planes are reset when there are changes that affect
* bandwidth. Anything that affects bandwidth needs to go through
@@ -10821,6 +11265,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = NULL;
+ struct dc_sink *sink;
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -10832,28 +11277,31 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
goto update;
}
- if (!edid) {
+ sink = amdgpu_dm_connector->dc_sink ?
+ amdgpu_dm_connector->dc_sink :
+ amdgpu_dm_connector->dc_em_sink;
+
+ if (!edid || !sink) {
dm_con_state = to_dm_connector_state(connector->state);
amdgpu_dm_connector->min_vfreq = 0;
amdgpu_dm_connector->max_vfreq = 0;
amdgpu_dm_connector->pixel_clock_mhz = 0;
+ connector->display_info.monitor_range.min_vfreq = 0;
+ connector->display_info.monitor_range.max_vfreq = 0;
+ freesync_capable = false;
goto update;
}
dm_con_state = to_dm_connector_state(connector->state);
- if (!amdgpu_dm_connector->dc_sink) {
- DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
- goto update;
- }
if (!adev->dm.freesync_module)
goto update;
- if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
- || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+ || sink->sink_signal == SIGNAL_TYPE_EDP) {
bool edid_check_required = false;
if (edid) {
@@ -10900,7 +11348,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
freesync_capable = true;
}
}
- } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (i >= 0 && vsdb_info.freesync_supported) {
timing = &edid->detailed_timings[i];
@@ -10982,29 +11430,75 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
return value;
}
-int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
- struct aux_payload *payload, enum aux_return_code_type *operation_result)
+int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
+ uint8_t status_type, uint32_t *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ int return_status = -1;
+ struct dmub_notification *p_notify = adev->dm.dmub_notify;
+
+ if (is_cmd_aux) {
+ if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
+ return_status = p_notify->aux_reply.length;
+ *operation_result = p_notify->result;
+ } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
+ *operation_result = AUX_RET_ERROR_TIMEOUT;
+ } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
+ *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+ } else {
+ *operation_result = AUX_RET_ERROR_UNKNOWN;
+ }
+ } else {
+ if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
+ return_status = 0;
+ *operation_result = p_notify->sc_status;
+ } else {
+ *operation_result = SET_CONFIG_UNKNOWN_ERROR;
+ }
+ }
+
+ return return_status;
+}
+
+int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
+ unsigned int link_index, void *cmd_payload, void *operation_result)
{
struct amdgpu_device *adev = ctx->driver_context;
int ret = 0;
- dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
- ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
+ if (is_cmd_aux) {
+ dc_process_dmub_aux_transfer_async(ctx->dc,
+ link_index, (struct aux_payload *)cmd_payload);
+ } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
+ (struct set_config_cmd_payload *)cmd_payload,
+ adev->dm.dmub_notify)) {
+ return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
+ ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
+ (uint32_t *)operation_result);
+ }
+
+ ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
if (ret == 0) {
- *operation_result = AUX_RET_ERROR_TIMEOUT;
- return -1;
+ DRM_ERROR("wait_for_completion_timeout timeout!");
+ return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
+ ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
+ (uint32_t *)operation_result);
}
- *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
- if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
- (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
+ if (is_cmd_aux) {
+ if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
+ struct aux_payload *payload = (struct aux_payload *)cmd_payload;
- // For read case, Copy data to payload
- if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
- (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
- memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
- adev->dm.dmub_notify->aux_reply.length);
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
+ if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
+ payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
+ memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
+ adev->dm.dmub_notify->aux_reply.length);
+ }
+ }
}
- return adev->dm.dmub_notify->aux_reply.length;
+ return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
+ ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
+ (uint32_t *)operation_result);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d1d353a7c77d..37e61a88d49e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -47,6 +47,15 @@
#define AMDGPU_DM_MAX_CRTC 6
#define AMDGPU_DM_MAX_NUM_EDP 2
+
+#define AMDGPU_DMUB_NOTIFICATION_MAX 5
+
+/**
+ * DMUB Async to Sync Mechanism Status
+ */
+#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
+#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
+#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -86,6 +95,21 @@ struct dm_compressor_info {
uint64_t gpu_addr;
};
+typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
+
+/**
+ * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
+ *
+ * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
+ * @dmub_notify: notification for callback function
+ * @adev: amdgpu_device pointer
+ */
+struct dmub_hpd_work {
+ struct work_struct handle_hpd_work;
+ struct dmub_notification *dmub_notify;
+ struct amdgpu_device *adev;
+};
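[Editor's note: a hypothetical sketch of the producer side — how a low-priority outbox interrupt might package a DMUB notification into this structure and punt it to process context via the delayed_hpd_wq workqueue added below. Illustration only; the worker and queuing function names are invented.]

static void example_hpd_worker(struct work_struct *work)
{
        struct dmub_hpd_work *hpd_work =
                container_of(work, struct dmub_hpd_work, handle_hpd_work);

        /* ... invoke the registered dmub_callback for this notification ... */

        kfree(hpd_work->dmub_notify);
        kfree(hpd_work);
}

static void example_queue_dmub_hpd_work(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        struct dmub_hpd_work *hpd_work = kzalloc(sizeof(*hpd_work), GFP_ATOMIC);

        if (!hpd_work)
                return;

        hpd_work->adev = adev;
        hpd_work->dmub_notify = kmemdup(notify, sizeof(*notify), GFP_ATOMIC);
        if (!hpd_work->dmub_notify) {
                kfree(hpd_work);
                return;
        }

        INIT_WORK(&hpd_work->handle_hpd_work, example_hpd_worker);
        queue_work(adev->dm.delayed_hpd_wq, &hpd_work->handle_hpd_work);
}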
+
/**
* struct vblank_control_work - Work data for vblank control
* @work: Kernel work data for the work event
@@ -155,6 +179,48 @@ struct dal_allocation {
};
/**
+ * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
+ * offload work
+ */
+struct hpd_rx_irq_offload_work_queue {
+ /**
+ * @wq: workqueue structure to queue offload work.
+ */
+ struct workqueue_struct *wq;
+ /**
+ * @offload_lock: To protect fields of offload work queue.
+ */
+ spinlock_t offload_lock;
+ /**
+ * @is_handling_link_loss: Used to prevent queuing a new link-loss event
+ * while a previous one is still being handled
+ */
+ bool is_handling_link_loss;
+ /**
+ * @aconnector: The aconnector that this work queue is attached to
+ */
+ struct amdgpu_dm_connector *aconnector;
+};
+
+/**
+ * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
+ */
+struct hpd_rx_irq_offload_work {
+ /**
+ * @work: offload work
+ */
+ struct work_struct work;
+ /**
+ * @data: reference irq data which is used while handling offload work
+ */
+ union hpd_irq_data data;
+ /**
+ * @offload_wq: offload work queue that this work is queued to
+ */
+ struct hpd_rx_irq_offload_work_queue *offload_wq;
+};
+
+/**
* struct amdgpu_display_manager - Central amdgpu display manager device
*
* @dc: Display Core control structure
@@ -190,9 +256,31 @@ struct amdgpu_display_manager {
*/
struct dmub_srv *dmub_srv;
+ /**
+ * @dmub_notify:
+ *
+ * Notification from DMUB.
+ */
+
struct dmub_notification *dmub_notify;
/**
+ * @dmub_callback:
+ *
+ * Callback functions to handle notification from DMUB.
+ */
+
+ dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];
+
+ /**
+ * @dmub_thread_offload:
+ *
+ * Flag to indicate whether the callback should be offloaded to a worker thread.
+ */
+
+ bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];
+
+ /**
* @dmub_fb_info:
*
* Framebuffer regions for the DMUB.
@@ -422,7 +510,12 @@ struct amdgpu_display_manager {
*/
struct crc_rd_work *crc_rd_wrk;
#endif
-
+ /**
+ * @hpd_rx_offload_wq:
+ *
+ * Work queues used to offload hpd_rx_irq handling work, indexed by connector.
+ */
+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
/**
* @mst_encoders:
*
@@ -439,6 +532,7 @@ struct amdgpu_display_manager {
*/
struct list_head da_list;
struct completion dmub_aux_transfer_done;
+ struct workqueue_struct *delayed_hpd_wq;
/**
* @brightness:
@@ -542,6 +636,8 @@ struct dm_crtc_state {
bool dsc_force_changed;
bool vrr_supported;
+
+ bool force_dpms_off;
struct mod_freesync_config freesync_config;
struct dc_info_packet vrr_infopacket;
@@ -632,6 +728,7 @@ void amdgpu_dm_update_connector_after_detect(
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
-int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
- struct aux_payload *payload, enum aux_return_code_type *operation_result);
+int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
+ struct dc_context *ctx, unsigned int link_index,
+ void *payload, void *operation_result);
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 87daa78a32b8..3655663e079b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -247,6 +247,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
{
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
struct dc_link_settings prefer_link_settings;
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
@@ -263,7 +264,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -293,6 +294,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
case LINK_RATE_RBR2:
case LINK_RATE_HIGH2:
case LINK_RATE_HIGH3:
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_UHBR10:
+#endif
break;
default:
valid_input = false;
@@ -313,7 +317,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
prefer_link_settings.lane_count = param[0];
prefer_link_settings.link_rate = param[1];
- dp_retrain_link_dp_test(link, &prefer_link_settings, false);
+ dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
kfree(wr_buf);
return size;
@@ -378,9 +382,9 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
return -EINVAL;
snprintf(rd_buf, rd_buf_size, " %d %d %d\n",
- link->cur_lane_setting.VOLTAGE_SWING,
- link->cur_lane_setting.PRE_EMPHASIS,
- link->cur_lane_setting.POST_CURSOR2);
+ link->cur_lane_setting[0].VOLTAGE_SWING,
+ link->cur_lane_setting[0].PRE_EMPHASIS,
+ link->cur_lane_setting[0].POST_CURSOR2);
while (size) {
if (*pos >= rd_buf_size)
@@ -487,7 +491,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -639,7 +643,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -732,7 +736,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
}
for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
- link_training_settings.lane_settings[i] = link->cur_lane_setting;
+ link_training_settings.lane_settings[i] = link->cur_lane_setting[i];
dc_link_set_test_pattern(
link,
@@ -914,7 +918,7 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
&param, buf,
max_param_num,
&param_nums)) {
@@ -1211,7 +1215,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1396,7 +1400,7 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1581,7 +1585,7 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1766,7 +1770,7 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1944,7 +1948,7 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -2382,7 +2386,7 @@ static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index c5f1dc3b5961..5bfdc66b5867 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -448,6 +448,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct drm_connector_state *conn_state;
+ struct dc_sink *sink = NULL;
+ bool link_is_hdcp14 = false;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -460,8 +462,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->index = aconnector->base.index;
display->state = MOD_HDCP_DISPLAY_ACTIVE;
- if (aconnector->dc_sink != NULL)
- link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
+ if (aconnector->dc_sink)
+ sink = aconnector->dc_sink;
+ else if (aconnector->dc_em_sink)
+ sink = aconnector->dc_em_sink;
+
+ if (sink != NULL)
+ link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal);
display->controller = CONTROLLER_ID_D0 + config->otg_inst;
display->dig_fe = config->dig_fe;
@@ -470,8 +477,9 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->stream_enc_idx = config->stream_enc_idx;
link->link_enc_idx = config->link_enc_idx;
link->phy_idx = config->phy_idx;
- link->hdcp_supported_informational = dc_link_is_hdcp14(aconnector->dc_link,
- aconnector->dc_sink->sink_signal) ? 1 : 0;
+ if (sink)
+ link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal);
+ link->hdcp_supported_informational = link_is_hdcp14;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.assr_enabled = config->assr_enabled;
link->dp.mst_enabled = config->mst_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 6fee12c91ef5..8cbeeb7c986d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -40,6 +40,39 @@
#include "dm_helpers.h"
+struct monitor_patch_info {
+ unsigned int manufacturer_id;
+ unsigned int product_id;
+ void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
+ unsigned int patch_param;
+};
+static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);
+
+static const struct monitor_patch_info monitor_patch_table[] = {
+{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
+{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
+};
+
+static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
+{
+ if (edid_caps)
+ edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
+}
+
+static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++)
+ if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id)
+ && (edid_caps->product_id == monitor_patch_table[i].product_id)) {
+ monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
+ ret++;
+ }
+
+ return ret;
+}
+
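[Editor's note: with the helper above, adding a panel quirk is a one-line table entry. A hypothetical example follows — the 0xABCD:0x1234 IDs are invented for illustration; real entries must use the EDID manufacturer/product IDs of the affected panel.]

static const struct monitor_patch_info example_patch_table[] = {
{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15}, /* existing entry */
{0xABCD, 0x1234, set_max_dsc_bpp_limit, 16}, /* invented example */
};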
/* dm_helpers_parse_edid_caps
*
* Parse edid caps
@@ -125,6 +158,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
kfree(sads);
kfree(sadb);
+ amdgpu_dm_patch_edid_caps(edid_caps);
+
return result;
}
@@ -184,6 +219,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
bool ret;
+ u8 link_coding_cap = DP_8b_10b_ENCODING;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
/* Accessing the connector state is required for vcpi_slots allocation
@@ -203,6 +239,10 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
mst_port = aconnector->port;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+#endif
+
if (enable) {
ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
@@ -216,7 +256,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
}
/* It's OK for this to fail */
- drm_dp_update_payload_part1(mst_mgr);
+ drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0 : 1);
/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each
@@ -648,8 +688,21 @@ int dm_helper_dmub_aux_transfer_sync(
struct aux_payload *payload,
enum aux_return_code_type *operation_result)
{
- return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload, operation_result);
+ return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
+ link->link_index, (void *)payload,
+ (void *)operation_result);
}
+
+int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
+ const struct dc_link *link,
+ struct set_config_cmd_payload *payload,
+ enum set_config_status *operation_result)
+{
+ return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx,
+ link->link_index, (void *)payload,
+ (void *)operation_result);
+}
+
void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
/* TODO: something */
@@ -751,3 +804,17 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
&new_downspread.raw,
sizeof(new_downspread));
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
+{
+ // FPGA programming for this clock in the diags framework needs to
+ // go through the dm layer, therefore leave a dummy interface here
+}
+
+
+void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
+{
+ /* TODO: add periodic detection implementation */
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 7af0d58c231b..874a49b605c7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -64,6 +64,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
+ payload.write_status_update =
+ (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
@@ -542,7 +544,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
params[i].sink->ctx->dc->res_pool->dscs[0],
&params[i].sink->dsc_caps.dsc_dec_caps,
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
- 0,
+ params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
0,
params[i].timing,
&params[i].timing->dsc_cfg)) {
@@ -574,7 +576,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
param.sink->ctx->dc->debug.dsc_min_slice_height_override,
- 0,
+ param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
(int) kbps, param.timing, &dsc_config);
return dsc_config.bits_per_pixel;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 70a554f1e725..c022e56f9459 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -107,6 +107,8 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*/
// Init fail safe of 2 frames static
unsigned int num_frames_static = 2;
+ unsigned int power_opt = 0;
+ bool psr_enable = true;
DRM_DEBUG_DRIVER("Enabling psr...\n");
@@ -133,7 +135,9 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
&stream, 1,
&params);
- return dc_link_set_psr_allow_active(link, true, false, false);
+ power_opt |= psr_power_opt_z10_static_screen;
+
+ return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
}
/*
@@ -144,10 +148,12 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*/
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
+ unsigned int power_opt = 0;
+ bool psr_enable = false;
DRM_DEBUG_DRIVER("Disabling psr...\n");
- return dc_link_set_psr_allow_active(stream->link, false, true, false);
+ return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
}
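[Editor's note: both call sites above follow the reworked dc_link_set_psr_allow_active() signature visible in this patch — enable state and power options are now passed by pointer, with NULL power options accepted (see the clk_mgr.c hunks below). A condensed usage sketch; the meaning of the two boolean arguments (wait / force static) is inferred from the call sites, not confirmed here.]

bool psr_enable = true;
unsigned int power_opt = psr_power_opt_z10_static_screen;

/* enable, allowing Z10 on static screens */
dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);

psr_enable = false;
/* disable, waiting for completion, with no power options */
dc_link_set_psr_allow_active(link, &psr_enable, true, false, NULL);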
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
index b1bf80da3a55..ab0c6d191038 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
@@ -52,7 +52,7 @@ static DEFINE_PER_CPU(int, fpu_recursion_depth);
* This function tells if the code is already under FPU protection or not. A
* function that works as an API for a set of FPU operations can use this
* function for checking if the caller invoked it after DC_FP_START(). For
- * example, take a look at dcn2x.c file.
+ * example, take a look at dcn20_fpu.c file.
*/
inline void dc_assert_fp_enabled(void)
{
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 943fcb164876..b1f0d6260226 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -30,6 +30,7 @@ DC_LIBS += dcn20
DC_LIBS += dsc
DC_LIBS += dcn10 dml
DC_LIBS += dcn21
+DC_LIBS += dcn201
DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn302
@@ -58,7 +59,7 @@ include $(AMD_DC)
DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
-dc_link_enc_cfg.o dc_link_dpcd.o
+dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o
ifdef CONFIG_DRM_AMD_DC_DCN
DISPLAY_CORE += dc_vm_helper.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 6dbde74c1e06..a4bef4364afd 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -99,6 +99,10 @@ static enum bp_result get_firmware_info_v3_2(
struct bios_parser *bp,
struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v3_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
struct atom_display_object_path_v2 *object);
@@ -1426,8 +1430,10 @@ static enum bp_result bios_parser_get_firmware_info(
break;
case 2:
case 3:
- case 4:
result = get_firmware_info_v3_2(bp, info);
+ break;
+ case 4:
+ result = get_firmware_info_v3_4(bp, info);
break;
default:
break;
@@ -1575,6 +1581,88 @@ static enum bp_result get_firmware_info_v3_2(
return BP_RESULT_OK;
}
+static enum bp_result get_firmware_info_v3_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ struct atom_firmware_info_v3_4 *firmware_info;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+ struct atom_display_controller_info_v4_1 *dce_info_v4_1 = NULL;
+ struct atom_display_controller_info_v4_4 *dce_info_v4_4 = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(struct atom_firmware_info_v3_4,
+ DATA_TABLES(firmwareinfo));
+
+ if (!firmware_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 4:
+ switch (revision.minor) {
+ case 4:
+ dce_info_v4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4,
+ DATA_TABLES(dce_info));
+
+ if (!dce_info_v4_4)
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* 100MHz expected */
+ info->pll_info.crystal_frequency = dce_info_v4_4->dce_refclk_10khz * 10;
+ info->dp_phy_ref_clk = dce_info_v4_4->dpphy_refclk_10khz * 10;
+ /* 50MHz expected */
+ info->i2c_engine_ref_clk = dce_info_v4_4->i2c_engine_refclk_10khz * 10;
+
+ /* Get SMU Display PLL VCO Frequency in KHz*/
+ info->smu_gpu_pll_output_freq = dce_info_v4_4->dispclk_pll_vco_freq * 10;
+ break;
+
+ default:
+ /* should not get here; fall back to v4_1 parsing as before */
+ dce_info_v4_1 = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+
+ if (!dce_info_v4_1)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->pll_info.crystal_frequency = dce_info_v4_1->dce_refclk_10khz * 10;
+ info->dp_phy_ref_clk = dce_info_v4_1->dpphy_refclk_10khz * 10;
+ info->i2c_engine_ref_clk = dce_info_v4_1->i2c_engine_refclk_10khz * 10;
+ break;
+ }
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(smu_info));
+ get_atom_data_table_revision(header, &revision);
+
+ // Convert from 10 kHz units into kHz units.
+ info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+
+ if (firmware_info->board_i2c_feature_id == 0x2) {
+ info->oem_i2c_present = true;
+ info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id;
+ } else {
+ info->oem_i2c_present = false;
+ }
+
+ return BP_RESULT_OK;
+}
+
static enum bp_result bios_parser_get_encoder_cap_info(
struct dc_bios *dcb,
struct graphics_object_id object_id,
@@ -1604,6 +1692,16 @@ static enum bp_result bios_parser_get_encoder_cap_info(
ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
info->HDMI_6GB_EN = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ info->IS_DP2_CAPABLE = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_DP2) ? 1 : 0;
+ info->DP_UHBR10_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_UHBR10_EN) ? 1 : 0;
+ info->DP_UHBR13_5_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN) ? 1 : 0;
+ info->DP_UHBR20_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0;
+#endif
info->DP_IS_USB_C = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0;
@@ -2223,6 +2321,8 @@ static enum bp_result get_integrated_info_v2_2(
info->ext_disp_conn_info.checksum =
info_v2_2->extdispconninfo.checksum;
+ info->ext_disp_conn_info.fixdpvoltageswing =
+ info_v2_2->extdispconninfo.fixdpvoltageswing;
info->edp1_info.edp_backlight_pwm_hz =
le16_to_cpu(info_v2_2->edp1_info.edp_backlight_pwm_hz);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index f1f672a997d7..9afa5eb2e6d3 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -44,9 +44,7 @@
bp->base.ctx->logger
#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
- (((char *)(&((\
- struct atom_master_list_of_##MasterOrData##_functions_v2_1 *)0)\
- ->FieldName)-(char *)0)/sizeof(uint16_t))
+ (offsetof(struct atom_master_list_of_##MasterOrData##_functions_v2_1, FieldName) / sizeof(uint16_t))
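[Editor's note: the rewrite above swaps the classic null-pointer-cast offset trick for the standard offsetof() macro. Both compute the same byte offset of a field within the struct, but offsetof() avoids arithmetic on a null pointer, which is formally undefined behavior.]

/* Old idiom (undefined behavior in strict C):
 *   ((char *)&((struct foo *)0)->field - (char *)0)
 * New (well-defined, from <linux/stddef.h>):
 *   offsetof(struct foo, field)
 * Dividing by sizeof(uint16_t) then turns the byte offset into an index
 * into the table of 16-bit command-table entries.
 */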
#define EXEC_BIOS_CMD_TABLE(fname, params)\
(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
@@ -340,6 +338,13 @@ static enum bp_result transmitter_control_v1_7(
const struct command_table_helper *cmd = bp->cmd_helper;
struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_0;
+
+ if (dc_is_dp_signal(cntl->signal))
+ hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_DP_0;
+#endif
+
dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter);
dig_v1_7.action = (uint8_t)cntl->action;
@@ -353,6 +358,9 @@ static enum bp_result transmitter_control_v1_7(
dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dig_v1_7.HPO_instance = hpo_instance;
+#endif
dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10;
if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index cb3fd44cb1ed..eedc553f340e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -70,6 +70,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCN_VERSION_1_01:
case DCN_VERSION_2_0:
case DCN_VERSION_2_1:
+ case DCN_VERSION_2_01:
case DCN_VERSION_3_0:
case DCN_VERSION_3_01:
case DCN_VERSION_3_02:
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 0e18df1283b6..6b248cd2a461 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -459,9 +459,9 @@ static void dcn_bw_calc_rq_dlg_ttu(
struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs;
struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs;
struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs;
- struct _vcs_dpi_display_rq_params_st rq_param = {0};
- struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0};
- struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } };
+ struct _vcs_dpi_display_rq_params_st *rq_param = &pipe->dml_rq_param;
+ struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param = &pipe->dml_dlg_sys_param;
+ struct _vcs_dpi_display_e2e_pipe_params_st *input = &pipe->dml_input;
float total_active_bw = 0;
float total_prefetch_bw = 0;
int total_flip_bytes = 0;
@@ -470,45 +470,48 @@ static void dcn_bw_calc_rq_dlg_ttu(
memset(dlg_regs, 0, sizeof(*dlg_regs));
memset(ttu_regs, 0, sizeof(*ttu_regs));
memset(rq_regs, 0, sizeof(*rq_regs));
+ memset(rq_param, 0, sizeof(*rq_param));
+ memset(dlg_sys_param, 0, sizeof(*dlg_sys_param));
+ memset(input, 0, sizeof(*input));
for (i = 0; i < number_of_planes; i++) {
total_active_bw += v->read_bandwidth[i];
total_prefetch_bw += v->prefetch_bandwidth[i];
total_flip_bytes += v->total_immediate_flip_bytes[i];
}
- dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
- if (dlg_sys_param.total_flip_bw < 0.0)
- dlg_sys_param.total_flip_bw = 0;
-
- dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark;
- dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
- dlg_sys_param.t_urg_wm_us = v->urgent_watermark;
- dlg_sys_param.t_extra_us = v->urgent_extra_latency;
- dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
- dlg_sys_param.total_flip_bytes = total_flip_bytes;
-
- pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe);
- input.clks_cfg.dcfclk_mhz = v->dcfclk;
- input.clks_cfg.dispclk_mhz = v->dispclk;
- input.clks_cfg.dppclk_mhz = v->dppclk;
- input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
- input.clks_cfg.socclk_mhz = v->socclk;
- input.clks_cfg.voltage = v->voltage_level;
+ dlg_sys_param->total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
+ if (dlg_sys_param->total_flip_bw < 0.0)
+ dlg_sys_param->total_flip_bw = 0;
+
+ dlg_sys_param->t_mclk_wm_us = v->dram_clock_change_watermark;
+ dlg_sys_param->t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
+ dlg_sys_param->t_urg_wm_us = v->urgent_watermark;
+ dlg_sys_param->t_extra_us = v->urgent_extra_latency;
+ dlg_sys_param->deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
+ dlg_sys_param->total_flip_bytes = total_flip_bytes;
+
+ pipe_ctx_to_e2e_pipe_params(pipe, &input->pipe);
+ input->clks_cfg.dcfclk_mhz = v->dcfclk;
+ input->clks_cfg.dispclk_mhz = v->dispclk;
+ input->clks_cfg.dppclk_mhz = v->dppclk;
+ input->clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+ input->clks_cfg.socclk_mhz = v->socclk;
+ input->clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger;
- input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
- input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
+ input->dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
+ input->dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
//input[in_idx].dout.output_standard;
/*todo: soc->sr_enter_plus_exit_time??*/
- dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
+ dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
- dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src);
+ dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src);
dml1_extract_rq_regs(dml, rq_regs, rq_param);
dml1_rq_dlg_get_dlg_params(
dml,
dlg_regs,
ttu_regs,
- rq_param.dlg,
+ &rq_param->dlg,
dlg_sys_param,
input,
true,
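For illustration: the dcn_calcs.c hunk above moves three large DML parameter structs off the kernel stack and into per-pipe backing storage, clearing them at each use instead of at declaration. A minimal sketch of the pattern, with hypothetical type and field names (not the driver's):

#include <string.h>

struct big_params { unsigned char blob[4096]; };   /* stand-in for the DML structs */
struct pipe { struct big_params dml_params; };     /* hypothetical backing storage */

static void fill_params(struct big_params *p) { p->blob[0] = 1; }

/* Before: a several-KB struct on the kernel stack risks blowing the frame limit. */
static void calc_on_stack(void)
{
	struct big_params params = {0};

	fill_params(&params);
}

/* After: storage lives in the long-lived pipe context and is memset at each use. */
static void calc_in_context(struct pipe *pipe)
{
	memset(&pipe->dml_params, 0, sizeof(pipe->dml_params));
	fill_params(&pipe->dml_params);
}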
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 7fa0b007a7ea..6bd73e49a6d2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -94,6 +94,15 @@ AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DC
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
###############################################################################
+# DCN201
+###############################################################################
+CLK_MGR_DCN201 = dcn201_clk_mgr.o
+
+AMD_DAL_CLK_MGR_DCN201 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn201/,$(CLK_MGR_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN201)
+
+###############################################################################
# DCN21
###############################################################################
CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index bb31541f8072..26f96ee32472 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -39,6 +39,7 @@
#include "dcn10/rv2_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dcn21/rn_clk_mgr.h"
+#include "dcn201/dcn201_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dcn301/vg_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
@@ -99,11 +100,13 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
if (edp_num) {
for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
+ bool allow_active = false;
+
edp_link = edp_links[panel_inst];
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
- dc_link_set_psr_allow_active(edp_link, false, false, false);
+ dc_link_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
}
}
@@ -123,7 +126,7 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
dc_link_set_psr_allow_active(edp_link,
- clk_mgr->psr_allow_active_cache, false, false);
+ &clk_mgr->psr_allow_active_cache, false, false, NULL);
}
}
@@ -256,6 +259,10 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
+ if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
+ dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base;
+ }
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
@@ -278,13 +285,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
BREAK_TO_DEBUGGER();
return NULL;
}
- if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev)) {
- /* TODO: to add DCN31 clk_mgr support, once CLK IP header files are available,
- * for now use DCN3.0 clk mgr.
- */
- dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- return &clk_mgr->base.base;
- }
+
+ dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
#endif
@@ -321,7 +323,6 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
break;
case FAMILY_YELLOW_CARP:
- if (ASICREV_IS_YELLOW_CARP(clk_mgr_base->ctx->asic_id.hw_internal_rev))
dcn31_clk_mgr_destroy(clk_mgr);
break;
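For illustration: the clk_mgr.c hunks above track an API change in which dc_link_set_psr_allow_active() takes its settings by pointer, so a caller can pass NULL to mean "leave this setting unchanged". A hedged sketch of that optional-by-pointer idiom, using illustrative names rather than the driver's:

#include <stdbool.h>
#include <stddef.h>

struct psr_state { bool allow_active; unsigned int power_opt; };

/* NULL argument = leave the corresponding setting untouched. */
static void psr_update(struct psr_state *s, const bool *allow_active,
		       const unsigned int *power_opts)
{
	if (power_opts && s->power_opt != *power_opts)
		s->power_opt = *power_opts;        /* apply only on change */

	if (allow_active && s->allow_active != *allow_active)
		s->allow_active = *allow_active;
}

/* Caller flips one knob without disturbing the other: */
static void disable_psr_only(struct psr_state *s)
{
	bool allow = false;

	psr_update(s, &allow, NULL);               /* power_opt is untouched */
}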
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 0d01aa9f15a6..2108bff49d4e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -38,6 +38,8 @@
#include "clk/clk_11_0_0_offset.h"
#include "clk/clk_11_0_0_sh_mask.h"
+#include "irq/dcn20/irq_service_dcn20.h"
+
#undef FN
#define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
@@ -221,6 +223,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool force_reset = false;
bool p_state_change_support;
int total_plane_count;
+ int irq_src;
+ uint32_t hpd_state;
if (dc->work_arounds.skip_clock_update)
return;
@@ -238,7 +242,13 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->res_pool->pp_smu)
pp_smu = &dc->res_pool->pp_smu->nv_funcs;
- if (display_count == 0)
+ for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) {
+ hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src);
+ if (hpd_state)
+ break;
+ }
+
+ if (display_count == 0 && !hpd_state)
enter_display_off = true;
if (enter_display_off == safe_to_lower) {
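For illustration: the dcn20_clk_mgr.c hunk above keeps the clock manager from declaring "display off" while any hot-plug line is still asserted, since a sink may be attached even though no stream is active yet. A condensed sketch of the gating logic, with a stubbed HPD read standing in for the irq service call:

enum hpd_source { HPD1, HPD2, HPD3, HPD4, HPD5, HPD6 };

/* Stub standing in for the irq service query; illustrative only. */
static unsigned int read_hpd_state(enum hpd_source src)
{
	(void)src;
	return 0;
}

static int may_enter_display_off(int display_count)
{
	enum hpd_source src;
	unsigned int hpd_state = 0;

	for (src = HPD1; src <= HPD6; src++) {
		hpd_state = read_hpd_state(src);
		if (hpd_state)
			break;          /* a connector still asserts hot-plug */
	}

	return display_count == 0 && !hpd_state;
}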
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
new file mode 100644
index 000000000000..db9950244c7b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+#include "dcn201_clk_mgr.h"
+#include "dcn20/dcn20_clk_mgr.h"
+#include "dce100/dce_clk_mgr.h"
+#include "dm_helpers.h"
+#include "dm_services.h"
+
+#include "cyan_skillfish_ip_offset.h"
+#include "dcn/dcn_2_0_3_offset.h"
+#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "clk/clk_11_0_1_offset.h"
+#include "clk/clk_11_0_1_sh_mask.h"
+
+#define REG(reg) \
+ (clk_mgr->regs->reg)
+
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define CLK_BASE_INNER(seg) \
+ CLK_BASE__INST0_SEG ## seg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
+
+#define CTX \
+ clk_mgr->base.ctx
+#define DC_LOGGER \
+ clk_mgr->base.ctx->logger
+
+static const struct clk_mgr_registers clk_mgr_regs = {
+ CLK_COMMON_REG_LIST_DCN_201()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCN201_BASE(__SHIFT)
+};
+
+static const struct clk_mgr_mask clk_mgr_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCN201_BASE(_MASK)
+};
+
+void dcn201_update_clocks_vbios(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+
+ bool update_dppclk = false;
+ bool update_dispclk = false;
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
+ clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+ update_dppclk = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+ update_dispclk = true;
+ }
+
+ if (update_dppclk || update_dispclk) {
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+
+ if (update_dispclk) {
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+ dce_clk_params.target_clock_frequency = new_clocks->dispclk_khz;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+ }
+ /* Currently there is no DCECLOCK_TYPE_DPPCLK type defined in the VBIOS
+ * interface; VBIOS programs DPPCLK to the same limit as DISPCLK.
+ */
+ }
+}
+
+static void dcn201_init_clocks(struct clk_mgr *clk_mgr)
+{
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.max_supported_dppclk_khz = 1200000;
+ clk_mgr->clks.max_supported_dispclk_khz = 1200000;
+}
+
+static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int display_count;
+ bool update_dppclk = false;
+ bool update_dispclk = false;
+ bool enter_display_off = false;
+ bool dpp_clock_lowered = false;
+ bool force_reset = false;
+ bool p_state_change_support;
+ int total_plane_count;
+
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
+ if (clk_mgr_base->clks.dispclk_khz == 0 ||
+ dc->debug.force_clock_mode & 0x1) {
+ force_reset = true;
+
+ dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
+ }
+
+ display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+
+ if (display_count == 0)
+ enter_display_off = true;
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz))
+ clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ if (dc->debug.force_min_dcfclk_mhz > 0)
+ new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
+ new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz))
+ clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz))
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+
+ if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
+ clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
+
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz))
+ clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+ clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
+
+ update_dppclk = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ update_dispclk = true;
+ }
+
+ if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+ if (dpp_clock_lowered) {
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dentist(clk_mgr, context);
+ } else {
+ if (update_dppclk || update_dispclk)
+ dcn20_update_clocks_update_dentist(clk_mgr, context);
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ }
+ }
+}
+
+struct clk_mgr_funcs dcn201_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn201_update_clocks,
+ .init_clocks = dcn201_init_clocks,
+ .get_clock = dcn2_get_clock,
+};
+
+void dcn201_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dc_bios *bp = ctx->dc_bios;
+ clk_mgr->base.ctx = ctx;
+ clk_mgr->base.funcs = &dcn201_funcs;
+ clk_mgr->regs = &clk_mgr_regs;
+ clk_mgr->clk_mgr_shift = &clk_mgr_shift;
+ clk_mgr->clk_mgr_mask = &clk_mgr_mask;
+
+ clk_mgr->dccg = dccg;
+
+ clk_mgr->dfs_bypass_disp_clk = 0;
+
+ clk_mgr->dprefclk_ss_percentage = 0;
+ clk_mgr->dprefclk_ss_divider = 1000;
+ clk_mgr->ss_on_dprefclk = false;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ dcn201_funcs.update_clocks = dcn2_update_clocks_fpga;
+ clk_mgr->base.dprefclk_khz = 600000;
+ clk_mgr->base.dentist_vco_freq_khz = 3000000;
+ } else {
+ clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT);
+ clk_mgr->base.dprefclk_khz *= 100;
+
+ if (clk_mgr->base.dprefclk_khz == 0)
+ clk_mgr->base.dprefclk_khz = 600000;
+
+ REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz);
+ clk_mgr->base.dentist_vco_freq_khz *= 100000;
+
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3000000;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_mgr->dfs_bypass_enabled = true;
+
+ dce_clock_read_ss_info(clk_mgr);
+}
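For illustration: dcn201_clk_mgr_construct() above reads DPREFCLK and the DENTIST VCO frequency from hardware counters but falls back to safe defaults when a register reads zero (e.g., on pre-production or emulation parts). The read-scale-fallback pattern in isolation, with stubbed register reads:

#include <stdint.h>

#define DPREFCLK_DEFAULT_KHZ 600000
#define VCO_DEFAULT_KHZ 3000000

/* Stubs standing in for REG_READ/REG_GET; illustrative only. */
static uint32_t read_clk_current_cnt(void) { return 0; }
static uint32_t read_pll_fb_mult(void) { return 0; }

static void read_clocks_with_fallback(uint32_t *dprefclk_khz, uint32_t *vco_khz)
{
	*dprefclk_khz = read_clk_current_cnt() * 100;   /* counter -> kHz */
	if (*dprefclk_khz == 0)
		*dprefclk_khz = DPREFCLK_DEFAULT_KHZ;

	*vco_khz = read_pll_fb_mult() * 100000;         /* multiplier -> kHz */
	if (*vco_khz == 0)
		*vco_khz = VCO_DEFAULT_KHZ;
}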
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h
new file mode 100644
index 000000000000..ae463baaff47
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN201_CLK_MGR_H__
+#define __DCN201_CLK_MGR_H__
+
+void dcn201_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg);
+
+#endif //__DCN201_CLK_MGR_H__
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 6185f9475fa2..ac2d4c4f04e4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -42,6 +42,7 @@
#include "clk/clk_10_0_2_sh_mask.h"
#include "renoir_ip_offset.h"
+#include "irq/dcn21/irq_service_dcn21.h"
/* Constants */
@@ -66,11 +67,9 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
- /* Extend the WA to DP for Linux*/
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
- stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
- stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
}
@@ -131,9 +130,11 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
+ int irq_src;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
+ uint32_t hpd_state;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
@@ -149,8 +150,15 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = rn_get_active_display_cnt_wa(dc, context);
+
+ for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) {
+ hpd_state = dc_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src);
+ if (hpd_state)
+ break;
+ }
+
/* if we can go lower, go lower */
- if (display_count == 0) {
+ if (display_count == 0 && !hpd_state) {
rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 7046da14bb2a..3eee32faa208 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -582,8 +582,8 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 5.32,
- .sr_enter_plus_exit_time_us = 6.38,
+ .sr_exit_time_us = 7.95,
+ .sr_enter_plus_exit_time_us = 9,
.valid = true,
},
{
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 4a4894e9d9c9..f4c9a458ace8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -87,7 +87,7 @@ int dcn31_get_active_display_cnt_wa(
const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
- if (link->link_enc->funcs->is_dig_enabled &&
+ if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
@@ -142,6 +142,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, true);
+ dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
@@ -166,6 +167,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, false);
+ dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
@@ -217,14 +219,17 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
update_dispclk = true;
}
- /* TODO: add back DTO programming when DPPCLK restore is fixed in FSDL*/
if (dpp_clock_lowered) {
// increase per DPP DTO before lowering global dppclk
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
} else {
// increase global DPPCLK before lowering per DPP DTO
if (update_dppclk || update_dispclk)
dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
// notify DMCUB of latest clocks
@@ -366,32 +371,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 5.32,
- .sr_enter_plus_exit_time_us = 6.38,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.82,
- .sr_enter_plus_exit_time_us = 11.196,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.89,
- .sr_enter_plus_exit_time_us = 11.24,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.748,
- .sr_enter_plus_exit_time_us = 11.102,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
}
@@ -518,14 +523,21 @@ static unsigned int find_clk_for_voltage(
unsigned int voltage)
{
int i;
+ int max_voltage = 0;
+ int clock = 0;
for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage)
+ if (clock_table->SocVoltage[i] == voltage) {
return clocks[i];
+ } else if (clock_table->SocVoltage[i] >= max_voltage &&
+ clock_table->SocVoltage[i] < voltage) {
+ max_voltage = clock_table->SocVoltage[i];
+ clock = clocks[i];
+ }
}
- ASSERT(0);
- return 0;
+ ASSERT(clock);
+ return clock;
}
void dcn31_clk_mgr_helper_populate_bw_params(
@@ -640,7 +652,7 @@ void dcn31_clk_mgr_construct(
sizeof(struct dcn31_watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part);
- if (clk_mgr->smu_wm_set.wm_set == 0) {
+ if (!clk_mgr->smu_wm_set.wm_set) {
clk_mgr->smu_wm_set.wm_set = &dummy_wms;
clk_mgr->smu_wm_set.mc_address.quad_part = 0;
}
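For illustration: the find_clk_for_voltage() change above stops asserting on a missing exact voltage match and instead returns the clock of the highest table entry still below the requested voltage. The lookup in isolation, over plain arrays:

#include <assert.h>

static unsigned int clk_for_voltage(const unsigned int *voltages,
				    const unsigned int *clocks,
				    int n, unsigned int target)
{
	unsigned int best_v = 0, best_clk = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (voltages[i] == target)
			return clocks[i];               /* exact match wins */
		if (voltages[i] >= best_v && voltages[i] < target) {
			best_v = voltages[i];           /* nearest level below target */
			best_clk = clocks[i];
		}
	}
	assert(best_clk);                               /* some lower level must exist */
	return best_clk;
}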
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index c798c65d4276..12e5470fa567 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -229,6 +229,25 @@ static bool create_links(
DC_LOG_DC("BIOS object table - end");
+ /* Create a link for each usb4 dpia port */
+ for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
+ link_init_params.ctx = dc->ctx;
+ link_init_params.connector_index = i;
+ link_init_params.link_index = dc->link_count;
+ link_init_params.dc = dc;
+ link_init_params.is_dpia_link = true;
+
+ link = link_create(&link_init_params);
+ if (link) {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
+ }
+
for (i = 0; i < num_virtual_links; i++) {
struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
struct encoder_init_data enc_init = {0};
@@ -255,6 +274,24 @@ static bool create_links(
goto failed_alloc;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+ dc->caps.dp_hpo &&
+ link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) {
+ /* FPGA case - Allocate HPO DP link encoder */
+ if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) {
+ link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i];
+
+ if (link->hpo_dp_link_enc == NULL) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+ link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
+ link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
+ }
+ }
+#endif
+
link->link_status.dpcd_caps = &link->dpcd_caps;
enc_init.ctx = dc->ctx;
@@ -276,6 +313,75 @@ failed_alloc:
return false;
}
+/* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction. This can happen if the
+ * number of physical connectors is less than the number of DIGs.
+ */
+static bool create_link_encoders(struct dc *dc)
+{
+ bool res = true;
+ unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
+ unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
+ int i;
+
+ /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
+ * link encoders and physical display endpoints and does not require
+ * additional link encoder objects.
+ */
+ if (num_usb4_dpia == 0)
+ return res;
+
+ /* Create as many link encoder objects as the platform supports. DPIA
+ * endpoints can be programmably mapped to any DIG.
+ */
+ if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
+ for (i = 0; i < num_dig_link_enc; i++) {
+ struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
+
+ if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
+ link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
+ (enum engine_id)(ENGINE_ID_DIGA + i));
+ if (link_enc) {
+ dc->res_pool->link_encoders[i] = link_enc;
+ dc->res_pool->dig_link_enc_count++;
+ } else {
+ res = false;
+ }
+ }
+ }
+ }
+
+ return res;
+}
+
+/* Destroy any additional DIG link encoder objects created by
+ * create_link_encoders().
+ * NB: Must only be called after destroy_links().
+ */
+static void destroy_link_encoders(struct dc *dc)
+{
+ unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
+ unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
+ int i;
+
+ /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
+ * link encoders and physical display endpoints and does not require
+ * additional link encoder objects.
+ */
+ if (num_usb4_dpia == 0)
+ return;
+
+ for (i = 0; i < num_dig_link_enc; i++) {
+ struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
+
+ if (link_enc) {
+ link_enc->funcs->destroy(&link_enc);
+ dc->res_pool->link_encoders[i] = NULL;
+ dc->res_pool->dig_link_enc_count--;
+ }
+ }
+}
+
static struct dc_perf_trace *dc_perf_trace_create(void)
{
return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
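For illustration: create_link_encoders() above backfills only the missing entries in the encoder array, so encoders created earlier during link construction are left alone. A condensed sketch of that lazy gap-filling loop, with hypothetical types and factory:

#define NUM_ENC 6

struct enc;
struct pool {
	struct enc *encoders[NUM_ENC];
	int count;
};

struct enc *create_minimal_enc(int id);         /* hypothetical factory */

static int fill_encoder_gaps(struct pool *p)
{
	int i, ok = 1;

	for (i = 0; i < NUM_ENC; i++) {
		if (p->encoders[i])
			continue;               /* already created during link construction */
		p->encoders[i] = create_minimal_enc(i);
		if (p->encoders[i])
			p->count++;
		else
			ok = 0;                 /* report failure but keep going */
	}
	return ok;
}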
@@ -709,6 +815,8 @@ static void dc_destruct(struct dc *dc)
destroy_links(dc);
+ destroy_link_encoders(dc);
+
if (dc->clk_mgr) {
dc_destroy_clk_mgr(dc->clk_mgr);
dc->clk_mgr = NULL;
@@ -913,6 +1021,12 @@ static bool dc_construct(struct dc *dc,
if (!create_links(dc, init_params->num_virtual_links))
goto fail;
+ /* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction.
+ */
+ if (!create_link_encoders(dc))
+ goto fail;
+
/* Initialise DIG link encoder resource tracking variables. */
link_enc_cfg_init(dc, dc->current_state);
@@ -1544,7 +1658,7 @@ static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
-void dc_z10_restore(struct dc *dc)
+void dc_z10_restore(const struct dc *dc)
{
if (dc->hwss.z10_restore)
dc->hwss.z10_restore(dc);
@@ -1773,6 +1887,27 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
return false;
}
+/* Perform updates here which need to be deferred until next vupdate
+ *
+ * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
+ * but forcing lut memory to shutdown state is immediate. This causes
+ * single frame corruption as lut gets disabled mid-frame unless shutdown
+ * is deferred until after entering bypass.
+ */
+static void process_deferred_updates(struct dc *dc)
+{
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ int i = 0;
+
+ if (dc->debug.enable_mem_low_power.bits.cm) {
+ ASSERT(dc->dcn_ip->max_num_dpp);
+ for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
+ if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
+ dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
+ }
+#endif
+}
+
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
@@ -1783,6 +1918,11 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
+ if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+ else
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+
if (is_flip_pending_in_pipes(dc, context))
return;
@@ -1793,6 +1933,8 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
+ process_deferred_updates(dc);
+
dc->hwss.optimize_bandwidth(dc, context);
dc->optimized_required = false;
@@ -1990,7 +2132,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
}
if (u->plane_info->dcc.enable != u->surface->dcc.enable
- || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
+ || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
/* During DCC on/off, stutter period is calculated before
* DCC has fully transitioned. This results in incorrect
@@ -2143,6 +2285,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
update_flags->bits.gamma_change = 1;
}
+ if (u->lut3d_func || u->func_shaper)
+ update_flags->bits.lut_3d = 1;
+
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
update_flags->bits.hdr_mult = 1;
@@ -2156,6 +2301,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (update_flags->bits.input_csc_change
|| update_flags->bits.coeff_reduction_change
+ || update_flags->bits.lut_3d
|| update_flags->bits.gamma_change
|| update_flags->bits.gamut_remap_change) {
type = UPDATE_TYPE_FULL;
@@ -2214,6 +2360,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->dsc_config)
su_flags->bits.dsc_changed = 1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (stream_update->mst_bw_update)
+ su_flags->bits.mst_bw = 1;
+#endif
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -2591,6 +2742,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dsc_config)
dp_update_dsc_config(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (stream_update->mst_bw_update) {
+ if (stream_update->mst_bw_update->is_increase)
+ dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ else
+ dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ }
+#endif
+
if (stream_update->pending_test_pattern) {
dc_link_dp_set_test_pattern(stream->link,
stream->test_pattern.type,
@@ -2968,6 +3128,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
+ } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ *
+ * Only relevant for DCN behavior where we can guarantee the optimization
+ * is safe to apply - retain the legacy behavior for DCE.
+ */
+ dc_post_update_surfaces_to_stream(dc);
}
@@ -3024,14 +3192,11 @@ void dc_commit_updates_for_stream(struct dc *dc,
pipe_ctx->plane_state->force_full_update = false;
}
}
- /*let's use current_state to update watermark etc*/
- if (update_type >= UPDATE_TYPE_FULL) {
- dc_post_update_surfaces_to_stream(dc);
- if (dc_ctx->dce_version >= DCE_VERSION_MAX)
- TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
- else
- TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ /* Legacy optimization path for DCE. */
+ if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
+ dc_post_update_surfaces_to_stream(dc);
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
}
return;
@@ -3334,6 +3499,7 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
int i;
+ bool allow_active;
for (i = 0; i < dc->current_state->stream_count ; i++) {
struct dc_link *link;
@@ -3345,10 +3511,12 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
if (link->psr_settings.psr_feature_enabled) {
if (enable && !link->psr_settings.psr_allow_active) {
- if (!dc_link_set_psr_allow_active(link, true, false, false))
+ allow_active = true;
+ if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
return false;
} else if (!enable && link->psr_settings.psr_allow_active) {
- if (!dc_link_set_psr_allow_active(link, false, true, false))
+ allow_active = false;
+ if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
return false;
}
}
@@ -3432,6 +3600,12 @@ void dc_hardware_release(struct dc *dc)
*/
bool dc_enable_dmub_notifications(struct dc *dc)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
+ if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0)
+ return true;
+#endif
/* dmub aux needs dmub notifications to be enabled */
return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
@@ -3457,7 +3631,12 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
cmd.dp_aux_access.header.payload_bytes = 0;
- cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
+ /* For dpia, ddc_pin is set to NULL */
+ if (!dc->links[link_index]->ddc->ddc_pin)
+ cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
+ else
+ cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
+
cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
cmd.dp_aux_access.aux_control.timeout = 0;
@@ -3501,6 +3680,130 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
return true;
}
+uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
+ uint8_t dpia_port_index)
+{
+ uint8_t index, link_index = 0xFF;
+
+ for (index = 0; index < dc->link_count; index++) {
+ /* ddc_hw_inst has dpia port index for dpia links
+ * and ddc instance for legacy links
+ */
+ if (!dc->links[index]->ddc->ddc_pin) {
+ if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
+ link_index = index;
+ break;
+ }
+ }
+ }
+ ASSERT(link_index != 0xFF);
+ return link_index;
+}
+
+/**
+ * dc_process_dmub_set_config_async - Submits set_config command to dmub via inbox message
+ * @dc: dc structure
+ * @link_index: link index
+ * @payload: aux payload
+ * @notify: set_config immediate reply
+ *
+ * Return: True if successful, False if failure
+ */
+bool dc_process_dmub_set_config_async(struct dc *dc,
+ uint32_t link_index,
+ struct set_config_cmd_payload *payload,
+ struct dmub_notification *notify)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+ bool is_cmd_complete = true;
+
+ /* prepare SET_CONFIG command */
+ cmd.set_config_access.header.type = DMUB_CMD__DPIA;
+ cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
+
+ cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
+ cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
+
+ if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
+ /* command is not processed by dmub */
+ notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
+ return is_cmd_complete;
+ }
+
+ /* command processed by dmub, if ret_status is 1, it is completed instantly */
+ if (cmd.set_config_access.header.ret_status == 1)
+ notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
+ else
+ /* cmd pending, will receive notification via outbox */
+ is_cmd_complete = false;
+
+ return is_cmd_complete;
+}
+
+/**
+ * dc_process_dmub_set_mst_slots - Submits mst slot allocation command to dmub via inbox message
+ * @dc: dc structure
+ * @link_index: link index
+ * @mst_alloc_slots: mst slots to be allotted
+ * @mst_slots_in_use: mst slots in use returned in failure case
+ *
+ * Return: DC_OK if successful, DC_ERROR if failure
+ */
+enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ uint32_t link_index,
+ uint8_t mst_alloc_slots,
+ uint8_t *mst_slots_in_use)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+
+ /* prepare MST_ALLOC_SLOTS command */
+ cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
+ cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
+
+ cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
+
+ if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
+ /* command is not processed by dmub */
+ return DC_ERROR_UNEXPECTED;
+
+ /* command processed by dmub; a ret_status other than 1 indicates an error */
+ if (cmd.set_config_access.header.ret_status != 1)
+ /* command processing error */
+ return DC_ERROR_UNEXPECTED;
+
+ /* command processed and we have a status of 2, mst not enabled in dpia */
+ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
+ return DC_FAIL_UNSUPPORTED_1;
+
+ /* previously configured mst alloc and used slots did not match */
+ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
+ *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
+ return DC_NOT_SUPPORTED;
+ }
+
+ return DC_OK;
+}
+
/**
* dc_disable_accelerated_mode - disable accelerated mode
* @dc: dc structure
@@ -3509,3 +3812,57 @@ void dc_disable_accelerated_mode(struct dc *dc)
{
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}
+
+/**
+ * dc_notify_vsync_int_state - notifies vsync enable/disable state
+ * @dc: dc structure
+ * @stream: stream where vsync int state changed
+ * @enable: whether vsync is enabled or disabled
+ *
+ * Called when vsync is enabled or disabled; notifies DMUB to start/stop
+ * ABM interrupts after steady state is reached.
+ */
+void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
+{
+ int i;
+ int edp_num;
+ struct pipe_ctx *pipe = NULL;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+
+ if (link->psr_settings.psr_feature_enabled)
+ return;
+
+ /* find primary pipe associated with stream */
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg)
+ break;
+ }
+
+ if (i == MAX_PIPES) {
+ ASSERT(0);
+ return;
+ }
+
+ get_edp_links(dc, edp_links, &edp_num);
+
+ /* Determine panel inst */
+ for (i = 0; i < edp_num; i++) {
+ if (edp_links[i] == link)
+ break;
+ }
+
+ if (i == edp_num)
+ return;
+
+ if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
+ pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
+}
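For illustration: the new dc_process_dmub_set_config_async() returns true when DMUB answered (or rejected the command) immediately and false when the reply will arrive later through an outbox notification. A hypothetical caller sketch, assuming the types introduced above and an illustrative handle_status() helper:

/* Hedged sketch of a caller; not part of the driver. */
static void send_set_config(struct dc *dc, uint32_t link_index,
			    struct set_config_cmd_payload *payload)
{
	struct dmub_notification notify = {0};

	if (dc_process_dmub_set_config_async(dc, link_index, payload, &notify)) {
		/* Completed immediately; sc_status is valid now. */
		handle_status(notify.sc_status);        /* hypothetical */
	} else {
		/* Pending: wait for the matching outbox notification. */
	}
}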
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 1e44b13c1c7d..2796bdd17de1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -51,6 +51,8 @@
#include "inc/link_enc_cfg.h"
#include "inc/link_dpcd.h"
+#include "dc/dcn30/dcn30_vpg.h"
+
#define DC_LOGGER_INIT(logger)
#define LINK_INFO(...) \
@@ -64,6 +66,31 @@
/*******************************************************************************
* Private functions
******************************************************************************/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool add_dp_hpo_link_encoder_to_link(struct dc_link *link)
+{
+ struct hpo_dp_link_encoder *enc = resource_get_unused_hpo_dp_link_encoder(
+ link->dc->res_pool);
+
+ if (!link->hpo_dp_link_enc && enc) {
+ link->hpo_dp_link_enc = enc;
+ link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
+ link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
+ }
+
+ return (link->hpo_dp_link_enc != NULL);
+}
+
+static void remove_dp_hpo_link_encoder_from_link(struct dc_link *link)
+{
+ if (link->hpo_dp_link_enc) {
+ link->hpo_dp_link_enc->hpd_source = HPD_SOURCEID_UNKNOWN;
+ link->hpo_dp_link_enc->transmitter = TRANSMITTER_UNKNOWN;
+ link->hpo_dp_link_enc = NULL;
+ }
+}
+#endif
+
static void dc_link_destruct(struct dc_link *link)
{
int i;
@@ -91,6 +118,12 @@ static void dc_link_destruct(struct dc_link *link)
link->link_enc->funcs->destroy(&link->link_enc);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->hpo_dp_link_enc) {
+ remove_dp_hpo_link_encoder_from_link(link);
+ }
+#endif
+
if (link->local_sink)
dc_sink_release(link->local_sink);
@@ -641,13 +674,13 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
static void read_current_link_settings_on_detect(struct dc_link *link)
{
- union lane_count_set lane_count_set = { {0} };
+ union lane_count_set lane_count_set = {0};
uint8_t link_bw_set;
uint8_t link_rate_set;
uint32_t read_dpcd_retry_cnt = 10;
enum dc_status status = DC_ERROR_UNEXPECTED;
int i;
- union max_down_spread max_down_spread = { {0} };
+ union max_down_spread max_down_spread = {0};
// Read DPCD 00101h to find out the number of lanes currently set
for (i = 0; i < read_dpcd_retry_cnt; i++) {
@@ -928,6 +961,11 @@ static bool dc_link_detect_helper(struct dc_link *link,
return false;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING)
+ add_dp_hpo_link_encoder_to_link(link);
+#endif
+
if (link->type == dc_connection_mst_branch) {
LINK_INFO("link=%d, mst branch is now Connected\n",
link->link_index);
@@ -1173,6 +1211,11 @@ static bool dc_link_detect_helper(struct dc_link *link,
sizeof(link->mst_stream_alloc_table.stream_allocations));
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
+ reset_dp_hpo_stream_encoders_for_link(link);
+#endif
+
link->type = dc_connection_none;
sink_caps.signal = SIGNAL_TYPE_NONE;
/* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
@@ -1209,6 +1252,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_z10_restore(dc);
+#endif
+
/* get out of low power state */
if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT)
clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
@@ -1378,8 +1425,8 @@ static enum transmitter translate_encoder_to_transmitter(struct graphics_object_
}
}
-static bool dc_link_construct(struct dc_link *link,
- const struct link_init_data *init_params)
+static bool dc_link_construct_legacy(struct dc_link *link,
+ const struct link_init_data *init_params)
{
uint8_t i;
struct ddc_service_init_data ddc_service_init_data = { { 0 } };
@@ -1549,6 +1596,9 @@ static bool dc_link_construct(struct dc_link *link,
}
DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+#endif
/* Update link encoder tracking variables. These are used for the dynamic
* assignment of link encoders to streams.
@@ -1610,6 +1660,14 @@ static bool dc_link_construct(struct dc_link *link,
DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
}
+
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+ link->bios_forced_drive_settings.VOLTAGE_SWING =
+ (info->ext_disp_conn_info.fixdpvoltageswing & 0x3);
+ link->bios_forced_drive_settings.PRE_EMPHASIS =
+ ((info->ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3);
+ }
+
break;
}
}
@@ -1651,6 +1709,80 @@ create_fail:
return false;
}
+static bool dc_link_construct_dpia(struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ struct ddc_service_init_data ddc_service_init_data = { { 0 } };
+ struct dc_context *dc_ctx = init_params->ctx;
+
+ DC_LOGGER_INIT(dc_ctx->logger);
+
+ /* Initialize irq sources for hpd and hpd rx */
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ link->dc = init_params->dc;
+ link->ctx = dc_ctx;
+ link->link_index = init_params->link_index;
+
+ memset(&link->preferred_training_settings, 0,
+ sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0,
+ sizeof(struct dc_link_settings));
+
+ /* Dummy Init for linkid */
+ link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ link->link_id.id = CONNECTOR_ID_DISPLAY_PORT;
+ link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index;
+ link->is_internal_display = false;
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+ LINK_INFO("Connector[%d] description:signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
+
+ link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA;
+ link->is_dig_mapping_flexible = true;
+
+ /* TODO: Initialize link : funcs->link_init */
+
+ ddc_service_init_data.ctx = link->ctx;
+ ddc_service_init_data.id = link->link_id;
+ ddc_service_init_data.link = link;
+ /* Set indicator for dpia link so that ddc won't be created */
+ ddc_service_init_data.is_dpia_link = true;
+
+ link->ddc = dal_ddc_service_create(&ddc_service_init_data);
+ if (!link->ddc) {
+ DC_ERROR("Failed to create ddc_service!\n");
+ goto ddc_create_fail;
+ }
+
+ /* Set dpia port index: 0 .. (number of dpia ports - 1) */
+ link->ddc_hw_inst = init_params->connector_index;
+
+ /* TODO: Create link encoder */
+
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
+ link->wa_flags.dp_mot_reset_segment = true;
+
+ return true;
+
+ddc_create_fail:
+ return false;
+}
+
+static bool dc_link_construct(struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ /* Handle dpia case */
+ if (init_params->is_dpia_link)
+ return dc_link_construct_dpia(link, init_params);
+ else
+ return dc_link_construct_legacy(link, init_params);
+}
/*******************************************************************************
* Public functions
******************************************************************************/
@@ -1741,17 +1873,47 @@ static enum dc_status enable_link_dp(struct dc_state *state,
/* get link settings for video mode timing */
decide_link_settings(stream, &link_settings);
+ /* Train with fallback when enabling DPIA link. Conventional links are
+ * trained with fallback during sink detection.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ do_fallback = true;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /*
+ * Temporary w/a to get DP2.0 link rates to work with SST.
+ * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved.
+ */
+ if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->dc->debug.set_mst_en_for_sst) {
+ dp_enable_mst_on_sink(link, true);
+ }
+#endif
+
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
/*in case it is not on*/
link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) {
+ /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
+ } else {
+ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+ link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+ if (state->clk_mgr && !apply_seamless_boot_optimization)
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+ state, false);
+ }
+#else
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
- link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+ link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
if (state->clk_mgr && !apply_seamless_boot_optimization)
state->clk_mgr->funcs->update_clocks(state->clk_mgr,
- state, false);
+ state, false);
+#endif
// during mode switch we do DP_SET_POWER off then on, and OUI is lost
dpcd_set_source_specific_data(link);
@@ -1780,7 +1942,12 @@ static enum dc_status enable_link_dp(struct dc_state *state,
else
fec_enable = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
+ dp_set_fec_enable(link, fec_enable);
+#else
dp_set_fec_enable(link, fec_enable);
+#endif
// during mode set we do DP_SET_POWER off then on, aux writes are lost
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
@@ -2284,6 +2451,9 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
if (dc_is_dp_signal(signal)) {
/* SST DP, eDP */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link_settings link_settings = link->cur_link_settings;
+#endif
if (dc_is_dp_sst_signal(signal))
dp_disable_link_phy(link, signal);
else
@@ -2291,8 +2461,15 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
if (dc_is_dp_sst_signal(signal) ||
link->mst_stream_alloc_table.stream_count == 0) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
+ dp_set_fec_enable(link, false);
+ dp_set_fec_ready(link, false);
+ }
+#else
dp_set_fec_enable(link, false);
dp_set_fec_ready(link, false);
+#endif
}
} else {
if (signal != SIGNAL_TYPE_VIRTUAL)
@@ -2475,9 +2652,14 @@ static bool dp_active_dongle_validate_timing(
break;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER &&
+ dongle_caps->extendedCapValid == true) {
+#else
if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
dongle_caps->extendedCapValid == false)
return true;
+#endif
/* Check Pixel Encoding */
switch (timing->pixel_encoding) {
@@ -2520,6 +2702,89 @@ static bool dp_active_dongle_validate_timing(
if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
return false;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ }
+
+ if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
+ dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 &&
+ dongle_caps->dfp_cap_ext.supported) {
+
+ if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000))
+ return false;
+
+ if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable)
+ return false;
+
+ if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable)
+ return false;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_666 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc)
+ return false;
+ }
+ }
+#endif
+
return true;
}
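For illustration: the DFP capability checks above repeat one depth-versus-capability chain for each pixel encoding. A table-driven variant (purely illustrative, not the driver's code) shows how the four chains collapse into a single indexed lookup:

#include <stdbool.h>

enum depth { DEPTH_6, DEPTH_8, DEPTH_10, DEPTH_12, DEPTH_16, DEPTH_COUNT };
enum encoding { ENC_RGB, ENC_YCBCR444, ENC_YCBCR422, ENC_YCBCR420, ENC_COUNT };

/* caps[encoding][depth], filled once from the dongle's dfp_cap_ext fields. */
static bool caps[ENC_COUNT][DEPTH_COUNT];

static bool timing_depth_supported(enum encoding e, enum depth d)
{
	return caps[e][d];      /* replaces four near-identical if/else ladders */
}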
@@ -2662,8 +2927,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
return true;
}
-bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active,
- bool wait, bool force_static)
+bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
@@ -2676,20 +2941,33 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active,
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- link->psr_settings.psr_allow_active = allow_active;
+ /* Set power optimization flag */
+ if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
+ link->psr_settings.psr_power_opt = *power_opts;
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
+ psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt);
+ }
+
+ /* Enable or Disable PSR */
+ if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
+ link->psr_settings.psr_allow_active = *allow_active;
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!allow_active)
- dc_z10_restore(dc);
+ if (!link->psr_settings.psr_allow_active)
+ dc_z10_restore(dc);
#endif
- if (psr != NULL && link->psr_settings.psr_feature_enabled) {
- if (force_static && psr->funcs->psr_force_static)
- psr->funcs->psr_force_static(psr, panel_inst);
- psr->funcs->psr_enable(psr, allow_active, wait, panel_inst);
- } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
- dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
- else
- return false;
+ if (psr != NULL && link->psr_settings.psr_feature_enabled) {
+ if (force_static && psr->funcs->psr_force_static)
+ psr->funcs->psr_force_static(psr, panel_inst);
+ psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
+ } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
+ link->psr_settings.psr_feature_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait);
+ else
+ return false;
+ }
return true;
}
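
The reworked signature takes allow_active and power_opts by pointer precisely so callers can update one without disturbing the other; a NULL argument leaves that setting as-is. Hypothetical call sites (not from this patch):

	bool active = true;
	unsigned int opts = 0;

	/* Update PSR state and the power-optimization flags together. */
	dc_link_set_psr_allow_active(link, &active, true, false, &opts);

	/* Touch only the power-optimization flags; PSR state is unchanged. */
	dc_link_set_psr_allow_active(link, NULL, true, false, &opts);
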
@@ -2978,10 +3256,12 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
static void update_mst_stream_alloc_table(
struct dc_link *link,
struct stream_encoder *stream_enc,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
+#endif
const struct dp_mst_stream_allocation_table *proposed_table)
{
- struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = {
- { 0 } };
+ struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
struct link_mst_stream_allocation *dc_alloc;
int i;
@@ -3014,6 +3294,9 @@ static void update_mst_stream_alloc_table(
work_table[i].slot_count =
proposed_table->stream_allocations[i].slot_count;
work_table[i].stream_enc = stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc;
+#endif
}
}
@@ -3024,6 +3307,108 @@ static void update_mst_stream_alloc_table(
link->mst_stream_alloc_table.stream_allocations[i] =
work_table[i];
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
+{
+ const uint32_t VCP_Y_PRECISION = 1000;
+ uint64_t vcp_x, vcp_y;
+
+	// Add 0.5*(1/VCP_Y_PRECISION) to round to the nearest 1/VCP_Y_PRECISION step
+ avg_time_slots_per_mtp = dc_fixpt_add(
+ avg_time_slots_per_mtp, dc_fixpt_from_fraction(1, 2 * VCP_Y_PRECISION));
+
+ vcp_x = dc_fixpt_floor(avg_time_slots_per_mtp);
+ vcp_y = dc_fixpt_floor(
+ dc_fixpt_mul_int(
+ dc_fixpt_sub_int(avg_time_slots_per_mtp, dc_fixpt_floor(avg_time_slots_per_mtp)),
+ VCP_Y_PRECISION));
+
+ if (link->type == dc_connection_mst_branch)
+ DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream "
+ "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION);
+ else
+ DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream "
+ "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION);
+}
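
dc_log_vcp_x_y() splits the fixed-point slot count into the integer part X and a fractional part Y expressed in thousandths, applying the rounding bias first. The same arithmetic with plain doubles and a hypothetical input:

	/* Suppose avg_time_slots_per_mtp = 17.1427, VCP_Y_PRECISION = 1000 */
	double avg = 17.1427 + 0.5 / 1000;	/* 17.1432 after rounding bias */
	unsigned long long vcp_x = (unsigned long long)avg;	/* 17 */
	unsigned long long vcp_y =
		(unsigned long long)((avg - (double)vcp_x) * 1000);	/* 143 */
	/* logged as "X: 17 Y: 143/1000", i.e. 17.143 slots per MTP */
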
+
+/*
+ * Payload allocation/deallocation for SST introduced in DP2.0
+ */
+enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
+ struct link_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* slot X.Y for SST payload deallocate */
+ if (!allocate) {
+ avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+
+ dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+ hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+ hpo_dp_link_encoder,
+ hpo_dp_stream_encoder->inst,
+ avg_time_slots_per_mtp);
+ }
+
+	/* calculate VC payload and update branch with new payload allocation table */
+ if (!dpcd_write_128b_132b_sst_payload_allocation_table(
+ stream,
+ link,
+ &proposed_table,
+ allocate)) {
+ DC_LOG_ERROR("SST Update Payload: Failed to update "
+ "allocation table for "
+ "pipe idx: %d\n",
+ pipe_ctx->pipe_idx);
+ }
+
+ proposed_table.stream_allocations[0].hpo_dp_stream_enc = hpo_dp_stream_encoder;
+
+ ASSERT(proposed_table.stream_count == 1);
+
+ //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id
+ DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p "
+ "vcp_id: %d "
+ "slot_count: %d\n",
+ (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc,
+ proposed_table.stream_allocations[0].vcp_id,
+ proposed_table.stream_allocations[0].slot_count);
+
+ /* program DP source TX for payload */
+ hpo_dp_link_encoder->funcs->update_stream_allocation_table(
+ hpo_dp_link_encoder,
+ &proposed_table);
+
+ /* poll for ACT handled */
+ if (!dpcd_poll_for_allocation_change_trigger(link)) {
+ // Failures will result in blackscreen and errors logged
+ BREAK_TO_DEBUGGER();
+ }
+
+ /* slot X.Y for SST payload allocate */
+ if (allocate) {
+ avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
+
+ dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+ hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+ hpo_dp_link_encoder,
+ hpo_dp_stream_encoder->inst,
+ avg_time_slots_per_mtp);
+ }
+
+ /* Always return DC_OK.
+	 * If part of the sequence fails, the failure(s) are logged and the screen goes black.
+ */
+ return DC_OK;
+}
+#endif
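
Read as a sequence, dc_link_update_sst_payload() keeps the throttled VCP size nonzero only while a valid allocation exists at both ends of the link:

	/* deallocate: VCP size -> 0, then DPCD table write, source TX table, ACT poll
	 * allocate:   DPCD table write, source TX table, ACT poll, then VCP size -> X.Y
	 */
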
/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
* because stream_encoder is not exposed to dm
@@ -3032,16 +3417,27 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct link_encoder *link_encoder = link->link_enc;
+ struct link_encoder *link_encoder = NULL;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
+#endif
struct dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
- uint8_t i;
+ int i;
enum act_return_status ret;
DC_LOGGER_INIT(link->ctx->logger);
+ /* Link encoder may have been dynamically assigned to non-physical display endpoint. */
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_encoder = link->link_enc;
+ else if (link->dc->res_pool->funcs->link_encs_assign)
+ link_encoder = link_enc_cfg_get_link_enc_used_by_stream(pipe_ctx->stream->ctx->dc, stream);
+ ASSERT(link_encoder);
+
/* enable_link_dp_mst already check link->enabled_stream_count
* and stream is in link->stream[]. This is called during set mode,
* stream_enc is available.
@@ -3054,7 +3450,14 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
&proposed_table,
true)) {
update_mst_stream_alloc_table(
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+#else
link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+#endif
}
else
DC_LOG_WARNING("Failed to update"
@@ -3068,23 +3471,70 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
i,
link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#else
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#endif
}
ASSERT(proposed_table.stream_count > 0);
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ static enum dc_status status;
+		enum dc_status status;
+
+ for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
+ mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count;
+
+ status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
+ mst_alloc_slots, &prev_mst_slots_in_use);
+ ASSERT(status == DC_OK);
+ DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
+ status, mst_alloc_slots, prev_mst_slots_in_use);
+ }
+
/* program DP source TX for payload */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->update_stream_allocation_table(
+ hpo_dp_link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
link_encoder->funcs->update_mst_stream_allocation_table(
link_encoder,
&link->mst_stream_alloc_table);
+#endif
/* send down message */
ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
@@ -3107,26 +3557,215 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
pbn = get_pbn_from_timing(pipe_ctx);
avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+ hpo_dp_link_encoder,
+ hpo_dp_stream_encoder->inst,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
stream_encoder->funcs->set_throttled_vcp_size(
stream_encoder,
avg_time_slots_per_mtp);
+#endif
return DC_OK;
}
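
The slot math above is the core of MST payload sizing: the stream's bandwidth in PBN divided by the PBN value of one time slot gives the average time slots per MTP, and the branch device is ultimately asked for the ceiling of that value. With hypothetical numbers (actual values depend on the timing and link rate):

	uint32_t pbn = 1080, pbn_per_slot = 63;	/* illustrative only */
	/* avg_time_slots_per_mtp = 1080 / 63 = 17.142... */
	uint32_t req_slot_count = (pbn + pbn_per_slot - 1) / pbn_per_slot;	/* 18 */
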
-static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
struct link_encoder *link_encoder = link->link_enc;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
struct dp_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
uint8_t i;
+ enum act_return_status ret;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* decrease throttled vcp size */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+
+ /* notify immediate branch device table update */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ /* update mst stream allocation table software state */
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ } else {
+		DC_LOG_WARNING("Failed to update "
+				"MST allocation table for "
+				"pipe idx: %d\n",
+				pipe_ctx->pipe_idx);
+ }
+
+ DC_LOG_MST("%s "
+			"stream_count: %d\n",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* update mst stream allocation table hardware state */
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ /* poll for immediate branch device ACT handled */
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ return DC_OK;
+}
+
+enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ uint8_t i;
+ enum act_return_status ret;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* notify immediate branch device table update */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ /* update mst stream allocation table software state */
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ }
+
+ DC_LOG_MST("%s "
+			"stream_count: %d\n",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* update mst stream allocation table hardware state */
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ /* poll for immediate branch device ACT handled */
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ if (ret != ACT_LINK_LOST) {
+ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
+
+ /* increase throttled vcp size */
+ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+ pbn_per_slot = get_pbn_per_slot(stream);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ return DC_OK;
+}
+#endif
+
+static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct link_encoder *link_encoder = NULL;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
+#endif
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+ int i;
bool mst_mode = (link->type == dc_connection_mst_branch);
DC_LOGGER_INIT(link->ctx->logger);
+ /* Link encoder may have been dynamically assigned to non-physical display endpoint. */
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_encoder = link->link_enc;
+ else if (link->dc->res_pool->funcs->link_encs_assign)
+ link_encoder = link_enc_cfg_get_link_enc_used_by_stream(pipe_ctx->stream->ctx->dc, stream);
+ ASSERT(link_encoder);
+
/* deallocate_mst_payload is called before disable link. When mode or
* disable/enable monitor, new stream is created which is not in link
* stream[] yet. For this, payload is not allocated yet, so de-alloc
@@ -3135,9 +3774,28 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
*/
/* slot X.Y */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+ hpo_dp_link_encoder,
+ hpo_dp_stream_encoder->inst,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
stream_encoder->funcs->set_throttled_vcp_size(
stream_encoder,
avg_time_slots_per_mtp);
+#endif
/* TODO: which component is responsible for remove payload table? */
if (mst_mode) {
@@ -3147,8 +3805,16 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
&proposed_table,
false)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+#else
update_mst_stream_alloc_table(
link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+#endif
}
else {
DC_LOG_WARNING("Failed to update"
@@ -3164,6 +3830,20 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#else
DC_LOG_MST("stream_enc[%d]: %p "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
@@ -3173,11 +3853,44 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
i,
link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#endif
+ }
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ enum dc_status status;
+ uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
+
+ for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
+ mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count;
+
+ status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
+ mst_alloc_slots, &prev_mst_slots_in_use);
+ ASSERT(status != DC_NOT_SUPPORTED);
+ DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
+ status, mst_alloc_slots, prev_mst_slots_in_use);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->update_stream_allocation_table(
+ hpo_dp_link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
link_encoder->funcs->update_mst_stream_allocation_table(
link_encoder,
&link->mst_stream_alloc_table);
+#endif
if (mst_mode) {
dm_helpers_dp_mst_poll_for_allocation_change_trigger(
@@ -3198,6 +3911,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct link_encoder *link_enc = NULL;
+ struct dc_state *state = pipe_ctx->stream->ctx->dc->current_state;
+ struct link_enc_assignment link_enc_assign;
+ int i;
+#endif
+
if (cp_psp && cp_psp->funcs.update_stream_config) {
struct cp_psp_stream_config config = {0};
enum dp_panel_mode panel_mode =
@@ -3209,8 +3929,86 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
#if defined(CONFIG_DRM_AMD_DC_DCN)
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
- config.link_enc_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- config.phy_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
+ pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ link_enc = pipe_ctx->stream->link->link_enc;
+ config.dio_output_type = pipe_ctx->stream->link->ep_type;
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_enc = pipe_ctx->stream->link->link_enc;
+ else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(
+ pipe_ctx->stream->ctx->dc,
+ pipe_ctx->stream);
+ }
+ // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0
+ config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ //look up the link_enc_assignment for the current pipe_ctx
+ for (i = 0; i < state->stream_count; i++) {
+ if (pipe_ctx->stream == state->streams[i]) {
+ link_enc_assign = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+ }
+ }
+ // Add flag to guard new A0 DIG mapping
+ if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true) {
+ config.dig_be = link_enc_assign.eng_id;
+ config.dio_output_type = pipe_ctx->stream->link->ep_type;
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ } else {
+ config.dio_output_type = 0;
+ config.dio_output_idx = 0;
+ }
+
+ // Add flag to guard B0 implementation
+ if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
+ link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ link_enc = link_enc_assign.stream->link_enc;
+
+ // enum ID 1-4 maps to DPIA PHY ID 0-3
+ config.phy_idx = link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1;
+ } else { // for non DPIA mode over B0, ABCDE maps to 01564
+
+ switch (link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ config.phy_idx = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ config.phy_idx = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ config.phy_idx = 5;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ config.phy_idx = 6;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ config.phy_idx = 4;
+ break;
+ default:
+ config.phy_idx = 0;
+ break;
+ }
+
+ }
+ }
+ } else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(
+ pipe_ctx->stream->ctx->dc,
+ pipe_ctx->stream);
+ config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. */
+ }
+ ASSERT(link_enc);
+ if (link_enc)
+ config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
+ config.link_enc_idx = pipe_ctx->stream->link->hpo_dp_link_enc->inst;
+ config.dp2_enabled = 1;
+ }
#endif
config.dpms_off = dpms_off;
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
@@ -3222,15 +4020,103 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
}
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct link_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ uint8_t req_slot_count = 0;
+	uint8_t vc_id = 1; /* VC ID always 1 for SST */
+
+ struct dc_link_settings link_settings = {0};
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ decide_link_settings(stream, &link_settings);
+ stream->link->cur_link_settings = link_settings;
+
+ /* Enable clock, Configure lane count, and Enable Link Encoder*/
+ enable_dp_hpo_output(stream->link, &stream->link->cur_link_settings);
+
+#ifdef DIAGS_BUILD
+ /* Workaround for FPGA HPO capture DP link data:
+ * HPO capture will set link to active mode
+ * This workaround is required to get a capture from start of frame
+ */
+ if (!dc->debug.fpga_hpo_capture_en) {
+ struct encoder_set_dp_phy_pattern_param params = {0};
+ params.dp_phy_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+
+ /* Set link active */
+ stream->link->hpo_dp_link_enc->funcs->set_link_test_pattern(
+ stream->link->hpo_dp_link_enc,
+ &params);
+ }
+#endif
+
+ /* Enable DP_STREAM_ENC */
+ dc->hwss.enable_stream(pipe_ctx);
+
+	/* Set DSC PPS SDP (AKA "info frames") */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ dp_set_dsc_pps_sdp(pipe_ctx, true, true);
+ }
+
+ /* Allocate Payload */
+ if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) {
+ // MST case
+ uint8_t i;
+
+ proposed_table.stream_count = state->stream_count;
+ for (i = 0; i < state->stream_count; i++) {
+ avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ proposed_table.stream_allocations[i].slot_count = req_slot_count;
+ proposed_table.stream_allocations[i].vcp_id = i+1;
+ /* NOTE: This makes assumption that pipe_ctx index is same as stream index */
+ proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc;
+ }
+ } else {
+ // SST case
+ avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, stream->link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+		proposed_table.stream_count = 1; /* Always 1 stream for SST */
+ proposed_table.stream_allocations[0].slot_count = req_slot_count;
+ proposed_table.stream_allocations[0].vcp_id = vc_id;
+ proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+ }
+
+ stream->link->hpo_dp_link_enc->funcs->update_stream_allocation_table(
+ stream->link->hpo_dp_link_enc,
+ &proposed_table);
+
+ stream->link->hpo_dp_link_enc->funcs->set_throttled_vcp_size(
+ stream->link->hpo_dp_link_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
+ avg_time_slots_per_mtp);
+
+ dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings);
+}
+#endif
+
void core_link_enable_stream(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
enum dc_status status;
+ struct link_encoder *link_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
+ struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
#endif
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -3238,23 +4124,57 @@ void core_link_enable_stream(
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
+ if (dc->res_pool->funcs->link_encs_assign && stream->link->ep_type != DISPLAY_ENDPOINT_PHY)
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(dc, stream);
+ else
+ link_enc = stream->link->link_enc;
+ ASSERT(link_enc);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
+ && !is_dp_128b_132b_signal(pipe_ctx)) {
+#else
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
- stream->link->link_enc->funcs->setup(
- stream->link->link_enc,
- pipe_ctx->stream->signal);
+#endif
+ if (link_enc)
+ link_enc->funcs->setup(
+ link_enc,
+ pipe_ctx->stream->signal);
pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst,
stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
}
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->set_stream_attribute(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &stream->timing,
+ stream->output_color_space,
+ stream->use_vsc_sdp_for_colorimetry,
+ stream->timing.flags.DSC,
+ false);
+ otg_out_dest = OUT_MUX_HPO_DP;
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ stream->output_color_space,
+ stream->use_vsc_sdp_for_colorimetry,
+ stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+ }
+#else
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
stream->output_color_space,
stream->use_vsc_sdp_for_colorimetry,
stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+#endif
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
@@ -3288,9 +4208,18 @@ void core_link_enable_stream(
pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ // Enable VPG before building infoframe
+ if (vpg && vpg->funcs->vpg_poweron)
+ vpg->funcs->vpg_poweron(vpg);
+#endif
+
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
@@ -3365,10 +4294,16 @@ void core_link_enable_stream(
* as a workaround for the incorrect value being applied
* from transmitter control.
*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
+ is_dp_128b_132b_signal(pipe_ctx)))
+#else
if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
- stream->link->link_enc->funcs->setup(
- stream->link->link_enc,
- pipe_ctx->stream->signal);
+#endif
+ if (link_enc)
+ link_enc->funcs->setup(
+ link_enc,
+ pipe_ctx->stream->signal);
dc->hwss.enable_stream(pipe_ctx);
@@ -3377,12 +4312,17 @@ void core_link_enable_stream(
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal)) {
dp_set_dsc_on_rx(pipe_ctx, true);
- dp_set_dsc_pps_sdp(pipe_ctx, true);
+ dp_set_dsc_pps_sdp(pipe_ctx, true, true);
}
}
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_link_allocate_mst_payload(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ is_dp_128b_132b_signal(pipe_ctx))
+ dc_link_update_sst_payload(pipe_ctx, true);
+#endif
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
@@ -3399,6 +4339,11 @@ void core_link_enable_stream(
dc->hwss.enable_audio_stream(pipe_ctx);
} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx);
+ }
+#endif
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, true);
@@ -3415,6 +4360,12 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
+#endif
if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
@@ -3434,6 +4385,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ is_dp_128b_132b_signal(pipe_ctx))
+ dc_link_update_sst_payload(pipe_ctx, false);
+#endif
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
struct ext_hdmi_settings settings = {0};
@@ -3460,14 +4416,44 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ !is_dp_128b_132b_signal(pipe_ctx)) {
+
+		/* In DP1.x SST mode, our encoder will go to TPS1
+		 * when the link is on but the stream is off.
+		 * Disabling the link before the stream avoids exposing the
+		 * TPS1 pattern during the disable sequence, which confuses
+		 * some receivers' state machines.
+		 * In DP2 or MST mode, our encoder stays video active.
+		 */
+ disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+ dc->hwss.disable_stream(pipe_ctx);
+ } else {
+ dc->hwss.disable_stream(pipe_ctx);
+ disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+ }
+#else
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
dc->hwss.disable_stream(pipe_ctx);
+#endif
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, false);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+ pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
+ }
+#endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (vpg && vpg->funcs->vpg_powerdown)
+ vpg->funcs->vpg_powerdown(vpg);
+#endif
}
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
@@ -3600,6 +4586,13 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
if (link_setting != NULL) {
link->preferred_link_setting = *link_setting;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(link_setting) ==
+ DP_128b_132b_ENCODING && !link->hpo_dp_link_enc) {
+ if (!add_dp_hpo_link_encoder_to_link(link))
+ memset(&link->preferred_link_setting, 0, sizeof(link->preferred_link_setting));
+ }
+#endif
} else {
link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
@@ -3641,6 +4634,38 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint32_t total_data_bw_efficiency_x10000 = 0;
+ uint32_t link_rate_per_lane_kbps = 0;
+
+ switch (dp_get_link_encoding_format(link_setting)) {
+ case DP_8b_10b_ENCODING:
+ /* For 8b/10b encoding:
+		 * link rate is defined in units of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane.
+		 * Data bandwidth efficiency is 80%, with an additional ~3% overhead when FEC is enabled.
+ */
+ link_rate_per_lane_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
+ if (dc_link_should_enable_fec(link)) {
+ total_data_bw_efficiency_x10000 /= 100;
+ total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
+ }
+ break;
+ case DP_128b_132b_ENCODING:
+ /* For 128b/132b encoding:
+		 * link rate is defined in units of 10 Mbps per lane.
+		 * Total data bandwidth efficiency is always 96.71%.
+ */
+ link_rate_per_lane_kbps = link_setting->link_rate * 10000;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
+ break;
+ default:
+ break;
+ }
+
+ /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */
+ return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000;
+#else
uint32_t link_bw_kbps =
link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
@@ -3671,9 +4696,9 @@ uint32_t dc_link_bandwidth_kbps(
long long fec_link_bw_kbps = link_bw_kbps * 970LL;
link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL));
}
-
return link_bw_kbps;
+#endif
}
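
Substituting representative settings into this formula (illustrative arithmetic, taking BITS_PER_DP_BYTE as 10 wire bits per 8b/10b DP byte):

	/* 8b/10b, HBR2 x4, FEC off:
	 *   link_rate = 0x14 (20) -> 20 * 27000 kHz * 10 bits = 5,400,000 kbps/lane
	 *   5,400,000 * 4 lanes * 80.00% efficiency           = 17,280,000 kbps
	 *
	 * 128b/132b, UHBR10 x4:
	 *   link_rate = 1000 (10 Mbps units) -> 10,000,000 kbps/lane
	 *   10,000,000 * 4 lanes * 96.71% efficiency          = 38,684,000 kbps
	 */
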
const struct dc_link_settings *dc_link_get_link_cap(
@@ -3700,14 +4725,14 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign) {
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
if (link_enc == NULL)
- link_enc = link_enc_cfg_get_next_avail_link_enc(link->dc, link->dc->current_state);
+ link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
} else
link_enc = link->link_enc;
ASSERT(link_enc);
- return (dc_is_dp_signal(link->connector_signal) &&
+ return (dc_is_dp_signal(link->connector_signal) && link_enc &&
link_enc->features.fec_supported &&
link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
@@ -3721,8 +4746,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
link->local_sink->edid_caps.panel_patch.disable_fec) ||
- (link->connector_signal == SIGNAL_TYPE_EDP &&
- link->dc->debug.force_enable_edp_fec == false)) // Disable FEC for eDP
+		(link->connector_signal == SIGNAL_TYPE_EDP))
is_fec_disable = true;
if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index ba6b56f20269..60539b1f2a80 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -196,7 +196,8 @@ static void ddc_service_construct(
ddc_service->link = init_data->link;
ddc_service->ctx = init_data->ctx;
- if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
+ if (init_data->is_dpia_link ||
+ dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info) != BP_RESULT_OK) {
ddc_service->ddc_pin = NULL;
} else {
DC_LOGGER_INIT(ddc_service->ctx->logger);
@@ -553,6 +554,7 @@ bool dal_ddc_service_query_ddc_data(
payload.address = address;
payload.reply = NULL;
payload.defer_delay = get_defer_delay(ddc);
+ payload.write_status_update = false;
if (write_size != 0) {
payload.write = true;
@@ -624,24 +626,24 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
- payload->length;
+ payload->length ? true : false;
+ uint32_t payload_length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
current_payload.address = payload->address;
current_payload.data = &payload->data[retrieved];
current_payload.defer_delay = payload->defer_delay;
current_payload.i2c_over_aux = payload->i2c_over_aux;
- current_payload.length = is_end_of_payload ?
- payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
- /* set mot (middle of transaction) to false
- * if it is the last payload
- */
+ current_payload.length = payload_length;
+ /* set mot (middle of transaction) to false if it is the last payload */
current_payload.mot = is_end_of_payload ? payload->mot:true;
+ current_payload.write_status_update = false;
current_payload.reply = payload->reply;
current_payload.write = payload->write;
ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
- retrieved += current_payload.length;
+ retrieved += payload_length;
} while (retrieved < payload->length && ret == true);
return ret;
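
The loop above walks a long transaction in DEFAULT_AUX_MAX_DATA_SIZE chunks, keeping MOT (middle of transaction) asserted on every chunk but the last, which inherits the caller's mot. Tracing a hypothetical 40-byte transfer with the usual 16-byte maximum:

	/* chunk 0: data[ 0..15], length 16, mot = true
	 * chunk 1: data[16..31], length 16, mot = true
	 * chunk 2: data[32..39], length  8, mot = payload->mot
	 * retrieved advances 16, 16, 8 and the loop exits at 40
	 */
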
@@ -658,10 +660,12 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result)
{
- if (dc_enable_dmub_notifications(ddc->ctx->dc))
+ if (ddc->ctx->dc->debug.enable_dmub_aux_for_legacy_ddc ||
+ !ddc->ddc_pin) {
return dce_aux_transfer_dmub_raw(ddc, payload, operation_result);
- else
+ } else {
return dce_aux_transfer_raw(ddc, payload, operation_result);
+ }
}
/* dc_link_aux_transfer_with_retries() - Attempt to submit an
@@ -760,7 +764,7 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &tmds_config, sizeof(tmds_config));
if (tmds_config & 0x1) {
- union hdmi_scdc_status_flags_data status_data = { {0} };
+ union hdmi_scdc_status_flags_data status_data = {0};
uint8_t scramble_status = 0;
offset = HDMI_SCDC_SCRAMBLER_STATUS;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 6d655e158267..cc25ba0ec7db 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -36,6 +36,7 @@
#include "dpcd_defs.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "inc/dc_link_dpia.h"
#include "inc/link_enc_cfg.h"
/*Travis*/
@@ -61,6 +62,43 @@ enum {
POST_LT_ADJ_REQ_TIMEOUT = 200
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct dp_lt_fallback_entry {
+ enum dc_lane_count lane_count;
+ enum dc_link_rate link_rate;
+};
+
+static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
+ /* This link training fallback array is ordered by
+ * link bandwidth from highest to lowest.
+	 * The DP spec makes it a normative policy to always
+	 * choose the next-highest link bandwidth during
+	 * link training fallback.
+ */
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR20},
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR20},
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR10},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH3},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR20},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR10},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH2},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH3},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR10},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH2},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH3},
+ {LANE_COUNT_FOUR, LINK_RATE_LOW},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH2},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH},
+ {LANE_COUNT_TWO, LINK_RATE_LOW},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH},
+ {LANE_COUNT_ONE, LINK_RATE_LOW},
+};
+#endif
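
The array's body is the policy; the walking code lives elsewhere. A minimal sketch of how such an ordered table is typically consumed — a hypothetical helper, not the decide_fallback_link_setting() implementation:

	static bool next_lt_fallback(struct dc_link_settings *cur)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(dp_lt_fallbacks) - 1; i++) {
			if (dp_lt_fallbacks[i].lane_count == cur->lane_count &&
			    dp_lt_fallbacks[i].link_rate == cur->link_rate) {
				/* step to the next, strictly lower-bandwidth entry */
				cur->lane_count = dp_lt_fallbacks[i + 1].lane_count;
				cur->link_rate = dp_lt_fallbacks[i + 1].link_rate;
				return true;
			}
		}
		return false;	/* already at the lowest supported bandwidth */
	}
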
+
static bool decide_fallback_link_setting(
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
@@ -68,21 +106,37 @@ static bool decide_fallback_link_setting(
static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b);
+static void maximize_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
+static void override_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
const struct dc_link_settings *link_settings)
{
union training_aux_rd_interval training_rd_interval;
uint32_t wait_in_micro_secs = 100;
-
+#if defined(CONFIG_DRM_AMD_DC_DCN)
memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ }
+#else
core_link_read_dpcd(
link,
DP_TRAINING_AUX_RD_INTERVAL,
(uint8_t *)&training_rd_interval,
sizeof(training_rd_interval));
if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
- wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+#endif
return wait_in_micro_secs;
}
@@ -90,6 +144,36 @@ static uint32_t get_eq_training_aux_rd_interval(
struct dc_link *link,
const struct dc_link_settings *link_settings)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union training_aux_rd_interval training_rd_interval;
+
+ memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ core_link_read_dpcd(
+ link,
+ DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ }
+
+ switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) {
+ case 0: return 400;
+ case 1: return 4000;
+ case 2: return 8000;
+ case 3: return 12000;
+ case 4: return 16000;
+ case 5: return 32000;
+ case 6: return 64000;
+ default: return 400;
+ }
+#else
union training_aux_rd_interval training_rd_interval;
uint32_t wait_in_micro_secs = 400;
@@ -109,13 +193,21 @@ static uint32_t get_eq_training_aux_rd_interval(
}
return wait_in_micro_secs;
+#endif
}
void dp_wait_for_training_aux_rd_interval(
struct dc_link *link,
uint32_t wait_in_micro_secs)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (wait_in_micro_secs > 16000)
+ msleep(wait_in_micro_secs/1000);
+ else
+ udelay(wait_in_micro_secs);
+#else
udelay(wait_in_micro_secs);
+#endif
DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
__func__,
@@ -143,6 +235,17 @@ enum dpcd_training_patterns
case DP_TRAINING_PATTERN_SEQUENCE_4:
dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_TPS1:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS1;
+ break;
+ case DP_128b_132b_TPS2:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS2;
+ break;
+ case DP_128b_132b_TPS2_CDS:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS;
+ break;
+#endif
case DP_TRAINING_PATTERN_VIDEOIDLE:
dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE;
break;
@@ -160,7 +263,7 @@ static void dpcd_set_training_pattern(
struct dc_link *link,
enum dc_dp_training_pattern training_pattern)
{
- union dpcd_training_pattern dpcd_pattern = { {0} };
+ union dpcd_training_pattern dpcd_pattern = {0};
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dc_dp_training_pattern_to_dpcd_training_pattern(
@@ -181,13 +284,57 @@ static void dpcd_set_training_pattern(
static enum dc_dp_training_pattern decide_cr_training_pattern(
const struct dc_link_settings *link_settings)
{
- return DP_TRAINING_PATTERN_SEQUENCE_1;
+ switch (dp_get_link_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ default:
+ return DP_TRAINING_PATTERN_SEQUENCE_1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_ENCODING:
+ return DP_128b_132b_TPS1;
+#endif
+ }
}
static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
const struct dc_link_settings *link_settings)
{
struct link_encoder *link_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct encoder_feature_support *enc_caps;
+ struct dpcd_caps *rx_caps = &link->dpcd_caps;
+ enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+
+ /* Access link encoder capability based on whether it is statically
+ * or dynamically assigned to a link.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
+ enc_caps = &link_enc->features;
+
+ switch (dp_get_link_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ if (enc_caps->flags.bits.IS_TPS4_CAPABLE &&
+ rx_caps->max_down_spread.bits.TPS4_SUPPORTED)
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+ else if (enc_caps->flags.bits.IS_TPS3_CAPABLE &&
+ rx_caps->max_ln_count.bits.TPS3_SUPPORTED)
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_3;
+ else
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+ break;
+ case DP_128b_132b_ENCODING:
+ pattern = DP_128b_132b_TPS2;
+ break;
+ default:
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+ break;
+ }
+ return pattern;
+#else
enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2;
struct encoder_feature_support *features;
struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
@@ -197,7 +344,7 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -218,7 +365,38 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li
return DP_TRAINING_PATTERN_SEQUENCE_3;
return DP_TRAINING_PATTERN_SEQUENCE_2;
+#endif
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
+{
+ uint8_t link_rate = 0;
+ enum dp_link_encoding encoding = dp_get_link_encoding_format(link_settings);
+
+ if (encoding == DP_128b_132b_ENCODING)
+ switch (link_settings->link_rate) {
+ case LINK_RATE_UHBR10:
+ link_rate = 0x1;
+ break;
+ case LINK_RATE_UHBR20:
+ link_rate = 0x2;
+ break;
+ case LINK_RATE_UHBR13_5:
+ link_rate = 0x4;
+ break;
+ default:
+ link_rate = 0;
+ break;
+ }
+ else if (encoding == DP_8b_10b_ENCODING)
+ link_rate = (uint8_t) link_settings->link_rate;
+ else
+ link_rate = 0;
+
+ return link_rate;
}
+#endif
enum dc_status dpcd_set_link_settings(
struct dc_link *link,
@@ -227,8 +405,8 @@ enum dc_status dpcd_set_link_settings(
uint8_t rate;
enum dc_status status;
- union down_spread_ctrl downspread = { {0} };
- union lane_count_set lane_count_set = { {0} };
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -269,7 +447,11 @@ enum dc_status dpcd_set_link_settings(
status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
&lt_settings->link_settings.link_rate_set, 1);
} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ rate = get_dpcd_link_rate(&lt_settings->link_settings);
+#else
rate = (uint8_t) (lt_settings->link_settings.link_rate);
+#endif
status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
@@ -311,6 +493,10 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
disable_scrabled_data_symbols = 1;
break;
case DP_TRAINING_PATTERN_SEQUENCE_4:
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_TPS1:
+ case DP_128b_132b_TPS2:
+#endif
disable_scrabled_data_symbols = 0;
break;
default:
@@ -333,13 +519,10 @@ static void dpcd_set_lt_pattern_and_lane_settings(
enum dc_dp_training_pattern pattern,
uint32_t offset)
{
- union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
-
uint32_t dpcd_base_lt_offset;
uint8_t dpcd_lt_buffer[5] = {0};
- union dpcd_training_pattern dpcd_pattern = { {0} };
- uint32_t lane;
+	union dpcd_training_pattern dpcd_pattern = {0};
uint32_t size_in_bytes;
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
@@ -372,53 +555,57 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_base_lt_offset,
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
- /*****************************************************************
- * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
- *****************************************************************/
- for (lane = 0; lane <
- (uint32_t)(lt_settings->link_settings.lane_count); lane++) {
-
- dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
- (uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
- dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
- (uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);
-
- dpcd_lane[lane].bits.MAX_SWING_REACHED =
- (lt_settings->lane_settings[lane].VOLTAGE_SWING ==
- VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
- dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
- (lt_settings->lane_settings[lane].PRE_EMPHASIS ==
- PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
- }
/* concatenate everything into one buffer*/
-
- size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
+ size_in_bytes = lt_settings->link_settings.lane_count *
+ sizeof(lt_settings->dpcd_lane_settings[0]);
// 0x00103 - 0x00102
memmove(
&dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],
- dpcd_lane,
+ lt_settings->dpcd_lane_settings,
size_in_bytes);
if (is_repeater(link, offset)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
offset,
dpcd_base_lt_offset,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
dpcd_base_lt_offset,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
}
if (edp_workaround) {
/* for eDP write in 2 parts because the 5-byte burst is
@@ -433,9 +620,18 @@ static void dpcd_set_lt_pattern_and_lane_settings(
core_link_write_dpcd(
link,
DP_TRAINING_LANE0_SET,
- (uint8_t *)(dpcd_lane),
+ (uint8_t *)(lt_settings->dpcd_lane_settings),
size_in_bytes);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ } else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+ sizeof(dpcd_lt_buffer));
+#endif
} else
/* write it all in (1 + number-of-lanes)-byte burst*/
core_link_write_dpcd(
@@ -443,8 +639,6 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_base_lt_offset,
dpcd_lt_buffer,
size_in_bytes + sizeof(dpcd_pattern.raw));
-
- link->cur_lane_setting = lt_settings->lane_settings[0];
}
bool dp_is_cr_done(enum dc_lane_count ln_count,
@@ -486,27 +680,75 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
return align_status.bits.INTERLANE_ALIGN_DONE == 1;
}
-void dp_update_drive_settings(
- struct link_training_settings *dest,
- struct link_training_settings src)
+void dp_hw_to_dpcd_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint8_t lane = 0;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS);
+ dpcd_lane_settings[lane].bits.MAX_SWING_REACHED =
+ (hw_lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (hw_lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE =
+ hw_lane_settings[lane].FFE_PRESET.settings.level;
+ }
+#endif
+ }
+}
+
+void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
{
uint32_t lane;
- for (lane = 0; lane < src.link_settings.lane_count; lane++) {
- if (dest->voltage_swing == NULL)
- dest->lane_settings[lane].VOLTAGE_SWING = src.lane_settings[lane].VOLTAGE_SWING;
- else
- dest->lane_settings[lane].VOLTAGE_SWING = *dest->voltage_swing;
- if (dest->pre_emphasis == NULL)
- dest->lane_settings[lane].PRE_EMPHASIS = src.lane_settings[lane].PRE_EMPHASIS;
- else
- dest->lane_settings[lane].PRE_EMPHASIS = *dest->pre_emphasis;
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ hw_lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)(ln_adjust[lane].bits.
+ VOLTAGE_SWING_LANE);
+ hw_lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)(ln_adjust[lane].bits.
+ PRE_EMPHASIS_LANE);
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ hw_lane_settings[lane].FFE_PRESET.raw =
+ ln_adjust[lane].tx_ffe.PRESET_VALUE;
+ }
+#endif
+ }
+ dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
- if (dest->post_cursor2 == NULL)
- dest->lane_settings[lane].POST_CURSOR2 = src.lane_settings[lane].POST_CURSOR2;
- else
- dest->lane_settings[lane].POST_CURSOR2 = *dest->post_cursor2;
+ if (lt_settings->disallow_per_lane_settings) {
+ /* we find the maximum of the requested settings across all lanes*/
+ /* and set this maximum for all lanes*/
+ maximize_lane_settings(lt_settings, hw_lane_settings);
+ override_lane_settings(lt_settings, hw_lane_settings);
+
+ if (lt_settings->always_match_dpcd_with_hw_lane_settings)
+ dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
}
+
}
static uint8_t get_nibble_at_index(const uint8_t *buf,
@@ -536,46 +778,31 @@ static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
}
-static void find_max_drive_settings(
- const struct link_training_settings *link_training_setting,
- struct link_training_settings *max_lt_setting)
+static void maximize_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
uint32_t lane;
struct dc_lane_settings max_requested;
- max_requested.VOLTAGE_SWING =
- link_training_setting->
- lane_settings[0].VOLTAGE_SWING;
- max_requested.PRE_EMPHASIS =
- link_training_setting->
- lane_settings[0].PRE_EMPHASIS;
- /*max_requested.postCursor2 =
- * link_training_setting->laneSettings[0].postCursor2;*/
+ max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING;
+ max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET;
+#endif
/* Determine what the maximum of the requested settings are*/
- for (lane = 1; lane < link_training_setting->link_settings.lane_count;
- lane++) {
- if (link_training_setting->lane_settings[lane].VOLTAGE_SWING >
- max_requested.VOLTAGE_SWING)
-
- max_requested.VOLTAGE_SWING =
- link_training_setting->
- lane_settings[lane].VOLTAGE_SWING;
-
- if (link_training_setting->lane_settings[lane].PRE_EMPHASIS >
- max_requested.PRE_EMPHASIS)
- max_requested.PRE_EMPHASIS =
- link_training_setting->
- lane_settings[lane].PRE_EMPHASIS;
-
- /*
- if (link_training_setting->laneSettings[lane].postCursor2 >
- max_requested.postCursor2)
- {
- max_requested.postCursor2 =
- link_training_setting->laneSettings[lane].postCursor2;
- }
- */
+ for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) {
+ if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING)
+ max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING;
+
+ if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS)
+ max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (lane_settings[lane].FFE_PRESET.settings.level >
+ max_requested.FFE_PRESET.settings.level)
+ max_requested.FFE_PRESET.settings.level =
+ lane_settings[lane].FFE_PRESET.settings.level;
+#endif
}
/* make sure the requested settings are
@@ -585,10 +812,10 @@ static void find_max_drive_settings(
if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
- /*
- if (max_requested.postCursor2 > PostCursor2_MaxLevel)
- max_requested.postCursor2 = PostCursor2_MaxLevel;
- */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
+ max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
+#endif
/* make sure the pre-emphasis matches the voltage swing*/
if (max_requested.PRE_EMPHASIS >
@@ -598,57 +825,58 @@ static void find_max_drive_settings(
get_max_pre_emphasis_for_voltage_swing(
max_requested.VOLTAGE_SWING);
- /*
- * Post Cursor2 levels are completely independent from
- * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels
- * can only be applied to each allowable combination of voltage
- * swing and pre-emphasis levels */
- /* if ( max_requested.postCursor2 >
- * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing))
- * max_requested.postCursor2 =
- * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing);
- */
-
- max_lt_setting->link_settings.link_rate =
- link_training_setting->link_settings.link_rate;
- max_lt_setting->link_settings.lane_count =
- link_training_setting->link_settings.lane_count;
- max_lt_setting->link_settings.link_spread =
- link_training_setting->link_settings.link_spread;
-
- for (lane = 0; lane <
- link_training_setting->link_settings.lane_count;
- lane++) {
- max_lt_setting->lane_settings[lane].VOLTAGE_SWING =
- max_requested.VOLTAGE_SWING;
- max_lt_setting->lane_settings[lane].PRE_EMPHASIS =
- max_requested.PRE_EMPHASIS;
- /*max_lt_setting->laneSettings[lane].postCursor2 =
- * max_requested.postCursor2;
- */
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING;
+ lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET;
+#endif
}
+}
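
The clamp at the end of maximize_lane_settings() calls get_max_pre_emphasis_for_voltage_swing(), whose table is outside this hunk. Under the usual DP 8b/10b rule that swing and pre-emphasis levels sum to at most 3, it reduces to the sketch below; the driver's actual table may differ per ASIC.

/* Hedged sketch of the VS/PE coupling: higher swing leaves less
 * pre-emphasis headroom, so max PE level = 3 - VS level.
 */
static unsigned int max_pe_for_vs(unsigned int vs_level)
{
	return vs_level < 3 ? 3 - vs_level : 0;
}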
+
+static void override_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint32_t lane;
+
+ if (lt_settings->voltage_swing == NULL &&
+ lt_settings->pre_emphasis == NULL &&
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lt_settings->ffe_preset == NULL &&
+#endif
+ lt_settings->post_cursor2 == NULL)
+ return;
+
+	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (lt_settings->voltage_swing)
+ lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
+ if (lt_settings->pre_emphasis)
+ lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis;
+ if (lt_settings->post_cursor2)
+ lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (lt_settings->ffe_preset)
+ lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset;
+#endif
+ }
}
-enum dc_status dp_get_lane_status_and_drive_settings(
+enum dc_status dp_get_lane_status_and_lane_adjust(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
- union lane_status *ln_status,
- union lane_align_status_updated *ln_status_updated,
- struct link_training_settings *req_settings,
+ union lane_status ln_status[LANE_COUNT_DP_MAX],
+ union lane_align_status_updated *ln_align,
+ union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
uint32_t offset)
{
unsigned int lane01_status_address = DP_LANE0_1_STATUS;
uint8_t lane_adjust_offset = 4;
unsigned int lane01_adjust_address;
uint8_t dpcd_buf[6] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
- struct link_training_settings request_settings = { {0} };
uint32_t lane;
enum dc_status status;
- memset(req_settings, '\0', sizeof(struct link_training_settings));
-
if (is_repeater(link, offset)) {
lane01_status_address =
DP_LANE0_1_STATUS_PHY_REPEATER1 +
@@ -668,11 +896,11 @@ enum dc_status dp_get_lane_status_and_drive_settings(
ln_status[lane].raw =
get_nibble_at_index(&dpcd_buf[0], lane);
- dpcd_lane_adjust[lane].raw =
+ ln_adjust[lane].raw =
get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);
}
- ln_status_updated->raw = dpcd_buf[2];
+ ln_align->raw = dpcd_buf[2];
if (is_repeater(link, offset)) {
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -711,39 +939,6 @@ enum dc_status dp_get_lane_status_and_drive_settings(
dpcd_buf[lane_adjust_offset + 1]);
}
- /*copy to req_settings*/
- request_settings.link_settings.lane_count =
- link_training_setting->link_settings.lane_count;
- request_settings.link_settings.link_rate =
- link_training_setting->link_settings.link_rate;
- request_settings.link_settings.link_spread =
- link_training_setting->link_settings.link_spread;
-
- for (lane = 0; lane <
- (uint32_t)(link_training_setting->link_settings.lane_count);
- lane++) {
-
- request_settings.lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
- VOLTAGE_SWING_LANE);
- request_settings.lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
- PRE_EMPHASIS_LANE);
- }
-
- /*Note: for postcursor2, read adjusted
- * postcursor2 settings from*/
- /*DpcdAddress_AdjustRequestPostCursor2 =
- *0x020C (not implemented yet)*/
-
- /* we find the maximum of the requested settings across all lanes*/
- /* and set this maximum for all lanes*/
- find_max_drive_settings(&request_settings, req_settings);
-
- /* if post cursor 2 is needed in the future,
- * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
- */
-
return status;
}
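
dp_get_lane_status_and_lane_adjust() unpacks both the status and adjust-request registers with get_nibble_at_index(), defined earlier in this file. Its behavior restated as a standalone sketch:

#include <stdint.h>

/* Each DPCD status/adjust byte carries two lanes: the even lane in the
 * low nibble, the odd lane in the high nibble.
 */
static uint8_t nibble_at_index(const uint8_t *buf, uint32_t index)
{
	uint8_t value = buf[index / 2];

	if (index % 2)
		value >>= 4;	/* odd lane: take the high nibble */

	return value & 0x0F;
}
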
@@ -752,8 +947,6 @@ enum dc_status dpcd_set_lane_settings(
const struct link_training_settings *link_training_setting,
uint32_t offset)
{
- union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
- uint32_t lane;
unsigned int lane0_set_address;
enum dc_status status;
@@ -763,71 +956,53 @@ enum dc_status dpcd_set_lane_settings(
lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- for (lane = 0; lane <
- (uint32_t)(link_training_setting->
- link_settings.lane_count);
- lane++) {
- dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
- (uint8_t)(link_training_setting->
- lane_settings[lane].VOLTAGE_SWING);
- dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
- (uint8_t)(link_training_setting->
- lane_settings[lane].PRE_EMPHASIS);
- dpcd_lane[lane].bits.MAX_SWING_REACHED =
- (link_training_setting->
- lane_settings[lane].VOLTAGE_SWING ==
- VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
- dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
- (link_training_setting->
- lane_settings[lane].PRE_EMPHASIS ==
- PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
- }
-
status = core_link_write_dpcd(link,
lane0_set_address,
- (uint8_t *)(dpcd_lane),
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
link_training_setting->link_settings.lane_count);
- /*
- if (LTSettings.link.rate == LinkRate_High2)
- {
- DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0};
- for ( uint32_t lane = 0;
- lane < lane_count_DPMax; lane++)
- {
- dpcd_lane2[lane].bits.post_cursor2_set =
- static_cast<unsigned char>(
- LTSettings.laneSettings[lane].postCursor2);
- dpcd_lane2[lane].bits.max_post_cursor2_reached = 0;
- }
- m_pDpcdAccessSrv->WriteDpcdData(
- DpcdAddress_Lane0Set2,
- reinterpret_cast<unsigned char*>(dpcd_lane2),
- LTSettings.link.lanes);
- }
- */
-
if (is_repeater(link, offset)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ offset,
+ lane0_set_address,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
offset,
lane0_set_address,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ lane0_set_address,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
lane0_set_address,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
}
- link->cur_lane_setting = link_training_setting->lane_settings[0];
return status;
}
@@ -839,7 +1014,7 @@ bool dp_is_max_vs_reached(
for (lane = 0; lane <
(uint32_t)(lt_settings->link_settings.lane_count);
lane++) {
- if (lt_settings->lane_settings[lane].VOLTAGE_SWING
+ if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET
== VOLTAGE_SWING_MAX_LEVEL)
return true;
}
@@ -869,17 +1044,17 @@ static bool perform_post_lt_adj_req_sequence(
adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
adj_req_timer++) {
- struct link_training_settings req_settings;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated
dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
- dp_get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
DPRX);
if (dpcd_lane_status_updated.bits.
@@ -897,11 +1072,10 @@ static bool perform_post_lt_adj_req_sequence(
for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
if (lt_settings->
- lane_settings[lane].VOLTAGE_SWING !=
- req_settings.lane_settings[lane].
- VOLTAGE_SWING ||
- lt_settings->lane_settings[lane].PRE_EMPHASIS !=
- req_settings.lane_settings[lane].PRE_EMPHASIS) {
+ dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET !=
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE ||
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET !=
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) {
req_drv_setting_changed = true;
break;
@@ -909,8 +1083,8 @@ static bool perform_post_lt_adj_req_sequence(
}
if (req_drv_setting_changed) {
- dp_update_drive_settings(
- lt_settings, req_settings);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
dc_link_dp_set_drive_settings(link,
lt_settings);
@@ -954,6 +1128,14 @@ uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval
case 0x04:
aux_rd_interval_us = 16000;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case 0x05:
+ aux_rd_interval_us = 32000;
+ break;
+ case 0x06:
+ aux_rd_interval_us = 64000;
+ break;
+#endif
default:
break;
}
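
With the two 128b/132b codes added, dp_translate_training_aux_read_interval() is a pure lookup. An equivalent table-driven sketch, assuming the function's default of 400 us for reserved codes (the initializer is outside this hunk):

#include <stdint.h>

/* DPCD TRAINING_AUX_RD_INTERVAL code -> microseconds, including the new
 * 0x05/0x06 values; reserved codes fall back to 400 us.
 */
static uint32_t training_aux_rd_interval_us(uint8_t dpcd_code)
{
	static const uint32_t table_us[] = {
		400, 4000, 8000, 12000, 16000, 32000, 64000,
	};

	if (dpcd_code < sizeof(table_us) / sizeof(table_us[0]))
		return table_us[dpcd_code];

	return 400;
}
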
@@ -982,20 +1164,24 @@ static enum link_training_result perform_channel_equalization_sequence(
struct link_training_settings *lt_settings,
uint32_t offset)
{
- struct link_training_settings req_settings;
enum dc_dp_training_pattern tr_pattern;
uint32_t retries_ch_eq;
uint32_t wait_time_microsec;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = { {0} };
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
/* Note: also check that TPS4 is a supported feature*/
-
tr_pattern = lt_settings->pattern_for_eq;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_repeater(link, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+ tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+#else
if (is_repeater(link, offset))
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+#endif
dp_set_hw_training_pattern(link, tr_pattern, offset);
@@ -1032,12 +1218,12 @@ static enum link_training_result perform_channel_equalization_sequence(
/* 4. Read lane status and requested
* drive settings as set by the sink*/
- dp_get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
offset);
/* 5. check CR done*/
@@ -1051,7 +1237,8 @@ static enum link_training_result perform_channel_equalization_sequence(
return LINK_TRAINING_SUCCESS;
/* 7. update VS/PE/PC2 in lt_settings*/
- dp_update_drive_settings(lt_settings, req_settings);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
return LINK_TRAINING_EQ_FAIL_EQ;
@@ -1077,10 +1264,10 @@ static enum link_training_result perform_clock_recovery_sequence(
uint32_t retries_cr;
uint32_t retry_count;
uint32_t wait_time_microsec;
- struct link_training_settings req_settings;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
retries_cr = 0;
retry_count = 0;
@@ -1134,12 +1321,12 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 4. Read lane status and requested drive
* settings as set by the sink
*/
- dp_get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
offset);
/* 5. check CR done*/
@@ -1147,23 +1334,35 @@ static enum link_training_result perform_clock_recovery_sequence(
return LINK_TRAINING_SUCCESS;
/* 6. max VS reached*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if ((dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) &&
+ dp_is_max_vs_reached(lt_settings))
+ break;
+#else
if (dp_is_max_vs_reached(lt_settings))
break;
+#endif
/* 7. same lane settings*/
/* Note: settings are the same for all lanes,
* so comparing first lane is sufficient*/
- if ((lt_settings->lane_settings[0].VOLTAGE_SWING ==
- req_settings.lane_settings[0].VOLTAGE_SWING)
- && (lt_settings->lane_settings[0].PRE_EMPHASIS ==
- req_settings.lane_settings[0].PRE_EMPHASIS))
+ if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE ==
+ dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE)
retries_cr++;
+#endif
else
retries_cr = 0;
/* 8. update VS/PE/PC2 in lt_settings*/
- dp_update_drive_settings(lt_settings, req_settings);
-
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
retry_count++;
}
@@ -1183,7 +1382,7 @@ static inline enum link_training_result dp_transition_to_video_idle(
struct link_training_settings *lt_settings,
enum link_training_result status)
{
- union lane_count_set lane_count_set = { {0} };
+ union lane_count_set lane_count_set = {0};
/* 4. mainlink output idle pattern*/
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
@@ -1194,7 +1393,11 @@ static inline enum link_training_result dp_transition_to_video_idle(
* TPS4 must be used instead of POST_LT_ADJ_REQ.
*/
if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) {
+#else
lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) {
+#endif
/* delay 5ms after Main Link output idle pattern and then check
* DPCD 0202h.
*/
@@ -1288,8 +1491,40 @@ static inline void decide_8b_10b_training_settings(
lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
lt_settings->enhanced_framing = 1;
lt_settings->should_set_fec_ready = true;
+ lt_settings->disallow_per_lane_settings = true;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static inline void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings)
+{
+ memset(lt_settings, 0, sizeof(*lt_settings));
+
+ lt_settings->link_settings = *link_settings;
+ /* TODO: should decide link spread when populating link_settings */
+ lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED :
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+ lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
+ lt_settings->eq_pattern_time = 2500;
+ lt_settings->eq_wait_time_limit = 400000;
+ lt_settings->eq_loop_count_limit = 20;
+ lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS;
+ lt_settings->cds_pattern_time = 2500;
+ lt_settings->cds_wait_time_limit = (dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
+ lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ?
+ LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT;
+ lt_settings->disallow_per_lane_settings = true;
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+}
+#endif
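
The cds_wait_time_limit computed above grants a fixed budget per hop: 20 ms per LTTPR plus one slot for the DPRX, in microseconds. As a minimal sketch:

#include <stdint.h>

static uint32_t cds_wait_time_limit_us(uint8_t lttpr_count)
{
	return ((uint32_t)lttpr_count + 1) * 20000;	/* 20 ms per hop */
}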
+
void dp_decide_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_settings,
@@ -1297,6 +1532,10 @@ void dp_decide_training_settings(
{
if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
decide_8b_10b_training_settings(link, link_settings, lt_settings);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING)
+ decide_128b_132b_training_settings(link, link_settings, lt_settings);
+#endif
}
static void override_training_settings(
@@ -1319,6 +1558,17 @@ static void override_training_settings(
lt_settings->pre_emphasis = overrides->pre_emphasis;
if (overrides->post_cursor2 != NULL)
lt_settings->post_cursor2 = overrides->post_cursor2;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (overrides->ffe_preset != NULL)
+ lt_settings->ffe_preset = overrides->ffe_preset;
+#endif
+ /* Override HW lane settings with BIOS forced values if present */
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
+ lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = false;
+ }
for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
lt_settings->lane_settings[lane].VOLTAGE_SWING =
lt_settings->voltage_swing != NULL ?
@@ -1334,6 +1584,9 @@ static void override_training_settings(
: POST_CURSOR2_DISABLED;
}
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
/* Initialize training timings */
if (overrides->cr_pattern_time != NULL)
lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
@@ -1378,7 +1631,7 @@ uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
-enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
+static enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
{
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
@@ -1389,7 +1642,7 @@ enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
sizeof(repeater_mode));
}
-enum dc_status configure_lttpr_mode_non_transparent(
+static enum dc_status configure_lttpr_mode_non_transparent(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
@@ -1432,6 +1685,13 @@ enum dc_status configure_lttpr_mode_non_transparent(
if (encoding == DP_8b_10b_ENCODING) {
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ /* Driver does not need to train the first hop. Skip DPCD read and clear
+ * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
+
for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
@@ -1450,7 +1710,7 @@ enum dc_status configure_lttpr_mode_non_transparent(
static void repeater_training_done(struct dc_link *link, uint32_t offset)
{
- union dpcd_training_pattern dpcd_pattern = { {0} };
+ union dpcd_training_pattern dpcd_pattern = {0};
const uint32_t dpcd_base_lt_offset =
DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
@@ -1505,6 +1765,17 @@ static void print_status_message(
case LINK_RATE_HIGH3:
link_rate = "HBR3";
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_UHBR10:
+ link_rate = "UHBR10";
+ break;
+ case LINK_RATE_UHBR13_5:
+ link_rate = "UHBR13.5";
+ break;
+ case LINK_RATE_UHBR20:
+ link_rate = "UHBR20";
+ break;
+#endif
default:
break;
}
@@ -1534,6 +1805,20 @@ static void print_status_message(
case LINK_TRAINING_LINK_LOSS:
lt_result = "Link loss";
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_LT_FAILED:
+ lt_result = "LT_FAILED received";
+ break;
+ case DP_128b_132b_MAX_LOOP_COUNT_REACHED:
+ lt_result = "max loop count reached";
+ break;
+ case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT:
+ lt_result = "channel EQ timeout";
+ break;
+ case DP_128b_132b_CDS_DONE_TIMEOUT:
+ lt_result = "CDS timeout";
+ break;
+#endif
default:
break;
}
@@ -1553,6 +1838,9 @@ static void print_status_message(
}
/* Connectivity log: link training */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */
+#endif
CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s",
link_rate,
lt_settings->link_settings.lane_count,
@@ -1569,6 +1857,9 @@ void dc_link_dp_set_drive_settings(
/* program ASIC PHY settings*/
dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
/* Notify DP sink the PHY settings from source */
dpcd_set_lane_settings(link, lt_settings, DPRX);
}
@@ -1635,9 +1926,23 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
static void dpcd_exit_training_mode(struct dc_link *link)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t sink_status = 0;
+ uint8_t i;
+#endif
/* clear training pattern set */
dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* poll for intra-hop disable */
+ for (i = 0; i < 10; i++) {
+ if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
+ (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
+ break;
+ udelay(1000);
+ }
+#endif
}
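
The new poll in dpcd_exit_training_mode() waits for the sink to clear DP_INTRA_HOP_AUX_REPLY_INDICATION in DP_SINK_STATUS. The same loop as a standalone sketch; read_sink_status() stands in for core_link_read_dpcd(), and the bit position is an assumption taken from the drm DPCD definitions.

#include <stdbool.h>
#include <stdint.h>

#define INTRA_HOP_AUX_REPLY	(1u << 3)	/* assumed SINK_STATUS bit */

/* Poll up to 10 times, about 1 ms apart (udelay(1000) in the driver),
 * for the intra-hop indication to clear; false means it never did.
 */
static bool wait_intra_hop_idle(bool (*read_sink_status)(uint8_t *out))
{
	uint8_t status;
	int i;

	for (i = 0; i < 10; i++) {
		if (read_sink_status(&status) &&
		    (status & INTRA_HOP_AUX_REPLY) == 0)
			return true;
		/* driver sleeps ~1 ms here between polls */
	}
	return false;
}
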
enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
@@ -1661,6 +1966,137 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
return status;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
+ uint32_t *interval_in_us)
+{
+ union dp_128b_132b_training_aux_rd_interval dpcd_interval;
+ uint32_t interval_unit = 0;
+
+ dpcd_interval.raw = 0;
+ core_link_read_dpcd(link, DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
+ &dpcd_interval.raw, sizeof(dpcd_interval.raw));
+ interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */
+ /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) *
+ * INTERVAL_UNIT. The maximum is 256 ms
+ */
+ *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000;
+}
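
The arithmetic in dpcd_128b_132b_get_aux_rd_interval() compresses to one expression. A sketch on the raw DPCD byte, assuming the UNIT flag is the top bit as in the driver's bitfield layout:

#include <stdint.h>

/* (VALUE + 1) * unit in microseconds, where UNIT = 1b means 1 ms and
 * 0b means 2 ms; 128 * 2 ms gives the 256 ms maximum by construction.
 */
static uint32_t aux_rd_interval_128b_132b_us(uint8_t dpcd_raw)
{
	uint32_t unit_ms = (dpcd_raw & 0x80) ? 1 : 2;	/* UNIT bit */
	uint32_t value = dpcd_raw & 0x7F;		/* VALUE field */

	return (value + 1) * unit_ms * 1000;
}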
+
+static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ uint8_t loop_count;
+ uint32_t aux_rd_interval = 0;
+ uint32_t wait_time = 0;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ /* Transmit 128b/132b_TPS1 over Main-Link */
+ dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX);
+ /* Set TRAINING_PATTERN_SET to 01h */
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
+
+ /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */
+ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX);
+
+ /* Set loop counter to start from 1 */
+ loop_count = 1;
+
+ /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */
+ dpcd_set_lt_pattern_and_lane_settings(link, lt_settings,
+ lt_settings->pattern_for_eq, DPRX);
+
+ /* poll for channel EQ done */
+ while (status == LINK_TRAINING_SUCCESS) {
+ dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
+ wait_time += aux_rd_interval;
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
+ dpcd_lane_status)) {
+ /* pass */
+ break;
+ } else if (loop_count >= lt_settings->eq_loop_count_limit) {
+ status = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ status = DP_128b_132b_LT_FAILED;
+ } else {
+ dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dpcd_set_lane_settings(link, lt_settings, DPRX);
+ }
+ loop_count++;
+ }
+
+ /* poll for EQ interlane align done */
+ while (status == LINK_TRAINING_SUCCESS) {
+ if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
+ /* pass */
+ break;
+ } else if (wait_time >= lt_settings->eq_wait_time_limit) {
+ status = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ status = DP_128b_132b_LT_FAILED;
+ } else {
+ dp_wait_for_training_aux_rd_interval(link,
+ lt_settings->eq_pattern_time);
+ wait_time += lt_settings->eq_pattern_time;
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ }
+ }
+
+ return status;
+}
+
+static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+	/* Assumption: hardware has already transmitted the EQ pattern */
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ uint32_t wait_time = 0;
+
+ /* initiate CDS done sequence */
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_cds);
+
+ /* poll for CDS interlane align done and symbol lock */
+ while (status == LINK_TRAINING_SUCCESS) {
+ dp_wait_for_training_aux_rd_interval(link,
+ lt_settings->cds_pattern_time);
+ wait_time += lt_settings->cds_pattern_time;
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
+ dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
+ /* pass */
+ break;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ status = DP_128b_132b_LT_FAILED;
+ } else if (wait_time >= lt_settings->cds_wait_time_limit) {
+ status = DP_128b_132b_CDS_DONE_TIMEOUT;
+ }
+ }
+
+ return status;
+}
+#endif
+
static enum link_training_result dp_perform_8b_10b_link_training(
struct dc_link *link,
struct link_training_settings *lt_settings)
@@ -1702,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
}
for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
- lt_settings->lane_settings[lane].VOLTAGE_SWING = VOLTAGE_SWING_LEVEL0;
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
}
if (status == LINK_TRAINING_SUCCESS) {
@@ -1717,6 +2153,35 @@ static enum link_training_result dp_perform_8b_10b_link_training(
return status;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static enum link_training_result dp_perform_128b_132b_link_training(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+ /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */
+ if (link->dc->debug.legacy_dp2_lt) {
+ struct link_training_settings legacy_settings;
+
+ decide_8b_10b_training_settings(link,
+ &lt_settings->link_settings,
+ &legacy_settings);
+ return dp_perform_8b_10b_link_training(link, &legacy_settings);
+ }
+
+ dpcd_set_link_settings(link, lt_settings);
+
+ if (result == LINK_TRAINING_SUCCESS)
+ result = dp_perform_128b_132b_channel_eq_done_sequence(link, lt_settings);
+
+ if (result == LINK_TRAINING_SUCCESS)
+ result = dp_perform_128b_132b_cds_done_sequence(link, lt_settings);
+
+ return result;
+}
+#endif
+
enum link_training_result dc_link_dp_perform_link_training(
struct dc_link *link,
const struct dc_link_settings *link_settings,
@@ -1751,6 +2216,10 @@ enum link_training_result dc_link_dp_perform_link_training(
*/
if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, &lt_settings);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (encoding == DP_128b_132b_ENCODING)
+ status = dp_perform_128b_132b_link_training(link, &lt_settings);
+#endif
else
ASSERT(0);
@@ -1788,16 +2257,19 @@ bool perform_link_training_with_retries(
/* Dynamically assigned link encoders associated with stream rather than
* link.
*/
- if (link->dc->res_pool->funcs->link_encs_assign)
- link_enc = stream->link_enc;
+ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream);
else
link_enc = link->link_enc;
/* We need to do this before the link training to ensure the idle pattern in SST
* mode will be sent right after the link training
*/
- link_enc->funcs->connect_dig_be_to_fe(link_enc,
+ if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING) {
+ link_enc->funcs->connect_dig_be_to_fe(link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
+ }
for (j = 0; j < attempts; ++j) {
@@ -1836,10 +2308,22 @@ bool perform_link_training_with_retries(
dc_link_dp_perform_link_training_skip_aux(link, &current_setting);
return true;
} else {
- status = dc_link_dp_perform_link_training(
- link,
- &current_setting,
- skip_video_pattern);
+ /** @todo Consolidate USB4 DP and DPx.x training. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ status = dc_link_dpia_perform_link_training(link,
+ &current_setting,
+ skip_video_pattern);
+
+ /* Transmit idle pattern once training successful. */
+ if (status == LINK_TRAINING_SUCCESS)
+ dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE,
+ NULL, 0);
+ } else {
+ status = dc_link_dp_perform_link_training(link,
+ &current_setting,
+ skip_video_pattern);
+ }
+
if (status == LINK_TRAINING_SUCCESS)
return true;
}
@@ -1862,12 +2346,16 @@ bool perform_link_training_with_retries(
if (type == dc_connection_none)
break;
} else if (do_fallback) {
+ uint32_t req_bw;
+ uint32_t link_bw;
+
decide_fallback_link_setting(*link_setting, &current_setting, status);
/* Fail link training if reduced link bandwidth no longer meets
* stream requirements.
*/
- if (dc_bandwidth_in_kbps_from_timing(&stream->timing) <
- dc_link_bandwidth_kbps(link, &current_setting))
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ link_bw = dc_link_bandwidth_kbps(link, &current_setting);
+ if (req_bw > link_bw)
break;
}
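
The fallback gate above compares stream and link bandwidth, both in kbps. For 8b/10b the link side is roughly the sketch below; the driver's dc_link_bandwidth_kbps() additionally accounts for FEC overhead when enabled.

#include <stdint.h>

/* Approximate 8b/10b payload bandwidth in kbps: link_rate is the dc
 * enum's 0.27 Gbps-per-lane multiplier (e.g. HBR2 = 0x14 = 20) and the
 * 8/10 factor strips the channel-coding overhead.
 */
static uint32_t approx_link_bw_kbps(uint32_t link_rate, uint32_t lane_count)
{
	return link_rate * 270000u * lane_count * 8u / 10u;
}
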
@@ -1969,8 +2457,14 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
dp_cs_id, link_settings);
/* Set FEC enable */
- fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
- dp_set_fec_ready(link, fec_enable);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+#endif
+ fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
+ dp_set_fec_ready(link, fec_enable);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ }
+#endif
if (lt_overrides->alternate_scrambler_reset) {
if (*lt_overrides->alternate_scrambler_reset)
@@ -2012,23 +2506,59 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
* Still shouldn't turn off dp_receiver (DPCD:600h)
*/
if (link_down == true) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link_settings link_settings = link->cur_link_settings;
+#endif
dp_disable_link_phy(link, link->connector_signal);
- dp_set_fec_ready(link, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
+#endif
+ dp_set_fec_ready(link, false);
}
link->sync_lt_in_progress = false;
return true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
+{
+ enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+ if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
+ lttpr_max_link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
+ lttpr_max_link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
+ lttpr_max_link_rate = LINK_RATE_UHBR10;
+
+ return lttpr_max_link_rate;
+}
+#endif
+
bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
{
+ struct link_encoder *link_enc = NULL;
+
if (!max_link_enc_cap) {
DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
return false;
}
- if (link->link_enc->funcs->get_max_link_cap) {
- link->link_enc->funcs->get_max_link_cap(link->link_enc, max_link_enc_cap);
+	/* Links that support dynamically assigned link encoders are assigned
+	 * the next available encoder if one is not already assigned.
+	 */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ if (link_enc == NULL)
+ link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
+ } else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
+
+ if (link_enc && link_enc->funcs->get_max_link_cap) {
+ link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap);
return true;
}
@@ -2041,9 +2571,31 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_
static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
struct dc_link_settings max_link_cap = {0};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ enum dc_link_rate lttpr_max_link_rate;
+#endif
+ struct link_encoder *link_enc = NULL;
+
+	/* Links that support dynamically assigned link encoders are assigned
+	 * the next available encoder if one is not already assigned.
+	 */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ if (link_enc == NULL)
+ link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
+ } else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
/* get max link encoder capability */
- link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+ if (link_enc)
+ link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (max_link_cap.link_rate >= LINK_RATE_UHBR10 &&
+ !link->hpo_dp_link_enc)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+#endif
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@ -2064,8 +2616,15 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lttpr_max_link_rate = get_lttpr_max_link_rate(link);
+
+ if (lttpr_max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = lttpr_max_link_rate;
+#else
if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate)
max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+#endif
DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
__func__,
@@ -2075,7 +2634,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
-enum dc_status read_hpd_rx_irq_data(
+static enum dc_status read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data)
{
@@ -2206,17 +2765,32 @@ bool dp_verify_link_cap(
enum link_training_result status;
union hpd_irq_data irq_data;
- if (link->dc->debug.skip_detection_link_training) {
+ /* link training starts with the maximum common settings
+ * supported by both sink and ASIC.
+ */
+ max_link_cap = get_max_link_cap(link);
+ initial_link_settings = get_common_supported_link_settings(
+ *known_limit_link_setting,
+ max_link_cap);
+
+	/* Accept reported capabilities if the link supports flexible encoder mapping or the encoder is already in use. */
+ if (link->dc->debug.skip_detection_link_training ||
+ link->is_dig_mapping_flexible) {
+		/* TODO - should we check the link encoder's max link caps here?
+		 * How do we know which link encoder to check?
+		 */
link->verified_link_cap = *known_limit_link_setting;
return true;
+ } else if (link->link_enc && link->dc->res_pool->funcs->link_encs_assign &&
+ !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine, link)) {
+ link->verified_link_cap = initial_link_settings;
+ return true;
}
memset(&irq_data, 0, sizeof(irq_data));
success = false;
skip_link_training = false;
- max_link_cap = get_max_link_cap(link);
-
/* Grant extended timeout request */
if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
@@ -2224,6 +2798,10 @@ bool dp_verify_link_cap(
core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
+ reset_dp_hpo_stream_encoders_for_link(link);
+#endif
/* TODO implement override and monitor patch later */
/* try to train the link from high to low to
@@ -2234,19 +2812,13 @@ bool dp_verify_link_cap(
dp_cs_id = get_clock_source_id(link);
- /* link training starts with the maximum common settings
- * supported by both sink and ASIC.
- */
- initial_link_settings = get_common_supported_link_settings(
- *known_limit_link_setting,
- max_link_cap);
cur_link_setting = initial_link_settings;
/* Temporary Renoir-specific workaround for SWDEV-215184;
* PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
* so add extra cycle of enabling and disabling the PHY before first link training.
*/
- if (link->link_enc->features.flags.bits.DP_IS_USB_C &&
+ if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
link->dc->debug.usbc_combo_phy_reset_wa) {
dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur);
dp_disable_link_phy(link, link->connector_signal);
@@ -2333,7 +2905,7 @@ bool dp_verify_link_cap_with_retries(
link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
break;
} else if (dp_verify_link_cap(link,
- &link->reported_link_cap,
+ known_limit_link_setting,
&fail_count) && fail_count == 0) {
success = true;
break;
@@ -2348,11 +2920,21 @@ bool dp_verify_mst_link_cap(
{
struct dc_link_settings max_link_cap = {0};
- max_link_cap = get_max_link_cap(link);
- link->verified_link_cap = get_common_supported_link_settings(
- link->reported_link_cap,
- max_link_cap);
-
+ if (dp_get_link_encoding_format(&link->reported_link_cap) ==
+ DP_8b_10b_ENCODING) {
+ max_link_cap = get_max_link_cap(link);
+ link->verified_link_cap = get_common_supported_link_settings(
+ link->reported_link_cap,
+ max_link_cap);
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&link->reported_link_cap) ==
+ DP_128b_132b_ENCODING) {
+ dp_verify_link_cap_with_retries(link,
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
+ }
+#endif
return true;
}
@@ -2379,7 +2961,17 @@ static struct dc_link_settings get_common_supported_link_settings(
* We map it to the maximum supported link rate that
* is smaller than MAX_LINK_BW in this case.
*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link_settings.link_rate > LINK_RATE_UHBR20) {
+ link_settings.link_rate = LINK_RATE_UHBR20;
+ } else if (link_settings.link_rate < LINK_RATE_UHBR20 &&
+ link_settings.link_rate > LINK_RATE_UHBR13_5) {
+ link_settings.link_rate = LINK_RATE_UHBR13_5;
+ } else if (link_settings.link_rate < LINK_RATE_UHBR10 &&
+ link_settings.link_rate > LINK_RATE_HIGH3) {
+#else
if (link_settings.link_rate > LINK_RATE_HIGH3) {
+#endif
link_settings.link_rate = LINK_RATE_HIGH3;
} else if (link_settings.link_rate < LINK_RATE_HIGH3
&& link_settings.link_rate > LINK_RATE_HIGH2) {
@@ -2424,6 +3016,14 @@ static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
{
switch (link_rate) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_UHBR20:
+ return LINK_RATE_UHBR13_5;
+ case LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR10;
+ case LINK_RATE_UHBR10:
+ return LINK_RATE_HIGH3;
+#endif
case LINK_RATE_HIGH3:
return LINK_RATE_HIGH2;
case LINK_RATE_HIGH2:
@@ -2458,11 +3058,55 @@ static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
return LINK_RATE_HIGH2;
case LINK_RATE_HIGH2:
return LINK_RATE_HIGH3;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_HIGH3:
+ return LINK_RATE_UHBR10;
+ case LINK_RATE_UHBR10:
+ return LINK_RATE_UHBR13_5;
+ case LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR20;
+#endif
default:
return LINK_RATE_UNKNOWN;
}
}
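
reduce_link_rate() and increase_link_rate() now walk the same ordering with the UHBR rates appended, so both can be read as one ladder plus an index step. A sketch with the rungs below HBR elided; the identifiers are illustrative, not the dc enum, and -1 stands in for LINK_RATE_UNKNOWN.

/* Stepping up or down the bandwidth-ordered ladder is an index move;
 * stepping off either end maps to "unknown".
 */
enum { RATE_HIGH, RATE_HIGH2, RATE_HIGH3,
       RATE_UHBR10, RATE_UHBR13_5, RATE_UHBR20, RATE_COUNT };

static int step_link_rate(int rate, int dir)
{
	int next = rate + dir;	/* dir: -1 reduces, +1 increases */

	return (next < 0 || next >= RATE_COUNT) ? -1 : next;
}
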
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool decide_fallback_link_setting_max_bw_policy(
+ const struct dc_link_settings *max,
+ struct dc_link_settings *cur)
+{
+ uint8_t cur_idx = 0, next_idx;
+ bool found = false;
+
+ while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks))
+ /* find current index */
+ if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
+ dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate)
+ break;
+ else
+ cur_idx++;
+
+ next_idx = cur_idx + 1;
+
+ while (next_idx < ARRAY_SIZE(dp_lt_fallbacks))
+ /* find next index */
+ if (dp_lt_fallbacks[next_idx].lane_count <= max->lane_count &&
+ dp_lt_fallbacks[next_idx].link_rate <= max->link_rate)
+ break;
+ else
+ next_idx++;
+
+ if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) {
+ cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
+ cur->link_rate = dp_lt_fallbacks[next_idx].link_rate;
+ found = true;
+ }
+
+ return found;
+}
+#endif
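
decide_fallback_link_setting_max_bw_policy() indexes dp_lt_fallbacks, a table defined elsewhere in the driver. Its assumed shape: (lane count, link rate) pairs sorted by decreasing total bandwidth, so "next index" always means the next-lower-bandwidth configuration. The entries below are illustrative only.

struct lt_fallback_entry {
	unsigned int lane_count;	/* 4, 2 or 1 */
	unsigned int bw_rank;		/* stand-in for enum dc_link_rate */
};

/* Note that lane count and rate interleave: two lanes of UHBR20 carry
 * more bandwidth than four lanes of HBR3, so ordering is by the product.
 */
static const struct lt_fallback_entry example_fallbacks[] = {
	{ 4, 6 /* UHBR20 */ }, { 4, 5 /* UHBR13.5 */ },
	{ 2, 6 /* UHBR20 */ }, { 4, 4 /* UHBR10 */ },
	/* ... continuing down to one lane at the lowest rate */
};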
+
/*
* function: set link rate and lane count fallback based
* on current link setting and last link training result
@@ -2478,6 +3122,11 @@ static bool decide_fallback_link_setting(
{
if (!current_link_setting)
return false;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING)
+ return decide_fallback_link_setting_max_bw_policy(&initial_link_settings,
+ current_link_setting);
+#endif
switch (training_result) {
case LINK_TRAINING_CR_FAIL_LANE0:
@@ -2743,7 +3392,7 @@ void decide_link_settings(struct dc_stream_state *stream,
}
/*************************Short Pulse IRQ***************************/
-static bool allow_hpd_rx_irq(const struct dc_link *link)
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
{
/*
* Don't handle RX IRQ unless one of following is met:
@@ -2793,6 +3442,8 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
if (psr_error_status.bits.LINK_CRC_ERROR ||
psr_error_status.bits.RFB_STORAGE_ERROR ||
psr_error_status.bits.VSC_SDP_ERROR) {
+ bool allow_active;
+
/* Acknowledge and clear error bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -2802,8 +3453,10 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
sizeof(psr_error_status.raw));
/* PSR error, disable and re-enable PSR */
- dc_link_set_psr_allow_active(link, false, true, false);
- dc_link_set_psr_allow_active(link, true, true, false);
+ allow_active = false;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ allow_active = true;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -2850,20 +3503,24 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
union phy_test_pattern dpcd_test_pattern;
union lane_adjust dpcd_lane_adjustment[2];
unsigned char dpcd_post_cursor_2_adjustment = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ unsigned char test_pattern_buffer[
+ (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+#else
unsigned char test_pattern_buffer[
(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+#endif
unsigned int test_pattern_size = 0;
enum dp_test_pattern test_pattern;
- struct dc_link_training_settings link_settings;
union lane_adjust dpcd_lane_adjust;
unsigned int lane;
struct link_training_settings link_training_settings;
- int i = 0;
dpcd_test_pattern.raw = 0;
memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
- memset(&link_settings, 0, sizeof(link_settings));
+ memset(&link_training_settings, 0, sizeof(link_training_settings));
/* get phy test pattern and pattern parameters from DP receiver */
core_link_read_dpcd(
@@ -2918,6 +3575,35 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
case PHY_TEST_PATTERN_CP2520_3:
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case PHY_TEST_PATTERN_128b_132b_TPS1:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS1;
+ break;
+ case PHY_TEST_PATTERN_128b_132b_TPS2:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS2;
+ break;
+ case PHY_TEST_PATTERN_PRBS9:
+ test_pattern = DP_TEST_PATTERN_PRBS9;
+ break;
+ case PHY_TEST_PATTERN_PRBS11:
+ test_pattern = DP_TEST_PATTERN_PRBS11;
+ break;
+ case PHY_TEST_PATTERN_PRBS15:
+ test_pattern = DP_TEST_PATTERN_PRBS15;
+ break;
+ case PHY_TEST_PATTERN_PRBS23:
+ test_pattern = DP_TEST_PATTERN_PRBS23;
+ break;
+ case PHY_TEST_PATTERN_PRBS31:
+ test_pattern = DP_TEST_PATTERN_PRBS31;
+ break;
+ case PHY_TEST_PATTERN_264BIT_CUSTOM:
+ test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_PULSE:
+ test_pattern = DP_TEST_PATTERN_SQUARE_PULSE;
+ break;
+#endif
default:
test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
break;
@@ -2933,30 +3619,59 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) {
+ test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
+ core_link_read_dpcd(
+ link,
+ DP_PHY_SQUARE_PATTERN,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) {
+ test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256-
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1;
+ core_link_read_dpcd(
+ link,
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+#endif
+
/* prepare link training settings */
- link_settings.link = link->cur_link_settings;
+ link_training_settings.link_settings = link->cur_link_settings;
for (lane = 0; lane <
(unsigned int)(link->cur_link_settings.lane_count);
lane++) {
dpcd_lane_adjust.raw =
get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
- link_settings.lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)
- (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
- link_settings.lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)
- (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
- link_settings.lane_settings[lane].POST_CURSOR2 =
- (enum dc_post_cursor2)
- ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
- }
-
- for (i = 0; i < 4; i++)
- link_training_settings.lane_settings[i] =
- link_settings.lane_settings[i];
- link_training_settings.link_settings = link_settings.link;
- link_training_settings.allow_invalid_msa_timing_param = false;
+ if (dp_get_link_encoding_format(&link->cur_link_settings) ==
+ DP_8b_10b_ENCODING) {
+ link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)
+ (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+ link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)
+ (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+ link_training_settings.hw_lane_settings[lane].POST_CURSOR2 =
+ (enum dc_post_cursor2)
+ ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&link->cur_link_settings) ==
+ DP_128b_132b_ENCODING) {
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw =
+ dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
+ }
+#endif
+ }
+
+ dp_hw_to_dpcd_lane_settings(&link_training_settings,
+ link_training_settings.hw_lane_settings,
+ link_training_settings.dpcd_lane_settings);
/*Usage: Measure DP physical lane signal
* by DP SI test equipment automatically.
* PHY test pattern request is generated by equipment via HPD interrupt.
@@ -3177,7 +3892,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
}
}
-static void handle_automated_test(struct dc_link *link)
+void dc_link_dp_handle_automated_test(struct dc_link *link)
{
union test_request test_request;
union test_response test_response;
@@ -3226,17 +3941,50 @@ static void handle_automated_test(struct dc_link *link)
sizeof(test_response));
}
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
+void dc_link_dp_handle_link_loss(struct dc_link *link)
{
- union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
- union device_service_irq device_service_clear = { { 0 } };
+ int i;
+ struct pipe_ctx *pipe_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ break;
+ }
+
+ if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ core_link_disable_stream(pipe_ctx);
+ }
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ core_link_enable_stream(link->dc->current_state, pipe_ctx);
+ }
+ }
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work)
+{
+ union hpd_irq_data hpd_irq_dpcd_data = {0};
+ union device_service_irq device_service_clear = {0};
enum dc_status result;
bool status = false;
- struct pipe_ctx *pipe_ctx;
- int i;
if (out_link_loss)
*out_link_loss = false;
+
+ if (has_left_work)
+ *has_left_work = false;
/* For use cases related to down stream connection status change,
* PSR and device auto test, refer to function handle_sst_hpd_irq
* in DAL2.1*/
@@ -3268,11 +4016,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
&device_service_clear.raw,
sizeof(device_service_clear.raw));
device_service_clear.raw = 0;
- handle_automated_test(link);
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ else
+ dc_link_dp_handle_automated_test(link);
return false;
}
- if (!allow_hpd_rx_irq(link)) {
+ if (!dc_link_dp_allow_hpd_rx_irq(link)) {
DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
__func__, link->link_index);
return false;
@@ -3286,12 +4037,18 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
* so do not handle as a normal sink status change interrupt.
*/
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY)
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
return true;
+ }
/* check if we have MST msg and return since we poll for it */
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY)
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
return false;
+ }
/* For now we only handle 'Downstream port status' case.
* If we got sink count changed it means
@@ -3308,29 +4065,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
sizeof(hpd_irq_dpcd_data),
"Status: ");
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
- break;
- }
-
- if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
- return false;
-
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
- core_link_disable_stream(pipe_ctx);
- }
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
- core_link_enable_stream(link->dc->current_state, pipe_ctx);
- }
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ else
+ dc_link_dp_handle_link_loss(link);
status = false;
if (out_link_loss)
@@ -3554,6 +4292,43 @@ static void get_active_converter_info(
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
+ union dp_dfp_cap_ext dfp_cap_ext;
+ memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext));
+ core_link_read_dpcd(
+ link,
+ DP_DFP_CAPABILITY_EXTENSION_SUPPORT,
+ dfp_cap_ext.raw,
+ sizeof(dfp_cap_ext.raw));
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps =
+ dfp_cap_ext.fields.max_pixel_rate_in_mps[0] +
+ (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width =
+ dfp_cap_ext.fields.max_video_h_active_width[0] +
+ (dfp_cap_ext.fields.max_video_h_active_width[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height =
+ dfp_cap_ext.fields.max_video_v_active_height[0] +
+ (dfp_cap_ext.fields.max_video_v_active_height[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps =
+ dfp_cap_ext.fields.encoding_format_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps =
+ dfp_cap_ext.fields.rgb_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr444_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr422_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr420_color_depth_caps;
+ DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index);
+ DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false");
+ DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps);
+ DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width);
+ DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height);
+ }
+#endif
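Note: the byte-splicing above is the usual little-endian reconstruction of 16-bit DPCD fields. A standalone equivalent of the idiom (hypothetical helper name, not part of the patch):

	#include <stdint.h>

	/* Rebuild a 16-bit value from two little-endian DPCD bytes. */
	static inline uint16_t dpcd_le16_sketch(const uint8_t b[2])
	{
		return (uint16_t)(b[0] | ((uint16_t)b[1] << 8));
	}

so max_pixel_rate_in_mps, for example, is equivalent to dpcd_le16_sketch(dfp_cap_ext.fields.max_pixel_rate_in_mps).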
}
static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
@@ -3613,7 +4388,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t lttpr_dpcd_data[8];
+ bool allow_lttpr_non_transparent_mode = 0;
+#else
uint8_t lttpr_dpcd_data[6];
+#endif
bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
enum dc_status status = DC_ERROR_UNEXPECTED;
@@ -3621,6 +4401,16 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
+ link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
+ allow_lttpr_non_transparent_mode = 1;
+ } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
+ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ allow_lttpr_non_transparent_mode = 1;
+ }
+#endif
+
/*
* Logic to determine LTTPR mode
*/
@@ -3628,17 +4418,31 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
if (vbios_lttpr_enable && vbios_lttpr_interop)
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (allow_lttpr_non_transparent_mode)
+#else
if (link->dc->config.allow_lttpr_non_transparent_mode)
+#endif
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
else
link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
} else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
+#else
if (!link->dc->config.allow_lttpr_non_transparent_mode
|| !link->dc->caps.extended_aux_timeout_support)
+#endif
link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
else
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Check DP tunnel LTTPR mode debug option. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->dc->debug.dpia_debug.bits.force_non_lttpr)
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+#endif
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
/* By reading LTTPR capability, RX assumes that we will enable
@@ -3678,8 +4482,19 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128b_132b_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+#endif
+
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
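Note: the is_lttpr_present test relies on dp_convert_to_count() decoding the one-hot PHY_REPEATER_CNT register defined by the DP spec. A self-contained sketch of that decoding, assuming the helper follows the spec table (0x80 = 1 repeater down to 0x01 = 8):

	#include <stdint.h>

	/* Decode DPCD PHY_REPEATER_CNT: one-hot, 0x80 = 1 LTTPR ... 0x01 = 8. */
	static uint8_t convert_to_count_sketch(uint8_t phy_repeater_cnt)
	{
		switch (phy_repeater_cnt) {
		case 0x80: return 1;
		case 0x40: return 2;
		case 0x20: return 3;
		case 0x10: return 4;
		case 0x08: return 5;
		case 0x04: return 6;
		case 0x02: return 7;
		case 0x01: return 8;
		default:   return 0; /* unknown or invalid encoding */
		}
	}

The added phy_repeater_cnt < 0xff guard makes the rejection of an all-ones readback (e.g. from a misbehaving sink) explicit.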
@@ -3728,6 +4543,8 @@ static bool retrieve_link_cap(struct dc_link *link)
LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
is_lttpr_present = dp_retrieve_lttpr_cap(link);
+ /* Read DP tunneling information. */
+ status = dpcd_get_tunneling_device_data(link);
status = core_link_read_dpcd(link, DP_SET_POWER,
&dpcd_power_state, sizeof(dpcd_power_state));
@@ -3928,16 +4745,82 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_DSC_SUPPORT,
link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+ DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index);
+ DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0);
+ DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1);
+ DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH);
+ }
+#else
status = core_link_read_dpcd(
link,
DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+#endif
}
if (!dpcd_read_sink_ext_caps(link))
link->dpcd_sink_ext_caps.raw = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ link->dpcd_caps.channel_coding_cap.raw = dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_CAP - DP_DPCD_REV];
+
+ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index);
+
+ core_link_read_dpcd(link,
+ DP_128b_132b_SUPPORTED_LINK_RATES,
+ &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw,
+ sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw));
+ if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR10;
+ else
+ dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__);
+ DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index);
+ DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz",
+ link->reported_link_cap.link_rate / 100,
+ link->reported_link_cap.link_rate % 100);
+
+ core_link_read_dpcd(link,
+ DP_SINK_VIDEO_FALLBACK_FORMATS,
+ &link->dpcd_caps.fallback_formats.raw,
+ sizeof(link->dpcd_caps.fallback_formats.raw));
+ DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index);
+ if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.raw == 0) {
+ DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported");
+ link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1;
+ }
+
+ core_link_read_dpcd(link,
+ DP_FEC_CAPABILITY_1,
+ &link->dpcd_caps.fec_cap1.raw,
+ sizeof(link->dpcd_caps.fec_cap1.raw));
+ DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index);
+ if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE)
+ DC_LOG_DP2("\tFEC aggregated error counters are supported");
+ }
+#endif
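Note on the rate logging above: the LINK_RATE_UHBR* enum values encode the per-lane rate in 10 Mbps units (e.g. LINK_RATE_UHBR10 = 1000), so link_rate / 100 and link_rate % 100 print "10.0" for UHBR10; the "GHz" in the format string is effectively Gbps per lane.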
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -4368,7 +5251,7 @@ bool dc_link_dp_set_test_pattern(
* MuteAudioEndpoint(pPathMode->pDisplayPath, true);
*/
/* Blank stream */
- pipes->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+ pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
}
dp_set_hw_test_pattern(link, test_pattern,
@@ -4408,6 +5291,35 @@ bool dc_link_dp_set_test_pattern(
case DP_TEST_PATTERN_CP2520_3:
pattern = PHY_TEST_PATTERN_CP2520_3;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_TEST_PATTERN_128b_132b_TPS1:
+ pattern = PHY_TEST_PATTERN_128b_132b_TPS1;
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2:
+ pattern = PHY_TEST_PATTERN_128b_132b_TPS2;
+ break;
+ case DP_TEST_PATTERN_PRBS9:
+ pattern = PHY_TEST_PATTERN_PRBS9;
+ break;
+ case DP_TEST_PATTERN_PRBS11:
+ pattern = PHY_TEST_PATTERN_PRBS11;
+ break;
+ case DP_TEST_PATTERN_PRBS15:
+ pattern = PHY_TEST_PATTERN_PRBS15;
+ break;
+ case DP_TEST_PATTERN_PRBS23:
+ pattern = PHY_TEST_PATTERN_PRBS23;
+ break;
+ case DP_TEST_PATTERN_PRBS31:
+ pattern = PHY_TEST_PATTERN_PRBS31;
+ break;
+ case DP_TEST_PATTERN_264BIT_CUSTOM:
+ pattern = PHY_TEST_PATTERN_264BIT_CUSTOM;
+ break;
+ case DP_TEST_PATTERN_SQUARE_PULSE:
+ pattern = PHY_TEST_PATTERN_SQUARE_PULSE;
+ break;
+#endif
default:
return false;
}
@@ -4670,7 +5582,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -4690,7 +5602,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
link_enc->funcs->fec_set_ready(link_enc, true);
link->fec_state = dc_link_fec_ready;
} else {
- link_enc->funcs->fec_set_ready(link->link_enc, false);
+ link_enc->funcs->fec_set_ready(link_enc, false);
link->fec_state = dc_link_fec_not_ready;
dm_error("dpcd write failed to set fec_ready");
}
@@ -4717,8 +5629,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(
- link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -4938,7 +5849,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
uint8_t link_bw_set;
uint8_t link_rate_set;
uint32_t req_bw;
- union lane_count_set lane_count_set = { {0} };
+ union lane_count_set lane_count_set = {0};
ASSERT(link || crtc_timing); // invalid input
@@ -4983,6 +5894,227 @@ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings
if ((link_settings->link_rate >= LINK_RATE_LOW) &&
(link_settings->link_rate <= LINK_RATE_HIGH3))
return DP_8b_10b_ENCODING;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if ((link_settings->link_rate >= LINK_RATE_UHBR10) &&
+ (link_settings->link_rate <= LINK_RATE_UHBR20))
+ return DP_128b_132b_ENCODING;
+#endif
return DP_UNKNOWN_ENCODING;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link)
+{
+ struct dc_link_settings link_settings = {0};
+
+ if (!dc_is_dp_signal(link->connector_signal))
+ return DP_UNKNOWN_ENCODING;
+
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ link_settings = link->preferred_link_setting;
+ } else {
+ decide_mst_link_settings(link, &link_settings);
+ }
+
+ return dp_get_link_encoding_format(&link_settings);
+}
+
+// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST)
+static void get_lane_status(
+ struct dc_link *link,
+ uint32_t lane_count,
+ union lane_status *status,
+ union lane_align_status_updated *status_updated)
+{
+ unsigned int lane;
+ uint8_t dpcd_buf[3] = {0};
+
+ if (status == NULL || status_updated == NULL) {
+ return;
+ }
+
+ core_link_read_dpcd(
+ link,
+ DP_LANE0_1_STATUS,
+ dpcd_buf,
+ sizeof(dpcd_buf));
+
+ for (lane = 0; lane < lane_count; lane++) {
+ status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane);
+ }
+
+ status_updated->raw = dpcd_buf[2];
+}
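Note: DP_LANE0_1_STATUS packs two lanes per byte, and get_nibble_at_index() extracts the 4-bit status field for one lane. A standalone equivalent, assuming the usual even-lane-in-low-nibble layout:

	#include <stdint.h>

	/* Lanes 0/2 sit in the low nibble of bytes 0/1, lanes 1/3 in the high. */
	static uint8_t nibble_at_index_sketch(const uint8_t *buf, uint32_t index)
	{
		return (buf[index / 2] >> ((index % 2) * 4)) & 0x0F;
	}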
+
+bool dpcd_write_128b_132b_sst_payload_allocation_table(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ struct link_mst_stream_allocation_table *proposed_table,
+ bool allocate)
+{
+ const uint8_t vc_id = 1; /// VC ID always 1 for SST
+ const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST
+ bool result = false;
+ uint8_t req_slot_count = 0;
+ struct fixed31_32 avg_time_slots_per_mtp = { 0 };
+ union payload_table_update_status update_status = { 0 };
+ const uint32_t max_retries = 30;
+ uint32_t retries = 0;
+
+ if (allocate) {
+ avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ } else {
+ /// Leave req_slot_count = 0 if allocate is false.
+ }
+
+ /// Write DPCD 2C0 = 1 to start updating
+ update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1;
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1);
+
+ /// Program the changes in DPCD 1C0 - 1C2
+ ASSERT(vc_id == 1);
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_SET,
+ &vc_id,
+ 1);
+
+ ASSERT(start_time_slot == 0);
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_START_TIME_SLOT,
+ &start_time_slot,
+ 1);
+
+ ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); /// Validation should filter out modes that exceed link BW
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT,
+ &req_slot_count,
+ 1);
+
+ /// Poll till DPCD 2C0 read 1
+ /// Try for at least 150ms (30 retries, with 5ms delay after each attempt)
+
+ while (retries < max_retries) {
+ if (core_link_read_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1) == DC_OK) {
+ if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) {
+ DC_LOG_DP2("SST Update Payload: downstream payload table updated.");
+ result = true;
+ break;
+ }
+ } else {
+ union dpcd_rev dpcdRev;
+
+ if (core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ &dpcdRev.raw,
+ 1) != DC_OK) {
+ DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision "
+ "of sink while polling payload table "
+ "updated status bit.");
+ break;
+ }
+ }
+ retries++;
+ udelay(5000);
+ }
+
+ if (!result && retries == max_retries) {
+ DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, "
+ "continue on. Something is wrong with the branch.");
+ // TODO - DP2.0 Payload: Read and log the payload table from downstream branch
+ }
+
+ proposed_table->stream_count = 1; /// Always 1 stream for SST
+ proposed_table->stream_allocations[0].slot_count = req_slot_count;
+ proposed_table->stream_allocations[0].vcp_id = vc_id;
+
+ return result;
+}
+
+bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link)
+{
+ /*
+ * wait for ACT handled
+ */
+ int i;
+ const int act_retries = 30;
+ enum act_return_status result = ACT_FAILED;
+ union payload_table_update_status update_status = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated lane_status_updated;
+
+ for (i = 0; i < act_retries; i++) {
+ get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated);
+
+ if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_interlane_aligned(lane_status_updated)) {
+ DC_LOG_ERROR("SST Update Payload: Link loss occurred while "
+ "polling for ACT handled.");
+ result = ACT_LINK_LOST;
+ break;
+ }
+ core_link_read_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1);
+
+ if (update_status.bits.ACT_HANDLED == 1) {
+ DC_LOG_DP2("SST Update Payload: ACT handled by downstream.");
+ result = ACT_SUCCESS;
+ break;
+ }
+
+ udelay(5000);
+ }
+
+ if (result == ACT_FAILED) {
+ DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, "
+ "continue on. Something is wrong with the branch.");
+ }
+
+ return (result == ACT_SUCCESS);
+}
+
+struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link)
+{
+ struct fixed31_32 link_bw_effective =
+ dc_fixpt_from_int(
+ dc_link_bandwidth_kbps(link, &link->cur_link_settings));
+ struct fixed31_32 timeslot_bw_effective =
+ dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
+ struct fixed31_32 timing_bw =
+ dc_fixpt_from_int(
+ dc_bandwidth_in_kbps_from_timing(&stream->timing));
+ struct fixed31_32 avg_time_slots_per_mtp =
+ dc_fixpt_div(timing_bw, timeslot_bw_effective);
+
+ return avg_time_slots_per_mtp;
+}
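To make the fixed-point math concrete, a rough worked example with assumed numbers (illustrative, not from the patch): if dc_link_bandwidth_kbps() reports about 38,788,000 kbps for a UHBR10 x4 link (10 Gbps x 4 lanes x 128/132 coding efficiency) and MAX_MTP_SLOT_COUNT is 64, each time slot is worth roughly 606,000 kbps; a 4k60 24bpp stream at roughly 12,800,000 kbps then averages 12,800,000 / 606,000 ≈ 21.1 time slots per MTP, which dpcd_write_128b_132b_sst_payload_allocation_table() rounds up via dc_fixpt_ceil() to 22 requested slots.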
+
+bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
+{
+ return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
+ pipe_ctx->stream->link->hpo_dp_link_enc &&
+ dc_is_dp_signal(pipe_ctx->stream->signal));
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
index 72970e49800a..7f25c11f4248 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
@@ -176,12 +176,15 @@ static void dpcd_reduce_address_range(
uint8_t * const reduced_data,
const uint32_t reduced_size)
{
- const uint32_t reduced_end_address = END_ADDRESS(reduced_address, reduced_size);
- const uint32_t extended_end_address = END_ADDRESS(extended_address, extended_size);
const uint32_t offset = reduced_address - extended_address;
- if (extended_end_address == reduced_end_address && extended_address == reduced_address)
- return; /* extended and reduced address ranges point to the same data */
+ /*
+ * If the address is same, address was not extended.
+ * So we do not need to free any memory.
+ * The data is in original buffer(reduced_data).
+ */
+ if (extended_data == reduced_data)
+ return;
memcpy(&extended_data[offset], reduced_data, reduced_size);
kfree(extended_data);
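Note: the rewritten early-out captures the real invariant: a separate buffer only exists when the requested range had to be widened, in which case extended_data no longer aliases the caller's reduced_data; when the pointers are equal there is nothing to copy back and, crucially, nothing to kfree().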
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
new file mode 100644
index 000000000000..b1c9f77d6bf4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc.h"
+#include "dc_link_dpia.h"
+#include "inc/core_status.h"
+#include "dc_link.h"
+#include "dc_link_dp.h"
+#include "dpcd_defs.h"
+#include "link_hwss.h"
+#include "dm_helpers.h"
+#include "dmub/inc/dmub_cmd.h"
+#include "inc/link_dpcd.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
+{
+ enum dc_status status = DC_OK;
+ uint8_t dpcd_dp_tun_data[3] = {0};
+ uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0};
+ uint8_t i = 0;
+
+ status = core_link_read_dpcd(link,
+ DP_TUNNELING_CAPABILITIES_SUPPORT,
+ dpcd_dp_tun_data,
+ sizeof(dpcd_dp_tun_data));
+
+ status = core_link_read_dpcd(link,
+ DP_USB4_ROUTER_TOPOLOGY_ID,
+ dpcd_topology_data,
+ sizeof(dpcd_topology_data));
+
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
+ dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT -
+ DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
+ dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
+ link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
+
+ return status;
+}
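Note: dpcd_dp_tun_data[] holds the consecutive registers starting at DP_TUNNELING_CAPABILITIES_SUPPORT, so each field is indexed by its DPCD address minus that base; the self-subtraction on the first field is the same idiom, yielding index 0.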
+
+/* Configure link as prescribed in link_setting, set LTTPR mode, and
+ * initialize link training settings.
+ * Abort link training if a sink unplug is detected.
+ *
+ * @param link DPIA link being trained.
+ * @param[in] link_setting Lane count, link rate and downspread control.
+ * @param[out] lt_settings Link settings and drive settings (voltage swing and pre-emphasis).
+ */
+static enum link_training_result dpia_configure_link(struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ struct link_training_settings *lt_settings)
+{
+ enum dc_status status;
+ bool fec_enable;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ link->lttpr_mode);
+
+ dp_decide_training_settings(link,
+ link_setting,
+ lt_settings);
+
+ status = dpcd_configure_channel_coding(link, lt_settings);
+ if (status != DC_OK && !link->hpd_status)
+ return LINK_TRAINING_ABORT;
+
+ /* Configure lttpr mode */
+ status = dpcd_configure_lttpr_mode(link, lt_settings);
+ if (status != DC_OK && !link->hpd_status)
+ return LINK_TRAINING_ABORT;
+
+ /* Set link rate, lane count and spread. */
+ status = dpcd_set_link_settings(link, lt_settings);
+ if (status != DC_OK && !link->hpd_status)
+ return LINK_TRAINING_ABORT;
+
+ if (link->preferred_training_settings.fec_enable)
+ fec_enable = *link->preferred_training_settings.fec_enable;
+ else
+ fec_enable = true;
+ status = dp_set_fec_ready(link, fec_enable);
+ if (status != DC_OK && !link->hpd_status)
+ return LINK_TRAINING_ABORT;
+
+ return LINK_TRAINING_SUCCESS;
+}
+
+static enum dc_status core_link_send_set_config(struct dc_link *link,
+ uint8_t msg_type,
+ uint8_t msg_data)
+{
+ struct set_config_cmd_payload payload;
+ enum set_config_status set_config_result = SET_CONFIG_PENDING;
+
+ /* prepare set_config payload */
+ payload.msg_type = msg_type;
+ payload.msg_data = msg_data;
+
+ if (!link->ddc->ddc_pin && !link->aux_access_disabled &&
+ (dm_helpers_dmub_set_config_sync(link->ctx, link,
+ &payload, &set_config_result) == -1)) {
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ /* set_config should return ACK if successful */
+ return (set_config_result == SET_CONFIG_ACK_RECEIVED) ? DC_OK : DC_ERROR_UNEXPECTED;
+}
+
+/* Build SET_CONFIG message data payload for specified message type. */
+static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ union dpia_set_config_data data;
+
+ data.raw = 0;
+
+ switch (type) {
+ case DPIA_SET_CFG_SET_LINK:
+ data.set_link.mode = link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0;
+ break;
+ case DPIA_SET_CFG_SET_PHY_TEST_MODE:
+ break;
+ case DPIA_SET_CFG_SET_VSPE:
+ /* Assume all lanes have same drive settings. */
+ data.set_vspe.swing = lt_settings->lane_settings[0].VOLTAGE_SWING;
+ data.set_vspe.pre_emph = lt_settings->lane_settings[0].PRE_EMPHASIS;
+ data.set_vspe.max_swing_reached =
+ lt_settings->lane_settings[0].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0;
+ data.set_vspe.max_pre_emph_reached =
+ lt_settings->lane_settings[0].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0;
+ break;
+ default:
+ ASSERT(false); /* Message type not supported by helper function. */
+ break;
+ }
+
+ return data.raw;
+}
+
+/* Convert DC training pattern to DPIA training stage. */
+static enum dpia_set_config_ts convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern tps)
+{
+ enum dpia_set_config_ts ts;
+
+ switch (tps) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ ts = DPIA_TS_TPS1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ ts = DPIA_TS_TPS2;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+ ts = DPIA_TS_TPS3;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+ ts = DPIA_TS_TPS4;
+ break;
+ default:
+ ts = DPIA_TS_DPRX_DONE;
+ ASSERT(false); /* TPS not supported by helper function. */
+ break;
+ }
+
+ return ts;
+}
+
+/* Write training pattern to DPCD. */
+static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
+ enum dc_dp_training_pattern pattern,
+ uint32_t hop)
+{
+ union dpcd_training_pattern dpcd_pattern = { {0} };
+ uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
+ enum dc_status status;
+
+ if (hop != DPRX)
+ dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));
+
+ /* DpcdAddress_TrainingPatternSet */
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
+ dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
+
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
+ dc_dp_initialize_scrambling_data_symbols(link, pattern);
+
+ if (hop != DPRX) {
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
+ __func__,
+ hop,
+ dpcd_tps_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
+ __func__,
+ dpcd_tps_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ }
+
+ status = core_link_write_dpcd(link,
+ dpcd_tps_offset,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw));
+
+ return status;
+}
+
+/* Execute clock recovery phase of link training for specified hop in display
+ * path in non-transparent mode:
+ * - Driver issues both DPCD and SET_CONFIG transactions.
+ * - TPS1 is transmitted for any hops downstream of DPOA.
+ * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA.
+ * - CR for the first hop (DPTX-to-DPIA) is assumed to be successful.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static enum link_training_result dpia_training_cr_non_transparent(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
+ uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
+ enum dc_status status;
+ uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
+ uint32_t retry_count = 0;
+ /* From DP spec, CR read interval is always 100us. */
+ uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ uint8_t set_cfg_data;
+ enum dpia_set_config_ts ts;
+
+ repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery.
+ * Fix inherited from perform_clock_recovery_sequence() -
+ * the DP equivalent of this function:
+ * Required for Synaptics MST hub which can put the LT in
+ * infinite loop by switching the VS between level 0 and level 1
+ * continuously.
+ */
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+ /* DPTX-to-DPIA */
+ if (hop == repeater_cnt) {
+ /* Send SET_CONFIG(SET_LINK:LC,LR,LTTPR) to notify DPOA that
+ * non-transparent link training has started.
+ * This also enables the transmission of clk_sync packets.
+ */
+ set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_LINK,
+ link,
+ lt_settings);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_LINK,
+ set_cfg_data);
+ /* CR for this hop is considered successful as long as
+ * SET_CONFIG message is acknowledged by DPOA.
+ */
+ if (status == DC_OK)
+ result = LINK_TRAINING_SUCCESS;
+ else
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* DPOA-to-x */
+ /* Instruct DPOA to transmit TPS1 then update DPCD. */
+ if (retry_count == 0) {
+ ts = convert_trng_ptn_to_trng_stg(lt_settings->pattern_for_cr);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_TRAINING,
+ ts);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+
+ /* Update DPOA drive settings then DPCD. DPOA only adjusts
+ * drive settings for the hop immediately downstream.
+ */
+ if (hop == repeater_cnt - 1) {
+ set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE,
+ link,
+ lt_settings);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_VSPE,
+ set_cfg_data);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+ status = dpcd_set_lane_settings(link, lt_settings, hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
+
+ /* Read status and adjustment requests from DPCD. */
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* Check if clock recovery successful. */
+ if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ result = dp_get_cr_failure(lane_count, dpcd_lane_status);
+
+ if (dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* Count number of attempts with same drive settings.
+ * Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient.
+ */
+ if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET ==
+ dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE))
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* Update VS/PE. */
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->lane_settings,
+ lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ /* Abort link training if clock recovery failed due to HPD unplug. */
+ if (!link->hpd_status)
+ result = LINK_TRAINING_ABORT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
+ " -hop(%d)\n - result(%d)\n - retries(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ hop,
+ result,
+ retry_count);
+
+ return result;
+}
+
+/* Execute clock recovery phase of link training in transparent LTTPR mode:
+ * - Driver only issues DPCD transactions and leaves USB4 tunneling (SET_CONFIG) messages to DPIA.
+ * - Driver writes TPS1 to DPCD to kick off training.
+ * - Clock recovery (CR) for link is handled by DPOA, which reports result to DPIA on completion.
+ * - DPIA communicates result to driver by updating CR status when driver reads DPCD.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ */
+static enum link_training_result dpia_training_cr_transparent(struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
+ enum dc_status status;
+ uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
+ uint32_t retry_count = 0;
+ uint32_t wait_time_microsec = lt_settings->cr_pattern_time;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+
+ /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery.
+ * Fix inherited from perform_clock_recovery_sequence() -
+ * the DP equivalent of this function:
+ * Required for Synaptics MST hub which can put the LT in
+ * infinite loop by switching the VS between level 0 and level 1
+ * continuously.
+ */
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+ /* Write TPS1 (not VS or PE) to DPCD to start CR phase.
+ * DPIA sends SET_CONFIG(SET_LINK) to notify DPOA to
+ * start link training.
+ */
+ if (retry_count == 0) {
+ status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, DPRX);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+
+ dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
+
+ /* Read status and adjustment requests from DPCD. */
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ DPRX);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* Check if clock recovery successful. */
+ if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ result = dp_get_cr_failure(lane_count, dpcd_lane_status);
+
+ if (dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* Count number of attempts with same drive settings.
+ * Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient.
+ */
+ if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET ==
+ dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE))
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* Update VS/PE. */
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ /* Abort link training if clock recovery failed due to HPD unplug. */
+ if (!link->hpd_status)
+ result = LINK_TRAINING_ABORT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
+ " -hop(%d)\n - result(%d)\n - retries(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ DPRX,
+ result,
+ retry_count);
+
+ return result;
+}
+
+/* Execute clock recovery phase of link training for specified hop in display
+ * path.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static enum link_training_result dpia_training_cr_phase(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
+
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ result = dpia_training_cr_non_transparent(link, lt_settings, hop);
+ else
+ result = dpia_training_cr_transparent(link, lt_settings);
+
+ return result;
+}
+
+/* Return status read interval during equalization phase. */
+static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ uint32_t wait_time_microsec;
+
+ if (hop == DPRX)
+ wait_time_microsec = lt_settings->eq_pattern_time;
+ else
+ wait_time_microsec =
+ dp_translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Check debug option for extending aux read interval. */
+ if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval)
+ wait_time_microsec = DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US;
+#endif
+
+ return wait_time_microsec;
+}
+
+/* Execute equalization phase of link training for specified hop in display
+ * path in non-transparent mode:
+ * - driver issues both DPCD and SET_CONFIG transactions.
+ * - TPSx is transmitted for any hops downstream of DPOA.
+ * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA.
+ * - EQ for the first hop (DPTX-to-DPIA) is assumed to be successful.
+ * - DPRX EQ only reported successful when both DPRX and DPIA requirements
+ * (clk sync packets sent) fulfilled.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static enum link_training_result dpia_training_eq_non_transparent(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
+ uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
+ uint32_t retries_eq = 0;
+ enum dc_status status;
+ enum dc_dp_training_pattern tr_pattern;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ uint8_t set_cfg_data;
+ enum dpia_set_config_ts ts;
+
+ /* Training pattern is TPS4 for repeater;
+ * TPS2/3/4 for DPRX depending on what it supports.
+ */
+ if (hop == DPRX)
+ tr_pattern = lt_settings->pattern_for_eq;
+ else
+ tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+
+ repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) {
+ /* DPTX-to-DPIA equalization always successful. */
+ if (hop == repeater_cnt) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* Instruct DPOA to transmit TPSn then update DPCD. */
+ if (retries_eq == 0) {
+ ts = convert_trng_ptn_to_trng_stg(tr_pattern);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_TRAINING,
+ ts);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ status = dpcd_set_lt_pattern(link, tr_pattern, hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+
+ /* Update DPOA drive settings then DPCD. DPOA only adjusts
+ * drive settings for hop immediately downstream.
+ */
+ if (hop == repeater_cnt - 1) {
+ set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE,
+ link,
+ lt_settings);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_VSPE,
+ set_cfg_data);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+ status = dpcd_set_lane_settings(link, lt_settings, hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* Extend wait time on second equalization attempt on final hop to
+ * ensure clock sync packets have been sent.
+ */
+ if (hop == DPRX && retries_eq == 1)
+ wait_time_microsec = max(wait_time_microsec, (uint32_t)DPIA_CLK_SYNC_DELAY);
+ else
+ wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, hop);
+
+ dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
+
+ /* Read status and adjustment requests from DPCD. */
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* CR can still fail during EQ phase. Fail training if CR fails. */
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ result = LINK_TRAINING_EQ_FAIL_CR;
+ break;
+ }
+
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* Update VS/PE. */
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+
+ /* Abort link training if equalization failed due to HPD unplug. */
+ if (!link->hpd_status)
+ result = LINK_TRAINING_ABORT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
+ " - hop(%d)\n - result(%d)\n - retries(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ hop,
+ result,
+ retries_eq);
+
+ return result;
+}
+
+/* Execute equalization phase of link training in transparent LTTPR mode:
+ * - driver only issues DPCD transactions and leaves USB4 tunneling (SET_CONFIG) messages to DPIA.
+ * - driver writes TPSx to DPCD to notify DPIA that it is in the equalization phase.
+ * - equalization (EQ) for link is handled by DPOA, which reports result to DPIA on completion.
+ * - DPIA communicates result to driver by updating EQ status when driver reads DPCD.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ */
+static enum link_training_result dpia_training_eq_transparent(struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
+ uint32_t retries_eq = 0;
+ enum dc_status status;
+ enum dc_dp_training_pattern tr_pattern = lt_settings->pattern_for_eq;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+
+ wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX);
+
+ for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) {
+ if (retries_eq == 0) {
+ status = dpcd_set_lt_pattern(link, tr_pattern, DPRX);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+
+ dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
+
+ /* Read status and adjustment requests from DPCD. */
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ DPRX);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* CR can still fail during EQ phase. Fail training if CR fails. */
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ result = LINK_TRAINING_EQ_FAIL_CR;
+ break;
+ }
+
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* Update VS/PE. */
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+
+ /* Abort link training if equalization failed due to HPD unplug. */
+ if (!link->hpd_status)
+ result = LINK_TRAINING_ABORT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
+ " - hop(%d)\n - result(%d)\n - retries(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ DPRX,
+ result,
+ retries_eq);
+
+ return result;
+}
+
+/* Execute equalization phase of link training for specified hop in display
+ * path.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static enum link_training_result dpia_training_eq_phase(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ enum link_training_result result;
+
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ result = dpia_training_eq_non_transparent(link, lt_settings, hop);
+ else
+ result = dpia_training_eq_transparent(link, lt_settings);
+
+ return result;
+}
+
+/* End training of specified hop in display path. */
+static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
+{
+ union dpcd_training_pattern dpcd_pattern = { {0} };
+ uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
+ enum dc_status status;
+
+ if (hop != DPRX)
+ dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));
+
+ status = core_link_write_dpcd(link,
+ dpcd_tps_offset,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw));
+
+ return status;
+}
+
+/* End training of specified hop in display path.
+ *
+ * In transparent LTTPR mode:
+ * - driver clears training pattern for the specified hop in DPCD.
+ * In non-transparent LTTPR mode:
+ * - in addition to clearing training pattern, driver issues USB4 tunneling
+ * (SET_CONFIG) messages to notify DPOA when training is done for first hop
+ * (DPTX-to-DPIA) and last hop (DPRX).
+ *
+ * @param link DPIA link being trained.
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static enum link_training_result dpia_training_end(struct dc_link *link,
+ uint32_t hop)
+{
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+ uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
+ enum dc_status status;
+
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ if (hop == repeater_cnt) { /* DPTX-to-DPIA */
+ /* Send SET_CONFIG(SET_TRAINING:0xff) to notify DPOA that
+ * DPTX-to-DPIA hop trained. No DPCD write needed for first hop.
+ */
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_TRAINING,
+ DPIA_TS_UFP_DONE);
+ if (status != DC_OK)
+ result = LINK_TRAINING_ABORT;
+ } else { /* DPOA-to-x */
+ /* Write 0x0 to TRAINING_PATTERN_SET */
+ status = dpcd_clear_lt_pattern(link, hop);
+ if (status != DC_OK)
+ result = LINK_TRAINING_ABORT;
+ }
+
+ /* Notify DPOA that non-transparent link training of DPRX done. */
+ if (hop == DPRX && result != LINK_TRAINING_ABORT) {
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_TRAINING,
+ DPIA_TS_DPRX_DONE);
+ if (status != DC_OK)
+ result = LINK_TRAINING_ABORT;
+ }
+
+ } else { /* non-LTTPR or transparent LTTPR. */
+ /* Write 0x0 to TRAINING_PATTERN_SET */
+ status = dpcd_clear_lt_pattern(link, hop);
+ if (status != DC_OK)
+ result = LINK_TRAINING_ABORT;
+ }
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) end\n - hop(%d)\n - result(%d)\n - LTTPR mode(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ hop,
+ result,
+ link->lttpr_mode);
+
+ return result;
+}
+
+/* When aborting training of specified hop in display path, clean up by:
+ * - Attempting to clear DPCD TRAINING_PATTERN_SET, LINK_BW_SET and LANE_COUNT_SET.
+ * - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0.
+ *
+ * @param link DPIA link being trained.
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static void dpia_training_abort(struct dc_link *link, uint32_t hop)
+{
+ uint8_t data = 0;
+ uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ link->lttpr_mode,
+ link->hpd_status);
+
+ /* Abandon clean-up if sink unplugged. */
+ if (!link->hpd_status)
+ return;
+
+ if (hop != DPRX)
+ dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));
+
+ core_link_write_dpcd(link, dpcd_tps_offset, &data, 1);
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &data, 1);
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET, &data, 1);
+ core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
+}
+
+enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ enum link_training_result result;
+ struct link_training_settings lt_settings;
+ uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
+ int8_t repeater_id; /* Current hop. */
+
+ /* Configure link as prescribed in link_setting and set LTTPR mode. */
+ result = dpia_configure_link(link, link_setting, &lt_settings);
+ if (result != LINK_TRAINING_SUCCESS)
+ return result;
+
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ /* Train each hop in turn starting with the one closest to DPTX.
+ * In transparent or non-LTTPR mode, train only the final hop (DPRX).
+ */
+ for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) {
+ /* Clock recovery. */
+ result = dpia_training_cr_phase(link, &lt_settings, repeater_id);
+ if (result != LINK_TRAINING_SUCCESS)
+ break;
+
+ /* Equalization. */
+ result = dpia_training_eq_phase(link, &lt_settings, repeater_id);
+ if (result != LINK_TRAINING_SUCCESS)
+ break;
+
+ /* Stop training hop. */
+ result = dpia_training_end(link, repeater_id);
+ if (result != LINK_TRAINING_SUCCESS)
+ break;
+ }
+
+ /* On success, double-check the link status. If training failed due to
+ * a message tunneling failure, gracefully abort training of the current
+ * hop; if training ended conventionally, end training of the hop so
+ * that falling back to lower bandwidth settings is possible.
+ */
+ if (result == LINK_TRAINING_SUCCESS) {
+ msleep(5);
+ result = dp_check_link_loss_status(link, &lt_settings);
+ } else if (result == LINK_TRAINING_ABORT) {
+ dpia_training_abort(link, repeater_id);
+ } else {
+ dpia_training_end(link, repeater_id);
+ }
+ return result;
+}
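A concrete trace of the hop loop above, assuming non-transparent mode and a DPCD repeater count that decodes to 2 (the DPIA reporting itself as the first repeater): repeater_id counts 2, 1, 0, so the DPTX-to-DPIA hop is trained first (declared successful once its SET_CONFIG is acknowledged), then the LTTPR hop, and finally DPRX at hop 0. In transparent or non-LTTPR mode repeater_cnt stays 0 and the loop runs exactly once, for DPRX.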
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index de80a9ea4cfa..72b0f8594b4a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -1,5 +1,4 @@
-/*
- * Copyright 2021 Advanced Micro Devices, Inc.
+/* Copyright 2021 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,78 +34,128 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
int i;
/* Loop over created link encoder objects. */
- for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
- link_enc = stream->ctx->dc->res_pool->link_encoders[i];
-
- if (link_enc &&
- ((uint32_t)stream->signal & link_enc->output_signals)) {
- if (dc_is_dp_signal(stream->signal)) {
- /* DIGs do not support DP2.0 streams with 128b/132b encoding. */
- struct dc_link_settings link_settings = {0};
-
- decide_link_settings(stream, &link_settings);
- if ((link_settings.link_rate >= LINK_RATE_LOW) &&
- link_settings.link_rate <= LINK_RATE_HIGH3) {
+ if (stream) {
+ for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ link_enc = stream->ctx->dc->res_pool->link_encoders[i];
+
+ /* Need to check link signal type rather than stream signal type which may not
+ * yet match.
+ */
+ if (link_enc && ((uint32_t)stream->link->connector_signal & link_enc->output_signals)) {
+ if (dc_is_dp_signal(stream->signal)) {
+ /* DIGs do not support DP2.0 streams with 128b/132b encoding. */
+ struct dc_link_settings link_settings = {0};
+
+ decide_link_settings(stream, &link_settings);
+ if ((link_settings.link_rate >= LINK_RATE_LOW) &&
+ link_settings.link_rate <= LINK_RATE_HIGH3) {
+ is_dig_stream = true;
+ break;
+ }
+ } else {
is_dig_stream = true;
break;
}
- } else {
- is_dig_stream = true;
- break;
}
}
}
-
return is_dig_stream;
}
-/* Update DIG link encoder resource tracking variables in dc_state. */
-static void update_link_enc_assignment(
+static struct link_enc_assignment get_assignment(struct dc *dc, int i)
+{
+ struct link_enc_assignment assignment;
+
+ if (dc->current_state->res_ctx.link_enc_cfg_ctx.mode == LINK_ENC_CFG_TRANSIENT)
+ assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i];
+ else /* LINK_ENC_CFG_STEADY */
+ assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ return assignment;
+}
+
+/* Return stream using DIG link encoder resource. NULL if unused. */
+static struct dc_stream_state *get_stream_using_link_enc(
+ struct dc_state *state,
+ enum engine_id eng_id)
+{
+ struct dc_stream_state *stream = NULL;
+ int i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if ((assignment.valid == true) && (assignment.eng_id == eng_id)) {
+ stream = state->streams[i];
+ break;
+ }
+ }
+
+ return stream;
+}
+
+static void remove_link_enc_assignment(
struct dc_state *state,
struct dc_stream_state *stream,
- enum engine_id eng_id,
- bool add_enc)
+ enum engine_id eng_id)
{
int eng_idx;
- int stream_idx;
int i;
if (eng_id != ENGINE_ID_UNKNOWN) {
eng_idx = eng_id - ENGINE_ID_DIGA;
- stream_idx = -1;
- /* Index of stream in dc_state used to update correct entry in
+	/* Match stream ptr to find the correct entry to update in the
* link_enc_assignments table.
*/
- for (i = 0; i < state->stream_count; i++) {
- if (stream == state->streams[i]) {
- stream_idx = i;
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if (assignment.valid && assignment.stream == stream) {
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false;
+ /* Only add link encoder back to availability pool if not being
+ * used by any other stream (i.e. removing SST stream or last MST stream).
+ */
+ if (get_stream_using_link_enc(state, eng_id) == NULL)
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id;
+ stream->link_enc = NULL;
break;
}
}
+ }
+}
+
+static void add_link_enc_assignment(
+ struct dc_state *state,
+ struct dc_stream_state *stream,
+ enum engine_id eng_id)
+{
+ int eng_idx;
+ int i;
- /* Update link encoder assignments table, link encoder availability
- * pool and link encoder assigned to stream in state.
- * Add/remove encoder resource to/from stream.
+ if (eng_id != ENGINE_ID_UNKNOWN) {
+ eng_idx = eng_id - ENGINE_ID_DIGA;
+
+	/* Match stream ptr to find the correct entry to update in the
+ * link_enc_assignments table.
*/
- if (stream_idx != -1) {
- if (add_enc) {
- state->res_ctx.link_enc_assignments[stream_idx] = (struct link_enc_assignment){
+ for (i = 0; i < state->stream_count; i++) {
+ if (stream == state->streams[i]) {
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i] = (struct link_enc_assignment){
.valid = true,
.ep_id = (struct display_endpoint_id) {
.link_id = stream->link->link_id,
.ep_type = stream->link->ep_type},
- .eng_id = eng_id};
- state->res_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
+ .eng_id = eng_id,
+ .stream = stream};
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
- } else {
- state->res_ctx.link_enc_assignments[stream_idx].valid = false;
- state->res_ctx.link_enc_avail[eng_idx] = eng_id;
- stream->link_enc = NULL;
+ break;
}
- } else {
- dm_output_to_console("%s: Stream not found in dc_state.\n", __func__);
}
+
+ /* Attempted to add an encoder assignment for a stream not in dc_state. */
+ ASSERT(i != state->stream_count);
}
}
@@ -119,7 +168,7 @@ static enum engine_id find_first_avail_link_enc(
int i;
for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
- eng_id = state->res_ctx.link_enc_avail[i];
+ eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
if (eng_id != ENGINE_ID_UNKNOWN)
break;
}
@@ -127,30 +176,65 @@ static enum engine_id find_first_avail_link_enc(
return eng_id;
}
-/* Return stream using DIG link encoder resource. NULL if unused. */
-static struct dc_stream_state *get_stream_using_link_enc(
+/* Check for availability of link encoder eng_id. */
+static bool is_avail_link_enc(struct dc_state *state, enum engine_id eng_id, struct dc_stream_state *stream)
+{
+ bool is_avail = false;
+ int eng_idx = eng_id - ENGINE_ID_DIGA;
+
+ /* An encoder is available if it is still in the availability pool. */
+ if (eng_id != ENGINE_ID_UNKNOWN && state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] != ENGINE_ID_UNKNOWN) {
+ is_avail = true;
+ } else {
+ struct dc_stream_state *stream_assigned = NULL;
+
+ /* MST streams share the same link and should share the same encoder.
+	 * If a stream that has already been assigned a link encoder uses the
+ * same link as the stream checking for availability, it is an MST stream
+ * and should use the same link encoder.
+ */
+ stream_assigned = get_stream_using_link_enc(state, eng_id);
+ if (stream_assigned && stream != stream_assigned && stream->link == stream_assigned->link)
+ is_avail = true;
+ }
+
+ return is_avail;
+}
+
+/* Test for display_endpoint_id equality. */
+static bool are_ep_ids_equal(struct display_endpoint_id *lhs, struct display_endpoint_id *rhs)
+{
+ bool are_equal = false;
+
+ if (lhs->link_id.id == rhs->link_id.id &&
+ lhs->link_id.enum_id == rhs->link_id.enum_id &&
+ lhs->link_id.type == rhs->link_id.type &&
+ lhs->ep_type == rhs->ep_type)
+ are_equal = true;
+
+ return are_equal;
+}
+
+static struct link_encoder *get_link_enc_used_by_link(
struct dc_state *state,
- enum engine_id eng_id)
+ const struct dc_link *link)
{
- struct dc_stream_state *stream = NULL;
- int stream_idx = -1;
+ struct link_encoder *link_enc = NULL;
+ struct display_endpoint_id ep_id;
int i;
+ ep_id = (struct display_endpoint_id) {
+ .link_id = link->link_id,
+ .ep_type = link->ep_type};
+
for (i = 0; i < state->stream_count; i++) {
- struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
- if (assignment.valid && (assignment.eng_id == eng_id)) {
- stream_idx = i;
- break;
- }
+ if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id))
+ link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA];
}
- if (stream_idx != -1)
- stream = state->streams[stream_idx];
- else
- dm_output_to_console("%s: No stream using DIG(%d).\n", __func__, eng_id);
-
- return stream;
+ return link_enc;
}
void link_enc_cfg_init(
@@ -161,10 +245,12 @@ void link_enc_cfg_init(
for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
if (dc->res_pool->link_encoders[i])
- state->res_ctx.link_enc_avail[i] = (enum engine_id) i;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = (enum engine_id) i;
else
- state->res_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
}
+
+ state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
}
void link_enc_cfg_link_encs_assign(
@@ -175,11 +261,17 @@ void link_enc_cfg_link_encs_assign(
{
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
int i;
+ int j;
+
+ ASSERT(state->stream_count == stream_count);
/* Release DIG link encoder resources before running assignment algorithm. */
for (i = 0; i < stream_count; i++)
dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
+ for (i = 0; i < MAX_PIPES; i++)
+ ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false);
+
/* (a) Assign DIG link encoders to physical (unmappable) endpoints first. */
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = streams[i];
@@ -191,26 +283,82 @@ void link_enc_cfg_link_encs_assign(
/* Physical endpoints have a fixed mapping to DIG link encoders. */
if (!stream->link->is_dig_mapping_flexible) {
eng_id = stream->link->eng_id;
- update_link_enc_assignment(state, stream, eng_id, true);
+ add_link_enc_assignment(state, stream, eng_id);
+ }
+ }
+
+ /* (b) Retain previous assignments for mappable endpoints if encoders still available. */
+ eng_id = ENGINE_ID_UNKNOWN;
+
+ if (state != dc->current_state) {
+ struct dc_state *prev_state = dc->current_state;
+
+ for (i = 0; i < stream_count; i++) {
+ struct dc_stream_state *stream = state->streams[i];
+
+ /* Skip stream if not supported by DIG link encoder. */
+ if (!is_dig_link_enc_stream(stream))
+ continue;
+
+ if (!stream->link->is_dig_mapping_flexible)
+ continue;
+
+ for (j = 0; j < prev_state->stream_count; j++) {
+ struct dc_stream_state *prev_stream = prev_state->streams[j];
+
+ if (stream == prev_stream && stream->link == prev_stream->link &&
+ prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) {
+ eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id;
+ if (is_avail_link_enc(state, eng_id, stream))
+ add_link_enc_assignment(state, stream, eng_id);
+ }
+ }
}
}
- /* (b) Then assign encoders to mappable endpoints. */
+ /* (c) Then assign encoders to remaining mappable endpoints. */
eng_id = ENGINE_ID_UNKNOWN;
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = streams[i];
/* Skip stream if not supported by DIG link encoder. */
- if (!is_dig_link_enc_stream(stream))
+ if (!is_dig_link_enc_stream(stream)) {
+ ASSERT(stream->link->is_dig_mapping_flexible != true);
continue;
+ }
/* Mappable endpoints have a flexible mapping to DIG link encoders. */
if (stream->link->is_dig_mapping_flexible) {
- eng_id = find_first_avail_link_enc(stream->ctx, state);
- update_link_enc_assignment(state, stream, eng_id, true);
+ struct link_encoder *link_enc = NULL;
+
+ /* Skip if encoder assignment retained in step (b) above. */
+ if (stream->link_enc)
+ continue;
+
+ /* For MST, multiple streams will share the same link / display
+ * endpoint. These streams should use the same link encoder
+ * assigned to that endpoint.
+ */
+ link_enc = get_link_enc_used_by_link(state, stream->link);
+ if (link_enc == NULL)
+ eng_id = find_first_avail_link_enc(stream->ctx, state);
+ else
+ eng_id = link_enc->preferred_engine;
+ add_link_enc_assignment(state, stream, eng_id);
}
}
+
+ link_enc_cfg_validate(dc, state);
+
+ /* Update transient assignments. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i] =
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+ }
+
+ /* Current state mode will be set to steady once this state committed. */
+ state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
}
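
A concrete walk-through of the three assignment phases above (engines and streams here are illustrative only):

	/* Pool: DIG A..D free. S0 drives a physical connector hard-wired to
	 * DIG B; S1 drives a mappable (e.g. USB4 DPIA) endpoint.
	 *
	 * (a) S0 -> DIG B via stream->link->eng_id; pool = {A, C, D}.
	 * (b) If the previous committed state had S1 -> DIG C and C is still
	 *     available (or only held by an MST sibling on the same link),
	 *     S1 keeps DIG C; pool = {A, D}.
	 * (c) Otherwise S1 takes the first free encoder, here DIG A. An MST
	 *     sibling of S1 reuses S1's encoder via
	 *     get_link_enc_used_by_link() instead of consuming another DIG.
	 */
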
void link_enc_cfg_link_enc_unassign(
@@ -226,16 +374,16 @@ void link_enc_cfg_link_enc_unassign(
if (stream->link_enc)
eng_id = stream->link_enc->preferred_engine;
- update_link_enc_assignment(state, stream, eng_id, false);
+ remove_link_enc_assignment(state, stream, eng_id);
}
bool link_enc_cfg_is_transmitter_mappable(
- struct dc_state *state,
+ struct dc *dc,
struct link_encoder *link_enc)
{
bool is_mappable = false;
enum engine_id eng_id = link_enc->preferred_engine;
- struct dc_stream_state *stream = get_stream_using_link_enc(state, eng_id);
+ struct dc_stream_state *stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id);
if (stream)
is_mappable = stream->link->is_dig_mapping_flexible;
@@ -243,73 +391,217 @@ bool link_enc_cfg_is_transmitter_mappable(
return is_mappable;
}
-struct dc_link *link_enc_cfg_get_link_using_link_enc(
- struct dc_state *state,
+struct dc_stream_state *link_enc_cfg_get_stream_using_link_enc(
+ struct dc *dc,
enum engine_id eng_id)
{
- struct dc_link *link = NULL;
- int stream_idx = -1;
+ struct dc_stream_state *stream = NULL;
int i;
- for (i = 0; i < state->stream_count; i++) {
- struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = get_assignment(dc, i);
- if (assignment.valid && (assignment.eng_id == eng_id)) {
- stream_idx = i;
+ if ((assignment.valid == true) && (assignment.eng_id == eng_id)) {
+ stream = assignment.stream;
break;
}
}
- if (stream_idx != -1)
- link = state->streams[stream_idx]->link;
- else
- dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
+ return stream;
+}
+
+struct dc_link *link_enc_cfg_get_link_using_link_enc(
+ struct dc *dc,
+ enum engine_id eng_id)
+{
+ struct dc_link *link = NULL;
+ struct dc_stream_state *stream = NULL;
+
+ stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id);
+
+ if (stream)
+ link = stream->link;
return link;
}
struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
- struct dc_state *state,
+ struct dc *dc,
const struct dc_link *link)
{
struct link_encoder *link_enc = NULL;
struct display_endpoint_id ep_id;
- int stream_idx = -1;
int i;
ep_id = (struct display_endpoint_id) {
.link_id = link->link_id,
.ep_type = link->ep_type};
- for (i = 0; i < state->stream_count; i++) {
- struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
-
- if (assignment.valid &&
- assignment.ep_id.link_id.id == ep_id.link_id.id &&
- assignment.ep_id.link_id.enum_id == ep_id.link_id.enum_id &&
- assignment.ep_id.link_id.type == ep_id.link_id.type &&
- assignment.ep_id.ep_type == ep_id.ep_type) {
- stream_idx = i;
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = get_assignment(dc, i);
+
+ if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) {
+ link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA];
break;
}
}
- if (stream_idx != -1)
- link_enc = state->streams[stream_idx]->link_enc;
-
return link_enc;
}
-struct link_encoder *link_enc_cfg_get_next_avail_link_enc(
- const struct dc *dc,
- const struct dc_state *state)
+struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
{
struct link_encoder *link_enc = NULL;
- enum engine_id eng_id;
+ enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS];
+ int i;
+
+ for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++)
+ encs_assigned[i] = ENGINE_ID_UNKNOWN;
+
+ /* Add assigned encoders to list. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = get_assignment(dc, i);
+
+ if (assignment.valid)
+ encs_assigned[assignment.eng_id - ENGINE_ID_DIGA] = assignment.eng_id;
+ }
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ if (encs_assigned[i] == ENGINE_ID_UNKNOWN) {
+ link_enc = dc->res_pool->link_encoders[i];
+ break;
+ }
+ }
+
+ return link_enc;
+}
- eng_id = find_first_avail_link_enc(dc->ctx, state);
- if (eng_id != ENGINE_ID_UNKNOWN)
- link_enc = dc->res_pool->link_encoders[eng_id - ENGINE_ID_DIGA];
+struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
+ struct dc *dc,
+ const struct dc_stream_state *stream)
+{
+ struct link_encoder *link_enc;
+
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, stream->link);
return link_enc;
}
+
+bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link)
+{
+ bool is_avail = true;
+ int i;
+
+ /* An encoder is not available if it has already been assigned to a different endpoint. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = get_assignment(dc, i);
+ struct display_endpoint_id ep_id = (struct display_endpoint_id) {
+ .link_id = link->link_id,
+ .ep_type = link->ep_type};
+
+ if (assignment.valid && assignment.eng_id == eng_id && !are_ep_ids_equal(&ep_id, &assignment.ep_id)) {
+ is_avail = false;
+ break;
+ }
+ }
+
+ return is_avail;
+}
+
+bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
+{
+ bool is_valid = false;
+ bool valid_entries = true;
+ bool valid_stream_ptrs = true;
+ bool valid_uniqueness = true;
+ bool valid_avail = true;
+ bool valid_streams = true;
+ int i, j;
+ uint8_t valid_count = 0;
+ uint8_t dig_stream_count = 0;
+ int matching_stream_ptrs = 0;
+ int eng_ids_per_ep_id[MAX_PIPES] = {0};
+
+ /* (1) No. valid entries same as stream count. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if (assignment.valid)
+ valid_count++;
+
+ if (is_dig_link_enc_stream(state->streams[i]))
+ dig_stream_count++;
+ }
+ if (valid_count != dig_stream_count)
+ valid_entries = false;
+
+ /* (2) Matching stream ptrs. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if (assignment.valid) {
+ if (assignment.stream == state->streams[i])
+ matching_stream_ptrs++;
+ else
+ valid_stream_ptrs = false;
+ }
+ }
+
+ /* (3) Each endpoint assigned unique encoder. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment_i = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if (assignment_i.valid) {
+ struct display_endpoint_id ep_id_i = assignment_i.ep_id;
+
+ eng_ids_per_ep_id[i]++;
+ for (j = 0; j < MAX_PIPES; j++) {
+ struct link_enc_assignment assignment_j =
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j];
+
+ if (j == i)
+ continue;
+
+ if (assignment_j.valid) {
+ struct display_endpoint_id ep_id_j = assignment_j.ep_id;
+
+ if (are_ep_ids_equal(&ep_id_i, &ep_id_j) &&
+ assignment_i.eng_id != assignment_j.eng_id) {
+ valid_uniqueness = false;
+ eng_ids_per_ep_id[i]++;
+ }
+ }
+ }
+ }
+ }
+
+ /* (4) Assigned encoders not in available pool. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if (assignment.valid) {
+ for (j = 0; j < dc->res_pool->res_cap->num_dig_link_enc; j++) {
+ if (state->res_ctx.link_enc_cfg_ctx.link_enc_avail[j] == assignment.eng_id) {
+ valid_avail = false;
+ break;
+ }
+ }
+ }
+ }
+
+ /* (5) All streams have valid link encoders. */
+ for (i = 0; i < state->stream_count; i++) {
+ struct dc_stream_state *stream = state->streams[i];
+
+ if (is_dig_link_enc_stream(stream) && stream->link_enc == NULL) {
+ valid_streams = false;
+ break;
+ }
+ }
+
+ is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams;
+ ASSERT(is_valid);
+
+ return is_valid;
+}
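
Read together, the five checks pin down a single steady-state invariant on the assignment table; a condensed restatement (not patch code):

	/* For a committed state:
	 *  (1) count(valid entries) == count(streams needing a DIG);
	 *  (2) entry[i].stream == state->streams[i] for every valid entry;
	 *  (3) each display endpoint maps to exactly one engine id;
	 *  (4) no assigned engine id also sits in link_enc_avail[]; and
	 *  (5) every DIG-capable stream has stream->link_enc set.
	 * Any violation trips the ASSERT above.
	 */
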
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 9c51cd09dcf1..368e834c6809 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -17,6 +17,7 @@
#include "link_enc_cfg.h"
#include "clk_mgr.h"
#include "inc/link_dpcd.h"
+#include "dccg.h"
static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
{
@@ -61,6 +62,13 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on)
sizeof(state));
}
+void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
+{
+ if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
+ core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
+ &dp_test_mode, sizeof(dp_test_mode));
+}
+
void dp_enable_link_phy(
struct dc_link *link,
enum signal_type signal,
@@ -79,7 +87,7 @@ void dp_enable_link_phy(
/* Link should always be assigned encoder when en-/disabling. */
if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -111,12 +119,37 @@ void dp_enable_link_phy(
link->cur_link_settings = *link_settings;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ /* TODO - DP2.0 HW: notify link rate change here */
+ } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (dc->clk_mgr->funcs->notify_link_rate_change)
+ dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
+ }
+#else
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
-
+#endif
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ enable_dp_hpo_output(link, link_settings);
+ } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (dc_is_dp_sst_signal(signal)) {
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ } else {
+ link_enc->funcs->enable_dp_mst_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ }
+ }
+#else
if (dc_is_dp_sst_signal(signal)) {
link_enc->funcs->enable_dp_output(
link_enc,
@@ -128,10 +161,11 @@ void dp_enable_link_phy(
link_settings,
clock_source);
}
-
+#endif
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
dp_receiver_power_ctrl(link, true);
}
@@ -206,11 +240,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_link_enc = link->hpo_dp_link_enc;
+#endif
struct link_encoder *link_enc;
/* Link should always be assigned encoder when en-/disabling. */
if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -221,18 +258,34 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
if (signal == SIGNAL_TYPE_EDP) {
if (link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_backlight_control(link, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
+ disable_dp_hpo_output(link, signal);
+ else
+ link_enc->funcs->disable_output(link_enc, signal);
+#else
link_enc->funcs->disable_output(link_enc, signal);
+#endif
link->dc->hwss.edp_power_control(link, false);
} else {
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&
+ hpo_link_enc)
+ disable_dp_hpo_output(link, signal);
+ else
+ link_enc->funcs->disable_output(link_enc, signal);
+#else
link_enc->funcs->disable_output(link_enc, signal);
-
+#endif
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
}
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+
/* Clear current link setting.*/
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
@@ -273,6 +326,14 @@ bool dp_set_hw_training_pattern(
case DP_TRAINING_PATTERN_SEQUENCE_4:
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_TPS1:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE;
+ break;
+ case DP_128b_132b_TPS2:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE;
+ break;
+#endif
default:
break;
}
@@ -282,6 +343,10 @@ bool dp_set_hw_training_pattern(
return true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#define DC_LOGGER \
+ link->ctx->logger
+#endif
void dp_set_hw_lane_settings(
struct dc_link *link,
const struct link_training_settings *link_settings,
@@ -293,7 +358,23 @@ void dp_set_hw_lane_settings(
return;
/* call Encoder to set lane settings */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ link->hpo_dp_link_enc->funcs->set_ffe(
+ link->hpo_dp_link_enc,
+ &link_settings->link_settings,
+ link_settings->lane_settings[0].FFE_PRESET.raw);
+ } else if (dp_get_link_encoding_format(&link_settings->link_settings)
+ == DP_8b_10b_ENCODING) {
+ encoder->funcs->dp_set_lane_settings(encoder, link_settings);
+ }
+#else
encoder->funcs->dp_set_lane_settings(encoder, link_settings);
+#endif
+ memmove(link->cur_lane_setting,
+ link_settings->lane_settings,
+ sizeof(link->cur_lane_setting));
}
void dp_set_hw_test_pattern(
@@ -304,13 +385,16 @@ void dp_set_hw_test_pattern(
{
struct encoder_set_dp_phy_pattern_param pattern_param = {0};
struct link_encoder *encoder;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ enum dp_link_encoding link_encoding_format = dp_get_link_encoding_format(&link->cur_link_settings);
+#endif
/* Access link encoder based on whether it is statically
* or dynamically assigned to a link.
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- encoder = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ encoder = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
encoder = link->link_enc;
@@ -319,8 +403,28 @@ void dp_set_hw_test_pattern(
pattern_param.custom_pattern_size = custom_pattern_size;
pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (link_encoding_format) {
+ case DP_128b_132b_ENCODING:
+ link->hpo_dp_link_enc->funcs->set_link_test_pattern(
+ link->hpo_dp_link_enc, &pattern_param);
+ break;
+ case DP_8b_10b_ENCODING:
+ ASSERT(encoder);
+ encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
+ break;
+ default:
+ DC_LOG_ERROR("%s: Unknown link encoding format.", __func__);
+ break;
+ }
+#else
encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
+#endif
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#undef DC_LOGGER
+#endif
void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
@@ -338,7 +442,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i].stream->link == link) {
udelay(100);
- pipes[i].stream_res.stream_enc->funcs->dp_blank(
+ pipes[i].stream_res.stream_enc->funcs->dp_blank(link,
pipes[i].stream_res.stream_enc);
/* disable any test pattern that might be active */
@@ -351,9 +455,10 @@ void dp_retrain_link_dp_test(struct dc_link *link,
if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only)
(&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio);
- link->link_enc->funcs->disable_output(
- link->link_enc,
- SIGNAL_TYPE_DISPLAY_PORT);
+ if (link->link_enc)
+ link->link_enc->funcs->disable_output(
+ link->link_enc,
+ SIGNAL_TYPE_DISPLAY_PORT);
/* Clear current link setting. */
memset(&link->cur_link_settings, 0,
@@ -468,7 +573,12 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
/* Enable DSC in encoder */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
+ && !is_dp_128b_132b_signal(pipe_ctx)) {
+#else
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+#endif
DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
@@ -495,13 +605,22 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
/* disable DSC in stream encoder */
if (dc_is_dp_signal(stream->signal)) {
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
- pipe_ctx->stream_res.stream_enc,
- OPTC_DSC_DISABLED, 0, 0);
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL);
- }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false,
+ NULL,
+ true);
+ else
+#endif
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+ pipe_ctx->stream_res.stream_enc,
+ OPTC_DSC_DISABLED, 0, 0);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL, true);
+ }
}
/* disable DSC block */
@@ -535,7 +654,16 @@ out:
return result;
}
-bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
+/*
+ * For dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled;
+ * hence the PPS info packet update needs to use a frame update instead of an immediate update.
+ * Added parameter immediate_update for this purpose.
+ * The decision to use frame update is hard-coded in function dp_update_dsc_config(),
+ * which is the only place where a "false" would be passed in for param immediate_update.
+ *
+ * immediate_update is only applicable when DSC is enabled.
+ */
+bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -562,16 +690,35 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
if (dc_is_dp_signal(stream->signal)) {
DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc,
- true,
- &dsc_packed_pps[0]);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ true,
+ &dsc_packed_pps[0],
+ immediate_update);
+ else
+#endif
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc,
+ true,
+ &dsc_packed_pps[0],
+ immediate_update);
}
} else {
/* disable DSC PPS in stream encoder */
if (dc_is_dp_signal(stream->signal)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false,
+ NULL,
+ true);
+ else
+#endif
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL, true);
}
}
@@ -589,7 +736,171 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
return false;
dp_set_dsc_on_stream(pipe_ctx, true);
- dp_set_dsc_pps_sdp(pipe_ctx, true);
+ dp_set_dsc_pps_sdp(pipe_ctx, true, false);
return true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#undef DC_LOGGER
+#define DC_LOGGER \
+ link->ctx->logger
+
+static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
+{
+ switch (link->link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYD32CLKA;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYD32CLKB;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYD32CLKC;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYD32CLKD;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYD32CLKE;
+ default:
+ return PHYD32CLKA;
+ }
+}
+
+void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings)
+{
+ const struct dc *dc = link->dc;
+ enum phyd32clk_clock_source phyd32clk;
+
+ /* Enable PHY PLL at target bit rate
+ * UHBR10 = 10Gbps (SYMCLK32 = 312.5MHz)
+	 * UHBR13.5 = 13.5Gbps (SYMCLK32 = 421.875MHz)
+ * UHBR20 = 20Gbps (SYMCLK32 = 625MHz)
+ */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ switch (link_settings->link_rate) {
+ case LINK_RATE_UHBR10:
+ dm_set_phyd32clk(dc->ctx, 312500);
+ break;
+ case LINK_RATE_UHBR13_5:
+			dm_set_phyd32clk(dc->ctx, 421875);
+ break;
+ case LINK_RATE_UHBR20:
+ dm_set_phyd32clk(dc->ctx, 625000);
+ break;
+ default:
+ return;
+ }
+ } else {
+ /* DP2.0 HW: call transmitter control to enable PHY */
+ link->hpo_dp_link_enc->funcs->enable_link_phy(
+ link->hpo_dp_link_enc,
+ link_settings,
+ link->link_enc->transmitter);
+ }
+
+ /* DCCG muxing and DTBCLK DTO */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->res_pool->dccg->funcs->set_physymclk(
+ dc->res_pool->dccg,
+ link->link_enc_hw_inst,
+ PHYSYMCLK_FORCE_SRC_PHYD32CLK,
+ true);
+
+ phyd32clk = get_phyd32clk_src(link);
+ dc->res_pool->dccg->funcs->enable_symclk32_le(
+ dc->res_pool->dccg,
+ link->hpo_dp_link_enc->inst,
+ phyd32clk);
+ link->hpo_dp_link_enc->funcs->link_enable(
+ link->hpo_dp_link_enc,
+ link_settings->lane_count);
+ }
+}
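
The SYMCLK32 figures in the comment follow from the 32-bit symbol clock being the per-lane bit rate divided by 32; since dc encodes UHBR rates in units of 10 Mbps, the conversion is a single multiply. A quick check (sketch, not patch code):

	/* SYMCLK32 in kHz from a DP2.0 link rate (enum value = rate / 10 Mbps) */
	static unsigned int uhbr_symclk32_khz(enum dc_link_rate rate)
	{
		return (unsigned int)rate * 10000 / 32;
	}
	/* UHBR10:   1000 * 10000 / 32 = 312500 kHz (312.5 MHz)
	 * UHBR13.5: 1350 * 10000 / 32 = 421875 kHz (421.875 MHz)
	 * UHBR20:   2000 * 10000 / 32 = 625000 kHz (625 MHz)
	 */

These are the values the FPGA path above passes to dm_set_phyd32clk().
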
+
+void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal)
+{
+ const struct dc *dc = link->dc;
+
+ link->hpo_dp_link_enc->funcs->link_disable(link->hpo_dp_link_enc);
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->res_pool->dccg->funcs->disable_symclk32_le(
+ dc->res_pool->dccg,
+ link->hpo_dp_link_enc->inst);
+
+ dc->res_pool->dccg->funcs->set_physymclk(
+ dc->res_pool->dccg,
+ link->link_enc_hw_inst,
+ PHYSYMCLK_FORCE_SRC_SYMCLK,
+ false);
+
+ dm_set_phyd32clk(dc->ctx, 0);
+ } else {
+ /* DP2.0 HW: call transmitter control to disable PHY */
+ link->hpo_dp_link_enc->funcs->disable_link_phy(
+ link->hpo_dp_link_enc,
+ signal);
+ }
+}
+
+void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct pipe_ctx *odm_pipe;
+ int odm_combine_num_segments = 1;
+ enum phyd32clk_clock_source phyd32clk;
+
+ if (enable) {
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_combine_num_segments++;
+
+ dc->res_pool->dccg->funcs->set_dpstreamclk(
+ dc->res_pool->dccg,
+ DTBCLK0,
+ pipe_ctx->stream_res.tg->inst);
+
+ phyd32clk = get_phyd32clk_src(stream->link);
+ dc->res_pool->dccg->funcs->enable_symclk32_se(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
+ phyd32clk);
+
+ dc->res_pool->dccg->funcs->set_dtbclk_dto(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.tg->inst,
+ stream->phy_pix_clk,
+ odm_combine_num_segments,
+ &stream->timing);
+ } else {
+ dc->res_pool->dccg->funcs->set_dtbclk_dto(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.tg->inst,
+ 0,
+ 0,
+ &stream->timing);
+ dc->res_pool->dccg->funcs->disable_symclk32_se(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.hpo_dp_stream_enc->inst);
+ dc->res_pool->dccg->funcs->set_dpstreamclk(
+ dc->res_pool->dccg,
+ REFCLK,
+ pipe_ctx->stream_res.tg->inst);
+ }
+}
+
+void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link)
+{
+ const struct dc *dc = link->dc;
+ struct dc_state *state = dc->current_state;
+ uint8_t i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc &&
+ state->res_ctx.pipe_ctx[i].stream &&
+ state->res_ctx.pipe_ctx[i].stream->link == link &&
+ !state->res_ctx.pipe_ctx[i].stream->dpms_off) {
+ setup_dp_hpo_stream(&state->res_ctx.pipe_ctx[i], false);
+ }
+ }
+}
+
+#undef DC_LOGGER
+#endif
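
Per the comment above dp_set_dsc_pps_sdp(), dp_update_dsc_config() is the only caller that requests a frame-aligned PPS update; every other path latches immediately. Usage in brief (sketch):

	/* Dynamic bpp change: DSC is reprogrammed under MASTER_UPDATE_LOCK,
	 * so the PPS SDP must latch on the frame boundary.
	 */
	dp_set_dsc_pps_sdp(pipe_ctx, true, false);

	/* Enable/disable paths latch the PPS SDP immediately. */
	dp_set_dsc_pps_sdp(pipe_ctx, true, true);
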
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a60396d5be44..c32fdccd4d92 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -41,6 +41,8 @@
#include "set_mode_types.h"
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
+#include "link_enc_cfg.h"
+#include "dc_link_dp.h"
#if defined(CONFIG_DRM_AMD_DC_SI)
#include "dce60/dce60_resource.h"
@@ -54,6 +56,7 @@
#include "dcn10/dcn10_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
+#include "dcn201/dcn201_resource.h"
#include "dcn30/dcn30_resource.h"
#include "dcn301/dcn301_resource.h"
#include "dcn302/dcn302_resource.h"
@@ -128,6 +131,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
+ if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
+ dc_version = DCN_VERSION_2_01;
+ break;
+ }
if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_0;
if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev))
@@ -217,6 +224,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_2_1:
res_pool = dcn21_create_resource_pool(init_data, dc);
break;
+ case DCN_VERSION_2_01:
+ res_pool = dcn201_create_resource_pool(init_data, dc);
+ break;
case DCN_VERSION_3_0:
res_pool = dcn30_create_resource_pool(init_data, dc);
break;
@@ -347,6 +357,29 @@ bool resource_construct(
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
+ pool->hpo_dp_stream_enc_count = 0;
+ if (create_funcs->create_hpo_dp_stream_encoder) {
+ for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) {
+ pool->hpo_dp_stream_enc[i] = create_funcs->create_hpo_dp_stream_encoder(i+ENGINE_ID_HPO_DP_0, ctx);
+ if (pool->hpo_dp_stream_enc[i] == NULL)
+ DC_ERR("DC: failed to create HPO DP stream encoder!\n");
+ pool->hpo_dp_stream_enc_count++;
+ }
+ }
+
+ pool->hpo_dp_link_enc_count = 0;
+ if (create_funcs->create_hpo_dp_link_encoder) {
+ for (i = 0; i < caps->num_hpo_dp_link_encoder; i++) {
+ pool->hpo_dp_link_enc[i] = create_funcs->create_hpo_dp_link_encoder(i, ctx);
+ if (pool->hpo_dp_link_enc[i] == NULL)
+ DC_ERR("DC: failed to create HPO DP link encoder!\n");
+ pool->hpo_dp_link_enc_count++;
+ }
+ }
+#endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
for (i = 0; i < caps->num_mpc_3dlut; i++) {
pool->mpc_lut[i] = dc_create_3dlut_func();
if (pool->mpc_lut[i] == NULL)
@@ -1122,9 +1155,17 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
}
- if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
- pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
- res = false;
+ if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) {
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
+ pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+ res = false;
+ } else {
+ /* Clamp minimum viewport size */
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE)
+ pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
+ if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+ pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
+ }
DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n"
"src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n",
@@ -1665,6 +1706,22 @@ static void update_stream_engine_usage(
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void update_hpo_dp_stream_engine_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc,
+ bool acquired)
+{
+ int i;
+
+ for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
+ if (pool->hpo_dp_stream_enc[i] == hpo_dp_stream_enc)
+ res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired;
+ }
+}
+#endif
+
/* TODO: release audio object */
void update_audio_usage(
struct resource_context *res_ctx,
@@ -1709,6 +1766,26 @@ static int acquire_first_free_pipe(
return -1;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
+ if (!res_ctx->is_hpo_dp_stream_enc_acquired[i] &&
+ pool->hpo_dp_stream_enc[i]) {
+
+ return pool->hpo_dp_stream_enc[i];
+ }
+ }
+
+ return NULL;
+}
+#endif
+
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1799,6 +1876,15 @@ enum dc_status dc_remove_stream_from_ctx(
if (dc->res_pool->funcs->link_enc_unassign)
dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(del_pipe)) {
+ update_hpo_dp_stream_engine_usage(
+ &new_ctx->res_ctx, dc->res_pool,
+ del_pipe->stream_res.hpo_dp_stream_enc,
+ false);
+ }
+#endif
+
if (del_pipe->stream_res.audio)
update_audio_usage(
&new_ctx->res_ctx,
@@ -2051,6 +2137,31 @@ enum dc_status resource_map_pool_resources(
pipe_ctx->stream_res.stream_enc,
true);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Allocate DP HPO Stream Encoder based on signal, hw capabilities
+ * and link settings
+ */
+ if (dc_is_dp_signal(stream->signal) &&
+ dc->caps.dp_hpo) {
+ struct dc_link_settings link_settings = {0};
+
+ decide_link_settings(stream, &link_settings);
+ if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc =
+ find_first_free_match_hpo_dp_stream_enc_for_link(
+ &context->res_ctx, pool, stream);
+
+ if (!pipe_ctx->stream_res.hpo_dp_stream_enc)
+ return DC_NO_STREAM_ENC_RESOURCE;
+
+ update_hpo_dp_stream_engine_usage(
+ &context->res_ctx, pool,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ true);
+ }
+ }
+#endif
+
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
@@ -2147,7 +2258,7 @@ enum dc_status dc_validate_global_state(
* Update link encoder to stream assignment.
* TODO: Split out reason allocation from validation.
*/
- if (dc->res_pool->funcs->link_encs_assign)
+ if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
dc->res_pool->funcs->link_encs_assign(
dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
#endif
@@ -2726,9 +2837,24 @@ bool pipe_need_reprogram(
if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
return true;
- /* DIG link encoder resource assignment for stream changed. */
- if (pipe_ctx_old->stream->link_enc != pipe_ctx->stream->link_enc)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc)
return true;
+#endif
+
+ /* DIG link encoder resource assignment for stream changed. */
+ if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
+ bool need_reprogram = false;
+ struct dc *dc = pipe_ctx_old->stream->ctx->dc;
+ enum link_enc_cfg_mode mode = dc->current_state->res_ctx.link_enc_cfg_ctx.mode;
+
+ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
+ if (link_enc_cfg_get_link_enc_used_by_stream(dc, pipe_ctx_old->stream) != pipe_ctx->stream->link_enc)
+ need_reprogram = true;
+ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = mode;
+
+ return need_reprogram;
+ }
return false;
}
@@ -2871,7 +2997,8 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
res = DC_FAIL_CONTROLLER_VALIDATE;
if (res == DC_OK) {
- if (!link->link_enc->funcs->validate_output_with_stream(
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ !link->link_enc->funcs->validate_output_with_stream(
link->link_enc, stream))
res = DC_FAIL_ENC_VALIDATE;
}
@@ -2890,6 +3017,11 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
{
enum dc_status res = DC_OK;
+ /* check if surface has invalid dimensions */
+ if (plane_state->src_rect.width == 0 || plane_state->src_rect.height == 0 ||
+ plane_state->dst_rect.width == 0 || plane_state->dst_rect.height == 0)
+ return DC_FAIL_SURFACE_VALIDATE;
+
/* TODO For now validates pixel format only */
if (dc->res_pool->funcs->validate_plane)
return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps);
@@ -2975,3 +3107,22 @@ void get_audio_check(struct audio_info *aud_modes,
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
+ const struct resource_pool *pool)
+{
+ uint8_t i;
+ struct hpo_dp_link_encoder *enc = NULL;
+
+ ASSERT(pool->hpo_dp_link_enc_count <= MAX_HPO_DP2_LINK_ENCODERS);
+
+ for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
+ if (pool->hpo_dp_link_enc[i]->transmitter == TRANSMITTER_UNKNOWN) {
+ enc = pool->hpo_dp_link_enc[i];
+ break;
+ }
+ }
+
+ return enc;
+}
+#endif
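
resource_get_unused_hpo_dp_link_encoder() treats TRANSMITTER_UNKNOWN as the "free" marker, so a caller claims an encoder by binding it to a transmitter; a hedged sketch of that pattern (the field writes shown are illustrative):

	struct hpo_dp_link_encoder *enc =
		resource_get_unused_hpo_dp_link_encoder(dc->res_pool);

	if (enc) {
		/* Binding a transmitter marks the encoder as in use. */
		enc->transmitter = link->link_enc->transmitter;
		link->hpo_dp_link_enc = enc;
	}
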
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index 28ef9760fa34..4b372aa52801 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -61,6 +61,14 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
status = dmub_srv_stat_get_notification(dmub, notify);
ASSERT(status == DMUB_STATUS_OK);
+
+ /* For HPD/HPD RX, convert dpia port index into link index */
+ if (notify->type == DMUB_NOTIFICATION_HPD ||
+ notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
+ notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
+ notify->link_index =
+ get_link_index_from_dpia_port_index(dc, notify->link_index);
+ }
}
/**
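
Because DMUB identifies tunneled-DP events by DPIA port, consumers of these notifications can index dc->links directly only after this conversion; a sketch of a downstream handler (the handler name is illustrative):

	/* notify->link_index is already a dc link index here, since
	 * dc_stat_get_dmub_notification() performed the conversion above.
	 */
	static void handle_dmub_hpd(struct dc *dc, struct dmub_notification *notify)
	{
		struct dc_link *link = dc->links[notify->link_index];

		/* ... usual HPD / HPD_IRQ processing on link ... */
	}
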
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f0f54f4d3d9b..57cf4cb82370 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -202,6 +202,10 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
new_stream->ctx->dc_stream_id_count++;
+	/* If using dynamic encoder assignment, wait until the stream is committed to assign an encoder. */
+ if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
+ new_stream->link_enc = NULL;
+
kref_init(&new_stream->refcount);
return new_stream;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3ab52d9a82cf..a5339796902a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -44,8 +44,10 @@
/* forward declaration */
struct aux_payload;
+struct set_config_cmd_payload;
+struct dmub_notification;
-#define DC_VER "3.2.149"
+#define DC_VER "3.2.159"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -183,6 +185,9 @@ struct dc_caps {
unsigned int cursor_cache_size;
struct dc_plane_cap planes[MAX_PLANES];
struct dc_color_caps color;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool dp_hpo;
+#endif
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
};
@@ -206,12 +211,12 @@ struct dc_dcc_setting {
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- //These bitfields to be used starting with DCN 3.0
+ //These bitfields to be used starting with DCN
struct {
- uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case)
- uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
- uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
- uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
+ uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case)
+ uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN
+ uint32_t dcc_256_128_128 : 1; //available starting with DCN
+ uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case)
} dcc_controls;
#endif
};
@@ -289,7 +294,15 @@ struct dc_cap_funcs {
struct link_training_settings;
-
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+union allow_lttpr_non_transparent_mode {
+ struct {
+ bool DP1_4A : 1;
+ bool DP2_0 : 1;
+ } bits;
+ unsigned char raw;
+};
+#endif
/* Structure to hold configuration flags set by dm at dc creation. */
struct dc_config {
bool gpu_vm_support;
@@ -302,10 +315,15 @@ struct dc_config {
bool edp_no_power_sequencing;
bool force_enum_edp;
bool forced_clocks;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode;
+#else
bool allow_lttpr_non_transparent_mode;
+#endif
bool multi_mon_pp_mclk_switch;
bool disable_dmcu;
bool enable_4to1MPC;
+ bool enable_windowed_mpo_odm;
bool allow_edp_hotplug_detection;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool clamp_min_dcfclk;
@@ -325,6 +343,12 @@ enum visual_confirm {
VISUAL_CONFIRM_SWIZZLE = 9,
};
+enum dc_psr_power_opts {
+ psr_power_opt_invalid = 0x0,
+ psr_power_opt_smu_opt_static_screen = 0x1,
+ psr_power_opt_z10_static_screen = 0x10,
+};
+
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@@ -456,10 +480,39 @@ union mem_low_power_enable_options {
bool cm: 1;
bool mpc: 1;
bool optc: 1;
+ bool vpg: 1;
+ bool afmt: 1;
+ } bits;
+ uint32_t u32All;
+};
+
+union root_clock_optimization_options {
+ struct {
+ bool dpp: 1;
+ bool dsc: 1;
+ bool hdmistream: 1;
+ bool hdmichar: 1;
+ bool dpstream: 1;
+ bool symclk32_se: 1;
+ bool symclk32_le: 1;
+ bool symclk_fe: 1;
+ bool physymclk: 1;
+ bool dpiasymclk: 1;
+ uint32_t reserved: 22;
} bits;
uint32_t u32All;
};
+union dpia_debug_options {
+ struct {
+ uint32_t disable_dpia:1;
+ uint32_t force_non_lttpr:1;
+ uint32_t extend_aux_rd_interval:1;
+ uint32_t reserved:29;
+ } bits;
+ uint32_t raw;
+};
+
struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
@@ -548,6 +601,7 @@ struct dc_debug_options {
enum wm_report_mode pplib_wm_report_mode;
unsigned int min_disp_clk_khz;
unsigned int min_dpp_clk_khz;
+ unsigned int min_dram_clk_khz;
int sr_exit_time_dpm0_ns;
int sr_enter_plus_exit_time_dpm0_ns;
int sr_exit_time_ns;
@@ -614,19 +668,25 @@ struct dc_debug_options {
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
bool enable_dram_clock_change_one_display_vactive;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* TODO - remove once tested */
+ bool legacy_dp2_lt;
+ bool set_mst_en_for_sst;
+#endif
union mem_low_power_enable_options enable_mem_low_power;
+ union root_clock_optimization_options root_clock_optimization;
bool force_vblank_alignment;
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool optimize_edp_link_rate; /* eDP ILR */
- /* force enable edp FEC */
- bool force_enable_edp_fec;
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
+ bool enable_driver_sequence_debug;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_z10;
bool enable_sw_cntl_psr;
+ union dpia_debug_options dpia_debug;
#endif
};
@@ -672,6 +732,9 @@ struct dc {
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool idle_optimizations_allowed;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool enable_c20_dtm_b0;
+#endif
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
@@ -878,6 +941,7 @@ union surface_update_flags {
uint32_t bandwidth_change:1;
uint32_t clock_change:1;
uint32_t stereo_format_change:1;
+ uint32_t lut_3d:1;
uint32_t full_update:1;
} bits;
@@ -1145,7 +1209,14 @@ struct dpcd_caps {
struct dpcd_dsc_capabilities dsc_caps;
struct dc_lttpr_caps lttpr_caps;
struct psr_caps psr_caps;
+ struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates;
+ union dp_main_line_channel_coding_cap channel_coding_cap;
+ union dp_sink_video_fallback_formats fallback_formats;
+ union dp_fec_capability1 fec_cap1;
+#endif
};
union dpcd_sink_ext_caps {
@@ -1287,6 +1358,8 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
enum dc_irq_source dc_get_hpd_irq_source_at_index(
struct dc *dc, uint32_t link_index);
+void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable);
+
/*******************************************************************************
* Power Interfaces
******************************************************************************/
@@ -1337,7 +1410,7 @@ void dc_hardware_release(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
-void dc_z10_restore(struct dc *dc);
+void dc_z10_restore(const struct dc *dc);
void dc_z10_save_init(struct dc *dc);
#endif
@@ -1347,6 +1420,20 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
uint32_t link_index,
struct aux_payload *payload);
+/* Get dc link index from dpia port index */
+uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
+ uint8_t dpia_port_index);
+
+bool dc_process_dmub_set_config_async(struct dc *dc,
+ uint32_t link_index,
+ struct set_config_cmd_payload *payload,
+ struct dmub_notification *notify);
+
+enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ uint32_t link_index,
+ uint8_t mst_alloc_slots,
+ uint8_t *mst_slots_in_use);
+
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 4f54bde1bb1c..bc87ea0adf94 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -53,7 +53,17 @@ enum dc_link_rate {
LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2)- 3.24 Gbps/Lane
LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane
LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2)- 5.40 Gbps/Lane
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ LINK_RATE_HIGH3 = 0x1E, // Rate_8 (HBR3)- 8.10 Gbps/Lane
+	/* Starting from DP2.0, the link rate enum directly represents the
+	 * actual link rate in units of 10 Mbps.
+ */
+ LINK_RATE_UHBR10 = 1000, // UHBR10 - 10.0 Gbps/Lane
+ LINK_RATE_UHBR13_5 = 1350, // UHBR13.5 - 13.5 Gbps/Lane
+	LINK_RATE_UHBR20 = 2000,	// UHBR20 - 20.0 Gbps/Lane
+#else
LINK_RATE_HIGH3 = 0x1E // Rate_8 (HBR3)- 8.10 Gbps/Lane
+#endif
};
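
With the convention above, a DP2.0 enum value converts to bandwidth with a single multiply, while the legacy 8b/10b codes remain in 0.27 Gbps units; a sketch of the conversion:

	/* Per-lane link rate in kbps from enum dc_link_rate (sketch only). */
	static unsigned int link_rate_kbps(enum dc_link_rate rate)
	{
		if (rate >= LINK_RATE_UHBR10)
			return (unsigned int)rate * 10000;	/* 10 Mbps units */
		return (unsigned int)rate * 270000;		/* 0.27 Gbps units */
	}
	/* LINK_RATE_HIGH2 (0x14 = 20) ->  5400000 kbps =  5.4 Gbps
	 * LINK_RATE_UHBR10 (1000)     -> 10000000 kbps = 10.0 Gbps
	 */
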
enum dc_link_spread {
@@ -90,17 +100,47 @@ enum dc_post_cursor2 {
POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dc_dp_ffe_preset_level {
+ DP_FFE_PRESET_LEVEL0 = 0,
+ DP_FFE_PRESET_LEVEL1,
+ DP_FFE_PRESET_LEVEL2,
+ DP_FFE_PRESET_LEVEL3,
+ DP_FFE_PRESET_LEVEL4,
+ DP_FFE_PRESET_LEVEL5,
+ DP_FFE_PRESET_LEVEL6,
+ DP_FFE_PRESET_LEVEL7,
+ DP_FFE_PRESET_LEVEL8,
+ DP_FFE_PRESET_LEVEL9,
+ DP_FFE_PRESET_LEVEL10,
+ DP_FFE_PRESET_LEVEL11,
+ DP_FFE_PRESET_LEVEL12,
+ DP_FFE_PRESET_LEVEL13,
+ DP_FFE_PRESET_LEVEL14,
+ DP_FFE_PRESET_LEVEL15,
+ DP_FFE_PRESET_MAX_LEVEL = DP_FFE_PRESET_LEVEL15,
+};
+#endif
+
enum dc_dp_training_pattern {
DP_TRAINING_PATTERN_SEQUENCE_1 = 0,
DP_TRAINING_PATTERN_SEQUENCE_2,
DP_TRAINING_PATTERN_SEQUENCE_3,
DP_TRAINING_PATTERN_SEQUENCE_4,
DP_TRAINING_PATTERN_VIDEOIDLE,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DP_128b_132b_TPS1,
+ DP_128b_132b_TPS2,
+ DP_128b_132b_TPS2_CDS,
+#endif
};
enum dp_link_encoding {
DP_UNKNOWN_ENCODING = 0,
DP_8b_10b_ENCODING = 1,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DP_128b_132b_ENCODING = 2,
+#endif
};
struct dc_link_settings {
@@ -112,21 +152,35 @@ struct dc_link_settings {
bool dpcd_source_device_specific_field_support;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+union dc_dp_ffe_preset {
+ struct {
+ uint8_t level : 4;
+ uint8_t reserved : 1;
+ uint8_t no_preshoot : 1;
+ uint8_t no_deemphasis : 1;
+ uint8_t method2 : 1;
+ } settings;
+ uint8_t raw;
+};
+#endif
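
As with the other DPCD unions in this header, the bitfields are declared low-bit-first; a worked encoding of the new FFE preset (assuming the little-endian bitfield allocation these drivers rely on):

	/* Level 4 with pre-shoot disabled:
	 *   level = 4       -> bits 3:0 = 0x04
	 *   no_preshoot = 1 -> bit  5   = 0x20
	 */
	union dc_dp_ffe_preset ffe = {
		.settings = { .level = 4, .no_preshoot = 1 }
	};
	/* ffe.raw == 0x24 */
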
+
struct dc_lane_settings {
enum dc_voltage_swing VOLTAGE_SWING;
enum dc_pre_emphasis PRE_EMPHASIS;
enum dc_post_cursor2 POST_CURSOR2;
-};
-
-struct dc_link_training_settings {
- struct dc_link_settings link;
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union dc_dp_ffe_preset FFE_PRESET;
+#endif
};
struct dc_link_training_overrides {
enum dc_voltage_swing *voltage_swing;
enum dc_pre_emphasis *pre_emphasis;
enum dc_post_cursor2 *post_cursor2;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union dc_dp_ffe_preset *ffe_preset;
+#endif
uint16_t *cr_pattern_time;
uint16_t *eq_pattern_time;
@@ -140,6 +194,16 @@ struct dc_link_training_overrides {
bool *fec_enable;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+union payload_table_update_status {
+ struct {
+ uint8_t VC_PAYLOAD_TABLE_UPDATED:1;
+ uint8_t ACT_HANDLED:1;
+ } bits;
+ uint8_t raw;
+};
+#endif
+
union dpcd_rev {
struct {
uint8_t MINOR:4;
@@ -227,7 +291,14 @@ union lane_align_status_updated {
struct {
uint8_t INTERLANE_ALIGN_DONE:1;
uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t EQ_INTERLANE_ALIGN_DONE_128b_132b:1;
+ uint8_t CDS_INTERLANE_ALIGN_DONE_128b_132b:1;
+ uint8_t LT_FAILED_128b_132b:1;
+ uint8_t RESERVED:1;
+#else
uint8_t RESERVED:4;
+#endif
uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1;
uint8_t LINK_STATUS_UPDATED:1;
} bits;
@@ -240,6 +311,12 @@ union lane_adjust {
uint8_t PRE_EMPHASIS_LANE:2;
uint8_t RESERVED:4;
} bits;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct {
+ uint8_t PRESET_VALUE :4;
+ uint8_t RESERVED :4;
+ } tx_ffe;
+#endif
uint8_t raw;
};
@@ -269,6 +346,12 @@ union dpcd_training_lane {
uint8_t MAX_PRE_EMPHASIS_REACHED:1;
uint8_t RESERVED:2;
} bits;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct {
+ uint8_t PRESET_VALUE :4;
+ uint8_t RESERVED :4;
+ } tx_ffe;
+#endif
uint8_t raw;
};
@@ -551,12 +634,18 @@ union test_response {
union phy_test_pattern {
struct {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* This field is 7 bits for DP2.0 */
+ uint8_t PATTERN :7;
+ uint8_t RESERVED :1;
+#else
/* DpcdPhyTestPatterns. This field is 2 bits for DP1.1
* and 3 bits for DP1.2.
*/
uint8_t PATTERN :3;
/* BY speci, bit7:2 is 0 for DP1.1. */
uint8_t RESERVED :5;
+#endif
} bits;
uint8_t raw;
};
@@ -634,7 +723,14 @@ union dpcd_fec_capability {
uint8_t UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1;
uint8_t CORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1;
uint8_t BIT_ERROR_COUNT_CAPABLE:1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t PARITY_BLOCK_ERROR_COUNT_CAPABLE:1;
+ uint8_t PARITY_BIT_ERROR_COUNT_CAPABLE:1;
+ uint8_t FEC_RUNNING_INDICATOR_SUPPORTED:1;
+ uint8_t FEC_ERROR_REPORTING_POLICY_SUPPORTED:1;
+#else
uint8_t RESERVED:4;
+#endif
} bits;
uint8_t raw;
};
@@ -758,4 +854,200 @@ struct psr_caps {
bool psr_exit_link_training_required;
};
+/* Length in bytes of the router topology ID read from DPCD. */
+#define DPCD_USB4_TOPOLOGY_ID_LEN 5
+
+/* DPCD[0xE000D] DP_TUNNELING_CAPABILITIES SUPPORT register. */
+union dp_tun_cap_support {
+ struct {
+ uint8_t dp_tunneling :1;
+ uint8_t rsvd :5;
+ uint8_t panel_replay_tun_opt :1;
+ uint8_t dpia_bw_alloc :1;
+ } bits;
+ uint8_t raw;
+};
+
+/* DPCD[0xE000E] DP_IN_ADAPTER_INFO register. */
+union dpia_info {
+ struct {
+ uint8_t dpia_num :5;
+ uint8_t rsvd :3;
+ } bits;
+ uint8_t raw;
+};
+
+/* DP Tunneling over USB4 */
+struct dpcd_usb4_dp_tunneling_info {
+ union dp_tun_cap_support dp_tun_cap;
+ union dpia_info dpia_info;
+ uint8_t usb4_driver_id;
+ uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
+};
+
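How the struct maps onto the DPCD registers, as a minimal sketch; read_dpcd() and detect_dpia() stand in for driver helpers and are not names from this patch:

	struct dpcd_usb4_dp_tunneling_info info = {0};

	read_dpcd(link, 0xE000D, &info.dp_tun_cap.raw, 1);
	read_dpcd(link, 0xE000E, &info.dpia_info.raw, 1);
	if (info.dp_tun_cap.bits.dp_tunneling)
		detect_dpia(link);	/* hypothetical follow-up */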
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#ifndef DP_MAIN_LINK_CHANNEL_CODING_CAP
+#define DP_MAIN_LINK_CHANNEL_CODING_CAP 0x006
+#endif
+#ifndef DP_SINK_VIDEO_FALLBACK_FORMATS
+#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020
+#endif
+#ifndef DP_FEC_CAPABILITY_1
+#define DP_FEC_CAPABILITY_1 0x091
+#endif
+#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
+#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
+#endif
+#ifndef DP_DSC_CONFIGURATION
+#define DP_DSC_CONFIGURATION 0x161
+#endif
+#ifndef DP_PHY_SQUARE_PATTERN
+#define DP_PHY_SQUARE_PATTERN 0x249
+#endif
+#ifndef DP_128b_132b_SUPPORTED_LINK_RATES
+#define DP_128b_132b_SUPPORTED_LINK_RATES 0x2215
+#endif
+#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL
+#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216
+#endif
+#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0
+#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230
+#endif
+#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_263_256
+#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250
+#endif
+#ifndef DP_DSC_SUPPORT_AND_DECODER_COUNT
+#define DP_DSC_SUPPORT_AND_DECODER_COUNT 0x2260
+#endif
+#ifndef DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0
+#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270
+#endif
+#ifndef DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK
+#define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0)
+#endif
+#ifndef DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK
+#define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1)
+#endif
+#ifndef DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT
+#define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1
+#endif
+#ifndef DP_DSC_DECODER_COUNT_MASK
+#define DP_DSC_DECODER_COUNT_MASK (0b111 << 5)
+#endif
+#ifndef DP_DSC_DECODER_COUNT_SHIFT
+#define DP_DSC_DECODER_COUNT_SHIFT 5
+#endif
+#ifndef DP_MAIN_LINK_CHANNEL_CODING_SET
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+#endif
+#ifndef DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER
+#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xF0006
+#endif
+#ifndef DP_PHY_REPEATER_128b_132b_RATES
+#define DP_PHY_REPEATER_128b_132b_RATES 0xF0007
+#endif
+#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1
+#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xF0022
+#endif
+#ifndef DP_INTRA_HOP_AUX_REPLY_INDICATION
+#define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3)
+#endif
+/* TODO - Use DRM header to replace above once available */
+
+union dp_main_line_channel_coding_cap {
+ struct {
+ uint8_t DP_8b_10b_SUPPORTED :1;
+ uint8_t DP_128b_132b_SUPPORTED :1;
+ uint8_t RESERVED :6;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_main_link_channel_coding_lttpr_cap {
+ struct {
+ uint8_t DP_128b_132b_SUPPORTED :1;
+ uint8_t RESERVED :7;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_128b_132b_supported_link_rates {
+ struct {
+ uint8_t UHBR10 :1;
+ uint8_t UHBR20 :1;
+ uint8_t UHBR13_5:1;
+ uint8_t RESERVED:5;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_128b_132b_supported_lttpr_link_rates {
+ struct {
+ uint8_t UHBR10 :1;
+ uint8_t UHBR13_5:1;
+ uint8_t UHBR20 :1;
+ uint8_t RESERVED:5;
+ } bits;
+ uint8_t raw;
+};
+
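Note that the sink and LTTPR unions order the UHBR bits differently (UHBR20 sits in bit 1 for the sink, bit 2 for the LTTPR), so capabilities must be intersected field by field rather than by AND-ing the raw bytes. A sketch, with sink_byte and lttpr_byte as illustrative inputs:

	union dp_128b_132b_supported_link_rates sink = { .raw = sink_byte };
	union dp_128b_132b_supported_lttpr_link_rates lttpr = { .raw = lttpr_byte };
	bool uhbr20_ok = sink.bits.UHBR20 && lttpr.bits.UHBR20;
	bool uhbr13_5_ok = sink.bits.UHBR13_5 && lttpr.bits.UHBR13_5;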
+union dp_sink_video_fallback_formats {
+ struct {
+ uint8_t dp_1024x768_60Hz_24bpp_support :1;
+ uint8_t dp_1280x720_60Hz_24bpp_support :1;
+ uint8_t dp_1920x1080_60Hz_24bpp_support :1;
+ uint8_t RESERVED :5;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_fec_capability1 {
+ struct {
+ uint8_t AGGREGATED_ERROR_COUNTERS_CAPABLE :1;
+ uint8_t RESERVED :7;
+ } bits;
+ uint8_t raw;
+};
+
+struct dp_color_depth_caps {
+ uint8_t support_6bpc :1;
+ uint8_t support_8bpc :1;
+ uint8_t support_10bpc :1;
+ uint8_t support_12bpc :1;
+ uint8_t support_16bpc :1;
+ uint8_t RESERVED :3;
+};
+
+struct dp_encoding_format_caps {
+ uint8_t support_rgb :1;
+ uint8_t support_ycbcr444:1;
+ uint8_t support_ycbcr422:1;
+ uint8_t support_ycbcr420:1;
+ uint8_t RESERVED :4;
+};
+
+union dp_dfp_cap_ext {
+ struct {
+ uint8_t supported;
+ uint8_t max_pixel_rate_in_mps[2];
+ uint8_t max_video_h_active_width[2];
+ uint8_t max_video_v_active_height[2];
+ struct dp_encoding_format_caps encoding_format_caps;
+ struct dp_color_depth_caps rgb_color_depth_caps;
+ struct dp_color_depth_caps ycbcr444_color_depth_caps;
+ struct dp_color_depth_caps ycbcr422_color_depth_caps;
+ struct dp_color_depth_caps ycbcr420_color_depth_caps;
+ } fields;
+ uint8_t raw[12];
+};
+
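The two-byte arrays keep the fields view an exact 12-byte overlay of the DPCD block; consumers assemble the 16-bit quantities LSB-first, matching the little-endian layout of multi-byte DPCD fields. A sketch filling the uint16_t mirror defined in dc_types.h further down (dfp and cap_ext are illustrative names):

	cap_ext->max_pixel_rate_in_mps =
		dfp.fields.max_pixel_rate_in_mps[0] |
		(dfp.fields.max_pixel_rate_in_mps[1] << 8);
	cap_ext->max_video_h_active_width =
		dfp.fields.max_video_h_active_width[0] |
		(dfp.fields.max_video_h_active_width[1] << 8);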
+union dp_128b_132b_training_aux_rd_interval {
+ struct {
+ uint8_t VALUE :7;
+ uint8_t UNIT :1;
+ } bits;
+ uint8_t raw;
+};
+#endif
+
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 16cc76ce3739..684713b2cff7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -51,7 +51,6 @@ struct dc_dsc_policy {
int min_slice_height; // Must not be less than 8
uint32_t max_target_bpp;
uint32_t min_target_bpp;
- uint32_t preferred_bpp_x16;
bool enable_dsc_when_not_needed;
};
@@ -81,6 +80,16 @@ bool dc_dsc_compute_config(
uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp);
+uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
+ const struct dc_crtc_timing *timing,
+ const int num_slices_h,
+ const bool is_dp);
+
+/* TODO - Hardware/spec limitations should be owned by dc dsc and returned to DM,
+ * and DM can choose to OVERRIDE a limitation on a CASE BY CASE basis.
+ * Hardware/spec limitations should not be writable by DM.
+ * They should be decoupled from DM-specific policy and named differently.
+ */
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
struct dc_dsc_policy *policy);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 83845d006c54..180ecd860296 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -45,6 +45,10 @@ struct dc_link_status {
struct link_mst_stream_allocation {
/* DIG front */
const struct stream_encoder *stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* HPO DP Stream Encoder */
+ const struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
+#endif
/* associate DRM payload table with DC stream encoder */
uint8_t vcp_id;
/* number of slots required for the DP stream in transport packet */
@@ -81,6 +85,7 @@ struct psr_settings {
*/
bool psr_frame_capture_indication_req;
unsigned int psr_sdp_transmit_line_num_deadline;
+ unsigned int psr_power_opt;
};
/*
@@ -117,8 +122,12 @@ struct dc_link {
struct dc_link_settings reported_link_cap;
struct dc_link_settings verified_link_cap;
struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting;
+ struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
struct dc_link_training_overrides preferred_training_settings;
struct dp_audio_test_data audio_test_data;
@@ -150,6 +159,9 @@ struct dc_link {
struct panel_cntl *panel_cntl;
struct link_encoder *link_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_dp_link_enc;
+#endif
struct graphics_object_id link_id;
/* Endpoint type distinguishes display endpoints which do not have entries
* in the BIOS connector table from those that do. Helps when tracking link
@@ -170,11 +182,15 @@ struct dc_link {
struct psr_settings psr_settings;
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
bool dp_skip_DID2;
bool dp_skip_reset_segment;
+ bool dp_mot_reset_segment;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -260,8 +276,8 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link);
int dc_link_get_target_backlight_pwm(const struct dc_link *link);
-bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable,
- bool wait, bool force_static);
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state);
@@ -288,6 +304,10 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+#endif
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
@@ -296,7 +316,8 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
* false - no change in Downstream port status. No further action required
* from DM. */
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
- union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
+ union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
/*
* On eDP links this function call will stall until T12 has elapsed.
@@ -305,9 +326,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
*/
bool dc_link_wait_for_t12(struct dc_link *link);
-enum dc_status read_hpd_rx_irq_data(
- struct dc_link *link,
- union hpd_irq_data *irq_data);
+void dc_link_dp_handle_automated_test(struct dc_link *link);
+void dc_link_dp_handle_link_loss(struct dc_link *link);
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
struct dc_sink_init_data;
@@ -416,4 +437,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
bool dc_link_is_fec_supported(const struct dc_link *link);
bool dc_link_should_enable_fec(const struct dc_link *link);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
+#endif
#endif /* DC_LINK_H_ */
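The reworked dc_link_set_psr_allow_active() takes both the enable state and the power optimization by pointer; the apparent intent, an assumption not spelled out in this header, is that a NULL argument leaves that setting untouched, letting DM adjust one knob at a time:

	/* Sketch: update only psr_power_opt, keeping allow-active as-is.
	 * NULL-means-unchanged is an assumption about the new contract. */
	unsigned int power_opt = link->psr_settings.psr_power_opt;

	dc_link_set_psr_allow_active(link, NULL, false, false, &power_opt);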
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index b8ebc1f09538..e37c4a10bfd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -115,6 +115,13 @@ struct periodic_interrupt_config {
int lines_offset;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct dc_mst_stream_bw_update {
+ bool is_increase; // true if bandwidth is increased, false if reduced
+ uint32_t mst_stream_bw; // new mst bandwidth in kbps
+};
+#endif
+
union stream_update_flags {
struct {
uint32_t scaling:1;
@@ -125,6 +132,9 @@ union stream_update_flags {
uint32_t gamut_remap:1;
uint32_t wb_update:1;
uint32_t dsc_changed : 1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint32_t mst_bw : 1;
+#endif
} bits;
uint32_t raw;
@@ -278,6 +288,9 @@ struct dc_stream_update {
struct dc_writeback_update *wb_update;
struct dc_dsc_config *dsc_config;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_mst_stream_bw_update *mst_bw_update;
+#endif
struct dc_transfer_func *func_shaper;
struct dc_3dlut *lut3d_func;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index c1532930169b..388457ffc0a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -395,9 +395,27 @@ struct dc_lttpr_caps {
uint8_t max_link_rate;
uint8_t phy_repeater_cnt;
uint8_t max_ext_timeout;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
+ union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
+#endif
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct dc_dongle_dfp_cap_ext {
+ bool supported;
+ uint16_t max_pixel_rate_in_mps;
+ uint16_t max_video_h_active_width;
+ uint16_t max_video_v_active_height;
+ struct dp_encoding_format_caps encoding_format_caps;
+ struct dp_color_depth_caps rgb_color_depth_caps;
+ struct dp_color_depth_caps ycbcr444_color_depth_caps;
+ struct dp_color_depth_caps ycbcr422_color_depth_caps;
+ struct dp_color_depth_caps ycbcr420_color_depth_caps;
+};
+#endif
+
struct dc_dongle_caps {
/* dongle type (DP converter, CV smart dongle) */
enum display_dongle_type dongle_type;
@@ -411,6 +429,9 @@ struct dc_dongle_caps {
bool is_dp_hdmi_ycbcr420_converter;
uint32_t dp_hdmi_max_bpc;
uint32_t dp_hdmi_max_pixel_clk_in_khz;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_dongle_dfp_cap_ext dfp_cap_ext;
+#endif
};
/* Scaling format */
enum scaling_transformation {
@@ -632,6 +653,7 @@ enum dc_psr_state {
PSR_STATE1a,
PSR_STATE2,
PSR_STATE2a,
+ PSR_STATE2b,
PSR_STATE3,
PSR_STATE3Init,
PSR_STATE4,
@@ -934,6 +956,7 @@ enum dc_psr_version {
/* Possible values of display_endpoint_id.endpoint */
enum display_endpoint_type {
DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */
+ DISPLAY_ENDPOINT_USB4_DPIA, /* USB4 DisplayPort tunnel. */
DISPLAY_ENDPOINT_UNKNOWN = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 456fadbbfac7..b699d1b2ba83 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -96,6 +96,22 @@
SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
NBIO_SR(BIOS_SCRATCH_2)
+#define ABM_DCN302_REG_LIST(id)\
+ ABM_COMMON_REG_LIST_DCE_BASE(), \
+ SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+ SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+ SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+ SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_USER_LEVEL, ABM, id), \
+ SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
+ SRI(DC_ABM1_ACE_THRES_12, ABM, id), \
+ NBIO_SR(BIOS_SCRATCH_2)
+
#define ABM_DCN30_REG_LIST(id)\
ABM_COMMON_REG_LIST_DCE_BASE(), \
SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 7866cf2a668f..27218ede150a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -514,13 +514,15 @@ void dce_aud_az_configure(
union audio_sample_rates sample_rates =
audio_mode->sample_rates;
uint8_t byte2 = audio_mode->max_bit_rate;
+ uint8_t channel_count = audio_mode->channel_count;
/* adjust specific properties */
switch (audio_format_code) {
case AUDIO_FORMAT_CODE_LINEARPCM: {
+
check_audio_bandwidth(
crtc_info,
- audio_mode->channel_count,
+ channel_count,
signal,
&sample_rates);
@@ -548,7 +550,7 @@ void dce_aud_az_configure(
/* fill audio format data */
set_reg_field_value(value,
- audio_mode->channel_count - 1,
+ channel_count - 1,
AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
MAX_CHANNELS);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 3c3347341103..6d42a9cc9916 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -534,17 +534,26 @@ struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine
static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payload *payload)
{
if (payload->i2c_over_aux) {
+ if (payload->write_status_update) {
+ if (payload->mot)
+ return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT;
+ else
+ return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+ }
if (payload->write) {
if (payload->mot)
return I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
- return I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else
+ return I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
}
if (payload->mot)
return I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
+
return I2CAUX_TRANSACTION_ACTION_I2C_READ;
}
if (payload->write)
return I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+
return I2CAUX_TRANSACTION_ACTION_DP_READ;
}
@@ -627,6 +636,7 @@ int dce_aux_transfer_dmub_raw(struct ddc_service *ddc,
#define AUX_MAX_I2C_DEFER_RETRIES 7
#define AUX_MAX_INVALID_REPLY_RETRIES 2
#define AUX_MAX_TIMEOUT_RETRIES 3
+#define AUX_DEFER_DELAY_FOR_DPIA 4 /*ms*/
static void dce_aux_log_payload(const char *payload_name,
unsigned char *payload, uint32_t length, uint32_t max_length_to_log)
@@ -689,15 +699,21 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
enum aux_return_code_type operation_result;
bool retry_on_defer = false;
struct ddc *ddc_pin = ddc->ddc_pin;
- struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
- struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
+ struct dce_aux *aux_engine = NULL;
+ struct aux_engine_dce110 *aux110 = NULL;
uint32_t defer_time_in_ms = 0;
int aux_ack_retries = 0,
aux_defer_retries = 0,
aux_i2c_defer_retries = 0,
aux_timeout_retries = 0,
- aux_invalid_reply_retries = 0;
+ aux_invalid_reply_retries = 0,
+ aux_ack_m_retries = 0;
+
+ if (ddc_pin) {
+ aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ aux110 = FROM_AUX_ENGINE(aux_engine);
+ }
if (!payload->reply) {
payload_reply = false;
@@ -752,9 +768,27 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
aux_defer_retries,
AUX_MAX_RETRIES);
goto fail;
- } else {
+ } else
udelay(300);
+ } else if (payload->write && ret > 0) {
+ /* sink requested more time to complete the write via AUX_ACKM */
+ if (++aux_ack_m_retries >= AUX_MAX_RETRIES) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: FAILURE: aux_ack_m_retries=%d >= AUX_MAX_RETRIES=%d",
+ aux_ack_m_retries,
+ AUX_MAX_RETRIES);
+ goto fail;
}
+
+ /* Retry reading the write status until the write completes.
+ * NOTE: the payload is modified in place here.
+ */
+ payload->write = false;
+ payload->write_status_update = true;
+ payload->length = 0;
+ udelay(300);
+
} else
return true;
break;
@@ -765,7 +799,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_DEFER");
/* polling_timeout_period is in us */
- defer_time_in_ms += aux110->polling_timeout_period / 1000;
+ if (aux110)
+ defer_time_in_ms += aux110->polling_timeout_period / 1000;
+ else
+ defer_time_in_ms += AUX_DEFER_DELAY_FOR_DPIA;
++aux_defer_retries;
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 3139285bd403..692fa23ca02b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -76,6 +76,15 @@
SRII(PIXEL_RATE_CNTL, OTG, 4),\
SRII(PIXEL_RATE_CNTL, OTG, 5)
+#define CS_COMMON_REG_LIST_DCN201(index, pllid) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
+ SRII(PHASE, DP_DTO, 0),\
+ SRII(PHASE, DP_DTO, 1),\
+ SRII(MODULO, DP_DTO, 0),\
+ SRII(MODULO, DP_DTO, 1),\
+ SRII(PIXEL_RATE_CNTL, OTG, 0),\
+ SRII(PIXEL_RATE_CNTL, OTG, 1)
+
#define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
SRII(PHASE, DP_DTO, 0),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 0464a8f3db3c..989f5b6907e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -70,6 +70,10 @@
SRII(PIXEL_RATE_CNTL, blk, 4), \
SRII(PIXEL_RATE_CNTL, blk, 5)
+#define HWSEQ_PIXEL_RATE_REG_LIST_201(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1)
+
#define HWSEQ_PHYPLL_REG_LIST(blk) \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1), \
@@ -94,6 +98,10 @@
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+#define HWSEQ_PHYPLL_REG_LIST_201(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1)
+
#define HWSEQ_DCE11_REG_LIST_BASE() \
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
SR(DCFEV_CLOCK_CONTROL), \
@@ -337,6 +345,29 @@
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL)
+#define HWSEQ_DCN201_REG_LIST()\
+ HWSEQ_DCN_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST_201(OTG), \
+ HWSEQ_PHYPLL_REG_LIST_201(OTG), \
+ SR(MICROSECOND_TIME_BASE_DIV), \
+ SR(MILLISECOND_TIME_BASE_DIV), \
+ SR(DISPCLK_FREQ_CHANGE_CNTL), \
+ SR(RBBMIF_TIMEOUT_DIS), \
+ SR(RBBMIF_TIMEOUT_DIS_2), \
+ SR(DCHUBBUB_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
+ SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
+ SR(MPC_CRC_CTRL), \
+ SR(MPC_CRC_RESULT_GB), \
+ SR(MPC_CRC_RESULT_C), \
+ SR(MPC_CRC_RESULT_AR), \
+ SR(AZALIA_AUDIO_DTO), \
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ MMHUB_SR(MC_VM_FB_LOCATION_BASE), \
+ MMHUB_SR(MC_VM_FB_LOCATION_TOP), \
+ MMHUB_SR(MC_VM_FB_OFFSET)
+
#define HWSEQ_DCN30_REG_LIST()\
HWSEQ_DCN2_REG_LIST(),\
HWSEQ_DCN_REG_LIST(), \
@@ -637,6 +668,9 @@ struct dce_hwseq_registers {
uint32_t DMU_MEM_PWR_CNTL;
uint32_t MMHUBBUB_MEM_PWR_CNTL;
uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
+ uint32_t MC_VM_FB_LOCATION_BASE;
+ uint32_t MC_VM_FB_LOCATION_TOP;
+ uint32_t MC_VM_FB_OFFSET;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -872,6 +906,11 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
+#define HWSEQ_DCN201_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh)
+
#define HWSEQ_DCN30_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN2_MASK_SH_LIST(mask_sh), \
HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
@@ -1112,7 +1151,8 @@ struct dce_hwseq_registers {
type DOMAIN_POWER_GATE;\
type DOMAIN_PGFSM_PWR_STATUS;\
type HPO_HDMISTREAMCLK_G_GATE_DIS;\
- type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;
+ type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;\
+ type I2C_LIGHT_SLEEP_FORCE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 8d4263da59f2..779bc92a2968 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -919,6 +919,7 @@ static void dce110_stream_encoder_stop_dp_info_packets(
}
static void dce110_stream_encoder_dp_blank(
+ struct dc_link *link,
struct stream_encoder *enc)
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
@@ -967,6 +968,7 @@ static void dce110_stream_encoder_dp_blank(
/* output video stream to link encoder */
static void dce110_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 54a1408c8015..fb0dec4ed3a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -203,12 +203,33 @@ static bool dmub_abm_init_config(struct abm *abm,
return true;
}
+static bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint8_t panel_mask = 0x01 << panel_inst;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_pause.header.type = DMUB_CMD__ABM;
+ cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE;
+ cmd.abm_pause.abm_pause_data.enable = pause;
+ cmd.abm_pause.abm_pause_data.panel_mask = panel_mask;
+ cmd.abm_pause.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
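dmub_abm_set_pause() follows the same three-step firmware handshake used by every DMUB command in this patch (compare dmub_psr_set_power_opt() below): fill a union dmub_rb_cmd, queue it, kick the firmware, and wait for the ring to drain. In schematic form:

	memset(&cmd, 0, sizeof(cmd));
	/* ... set cmd.<feature>.header.{type, sub_type, payload_bytes} and payload ... */
	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);	/* stage in the ring buffer */
	dc_dmub_srv_cmd_execute(dc->dmub_srv);		/* signal the DMCUB firmware */
	dc_dmub_srv_wait_idle(dc->dmub_srv);		/* block until it is consumed */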
static const struct abm_funcs abm_funcs = {
.abm_init = dmub_abm_init,
.set_abm_level = dmub_abm_set_level,
.get_current_backlight = dmub_abm_get_current_backlight,
.get_target_backlight = dmub_abm_get_target_backlight,
.init_abm_config = dmub_abm_init_config,
+ .set_abm_pause = dmub_abm_set_pause,
};
static void dmub_abm_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index aa8403bc4c83..90eb8eedacf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -50,6 +50,8 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
state = PSR_STATE2;
else if (raw_state == 0x21)
state = PSR_STATE2a;
+ else if (raw_state == 0x22)
+ state = PSR_STATE2b;
else if (raw_state == 0x30)
state = PSR_STATE3;
else if (raw_state == 0x31)
@@ -225,6 +227,25 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
+/**
+ * Set PSR power optimization flags.
+ */
+static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR;
+ cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT;
+ cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data);
+ cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
/*
* Setup PSR by programming phy registers and sending psr hw context values to firmware.
*/
@@ -356,6 +377,7 @@ static const struct dmub_psr_funcs psr_funcs = {
.psr_set_level = dmub_psr_set_level,
.psr_force_static = dmub_psr_force_static,
.psr_get_residency = dmub_psr_get_residency,
+ .psr_set_power_opt = dmub_psr_set_power_opt,
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 9675c269e649..5dbd479660f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -46,6 +46,7 @@ struct dmub_psr_funcs {
void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst);
void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency,
uint8_t panel_inst);
+ void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt);
};
struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 62d595ded866..af3e68d3e747 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -46,6 +46,7 @@
#include "transform.h"
#include "stream_encoder.h"
#include "link_encoder.h"
+#include "link_enc_cfg.h"
#include "link_hwss.h"
#include "dc_link_dp.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -57,7 +58,8 @@
#include "audio.h"
#include "reg_helper.h"
#include "panel_cntl.h"
-
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
@@ -1108,11 +1110,23 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
clk_mgr->funcs->enable_pme_wa(clk_mgr);
/* un-mute audio */
/* TODO: audio should be per stream rather than per link */
- pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.hpo_dp_stream_enc, false);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, false);
+#else
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, false);
+#endif
if (pipe_ctx->stream_res.audio)
pipe_ctx->stream_res.audio->enabled = true;
}
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM);
}
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -1129,14 +1143,32 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
return;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.hpo_dp_stream_enc, true);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, true);
+#else
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
+#endif
if (pipe_ctx->stream_res.audio) {
pipe_ctx->stream_res.audio->enabled = false;
if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable(
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
+ pipe_ctx->stream_res.stream_enc);
+#else
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.stream_enc);
+#endif
else
pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable(
pipe_ctx->stream_res.stream_enc);
@@ -1151,6 +1183,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
* stream->stream_engine_id);
*/
}
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM);
}
void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
@@ -1158,6 +1193,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct link_encoder *link_enc = NULL;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
@@ -1166,17 +1202,48 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#else
if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#endif
pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.stream_enc);
dc->hwss.disable_audio_stream(pipe_ctx);
- link->link_enc->funcs->connect_dig_be_to_fe(
+ /* The link encoder may have been dynamically assigned to a non-physical display endpoint. */
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_enc = link->link_enc;
+ else if (dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ ASSERT(link_enc);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->disable(
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ setup_dp_hpo_stream(pipe_ctx, false);
+ /* TODO - DP2.0 HW: unmap stream from link encoder here */
+ } else {
+ if (link_enc)
+ link_enc->funcs->connect_dig_be_to_fe(
+ link_enc,
+ pipe_ctx->stream_res.stream_enc->id,
+ false);
+ }
+#else
+ if (link_enc)
+ link_enc->funcs->connect_dig_be_to_fe(
link->link_enc,
pipe_ctx->stream_res.stream_enc->id,
false);
-
+#endif
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1192,7 +1259,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
hws->funcs.edp_backlight_control(link, true);
@@ -1210,8 +1277,16 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+#else
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+#endif
+ pipe_ctx->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
if (!dc_is_embedded_signal(pipe_ctx->stream->signal)) {
/*
@@ -1436,6 +1511,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
struct dc *dc)
{
struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
struct drr_params params = {0};
unsigned int event_triggers = 0;
struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
@@ -1451,10 +1527,23 @@ static enum dc_status apply_single_controller_ctx_to_hw(
build_audio_output(context, pipe_ctx, &audio_output);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info);
+#else
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.audio->inst,
&pipe_ctx->stream->audio_info);
+#endif
else
pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
pipe_ctx->stream_res.stream_enc,
@@ -1469,10 +1558,18 @@ static enum dc_status apply_single_controller_ctx_to_hw(
&pipe_ctx->stream->audio_info);
}
- /* */
- /* Do not touch stream timing on seamless boot optimization. */
- if (!pipe_ctx->stream->apply_seamless_boot_optimization)
- hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* DCN3.1 FPGA workaround:
+ * the HPO DP stream encoder must be enabled before OTG master enable is set,
+ * so defer the call to enable_stream_timing until after
+ * core_link_enable_stream has run.
+ */
+ if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)))
+#endif
+ /* Do not touch stream timing on seamless boot optimization. */
+ if (!pipe_ctx->stream->apply_seamless_boot_optimization)
+ hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
@@ -1499,6 +1596,9 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+
pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
pipe_ctx->stream_res.opp,
COLOR_SPACE_YCBCR601,
@@ -1526,6 +1626,18 @@ static enum dc_status apply_single_controller_ctx_to_hw(
if (!stream->dpms_off)
core_link_enable_stream(context, pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* DCN3.1 FPGA workaround:
+ * the HPO DP stream encoder must be enabled before OTG master enable is set,
+ * so defer the call to enable_stream_timing until after
+ * core_link_enable_stream has run.
+ */
+ if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) {
+ if (!pipe_ctx->stream->apply_seamless_boot_optimization)
+ hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
+ }
+#endif
+
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
@@ -1537,29 +1649,37 @@ static enum dc_status apply_single_controller_ctx_to_hw(
static void power_down_encoders(struct dc *dc)
{
- int i;
-
- /* do not know BIOS back-front mapping, simply blank all. It will not
- * hurt for non-DP
- */
- for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
- dc->res_pool->stream_enc[i]->funcs->dp_blank(
- dc->res_pool->stream_enc[i]);
- }
+ int i, j;
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
if ((signal == SIGNAL_TYPE_EDP) ||
- (signal == SIGNAL_TYPE_DISPLAY_PORT))
+ (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+ if (dc->links[i]->link_enc->funcs->get_dig_frontend &&
+ dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) {
+ unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
+ dc->links[i]->link_enc);
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
+ }
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
- dc->links[i]->link_enc->funcs->disable_output(
- dc->links[i]->link_enc, signal);
+ if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY)
+ dc->links[i]->link_enc->funcs->disable_output(
+ dc->links[i]->link_enc, signal);
dc->links[i]->link_status.link_active = false;
memset(&dc->links[i]->cur_link_settings, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index cb9767ddf93d..44293d66b46b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -205,9 +205,17 @@ static void dpp1_power_on_dscl(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) {
- REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0);
REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) {
+ dpp->base.ctx->dc->optimized_required = true;
+ dpp->base.deferred_reg_writes.bits.disable_dscl = true;
+ } else {
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3);
+ }
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index df8a7718a85f..a25732d07222 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -466,6 +466,71 @@ void dcn10_log_hw_state(struct dc *dc,
log_mpc_crc(dc, log_ctx);
+ {
+ int hpo_dp_link_enc_count = 0;
+
+ if (pool->hpo_dp_stream_enc_count > 0) {
+ DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
+ for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
+ struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
+
+ if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
+ hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
+
+ DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
+ hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
+ hpo_dp_se_state.stream_enc_enabled,
+ hpo_dp_se_state.otg_inst,
+ (hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
+ ((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
+ (hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
+ (hpo_dp_se_state.component_depth == 0) ? 6 :
+ ((hpo_dp_se_state.component_depth == 1) ? 8 :
+ (hpo_dp_se_state.component_depth == 2) ? 10 : 12),
+ hpo_dp_se_state.vid_stream_enabled,
+ hpo_dp_se_state.sdp_enabled,
+ hpo_dp_se_state.compressed_format,
+ hpo_dp_se_state.mapped_to_link_enc);
+ }
+ }
+
+ DTN_INFO("\n");
+ }
+
+ /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
+ for (i = 0; i < dc->link_count; i++)
+ if (dc->links[i]->hpo_dp_link_enc)
+ hpo_dp_link_enc_count++;
+
+ if (hpo_dp_link_enc_count) {
+ DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
+
+ for (i = 0; i < dc->link_count; i++) {
+ struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc;
+ struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
+
+ if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) {
+ hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
+ DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
+ hpo_dp_link_enc->inst,
+ hpo_dp_le_state.link_enc_enabled,
+ (hpo_dp_le_state.link_mode == 0) ? "TPS1" :
+ (hpo_dp_le_state.link_mode == 1) ? "TPS2" :
+ (hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
+ hpo_dp_le_state.lane_count,
+ hpo_dp_le_state.stream_src[0],
+ hpo_dp_le_state.slot_count[0],
+ hpo_dp_le_state.vc_rate_x[0],
+ hpo_dp_le_state.vc_rate_y[0]);
+ DTN_INFO("\n");
+ }
+ }
+
+ DTN_INFO("\n");
+ }
+ }
+
DTN_INFO_END();
}
@@ -1313,6 +1378,12 @@ void dcn10_init_hw(struct dc *dc)
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+ /* Align the bw context with the hw config when the system resumes. */
+ if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
+ }
+
// Initialize the dccg
if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
@@ -1424,7 +1495,7 @@ void dcn10_init_hw(struct dc *dc)
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
dc->res_pool->stream_enc[j]);
break;
}
@@ -1522,7 +1593,7 @@ void dcn10_power_down_on_boot(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->link_enc->funcs->is_dig_enabled &&
+ if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
dc->hwss.power_down) {
dc->hwss.power_down(dc);
@@ -2274,8 +2345,8 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- struct vm_system_aperture_param apt = { {{ 0 } } };
- struct vm_context0_param vm0 = { { { 0 } } };
+ struct vm_system_aperture_param apt = {0};
+ struct vm_context0_param vm0 = {0};
mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
@@ -2448,7 +2519,7 @@ void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct mpcc_blnd_cfg blnd_cfg = {{0}};
+ struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
int mpcc_id;
struct mpcc *new_mpcc;
@@ -3176,13 +3247,11 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
- struct pipe_ctx *test_pipe;
+ struct pipe_ctx *test_pipe, *split_pipe;
const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- const struct rect *r1 = &scl_data->recout, *r2;
- int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
+ struct rect r1 = scl_data->recout, r2, r2_half;
+ int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
int cur_layer = pipe_ctx->plane_state->layer_index;
- bool upper_pipe_exists = false;
- struct fixed31_32 one = dc_fixpt_from_int(1);
/**
* Disable the cursor if there's another pipe above this with a
@@ -3191,26 +3260,33 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
*/
for (test_pipe = pipe_ctx->top_pipe; test_pipe;
test_pipe = test_pipe->top_pipe) {
- if (!test_pipe->plane_state->visible)
+ // Skip invisible layers and pipe-split planes on the same layer
+ if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
continue;
- r2 = &test_pipe->plane_res.scl_data.recout;
- r2_r = r2->x + r2->width;
- r2_b = r2->y + r2->height;
+ r2 = test_pipe->plane_res.scl_data.recout;
+ r2_r = r2.x + r2.width;
+ r2_b = r2.y + r2.height;
+ split_pipe = test_pipe;
- if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
+ /**
+ * If pipe-split left another half-plane on the same layer,
+ * merge the two recouts (they share the same height).
+ */
+ for (split_pipe = pipe_ctx->top_pipe; split_pipe;
+ split_pipe = split_pipe->top_pipe)
+ if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
+ r2_half = split_pipe->plane_res.scl_data.recout;
+ r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
+ r2.width = r2.width + r2_half.width;
+ r2_r = r2.x + r2.width;
+ break;
+ }
- if (test_pipe->plane_state->layer_index < cur_layer)
- upper_pipe_exists = true;
+ if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
+ return true;
}
- // if plane scaled, assume an upper plane can handle cursor if it exists.
- if (upper_pipe_exists &&
- (scl_data->ratios.horz.value != one.value ||
- scl_data->ratios.vert.value != one.value))
- return true;
-
return false;
}
@@ -3600,7 +3676,7 @@ void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
- struct encoder_unblank_param params = { { 0 } };
+ struct encoder_unblank_param params = {0};
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
@@ -3613,7 +3689,7 @@ void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
params.timing.pix_clk_100hz /= 2;
- pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
index f0e0d07b0311..e2508d637e0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
@@ -60,14 +60,18 @@
SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \
SRI(CURSOR_DST_OFFSET, CURSOR0_, id)
+#define IPP_REG_LIST_DCN201(id) \
+ IPP_REG_LIST_DCN(id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
+ SRI(CURSOR_SIZE, CURSOR0_, id), \
+ SRI(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI(CURSOR_POSITION, CURSOR0_, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR0_, id)
+
#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
-#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
-#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
-#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
-#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
-#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
-#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
#define IPP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -122,6 +126,23 @@
IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
+#define IPP_MASK_SH_LIST_DCN201(mask_sh) \
+ IPP_MASK_SH_LIST_DCN(mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
+
#define IPP_DCN10_REG_FIELD_LIST(type) \
type CNVC_SURFACE_PIXEL_FORMAT; \
type CNVC_BYPASS; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index e4701825b5a0..2dc4b4e4ba02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1460,5 +1460,14 @@ void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
if (enc->features.flags.bits.IS_HBR3_CAPABLE)
max_link_cap.link_rate = LINK_RATE_HIGH3;
+ if (enc->features.flags.bits.IS_UHBR10_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_UHBR10;
+
+ if (enc->features.flags.bits.IS_UHBR13_5_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_UHBR13_5;
+
+ if (enc->features.flags.bits.IS_UHBR20_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_UHBR20;
+
*link_settings = max_link_cap;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 37848f4577b1..3d2a2848857a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -304,7 +304,7 @@ void optc1_program_timing(
if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
- if (REG(OPTC_DATA_FORMAT_CONTROL)) {
+ if (REG(OPTC_DATA_FORMAT_CONTROL) && optc1->tg_mask->OPTC_DATA_FORMAT != 0) {
uint32_t data_fmt = 0;
if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 7daadb6a5233..f37551e00023 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1296,7 +1296,7 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
* in daisy chain use case
*/
j = i;
- if (pool->stream_enc[i]->id ==
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index cf364ae93138..b0c08ee6bc2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -29,6 +29,9 @@
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
+#include "dcn30/dcn30_afmt.h"
#define DC_LOGGER \
enc1->base.ctx->logger
@@ -644,6 +647,12 @@ void enc1_stream_encoder_set_throttled_vcp_size(
x),
26));
+ // If y rounds up to integer, carry it over to x.
+ if (y >> 26) {
+ x += 1;
+ y = 0;
+ }
+
REG_SET_2(DP_MSE_RATE_CNTL, 0,
DP_MSE_RATE_X, x,
DP_MSE_RATE_Y, y);
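The carry added above matters because DP_MSE_RATE_Y holds a 26-bit fraction: if rounding pushes the fraction to exactly 1.0, the result no longer fits in the field. A worked example with illustrative numbers:

	/* avg time slots per MTP rounds up from 3.99999... to 4.0: */
	uint32_t x = 3, y = 1 << 26;

	if (y >> 26) {	/* fraction wrapped to a whole integer */
		x += 1;	/* carry it into the integer part */
		y = 0;
	}
	/* programmed as DP_MSE_RATE_X = 4, DP_MSE_RATE_Y = 0 */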
@@ -726,6 +735,16 @@ void enc1_stream_encoder_update_dp_info_packets(
0, /* packetIndex */
&info_frame->vsc);
+ /* The VSC SDP at packetIndex 1 is used by PSR in the DMCUB FW.
+ * Note that GSP1 is not enabled below; that is done in FW.
+ */
+ if (info_frame->vsc.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 1, /* packetIndex */
+ &info_frame->vsc);
+
if (info_frame->spd.valid)
enc1_update_generic_info_packet(
enc1,
@@ -884,6 +903,7 @@ void enc1_stream_encoder_stop_dp_info_packets(
}
void enc1_stream_encoder_dp_blank(
+ struct dc_link *link,
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -914,6 +934,8 @@ void enc1_stream_encoder_dp_blank(
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
+
/* the encoder stops sending the video stream
* at the start of the vertical blanking.
* Poll for DP_VID_STREAM_STATUS == 0
@@ -930,10 +952,13 @@ void enc1_stream_encoder_dp_blank(
*/
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
}
/* output video stream to link encoder */
void enc1_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{
@@ -1000,6 +1025,8 @@ void enc1_stream_encoder_dp_unblank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
void enc1_stream_encoder_set_avmute(
@@ -1444,6 +1471,10 @@ void enc1_se_hdmi_audio_setup(
void enc1_se_hdmi_audio_disable(
struct stream_encoder *enc)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (enc->afmt && enc->afmt->funcs->afmt_powerdown)
+ enc->afmt->funcs->afmt_powerdown(enc->afmt);
+#endif
enc1_se_enable_audio_clock(enc, false);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 0d86df97878c..687d7e4bf7ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -627,9 +627,11 @@ void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
void enc1_stream_encoder_dp_blank(
+ struct dc_link *link,
struct stream_encoder *enc);
void enc1_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index ede65100a050..f98aba308028 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -169,7 +169,29 @@
type DTBCLK_DTO_DIV[MAX_PIPES];\
type DCCG_AUDIO_DTO_SEL;\
type DCCG_AUDIO_DTO0_SOURCE_SEL;\
- type DENTIST_DISPCLK_CHG_MODE;
+ type DENTIST_DISPCLK_CHG_MODE;\
+ type DSCCLK0_DTO_PHASE;\
+ type DSCCLK0_DTO_MODULO;\
+ type DSCCLK1_DTO_PHASE;\
+ type DSCCLK1_DTO_MODULO;\
+ type DSCCLK2_DTO_PHASE;\
+ type DSCCLK2_DTO_MODULO;\
+ type DSCCLK0_DTO_ENABLE;\
+ type DSCCLK1_DTO_ENABLE;\
+ type DSCCLK2_DTO_ENABLE;\
+ type SYMCLK32_ROOT_SE0_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE1_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE2_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE3_GATE_DISABLE;\
+ type SYMCLK32_ROOT_LE0_GATE_DISABLE;\
+ type SYMCLK32_ROOT_LE1_GATE_DISABLE;\
+ type DPSTREAMCLK_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK_GATE_DISABLE;\
+ type HDMISTREAMCLK0_DTO_PHASE;\
+ type HDMISTREAMCLK0_DTO_MODULO;\
+ type HDMICHARCLK0_GATE_DISABLE;\
+ type HDMICHARCLK0_ROOT_GATE_DISABLE;
+
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
@@ -205,6 +227,16 @@ struct dccg_registers {
uint32_t SYMCLK32_SE_CNTL;
uint32_t SYMCLK32_LE_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t DSCCLK_DTO_CTRL;
+ uint32_t DSCCLK0_DTO_PARAM;
+ uint32_t DSCCLK1_DTO_PARAM;
+ uint32_t DSCCLK2_DTO_PARAM;
+ uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
+ uint32_t DPSTREAMCLK_GATE_DISABLE;
+ uint32_t DCCG_GATE_DISABLE_CNTL3;
+ uint32_t HDMISTREAMCLK0_DTO_PARAM;
+ uint32_t DCCG_GATE_DISABLE_CNTL4;
+
};
struct dcn_dccg {
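The field list above feeds the usual shift/mask stamping pattern: the same macro is expanded once with uint8_t (bit shifts) and once with uint32_t (masks), so every field added to DCCG_REG_FIELD_LIST automatically appears in both structs. A minimal sketch of the pattern, using a hypothetical two-field list:

	#define DEMO_REG_FIELD_LIST(type) \
		type DSCCLK0_DTO_PHASE; \
		type DSCCLK0_DTO_MODULO;

	struct demo_shift { DEMO_REG_FIELD_LIST(uint8_t) };	/* bit positions */
	struct demo_mask { DEMO_REG_FIELD_LIST(uint32_t) };	/* bit masks */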
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index a47ba1d45be9..cfee456c6c9a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -52,6 +52,9 @@
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "hw_sequencer.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
+#include "inc/link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
@@ -2120,7 +2123,7 @@ void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
- struct encoder_unblank_param params = { { 0 } };
+ struct encoder_unblank_param params = {0};
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
@@ -2135,12 +2138,17 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
- if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ pipe_ctx->stream_res.tg->inst);
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
params.timing.pix_clk_100hz /= 2;
pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
- pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
@@ -2290,7 +2298,7 @@ void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct mpcc_blnd_cfg blnd_cfg = { {0} };
+ struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
int mpcc_id;
struct mpcc *new_mpcc;
@@ -2374,14 +2382,36 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
uint32_t active_total_with_borders;
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct link_encoder *link_enc;
+
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream);
+ else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
/* For MST, multiple streams go to only one link.
 * Connect DIG back_end to front_end during enable_stream and
 * disconnect them during disable_stream.
 * By this, the logic cleanly separates stream and link.
*/
- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ setup_dp_hpo_stream(pipe_ctx, true);
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream(
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
+ link->hpo_dp_link_enc->inst);
+ }
+
+ if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc)
+ link_enc->funcs->connect_dig_be_to_fe(
+ link_enc, pipe_ctx->stream_res.stream_enc->id, true);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
if (link->dc->hwss.program_dmdata_engine)
@@ -2390,6 +2420,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link->dc->hwss.update_info_frame(pipe_ctx);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
timing->h_addressable
@@ -2406,7 +2439,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
/* enable audio only within mode set */
if (pipe_ctx->stream_res.audio != NULL) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.hpo_dp_stream_enc);
+ else if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
}
}
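The encoder lookup at the top of dcn20_enable_stream() is the same dance get_pixel_clock_parameters() does later in this patch: with flexible DIG mapping, the encoder actually assigned to the stream must be queried rather than read off the link. A hedged sketch that distills it into a helper (the helper name is hypothetical; the calls are taken from the diff):

	static struct link_encoder *get_link_enc_for_stream(
		struct dc_link *link, struct dc_stream_state *stream)
	{
		struct link_encoder *link_enc;

		if (link->is_dig_mapping_flexible &&
		    link->dc->res_pool->funcs->link_encs_assign)
			link_enc = link_enc_cfg_get_link_enc_used_by_stream(
				link->ctx->dc, stream);
		else
			link_enc = link->link_enc;

		ASSERT(link_enc);
		return link_enc;
	}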
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index f6e747f25ebe..c90b8516dcc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -467,6 +467,11 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
(h_blank_start - 200 - 1) / optc1->opp_count,
MASTER_UPDATE_LOCK_DB_Y,
v_blank_start - 1);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
}
void optc2_lock_doublebuffer_disable(struct timing_generator *optc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e3e01b17c164..3883f918b3bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -35,7 +35,7 @@
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
-#include "dml/dcn2x/dcn2x.h"
+#include "dml/dcn20/dcn20_fpu.h"
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
@@ -63,6 +63,7 @@
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
#include "dc_link_ddc.h"
+#include "dc_link_dp.h"
#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
@@ -86,6 +87,7 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "vm_helper.h"
+#include "link_enc_cfg.h"
#include "amdgpu_socbb.h"
@@ -1595,15 +1597,32 @@ static void get_pixel_clock_parameters(
const struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
+ struct dc_link *link = stream->link;
+ struct link_encoder *link_enc = NULL;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
+
+ /* Links supporting dynamically assigned link encoders will be assigned the
+ * next available encoder if one is not already assigned.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(stream->ctx->dc, stream);
+ if (link_enc == NULL)
+ link_enc = link_enc_cfg_get_next_avail_link_enc(stream->ctx->dc);
+ } else
+ link_enc = stream->link->link_enc;
+ ASSERT(link_enc);
+
+ if (link_enc)
+ pixel_clk_params->encoder_object_id = link_enc->id;
pixel_clk_params->signal_type = pipe_ctx->stream->signal;
pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
/* TODO: un-hardcode */
+ /* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */
pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
LINK_RATE_REF_FREQ_IN_KHZ;
pixel_clk_params->flags.ENABLE_SS = 0;
@@ -1854,7 +1873,9 @@ static void swizzle_to_dml_params(
case DC_SW_VAR_D_X:
*sw_mode = dm_sw_var_d_x;
break;
-
+ case DC_SW_VAR_R_X:
+ *sw_mode = dm_sw_var_r_x;
+ break;
default:
ASSERT(0); /* Not supported */
break;
@@ -3044,6 +3065,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ return true;
}
return false;
}
@@ -3094,6 +3117,10 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
+
+ if (dc->debug.min_dram_clk_khz > context->bw_ctx.bw.dcn.clk.dramclk_khz)
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = dc->debug.min_dram_clk_khz;
+
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
context->bw_ctx.bw.dcn.clk.p_state_change_support =
@@ -3140,6 +3167,9 @@ void dcn20_calculate_dlg_params(
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (dc->ctx->dce_version == DCN_VERSION_2_01)
+ cstate_en = false;
+
context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].dlg_regs,
&context->res_ctx.pipe_ctx[i].ttu_regs,
@@ -3152,7 +3182,7 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].rq_regs,
- pipes[pipe_idx].pipe);
+ &pipes[pipe_idx].pipe);
pipe_idx++;
}
}
@@ -3630,9 +3660,6 @@ static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
return DML_PROJECT_NAVI10v2;
}
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
-
static bool init_soc_bounding_box(struct dc *dc,
struct dcn20_resource_pool *pool)
{
@@ -3668,16 +3695,22 @@ static bool init_soc_bounding_box(struct dc *dc,
clock_limits_available = (status == PP_SMU_RESULT_OK);
}
- if (clock_limits_available && uclk_states_available && num_states)
+ if (clock_limits_available && uclk_states_available && num_states) {
+ DC_FP_START();
dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
- else if (clock_limits_available)
+ DC_FP_END();
+ } else if (clock_limits_available) {
+ DC_FP_START();
dcn20_cap_soc_clocks(loaded_bb, max_clocks);
+ DC_FP_END();
+ }
}
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
+ DC_FP_START();
dcn20_patch_bounding_box(dc, loaded_bb);
-
+ DC_FP_END();
return true;
}
@@ -3697,8 +3730,6 @@ static bool dcn20_resource_construct(
enum dml_project dml_project_version =
get_dml_project_version(ctx->asic_id.hw_internal_rev);
- DC_FP_START();
-
ctx->dc_bios->regs = &bios_regs;
pool->base.funcs = &dcn20_res_pool_funcs;
@@ -4047,12 +4078,10 @@ static bool dcn20_resource_construct(
pool->base.oem_device = NULL;
}
- DC_FP_END();
return true;
create_fail:
- DC_FP_END();
dcn20_resource_destruct(pool);
return false;
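The hunks above pull DC_FP_START()/DC_FP_END() out of dcn20_resource_construct() and push them down so they bracket only the bounding-box math. These guards ultimately wrap the kernel's FPU begin/end helpers, which disable preemption on x86, so the protected region should contain just the double-precision DML code and must not sleep. The resulting shape, as a sketch:

	DC_FP_START();
	dcn20_patch_bounding_box(dc, loaded_bb);	/* double-precision DML math only */
	DC_FP_END();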
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index e6307397e0d2..aab25ca8343a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -29,6 +29,8 @@
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
#define DC_LOGGER \
enc1->base.ctx->logger
@@ -209,7 +211,8 @@ static void enc2_stream_encoder_stop_hdmi_info_packets(
/* Update GSP7 SDP 128 byte long */
static void enc2_update_gsp7_128_info_packet(
struct dcn10_stream_encoder *enc1,
- const struct dc_info_packet_128 *info_packet)
+ const struct dc_info_packet_128 *info_packet,
+ bool immediate_update)
{
uint32_t i;
@@ -264,7 +267,9 @@ static void enc2_update_gsp7_128_info_packet(
REG_WRITE(AFMT_GENERIC_7, *content++);
}
- REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, 1);
+ REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC7_FRAME_UPDATE, !immediate_update,
+ AFMT_GENERIC7_IMMEDIATE_UPDATE, immediate_update);
}
/* Set DSC-related configuration.
@@ -290,7 +295,8 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
bool enable,
- uint8_t *dsc_packed_pps)
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -306,7 +312,7 @@ static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
pps_sdp.hb2 = 127;
pps_sdp.hb3 = 0;
memcpy(&pps_sdp.sb[0], dsc_packed_pps, sizeof(pps_sdp.sb));
- enc2_update_gsp7_128_info_packet(enc1, &pps_sdp);
+ enc2_update_gsp7_128_info_packet(enc1, &pps_sdp, immediate_update);
/* Enable Generic Stream Packet 7 (GSP) transmission */
//REG_UPDATE(DP_SEC_CNTL,
@@ -444,6 +450,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
}
void enc2_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{
@@ -522,6 +529,8 @@ void enc2_stream_encoder_dp_unblank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
static void enc2_dp_set_odm_combine(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
index f3d1a0237bda..baa1e539f341 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -104,6 +104,7 @@ void enc2_stream_encoder_dp_set_stream_attribute(
uint32_t enable_sdp_splitting);
void enc2_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
new file mode 100644
index 000000000000..f68038ceb1b1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: MIT
+#
+# Makefile for DCN.
+DCN201 = dcn201_init.o dcn201_resource.o dcn201_hwseq.o \
+ dcn201_hubbub.o\
+ dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_optc.o dcn201_dpp.o \
+ dcn201_dccg.o dcn201_link_encoder.o
+
+ifdef CONFIG_X86
+CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o := -mhard-float -maltivec
+endif
+
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -mhard-float
+endif
+
+ifdef CONFIG_X86
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -mpreferred-stack-boundary=4
+else
+CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -msse2
+endif
+endif
+AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN201)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c
new file mode 100644
index 000000000000..f5bf04f7da25
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn201_dccg.h"
+
+#include "reg_helper.h"
+#include "core_types.h"
+
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
+
+#define REG(reg) \
+ (dccg_dcn->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
+
+#define CTX \
+ dccg_dcn->base.ctx
+
+#define DC_LOGGER \
+ dccg->ctx->logger
+
+void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+{
+ /* vbios handles it */
+}
+
+static const struct dccg_funcs dccg201_funcs = {
+ .update_dpp_dto = dccg201_update_dpp_dto,
+ .get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
+ .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
+ .otg_add_pixel = dccg2_otg_add_pixel,
+ .otg_drop_pixel = dccg2_otg_drop_pixel,
+ .dccg_init = dccg2_init
+};
+
+struct dccg *dccg201_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask)
+{
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+ struct dccg *base;
+
+ if (dccg_dcn == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ base = &dccg_dcn->base;
+ base->ctx = ctx;
+ base->funcs = &dccg201_funcs;
+
+ dccg_dcn->regs = regs;
+ dccg_dcn->dccg_shift = dccg_shift;
+ dccg_dcn->dccg_mask = dccg_mask;
+
+ return &dccg_dcn->base;
+}
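A hypothetical call site for the factory above; DCN resource construction conventionally creates the DCCG once and bails out on failure (the register/shift/mask table names here are assumptions):

	pool->base.dccg = dccg201_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
	if (pool->base.dccg == NULL) {
		dm_error("DC: failed to create dccg!\n");
		goto create_fail;
	}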
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h
new file mode 100644
index 000000000000..80888b0484fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN201_DCCG_H__
+#define __DCN201_DCCG_H__
+
+#include "dcn20/dcn20_dccg.h"
+
+struct dccg *dccg201_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask);
+
+#endif //__DCN201_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
new file mode 100644
index 000000000000..8b6505b7dca8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn201_dpp.h"
+#include "basics/conversion.h"
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+static void dpp201_cnv_setup(
+ struct dpp *dpp_base,
+ enum surface_pixel_format format,
+ enum expansion_mode mode,
+ struct dc_csc_transform input_csc_color_matrix,
+ enum dc_color_space input_color_space,
+ struct cnv_alpha_2bit_lut *alpha_2bit_lut)
+{
+ struct dcn201_dpp *dpp = TO_DCN201_DPP(dpp_base);
+ uint32_t pixel_format = 0;
+ uint32_t alpha_en = 1;
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+ enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
+ bool force_disable_cursor = false;
+ uint32_t is_2bit = 0;
+
+ REG_SET_2(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode);
+
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0);
+ REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0);
+ REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0);
+ REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ pixel_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ pixel_format = 3;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ pixel_format = 8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ pixel_format = 10;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ force_disable_cursor = false;
+ pixel_format = 65;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 64;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ force_disable_cursor = true;
+ pixel_format = 67;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 66;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ pixel_format = 22;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ pixel_format = 24;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ pixel_format = 25;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ pixel_format = 12;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+ pixel_format = 112;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+ pixel_format = 113;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
+ pixel_format = 114;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
+ pixel_format = 115;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+ pixel_format = 118;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ pixel_format = 119;
+ alpha_en = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (is_2bit == 1 && alpha_2bit_lut != NULL) {
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3);
+ }
+
+ REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
+
+ dpp1_program_input_csc(dpp_base, color_space, select, NULL);
+
+ if (force_disable_cursor) {
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, 0);
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, 0);
+ }
+ dpp2_power_on_obuf(dpp_base, true);
+}
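For the 10-bit formats flagged is_2bit above, alpha arrives as a 2-bit code and the four ALPHA_2BIT_LUT entries define how it expands. A hedged example of a linear ramp (the values are illustrative, not taken from this patch):

	struct cnv_alpha_2bit_lut lut = {
		.lut0 = 0x0000,	/* alpha 0b00 -> fully transparent */
		.lut1 = 0x5555,
		.lut2 = 0xAAAA,
		.lut3 = 0xFFFF,	/* alpha 0b11 -> fully opaque */
	};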
+
+#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
+
+static bool dpp201_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+{
+ uint32_t pixel_width;
+
+ if (scl_data->viewport.width > scl_data->recout.width)
+ pixel_width = scl_data->recout.width;
+ else
+ pixel_width = scl_data->viewport.width;
+
+ if (scl_data->viewport.width != scl_data->h_active &&
+ scl_data->viewport.height != scl_data->v_active &&
+ dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+ scl_data->format == PIXEL_FORMAT_FP16)
+ return false;
+
+ if (scl_data->viewport.width > scl_data->h_active &&
+ dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+ return false;
+
+ if (scl_data->ratios.horz.value == (8ll << 32))
+ scl_data->ratios.horz.value--;
+ if (scl_data->ratios.vert.value == (8ll << 32))
+ scl_data->ratios.vert.value--;
+ if (scl_data->ratios.horz_c.value == (8ll << 32))
+ scl_data->ratios.horz_c.value--;
+ if (scl_data->ratios.vert_c.value == (8ll << 32))
+ scl_data->ratios.vert_c.value--;
+
+ if (in_taps->h_taps == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.horz) > 4)
+ scl_data->taps.h_taps = 8;
+ else
+ scl_data->taps.h_taps = 4;
+ } else
+ scl_data->taps.h_taps = in_taps->h_taps;
+
+ if (in_taps->v_taps == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.vert) > 4)
+ scl_data->taps.v_taps = 8;
+ else
+ scl_data->taps.v_taps = 4;
+ } else
+ scl_data->taps.v_taps = in_taps->v_taps;
+ if (in_taps->v_taps_c == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4)
+ scl_data->taps.v_taps_c = 4;
+ else
+ scl_data->taps.v_taps_c = 2;
+ } else
+ scl_data->taps.v_taps_c = in_taps->v_taps_c;
+ if (in_taps->h_taps_c == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4)
+ scl_data->taps.h_taps_c = 4;
+ else
+ scl_data->taps.h_taps_c = 2;
+ } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
+ else
+ scl_data->taps.h_taps_c = in_taps->h_taps_c;
+
+ if (!dpp->ctx->dc->debug.always_scale) {
+ if (IDENTITY_RATIO(scl_data->ratios.horz))
+ scl_data->taps.h_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.v_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+ scl_data->taps.h_taps_c = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+ scl_data->taps.v_taps_c = 1;
+ }
+
+ return true;
+}
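Two bits of fixed-point arithmetic above are worth spelling out: the scaler ratios are 31.32 fixed point, so 8.0 is 8ll << 32 and the decrements keep a ratio just under the 8x downscale limit; IDENTITY_RATIO() narrows to U3.19 via dc_fixpt_u3d19(), where 1.0 is 1 << 19, so a ratio is an identity exactly when the narrowed form equals 0x80000. A sketch of the constants, not driver code:

	uint64_t max_ratio_31_32 = 8ull << 32;	/* 8.0 in 31.32, the downscale limit */
	uint32_t one_u3d19 = 1u << 19;		/* 1.0 in U3.19, i.e. 0x80000 */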
+
+static struct dpp_funcs dcn201_dpp_funcs = {
+ .dpp_read_state = dpp20_read_state,
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp201_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+ .dpp_set_csc_adjustment = NULL,
+ .dpp_set_csc_default = NULL,
+ .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
+ .dpp_set_degamma = dpp2_set_degamma,
+ .dpp_program_input_lut = dpp2_dummy_program_input_lut,
+ .dpp_full_bypass = dpp1_full_bypass,
+ .dpp_setup = dpp201_cnv_setup,
+ .dpp_program_degamma_pwl = dpp2_set_degamma_pwl,
+ .dpp_program_blnd_lut = dpp20_program_blnd_lut,
+ .dpp_program_shaper_lut = dpp20_program_shaper,
+ .dpp_program_3dlut = dpp20_program_3dlut,
+ .dpp_program_bias_and_scale = NULL,
+ .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
+ .set_cursor_attributes = dpp2_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
+ .dpp_dppclk_control = dpp1_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+};
+
+static struct dpp_caps dcn201_dpp_cap = {
+ .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT,
+ .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions,
+};
+
+bool dpp201_construct(
+ struct dcn201_dpp *dpp,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_dpp_registers *tf_regs,
+ const struct dcn201_dpp_shift *tf_shift,
+ const struct dcn201_dpp_mask *tf_mask)
+{
+ dpp->base.ctx = ctx;
+
+ dpp->base.inst = inst;
+ dpp->base.funcs = &dcn201_dpp_funcs;
+ dpp->base.caps = &dcn201_dpp_cap;
+
+ dpp->tf_regs = tf_regs;
+ dpp->tf_shift = tf_shift;
+ dpp->tf_mask = tf_mask;
+
+ dpp->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h
new file mode 100644
index 000000000000..cbd5b47b4acf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h
@@ -0,0 +1,83 @@
+/* Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN201_DPP_H__
+#define __DCN201_DPP_H__
+
+#include "dcn20/dcn20_dpp.h"
+
+#define TO_DCN201_DPP(dpp)\
+ container_of(dpp, struct dcn201_dpp, base)
+
+#define TF_REG_LIST_DCN201(id) \
+ TF_REG_LIST_DCN20(id)
+
+#define TF_REG_LIST_SH_MASK_DCN201(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN20(mask_sh)
+
+#define TF_REG_FIELD_LIST_DCN201(type) \
+ TF_REG_FIELD_LIST_DCN2_0(type)
+
+struct dcn201_dpp_shift {
+ TF_REG_FIELD_LIST_DCN201(uint8_t);
+};
+
+struct dcn201_dpp_mask {
+ TF_REG_FIELD_LIST_DCN201(uint32_t);
+};
+
+#define DPP_DCN201_REG_VARIABLE_LIST \
+ DPP_DCN2_REG_VARIABLE_LIST
+
+struct dcn201_dpp_registers {
+ DPP_DCN201_REG_VARIABLE_LIST;
+};
+
+struct dcn201_dpp {
+ struct dpp base;
+
+ const struct dcn201_dpp_registers *tf_regs;
+ const struct dcn201_dpp_shift *tf_shift;
+ const struct dcn201_dpp_mask *tf_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool is_write_to_ram_a_safe;
+ struct scaler_data scl_data;
+ struct pwl_params pwl_data;
+};
+
+bool dpp201_construct(struct dcn201_dpp *dpp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_dpp_registers *tf_regs,
+ const struct dcn201_dpp_shift *tf_shift,
+ const struct dcn201_dpp_mask *tf_mask);
+
+#endif /* __DC_HWSS_DCN201_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
new file mode 100644
index 000000000000..037d265431c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dcn20/dcn20_hubbub.h"
+#include "dcn201_hubbub.h"
+#include "reg_helper.h"
+
+#define REG(reg)\
+ hubbub1->regs->reg
+
+#define DC_LOGGER \
+ hubbub1->base.ctx->logger
+
+#define CTX \
+ hubbub1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubbub1->shifts->field_name, hubbub1->masks->field_name
+
+static bool hubbub201_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
+
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);
+
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+
+ return wm_pending;
+}
+
+static const struct hubbub_funcs hubbub201_funcs = {
+ .update_dchub = hubbub2_update_dchub,
+ .init_dchub_sys_ctx = NULL,
+ .init_vm_ctx = NULL,
+ .dcc_support_swizzle = hubbub2_dcc_support_swizzle,
+ .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
+ .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
+ .wm_read_state = hubbub2_wm_read_state,
+ .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
+ .program_watermarks = hubbub201_program_watermarks,
+ .hubbub_read_state = hubbub2_read_state,
+};
+
+void hubbub201_construct(struct dcn20_hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask)
+{
+ hubbub->base.ctx = ctx;
+
+ hubbub->base.funcs = &hubbub201_funcs;
+
+ hubbub->regs = hubbub_regs;
+ hubbub->shifts = hubbub_shift;
+ hubbub->masks = hubbub_mask;
+
+ hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024;
+}
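A hedged reading of the arbiter programming in hubbub201_program_watermarks() above: refclk_mhz cycles elapse per microsecond, so the saturation level is expressed as a 60 us window in refclk cycles.

	uint32_t sat_level_in_refclk_cycles = 60 * refclk_mhz;	/* 60 us */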
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h
new file mode 100644
index 000000000000..5aeca0be3e15
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DAL_DC_DCN201_DCN201_HUBBUB_H_
+#define DAL_DC_DCN201_DCN201_HUBBUB_H_
+
+#include "dcn20/dcn20_hubbub.h"
+
+#define HUBBUB_REG_LIST_DCN201(id)\
+ HUBBUB_REG_LIST_DCN_COMMON(), \
+ HUBBUB_VM_REG_LIST(), \
+ SR(DCHUBBUB_CRC_CTRL)
+
+#define HUBBUB_MASK_SH_LIST_DCN201(mask_sh)\
+ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
+ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh)
+
+void hubbub201_construct(struct dcn20_hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask);
+
+#endif /* DAL_DC_DCN201_DCN201_HUBBUB_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
new file mode 100644
index 000000000000..6b6f74d4afd1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012-17 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dcn201_hubp.h"
+
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+
+#define REG(reg)\
+ hubp201->hubp_regs->reg
+
+#define CTX \
+ hubp201->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubp201->hubp_shift->field_name, hubp201->hubp_mask->field_name
+
+static void hubp201_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level)
+{
+ hubp1_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
+ hubp1_program_tiling(hubp, tiling_info, format);
+ hubp1_program_size(hubp, format, plane_size, dcc);
+ hubp1_program_pixel_format(hubp, format);
+}
+
+void hubp201_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
+}
+
+void hubp201_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
+{
+ struct dcn201_hubp *hubp201 = TO_DCN201_HUBP(hubp);
+
+ REG_UPDATE(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
+
+ REG_SET_4(DCN_EXPANSION_MODE, 0,
+ DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+
+ REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0,
+ CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
+ SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height);
+
+ REG_SET_5(DCHUBP_REQ_SIZE_CONFIG_C, 0,
+ CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
+ SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height);
+}
+
+static void hubp201_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
+ hubp201_program_requestor(hubp, rq_regs);
+ hubp201_program_deadline(hubp, dlg_attr, ttu_attr);
+}
+
+static struct hubp_funcs dcn201_hubp_funcs = {
+ .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+ .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
+ .hubp_program_surface_flip_and_addr = hubp1_program_surface_flip_and_addr,
+ .hubp_program_surface_config = hubp201_program_surface_config,
+ .hubp_is_flip_pending = hubp1_is_flip_pending,
+ .hubp_setup = hubp201_setup,
+ .hubp_setup_interdependent = hubp2_setup_interdependent,
+ .set_cursor_attributes = hubp2_cursor_set_attributes,
+ .set_cursor_position = hubp1_cursor_set_position,
+ .set_blank = hubp1_set_blank,
+ .dcc_control = hubp1_dcc_control,
+ .mem_program_viewport = min_set_viewport,
+ .hubp_clk_cntl = hubp1_clk_cntl,
+ .hubp_vtg_sel = hubp1_vtg_sel,
+ .dmdata_set_attributes = hubp2_dmdata_set_attributes,
+ .dmdata_load = hubp2_dmdata_load,
+ .dmdata_status_done = hubp2_dmdata_status_done,
+ .hubp_read_state = hubp2_read_state,
+ .hubp_clear_underflow = hubp1_clear_underflow,
+ .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
+ .hubp_init = hubp1_init,
+};
+
+bool dcn201_hubp_construct(
+ struct dcn201_hubp *hubp201,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_hubp_registers *hubp_regs,
+ const struct dcn201_hubp_shift *hubp_shift,
+ const struct dcn201_hubp_mask *hubp_mask)
+{
+ hubp201->base.funcs = &dcn201_hubp_funcs;
+ hubp201->base.ctx = ctx;
+ hubp201->hubp_regs = hubp_regs;
+ hubp201->hubp_shift = hubp_shift;
+ hubp201->hubp_mask = hubp_mask;
+ hubp201->base.inst = inst;
+ hubp201->base.opp_id = OPP_ID_INVALID;
+ hubp201->base.mpcc_id = 0xf;
+
+ return true;
+}
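A hypothetical call site for the constructor above (the register/shift/mask table names are assumptions). Note the design contrast with dccg201_create() earlier in this patch: HUBP follows the construct-into-caller-allocation pattern, so the caller owns the kzalloc:

	struct dcn201_hubp *hubp201 = kzalloc(sizeof(*hubp201), GFP_KERNEL);

	if (hubp201 == NULL)
		return NULL;
	if (dcn201_hubp_construct(hubp201, ctx, inst, &hubp_regs[inst],
				  &hubp_shift, &hubp_mask))
		return &hubp201->base;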
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h
new file mode 100644
index 000000000000..a1e3384eed63
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2012-17 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MEM_INPUT_DCN201_H__
+#define __DC_MEM_INPUT_DCN201_H__
+
+#include "../dcn10/dcn10_hubp.h"
+#include "../dcn20/dcn20_hubp.h"
+
+#define TO_DCN201_HUBP(hubp)\
+ container_of(hubp, struct dcn201_hubp, base)
+
+#define HUBP_REG_LIST_DCN201(id)\
+ HUBP_REG_LIST_DCN(id),\
+ SRI(PREFETCH_SETTINGS, HUBPREQ, id),\
+ SRI(PREFETCH_SETTINGS_C, HUBPREQ, id),\
+ SRI(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \
+ SRI(CURSOR_SETTINGS, HUBPREQ, id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
+ SRI(CURSOR_SIZE, CURSOR0_, id), \
+ SRI(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI(CURSOR_POSITION, CURSOR0_, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR0_, id), \
+ SRI(DMDATA_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI(DMDATA_ADDRESS_LOW, CURSOR0_, id), \
+ SRI(DMDATA_CNTL, CURSOR0_, id), \
+ SRI(DMDATA_SW_CNTL, CURSOR0_, id), \
+ SRI(DMDATA_QOS_CNTL, CURSOR0_, id), \
+ SRI(DMDATA_SW_DATA, CURSOR0_, id), \
+ SRI(DMDATA_STATUS, CURSOR0_, id),\
+ SRI(FLIP_PARAMETERS_0, HUBPREQ, id),\
+ SRI(FLIP_PARAMETERS_2, HUBPREQ, id)
+
+#define HUBP_MASK_SH_LIST_DCN201(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN(mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, DST_Y_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, VRATIO_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_ADDRESS_HIGH, DMDATA_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_LEVEL, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_DL_DELTA, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_VM_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_ROW_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_2, REFCYC_PER_META_CHUNK_FLIP_L, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE_STOP_DATA_DURING_VM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, HUBPREQ_MASTER_UPDATE_LOCK_STATUS, mask_sh)
+
+#define DCN201_HUBP_REG_VARIABLE_LIST \
+ DCN2_HUBP_REG_COMMON_VARIABLE_LIST
+
+#define DCN201_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type)
+
+struct dcn201_hubp_registers {
+ DCN201_HUBP_REG_VARIABLE_LIST;
+};
+
+struct dcn201_hubp_shift {
+ DCN201_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
+};
+
+struct dcn201_hubp_mask {
+ DCN201_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
+};
+
+struct dcn201_hubp {
+ struct hubp base;
+ struct dcn_hubp_state state;
+ const struct dcn201_hubp_registers *hubp_regs;
+ const struct dcn201_hubp_shift *hubp_shift;
+ const struct dcn201_hubp_mask *hubp_mask;
+};
+
+bool dcn201_hubp_construct(
+ struct dcn201_hubp *hubp201,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_hubp_registers *hubp_regs,
+ const struct dcn201_hubp_shift *hubp_shift,
+ const struct dcn201_hubp_mask *hubp_mask);
+
+#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
new file mode 100644
index 000000000000..cfd09b3f705e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -0,0 +1,630 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "basics/dc_common.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dcn201_hwseq.h"
+#include "dcn201_optc.h"
+#include "dce/dce_hwseq.h"
+#include "hubp.h"
+#include "dchubbub.h"
+#include "timing_generator.h"
+#include "opp.h"
+#include "ipp.h"
+#include "mpc.h"
+#include "dccg.h"
+#include "clk_mgr.h"
+#include "reg_helper.h"
+
+#define CTX \
+ hws->ctx
+
+#define REG(reg)\
+ hws->regs->reg
+
+#define DC_LOGGER \
+ dc->ctx->logger
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+static bool patch_address_for_sbs_tb_stereo(
+ struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+
+ if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ (pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
+ *addr = plane_state->address.grph_stereo.left_addr;
+ plane_state->address.grph_stereo.left_addr =
+ plane_state->address.grph_stereo.right_addr;
+ return true;
+ } else {
+ if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
+ plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
+ plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
+ plane_state->address.grph_stereo.right_addr =
+ plane_state->address.grph_stereo.left_addr;
+ plane_state->address.grph_stereo.right_meta_addr =
+ plane_state->address.grph_stereo.left_meta_addr;
+ }
+ }
+ return false;
+}
+
+static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
+ PHYSICAL_ADDRESS_LOC *addr)
+{
+ bool is_in_uma;
+
+ if (hwseq->fb_base.quad_part <= addr->quad_part &&
+ addr->quad_part < hwseq->fb_top.quad_part) {
+ addr->quad_part -= hwseq->fb_base.quad_part;
+ addr->quad_part += hwseq->fb_offset.quad_part;
+ is_in_uma = true;
+ } else if (hwseq->fb_offset.quad_part <= addr->quad_part &&
+ addr->quad_part <= hwseq->uma_top.quad_part) {
+ is_in_uma = true;
+ } else if (addr->quad_part == 0) {
+ is_in_uma = false;
+ } else {
+ is_in_uma = false;
+ }
+}
+
+static void plane_address_in_gpu_space_to_uma(struct dce_hwseq *hwseq,
+ struct dc_plane_address *addr)
+{
+ switch (addr->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ gpu_addr_to_uma(hwseq, &addr->grph.addr);
+ gpu_addr_to_uma(hwseq, &addr->grph.meta_addr);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ gpu_addr_to_uma(hwseq, &addr->grph_stereo.left_addr);
+ gpu_addr_to_uma(hwseq, &addr->grph_stereo.left_meta_addr);
+ gpu_addr_to_uma(hwseq, &addr->grph_stereo.right_addr);
+ gpu_addr_to_uma(hwseq, &addr->grph_stereo.right_meta_addr);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ gpu_addr_to_uma(hwseq, &addr->video_progressive.luma_addr);
+ gpu_addr_to_uma(hwseq, &addr->video_progressive.luma_meta_addr);
+ gpu_addr_to_uma(hwseq, &addr->video_progressive.chroma_addr);
+ gpu_addr_to_uma(hwseq, &addr->video_progressive.chroma_meta_addr);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
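A worked example of the translation gpu_addr_to_uma() performs when an address falls inside the framebuffer aperture (all numbers hypothetical): with fb_base = 0x1_0000_0000, fb_top = 0x1_4000_0000 and fb_offset = 0, a scanout address of 0x1_0012_3000 is rebased into the UMA carve-out; addresses already in the UMA window, or equal to zero, are left untouched.

	/* addr - fb_base + fb_offset */
	uint64_t rebased = 0x100123000ull - 0x100000000ull + 0x0ull;	/* 0x123000 */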
+
+void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ bool addr_patched = false;
+ PHYSICAL_ADDRESS_LOC addr;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dc_plane_address uma;
+
+ if (plane_state == NULL)
+ return;
+
+ uma = plane_state->address;
+
+ addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
+
+ plane_address_in_gpu_space_to_uma(hws, &uma);
+
+ pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
+ pipe_ctx->plane_res.hubp,
+ &uma,
+ plane_state->flip_immediate);
+
+ plane_state->status.requested_address = plane_state->address;
+
+ if (plane_state->flip_immediate)
+ plane_state->status.current_address = plane_state->address;
+
+ if (addr_patched)
+ pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
+}
+
+/* Blank pixel data during initialization */
+void dcn201_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ struct output_pixel_processor *opp = NULL;
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+ uint32_t otg_active_width, otg_active_height;
+
+ /* program opp dpg blank color */
+ color_space = COLOR_SPACE_SRGB;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ /* get the OTG active size */
+ tg->funcs->get_otg_active_size(tg,
+ &otg_active_width,
+ &otg_active_height);
+
+ /* get the OPTC source */
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+ ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+ opp = dc->res_pool->opps[opp_id_src0];
+
+ opp->funcs->opp_set_disp_pattern_generator(
+ opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+
+ hws->funcs.wait_for_blank_complete(opp);
+}
+
+static void read_mmhub_vm_setup(struct dce_hwseq *hws)
+{
+ uint32_t fb_base = REG_READ(MC_VM_FB_LOCATION_BASE);
+ uint32_t fb_top = REG_READ(MC_VM_FB_LOCATION_TOP);
+ uint32_t fb_offset = REG_READ(MC_VM_FB_OFFSET);
+
+	/* MC_VM_FB_LOCATION_TOP holds the last page, so add 1 to get the actual top */
+ fb_top++;
+
+	/* bits 23:0 of the register map to bits 47:24 of the address */
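+	/*
+	 * e.g. (illustrative value): a register value of 0x100 describes the
+	 * 48-bit address 0x100 << 24 = 0x100000000 (4 GiB).
+	 */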
+ hws->fb_base.low_part = fb_base;
+ hws->fb_base.quad_part <<= 24;
+
+ hws->fb_top.low_part = fb_top;
+ hws->fb_top.quad_part <<= 24;
+ hws->fb_offset.low_part = fb_offset;
+ hws->fb_offset.quad_part <<= 24;
+
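+	/*
+	 * uma_top marks where the remapped frame buffer window ends in system
+	 * memory: fb_offset plus the window size (fb_top - fb_base).
+	 */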
+ hws->uma_top.quad_part = hws->fb_top.quad_part
+ - hws->fb_base.quad_part + hws->fb_offset.quad_part;
+}
+
+void dcn201_init_hw(struct dc *dc)
+{
+ int i, j;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct resource_pool *res_pool = dc->res_pool;
+ struct dc_state *context = dc->current_state;
+
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
+
+ if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
+ REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
+
+ hws->funcs.dccg_init(hws);
+
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ REG_WRITE(REFCLK_CNTL, 0);
+ } else {
+ hws->funcs.bios_golden_init(dc);
+
+ if (dc->ctx->dc_bios->fw_info_valid) {
+ res_pool->ref_clocks.xtalin_clock_inKhz =
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
+
+			/* this is the non-FPGA path, no need to re-check the
+			 * DCE environment here
+			 */
+			if (res_pool->dccg && res_pool->hubbub) {
+				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+						&res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+						res_pool->ref_clocks.dccg_ref_clock_inKhz,
+						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
+			} else {
+				res_pool->ref_clocks.dccg_ref_clock_inKhz =
+						res_pool->ref_clocks.xtalin_clock_inKhz;
+				res_pool->ref_clocks.dchub_ref_clock_inKhz =
+						res_pool->ref_clocks.xtalin_clock_inKhz;
+			}
+		} else {
+			ASSERT_CRITICAL(false);
+		}
+
+ for (i = 0; i < dc->link_count; i++) {
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector).
+ */
+ struct dc_link *link = dc->links[i];
+
+ link->link_enc->funcs->hw_init(link->link_enc);
+ }
+ if (hws->fb_offset.quad_part == 0)
+ read_mmhub_vm_setup(hws);
+ }
+
+ /* Blank pixel data with OPP DPG */
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = res_pool->timing_generators[i];
+
+		if (tg->funcs->is_tg_enabled(tg))
+			dcn201_init_blank(dc, tg);
+ }
+
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ tg->funcs->lock(tg);
+ }
+
+ for (i = 0; i < res_pool->pipe_count; i++) {
+ struct dpp *dpp = res_pool->dpps[i];
+
+ dpp->funcs->dpp_reset(dpp);
+ }
+
+ /* Reset all MPCC muxes */
+ res_pool->mpc->funcs->mpc_init(res_pool->mpc);
+
+ /* initialize OPP mpc_tree parameter */
+ for (i = 0; i < res_pool->res_cap->num_opp; i++) {
+ res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
+ res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ for (j = 0; j < MAX_PIPES; j++)
+ res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
+ }
+
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = res_pool->timing_generators[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = res_pool->hubps[i];
+ struct dpp *dpp = res_pool->dpps[i];
+
+ pipe_ctx->stream_res.tg = tg;
+ pipe_ctx->pipe_idx = i;
+
+ pipe_ctx->plane_res.hubp = hubp;
+ pipe_ctx->plane_res.dpp = dpp;
+ pipe_ctx->plane_res.mpcc_inst = dpp->inst;
+ hubp->mpcc_id = dpp->inst;
+ hubp->opp_id = OPP_ID_INVALID;
+ hubp->power_gated = false;
+ pipe_ctx->stream_res.opp = NULL;
+
+ hubp->funcs->hubp_init(hubp);
+
+ res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ pipe_ctx->stream_res.opp = res_pool->opps[i];
+		/* TODO: number of MPCCs != number of OPPs */
+ hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ }
+
+ /* initialize DWB pointer to MCIF_WB */
+ for (i = 0; i < res_pool->res_cap->num_dwb; i++)
+ res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];
+
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ tg->funcs->unlock(tg);
+ }
+
+ for (i = 0; i < res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ dc->hwss.disable_plane(dc, pipe_ctx);
+
+ pipe_ctx->stream_res.tg = NULL;
+ pipe_ctx->plane_res.hubp = NULL;
+ }
+
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = res_pool->timing_generators[i];
+
+ tg->funcs->tg_init(tg);
+ }
+
+ /* end of FPGA. Below if real ASIC */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
+ for (i = 0; i < res_pool->audio_count; i++) {
+ struct audio *audio = res_pool->audios[i];
+
+ audio->funcs->hw_init(audio);
+ }
+
+	/* power AFMT HDMI memory; TODO: may move to output enable/disable to save power */
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+}
+
+/* trigger HW to start disconnecting the plane from the stream on the next vsync */
+void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ int dpp_id = pipe_ctx->plane_res.dpp->inst;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove = NULL;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+ bool mpcc_removed = false;
+
+ mpc_tree_params = &(opp->mpc_tree_params);
+
+ /* check if this plane is being used by an MPCC in the secondary blending chain */
+ if (mpc->funcs->get_mpcc_for_dpp_from_secondary)
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp_from_secondary(mpc_tree_params, dpp_id);
+
+ /* remove MPCC from secondary if being used */
+ if (mpcc_to_remove != NULL && mpc->funcs->remove_mpcc_from_secondary) {
+ mpc->funcs->remove_mpcc_from_secondary(mpc, mpc_tree_params, mpcc_to_remove);
+ mpcc_removed = true;
+ }
+
+ /* check if this MPCC is already being used for this plane (dpp) in the primary blending chain */
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
+ if (mpcc_to_remove != NULL) {
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+ mpcc_removed = true;
+ }
+
+	/* already reset, nothing to disconnect */
+	if (!mpcc_removed)
+ return;
+
+ if (opp != NULL)
+ opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+
+ dc->optimized_required = true;
+
+ if (hubp->funcs->hubp_disconnect)
+ hubp->funcs->hubp_disconnect(hubp);
+
+ if (dc->debug.sanity_checks)
+ hws->funcs.verify_allow_pstate_change_high(dc);
+}
+
+void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct mpcc_blnd_cfg blnd_cfg;
+ bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ int mpcc_id, dpp_id;
+ struct mpcc *new_mpcc;
+ struct mpcc *remove_mpcc = NULL;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
+ get_hdr_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
+ get_surface_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ } else {
+ color_space_to_black_color(
+ dc, pipe_ctx->stream->output_color_space,
+ &blnd_cfg.black_color);
+ }
+
+ if (per_pixel_alpha)
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ else
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+
+ blnd_cfg.overlap_only = false;
+
+ if (pipe_ctx->plane_state->global_alpha_value)
+ blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
+ else
+ blnd_cfg.global_alpha = 0xff;
+
+ blnd_cfg.global_gain = 0xff;
+ blnd_cfg.background_color_bpc = 4;
+ blnd_cfg.bottom_gain_mode = 0;
+ blnd_cfg.top_gain = 0x1f000;
+ blnd_cfg.bottom_inside_gain = 0x1f000;
+ blnd_cfg.bottom_outside_gain = 0x1f000;
+	/* the input to the MPCC is RGB */
+ blnd_cfg.black_color.color_b_cb = 0;
+ blnd_cfg.black_color.color_g_y = 0;
+ blnd_cfg.black_color.color_r_cr = 0;
+
+ /* DCN1.0 has output CM before MPC which seems to screw with
+ * pre-multiplied alpha. This is a w/a hopefully unnecessary for DCN2.
+ */
+ blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
+
+	/*
+	 * TODO: remove hack
+	 * Note: currently there is a bug in init_hw such that
+	 * on resume from hibernate, BIOS sets up MPCC0, and
+	 * we do mpcc_remove but the mpcc cannot go to idle
+	 * after remove. This causes us to pick mpcc1 here,
+	 * which causes a pstate hang for an as-yet unknown reason.
+	 */
+ dpp_id = hubp->inst;
+ mpcc_id = dpp_id;
+
+	/* if there is no full update, we don't need to touch the MPC tree */
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+ mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+ return;
+ }
+
+ /* check if this plane is being used by an MPCC in the secondary blending chain */
+ if (mpc->funcs->get_mpcc_for_dpp_from_secondary)
+ remove_mpcc = mpc->funcs->get_mpcc_for_dpp_from_secondary(mpc_tree_params, dpp_id);
+
+ /* remove MPCC from secondary if being used */
+ if (remove_mpcc != NULL && mpc->funcs->remove_mpcc_from_secondary)
+ mpc->funcs->remove_mpcc_from_secondary(mpc, mpc_tree_params, remove_mpcc);
+
+ /* check if this MPCC is already being used for this plane (dpp) in the primary blending chain */
+ remove_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
+	/* remove MPCC if being used */
+	if (remove_mpcc != NULL)
+		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, remove_mpcc);
+	else if (dc->debug.sanity_checks)
+		mpc->funcs->assert_mpcc_idle_before_connect(
+				dc->res_pool->mpc, mpcc_id);
+
+ /* Call MPC to insert new plane */
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+ new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
+ mpc_tree_params,
+ &blnd_cfg,
+ NULL,
+ NULL,
+ dpp_id,
+ mpcc_id);
+
+ ASSERT(new_mpcc != NULL);
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
+}
+
+void dcn201_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+	struct dce_hwseq *hws = dc->hwseq;
+
+	/* The TG master update lock locks everything on the TG,
+	 * so only the top pipe needs to take it.
+	 */
+	if (pipe->top_pipe)
+		return;
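+	/*
+	 * Illustration (hypothetical config): with two MPO planes on pipes 0
+	 * and 1 sharing one TG, only pipe 0 (the top pipe) reaches this point,
+	 * and the single TG lock below covers both planes.
+	 */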
+
+ if (dc->debug.sanity_checks)
+ hws->funcs.verify_allow_pstate_change_high(dc);
+
+ if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
+ if (lock)
+ pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
+ } else {
+ if (lock)
+ pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+ }
+
+ if (dc->debug.sanity_checks)
+ hws->funcs.verify_allow_pstate_change_high(dc);
+}
+
+void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+ gpu_addr_to_uma(pipe_ctx->stream->ctx->dc->hwseq, &attributes->address);
+
+ pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.hubp, attributes);
+ pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.dpp, attributes);
+}
+
+void dcn201_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_dmdata_attributes attr = { 0 };
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+
+ gpu_addr_to_uma(pipe_ctx->stream->ctx->dc->hwseq,
+ &pipe_ctx->stream->dmdata_address);
+
+ attr.dmdata_mode = DMDATA_HW_MODE;
+ attr.dmdata_size =
+ dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36;
+ attr.address.quad_part =
+ pipe_ctx->stream->dmdata_address.quad_part;
+ attr.dmdata_dl_delta = 0;
+ attr.dmdata_qos_mode = 0;
+ attr.dmdata_qos_level = 0;
+ attr.dmdata_repeat = 1; /* always repeat */
+ attr.dmdata_updated = 1;
+ attr.dmdata_sw_data = NULL;
+
+ hubp->funcs->dmdata_set_attributes(hubp, &attr);
+}
+
+void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings)
+{
+ struct encoder_unblank_param params = { { 0 } };
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
+
+	/* only the parameters set below are used by unblank */
+ params.timing = pipe_ctx->stream->timing;
+
+ params.link_settings.link_rate = link_settings->link_rate;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+		/* check whether two pixels share one container (half pixel rate) */
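+		/*
+		 * e.g. a 3840x2160@60 YCbCr 4:2:0 timing with a 594 MHz pixel
+		 * clock is driven at 297 MHz (values for illustration).
+		 */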
+ if (optc201_is_two_pixels_per_containter(&stream->timing))
+ params.timing.pix_clk_100hz /= 2;
+
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
+ }
+
+	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
+		hws->funcs.edp_backlight_control(link, true);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h
new file mode 100644
index 000000000000..26cd62be6418
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN201_H__
+#define __DC_HWSS_DCN201_H__
+
+#include "hw_sequencer_private.h"
+
+void dcn201_set_dmdata_attributes(struct pipe_ctx *pipe_ctx);
+void dcn201_init_hw(struct dc *dc);
+void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
+void dcn201_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn201_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+#endif /* __DC_HWSS_DCN201_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
new file mode 100644
index 000000000000..f1f89f93603f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn201_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn201_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn201_init_hw,
+ .power_down_on_boot = NULL,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ .post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
+ .update_plane_addr = dcn201_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dce110_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn201_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn10_disable_plane,
+ .pipe_control_lock = dcn201_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .set_dmdata_attributes = dcn201_set_dmdata_attributes,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn201_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_pipe = dce110_set_pipe,
+ .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .update_visual_confirm_color = dcn20_update_visual_confirm_color,
+};
+
+static const struct hwseq_private_funcs dcn201_private_funcs = {
+ .init_pipes = NULL,
+ .update_plane_addr = dcn201_update_plane_addr,
+ .plane_atomic_disconnect = dcn201_plane_atomic_disconnect,
+ .program_pipe = dcn10_program_pipe,
+ .update_mpcc = dcn201_update_mpcc,
+ .set_input_transfer_func = dcn20_set_input_transfer_func,
+ .set_output_transfer_func = dcn20_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = NULL,
+ .enable_stream_gating = NULL,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn201_init_blank,
+ .disable_vga = dcn10_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn10_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn10_enable_power_gating_plane,
+ .dpp_pg_control = dcn10_dpp_pg_control,
+ .hubp_pg_control = dcn10_hubp_pg_control,
+ .dsc_pg_control = NULL,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn20_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn201_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn201_funcs;
+ dc->hwseq->funcs = dcn201_private_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h
new file mode 100644
index 000000000000..1168887b033d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN201_INIT_H__
+#define __DC_DCN201_INIT_H__
+
+struct dc;
+
+void dcn201_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN201_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
new file mode 100644
index 000000000000..a65e8f7801db
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn201_link_encoder.h"
+#include "stream_encoder.h"
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+ enc10->base.ctx
+
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
+void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ uint32_t value1, value2;
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+ REG_GET_2(RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, &value1,
+ RDPCS_PHY_DPALT_DP4, &value2);
+	/*
+	 * Limit to the combo PHY: when DP alt mode is active (DPALT_DISABLE
+	 * clear) and the PHY does not report 4-lane DP (DPALT_DP4 clear),
+	 * only two lanes are available.
+	 */
+ if (enc->usbc_combo_phy) {
+ if (!value1 && !value2 && link_settings->lane_count > LANE_COUNT_TWO)
+ link_settings->lane_count = LANE_COUNT_TWO;
+ }
+}
+
+bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ uint32_t value;
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ REG_GET(RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, &value);
+
+	/* if value == 1, alt mode is disabled; otherwise it is enabled */
+ return !value;
+}
+
+static const struct link_encoder_funcs dcn201_link_enc_funcs = {
+ .read_state = link_enc2_read_state,
+ .validate_output_with_stream =
+ dcn10_link_encoder_validate_output_with_stream,
+ .hw_init = enc2_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_dp_output = dcn10_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn10_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .fec_is_active = enc2_fec_is_active,
+ .is_in_alt_mode = dcn201_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn201_link_encoder_get_max_link_cap,
+};
+
+void dcn201_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc20->enc10;
+
+ enc10->base.funcs = &dcn201_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+ /* set the flag to indicate whether driver poll the I2C data pin
+ * while doing the DP sink detect
+ */
+
+ /* if (dal_adapter_service_is_feature_supported(as,
+ * FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ * enc10->base.features.flags.bits.
+ * DP_SINK_DETECT_POLL_DATA_PIN = true;
+ */
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+ /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+ * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
+ * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
+ * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
+ * Prefer DIG assignment is decided by board design.
+ * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design
+ * and VBIOS will filter out 7 UNIPHY for DCE 8.0.
+ * By this, adding DIGG should not hurt DCE 8.0.
+ * This will let DCE 8.1 share DCE 8.0 as much as possible
+ */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+	if (enc10->base.ctx->dc->debug.hdmi20_disable)
+		enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
new file mode 100644
index 000000000000..8b95ef251332
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN201_H__
+#define __DC_LINK_ENCODER__DCN201_H__
+
+#include "dcn20/dcn20_link_encoder.h"
+
+#define DPCS_DCN201_MASK_SH_LIST(mask_sh)\
+ DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh)
+
+#define DPCS_DCN201_REG_LIST(id) \
+ DPCS_DCN2_CMN_REG_LIST(id)
+
+void dcn201_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+#endif /* __DC_LINK_ENCODER__DCN201_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
new file mode 100644
index 000000000000..95c4c55f067c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn201_mpc.h"
+
+#define REG(reg)\
+ mpc201->mpc_regs->reg
+
+#define CTX \
+ mpc201->base.ctx
+
+#define DC_LOGGER \
+ mpc201->base.ctx->logger
+
+#undef FN
+#define FN(reg_name, field_name) \
+ mpc201->mpc_shift->field_name, mpc201->mpc_mask->field_name
+
+static void mpc201_set_out_rate_control(
+ struct mpc *mpc,
+ int opp_id,
+ bool enable,
+ bool rate_2x_mode,
+ struct mpc_dwb_flow_control *flow_control)
+{
+ struct dcn201_mpc *mpc201 = TO_DCN201_MPC(mpc);
+
+ REG_UPDATE_2(MUX[opp_id],
+ MPC_OUT_RATE_CONTROL_DISABLE, !enable,
+ MPC_OUT_RATE_CONTROL, rate_2x_mode);
+
+ if (flow_control)
+ REG_UPDATE_3(MUX[opp_id],
+ MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
+ MPC_OUT_FLOW_CONTROL_COUNT0, flow_control->flow_ctrl_cnt0,
+ MPC_OUT_FLOW_CONTROL_COUNT1, flow_control->flow_ctrl_cnt1);
+}
+
+static void mpc201_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
+{
+ mpcc->mpcc_id = mpcc_inst;
+ mpcc->dpp_id = 0xf;
+ mpcc->mpcc_bot = NULL;
+ mpcc->blnd_cfg.overlap_only = false;
+ mpcc->blnd_cfg.global_alpha = 0xff;
+ mpcc->blnd_cfg.global_gain = 0xff;
+ mpcc->blnd_cfg.background_color_bpc = 4;
+ mpcc->blnd_cfg.bottom_gain_mode = 0;
+ mpcc->blnd_cfg.top_gain = 0x1f000;
+ mpcc->blnd_cfg.bottom_inside_gain = 0x1f000;
+ mpcc->blnd_cfg.bottom_outside_gain = 0x1f000;
+ mpcc->sm_cfg.enable = false;
+ mpcc->shared_bottom = false;
+}
+
+const struct mpc_funcs dcn201_mpc_funcs = {
+ .read_mpcc_state = mpc1_read_mpcc_state,
+ .insert_plane = mpc1_insert_plane,
+ .remove_mpcc = mpc1_remove_mpcc,
+ .mpc_init = mpc1_mpc_init,
+ .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .update_blending = mpc2_update_blending,
+ .cursor_lock = mpc1_cursor_lock,
+ .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
+ .get_mpcc_for_dpp_from_secondary = NULL,
+ .wait_for_idle = mpc2_assert_idle_mpcc,
+ .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
+ .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
+ .set_denorm = mpc2_set_denorm,
+ .set_denorm_clamp = mpc2_set_denorm_clamp,
+ .set_output_csc = mpc2_set_output_csc,
+ .set_ocsc_default = mpc2_set_ocsc_default,
+ .set_output_gamma = mpc2_set_output_gamma,
+ .set_out_rate_control = mpc201_set_out_rate_control,
+ .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+ .get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .set_bg_color = mpc1_set_bg_color,
+};
+
+void dcn201_mpc_construct(struct dcn201_mpc *mpc201,
+ struct dc_context *ctx,
+ const struct dcn201_mpc_registers *mpc_regs,
+ const struct dcn201_mpc_shift *mpc_shift,
+ const struct dcn201_mpc_mask *mpc_mask,
+ int num_mpcc)
+{
+ int i;
+
+ mpc201->base.ctx = ctx;
+
+ mpc201->base.funcs = &dcn201_mpc_funcs;
+
+ mpc201->mpc_regs = mpc_regs;
+ mpc201->mpc_shift = mpc_shift;
+ mpc201->mpc_mask = mpc_mask;
+
+ mpc201->mpcc_in_use_mask = 0;
+ mpc201->num_mpcc = num_mpcc;
+
+ for (i = 0; i < MAX_MPCC; i++)
+ mpc201_init_mpcc(&mpc201->base.mpcc_array[i], i);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h
new file mode 100644
index 000000000000..b9ce0c1ba5c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h
@@ -0,0 +1,86 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MPCC_DCN201_H__
+#define __DC_MPCC_DCN201_H__
+
+#include "dcn20/dcn20_mpc.h"
+
+#define TO_DCN201_MPC(mpc_base) \
+ container_of(mpc_base, struct dcn201_mpc, base)
+
+#define MPC_REG_LIST_DCN201(inst) \
+ MPC_REG_LIST_DCN2_0(inst)
+
+#define MPC_OUT_MUX_REG_LIST_DCN201(inst) \
+ MPC_OUT_MUX_REG_LIST_DCN2_0(inst)
+
+#define MPC_REG_VARIABLE_LIST_DCN201 \
+ MPC_REG_VARIABLE_LIST_DCN2_0
+
+#define MPC_COMMON_MASK_SH_LIST_DCN201(mask_sh) \
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT0, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT1, mask_sh)
+
+#define MPC_REG_FIELD_LIST_DCN201(type) \
+ MPC_REG_FIELD_LIST_DCN2_0(type) \
+ type MPC_OUT_RATE_CONTROL;\
+ type MPC_OUT_RATE_CONTROL_DISABLE;\
+ type MPC_OUT_FLOW_CONTROL_MODE;\
+ type MPC_OUT_FLOW_CONTROL_COUNT0;\
+ type MPC_OUT_FLOW_CONTROL_COUNT1;
+
+struct dcn201_mpc_registers {
+ MPC_REG_VARIABLE_LIST_DCN201
+};
+
+struct dcn201_mpc_shift {
+ MPC_REG_FIELD_LIST_DCN201(uint8_t)
+};
+
+struct dcn201_mpc_mask {
+ MPC_REG_FIELD_LIST_DCN201(uint32_t)
+};
+
+struct dcn201_mpc {
+ struct mpc base;
+ int mpcc_in_use_mask;
+ int num_mpcc;
+ const struct dcn201_mpc_registers *mpc_regs;
+ const struct dcn201_mpc_shift *mpc_shift;
+ const struct dcn201_mpc_mask *mpc_mask;
+};
+
+void dcn201_mpc_construct(struct dcn201_mpc *mpc201,
+ struct dc_context *ctx,
+ const struct dcn201_mpc_registers *mpc_regs,
+ const struct dcn201_mpc_shift *mpc_shift,
+ const struct dcn201_mpc_mask *mpc_mask,
+ int num_mpcc);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
new file mode 100644
index 000000000000..8e77db46a409
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn201_opp.h"
+#include "reg_helper.h"
+
+#define REG(reg) \
+ (oppn201->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ oppn201->opp_shift->field_name, oppn201->opp_mask->field_name
+
+#define CTX \
+ oppn201->base.ctx
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static struct opp_funcs dcn201_opp_funcs = {
+ .opp_set_dyn_expansion = opp1_set_dyn_expansion,
+ .opp_program_fmt = opp1_program_fmt,
+ .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
+ .opp_program_stereo = opp1_program_stereo,
+ .opp_pipe_clock_control = opp1_pipe_clock_control,
+ .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
+ .opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
+ .dpg_is_blanked = opp2_dpg_is_blanked,
+ .opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
+ .opp_destroy = opp1_destroy,
+ .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
+};
+
+void dcn201_opp_construct(struct dcn201_opp *oppn201,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_opp_registers *regs,
+ const struct dcn201_opp_shift *opp_shift,
+ const struct dcn201_opp_mask *opp_mask)
+{
+ oppn201->base.ctx = ctx;
+ oppn201->base.inst = inst;
+ oppn201->base.funcs = &dcn201_opp_funcs;
+
+ oppn201->regs = regs;
+ oppn201->opp_shift = opp_shift;
+ oppn201->opp_mask = opp_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h
new file mode 100644
index 000000000000..aca389ec1779
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h
@@ -0,0 +1,74 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCN201_H__
+#define __DC_OPP_DCN201_H__
+
+#include "dcn20/dcn20_opp.h"
+
+#define TO_DCN201_OPP(opp)\
+ container_of(opp, struct dcn201_opp, base)
+
+#define OPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define OPP_REG_LIST_DCN201(id) \
+ OPP_REG_LIST_DCN10(id), \
+ OPP_DPG_REG_LIST(id), \
+ SRI(FMT_422_CONTROL, FMT, id)
+
+#define OPP_MASK_SH_LIST_DCN201(mask_sh) \
+ OPP_MASK_SH_LIST_DCN20(mask_sh)
+
+#define OPP_DCN201_REG_FIELD_LIST(type) \
+ OPP_DCN20_REG_FIELD_LIST(type);
+
+struct dcn201_opp_shift {
+ OPP_DCN201_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn201_opp_mask {
+ OPP_DCN201_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn201_opp_registers {
+ OPP_REG_VARIABLE_LIST_DCN2_0;
+};
+
+struct dcn201_opp {
+ struct output_pixel_processor base;
+ const struct dcn201_opp_registers *regs;
+ const struct dcn201_opp_shift *opp_shift;
+ const struct dcn201_opp_mask *opp_mask;
+ bool is_write_to_ram_a_safe;
+};
+
+void dcn201_opp_construct(struct dcn201_opp *oppn201,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_opp_registers *regs,
+ const struct dcn201_opp_shift *opp_shift,
+ const struct dcn201_opp_mask *opp_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c
new file mode 100644
index 000000000000..730875dfd8b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn201_optc.h"
+#include "dcn10/dcn10_optc.h"
+#include "dc.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+/*TEMP: Need to figure out inheritance model here.*/
+bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+ return optc1_is_two_pixels_per_containter(timing);
+}
+
+static void optc201_triplebuffer_lock(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_GLOBAL_CONTROL0, 0,
+ OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
+ REG_SET(OTG_VUPDATE_KEEPOUT, 0,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 1);
+
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 10);
+}
+
+static void optc201_triplebuffer_unlock(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 0);
+ REG_SET(OTG_VUPDATE_KEEPOUT, 0,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0);
+}
+
+static bool optc201_validate_timing(
+ struct timing_generator *optc,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t v_blank;
+ uint32_t h_blank;
+ uint32_t min_v_blank;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ ASSERT(timing != NULL);
+
+ v_blank = (timing->v_total - timing->v_addressable -
+ timing->v_border_top - timing->v_border_bottom);
+
+ h_blank = (timing->h_total - timing->h_addressable -
+ timing->h_border_right -
+ timing->h_border_left);
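+	/*
+	 * e.g. for a standard 1920x1080@60 timing with no borders:
+	 * v_blank = 1125 - 1080 = 45 and h_blank = 2200 - 1920 = 280.
+	 */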
+
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
+ return false;
+
+ /* Check maximum number of pixels supported by Timing Generator
+ * (Currently will never fail, in order to fail needs display which
+ * needs more than 8192 horizontal and
+ * more than 8192 vertical total pixels)
+ */
+ if (timing->h_total > optc1->max_h_total ||
+ timing->v_total > optc1->max_v_total)
+ return false;
+
+ if (h_blank < optc1->min_h_blank)
+ return false;
+
+ if (timing->h_sync_width < optc1->min_h_sync_width ||
+ timing->v_sync_width < optc1->min_v_sync_width)
+ return false;
+
+	min_v_blank = timing->flags.INTERLACE ?
+			optc1->min_v_blank_interlace : optc1->min_v_blank;
+
+ if (v_blank < min_v_blank)
+ return false;
+
+	return true;
+}
+
+static void optc201_get_optc_source(struct timing_generator *optc,
+ uint32_t *num_of_src_opp,
+ uint32_t *src_opp_id_0,
+ uint32_t *src_opp_id_1)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SEG0_SRC_SEL, src_opp_id_0);
+
+ *num_of_src_opp = 1;
+}
+
+static struct timing_generator_funcs dcn201_tg_funcs = {
+ .validate_timing = optc201_validate_timing,
+ .program_timing = optc1_program_timing,
+ .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+ .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+ .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc2_enable_crtc,
+ .disable_crtc = optc1_disable_crtc,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
+ .set_early_control = optc1_set_early_control,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank = optc1_set_blank,
+ .is_blanked = optc1_is_blanked,
+ .set_blank_color = optc1_program_blank_color,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .triplebuffer_lock = optc201_triplebuffer_lock,
+ .triplebuffer_unlock = optc201_triplebuffer_unlock,
+ .lock = optc1_lock,
+ .unlock = optc1_unlock,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc1_set_drr,
+ .get_last_used_drr_vtotal = NULL,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer,
+ .tg_init = optc1_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc2_configure_crc,
+ .set_dsc_config = optc2_set_dsc_config,
+ .set_dwb_source = NULL,
+ .get_optc_source = optc201_get_optc_source,
+ .set_vtg_params = optc1_set_vtg_params,
+ .program_manual_trigger = optc2_program_manual_trigger,
+ .setup_manual_trigger = optc2_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
+};
+
+void dcn201_timing_generator_init(struct optc *optc1)
+{
+ optc1->base.funcs = &dcn201_tg_funcs;
+
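+	/*
+	 * The register field mask bounds the largest programmable total, e.g.
+	 * (illustrative) a 15-bit OTG_H_TOTAL mask of 0x7fff gives
+	 * max_h_total = 0x8000.
+	 */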
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ optc1->min_h_sync_width = 8;
+ optc1->min_v_sync_width = 1;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h
new file mode 100644
index 000000000000..e9545b73513a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPTC_DCN201_H__
+#define __DC_OPTC_DCN201_H__
+
+#include "dcn20/dcn20_optc.h"
+
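+/* Extends the common DCN TG register list with the GSL window, DSC start
+ * position and DWB source-select registers present on DCN201.
+ */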
+#define TG_COMMON_REG_LIST_DCN201(inst) \
+ TG_COMMON_REG_LIST_DCN(inst),\
+ SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
+ SRI(OTG_GSL_WINDOW_X, OTG, inst),\
+ SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
+ SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
+ SRI(OTG_DSC_START_POSITION, OTG, inst),\
+ SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
+ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
+ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
+ SR(DWB_SOURCE_SELECT)
+
+#define TG_COMMON_MASK_SH_LIST_DCN201(mask_sh)\
+ TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_X, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_Y, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\
+ SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\
+ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\
+	SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh)
+
+void dcn201_timing_generator_init(struct optc *optc);
+
+bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
new file mode 100644
index 000000000000..0fa381088d1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
@@ -0,0 +1,1307 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn201_init.h"
+#include "dml/dcn20/dcn20_fpu.h"
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn201_resource.h"
+
+#include "dcn20/dcn20_resource.h"
+
+#include "dcn10/dcn10_hubp.h"
+#include "dcn10/dcn10_ipp.h"
+#include "dcn201_mpc.h"
+#include "dcn201_hubp.h"
+#include "irq/dcn201/irq_service_dcn201.h"
+#include "dcn201/dcn201_dpp.h"
+#include "dcn201/dcn201_hubbub.h"
+#include "dcn201_dccg.h"
+#include "dcn201_optc.h"
+#include "dcn201_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn201_opp.h"
+#include "dcn201/dcn201_link_encoder.h"
+#include "dcn20/dcn20_stream_encoder.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+#include "dcn201_hubbub.h"
+#include "dcn10/dcn10_resource.h"
+
+#include "cyan_skillfish_ip_offset.h"
+
+#include "dcn/dcn_2_0_3_offset.h"
+#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dpcs/dpcs_2_0_3_offset.h"
+#include "dpcs/dpcs_2_0_3_sh_mask.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+#include "nbio/nbio_7_4_offset.h"
+
+#include "reg_helper.h"
+
+#define MIN_DISP_CLK_KHZ 100000
+#define MIN_DPP_CLK_KHZ 100000
+
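+/* DML IP and SoC bounding-box parameters; handed to dml_init_instance()
+ * in dcn201_resource_construct() below.
+ */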
+struct _vcs_dpi_ip_params_st dcn201_ip = {
+ .gpuvm_enable = 0,
+ .hostvm_enable = 0,
+ .gpuvm_max_page_table_levels = 4,
+ .hostvm_max_page_table_levels = 4,
+ .hostvm_cached_page_table_levels = 0,
+ .pte_group_size_bytes = 2048,
+ .rob_buffer_size_kbytes = 168,
+ .det_buffer_size_kbytes = 164,
+ .dpte_buffer_size_in_pte_reqs_luma = 84,
+ .pde_proc_buffer_size_64k_reqs = 48,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_chunk_size_kbytes = 2,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 2,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0,
+ .line_buffer_fixed_bpp = 0,
+ .dcc_supported = true,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12,
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 12,
+ .writeback_max_vscl_taps = 12,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 9600,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 2,
+ .max_num_dpp = 4,
+ .max_num_wb = 0,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 8,
+ .max_vscl_ratio = 8,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.10,
+ .min_vblank_lines = 30,
+ .dppclk_delay_subtotal = 77,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 8,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 87,
+ .dcfclk_cstate_latency = 10,
+ .max_inter_dcn_tile_repeaters = 8,
+ .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dscclk_mhz = 400.0,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 200.0,
+ .dispclk_mhz = 300.0,
+ .dppclk_mhz = 300.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1254.0,
+ .dram_speed_mts = 2000.0,
+ },
+ {
+ .state = 1,
+ .dscclk_mhz = 400.0,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 250.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1254.0,
+ .dram_speed_mts = 3600.0,
+ },
+ {
+ .state = 2,
+ .dscclk_mhz = 400.0,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 750.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1254.0,
+ .dram_speed_mts = 6800.0,
+ },
+ {
+ .state = 3,
+ .dscclk_mhz = 400.0,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 250.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1254.0,
+ .dram_speed_mts = 14000.0,
+ },
+ {
+ .state = 4,
+ .dscclk_mhz = 400.0,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 750.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1254.0,
+ .dram_speed_mts = 14000.0,
+ }
+ },
+ .num_states = 4,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 256,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 256,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 256,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 80.0,
+ .max_avg_sdp_bw_use_normal_percent = 80.0,
+ .max_avg_dram_bw_use_normal_percent = 69.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 80.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 2,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.3,
+ .downspread_percent = 0.3,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 128,
+ .urgent_out_of_order_return_per_channel_bytes = 256,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 16,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 250.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3000,
+ .use_urgent_burst_bw = 0,
+};
+
+enum dcn20_clk_src_array_id {
+ DCN20_CLK_SRC_PLL0,
+ DCN20_CLK_SRC_PLL1,
+ DCN20_CLK_SRC_TOTAL_DCN201
+};
+
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header files */
+
+/* DCN */
+
+#undef BASE_INNER
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
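+/* e.g. SRI(OTG_CONTROL, OTG, 0) pastes tokens into
+ *   .OTG_CONTROL = BASE(mmOTG0_OTG_CONTROL_BASE_IDX) + mmOTG0_OTG_CONTROL
+ * (OTG_CONTROL is only an illustration of the expansion)
+ */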
+
+#define SRIR(var_name, reg_name, block, id)\
+ .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRI_IX(reg_name, block, id)\
+ .reg_name = ix ## block ## id ## _ ## reg_name
+
+#define DCCG_SRII(reg_name, block, id)\
+ .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## _ ## block ## id
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) \
+ NBIO_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+ MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+ MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+ .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
+ mmMM ## reg_name
+
+static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_3),
+ NBIO_SR(BIOS_SCRATCH_6)
+};
+
+#define clk_src_regs(index, pllid)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCN201(index, pllid),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_DCN2_REG_LIST(id)\
+}
+
+static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1)
+};
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
+};
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+};
+
+#define link_regs(id, phyid)\
+[id] = {\
+ LE_DCN_COMMON_REG_LIST(id), \
+ UNIPHY_DCN2_REG_LIST(phyid) \
+}
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0, A),
+ link_regs(1, B),
+};
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN201(mask_sh)\
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN201(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_REG_LIST_DCN201(id),\
+}
+
+static const struct dcn10_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+};
+
+static const struct dcn10_ipp_shift ipp_shift = {
+ IPP_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn10_ipp_mask ipp_mask = {
+ IPP_MASK_SH_LIST_DCN201(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_REG_LIST_DCN201(id),\
+}
+
+static const struct dcn201_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+};
+
+static const struct dcn201_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn201_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN201(_MASK)
+};
+
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST0(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1)
+};
+
+#define tf_regs(id)\
+[id] = {\
+ TF_REG_LIST_DCN201(id),\
+}
+
+static const struct dcn201_dpp_registers tf_regs[] = {
+ tf_regs(0),
+ tf_regs(1),
+ tf_regs(2),
+ tf_regs(3),
+};
+
+static const struct dcn201_dpp_shift tf_shift = {
+ TF_REG_LIST_SH_MASK_DCN201(__SHIFT)
+};
+
+static const struct dcn201_dpp_mask tf_mask = {
+ TF_REG_LIST_SH_MASK_DCN201(_MASK)
+};
+
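+/* one register list per MPCC (5) plus one per output mux (2), matching
+ * mpcc_count and res_cap_dnc201.num_opp below
+ */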
+static const struct dcn201_mpc_registers mpc_regs = {
+ MPC_REG_LIST_DCN201(0),
+ MPC_REG_LIST_DCN201(1),
+ MPC_REG_LIST_DCN201(2),
+ MPC_REG_LIST_DCN201(3),
+ MPC_REG_LIST_DCN201(4),
+ MPC_OUT_MUX_REG_LIST_DCN201(0),
+ MPC_OUT_MUX_REG_LIST_DCN201(1),
+};
+
+static const struct dcn201_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn201_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN201(_MASK)
+};
+
+#define tg_regs_dcn201(id)\
+[id] = {TG_COMMON_REG_LIST_DCN201(id)}
+
+static const struct dcn_optc_registers tg_regs[] = {
+ tg_regs_dcn201(0),
+ tg_regs_dcn201(1)
+};
+
+static const struct dcn_optc_shift tg_shift = {
+ TG_COMMON_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn_optc_mask tg_mask = {
+ TG_COMMON_MASK_SH_LIST_DCN201(_MASK)
+};
+
+#define hubp_regsDCN201(id)\
+[id] = {\
+ HUBP_REG_LIST_DCN201(id)\
+}
+
+static const struct dcn201_hubp_registers hubp_regs[] = {
+ hubp_regsDCN201(0),
+ hubp_regsDCN201(1),
+ hubp_regsDCN201(2),
+ hubp_regsDCN201(3)
+};
+
+static const struct dcn201_hubp_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn201_hubp_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN201(_MASK)
+};
+
+static const struct dcn_hubbub_registers hubbub_reg = {
+ HUBBUB_REG_LIST_DCN201(0)
+};
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN201(_MASK)
+};
+
+static const struct dccg_registers dccg_regs = {
+ DCCG_COMMON_REG_LIST_DCN_BASE()
+};
+
+static const struct dccg_shift dccg_shift = {
+ DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+ DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(_MASK)
+};
+
+static const struct resource_caps res_cap_dnc201 = {
+ .num_timing_generator = 2,
+ .num_opp = 2,
+ .num_video_plane = 4,
+ .num_audio = 2,
+ .num_stream_encoder = 2,
+ .num_pll = 2,
+ .num_ddc = 2,
+};
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .blends_with_above = true,
+ .blends_with_below = true,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = true,
+ .p010 = false,
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 250,
+ .fp16 = 250
+ },
+	64, /* min_width */
+	64  /* min_height */
+};
+
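+/* driver-default debug options, installed as dc->debug in
+ * dcn201_resource_construct()
+ */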
+static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+ .performance_trace = false,
+ .az_endpoint_mute_only = true,
+ .max_downscale_src_width = 3840,
+ .disable_pplib_wm_range = true,
+ .scl_reset_length10 = true,
+ .sanity_checks = false,
+ .underflow_assert_delay_us = 0xFFFFFFFF,
+ .enable_tri_buf = false,
+};
+
+static void dcn201_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN201_DPP(*dpp));
+ *dpp = NULL;
+}
+
+static struct dpp *dcn201_dpp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn201_dpp *dpp =
+ kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC);
+
+ if (!dpp)
+ return NULL;
+
+ if (dpp201_construct(dpp, ctx, inst,
+ &tf_regs[inst], &tf_shift, &tf_mask))
+ return &dpp->base;
+
+ kfree(dpp);
+ return NULL;
+}
+
+static struct input_pixel_processor *dcn201_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn10_ipp *ipp =
+ kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+
+	if (!ipp)
+		return NULL;
+
+ dcn20_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static struct output_pixel_processor *dcn201_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn201_opp *opp =
+ kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC);
+
+	if (!opp)
+		return NULL;
+
+ dcn201_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct dce_aux *dcn201_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
+
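+/* positional entries: i2c_hw_regs[0] carries the registers of HW I2C engine 1 */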
+static const struct dce_i2c_registers i2c_hw_regs[] = {
+ i2c_inst_regs(1),
+ i2c_inst_regs(2),
+};
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
+};
+
+struct dce_i2c_hw *dcn201_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
+ dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+
+static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
+{
+ struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc),
+ GFP_ATOMIC);
+
+ if (!mpc201)
+ return NULL;
+
+ dcn201_mpc_construct(mpc201, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ num_mpcc);
+
+ return &mpc201->base;
+}
+
+static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx)
+{
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
+ GFP_ATOMIC);
+
+ if (!hubbub)
+ return NULL;
+
+ hubbub201_construct(hubbub, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask);
+
+ return &hubbub->base;
+}
+
+static struct timing_generator *dcn201_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_ATOMIC);
+
+ if (!tgn10)
+ return NULL;
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &tg_regs[instance];
+ tgn10->tg_shift = &tg_shift;
+ tgn10->tg_mask = &tg_mask;
+
+ dcn201_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .fec_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+struct link_encoder *dcn201_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn20_link_encoder *enc20 =
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC);
+	struct dcn10_link_encoder *enc10;
+
+	if (!enc20)
+		return NULL;
+
+	enc10 = &enc20->enc10;
+
+ dcn201_link_encoder_construct(enc20,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc10->base;
+}
+
+struct clock_source *dcn201_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce112_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+ kfree(clk_src);
+ return NULL;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
+		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+}
+
+static struct audio *dcn201_create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct stream_encoder *dcn201_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn10_stream_encoder *enc1 =
+ kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC);
+
+ if (!enc1)
+ return NULL;
+
+ dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+
+ return &enc1->base;
+}
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCN201_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN201_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN201_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dcn201_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = dcn201_create_audio,
+ .create_stream_encoder = dcn201_stream_encoder_create,
+ .create_hwseq = dcn201_hwseq_create,
+};
+
+static const struct resource_create_funcs res_create_maximus_funcs = {
+ .read_dce_straps = NULL,
+ .create_audio = NULL,
+ .create_stream_encoder = NULL,
+ .create_hwseq = dcn201_hwseq_create,
+};
+
+void dcn201_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static void dcn201_resource_destruct(struct dcn201_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN201_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.dpps[i] != NULL)
+ dcn201_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+	}
+
+	if (pool->base.irqs != NULL)
+		dal_irq_service_destroy(&pool->base.irqs);
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn201_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn201_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ if (pool->base.dccg != NULL)
+ dcn_dccg_destroy(&pool->base.dccg);
+}
+
+static struct hubp *dcn201_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn201_hubp *hubp201 =
+ kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC);
+
+ if (!hubp201)
+ return NULL;
+
+ if (dcn201_hubp_construct(hubp201, ctx, inst,
+ &hubp_regs[inst], &hubp_shift, &hubp_mask))
+ return &hubp201->base;
+
+ kfree(hubp201);
+ return NULL;
+}
+
+static struct pipe_ctx *dcn201_acquire_idle_pipe_for_layer(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+	struct pipe_ctx *idle_pipe;
+
+	if (!head_pipe) {
+		ASSERT(0);
+		return NULL;
+	}
+
+	idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
+	if (!idle_pipe)
+		return NULL;
+
+ idle_pipe->stream = head_pipe->stream;
+ idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+ idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+
+ idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
+
+ return idle_pipe;
+}
+
+static bool dcn201_get_dcc_compression_cap(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
+ dc->res_pool->hubbub,
+ input,
+ output);
+}
+
+static void dcn201_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn201_resource_pool *dcn201_pool = TO_DCN201_RES_POOL(*pool);
+
+ dcn201_resource_destruct(dcn201_pool);
+ kfree(dcn201_pool);
+ *pool = NULL;
+}
+
+static void dcn201_link_init(struct dc_link *link)
+{
+ if (link->ctx->dc_bios->integrated_info)
+ link->dp_ss_off = !link->ctx->dc_bios->integrated_info->dp_ss_control;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn201_get_dcc_compression_cap,
+};
+
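+/* DCN201 reuses the DCN20 bandwidth validation and DML population hooks */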
+static struct resource_funcs dcn201_res_pool_funcs = {
+ .link_init = dcn201_link_init,
+ .destroy = dcn201_destroy_resource_pool,
+ .link_enc_create = dcn201_link_encoder_create,
+ .panel_cntl_create = NULL,
+ .validate_bandwidth = dcn20_validate_bandwidth,
+ .populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
+ .add_stream_to_ctx = dcn20_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = NULL,
+ .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+ .acquire_idle_pipe_for_layer = dcn201_acquire_idle_pipe_for_layer,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn20_set_mcif_arb_params,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+};
+
+static bool dcn201_resource_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn201_resource_pool *pool)
+{
+ int i;
+ struct dc_context *ctx = dc->ctx;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_dnc201;
+ pool->base.funcs = &dcn201_res_pool_funcs;
+
+ /*************************************************
+	 *  Resource + asic cap hardcoding               *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ pool->base.pipe_count = 4;
+ pool->base.mpcc_count = 5;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/
+ dc->caps.max_cursor_size = 256;
+ dc->caps.min_horizontal_blanking_period = 80;
+ dc->caps.dmdata_alloc_size = 2048;
+
+ dc->caps.max_slave_planes = 1;
+ dc->caps.max_slave_yuv_planes = 1;
+ dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.post_blend_color_processing = true;
+ dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.extended_aux_timeout_support = true;
+
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+ dc->caps.color.dpp.dgam_rom_for_yuv = 1;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN2
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
+ dc->debug = debug_defaults_drv;
+
+ /*a0 only, remove later*/
+ dc->work_arounds.no_connect_phy_config = true;
+ dc->work_arounds.dedcn20_305_wa = true;
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
+ dcn201_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
+ dcn201_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+
+ pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN201;
+
+	/* TODO: do not reuse the PHY PLL registers for the DP DTO clock source */
+ pool->base.dp_clock_source =
+ dcn201_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ goto create_fail;
+ }
+ }
+
+ pool->base.dccg = dccg201_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create dccg!\n");
+ goto create_fail;
+ }
+
+ dcn201_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn201_ip.max_num_dpp = pool->base.pipe_count;
+ dml_init_instance(&dc->dml, &dcn201_soc, &dcn201_ip, DML_PROJECT_DCN201);
+ {
+		struct irq_service_init_data init_data;
+
+		init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn201_create(&init_data);
+ if (!pool->base.irqs)
+ goto create_fail;
+ }
+
+ /* mem input -> ipp -> dpp -> opp -> TG */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.hubps[i] = dcn201_hubp_create(ctx, i);
+ if (pool->base.hubps[i] == NULL) {
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto create_fail;
+ }
+
+ pool->base.ipps[i] = dcn201_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto create_fail;
+ }
+
+ pool->base.dpps[i] = dcn201_dpp_create(ctx, i);
+ if (pool->base.dpps[i] == NULL) {
+ dm_error(
+ "DC: failed to create dpps!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ pool->base.opps[i] = dcn201_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn201_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ dm_error(
+				"DC: failed to create aux engine!\n");
+ goto create_fail;
+ }
+ pool->base.hw_i2cs[i] = dcn201_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ dm_error(
+				"DC: failed to create hw i2c!\n");
+ goto create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.timing_generators[i] = dcn201_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ }
+
+ pool->base.timing_generator_count = i;
+
+ pool->base.mpc = dcn201_mpc_create(ctx, pool->base.mpcc_count);
+ if (pool->base.mpc == NULL) {
+ dm_error("DC: failed to create mpc!\n");
+ goto create_fail;
+ }
+
+ pool->base.hubbub = dcn201_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ dm_error("DC: failed to create hubbub!\n");
+ goto create_fail;
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
+ &res_create_funcs : &res_create_maximus_funcs)))
+ goto create_fail;
+
+ dcn201_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->cap_funcs = cap_funcs;
+
+ return true;
+
+create_fail:
+
+ dcn201_resource_destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn201_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc)
+{
+ struct dcn201_resource_pool *pool =
+ kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC);
+
+ if (!pool)
+ return NULL;
+
+ if (dcn201_resource_construct(init_data->num_virtual_links, dc, pool))
+ return &pool->base;
+
+ kfree(pool);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h
new file mode 100644
index 000000000000..e0467d17d4ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCN201_H__
+#define __DC_RESOURCE_DCN201_H__
+
+#include "core_types.h"
+
+#define RRDPCS_PHY_DP_TX_PSTATE_POWER_UP 0x00000000
+#define RRDPCS_PHY_DP_TX_PSTATE_HOLD 0x00000001
+#define RRDPCS_PHY_DP_TX_PSTATE_HOLD_OFF 0x00000002
+#define RRDPCS_PHY_DP_TX_PSTATE_POWER_DOWN 0x00000003
+
+#define TO_DCN201_RES_POOL(pool)\
+ container_of(pool, struct dcn201_resource_pool, base)
+
+struct dc;
+struct resource_pool;
+struct _vcs_dpi_display_pipe_params_st;
+
+struct dcn201_resource_pool {
+ struct resource_pool base;
+};
+
+struct resource_pool *dcn201_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+
+#endif /* __DC_RESOURCE_DCN201_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index fbbdf9976183..d452a0d1777e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -35,7 +35,7 @@
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
-#include "dml/dcn2x/dcn2x.h"
+#include "dml/dcn20/dcn20_fpu.h"
#include "clk_mgr.h"
#include "dcn10/dcn10_hubp.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
index fa981cd04dd0..95528e5ef89e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
@@ -44,11 +44,14 @@
afmt3->base.ctx
-static void afmt3_setup_hdmi_audio(
+void afmt3_setup_hdmi_audio(
struct afmt *afmt)
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+ if (afmt->funcs->afmt_poweron)
+ afmt->funcs->afmt_poweron(afmt);
+
/* AFMT_AUDIO_PACKET_CONTROL */
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
@@ -113,7 +116,7 @@ static union audio_cea_channels speakers_to_channels(
return cea_channels;
}
-static void afmt3_se_audio_setup(
+void afmt3_se_audio_setup(
struct afmt *afmt,
unsigned int az_inst,
struct audio_info *audio_info)
@@ -138,20 +141,24 @@ static void afmt3_se_audio_setup(
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
/* Disable forced mem power off */
- REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0);
+ if (afmt->funcs->afmt_poweron == NULL)
+ REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0);
}
-static void afmt3_audio_mute_control(
+void afmt3_audio_mute_control(
struct afmt *afmt,
bool mute)
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
-
+ if (mute && afmt->funcs->afmt_powerdown)
+ afmt->funcs->afmt_powerdown(afmt);
+ if (!mute && afmt->funcs->afmt_poweron)
+ afmt->funcs->afmt_poweron(afmt);
/* enable/disable transmission of audio packets */
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
}
-static void afmt3_audio_info_immediate_update(
+void afmt3_audio_info_immediate_update(
struct afmt *afmt)
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
@@ -160,11 +167,14 @@ static void afmt3_audio_info_immediate_update(
REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
}
-static void afmt3_setup_dp_audio(
+void afmt3_setup_dp_audio(
struct afmt *afmt)
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+ if (afmt->funcs->afmt_poweron)
+ afmt->funcs->afmt_poweron(afmt);
+
/* AFMT_AUDIO_PACKET_CONTROL */
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h
index 85d4619207e2..97e0cf62f98e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h
@@ -121,6 +121,12 @@ struct afmt_funcs {
void (*setup_dp_audio)(
struct afmt *afmt);
+
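+	/* optional memory power-gating hooks; callers must NULL-check them */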
+ void (*afmt_poweron)(
+ struct afmt *afmt);
+
+ void (*afmt_powerdown)(
+ struct afmt *afmt);
};
struct afmt {
@@ -136,6 +142,24 @@ struct dcn30_afmt {
const struct dcn30_afmt_mask *afmt_mask;
};
+void afmt3_setup_hdmi_audio(
+ struct afmt *afmt);
+
+void afmt3_se_audio_setup(
+ struct afmt *afmt,
+ unsigned int az_inst,
+ struct audio_info *audio_info);
+
+void afmt3_audio_mute_control(
+ struct afmt *afmt,
+ bool mute);
+
+void afmt3_audio_info_immediate_update(
+ struct afmt *afmt);
+
+void afmt3_setup_dp_audio(
+ struct afmt *afmt);
+
void afmt3_construct(struct dcn30_afmt *afmt3,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index 46ea39f5ef8d..6f3c2fb60790 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -192,6 +192,10 @@ void dcn30_link_encoder_construct(
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE;
+ enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
+ enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
+ enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 8487516819ef..ebd9c35c914f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -77,7 +77,8 @@ static void enc3_update_hdmi_info_packet(
enc1->base.vpg->funcs->update_generic_info_packet(
enc1->base.vpg,
packet_index,
- info_packet);
+ info_packet,
+ true);
/* enable transmission of packet(s) -
* packet transmission begins on the next frame */
@@ -335,7 +336,8 @@ static void enc3_dp_set_dsc_config(struct stream_encoder *enc,
static void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
bool enable,
- uint8_t *dsc_packed_pps)
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -365,7 +367,8 @@ static void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
enc1->base.vpg->funcs->update_generic_info_packet(
enc1->base.vpg,
11 + i,
- &pps_sdp);
+ &pps_sdp,
+ immediate_update);
}
/* SW should make sure VBID[6] update line number is bigger
@@ -429,19 +432,22 @@ static void enc3_stream_encoder_update_dp_info_packets(
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
0, /* packetIndex */
- &info_frame->vsc);
+ &info_frame->vsc,
+ true);
}
if (info_frame->spd.valid) {
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
2, /* packetIndex */
- &info_frame->spd);
+ &info_frame->spd,
+ true);
}
if (info_frame->hdrsmd.valid) {
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
3, /* packetIndex */
- &info_frame->hdrsmd);
+ &info_frame->hdrsmd,
+ true);
}
/* packetIndex 4 is used for send immediate sdp message, and please
* use other packetIndex (such as 5,6) for other info packet
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 23a52d47e61c..c1d967ed6551 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -488,6 +488,54 @@ void dpp3_cnv_set_bias_scale(
REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue);
}
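+
+/* Flush the LUT memory power-gating writes deferred by the
+ * dpp3_power_on_* helpers: each memory is forced off only after its
+ * double-buffered *_MODE_CURRENT selector has latched bypass.
+ */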
+void dpp3_deferred_update(
+ struct dpp *dpp_base)
+{
+ int bypass_state;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ if (dpp_base->deferred_reg_writes.bits.disable_dscl) {
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3);
+ dpp_base->deferred_reg_writes.bits.disable_dscl = false;
+ }
+
+ if (dpp_base->deferred_reg_writes.bits.disable_gamcor) {
+ REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &bypass_state);
+ if (bypass_state == 0) { // only program if bypass was latched
+ REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 3);
+		} else {
+			ASSERT(0); // LUT select was updated again before vupdate
+		}
+ dpp_base->deferred_reg_writes.bits.disable_gamcor = false;
+ }
+
+ if (dpp_base->deferred_reg_writes.bits.disable_blnd_lut) {
+ REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &bypass_state);
+ if (bypass_state == 0) { // only program if bypass was latched
+ REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 3);
+		} else {
+			ASSERT(0); // LUT select was updated again before vupdate
+		}
+ dpp_base->deferred_reg_writes.bits.disable_blnd_lut = false;
+ }
+
+ if (dpp_base->deferred_reg_writes.bits.disable_3dlut) {
+ REG_GET(CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, &bypass_state);
+ if (bypass_state == 0) { // only program if bypass was latched
+ REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 3);
+		} else {
+			ASSERT(0); // LUT select was updated again before vupdate
+		}
+ dpp_base->deferred_reg_writes.bits.disable_3dlut = false;
+ }
+
+ if (dpp_base->deferred_reg_writes.bits.disable_shaper) {
+ REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &bypass_state);
+ if (bypass_state == 0) { // only program if bypass was latched
+ REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 3);
+		} else {
+			ASSERT(0); // LUT select was updated again before vupdate
+		}
+ dpp_base->deferred_reg_writes.bits.disable_shaper = false;
+ }
+}
+
static void dpp3_power_on_blnd_lut(
struct dpp *dpp_base,
bool power_on)
@@ -495,9 +543,13 @@ static void dpp3_power_on_blnd_lut(
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
- REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 0);
REG_WAIT(CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ dpp_base->ctx->dc->optimized_required = true;
+ dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
+ }
} else {
REG_SET(CM_MEM_PWR_CTRL, 0,
BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
@@ -511,9 +563,13 @@ static void dpp3_power_on_hdr3dlut(
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
- REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 0);
REG_WAIT(CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ dpp_base->ctx->dc->optimized_required = true;
+ dpp_base->deferred_reg_writes.bits.disable_3dlut = true;
+ }
}
}
@@ -524,9 +580,13 @@ static void dpp3_power_on_shaper(
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
- REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 0);
REG_WAIT(CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ dpp_base->ctx->dc->optimized_required = true;
+ dpp_base->deferred_reg_writes.bits.disable_shaper = true;
+ }
}
}
@@ -1400,6 +1460,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
.dpp_program_blnd_lut = dpp3_program_blnd_lut,
.dpp_program_shaper_lut = dpp3_program_shaper,
.dpp_program_3dlut = dpp3_program_3dlut,
+ .dpp_deferred_update = dpp3_deferred_update,
.dpp_program_bias_and_scale = NULL,
.dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
.set_cursor_attributes = dpp3_set_cursor_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index 72c5687adc68..387eec616162 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -136,9 +136,13 @@ static void dpp3_power_on_gamcor_lut(
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
- REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0);
REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ dpp_base->ctx->dc->optimized_required = true;
+ dpp_base->deferred_reg_writes.bits.disable_gamcor = true;
+ }
} else
REG_SET(CM_MEM_PWR_CTRL, 0,
GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index f24612523248..eac08926b574 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -356,12 +356,6 @@ void hubp3_dcc_control_sienna_cichlid(struct hubp *hubp,
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- /*Workaround until UMD fix the new dcc_ind_blk interface */
- if (dcc->independent_64b_blks && dcc->dcc_ind_blk == 0)
- dcc->dcc_ind_blk = 1;
- if (dcc->independent_64b_blks_c && dcc->dcc_ind_blk_c == 0)
- dcc->dcc_ind_blk_c = 1;
-
REG_UPDATE_6(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_DCC_EN, dcc->enable,
PRIMARY_SURFACE_DCC_IND_BLK, dcc->dcc_ind_blk,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index fafed1e4a998..df2717116604 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -559,7 +559,7 @@ void dcn30_init_hw(struct dc *dc)
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
dc->res_pool->stream_enc[j]);
break;
}
@@ -1002,7 +1002,8 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
/* turning off DPG */
pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false);
for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
- mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false);
+ if (mpcc_pipe->plane_res.hubp)
+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false);
stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
color_depth, solid_color, width, height, offset);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 3a5b53dd2f6d..93f32a312fee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -100,6 +100,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .is_abm_supported = dcn21_is_abm_supported
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 089be7347591..5d9e6413d67a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -73,16 +73,23 @@ void optc3_lock_doublebuffer_enable(struct timing_generator *optc)
OTG_H_BLANK_END, &h_blank_end);
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
- MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start,
- MASTER_UPDATE_LOCK_DB_END_Y, v_blank_end);
+ MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start - 1,
+ MASTER_UPDATE_LOCK_DB_END_Y, v_blank_start);
REG_UPDATE_2(OTG_GLOBAL_CONTROL4,
- DIG_UPDATE_POSITION_X, 20,
- DIG_UPDATE_POSITION_Y, v_blank_start);
+ DIG_UPDATE_POSITION_X, h_blank_start - 180 - 1,
+ DIG_UPDATE_POSITION_Y, v_blank_start - 1);
+	// note: DIG_UPDATE_VCOUNT_MODE is present and left at its default of 0.
+
REG_UPDATE_3(OTG_GLOBAL_CONTROL0,
MASTER_UPDATE_LOCK_DB_START_X, h_blank_start - 200 - 1,
- MASTER_UPDATE_LOCK_DB_END_X, h_blank_end,
+ MASTER_UPDATE_LOCK_DB_END_X, h_blank_start - 180,
MASTER_UPDATE_LOCK_DB_EN, 1);
REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
}
void optc3_lock_doublebuffer_disable(struct timing_generator *optc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index a0de309475a9..e50c695e3c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1164,8 +1164,12 @@ struct stream_encoder *dcn30_stream_encoder_create(
vpg = dcn30_vpg_create(ctx, vpg_inst);
afmt = dcn30_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
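+		/* kfree(NULL) is a no-op, so any subset of failed allocations is handled */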
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
@@ -1703,9 +1707,6 @@ bool dcn30_release_post_bldn_3dlut(
return ret;
}
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
-
static bool is_soc_bounding_box_valid(struct dc *dc)
{
uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
@@ -1856,7 +1857,7 @@ static struct pipe_ctx *dcn30_find_split_pipe(
return pipe;
}
-static noinline bool dcn30_internal_validate_bw(
+noinline bool dcn30_internal_validate_bw(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1925,23 +1926,25 @@ static noinline bool dcn30_internal_validate_bw(
if (vlevel == context->bw_ctx.dml.soc.num_states)
goto validate_fail;
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
+ if (!dc->config.enable_windowed_mpo_odm) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
- if (!pipe->stream)
- continue;
+ if (!pipe->stream)
+ continue;
- /* We only support full screen mpo with ODM */
- if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
- && pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
- sizeof(struct rect)) != 0) {
- ASSERT(mpo_pipe->plane_state != pipe->plane_state);
- goto validate_fail;
+ /* We only support full screen mpo with ODM */
+ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
+ && pipe->plane_state && mpo_pipe
+ && memcmp(&mpo_pipe->plane_res.scl_data.recout,
+ &pipe->plane_res.scl_data.recout,
+ sizeof(struct rect)) != 0) {
+ ASSERT(mpo_pipe->plane_state != pipe->plane_state);
+ goto validate_fail;
+ }
+ pipe_idx++;
}
- pipe_idx++;
}
/* merge pipes if necessary */
@@ -2319,7 +2322,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
goto validate_out;
}
+ DC_FP_START();
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
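+	/* The DC_FP_START()/DC_FP_END() pair above brackets a kernel-FPU
+	 * section (effectively kernel_fpu_begin()/kernel_fpu_end() on x86),
+	 * since calculate_wm_and_dlg() does double-precision DML math.
+	 */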
BW_VAL_TRACE_END_WATERMARKS();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index b754b89beadf..b92e4cc0232f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -55,6 +55,13 @@ unsigned int dcn30_calc_max_scaled_time(
bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate);
+bool dcn30_internal_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out,
+ bool fast_validate);
void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c
index 8cfd181b4d5f..14bc44b1f886 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c
@@ -43,10 +43,11 @@
vpg3->base.ctx
-static void vpg3_update_generic_info_packet(
+void vpg3_update_generic_info_packet(
struct vpg *vpg,
uint32_t packet_index,
- const struct dc_info_packet *info_packet)
+ const struct dc_info_packet *info_packet,
+ bool immediate_update)
{
struct dcn30_vpg *vpg3 = DCN30_VPG_FROM_VPG(vpg);
uint32_t i;
@@ -106,69 +107,138 @@ static void vpg3_update_generic_info_packet(
/* atomically update double-buffered GENERIC0 registers in immediate mode
* (update at next block_update when block_update_lock == 0).
*/
- switch (packet_index) {
- case 0:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC0_IMMEDIATE_UPDATE, 1);
- break;
- case 1:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC1_IMMEDIATE_UPDATE, 1);
- break;
- case 2:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC2_IMMEDIATE_UPDATE, 1);
- break;
- case 3:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC3_IMMEDIATE_UPDATE, 1);
- break;
- case 4:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC4_IMMEDIATE_UPDATE, 1);
- break;
- case 5:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC5_IMMEDIATE_UPDATE, 1);
- break;
- case 6:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC6_IMMEDIATE_UPDATE, 1);
- break;
- case 7:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC7_IMMEDIATE_UPDATE, 1);
- break;
- case 8:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC8_IMMEDIATE_UPDATE, 1);
- break;
- case 9:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC9_IMMEDIATE_UPDATE, 1);
- break;
- case 10:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC10_IMMEDIATE_UPDATE, 1);
- break;
- case 11:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC11_IMMEDIATE_UPDATE, 1);
- break;
- case 12:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC12_IMMEDIATE_UPDATE, 1);
- break;
- case 13:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC13_IMMEDIATE_UPDATE, 1);
- break;
- case 14:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC14_IMMEDIATE_UPDATE, 1);
- break;
- default:
- break;
+ if (immediate_update) {
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC0_IMMEDIATE_UPDATE, 1);
+ break;
+ case 1:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC1_IMMEDIATE_UPDATE, 1);
+ break;
+ case 2:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC2_IMMEDIATE_UPDATE, 1);
+ break;
+ case 3:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC3_IMMEDIATE_UPDATE, 1);
+ break;
+ case 4:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC4_IMMEDIATE_UPDATE, 1);
+ break;
+ case 5:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC5_IMMEDIATE_UPDATE, 1);
+ break;
+ case 6:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC6_IMMEDIATE_UPDATE, 1);
+ break;
+ case 7:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC7_IMMEDIATE_UPDATE, 1);
+ break;
+ case 8:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC8_IMMEDIATE_UPDATE, 1);
+ break;
+ case 9:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC9_IMMEDIATE_UPDATE, 1);
+ break;
+ case 10:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC10_IMMEDIATE_UPDATE, 1);
+ break;
+ case 11:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC11_IMMEDIATE_UPDATE, 1);
+ break;
+ case 12:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC12_IMMEDIATE_UPDATE, 1);
+ break;
+ case 13:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC13_IMMEDIATE_UPDATE, 1);
+ break;
+ case 14:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC14_IMMEDIATE_UPDATE, 1);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC0_FRAME_UPDATE, 1);
+ break;
+ case 1:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC1_FRAME_UPDATE, 1);
+ break;
+ case 2:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC2_FRAME_UPDATE, 1);
+ break;
+ case 3:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC3_FRAME_UPDATE, 1);
+ break;
+ case 4:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC4_FRAME_UPDATE, 1);
+ break;
+ case 5:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC5_FRAME_UPDATE, 1);
+ break;
+ case 6:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC6_FRAME_UPDATE, 1);
+ break;
+ case 7:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC7_FRAME_UPDATE, 1);
+ break;
+ case 8:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC8_FRAME_UPDATE, 1);
+ break;
+ case 9:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC9_FRAME_UPDATE, 1);
+ break;
+ case 10:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC10_FRAME_UPDATE, 1);
+ break;
+ case 11:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC11_FRAME_UPDATE, 1);
+ break;
+ case 12:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC12_FRAME_UPDATE, 1);
+ break;
+ case 13:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC13_FRAME_UPDATE, 1);
+ break;
+ case 14:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC14_FRAME_UPDATE, 1);
+ break;
+ default:
+ break;
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
index 6161e9e66355..ed9a5549c389 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
@@ -138,7 +138,14 @@ struct vpg_funcs {
void (*update_generic_info_packet)(
struct vpg *vpg,
uint32_t packet_index,
- const struct dc_info_packet *info_packet);
+ const struct dc_info_packet *info_packet,
+ bool immediate_update);
+
+ void (*vpg_poweron)(
+ struct vpg *vpg);
+
+ void (*vpg_powerdown)(
+ struct vpg *vpg);
};
struct vpg {
@@ -154,6 +161,12 @@ struct dcn30_vpg {
const struct dcn30_vpg_mask *vpg_mask;
};
+void vpg3_update_generic_info_packet(
+ struct vpg *vpg,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet,
+ bool immediate_update);
+
void vpg3_construct(struct dcn30_vpg *vpg3,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 09264716d1dc..7aa628c21973 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -13,32 +13,6 @@
DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN301)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 912285fdce18..fbaa03f26d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -82,6 +82,7 @@
#include "dce/dce_i2c.h"
#include "dml/dcn30/display_mode_vba_30.h"
+#include "dml/dcn301/dcn301_fpu.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "amdgpu_socbb.h"
@@ -91,184 +92,6 @@
#define DC_LOGGER_INIT(logger)
-struct _vcs_dpi_ip_params_st dcn3_01_ip = {
- .odm_capable = 1,
- .gpuvm_enable = 1,
- .hostvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_max_page_table_levels = 2,
- .hostvm_cached_page_table_levels = 0,
- .pte_group_size_bytes = 2048,
- .num_dsc = 3,
- .rob_buffer_size_kbytes = 184,
- .det_buffer_size_kbytes = 184,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 32,
- .pde_proc_buffer_size_64k_reqs = 48,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 8,
- .line_buffer_size_bits = 789504,
- .is_line_buffer_bpp_fixed = 0, // ?
- .line_buffer_fixed_bpp = 48, // ?
- .dcc_supported = true,
- .writeback_interface_buffer_size_kbytes = 90,
- .writeback_line_buffer_buffer_size = 656640,
- .max_line_buffer_lines = 12,
- .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
- .writeback_chroma_buffer_size_kbytes = 8,
- .writeback_chroma_line_buffer_width_pixels = 4,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .writeback_line_buffer_luma_buffer_size = 0,
- .writeback_line_buffer_chroma_buffer_size = 14643,
- .cursor_buffer_size = 8,
- .cursor_chunk_size = 2,
- .max_num_otg = 4,
- .max_num_dpp = 4,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.11,
- .min_vblank_lines = 32,
- .dppclk_delay_subtotal = 46,
- .dynamic_metadata_vm_enabled = true,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_scl = 50,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dcfclk_cstate_latency = 5.2, // SRExitTime
- .max_inter_dcn_tile_repeaters = 8,
- .max_num_hdmi_frl_outputs = 0,
- .odm_combine_4to1_supported = true,
-
- .xfc_supported = false,
- .xfc_fill_bw_overhead_percent = 10.0,
- .xfc_fill_constant_bytes = 0,
- .gfx7_compat_tiling_supported = 0,
- .number_of_cursors = 1,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = {
- .clock_limits = {
- {
- .state = 0,
- .dram_speed_mts = 2400.0,
- .fabricclk_mhz = 600,
- .socclk_mhz = 278.0,
- .dcfclk_mhz = 400.0,
- .dscclk_mhz = 206.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 600.0,
- },
- {
- .state = 1,
- .dram_speed_mts = 2400.0,
- .fabricclk_mhz = 688,
- .socclk_mhz = 278.0,
- .dcfclk_mhz = 400.0,
- .dscclk_mhz = 206.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 600.0,
- },
- {
- .state = 2,
- .dram_speed_mts = 4267.0,
- .fabricclk_mhz = 1067,
- .socclk_mhz = 278.0,
- .dcfclk_mhz = 608.0,
- .dscclk_mhz = 296.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
- },
-
- {
- .state = 3,
- .dram_speed_mts = 4267.0,
- .fabricclk_mhz = 1067,
- .socclk_mhz = 715.0,
- .dcfclk_mhz = 676.0,
- .dscclk_mhz = 338.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
- },
-
- {
- .state = 4,
- .dram_speed_mts = 4267.0,
- .fabricclk_mhz = 1067,
- .socclk_mhz = 953.0,
- .dcfclk_mhz = 810.0,
- .dscclk_mhz = 338.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
- },
- },
-
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .urgent_latency_us = 4.0,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .writeback_latency_us = 12.0,
- .max_request_size_bytes = 256,
- .dram_channel_width_bytes = 4,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .dcn_downspread_percent = 0.5,
- .downspread_percent = 0.38,
- .dram_page_open_time_ns = 50.0,
- .dram_rw_turnaround_time_ns = 17.5,
- .dram_return_buffer_per_channel_bytes = 8192,
- .round_trip_ping_latency_dcfclk_cycles = 191,
- .urgent_out_of_order_return_per_channel_bytes = 4096,
- .channel_interleave_bytes = 256,
- .num_banks = 8,
- .num_chans = 4,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 23.84,
- .writeback_dram_clock_change_latency_us = 23.0,
- .return_bus_width_bytes = 64,
- .dispclk_dppclk_vco_speed_mhz = 3550,
- .xfc_bus_transport_time_us = 20, // ?
- .xfc_xbuf_latency_tolerance_us = 4, // ?
- .use_urgent_burst_bw = 1, // ?
- .num_states = 5,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn301_clk_src_array_id {
DCN301_CLK_SRC_PLL0,
DCN301_CLK_SRC_PLL1,
@@ -1195,8 +1018,12 @@ struct stream_encoder *dcn301_stream_encoder_create(
vpg = dcn301_vpg_create(ctx, vpg_inst);
afmt = dcn301_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
@@ -1476,8 +1303,6 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
static bool is_soc_bounding_box_valid(struct dc *dc)
{
@@ -1504,26 +1329,24 @@ static bool init_soc_bounding_box(struct dc *dc,
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
+ DC_FP_START();
dcn20_patch_bounding_box(dc, loaded_bb);
+ DC_FP_END();
if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
- if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
-
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
-
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+ DC_FP_START();
+ dcn301_fpu_init_soc_bounding_box(bb_info);
+ DC_FP_END();
}
}
return true;
}
+
static void set_wm_ranges(
struct pp_smu_funcs *pp_smu,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
@@ -1546,9 +1369,9 @@ static void set_wm_ranges(
ranges.reader_wm_sets[i].wm_inst = i;
ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
- ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
-
+ DC_FP_START();
+ dcn301_fpu_set_wm_ranges(i, &ranges, loaded_bb);
+ DC_FP_END();
ranges.num_reader_wm_sets = i + 1;
}
@@ -1568,66 +1391,12 @@ static void set_wm_ranges(
pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges);
}
-static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
- struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, closest_clk_lvl;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
- dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
- dcn3_01_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
-
- clock_limits[i].state = i;
- clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_01_soc.clock_limits[i] = clock_limits[i];
- if (clk_table->num_entries) {
- dcn3_01_soc.num_states = clk_table->num_entries;
- /* duplicate last level */
- dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
- dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
- }
- }
-
- dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-
- dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
-}
-
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
.panel_cntl_create = dcn301_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
- .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+ .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
index 17e4e91ff4b8..ae8672680cdd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
@@ -32,6 +32,9 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
+extern struct _vcs_dpi_ip_params_st dcn3_01_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc;
+
struct dcn301_resource_pool {
struct resource_pool base;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 7d3ff5d44402..fcf96cf08c76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -542,8 +542,12 @@ static struct stream_encoder *dcn302_stream_encoder_create(enum engine_id eng_id
vpg = dcn302_vpg_create(ctx, vpg_inst);
afmt = dcn302_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -1462,7 +1466,7 @@ static const struct dccg_mask dccg_mask = {
};
#define abm_regs(id)\
- [id] = { ABM_DCN301_REG_LIST(id) }
+ [id] = { ABM_DCN302_REG_LIST(id) }
static const struct dce_abm_registers abm_regs[] = {
abm_regs(0),
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index dd38796ba30a..4a9b64023675 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -1344,6 +1344,20 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
}
+
+ // WA: patch strobe modes to compensate for DCN303 BW issue
+ if (dcn3_03_soc.num_chans <= 4) {
+ for (i = 0; i < dcn3_03_soc.num_states; i++) {
+ if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700)
+ break;
+
+ if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) {
+ dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100;
+ dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100;
+ }
+ }
+ }
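+	/* i.e. on boards with at most four DRAM channels, pin dcfclk and
+	 * fabricclk to 100 MHz for the strobe-rate states (DRAM speed in
+	 * roughly the 1500-1700 MT/s range).
+	 */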
+
/* re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30);
if (dc->current_state)
@@ -1394,7 +1408,7 @@ static const struct dccg_mask dccg_mask = {
};
#define abm_regs(id)\
- [id] = { ABM_DCN301_REG_LIST(id) }
+ [id] = { ABM_DCN302_REG_LIST(id) }
static const struct dce_abm_registers abm_regs[] = {
abm_regs(0),
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index 4bab97acb155..d20e3b8ccc30 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -11,7 +11,9 @@
# Makefile for dcn31.
DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_hwseq.o dcn31_init.o dcn31_hubp.o \
- dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o
+ dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
+ dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
+ dcn31_afmt.o dcn31_vpg.o
ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o := -msse
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c
new file mode 100644
index 000000000000..d380a8ec2184
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "hw_shared.h"
+#include "dcn30/dcn30_afmt.h"
+#include "dcn31_afmt.h"
+#include "reg_helper.h"
+#include "dc/dc.h"
+
+#define DC_LOGGER \
+ afmt31->base.ctx->logger
+
+#define REG(reg)\
+ (afmt31->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ afmt31->afmt_shift->field_name, afmt31->afmt_mask->field_name
+
+
+#define CTX \
+ afmt31->base.ctx
+
+static struct afmt_funcs dcn31_afmt_funcs = {
+ .setup_hdmi_audio = afmt3_setup_hdmi_audio,
+ .se_audio_setup = afmt3_se_audio_setup,
+ .audio_mute_control = afmt3_audio_mute_control,
+ .audio_info_immediate_update = afmt3_audio_info_immediate_update,
+ .setup_dp_audio = afmt3_setup_dp_audio,
+ .afmt_powerdown = afmt31_powerdown,
+ .afmt_poweron = afmt31_poweron
+};
+
+void afmt31_powerdown(struct afmt *afmt)
+{
+ struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt);
+
+ if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false)
+ return;
+
+ REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 0, AFMT_MEM_PWR_FORCE, 1);
+}
+
+void afmt31_poweron(struct afmt *afmt)
+{
+ struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt);
+
+ if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false)
+ return;
+
+ REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 1, AFMT_MEM_PWR_FORCE, 0);
+}
+
+void afmt31_construct(struct dcn31_afmt *afmt31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_afmt_registers *afmt_regs,
+ const struct dcn31_afmt_shift *afmt_shift,
+ const struct dcn31_afmt_mask *afmt_mask)
+{
+ afmt31->base.ctx = ctx;
+
+ afmt31->base.inst = inst;
+ afmt31->base.funcs = &dcn31_afmt_funcs;
+
+ afmt31->regs = afmt_regs;
+ afmt31->afmt_shift = afmt_shift;
+ afmt31->afmt_mask = afmt_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h
new file mode 100644
index 000000000000..802cb05b6ab9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_AFMT_H__
+#define __DAL_DCN31_AFMT_H__
+
+
+#define DCN31_AFMT_FROM_AFMT(afmt)\
+ container_of(afmt, struct dcn31_afmt, base)
+
+#define AFMT_DCN31_REG_LIST(id) \
+ SRI(AFMT_INFOFRAME_CONTROL0, AFMT, id), \
+ SRI(AFMT_VBI_PACKET_CONTROL, AFMT, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \
+ SRI(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \
+ SRI(AFMT_60958_0, AFMT, id), \
+ SRI(AFMT_60958_1, AFMT, id), \
+ SRI(AFMT_60958_2, AFMT, id), \
+ SRI(AFMT_MEM_PWR, AFMT, id)
+
+struct dcn31_afmt_registers {
+ uint32_t AFMT_INFOFRAME_CONTROL0;
+ uint32_t AFMT_VBI_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+ uint32_t AFMT_AUDIO_SRC_CONTROL;
+ uint32_t AFMT_60958_0;
+ uint32_t AFMT_60958_1;
+ uint32_t AFMT_60958_2;
+ uint32_t AFMT_MEM_PWR;
+};
+
+#define DCN31_AFMT_MASK_SH_LIST(mask_sh)\
+ SE_SF(AFMT0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_STATE, mask_sh)
+
+#define AFMT_DCN31_REG_FIELD_LIST(type) \
+ type AFMT_AUDIO_INFO_UPDATE;\
+ type AFMT_AUDIO_SRC_SELECT;\
+ type AFMT_AUDIO_CHANNEL_ENABLE;\
+ type AFMT_60958_CS_UPDATE;\
+ type AFMT_AUDIO_LAYOUT_OVRD;\
+ type AFMT_60958_OSF_OVRD;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_L;\
+ type AFMT_60958_CS_CLOCK_ACCURACY;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_R;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_2;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_3;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_4;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_5;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_6;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_7;\
+ type AFMT_AUDIO_SAMPLE_SEND;\
+ type AFMT_MEM_PWR_FORCE;\
+ type AFMT_MEM_PWR_DIS;\
+ type AFMT_MEM_PWR_STATE
+
+struct dcn31_afmt_shift {
+ AFMT_DCN31_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_afmt_mask {
+ AFMT_DCN31_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_afmt {
+ struct afmt base;
+ const struct dcn31_afmt_registers *regs;
+ const struct dcn31_afmt_shift *afmt_shift;
+ const struct dcn31_afmt_mask *afmt_mask;
+};
+
+void afmt31_poweron(
+ struct afmt *afmt);
+
+void afmt31_powerdown(
+ struct afmt *afmt);
+
+void afmt31_construct(struct dcn31_afmt *afmt31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_afmt_registers *afmt_regs,
+ const struct dcn31_afmt_shift *afmt_shift,
+ const struct dcn31_afmt_mask *afmt_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
new file mode 100644
index 000000000000..de5e18c2a3ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dc_bios_types.h"
+#include "hw_shared.h"
+#include "dcn31_apg.h"
+#include "reg_helper.h"
+
+#define DC_LOGGER \
+ apg31->base.ctx->logger
+
+#define REG(reg)\
+ (apg31->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ apg31->apg_shift->field_name, apg31->apg_mask->field_name
+
+
+#define CTX \
+ apg31->base.ctx
+
+
+static void apg31_enable(
+ struct apg *apg)
+{
+ struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
+
+ /* Reset APG */
+ REG_UPDATE(APG_CONTROL, APG_RESET, 1);
+ REG_WAIT(APG_CONTROL,
+ APG_RESET_DONE, 1,
+ 1, 10);
+ REG_UPDATE(APG_CONTROL, APG_RESET, 0);
+ REG_WAIT(APG_CONTROL,
+ APG_RESET_DONE, 0,
+ 1, 10);
+
+ /* Enable APG */
+ REG_UPDATE(APG_CONTROL2, APG_ENABLE, 1);
+}
+
+static void apg31_disable(
+ struct apg *apg)
+{
+ struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
+
+ /* Disable APG */
+ REG_UPDATE(APG_CONTROL2, APG_ENABLE, 0);
+}
+
+static union audio_cea_channels speakers_to_channels(
+ struct audio_speaker_flags speaker_flags)
+{
+ union audio_cea_channels cea_channels = {0};
+
+ /* these are one to one */
+ cea_channels.channels.FL = speaker_flags.FL_FR;
+ cea_channels.channels.FR = speaker_flags.FL_FR;
+ cea_channels.channels.LFE = speaker_flags.LFE;
+ cea_channels.channels.FC = speaker_flags.FC;
+
+ /* if Rear Left and Right exist move RC speaker to channel 7
+ * otherwise to channel 5
+ */
+ if (speaker_flags.RL_RR) {
+ cea_channels.channels.RL_RC = speaker_flags.RL_RR;
+ cea_channels.channels.RR = speaker_flags.RL_RR;
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
+ } else {
+ cea_channels.channels.RL_RC = speaker_flags.RC;
+ }
+
+ /* FRONT Left Right Center and REAR Left Right Center are exclusive */
+ if (speaker_flags.FLC_FRC) {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
+ } else {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
+ }
+
+ return cea_channels;
+}
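+
+/* Example (illustrative): a 5.1 speaker set with FL_FR, LFE, FC and RL_RR
+ * set (and RC clear) maps one-to-one onto CEA channels FL, FR, LFE, FC,
+ * RL and RR; none of the RC/FLC remapping above is needed in that case.
+ */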
+
+static void apg31_se_audio_setup(
+ struct apg *apg,
+ unsigned int az_inst,
+ struct audio_info *audio_info)
+{
+ struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
+
+ uint32_t speakers = 0;
+ uint32_t channels = 0;
+
+ ASSERT(audio_info);
+	/* This should not happen, but it does; bail out so we don't BSOD. */
+ if (audio_info == NULL)
+ return;
+
+ speakers = audio_info->flags.info.ALLSPEAKERS;
+ channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
+
+ /* DisplayPort only allows for one audio stream with stream ID 0 */
+ REG_UPDATE(APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, 0);
+
+	/* When running in "pair mode", pairs of audio channels have their own enable;
+	 * this is for really old audio drivers. */
+ REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xFF);
+ // REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, channels);
+
+ /* Disable forced mem power off */
+ REG_UPDATE(APG_MEM_PWR, APG_MEM_PWR_FORCE, 0);
+
+ apg31_enable(apg);
+}
+
+static void apg31_audio_mute_control(
+ struct apg *apg,
+ bool mute)
+{
+ if (mute)
+ apg31_disable(apg);
+ else
+ apg31_enable(apg);
+}
+
+static struct apg_funcs dcn31_apg_funcs = {
+ .se_audio_setup = apg31_se_audio_setup,
+ .audio_mute_control = apg31_audio_mute_control,
+ .enable_apg = apg31_enable,
+ .disable_apg = apg31_disable,
+};
+
+void apg31_construct(struct dcn31_apg *apg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_apg_registers *apg_regs,
+ const struct dcn31_apg_shift *apg_shift,
+ const struct dcn31_apg_mask *apg_mask)
+{
+ apg31->base.ctx = ctx;
+
+ apg31->base.inst = inst;
+ apg31->base.funcs = &dcn31_apg_funcs;
+
+ apg31->regs = apg_regs;
+ apg31->apg_shift = apg_shift;
+ apg31->apg_mask = apg_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h
new file mode 100644
index 000000000000..24f568e120d8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_APG_H__
+#define __DAL_DCN31_APG_H__
+
+
+#define DCN31_APG_FROM_APG(apg)\
+ container_of(apg, struct dcn31_apg, base)
+
+#define APG_DCN31_REG_LIST(id) \
+ SRI(APG_CONTROL, APG, id), \
+ SRI(APG_CONTROL2, APG, id),\
+ SRI(APG_MEM_PWR, APG, id),\
+ SRI(APG_DBG_GEN_CONTROL, APG, id)
+
+struct dcn31_apg_registers {
+ uint32_t APG_CONTROL;
+ uint32_t APG_CONTROL2;
+ uint32_t APG_MEM_PWR;
+ uint32_t APG_DBG_GEN_CONTROL;
+};
+
+
+#define DCN31_APG_MASK_SH_LIST(mask_sh)\
+ SE_SF(APG0_APG_CONTROL, APG_RESET, mask_sh),\
+ SE_SF(APG0_APG_CONTROL, APG_RESET_DONE, mask_sh),\
+ SE_SF(APG0_APG_CONTROL2, APG_ENABLE, mask_sh),\
+ SE_SF(APG0_APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, mask_sh),\
+ SE_SF(APG0_APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(APG0_APG_MEM_PWR, APG_MEM_PWR_FORCE, mask_sh)
+
+#define APG_DCN31_REG_FIELD_LIST(type) \
+ type APG_RESET;\
+ type APG_RESET_DONE;\
+ type APG_ENABLE;\
+ type APG_DP_AUDIO_STREAM_ID;\
+ type APG_DBG_AUDIO_CHANNEL_ENABLE;\
+ type APG_MEM_PWR_FORCE
+
+struct dcn31_apg_shift {
+ APG_DCN31_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_apg_mask {
+ APG_DCN31_REG_FIELD_LIST(uint32_t);
+};
+
+struct apg {
+ const struct apg_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+};
+
+struct apg_funcs {
+
+ void (*setup_hdmi_audio)(
+ struct apg *apg);
+
+ void (*se_audio_setup)(
+ struct apg *apg,
+ unsigned int az_inst,
+ struct audio_info *audio_info);
+
+ void (*audio_mute_control)(
+ struct apg *apg,
+ bool mute);
+
+ void (*enable_apg)(
+ struct apg *apg);
+
+ void (*disable_apg)(
+ struct apg *apg);
+};
+
+
+
+struct dcn31_apg {
+ struct apg base;
+ const struct dcn31_apg_registers *regs;
+ const struct dcn31_apg_shift *apg_shift;
+ const struct dcn31_apg_mask *apg_mask;
+};
+
+void apg31_construct(struct dcn31_apg *apg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_apg_registers *apg_regs,
+ const struct dcn31_apg_shift *apg_shift,
+ const struct dcn31_apg_mask *apg_mask);
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
index 696c9307715d..815481a3ef54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -26,6 +26,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dcn31_dccg.h"
+#include "dal_asic_id.h"
#define TO_DCN_DCCG(dccg)\
container_of(dccg, struct dcn_dccg, base)
@@ -42,6 +43,358 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->ref_dppclk && req_dppclk) {
+ int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
+
+ // phase / modulo = dpp pipe clk / dpp global clk
+		modulo = 0xff; // fixed maximum 8-bit modulo
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
+
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
+ }
+
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ } else {
+		// DTO must be enabled to generate a 0 Hz clock output
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, 0,
+ DPPCLK0_DTO_MODULO, 1);
+ } else {
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 0);
+ }
+ }
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
+}
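+
+/* Worked example of the ratio above (illustrative clocks): with
+ * ref_dppclk = 1000 and req_dppclk = 600, phase = (0xff * 600 + 999) / 1000
+ * = 153, and the DTO output is ref * phase / modulo = 1000 * 153 / 255 = 600,
+ * i.e. the requested pipe clock.
+ */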
+
+static enum phyd32clk_clock_source get_phy_mux_symclk(
+ struct dcn_dccg *dccg_dcn,
+ enum phyd32clk_clock_source src)
+{
+ if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (src == PHYD32CLKC)
+ src = PHYD32CLKF;
+ if (src == PHYD32CLKD)
+ src = PHYD32CLKG;
+ }
+ return src;
+}
+
+static void dccg31_enable_dpstreamclk(struct dccg *dccg, int otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* enabled to select one of the DTBCLKs for pipe */
+ switch (otg_inst) {
+ case 0:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE0_EN, 1);
+ break;
+ case 1:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE1_EN, 1);
+ break;
+ case 2:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE2_EN, 1);
+ break;
+ case 3:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE3_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ DPSTREAMCLK_ROOT_GATE_DISABLE, 1);
+}
+
+static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ DPSTREAMCLK_ROOT_GATE_DISABLE, 0);
+
+ switch (otg_inst) {
+ case 0:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE0_EN, 0);
+ break;
+ case 1:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE1_EN, 0);
+ break;
+ case 2:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE2_EN, 0);
+ break;
+ case 3:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE3_EN, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_set_dpstreamclk(
+ struct dccg *dccg,
+ enum hdmistreamclk_source src,
+ int otg_inst)
+{
+ if (src == REFCLK)
+ dccg31_disable_dpstreamclk(dccg, otg_inst);
+ else
+ dccg31_enable_dpstreamclk(dccg, otg_inst);
+}
+
+void dccg31_enable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst,
+ enum phyd32clk_clock_source phyd32clk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk);
+
+ /* select one of the PHYD32CLKs as the source for symclk32_se */
+ switch (hpo_se_inst) {
+ case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, phyd32clk,
+ SYMCLK32_SE0_EN, 1);
+ break;
+ case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, phyd32clk,
+ SYMCLK32_SE1_EN, 1);
+ break;
+ case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, phyd32clk,
+ SYMCLK32_SE2_EN, 1);
+ break;
+ case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, phyd32clk,
+ SYMCLK32_SE3_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_disable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* set refclk as the source for symclk32_se */
+ switch (hpo_se_inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, 0,
+ SYMCLK32_SE0_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, 0,
+ SYMCLK32_SE1_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, 0);
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, 0,
+ SYMCLK32_SE2_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, 0);
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, 0,
+ SYMCLK32_SE3_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_enable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ enum phyd32clk_clock_source phyd32clk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk);
+
+ /* select one of the PHYD32CLKs as the source for symclk32_le */
+ switch (hpo_le_inst) {
+ case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE0_SRC_SEL, phyd32clk,
+ SYMCLK32_LE0_EN, 1);
+ break;
+ case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE1_SRC_SEL, phyd32clk,
+ SYMCLK32_LE1_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_disable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* set refclk as the source for symclk32_le */
+ switch (hpo_le_inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE0_SRC_SEL, 0,
+ SYMCLK32_LE0_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE1_SRC_SEL, 0,
+ SYMCLK32_LE1_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg31_disable_dscclk(struct dccg *dccg, int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ return;
+	// DTO must be enabled to generate a 0 Hz clock output
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK0_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+ DSCCLK0_DTO_PHASE, 0,
+ DSCCLK0_DTO_MODULO, 1);
+ break;
+ case 1:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK1_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+ DSCCLK1_DTO_PHASE, 0,
+ DSCCLK1_DTO_MODULO, 1);
+ break;
+ case 2:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK2_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+ DSCCLK2_DTO_PHASE, 0,
+ DSCCLK2_DTO_MODULO, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg31_enable_dscclk(struct dccg *dccg, int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ return;
+	// Disable the DTO so the DSC clock is no longer forced to 0 Hz
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+ DSCCLK0_DTO_PHASE, 0,
+ DSCCLK0_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK0_DTO_ENABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+ DSCCLK1_DTO_PHASE, 0,
+ DSCCLK1_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK1_DTO_ENABLE, 0);
+ break;
+ case 2:
+ REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+ DSCCLK2_DTO_PHASE, 0,
+ DSCCLK2_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK2_DTO_ENABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
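+
+/* Design note: with root clock gating active, "disabling" a DSC clock above
+ * means driving it at 0 Hz through its DTO (phase 0, modulo 1, DTO enabled),
+ * while "enabling" it takes the DTO back out of the path.
+ */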
+
void dccg31_set_physymclk(
struct dccg *dccg,
int phy_inst,
@@ -241,16 +594,44 @@ static void dccg31_set_dispclk_change_mode(
void dccg31_init(struct dccg *dccg)
{
+ /* Set HPO stream encoder to use refclk to avoid case where PHY is
+ * disabled and SYMCLK32 for HPO SE is sourced from PHYD32CLK which
+ * will cause DCN to hang.
+ */
+ dccg31_disable_symclk32_se(dccg, 0);
+ dccg31_disable_symclk32_se(dccg, 1);
+ dccg31_disable_symclk32_se(dccg, 2);
+ dccg31_disable_symclk32_se(dccg, 3);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) {
+ dccg31_disable_symclk32_le(dccg, 0);
+ dccg31_disable_symclk32_le(dccg, 1);
+ }
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ dccg31_disable_dpstreamclk(dccg, 0);
+ dccg31_disable_dpstreamclk(dccg, 1);
+ dccg31_disable_dpstreamclk(dccg, 2);
+ dccg31_disable_dpstreamclk(dccg, 3);
+ }
}
static const struct dccg_funcs dccg31_funcs = {
- .update_dpp_dto = dccg2_update_dpp_dto,
+ .update_dpp_dto = dccg31_update_dpp_dto,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
.dccg_init = dccg31_init,
+ .set_dpstreamclk = dccg31_set_dpstreamclk,
+ .enable_symclk32_se = dccg31_enable_symclk32_se,
+ .disable_symclk32_se = dccg31_disable_symclk32_se,
+ .enable_symclk32_le = dccg31_enable_symclk32_le,
+ .disable_symclk32_le = dccg31_disable_symclk32_le,
.set_physymclk = dccg31_set_physymclk,
.set_dtbclk_dto = dccg31_set_dtbclk_dto,
.set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
.set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
+ .disable_dsc = dccg31_disable_dscclk,
+ .enable_dsc = dccg31_enable_dscclk,
};
struct dccg *dccg31_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
index 706ad80ba873..a013a32bbaf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
@@ -61,7 +61,13 @@
SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
SR(DCCG_AUDIO_DTO_SOURCE),\
- SR(DENTIST_DISPCLK_CNTL)
+ SR(DENTIST_DISPCLK_CNTL),\
+ SR(DSCCLK0_DTO_PARAM),\
+ SR(DSCCLK1_DTO_PARAM),\
+ SR(DSCCLK2_DTO_PARAM),\
+ SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL3),\
+ SR(HDMISTREAMCLK0_DTO_PARAM)
#define DCCG_MASK_SH_LIST_DCN31(mask_sh) \
@@ -119,7 +125,26 @@
DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, DIV, 3, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
- DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh)
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh), \
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh)
struct dccg *dccg31_create(
@@ -130,6 +155,29 @@ struct dccg *dccg31_create(
void dccg31_init(struct dccg *dccg);
+void dccg31_set_dpstreamclk(
+ struct dccg *dccg,
+ enum hdmistreamclk_source src,
+ int otg_inst);
+
+void dccg31_enable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst,
+ enum phyd32clk_clock_source phyd32clk);
+
+void dccg31_disable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst);
+
+void dccg31_enable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ enum phyd32clk_clock_source phyd32clk);
+
+void dccg31_disable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst);
+
void dccg31_set_physymclk(
struct dccg *dccg,
int phy_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index b0892443fbd5..ee6f13bef377 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -325,6 +325,10 @@ void dcn31_link_encoder_construct(
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE;
+ enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
+ enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
+ enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
@@ -362,19 +366,79 @@ void dcn31_link_encoder_construct_minimal(
SIGNAL_TYPE_EDP;
}
+/* DPIA equivalent of link_transmitter_control. */
+static bool link_dpia_control(struct dc_context *dc_ctx,
+ struct dmub_cmd_dig_dpia_control_data *dpia_control)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_dmub_srv *dmub = dc_ctx->dmub_srv;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.dig1_dpia_control.header.type = DMUB_CMD__DPIA;
+ cmd.dig1_dpia_control.header.sub_type =
+ DMUB_CMD__DPIA_DIG1_DPIA_CONTROL;
+ cmd.dig1_dpia_control.header.payload_bytes =
+ sizeof(cmd.dig1_dpia_control) -
+ sizeof(cmd.dig1_dpia_control.header);
+
+ cmd.dig1_dpia_control.dpia_control = *dpia_control;
+
+ dc_dmub_srv_cmd_queue(dmub, &cmd);
+ dc_dmub_srv_cmd_execute(dmub);
+ dc_dmub_srv_wait_idle(dmub);
+
+ return true;
+}
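+
+/* The DMUB round trip above is synchronous: queue the command, kick the
+ * ring, then wait for DMUB to go idle, so the DPIA state is committed
+ * before the caller continues.
+ */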
+
+static void link_encoder_disable(struct dcn10_link_encoder *enc10)
+{
+ /* reset training complete */
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+}
+
void dcn31_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
/* Enable transmitter and encoder. */
- if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) {
+ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) {
dcn20_link_encoder_enable_dp_output(enc, link_settings, clock_source);
} else {
- /** @todo Handle transmitter with programmable mapping to link encoder. */
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+ struct dc_link *link;
+
+ link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = 0; /* 0 for SST; 5 for MST */
+ dpia_control.lanenum = (uint8_t)link_settings->lane_count;
+ dpia_control.symclk_10khz = link_settings->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ / 10;
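+ /* link_rate is the raw DP bandwidth code in 0.27 Gbps units; assuming
+ * LINK_RATE_REF_FREQ_IN_KHZ is the 27 MHz DP reference (27000 kHz),
+ * this yields the symbol clock in 10 kHz units, e.g. HBR (0x0A = 10):
+ * 10 * 27000 / 10 = 27000 -> 270 MHz.
+ */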
+ /* DIG_BE_CNTL.DIG_HPD_SELECT is programmed with (hpdsel - 1); writing
+ * hpdsel = 6 selects the value 5, which marks the HPD pin as unused
+ * by DPIA.
+ */
+ dpia_control.hpdsel = 6;
+
+ if (link) {
+ dpia_control.dpia_id = link->ddc_hw_inst;
+ dpia_control.fec_rdy = dc_link_should_enable_fec(link);
+ } else {
+ DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ link_dpia_control(enc->ctx, &dpia_control);
}
}
@@ -383,14 +447,43 @@ void dcn31_link_encoder_enable_dp_mst_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
/* Enable transmitter and encoder. */
- if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) {
+ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) {
dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
} else {
- /** @todo Handle transmitter with programmable mapping to link encoder. */
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+ struct dc_link *link;
+
+ link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = 5; /* 0 for SST; 5 for MST */
+ dpia_control.lanenum = (uint8_t)link_settings->lane_count;
+ dpia_control.symclk_10khz = link_settings->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ / 10;
+ /* DIG_BE_CNTL.DIG_HPD_SELECT is programmed with (hpdsel - 1); writing
+ * hpdsel = 6 selects the value 5, which marks the HPD pin as unused
+ * by DPIA.
+ */
+ dpia_control.hpdsel = 6;
+
+ if (link) {
+ dpia_control.dpia_id = link->ddc_hw_inst;
+ dpia_control.fec_rdy = dc_link_should_enable_fec(link);
+ } else {
+ DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ link_dpia_control(enc->ctx, &dpia_control);
}
}
@@ -398,14 +491,45 @@ void dcn31_link_encoder_disable_output(
struct link_encoder *enc,
enum signal_type signal)
{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
/* Disable transmitter and encoder. */
- if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) {
+ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) {
dcn10_link_encoder_disable_output(enc, signal);
} else {
- /** @todo Handle transmitter with programmable mapping to link encoder. */
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+ struct dc_link *link;
+
+ if (!dcn10_is_dig_enabled(enc))
+ return;
+
+ link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_DISABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ dpia_control.mode_laneset.digmode = 0; /* 0 for SST; 5 for MST */
+ } else if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dpia_control.mode_laneset.digmode = 5; /* 0 for SST; 5 for MST */
+ } else {
+ DC_LOG_ERROR("%s: USB4 DPIA only supports DisplayPort.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ }
+
+ if (link) {
+ dpia_control.dpia_id = link->ddc_hw_inst;
+ } else {
+ DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ link_dpia_control(enc->ctx, &dpia_control);
+
+ link_encoder_disable(enc10);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
new file mode 100644
index 000000000000..6c08e21bb708
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dcn31_hpo_dp_link_encoder.h"
+#include "reg_helper.h"
+#include "dc_link.h"
+#include "stream_encoder.h"
+
+#define DC_LOGGER \
+ enc3->base.ctx->logger
+
+#define REG(reg)\
+ (enc3->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc3->hpo_le_shift->field_name, enc3->hpo_le_mask->field_name
+
+
+#define CTX \
+ enc3->base.ctx
+
+enum {
+ DP_SAT_UPDATE_MAX_RETRY = 200
+};
+
+void dcn31_hpo_dp_link_enc_enable(
+ struct hpo_dp_link_encoder *enc,
+ enum dc_lane_count num_lanes)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t dp_link_enabled;
+
+ /* get current status of link enabled */
+ REG_GET(DP_DPHY_SYM32_STATUS,
+ STATUS, &dp_link_enabled);
+
+ /* Enable clocks first */
+ REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 1);
+
+ /* Reset DPHY. Only reset if going from disable to enable */
+ if (!dp_link_enabled) {
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 1);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 0);
+ }
+
+ /* Configure DPHY settings */
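+ /* NUM_LANES field encoding: 0 = 1 lane, 1 = 2 lanes, 3 = 4 lanes */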
+ REG_UPDATE_3(DP_DPHY_SYM32_CONTROL,
+ DPHY_ENABLE, 1,
+ PRECODER_ENABLE, 1,
+ NUM_LANES, num_lanes == LANE_COUNT_ONE ? 0 : num_lanes == LANE_COUNT_TWO ? 1 : 3);
+}
+
+void dcn31_hpo_dp_link_enc_disable(
+ struct hpo_dp_link_encoder *enc)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+
+ /* Configure DPHY settings */
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ DPHY_ENABLE, 0);
+
+ /* Shut down clock last */
+ REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 0);
+}
+
+void dcn31_hpo_dp_link_enc_set_link_test_pattern(
+ struct hpo_dp_link_encoder *enc,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t tp_custom;
+
+ switch (tp_params->dp_phy_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_LINK_ACTIVE);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE:
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_LINK_TRAINING_TPS1);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE:
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_LINK_TRAINING_TPS2);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS1:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_TPS1,
+ TP_SELECT1, DP_DPHY_TP_SELECT_TPS1,
+ TP_SELECT2, DP_DPHY_TP_SELECT_TPS1,
+ TP_SELECT3, DP_DPHY_TP_SELECT_TPS1);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_TPS2,
+ TP_SELECT1, DP_DPHY_TP_SELECT_TPS2,
+ TP_SELECT2, DP_DPHY_TP_SELECT_TPS2,
+ TP_SELECT3, DP_DPHY_TP_SELECT_TPS2);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS7,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS7,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS7,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS7);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS9:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS9,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS9,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS9,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS9);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS11:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS11,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS11,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS11,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS11);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS15:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS15,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS15,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS15,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS15);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS23:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS23,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS23,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS23,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS23);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS31:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS31,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS31,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS31,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS31);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_264BIT_CUSTOM:
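+ /* The 264-bit custom pattern is 33 bytes, packed little-endian three
+ * bytes at a time into eleven 24-bit TP_CUSTOM registers
+ * (11 * 24 = 264 bits).
+ */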
+ tp_custom = (tp_params->custom_pattern[2] << 16) | (tp_params->custom_pattern[1] << 8) | tp_params->custom_pattern[0];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM0, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[5] << 16) | (tp_params->custom_pattern[4] << 8) | tp_params->custom_pattern[3];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM1, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[8] << 16) | (tp_params->custom_pattern[7] << 8) | tp_params->custom_pattern[6];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM2, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[11] << 16) | (tp_params->custom_pattern[10] << 8) | tp_params->custom_pattern[9];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM3, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[14] << 16) | (tp_params->custom_pattern[13] << 8) | tp_params->custom_pattern[12];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM4, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[17] << 16) | (tp_params->custom_pattern[16] << 8) | tp_params->custom_pattern[15];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM5, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[20] << 16) | (tp_params->custom_pattern[19] << 8) | tp_params->custom_pattern[18];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM6, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[23] << 16) | (tp_params->custom_pattern[22] << 8) | tp_params->custom_pattern[21];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM7, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[26] << 16) | (tp_params->custom_pattern[25] << 8) | tp_params->custom_pattern[24];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM8, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[29] << 16) | (tp_params->custom_pattern[28] << 8) | tp_params->custom_pattern[27];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM9, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[32] << 16) | (tp_params->custom_pattern[31] << 8) | tp_params->custom_pattern[30];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM10, 0, TP_CUSTOM, tp_custom);
+
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_CUSTOM,
+ TP_SELECT1, DP_DPHY_TP_SELECT_CUSTOM,
+ TP_SELECT2, DP_DPHY_TP_SELECT_CUSTOM,
+ TP_SELECT3, DP_DPHY_TP_SELECT_CUSTOM);
+
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_SQUARE_PULSE:
+ REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0,
+ TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]);
+
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_SQUARE,
+ TP_SELECT1, DP_DPHY_TP_SELECT_SQUARE,
+ TP_SELECT2, DP_DPHY_TP_SELECT_SQUARE,
+ TP_SELECT3, DP_DPHY_TP_SELECT_SQUARE);
+
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ default:
+ break;
+ }
+}
+
+static void fill_stream_allocation_row_info(
+ const struct link_mst_stream_allocation *stream_allocation,
+ uint32_t *src,
+ uint32_t *slots)
+{
+ const struct hpo_dp_stream_encoder *stream_enc = stream_allocation->hpo_dp_stream_enc;
+
+ if (stream_enc && (stream_enc->id >= ENGINE_ID_HPO_DP_0)) {
+ *src = stream_enc->id - ENGINE_ID_HPO_DP_0;
+ *slots = stream_allocation->slot_count;
+ } else {
+ *src = 0;
+ *slots = 0;
+ }
+}
+
+/* programs DP VC payload allocation */
+void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
+ struct hpo_dp_link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t slots = 0;
+ uint32_t src = 0;
+
+ /* --- Set MSE Stream Attribute ---
+ * Set up the VC payload table on the TX side, then issue the
+ * allocation change trigger to commit the payload on both the
+ * TX and RX side.
+ */
+
+ /* we should clean-up table each time */
+
+ if (table->stream_count >= 1) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[0],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC0,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ if (table->stream_count >= 2) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[1],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC1,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ if (table->stream_count >= 3) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[2],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC2,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ if (table->stream_count >= 4) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[3],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC3,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ /* --- wait for transaction finish */
+
+ /* Send the allocation change trigger (ACT): this first transmits the
+ * ACT, then double-buffers the SAT into the hardware, making the new
+ * allocation active on the DP MST link.
+ */
+
+ /* SAT_UPDATE:
+ * 0 - No Action
+ * 1 - Update SAT with trigger
+ * 2 - Update SAT without trigger
+ */
+ REG_UPDATE(DP_DPHY_SYM32_SAT_UPDATE,
+ SAT_UPDATE, 1);
+
+ /* wait for update to complete
+ * (i.e. SAT_UPDATE_PENDING field is set to 0)
+ * No need for HW to enforce keepout.
+ */
+ /* Best case and worst case wait time for SAT_UPDATE_PENDING
+ * best: 109 us
+ * worst: 868 us
+ */
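+ /* 10 us poll interval * DP_SAT_UPDATE_MAX_RETRY (200) gives a 2 ms
+ * budget, covering the worst case with margin.
+ */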
+ REG_WAIT(DP_DPHY_SYM32_STATUS,
+ SAT_UPDATE_PENDING, 0,
+ 10, DP_SAT_UPDATE_MAX_RETRY);
+}
+
+void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
+ struct hpo_dp_link_encoder *enc,
+ uint32_t stream_encoder_inst,
+ struct fixed31_32 avg_time_slots_per_mtp)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t x = dc_fixpt_floor(
+ avg_time_slots_per_mtp);
+ uint32_t y = dc_fixpt_ceil(
+ dc_fixpt_shl(
+ dc_fixpt_sub_int(
+ avg_time_slots_per_mtp,
+ x),
+ 25));
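+ /* Throttled VCP size is programmed as X.Y fixed point: X is the
+ * integer number of time slots per MTP, Y the fractional remainder
+ * scaled by 2^25, e.g. 10.5 slots -> X = 10, Y = 0.5 * 2^25 = 16777216.
+ */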
+
+ switch (stream_encoder_inst) {
+ case 0:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ case 1:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ case 2:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ case 3:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ /* Best case and worst case wait time for RATE_UPDATE_PENDING
+ * best: 116 ns
+ * worst: 903 ns
+ */
+ /* wait for update to be completed on the link */
+ REG_WAIT(DP_DPHY_SYM32_STATUS,
+ RATE_UPDATE_PENDING, 0,
+ 1, 10);
+}
+
+static bool dcn31_hpo_dp_link_enc_is_in_alt_mode(
+ struct hpo_dp_link_encoder *enc)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t dp_alt_mode_disable = 0;
+
+ ASSERT((enc->transmitter >= TRANSMITTER_UNIPHY_A) && (enc->transmitter <= TRANSMITTER_UNIPHY_E));
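+ /* RDPCSTX_PHY_CNTL6 is instanced per UNIPHY; transmitters A-E map to
+ * indices 0-4.
+ */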
+
+ /* if value == 1 alt mode is disabled, otherwise it is enabled */
+ REG_GET(RDPCSTX_PHY_CNTL6[enc->transmitter], RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ return (dp_alt_mode_disable == 0);
+}
+
+void dcn31_hpo_dp_link_enc_read_state(
+ struct hpo_dp_link_encoder *enc,
+ struct hpo_dp_link_enc_state *state)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+
+ ASSERT(state);
+
+ REG_GET(DP_DPHY_SYM32_STATUS,
+ STATUS, &state->link_enc_enabled);
+ REG_GET(DP_DPHY_SYM32_CONTROL,
+ NUM_LANES, &state->lane_count);
+ REG_GET(DP_DPHY_SYM32_CONTROL,
+ MODE, (uint32_t *)&state->link_mode);
+
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC0,
+ SAT_STREAM_SOURCE, &state->stream_src[0],
+ SAT_SLOT_COUNT, &state->slot_count[0]);
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC1,
+ SAT_STREAM_SOURCE, &state->stream_src[1],
+ SAT_SLOT_COUNT, &state->slot_count[1]);
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC2,
+ SAT_STREAM_SOURCE, &state->stream_src[2],
+ SAT_SLOT_COUNT, &state->slot_count[2]);
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC3,
+ SAT_STREAM_SOURCE, &state->stream_src[3],
+ SAT_SLOT_COUNT, &state->slot_count[3]);
+
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL0,
+ STREAM_VC_RATE_X, &state->vc_rate_x[0],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[0]);
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL1,
+ STREAM_VC_RATE_X, &state->vc_rate_x[1],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[1]);
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL2,
+ STREAM_VC_RATE_X, &state->vc_rate_x[2],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[2]);
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL3,
+ STREAM_VC_RATE_X, &state->vc_rate_x[3],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[3]);
+}
+
+static enum bp_result link_transmitter_control(
+ struct dcn31_hpo_dp_link_encoder *enc3,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result;
+ struct dc_bios *bp = enc3->base.ctx->dc_bios;
+
+ result = bp->funcs->transmitter_control(bp, cntl);
+
+ return result;
+}
+
+/* enables DP PHY output for 128b132b encoding */
+void dcn31_hpo_dp_link_enc_enable_dp_output(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum transmitter transmitter)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Set the transmitter */
+ enc3->base.transmitter = transmitter;
+
+ /* Enable the PHY */
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = ENGINE_ID_UNKNOWN;
+ cntl.transmitter = enc3->base.transmitter;
+ //cntl.pll_id = clock_source;
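+ /* 128b/132b channel coding always runs in MST transport mode, so the
+ * signal type is MST even when driving a single stream.
+ */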
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc3->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate * 1000;
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+ cntl.hpo_engine_id = enc->inst + ENGINE_ID_HPO_DP_0;
+
+ result = link_transmitter_control(enc3, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+void dcn31_hpo_dp_link_enc_disable_output(
+ struct hpo_dp_link_encoder *enc,
+ enum signal_type signal)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* disable transmitter */
+ cntl.action = TRANSMITTER_CONTROL_DISABLE;
+ cntl.transmitter = enc3->base.transmitter;
+ cntl.hpd_sel = enc3->base.hpd_source;
+ cntl.signal = signal;
+
+ result = link_transmitter_control(enc3, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* disable encoder */
+ dcn31_hpo_dp_link_enc_disable(enc);
+}
+
+void dcn31_hpo_dp_link_enc_set_ffe(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t ffe_preset)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* set FFE preset via transmitter control */
+ cntl.transmitter = enc3->base.transmitter;
+ cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.pixel_clock = link_settings->link_rate * 1000;
+ cntl.lane_settings = ffe_preset;
+
+ result = link_transmitter_control(enc3, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static struct hpo_dp_link_encoder_funcs dcn31_hpo_dp_link_encoder_funcs = {
+ .enable_link_phy = dcn31_hpo_dp_link_enc_enable_dp_output,
+ .disable_link_phy = dcn31_hpo_dp_link_enc_disable_output,
+ .link_enable = dcn31_hpo_dp_link_enc_enable,
+ .link_disable = dcn31_hpo_dp_link_enc_disable,
+ .set_link_test_pattern = dcn31_hpo_dp_link_enc_set_link_test_pattern,
+ .update_stream_allocation_table = dcn31_hpo_dp_link_enc_update_stream_allocation_table,
+ .set_throttled_vcp_size = dcn31_hpo_dp_link_enc_set_throttled_vcp_size,
+ .is_in_alt_mode = dcn31_hpo_dp_link_enc_is_in_alt_mode,
+ .read_state = dcn31_hpo_dp_link_enc_read_state,
+ .set_ffe = dcn31_hpo_dp_link_enc_set_ffe,
+};
+
+void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs,
+ const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift,
+ const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask)
+{
+ enc31->base.ctx = ctx;
+
+ enc31->base.inst = inst;
+ enc31->base.funcs = &dcn31_hpo_dp_link_encoder_funcs;
+ enc31->base.hpd_source = HPD_SOURCEID_UNKNOWN;
+ enc31->base.transmitter = TRANSMITTER_UNKNOWN;
+
+ enc31->regs = hpo_le_regs;
+ enc31->hpo_le_shift = hpo_le_shift;
+ enc31->hpo_le_mask = hpo_le_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
new file mode 100644
index 000000000000..0706ccaf6fec
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_HPO_DP_LINK_ENCODER_H__
+#define __DAL_DCN31_HPO_DP_LINK_ENCODER_H__
+
+#include "link_encoder.h"
+
+
+#define DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(hpo_dp_link_encoder)\
+ container_of(hpo_dp_link_encoder, struct dcn31_hpo_dp_link_encoder, base)
+
+
+#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id) \
+ SRI(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \
+ SRI(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id)
+
+#define DCN3_1_RDPCSTX_REG_LIST(id) \
+ SRII(RDPCSTX_PHY_CNTL6, RDPCSTX, id)
+
+
+#define DCN3_1_HPO_DP_LINK_ENC_REGS \
+ uint32_t DP_LINK_ENC_CLOCK_CONTROL;\
+ uint32_t DP_DPHY_SYM32_CONTROL;\
+ uint32_t DP_DPHY_SYM32_STATUS;\
+ uint32_t DP_DPHY_SYM32_TP_CONFIG;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED0;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED1;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED2;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED3;\
+ uint32_t DP_DPHY_SYM32_TP_SQ_PULSE;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM0;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM1;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM2;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM3;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM4;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM5;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM6;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM7;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM8;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM9;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM10;\
+ uint32_t DP_DPHY_SYM32_SAT_VC0;\
+ uint32_t DP_DPHY_SYM32_SAT_VC1;\
+ uint32_t DP_DPHY_SYM32_SAT_VC2;\
+ uint32_t DP_DPHY_SYM32_SAT_VC3;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL0;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL1;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL2;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL3;\
+ uint32_t DP_DPHY_SYM32_SAT_UPDATE
+
+struct dcn31_hpo_dp_link_encoder_registers {
+ DCN3_1_HPO_DP_LINK_ENC_REGS;
+ uint32_t RDPCSTX_PHY_CNTL6[5];
+};
+
+#define DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(mask_sh)\
+ SE_SF(DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_RESET, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_ENABLE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, PRECODER_ENABLE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, MODE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, NUM_LANES, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, STATUS, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, SAT_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0, TP_CUSTOM, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT1, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT2, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\
+ SE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_Y, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE, SAT_UPDATE, mask_sh)
+
+#define DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(type) \
+ type DP_LINK_ENC_CLOCK_EN;\
+ type DPHY_RESET;\
+ type DPHY_ENABLE;\
+ type PRECODER_ENABLE;\
+ type NUM_LANES;\
+ type MODE;\
+ type STATUS;\
+ type SAT_UPDATE_PENDING;\
+ type RATE_UPDATE_PENDING;\
+ type TP_CUSTOM;\
+ type TP_SELECT0;\
+ type TP_SELECT1;\
+ type TP_SELECT2;\
+ type TP_SELECT3;\
+ type TP_PRBS_SEL0;\
+ type TP_PRBS_SEL1;\
+ type TP_PRBS_SEL2;\
+ type TP_PRBS_SEL3;\
+ type TP_SQ_PULSE_WIDTH;\
+ type SAT_STREAM_SOURCE;\
+ type SAT_SLOT_COUNT;\
+ type STREAM_VC_RATE_X;\
+ type STREAM_VC_RATE_Y;\
+ type SAT_UPDATE;\
+ type RDPCS_PHY_DPALT_DISABLE
+
+
+struct dcn31_hpo_dp_link_encoder_shift {
+ DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_hpo_dp_link_encoder_mask {
+ DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_hpo_dp_link_encoder {
+ struct hpo_dp_link_encoder base;
+ const struct dcn31_hpo_dp_link_encoder_registers *regs;
+ const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift;
+ const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask;
+};
+
+void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs,
+ const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift,
+ const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask);
+
+void dcn31_hpo_dp_link_enc_enable_dp_output(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum transmitter transmitter);
+
+void dcn31_hpo_dp_link_enc_disable_output(
+ struct hpo_dp_link_encoder *enc,
+ enum signal_type signal);
+
+void dcn31_hpo_dp_link_enc_enable(
+ struct hpo_dp_link_encoder *enc,
+ enum dc_lane_count num_lanes);
+
+void dcn31_hpo_dp_link_enc_disable(
+ struct hpo_dp_link_encoder *enc);
+
+void dcn31_hpo_dp_link_enc_set_link_test_pattern(
+ struct hpo_dp_link_encoder *enc,
+ struct encoder_set_dp_phy_pattern_param *tp_params);
+
+void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
+ struct hpo_dp_link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+
+void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
+ struct hpo_dp_link_encoder *enc,
+ uint32_t stream_encoder_inst,
+ struct fixed31_32 avg_time_slots_per_mtp);
+
+void dcn31_hpo_dp_link_enc_read_state(
+ struct hpo_dp_link_encoder *enc,
+ struct hpo_dp_link_enc_state *state);
+
+void dcn31_hpo_dp_link_enc_set_ffe(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t ffe_preset);
+
+#endif // __DAL_DCN31_HPO_DP_LINK_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
new file mode 100644
index 000000000000..565f12dd179a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -0,0 +1,752 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dcn31_hpo_dp_stream_encoder.h"
+#include "reg_helper.h"
+#include "dc_link.h"
+
+#define DC_LOGGER \
+ enc3->base.ctx->logger
+
+#define REG(reg)\
+ (enc3->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc3->hpo_se_shift->field_name, enc3->hpo_se_mask->field_name
+
+#define CTX \
+ enc3->base.ctx
+
+
+enum dp2_pixel_encoding {
+ DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444,
+ DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422,
+ DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420,
+ DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY
+};
+
+enum dp2_uncompressed_component_depth {
+ DP_SYM32_ENC_COMPONENT_DEPTH_6BPC,
+ DP_SYM32_ENC_COMPONENT_DEPTH_8BPC,
+ DP_SYM32_ENC_COMPONENT_DEPTH_10BPC,
+ DP_SYM32_ENC_COMPONENT_DEPTH_12BPC
+};
+
+
+static void dcn31_hpo_dp_stream_enc_enable_stream(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Enable all clocks in the DP_STREAM_ENC */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL,
+ DP_STREAM_ENC_CLOCK_EN, 1);
+
+ /* Assert reset to the DP_SYM32_ENC logic */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET, 1);
+ /* Wait for reset to complete (to assert) */
+ REG_WAIT(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET_DONE, 1,
+ 1, 10);
+
+ /* De-assert reset to the DP_SYM32_ENC logic */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET, 0);
+ /* Wait for reset to de-assert */
+ REG_WAIT(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET_DONE, 0,
+ 1, 10);
+
+ /* Enable idle pattern generation */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_ENABLE, 1);
+}
+
+static void dcn31_hpo_dp_stream_enc_dp_unblank(
+ struct hpo_dp_stream_encoder *enc,
+ uint32_t stream_source)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Set the input mux for video stream source */
+ REG_UPDATE(DP_STREAM_ENC_INPUT_MUX_CONTROL,
+ DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, stream_source);
+
+ /* Enable video transmission in main framer */
+ REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_ENABLE, 1);
+
+ /* Reset and Enable Pixel to Symbol FIFO */
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_RESET, 1);
+ REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 1,
+ 1, 10);
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_RESET, 0);
+ REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, /* wait for reset to de-assert */
+ PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 0,
+ 1, 10);
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_ENABLE, 1);
+
+ /* Reset and Enable Clock Ramp Adjuster FIFO */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET, 1);
+ REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET_DONE, 1,
+ 1, 10);
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET, 0);
+ REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET_DONE, 0,
+ 1, 10);
+
+ /* For Debug -- Enable CRC */
+ REG_UPDATE_2(DP_SYM32_ENC_VID_CRC_CONTROL,
+ CRC_ENABLE, 1,
+ CRC_CONT_MODE_ENABLE, 1);
+
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_ENABLE, 1);
+}
+
+static void dcn31_hpo_dp_stream_enc_dp_blank(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Disable video transmission */
+ REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_ENABLE, 0);
+
+ /* Wait for video stream transmission disabled
+ * Larger delay to wait until VBLANK - use max retry of
+ * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
+ * a little more because we may not trust delay accuracy.
+ */
+ //REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ // VID_STREAM_STATUS, 0,
+ // 10, 5000);
+
+ /* Disable SDP transmission */
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 0);
+
+ /* Disable Pixel to Symbol FIFO */
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_ENABLE, 0);
+
+ /* Disable Clock Ramp Adjuster FIFO */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_ENABLE, 0);
+}
+
+static void dcn31_hpo_dp_stream_enc_disable(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Disable DP_SYM32_ENC */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_ENABLE, 0);
+
+ /* Disable clocks in the DP_STREAM_ENC */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL,
+ DP_STREAM_ENC_CLOCK_EN, 0);
+}
+
+static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
+ struct hpo_dp_stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
+ bool compressed_format,
+ bool double_buffer_en)
+{
+ enum dp2_pixel_encoding pixel_encoding;
+ enum dp2_uncompressed_component_depth component_depth;
+ uint32_t h_active_start;
+ uint32_t v_active_start;
+ uint32_t h_blank;
+ uint32_t h_back_porch;
+ uint32_t h_width;
+ uint32_t v_height;
+ unsigned long long v_freq;
+ uint8_t misc0 = 0;
+ uint8_t misc1 = 0;
+ uint8_t hsp;
+ uint8_t vsp;
+
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ struct dc_crtc_timing hw_crtc_timing = *crtc_timing;
+
+ /* MISC0[0] = 0 video and link clocks are asynchronous
+ * MISC1[0] = 0 interlace not supported
+ * MISC1[2:1] = 0 stereo field is handled by hardware
+ * MISC1[5:3] = 0 Reserved
+ */
+
+ /* Interlaced not supported */
+ if (hw_crtc_timing.flags.INTERLACE) {
+ BREAK_TO_DEBUGGER();
+ }
+
+ /* Double buffer enable for MSA and pixel format registers
+ * Only double buffer for changing stream attributes for active streams
+ * Do not double buffer when initially enabling a stream
+ */
+ REG_UPDATE(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL,
+ MSA_DOUBLE_BUFFER_ENABLE, double_buffer_en);
+ REG_UPDATE(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL,
+ PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, double_buffer_en);
+
+ /* Pixel Encoding */
+ switch (hw_crtc_timing.pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422;
+ misc0 = misc0 | 0x2; // MISC0[2:1] = 01
+ break;
+ case PIXEL_ENCODING_YCBCR444:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444;
+ misc0 = misc0 | 0x4; // MISC0[2:1] = 10
+
+ if (hw_crtc_timing.flags.Y_ONLY) {
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY;
+ if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) {
+ /* HW testing only, no use case yet.
+ * Color depth of Y-only could be
+ * 8, 10, 12, 16 bits
+ */
+ misc1 = misc1 | 0x80; // MISC1[7] = 1
+ }
+ }
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420;
+ misc1 = misc1 | 0x40; // MISC1[6] = 1
+ break;
+ case PIXEL_ENCODING_RGB:
+ default:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444;
+ break;
+ }
+
+ /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
+ * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the
+ * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
+ * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
+ */
+ if (use_vsc_sdp_for_colorimetry)
+ misc1 = misc1 | 0x40;
+ else
+ misc1 = misc1 & ~0x40;
+
+ /* Color depth */
+ switch (hw_crtc_timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC;
+ // MISC0[7:5] = 000
+ break;
+ case COLOR_DEPTH_888:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_8BPC;
+ misc0 = misc0 | 0x20; // MISC0[7:5] = 001
+ break;
+ case COLOR_DEPTH_101010:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_10BPC;
+ misc0 = misc0 | 0x40; // MISC0[7:5] = 010
+ break;
+ case COLOR_DEPTH_121212:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_12BPC;
+ misc0 = misc0 | 0x60; // MISC0[7:5] = 011
+ break;
+ default:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC;
+ break;
+ }
+
+ REG_UPDATE_3(DP_SYM32_ENC_VID_PIXEL_FORMAT,
+ PIXEL_ENCODING_TYPE, compressed_format,
+ UNCOMPRESSED_PIXEL_ENCODING, pixel_encoding,
+ UNCOMPRESSED_COMPONENT_DEPTH, component_depth);
+
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_XR_RGB:
+ case COLOR_SPACE_MSREF_SCRGB:
+ case COLOR_SPACE_ADOBERGB:
+ case COLOR_SPACE_DCIP3:
+ case COLOR_SPACE_XV_YCC_709:
+ case COLOR_SPACE_XV_YCC_601:
+ case COLOR_SPACE_DISPLAYNATIVE:
+ case COLOR_SPACE_DOLBYVISION:
+ case COLOR_SPACE_APPCTRL:
+ case COLOR_SPACE_CUSTOMPOINTS:
+ case COLOR_SPACE_UNKNOWN:
+ case COLOR_SPACE_YCBCR709_BLACK:
+ /* do nothing */
+ break;
+ }
+
+ /* calculate from vesa timing parameters
+ * h_active_start related to leading edge of sync
+ */
+ h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left -
+ hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right;
+
+ h_back_porch = h_blank - hw_crtc_timing.h_front_porch -
+ hw_crtc_timing.h_sync_width;
+
+ /* start at beginning of left border */
+ h_active_start = hw_crtc_timing.h_sync_width + h_back_porch;
+
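+ /* v_active_start = v_sync_width + vertical back porch, i.e. the line
+ * count from the vsync leading edge to the start of active video
+ */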
+ v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top -
+ hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom -
+ hw_crtc_timing.v_front_porch;
+
+ h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right;
+ v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
+ hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0x80 : 0;
+ vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0x80 : 0;
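+ /* stream pixel clock in Hz, carried as the 48-bit MSA VFREQ field */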
+ v_freq = hw_crtc_timing.pix_clk_100hz * 100;
+
+ /* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
+ *
+ * Lane 0 Lane 1 Lane 2 Lane 3
+ * MSA[0] = { 0, 0, 0, VFREQ[47:40]}
+ * MSA[1] = { 0, 0, 0, VFREQ[39:32]}
+ * MSA[2] = { 0, 0, 0, VFREQ[31:24]}
+ * MSA[3] = { HTotal[15:8], HStart[15:8], HWidth[15:8], VFREQ[23:16]}
+ * MSA[4] = { HTotal[ 7:0], HStart[ 7:0], HWidth[ 7:0], VFREQ[15: 8]}
+ * MSA[5] = { VTotal[15:8], VStart[15:8], VHeight[15:8], VFREQ[ 7: 0]}
+ * MSA[6] = { VTotal[ 7:0], VStart[ 7:0], VHeight[ 7:0], MISC0[ 7: 0]}
+ * MSA[7] = { HSP|HSW[14:8], VSP|VSW[14:8], 0, MISC1[ 7: 0]}
+ * MSA[8] = { HSW[ 7:0], VSW[ 7:0], 0, 0}
+ */
+ REG_SET_4(DP_SYM32_ENC_VID_MSA0, 0,
+ MSA_DATA_LANE_0, 0,
+ MSA_DATA_LANE_1, 0,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, v_freq >> 40);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA1, 0,
+ MSA_DATA_LANE_0, 0,
+ MSA_DATA_LANE_1, 0,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, (v_freq >> 32) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA2, 0,
+ MSA_DATA_LANE_0, 0,
+ MSA_DATA_LANE_1, 0,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, (v_freq >> 24) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA3, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.h_total >> 8,
+ MSA_DATA_LANE_1, h_active_start >> 8,
+ MSA_DATA_LANE_2, h_width >> 8,
+ MSA_DATA_LANE_3, (v_freq >> 16) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA4, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.h_total & 0xff,
+ MSA_DATA_LANE_1, h_active_start & 0xff,
+ MSA_DATA_LANE_2, h_width & 0xff,
+ MSA_DATA_LANE_3, (v_freq >> 8) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA5, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.v_total >> 8,
+ MSA_DATA_LANE_1, v_active_start >> 8,
+ MSA_DATA_LANE_2, v_height >> 8,
+ MSA_DATA_LANE_3, v_freq & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA6, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.v_total & 0xff,
+ MSA_DATA_LANE_1, v_active_start & 0xff,
+ MSA_DATA_LANE_2, v_height & 0xff,
+ MSA_DATA_LANE_3, misc0);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA7, 0,
+ MSA_DATA_LANE_0, hsp | (hw_crtc_timing.h_sync_width >> 8),
+ MSA_DATA_LANE_1, vsp | (hw_crtc_timing.v_sync_width >> 8),
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, misc1);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA8, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.h_sync_width & 0xff,
+ MSA_DATA_LANE_1, hw_crtc_timing.v_sync_width & 0xff,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, 0);
+}
+
+static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
+ struct hpo_dp_stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ uint32_t dmdata_packet_enabled = 0;
+ bool sdp_stream_enable = false;
+
+ if (info_frame->vsc.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 0, /* packetIndex */
+ &info_frame->vsc,
+ true);
+ sdp_stream_enable = true;
+ }
+ if (info_frame->spd.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 2, /* packetIndex */
+ &info_frame->spd,
+ true);
+ sdp_stream_enable = true;
+ }
+ if (info_frame->hdrsmd.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 3, /* packetIndex */
+ &info_frame->hdrsmd,
+ true);
+ sdp_stream_enable = true;
+ }
+ /* enable/disable transmission of packet(s).
+ * If enabled, packet transmission begins on the next frame
+ */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid);
+
+ /* check if dynamic metadata packet transmission is enabled */
+ REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL,
+ METADATA_PACKET_ENABLE, &dmdata_packet_enabled);
+
+ /* Enable secondary data path */
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 1);
+}
+
+static void dcn31_hpo_dp_stream_enc_stop_dp_info_packets(
+ struct hpo_dp_stream_encoder *enc)
+{
+ /* stop generic packets on DP */
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ uint32_t asp_enable = 0;
+ uint32_t atp_enable = 0;
+ uint32_t aip_enable = 0;
+ uint32_t acm_enable = 0;
+
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0);
+
+ /* Disable secondary data path if audio is also disabled */
+ REG_GET_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0,
+ ASP_ENABLE, &asp_enable,
+ ATP_ENABLE, &atp_enable,
+ AIP_ENABLE, &aip_enable,
+ ACM_ENABLE, &acm_enable);
+ if (!(asp_enable || atp_enable || aip_enable || acm_enable))
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 0);
+}
+
+static uint32_t hpo_dp_is_gsp_enabled(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ uint32_t gsp0_enabled = 0;
+ uint32_t gsp2_enabled = 0;
+ uint32_t gsp3_enabled = 0;
+ uint32_t gsp11_enabled = 0;
+
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp0_enabled);
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp2_enabled);
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp3_enabled);
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp11_enabled);
+
+ return (gsp0_enabled || gsp2_enabled || gsp3_enabled || gsp11_enabled);
+}
+
+static void dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet(
+ struct hpo_dp_stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ if (enable) {
+ struct dc_info_packet pps_sdp;
+ int i;
+
+ /* Configure for PPS packet size (128 bytes) */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_PAYLOAD_SIZE, 3);
+
+ /* Load PPS into infoframe (SDP) registers */
+ pps_sdp.valid = true;
+ pps_sdp.hb0 = 0;
+ pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS;
+ pps_sdp.hb2 = 127;
+ pps_sdp.hb3 = 0;
+
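+ /* hb2 carries (PPS payload bytes - 1) = 127; the 128-byte PPS is
+ * split into four 32-byte generic info packets at indices 11-14
+ */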
+ for (i = 0; i < 4; i++) {
+ memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32);
+ enc3->base.vpg->funcs->update_generic_info_packet(
+ enc3->base.vpg,
+ 11 + i,
+ &pps_sdp,
+ immediate_update);
+ }
+
+ /* SW should make sure VBID[6] update line number is bigger
+ * than PPS transmit line number
+ */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_TRANSMISSION_LINE_NUMBER, 2);
+
+ REG_UPDATE_2(DP_SYM32_ENC_VID_VBID_CONTROL,
+ VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, 0,
+ VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, 3);
+
+ /* Send PPS data at the line number specified above. */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 1);
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 1);
+ } else {
+ /* Disable Generic Stream Packet 11 (GSP) transmission */
+ REG_UPDATE_2(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0,
+ GSP_PAYLOAD_SIZE, 0);
+ }
+}
+
+static void dcn31_hpo_dp_stream_enc_map_stream_to_link(
+ struct hpo_dp_stream_encoder *enc,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ ASSERT(stream_enc_inst < 4 && link_enc_inst < 2);
+
+ switch (stream_enc_inst) {
+ case 0:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL0,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ case 1:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL1,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ case 2:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL2,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ case 3:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL3,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ }
+}
+
+static void dcn31_hpo_dp_stream_enc_mute_control(
+ struct hpo_dp_stream_encoder *enc,
+ bool mute)
+{
+ ASSERT(enc->apg);
+ enc->apg->funcs->audio_mute_control(enc->apg, mute);
+}
+
+static void dcn31_hpo_dp_stream_enc_audio_setup(
+ struct hpo_dp_stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Set the input mux for audio stream source */
+ REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL,
+ DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, az_inst);
+
+ ASSERT(enc->apg);
+ enc->apg->funcs->se_audio_setup(enc->apg, az_inst, info);
+}
+
+static void dcn31_hpo_dp_stream_enc_audio_enable(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Enable Audio packets */
+ REG_UPDATE(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, 1);
+
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(DP_SYM32_ENC_SDP_AUDIO_CONTROL0,
+ ATP_ENABLE, 1,
+ AIP_ENABLE, 1);
+
+ /* Enable secondary data path */
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 1);
+
+ /* Enable APG block */
+ enc->apg->funcs->enable_apg(enc->apg);
+}
+
+static void dcn31_hpo_dp_stream_enc_audio_disable(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Disable Audio packets */
+ REG_UPDATE_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0,
+ ASP_ENABLE, 0,
+ ATP_ENABLE, 0,
+ AIP_ENABLE, 0,
+ ACM_ENABLE, 0);
+
+ /* Disable SDP stream enable if all other SDP GSPs are also disabled */
+ if (!(hpo_dp_is_gsp_enabled(enc)))
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 0);
+
+ /* Disable APG block */
+ enc->apg->funcs->disable_apg(enc->apg);
+}
+
+static void dcn31_hpo_dp_stream_enc_read_state(
+ struct hpo_dp_stream_encoder *enc,
+ struct hpo_dp_stream_encoder_state *s)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ REG_GET(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_ENABLE, &s->stream_enc_enabled);
+ REG_GET(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_ENABLE, &s->vid_stream_enabled);
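+ /* the pixel stream input mux selection identifies the source OTG */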
+ REG_GET(DP_STREAM_ENC_INPUT_MUX_CONTROL,
+ DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, &s->otg_inst);
+
+ REG_GET_3(DP_SYM32_ENC_VID_PIXEL_FORMAT,
+ PIXEL_ENCODING_TYPE, &s->compressed_format,
+ UNCOMPRESSED_PIXEL_ENCODING, &s->pixel_encoding,
+ UNCOMPRESSED_COMPONENT_DEPTH, &s->component_depth);
+
+ REG_GET(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, &s->sdp_enabled);
+
+ switch (enc->inst) {
+ case 0:
+ REG_GET(DP_STREAM_MAPPER_CONTROL0,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ case 1:
+ REG_GET(DP_STREAM_MAPPER_CONTROL1,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ case 2:
+ REG_GET(DP_STREAM_MAPPER_CONTROL2,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ case 3:
+ REG_GET(DP_STREAM_MAPPER_CONTROL3,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ }
+}
+
+static const struct hpo_dp_stream_encoder_funcs dcn31_str_enc_funcs = {
+ .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream,
+ .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank,
+ .dp_blank = dcn31_hpo_dp_stream_enc_dp_blank,
+ .disable = dcn31_hpo_dp_stream_enc_disable,
+ .set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute,
+ .update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets,
+ .stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets,
+ .dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet,
+ .map_stream_to_link = dcn31_hpo_dp_stream_enc_map_stream_to_link,
+ .audio_mute_control = dcn31_hpo_dp_stream_enc_mute_control,
+ .dp_audio_setup = dcn31_hpo_dp_stream_enc_audio_setup,
+ .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable,
+ .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable,
+ .read_state = dcn31_hpo_dp_stream_enc_read_state,
+};
+
+void dcn31_hpo_dp_stream_encoder_construct(
+ struct dcn31_hpo_dp_stream_encoder *enc3,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ uint32_t inst,
+ enum engine_id eng_id,
+ struct vpg *vpg,
+ struct apg *apg,
+ const struct dcn31_hpo_dp_stream_encoder_registers *regs,
+ const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift,
+ const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask)
+{
+	enc3->base.funcs = &dcn31_str_enc_funcs;
+ enc3->base.ctx = ctx;
+ enc3->base.inst = inst;
+ enc3->base.id = eng_id;
+ enc3->base.bp = bp;
+ enc3->base.vpg = vpg;
+ enc3->base.apg = apg;
+ enc3->regs = regs;
+ enc3->hpo_se_shift = hpo_se_shift;
+ enc3->hpo_se_mask = hpo_se_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
new file mode 100644
index 000000000000..70b94fc25304
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__
+#define __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__
+
+#include "dcn30/dcn30_vpg.h"
+#include "dcn31/dcn31_apg.h"
+#include "stream_encoder.h"
+
+
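+/* Recover the dcn31 encoder wrapper from the embedded base pointer using the
+ * standard container_of() pattern.
+ */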
+#define DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(hpo_dp_stream_encoder)\
+ container_of(hpo_dp_stream_encoder, struct dcn31_hpo_dp_stream_encoder, base)
+
+
+/* Define MSA_DATA_LANE_[0-3] fields to make programming easier */
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0__SHIFT 0x0
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1__SHIFT 0x8
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2__SHIFT 0x10
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3__SHIFT 0x18
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0_MASK 0x000000FFL
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1_MASK 0x0000FF00L
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2_MASK 0x00FF0000L
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3_MASK 0xFF000000L
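+
+/* For example, with the shifts above, the four per-lane MSA bytes b0..b3
+ * pack into a single DP_SYM32_ENC_VID_MSA dword as:
+ *   msa = (b0 << DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0__SHIFT) |
+ *         (b1 << DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1__SHIFT) |
+ *         (b2 << DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2__SHIFT) |
+ *         (b3 << DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3__SHIFT);
+ */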
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id) \
+ SR(DP_STREAM_MAPPER_CONTROL0),\
+ SR(DP_STREAM_MAPPER_CONTROL1),\
+ SR(DP_STREAM_MAPPER_CONTROL2),\
+ SR(DP_STREAM_MAPPER_CONTROL3),\
+ SRI(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id),\
+ SRI(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id),\
+ SRI(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id),\
+ SRI(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id),\
+ SRI(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id)
+
+#define DCN3_1_HPO_DP_STREAM_ENC_REGS \
+ uint32_t DP_STREAM_MAPPER_CONTROL0;\
+ uint32_t DP_STREAM_MAPPER_CONTROL1;\
+ uint32_t DP_STREAM_MAPPER_CONTROL2;\
+ uint32_t DP_STREAM_MAPPER_CONTROL3;\
+ uint32_t DP_STREAM_ENC_CLOCK_CONTROL;\
+ uint32_t DP_STREAM_ENC_INPUT_MUX_CONTROL;\
+ uint32_t DP_STREAM_ENC_AUDIO_CONTROL;\
+ uint32_t DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0;\
+ uint32_t DP_SYM32_ENC_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT;\
+ uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_MSA0;\
+ uint32_t DP_SYM32_ENC_VID_MSA1;\
+ uint32_t DP_SYM32_ENC_VID_MSA2;\
+ uint32_t DP_SYM32_ENC_VID_MSA3;\
+ uint32_t DP_SYM32_ENC_VID_MSA4;\
+ uint32_t DP_SYM32_ENC_VID_MSA5;\
+ uint32_t DP_SYM32_ENC_VID_MSA6;\
+ uint32_t DP_SYM32_ENC_VID_MSA7;\
+ uint32_t DP_SYM32_ENC_VID_MSA8;\
+ uint32_t DP_SYM32_ENC_VID_MSA_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_FIFO_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_STREAM_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_VBID_CONTROL;\
+ uint32_t DP_SYM32_ENC_SDP_CONTROL;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL0;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL2;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL3;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL5;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL11;\
+ uint32_t DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL;\
+ uint32_t DP_SYM32_ENC_SDP_AUDIO_CONTROL0;\
+ uint32_t DP_SYM32_ENC_VID_CRC_CONTROL
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(mask_sh)\
+ SE_SF(DP_STREAM_MAPPER_CONTROL0, DP_STREAM_LINK_TARGET, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC_CLOCK_EN, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET_DONE, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET_DONE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, PIXEL_ENCODING_TYPE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, MSA_DOUBLE_BUFFER_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_0, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_1, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_2, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_3, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET_DONE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_PAYLOAD_SIZE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, METADATA_PACKET_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AUDIO_MUTE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ATP_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh)
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \
+ type DP_STREAM_LINK_TARGET;\
+ type DP_STREAM_ENC_CLOCK_EN;\
+ type DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL;\
+ type DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL;\
+ type FIFO_RESET;\
+ type FIFO_RESET_DONE;\
+ type FIFO_ENABLE;\
+ type DP_SYM32_ENC_RESET;\
+ type DP_SYM32_ENC_RESET_DONE;\
+ type DP_SYM32_ENC_ENABLE;\
+ type PIXEL_ENCODING_TYPE;\
+ type UNCOMPRESSED_PIXEL_ENCODING;\
+ type UNCOMPRESSED_COMPONENT_DEPTH;\
+ type PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE;\
+ type MSA_DOUBLE_BUFFER_ENABLE;\
+ type MSA_DATA_LANE_0;\
+ type MSA_DATA_LANE_1;\
+ type MSA_DATA_LANE_2;\
+ type MSA_DATA_LANE_3;\
+ type PIXEL_TO_SYMBOL_FIFO_RESET;\
+ type PIXEL_TO_SYMBOL_FIFO_RESET_DONE;\
+ type PIXEL_TO_SYMBOL_FIFO_ENABLE;\
+ type VID_STREAM_ENABLE;\
+ type VID_STREAM_STATUS;\
+ type VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE;\
+ type VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER;\
+ type SDP_STREAM_ENABLE;\
+ type AUDIO_MUTE;\
+ type ASP_ENABLE;\
+ type ATP_ENABLE;\
+ type AIP_ENABLE;\
+ type ACM_ENABLE;\
+ type GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE;\
+ type GSP_PAYLOAD_SIZE;\
+ type GSP_TRANSMISSION_LINE_NUMBER;\
+ type GSP_SOF_REFERENCE;\
+ type METADATA_PACKET_ENABLE;\
+ type CRC_ENABLE;\
+ type CRC_CONT_MODE_ENABLE
+
+
+struct dcn31_hpo_dp_stream_encoder_registers {
+ DCN3_1_HPO_DP_STREAM_ENC_REGS;
+};
+
+struct dcn31_hpo_dp_stream_encoder_shift {
+ DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_hpo_dp_stream_encoder_mask {
+ DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_hpo_dp_stream_encoder {
+ struct hpo_dp_stream_encoder base;
+ const struct dcn31_hpo_dp_stream_encoder_registers *regs;
+ const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift;
+ const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask;
+};
+
+
+void dcn31_hpo_dp_stream_encoder_construct(
+ struct dcn31_hpo_dp_stream_encoder *enc3,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ uint32_t inst,
+ enum engine_id eng_id,
+ struct vpg *vpg,
+ struct apg *apg,
+ const struct dcn31_hpo_dp_stream_encoder_registers *regs,
+ const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift,
+ const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask);
+
+
+#endif // __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 3f2333ec67e2..d24ad7754d71 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -48,6 +48,9 @@
#include "dc_link_dp.h"
#include "inc/link_dpcd.h"
#include "dcn10/dcn10_hw_sequencer.h"
+#include "inc/link_enc_cfg.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dce/dce_i2c_hw.h"
#define DC_LOGGER_INIT(logger)
@@ -71,15 +74,10 @@ void dcn31_init_hw(struct dc *dc)
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
int i, j;
- int edp_num;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
- // Initialize the dccg
- if (res_pool->dccg->funcs->dccg_init)
- res_pool->dccg->funcs->dccg_init(res_pool->dccg);
-
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
REG_WRITE(REFCLK_CNTL, 0);
@@ -106,6 +104,9 @@ void dcn31_init_hw(struct dc *dc)
hws->funcs.bios_golden_init(dc);
hws->funcs.disable_vga(dc->hwseq);
}
+ // Initialize the dccg
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
if (dc->debug.enable_mem_low_power.bits.dmcu) {
// Force ERAM to shutdown if DMCU is not enabled
@@ -125,6 +126,18 @@ void dcn31_init_hw(struct dc *dc)
REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
+ // Power down VPGs
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++)
+ dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
+#if defined(CONFIG_DRM_AMD_DC_DP2_0)
+ for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
+ dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
+#endif
+ }
+#endif
+
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
@@ -157,6 +170,9 @@ void dcn31_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
@@ -170,6 +186,10 @@ void dcn31_init_hw(struct dc *dc)
if (hws->funcs.dsc_pg_control != NULL)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ /* Enables outbox notifications for usb4 dpia */
+ if (dc->res_pool->usb4_dpia_count)
+ dmub_enable_outbox_notification(dc);
+
/* we want to turn off all dp displays before doing detection */
if (dc->config.power_down_display_on_boot) {
uint8_t dpcd_power_state = '\0';
@@ -184,7 +204,8 @@ void dcn31_init_hw(struct dc *dc)
&dpcd_power_state, sizeof(dpcd_power_state));
if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
/* blank dp stream before power off receiver*/
- if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+ if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY &&
+ dc->links[i]->link_enc->funcs->get_dig_frontend) {
unsigned int fe;
fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
@@ -194,7 +215,7 @@ void dcn31_init_hw(struct dc *dc)
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
dc->res_pool->stream_enc[j]);
break;
}
@@ -218,47 +239,6 @@ void dcn31_init_hw(struct dc *dc)
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
- /* In headless boot cases, DIG may be turned
- * on which causes HW/SW discrepancies.
- * To avoid this, power down hardware on boot
- * if DIG is turned on and seamless boot not enabled
- */
- if (dc->config.power_down_display_on_boot) {
- struct dc_link *edp_links[MAX_NUM_EDP];
- struct dc_link *edp_link;
- bool power_down = false;
-
- get_edp_links(dc, edp_links, &edp_num);
- if (edp_num) {
- for (i = 0; i < edp_num; i++) {
- edp_link = edp_links[i];
- if (edp_link->link_enc->funcs->is_dig_enabled &&
- edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
- dc->hwss.edp_backlight_control &&
- dc->hwss.power_down &&
- dc->hwss.edp_power_control) {
- dc->hwss.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
- dc->hwss.edp_power_control(edp_link, false);
- power_down = true;
- }
- }
- }
- if (!power_down) {
- for (i = 0; i < dc->link_count; i++) {
- struct dc_link *link = dc->links[i];
-
- if (link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
- break;
- }
-
- }
- }
- }
-
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -280,6 +260,10 @@ void dcn31_init_hw(struct dc *dc)
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+	// Set i2c to light sleep until engine is set up
+ if (dc->debug.enable_mem_low_power.bits.i2c)
+ REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1);
+
if (!dc->debug.disable_clock_gate) {
/* enable all DCN clock gating */
REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
@@ -303,8 +287,10 @@ void dcn31_init_hw(struct dc *dc)
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
+#endif
}
void dcn31_dsc_pg_control(
@@ -319,6 +305,12 @@ void dcn31_dsc_pg_control(
if (hws->ctx->dc->debug.disable_dsc_power_gate)
return;
+ if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc &&
+ hws->ctx->dc->res_pool->dccg->funcs->enable_dsc &&
+ power_on)
+ hws->ctx->dc->res_pool->dccg->funcs->enable_dsc(
+ hws->ctx->dc->res_pool->dccg, dsc_inst);
+
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
@@ -355,6 +347,13 @@ void dcn31_dsc_pg_control(
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+
+ if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc) {
+ if (hws->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on)
+ hws->ctx->dc->res_pool->dccg->funcs->disable_dsc(
+ hws->ctx->dc->res_pool->dccg, dsc_inst);
+ }
+
}
@@ -420,7 +419,7 @@ void dcn31_z10_save_init(struct dc *dc)
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
-void dcn31_z10_restore(struct dc *dc)
+void dcn31_z10_restore(const struct dc *dc)
{
union dmub_rb_cmd cmd;
@@ -594,19 +593,7 @@ void dcn31_reset_hw_ctx_wrap(
old_clk->funcs->cs_power_down(old_clk);
}
}
-}
-
-bool dcn31_is_abm_supported(struct dc *dc,
- struct dc_state *context, struct dc_stream_state *stream)
-{
- int i;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == stream &&
- (pipe_ctx->prev_odm_pipe == NULL && pipe_ctx->next_odm_pipe == NULL))
- return true;
- }
- return false;
+ /* New dc_state in the process of being applied to hardware. */
+ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index 140435e4f7ff..7ae45dd202d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -43,7 +43,7 @@ void dcn31_enable_power_gating_plane(
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
-void dcn31_z10_restore(struct dc *dc);
+void dcn31_z10_restore(const struct dc *dc);
void dcn31_z10_save_init(struct dc *dc);
void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 40011cd3c8ef..c6a737781ad1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -34,6 +34,7 @@
static const struct hw_sequencer_funcs dcn31_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn31_init_hw,
+ .power_down_on_boot = dcn10_power_down_on_boot,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
@@ -93,12 +94,12 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
+ .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
- .is_abm_supported = dcn31_is_abm_supported,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 0006bbac466c..87b2c2428842 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -52,7 +52,12 @@
#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_link_encoder.h"
+#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_dio_link_encoder.h"
+#include "dcn31/dcn31_vpg.h"
+#include "dcn31/dcn31_afmt.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
@@ -96,8 +101,6 @@
#include "link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
#define DCN3_1_DEFAULT_DET_SIZE 384
@@ -217,8 +220,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
.num_states = 5,
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 402.0,
- .sr_enter_plus_exit_z8_time_us = 520.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
@@ -363,7 +366,7 @@ static const struct dce110_clk_src_mask cs_mask = {
#define abm_regs(id)\
[id] = {\
- ABM_DCN301_REG_LIST(id)\
+ ABM_DCN302_REG_LIST(id)\
}
static const struct dce_abm_registers abm_regs[] = {
@@ -411,10 +414,10 @@ static const struct dce_audio_mask audio_mask = {
#define vpg_regs(id)\
[id] = {\
- VPG_DCN3_REG_LIST(id)\
+ VPG_DCN31_REG_LIST(id)\
}
-static const struct dcn30_vpg_registers vpg_regs[] = {
+static const struct dcn31_vpg_registers vpg_regs[] = {
vpg_regs(0),
vpg_regs(1),
vpg_regs(2),
@@ -427,20 +430,20 @@ static const struct dcn30_vpg_registers vpg_regs[] = {
vpg_regs(9),
};
-static const struct dcn30_vpg_shift vpg_shift = {
- DCN3_VPG_MASK_SH_LIST(__SHIFT)
+static const struct dcn31_vpg_shift vpg_shift = {
+ DCN31_VPG_MASK_SH_LIST(__SHIFT)
};
-static const struct dcn30_vpg_mask vpg_mask = {
- DCN3_VPG_MASK_SH_LIST(_MASK)
+static const struct dcn31_vpg_mask vpg_mask = {
+ DCN31_VPG_MASK_SH_LIST(_MASK)
};
#define afmt_regs(id)\
[id] = {\
- AFMT_DCN3_REG_LIST(id)\
+ AFMT_DCN31_REG_LIST(id)\
}
-static const struct dcn30_afmt_registers afmt_regs[] = {
+static const struct dcn31_afmt_registers afmt_regs[] = {
afmt_regs(0),
afmt_regs(1),
afmt_regs(2),
@@ -449,12 +452,32 @@ static const struct dcn30_afmt_registers afmt_regs[] = {
afmt_regs(5)
};
-static const struct dcn30_afmt_shift afmt_shift = {
- DCN3_AFMT_MASK_SH_LIST(__SHIFT)
+static const struct dcn31_afmt_shift afmt_shift = {
+ DCN31_AFMT_MASK_SH_LIST(__SHIFT)
};
-static const struct dcn30_afmt_mask afmt_mask = {
- DCN3_AFMT_MASK_SH_LIST(_MASK)
+static const struct dcn31_afmt_mask afmt_mask = {
+ DCN31_AFMT_MASK_SH_LIST(_MASK)
+};
+
+#define apg_regs(id)\
+[id] = {\
+ APG_DCN31_REG_LIST(id)\
+}
+
+static const struct dcn31_apg_registers apg_regs[] = {
+ apg_regs(0),
+ apg_regs(1),
+ apg_regs(2),
+ apg_regs(3)
+};
+
+static const struct dcn31_apg_shift apg_shift = {
+ DCN31_APG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_apg_mask apg_mask = {
+ DCN31_APG_MASK_SH_LIST(_MASK)
};
#define stream_enc_regs(id)\
@@ -538,6 +561,49 @@ static const struct dcn10_link_enc_mask le_mask = {
DPCS_DCN31_MASK_SH_LIST(_MASK)
};
+#define hpo_dp_stream_encoder_reg_list(id)\
+[id] = {\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
+}
+
+static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
+ hpo_dp_stream_encoder_reg_list(0),
+ hpo_dp_stream_encoder_reg_list(1),
+ hpo_dp_stream_encoder_reg_list(2),
+ hpo_dp_stream_encoder_reg_list(3),
+};
+
+static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_link_encoder_reg_list(id)\
+[id] = {\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
+ DCN3_1_RDPCSTX_REG_LIST(0),\
+ DCN3_1_RDPCSTX_REG_LIST(1),\
+ DCN3_1_RDPCSTX_REG_LIST(2),\
+ DCN3_1_RDPCSTX_REG_LIST(3),\
+ DCN3_1_RDPCSTX_REG_LIST(4)\
+}
+
+static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
+ hpo_dp_link_encoder_reg_list(0),
+ hpo_dp_link_encoder_reg_list(1),
+};
+
+static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
+ DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
+ DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
+};
+
#define dpp_regs(id)\
[id] = {\
DPP_REG_LIST_DCN30(id),\
@@ -831,7 +897,8 @@ static const struct dce_hwseq_registers hwseq_reg = {
HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
- HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh)
+ HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \
+ HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh)
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN31_MASK_SH_LIST(__SHIFT)
@@ -879,6 +946,8 @@ static const struct resource_caps res_cap_dcn31 = {
.num_audio = 5,
.num_stream_encoder = 5,
.num_dig_link_enc = 5,
+ .num_hpo_dp_stream_encoder = 4,
+ .num_hpo_dp_link_encoder = 2,
.num_pll = 5,
.num_dwb = 1,
.num_ddc = 5,
@@ -928,7 +997,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 3840,/*upto 4K*/
+	.max_downscale_src_width = 4096, /* up to true 4K */
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
@@ -939,13 +1008,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.use_max_lb = true,
.enable_mem_low_power = {
.bits = {
- .vga = false,
- .i2c = false,
+ .vga = true,
+ .i2c = true,
 	.dmcu = false, // This was previously known to cause a hang on S3 cycles if enabled
- .dscl = false,
- .cm = false,
- .mpc = false,
- .optc = false,
+ .dscl = true,
+ .cm = true,
+ .mpc = true,
+ .optc = true,
+ .vpg = true,
+ .afmt = true,
}
},
.optimize_edp_link_rate = true,
@@ -1230,34 +1301,53 @@ static struct vpg *dcn31_vpg_create(
struct dc_context *ctx,
uint32_t inst)
{
- struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL);
+ struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL);
- if (!vpg3)
+ if (!vpg31)
return NULL;
- vpg3_construct(vpg3, ctx, inst,
+ vpg31_construct(vpg31, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
&vpg_mask);
- return &vpg3->base;
+ return &vpg31->base;
}
static struct afmt *dcn31_afmt_create(
struct dc_context *ctx,
uint32_t inst)
{
- struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL);
+ struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL);
- if (!afmt3)
+ if (!afmt31)
return NULL;
- afmt3_construct(afmt3, ctx, inst,
+ afmt31_construct(afmt31, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
&afmt_mask);
- return &afmt3->base;
+ // Light sleep by default, no need to power down here
+
+ return &afmt31->base;
+}
+
+static struct apg *dcn31_apg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL);
+
+ if (!apg31)
+ return NULL;
+
+ apg31_construct(apg31, ctx, inst,
+ &apg_regs[inst],
+ &apg_shift,
+ &apg_mask);
+
+ return &apg31->base;
}
static struct stream_encoder *dcn31_stream_encoder_create(
@@ -1281,8 +1371,12 @@ static struct stream_encoder *dcn31_stream_encoder_create(
vpg = dcn31_vpg_create(ctx, vpg_inst);
afmt = dcn31_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
@@ -1298,6 +1392,72 @@ static struct stream_encoder *dcn31_stream_encoder_create(
return &enc1->base;
}
+static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
+ struct vpg *vpg;
+ struct apg *apg;
+ uint32_t hpo_dp_inst;
+ uint32_t vpg_inst;
+ uint32_t apg_inst;
+
+ ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
+ hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
+
+ /* Mapping of VPG register blocks to HPO DP block instance:
+ * VPG[6] -> HPO_DP[0]
+ * VPG[7] -> HPO_DP[1]
+ * VPG[8] -> HPO_DP[2]
+ * VPG[9] -> HPO_DP[3]
+ */
+ vpg_inst = hpo_dp_inst + 6;
+
+ /* Mapping of APG register blocks to HPO DP block instance:
+ * APG[0] -> HPO_DP[0]
+ * APG[1] -> HPO_DP[1]
+ * APG[2] -> HPO_DP[2]
+ * APG[3] -> HPO_DP[3]
+ */
+ apg_inst = hpo_dp_inst;
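+	/* For example, for eng_id == ENGINE_ID_HPO_DP_1 the mappings above give
+	 * hpo_dp_inst = 1, vpg_inst = 7, apg_inst = 1.
+	 */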
+
+	/* allocate HPO stream encoder and create VPG and APG sub-blocks */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ apg = dcn31_apg_create(ctx, apg_inst);
+
+ if (!hpo_dp_enc31 || !vpg || !apg) {
+ kfree(hpo_dp_enc31);
+ kfree(vpg);
+ kfree(apg);
+ return NULL;
+ }
+
+ dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
+ hpo_dp_inst, eng_id, vpg, apg,
+ &hpo_dp_stream_enc_regs[hpo_dp_inst],
+ &hpo_dp_se_shift, &hpo_dp_se_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ uint8_t inst,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
+
+ /* allocate HPO link encoder */
+	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+	if (!hpo_dp_enc31)
+		return NULL;
+
+ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ &hpo_dp_link_enc_regs[inst],
+ &hpo_dp_le_shift, &hpo_dp_le_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
static struct dce_hwseq *dcn31_hwseq_create(
struct dc_context *ctx)
{
@@ -1308,6 +1468,13 @@ static struct dce_hwseq *dcn31_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
+ /* DCN3.1 FPGA Workaround
+ * Need to enable HPO DP Stream Encoder before setting OTG master enable.
+	 * To do so, the call to enable_stream_timing is moved so that it happens
+	 * only AFTER the call to core_link_enable_stream.
+ */
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ hws->wa.dp_hpo_and_otg_sequence = true;
}
return hws;
}
@@ -1315,6 +1482,8 @@ static const struct resource_create_funcs res_create_funcs = {
.read_dce_straps = read_dce_straps,
.create_audio = dcn31_create_audio,
.create_stream_encoder = dcn31_stream_encoder_create,
+ .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
+ .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
.create_hwseq = dcn31_hwseq_create,
};
@@ -1322,6 +1491,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.read_dce_straps = NULL,
.create_audio = NULL,
.create_stream_encoder = NULL,
+ .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
+ .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
.create_hwseq = dcn31_hwseq_create,
};
@@ -1344,6 +1515,28 @@ static void dcn31_resource_destruct(struct dcn31_resource_pool *pool)
}
}
+ for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
+ if (pool->base.hpo_dp_stream_enc[i] != NULL) {
+ if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
+ pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
+ kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
+ pool->base.hpo_dp_stream_enc[i]->apg = NULL;
+ }
+ kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
+ pool->base.hpo_dp_stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
+ if (pool->base.hpo_dp_link_enc[i] != NULL) {
+ kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
+ pool->base.hpo_dp_link_enc[i] = NULL;
+ }
+ }
+
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
@@ -1590,6 +1783,13 @@ static int dcn31_populate_dml_pipes_from_context(
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
+ /*
+ * Immediate flip can be set dynamically after enabling the plane.
+	 * We therefore require immediate flip support, or underflow can be
+	 * experienced intermittently depending on peak bandwidth requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.src.gpuvm = true;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
@@ -1632,7 +1832,7 @@ static int dcn31_populate_dml_pipes_from_context(
return pipe_cnt;
}
-static void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
@@ -1653,6 +1853,15 @@ static void dcn31_calculate_wm_and_dlg_fp(
if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+	/* Clocks are not recalculated for 0-pipe configs, and leftover high
+	 * clocks can block low power states such as S0i3.
+	 * Override any clocks that could block S0i3 to their minimum here.
+	 */
+ if (pipe_cnt == 0) {
+		context->bw_ctx.bw.dcn.clk.dcfclk_khz = dcfclk; // should always be vlevel 0
+ return;
+ }
+
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
@@ -1767,7 +1976,7 @@ static void dcn31_calculate_wm_and_dlg_fp(
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
}
-static void dcn31_calculate_wm_and_dlg(
+void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
@@ -1778,6 +1987,58 @@ static void dcn31_calculate_wm_and_dlg(
DC_FP_END();
}
+bool dcn31_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_cnt = 0;
+	display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+	DC_LOGGER_INIT(dc->ctx->logger);
+
+	if (!pipes)
+		return false;
+
+ BW_VAL_TRACE_COUNT();
+
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+
+	// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ if (pipe_cnt == 0)
+ fast_validate = false;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1860,7 +2121,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.link_encs_assign = link_enc_cfg_link_encs_assign,
.link_enc_unassign = link_enc_cfg_link_enc_unassign,
.panel_cntl_create = dcn31_panel_cntl_create,
- .validate_bandwidth = dcn30_validate_bandwidth,
+ .validate_bandwidth = dcn31_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn31_populate_dml_pipes_from_context,
@@ -1935,6 +2196,7 @@ static bool dcn31_resource_construct(
dc->caps.max_slave_rgb_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.dp_hpo = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
@@ -2173,6 +2435,13 @@ static bool dcn31_resource_construct(
pool->base.sw_i2cs[i] = NULL;
}
+ if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+ !dc->debug.dpia_debug.bits.disable_dpia) {
+ /* YELLOW CARP B0 has 4 DPIA's */
+ pool->base.usb4_dpia_count = 4;
+ }
+
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
@@ -2189,6 +2458,8 @@ static bool dcn31_resource_construct(
dc->cap_funcs = cap_funcs;
+ dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp;
+
DC_FP_END();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 93571c976996..416fe7a721d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -35,6 +35,16 @@ struct dcn31_resource_pool {
struct resource_pool base;
};
+bool dcn31_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate);
+void dcn31_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+
struct resource_pool *dcn31_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
new file mode 100644
index 000000000000..f1deb1c3c363
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn31_vpg.h"
+#include "reg_helper.h"
+#include "dc/dc.h"
+
+#define DC_LOGGER \
+ vpg31->base.ctx->logger
+
+#define REG(reg)\
+ (vpg31->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ vpg31->vpg_shift->field_name, vpg31->vpg_mask->field_name
+
+
+#define CTX \
+ vpg31->base.ctx
+
+static struct vpg_funcs dcn31_vpg_funcs = {
+ .update_generic_info_packet = vpg3_update_generic_info_packet,
+ .vpg_poweron = vpg31_poweron,
+ .vpg_powerdown = vpg31_powerdown,
+};
+
+void vpg31_powerdown(struct vpg *vpg)
+{
+ struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg);
+
+ if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false)
+ return;
+
+ REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 0, VPG_GSP_LIGHT_SLEEP_FORCE, 1);
+}
+
+void vpg31_poweron(struct vpg *vpg)
+{
+ struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg);
+
+ if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false)
+ return;
+
+ REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0);
+}
+
+void vpg31_construct(struct dcn31_vpg *vpg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_vpg_registers *vpg_regs,
+ const struct dcn31_vpg_shift *vpg_shift,
+ const struct dcn31_vpg_mask *vpg_mask)
+{
+ vpg31->base.ctx = ctx;
+
+ vpg31->base.inst = inst;
+ vpg31->base.funcs = &dcn31_vpg_funcs;
+
+ vpg31->regs = vpg_regs;
+ vpg31->vpg_shift = vpg_shift;
+ vpg31->vpg_mask = vpg_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
new file mode 100644
index 000000000000..0e76eabce441
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_VPG_H__
+#define __DAL_DCN31_VPG_H__
+
+
+#define DCN31_VPG_FROM_VPG(vpg)\
+ container_of(vpg, struct dcn31_vpg, base)
+
+#define VPG_DCN31_REG_LIST(id) \
+ SRI(VPG_GENERIC_STATUS, VPG, id), \
+ SRI(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \
+ SRI(VPG_GENERIC_PACKET_DATA, VPG, id), \
+ SRI(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \
+ SRI(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id), \
+ SRI(VPG_MEM_PWR, VPG, id)
+
+struct dcn31_vpg_registers {
+ uint32_t VPG_GENERIC_STATUS;
+ uint32_t VPG_GENERIC_PACKET_ACCESS_CTRL;
+ uint32_t VPG_GENERIC_PACKET_DATA;
+ uint32_t VPG_GSP_FRAME_UPDATE_CTRL;
+ uint32_t VPG_GSP_IMMEDIATE_UPDATE_CTRL;
+ uint32_t VPG_MEM_PWR;
+};
+
+#define DCN31_VPG_MASK_SH_LIST(mask_sh)\
+ SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_OCCURED, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_CLR, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL, VPG_GENERIC_DATA_INDEX, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE0, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE1, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE2, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE3, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC0_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC1_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC2_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC3_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC5_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC6_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC7_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC8_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC9_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC10_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC11_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC12_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC13_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC14_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC8_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC9_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC10_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC11_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC12_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC13_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC14_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, mask_sh),\
+ SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_LIGHT_SLEEP_FORCE, mask_sh),\
+ SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, mask_sh)
+
+#define VPG_DCN31_REG_FIELD_LIST(type) \
+ type VPG_GENERIC_CONFLICT_OCCURED;\
+ type VPG_GENERIC_CONFLICT_CLR;\
+ type VPG_GENERIC_DATA_INDEX;\
+ type VPG_GENERIC_DATA_BYTE0;\
+ type VPG_GENERIC_DATA_BYTE1;\
+ type VPG_GENERIC_DATA_BYTE2;\
+ type VPG_GENERIC_DATA_BYTE3;\
+ type VPG_GENERIC0_FRAME_UPDATE;\
+ type VPG_GENERIC1_FRAME_UPDATE;\
+ type VPG_GENERIC2_FRAME_UPDATE;\
+ type VPG_GENERIC3_FRAME_UPDATE;\
+ type VPG_GENERIC4_FRAME_UPDATE;\
+ type VPG_GENERIC5_FRAME_UPDATE;\
+ type VPG_GENERIC6_FRAME_UPDATE;\
+ type VPG_GENERIC7_FRAME_UPDATE;\
+ type VPG_GENERIC8_FRAME_UPDATE;\
+ type VPG_GENERIC9_FRAME_UPDATE;\
+ type VPG_GENERIC10_FRAME_UPDATE;\
+ type VPG_GENERIC11_FRAME_UPDATE;\
+ type VPG_GENERIC12_FRAME_UPDATE;\
+ type VPG_GENERIC13_FRAME_UPDATE;\
+ type VPG_GENERIC14_FRAME_UPDATE;\
+ type VPG_GENERIC0_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC1_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC2_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC3_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC4_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC5_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC6_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC7_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC8_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC9_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC10_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC11_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC12_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC13_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC14_IMMEDIATE_UPDATE;\
+ type VPG_GSP_MEM_LIGHT_SLEEP_DIS;\
+ type VPG_GSP_LIGHT_SLEEP_FORCE;\
+ type VPG_GSP_MEM_PWR_STATE
+
+struct dcn31_vpg_shift {
+ VPG_DCN31_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_vpg_mask {
+ VPG_DCN31_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_vpg {
+ struct vpg base;
+ const struct dcn31_vpg_registers *regs;
+ const struct dcn31_vpg_shift *vpg_shift;
+ const struct dcn31_vpg_mask *vpg_mask;
+};
+
+void vpg31_poweron(
+ struct vpg *vpg);
+
+void vpg31_powerdown(
+ struct vpg *vpg);
+
+void vpg31_construct(struct dcn31_vpg *vpg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_vpg_registers *vpg_regs,
+ const struct dcn31_vpg_shift *vpg_shift,
+ const struct dcn31_vpg_mask *vpg_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index a9170b9f84d3..511f9e1159c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -35,8 +35,11 @@ struct cp_psp_stream_config {
uint8_t link_enc_idx;
uint8_t stream_enc_idx;
uint8_t phy_idx;
+ uint8_t dio_output_idx;
+ uint8_t dio_output_type;
uint8_t assr_enabled;
uint8_t mst_enabled;
+ uint8_t dp2_enabled;
void *dm_stream_ctx;
bool dpms_off;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 9ab854293ace..0fe66b080a03 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -160,6 +160,12 @@ void dm_set_dcn_clocks(
struct dc_context *ctx,
struct dc_clocks *clks);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable);
+#endif
+
+void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz);
+
bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable);
void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us);
@@ -173,4 +179,9 @@ int dm_helper_dmub_aux_transfer_sync(
const struct dc_link *link,
struct aux_payload *payload,
enum aux_return_code_type *operation_result);
+enum set_config_status;
+int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
+ const struct dc_link *link,
+ struct set_config_cmd_payload *payload,
+ enum set_config_status *operation_result);
#endif /* __DM_HELPERS__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 56055df2e8d2..eee6672bd32d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -58,7 +58,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
ifdef CONFIG_DRM_AMD_DC_DCN
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
@@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags)
@@ -83,7 +85,9 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags)
endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
@@ -93,12 +97,14 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_rcflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
ifdef CONFIG_DRM_AMD_DC_DCN
+DML += dcn20/dcn20_fpu.o
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
-DML += dcn2x/dcn2x.o
DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn301/dcn301_fpu.o
+DML += dsc/rc_calc_fpu.o
endif
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index c58522436291..d590dc917363 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -26,7 +26,7 @@
#include "resource.h"
-#include "dcn2x.h"
+#include "dcn20_fpu.h"
/**
* DOC: DCN2x FPU manipulation Overview
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
index 331547ba0713..36f26126d574 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
@@ -24,11 +24,11 @@
*
*/
-#ifndef __DCN2X_H__
-#define __DCN2X_H__
+#ifndef __DCN20_FPU_H__
+#define __DCN20_FPU_H__
void dcn20_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
display_e2e_pipe_params_st *pipes);
-#endif /* __DCN2X_H__ */
+#endif /* __DCN20_FPU_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 2091dd8c252d..246071c72f6b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -37,8 +37,8 @@
//
static void dml20_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
- display_rq_params_st * rq_param,
- const display_pipe_source_params_st pipe_src_param);
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st *pipe_src_param);
// Function: dml20_rq_dlg_get_dlg_params
// Calculate deadline related parameters
@@ -49,8 +49,8 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en);
/*
@@ -164,52 +164,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
- const display_data_rq_sizing_params_st rq_sizing)
+ const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
static void extract_rq_regs(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_rq_params_st rq_param)
+ const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
- rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
- rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ if (rq_param->yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -218,9 +218,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes
- / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double) rq_param->misc.rq_l.stored_swath_bytes
+ / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
@@ -233,7 +233,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
static void handle_det_buf_split(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -242,8 +242,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -685,7 +685,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_source_params_st pipe_src_param,
+ const display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
bool mode_422 = false;
@@ -697,15 +697,15 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
// TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
- vp_width = pipe_src_param.viewport_width_c / ppe;
- vp_height = pipe_src_param.viewport_height_c;
- data_pitch = pipe_src_param.data_pitch_c;
- meta_pitch = pipe_src_param.meta_pitch_c;
+ vp_width = pipe_src_param->viewport_width_c / ppe;
+ vp_height = pipe_src_param->viewport_height_c;
+ data_pitch = pipe_src_param->data_pitch_c;
+ meta_pitch = pipe_src_param->meta_pitch_c;
} else {
- vp_width = pipe_src_param.viewport_width / ppe;
- vp_height = pipe_src_param.viewport_height;
- data_pitch = pipe_src_param.data_pitch;
- meta_pitch = pipe_src_param.meta_pitch;
+ vp_width = pipe_src_param->viewport_width / ppe;
+ vp_height = pipe_src_param->viewport_height;
+ data_pitch = pipe_src_param->data_pitch;
+ meta_pitch = pipe_src_param->meta_pitch;
}
rq_sizing_param->chunk_bytes = 8192;
@@ -728,21 +728,21 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
vp_height,
data_pitch,
meta_pitch,
- pipe_src_param.source_format,
- pipe_src_param.sw_mode,
- pipe_src_param.macro_tile_size,
- pipe_src_param.source_scan,
+ pipe_src_param->source_format,
+ pipe_src_param->sw_mode,
+ pipe_src_param->macro_tile_size,
+ pipe_src_param->source_scan,
is_chroma);
}
static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
// get param for luma surface
- rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
- || pipe_src_param.source_format == dm_420_10;
- rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420 = pipe_src_param->source_format == dm_420_8
+ || pipe_src_param->source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10;
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_l),
@@ -751,7 +751,7 @@ static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
pipe_src_param,
0);
- if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) {
+ if (is_dual_plane((enum source_format_class)(pipe_src_param->source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_c),
@@ -763,20 +763,20 @@ static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
- print__rq_params_st(mode_lib, *rq_param);
+ print__rq_params_st(mode_lib, rq_param);
}
void dml20_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
- dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
+ dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe_param->src);
+ extract_rq_regs(mode_lib, rq_regs, &rq_param);
- print__rq_regs_st(mode_lib, *rq_regs);
+ print__rq_regs_st(mode_lib, rq_regs);
}
// Note: currently taken in as is.
@@ -787,8 +787,8 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en)
{
@@ -935,7 +935,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
* (double) ref_freq_to_pix_freq);
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
- min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
@@ -995,20 +995,20 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// vinit_bot_l = scl.vinit_bot;
// vinit_bot_c = scl.vinit_bot_c;
-// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height;
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
-// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
-// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
-// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+// unsigned int swath_height_l = rq_dlg_param->rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
+// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
+// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub;
+// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub;
-// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+// unsigned int swath_height_c = rq_dlg_param->rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
+ // dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
@@ -1137,16 +1137,16 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
// Active
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
- meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+ dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
@@ -1542,14 +1542,14 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -1579,20 +1579,20 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
/ dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+ print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src);
+ dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe.src);
dml20_rq_dlg_get_dlg_params(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx,
dlg_regs,
ttu_regs,
- rq_param.dlg,
- dlg_sys_param,
+ &rq_param.dlg,
+ &dlg_sys_param,
cstate_en,
pstate_en);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
index d0b90947f540..8b23867e97c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
@@ -43,7 +43,7 @@ struct display_mode_lib;
void dml20_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
@@ -61,7 +61,7 @@ void dml20_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 1a0c14e465fa..015e7f2c0b16 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -38,7 +38,7 @@
static void dml20v2_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
display_rq_params_st * rq_param,
- const display_pipe_source_params_st pipe_src_param);
+ const display_pipe_source_params_st *pipe_src_param);
// Function: dml20v2_rq_dlg_get_dlg_params
// Calculate deadline related parameters
@@ -49,8 +49,8 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en);
/*
@@ -164,52 +164,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
- const display_data_rq_sizing_params_st rq_sizing)
+ const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
static void extract_rq_regs(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_rq_params_st rq_param)
+ const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
- rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
- rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ if (rq_param->yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -218,9 +218,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes
- / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double) rq_param->misc.rq_l.stored_swath_bytes
+ / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
@@ -233,7 +233,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
static void handle_det_buf_split(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -242,8 +242,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -685,7 +685,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_source_params_st pipe_src_param,
+ const display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
bool mode_422 = false;
@@ -697,15 +697,15 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
// TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
- vp_width = pipe_src_param.viewport_width_c / ppe;
- vp_height = pipe_src_param.viewport_height_c;
- data_pitch = pipe_src_param.data_pitch_c;
- meta_pitch = pipe_src_param.meta_pitch_c;
+ vp_width = pipe_src_param->viewport_width_c / ppe;
+ vp_height = pipe_src_param->viewport_height_c;
+ data_pitch = pipe_src_param->data_pitch_c;
+ meta_pitch = pipe_src_param->meta_pitch_c;
} else {
- vp_width = pipe_src_param.viewport_width / ppe;
- vp_height = pipe_src_param.viewport_height;
- data_pitch = pipe_src_param.data_pitch;
- meta_pitch = pipe_src_param.meta_pitch;
+ vp_width = pipe_src_param->viewport_width / ppe;
+ vp_height = pipe_src_param->viewport_height;
+ data_pitch = pipe_src_param->data_pitch;
+ meta_pitch = pipe_src_param->meta_pitch;
}
rq_sizing_param->chunk_bytes = 8192;
@@ -728,21 +728,21 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
vp_height,
data_pitch,
meta_pitch,
- pipe_src_param.source_format,
- pipe_src_param.sw_mode,
- pipe_src_param.macro_tile_size,
- pipe_src_param.source_scan,
+ pipe_src_param->source_format,
+ pipe_src_param->sw_mode,
+ pipe_src_param->macro_tile_size,
+ pipe_src_param->source_scan,
is_chroma);
}
static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
// get param for luma surface
- rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
- || pipe_src_param.source_format == dm_420_10;
- rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420 = pipe_src_param->source_format == dm_420_8
+ || pipe_src_param->source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10;
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_l),
@@ -751,7 +751,7 @@ static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
pipe_src_param,
0);
- if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) {
+ if (is_dual_plane((enum source_format_class)(pipe_src_param->source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_c),
@@ -763,20 +763,20 @@ static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
- print__rq_params_st(mode_lib, *rq_param);
+ print__rq_params_st(mode_lib, rq_param);
}
void dml20v2_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
- dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
+ dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe_param->src);
+ extract_rq_regs(mode_lib, rq_regs, &rq_param);
- print__rq_regs_st(mode_lib, *rq_regs);
+ print__rq_regs_st(mode_lib, rq_regs);
}
// Note: currently taken in as is.
@@ -787,8 +787,8 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en)
{
@@ -935,7 +935,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
* (double) ref_freq_to_pix_freq);
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
- min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
@@ -996,20 +996,20 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// vinit_bot_l = scl.vinit_bot;
// vinit_bot_c = scl.vinit_bot_c;
-// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height;
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
-// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
-// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
-// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+// unsigned int swath_height_l = rq_dlg_param->rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
+// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
+// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub;
+// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub;
-// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+// unsigned int swath_height_c = rq_dlg_param->rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
+ // dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
@@ -1138,16 +1138,16 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
// Active
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
- meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+ dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
@@ -1543,14 +1543,14 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -1580,20 +1580,20 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
/ dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+ print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src);
+ dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe.src);
dml20v2_rq_dlg_get_dlg_params(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx,
dlg_regs,
ttu_regs,
- rq_param.dlg,
- dlg_sys_param,
+ &rq_param.dlg,
+ &dlg_sys_param,
cstate_en,
pstate_en);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
index 27cf8bed9376..2b4e46ea1c3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
@@ -43,7 +43,7 @@ struct display_mode_lib;
void dml20v2_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
@@ -61,7 +61,7 @@ void dml20v2_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 4136eb8256cb..8a7485e21d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -3394,6 +3394,127 @@ static unsigned int TruncToValidBPP(
}
}
+
+static noinline void CalculatePrefetchSchedulePerPlane(
+ struct display_mode_lib *mode_lib,
+ int i,
+ unsigned j,
+ unsigned k)
+{
+ struct vba_vars_st *locals = &mode_lib->vba;
+ Pipe myPipe;
+ HostVM myHostVM;
+
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ locals->SwathWidthYThisState[k],
+ dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
+ }
+
+ myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k];
+ myPipe.DISPCLK = locals->RequiredDISPCLK[i][j];
+ myPipe.PixelClock = mode_lib->vba.PixelClock[k];
+ myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
+ myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k];
+ myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
+ myPipe.SourceScan = mode_lib->vba.SourceScan[k];
+ myPipe.BlockWidth256BytesY = locals->Read256BlockWidthY[k];
+ myPipe.BlockHeight256BytesY = locals->Read256BlockHeightY[k];
+ myPipe.BlockWidth256BytesC = locals->Read256BlockWidthC[k];
+ myPipe.BlockHeight256BytesC = locals->Read256BlockHeightC[k];
+ myPipe.InterlaceEnable = mode_lib->vba.Interlace[k];
+ myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k];
+ myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k];
+ myPipe.HTotal = mode_lib->vba.HTotal[k];
+
+
+ myHostVM.Enable = mode_lib->vba.HostVMEnable;
+ myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels;
+ myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels;
+
+
+ mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ &myPipe,
+ locals->DSCDelayPerState[i][k],
+ mode_lib->vba.DPPCLKDelaySubtotal,
+ mode_lib->vba.DPPCLKDelaySCL,
+ mode_lib->vba.DPPCLKDelaySCLLBOnly,
+ mode_lib->vba.DPPCLKDelayCNVCFormater,
+ mode_lib->vba.DPPCLKDelayCNVCCursor,
+ mode_lib->vba.DISPCLKDelaySubtotal,
+ locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]),
+ locals->MaximumVStartup[0][0][k],
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.GPUVMEnable,
+ &myHostVM,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.TimeCalc,
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
+ locals->MetaRowBytes[0][0][k],
+ locals->DPTEBytesPerRow[0][0][k],
+ locals->PrefetchLinesY[0][0][k],
+ locals->SwathWidthYThisState[k],
+ locals->BytePerPixelInDETY[k],
+ locals->PrefillY[k],
+ locals->MaxNumSwY[k],
+ locals->PrefetchLinesC[0][0][k],
+ locals->BytePerPixelInDETC[k],
+ locals->PrefillC[k],
+ locals->MaxNumSwC[k],
+ locals->SwathHeightYThisState[k],
+ locals->SwathHeightCThisState[k],
+ mode_lib->vba.TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ &locals->dst_x_after_scaler,
+ &locals->dst_y_after_scaler,
+ &locals->LineTimesForPrefetch[k],
+ &locals->PrefetchBW[k],
+ &locals->LinesForMetaPTE[k],
+ &locals->LinesForMetaAndDPTERow[k],
+ &locals->VRatioPreY[i][j][k],
+ &locals->VRatioPreC[i][j][k],
+ &locals->RequiredPrefetchPixelDataBWLuma[i][j][k],
+ &locals->RequiredPrefetchPixelDataBWChroma[i][j][k],
+ &locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ &locals->Tno_bw[k],
+ &locals->prefetch_vmrow_bw[k],
+ locals->swath_width_luma_ub,
+ locals->swath_width_chroma_ub,
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+}
void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *locals = &mode_lib->vba;
@@ -4676,120 +4797,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- Pipe myPipe;
- HostVM myHostVM;
-
- if (mode_lib->vba.XFCEnabled[k] == true) {
- mode_lib->vba.XFCRemoteSurfaceFlipDelay =
- CalculateRemoteSurfaceFlipDelay(
- mode_lib,
- mode_lib->vba.VRatio[k],
- locals->SwathWidthYThisState[k],
- dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
- mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
- mode_lib->vba.XFCTSlvVupdateOffset,
- mode_lib->vba.XFCTSlvVupdateWidth,
- mode_lib->vba.XFCTSlvVreadyOffset,
- mode_lib->vba.XFCXBUFLatencyTolerance,
- mode_lib->vba.XFCFillBWOverhead,
- mode_lib->vba.XFCSlvChunkSize,
- mode_lib->vba.XFCBusTransportTime,
- mode_lib->vba.TimeCalc,
- mode_lib->vba.TWait,
- &mode_lib->vba.SrcActiveDrainRate,
- &mode_lib->vba.TInitXFill,
- &mode_lib->vba.TslvChk);
- } else {
- mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
- }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
+ CalculatePrefetchSchedulePerPlane(mode_lib, i, j, k);
- myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k];
- myPipe.DISPCLK = locals->RequiredDISPCLK[i][j];
- myPipe.PixelClock = mode_lib->vba.PixelClock[k];
- myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
- myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k];
- myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
- myPipe.SourceScan = mode_lib->vba.SourceScan[k];
- myPipe.BlockWidth256BytesY = locals->Read256BlockWidthY[k];
- myPipe.BlockHeight256BytesY = locals->Read256BlockHeightY[k];
- myPipe.BlockWidth256BytesC = locals->Read256BlockWidthC[k];
- myPipe.BlockHeight256BytesC = locals->Read256BlockHeightC[k];
- myPipe.InterlaceEnable = mode_lib->vba.Interlace[k];
- myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k];
- myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k];
- myPipe.HTotal = mode_lib->vba.HTotal[k];
-
-
- myHostVM.Enable = mode_lib->vba.HostVMEnable;
- myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels;
- myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels;
-
-
- mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule(
- mode_lib,
- mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- &myPipe,
- locals->DSCDelayPerState[i][k],
- mode_lib->vba.DPPCLKDelaySubtotal,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
- dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]),
- locals->MaximumVStartup[0][0][k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable,
- &myHostVM,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.UrgentLatency,
- mode_lib->vba.ExtraLatency,
- mode_lib->vba.TimeCalc,
- locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
- locals->MetaRowBytes[0][0][k],
- locals->DPTEBytesPerRow[0][0][k],
- locals->PrefetchLinesY[0][0][k],
- locals->SwathWidthYThisState[k],
- locals->BytePerPixelInDETY[k],
- locals->PrefillY[k],
- locals->MaxNumSwY[k],
- locals->PrefetchLinesC[0][0][k],
- locals->BytePerPixelInDETC[k],
- locals->PrefillC[k],
- locals->MaxNumSwC[k],
- locals->SwathHeightYThisState[k],
- locals->SwathHeightCThisState[k],
- mode_lib->vba.TWait,
- mode_lib->vba.XFCEnabled[k],
- mode_lib->vba.XFCRemoteSurfaceFlipDelay,
- mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
- &locals->dst_x_after_scaler,
- &locals->dst_y_after_scaler,
- &locals->LineTimesForPrefetch[k],
- &locals->PrefetchBW[k],
- &locals->LinesForMetaPTE[k],
- &locals->LinesForMetaAndDPTERow[k],
- &locals->VRatioPreY[i][j][k],
- &locals->VRatioPreC[i][j][k],
- &locals->RequiredPrefetchPixelDataBWLuma[i][j][k],
- &locals->RequiredPrefetchPixelDataBWChroma[i][j][k],
- &locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
- &locals->Tno_bw[k],
- &locals->prefetch_vmrow_bw[k],
- locals->swath_width_luma_ub,
- locals->swath_width_chroma_ub,
- &mode_lib->vba.VUpdateOffsetPix[k],
- &mode_lib->vba.VUpdateWidthPix[k],
- &mode_lib->vba.VReadyOffsetPix[k]);
- }
mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 287e31052b30..46c433c0bcb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -141,55 +141,55 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
static void extract_rq_sizing_regs(
struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
- const display_data_rq_sizing_params_st rq_sizing)
+ const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
static void extract_rq_regs(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_rq_params_st rq_param)
+ const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(
- dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+ if (rq_param->yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(
- dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// FIXME: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -198,9 +198,9 @@ static void extract_rq_regs(
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes
- / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double) rq_param->misc.rq_l.stored_swath_bytes
+ / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple(
@@ -215,7 +215,7 @@ static void extract_rq_regs(
static void handle_det_buf_split(
struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -224,8 +224,8 @@ static void handle_det_buf_split(
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -694,7 +694,7 @@ static void get_surf_rq_param(
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_params_st pipe_param,
+ const display_pipe_params_st *pipe_param,
bool is_chroma)
{
bool mode_422 = false;
@@ -706,30 +706,30 @@ static void get_surf_rq_param(
// FIXME check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
- vp_width = pipe_param.src.viewport_width_c / ppe;
- vp_height = pipe_param.src.viewport_height_c;
- data_pitch = pipe_param.src.data_pitch_c;
- meta_pitch = pipe_param.src.meta_pitch_c;
+ vp_width = pipe_param->src.viewport_width_c / ppe;
+ vp_height = pipe_param->src.viewport_height_c;
+ data_pitch = pipe_param->src.data_pitch_c;
+ meta_pitch = pipe_param->src.meta_pitch_c;
} else {
- vp_width = pipe_param.src.viewport_width / ppe;
- vp_height = pipe_param.src.viewport_height;
- data_pitch = pipe_param.src.data_pitch;
- meta_pitch = pipe_param.src.meta_pitch;
+ vp_width = pipe_param->src.viewport_width / ppe;
+ vp_height = pipe_param->src.viewport_height;
+ data_pitch = pipe_param->src.data_pitch;
+ meta_pitch = pipe_param->src.meta_pitch;
}
- if (pipe_param.dest.odm_combine) {
+ if (pipe_param->dest.odm_combine) {
unsigned int access_dir;
unsigned int full_src_vp_width;
unsigned int hactive_half;
unsigned int src_hactive_half;
- access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
- hactive_half = pipe_param.dest.hactive / 2;
+ access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
+ hactive_half = pipe_param->dest.hactive / 2;
if (is_chroma) {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width;
- src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_half;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
+ src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_half;
} else {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width;
- src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio * hactive_half;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
+ src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio * hactive_half;
}
if (access_dir == 0) {
@@ -754,7 +754,7 @@ static void get_surf_rq_param(
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
- if (pipe_param.src.hostvm)
+ if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
@@ -768,23 +768,23 @@ static void get_surf_rq_param(
vp_height,
data_pitch,
meta_pitch,
- pipe_param.src.source_format,
- pipe_param.src.sw_mode,
- pipe_param.src.macro_tile_size,
- pipe_param.src.source_scan,
- pipe_param.src.hostvm,
+ pipe_param->src.source_format,
+ pipe_param->src.sw_mode,
+ pipe_param->src.macro_tile_size,
+ pipe_param->src.source_scan,
+ pipe_param->src.hostvm,
is_chroma);
}
static void dml_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
// get param for luma surface
- rq_param->yuv420 = pipe_param.src.source_format == dm_420_8
- || pipe_param.src.source_format == dm_420_10;
- rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10;
+ rq_param->yuv420 = pipe_param->src.source_format == dm_420_8
+ || pipe_param->src.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
get_surf_rq_param(
mode_lib,
@@ -794,7 +794,7 @@ static void dml_rq_dlg_get_rq_params(
pipe_param,
0);
- if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) {
+ if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(
mode_lib,
@@ -806,22 +806,22 @@ static void dml_rq_dlg_get_rq_params(
}
// calculate how to split the det buffer space between luma and chroma
- handle_det_buf_split(mode_lib, rq_param, pipe_param.src);
- print__rq_params_st(mode_lib, *rq_param);
+ handle_det_buf_split(mode_lib, rq_param, &pipe_param->src);
+ print__rq_params_st(mode_lib, rq_param);
}
void dml21_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
+ extract_rq_regs(mode_lib, rq_regs, &rq_param);
- print__rq_regs_st(mode_lib, *rq_regs);
+ print__rq_regs_st(mode_lib, rq_regs);
}
// Note: currently taken in as is.
@@ -833,8 +833,8 @@ static void dml_rq_dlg_get_dlg_params(
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en)
{
@@ -981,7 +981,7 @@ static void dml_rq_dlg_get_dlg_params(
* (double) ref_freq_to_pix_freq);
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13));
- min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
@@ -1042,13 +1042,13 @@ static void dml_rq_dlg_get_dlg_params(
scl_enable = scl->scl_enable;
line_time_in_us = (htotal / pclk_freq_in_mhz);
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+ swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
+ swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
@@ -1189,16 +1189,16 @@ static void dml_rq_dlg_get_dlg_params(
dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
// Active
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
- meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+ dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
@@ -1650,15 +1650,15 @@ static void dml_rq_dlg_get_dlg_params(
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml21_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -1691,12 +1691,12 @@ void dml21_rq_dlg_get_dlg_reg(
dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
/ dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+ print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(
mode_lib,
e2e_pipe_param,
@@ -1704,8 +1704,8 @@ void dml21_rq_dlg_get_dlg_reg(
pipe_idx,
dlg_regs,
ttu_regs,
- rq_param.dlg,
- dlg_sys_param,
+ &rq_param.dlg,
+ &dlg_sys_param,
cstate_en,
pstate_en);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
index e8f7785e3fc6..af6ad0ca9cf8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
@@ -44,7 +44,7 @@ struct display_mode_lib;
void dml21_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
// Calculate and return DLG and TTU register struct given the system setting
@@ -61,7 +61,7 @@ void dml21_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 0d934fae1c3a..aef854270054 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -89,52 +89,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
- const display_data_rq_sizing_params_st rq_sizing)
+ const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
static void extract_rq_regs(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_rq_params_st rq_param)
+ const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
- rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
- rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ if (rq_param->yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// FIXME: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param.yuv420 && rq_param.sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -143,9 +143,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
- if (rq_param.yuv420) {
- if ((double)rq_param.misc.rq_l.stored_swath_bytes
- / (double)rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double)rq_param->misc.rq_l.stored_swath_bytes
+ / (double)rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0),
@@ -158,7 +158,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
static void handle_det_buf_split(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -167,8 +167,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -747,7 +747,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_params_st pipe_param,
+ const display_pipe_params_st *pipe_param,
bool is_chroma,
bool is_alpha)
{
@@ -761,32 +761,32 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
// FIXME check if ppe apply for both luma and chroma in 422 case
if (is_chroma | is_alpha) {
- vp_width = pipe_param.src.viewport_width_c / ppe;
- vp_height = pipe_param.src.viewport_height_c;
- data_pitch = pipe_param.src.data_pitch_c;
- meta_pitch = pipe_param.src.meta_pitch_c;
- surface_height = pipe_param.src.surface_height_y / 2.0;
+ vp_width = pipe_param->src.viewport_width_c / ppe;
+ vp_height = pipe_param->src.viewport_height_c;
+ data_pitch = pipe_param->src.data_pitch_c;
+ meta_pitch = pipe_param->src.meta_pitch_c;
+ surface_height = pipe_param->src.surface_height_y / 2.0;
} else {
- vp_width = pipe_param.src.viewport_width / ppe;
- vp_height = pipe_param.src.viewport_height;
- data_pitch = pipe_param.src.data_pitch;
- meta_pitch = pipe_param.src.meta_pitch;
- surface_height = pipe_param.src.surface_height_y;
+ vp_width = pipe_param->src.viewport_width / ppe;
+ vp_height = pipe_param->src.viewport_height;
+ data_pitch = pipe_param->src.data_pitch;
+ meta_pitch = pipe_param->src.meta_pitch;
+ surface_height = pipe_param->src.surface_height_y;
}
- if (pipe_param.dest.odm_combine) {
+ if (pipe_param->dest.odm_combine) {
unsigned int access_dir = 0;
unsigned int full_src_vp_width = 0;
unsigned int hactive_odm = 0;
unsigned int src_hactive_odm = 0;
- access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
- hactive_odm = pipe_param.dest.hactive / ((unsigned int)pipe_param.dest.odm_combine*2);
+ access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
+ hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine*2);
if (is_chroma) {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width;
- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm;
} else {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width;
- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm;
}
if (access_dir == 0) {
@@ -815,7 +815,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
- if (pipe_param.src.hostvm)
+ if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
@@ -828,28 +828,28 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
vp_height,
data_pitch,
meta_pitch,
- pipe_param.src.source_format,
- pipe_param.src.sw_mode,
- pipe_param.src.macro_tile_size,
- pipe_param.src.source_scan,
- pipe_param.src.hostvm,
+ pipe_param->src.source_format,
+ pipe_param->src.sw_mode,
+ pipe_param->src.macro_tile_size,
+ pipe_param->src.source_scan,
+ pipe_param->src.hostvm,
is_chroma,
surface_height);
}
static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
// get param for luma surface
- rq_param->yuv420 = pipe_param.src.source_format == dm_420_8
- || pipe_param.src.source_format == dm_420_10
- || pipe_param.src.source_format == dm_rgbe_alpha
- || pipe_param.src.source_format == dm_420_12;
+ rq_param->yuv420 = pipe_param->src.source_format == dm_420_8
+ || pipe_param->src.source_format == dm_420_10
+ || pipe_param->src.source_format == dm_rgbe_alpha
+ || pipe_param->src.source_format == dm_420_12;
- rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
- rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha)?1:0;
+ rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha)?1:0;
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_l),
@@ -859,7 +859,7 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
0,
0);
- if (is_dual_plane((enum source_format_class)(pipe_param.src.source_format))) {
+ if (is_dual_plane((enum source_format_class)(pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_c),
@@ -871,21 +871,21 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
}
// calculate how to split the det buffer space between luma and chroma
- handle_det_buf_split(mode_lib, rq_param, pipe_param.src);
- print__rq_params_st(mode_lib, *rq_param);
+ handle_det_buf_split(mode_lib, rq_param, &pipe_param->src);
+ print__rq_params_st(mode_lib, rq_param);
}
void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = { 0 };
memset(rq_regs, 0, sizeof(*rq_regs));
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
+ extract_rq_regs(mode_lib, rq_regs, &rq_param);
- print__rq_regs_st(mode_lib, *rq_regs);
+ print__rq_regs_st(mode_lib, rq_regs);
}
static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
@@ -1824,14 +1824,14 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -1861,12 +1861,12 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
/ dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+ print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(mode_lib,
e2e_pipe_param,
num_pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
index c04965cceff3..625e41f8d575 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
@@ -41,7 +41,7 @@ struct display_mode_lib;
// See also: <display_rq_regs_st>
void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
// Calculate and return DLG and TTU register struct given the system setting
@@ -57,7 +57,7 @@ void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
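With e2e_pipe_param const-qualified, the call sites above now index the array and pass the address of a member of one element, e.g. &e2e_pipe_param[pipe_idx].pipe. A small sketch of that shape, using invented pipe_st/e2e_st types rather than the real display structs:

#include <stdio.h>

struct pipe_st { int width; };
struct e2e_st { struct pipe_st pipe; };

static void use_pipe(const struct pipe_st *p)
{
	printf("width = %d\n", p->width);
}

static void walk(const struct e2e_st *pipes, unsigned int n, unsigned int idx)
{
	if (idx < n)
		use_pipe(&pipes[idx].pipe);	/* was: use_pipe(pipes[idx].pipe) */
}

int main(void)
{
	struct e2e_st pipes[2] = { { { 640 } }, { { 1920 } } };

	walk(pipes, 2, 1);
	return 0;
}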
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
new file mode 100644
index 000000000000..94c32832a0e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "resource.h"
+#include "clk_mgr.h"
+#include "dcn20/dcn20_resource.h"
+#include "dcn301/dcn301_resource.h"
+
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dcn301_fpu.h"
+
+#define TO_DCN301_RES_POOL(pool)\
+ container_of(pool, struct dcn301_resource_pool, base)
+
+/* Based on: //vidip/dc/dcn3/doc/architecture/DCN3x_Display_Mode.xlsm#83 */
+struct _vcs_dpi_ip_params_st dcn3_01_ip = {
+ .odm_capable = 1,
+ .gpuvm_enable = 1,
+ .hostvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_max_page_table_levels = 2,
+ .hostvm_cached_page_table_levels = 0,
+ .pte_group_size_bytes = 2048,
+ .num_dsc = 3,
+ .rob_buffer_size_kbytes = 184,
+ .det_buffer_size_kbytes = 184,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 32,
+ .pde_proc_buffer_size_64k_reqs = 48,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 8,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0, // ?
+ .line_buffer_fixed_bpp = 48, // ?
+ .dcc_supported = true,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .writeback_line_buffer_buffer_size = 656640,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 4,
+ .max_num_dpp = 4,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.11,
+ .min_vblank_lines = 32,
+ .dppclk_delay_subtotal = 46,
+ .dynamic_metadata_vm_enabled = true,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dcfclk_cstate_latency = 5.2, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+ .max_num_hdmi_frl_outputs = 0,
+ .odm_combine_4to1_supported = true,
+
+ .xfc_supported = false,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .gfx7_compat_tiling_supported = 0,
+ .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dram_speed_mts = 2400.0,
+ .fabricclk_mhz = 600,
+ .socclk_mhz = 278.0,
+ .dcfclk_mhz = 400.0,
+ .dscclk_mhz = 206.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 600.0,
+ },
+
+ {
+ .state = 1,
+ .dram_speed_mts = 2400.0,
+ .fabricclk_mhz = 688,
+ .socclk_mhz = 278.0,
+ .dcfclk_mhz = 400.0,
+ .dscclk_mhz = 206.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 600.0,
+ },
+
+ {
+ .state = 2,
+ .dram_speed_mts = 4267.0,
+ .fabricclk_mhz = 1067,
+ .socclk_mhz = 278.0,
+ .dcfclk_mhz = 608.0,
+ .dscclk_mhz = 296.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 810.0,
+ },
+
+ {
+ .state = 3,
+ .dram_speed_mts = 4267.0,
+ .fabricclk_mhz = 1067,
+ .socclk_mhz = 715.0,
+ .dcfclk_mhz = 676.0,
+ .dscclk_mhz = 338.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 810.0,
+ },
+
+ {
+ .state = 4,
+ .dram_speed_mts = 4267.0,
+ .fabricclk_mhz = 1067,
+ .socclk_mhz = 953.0,
+ .dcfclk_mhz = 810.0,
+ .dscclk_mhz = 338.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 810.0,
+ },
+ },
+
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .writeback_latency_us = 12.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 4,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 191,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 4,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 23.84,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3550,
+ .xfc_bus_transport_time_us = 20, // ?
+ .xfc_xbuf_latency_tolerance_us = 4, // ?
+ .use_urgent_burst_bw = 1, // ?
+ .num_states = 5,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+static void calculate_wm_set_for_vlevel(int vlevel,
+ struct wm_range_table_entry *table_entry,
+ struct dcn_watermarks *wm_set,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt)
+{
+ double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
+
+ ASSERT(vlevel < dml->soc.num_states);
+ /* only pipe 0 is read for voltage and dcf/soc clocks */
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
+ pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
+
+ dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+ dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+ dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
+
+ wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+ wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
+ wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
+ dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
+
+}
+
+void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int i, closest_clk_lvl;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ /* Default clock levels are used for diags, which may lead to overclocking. */
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
+ dcn3_01_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+
+ clock_limits[i].state = i;
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_01_soc.clock_limits[i] = clock_limits[i];
+
+ if (clk_table->num_entries) {
+ dcn3_01_soc.num_states = clk_table->num_entries;
+ /* duplicate last level */
+ dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
+ dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
+ }
+ }
+
+ dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
+}
+
+void dcn301_fpu_set_wm_ranges(int i,
+ struct pp_smu_wm_range_sets *ranges,
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
+{
+ dc_assert_fp_enabled();
+
+ ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
+ ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
+}
+
+void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
+{
+ dc_assert_fp_enabled();
+
+ if (bb_info.dram_clock_change_latency_100ns > 0)
+ dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+}
+
+void dcn301_calculate_wm_and_dlg(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel_req)
+{
+ int i, pipe_idx;
+ int vlevel, vlevel_max;
+ struct wm_range_table_entry *table_entry;
+ struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
+
+ ASSERT(bw_params);
+ dc_assert_fp_enabled();
+
+ vlevel_max = bw_params->clk_table.num_entries - 1;
+
+ /* WM Set D */
+ table_entry = &bw_params->wm_table.entries[WM_D];
+ if (table_entry->wm_type == WM_TYPE_RETRAINING)
+ vlevel = 0;
+ else
+ vlevel = vlevel_max;
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ /* WM Set C */
+ table_entry = &bw_params->wm_table.entries[WM_C];
+ vlevel = min(max(vlevel_req, 2), vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ /* WM Set B */
+ table_entry = &bw_params->wm_table.entries[WM_B];
+ vlevel = min(max(vlevel_req, 1), vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+
+ /* WM Set A */
+ table_entry = &bw_params->wm_table.entries[WM_A];
+ vlevel = min(vlevel_req, vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+ pipe_idx++;
+ }
+
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+}
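calculate_wm_set_for_vlevel() caches dram_clock_change_latency_us, overrides the SoC latencies from the watermark-table entry, computes the watermark set, and then restores the cached value so later callers still see the original bounding box. A stand-alone sketch of that save/override/restore discipline; soc_t, wm_entry_t and compute_pstate_wm_ns() are stand-ins, not amdgpu code:

#include <stdio.h>

struct soc_t { double dram_clock_change_latency_us; };
struct wm_entry_t { double pstate_latency_us; };

static double compute_pstate_wm_ns(const struct soc_t *soc)
{
	/* placeholder for the real get_wm_dram_clock_change() */
	return soc->dram_clock_change_latency_us * 1000.0;
}

static double wm_for_entry(struct soc_t *soc, const struct wm_entry_t *e)
{
	double cached = soc->dram_clock_change_latency_us;	/* save */
	double wm;

	soc->dram_clock_change_latency_us = e->pstate_latency_us; /* override */
	wm = compute_pstate_wm_ns(soc);
	soc->dram_clock_change_latency_us = cached;		/* restore */
	return wm;
}

int main(void)
{
	struct soc_t soc = { 23.84 };
	struct wm_entry_t e = { 11.72 };

	printf("wm = %.1f ns (soc latency restored to %.2f us)\n",
	       wm_for_entry(&soc, &e), soc.dram_clock_change_latency_us);
	return 0;
}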
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
new file mode 100644
index 000000000000..fc7065d17842
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN301_FPU_H__
+#define __DCN301_FPU_H__
+
+void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
+void dcn301_fpu_set_wm_ranges(int i,
+ struct pp_smu_wm_range_sets *ranges,
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
+
+void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info);
+
+void dcn301_calculate_wm_and_dlg(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel_req);
+#endif /* __DCN301_FPU_H__*/
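The helpers declared here live under dml/ and call dc_assert_fp_enabled(), which fits the pattern of this refactor series: callers bracket floating-point work with DC_FP_START()/DC_FP_END(), and code moved into dml/ asserts at runtime that it executes inside such a region. A toy model of that assertion discipline (fp_start, fp_end and assert_fp_enabled are invented stand-ins, not the kernel API):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool fp_enabled;				/* stands in for per-CPU FPU state */

static void fp_start(void) { fp_enabled = true; }	/* cf. DC_FP_START() */
static void fp_end(void)   { fp_enabled = false; }	/* cf. DC_FP_END() */
static void assert_fp_enabled(void) { assert(fp_enabled); }

static double fpu_helper(double latency_us)
{
	assert_fp_enabled();		/* cf. dc_assert_fp_enabled() */
	return latency_us * 10.0;
}

int main(void)
{
	fp_start();
	printf("%.1f\n", fpu_helper(2.5));
	fp_end();
	return 0;
}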
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index ce55c9caf9a2..d58925cff420 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -5398,9 +5398,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->MaximumReadBandwidthWithPrefetch =
v->MaximumReadBandwidthWithPrefetch
- + dml_max4(
- v->VActivePixelBandwidth[i][j][k],
- v->VActiveCursorBandwidth[i][j][k]
+ + dml_max3(
+ v->VActivePixelBandwidth[i][j][k]
+ + v->VActiveCursorBandwidth[i][j][k]
+ v->NoOfDPP[i][j][k]
* (v->meta_row_bandwidth[i][j][k]
+ v->dpte_row_bandwidth[i][j][k]),
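As I read this hunk, the old code offered VActivePixelBandwidth and VActiveCursorBandwidth as two independent candidates of a four-way max, even though both are consumed at the same time; the fix folds their sum into a single term of a three-way max, so the prefetch bandwidth bound can only get larger. A simplified rendering with made-up numbers:

#include <stdio.h>

static double dml_max2(double a, double b) { return a > b ? a : b; }
static double dml_max3(double a, double b, double c)
{
	return dml_max2(dml_max2(a, b), c);
}

int main(void)
{
	double pixel_bw = 700.0, cursor_bw = 50.0, row_bw = 400.0, flip_bw = 600.0;

	/* old style: pixel and cursor treated as alternatives */
	double old_bw = dml_max3(dml_max2(pixel_bw, cursor_bw), row_bw, flip_bw);
	/* new style: pixel + cursor are concurrent, so they add */
	double new_bw = dml_max3(pixel_bw + cursor_bw, row_bw, flip_bw);

	printf("old = %.0f, new = %.0f\n", old_bw, new_bw);	/* 700 vs 750 */
	return 0;
}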
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index c23905bc733a..e0fecf127bd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -175,47 +175,47 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
return (4 * 1024);
}
-static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, const display_data_rq_sizing_params_st rq_sizing)
+static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, const display_data_rq_sizing_params_st *rq_sizing)
{
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
-static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_rq_params_st rq_param)
+static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
- rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), 1) - 3;
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3;
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
- rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), 1) - 3;
+ if (rq_param->yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3;
}
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// FIXME: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param.yuv420 && rq_param.sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -225,8 +225,8 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_s
rq_regs->crq_expansion_mode = 1;
// Note: detile_buf_plane1_addr is in unit of 1KB
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double) rq_param->misc.rq_l.stored_swath_bytes / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
@@ -244,14 +244,14 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_s
dml_print("DML_DLG: %s: detile_buf_size_in_bytes = %0d\n", __func__, detile_buf_size_in_bytes);
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d\n", __func__, detile_buf_plane1_addr);
dml_print("DML_DLG: %s: plane1_base_address = %0d\n", __func__, rq_regs->plane1_base_address);
- dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param.misc.rq_l.stored_swath_bytes);
- dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param.misc.rq_c.stored_swath_bytes);
- dml_print("DML_DLG: %s: rq_l.swath_height = %0d\n", __func__, rq_param.dlg.rq_l.swath_height);
- dml_print("DML_DLG: %s: rq_c.swath_height = %0d\n", __func__, rq_param.dlg.rq_c.swath_height);
+ dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_l.stored_swath_bytes);
+ dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_c.stored_swath_bytes);
+ dml_print("DML_DLG: %s: rq_l.swath_height = %0d\n", __func__, rq_param->dlg.rq_l.swath_height);
+ dml_print("DML_DLG: %s: rq_c.swath_height = %0d\n", __func__, rq_param->dlg.rq_c.swath_height);
#endif
}
-static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_source_params_st pipe_src_param)
+static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -260,8 +260,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_p
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = 0;
bool req128_c = 0;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -738,7 +738,7 @@ static void get_surf_rq_param(
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_params_st pipe_param,
+ const display_pipe_params_st *pipe_param,
bool is_chroma,
bool is_alpha)
{
@@ -752,33 +752,33 @@ static void get_surf_rq_param(
// FIXME check if ppe apply for both luma and chroma in 422 case
if (is_chroma | is_alpha) {
- vp_width = pipe_param.src.viewport_width_c / ppe;
- vp_height = pipe_param.src.viewport_height_c;
- data_pitch = pipe_param.src.data_pitch_c;
- meta_pitch = pipe_param.src.meta_pitch_c;
- surface_height = pipe_param.src.surface_height_y / 2.0;
+ vp_width = pipe_param->src.viewport_width_c / ppe;
+ vp_height = pipe_param->src.viewport_height_c;
+ data_pitch = pipe_param->src.data_pitch_c;
+ meta_pitch = pipe_param->src.meta_pitch_c;
+ surface_height = pipe_param->src.surface_height_y / 2.0;
} else {
- vp_width = pipe_param.src.viewport_width / ppe;
- vp_height = pipe_param.src.viewport_height;
- data_pitch = pipe_param.src.data_pitch;
- meta_pitch = pipe_param.src.meta_pitch;
- surface_height = pipe_param.src.surface_height_y;
+ vp_width = pipe_param->src.viewport_width / ppe;
+ vp_height = pipe_param->src.viewport_height;
+ data_pitch = pipe_param->src.data_pitch;
+ meta_pitch = pipe_param->src.meta_pitch;
+ surface_height = pipe_param->src.surface_height_y;
}
- if (pipe_param.dest.odm_combine) {
+ if (pipe_param->dest.odm_combine) {
unsigned int access_dir;
unsigned int full_src_vp_width;
unsigned int hactive_odm;
unsigned int src_hactive_odm;
- access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
- hactive_odm = pipe_param.dest.hactive / ((unsigned int) pipe_param.dest.odm_combine * 2);
+ access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
+ hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine * 2);
if (is_chroma) {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width;
- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm;
} else {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width;
- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm;
}
if (access_dir == 0) {
@@ -808,7 +808,7 @@ static void get_surf_rq_param(
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
- if (pipe_param.src.hostvm)
+ if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
@@ -822,46 +822,46 @@ static void get_surf_rq_param(
vp_height,
data_pitch,
meta_pitch,
- pipe_param.src.source_format,
- pipe_param.src.sw_mode,
- pipe_param.src.macro_tile_size,
- pipe_param.src.source_scan,
- pipe_param.src.hostvm,
+ pipe_param->src.source_format,
+ pipe_param->src.sw_mode,
+ pipe_param->src.macro_tile_size,
+ pipe_param->src.source_scan,
+ pipe_param->src.hostvm,
is_chroma,
surface_height);
}
-static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st pipe_param)
+static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st *pipe_param)
{
// get param for luma surface
- rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 || pipe_param.src.source_format == dm_420_10 || pipe_param.src.source_format == dm_rgbe_alpha
- || pipe_param.src.source_format == dm_420_12;
+ rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 || pipe_param->src.source_format == dm_420_10 || pipe_param->src.source_format == dm_rgbe_alpha
+ || pipe_param->src.source_format == dm_420_12;
- rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
- rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha) ? 1 : 0;
+ rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha) ? 1 : 0;
get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), &(rq_param->dlg.rq_l), &(rq_param->misc.rq_l), pipe_param, 0, 0);
- if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) {
+ if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), &(rq_param->dlg.rq_c), &(rq_param->misc.rq_c), pipe_param, 1, rq_param->rgbe_alpha);
}
// calculate how to split the det buffer space between luma and chroma
- handle_det_buf_split(mode_lib, rq_param, pipe_param.src);
- print__rq_params_st(mode_lib, *rq_param);
+ handle_det_buf_split(mode_lib, rq_param, &pipe_param->src);
+ print__rq_params_st(mode_lib, rq_param);
}
-void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st pipe_param)
+void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
+ extract_rq_regs(mode_lib, rq_regs, &rq_param);
- print__rq_regs_st(mode_lib, *rq_regs);
+ print__rq_regs_st(mode_lib, rq_regs);
}
static void calculate_ttu_cursor(
@@ -943,8 +943,8 @@ static void dml_rq_dlg_get_dlg_params(
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
@@ -1112,13 +1112,13 @@ static void dml_rq_dlg_get_dlg_params(
vratio_c = scl->vscl_ratio_c;
scl_enable = scl->scl_enable;
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+ swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
+ swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
@@ -1239,16 +1239,16 @@ static void dml_rq_dlg_get_dlg_params(
dml_print("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, vratio_pre_c);
// Active
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
- meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+ dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
@@ -1669,15 +1669,15 @@ static void dml_rq_dlg_get_dlg_params(
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml31_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -1699,12 +1699,12 @@ void dml31_rq_dlg_get_dlg_reg(
dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes);
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+ print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(
mode_lib,
e2e_pipe_param,
@@ -1712,8 +1712,8 @@ void dml31_rq_dlg_get_dlg_reg(
pipe_idx,
dlg_regs,
ttu_regs,
- rq_param.dlg,
- dlg_sys_param,
+ &rq_param.dlg,
+ &dlg_sys_param,
cstate_en,
pstate_en,
vm_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h
index adf8518f761f..8ee991351699 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h
@@ -41,7 +41,7 @@ struct display_mode_lib;
// See also: <display_rq_regs_st>
void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
// Calculate and return DLG and TTU register struct given the system setting
@@ -57,7 +57,7 @@ void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
void dml31_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index 1051ca1a23b8..edb9f7567d6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -80,11 +80,11 @@ enum dm_swizzle_mode {
dm_sw_SPARE_13 = 24,
dm_sw_64kb_s_x = 25,
dm_sw_64kb_d_x = 26,
- dm_sw_SPARE_14 = 27,
+ dm_sw_64kb_r_x = 27,
dm_sw_SPARE_15 = 28,
dm_sw_var_s_x = 29,
dm_sw_var_d_x = 30,
- dm_sw_64kb_r_x,
+ dm_sw_var_r_x = 31,
dm_sw_gfx7_2d_thin_l_vp,
dm_sw_gfx7_2d_thin_gl,
};
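This hunk moves dm_sw_64kb_r_x into the former SPARE_14 slot (27) and pins dm_sw_var_r_x at 31. Before the change, dm_sw_64kb_r_x followed dm_sw_var_d_x = 30 and so implicitly evaluated to 31, presumably out of step with the swizzle encoding the rest of the stack expects. A sketch of the implicit-numbering pitfall:

#include <stdio.h>

enum before {
	B_SPARE_14 = 27,
	B_SPARE_15 = 28,
	B_var_s_x  = 29,
	B_var_d_x  = 30,
	B_64kb_r_x,		/* implicitly 31, not the intended 27 */
};

enum after {
	A_64kb_r_x = 27,	/* takes over the spare slot */
	A_SPARE_15 = 28,
	A_var_s_x  = 29,
	A_var_d_x  = 30,
	A_var_r_x  = 31,	/* explicit, so later edits cannot shift it */
};

int main(void)
{
	printf("before: 64kb_r_x = %d, after: 64kb_r_x = %d\n",
	       B_64kb_r_x, A_64kb_r_x);
	return 0;
}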
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 8a5bd919aec8..30db51fbd8cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -82,6 +82,7 @@ void dml_init_instance(struct display_mode_lib *lib,
lib->project = project;
switch (project) {
case DML_PROJECT_NAVI10:
+ case DML_PROJECT_DCN201:
lib->funcs = dml20_funcs;
break;
case DML_PROJECT_NAVI10v2:
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index d42a0aeca6be..6905ef1e75a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -36,6 +36,7 @@ enum dml_project {
DML_PROJECT_RAVEN1,
DML_PROJECT_NAVI10,
DML_PROJECT_NAVI10v2,
+ DML_PROJECT_DCN201,
DML_PROJECT_DCN21,
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
@@ -49,7 +50,7 @@ struct dml_funcs {
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -60,7 +61,7 @@ struct dml_funcs {
void (*rq_dlg_get_rq_reg)(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
void (*recalculate)(struct display_mode_lib *mode_lib);
void (*validate)(struct display_mode_lib *mode_lib);
};
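dml_init_instance() picks a dml_funcs table by project id, so the new DML_PROJECT_DCN201 only needs a case label sharing the existing dml20_funcs; the const-pointer prototypes above then flow through the function-pointer signatures. A toy sketch of that dispatch, with invented names:

#include <stdio.h>

enum proj { NAVI10, DCN201, DCN31 };

struct funcs { void (*get_reg)(void); };

static void dml20_get_reg(void) { puts("dml20"); }
static void dml31_get_reg(void) { puts("dml31"); }

static const struct funcs dml20_funcs = { dml20_get_reg };
static const struct funcs dml31_funcs = { dml31_get_reg };

static const struct funcs *pick(enum proj p)
{
	switch (p) {
	case NAVI10:
	case DCN201:		/* new project id, same dml20 implementation */
		return &dml20_funcs;
	case DCN31:
		return &dml31_funcs;
	}
	return NULL;
}

int main(void)
{
	pick(DCN201)->get_reg();	/* prints "dml20" */
	return 0;
}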
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index e2d82aacd3bc..71ea503cb32f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -26,371 +26,371 @@
#include "display_rq_dlg_helpers.h"
#include "dml_logger.h"
-void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
+void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_params_st *rq_param)
{
dml_print("DML_RQ_DLG_CALC: ***************************\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_PARAM_ST\n");
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_l);
+ print__data_rq_sizing_params_st(mode_lib, &rq_param->sizing.rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA> ===\n");
- print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_c);
+ print__data_rq_sizing_params_st(mode_lib, &rq_param->sizing.rq_c);
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_l);
+ print__data_rq_dlg_params_st(mode_lib, &rq_param->dlg.rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_c);
+ print__data_rq_dlg_params_st(mode_lib, &rq_param->dlg.rq_c);
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_l);
+ print__data_rq_misc_params_st(mode_lib, &rq_param->misc.rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_c);
+ print__data_rq_misc_params_st(mode_lib, &rq_param->misc.rq_c);
dml_print("DML_RQ_DLG_CALC: ***************************\n");
}
-void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing)
+void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_SIZING_PARAM_ST\n");
- dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing.chunk_bytes);
- dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing.min_chunk_bytes);
- dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing.meta_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing->chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing->min_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing->meta_chunk_bytes);
dml_print(
"DML_RQ_DLG_CALC: min_meta_chunk_bytes = %0d\n",
- rq_sizing.min_meta_chunk_bytes);
- dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing.mpte_group_bytes);
- dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing.dpte_group_bytes);
+ rq_sizing->min_meta_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing->mpte_group_bytes);
+ dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing->dpte_group_bytes);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param)
+void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_DLG_PARAM_ST\n");
dml_print(
"DML_RQ_DLG_CALC: swath_width_ub = %0d\n",
- rq_dlg_param.swath_width_ub);
+ rq_dlg_param->swath_width_ub);
dml_print(
"DML_RQ_DLG_CALC: swath_height = %0d\n",
- rq_dlg_param.swath_height);
+ rq_dlg_param->swath_height);
dml_print(
"DML_RQ_DLG_CALC: req_per_swath_ub = %0d\n",
- rq_dlg_param.req_per_swath_ub);
+ rq_dlg_param->req_per_swath_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_pte_bytes_per_frame_ub = %0d\n",
- rq_dlg_param.meta_pte_bytes_per_frame_ub);
+ rq_dlg_param->meta_pte_bytes_per_frame_ub);
dml_print(
"DML_RQ_DLG_CALC: dpte_req_per_row_ub = %0d\n",
- rq_dlg_param.dpte_req_per_row_ub);
+ rq_dlg_param->dpte_req_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: dpte_groups_per_row_ub = %0d\n",
- rq_dlg_param.dpte_groups_per_row_ub);
+ rq_dlg_param->dpte_groups_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: dpte_row_height = %0d\n",
- rq_dlg_param.dpte_row_height);
+ rq_dlg_param->dpte_row_height);
dml_print(
"DML_RQ_DLG_CALC: dpte_bytes_per_row_ub = %0d\n",
- rq_dlg_param.dpte_bytes_per_row_ub);
+ rq_dlg_param->dpte_bytes_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_chunks_per_row_ub = %0d\n",
- rq_dlg_param.meta_chunks_per_row_ub);
+ rq_dlg_param->meta_chunks_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_req_per_row_ub = %0d\n",
- rq_dlg_param.meta_req_per_row_ub);
+ rq_dlg_param->meta_req_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_row_height = %0d\n",
- rq_dlg_param.meta_row_height);
+ rq_dlg_param->meta_row_height);
dml_print(
"DML_RQ_DLG_CALC: meta_bytes_per_row_ub = %0d\n",
- rq_dlg_param.meta_bytes_per_row_ub);
+ rq_dlg_param->meta_bytes_per_row_ub);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param)
+void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_MISC_PARAM_ST\n");
dml_print(
"DML_RQ_DLG_CALC: full_swath_bytes = %0d\n",
- rq_misc_param.full_swath_bytes);
+ rq_misc_param->full_swath_bytes);
dml_print(
"DML_RQ_DLG_CALC: stored_swath_bytes = %0d\n",
- rq_misc_param.stored_swath_bytes);
- dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param.blk256_width);
- dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param.blk256_height);
- dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param.req_width);
- dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param.req_height);
+ rq_misc_param->stored_swath_bytes);
+ dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param->blk256_width);
+ dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param->blk256_height);
+ dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param->req_width);
+ dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param->req_height);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param)
+void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_l);
+ print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_c);
+ print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_c);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param)
+void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
- dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param.t_mclk_wm_us);
- dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param.t_urg_wm_us);
- dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param.t_sr_wm_us);
- dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param.t_extra_us);
+ dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param->t_mclk_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param->t_urg_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us);
dml_print(
"DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n",
- dlg_sys_param.t_srx_delay_us);
+ dlg_sys_param->t_srx_delay_us);
dml_print(
"DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n",
- dlg_sys_param.deepsleep_dcfclk_mhz);
+ dlg_sys_param->deepsleep_dcfclk_mhz);
dml_print(
"DML_RQ_DLG_CALC: total_flip_bw = %3.2f\n",
- dlg_sys_param.total_flip_bw);
+ dlg_sys_param->total_flip_bw);
dml_print(
"DML_RQ_DLG_CALC: total_flip_bytes = %i\n",
- dlg_sys_param.total_flip_bytes);
+ dlg_sys_param->total_flip_bytes);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st rq_regs)
+void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_REGS_ST\n");
- dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs.chunk_size);
- dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs.min_chunk_size);
- dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs.meta_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs->chunk_size);
+ dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs->min_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs->meta_chunk_size);
dml_print(
"DML_RQ_DLG_CALC: min_meta_chunk_size = 0x%0x\n",
- rq_regs.min_meta_chunk_size);
- dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs.dpte_group_size);
- dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs.mpte_group_size);
- dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs.swath_height);
+ rq_regs->min_meta_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs->dpte_group_size);
+ dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs->mpte_group_size);
+ dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs->swath_height);
dml_print(
"DML_RQ_DLG_CALC: pte_row_height_linear = 0x%0x\n",
- rq_regs.pte_row_height_linear);
+ rq_regs->pte_row_height_linear);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs)
+void print__rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_REGS_ST\n");
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_l);
+ print__data_rq_regs_st(mode_lib, &rq_regs->rq_regs_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_c);
- dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs.drq_expansion_mode);
- dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs.prq_expansion_mode);
- dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs.mrq_expansion_mode);
- dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs.crq_expansion_mode);
- dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs.plane1_base_address);
+ print__data_rq_regs_st(mode_lib, &rq_regs->rq_regs_c);
+ dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs->drq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs->prq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs->mrq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs->crq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs->plane1_base_address);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs)
+void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_REGS_ST\n");
dml_print(
"DML_RQ_DLG_CALC: refcyc_h_blank_end = 0x%0x\n",
- dlg_regs.refcyc_h_blank_end);
+ dlg_regs->refcyc_h_blank_end);
dml_print(
"DML_RQ_DLG_CALC: dlg_vblank_end = 0x%0x\n",
- dlg_regs.dlg_vblank_end);
+ dlg_regs->dlg_vblank_end);
dml_print(
"DML_RQ_DLG_CALC: min_dst_y_next_start = 0x%0x\n",
- dlg_regs.min_dst_y_next_start);
+ dlg_regs->min_dst_y_next_start);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_htotal = 0x%0x\n",
- dlg_regs.refcyc_per_htotal);
+ dlg_regs->refcyc_per_htotal);
dml_print(
"DML_RQ_DLG_CALC: refcyc_x_after_scaler = 0x%0x\n",
- dlg_regs.refcyc_x_after_scaler);
+ dlg_regs->refcyc_x_after_scaler);
dml_print(
"DML_RQ_DLG_CALC: dst_y_after_scaler = 0x%0x\n",
- dlg_regs.dst_y_after_scaler);
+ dlg_regs->dst_y_after_scaler);
dml_print(
"DML_RQ_DLG_CALC: dst_y_prefetch = 0x%0x\n",
- dlg_regs.dst_y_prefetch);
+ dlg_regs->dst_y_prefetch);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_vm_vblank = 0x%0x\n",
- dlg_regs.dst_y_per_vm_vblank);
+ dlg_regs->dst_y_per_vm_vblank);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_row_vblank = 0x%0x\n",
- dlg_regs.dst_y_per_row_vblank);
+ dlg_regs->dst_y_per_row_vblank);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_vm_flip = 0x%0x\n",
- dlg_regs.dst_y_per_vm_flip);
+ dlg_regs->dst_y_per_vm_flip);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_row_flip = 0x%0x\n",
- dlg_regs.dst_y_per_row_flip);
+ dlg_regs->dst_y_per_row_flip);
dml_print(
"DML_RQ_DLG_CALC: ref_freq_to_pix_freq = 0x%0x\n",
- dlg_regs.ref_freq_to_pix_freq);
+ dlg_regs->ref_freq_to_pix_freq);
dml_print(
"DML_RQ_DLG_CALC: vratio_prefetch = 0x%0x\n",
- dlg_regs.vratio_prefetch);
+ dlg_regs->vratio_prefetch);
dml_print(
"DML_RQ_DLG_CALC: vratio_prefetch_c = 0x%0x\n",
- dlg_regs.vratio_prefetch_c);
+ dlg_regs->vratio_prefetch_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_l = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_vblank_l);
+ dlg_regs->refcyc_per_pte_group_vblank_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_c = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_vblank_c);
+ dlg_regs->refcyc_per_pte_group_vblank_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_l = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_vblank_l);
+ dlg_regs->refcyc_per_meta_chunk_vblank_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_c = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_vblank_c);
+ dlg_regs->refcyc_per_meta_chunk_vblank_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_l = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_flip_l);
+ dlg_regs->refcyc_per_pte_group_flip_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_c = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_flip_c);
+ dlg_regs->refcyc_per_pte_group_flip_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_l = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_flip_l);
+ dlg_regs->refcyc_per_meta_chunk_flip_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_c = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_flip_c);
+ dlg_regs->refcyc_per_meta_chunk_flip_c);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_l = 0x%0x\n",
- dlg_regs.dst_y_per_pte_row_nom_l);
+ dlg_regs->dst_y_per_pte_row_nom_l);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_c = 0x%0x\n",
- dlg_regs.dst_y_per_pte_row_nom_c);
+ dlg_regs->dst_y_per_pte_row_nom_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_l = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_nom_l);
+ dlg_regs->refcyc_per_pte_group_nom_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_c = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_nom_c);
+ dlg_regs->refcyc_per_pte_group_nom_c);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_l = 0x%0x\n",
- dlg_regs.dst_y_per_meta_row_nom_l);
+ dlg_regs->dst_y_per_meta_row_nom_l);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_c = 0x%0x\n",
- dlg_regs.dst_y_per_meta_row_nom_c);
+ dlg_regs->dst_y_per_meta_row_nom_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_l = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_nom_l);
+ dlg_regs->refcyc_per_meta_chunk_nom_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_c = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_nom_c);
+ dlg_regs->refcyc_per_meta_chunk_nom_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_l = 0x%0x\n",
- dlg_regs.refcyc_per_line_delivery_pre_l);
+ dlg_regs->refcyc_per_line_delivery_pre_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_c = 0x%0x\n",
- dlg_regs.refcyc_per_line_delivery_pre_c);
+ dlg_regs->refcyc_per_line_delivery_pre_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_l = 0x%0x\n",
- dlg_regs.refcyc_per_line_delivery_l);
+ dlg_regs->refcyc_per_line_delivery_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_c = 0x%0x\n",
- dlg_regs.refcyc_per_line_delivery_c);
+ dlg_regs->refcyc_per_line_delivery_c);
dml_print(
"DML_RQ_DLG_CALC: chunk_hdl_adjust_cur0 = 0x%0x\n",
- dlg_regs.chunk_hdl_adjust_cur0);
+ dlg_regs->chunk_hdl_adjust_cur0);
dml_print(
"DML_RQ_DLG_CALC: dst_y_offset_cur1 = 0x%0x\n",
- dlg_regs.dst_y_offset_cur1);
+ dlg_regs->dst_y_offset_cur1);
dml_print(
"DML_RQ_DLG_CALC: chunk_hdl_adjust_cur1 = 0x%0x\n",
- dlg_regs.chunk_hdl_adjust_cur1);
+ dlg_regs->chunk_hdl_adjust_cur1);
dml_print(
"DML_RQ_DLG_CALC: vready_after_vcount0 = 0x%0x\n",
- dlg_regs.vready_after_vcount0);
+ dlg_regs->vready_after_vcount0);
dml_print(
"DML_RQ_DLG_CALC: dst_y_delta_drq_limit = 0x%0x\n",
- dlg_regs.dst_y_delta_drq_limit);
+ dlg_regs->dst_y_delta_drq_limit);
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_transfer_delay = 0x%0x\n",
- dlg_regs.xfc_reg_transfer_delay);
+ dlg_regs->xfc_reg_transfer_delay);
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_precharge_delay = 0x%0x\n",
- dlg_regs.xfc_reg_precharge_delay);
+ dlg_regs->xfc_reg_precharge_delay);
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n",
- dlg_regs.xfc_reg_remote_surface_flip_latency);
+ dlg_regs->xfc_reg_remote_surface_flip_latency);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_vm_dmdata = 0x%0x\n",
- dlg_regs.refcyc_per_vm_dmdata);
+ dlg_regs->refcyc_per_vm_dmdata);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs)
+void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_TTU_REGS_ST\n");
dml_print(
"DML_RQ_DLG_CALC: qos_level_low_wm = 0x%0x\n",
- ttu_regs.qos_level_low_wm);
+ ttu_regs->qos_level_low_wm);
dml_print(
"DML_RQ_DLG_CALC: qos_level_high_wm = 0x%0x\n",
- ttu_regs.qos_level_high_wm);
+ ttu_regs->qos_level_high_wm);
dml_print(
"DML_RQ_DLG_CALC: min_ttu_vblank = 0x%0x\n",
- ttu_regs.min_ttu_vblank);
+ ttu_regs->min_ttu_vblank);
dml_print(
"DML_RQ_DLG_CALC: qos_level_flip = 0x%0x\n",
- ttu_regs.qos_level_flip);
+ ttu_regs->qos_level_flip);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_l = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_pre_l);
+ ttu_regs->refcyc_per_req_delivery_pre_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_l = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_l);
+ ttu_regs->refcyc_per_req_delivery_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_c = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_pre_c);
+ ttu_regs->refcyc_per_req_delivery_pre_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_c = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_c);
+ ttu_regs->refcyc_per_req_delivery_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur0 = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_cur0);
+ ttu_regs->refcyc_per_req_delivery_cur0);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur0 = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_pre_cur0);
+ ttu_regs->refcyc_per_req_delivery_pre_cur0);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur1 = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_cur1);
+ ttu_regs->refcyc_per_req_delivery_cur1);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur1 = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_pre_cur1);
+ ttu_regs->refcyc_per_req_delivery_pre_cur1);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_l = 0x%0x\n",
- ttu_regs.qos_level_fixed_l);
+ ttu_regs->qos_level_fixed_l);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_l = 0x%0x\n",
- ttu_regs.qos_ramp_disable_l);
+ ttu_regs->qos_ramp_disable_l);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_c = 0x%0x\n",
- ttu_regs.qos_level_fixed_c);
+ ttu_regs->qos_level_fixed_c);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_c = 0x%0x\n",
- ttu_regs.qos_ramp_disable_c);
+ ttu_regs->qos_ramp_disable_c);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_cur0 = 0x%0x\n",
- ttu_regs.qos_level_fixed_cur0);
+ ttu_regs->qos_level_fixed_cur0);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_cur0 = 0x%0x\n",
- ttu_regs.qos_ramp_disable_cur0);
+ ttu_regs->qos_ramp_disable_cur0);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_cur1 = 0x%0x\n",
- ttu_regs.qos_level_fixed_cur1);
+ ttu_regs->qos_level_fixed_cur1);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_cur1 = 0x%0x\n",
- ttu_regs.qos_ramp_disable_cur1);
+ ttu_regs->qos_ramp_disable_cur1);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
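
Every print helper in this file now takes its DML struct by const pointer rather than by value; these structs run to dozens of fields, so the old signatures copied hundreds of bytes onto the stack per call. A minimal sketch of the pattern, with a hypothetical type standing in for the DML structs:

struct big_params { unsigned int field[256]; };	/* ~1 KiB, illustrative size */

/* before: every call copies the whole struct into the callee's frame */
void print_by_value(struct big_params p);

/* after: only a pointer crosses the call; const keeps it read-only */
void print_by_ref(const struct big_params *p);

On 8-16 KiB kernel stacks this is the usual motivation for such a conversion: deep DML call chains with by-value struct arguments are what trip -Wframe-larger-than= warnings.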
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
index 2555ef0358c2..ebcd717744e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -31,16 +31,16 @@
/* Function: Printer functions
* Print various structs
*/
-void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param);
-void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing);
-void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param);
-void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param);
-void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param);
-void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param);
+void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_params_st *rq_param);
+void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing);
+void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param);
+void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param);
+void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param);
+void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param);
-void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st data_rq_regs);
-void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs);
-void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs);
-void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs);
+void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs);
+void print__rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_regs_st *rq_regs);
+void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs);
+void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 8f2b1684c231..59dc2c5b58dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -206,47 +206,47 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
static void extract_rq_sizing_regs(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_data_rq_regs_st *rq_regs,
- const struct _vcs_dpi_display_data_rq_sizing_params_st rq_sizing)
+ const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing)
{
DTRACE("DLG: %s: rq_sizing param", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
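
The sizing registers hold log2-encoded sizes, not byte counts. With the defaults assigned later in get_surf_rq_param() the arithmetic works out as:

/* chunk_bytes      = 8192 -> chunk_size      = log2(8192) - 10 = 3  (units of 1 KiB) */
/* mpte_group_bytes = 2048 -> mpte_group_size = log2(2048) - 6  = 5  (units of 64 B)  */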
void dml1_extract_rq_regs(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
- const struct _vcs_dpi_display_rq_params_st rq_param)
+ const struct _vcs_dpi_display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
- if (rq_param.yuv420)
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
+ if (rq_param->yuv420)
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
/* TODO: take the max between luma, chroma chunk size?
* okay for now, as we are setting chunk_bytes to 8kb anyways
*/
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -255,9 +255,9 @@ void dml1_extract_rq_regs(
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes
- / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double) rq_param->misc.rq_l.stored_swath_bytes
+ / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); /* half to chroma */
} else {
detile_buf_plane1_addr = dml_round_to_multiple(
@@ -272,7 +272,7 @@ void dml1_extract_rq_regs(
static void handle_det_buf_split(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_params_st *rq_param,
- const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param)
+ const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -281,8 +281,8 @@ static void handle_det_buf_split(
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = 0;
bool req128_c = 0;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -556,7 +556,7 @@ static void get_surf_rq_param(
struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing_param,
struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param,
struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param,
- const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param,
+ const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
bool mode_422 = 0;
@@ -622,15 +622,15 @@ static void get_surf_rq_param(
/* TODO check if ppe apply for both luma and chroma in 422 case */
if (is_chroma) {
- vp_width = pipe_src_param.viewport_width_c / ppe;
- vp_height = pipe_src_param.viewport_height_c;
- data_pitch = pipe_src_param.data_pitch_c;
- meta_pitch = pipe_src_param.meta_pitch_c;
+ vp_width = pipe_src_param->viewport_width_c / ppe;
+ vp_height = pipe_src_param->viewport_height_c;
+ data_pitch = pipe_src_param->data_pitch_c;
+ meta_pitch = pipe_src_param->meta_pitch_c;
} else {
- vp_width = pipe_src_param.viewport_width / ppe;
- vp_height = pipe_src_param.viewport_height;
- data_pitch = pipe_src_param.data_pitch;
- meta_pitch = pipe_src_param.meta_pitch;
+ vp_width = pipe_src_param->viewport_width / ppe;
+ vp_height = pipe_src_param->viewport_height;
+ data_pitch = pipe_src_param->data_pitch;
+ meta_pitch = pipe_src_param->meta_pitch;
}
rq_sizing_param->chunk_bytes = 8192;
@@ -645,11 +645,11 @@ static void get_surf_rq_param(
rq_sizing_param->mpte_group_bytes = 2048;
- surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- surf_vert = (pipe_src_param.source_scan == dm_vert);
+ surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ surf_vert = (pipe_src_param->source_scan == dm_vert);
bytes_per_element = get_bytes_per_element(
- (enum source_format_class) pipe_src_param.source_format,
+ (enum source_format_class) pipe_src_param->source_format,
is_chroma);
log2_bytes_per_element = dml_log2(bytes_per_element);
blk256_width = 0;
@@ -671,7 +671,7 @@ static void get_surf_rq_param(
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes =
surf_linear ? 256 : get_blk_size_bytes(
- (enum source_macro_tile_size) pipe_src_param.macro_tile_size);
+ (enum source_macro_tile_size) pipe_src_param->macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
log2_blk_height = 0;
log2_blk_width = 0;
@@ -682,7 +682,7 @@ static void get_surf_rq_param(
* "/2" is like square root
* blk is vertical biased
*/
- if (pipe_src_param.sw_mode != dm_sw_linear)
+ if (pipe_src_param->sw_mode != dm_sw_linear)
log2_blk_height = log2_blk256_height
+ dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
else
@@ -930,10 +930,10 @@ static void get_surf_rq_param(
&func_meta_row_height,
vp_width,
data_pitch,
- pipe_src_param.source_format,
- pipe_src_param.sw_mode,
- pipe_src_param.macro_tile_size,
- pipe_src_param.source_scan,
+ pipe_src_param->source_format,
+ pipe_src_param->sw_mode,
+ pipe_src_param->macro_tile_size,
+ pipe_src_param->source_scan,
is_chroma);
/* Just a check to make sure this function and the new one give the same
@@ -960,12 +960,12 @@ static void get_surf_rq_param(
void dml1_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_params_st *rq_param,
- const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param)
+ const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param)
{
/* get param for luma surface */
- rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
- || pipe_src_param.source_format == dm_420_10;
- rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420 = pipe_src_param->source_format == dm_420_8
+ || pipe_src_param->source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10;
get_surf_rq_param(
mode_lib,
@@ -975,7 +975,7 @@ void dml1_rq_dlg_get_rq_params(
pipe_src_param,
0);
- if (is_dual_plane((enum source_format_class) pipe_src_param.source_format)) {
+ if (is_dual_plane((enum source_format_class) pipe_src_param->source_format)) {
/* get param for chroma surface */
get_surf_rq_param(
mode_lib,
@@ -988,7 +988,7 @@ void dml1_rq_dlg_get_rq_params(
/* calculate how to split the det buffer space between luma and chroma */
handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
- print__rq_params_st(mode_lib, *rq_param);
+ print__rq_params_st(mode_lib, rq_param);
}
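
With the new signature a caller hands over the address of its pipe source params instead of a struct copy; a hedged sketch of the call-site shape (mode_lib and pipe are assumed locals, not taken from this hunk):

struct _vcs_dpi_display_rq_params_st rq_param = {0};

dml1_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe->pipe.src);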
/* Note: currently taken in as is.
@@ -998,26 +998,26 @@ void dml1_rq_dlg_get_dlg_params(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_dlg_regs_st *disp_dlg_regs,
struct _vcs_dpi_display_ttu_regs_st *disp_ttu_regs,
- const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param,
- const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param,
- const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param,
+ const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param,
+ const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param,
+ const struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe_param,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool iflip_en)
{
/* Timing */
- unsigned int htotal = e2e_pipe_param.pipe.dest.htotal;
- unsigned int hblank_end = e2e_pipe_param.pipe.dest.hblank_end;
- unsigned int vblank_start = e2e_pipe_param.pipe.dest.vblank_start;
- unsigned int vblank_end = e2e_pipe_param.pipe.dest.vblank_end;
- bool interlaced = e2e_pipe_param.pipe.dest.interlaced;
+ unsigned int htotal = e2e_pipe_param->pipe.dest.htotal;
+ unsigned int hblank_end = e2e_pipe_param->pipe.dest.hblank_end;
+ unsigned int vblank_start = e2e_pipe_param->pipe.dest.vblank_start;
+ unsigned int vblank_end = e2e_pipe_param->pipe.dest.vblank_end;
+ bool interlaced = e2e_pipe_param->pipe.dest.interlaced;
unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
- double pclk_freq_in_mhz = e2e_pipe_param.pipe.dest.pixel_rate_mhz;
- double refclk_freq_in_mhz = e2e_pipe_param.clks_cfg.refclk_mhz;
- double dppclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dppclk_mhz;
- double dispclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dispclk_mhz;
+ double pclk_freq_in_mhz = e2e_pipe_param->pipe.dest.pixel_rate_mhz;
+ double refclk_freq_in_mhz = e2e_pipe_param->clks_cfg.refclk_mhz;
+ double dppclk_freq_in_mhz = e2e_pipe_param->clks_cfg.dppclk_mhz;
+ double dispclk_freq_in_mhz = e2e_pipe_param->clks_cfg.dispclk_mhz;
double ref_freq_to_pix_freq;
double prefetch_xy_calc_in_dcfclk;
@@ -1160,13 +1160,13 @@ void dml1_rq_dlg_get_dlg_params(
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */
- min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
- min_ttu_vblank = dlg_sys_param.t_urg_wm_us;
+ min_ttu_vblank = dlg_sys_param->t_urg_wm_us;
if (cstate_en)
- min_ttu_vblank = dml_max(dlg_sys_param.t_sr_wm_us, min_ttu_vblank);
+ min_ttu_vblank = dml_max(dlg_sys_param->t_sr_wm_us, min_ttu_vblank);
if (pstate_en)
- min_ttu_vblank = dml_max(dlg_sys_param.t_mclk_wm_us, min_ttu_vblank);
+ min_ttu_vblank = dml_max(dlg_sys_param->t_mclk_wm_us, min_ttu_vblank);
min_ttu_vblank = min_ttu_vblank + t_calc_us;
min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
@@ -1197,59 +1197,59 @@ void dml1_rq_dlg_get_dlg_params(
/* ------------------------- */
/* Prefetch Calc */
/* Source */
- dcc_en = e2e_pipe_param.pipe.src.dcc;
+ dcc_en = e2e_pipe_param->pipe.src.dcc;
dual_plane = is_dual_plane(
- (enum source_format_class) e2e_pipe_param.pipe.src.source_format);
+ (enum source_format_class) e2e_pipe_param->pipe.src.source_format);
mode_422 = 0; /* TODO */
- access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
+ access_dir = (e2e_pipe_param->pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
bytes_per_element_l = get_bytes_per_element(
- (enum source_format_class) e2e_pipe_param.pipe.src.source_format,
+ (enum source_format_class) e2e_pipe_param->pipe.src.source_format,
0);
bytes_per_element_c = get_bytes_per_element(
- (enum source_format_class) e2e_pipe_param.pipe.src.source_format,
+ (enum source_format_class) e2e_pipe_param->pipe.src.source_format,
1);
- vp_height_l = e2e_pipe_param.pipe.src.viewport_height;
- vp_width_l = e2e_pipe_param.pipe.src.viewport_width;
- vp_height_c = e2e_pipe_param.pipe.src.viewport_height_c;
- vp_width_c = e2e_pipe_param.pipe.src.viewport_width_c;
+ vp_height_l = e2e_pipe_param->pipe.src.viewport_height;
+ vp_width_l = e2e_pipe_param->pipe.src.viewport_width;
+ vp_height_c = e2e_pipe_param->pipe.src.viewport_height_c;
+ vp_width_c = e2e_pipe_param->pipe.src.viewport_width_c;
/* Scaling */
- htaps_l = e2e_pipe_param.pipe.scale_taps.htaps;
- htaps_c = e2e_pipe_param.pipe.scale_taps.htaps_c;
- hratios_l = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
- hratios_c = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio_c;
- vratio_l = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio;
- vratio_c = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio_c;
+ htaps_l = e2e_pipe_param->pipe.scale_taps.htaps;
+ htaps_c = e2e_pipe_param->pipe.scale_taps.htaps_c;
+ hratios_l = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio;
+ hratios_c = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio_c;
+ vratio_l = e2e_pipe_param->pipe.scale_ratio_depth.vscl_ratio;
+ vratio_c = e2e_pipe_param->pipe.scale_ratio_depth.vscl_ratio_c;
line_time_in_us = (htotal / pclk_freq_in_mhz);
- vinit_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit;
- vinit_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_c;
- vinit_bot_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot;
- vinit_bot_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot_c;
-
- swath_height_l = rq_dlg_param.rq_l.swath_height;
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
- dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
- meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
- meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
-
- swath_height_c = rq_dlg_param.rq_c.swath_height;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
-
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- vupdate_offset = e2e_pipe_param.pipe.dest.vupdate_offset;
- vupdate_width = e2e_pipe_param.pipe.dest.vupdate_width;
- vready_offset = e2e_pipe_param.pipe.dest.vready_offset;
+ vinit_l = e2e_pipe_param->pipe.scale_ratio_depth.vinit;
+ vinit_c = e2e_pipe_param->pipe.scale_ratio_depth.vinit_c;
+ vinit_bot_l = e2e_pipe_param->pipe.scale_ratio_depth.vinit_bot;
+ vinit_bot_c = e2e_pipe_param->pipe.scale_ratio_depth.vinit_bot_c;
+
+ swath_height_l = rq_dlg_param->rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
+ dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
+ meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub;
+ meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub;
+
+ swath_height_c = rq_dlg_param->rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
+ dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
+ vupdate_offset = e2e_pipe_param->pipe.dest.vupdate_offset;
+ vupdate_width = e2e_pipe_param->pipe.dest.vupdate_width;
+ vready_offset = e2e_pipe_param->pipe.dest.vready_offset;
dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
- vstartup_start = e2e_pipe_param.pipe.dest.vstartup_start;
+ vstartup_start = e2e_pipe_param->pipe.dest.vstartup_start;
if (interlaced)
vstartup_start = vstartup_start / 2;
@@ -1276,13 +1276,13 @@ void dml1_rq_dlg_get_dlg_params(
dst_x_after_scaler = 0;
dst_y_after_scaler = 0;
- if (e2e_pipe_param.pipe.src.is_hsplit)
+ if (e2e_pipe_param->pipe.src.is_hsplit)
dst_x_after_scaler = pixel_rate_delay_subtotal
- + e2e_pipe_param.pipe.dest.recout_width;
+ + e2e_pipe_param->pipe.dest.recout_width;
else
dst_x_after_scaler = pixel_rate_delay_subtotal;
- if (e2e_pipe_param.dout.output_format == dm_420)
+ if (e2e_pipe_param->dout.output_format == dm_420)
dst_y_after_scaler = 1;
else
dst_y_after_scaler = 0;
@@ -1334,7 +1334,7 @@ void dml1_rq_dlg_get_dlg_params(
DTRACE(
"DLG: %s: t_srx_delay_us = %3.2f",
__func__,
- (double) dlg_sys_param.t_srx_delay_us);
+ (double) dlg_sys_param->t_srx_delay_us);
DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us);
DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset);
DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width);
@@ -1408,12 +1408,12 @@ void dml1_rq_dlg_get_dlg_params(
DTRACE("DLG: %s: dpte_row_bytes = %d", __func__, dpte_row_bytes);
prefetch_bw = (vm_bytes + 2 * dpte_row_bytes + 2 * meta_row_bytes + sw_bytes) / t_pre_us;
- flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param.total_flip_bw)
- / (double) dlg_sys_param.total_flip_bytes;
+ flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param->total_flip_bw)
+ / (double) dlg_sys_param->total_flip_bytes;
t_vm_us = line_time_in_us / 4.0;
if (vm_en && dcc_en) {
t_vm_us = dml_max(
- dlg_sys_param.t_extra_us,
+ dlg_sys_param->t_extra_us,
dml_max((double) vm_bytes / prefetch_bw, t_vm_us));
if (iflip_en && !dual_plane) {
@@ -1423,12 +1423,12 @@ void dml1_rq_dlg_get_dlg_params(
}
}
- t_r0_us = dml_max(dlg_sys_param.t_extra_us - t_vm_us, line_time_in_us - t_vm_us);
+ t_r0_us = dml_max(dlg_sys_param->t_extra_us - t_vm_us, line_time_in_us - t_vm_us);
if (vm_en || dcc_en) {
t_r0_us = dml_max(
(double) (dpte_row_bytes + meta_row_bytes) / prefetch_bw,
- dlg_sys_param.t_extra_us);
+ dlg_sys_param->t_extra_us);
t_r0_us = dml_max((double) (line_time_in_us - t_vm_us), t_r0_us);
if (iflip_en && !dual_plane) {
@@ -1550,15 +1550,15 @@ void dml1_rq_dlg_get_dlg_params(
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;/* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
/* Active */
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+ dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
/ (double) vratio_l * dml_pow(2, 2));
@@ -1650,14 +1650,14 @@ void dml1_rq_dlg_get_dlg_params(
refcyc_per_req_delivery_cur0 = 0.;
full_recout_width = 0;
- if (e2e_pipe_param.pipe.src.is_hsplit) {
- if (e2e_pipe_param.pipe.dest.full_recout_width == 0) {
+ if (e2e_pipe_param->pipe.src.is_hsplit) {
+ if (e2e_pipe_param->pipe.dest.full_recout_width == 0) {
DTRACE("DLG: %s: Warningfull_recout_width not set in hsplit mode", __func__);
- full_recout_width = e2e_pipe_param.pipe.dest.recout_width * 2; /* assume half split for dcn1 */
+ full_recout_width = e2e_pipe_param->pipe.dest.recout_width * 2; /* assume half split for dcn1 */
} else
- full_recout_width = e2e_pipe_param.pipe.dest.full_recout_width;
+ full_recout_width = e2e_pipe_param->pipe.dest.full_recout_width;
} else
- full_recout_width = e2e_pipe_param.pipe.dest.recout_width;
+ full_recout_width = e2e_pipe_param->pipe.dest.recout_width;
refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
mode_lib,
@@ -1824,9 +1824,9 @@ void dml1_rq_dlg_get_dlg_params(
}
/* TTU - Cursor */
- hratios_cur0 = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
- cur0_src_width = e2e_pipe_param.pipe.src.cur0_src_width; /* cursor source width */
- cur0_bpp = (enum cursor_bpp) e2e_pipe_param.pipe.src.cur0_bpp;
+ hratios_cur0 = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio;
+ cur0_src_width = e2e_pipe_param->pipe.src.cur0_src_width; /* cursor source width */
+ cur0_bpp = (enum cursor_bpp) e2e_pipe_param->pipe.src.cur0_bpp;
cur0_req_size = 0;
cur0_req_width = 0;
cur0_width_ub = 0.0;
@@ -1927,6 +1927,6 @@ void dml1_rq_dlg_get_dlg_params(
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
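
Plugging hypothetical watermarks into the min_ttu_vblank logic above shows how the terms combine:

/* deepsleep_dcfclk_mhz = 8.0 -> t_calc_us = 24.0 / 8.0 = 3.0 us
 * t_urg_wm_us = 4.0, t_sr_wm_us = 9.0, t_mclk_wm_us = 11.0,
 * cstate_en = pstate_en = true -> min_ttu_vblank = max(4, 9, 11) + 3 = 14 us
 * pclk = 300 MHz, htotal = 3000 -> min_dst_y_ttu_vblank = 14 * 300 / 3000 = 1.4 lines
 */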
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
index 9c06913ad767..e19ee3bde45f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
@@ -33,7 +33,7 @@ struct display_mode_lib;
void dml1_extract_rq_regs(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
- const struct _vcs_dpi_display_rq_params_st rq_param);
+ const struct _vcs_dpi_display_rq_params_st *rq_param);
/* Function: dml_rq_dlg_get_rq_params
* Calculate requestor related parameters that are register definition agnostic
* (i.e. this layer does try to separate real values from register definition)
@@ -45,7 +45,7 @@ void dml1_extract_rq_regs(
void dml1_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_params_st *rq_param,
- const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param);
+ const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param);
/* Function: dml_rq_dlg_get_dlg_params
@@ -55,9 +55,9 @@ void dml1_rq_dlg_get_dlg_params(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
- const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param,
- const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param,
- const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param,
+ const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param,
+ const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param,
+ const struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe_param,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h
index e5fac9f4181d..e5fac9f4181d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
new file mode 100644
index 000000000000..3ee858f311d1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "rc_calc_fpu.h"
+
+#include "qp_tables.h"
+#include "amdgpu_dm/dc_fpu.h"
+
+#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min)
+
+#define MODE_SELECT(val444, val422, val420) \
+ (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420))
+
+
+#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \
+ table = qp_table_##mode##_##bpc##bpc_##max; \
+ table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \
+ break
+
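Each TABLE_CASE line compiles down to a plain integer case label; for example, with max aliased to DAL_MM_MAX = 1 by the local enum below, TABLE_CASE(444, 8, max) expands to:

/* case ((444 << 16) | (8 << 8) | 1):	// == 0x01bc0801
 *	table = qp_table_444_8bpc_max;
 *	table_size = sizeof(qp_table_444_8bpc_max) / sizeof(*qp_table_444_8bpc_max);
 *	break;
 */

so a single switch dispatches on the (colour mode, bit depth, min/max) triple packed into one int.
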
+static int median3(int a, int b, int c)
+{
+ if (a > b)
+ swap(a, b);
+ if (b > c)
+ swap(b, c);
+ if (a > b)
+ swap(a, b);
+
+ return b;
+}
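
Two concrete evaluations of the helper:

/* median3(1, 3, 2) -> 2,  median3(5, 1, 3) -> 3: the conditional swaps leave the middle value in b */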
+
+static double dsc_roundf(double num)
+{
+ if (num < 0.0)
+ num = num - 0.5;
+ else
+ num = num + 0.5;
+
+ return (int)(num);
+}
+
+static double dsc_ceil(double num)
+{
+ double retval = (int)num;
+
+ if (retval != num && num > 0)
+ retval = num + 1;
+
+ return (int)retval;
+}
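
On sample inputs the two float helpers behave as:

/* dsc_roundf(2.5) -> 3,  dsc_roundf(-2.5) -> -3   (round half away from zero)
 * dsc_ceil(2.25)  -> 3,  dsc_ceil(3.0)    -> 3    (exact values pass through)
 */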
+
+static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
+ enum max_min max_min, float bpp)
+{
+ int mode = MODE_SELECT(444, 422, 420);
+ int sel = table_hash(mode, bpc, max_min);
+ int table_size = 0;
+ int index;
+ const struct qp_entry *table = 0L;
+
+ // alias enum
+ enum { min = DAL_MM_MIN, max = DAL_MM_MAX };
+ switch (sel) {
+ TABLE_CASE(444, 8, max);
+ TABLE_CASE(444, 8, min);
+ TABLE_CASE(444, 10, max);
+ TABLE_CASE(444, 10, min);
+ TABLE_CASE(444, 12, max);
+ TABLE_CASE(444, 12, min);
+ TABLE_CASE(422, 8, max);
+ TABLE_CASE(422, 8, min);
+ TABLE_CASE(422, 10, max);
+ TABLE_CASE(422, 10, min);
+ TABLE_CASE(422, 12, max);
+ TABLE_CASE(422, 12, min);
+ TABLE_CASE(420, 8, max);
+ TABLE_CASE(420, 8, min);
+ TABLE_CASE(420, 10, max);
+ TABLE_CASE(420, 10, min);
+ TABLE_CASE(420, 12, max);
+ TABLE_CASE(420, 12, min);
+ }
+
+ if (table == 0)
+ return;
+
+ index = (bpp - table[0].bpp) * 2;
+
+ /* requested size is bigger than the table */
+ if (index >= table_size) {
+ dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n");
+ return;
+ }
+
+ memcpy(qps, table[index].qps, sizeof(qp_set));
+}
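
The qp tables are laid out in 0.5 bpp steps, which is what the * 2 in the index computation relies on. Assuming a table whose first entry sits at 6.0 bpp (the actual table contents live in qp_tables.h, not this hunk):

/* bpp = 8.0, table[0].bpp = 6.0 -> index = (8.0 - 6.0) * 2 = 4,
 * i.e. the fifth entry, which covers the 8.0 bpp operating point. */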
+
+static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
+{
+ int *p = ofs;
+
+ if (mode == CM_444 || mode == CM_RGB) {
+ *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
+ *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0))));
+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0))));
+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0))));
+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0))));
+ *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0))));
+ *p++ = -10;
+ *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0))));
+ *p++ = -12;
+ *p++ = -12;
+ *p++ = -12;
+ } else if (mode == CM_422) {
+ *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0))));
+ *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0))));
+ *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0))));
+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0))));
+ *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0))));
+ *p++ = -10;
+ *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1))));
+ *p++ = -12;
+ *p++ = -12;
+ *p++ = -12;
+ } else {
+ *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0))));
+ *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0))));
+ *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0))));
+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0))));
+ *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0))));
+ *p++ = -10;
+ *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0))));
+ *p++ = -12;
+ *p++ = -12;
+ *p++ = -12;
+ }
+}
+
+void _do_calc_rc_params(struct rc_params *rc,
+ enum colour_mode cm,
+ enum bits_per_comp bpc,
+ u16 drm_bpp,
+ bool is_navite_422_or_420,
+ int slice_width,
+ int slice_height,
+ int minor_version)
+{
+ float bpp;
+ float bpp_group;
+ float initial_xmit_delay_factor;
+ int padding_pixels;
+ int i;
+
+ dc_assert_fp_enabled();
+
+ bpp = ((float)drm_bpp / 16.0);
+ /* in native_422 or native_420 modes, the bits_per_pixel is double the
+ * target bpp (the latter is what calc_rc_params expects)
+ */
+ if (is_navite_422_or_420)
+ bpp /= 2.0;
+
+ rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+ rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+
+ bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0);
+
+ switch (cm) {
+ case CM_420:
+ rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584)))));
+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group)));
+ rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group)));
+ break;
+ case CM_422:
+ rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584))));
+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group)));
+ rc->second_line_bpg_offset = 0;
+ break;
+ case CM_444:
+ case CM_RGB:
+ rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2)))));
+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group)));
+ rc->second_line_bpg_offset = 0;
+ break;
+ }
+
+ initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0;
+ rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor);
+
+ if (cm == CM_422 || cm == CM_420)
+ slice_width /= 2;
+
+ padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0;
+ if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) {
+ if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1)
+ rc->initial_xmit_delay++;
+ }
+
+ rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+ rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+ rc->flatness_det_thresh = 2 << (bpc - 8);
+
+ get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp);
+ get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp);
+ if (cm == CM_444 && minor_version == 1) {
+ for (i = 0; i < QP_SET_SIZE; ++i) {
+ rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0;
+ rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0;
+ }
+ }
+ get_ofs_set(rc->ofs, cm, bpp);
+
+ /* fixed parameters */
+ rc->rc_model_size = 8192;
+ rc->rc_edge_factor = 6;
+ rc->rc_tgt_offset_hi = 3;
+ rc->rc_tgt_offset_lo = 3;
+
+ rc->rc_buf_thresh[0] = 896;
+ rc->rc_buf_thresh[1] = 1792;
+ rc->rc_buf_thresh[2] = 2688;
+ rc->rc_buf_thresh[3] = 3584;
+ rc->rc_buf_thresh[4] = 4480;
+ rc->rc_buf_thresh[5] = 5376;
+ rc->rc_buf_thresh[6] = 6272;
+ rc->rc_buf_thresh[7] = 6720;
+ rc->rc_buf_thresh[8] = 7168;
+ rc->rc_buf_thresh[9] = 7616;
+ rc->rc_buf_thresh[10] = 7744;
+ rc->rc_buf_thresh[11] = 7872;
+ rc->rc_buf_thresh[12] = 8000;
+ rc->rc_buf_thresh[13] = 8064;
+}
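
A worked pass with hypothetical inputs (drm_bpp = 192, BPC_8, CM_444, minor_version = 2, slice_height = 108) ties the pieces together:

/* bpp                     = 192 / 16.0 = 12.0
 * rc_quant_incr_limit0/1  = 11                    (8 bpc, no DSC 1.1 CM_444 adjustment)
 * initial_fullness_offset = 2048                  (bpp >= 12 branch)
 * first_line_bpg_offset   = median3(0, 12 + (int)(0.09 * 34), 72 - 36)
 *                         = median3(0, 15, 36) = 15
 * initial_xmit_delay      = dsc_roundf(8192 / 2 / 12.0 / 1.0) = 341
 * flatness_min_qp = 3, flatness_max_qp = 12, flatness_det_thresh = 2
 */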
+
+u32 _do_bytes_per_pixel_calc(int slice_width,
+ u16 drm_bpp,
+ bool is_navite_422_or_420)
+{
+ float bpp;
+ u32 bytes_per_pixel;
+ double d_bytes_per_pixel;
+
+ dc_assert_fp_enabled();
+
+ bpp = ((float)drm_bpp / 16.0);
+ d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
+ // TODO: Make sure the formula for calculating this is precise (ceiling
+ // vs. floor, and at what point they should be applied)
+ if (is_navite_422_or_420)
+ d_bytes_per_pixel /= 2;
+
+ bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
+
+ return bytes_per_pixel;
+}
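
The return value is a u4.28 fixed-point quantity, so 1.0 maps to 0x10000000; for example:

/* drm_bpp = 128 (8.0 bpp), slice_width = 1920:
 *   d_bytes_per_pixel = dsc_ceil(8.0 * 1920 / 8.0) / 1920 = 1.0
 *   bytes_per_pixel   = dsc_ceil(1.0 * 0x10000000)        = 0x10000000
 * and a native 4:2:2/4:2:0 stream halves that to 0x08000000.
 */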
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
new file mode 100644
index 000000000000..b93b95409fbe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __RC_CALC_FPU_H__
+#define __RC_CALC_FPU_H__
+
+#include "os_types.h"
+#include <drm/drm_dsc.h>
+
+#define QP_SET_SIZE 15
+
+typedef int qp_set[QP_SET_SIZE];
+
+struct rc_params {
+ int rc_quant_incr_limit0;
+ int rc_quant_incr_limit1;
+ int initial_fullness_offset;
+ int initial_xmit_delay;
+ int first_line_bpg_offset;
+ int second_line_bpg_offset;
+ int flatness_min_qp;
+ int flatness_max_qp;
+ int flatness_det_thresh;
+ qp_set qp_min;
+ qp_set qp_max;
+ qp_set ofs;
+ int rc_model_size;
+ int rc_edge_factor;
+ int rc_tgt_offset_hi;
+ int rc_tgt_offset_lo;
+ int rc_buf_thresh[QP_SET_SIZE - 1];
+};
+
+enum colour_mode {
+ CM_RGB, /* 444 RGB */
+ CM_444, /* 444 YUV or simple 422 */
+ CM_422, /* native 422 */
+ CM_420 /* native 420 */
+};
+
+enum bits_per_comp {
+ BPC_8 = 8,
+ BPC_10 = 10,
+ BPC_12 = 12
+};
+
+enum max_min {
+ DAL_MM_MIN = 0,
+ DAL_MM_MAX = 1
+};
+
+struct qp_entry {
+ float bpp;
+ const qp_set qps;
+};
+
+typedef struct qp_entry qp_table[];
+
+u32 _do_bytes_per_pixel_calc(int slice_width,
+ u16 drm_bpp,
+ bool is_navite_422_or_420);
+
+void _do_calc_rc_params(struct rc_params *rc,
+ enum colour_mode cm,
+ enum bits_per_comp bpc,
+ u16 drm_bpp,
+ bool is_navite_422_or_420,
+ int slice_width,
+ int slice_height,
+ int minor_version);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index 8d31eb75c6a6..a2537229ee88 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -1,35 +1,6 @@
# SPDX-License-Identifier: MIT
#
# Makefile for the 'dsc' sub-component of DAL.
-
-ifdef CONFIG_X86
-dsc_ccflags := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-dsc_ccflags := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-dsc_ccflags += -mpreferred-stack-boundary=4
-else
-dsc_ccflags += -msse2
-endif
-endif
-
-CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_rcflags)
-
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC))
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index f5b7da0e64c0..0321b4446e05 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -40,8 +40,15 @@ static bool dsc_policy_enable_dsc_when_not_needed;
static bool dsc_policy_disable_dsc_stream_overhead;
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
/* Forward Declarations */
-static void get_dsc_bandwidth_range(
+static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
const uint32_t num_slices_h,
@@ -76,11 +83,6 @@ static bool setup_dsc_config(
int max_dsc_target_bpp_limit_override_x16,
struct dc_dsc_config *dsc_cfg);
-static struct fixed31_32 compute_dsc_max_bandwidth_overhead(
- const struct dc_crtc_timing *timing,
- const int num_slices_h,
- const bool is_dp);
-
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
{
@@ -361,7 +363,7 @@ bool dc_dsc_compute_bandwidth_range(
dsc_min_slice_height_override, max_bpp_x16, &config);
if (is_dsc_possible)
- get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
+ is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
config.num_slices_h, &dsc_common_caps, timing, range);
return is_dsc_possible;
@@ -462,32 +464,6 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
return (value + 9) / 10;
}
-static struct fixed31_32 compute_dsc_max_bandwidth_overhead(
- const struct dc_crtc_timing *timing,
- const int num_slices_h,
- const bool is_dp)
-{
- struct fixed31_32 max_dsc_overhead;
- struct fixed31_32 refresh_rate;
-
- if (dsc_policy_disable_dsc_stream_overhead || !is_dp)
- return dc_fixpt_from_int(0);
-
- /* use target bpp that can take entire target bandwidth */
- refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz);
- refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total);
- refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total);
- refresh_rate = dc_fixpt_mul_int(refresh_rate, 100);
-
- max_dsc_overhead = dc_fixpt_from_int(num_slices_h);
- max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total);
- max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256);
- max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000);
- max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate);
-
- return max_dsc_overhead;
-}
-
static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bandwidth_in_kbps,
const struct dc_crtc_timing *timing,
@@ -495,14 +471,14 @@ static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bpp_increment_div,
const bool is_dp)
{
- struct fixed31_32 overhead_in_kbps;
+ uint32_t overhead_in_kbps;
struct fixed31_32 effective_bandwidth_in_kbps;
struct fixed31_32 bpp_x16;
- overhead_in_kbps = compute_dsc_max_bandwidth_overhead(
+ overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps(
timing, num_slices_h, is_dp);
effective_bandwidth_in_kbps = dc_fixpt_from_int(bandwidth_in_kbps);
- effective_bandwidth_in_kbps = dc_fixpt_sub(effective_bandwidth_in_kbps,
+ effective_bandwidth_in_kbps = dc_fixpt_sub_int(effective_bandwidth_in_kbps,
overhead_in_kbps);
bpp_x16 = dc_fixpt_mul_int(effective_bandwidth_in_kbps, 10);
bpp_x16 = dc_fixpt_div_int(bpp_x16, timing->pix_clk_100hz);
@@ -512,10 +488,12 @@ static uint32_t compute_bpp_x16_from_target_bandwidth(
return dc_fixpt_floor(bpp_x16);
}
-/* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, and timing's pixel clock
- * and uncompressed bandwidth.
+/* Decide the DSC bandwidth range based on signal-, timing- and spec-specific
+ * requirements, and on the explicitly requested min and max bpp.
+ * The output range includes the decided min/max target bpp, the corresponding
+ * bandwidth requirements, and the native timing bandwidth requirement when
+ * DSC is not used.
*/
-static void get_dsc_bandwidth_range(
+static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
const uint32_t num_slices_h,
@@ -523,39 +501,45 @@ static void get_dsc_bandwidth_range(
const struct dc_crtc_timing *timing,
struct dc_dsc_bw_range *range)
{
- /* native stream bandwidth */
- range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
-
- /* max dsc target bpp */
- range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
- max_bpp_x16, num_slices_h, dsc_caps->is_dp);
- range->max_target_bpp_x16 = max_bpp_x16;
- if (range->max_kbps > range->stream_kbps) {
- /* max dsc target bpp is capped to native bandwidth */
- range->max_kbps = range->stream_kbps;
- range->max_target_bpp_x16 = compute_bpp_x16_from_target_bandwidth(
- range->max_kbps, timing, num_slices_h,
- dsc_caps->bpp_increment_div,
- dsc_caps->is_dp);
+ uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
+
+ memset(range, 0, sizeof(*range));
+
+ /* apply signal, timing, specs and explicitly specified DSC range requirements */
+ if (preferred_bpp_x16) {
+ if (preferred_bpp_x16 <= max_bpp_x16 &&
+ preferred_bpp_x16 >= min_bpp_x16) {
+ range->max_target_bpp_x16 = preferred_bpp_x16;
+ range->min_target_bpp_x16 = preferred_bpp_x16;
+ }
}
+ else {
+ range->max_target_bpp_x16 = max_bpp_x16;
+ range->min_target_bpp_x16 = min_bpp_x16;
+ }
+
+ /* populate output structure */
+ if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) {
+ /* native stream bandwidth */
+ range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+
+ /* max dsc target bpp */
+ range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
+ range->max_target_bpp_x16, num_slices_h, dsc_caps->is_dp);
- /* min dsc target bpp */
- range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
- min_bpp_x16, num_slices_h, dsc_caps->is_dp);
- range->min_target_bpp_x16 = min_bpp_x16;
- if (range->min_kbps > range->max_kbps) {
- /* min dsc target bpp is capped to max dsc bandwidth*/
- range->min_kbps = range->max_kbps;
- range->min_target_bpp_x16 = range->max_target_bpp_x16;
+ /* min dsc target bpp */
+ range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
+ range->min_target_bpp_x16, num_slices_h, dsc_caps->is_dp);
}
+
+ return range->max_kbps >= range->min_kbps && range->min_kbps > 0;
}
/* Decides if DSC should be used and calculates target bpp if it should, applying DSC policy.
*
* Returns:
- * - 'true' if DSC was required by policy and was successfully applied
- * - 'false' if DSC was not necessary (e.g. if uncompressed stream fits 'target_bandwidth_kbps'),
- * or if it couldn't be applied based on DSC policy.
+ * - 'true' if a target bpp was decided
+ * - 'false' if a target bpp cannot be decided (e.g. the stream cannot fit even with min DSC bpp).
*/
static bool decide_dsc_target_bpp_x16(
const struct dc_dsc_policy *policy,
@@ -565,40 +549,29 @@ static bool decide_dsc_target_bpp_x16(
const int num_slices_h,
int *target_bpp_x16)
{
- bool should_use_dsc = false;
struct dc_dsc_bw_range range;
- memset(&range, 0, sizeof(range));
-
- get_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
- num_slices_h, dsc_common_caps, timing, &range);
- if (!policy->enable_dsc_when_not_needed && target_bandwidth_kbps >= range.stream_kbps) {
- /* enough bandwidth without dsc */
- *target_bpp_x16 = 0;
- should_use_dsc = false;
- } else if (policy->preferred_bpp_x16 > 0 &&
- policy->preferred_bpp_x16 <= range.max_target_bpp_x16 &&
- policy->preferred_bpp_x16 >= range.min_target_bpp_x16) {
- *target_bpp_x16 = policy->preferred_bpp_x16;
- should_use_dsc = true;
- } else if (target_bandwidth_kbps >= range.max_kbps) {
- /* use max target bpp allowed */
- *target_bpp_x16 = range.max_target_bpp_x16;
- should_use_dsc = true;
- } else if (target_bandwidth_kbps >= range.min_kbps) {
- /* use target bpp that can take entire target bandwidth */
- *target_bpp_x16 = compute_bpp_x16_from_target_bandwidth(
- target_bandwidth_kbps, timing, num_slices_h,
- dsc_common_caps->bpp_increment_div,
- dsc_common_caps->is_dp);
- should_use_dsc = true;
- } else {
- /* not enough bandwidth to fulfill minimum requirement */
- *target_bpp_x16 = 0;
- should_use_dsc = false;
+ *target_bpp_x16 = 0;
+
+ if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
+ num_slices_h, dsc_common_caps, timing, &range)) {
+ if (target_bandwidth_kbps >= range.stream_kbps) {
+ if (policy->enable_dsc_when_not_needed)
+ /* enable max bpp even if dsc is not needed */
+ *target_bpp_x16 = range.max_target_bpp_x16;
+ } else if (target_bandwidth_kbps >= range.max_kbps) {
+ /* use max target bpp allowed */
+ *target_bpp_x16 = range.max_target_bpp_x16;
+ } else if (target_bandwidth_kbps >= range.min_kbps) {
+ /* use target bpp that can take entire target bandwidth */
+ *target_bpp_x16 = compute_bpp_x16_from_target_bandwidth(
+ target_bandwidth_kbps, timing, num_slices_h,
+ dsc_common_caps->bpp_increment_div,
+ dsc_common_caps->is_dp);
+ }
}
- return should_use_dsc;
+ return *target_bpp_x16 != 0;
}
#define MIN_AVAILABLE_SLICES_SIZE 4
@@ -994,19 +967,45 @@ bool dc_dsc_compute_config(
uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp)
{
- struct fixed31_32 overhead_in_kbps;
+ uint32_t overhead_in_kbps;
struct fixed31_32 bpp;
struct fixed31_32 actual_bandwidth_in_kbps;
- overhead_in_kbps = compute_dsc_max_bandwidth_overhead(
+ overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps(
timing, num_slices_h, is_dp);
bpp = dc_fixpt_from_fraction(bpp_x16, 16);
actual_bandwidth_in_kbps = dc_fixpt_from_fraction(timing->pix_clk_100hz, 10);
actual_bandwidth_in_kbps = dc_fixpt_mul(actual_bandwidth_in_kbps, bpp);
- actual_bandwidth_in_kbps = dc_fixpt_add(actual_bandwidth_in_kbps, overhead_in_kbps);
+ actual_bandwidth_in_kbps = dc_fixpt_add_int(actual_bandwidth_in_kbps, overhead_in_kbps);
return dc_fixpt_ceil(actual_bandwidth_in_kbps);
}
+uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
+ const struct dc_crtc_timing *timing,
+ const int num_slices_h,
+ const bool is_dp)
+{
+ struct fixed31_32 max_dsc_overhead;
+ struct fixed31_32 refresh_rate;
+
+ if (dsc_policy_disable_dsc_stream_overhead || !is_dp)
+ return 0;
+
+ /* derive refresh rate from pixel clock and h/v totals */
+ refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz);
+ refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total);
+ refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total);
+ refresh_rate = dc_fixpt_mul_int(refresh_rate, 100);
+
+ max_dsc_overhead = dc_fixpt_from_int(num_slices_h);
+ max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total);
+ max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256);
+ max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000);
+ max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate);
+
+ return dc_fixpt_ceil(max_dsc_overhead);
+}
+
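
[Editor's note] Worked example for the overhead formula above, i.e. num_slices_h * v_total * 256 / 1000 * refresh_rate, presumably 256 bits of per-slice, per-line DSC overhead converted to kbps. The timing values below are hypothetical (a 4k60-class mode):

#include <stdio.h>

int main(void)
{
	unsigned int h_total = 4400, v_total = 2250;
	unsigned int pix_clk_100hz = 5940000;	/* 594 MHz */
	unsigned int num_slices_h = 4;

	unsigned int refresh_hz = pix_clk_100hz / h_total * 100 / v_total;	/* 60 */
	unsigned int overhead_kbps = num_slices_h * v_total * 256 / 1000 * refresh_hz;

	/* prints: refresh=60 Hz overhead=138240 kbps (~138 Mbps) */
	printf("refresh=%u Hz overhead=%u kbps\n", refresh_hz, overhead_kbps);
	return 0;
}
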
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
struct dc_dsc_policy *policy)
@@ -1064,8 +1063,6 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
return;
}
- policy->preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
-
/* internal upper limit, default 16 bpp */
if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit)
policy->max_target_bpp = dsc_policy_max_target_bpp_limit;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index 7b294f637881..b19d3aeb5962 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -23,266 +23,7 @@
* Authors: AMD
*
*/
-#include <drm/drm_dsc.h>
-
-#include "os_types.h"
#include "rc_calc.h"
-#include "qp_tables.h"
-
-#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min)
-
-#define MODE_SELECT(val444, val422, val420) \
- (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420))
-
-
-#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \
- table = qp_table_##mode##_##bpc##bpc_##max; \
- table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \
- break
-
-
-static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
- enum max_min max_min, float bpp)
-{
- int mode = MODE_SELECT(444, 422, 420);
- int sel = table_hash(mode, bpc, max_min);
- int table_size = 0;
- int index;
- const struct qp_entry *table = 0L;
-
- // alias enum
- enum { min = DAL_MM_MIN, max = DAL_MM_MAX };
- switch (sel) {
- TABLE_CASE(444, 8, max);
- TABLE_CASE(444, 8, min);
- TABLE_CASE(444, 10, max);
- TABLE_CASE(444, 10, min);
- TABLE_CASE(444, 12, max);
- TABLE_CASE(444, 12, min);
- TABLE_CASE(422, 8, max);
- TABLE_CASE(422, 8, min);
- TABLE_CASE(422, 10, max);
- TABLE_CASE(422, 10, min);
- TABLE_CASE(422, 12, max);
- TABLE_CASE(422, 12, min);
- TABLE_CASE(420, 8, max);
- TABLE_CASE(420, 8, min);
- TABLE_CASE(420, 10, max);
- TABLE_CASE(420, 10, min);
- TABLE_CASE(420, 12, max);
- TABLE_CASE(420, 12, min);
- }
-
- if (table == 0)
- return;
-
- index = (bpp - table[0].bpp) * 2;
-
- /* requested size is bigger than the table */
- if (index >= table_size) {
- dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n");
- return;
- }
-
- memcpy(qps, table[index].qps, sizeof(qp_set));
-}
-
-static double dsc_roundf(double num)
-{
- if (num < 0.0)
- num = num - 0.5;
- else
- num = num + 0.5;
-
- return (int)(num);
-}
-
-static double dsc_ceil(double num)
-{
- double retval = (int)num;
-
- if (retval != num && num > 0)
- retval = num + 1;
-
- return (int)retval;
-}
-
-static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
-{
- int *p = ofs;
-
- if (mode == CM_444 || mode == CM_RGB) {
- *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
- *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
- *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
- *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
- *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
- *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0))));
- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0))));
- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0))));
- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0))));
- *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0))));
- *p++ = -10;
- *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0))));
- *p++ = -12;
- *p++ = -12;
- *p++ = -12;
- } else if (mode == CM_422) {
- *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0))));
- *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0))));
- *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0))));
- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0))));
- *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0))));
- *p++ = -10;
- *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1))));
- *p++ = -12;
- *p++ = -12;
- *p++ = -12;
- } else {
- *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0))));
- *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0))));
- *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0))));
- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0))));
- *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0))));
- *p++ = -10;
- *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0))));
- *p++ = -12;
- *p++ = -12;
- *p++ = -12;
- }
-}
-
-static int median3(int a, int b, int c)
-{
- if (a > b)
- swap(a, b);
- if (b > c)
- swap(b, c);
- if (a > b)
- swap(b, c);
-
- return b;
-}
-
-static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm,
- enum bits_per_comp bpc, u16 drm_bpp,
- bool is_navite_422_or_420,
- int slice_width, int slice_height,
- int minor_version)
-{
- float bpp;
- float bpp_group;
- float initial_xmit_delay_factor;
- int padding_pixels;
- int i;
-
- bpp = ((float)drm_bpp / 16.0);
- /* in native_422 or native_420 modes, the bits_per_pixel is double the
- * target bpp (the latter is what calc_rc_params expects)
- */
- if (is_navite_422_or_420)
- bpp /= 2.0;
-
- rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
- rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
-
- bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0);
-
- switch (cm) {
- case CM_420:
- rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584)))));
- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group)));
- rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group)));
- break;
- case CM_422:
- rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584))));
- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group)));
- rc->second_line_bpg_offset = 0;
- break;
- case CM_444:
- case CM_RGB:
- rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2)))));
- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group)));
- rc->second_line_bpg_offset = 0;
- break;
- }
-
- initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0;
- rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor);
-
- if (cm == CM_422 || cm == CM_420)
- slice_width /= 2;
-
- padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0;
- if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) {
- if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1)
- rc->initial_xmit_delay++;
- }
-
- rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
- rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
- rc->flatness_det_thresh = 2 << (bpc - 8);
-
- get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp);
- get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp);
- if (cm == CM_444 && minor_version == 1) {
- for (i = 0; i < QP_SET_SIZE; ++i) {
- rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0;
- rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0;
- }
- }
- get_ofs_set(rc->ofs, cm, bpp);
-
- /* fixed parameters */
- rc->rc_model_size = 8192;
- rc->rc_edge_factor = 6;
- rc->rc_tgt_offset_hi = 3;
- rc->rc_tgt_offset_lo = 3;
-
- rc->rc_buf_thresh[0] = 896;
- rc->rc_buf_thresh[1] = 1792;
- rc->rc_buf_thresh[2] = 2688;
- rc->rc_buf_thresh[3] = 3584;
- rc->rc_buf_thresh[4] = 4480;
- rc->rc_buf_thresh[5] = 5376;
- rc->rc_buf_thresh[6] = 6272;
- rc->rc_buf_thresh[7] = 6720;
- rc->rc_buf_thresh[8] = 7168;
- rc->rc_buf_thresh[9] = 7616;
- rc->rc_buf_thresh[10] = 7744;
- rc->rc_buf_thresh[11] = 7872;
- rc->rc_buf_thresh[12] = 8000;
- rc->rc_buf_thresh[13] = 8064;
-}
-
-static u32 _do_bytes_per_pixel_calc(int slice_width, u16 drm_bpp,
- bool is_navite_422_or_420)
-{
- float bpp;
- u32 bytes_per_pixel;
- double d_bytes_per_pixel;
-
- bpp = ((float)drm_bpp / 16.0);
- d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
- // TODO: Make sure the formula for calculating this is precise (ceiling
- // vs. floor, and at what point they should be applied)
- if (is_navite_422_or_420)
- d_bytes_per_pixel /= 2;
-
- bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
-
- return bytes_per_pixel;
-}
/**
* calc_rc_params - reads the user's cmdline mode
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index 262f06afcbf9..c2340e001b57 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -27,55 +27,7 @@
#ifndef __RC_CALC_H__
#define __RC_CALC_H__
-
-#define QP_SET_SIZE 15
-
-typedef int qp_set[QP_SET_SIZE];
-
-struct rc_params {
- int rc_quant_incr_limit0;
- int rc_quant_incr_limit1;
- int initial_fullness_offset;
- int initial_xmit_delay;
- int first_line_bpg_offset;
- int second_line_bpg_offset;
- int flatness_min_qp;
- int flatness_max_qp;
- int flatness_det_thresh;
- qp_set qp_min;
- qp_set qp_max;
- qp_set ofs;
- int rc_model_size;
- int rc_edge_factor;
- int rc_tgt_offset_hi;
- int rc_tgt_offset_lo;
- int rc_buf_thresh[QP_SET_SIZE - 1];
-};
-
-enum colour_mode {
- CM_RGB, /* 444 RGB */
- CM_444, /* 444 YUV or simple 422 */
- CM_422, /* native 422 */
- CM_420 /* native 420 */
-};
-
-enum bits_per_comp {
- BPC_8 = 8,
- BPC_10 = 10,
- BPC_12 = 12
-};
-
-enum max_min {
- DAL_MM_MIN = 0,
- DAL_MM_MAX = 1
-};
-
-struct qp_entry {
- float bpp;
- const qp_set qps;
-};
-
-typedef struct qp_entry qp_table[];
+#include "dml/dsc/rc_calc_fpu.h"
void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps);
u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index ef830aded5b1..1e19dd674e5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#include "os_types.h"
#include <drm/drm_dsc.h>
#include "dscc_types.h"
#include "rc_calc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index c5c840a06050..5029d4e42dbf 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -105,6 +105,7 @@ bool dal_hw_factory_init(
case DCN_VERSION_2_0:
dal_hw_factory_dcn20_init(factory);
return true;
+ case DCN_VERSION_2_01:
case DCN_VERSION_2_1:
dal_hw_factory_dcn21_init(factory);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 4a9848308766..904bd30bed68 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -100,6 +100,7 @@ bool dal_hw_translate_init(
case DCN_VERSION_2_0:
dal_hw_translate_dcn20_init(translate);
return true;
+ case DCN_VERSION_2_01:
case DCN_VERSION_2_1:
dal_hw_translate_dcn21_init(translate);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 45a6216dfa2a..6fc6488c54c0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -62,6 +62,7 @@ struct link_init_data {
uint32_t connector_index; /* this will be mapped to the HPD pins */
uint32_t link_index; /* this is mapped to DAL display_index
TODO: remove it when DC is complete. */
+ bool is_dpia_link;
};
struct dc_link *link_create(const struct link_init_data *init_params);
@@ -245,8 +246,16 @@ struct resource_pool {
* entries in link_encoders array.
*/
unsigned int dig_link_enc_count;
+ /* Number of USB4 DPIA (DisplayPort Input Adapter) link objects created. */
+ unsigned int usb4_dpia_count;
#if defined(CONFIG_DRM_AMD_DC_DCN)
+ unsigned int hpo_dp_stream_enc_count;
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc[MAX_HPO_DP2_ENCODERS];
+ unsigned int hpo_dp_link_enc_count;
+ struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS];
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_3dlut *mpc_lut[MAX_PIPES];
struct dc_transfer_func *mpc_shaper[MAX_PIPES];
#endif
@@ -298,6 +307,9 @@ struct stream_resource {
struct display_stream_compressor *dsc;
struct timing_generator *tg;
struct stream_encoder *stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
+#endif
struct audio *audio;
struct pixel_clk_params pix_clk_params;
@@ -366,6 +378,9 @@ struct pipe_ctx {
struct _vcs_dpi_display_ttu_regs_st ttu_regs;
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
+ struct _vcs_dpi_display_rq_params_st dml_rq_param;
+ struct _vcs_dpi_display_dlg_sys_params_st dml_dlg_sys_param;
+ struct _vcs_dpi_display_e2e_pipe_params_st dml_input;
int det_buffer_size_kb;
bool unbounded_req;
#endif
@@ -375,6 +390,17 @@ struct pipe_ctx {
bool vtp_locked;
};
+/* Data used for dynamic link encoder assignment.
+ * Tracks current and future assignments; available link encoders;
+ * and mode of operation (whether to use current or future assignments).
+ */
+struct link_enc_cfg_context {
+ enum link_enc_cfg_mode mode;
+ struct link_enc_assignment link_enc_assignments[MAX_PIPES];
+ enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
+ struct link_enc_assignment transient_assignments[MAX_PIPES];
+};
+
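
[Editor's note] A sketch (not from the patch) of how a lookup helper might honour the mode field, selecting the transient table while a new state is being committed; the helper name is hypothetical:

static const struct link_enc_assignment *
get_assignment(const struct link_enc_cfg_context *ctx, int i)
{
	/* transient_assignments reflect the state about to be committed */
	return ctx->mode == LINK_ENC_CFG_TRANSIENT ?
		&ctx->transient_assignments[i] :
		&ctx->link_enc_assignments[i];
}
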
struct resource_context {
struct pipe_ctx pipe_ctx[MAX_PIPES];
bool is_stream_enc_acquired[MAX_PIPES * 2];
@@ -382,12 +408,10 @@ struct resource_context {
uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
uint8_t dp_clock_source_ref_count;
bool is_dsc_acquired[MAX_PIPES];
- /* A table/array of encoder-to-link assignments. One entry per stream.
- * Indexed by stream index in dc_state.
- */
- struct link_enc_assignment link_enc_assignments[MAX_PIPES];
- /* List of available link encoders. Uses engine ID as encoder identifier. */
- enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
+ struct link_enc_cfg_context link_enc_cfg_ctx;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
+#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool is_mpc_3dlut_acquired[MAX_PIPES];
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 4d7b271b6409..95fb61d62778 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -69,6 +69,7 @@ struct ddc_service_init_data {
struct graphics_object_id id;
struct dc_context *ctx;
struct dc_link *link;
+ bool is_dpia_link;
};
struct ddc_service *dal_ddc_service_create(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 01c3a31be191..a6d3d859754a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -30,6 +30,7 @@
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
+#define MAX_MTP_SLOT_COUNT 64
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#define TRAINING_AUX_RD_INTERVAL 100 //us
@@ -120,12 +121,12 @@ enum dc_status dpcd_set_lane_settings(
const struct link_training_settings *link_training_setting,
uint32_t offset);
/* Read training status and adjustment requests from DPCD. */
-enum dc_status dp_get_lane_status_and_drive_settings(
+enum dc_status dp_get_lane_status_and_lane_adjust(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
- union lane_status *ln_status,
- union lane_align_status_updated *ln_status_updated,
- struct link_training_settings *req_settings,
+ union lane_status ln_status[LANE_COUNT_DP_MAX],
+ union lane_align_status_updated *ln_align,
+ union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
uint32_t offset);
void dp_wait_for_training_aux_rd_interval(
@@ -146,10 +147,15 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status);
bool dp_is_max_vs_reached(
const struct link_training_settings *lt_settings);
-
-void dp_update_drive_settings(
- struct link_training_settings *dest,
- struct link_training_settings src);
+void dp_hw_to_dpcd_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval);
@@ -165,7 +171,7 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
-bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
+bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update);
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
@@ -189,5 +195,26 @@ enum dc_status dpcd_configure_lttpr_mode(
struct link_training_settings *lt_settings);
enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
+bool dpcd_write_128b_132b_sst_payload_allocation_table(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ struct link_mst_stream_allocation_table *proposed_table,
+ bool allocate);
+
+enum dc_status dpcd_configure_channel_coding(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
+bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link);
+
+struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link);
+void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings);
+void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal);
+void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
+bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
+void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link);
+
bool dp_retrieve_lttpr_cap(struct dc_link *link);
#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
new file mode 100644
index 000000000000..974d703e3771
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DPIA_H__
+#define __DC_LINK_DPIA_H__
+
+/* This module implements functionality for training DPIA links. */
+
+struct dc_link;
+struct dc_link_settings;
+
+/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */
+#define DPIA_CLK_SYNC_DELAY 16000
+
+/* Extend interval between training status checks for manual testing. */
+#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000
+
+/** @note Remove once the DP tunneling registers are defined in upstream include/drm/drm_dp_helper.h */
+/* DPCD DP Tunneling over USB4 */
+#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d
+#define DP_IN_ADAPTER_INFO 0xe000e
+#define DP_USB4_DRIVER_ID 0xe000f
+#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b
+
+/* SET_CONFIG message types sent by driver. */
+enum dpia_set_config_type {
+ DPIA_SET_CFG_SET_LINK = 0x01,
+ DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05,
+ DPIA_SET_CFG_SET_TRAINING = 0x18,
+ DPIA_SET_CFG_SET_VSPE = 0x19
+};
+
+/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */
+enum dpia_set_config_ts {
+ DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */
+ DPIA_TS_TPS1 = 0x01,
+ DPIA_TS_TPS2 = 0x02,
+ DPIA_TS_TPS3 = 0x03,
+ DPIA_TS_TPS4 = 0x07,
+ DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */
+};
+
+/* SET_CONFIG message data associated with messages sent by driver. */
+union dpia_set_config_data {
+ struct {
+ uint8_t mode : 1;
+ uint8_t reserved : 7;
+ } set_link;
+ struct {
+ uint8_t stage;
+ } set_training;
+ struct {
+ uint8_t swing : 2;
+ uint8_t max_swing_reached : 1;
+ uint8_t pre_emph : 2;
+ uint8_t max_pre_emph_reached : 1;
+ uint8_t reserved : 2;
+ } set_vspe;
+ uint8_t raw;
+};
+
+/* Read tunneling device capability from DPCD and update link capability
+ * accordingly.
+ */
+enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
+
+/* Train DP tunneling link for USB4 DPIA display endpoint.
+ * DPIA equivalent of dc_link_dp_perform_link_training.
+ * Aborts link training upon detection of sink unplug.
+ */
+enum link_training_result
+dc_link_dpia_perform_link_training(struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+#endif /* __DC_LINK_DPIA_H__ */
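
[Editor's note] Illustrative packing of the one-byte SET_CONFIG payloads defined above. This assumes the union and enum declarations from the header are in scope; the chosen values are hypothetical:

#include <stdio.h>

int main(void)
{
	union dpia_set_config_data data = { .raw = 0 };

	/* SET_TRAINING: advance the DPIA to TPS2 */
	data.set_training.stage = DPIA_TS_TPS2;
	printf("SET_TRAINING payload: 0x%02x\n", data.raw);	/* 0x02 */

	/* SET_VSPE: swing level 2, max swing reached, pre-emphasis 1 */
	data.raw = 0;
	data.set_vspe.swing = 2;
	data.set_vspe.max_swing_reached = 1;
	data.set_vspe.pre_emph = 1;
	printf("SET_VSPE payload: 0x%02x\n", data.raw);		/* 0x16 */
	return 0;
}
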
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index 142753644377..ecb4191b6e64 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -54,6 +54,7 @@ struct abm_funcs {
const char *src,
unsigned int bytes,
unsigned int inst);
+ bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index a262f3278c21..1391c20f1852 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -102,6 +102,11 @@ enum dentist_divider_range {
.MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \
.MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67
+#define CLK_COMMON_REG_LIST_DCN_201() \
+ SR(DENTIST_DISPCLK_CNTL), \
+ CLK_SRI(CLK4_CLK_PLL_REQ, CLK4, 0), \
+ CLK_SRI(CLK4_CLK2_CURRENT_CNT, CLK4, 0)
+
#define CLK_REG_LIST_NV10() \
SR(DENTIST_DISPCLK_CNTL), \
CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \
@@ -144,6 +149,12 @@ enum dentist_divider_range {
CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_int, mask_sh),\
CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_frac, mask_sh)
+#define CLK_COMMON_MASK_SH_LIST_DCN201_BASE(mask_sh) \
+ CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, mask_sh),\
+ CLK_SF(CLK4_0_CLK4_CLK_PLL_REQ, FbMult_int, mask_sh)
+
#define CLK_REG_FIELD_LIST(type) \
type DPREFCLK_SRC_SEL; \
type DENTIST_DPREFCLK_WDIVIDER; \
@@ -179,6 +190,8 @@ struct clk_mgr_mask {
struct clk_mgr_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t CLK4_CLK2_CURRENT_CNT;
+ uint32_t CLK4_CLK_PLL_REQ;
uint32_t CLK3_CLK2_DFS_CNTL;
uint32_t CLK3_CLK_PLL_REQ;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 0afa2364a986..c940fdfda144 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -79,7 +79,30 @@ struct dccg_funcs {
void (*otg_drop_pixel)(struct dccg *dccg,
uint32_t otg_inst);
void (*dccg_init)(struct dccg *dccg);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ void (*set_dpstreamclk)(
+ struct dccg *dccg,
+ enum hdmistreamclk_source src,
+ int otg_inst);
+
+ void (*enable_symclk32_se)(
+ struct dccg *dccg,
+ int hpo_se_inst,
+ enum phyd32clk_clock_source phyd32clk);
+ void (*disable_symclk32_se)(
+ struct dccg *dccg,
+ int hpo_se_inst);
+
+ void (*enable_symclk32_le)(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ enum phyd32clk_clock_source phyd32clk);
+
+ void (*disable_symclk32_le)(
+ struct dccg *dccg,
+ int hpo_le_inst);
+#endif
void (*set_physymclk)(
struct dccg *dccg,
int phy_inst,
@@ -100,6 +123,15 @@ struct dccg_funcs {
void (*set_dispclk_change_mode)(
struct dccg *dccg,
enum dentist_dispclk_change_mode change_mode);
+
+ void (*disable_dsc)(
+ struct dccg *dccg,
+ int inst);
+
+ void (*enable_dsc)(
+ struct dccg *dccg,
+ int inst);
+
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 00fc81431b43..3ef7faa92052 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -29,6 +29,17 @@
#include "transform.h"
+union defer_reg_writes {
+ struct {
+ bool disable_blnd_lut:1;
+ bool disable_3dlut:1;
+ bool disable_shaper:1;
+ bool disable_gamcor:1;
+ bool disable_dscl:1;
+ } bits;
+ uint32_t raw;
+};
+
struct dpp {
const struct dpp_funcs *funcs;
struct dc_context *ctx;
@@ -43,6 +54,7 @@ struct dpp {
struct pwl_params regamma_params;
struct pwl_params degamma_params;
struct dpp_cursor_attributes cur_attr;
+ union defer_reg_writes deferred_reg_writes;
struct pwl_params shaper_params;
bool cm_bypass_mode;
@@ -245,6 +257,8 @@ struct dpp_funcs {
bool dppclk_div,
bool enable);
+ void (*dpp_deferred_update)(
+ struct dpp *dpp);
bool (*dpp_program_blnd_lut)(
struct dpp *dpp,
const struct pwl_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index ec28cb9c3a8e..fd6572ba3fb2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -171,10 +171,9 @@ struct dwbc {
bool dwb_is_efc_transition;
bool dwb_is_drc;
int wb_src_plane_inst;/*hubp, mpcc, inst*/
- bool update_privacymask;
uint32_t mask_id;
- int otg_inst;
- bool mvc_cfg;
+ int otg_inst;
+ bool mvc_cfg;
};
struct dwbc_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 31a1713bb49f..10ecbc667ffa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -38,6 +38,10 @@
#define MAX_PIPES 6
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#define MAX_HPO_DP2_ENCODERS 4
+#define MAX_HPO_DP2_LINK_ENCODERS 2
+#endif
struct gamma_curve {
uint32_t offset;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 9eaf345aa2a1..bb0e91756ddd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -59,6 +59,10 @@ struct encoder_feature_support {
uint32_t IS_TPS3_CAPABLE:1;
uint32_t IS_TPS4_CAPABLE:1;
uint32_t HDMI_6GB_EN:1;
+ uint32_t IS_DP2_CAPABLE:1;
+ uint32_t IS_UHBR10_CAPABLE:1;
+ uint32_t IS_UHBR13_5_CAPABLE:1;
+ uint32_t IS_UHBR20_CAPABLE:1;
uint32_t DP_IS_USB_C:1;
} bits;
uint32_t raw;
@@ -208,6 +212,99 @@ struct link_enc_assignment {
bool valid;
struct display_endpoint_id ep_id;
enum engine_id eng_id;
+ struct dc_stream_state *stream;
};
+enum link_enc_cfg_mode {
+ LINK_ENC_CFG_STEADY, /* Normal operation - use current_state. */
+ LINK_ENC_CFG_TRANSIENT /* During commit state - use state to be committed. */
+};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dp2_link_mode {
+ DP2_LINK_TRAINING_TPS1,
+ DP2_LINK_TRAINING_TPS2,
+ DP2_LINK_ACTIVE,
+ DP2_TEST_PATTERN
+};
+
+enum dp2_phy_tp_select {
+ DP_DPHY_TP_SELECT_TPS1,
+ DP_DPHY_TP_SELECT_TPS2,
+ DP_DPHY_TP_SELECT_PRBS,
+ DP_DPHY_TP_SELECT_CUSTOM,
+ DP_DPHY_TP_SELECT_SQUARE
+};
+
+enum dp2_phy_tp_prbs {
+ DP_DPHY_TP_PRBS7,
+ DP_DPHY_TP_PRBS9,
+ DP_DPHY_TP_PRBS11,
+ DP_DPHY_TP_PRBS15,
+ DP_DPHY_TP_PRBS23,
+ DP_DPHY_TP_PRBS31
+};
+
+struct hpo_dp_link_enc_state {
+ uint32_t link_enc_enabled;
+ uint32_t link_mode;
+ uint32_t lane_count;
+ uint32_t slot_count[4];
+ uint32_t stream_src[4];
+ uint32_t vc_rate_x[4];
+ uint32_t vc_rate_y[4];
+};
+
+struct hpo_dp_link_encoder {
+ const struct hpo_dp_link_encoder_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+ enum engine_id preferred_engine;
+ enum transmitter transmitter;
+ enum hpd_source_id hpd_source;
+};
+
+struct hpo_dp_link_encoder_funcs {
+
+ void (*enable_link_phy)(struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum transmitter transmitter);
+
+ void (*disable_link_phy)(struct hpo_dp_link_encoder *link_enc,
+ enum signal_type signal);
+
+ void (*link_enable)(
+ struct hpo_dp_link_encoder *enc,
+ enum dc_lane_count num_lanes);
+
+ void (*link_disable)(
+ struct hpo_dp_link_encoder *enc);
+
+ void (*set_link_test_pattern)(
+ struct hpo_dp_link_encoder *enc,
+ struct encoder_set_dp_phy_pattern_param *tp_params);
+
+ void (*update_stream_allocation_table)(
+ struct hpo_dp_link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+
+ void (*set_throttled_vcp_size)(
+ struct hpo_dp_link_encoder *enc,
+ uint32_t stream_encoder_inst,
+ struct fixed31_32 avg_time_slots_per_mtp);
+
+ bool (*is_in_alt_mode) (
+ struct hpo_dp_link_encoder *enc);
+
+ void (*read_state)(
+ struct hpo_dp_link_encoder *enc,
+ struct hpo_dp_link_enc_state *state);
+
+ void (*set_ffe)(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t ffe_preset);
+};
+#endif
+
#endif /* LINK_ENCODER_H_ */
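
[Editor's note] An illustrative call-through of the new HPO DP link encoder function table, written as the kind of defensive wrapper DC uses elsewhere; the helper name is hypothetical:

static void hpo_set_link_test_pattern(struct hpo_dp_link_encoder *enc,
				      struct encoder_set_dp_phy_pattern_param *tp_params)
{
	if (enc && enc->funcs->set_link_test_pattern)
		enc->funcs->set_link_test_pattern(enc, tp_params);
}
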
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 640bb432bd6a..04d6ec3f021f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -281,6 +281,7 @@ struct mpc_funcs {
struct mpcc* (*get_mpcc_for_dpp_from_secondary)(
struct mpc_tree *tree,
int dpp_id);
+
struct mpcc* (*get_mpcc_for_dpp)(
struct mpc_tree *tree,
int dpp_id);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 564ea6a727b0..c88e113b94d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -165,9 +165,11 @@ struct stream_encoder_funcs {
struct stream_encoder *enc);
void (*dp_blank)(
+ struct dc_link *link,
struct stream_encoder *enc);
void (*dp_unblank)(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param);
@@ -227,7 +229,8 @@ struct stream_encoder_funcs {
void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc,
bool enable,
- uint8_t *dsc_packed_pps);
+ uint8_t *dsc_packed_pps,
+ bool immediate_update);
void (*set_dynamic_metadata)(struct stream_encoder *enc,
bool enable,
@@ -242,4 +245,86 @@ struct stream_encoder_funcs {
struct stream_encoder *enc);
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct hpo_dp_stream_encoder_state {
+ uint32_t stream_enc_enabled;
+ uint32_t vid_stream_enabled;
+ uint32_t otg_inst;
+ uint32_t pixel_encoding;
+ uint32_t component_depth;
+ uint32_t compressed_format;
+ uint32_t sdp_enabled;
+ uint32_t mapped_to_link_enc;
+};
+
+struct hpo_dp_stream_encoder {
+ const struct hpo_dp_stream_encoder_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_bios *bp;
+ uint32_t inst;
+ enum engine_id id;
+ struct vpg *vpg;
+ struct apg *apg;
+};
+
+struct hpo_dp_stream_encoder_funcs {
+ void (*enable_stream)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*dp_unblank)(
+ struct hpo_dp_stream_encoder *enc,
+ uint32_t stream_source);
+
+ void (*dp_blank)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*disable)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*set_stream_attribute)(
+ struct hpo_dp_stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
+ bool compressed_format,
+ bool double_buffer_en);
+
+ void (*update_dp_info_packets)(
+ struct hpo_dp_stream_encoder *enc,
+ const struct encoder_info_frame *info_frame);
+
+ void (*stop_dp_info_packets)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*dp_set_dsc_pps_info_packet)(
+ struct hpo_dp_stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps,
+ bool immediate_update);
+
+ void (*map_stream_to_link)(
+ struct hpo_dp_stream_encoder *enc,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst);
+
+ void (*audio_mute_control)(
+ struct hpo_dp_stream_encoder *enc, bool mute);
+
+ void (*dp_audio_setup)(
+ struct hpo_dp_stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info);
+
+ void (*dp_audio_enable)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*dp_audio_disable)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*read_state)(
+ struct hpo_dp_stream_encoder *enc,
+ struct hpo_dp_stream_encoder_state *state);
+};
+#endif
+
#endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 03f47f23fb65..7390baf916b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -100,6 +100,9 @@ enum crc_selection {
enum otg_out_mux_dest {
OUT_MUX_DIO = 0,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ OUT_MUX_HPO_DP = 2,
+#endif
};
enum h_timing_div_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index ad5f2adcc40d..d50f4bd06b5d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -236,7 +236,7 @@ struct hw_sequencer_funcs {
const struct tg_color *solid_color,
int width, int height, int offset);
- void (*z10_restore)(struct dc *dc);
+ void (*z10_restore)(const struct dc *dc);
void (*z10_save_init)(struct dc *dc);
void (*update_visual_confirm_color)(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index f7f7e4fff0c2..f324285394be 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -41,6 +41,9 @@ struct dce_hwseq_wa {
bool DEGVIDCN10_254;
bool DEGVIDCN21;
bool disallow_self_refresh_during_multi_plane_transition;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool dp_hpo_and_otg_sequence;
+#endif
};
struct hwseq_wa_state {
@@ -151,6 +154,10 @@ struct dce_hwseq {
struct hwseq_wa_state wa_state;
struct hwseq_private_funcs funcs;
+ PHYSICAL_ADDRESS_LOC fb_base;
+ PHYSICAL_ADDRESS_LOC fb_top;
+ PHYSICAL_ADDRESS_LOC fb_offset;
+ PHYSICAL_ADDRESS_LOC uma_top;
};
#endif /* __DC_HW_SEQUENCER_PRIVATE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
index 883dd8733ea4..10dcf6a5e9b1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
@@ -70,22 +70,36 @@ void link_enc_cfg_link_enc_unassign(
* endpoint.
*/
bool link_enc_cfg_is_transmitter_mappable(
- struct dc_state *state,
+ struct dc *dc,
struct link_encoder *link_enc);
+/* Return stream using DIG link encoder resource. NULL if unused. */
+struct dc_stream_state *link_enc_cfg_get_stream_using_link_enc(
+ struct dc *dc,
+ enum engine_id eng_id);
+
/* Return link using DIG link encoder resource. NULL if unused. */
struct dc_link *link_enc_cfg_get_link_using_link_enc(
- struct dc_state *state,
+ struct dc *dc,
enum engine_id eng_id);
/* Return DIG link encoder used by link. NULL if unused. */
struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
- struct dc_state *state,
+ struct dc *dc,
const struct dc_link *link);
/* Return next available DIG link encoder. NULL if none available. */
-struct link_encoder *link_enc_cfg_get_next_avail_link_enc(
- const struct dc *dc,
- const struct dc_state *state);
+struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc);
+
+/* Return DIG link encoder used by stream. NULL if unused. */
+struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
+ struct dc *dc,
+ const struct dc_stream_state *stream);
+
+/* Return true if encoder available to use. */
+bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link);
+
+/* Returns true if encoder assignments in supplied state pass validity checks. */
+bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state);
#endif /* DC_INC_LINK_ENC_CFG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index fc1d289bb9fe..ba664bc49595 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -37,6 +37,7 @@ void dp_enable_link_phy(
const struct dc_link_settings *link_settings);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
void edp_add_delay_for_T9(struct dc_link *link);
bool edp_receiver_ready_T9(struct dc_link *link);
bool edp_receiver_ready_T7(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index fe1e5833c96a..372c0898facd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -49,6 +49,11 @@ struct resource_caps {
int num_vmid;
int num_dsc;
unsigned int num_dig_link_enc; // Total number of DIGs (digital encoders) in DIO (Display Input/Output).
+ unsigned int num_usb4_dpia; // Total number of USB4 DPIA (DisplayPort Input Adapters).
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ int num_hpo_dp_stream_encoder;
+ int num_hpo_dp_link_encoder;
+#endif
int num_mpc_3dlut;
};
@@ -68,6 +73,15 @@ struct resource_create_funcs {
struct stream_encoder *(*create_stream_encoder)(
enum engine_id eng_id, struct dc_context *ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_stream_encoder *(*create_hpo_dp_stream_encoder)(
+ enum engine_id eng_id, struct dc_context *ctx);
+
+ struct hpo_dp_link_encoder *(*create_hpo_dp_link_encoder)(
+ uint8_t inst,
+ struct dc_context *ctx);
+#endif
+
struct dce_hwseq *(*create_hwseq)(
struct dc_context *ctx);
};
@@ -187,4 +201,9 @@ int get_num_mpc_splits(struct pipe_ctx *pipe);
int get_num_odm_splits(struct pipe_ctx *pipe);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
+ const struct resource_pool *pool);
+#endif
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 0d09181227c5..fd739aecf104 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -93,6 +93,16 @@ IRQ_DCN21 = irq_service_dcn21.o
AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN21)
+
+###############################################################################
+# DCN 201
+###############################################################################
+IRQ_DCN201 = irq_service_dcn201.o
+
+AMD_DAL_IRQ_DCN201 = $(addprefix $(AMDDALPATH)/dc/irq/dcn201/,$(IRQ_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN201)
+
###############################################################################
# DCN 30
###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index c4b067d01895..9ccafe007b23 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -132,6 +132,31 @@ enum dc_irq_source to_dal_irq_source_dcn20(
}
}
+uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source)
+{
+ const struct irq_source_info *info;
+ uint32_t addr;
+ uint32_t value;
+ uint32_t current_status;
+
+ info = find_irq_source_info(irq_service, source);
+ if (!info)
+ return 0;
+
+ addr = info->status_reg;
+ if (!addr)
+ return 0;
+
+ value = dm_read_reg(irq_service->ctx, addr);
+ current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE);
+
+ return current_status;
+}
+
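
[Editor's note] A hypothetical caller sketch for the new helper, e.g. checking raw HPD sense on resume before trusting cached sink state:

static bool is_sink_present(struct irq_service *irq_service,
			    enum dc_irq_source hpd_source)
{
	/* non-zero DC_HPD_SENSE means a sink is physically connected */
	return dc_get_hpd_state_dcn20(irq_service, hpd_source) != 0;
}
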
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
index aee4b37999f1..4d69ab24ca25 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
@@ -31,4 +31,6 @@
struct irq_service *dal_irq_service_dcn20_create(
struct irq_service_init_data *init_data);
+uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
new file mode 100644
index 000000000000..a47f68634fc3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "../dce110/irq_service_dce110.h"
+
+#include "dcn/dcn_2_0_3_offset.h"
+#include "dcn/dcn_2_0_3_sh_mask.h"
+
+#include "cyan_skillfish_ip_offset.h"
+#include "soc15_hw_ip.h"
+
+#include "irq_service_dcn201.h"
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+enum dc_irq_source to_dal_irq_source_dcn201(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC1_VLINE0;
+ case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC2_VLINE0;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ return DC_IRQ_SOURCE_INVALID;
+}
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
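+	/* Flip the HPD interrupt polarity so the opposite transition
+	 * (connect vs. disconnect) raises the next interrupt.
+	 */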
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vline0_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#undef BASE_INNER
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+/* Compile-time expansion of the register base address. */
+#define BASE(seg) BASE_INNER(seg)
+
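+/* SRI() token-pastes the block name, instance and register name into the full register offset. */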
+#define SRI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI(reg1, block, reg_num),\
+ .enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI(reg2, block, reg_num),\
+ .ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ IRQ_REG_ENTRY(HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
+ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
+ }
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vline0_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
+ .funcs = &vline0_irq_info_funcs\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dcn201[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ vline0_int_entry(0),
+ vline0_int_entry(1),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dcn201 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn201
+};
+
+static void dcn201_irq_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn201;
+ irq_service->funcs = &irq_service_funcs_dcn201;
+}
+
+struct irq_service *dal_irq_service_dcn201_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ dcn201_irq_construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h
new file mode 100644
index 000000000000..8e27c5e219a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCN201_H__
+#define __DAL_IRQ_SERVICE_DCN201_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn201_create(
+ struct irq_service_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index ed54e1c819be..78940cb20e10 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -135,6 +135,31 @@ enum dc_irq_source to_dal_irq_source_dcn21(
return DC_IRQ_SOURCE_INVALID;
}
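+/* Return the raw HPD sense bit for the given source, or 0 if it has no status register. */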
+uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source)
+{
+ const struct irq_source_info *info;
+ uint32_t addr;
+ uint32_t value;
+ uint32_t current_status;
+
+ info = find_irq_source_info(irq_service, source);
+ if (!info)
+ return 0;
+
+ addr = info->status_reg;
+ if (!addr)
+ return 0;
+
+ value = dm_read_reg(irq_service->ctx, addr);
+ current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE);
+
+ return current_status;
+}
+
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
index da2bd0e93d7a..616470e32380 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
@@ -31,4 +31,6 @@
struct irq_service *dal_irq_service_dcn21_create(
struct irq_service_init_data *init_data);
+uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index a2a4fbeb83f8..4db1133e4466 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)
*irq_service = NULL;
}
-static const struct irq_source_info *find_irq_source_info(
+const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
index dbfcb096eedd..e60b82480093 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -69,6 +69,10 @@ struct irq_service {
const struct irq_service_funcs *funcs;
};
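+/* Now non-static so the dcn20/dcn21 HPD state helpers can look up per-source register info. */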
+const struct irq_source_info *find_irq_source_info(
+ struct irq_service *irq_service,
+ enum dc_irq_source source);
+
void dal_irq_service_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data);
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index f50cae252de4..5df1d80c8341 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -31,10 +31,12 @@
#include <linux/kref.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <asm/byteorder.h>
#include <drm/drm_print.h>
+#include <drm/drm_dp_helper.h>
#include "cgs_common.h"
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index 1053b165c139..1e39aae6b1cf 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -69,9 +69,11 @@ static void virtual_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc) {}
static void virtual_stream_encoder_dp_blank(
+ struct dc_link *link,
struct stream_encoder *enc) {}
static void virtual_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param) {}
@@ -102,7 +104,8 @@ static void virtual_setup_stereo_sync(
static void virtual_stream_encoder_set_dsc_pps_info_packet(
struct stream_encoder *enc,
bool enable,
- uint8_t *dsc_packed_pps)
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
{}
static const struct stream_encoder_funcs virtual_str_enc_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index caf961bb633f..717c0e572d2f 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -84,6 +84,7 @@ enum dmub_status {
DMUB_STATUS_QUEUE_FULL,
DMUB_STATUS_TIMEOUT,
DMUB_STATUS_INVALID,
+ DMUB_STATUS_HW_FAILURE,
};
/* enum dmub_asic - dmub asic identifier */
@@ -96,6 +97,7 @@ enum dmub_asic {
DMUB_ASIC_DCN302,
DMUB_ASIC_DCN303,
DMUB_ASIC_DCN31,
+ DMUB_ASIC_DCN31B,
DMUB_ASIC_MAX,
};
@@ -118,6 +120,7 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_AUX_REPLY,
DMUB_NOTIFICATION_HPD,
DMUB_NOTIFICATION_HPD_IRQ,
+ DMUB_NOTIFICATION_SET_CONFIG_REPLY,
DMUB_NOTIFICATION_MAX
};
@@ -235,6 +238,8 @@ struct dmub_srv_hw_params {
bool load_inst_const;
bool skip_panel_power_sequence;
bool disable_z10;
+ bool dpia_supported;
+ bool disable_dpia;
};
/**
@@ -358,6 +363,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*get_current_time)(struct dmub_srv *dmub);
void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
+
+ bool (*should_detect)(struct dmub_srv *dmub);
};
/**
@@ -437,6 +444,7 @@ struct dmub_notification {
union {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
+ enum set_config_status sc_status;
};
};
@@ -724,6 +732,8 @@ bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entr
bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+bool dmub_srv_should_detect(struct dmub_srv *dmub);
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 7efe9ba8706e..0293c58f0701 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -46,10 +46,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x7383caadc
+#define DMUB_FW_VERSION_GIT_HASH 0x9525efb5
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 79
+#define DMUB_FW_VERSION_REVISION 90
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -368,10 +368,15 @@ union dmub_fw_boot_options {
uint32_t disable_clk_gate: 1; /**< 1 if clock gating should be disabled */
uint32_t skip_phy_init_panel_sequence: 1; /**< 1 to skip panel init seq */
uint32_t z10_disable: 1; /**< 1 to disable z10 */
- uint32_t reserved2: 1; /**< reserved for an unreleased feature */
- uint32_t reserved_unreleased1: 1; /**< reserved for an unreleased feature */
+ uint32_t enable_dpia: 1; /**< 1 if DPIA should be enabled */
uint32_t invalid_vbios_data: 1; /**< 1 if VBIOS data table is invalid */
- uint32_t reserved : 23; /**< reserved */
+ uint32_t dpia_supported: 1; /**< 1 if DPIA is supported on this platform */
+ uint32_t sel_mux_phy_c_d_phy_f_g: 1; /**< 1 if PHYF/PHYG should be enabled */
+		uint32_t power_optimization: 1; /**< 1 if all root clock gating and low-power memory are enabled */
+		uint32_t diag_env: 1; /**< 1 if running in a diagnostic environment */
+
+ uint32_t reserved : 19; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -653,6 +658,10 @@ enum dmub_cmd_type {
*/
DMUB_CMD__PANEL_CNTL = 74,
/**
+ * Command type used for interfacing with DPIA.
+ */
+ DMUB_CMD__DPIA = 77,
+ /**
* Command type used for EDID CEA parsing
*/
DMUB_CMD__EDID_CEA = 79,
@@ -674,6 +683,21 @@ enum dmub_out_cmd_type {
* Command type used for DP AUX Reply data notification
*/
DMUB_OUT_CMD__DP_AUX_REPLY = 1,
+ /**
+ * Command type used for DP HPD event notification
+ */
+ DMUB_OUT_CMD__DP_HPD_NOTIFY = 2,
+ /**
+ * Command type used for SET_CONFIG Reply notification
+ */
+ DMUB_OUT_CMD__SET_CONFIG_REPLY = 3,
+};
+
+/* DMUB_CMD__DPIA command sub-types. */
+enum dmub_cmd_dpia_type {
+ DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0,
+ DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1,
+ DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
};
#pragma pack(push, 1)
@@ -973,7 +997,7 @@ struct dmub_dig_transmitter_control_data_v1_7 {
uint8_t hpdsel; /**< =1: HPD1, =2: HPD2, ..., =6: HPD6, =0: HPD is not assigned */
uint8_t digfe_sel; /**< DIG front-end selection, bit0 means DIG0 FE is enabled */
uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */
- uint8_t reserved0; /**< For future use */
+ uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */
uint8_t reserved1; /**< For future use */
uint8_t reserved2[3]; /**< For future use */
uint32_t reserved3[11]; /**< For future use */
@@ -996,6 +1020,77 @@ struct dmub_rb_cmd_dig1_transmitter_control {
};
/**
+ * DPIA tunnel command parameters.
+ */
+struct dmub_cmd_dig_dpia_control_data {
+ uint8_t enc_id; /** 0 = ENGINE_ID_DIGA, ... */
+ uint8_t action; /** ATOM_TRANSMITER_ACTION_DISABLE/ENABLE/SETUP_VSEMPH */
+ union {
+ uint8_t digmode; /** enum atom_encode_mode_def */
+ uint8_t dplaneset; /** DP voltage swing and pre-emphasis value */
+ } mode_laneset;
+ uint8_t lanenum; /** Lane number 1, 2, 4, 8 */
+	uint32_t symclk_10khz; /** Symbol clock in 10 kHz */
+	uint8_t hpdsel; /** =0: HPD is not assigned */
+	uint8_t digfe_sel; /** DIG stream (front-end) selection, bit0 - DIG0 FE */
+ uint8_t dpia_id; /** Index of DPIA */
+ uint8_t fec_rdy : 1;
+ uint8_t reserved : 7;
+ uint32_t reserved1;
+};
+
+/**
+ * DMUB command for DPIA tunnel control.
+ */
+struct dmub_rb_cmd_dig1_dpia_control {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_dig_dpia_control_data dpia_control;
+};
+
+/**
+ * SET_CONFIG Command Payload
+ */
+struct set_config_cmd_payload {
+ uint8_t msg_type; /* set config message type */
+ uint8_t msg_data; /* set config message data */
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command.
+ */
+struct dmub_cmd_set_config_control_data {
+ struct set_config_cmd_payload cmd_pkt;
+ uint8_t instance; /* DPIA instance */
+ uint8_t immed_status; /* Immediate status returned in case of error */
+};
+
+/**
+ * DMUB command structure for SET_CONFIG command.
+ */
+struct dmub_rb_cmd_set_config_access {
+ struct dmub_cmd_header header; /* header */
+ struct dmub_cmd_set_config_control_data set_config_control; /* set config data */
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command.
+ */
+struct dmub_cmd_mst_alloc_slots_control_data {
+ uint8_t mst_alloc_slots; /* mst slots to be allotted */
+ uint8_t instance; /* DPIA instance */
+ uint8_t immed_status; /* Immediate status returned as there is no outbox msg posted */
+ uint8_t mst_slots_in_use; /* returns slots in use for error cases */
+};
+
+/**
+ * DMUB command structure for DPIA MST_ALLOC_SLOTS command.
+ */
+struct dmub_rb_cmd_set_mst_alloc_slots {
+ struct dmub_cmd_header header; /* header */
+ struct dmub_cmd_mst_alloc_slots_control_data mst_slots_control; /* mst slots control */
+};
+
+/**
* struct dmub_rb_cmd_dpphy_init - DPPHY init.
*/
struct dmub_rb_cmd_dpphy_init {
@@ -1242,6 +1337,33 @@ struct dmub_rb_cmd_dp_hpd_notify {
struct dp_hpd_data hpd_data;
};
+/**
+ * Definition of a SET_CONFIG reply from DPOA.
+ */
+enum set_config_status {
+ SET_CONFIG_PENDING = 0,
+ SET_CONFIG_ACK_RECEIVED,
+ SET_CONFIG_RX_TIMEOUT,
+ SET_CONFIG_UNKNOWN_ERROR,
+};
+
+/**
+ * Definition of a set_config reply
+ */
+struct set_config_reply_control_data {
+ uint8_t instance; /* DPIA Instance */
+ uint8_t status; /* Set Config reply */
+ uint16_t pad; /* Alignment */
+};
+
+/**
+ * Definition of a DMUB_OUT_CMD__SET_CONFIG_REPLY command.
+ */
+struct dmub_rb_cmd_dp_set_config_reply {
+ struct dmub_cmd_header header;
+ struct set_config_reply_control_data set_config_reply_control;
+};
+
/*
* Command IDs should be treated as stable ABI.
* Do not reuse or modify IDs.
@@ -1280,6 +1402,10 @@ enum dmub_cmd_psr_type {
* Forces PSR enabled until an explicit PSR disable call.
*/
DMUB_CMD__PSR_FORCE_STATIC = 5,
+ /**
+ * Set PSR power option
+ */
+ DMUB_CMD__SET_PSR_POWER_OPT = 7,
};
/**
@@ -1578,6 +1704,44 @@ struct dmub_rb_cmd_psr_force_static {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__SET_PSR_POWER_OPT command.
+ */
+struct dmub_cmd_psr_set_power_opt_data {
+ /**
+ * PSR control version.
+ */
+ uint8_t cmd_version;
+ /**
+ * Panel Instance.
+	 * Panel instance to identify which psr_state to use.
+	 * Currently only instances 0 and 1 are supported.
+ */
+ uint8_t panel_inst;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
+ /**
+ * PSR power option
+ */
+ uint32_t power_opt;
+};
+
+/**
+ * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command.
+ */
+struct dmub_rb_cmd_psr_set_power_opt {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command.
+ */
+ struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
+};
+
+/**
* Set of HW components that can be locked.
*
* Note: If updating with more HW components, fields
@@ -1730,6 +1894,11 @@ enum dmub_cmd_abm_type {
* Enable/disable fractional duty cycle for backlight PWM.
*/
DMUB_CMD__ABM_SET_PWM_FRAC = 5,
+
+ /**
+ * unregister vertical interrupt after steady state is reached
+ */
+ DMUB_CMD__ABM_PAUSE = 6,
};
/**
@@ -2086,6 +2255,50 @@ struct dmub_rb_cmd_abm_init_config {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__ABM_PAUSE command.
+ */
+
+struct dmub_cmd_abm_pause_data {
+
+ /**
+ * Panel Control HW instance mask.
+ * Bit 0 is Panel Control HW instance 0.
+ * Bit 1 is Panel Control HW instance 1.
+ */
+ uint8_t panel_mask;
+
+ /**
+ * OTG hw instance
+ */
+ uint8_t otg_inst;
+
+ /**
+ * Enable or disable ABM pause
+ */
+ uint8_t enable;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[1];
+};
+
+/**
+ * Definition of a DMUB_CMD__ABM_PAUSE command.
+ */
+struct dmub_rb_cmd_abm_pause {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__ABM_PAUSE command.
+ */
+ struct dmub_cmd_abm_pause_data abm_pause_data;
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command.
*/
struct dmub_cmd_query_feature_caps_data {
@@ -2312,6 +2525,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_psr_force_static psr_force_static;
/**
+ * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command.
+ */
+ struct dmub_rb_cmd_psr_set_power_opt psr_set_power_opt;
+ /**
* Definition of a DMUB_CMD__PLAT_54186_WA command.
*/
struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
@@ -2364,6 +2581,11 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_init_config abm_init_config;
/**
+ * Definition of a DMUB_CMD__ABM_PAUSE command.
+ */
+ struct dmub_rb_cmd_abm_pause abm_pause;
+
+ /**
* Definition of a DMUB_CMD__DP_AUX_ACCESS command.
*/
struct dmub_rb_cmd_dp_aux_access dp_aux_access;
@@ -2383,6 +2605,18 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_lvtma_control lvtma_control;
/**
+ * Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command.
+ */
+ struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control;
+ /**
+ * Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command.
+ */
+ struct dmub_rb_cmd_set_config_access set_config_access;
+ /**
+ * Definition of a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command.
+ */
+ struct dmub_rb_cmd_set_mst_alloc_slots set_mst_alloc_slots;
+ /**
* Definition of a DMUB_CMD__EDID_CEA command.
*/
struct dmub_rb_cmd_edid_cea edid_cea;
@@ -2404,6 +2638,10 @@ union dmub_rb_out_cmd {
* HPD notify command.
*/
struct dmub_rb_cmd_dp_hpd_notify dp_hpd_notify;
+ /**
+ * SET_CONFIG reply command.
+ */
+ struct dmub_rb_cmd_dp_set_config_reply set_config_reply;
};
#pragma pack(pop)
@@ -2484,14 +2722,16 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const union dmub_rb_cmd *cmd)
{
- uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
- const uint8_t *src = (const uint8_t *)cmd;
+ uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ const uint64_t *src = (const uint64_t *)cmd;
+ uint8_t i;
if (dmub_rb_full(rb))
return false;
// copying data
- dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE);
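+	// Copy as 64-bit words; assumes DMUB_RB_CMD_SIZE is a multiple of 8 bytes.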
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
rb->wrpt += DMUB_RB_CMD_SIZE;
@@ -2600,14 +2840,16 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb,
static inline bool dmub_rb_out_front(struct dmub_rb *rb,
union dmub_rb_out_cmd *cmd)
{
- const uint8_t *src = (const uint8_t *)(rb->base_address) + rb->rptr;
- uint8_t *dst = (uint8_t *)cmd;
+ const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
+ uint64_t *dst = (uint64_t *)cmd;
+ uint8_t i;
if (dmub_rb_empty(rb))
return false;
// copying data
- dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE);
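+	// Copy as 64-bit words; assumes DMUB_RB_CMD_SIZE is a multiple of 8 bytes.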
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
return true;
}
@@ -2642,14 +2884,17 @@ static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
*/
static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
{
- uint8_t buf[DMUB_RB_CMD_SIZE];
uint32_t rptr = rb->rptr;
uint32_t wptr = rb->wrpt;
while (rptr != wptr) {
- const uint8_t *data = (const uint8_t *)rb->base_address + rptr;
+ uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ uint64_t temp;
+ uint8_t i;
- dmub_memcpy(buf, data, DMUB_RB_CMD_SIZE);
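+		/* Read each pending command back; the data itself is discarded. */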
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ temp = *data++;
rptr += DMUB_RB_CMD_SIZE;
if (rptr >= rb->capacity)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index fc667cb17eb0..10ebf20eaa41 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -338,6 +338,10 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
union dmub_fw_boot_options boot_options = {0};
boot_options.bits.z10_disable = params->disable_z10;
+ boot_options.bits.dpia_supported = params->dpia_supported;
+ boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
+
+ boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -432,3 +436,11 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
}
+
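+/* Firmware requests (re)detection via a bit in the DMCUB boot-status scratch register. */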
+bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
+{
+ uint32_t fw_boot_status = REG_READ(DMCUB_SCRATCH0);
+ bool should_detect = fw_boot_status & DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED;
+ return should_detect;
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index bb62605d2ac8..59ddc81b5a0e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -245,4 +245,6 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub);
void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+bool dmub_dcn31_should_detect(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN31_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 75a91cfaf036..56d400ffa7ac 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -208,6 +208,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
break;
case DMUB_ASIC_DCN31:
+ case DMUB_ASIC_DCN31B:
dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
funcs->reset = dmub_dcn31_reset;
funcs->reset_release = dmub_dcn31_reset_release;
@@ -234,7 +235,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr;
funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data;
-
+ funcs->should_detect = dmub_dcn31_should_detect;
funcs->get_current_time = dmub_dcn31_get_current_time;
break;
@@ -655,13 +656,19 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
- uint32_t i;
+ uint32_t i, rptr;
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
for (i = 0; i <= timeout_us; ++i) {
- dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
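+		/* A read pointer beyond the ring capacity indicates a hardware failure. */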
+ if (rptr > dmub->inbox1_rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ dmub->inbox1_rb.rptr = rptr;
+
if (dmub_rb_empty(&dmub->inbox1_rb))
return DMUB_STATUS_OK;
@@ -816,3 +823,11 @@ bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_
dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
return true;
}
+
+bool dmub_srv_should_detect(struct dmub_srv *dmub)
+{
+ if (!dmub->hw_init || !dmub->hw_funcs.should_detect)
+ return false;
+
+ return dmub->hw_funcs.should_detect(dmub);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 70766d534c9c..44502ec919a2 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -76,6 +76,22 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
dmub_memcpy((void *)&notify->aux_reply,
(void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data));
break;
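+	/* Plug/unplug (HPD) events carry a status; short pulses map to HPD_IRQ. */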
+ case DMUB_OUT_CMD__DP_HPD_NOTIFY:
+ if (cmd.dp_hpd_notify.hpd_data.hpd_type == DP_HPD) {
+ notify->type = DMUB_NOTIFICATION_HPD;
+ notify->hpd_status = cmd.dp_hpd_notify.hpd_data.hpd_status;
+ } else {
+ notify->type = DMUB_NOTIFICATION_HPD_IRQ;
+ }
+
+ notify->link_index = cmd.dp_hpd_notify.hpd_data.instance;
+ notify->result = AUX_RET_SUCCESS;
+ break;
+ case DMUB_OUT_CMD__SET_CONFIG_REPLY:
+ notify->type = DMUB_NOTIFICATION_SET_CONFIG_REPLY;
+ notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
+ notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 76a87b682883..b8ffb216ebc4 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -152,6 +152,10 @@ struct bp_transmitter_control {
enum signal_type signal;
enum dc_color_depth color_depth; /* not used for DCE6.0 */
enum hpd_source_id hpd_sel; /* ucHPDSel, used for DCe6.0 */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ enum tx_ffe_id txffe_sel; /* used for DCN3 */
+ enum engine_id hpo_engine_id; /* used for DCN3 */
+#endif
struct graphics_object_id connector_obj_id;
/* symClock; in 10kHz, pixel clock, in HDMI deep color mode, it should
* be pixel clock * deep_color_ratio (in KHz)
@@ -319,6 +323,10 @@ struct bp_encoder_cap_info {
uint32_t DP_HBR2_EN:1;
uint32_t DP_HBR3_EN:1;
uint32_t HDMI_6GB_EN:1;
+ uint32_t IS_DP2_CAPABLE:1;
+ uint32_t DP_UHBR10_EN:1;
+ uint32_t DP_UHBR13_5_EN:1;
+ uint32_t DP_UHBR20_EN:1;
uint32_t DP_IS_USB_C:1;
uint32_t RESERVED:27;
};
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 5adc471bef57..e4a2dfacab4c 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -211,6 +211,7 @@ enum {
#ifndef ASICREV_IS_GREEN_SARDINE
#define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF))
#endif
+#define DEVICE_ID_NV_13FE 0x13FE // CYAN_SKILLFISH
#define FAMILY_VGH 144
#define DEVICE_ID_VGH_163F 0x163F
#define VANGOGH_A0 0x01
@@ -227,7 +228,7 @@ enum {
#define FAMILY_YELLOW_CARP 146
#define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x1A
+#define YELLOW_CARP_B0 0x20
#define YELLOW_CARP_UNKNOWN 0xFF
#ifndef ASICREV_IS_YELLOW_CARP
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index fe75ec834892..012b7c61798c 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -50,6 +50,7 @@ enum dce_version {
DCN_VERSION_1_0,
DCN_VERSION_1_01,
DCN_VERSION_2_0,
+ DCN_VERSION_2_01,
DCN_VERSION_2_1,
DCN_VERSION_3_0,
DCN_VERSION_3_01,
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index aec7389aff37..ffd0df1701e6 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -80,6 +80,15 @@ enum dpcd_phy_test_patterns {
PHY_TEST_PATTERN_CP2520_1,
PHY_TEST_PATTERN_CP2520_2,
PHY_TEST_PATTERN_CP2520_3, /* same as TPS4 */
+ PHY_TEST_PATTERN_128b_132b_TPS1 = 0x8,
+ PHY_TEST_PATTERN_128b_132b_TPS2 = 0x10,
+ PHY_TEST_PATTERN_PRBS9 = 0x18,
+ PHY_TEST_PATTERN_PRBS11 = 0x20,
+ PHY_TEST_PATTERN_PRBS15 = 0x28,
+ PHY_TEST_PATTERN_PRBS23 = 0x30,
+ PHY_TEST_PATTERN_PRBS31 = 0x38,
+ PHY_TEST_PATTERN_264BIT_CUSTOM = 0x40,
+ PHY_TEST_PATTERN_SQUARE_PULSE = 0x48,
};
enum dpcd_test_dyn_range {
@@ -135,7 +144,14 @@ enum dpcd_training_patterns {
DPCD_TRAINING_PATTERN_1,
DPCD_TRAINING_PATTERN_2,
DPCD_TRAINING_PATTERN_3,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DPCD_TRAINING_PATTERN_4 = 7,
+ DPCD_128b_132b_TPS1 = 1,
+ DPCD_128b_132b_TPS2 = 2,
+ DPCD_128b_132b_TPS2_CDS = 3,
+#else
DPCD_TRAINING_PATTERN_4 = 7
+#endif
};
/* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus
@@ -149,6 +165,7 @@ enum dpcd_psr_sink_states {
PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
};
+#define DP_SOURCE_SEQUENCE 0x30c
#define DP_SOURCE_TABLE_REVISION 0x310
#define DP_SOURCE_PAYLOAD_SIZE 0x311
#define DP_SOURCE_SINK_CAP 0x317
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 792652236c61..dd974c428d23 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -328,6 +328,7 @@ struct integrated_info {
uint8_t gu_id[NUMBER_OF_UCHAR_FOR_GUID];
uint8_t checksum;
+ uint8_t fixdpvoltageswing;
} ext_disp_conn_info; /* exiting long long time */
struct available_s_clk_list {
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
index 58bb42ed85ca..84b299ff500a 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
@@ -140,6 +140,18 @@ enum sync_source {
SYNC_SOURCE_DUAL_GPU_PIN
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum tx_ffe_id {
+ TX_FFE0 = 0,
+ TX_FFE1,
+ TX_FFE2,
+ TX_FFE3,
+ TX_FFE_DeEmphasis_Only,
+ TX_FFE_PreShoot_Only,
+ TX_FFE_No_FFE,
+};
+#endif
+
/* connector sizes in millimeters - from BiosParserTypes.hpp */
#define CONNECTOR_SIZE_DVI 40
#define CONNECTOR_SIZE_VGA 32
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index 33b3d755fe65..01775417cf4b 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -184,6 +184,14 @@ enum engine_id {
ENGINE_ID_DACA,
ENGINE_ID_DACB,
ENGINE_ID_VCE, /* wireless display pseudo-encoder */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ ENGINE_ID_HPO_0,
+ ENGINE_ID_HPO_1,
+ ENGINE_ID_HPO_DP_0,
+ ENGINE_ID_HPO_DP_1,
+ ENGINE_ID_HPO_DP_2,
+ ENGINE_ID_HPO_DP_3,
+#endif
ENGINE_ID_VIRTUAL,
ENGINE_ID_COUNT,
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
index c7fbb9c3ad6b..418fbf8c5c3a 100644
--- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h
+++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
@@ -41,6 +41,8 @@ struct aux_payload {
* reset it to read data */
bool write;
bool mot;
+ bool write_status_update;
+
uint32_t address;
uint32_t length;
uint8_t *data;
@@ -53,6 +55,7 @@ struct aux_payload {
* zero means "use default value"
*/
uint32_t defer_delay;
};
struct aux_command {
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 32f5274ed34e..424bccd36434 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -53,7 +53,11 @@ enum edp_revision {
};
enum {
- LINK_RATE_REF_FREQ_IN_KHZ = 27000 /*27MHz*/
+ LINK_RATE_REF_FREQ_IN_KHZ = 27000, /*27MHz*/
+ BITS_PER_DP_BYTE = 10,
+ DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */
+ DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when FEC is enabled */
+ DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */
};
enum link_training_result {
@@ -70,6 +74,12 @@ enum link_training_result {
LINK_TRAINING_LINK_LOSS,
/* Abort link training (because sink unplugged) */
LINK_TRAINING_ABORT,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DP_128b_132b_LT_FAILED,
+ DP_128b_132b_MAX_LOOP_COUNT_REACHED,
+ DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT,
+ DP_128b_132b_CDS_DONE_TIMEOUT,
+#endif
};
enum lttpr_mode {
@@ -80,21 +90,58 @@ enum lttpr_mode {
struct link_training_settings {
struct dc_link_settings link_settings;
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+ /* TODO: turn lane settings below into mandatory fields
+ * as initial lane configuration
+ */
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
enum dc_voltage_swing *voltage_swing;
enum dc_pre_emphasis *pre_emphasis;
enum dc_post_cursor2 *post_cursor2;
bool should_set_fec_ready;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* TODO - factor lane_settings out because it changes during LT */
+ union dc_dp_ffe_preset *ffe_preset;
+#endif
uint16_t cr_pattern_time;
uint16_t eq_pattern_time;
+ uint16_t cds_pattern_time;
enum dc_dp_training_pattern pattern_for_cr;
enum dc_dp_training_pattern pattern_for_eq;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ enum dc_dp_training_pattern pattern_for_cds;
+
+ uint32_t eq_wait_time_limit;
+ uint8_t eq_loop_count_limit;
+ uint32_t cds_wait_time_limit;
+#endif
bool enhanced_framing;
- bool allow_invalid_msa_timing_param;
enum lttpr_mode lttpr_mode;
+
+ /* disallow different lanes to have different lane settings */
+ bool disallow_per_lane_settings;
+	/* dpcd lane settings will always use the same hw lane settings,
+	 * even if they do not match the requested lane adjust */
+ bool always_match_dpcd_with_hw_lane_settings;
+
+ /*****************************************************************
+ * training states - parameters that can change in link training
+ *****************************************************************/
+ /* TODO: Move hw_lane_settings and dpcd_lane_settings
+ * along with lane adjust, lane align, offset and all
+ * other training states into a new structure called
+ * training states, so link_training_settings becomes
+ * a constant input pre-decided prior to link training.
+ *
+ * The goal is to strictly decouple link training settings
+ * decision making process from link training states to
+ * prevent it from messy code practice of changing training
+ * decision on the fly.
+ */
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX];
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX];
};
/*TODO: Move this enum test harness*/
@@ -114,13 +161,30 @@ enum dp_test_pattern {
DP_TEST_PATTERN_CP2520_2,
DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE = DP_TEST_PATTERN_CP2520_2,
DP_TEST_PATTERN_CP2520_3,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DP_TEST_PATTERN_128b_132b_TPS1,
+ DP_TEST_PATTERN_128b_132b_TPS2,
+ DP_TEST_PATTERN_PRBS9,
+ DP_TEST_PATTERN_PRBS11,
+ DP_TEST_PATTERN_PRBS15,
+ DP_TEST_PATTERN_PRBS23,
+ DP_TEST_PATTERN_PRBS31,
+ DP_TEST_PATTERN_264BIT_CUSTOM,
+ DP_TEST_PATTERN_SQUARE_PULSE,
+#endif
/* Link Training Patterns */
DP_TEST_PATTERN_TRAINING_PATTERN1,
DP_TEST_PATTERN_TRAINING_PATTERN2,
DP_TEST_PATTERN_TRAINING_PATTERN3,
DP_TEST_PATTERN_TRAINING_PATTERN4,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE,
+ DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE,
+ DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE,
+#else
DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_TRAINING_PATTERN4,
+#endif
/* link test patterns*/
DP_TEST_PATTERN_COLOR_SQUARES,
@@ -152,6 +216,22 @@ enum dp_panel_mode {
DP_PANEL_MODE_SPECIAL
};
+enum dpcd_source_sequence {
+ DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG = 1, /*done in apply_single_controller_ctx_to_hw */
+ DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR, /*done in core_link_enable_stream */
+ DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME, /*done in core_link_enable_stream/dcn20_enable_stream */
+ DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE, /*done in perform_link_training_with_retries/dcn20_enable_stream */
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY, /*done in dp_enable_link_phy */
+ DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN, /*done in dp_set_hw_test_pattern */
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM, /*done in dce110_enable_audio_stream */
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM, /*done in enc1_stream_encoder_dp_unblank */
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM, /*done in enc1_stream_encoder_dp_blank */
+ DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET, /*done in enc1_stream_encoder_dp_blank */
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM, /*done in dce110_disable_audio_stream */
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY, /*done in dp_disable_link_phy */
+ DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE, /*done in dce110_disable_stream */
+};
+
/* DPCD_ADDR_TRAINING_LANEx_SET registers value */
union dpcd_training_lane_set {
struct {
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 571fcf23cea9..370fad883e33 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -72,6 +72,9 @@
#define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__)
#define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__)
#define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#define DC_LOG_DP2(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#endif
struct dal_logger;
@@ -123,6 +126,9 @@ enum dc_log_type {
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ LOG_DP2,
+#endif
LOG_SECTION_TOTAL_COUNT
};
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index ef742d95ef05..64a38f08f497 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -54,12 +54,17 @@ static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
* just multiply with 2^gamma which can be computed once, and save the result so we
* recursively compute all the values.
*/
- /*sRGB 709 2.2 2.4 P3*/
-static const int32_t gamma_numerator01[] = { 31308, 180000, 0, 0, 0};
-static const int32_t gamma_numerator02[] = { 12920, 4500, 0, 0, 0};
-static const int32_t gamma_numerator03[] = { 55, 99, 0, 0, 0};
-static const int32_t gamma_numerator04[] = { 55, 99, 0, 0, 0};
-static const int32_t gamma_numerator05[] = { 2400, 2200, 2200, 2400, 2600};
+
+/*
+ * Regamma coefficients are used for both regamma and degamma. Degamma
+ * coefficients are calculated in our formula using the regamma coefficients.
+ */
+ /*sRGB 709 2.2 2.4 P3*/
+static const int32_t numerator01[] = { 31308, 180000, 0, 0, 0};
+static const int32_t numerator02[] = { 12920, 4500, 0, 0, 0};
+static const int32_t numerator03[] = { 55, 99, 0, 0, 0};
+static const int32_t numerator04[] = { 55, 99, 0, 0, 0};
+static const int32_t numerator05[] = { 2400, 2200, 2200, 2400, 2600};
/* one-time setup of X points */
void setup_x_points_distribution(void)
@@ -288,7 +293,8 @@ struct dividers {
};
-static bool build_coefficients(struct gamma_coefficients *coefficients, enum dc_transfer_func_predefined type)
+static bool build_coefficients(struct gamma_coefficients *coefficients,
+ enum dc_transfer_func_predefined type)
{
uint32_t i = 0;
@@ -312,15 +318,15 @@ static bool build_coefficients(struct gamma_coefficients *coefficients, enum dc_
do {
coefficients->a0[i] = dc_fixpt_from_fraction(
- gamma_numerator01[index], 10000000);
+ numerator01[index], 10000000);
coefficients->a1[i] = dc_fixpt_from_fraction(
- gamma_numerator02[index], 1000);
+ numerator02[index], 1000);
coefficients->a2[i] = dc_fixpt_from_fraction(
- gamma_numerator03[index], 1000);
+ numerator03[index], 1000);
coefficients->a3[i] = dc_fixpt_from_fraction(
- gamma_numerator04[index], 1000);
+ numerator04[index], 1000);
coefficients->user_gamma[i] = dc_fixpt_from_fraction(
- gamma_numerator05[index], 1000);
+ numerator05[index], 1000);
++i;
} while (i != ARRAY_SIZE(coefficients->a0));
@@ -1685,7 +1691,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinates_x;
	build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
i = 0;
while (i != hw_points_num + 1) {
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b99aa232bd8b..bd1d1dc93629 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -155,9 +155,18 @@ static unsigned int calc_v_total_from_duration(
if (duration_in_us > vrr->max_duration_in_us)
duration_in_us = vrr->max_duration_in_us;
- v_total = div64_u64(div64_u64(((unsigned long long)(
- duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
- stream->timing.h_total), 1000);
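+	/* For HDMI, divide rounding up so the computed v_total never yields a shorter duration than requested. */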
+ if (dc_is_hdmi_signal(stream->signal)) {
+ uint32_t h_total_up_scaled;
+
+ h_total_up_scaled = stream->timing.h_total * 10000;
+ v_total = div_u64((unsigned long long)duration_in_us
+ * stream->timing.pix_clk_100hz + (h_total_up_scaled - 1),
+ h_total_up_scaled);
+ } else {
+ v_total = div64_u64(div64_u64(((unsigned long long)(
+ duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
+ stream->timing.h_total), 1000);
+ }
/* v_total cannot be less than nominal */
if (v_total < stream->timing.v_total) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index e9bd84ec027d..be61975f1470 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -105,6 +105,7 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+ mutex_unlock(&psp->dtm_context.mutex);
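+	/* Release the mutex before the _v2 fallback below, which takes the lock itself. */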
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
status = remove_display_from_topology_v2(hdcp, index);
@@ -115,8 +116,6 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
}
- mutex_unlock(&psp->dtm_context.mutex);
-
return status;
}
@@ -205,6 +204,7 @@ static enum mod_hdcp_status add_display_to_topology_v3(
dtm_cmd->dtm_in_message.topology_update_v3.link_hdcp_cap = link->hdcp_supported_informational;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+ mutex_unlock(&psp->dtm_context.mutex);
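+	/* Release the mutex before the _v2 fallback below, which takes the lock itself. */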
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
status = add_display_to_topology_v2(hdcp, display);
@@ -214,8 +214,6 @@ static enum mod_hdcp_status add_display_to_topology_v3(
HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
}
- mutex_unlock(&psp->dtm_context.mutex);
-
return status;
}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index f37101f5a777..6d648c889866 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -249,6 +249,8 @@ struct mod_hdcp_link {
uint8_t ddc_line;
uint8_t link_enc_idx;
uint8_t phy_idx;
+ uint8_t dio_output_type;
+ uint8_t dio_output_id;
uint8_t hdcp_supported_informational;
union {
struct mod_hdcp_displayport dp;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 257f280d3d53..f1a46d16f7ea 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -228,7 +228,7 @@ enum DC_FEATURE_MASK {
DC_FBC_MASK = (1 << 0), //0x1, disabled by default
DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default
DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
- DC_PSR_MASK = (1 << 3), //0x8, disabled by default
+ DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1
DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
};
@@ -236,7 +236,8 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PIPE_SPLIT = 0x1,
DC_DISABLE_STUTTER = 0x2,
DC_DISABLE_DSC = 0x4,
- DC_DISABLE_CLOCK_GATING = 0x8
+ DC_DISABLE_CLOCK_GATING = 0x8,
+ DC_DISABLE_PSR = 0x10,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h
new file mode 100755
index 000000000000..c56ca9740933
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clk_11_0_1_OFFSET_HEADER
+#define _clk_11_0_1_OFFSET_HEADER
+
+#define mmCLK4_0_CLK4_CLK_PLL_REQ 0x460e
+#define mmCLK4_0_CLK4_CLK_PLL_REQ_BASE_IDX 0
+
+#define mmCLK4_0_CLK4_CLK2_CURRENT_CNT 0x467f
+#define mmCLK4_0_CLK4_CLK2_CURRENT_CNT_BASE_IDX 0
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h
new file mode 100755
index 000000000000..168fbf9fcd48
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clk_11_0_1_SH_MASK_HEADER
+#define _clk_11_0_1_SH_MASK_HEADER
+
+//CLK4_0_CLK4_CLK_PLL_REQ
+#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK4_0_CLK4_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK4_0_CLK4_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+//CLK4_0_CLK4_CLK2_CURRENT_CNT
+#define CLK4_0_CLK4_CLK2_CURRENT_CNT__CURRENT_COUNT__SHIFT 0x0
+#define CLK4_0_CLK4_CLK2_CURRENT_CNT__CURRENT_COUNT_MASK 0xFFFFFFFFL
+
+#endif
\ No newline at end of file
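[Editor's note] The *_SHIFT/*_MASK pairs above decode bit fields out of a raw register word in the usual mask-then-shift way. A minimal decoding sketch, assuming pll_req holds a raw read of CLK4_CLK_PLL_REQ:

	/* Extract the PLL feedback-multiplier fields; FbMult_frac is a
	 * 16-bit fraction, so the multiplier is fb_int + fb_frac / 65536. */
	uint32_t fb_int  = (pll_req & CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int_MASK)
			   >> CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int__SHIFT;
	uint32_t fb_frac = (pll_req & CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac_MASK)
			   >> CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac__SHIFT;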
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h
new file mode 100755
index 000000000000..cae1a7e74323
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h
@@ -0,0 +1,6193 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _dcn_2_0_3_OFFSET_HEADER
+#define _dcn_2_0_3_OFFSET_HEADER
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+// base address: 0x0
+#define mmPHYPLLA_PIXCLK_RESYNC_CNTL 0x0040
+#define mmPHYPLLA_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define mmPHYPLLB_PIXCLK_RESYNC_CNTL 0x0041
+#define mmPHYPLLB_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define mmDP_DTO_DBUF_EN 0x0044
+#define mmDP_DTO_DBUF_EN_BASE_IDX 1
+#define mmDPREFCLK_CGTT_BLK_CTRL_REG 0x0048
+#define mmDPREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmREFCLK_CNTL 0x0049
+#define mmREFCLK_CNTL_BASE_IDX 1
+#define mmREFCLK_CGTT_BLK_CTRL_REG 0x004b
+#define mmREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDCCG_PERFMON_CNTL2 0x004e
+#define mmDCCG_PERFMON_CNTL2_BASE_IDX 1
+#define mmDCCG_DS_DTO_INCR 0x0053
+#define mmDCCG_DS_DTO_INCR_BASE_IDX 1
+#define mmDCCG_DS_DTO_MODULO 0x0054
+#define mmDCCG_DS_DTO_MODULO_BASE_IDX 1
+#define mmDCCG_DS_CNTL 0x0055
+#define mmDCCG_DS_CNTL_BASE_IDX 1
+#define mmDCCG_DS_HW_CAL_INTERVAL 0x0056
+#define mmDCCG_DS_HW_CAL_INTERVAL_BASE_IDX 1
+#define mmDPREFCLK_CNTL 0x0058
+#define mmDPREFCLK_CNTL_BASE_IDX 1
+#define mmDCE_VERSION 0x005e
+#define mmDCE_VERSION_BASE_IDX 1
+#define mmDCCG_GTC_CNTL 0x0060
+#define mmDCCG_GTC_CNTL_BASE_IDX 1
+#define mmDCCG_GTC_DTO_INCR 0x0061
+#define mmDCCG_GTC_DTO_INCR_BASE_IDX 1
+#define mmDCCG_GTC_DTO_MODULO 0x0062
+#define mmDCCG_GTC_DTO_MODULO_BASE_IDX 1
+#define mmDCCG_GTC_CURRENT 0x0063
+#define mmDCCG_GTC_CURRENT_BASE_IDX 1
+#define mmDSCCLK0_DTO_PARAM 0x006c
+#define mmDSCCLK0_DTO_PARAM_BASE_IDX 1
+#define mmMILLISECOND_TIME_BASE_DIV 0x0070
+#define mmMILLISECOND_TIME_BASE_DIV_BASE_IDX 1
+#define mmDISPCLK_FREQ_CHANGE_CNTL 0x0071
+#define mmDISPCLK_FREQ_CHANGE_CNTL_BASE_IDX 1
+#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL 0x0072
+#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL_BASE_IDX 1
+#define mmDCCG_PERFMON_CNTL 0x0073
+#define mmDCCG_PERFMON_CNTL_BASE_IDX 1
+#define mmDCCG_GATE_DISABLE_CNTL 0x0074
+#define mmDCCG_GATE_DISABLE_CNTL_BASE_IDX 1
+#define mmDISPCLK_CGTT_BLK_CTRL_REG 0x0075
+#define mmDISPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmSOCCLK_CGTT_BLK_CTRL_REG 0x0076
+#define mmSOCCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDCCG_CAC_STATUS 0x0077
+#define mmDCCG_CAC_STATUS_BASE_IDX 1
+#define mmMICROSECOND_TIME_BASE_DIV 0x007b
+#define mmMICROSECOND_TIME_BASE_DIV_BASE_IDX 1
+#define mmDCCG_GATE_DISABLE_CNTL2 0x007c
+#define mmDCCG_GATE_DISABLE_CNTL2_BASE_IDX 1
+#define mmSYMCLK_CGTT_BLK_CTRL_REG 0x007d
+#define mmSYMCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDCCG_DISP_CNTL_REG 0x007f
+#define mmDCCG_DISP_CNTL_REG_BASE_IDX 1
+#define mmOTG0_PIXEL_RATE_CNTL 0x0080
+#define mmOTG0_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDP_DTO0_PHASE 0x0081
+#define mmDP_DTO0_PHASE_BASE_IDX 1
+#define mmDP_DTO0_MODULO 0x0082
+#define mmDP_DTO0_MODULO_BASE_IDX 1
+#define mmOTG0_PHYPLL_PIXEL_RATE_CNTL 0x0083
+#define mmOTG0_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmOTG1_PIXEL_RATE_CNTL 0x0084
+#define mmOTG1_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDP_DTO1_PHASE 0x0085
+#define mmDP_DTO1_PHASE_BASE_IDX 1
+#define mmDP_DTO1_MODULO 0x0086
+#define mmDP_DTO1_MODULO_BASE_IDX 1
+#define mmOTG1_PHYPLL_PIXEL_RATE_CNTL 0x0087
+#define mmOTG1_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDPPCLK_CGTT_BLK_CTRL_REG 0x0098
+#define mmDPPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDPPCLK0_DTO_PARAM 0x0099
+#define mmDPPCLK0_DTO_PARAM_BASE_IDX 1
+#define mmDPPCLK1_DTO_PARAM 0x009a
+#define mmDPPCLK1_DTO_PARAM_BASE_IDX 1
+#define mmDPPCLK2_DTO_PARAM 0x009b
+#define mmDPPCLK2_DTO_PARAM_BASE_IDX 1
+#define mmDPPCLK3_DTO_PARAM 0x009c
+#define mmDPPCLK3_DTO_PARAM_BASE_IDX 1
+#define mmDCCG_CAC_STATUS2 0x009f
+#define mmDCCG_CAC_STATUS2_BASE_IDX 1
+#define mmSYMCLKA_CLOCK_ENABLE 0x00a0
+#define mmSYMCLKA_CLOCK_ENABLE_BASE_IDX 1
+#define mmSYMCLKB_CLOCK_ENABLE 0x00a1
+#define mmSYMCLKB_CLOCK_ENABLE_BASE_IDX 1
+#define mmDCCG_SOFT_RESET 0x00a6
+#define mmDCCG_SOFT_RESET_BASE_IDX 1
+#define mmDSCCLK_DTO_CTRL 0x00a7
+#define mmDSCCLK_DTO_CTRL_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO_SOURCE 0x00ab
+#define mmDCCG_AUDIO_DTO_SOURCE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO0_PHASE 0x00ac
+#define mmDCCG_AUDIO_DTO0_PHASE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO0_MODULE 0x00ad
+#define mmDCCG_AUDIO_DTO0_MODULE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO1_PHASE 0x00ae
+#define mmDCCG_AUDIO_DTO1_PHASE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO1_MODULE 0x00af
+#define mmDCCG_AUDIO_DTO1_MODULE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG0_LATCH_VALUE 0x00b0
+#define mmDCCG_VSYNC_OTG0_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG1_LATCH_VALUE 0x00b1
+#define mmDCCG_VSYNC_OTG1_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG2_LATCH_VALUE 0x00b2
+#define mmDCCG_VSYNC_OTG2_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG3_LATCH_VALUE 0x00b3
+#define mmDCCG_VSYNC_OTG3_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG4_LATCH_VALUE 0x00b4
+#define mmDCCG_VSYNC_OTG4_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG5_LATCH_VALUE 0x00b5
+#define mmDCCG_VSYNC_OTG5_LATCH_VALUE_BASE_IDX 1
+#define mmDPPCLK_DTO_CTRL 0x00b6
+#define mmDPPCLK_DTO_CTRL_BASE_IDX 1
+#define mmDCCG_VSYNC_CNT_CTRL 0x00b8
+#define mmDCCG_VSYNC_CNT_CTRL_BASE_IDX 1
+#define mmDCCG_VSYNC_CNT_INT_CTRL 0x00b9
+#define mmDCCG_VSYNC_CNT_INT_CTRL_BASE_IDX 1
+
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+// base address: 0x0
+#define mmDENTIST_DISPCLK_CNTL 0x0064
+#define mmDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: dce_dc_dmu_rbbmif_dispdec
+// base address: 0x0
+#define mmRBBMIF_TIMEOUT 0x005b
+#define mmRBBMIF_TIMEOUT_BASE_IDX 2
+#define mmRBBMIF_STATUS 0x005c
+#define mmRBBMIF_STATUS_BASE_IDX 2
+#define mmRBBMIF_STATUS_2 0x005d
+#define mmRBBMIF_STATUS_2_BASE_IDX 2
+#define mmRBBMIF_INT_STATUS 0x005e
+#define mmRBBMIF_INT_STATUS_BASE_IDX 2
+#define mmRBBMIF_TIMEOUT_DIS 0x005f
+#define mmRBBMIF_TIMEOUT_DIS_BASE_IDX 2
+#define mmRBBMIF_TIMEOUT_DIS_2 0x0060
+#define mmRBBMIF_TIMEOUT_DIS_2_BASE_IDX 2
+#define mmRBBMIF_STATUS_FLAG 0x0061
+#define mmRBBMIF_STATUS_FLAG_BASE_IDX 2
+
+// addressBlock: dce_dc_hda_az_misc_dispdec
+// base address: 0x0
+#define mmAZ_CLOCK_CNTL 0x0372
+#define mmAZ_CLOCK_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+// base address: 0x0
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0386
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0387
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+// base address: 0x18
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x038c
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA 0x038d
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+// base address: 0x0
+#define mmAZALIA_CONTROLLER_CLOCK_GATING 0x03c2
+#define mmAZALIA_CONTROLLER_CLOCK_GATING_BASE_IDX 2
+#define mmAZALIA_AUDIO_DTO 0x03c3
+#define mmAZALIA_AUDIO_DTO_BASE_IDX 2
+#define mmAZALIA_AUDIO_DTO_CONTROL 0x03c4
+#define mmAZALIA_AUDIO_DTO_CONTROL_BASE_IDX 2
+#define mmAZALIA_SOCCLK_CONTROL 0x03c5
+#define mmAZALIA_SOCCLK_CONTROL_BASE_IDX 2
+#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE 0x03c6
+#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE_BASE_IDX 2
+#define mmAZALIA_DATA_DMA_CONTROL 0x03c7
+#define mmAZALIA_DATA_DMA_CONTROL_BASE_IDX 2
+#define mmAZALIA_BDL_DMA_CONTROL 0x03c8
+#define mmAZALIA_BDL_DMA_CONTROL_BASE_IDX 2
+#define mmAZALIA_RIRB_AND_DP_CONTROL 0x03c9
+#define mmAZALIA_RIRB_AND_DP_CONTROL_BASE_IDX 2
+#define mmAZALIA_CORB_DMA_CONTROL 0x03ca
+#define mmAZALIA_CORB_DMA_CONTROL_BASE_IDX 2
+#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER 0x03d1
+#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER_BASE_IDX 2
+#define mmAZALIA_CYCLIC_BUFFER_SYNC 0x03d2
+#define mmAZALIA_CYCLIC_BUFFER_SYNC_BASE_IDX 2
+#define mmAZALIA_GLOBAL_CAPABILITIES 0x03d3
+#define mmAZALIA_GLOBAL_CAPABILITIES_BASE_IDX 2
+#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY 0x03d4
+#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL 0x03d5
+#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL_BASE_IDX 2
+#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY 0x03d6
+#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL0 0x03d9
+#define mmAZALIA_INPUT_CRC0_CONTROL0_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL1 0x03da
+#define mmAZALIA_INPUT_CRC0_CONTROL1_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL2 0x03db
+#define mmAZALIA_INPUT_CRC0_CONTROL2_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL3 0x03dc
+#define mmAZALIA_INPUT_CRC0_CONTROL3_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_RESULT 0x03dd
+#define mmAZALIA_INPUT_CRC0_RESULT_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL0 0x03de
+#define mmAZALIA_INPUT_CRC1_CONTROL0_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL1 0x03df
+#define mmAZALIA_INPUT_CRC1_CONTROL1_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL2 0x03e0
+#define mmAZALIA_INPUT_CRC1_CONTROL2_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL3 0x03e1
+#define mmAZALIA_INPUT_CRC1_CONTROL3_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_RESULT 0x03e2
+#define mmAZALIA_INPUT_CRC1_RESULT_BASE_IDX 2
+#define mmAZALIA_MEM_PWR_CTRL 0x03ee
+#define mmAZALIA_MEM_PWR_CTRL_BASE_IDX 2
+#define mmAZALIA_MEM_PWR_STATUS 0x03ef
+#define mmAZALIA_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+// base address: 0x0
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0406
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID 0x0407
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL 0x0408
+#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL 0x0409
+#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x040a
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x040b
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x040c
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x040d
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE 0x040e
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET 0x040f
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x0410
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x0411
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_BASE_IDX 2
+#define mmREG_DC_AUDIO_PORT_CONNECTIVITY 0x041c
+#define mmREG_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2
+#define mmREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x041d
+#define mmREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+// base address: 0x0
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043a
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043b
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+// base address: 0x10
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043e
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043f
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_ret_path_dispdec
+// base address: 0x0
+#define mmDCHUBBUB_RET_PATH_DCC_CFG 0x04cf
+#define mmDCHUBBUB_RET_PATH_DCC_CFG_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_0 0x04d0
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_1 0x04d1
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_0 0x04d2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_1 0x04d3
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_0 0x04d4
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_1 0x04d5
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_0 0x04d6
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_1 0x04d7
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_0 0x04d8
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_1 0x04d9
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_0 0x04da
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_1 0x04db
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_0 0x04dc
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_1 0x04dd
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_0 0x04de
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_1 0x04df
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_CTRL 0x04ef
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_STATUS 0x04f0
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDCHUBBUB_CRC_CTRL 0x04f1
+#define mmDCHUBBUB_CRC_CTRL_BASE_IDX 2
+#define mmDCHUBBUB_CRC0_VAL_R_G 0x04f2
+#define mmDCHUBBUB_CRC0_VAL_R_G_BASE_IDX 2
+#define mmDCHUBBUB_CRC0_VAL_B_A 0x04f3
+#define mmDCHUBBUB_CRC0_VAL_B_A_BASE_IDX 2
+#define mmDCHUBBUB_CRC1_VAL_R_G 0x04f4
+#define mmDCHUBBUB_CRC1_VAL_R_G_BASE_IDX 2
+#define mmDCHUBBUB_CRC1_VAL_B_A 0x04f5
+#define mmDCHUBBUB_CRC1_VAL_B_A_BASE_IDX 2
+
+// addressBlock: dce_dc_dchubbub_hubbub_dispdec
+// base address: 0x0
+#define mmDCHUBBUB_ARB_DF_REQ_OUTSTAND 0x0505
+#define mmDCHUBBUB_ARB_DF_REQ_OUTSTAND_BASE_IDX 2
+#define mmDCHUBBUB_ARB_SAT_LEVEL 0x0506
+#define mmDCHUBBUB_ARB_SAT_LEVEL_BASE_IDX 2
+#define mmDCHUBBUB_ARB_QOS_FORCE 0x0507
+#define mmDCHUBBUB_ARB_QOS_FORCE_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DRAM_STATE_CNTL 0x0508
+#define mmDCHUBBUB_ARB_DRAM_STATE_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A 0x0509
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A 0x050a
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A 0x050d
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B 0x050e
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B 0x050f
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B 0x0512
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C 0x0513
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C 0x0514
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C 0x0517
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D 0x0518
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D 0x0519
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D 0x051c
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL 0x051d
+#define mmDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_ARB_TIMEOUT_ENABLE 0x051e
+#define mmDCHUBBUB_ARB_TIMEOUT_ENABLE_BASE_IDX 2
+#define mmDCHUBBUB_GLOBAL_TIMER_CNTL 0x051f
+#define mmDCHUBBUB_GLOBAL_TIMER_CNTL_BASE_IDX 2
+#define mmVTG0_CONTROL 0x0528
+#define mmVTG0_CONTROL_BASE_IDX 2
+#define mmVTG1_CONTROL 0x0529
+#define mmVTG1_CONTROL_BASE_IDX 2
+#define mmDCHUBBUB_SOFT_RESET 0x052e
+#define mmDCHUBBUB_SOFT_RESET_BASE_IDX 2
+#define mmDCHUBBUB_CLOCK_CNTL 0x052f
+#define mmDCHUBBUB_CLOCK_CNTL_BASE_IDX 2
+#define mmDCFCLK_CNTL 0x0530
+#define mmDCFCLK_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_VLINE_SNAPSHOT 0x0533
+#define mmDCHUBBUB_VLINE_SNAPSHOT_BASE_IDX 2
+#define mmDCHUBBUB_CTRL_STATUS 0x0534
+#define mmDCHUBBUB_CTRL_STATUS_BASE_IDX 2
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL1 0x053a
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL1_BASE_IDX 2
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL2 0x053b
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL2_BASE_IDX 2
+#define mmDCHUBBUB_TIMEOUT_INTERRUPT_STATUS 0x053c
+#define mmDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_INDEX 0x053d
+#define mmDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_DATA 0x053e
+#define mmDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
+#define mmFMON_CTRL 0x0548
+#define mmFMON_CTRL_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+// base address: 0x0
+#define mmHUBP0_DCSURF_SURFACE_CONFIG 0x05e5
+#define mmHUBP0_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCSURF_ADDR_CONFIG 0x05e6
+#define mmHUBP0_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCSURF_TILING_CONFIG 0x05e7
+#define mmHUBP0_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START 0x05e9
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x05ea
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_C 0x05eb
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x05ec
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START 0x05ed
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION 0x05ee
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_C 0x05ef
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x05f0
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG 0x05f1
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_C 0x05f2
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP0_DCHUBP_CNTL 0x05f3
+#define mmHUBP0_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP0_HUBP_CLK_CNTL 0x05f4
+#define mmHUBP0_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP0_HUBPREQ_DEBUG_DB 0x05f6
+#define mmHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP0_HUBPREQ_DEBUG 0x05f7
+#define mmHUBP0_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x05fb
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x05fc
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+// base address: 0x0
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH 0x0607
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_C 0x0608
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS 0x060a
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x060b
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x060c
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x060d
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS 0x060e
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x060f
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x0610
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x0611
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x0612
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x0613
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x0614
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x0615
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x0616
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x0617
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x0618
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x0619
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_CONTROL 0x061a
+#define mmHUBPREQ0_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL 0x061b
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL2 0x061c
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT 0x0620
+#define mmHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE 0x0621
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH 0x0622
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_C 0x0623
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C 0x0624
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE 0x0625
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0626
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0627
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0628
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCN_EXPANSION_MODE 0x062c
+#define mmHUBPREQ0_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ0_DCN_TTU_QOS_WM 0x062d
+#define mmHUBPREQ0_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ0_DCN_GLOBAL_TTU_CNTL 0x062e
+#define mmHUBPREQ0_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL0 0x062f
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL1 0x0630
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL0 0x0631
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL1 0x0632
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL0 0x0633
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL1 0x0634
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ0_BLANK_OFFSET_0 0x0646
+#define mmHUBPREQ0_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ0_BLANK_OFFSET_1 0x0647
+#define mmHUBPREQ0_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ0_DST_DIMENSIONS 0x0648
+#define mmHUBPREQ0_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ0_DST_AFTER_SCALER 0x0649
+#define mmHUBPREQ0_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ0_PREFETCH_SETTINGS 0x064a
+#define mmHUBPREQ0_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ0_PREFETCH_SETTINGS_C 0x064b
+#define mmHUBPREQ0_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_0 0x064c
+#define mmHUBPREQ0_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_1 0x064d
+#define mmHUBPREQ0_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_2 0x064e
+#define mmHUBPREQ0_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_3 0x064f
+#define mmHUBPREQ0_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_4 0x0650
+#define mmHUBPREQ0_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_0 0x0651
+#define mmHUBPREQ0_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_2 0x0653
+#define mmHUBPREQ0_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_4 0x0658
+#define mmHUBPREQ0_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_5 0x0659
+#define mmHUBPREQ0_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_6 0x065a
+#define mmHUBPREQ0_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_7 0x065b
+#define mmHUBPREQ0_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ0_PER_LINE_DELIVERY_PRE 0x065c
+#define mmHUBPREQ0_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ0_PER_LINE_DELIVERY 0x065d
+#define mmHUBPREQ0_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ0_CURSOR_SETTINGS 0x065e
+#define mmHUBPREQ0_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ0_REF_FREQ_TO_PIX_FREQ 0x065f
+#define mmHUBPREQ0_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT 0x0660
+#define mmHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_CTRL 0x0661
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_STATUS 0x0662
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+// base address: 0x0
+#define mmHUBPRET0_HUBPRET_CONTROL 0x066a
+#define mmHUBPRET0_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_MEM_PWR_CTRL 0x066b
+#define mmHUBPRET0_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_MEM_PWR_STATUS 0x066c
+#define mmHUBPRET0_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL0 0x066d
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL1 0x066e
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE0 0x066f
+#define mmHUBPRET0_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE1 0x0670
+#define mmHUBPRET0_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_INTERRUPT 0x0671
+#define mmHUBPRET0_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_VALUE 0x0672
+#define mmHUBPRET0_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_STATUS 0x0673
+#define mmHUBPRET0_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+// base address: 0x0
+#define mmCURSOR0_0_CURSOR_CONTROL 0x0678
+#define mmCURSOR0_0_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS 0x0679
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH 0x067a
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_SIZE 0x067b
+#define mmCURSOR0_0_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_POSITION 0x067c
+#define mmCURSOR0_0_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_HOT_SPOT 0x067d
+#define mmCURSOR0_0_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_STEREO_CONTROL 0x067e
+#define mmCURSOR0_0_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_DST_OFFSET 0x067f
+#define mmCURSOR0_0_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_MEM_PWR_CTRL 0x0680
+#define mmCURSOR0_0_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_MEM_PWR_STATUS 0x0681
+#define mmCURSOR0_0_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_ADDRESS_HIGH 0x0682
+#define mmCURSOR0_0_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_ADDRESS_LOW 0x0683
+#define mmCURSOR0_0_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_CNTL 0x0684
+#define mmCURSOR0_0_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_QOS_CNTL 0x0685
+#define mmCURSOR0_0_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_STATUS 0x0686
+#define mmCURSOR0_0_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_SW_CNTL 0x0687
+#define mmCURSOR0_0_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_SW_DATA 0x0688
+#define mmCURSOR0_0_DMDATA_SW_DATA_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+// base address: 0x370
+#define mmHUBP1_DCSURF_SURFACE_CONFIG 0x06c1
+#define mmHUBP1_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCSURF_ADDR_CONFIG 0x06c2
+#define mmHUBP1_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCSURF_TILING_CONFIG 0x06c3
+#define mmHUBP1_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START 0x06c5
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION 0x06c6
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_C 0x06c7
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x06c8
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START 0x06c9
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION 0x06ca
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_C 0x06cb
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x06cc
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG 0x06cd
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_C 0x06ce
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP1_DCHUBP_CNTL 0x06cf
+#define mmHUBP1_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP1_HUBP_CLK_CNTL 0x06d0
+#define mmHUBP1_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP1_HUBPREQ_DEBUG_DB 0x06d2
+#define mmHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP1_HUBPREQ_DEBUG 0x06d3
+#define mmHUBP1_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06d7
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06d8
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+// base address: 0x370
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH 0x06e3
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_C 0x06e4
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS 0x06e6
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x06e7
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x06e8
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x06e9
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS 0x06ea
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x06eb
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x06ec
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x06ed
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x06ee
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x06ef
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x06f0
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x06f1
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x06f2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x06f3
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x06f4
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x06f5
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_CONTROL 0x06f6
+#define mmHUBPREQ1_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL 0x06f7
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL2 0x06f8
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT 0x06fc
+#define mmHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE 0x06fd
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH 0x06fe
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_C 0x06ff
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C 0x0700
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE 0x0701
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0702
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0703
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0704
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCN_EXPANSION_MODE 0x0708
+#define mmHUBPREQ1_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ1_DCN_TTU_QOS_WM 0x0709
+#define mmHUBPREQ1_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ1_DCN_GLOBAL_TTU_CNTL 0x070a
+#define mmHUBPREQ1_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL0 0x070b
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL1 0x070c
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL0 0x070d
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL1 0x070e
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL0 0x070f
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL1 0x0710
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL0 0x0711
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL1 0x0712
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_BLANK_OFFSET_0 0x0722
+#define mmHUBPREQ1_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ1_BLANK_OFFSET_1 0x0723
+#define mmHUBPREQ1_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ1_DST_DIMENSIONS 0x0724
+#define mmHUBPREQ1_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ1_DST_AFTER_SCALER 0x0725
+#define mmHUBPREQ1_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ1_PREFETCH_SETTINGS 0x0726
+#define mmHUBPREQ1_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ1_PREFETCH_SETTINGS_C 0x0727
+#define mmHUBPREQ1_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_0 0x0728
+#define mmHUBPREQ1_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_1 0x0729
+#define mmHUBPREQ1_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_2 0x072a
+#define mmHUBPREQ1_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_3 0x072b
+#define mmHUBPREQ1_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_4 0x072c
+#define mmHUBPREQ1_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_0 0x072d
+#define mmHUBPREQ1_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_2 0x072f
+#define mmHUBPREQ1_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_4 0x0734
+#define mmHUBPREQ1_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_5 0x0735
+#define mmHUBPREQ1_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_6 0x0736
+#define mmHUBPREQ1_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_7 0x0737
+#define mmHUBPREQ1_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ1_PER_LINE_DELIVERY_PRE 0x0738
+#define mmHUBPREQ1_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ1_PER_LINE_DELIVERY 0x0739
+#define mmHUBPREQ1_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ1_CURSOR_SETTINGS 0x073a
+#define mmHUBPREQ1_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ1_REF_FREQ_TO_PIX_FREQ 0x073b
+#define mmHUBPREQ1_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT 0x073c
+#define mmHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_CTRL 0x073d
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_STATUS 0x073e
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+// base address: 0x370
+#define mmHUBPRET1_HUBPRET_CONTROL 0x0746
+#define mmHUBPRET1_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_MEM_PWR_CTRL 0x0747
+#define mmHUBPRET1_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_MEM_PWR_STATUS 0x0748
+#define mmHUBPRET1_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL0 0x0749
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL1 0x074a
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE0 0x074b
+#define mmHUBPRET1_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE1 0x074c
+#define mmHUBPRET1_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_INTERRUPT 0x074d
+#define mmHUBPRET1_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_VALUE 0x074e
+#define mmHUBPRET1_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_STATUS 0x074f
+#define mmHUBPRET1_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+// base address: 0x370
+#define mmCURSOR0_1_CURSOR_CONTROL 0x0754
+#define mmCURSOR0_1_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS 0x0755
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH 0x0756
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_SIZE 0x0757
+#define mmCURSOR0_1_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_POSITION 0x0758
+#define mmCURSOR0_1_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_HOT_SPOT 0x0759
+#define mmCURSOR0_1_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_STEREO_CONTROL 0x075a
+#define mmCURSOR0_1_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_DST_OFFSET 0x075b
+#define mmCURSOR0_1_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_MEM_PWR_CTRL 0x075c
+#define mmCURSOR0_1_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_MEM_PWR_STATUS 0x075d
+#define mmCURSOR0_1_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_ADDRESS_HIGH 0x075e
+#define mmCURSOR0_1_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_ADDRESS_LOW 0x075f
+#define mmCURSOR0_1_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_CNTL 0x0760
+#define mmCURSOR0_1_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_QOS_CNTL 0x0761
+#define mmCURSOR0_1_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_STATUS 0x0762
+#define mmCURSOR0_1_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_SW_CNTL 0x0763
+#define mmCURSOR0_1_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_SW_DATA 0x0764
+#define mmCURSOR0_1_DMDATA_SW_DATA_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+// base address: 0x6e0
+#define mmHUBP2_DCSURF_SURFACE_CONFIG 0x079d
+#define mmHUBP2_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCSURF_ADDR_CONFIG 0x079e
+#define mmHUBP2_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCSURF_TILING_CONFIG 0x079f
+#define mmHUBP2_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START 0x07a1
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION 0x07a2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_C 0x07a3
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x07a4
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START 0x07a5
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION 0x07a6
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_C 0x07a7
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x07a8
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG 0x07a9
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_C 0x07aa
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP2_DCHUBP_CNTL 0x07ab
+#define mmHUBP2_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP2_HUBP_CLK_CNTL 0x07ac
+#define mmHUBP2_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP2_HUBPREQ_DEBUG_DB 0x07ae
+#define mmHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP2_HUBPREQ_DEBUG 0x07af
+#define mmHUBP2_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07b3
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07b4
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+// base address: 0x6e0
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH 0x07bf
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_C 0x07c0
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS 0x07c2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x07c3
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x07c4
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x07c5
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS 0x07c6
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x07c7
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x07c8
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x07c9
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x07ca
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x07cb
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x07cc
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x07cd
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x07ce
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x07cf
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x07d0
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x07d1
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_CONTROL 0x07d2
+#define mmHUBPREQ2_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL 0x07d3
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL2 0x07d4
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT 0x07d8
+#define mmHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE 0x07d9
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH 0x07da
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_C 0x07db
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C 0x07dc
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE 0x07dd
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x07de
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C 0x07df
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x07e0
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCN_EXPANSION_MODE 0x07e4
+#define mmHUBPREQ2_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ2_DCN_TTU_QOS_WM 0x07e5
+#define mmHUBPREQ2_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ2_DCN_GLOBAL_TTU_CNTL 0x07e6
+#define mmHUBPREQ2_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL0 0x07e7
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL1 0x07e8
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL0 0x07e9
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL1 0x07ea
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL0 0x07eb
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL1 0x07ec
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL0 0x07ed
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL1 0x07ee
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_BLANK_OFFSET_0 0x07fe
+#define mmHUBPREQ2_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ2_BLANK_OFFSET_1 0x07ff
+#define mmHUBPREQ2_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ2_DST_DIMENSIONS 0x0800
+#define mmHUBPREQ2_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ2_DST_AFTER_SCALER 0x0801
+#define mmHUBPREQ2_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ2_PREFETCH_SETTINGS 0x0802
+#define mmHUBPREQ2_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ2_PREFETCH_SETTINGS_C 0x0803
+#define mmHUBPREQ2_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_0 0x0804
+#define mmHUBPREQ2_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_1 0x0805
+#define mmHUBPREQ2_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_2 0x0806
+#define mmHUBPREQ2_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_3 0x0807
+#define mmHUBPREQ2_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_4 0x0808
+#define mmHUBPREQ2_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_0 0x0809
+#define mmHUBPREQ2_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_2 0x080b
+#define mmHUBPREQ2_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_4 0x0810
+#define mmHUBPREQ2_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_5 0x0811
+#define mmHUBPREQ2_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_6 0x0812
+#define mmHUBPREQ2_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_7 0x0813
+#define mmHUBPREQ2_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ2_PER_LINE_DELIVERY_PRE 0x0814
+#define mmHUBPREQ2_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ2_PER_LINE_DELIVERY 0x0815
+#define mmHUBPREQ2_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ2_CURSOR_SETTINGS 0x0816
+#define mmHUBPREQ2_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ2_REF_FREQ_TO_PIX_FREQ 0x0817
+#define mmHUBPREQ2_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT 0x0818
+#define mmHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_CTRL 0x0819
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_STATUS 0x081a
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+// base address: 0x6e0
+#define mmHUBPRET2_HUBPRET_CONTROL 0x0822
+#define mmHUBPRET2_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_MEM_PWR_CTRL 0x0823
+#define mmHUBPRET2_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_MEM_PWR_STATUS 0x0824
+#define mmHUBPRET2_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL0 0x0825
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL1 0x0826
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE0 0x0827
+#define mmHUBPRET2_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE1 0x0828
+#define mmHUBPRET2_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_INTERRUPT 0x0829
+#define mmHUBPRET2_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_VALUE 0x082a
+#define mmHUBPRET2_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_STATUS 0x082b
+#define mmHUBPRET2_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+// base address: 0x6e0
+#define mmCURSOR0_2_CURSOR_CONTROL 0x0830
+#define mmCURSOR0_2_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS 0x0831
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH 0x0832
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_SIZE 0x0833
+#define mmCURSOR0_2_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_POSITION 0x0834
+#define mmCURSOR0_2_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_HOT_SPOT 0x0835
+#define mmCURSOR0_2_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_STEREO_CONTROL 0x0836
+#define mmCURSOR0_2_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_DST_OFFSET 0x0837
+#define mmCURSOR0_2_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_MEM_PWR_CTRL 0x0838
+#define mmCURSOR0_2_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_MEM_PWR_STATUS 0x0839
+#define mmCURSOR0_2_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_ADDRESS_HIGH 0x083a
+#define mmCURSOR0_2_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_ADDRESS_LOW 0x083b
+#define mmCURSOR0_2_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_CNTL 0x083c
+#define mmCURSOR0_2_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_QOS_CNTL 0x083d
+#define mmCURSOR0_2_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_STATUS 0x083e
+#define mmCURSOR0_2_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_SW_CNTL 0x083f
+#define mmCURSOR0_2_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_SW_DATA 0x0840
+#define mmCURSOR0_2_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+// base address: 0xa50
+#define mmHUBP3_DCSURF_SURFACE_CONFIG 0x0879
+#define mmHUBP3_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCSURF_ADDR_CONFIG 0x087a
+#define mmHUBP3_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCSURF_TILING_CONFIG 0x087b
+#define mmHUBP3_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START 0x087d
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION 0x087e
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_C 0x087f
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x0880
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START 0x0881
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION 0x0882
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_C 0x0883
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x0884
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG 0x0885
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_C 0x0886
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP3_DCHUBP_CNTL 0x0887
+#define mmHUBP3_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP3_HUBP_CLK_CNTL 0x0888
+#define mmHUBP3_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP3_HUBPREQ_DEBUG_DB 0x088a
+#define mmHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP3_HUBPREQ_DEBUG 0x088b
+#define mmHUBP3_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x088f
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0890
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+// base address: 0xa50
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH 0x089b
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_C 0x089c
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS 0x089e
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x089f
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x08a0
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x08a1
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS 0x08a2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x08a3
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x08a4
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x08a5
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x08a6
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x08a7
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x08a8
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x08a9
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x08aa
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x08ab
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x08ac
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x08ad
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_CONTROL 0x08ae
+#define mmHUBPREQ3_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL 0x08af
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL2 0x08b0
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT 0x08b4
+#define mmHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE 0x08b5
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH 0x08b6
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_C 0x08b7
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C 0x08b8
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE 0x08b9
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x08ba
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C 0x08bb
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x08bc
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCN_EXPANSION_MODE 0x08c0
+#define mmHUBPREQ3_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ3_DCN_TTU_QOS_WM 0x08c1
+#define mmHUBPREQ3_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ3_DCN_GLOBAL_TTU_CNTL 0x08c2
+#define mmHUBPREQ3_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL0 0x08c3
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL1 0x08c4
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL0 0x08c5
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL1 0x08c6
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL0 0x08c7
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL1 0x08c8
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ3_BLANK_OFFSET_0 0x08da
+#define mmHUBPREQ3_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ3_BLANK_OFFSET_1 0x08db
+#define mmHUBPREQ3_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ3_DST_DIMENSIONS 0x08dc
+#define mmHUBPREQ3_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ3_DST_AFTER_SCALER 0x08dd
+#define mmHUBPREQ3_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ3_PREFETCH_SETTINGS 0x08de
+#define mmHUBPREQ3_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ3_PREFETCH_SETTINGS_C 0x08df
+#define mmHUBPREQ3_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_0 0x08e0
+#define mmHUBPREQ3_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_1 0x08e1
+#define mmHUBPREQ3_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_2 0x08e2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_3 0x08e3
+#define mmHUBPREQ3_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_4 0x08e4
+#define mmHUBPREQ3_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_0 0x08e5
+#define mmHUBPREQ3_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_2 0x08e7
+#define mmHUBPREQ3_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_4 0x08ec
+#define mmHUBPREQ3_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_5 0x08ed
+#define mmHUBPREQ3_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_6 0x08ee
+#define mmHUBPREQ3_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_7 0x08ef
+#define mmHUBPREQ3_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ3_PER_LINE_DELIVERY_PRE 0x08f0
+#define mmHUBPREQ3_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ3_PER_LINE_DELIVERY 0x08f1
+#define mmHUBPREQ3_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ3_CURSOR_SETTINGS 0x08f2
+#define mmHUBPREQ3_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ3_REF_FREQ_TO_PIX_FREQ 0x08f3
+#define mmHUBPREQ3_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT 0x08f4
+#define mmHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_CTRL 0x08f5
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_STATUS 0x08f6
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+// base address: 0xa50
+#define mmHUBPRET3_HUBPRET_CONTROL 0x08fe
+#define mmHUBPRET3_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_MEM_PWR_CTRL 0x08ff
+#define mmHUBPRET3_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_MEM_PWR_STATUS 0x0900
+#define mmHUBPRET3_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL0 0x0901
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL1 0x0902
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE0 0x0903
+#define mmHUBPRET3_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE1 0x0904
+#define mmHUBPRET3_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_INTERRUPT 0x0905
+#define mmHUBPRET3_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_VALUE 0x0906
+#define mmHUBPRET3_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_STATUS 0x0907
+#define mmHUBPRET3_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+// base address: 0xa50
+#define mmCURSOR0_3_CURSOR_CONTROL 0x090c
+#define mmCURSOR0_3_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS 0x090d
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH 0x090e
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_SIZE 0x090f
+#define mmCURSOR0_3_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_POSITION 0x0910
+#define mmCURSOR0_3_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_HOT_SPOT 0x0911
+#define mmCURSOR0_3_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_STEREO_CONTROL 0x0912
+#define mmCURSOR0_3_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_DST_OFFSET 0x0913
+#define mmCURSOR0_3_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_MEM_PWR_CTRL 0x0914
+#define mmCURSOR0_3_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_MEM_PWR_STATUS 0x0915
+#define mmCURSOR0_3_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_ADDRESS_HIGH 0x0916
+#define mmCURSOR0_3_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_ADDRESS_LOW 0x0917
+#define mmCURSOR0_3_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_CNTL 0x0918
+#define mmCURSOR0_3_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_QOS_CNTL 0x0919
+#define mmCURSOR0_3_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_STATUS 0x091a
+#define mmCURSOR0_3_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_SW_CNTL 0x091b
+#define mmCURSOR0_3_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_SW_DATA 0x091c
+#define mmCURSOR0_3_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+// base address: 0x0
+#define mmDPP_TOP0_DPP_CONTROL 0x0cc5
+#define mmDPP_TOP0_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP0_DPP_SOFT_RESET 0x0cc6
+#define mmDPP_TOP0_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP0_DPP_CRC_VAL_R_G 0x0cc7
+#define mmDPP_TOP0_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP0_DPP_CRC_VAL_B_A 0x0cc8
+#define mmDPP_TOP0_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP0_DPP_CRC_CTRL 0x0cc9
+#define mmDPP_TOP0_DPP_CRC_CTRL_BASE_IDX 2
+#define mmDPP_TOP0_HOST_READ_CONTROL 0x0cca
+#define mmDPP_TOP0_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+// base address: 0x0
+#define mmCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT 0x0ccf
+#define mmCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG0_FORMAT_CONTROL 0x0cd0
+#define mmCNVC_CFG0_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_R 0x0cd1
+#define mmCNVC_CFG0_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_G 0x0cd2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_B 0x0cd3
+#define mmCNVC_CFG0_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_SCALE_R 0x0cd4
+#define mmCNVC_CFG0_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_SCALE_G 0x0cd5
+#define mmCNVC_CFG0_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_SCALE_B 0x0cd6
+#define mmCNVC_CFG0_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_CONTROL 0x0cd7
+#define mmCNVC_CFG0_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_ALPHA 0x0cd8
+#define mmCNVC_CFG0_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_RED 0x0cd9
+#define mmCNVC_CFG0_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_GREEN 0x0cda
+#define mmCNVC_CFG0_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_BLUE 0x0cdb
+#define mmCNVC_CFG0_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG0_ALPHA_2BIT_LUT 0x0cdd
+#define mmCNVC_CFG0_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+// base address: 0x0
+#define mmCNVC_CUR0_CURSOR0_CONTROL 0x0ce0
+#define mmCNVC_CUR0_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR0_CURSOR0_COLOR0 0x0ce1
+#define mmCNVC_CUR0_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR0_CURSOR0_COLOR1 0x0ce2
+#define mmCNVC_CUR0_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR0_CURSOR0_FP_SCALE_BIAS 0x0ce3
+#define mmCNVC_CUR0_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+// base address: 0x0
+#define mmDSCL0_SCL_COEF_RAM_TAP_SELECT 0x0cea
+#define mmDSCL0_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL0_SCL_COEF_RAM_TAP_DATA 0x0ceb
+#define mmDSCL0_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL0_SCL_MODE 0x0cec
+#define mmDSCL0_SCL_MODE_BASE_IDX 2
+#define mmDSCL0_SCL_TAP_CONTROL 0x0ced
+#define mmDSCL0_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL0_DSCL_CONTROL 0x0cee
+#define mmDSCL0_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL0_DSCL_2TAP_CONTROL 0x0cef
+#define mmDSCL0_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL0_SCL_MANUAL_REPLICATE_CONTROL 0x0cf0
+#define mmDSCL0_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO 0x0cf1
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_INIT 0x0cf2
+#define mmDSCL0_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0cf3
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_INIT_C 0x0cf4
+#define mmDSCL0_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO 0x0cf5
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT 0x0cf6
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT 0x0cf7
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C 0x0cf8
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT_C 0x0cf9
+#define mmDSCL0_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_C 0x0cfa
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL0_SCL_BLACK_OFFSET 0x0cfb
+#define mmDSCL0_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL0_DSCL_UPDATE 0x0cfc
+#define mmDSCL0_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL0_DSCL_AUTOCAL 0x0cfd
+#define mmDSCL0_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0cfe
+#define mmDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0cff
+#define mmDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL0_OTG_H_BLANK 0x0d00
+#define mmDSCL0_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL0_OTG_V_BLANK 0x0d01
+#define mmDSCL0_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL0_RECOUT_START 0x0d02
+#define mmDSCL0_RECOUT_START_BASE_IDX 2
+#define mmDSCL0_RECOUT_SIZE 0x0d03
+#define mmDSCL0_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL0_MPC_SIZE 0x0d04
+#define mmDSCL0_MPC_SIZE_BASE_IDX 2
+#define mmDSCL0_LB_DATA_FORMAT 0x0d05
+#define mmDSCL0_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL0_LB_MEMORY_CTRL 0x0d06
+#define mmDSCL0_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL0_LB_V_COUNTER 0x0d07
+#define mmDSCL0_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL0_DSCL_MEM_PWR_CTRL 0x0d08
+#define mmDSCL0_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL0_DSCL_MEM_PWR_STATUS 0x0d09
+#define mmDSCL0_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL0_OBUF_CONTROL 0x0d0a
+#define mmDSCL0_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL0_OBUF_MEM_PWR_CTRL 0x0d0b
+#define mmDSCL0_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+// base address: 0x0
+#define mmCM0_CM_CONTROL 0x0d1a
+#define mmCM0_CM_CONTROL_BASE_IDX 2
+#define mmCM0_CM_ICSC_CONTROL 0x0d1b
+#define mmCM0_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM0_CM_ICSC_C11_C12 0x0d1c
+#define mmCM0_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM0_CM_ICSC_C13_C14 0x0d1d
+#define mmCM0_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM0_CM_ICSC_C21_C22 0x0d1e
+#define mmCM0_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM0_CM_ICSC_C23_C24 0x0d1f
+#define mmCM0_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM0_CM_ICSC_C31_C32 0x0d20
+#define mmCM0_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM0_CM_ICSC_C33_C34 0x0d21
+#define mmCM0_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C11_C12 0x0d22
+#define mmCM0_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C13_C14 0x0d23
+#define mmCM0_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C21_C22 0x0d24
+#define mmCM0_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C23_C24 0x0d25
+#define mmCM0_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C31_C32 0x0d26
+#define mmCM0_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C33_C34 0x0d27
+#define mmCM0_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_CONTROL 0x0d28
+#define mmCM0_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C11_C12 0x0d29
+#define mmCM0_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C13_C14 0x0d2a
+#define mmCM0_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C21_C22 0x0d2b
+#define mmCM0_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C23_C24 0x0d2c
+#define mmCM0_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C31_C32 0x0d2d
+#define mmCM0_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C33_C34 0x0d2e
+#define mmCM0_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C11_C12 0x0d2f
+#define mmCM0_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C13_C14 0x0d30
+#define mmCM0_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C21_C22 0x0d31
+#define mmCM0_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C23_C24 0x0d32
+#define mmCM0_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C31_C32 0x0d33
+#define mmCM0_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C33_C34 0x0d34
+#define mmCM0_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM0_CM_BIAS_CR_R 0x0d35
+#define mmCM0_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM0_CM_BIAS_Y_G_CB_B 0x0d36
+#define mmCM0_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_CONTROL 0x0d37
+#define mmCM0_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM0_CM_DGAM_LUT_INDEX 0x0d38
+#define mmCM0_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_DGAM_LUT_DATA 0x0d39
+#define mmCM0_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM0_CM_DGAM_LUT_WRITE_EN_MASK 0x0d3a
+#define mmCM0_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_B 0x0d3b
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_G 0x0d3c
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_R 0x0d3d
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_B 0x0d3e
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_G 0x0d3f
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_R 0x0d40
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_B 0x0d41
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_B 0x0d42
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_G 0x0d43
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_G 0x0d44
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_R 0x0d45
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_R 0x0d46
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_0_1 0x0d47
+#define mmCM0_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_2_3 0x0d48
+#define mmCM0_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_4_5 0x0d49
+#define mmCM0_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_6_7 0x0d4a
+#define mmCM0_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_8_9 0x0d4b
+#define mmCM0_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_10_11 0x0d4c
+#define mmCM0_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_12_13 0x0d4d
+#define mmCM0_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_14_15 0x0d4e
+#define mmCM0_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_B 0x0d4f
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_G 0x0d50
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_R 0x0d51
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_B 0x0d52
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_G 0x0d53
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_R 0x0d54
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_B 0x0d55
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_B 0x0d56
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_G 0x0d57
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_G 0x0d58
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_R 0x0d59
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_R 0x0d5a
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_0_1 0x0d5b
+#define mmCM0_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_2_3 0x0d5c
+#define mmCM0_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_4_5 0x0d5d
+#define mmCM0_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_6_7 0x0d5e
+#define mmCM0_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_8_9 0x0d5f
+#define mmCM0_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_10_11 0x0d60
+#define mmCM0_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_12_13 0x0d61
+#define mmCM0_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_14_15 0x0d62
+#define mmCM0_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_CONTROL 0x0d63
+#define mmCM0_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_LUT_INDEX 0x0d64
+#define mmCM0_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_LUT_DATA 0x0d65
+#define mmCM0_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x0d66
+#define mmCM0_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_B 0x0d67
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_G 0x0d68
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_R 0x0d69
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x0d6a
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x0d6b
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x0d6c
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_B 0x0d6d
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_B 0x0d6e
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_G 0x0d6f
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_G 0x0d70
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_R 0x0d71
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_R 0x0d72
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_0_1 0x0d73
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_2_3 0x0d74
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_4_5 0x0d75
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_6_7 0x0d76
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_8_9 0x0d77
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_10_11 0x0d78
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_12_13 0x0d79
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_14_15 0x0d7a
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_16_17 0x0d7b
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_18_19 0x0d7c
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_20_21 0x0d7d
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_22_23 0x0d7e
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_24_25 0x0d7f
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_26_27 0x0d80
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_28_29 0x0d81
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_30_31 0x0d82
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_32_33 0x0d83
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_B 0x0d84
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_G 0x0d85
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_R 0x0d86
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x0d87
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x0d88
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x0d89
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_B 0x0d8a
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_B 0x0d8b
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_G 0x0d8c
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_G 0x0d8d
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_R 0x0d8e
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_R 0x0d8f
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_0_1 0x0d90
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_2_3 0x0d91
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_4_5 0x0d92
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_6_7 0x0d93
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_8_9 0x0d94
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_10_11 0x0d95
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_12_13 0x0d96
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_14_15 0x0d97
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_16_17 0x0d98
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_18_19 0x0d99
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_20_21 0x0d9a
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_22_23 0x0d9b
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_24_25 0x0d9c
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_26_27 0x0d9d
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_28_29 0x0d9e
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_30_31 0x0d9f
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_32_33 0x0da0
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_HDR_MULT_COEF 0x0da1
+#define mmCM0_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_CTRL 0x0da2
+#define mmCM0_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_STATUS 0x0da3
+#define mmCM0_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM0_CM_DEALPHA 0x0da5
+#define mmCM0_CM_DEALPHA_BASE_IDX 2
+#define mmCM0_CM_COEF_FORMAT 0x0da6
+#define mmCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM0_CM_SHAPER_CONTROL 0x0da7
+#define mmCM0_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM0_CM_SHAPER_OFFSET_R 0x0da8
+#define mmCM0_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_OFFSET_G 0x0da9
+#define mmCM0_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_OFFSET_B 0x0daa
+#define mmCM0_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_SCALE_R 0x0dab
+#define mmCM0_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_SCALE_G_B 0x0dac
+#define mmCM0_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_LUT_INDEX 0x0dad
+#define mmCM0_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_SHAPER_LUT_DATA 0x0dae
+#define mmCM0_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM0_CM_SHAPER_LUT_WRITE_EN_MASK 0x0daf
+#define mmCM0_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_B 0x0db0
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_G 0x0db1
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_R 0x0db2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_B 0x0db3
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_G 0x0db4
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_R 0x0db5
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_0_1 0x0db6
+#define mmCM0_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_2_3 0x0db7
+#define mmCM0_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_4_5 0x0db8
+#define mmCM0_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_6_7 0x0db9
+#define mmCM0_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_8_9 0x0dba
+#define mmCM0_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_10_11 0x0dbb
+#define mmCM0_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_12_13 0x0dbc
+#define mmCM0_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_14_15 0x0dbd
+#define mmCM0_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_16_17 0x0dbe
+#define mmCM0_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_18_19 0x0dbf
+#define mmCM0_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_20_21 0x0dc0
+#define mmCM0_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_22_23 0x0dc1
+#define mmCM0_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_24_25 0x0dc2
+#define mmCM0_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_26_27 0x0dc3
+#define mmCM0_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_28_29 0x0dc4
+#define mmCM0_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_30_31 0x0dc5
+#define mmCM0_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_32_33 0x0dc6
+#define mmCM0_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_B 0x0dc7
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_G 0x0dc8
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_R 0x0dc9
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_B 0x0dca
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_G 0x0dcb
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_R 0x0dcc
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_0_1 0x0dcd
+#define mmCM0_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_2_3 0x0dce
+#define mmCM0_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_4_5 0x0dcf
+#define mmCM0_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_6_7 0x0dd0
+#define mmCM0_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_8_9 0x0dd1
+#define mmCM0_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_10_11 0x0dd2
+#define mmCM0_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_12_13 0x0dd3
+#define mmCM0_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_14_15 0x0dd4
+#define mmCM0_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_16_17 0x0dd5
+#define mmCM0_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_18_19 0x0dd6
+#define mmCM0_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_20_21 0x0dd7
+#define mmCM0_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_22_23 0x0dd8
+#define mmCM0_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_24_25 0x0dd9
+#define mmCM0_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_26_27 0x0dda
+#define mmCM0_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_28_29 0x0ddb
+#define mmCM0_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_30_31 0x0ddc
+#define mmCM0_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_32_33 0x0ddd
+#define mmCM0_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_CTRL2 0x0dde
+#define mmCM0_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_STATUS2 0x0ddf
+#define mmCM0_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM0_CM_3DLUT_MODE 0x0de0
+#define mmCM0_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM0_CM_3DLUT_INDEX 0x0de1
+#define mmCM0_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_3DLUT_DATA 0x0de2
+#define mmCM0_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM0_CM_3DLUT_DATA_30BIT 0x0de3
+#define mmCM0_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM0_CM_3DLUT_READ_WRITE_CONTROL 0x0de4
+#define mmCM0_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_NORM_FACTOR 0x0de5
+#define mmCM0_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_OFFSET_R 0x0de6
+#define mmCM0_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_OFFSET_G 0x0de7
+#define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0de8
+#define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_INDEX 0x0de9
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_DATA 0x0dea
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+// base address: 0x5ac
+#define mmDPP_TOP1_DPP_CONTROL 0x0e30
+#define mmDPP_TOP1_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP1_DPP_SOFT_RESET 0x0e31
+#define mmDPP_TOP1_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP1_DPP_CRC_VAL_R_G 0x0e32
+#define mmDPP_TOP1_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP1_DPP_CRC_VAL_B_A 0x0e33
+#define mmDPP_TOP1_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP1_DPP_CRC_CTRL 0x0e34
+#define mmDPP_TOP1_DPP_CRC_CTRL_BASE_IDX 2
+#define mmDPP_TOP1_HOST_READ_CONTROL 0x0e35
+#define mmDPP_TOP1_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+// base address: 0x5ac
+#define mmCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT 0x0e3a
+#define mmCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG1_FORMAT_CONTROL 0x0e3b
+#define mmCNVC_CFG1_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_BIAS_R 0x0e3c
+#define mmCNVC_CFG1_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_BIAS_G 0x0e3d
+#define mmCNVC_CFG1_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_BIAS_B 0x0e3e
+#define mmCNVC_CFG1_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_SCALE_R 0x0e3f
+#define mmCNVC_CFG1_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_SCALE_G 0x0e40
+#define mmCNVC_CFG1_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_SCALE_B 0x0e41
+#define mmCNVC_CFG1_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_CONTROL 0x0e42
+#define mmCNVC_CFG1_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_ALPHA 0x0e43
+#define mmCNVC_CFG1_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_RED 0x0e44
+#define mmCNVC_CFG1_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_GREEN 0x0e45
+#define mmCNVC_CFG1_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_BLUE 0x0e46
+#define mmCNVC_CFG1_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG1_ALPHA_2BIT_LUT 0x0e48
+#define mmCNVC_CFG1_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+// base address: 0x5ac
+#define mmCNVC_CUR1_CURSOR0_CONTROL 0x0e4b
+#define mmCNVC_CUR1_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR1_CURSOR0_COLOR0 0x0e4c
+#define mmCNVC_CUR1_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR1_CURSOR0_COLOR1 0x0e4d
+#define mmCNVC_CUR1_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR1_CURSOR0_FP_SCALE_BIAS 0x0e4e
+#define mmCNVC_CUR1_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+// base address: 0x5ac
+#define mmDSCL1_SCL_COEF_RAM_TAP_SELECT 0x0e55
+#define mmDSCL1_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL1_SCL_COEF_RAM_TAP_DATA 0x0e56
+#define mmDSCL1_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL1_SCL_MODE 0x0e57
+#define mmDSCL1_SCL_MODE_BASE_IDX 2
+#define mmDSCL1_SCL_TAP_CONTROL 0x0e58
+#define mmDSCL1_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL1_DSCL_CONTROL 0x0e59
+#define mmDSCL1_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL1_DSCL_2TAP_CONTROL 0x0e5a
+#define mmDSCL1_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL1_SCL_MANUAL_REPLICATE_CONTROL 0x0e5b
+#define mmDSCL1_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO 0x0e5c
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_INIT 0x0e5d
+#define mmDSCL1_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0e5e
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_INIT_C 0x0e5f
+#define mmDSCL1_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO 0x0e60
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT 0x0e61
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT 0x0e62
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C 0x0e63
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT_C 0x0e64
+#define mmDSCL1_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_C 0x0e65
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL1_SCL_BLACK_OFFSET 0x0e66
+#define mmDSCL1_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL1_DSCL_UPDATE 0x0e67
+#define mmDSCL1_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL1_DSCL_AUTOCAL 0x0e68
+#define mmDSCL1_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0e69
+#define mmDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0e6a
+#define mmDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL1_OTG_H_BLANK 0x0e6b
+#define mmDSCL1_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL1_OTG_V_BLANK 0x0e6c
+#define mmDSCL1_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL1_RECOUT_START 0x0e6d
+#define mmDSCL1_RECOUT_START_BASE_IDX 2
+#define mmDSCL1_RECOUT_SIZE 0x0e6e
+#define mmDSCL1_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL1_MPC_SIZE 0x0e6f
+#define mmDSCL1_MPC_SIZE_BASE_IDX 2
+#define mmDSCL1_LB_DATA_FORMAT 0x0e70
+#define mmDSCL1_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL1_LB_MEMORY_CTRL 0x0e71
+#define mmDSCL1_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL1_LB_V_COUNTER 0x0e72
+#define mmDSCL1_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL1_DSCL_MEM_PWR_CTRL 0x0e73
+#define mmDSCL1_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL1_DSCL_MEM_PWR_STATUS 0x0e74
+#define mmDSCL1_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL1_OBUF_CONTROL 0x0e75
+#define mmDSCL1_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL1_OBUF_MEM_PWR_CTRL 0x0e76
+#define mmDSCL1_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+// base address: 0x5ac
+#define mmCM1_CM_CONTROL 0x0e85
+#define mmCM1_CM_CONTROL_BASE_IDX 2
+#define mmCM1_CM_ICSC_CONTROL 0x0e86
+#define mmCM1_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM1_CM_ICSC_C11_C12 0x0e87
+#define mmCM1_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM1_CM_ICSC_C13_C14 0x0e88
+#define mmCM1_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM1_CM_ICSC_C21_C22 0x0e89
+#define mmCM1_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM1_CM_ICSC_C23_C24 0x0e8a
+#define mmCM1_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM1_CM_ICSC_C31_C32 0x0e8b
+#define mmCM1_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM1_CM_ICSC_C33_C34 0x0e8c
+#define mmCM1_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C11_C12 0x0e8d
+#define mmCM1_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C13_C14 0x0e8e
+#define mmCM1_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C21_C22 0x0e8f
+#define mmCM1_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C23_C24 0x0e90
+#define mmCM1_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C31_C32 0x0e91
+#define mmCM1_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C33_C34 0x0e92
+#define mmCM1_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_CONTROL 0x0e93
+#define mmCM1_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C11_C12 0x0e94
+#define mmCM1_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C13_C14 0x0e95
+#define mmCM1_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C21_C22 0x0e96
+#define mmCM1_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C23_C24 0x0e97
+#define mmCM1_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C31_C32 0x0e98
+#define mmCM1_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C33_C34 0x0e99
+#define mmCM1_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C11_C12 0x0e9a
+#define mmCM1_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C13_C14 0x0e9b
+#define mmCM1_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C21_C22 0x0e9c
+#define mmCM1_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C23_C24 0x0e9d
+#define mmCM1_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C31_C32 0x0e9e
+#define mmCM1_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C33_C34 0x0e9f
+#define mmCM1_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM1_CM_BIAS_CR_R 0x0ea0
+#define mmCM1_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM1_CM_BIAS_Y_G_CB_B 0x0ea1
+#define mmCM1_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_CONTROL 0x0ea2
+#define mmCM1_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM1_CM_DGAM_LUT_INDEX 0x0ea3
+#define mmCM1_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_DGAM_LUT_DATA 0x0ea4
+#define mmCM1_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM1_CM_DGAM_LUT_WRITE_EN_MASK 0x0ea5
+#define mmCM1_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_B 0x0ea6
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_G 0x0ea7
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_R 0x0ea8
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_B 0x0ea9
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_G 0x0eaa
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_R 0x0eab
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_B 0x0eac
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_B 0x0ead
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_G 0x0eae
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_G 0x0eaf
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_R 0x0eb0
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_R 0x0eb1
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_0_1 0x0eb2
+#define mmCM1_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_2_3 0x0eb3
+#define mmCM1_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_4_5 0x0eb4
+#define mmCM1_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_6_7 0x0eb5
+#define mmCM1_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_8_9 0x0eb6
+#define mmCM1_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_10_11 0x0eb7
+#define mmCM1_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_12_13 0x0eb8
+#define mmCM1_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_14_15 0x0eb9
+#define mmCM1_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_B 0x0eba
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_G 0x0ebb
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_R 0x0ebc
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_B 0x0ebd
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_G 0x0ebe
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_R 0x0ebf
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_B 0x0ec0
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_B 0x0ec1
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_G 0x0ec2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_G 0x0ec3
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_R 0x0ec4
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_R 0x0ec5
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_0_1 0x0ec6
+#define mmCM1_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_2_3 0x0ec7
+#define mmCM1_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_4_5 0x0ec8
+#define mmCM1_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_6_7 0x0ec9
+#define mmCM1_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_8_9 0x0eca
+#define mmCM1_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_10_11 0x0ecb
+#define mmCM1_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_12_13 0x0ecc
+#define mmCM1_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_14_15 0x0ecd
+#define mmCM1_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_CONTROL 0x0ece
+#define mmCM1_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_LUT_INDEX 0x0ecf
+#define mmCM1_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_LUT_DATA 0x0ed0
+#define mmCM1_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x0ed1
+#define mmCM1_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_B 0x0ed2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_G 0x0ed3
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_R 0x0ed4
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x0ed5
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x0ed6
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x0ed7
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_B 0x0ed8
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_B 0x0ed9
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_G 0x0eda
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_G 0x0edb
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_R 0x0edc
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_R 0x0edd
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_0_1 0x0ede
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_2_3 0x0edf
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_4_5 0x0ee0
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_6_7 0x0ee1
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_8_9 0x0ee2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_10_11 0x0ee3
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_12_13 0x0ee4
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_14_15 0x0ee5
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_16_17 0x0ee6
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_18_19 0x0ee7
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_20_21 0x0ee8
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_22_23 0x0ee9
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_24_25 0x0eea
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_26_27 0x0eeb
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_28_29 0x0eec
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_30_31 0x0eed
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_32_33 0x0eee
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_B 0x0eef
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_G 0x0ef0
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_R 0x0ef1
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x0ef2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x0ef3
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x0ef4
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_B 0x0ef5
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_B 0x0ef6
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_G 0x0ef7
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_G 0x0ef8
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_R 0x0ef9
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_R 0x0efa
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_0_1 0x0efb
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_2_3 0x0efc
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_4_5 0x0efd
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_6_7 0x0efe
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_8_9 0x0eff
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_10_11 0x0f00
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_12_13 0x0f01
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_14_15 0x0f02
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_16_17 0x0f03
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_18_19 0x0f04
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_20_21 0x0f05
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_22_23 0x0f06
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_24_25 0x0f07
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_26_27 0x0f08
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_28_29 0x0f09
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_30_31 0x0f0a
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_32_33 0x0f0b
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_HDR_MULT_COEF 0x0f0c
+#define mmCM1_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_CTRL 0x0f0d
+#define mmCM1_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_STATUS 0x0f0e
+#define mmCM1_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM1_CM_DEALPHA 0x0f10
+#define mmCM1_CM_DEALPHA_BASE_IDX 2
+#define mmCM1_CM_COEF_FORMAT 0x0f11
+#define mmCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM1_CM_SHAPER_CONTROL 0x0f12
+#define mmCM1_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM1_CM_SHAPER_OFFSET_R 0x0f13
+#define mmCM1_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_OFFSET_G 0x0f14
+#define mmCM1_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_OFFSET_B 0x0f15
+#define mmCM1_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_SCALE_R 0x0f16
+#define mmCM1_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_SCALE_G_B 0x0f17
+#define mmCM1_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_LUT_INDEX 0x0f18
+#define mmCM1_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_SHAPER_LUT_DATA 0x0f19
+#define mmCM1_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM1_CM_SHAPER_LUT_WRITE_EN_MASK 0x0f1a
+#define mmCM1_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_B 0x0f1b
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_G 0x0f1c
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_R 0x0f1d
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_B 0x0f1e
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_G 0x0f1f
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_R 0x0f20
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_0_1 0x0f21
+#define mmCM1_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_2_3 0x0f22
+#define mmCM1_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_4_5 0x0f23
+#define mmCM1_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_6_7 0x0f24
+#define mmCM1_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_8_9 0x0f25
+#define mmCM1_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_10_11 0x0f26
+#define mmCM1_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_12_13 0x0f27
+#define mmCM1_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_14_15 0x0f28
+#define mmCM1_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_16_17 0x0f29
+#define mmCM1_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_18_19 0x0f2a
+#define mmCM1_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_20_21 0x0f2b
+#define mmCM1_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_22_23 0x0f2c
+#define mmCM1_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_24_25 0x0f2d
+#define mmCM1_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_26_27 0x0f2e
+#define mmCM1_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_28_29 0x0f2f
+#define mmCM1_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_30_31 0x0f30
+#define mmCM1_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_32_33 0x0f31
+#define mmCM1_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_B 0x0f32
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_G 0x0f33
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_R 0x0f34
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_B 0x0f35
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_G 0x0f36
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_R 0x0f37
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_0_1 0x0f38
+#define mmCM1_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_2_3 0x0f39
+#define mmCM1_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_4_5 0x0f3a
+#define mmCM1_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_6_7 0x0f3b
+#define mmCM1_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_8_9 0x0f3c
+#define mmCM1_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_10_11 0x0f3d
+#define mmCM1_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_12_13 0x0f3e
+#define mmCM1_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_14_15 0x0f3f
+#define mmCM1_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_16_17 0x0f40
+#define mmCM1_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_18_19 0x0f41
+#define mmCM1_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_20_21 0x0f42
+#define mmCM1_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_22_23 0x0f43
+#define mmCM1_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_24_25 0x0f44
+#define mmCM1_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_26_27 0x0f45
+#define mmCM1_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_28_29 0x0f46
+#define mmCM1_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_30_31 0x0f47
+#define mmCM1_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_32_33 0x0f48
+#define mmCM1_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_CTRL2 0x0f49
+#define mmCM1_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_STATUS2 0x0f4a
+#define mmCM1_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM1_CM_3DLUT_MODE 0x0f4b
+#define mmCM1_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM1_CM_3DLUT_INDEX 0x0f4c
+#define mmCM1_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_3DLUT_DATA 0x0f4d
+#define mmCM1_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM1_CM_3DLUT_DATA_30BIT 0x0f4e
+#define mmCM1_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM1_CM_3DLUT_READ_WRITE_CONTROL 0x0f4f
+#define mmCM1_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_NORM_FACTOR 0x0f50
+#define mmCM1_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_OFFSET_R 0x0f51
+#define mmCM1_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_OFFSET_G 0x0f52
+#define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f53
+#define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f54
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_DATA 0x0f55
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+// base address: 0xb58
+#define mmDPP_TOP2_DPP_CONTROL 0x0f9b
+#define mmDPP_TOP2_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP2_DPP_SOFT_RESET 0x0f9c
+#define mmDPP_TOP2_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP2_DPP_CRC_VAL_R_G 0x0f9d
+#define mmDPP_TOP2_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP2_DPP_CRC_VAL_B_A 0x0f9e
+#define mmDPP_TOP2_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP2_DPP_CRC_CTRL 0x0f9f
+#define mmDPP_TOP2_DPP_CRC_CTRL_BASE_IDX 2
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+// base address: 0xb58
+#define mmCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT 0x0fa5
+#define mmCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG2_FORMAT_CONTROL 0x0fa6
+#define mmCNVC_CFG2_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_BIAS_R 0x0fa7
+#define mmCNVC_CFG2_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_BIAS_G 0x0fa8
+#define mmCNVC_CFG2_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_BIAS_B 0x0fa9
+#define mmCNVC_CFG2_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_SCALE_R 0x0faa
+#define mmCNVC_CFG2_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_SCALE_G 0x0fab
+#define mmCNVC_CFG2_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_SCALE_B 0x0fac
+#define mmCNVC_CFG2_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_CONTROL 0x0fad
+#define mmCNVC_CFG2_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_ALPHA 0x0fae
+#define mmCNVC_CFG2_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_RED 0x0faf
+#define mmCNVC_CFG2_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_GREEN 0x0fb0
+#define mmCNVC_CFG2_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_BLUE 0x0fb1
+#define mmCNVC_CFG2_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG2_ALPHA_2BIT_LUT 0x0fb3
+#define mmCNVC_CFG2_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+// base address: 0xb58
+#define mmCNVC_CUR2_CURSOR0_CONTROL 0x0fb6
+#define mmCNVC_CUR2_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR2_CURSOR0_COLOR0 0x0fb7
+#define mmCNVC_CUR2_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR2_CURSOR0_COLOR1 0x0fb8
+#define mmCNVC_CUR2_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR2_CURSOR0_FP_SCALE_BIAS 0x0fb9
+#define mmCNVC_CUR2_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+// base address: 0xb58
+#define mmDSCL2_SCL_COEF_RAM_TAP_SELECT 0x0fc0
+#define mmDSCL2_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL2_SCL_COEF_RAM_TAP_DATA 0x0fc1
+#define mmDSCL2_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL2_SCL_MODE 0x0fc2
+#define mmDSCL2_SCL_MODE_BASE_IDX 2
+#define mmDSCL2_SCL_TAP_CONTROL 0x0fc3
+#define mmDSCL2_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL2_DSCL_CONTROL 0x0fc4
+#define mmDSCL2_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL2_DSCL_2TAP_CONTROL 0x0fc5
+#define mmDSCL2_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL2_SCL_MANUAL_REPLICATE_CONTROL 0x0fc6
+#define mmDSCL2_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO 0x0fc7
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_INIT 0x0fc8
+#define mmDSCL2_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0fc9
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_INIT_C 0x0fca
+#define mmDSCL2_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO 0x0fcb
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT 0x0fcc
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT 0x0fcd
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C 0x0fce
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT_C 0x0fcf
+#define mmDSCL2_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_C 0x0fd0
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL2_SCL_BLACK_OFFSET 0x0fd1
+#define mmDSCL2_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL2_DSCL_UPDATE 0x0fd2
+#define mmDSCL2_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL2_DSCL_AUTOCAL 0x0fd3
+#define mmDSCL2_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0fd4
+#define mmDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0fd5
+#define mmDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL2_OTG_H_BLANK 0x0fd6
+#define mmDSCL2_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL2_OTG_V_BLANK 0x0fd7
+#define mmDSCL2_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL2_RECOUT_START 0x0fd8
+#define mmDSCL2_RECOUT_START_BASE_IDX 2
+#define mmDSCL2_RECOUT_SIZE 0x0fd9
+#define mmDSCL2_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL2_MPC_SIZE 0x0fda
+#define mmDSCL2_MPC_SIZE_BASE_IDX 2
+#define mmDSCL2_LB_DATA_FORMAT 0x0fdb
+#define mmDSCL2_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL2_LB_MEMORY_CTRL 0x0fdc
+#define mmDSCL2_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL2_LB_V_COUNTER 0x0fdd
+#define mmDSCL2_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL2_DSCL_MEM_PWR_CTRL 0x0fde
+#define mmDSCL2_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL2_DSCL_MEM_PWR_STATUS 0x0fdf
+#define mmDSCL2_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL2_OBUF_CONTROL 0x0fe0
+#define mmDSCL2_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL2_OBUF_MEM_PWR_CTRL 0x0fe1
+#define mmDSCL2_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+// base address: 0xb58
+#define mmCM2_CM_CONTROL 0x0ff0
+#define mmCM2_CM_CONTROL_BASE_IDX 2
+#define mmCM2_CM_ICSC_CONTROL 0x0ff1
+#define mmCM2_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM2_CM_ICSC_C11_C12 0x0ff2
+#define mmCM2_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM2_CM_ICSC_C13_C14 0x0ff3
+#define mmCM2_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM2_CM_ICSC_C21_C22 0x0ff4
+#define mmCM2_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM2_CM_ICSC_C23_C24 0x0ff5
+#define mmCM2_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM2_CM_ICSC_C31_C32 0x0ff6
+#define mmCM2_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM2_CM_ICSC_C33_C34 0x0ff7
+#define mmCM2_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C11_C12 0x0ff8
+#define mmCM2_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C13_C14 0x0ff9
+#define mmCM2_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C21_C22 0x0ffa
+#define mmCM2_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C23_C24 0x0ffb
+#define mmCM2_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C31_C32 0x0ffc
+#define mmCM2_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C33_C34 0x0ffd
+#define mmCM2_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_CONTROL 0x0ffe
+#define mmCM2_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C11_C12 0x0fff
+#define mmCM2_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C13_C14 0x1000
+#define mmCM2_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C21_C22 0x1001
+#define mmCM2_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C23_C24 0x1002
+#define mmCM2_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C31_C32 0x1003
+#define mmCM2_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C33_C34 0x1004
+#define mmCM2_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C11_C12 0x1005
+#define mmCM2_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C13_C14 0x1006
+#define mmCM2_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C21_C22 0x1007
+#define mmCM2_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C23_C24 0x1008
+#define mmCM2_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C31_C32 0x1009
+#define mmCM2_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C33_C34 0x100a
+#define mmCM2_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM2_CM_BIAS_CR_R 0x100b
+#define mmCM2_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM2_CM_BIAS_Y_G_CB_B 0x100c
+#define mmCM2_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_CONTROL 0x100d
+#define mmCM2_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM2_CM_DGAM_LUT_INDEX 0x100e
+#define mmCM2_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_DGAM_LUT_DATA 0x100f
+#define mmCM2_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM2_CM_DGAM_LUT_WRITE_EN_MASK 0x1010
+#define mmCM2_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_B 0x1011
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_G 0x1012
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_R 0x1013
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_B 0x1014
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_G 0x1015
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_R 0x1016
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_B 0x1017
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_B 0x1018
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_G 0x1019
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_G 0x101a
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_R 0x101b
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_R 0x101c
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_0_1 0x101d
+#define mmCM2_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_2_3 0x101e
+#define mmCM2_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_4_5 0x101f
+#define mmCM2_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_6_7 0x1020
+#define mmCM2_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_8_9 0x1021
+#define mmCM2_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_10_11 0x1022
+#define mmCM2_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_12_13 0x1023
+#define mmCM2_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_14_15 0x1024
+#define mmCM2_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_B 0x1025
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_G 0x1026
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_R 0x1027
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_B 0x1028
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_G 0x1029
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_R 0x102a
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_B 0x102b
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_B 0x102c
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_G 0x102d
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_G 0x102e
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_R 0x102f
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_R 0x1030
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_0_1 0x1031
+#define mmCM2_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_2_3 0x1032
+#define mmCM2_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_4_5 0x1033
+#define mmCM2_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_6_7 0x1034
+#define mmCM2_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_8_9 0x1035
+#define mmCM2_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_10_11 0x1036
+#define mmCM2_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_12_13 0x1037
+#define mmCM2_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_14_15 0x1038
+#define mmCM2_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_CONTROL 0x1039
+#define mmCM2_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_LUT_INDEX 0x103a
+#define mmCM2_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_LUT_DATA 0x103b
+#define mmCM2_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x103c
+#define mmCM2_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_B 0x103d
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_G 0x103e
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_R 0x103f
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x1040
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x1041
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x1042
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_B 0x1043
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_B 0x1044
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_G 0x1045
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_G 0x1046
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_R 0x1047
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_R 0x1048
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_0_1 0x1049
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_2_3 0x104a
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_4_5 0x104b
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_6_7 0x104c
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_8_9 0x104d
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_10_11 0x104e
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_12_13 0x104f
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_14_15 0x1050
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_16_17 0x1051
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_18_19 0x1052
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_20_21 0x1053
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_22_23 0x1054
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_24_25 0x1055
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_26_27 0x1056
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_28_29 0x1057
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_30_31 0x1058
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_32_33 0x1059
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_B 0x105a
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_G 0x105b
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_R 0x105c
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x105d
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x105e
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x105f
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_B 0x1060
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_B 0x1061
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_G 0x1062
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_G 0x1063
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_R 0x1064
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_R 0x1065
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_0_1 0x1066
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_2_3 0x1067
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_4_5 0x1068
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_6_7 0x1069
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_8_9 0x106a
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_10_11 0x106b
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_12_13 0x106c
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_14_15 0x106d
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_16_17 0x106e
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_18_19 0x106f
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_20_21 0x1070
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_22_23 0x1071
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_24_25 0x1072
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_26_27 0x1073
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_28_29 0x1074
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_30_31 0x1075
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_32_33 0x1076
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_HDR_MULT_COEF 0x1077
+#define mmCM2_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_CTRL 0x1078
+#define mmCM2_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_STATUS 0x1079
+#define mmCM2_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM2_CM_DEALPHA 0x107b
+#define mmCM2_CM_DEALPHA_BASE_IDX 2
+#define mmCM2_CM_COEF_FORMAT 0x107c
+#define mmCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM2_CM_SHAPER_CONTROL 0x107d
+#define mmCM2_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM2_CM_SHAPER_OFFSET_R 0x107e
+#define mmCM2_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_OFFSET_G 0x107f
+#define mmCM2_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_OFFSET_B 0x1080
+#define mmCM2_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_SCALE_R 0x1081
+#define mmCM2_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_SCALE_G_B 0x1082
+#define mmCM2_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_LUT_INDEX 0x1083
+#define mmCM2_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_SHAPER_LUT_DATA 0x1084
+#define mmCM2_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM2_CM_SHAPER_LUT_WRITE_EN_MASK 0x1085
+#define mmCM2_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_B 0x1086
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_G 0x1087
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_R 0x1088
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_B 0x1089
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_G 0x108a
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_R 0x108b
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_0_1 0x108c
+#define mmCM2_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_2_3 0x108d
+#define mmCM2_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_4_5 0x108e
+#define mmCM2_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_6_7 0x108f
+#define mmCM2_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_8_9 0x1090
+#define mmCM2_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_10_11 0x1091
+#define mmCM2_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_12_13 0x1092
+#define mmCM2_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_14_15 0x1093
+#define mmCM2_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_16_17 0x1094
+#define mmCM2_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_18_19 0x1095
+#define mmCM2_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_20_21 0x1096
+#define mmCM2_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_22_23 0x1097
+#define mmCM2_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_24_25 0x1098
+#define mmCM2_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_26_27 0x1099
+#define mmCM2_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_28_29 0x109a
+#define mmCM2_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_30_31 0x109b
+#define mmCM2_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_32_33 0x109c
+#define mmCM2_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_B 0x109d
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_G 0x109e
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_R 0x109f
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_B 0x10a0
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_G 0x10a1
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_R 0x10a2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_0_1 0x10a3
+#define mmCM2_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_2_3 0x10a4
+#define mmCM2_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_4_5 0x10a5
+#define mmCM2_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_6_7 0x10a6
+#define mmCM2_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_8_9 0x10a7
+#define mmCM2_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_10_11 0x10a8
+#define mmCM2_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_12_13 0x10a9
+#define mmCM2_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_14_15 0x10aa
+#define mmCM2_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_16_17 0x10ab
+#define mmCM2_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_18_19 0x10ac
+#define mmCM2_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_20_21 0x10ad
+#define mmCM2_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_22_23 0x10ae
+#define mmCM2_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_24_25 0x10af
+#define mmCM2_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_26_27 0x10b0
+#define mmCM2_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_28_29 0x10b1
+#define mmCM2_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_30_31 0x10b2
+#define mmCM2_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_32_33 0x10b3
+#define mmCM2_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_CTRL2 0x10b4
+#define mmCM2_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_STATUS2 0x10b5
+#define mmCM2_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM2_CM_3DLUT_MODE 0x10b6
+#define mmCM2_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM2_CM_3DLUT_INDEX 0x10b7
+#define mmCM2_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_3DLUT_DATA 0x10b8
+#define mmCM2_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM2_CM_3DLUT_DATA_30BIT 0x10b9
+#define mmCM2_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM2_CM_3DLUT_READ_WRITE_CONTROL 0x10ba
+#define mmCM2_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_NORM_FACTOR 0x10bb
+#define mmCM2_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_OFFSET_R 0x10bc
+#define mmCM2_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_OFFSET_G 0x10bd
+#define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10be
+#define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_INDEX 0x10bf
+#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_DATA 0x10c0
+#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
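A minimal sketch of how one of these offset/`_BASE_IDX` pairs is typically consumed: each register macro is a dword offset within a segment, and the `_BASE_IDX` companion selects which segment base gets added at runtime (the amdgpu driver resolves this through its per-IP base tables via the SOC15_REG_OFFSET() helper). The `seg_base[]` table below is a hypothetical stand-in for those tables, not values from this patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical segment-base table; the real driver fills per-IP
 * tables like this in at init time from the chip's IP discovery data.
 */
static const uint32_t seg_base[] = { 0x000000, 0x000c00, 0x0034c0 };

/* Final dword offset into MMIO space = segment base + register offset. */
static uint32_t reg_offset(uint32_t reg, uint32_t base_idx)
{
	return seg_base[base_idx] + reg;
}

int main(void)
{
	/* e.g. mmCM2_CM_CONTROL (0x0ff0) with its BASE_IDX of 2 */
	printf("0x%05x\n", reg_offset(0x0ff0, 2));
	return 0;
}

With the assumed table above this prints 0x044b0; only the addition pattern, not the printed value, reflects how the driver actually uses these definitions.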
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+// base address: 0x1104
+#define mmDPP_TOP3_DPP_CONTROL 0x1106
+#define mmDPP_TOP3_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP3_DPP_SOFT_RESET 0x1107
+#define mmDPP_TOP3_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP3_DPP_CRC_VAL_R_G 0x1108
+#define mmDPP_TOP3_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP3_DPP_CRC_VAL_B_A 0x1109
+#define mmDPP_TOP3_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP3_DPP_CRC_CTRL 0x110a
+#define mmDPP_TOP3_DPP_CRC_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+// base address: 0x1104
+#define mmCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT 0x1110
+#define mmCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG3_FORMAT_CONTROL 0x1111
+#define mmCNVC_CFG3_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_R 0x1112
+#define mmCNVC_CFG3_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_G 0x1113
+#define mmCNVC_CFG3_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_B 0x1114
+#define mmCNVC_CFG3_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_R 0x1115
+#define mmCNVC_CFG3_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_G 0x1116
+#define mmCNVC_CFG3_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_B 0x1117
+#define mmCNVC_CFG3_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_CONTROL 0x1118
+#define mmCNVC_CFG3_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_ALPHA 0x1119
+#define mmCNVC_CFG3_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_RED 0x111a
+#define mmCNVC_CFG3_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_GREEN 0x111b
+#define mmCNVC_CFG3_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_BLUE 0x111c
+#define mmCNVC_CFG3_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG3_ALPHA_2BIT_LUT 0x111e
+#define mmCNVC_CFG3_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+// base address: 0x1104
+#define mmCNVC_CUR3_CURSOR0_CONTROL 0x1121
+#define mmCNVC_CUR3_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_COLOR0 0x1122
+#define mmCNVC_CUR3_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_COLOR1 0x1123
+#define mmCNVC_CUR3_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_FP_SCALE_BIAS 0x1124
+#define mmCNVC_CUR3_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+// base address: 0x1104
+#define mmDSCL3_SCL_COEF_RAM_TAP_SELECT 0x112b
+#define mmDSCL3_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL3_SCL_COEF_RAM_TAP_DATA 0x112c
+#define mmDSCL3_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL3_SCL_MODE 0x112d
+#define mmDSCL3_SCL_MODE_BASE_IDX 2
+#define mmDSCL3_SCL_TAP_CONTROL 0x112e
+#define mmDSCL3_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL3_DSCL_CONTROL 0x112f
+#define mmDSCL3_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL3_DSCL_2TAP_CONTROL 0x1130
+#define mmDSCL3_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL3_SCL_MANUAL_REPLICATE_CONTROL 0x1131
+#define mmDSCL3_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO 0x1132
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_INIT 0x1133
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C 0x1134
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_C 0x1135
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO 0x1136
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT 0x1137
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT 0x1138
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C 0x1139
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_C 0x113a
+#define mmDSCL3_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_C 0x113b
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL3_SCL_BLACK_OFFSET 0x113c
+#define mmDSCL3_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL3_DSCL_UPDATE 0x113d
+#define mmDSCL3_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL3_DSCL_AUTOCAL 0x113e
+#define mmDSCL3_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x113f
+#define mmDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x1140
+#define mmDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL3_OTG_H_BLANK 0x1141
+#define mmDSCL3_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL3_OTG_V_BLANK 0x1142
+#define mmDSCL3_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL3_RECOUT_START 0x1143
+#define mmDSCL3_RECOUT_START_BASE_IDX 2
+#define mmDSCL3_RECOUT_SIZE 0x1144
+#define mmDSCL3_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL3_MPC_SIZE 0x1145
+#define mmDSCL3_MPC_SIZE_BASE_IDX 2
+#define mmDSCL3_LB_DATA_FORMAT 0x1146
+#define mmDSCL3_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL3_LB_MEMORY_CTRL 0x1147
+#define mmDSCL3_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL3_LB_V_COUNTER 0x1148
+#define mmDSCL3_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL3_DSCL_MEM_PWR_CTRL 0x1149
+#define mmDSCL3_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL3_DSCL_MEM_PWR_STATUS 0x114a
+#define mmDSCL3_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL3_OBUF_CONTROL 0x114b
+#define mmDSCL3_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL3_OBUF_MEM_PWR_CTRL 0x114c
+#define mmDSCL3_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+// base address: 0x1104
+#define mmCM3_CM_CONTROL 0x115b
+#define mmCM3_CM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_ICSC_CONTROL 0x115c
+#define mmCM3_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM3_CM_ICSC_C11_C12 0x115d
+#define mmCM3_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM3_CM_ICSC_C13_C14 0x115e
+#define mmCM3_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM3_CM_ICSC_C21_C22 0x115f
+#define mmCM3_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM3_CM_ICSC_C23_C24 0x1160
+#define mmCM3_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM3_CM_ICSC_C31_C32 0x1161
+#define mmCM3_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM3_CM_ICSC_C33_C34 0x1162
+#define mmCM3_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C11_C12 0x1163
+#define mmCM3_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C13_C14 0x1164
+#define mmCM3_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C21_C22 0x1165
+#define mmCM3_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C23_C24 0x1166
+#define mmCM3_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C31_C32 0x1167
+#define mmCM3_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C33_C34 0x1168
+#define mmCM3_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_CONTROL 0x1169
+#define mmCM3_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C11_C12 0x116a
+#define mmCM3_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C13_C14 0x116b
+#define mmCM3_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C21_C22 0x116c
+#define mmCM3_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C23_C24 0x116d
+#define mmCM3_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C31_C32 0x116e
+#define mmCM3_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C33_C34 0x116f
+#define mmCM3_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C11_C12 0x1170
+#define mmCM3_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C13_C14 0x1171
+#define mmCM3_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C21_C22 0x1172
+#define mmCM3_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C23_C24 0x1173
+#define mmCM3_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C31_C32 0x1174
+#define mmCM3_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C33_C34 0x1175
+#define mmCM3_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM3_CM_BIAS_CR_R 0x1176
+#define mmCM3_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM3_CM_BIAS_Y_G_CB_B 0x1177
+#define mmCM3_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_CONTROL 0x1178
+#define mmCM3_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_INDEX 0x1179
+#define mmCM3_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_DATA 0x117a
+#define mmCM3_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_WRITE_EN_MASK 0x117b
+#define mmCM3_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_B 0x117c
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_G 0x117d
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_R 0x117e
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_B 0x117f
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_G 0x1180
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_R 0x1181
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_B 0x1182
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_B 0x1183
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_G 0x1184
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_G 0x1185
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_R 0x1186
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_R 0x1187
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_0_1 0x1188
+#define mmCM3_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_2_3 0x1189
+#define mmCM3_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_4_5 0x118a
+#define mmCM3_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_6_7 0x118b
+#define mmCM3_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_8_9 0x118c
+#define mmCM3_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_10_11 0x118d
+#define mmCM3_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_12_13 0x118e
+#define mmCM3_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_14_15 0x118f
+#define mmCM3_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_B 0x1190
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_G 0x1191
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_R 0x1192
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_B 0x1193
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_G 0x1194
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_R 0x1195
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_B 0x1196
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_B 0x1197
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_G 0x1198
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_G 0x1199
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_R 0x119a
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_R 0x119b
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_0_1 0x119c
+#define mmCM3_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_2_3 0x119d
+#define mmCM3_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_4_5 0x119e
+#define mmCM3_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_6_7 0x119f
+#define mmCM3_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_8_9 0x11a0
+#define mmCM3_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_10_11 0x11a1
+#define mmCM3_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_12_13 0x11a2
+#define mmCM3_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_14_15 0x11a3
+#define mmCM3_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_CONTROL 0x11a4
+#define mmCM3_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_INDEX 0x11a5
+#define mmCM3_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_DATA 0x11a6
+#define mmCM3_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x11a7
+#define mmCM3_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_B 0x11a8
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_G 0x11a9
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_R 0x11aa
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x11ab
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x11ac
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x11ad
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_B 0x11ae
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_B 0x11af
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_G 0x11b0
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_G 0x11b1
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_R 0x11b2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_R 0x11b3
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_0_1 0x11b4
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_2_3 0x11b5
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_4_5 0x11b6
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_6_7 0x11b7
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_8_9 0x11b8
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_10_11 0x11b9
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_12_13 0x11ba
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_14_15 0x11bb
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_16_17 0x11bc
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_18_19 0x11bd
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_20_21 0x11be
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_22_23 0x11bf
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_24_25 0x11c0
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_26_27 0x11c1
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_28_29 0x11c2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_30_31 0x11c3
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_32_33 0x11c4
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_B 0x11c5
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_G 0x11c6
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_R 0x11c7
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x11c8
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x11c9
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x11ca
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_B 0x11cb
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_B 0x11cc
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_G 0x11cd
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_G 0x11ce
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_R 0x11cf
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_R 0x11d0
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_0_1 0x11d1
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_2_3 0x11d2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_4_5 0x11d3
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_6_7 0x11d4
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_8_9 0x11d5
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_10_11 0x11d6
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_12_13 0x11d7
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_14_15 0x11d8
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_16_17 0x11d9
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_18_19 0x11da
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_20_21 0x11db
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_22_23 0x11dc
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_24_25 0x11dd
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_26_27 0x11de
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_28_29 0x11df
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_30_31 0x11e0
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_32_33 0x11e1
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_HDR_MULT_COEF 0x11e2
+#define mmCM3_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_CTRL 0x11e3
+#define mmCM3_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_STATUS 0x11e4
+#define mmCM3_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM3_CM_DEALPHA 0x11e6
+#define mmCM3_CM_DEALPHA_BASE_IDX 2
+#define mmCM3_CM_COEF_FORMAT 0x11e7
+#define mmCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM3_CM_SHAPER_CONTROL 0x11e8
+#define mmCM3_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_R 0x11e9
+#define mmCM3_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_G 0x11ea
+#define mmCM3_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_B 0x11eb
+#define mmCM3_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_SCALE_R 0x11ec
+#define mmCM3_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_SCALE_G_B 0x11ed
+#define mmCM3_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_INDEX 0x11ee
+#define mmCM3_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_DATA 0x11ef
+#define mmCM3_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_WRITE_EN_MASK 0x11f0
+#define mmCM3_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_B 0x11f1
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_G 0x11f2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_R 0x11f3
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_B 0x11f4
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_G 0x11f5
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_R 0x11f6
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_0_1 0x11f7
+#define mmCM3_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_2_3 0x11f8
+#define mmCM3_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_4_5 0x11f9
+#define mmCM3_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_6_7 0x11fa
+#define mmCM3_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_8_9 0x11fb
+#define mmCM3_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_10_11 0x11fc
+#define mmCM3_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_12_13 0x11fd
+#define mmCM3_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_14_15 0x11fe
+#define mmCM3_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_16_17 0x11ff
+#define mmCM3_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_18_19 0x1200
+#define mmCM3_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_20_21 0x1201
+#define mmCM3_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_22_23 0x1202
+#define mmCM3_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_24_25 0x1203
+#define mmCM3_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_26_27 0x1204
+#define mmCM3_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_28_29 0x1205
+#define mmCM3_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_30_31 0x1206
+#define mmCM3_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_32_33 0x1207
+#define mmCM3_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_B 0x1208
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_G 0x1209
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_R 0x120a
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_B 0x120b
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_G 0x120c
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_R 0x120d
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_0_1 0x120e
+#define mmCM3_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_2_3 0x120f
+#define mmCM3_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_4_5 0x1210
+#define mmCM3_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_6_7 0x1211
+#define mmCM3_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_8_9 0x1212
+#define mmCM3_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_10_11 0x1213
+#define mmCM3_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_12_13 0x1214
+#define mmCM3_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_14_15 0x1215
+#define mmCM3_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_16_17 0x1216
+#define mmCM3_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_18_19 0x1217
+#define mmCM3_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_20_21 0x1218
+#define mmCM3_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_22_23 0x1219
+#define mmCM3_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_24_25 0x121a
+#define mmCM3_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_26_27 0x121b
+#define mmCM3_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_28_29 0x121c
+#define mmCM3_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_30_31 0x121d
+#define mmCM3_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_32_33 0x121e
+#define mmCM3_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_CTRL2 0x121f
+#define mmCM3_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_STATUS2 0x1220
+#define mmCM3_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM3_CM_3DLUT_MODE 0x1221
+#define mmCM3_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM3_CM_3DLUT_INDEX 0x1222
+#define mmCM3_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_3DLUT_DATA 0x1223
+#define mmCM3_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM3_CM_3DLUT_DATA_30BIT 0x1224
+#define mmCM3_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM3_CM_3DLUT_READ_WRITE_CONTROL 0x1225
+#define mmCM3_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_NORM_FACTOR 0x1226
+#define mmCM3_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_R 0x1227
+#define mmCM3_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_G 0x1228
+#define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1229
+#define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_INDEX 0x122a
+#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_DATA 0x122b
+#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
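+// Usage sketch (illustrative, assuming amdgpu's SOC15 helpers; not part
+// of the generated register list): each register below is an offset
+// paired with a _BASE_IDX that selects the IP address segment, and the
+// helpers combine the two along the lines of
+//
+//   #define SOC15_REG_OFFSET(ip, inst, reg) \
+//       (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+//
+//   uint32_t val = RREG32_SOC15(DCE, 0, mmMPC_CLOCK_CONTROL);
+
+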
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+// base address: 0x0
+#define mmMPCC0_MPCC_TOP_SEL 0x1271
+#define mmMPCC0_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_SEL 0x1272
+#define mmMPCC0_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_OPP_ID 0x1273
+#define mmMPCC0_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC0_MPCC_CONTROL 0x1274
+#define mmMPCC0_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC0_MPCC_SM_CONTROL 0x1275
+#define mmMPCC0_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC0_MPCC_UPDATE_LOCK_SEL 0x1276
+#define mmMPCC0_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_TOP_GAIN 0x1277
+#define mmMPCC0_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_GAIN_INSIDE 0x1278
+#define mmMPCC0_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_GAIN_OUTSIDE 0x1279
+#define mmMPCC0_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_R_CR 0x127a
+#define mmMPCC0_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_G_Y 0x127b
+#define mmMPCC0_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_B_CB 0x127c
+#define mmMPCC0_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC0_MPCC_MEM_PWR_CTRL 0x127d
+#define mmMPCC0_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC0_MPCC_STALL_STATUS 0x127e
+#define mmMPCC0_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC0_MPCC_STATUS 0x127f
+#define mmMPCC0_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+// base address: 0x6c
+#define mmMPCC1_MPCC_TOP_SEL 0x128c
+#define mmMPCC1_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_SEL 0x128d
+#define mmMPCC1_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_OPP_ID 0x128e
+#define mmMPCC1_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC1_MPCC_CONTROL 0x128f
+#define mmMPCC1_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC1_MPCC_SM_CONTROL 0x1290
+#define mmMPCC1_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC1_MPCC_UPDATE_LOCK_SEL 0x1291
+#define mmMPCC1_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_TOP_GAIN 0x1292
+#define mmMPCC1_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_GAIN_INSIDE 0x1293
+#define mmMPCC1_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_GAIN_OUTSIDE 0x1294
+#define mmMPCC1_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_R_CR 0x1295
+#define mmMPCC1_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_G_Y 0x1296
+#define mmMPCC1_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_B_CB 0x1297
+#define mmMPCC1_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC1_MPCC_MEM_PWR_CTRL 0x1298
+#define mmMPCC1_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC1_MPCC_STALL_STATUS 0x1299
+#define mmMPCC1_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC1_MPCC_STATUS 0x129a
+#define mmMPCC1_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+// base address: 0xd8
+#define mmMPCC2_MPCC_TOP_SEL 0x12a7
+#define mmMPCC2_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_SEL 0x12a8
+#define mmMPCC2_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_OPP_ID 0x12a9
+#define mmMPCC2_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC2_MPCC_CONTROL 0x12aa
+#define mmMPCC2_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC2_MPCC_SM_CONTROL 0x12ab
+#define mmMPCC2_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC2_MPCC_UPDATE_LOCK_SEL 0x12ac
+#define mmMPCC2_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_TOP_GAIN 0x12ad
+#define mmMPCC2_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_GAIN_INSIDE 0x12ae
+#define mmMPCC2_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_GAIN_OUTSIDE 0x12af
+#define mmMPCC2_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_R_CR 0x12b0
+#define mmMPCC2_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_G_Y 0x12b1
+#define mmMPCC2_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_B_CB 0x12b2
+#define mmMPCC2_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC2_MPCC_MEM_PWR_CTRL 0x12b3
+#define mmMPCC2_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC2_MPCC_STALL_STATUS 0x12b4
+#define mmMPCC2_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC2_MPCC_STATUS 0x12b5
+#define mmMPCC2_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+// base address: 0x144
+#define mmMPCC3_MPCC_TOP_SEL 0x12c2
+#define mmMPCC3_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_SEL 0x12c3
+#define mmMPCC3_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_OPP_ID 0x12c4
+#define mmMPCC3_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC3_MPCC_CONTROL 0x12c5
+#define mmMPCC3_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC3_MPCC_SM_CONTROL 0x12c6
+#define mmMPCC3_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC3_MPCC_UPDATE_LOCK_SEL 0x12c7
+#define mmMPCC3_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_TOP_GAIN 0x12c8
+#define mmMPCC3_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_GAIN_INSIDE 0x12c9
+#define mmMPCC3_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_GAIN_OUTSIDE 0x12ca
+#define mmMPCC3_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_R_CR 0x12cb
+#define mmMPCC3_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_G_Y 0x12cc
+#define mmMPCC3_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_B_CB 0x12cd
+#define mmMPCC3_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC3_MPCC_MEM_PWR_CTRL 0x12ce
+#define mmMPCC3_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC3_MPCC_STALL_STATUS 0x12cf
+#define mmMPCC3_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC3_MPCC_STATUS 0x12d0
+#define mmMPCC3_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc4_dispdec
+// base address: 0x1b0
+#define mmMPCC4_MPCC_TOP_SEL 0x12dd
+#define mmMPCC4_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_SEL 0x12de
+#define mmMPCC4_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_OPP_ID 0x12df
+#define mmMPCC4_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC4_MPCC_CONTROL 0x12e0
+#define mmMPCC4_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC4_MPCC_SM_CONTROL 0x12e1
+#define mmMPCC4_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC4_MPCC_UPDATE_LOCK_SEL 0x12e2
+#define mmMPCC4_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_TOP_GAIN 0x12e3
+#define mmMPCC4_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_GAIN_INSIDE 0x12e4
+#define mmMPCC4_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_GAIN_OUTSIDE 0x12e5
+#define mmMPCC4_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_R_CR 0x12e6
+#define mmMPCC4_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_G_Y 0x12e7
+#define mmMPCC4_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_B_CB 0x12e8
+#define mmMPCC4_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC4_MPCC_MEM_PWR_CTRL 0x12e9
+#define mmMPCC4_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC4_MPCC_STALL_STATUS 0x12ea
+#define mmMPCC4_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC4_MPCC_STATUS 0x12eb
+#define mmMPCC4_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+// base address: 0x0
+#define mmMPC_CLOCK_CONTROL 0x1349
+#define mmMPC_CLOCK_CONTROL_BASE_IDX 2
+#define mmMPC_SOFT_RESET 0x134a
+#define mmMPC_SOFT_RESET_BASE_IDX 2
+#define mmMPC_CRC_CTRL 0x134b
+#define mmMPC_CRC_CTRL_BASE_IDX 2
+#define mmMPC_CRC_SEL_CONTROL 0x134c
+#define mmMPC_CRC_SEL_CONTROL_BASE_IDX 2
+#define mmMPC_CRC_RESULT_AR 0x134d
+#define mmMPC_CRC_RESULT_AR_BASE_IDX 2
+#define mmMPC_CRC_RESULT_GB 0x134e
+#define mmMPC_CRC_RESULT_GB_BASE_IDX 2
+#define mmMPC_CRC_RESULT_C 0x134f
+#define mmMPC_CRC_RESULT_C_BASE_IDX 2
+#define mmMPC_PERFMON_EVENT_CTRL 0x1352
+#define mmMPC_PERFMON_EVENT_CTRL_BASE_IDX 2
+#define mmMPC_BYPASS_BG_AR 0x1353
+#define mmMPC_BYPASS_BG_AR_BASE_IDX 2
+#define mmMPC_BYPASS_BG_GB 0x1354
+#define mmMPC_BYPASS_BG_GB_BASE_IDX 2
+#define mmMPC_STALL_GRACE_WINDOW 0x1355
+#define mmMPC_STALL_GRACE_WINDOW_BASE_IDX 2
+#define mmMPC_HOST_READ_CONTROL 0x1356
+#define mmMPC_HOST_READ_CONTROL_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET0 0x135d
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET0 0x135e
+#define mmADR_CFG_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET0 0x135f
+#define mmADR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET0 0x1360
+#define mmCFG_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET0 0x1361
+#define mmCUR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET1 0x1362
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET1 0x1363
+#define mmADR_CFG_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET1 0x1364
+#define mmADR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET1 0x1365
+#define mmCFG_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET1 0x1366
+#define mmCUR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET2 0x1367
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET2 0x1368
+#define mmADR_CFG_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET2 0x1369
+#define mmADR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET2 0x136a
+#define mmCFG_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET2 0x136b
+#define mmCUR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET3 0x136c
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET3 0x136d
+#define mmADR_CFG_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET3 0x136e
+#define mmADR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET3 0x136f
+#define mmCFG_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET3 0x1370
+#define mmCUR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmMPC_OUT0_MUX 0x1385
+#define mmMPC_OUT0_MUX_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CONTROL 0x1386
+#define mmMPC_OUT0_DENORM_CONTROL_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CLAMP_G_Y 0x1387
+#define mmMPC_OUT0_DENORM_CLAMP_G_Y_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CLAMP_B_CB 0x1388
+#define mmMPC_OUT0_DENORM_CLAMP_B_CB_BASE_IDX 2
+#define mmMPC_OUT1_MUX 0x1389
+#define mmMPC_OUT1_MUX_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CONTROL 0x138a
+#define mmMPC_OUT1_DENORM_CONTROL_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CLAMP_G_Y 0x138b
+#define mmMPC_OUT1_DENORM_CLAMP_G_Y_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CLAMP_B_CB 0x138c
+#define mmMPC_OUT1_DENORM_CLAMP_B_CB_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+// base address: 0x0
+#define mmMPCC_OGAM0_MPCC_OGAM_MODE 0x13ae
+#define mmMPCC_OGAM0_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_INDEX 0x13af
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_DATA 0x13b0
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL 0x13b1
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B 0x13b2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G 0x13b3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R 0x13b4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x13b5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x13b6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x13b7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B 0x13b8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B 0x13b9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G 0x13ba
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G 0x13bb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R 0x13bc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R 0x13bd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1 0x13be
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3 0x13bf
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5 0x13c0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7 0x13c1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9 0x13c2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11 0x13c3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13 0x13c4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15 0x13c5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17 0x13c6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19 0x13c7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21 0x13c8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23 0x13c9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25 0x13ca
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27 0x13cb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29 0x13cc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31 0x13cd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33 0x13ce
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B 0x13cf
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G 0x13d0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R 0x13d1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x13d2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x13d3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x13d4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B 0x13d5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B 0x13d6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G 0x13d7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G 0x13d8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R 0x13d9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R 0x13da
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1 0x13db
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3 0x13dc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5 0x13dd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7 0x13de
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9 0x13df
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11 0x13e0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13 0x13e1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15 0x13e2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17 0x13e3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19 0x13e4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21 0x13e5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23 0x13e6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25 0x13e7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27 0x13e8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29 0x13e9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31 0x13ea
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33 0x13eb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+// base address: 0x104
+#define mmMPCC_OGAM1_MPCC_OGAM_MODE 0x13ef
+#define mmMPCC_OGAM1_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_INDEX 0x13f0
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_DATA 0x13f1
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL 0x13f2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B 0x13f3
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G 0x13f4
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R 0x13f5
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x13f6
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x13f7
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x13f8
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B 0x13f9
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B 0x13fa
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G 0x13fb
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G 0x13fc
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R 0x13fd
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R 0x13fe
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1 0x13ff
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3 0x1400
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5 0x1401
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7 0x1402
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9 0x1403
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11 0x1404
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13 0x1405
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15 0x1406
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17 0x1407
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19 0x1408
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21 0x1409
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23 0x140a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25 0x140b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27 0x140c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29 0x140d
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31 0x140e
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33 0x140f
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B 0x1410
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G 0x1411
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R 0x1412
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1413
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1414
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1415
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B 0x1416
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B 0x1417
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G 0x1418
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G 0x1419
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R 0x141a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R 0x141b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1 0x141c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3 0x141d
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5 0x141e
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7 0x141f
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9 0x1420
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11 0x1421
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13 0x1422
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15 0x1423
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17 0x1424
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19 0x1425
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21 0x1426
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23 0x1427
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25 0x1428
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27 0x1429
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29 0x142a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31 0x142b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33 0x142c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+// base address: 0x208
+#define mmMPCC_OGAM2_MPCC_OGAM_MODE 0x1430
+#define mmMPCC_OGAM2_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_INDEX 0x1431
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_DATA 0x1432
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL 0x1433
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B 0x1434
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G 0x1435
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R 0x1436
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x1437
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x1438
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x1439
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B 0x143a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B 0x143b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G 0x143c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G 0x143d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R 0x143e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R 0x143f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1 0x1440
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3 0x1441
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5 0x1442
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7 0x1443
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9 0x1444
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11 0x1445
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13 0x1446
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15 0x1447
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17 0x1448
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19 0x1449
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21 0x144a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23 0x144b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25 0x144c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27 0x144d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29 0x144e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31 0x144f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33 0x1450
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B 0x1451
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G 0x1452
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R 0x1453
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1454
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1455
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1456
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B 0x1457
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B 0x1458
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G 0x1459
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G 0x145a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R 0x145b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R 0x145c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1 0x145d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3 0x145e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5 0x145f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7 0x1460
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9 0x1461
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11 0x1462
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13 0x1463
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15 0x1464
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17 0x1465
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19 0x1466
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21 0x1467
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23 0x1468
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25 0x1469
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27 0x146a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29 0x146b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31 0x146c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33 0x146d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+// base address: 0x30c
+#define mmMPCC_OGAM3_MPCC_OGAM_MODE 0x1471
+#define mmMPCC_OGAM3_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_INDEX 0x1472
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_DATA 0x1473
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL 0x1474
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B 0x1475
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G 0x1476
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R 0x1477
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x1478
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x1479
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x147a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B 0x147b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B 0x147c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G 0x147d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G 0x147e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R 0x147f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R 0x1480
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1 0x1481
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3 0x1482
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5 0x1483
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7 0x1484
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9 0x1485
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11 0x1486
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13 0x1487
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15 0x1488
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17 0x1489
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19 0x148a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21 0x148b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23 0x148c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25 0x148d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27 0x148e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29 0x148f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31 0x1490
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33 0x1491
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B 0x1492
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G 0x1493
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R 0x1494
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1495
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1496
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1497
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B 0x1498
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B 0x1499
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G 0x149a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G 0x149b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R 0x149c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R 0x149d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1 0x149e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3 0x149f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5 0x14a0
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7 0x14a1
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9 0x14a2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11 0x14a3
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13 0x14a4
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15 0x14a5
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17 0x14a6
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19 0x14a7
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21 0x14a8
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23 0x14a9
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25 0x14aa
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27 0x14ab
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29 0x14ac
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31 0x14ad
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33 0x14ae
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam4_dispdec
+// base address: 0x410
+#define mmMPCC_OGAM4_MPCC_OGAM_MODE 0x14b2
+#define mmMPCC_OGAM4_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_INDEX 0x14b3
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_DATA 0x14b4
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL 0x14b5
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B 0x14b6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G 0x14b7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R 0x14b8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x14b9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x14ba
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x14bb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B 0x14bc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B 0x14bd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G 0x14be
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G 0x14bf
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R 0x14c0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R 0x14c1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1 0x14c2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3 0x14c3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5 0x14c4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7 0x14c5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9 0x14c6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11 0x14c7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13 0x14c8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15 0x14c9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17 0x14ca
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19 0x14cb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21 0x14cc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23 0x14cd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25 0x14ce
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27 0x14cf
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29 0x14d0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31 0x14d1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33 0x14d2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B 0x14d3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G 0x14d4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R 0x14d5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x14d6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x14d7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x14d8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B 0x14d9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B 0x14da
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G 0x14db
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G 0x14dc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R 0x14dd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R 0x14de
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1 0x14df
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3 0x14e0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5 0x14e1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7 0x14e2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9 0x14e3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11 0x14e4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13 0x14e5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15 0x14e6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17 0x14e7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19 0x14e8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21 0x14e9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23 0x14ea
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25 0x14eb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27 0x14ec
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29 0x14ed
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31 0x14ee
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33 0x14ef
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+// base address: 0x0
+#define mmMPC_OUT_CSC_COEF_FORMAT 0x15b6
+#define mmMPC_OUT_CSC_COEF_FORMAT_BASE_IDX 2
+#define mmMPC_OUT0_CSC_MODE 0x15b7
+#define mmMPC_OUT0_CSC_MODE_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C11_C12_A 0x15b8
+#define mmMPC_OUT0_CSC_C11_C12_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C13_C14_A 0x15b9
+#define mmMPC_OUT0_CSC_C13_C14_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C21_C22_A 0x15ba
+#define mmMPC_OUT0_CSC_C21_C22_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C23_C24_A 0x15bb
+#define mmMPC_OUT0_CSC_C23_C24_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C31_C32_A 0x15bc
+#define mmMPC_OUT0_CSC_C31_C32_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C33_C34_A 0x15bd
+#define mmMPC_OUT0_CSC_C33_C34_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C11_C12_B 0x15be
+#define mmMPC_OUT0_CSC_C11_C12_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C13_C14_B 0x15bf
+#define mmMPC_OUT0_CSC_C13_C14_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C21_C22_B 0x15c0
+#define mmMPC_OUT0_CSC_C21_C22_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C23_C24_B 0x15c1
+#define mmMPC_OUT0_CSC_C23_C24_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C31_C32_B 0x15c2
+#define mmMPC_OUT0_CSC_C31_C32_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C33_C34_B 0x15c3
+#define mmMPC_OUT0_CSC_C33_C34_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_MODE 0x15c4
+#define mmMPC_OUT1_CSC_MODE_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C11_C12_A 0x15c5
+#define mmMPC_OUT1_CSC_C11_C12_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C13_C14_A 0x15c6
+#define mmMPC_OUT1_CSC_C13_C14_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C21_C22_A 0x15c7
+#define mmMPC_OUT1_CSC_C21_C22_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C23_C24_A 0x15c8
+#define mmMPC_OUT1_CSC_C23_C24_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C31_C32_A 0x15c9
+#define mmMPC_OUT1_CSC_C31_C32_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C33_C34_A 0x15ca
+#define mmMPC_OUT1_CSC_C33_C34_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C11_C12_B 0x15cb
+#define mmMPC_OUT1_CSC_C11_C12_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C13_C14_B 0x15cc
+#define mmMPC_OUT1_CSC_C13_C14_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C21_C22_B 0x15cd
+#define mmMPC_OUT1_CSC_C21_C22_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C23_C24_B 0x15ce
+#define mmMPC_OUT1_CSC_C23_C24_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C31_C32_B 0x15cf
+#define mmMPC_OUT1_CSC_C31_C32_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C33_C34_B 0x15d0
+#define mmMPC_OUT1_CSC_C33_C34_B_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt0_dispdec
+// base address: 0x0
+#define mmFMT0_FMT_CLAMP_COMPONENT_R 0x183c
+#define mmFMT0_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_COMPONENT_G 0x183d
+#define mmFMT0_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_COMPONENT_B 0x183e
+#define mmFMT0_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL 0x183f
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define mmFMT0_FMT_CONTROL 0x1840
+#define mmFMT0_FMT_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_BIT_DEPTH_CONTROL 0x1841
+#define mmFMT0_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_DITHER_RAND_R_SEED 0x1842
+#define mmFMT0_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define mmFMT0_FMT_DITHER_RAND_G_SEED 0x1843
+#define mmFMT0_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define mmFMT0_FMT_DITHER_RAND_B_SEED 0x1844
+#define mmFMT0_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_CNTL 0x1845
+#define mmFMT0_FMT_CLAMP_CNTL_BASE_IDX 2
+#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1846
+#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_MAP420_MEMORY_CONTROL 0x1847
+#define mmFMT0_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_422_CONTROL 0x1849
+#define mmFMT0_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+// base address: 0x0
+#define mmDPG0_DPG_CONTROL 0x1854
+#define mmDPG0_DPG_CONTROL_BASE_IDX 2
+#define mmDPG0_DPG_RAMP_CONTROL 0x1855
+#define mmDPG0_DPG_RAMP_CONTROL_BASE_IDX 2
+#define mmDPG0_DPG_DIMENSIONS 0x1856
+#define mmDPG0_DPG_DIMENSIONS_BASE_IDX 2
+#define mmDPG0_DPG_COLOUR_R_CR 0x1857
+#define mmDPG0_DPG_COLOUR_R_CR_BASE_IDX 2
+#define mmDPG0_DPG_COLOUR_G_Y 0x1858
+#define mmDPG0_DPG_COLOUR_G_Y_BASE_IDX 2
+#define mmDPG0_DPG_COLOUR_B_CB 0x1859
+#define mmDPG0_DPG_COLOUR_B_CB_BASE_IDX 2
+#define mmDPG0_DPG_OFFSET_SEGMENT 0x185a
+#define mmDPG0_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define mmDPG0_DPG_STATUS 0x185b
+#define mmDPG0_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+// base address: 0x0
+#define mmOPPBUF0_OPPBUF_CONTROL 0x1884
+#define mmOPPBUF0_OPPBUF_CONTROL_BASE_IDX 2
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_0 0x1885
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_1 0x1886
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+// base address: 0x0
+#define mmOPP_PIPE0_OPP_PIPE_CONTROL 0x188c
+#define mmOPP_PIPE0_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec
+// base address: 0x0
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL 0x1891
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK 0x1892
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0 0x1893
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1 0x1894
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2 0x1895
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+// base address: 0x168
+#define mmFMT1_FMT_CLAMP_COMPONENT_R 0x1896
+#define mmFMT1_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT1_FMT_CLAMP_COMPONENT_G 0x1897
+#define mmFMT1_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT1_FMT_CLAMP_COMPONENT_B 0x1898
+#define mmFMT1_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT1_FMT_DYNAMIC_EXP_CNTL 0x1899
+#define mmFMT1_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define mmFMT1_FMT_CONTROL 0x189a
+#define mmFMT1_FMT_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_BIT_DEPTH_CONTROL 0x189b
+#define mmFMT1_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_DITHER_RAND_R_SEED 0x189c
+#define mmFMT1_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define mmFMT1_FMT_DITHER_RAND_G_SEED 0x189d
+#define mmFMT1_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define mmFMT1_FMT_DITHER_RAND_B_SEED 0x189e
+#define mmFMT1_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define mmFMT1_FMT_CLAMP_CNTL 0x189f
+#define mmFMT1_FMT_CLAMP_CNTL_BASE_IDX 2
+#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18a0
+#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_MAP420_MEMORY_CONTROL 0x18a1
+#define mmFMT1_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_422_CONTROL 0x18a3
+#define mmFMT1_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+// base address: 0x168
+#define mmDPG1_DPG_CONTROL 0x18ae
+#define mmDPG1_DPG_CONTROL_BASE_IDX 2
+#define mmDPG1_DPG_RAMP_CONTROL 0x18af
+#define mmDPG1_DPG_RAMP_CONTROL_BASE_IDX 2
+#define mmDPG1_DPG_DIMENSIONS 0x18b0
+#define mmDPG1_DPG_DIMENSIONS_BASE_IDX 2
+#define mmDPG1_DPG_COLOUR_R_CR 0x18b1
+#define mmDPG1_DPG_COLOUR_R_CR_BASE_IDX 2
+#define mmDPG1_DPG_COLOUR_G_Y 0x18b2
+#define mmDPG1_DPG_COLOUR_G_Y_BASE_IDX 2
+#define mmDPG1_DPG_COLOUR_B_CB 0x18b3
+#define mmDPG1_DPG_COLOUR_B_CB_BASE_IDX 2
+#define mmDPG1_DPG_OFFSET_SEGMENT 0x18b4
+#define mmDPG1_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define mmDPG1_DPG_STATUS 0x18b5
+#define mmDPG1_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+// base address: 0x168
+#define mmOPPBUF1_OPPBUF_CONTROL 0x18de
+#define mmOPPBUF1_OPPBUF_CONTROL_BASE_IDX 2
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_0 0x18df
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_1 0x18e0
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+// base address: 0x168
+#define mmOPP_PIPE1_OPP_PIPE_CONTROL 0x18e6
+#define mmOPP_PIPE1_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec
+// base address: 0x168
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL 0x18eb
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK 0x18ec
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0 0x18ed
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1 0x18ee
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2 0x18ef
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+// base address: 0x0
+#define mmOPP_TOP_CLK_CONTROL 0x1a5e
+#define mmOPP_TOP_CLK_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+// base address: 0x0
+#define mmODM0_OPTC_INPUT_GLOBAL_CONTROL 0x1aca
+#define mmODM0_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define mmODM0_OPTC_DATA_SOURCE_SELECT 0x1acb
+#define mmODM0_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define mmODM0_OPTC_DATA_FORMAT_CONTROL 0x1acc
+#define mmODM0_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define mmODM0_OPTC_BYTES_PER_PIXEL 0x1acd
+#define mmODM0_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmODM0_OPTC_WIDTH_CONTROL 0x1ace
+#define mmODM0_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define mmODM0_OPTC_INPUT_CLOCK_CONTROL 0x1acf
+#define mmODM0_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+// base address: 0x40
+#define mmODM1_OPTC_INPUT_GLOBAL_CONTROL 0x1ada
+#define mmODM1_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define mmODM1_OPTC_DATA_SOURCE_SELECT 0x1adb
+#define mmODM1_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define mmODM1_OPTC_DATA_FORMAT_CONTROL 0x1adc
+#define mmODM1_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define mmODM1_OPTC_BYTES_PER_PIXEL 0x1add
+#define mmODM1_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmODM1_OPTC_WIDTH_CONTROL 0x1ade
+#define mmODM1_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define mmODM1_OPTC_INPUT_CLOCK_CONTROL 0x1adf
+#define mmODM1_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+// base address: 0x0
+#define mmOTG0_OTG_H_TOTAL 0x1b2a
+#define mmOTG0_OTG_H_TOTAL_BASE_IDX 2
+#define mmOTG0_OTG_H_BLANK_START_END 0x1b2b
+#define mmOTG0_OTG_H_BLANK_START_END_BASE_IDX 2
+#define mmOTG0_OTG_H_SYNC_A 0x1b2c
+#define mmOTG0_OTG_H_SYNC_A_BASE_IDX 2
+#define mmOTG0_OTG_H_SYNC_A_CNTL 0x1b2d
+#define mmOTG0_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_H_TIMING_CNTL 0x1b2e
+#define mmOTG0_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL 0x1b2f
+#define mmOTG0_OTG_V_TOTAL_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_MIN 0x1b30
+#define mmOTG0_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_MAX 0x1b31
+#define mmOTG0_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_MID 0x1b32
+#define mmOTG0_OTG_V_TOTAL_MID_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_CONTROL 0x1b33
+#define mmOTG0_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_INT_STATUS 0x1b34
+#define mmOTG0_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_VSYNC_NOM_INT_STATUS 0x1b35
+#define mmOTG0_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_V_BLANK_START_END 0x1b36
+#define mmOTG0_OTG_V_BLANK_START_END_BASE_IDX 2
+#define mmOTG0_OTG_V_SYNC_A 0x1b37
+#define mmOTG0_OTG_V_SYNC_A_BASE_IDX 2
+#define mmOTG0_OTG_V_SYNC_A_CNTL 0x1b38
+#define mmOTG0_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_TRIGA_CNTL 0x1b39
+#define mmOTG0_OTG_TRIGA_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_TRIGA_MANUAL_TRIG 0x1b3a
+#define mmOTG0_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG0_OTG_TRIGB_CNTL 0x1b3b
+#define mmOTG0_OTG_TRIGB_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_TRIGB_MANUAL_TRIG 0x1b3c
+#define mmOTG0_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG0_OTG_FORCE_COUNT_NOW_CNTL 0x1b3d
+#define mmOTG0_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_STEREO_FORCE_NEXT_EYE 0x1b3f
+#define mmOTG0_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define mmOTG0_OTG_CONTROL 0x1b41
+#define mmOTG0_OTG_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_BLANK_CONTROL 0x1b42
+#define mmOTG0_OTG_BLANK_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_INTERLACE_CONTROL 0x1b44
+#define mmOTG0_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_INTERLACE_STATUS 0x1b45
+#define mmOTG0_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_PIXEL_DATA_READBACK0 0x1b47
+#define mmOTG0_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define mmOTG0_OTG_PIXEL_DATA_READBACK1 0x1b48
+#define mmOTG0_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define mmOTG0_OTG_STATUS 0x1b49
+#define mmOTG0_OTG_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_POSITION 0x1b4a
+#define mmOTG0_OTG_STATUS_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_NOM_VERT_POSITION 0x1b4b
+#define mmOTG0_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_FRAME_COUNT 0x1b4c
+#define mmOTG0_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_VF_COUNT 0x1b4d
+#define mmOTG0_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_HV_COUNT 0x1b4e
+#define mmOTG0_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define mmOTG0_OTG_COUNT_CONTROL 0x1b4f
+#define mmOTG0_OTG_COUNT_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_COUNT_RESET 0x1b50
+#define mmOTG0_OTG_COUNT_RESET_BASE_IDX 2
+#define mmOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1b51
+#define mmOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define mmOTG0_OTG_VERT_SYNC_CONTROL 0x1b52
+#define mmOTG0_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_STEREO_STATUS 0x1b53
+#define mmOTG0_OTG_STEREO_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_STEREO_CONTROL 0x1b54
+#define mmOTG0_OTG_STEREO_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_STATUS 0x1b55
+#define mmOTG0_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_CONTROL 0x1b56
+#define mmOTG0_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_POSITION 0x1b57
+#define mmOTG0_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_FRAME 0x1b58
+#define mmOTG0_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define mmOTG0_OTG_INTERRUPT_CONTROL 0x1b59
+#define mmOTG0_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_UPDATE_LOCK 0x1b5a
+#define mmOTG0_OTG_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG0_OTG_DOUBLE_BUFFER_CONTROL 0x1b5b
+#define mmOTG0_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_MASTER_EN 0x1b5c
+#define mmOTG0_OTG_MASTER_EN_BASE_IDX 2
+#define mmOTG0_OTG_BLANK_DATA_COLOR 0x1b5e
+#define mmOTG0_OTG_BLANK_DATA_COLOR_BASE_IDX 2
+#define mmOTG0_OTG_BLANK_DATA_COLOR_EXT 0x1b5f
+#define mmOTG0_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2
+#define mmOTG0_OTG_BLACK_COLOR 0x1b60
+#define mmOTG0_OTG_BLACK_COLOR_BASE_IDX 2
+#define mmOTG0_OTG_BLACK_COLOR_EXT 0x1b61
+#define mmOTG0_OTG_BLACK_COLOR_EXT_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_POSITION 0x1b62
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1b63
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_POSITION 0x1b64
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1b65
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_POSITION 0x1b66
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1b67
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC_CNTL 0x1b68
+#define mmOTG0_OTG_CRC_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_CRC_CNTL2 0x1b69
+#define mmOTG0_OTG_CRC_CNTL2_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWA_X_CONTROL 0x1b6a
+#define mmOTG0_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWA_Y_CONTROL 0x1b6b
+#define mmOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWB_X_CONTROL 0x1b6c
+#define mmOTG0_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWB_Y_CONTROL 0x1b6d
+#define mmOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_DATA_RG 0x1b6e
+#define mmOTG0_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_DATA_B 0x1b6f
+#define mmOTG0_OTG_CRC0_DATA_B_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWA_X_CONTROL 0x1b70
+#define mmOTG0_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWA_Y_CONTROL 0x1b71
+#define mmOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWB_X_CONTROL 0x1b72
+#define mmOTG0_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWB_Y_CONTROL 0x1b73
+#define mmOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_DATA_RG 0x1b74
+#define mmOTG0_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_DATA_B 0x1b75
+#define mmOTG0_OTG_CRC1_DATA_B_BASE_IDX 2
+#define mmOTG0_OTG_CRC_SIG_RED_GREEN_MASK 0x1b7a
+#define mmOTG0_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define mmOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1b7b
+#define mmOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define mmOTG0_OTG_STATIC_SCREEN_CONTROL 0x1b82
+#define mmOTG0_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_3D_STRUCTURE_CONTROL 0x1b83
+#define mmOTG0_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_GSL_VSYNC_GAP 0x1b84
+#define mmOTG0_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define mmOTG0_OTG_MASTER_UPDATE_MODE 0x1b85
+#define mmOTG0_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define mmOTG0_OTG_CLOCK_CONTROL 0x1b86
+#define mmOTG0_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_VSTARTUP_PARAM 0x1b87
+#define mmOTG0_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define mmOTG0_OTG_VUPDATE_PARAM 0x1b88
+#define mmOTG0_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define mmOTG0_OTG_VREADY_PARAM 0x1b89
+#define mmOTG0_OTG_VREADY_PARAM_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_SYNC_STATUS 0x1b8a
+#define mmOTG0_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_MASTER_UPDATE_LOCK 0x1b8b
+#define mmOTG0_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG0_OTG_GSL_CONTROL 0x1b8c
+#define mmOTG0_OTG_GSL_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_GSL_WINDOW_X 0x1b8d
+#define mmOTG0_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define mmOTG0_OTG_GSL_WINDOW_Y 0x1b8e
+#define mmOTG0_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define mmOTG0_OTG_VUPDATE_KEEPOUT 0x1b8f
+#define mmOTG0_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL0 0x1b90
+#define mmOTG0_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL1 0x1b91
+#define mmOTG0_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL2 0x1b92
+#define mmOTG0_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL3 0x1b93
+#define mmOTG0_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define mmOTG0_OTG_TRIG_MANUAL_CONTROL 0x1b94
+#define mmOTG0_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_DRR_CONTROL 0x1b97
+#define mmOTG0_OTG_DRR_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_DSC_START_POSITION 0x1b99
+#define mmOTG0_OTG_DSC_START_POSITION_BASE_IDX 2
+
+// addressBlock: dce_dc_optc_otg1_dispdec
+// base address: 0x200
+#define mmOTG1_OTG_H_TOTAL 0x1baa
+#define mmOTG1_OTG_H_TOTAL_BASE_IDX 2
+#define mmOTG1_OTG_H_BLANK_START_END 0x1bab
+#define mmOTG1_OTG_H_BLANK_START_END_BASE_IDX 2
+#define mmOTG1_OTG_H_SYNC_A 0x1bac
+#define mmOTG1_OTG_H_SYNC_A_BASE_IDX 2
+#define mmOTG1_OTG_H_SYNC_A_CNTL 0x1bad
+#define mmOTG1_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_H_TIMING_CNTL 0x1bae
+#define mmOTG1_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL 0x1baf
+#define mmOTG1_OTG_V_TOTAL_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_MIN 0x1bb0
+#define mmOTG1_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_MAX 0x1bb1
+#define mmOTG1_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_MID 0x1bb2
+#define mmOTG1_OTG_V_TOTAL_MID_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_CONTROL 0x1bb3
+#define mmOTG1_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_INT_STATUS 0x1bb4
+#define mmOTG1_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_VSYNC_NOM_INT_STATUS 0x1bb5
+#define mmOTG1_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_V_BLANK_START_END 0x1bb6
+#define mmOTG1_OTG_V_BLANK_START_END_BASE_IDX 2
+#define mmOTG1_OTG_V_SYNC_A 0x1bb7
+#define mmOTG1_OTG_V_SYNC_A_BASE_IDX 2
+#define mmOTG1_OTG_V_SYNC_A_CNTL 0x1bb8
+#define mmOTG1_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_TRIGA_CNTL 0x1bb9
+#define mmOTG1_OTG_TRIGA_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_TRIGA_MANUAL_TRIG 0x1bba
+#define mmOTG1_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG1_OTG_TRIGB_CNTL 0x1bbb
+#define mmOTG1_OTG_TRIGB_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_TRIGB_MANUAL_TRIG 0x1bbc
+#define mmOTG1_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG1_OTG_FORCE_COUNT_NOW_CNTL 0x1bbd
+#define mmOTG1_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_STEREO_FORCE_NEXT_EYE 0x1bbf
+#define mmOTG1_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define mmOTG1_OTG_CONTROL 0x1bc1
+#define mmOTG1_OTG_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_BLANK_CONTROL 0x1bc2
+#define mmOTG1_OTG_BLANK_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_INTERLACE_CONTROL 0x1bc4
+#define mmOTG1_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_INTERLACE_STATUS 0x1bc5
+#define mmOTG1_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_PIXEL_DATA_READBACK0 0x1bc7
+#define mmOTG1_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define mmOTG1_OTG_PIXEL_DATA_READBACK1 0x1bc8
+#define mmOTG1_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define mmOTG1_OTG_STATUS 0x1bc9
+#define mmOTG1_OTG_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_POSITION 0x1bca
+#define mmOTG1_OTG_STATUS_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_NOM_VERT_POSITION 0x1bcb
+#define mmOTG1_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_FRAME_COUNT 0x1bcc
+#define mmOTG1_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_VF_COUNT 0x1bcd
+#define mmOTG1_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_HV_COUNT 0x1bce
+#define mmOTG1_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define mmOTG1_OTG_COUNT_CONTROL 0x1bcf
+#define mmOTG1_OTG_COUNT_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_COUNT_RESET 0x1bd0
+#define mmOTG1_OTG_COUNT_RESET_BASE_IDX 2
+#define mmOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1bd1
+#define mmOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define mmOTG1_OTG_VERT_SYNC_CONTROL 0x1bd2
+#define mmOTG1_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_STEREO_STATUS 0x1bd3
+#define mmOTG1_OTG_STEREO_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_STEREO_CONTROL 0x1bd4
+#define mmOTG1_OTG_STEREO_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_STATUS 0x1bd5
+#define mmOTG1_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_CONTROL 0x1bd6
+#define mmOTG1_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_POSITION 0x1bd7
+#define mmOTG1_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_FRAME 0x1bd8
+#define mmOTG1_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define mmOTG1_OTG_INTERRUPT_CONTROL 0x1bd9
+#define mmOTG1_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_UPDATE_LOCK 0x1bda
+#define mmOTG1_OTG_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG1_OTG_DOUBLE_BUFFER_CONTROL 0x1bdb
+#define mmOTG1_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_MASTER_EN 0x1bdc
+#define mmOTG1_OTG_MASTER_EN_BASE_IDX 2
+#define mmOTG1_OTG_BLANK_DATA_COLOR 0x1bde
+#define mmOTG1_OTG_BLANK_DATA_COLOR_BASE_IDX 2
+#define mmOTG1_OTG_BLANK_DATA_COLOR_EXT 0x1bdf
+#define mmOTG1_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2
+#define mmOTG1_OTG_BLACK_COLOR 0x1be0
+#define mmOTG1_OTG_BLACK_COLOR_BASE_IDX 2
+#define mmOTG1_OTG_BLACK_COLOR_EXT 0x1be1
+#define mmOTG1_OTG_BLACK_COLOR_EXT_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_POSITION 0x1be2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1be3
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_POSITION 0x1be4
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1be5
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_POSITION 0x1be6
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1be7
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC_CNTL 0x1be8
+#define mmOTG1_OTG_CRC_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_CRC_CNTL2 0x1be9
+#define mmOTG1_OTG_CRC_CNTL2_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWA_X_CONTROL 0x1bea
+#define mmOTG1_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWA_Y_CONTROL 0x1beb
+#define mmOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWB_X_CONTROL 0x1bec
+#define mmOTG1_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWB_Y_CONTROL 0x1bed
+#define mmOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_DATA_RG 0x1bee
+#define mmOTG1_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_DATA_B 0x1bef
+#define mmOTG1_OTG_CRC0_DATA_B_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWA_X_CONTROL 0x1bf0
+#define mmOTG1_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWA_Y_CONTROL 0x1bf1
+#define mmOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWB_X_CONTROL 0x1bf2
+#define mmOTG1_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWB_Y_CONTROL 0x1bf3
+#define mmOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_DATA_RG 0x1bf4
+#define mmOTG1_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_DATA_B 0x1bf5
+#define mmOTG1_OTG_CRC1_DATA_B_BASE_IDX 2
+#define mmOTG1_OTG_CRC_SIG_RED_GREEN_MASK 0x1bfa
+#define mmOTG1_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define mmOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1bfb
+#define mmOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define mmOTG1_OTG_STATIC_SCREEN_CONTROL 0x1c02
+#define mmOTG1_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_3D_STRUCTURE_CONTROL 0x1c03
+#define mmOTG1_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_GSL_VSYNC_GAP 0x1c04
+#define mmOTG1_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define mmOTG1_OTG_MASTER_UPDATE_MODE 0x1c05
+#define mmOTG1_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define mmOTG1_OTG_CLOCK_CONTROL 0x1c06
+#define mmOTG1_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_VSTARTUP_PARAM 0x1c07
+#define mmOTG1_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define mmOTG1_OTG_VUPDATE_PARAM 0x1c08
+#define mmOTG1_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define mmOTG1_OTG_VREADY_PARAM 0x1c09
+#define mmOTG1_OTG_VREADY_PARAM_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_SYNC_STATUS 0x1c0a
+#define mmOTG1_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_MASTER_UPDATE_LOCK 0x1c0b
+#define mmOTG1_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG1_OTG_GSL_CONTROL 0x1c0c
+#define mmOTG1_OTG_GSL_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_GSL_WINDOW_X 0x1c0d
+#define mmOTG1_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define mmOTG1_OTG_GSL_WINDOW_Y 0x1c0e
+#define mmOTG1_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define mmOTG1_OTG_VUPDATE_KEEPOUT 0x1c0f
+#define mmOTG1_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL0 0x1c10
+#define mmOTG1_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL1 0x1c11
+#define mmOTG1_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL2 0x1c12
+#define mmOTG1_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL3 0x1c13
+#define mmOTG1_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define mmOTG1_OTG_TRIG_MANUAL_CONTROL 0x1c14
+#define mmOTG1_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_DRR_CONTROL 0x1c17
+#define mmOTG1_OTG_DRR_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_DSC_START_POSITION 0x1c19
+#define mmOTG1_OTG_DSC_START_POSITION_BASE_IDX 2
+
+
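+/*
+ * Instance-stride sketch (illustrative): OTG1 mirrors OTG0 at a fixed
+ * dword stride that matches its "base address: 0x200" (byte) comment:
+ *
+ *   mmOTG1_OTG_H_TOTAL - mmOTG0_OTG_H_TOTAL == 0x80 dwords == 0x200 bytes
+ *
+ * so per-instance addressing can be derived from the OTG0 names, e.g. with
+ * a hypothetical helper (not part of this header):
+ *
+ *   #define OTG_INST_STRIDE (mmOTG1_OTG_H_TOTAL - mmOTG0_OTG_H_TOTAL)
+ *   #define OTG_REG(reg, inst) ((reg) + (inst) * OTG_INST_STRIDE)
+ *
+ * This mirrors the SRI()-style instance macros used elsewhere in DC code.
+ */
+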
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+// base address: 0x0
+#define mmDWB_SOURCE_SELECT 0x1e2a
+#define mmDWB_SOURCE_SELECT_BASE_IDX 2
+#define mmGSL_SOURCE_SELECT 0x1e2b
+#define mmGSL_SOURCE_SELECT_BASE_IDX 2
+#define mmOPTC_CLOCK_CONTROL 0x1e2c
+#define mmOPTC_CLOCK_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+// base address: 0x0
+#define mmDC_I2C_CONTROL 0x1e98
+#define mmDC_I2C_CONTROL_BASE_IDX 2
+#define mmDC_I2C_ARBITRATION 0x1e99
+#define mmDC_I2C_ARBITRATION_BASE_IDX 2
+#define mmDC_I2C_INTERRUPT_CONTROL 0x1e9a
+#define mmDC_I2C_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDC_I2C_SW_STATUS 0x1e9b
+#define mmDC_I2C_SW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC1_HW_STATUS 0x1e9c
+#define mmDC_I2C_DDC1_HW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC2_HW_STATUS 0x1e9d
+#define mmDC_I2C_DDC2_HW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC1_SPEED 0x1ea2
+#define mmDC_I2C_DDC1_SPEED_BASE_IDX 2
+#define mmDC_I2C_DDC1_SETUP 0x1ea3
+#define mmDC_I2C_DDC1_SETUP_BASE_IDX 2
+#define mmDC_I2C_DDC2_SPEED 0x1ea4
+#define mmDC_I2C_DDC2_SPEED_BASE_IDX 2
+#define mmDC_I2C_DDC2_SETUP 0x1ea5
+#define mmDC_I2C_DDC2_SETUP_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION0 0x1eae
+#define mmDC_I2C_TRANSACTION0_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION1 0x1eaf
+#define mmDC_I2C_TRANSACTION1_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION2 0x1eb0
+#define mmDC_I2C_TRANSACTION2_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION3 0x1eb1
+#define mmDC_I2C_TRANSACTION3_BASE_IDX 2
+#define mmDC_I2C_DATA 0x1eb2
+#define mmDC_I2C_DATA_BASE_IDX 2
+#define mmDC_I2C_EDID_DETECT_CTRL 0x1eb6
+#define mmDC_I2C_EDID_DETECT_CTRL_BASE_IDX 2
+#define mmDC_I2C_READ_REQUEST_INTERRUPT 0x1eb7
+#define mmDC_I2C_READ_REQUEST_INTERRUPT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_misc_dispdec
+// base address: 0x0
+#define mmDIO_SCRATCH0 0x1eca
+#define mmDIO_SCRATCH0_BASE_IDX 2
+#define mmDIO_SCRATCH1 0x1ecb
+#define mmDIO_SCRATCH1_BASE_IDX 2
+#define mmDIO_SCRATCH2 0x1ecc
+#define mmDIO_SCRATCH2_BASE_IDX 2
+#define mmDIO_SCRATCH3 0x1ecd
+#define mmDIO_SCRATCH3_BASE_IDX 2
+#define mmDIO_SCRATCH4 0x1ece
+#define mmDIO_SCRATCH4_BASE_IDX 2
+#define mmDIO_SCRATCH5 0x1ecf
+#define mmDIO_SCRATCH5_BASE_IDX 2
+#define mmDIO_SCRATCH6 0x1ed0
+#define mmDIO_SCRATCH6_BASE_IDX 2
+#define mmDIO_SCRATCH7 0x1ed1
+#define mmDIO_SCRATCH7_BASE_IDX 2
+#define mmDIO_MEM_PWR_STATUS 0x1edd
+#define mmDIO_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDIO_MEM_PWR_CTRL 0x1ede
+#define mmDIO_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDIO_MEM_PWR_CTRL2 0x1edf
+#define mmDIO_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmDIO_CLK_CNTL 0x1ee0
+#define mmDIO_CLK_CNTL_BASE_IDX 2
+#define mmDIO_MEM_PWR_CTRL3 0x1ee1
+#define mmDIO_MEM_PWR_CTRL3_BASE_IDX 2
+#define mmDIO_POWER_MANAGEMENT_CNTL 0x1ee4
+#define mmDIO_POWER_MANAGEMENT_CNTL_BASE_IDX 2
+#define mmDIG_SOFT_RESET 0x1eee
+#define mmDIG_SOFT_RESET_BASE_IDX 2
+#define mmDIO_MEM_PWR_STATUS1 0x1ef0
+#define mmDIO_MEM_PWR_STATUS1_BASE_IDX 2
+#define mmDIO_CLK_CNTL2 0x1ef2
+#define mmDIO_CLK_CNTL2_BASE_IDX 2
+#define mmDIO_CLK_CNTL3 0x1ef3
+#define mmDIO_CLK_CNTL3_BASE_IDX 2
+#define mmDIO_HDMI_RXSTATUS_TIMER_CONTROL 0x1eff
+#define mmDIO_HDMI_RXSTATUS_TIMER_CONTROL_BASE_IDX 2
+#define mmDIO_GENERIC_INTERRUPT_MESSAGE 0x1f02
+#define mmDIO_GENERIC_INTERRUPT_MESSAGE_BASE_IDX 2
+#define mmDIO_GENERIC_INTERRUPT_CLEAR 0x1f03
+#define mmDIO_GENERIC_INTERRUPT_CLEAR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+// base address: 0x0
+#define mmHPD0_DC_HPD_INT_STATUS 0x1f14
+#define mmHPD0_DC_HPD_INT_STATUS_BASE_IDX 2
+#define mmHPD0_DC_HPD_INT_CONTROL 0x1f15
+#define mmHPD0_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD0_DC_HPD_CONTROL 0x1f16
+#define mmHPD0_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL 0x1f17
+#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL 0x1f18
+#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+// base address: 0x20
+#define mmHPD1_DC_HPD_INT_STATUS 0x1f1c
+#define mmHPD1_DC_HPD_INT_STATUS_BASE_IDX 2
+#define mmHPD1_DC_HPD_INT_CONTROL 0x1f1d
+#define mmHPD1_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD1_DC_HPD_CONTROL 0x1f1e
+#define mmHPD1_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL 0x1f1f
+#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL 0x1f20
+#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+// base address: 0x0
+#define mmDP_AUX0_AUX_CONTROL 0x1f50
+#define mmDP_AUX0_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_CONTROL 0x1f51
+#define mmDP_AUX0_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_ARB_CONTROL 0x1f52
+#define mmDP_AUX0_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL 0x1f53
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_STATUS 0x1f54
+#define mmDP_AUX0_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_LS_STATUS 0x1f55
+#define mmDP_AUX0_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_DATA 0x1f56
+#define mmDP_AUX0_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX0_AUX_LS_DATA 0x1f57
+#define mmDP_AUX0_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL 0x1f58
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL 0x1f59
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0 0x1f5a
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1 0x1f5b
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS 0x1f5c
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS 0x1f5d
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+// base address: 0x70
+#define mmDP_AUX1_AUX_CONTROL 0x1f6c
+#define mmDP_AUX1_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_CONTROL 0x1f6d
+#define mmDP_AUX1_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_ARB_CONTROL 0x1f6e
+#define mmDP_AUX1_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL 0x1f6f
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_STATUS 0x1f70
+#define mmDP_AUX1_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_LS_STATUS 0x1f71
+#define mmDP_AUX1_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_DATA 0x1f72
+#define mmDP_AUX1_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX1_AUX_LS_DATA 0x1f73
+#define mmDP_AUX1_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL 0x1f74
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL 0x1f75
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0 0x1f76
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1 0x1f77
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS 0x1f78
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS 0x1f79
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+// base address: 0x0
+#define mmDIG0_DIG_FE_CNTL 0x2068
+#define mmDIG0_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL 0x2069
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT 0x206a
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG0_DIG_CLOCK_PATTERN 0x206b
+#define mmDIG0_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG0_DIG_TEST_PATTERN 0x206c
+#define mmDIG0_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED 0x206d
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG0_DIG_FIFO_STATUS 0x206e
+#define mmDIG0_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_METADATA_PACKET_CONTROL 0x206f
+#define mmDIG0_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL4 0x2070
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG0_HDMI_CONTROL 0x2071
+#define mmDIG0_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_STATUS 0x2072
+#define mmDIG0_HDMI_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL 0x2073
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL 0x2074
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL 0x2075
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0 0x2076
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1 0x2077
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0 0x2078
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_INTERRUPT_STATUS 0x2079
+#define mmDIG0_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_GC 0x207b
+#define mmDIG0_HDMI_GC_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2 0x207c
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_0 0x207d
+#define mmDIG0_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_1 0x207e
+#define mmDIG0_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_2 0x207f
+#define mmDIG0_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_3 0x2080
+#define mmDIG0_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_4 0x2081
+#define mmDIG0_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_0 0x2082
+#define mmDIG0_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_1 0x2083
+#define mmDIG0_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_2 0x2084
+#define mmDIG0_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_3 0x2085
+#define mmDIG0_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL2 0x2086
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL3 0x2087
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG0_HDMI_DB_CONTROL 0x2088
+#define mmDIG0_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG0_DME_CONTROL 0x2089
+#define mmDIG0_DME_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_MPEG_INFO0 0x208a
+#define mmDIG0_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG0_AFMT_MPEG_INFO1 0x208b
+#define mmDIG0_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_HDR 0x208c
+#define mmDIG0_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_0 0x208d
+#define mmDIG0_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_1 0x208e
+#define mmDIG0_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_2 0x208f
+#define mmDIG0_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_3 0x2090
+#define mmDIG0_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_4 0x2091
+#define mmDIG0_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_5 0x2092
+#define mmDIG0_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_6 0x2093
+#define mmDIG0_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_7 0x2094
+#define mmDIG0_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1 0x2095
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_32_0 0x2096
+#define mmDIG0_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_32_1 0x2097
+#define mmDIG0_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_44_0 0x2098
+#define mmDIG0_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_44_1 0x2099
+#define mmDIG0_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_48_0 0x209a
+#define mmDIG0_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_48_1 0x209b
+#define mmDIG0_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_STATUS_0 0x209c
+#define mmDIG0_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_STATUS_1 0x209d
+#define mmDIG0_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_INFO0 0x209e
+#define mmDIG0_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_INFO1 0x209f
+#define mmDIG0_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG0_AFMT_60958_0 0x20a0
+#define mmDIG0_AFMT_60958_0_BASE_IDX 2
+#define mmDIG0_AFMT_60958_1 0x20a1
+#define mmDIG0_AFMT_60958_1_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL 0x20a2
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL0 0x20a3
+#define mmDIG0_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL1 0x20a4
+#define mmDIG0_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL2 0x20a5
+#define mmDIG0_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL3 0x20a6
+#define mmDIG0_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG0_AFMT_60958_2 0x20a7
+#define mmDIG0_AFMT_60958_2_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT 0x20a8
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG0_AFMT_STATUS 0x20a9
+#define mmDIG0_AFMT_STATUS_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL 0x20aa
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL 0x20ab
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0 0x20ac
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL 0x20ad
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG0_DIG_BE_CNTL 0x20af
+#define mmDIG0_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_BE_EN_CNTL 0x20b0
+#define mmDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CNTL 0x20d3
+#define mmDIG0_TMDS_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CONTROL_CHAR 0x20d4
+#define mmDIG0_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK 0x20d5
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL 0x20d6
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1 0x20d7
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3 0x20d8
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG0_TMDS_CTL_BITS 0x20da
+#define mmDIG0_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG0_TMDS_DCBALANCER_CONTROL 0x20db
+#define mmDIG0_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_DCBALANCE_CHAR 0x20dc
+#define mmDIG0_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL 0x20dd
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL 0x20de
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_VERSION 0x20e0
+#define mmDIG0_DIG_VERSION_BASE_IDX 2
+#define mmDIG0_DIG_LANE_ENABLE 0x20e1
+#define mmDIG0_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG0_AFMT_CNTL 0x20e6
+#define mmDIG0_AFMT_CNTL_BASE_IDX 2
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL1 0x20e7
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL5 0x20f6
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+// base address: 0x0
+#define mmDP0_DP_LINK_CNTL 0x2108
+#define mmDP0_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP0_DP_PIXEL_FORMAT 0x2109
+#define mmDP0_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP0_DP_MSA_COLORIMETRY 0x210a
+#define mmDP0_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP0_DP_CONFIG 0x210b
+#define mmDP0_DP_CONFIG_BASE_IDX 2
+#define mmDP0_DP_VID_STREAM_CNTL 0x210c
+#define mmDP0_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP0_DP_STEER_FIFO 0x210d
+#define mmDP0_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP0_DP_MSA_MISC 0x210e
+#define mmDP0_DP_MSA_MISC_BASE_IDX 2
+#define mmDP0_DP_VID_TIMING 0x2110
+#define mmDP0_DP_VID_TIMING_BASE_IDX 2
+#define mmDP0_DP_VID_N 0x2111
+#define mmDP0_DP_VID_N_BASE_IDX 2
+#define mmDP0_DP_VID_M 0x2112
+#define mmDP0_DP_VID_M_BASE_IDX 2
+#define mmDP0_DP_LINK_FRAMING_CNTL 0x2113
+#define mmDP0_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP0_DP_HBR2_EYE_PATTERN 0x2114
+#define mmDP0_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP0_DP_VID_MSA_VBID 0x2115
+#define mmDP0_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP0_DP_VID_INTERRUPT_CNTL 0x2116
+#define mmDP0_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CNTL 0x2117
+#define mmDP0_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL 0x2118
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM0 0x2119
+#define mmDP0_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM1 0x211a
+#define mmDP0_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM2 0x211b
+#define mmDP0_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP0_DP_DPHY_8B10B_CNTL 0x211c
+#define mmDP0_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_PRBS_CNTL 0x211d
+#define mmDP0_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_SCRAM_CNTL 0x211e
+#define mmDP0_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_EN 0x211f
+#define mmDP0_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_CNTL 0x2120
+#define mmDP0_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_RESULT 0x2121
+#define mmDP0_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_MST_CNTL 0x2122
+#define mmDP0_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_MST_STATUS 0x2123
+#define mmDP0_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP0_DP_DPHY_FAST_TRAINING 0x2124
+#define mmDP0_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS 0x2125
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL 0x212b
+#define mmDP0_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL1 0x212c
+#define mmDP0_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING1 0x212d
+#define mmDP0_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING2 0x212e
+#define mmDP0_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING3 0x212f
+#define mmDP0_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING4 0x2130
+#define mmDP0_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_N 0x2131
+#define mmDP0_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_N_READBACK 0x2132
+#define mmDP0_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_M 0x2133
+#define mmDP0_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_M_READBACK 0x2134
+#define mmDP0_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP0_DP_SEC_TIMESTAMP 0x2135
+#define mmDP0_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP0_DP_SEC_PACKET_CNTL 0x2136
+#define mmDP0_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSE_RATE_CNTL 0x2137
+#define mmDP0_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSE_RATE_UPDATE 0x2139
+#define mmDP0_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT0 0x213a
+#define mmDP0_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT1 0x213b
+#define mmDP0_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT2 0x213c
+#define mmDP0_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT_UPDATE 0x213d
+#define mmDP0_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP0_DP_MSE_LINK_TIMING 0x213e
+#define mmDP0_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define mmDP0_DP_MSE_MISC_CNTL 0x213f
+#define mmDP0_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x2144
+#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL 0x2145
+#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT0_STATUS 0x2147
+#define mmDP0_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT1_STATUS 0x2148
+#define mmDP0_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT2_STATUS 0x2149
+#define mmDP0_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM1 0x214c
+#define mmDP0_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM2 0x214d
+#define mmDP0_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM3 0x214e
+#define mmDP0_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM4 0x214f
+#define mmDP0_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define mmDP0_DP_DSC_CNTL 0x2152
+#define mmDP0_DP_DSC_CNTL_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL2 0x2153
+#define mmDP0_DP_SEC_CNTL2_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL3 0x2154
+#define mmDP0_DP_SEC_CNTL3_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL4 0x2155
+#define mmDP0_DP_SEC_CNTL4_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL5 0x2156
+#define mmDP0_DP_SEC_CNTL5_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL6 0x2157
+#define mmDP0_DP_SEC_CNTL6_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL7 0x2158
+#define mmDP0_DP_SEC_CNTL7_BASE_IDX 2
+#define mmDP0_DP_DB_CNTL 0x2159
+#define mmDP0_DP_DB_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSA_VBID_MISC 0x215a
+#define mmDP0_DP_MSA_VBID_MISC_BASE_IDX 2
+#define mmDP0_DP_SEC_METADATA_TRANSMISSION 0x215b
+#define mmDP0_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define mmDP0_DP_DSC_BYTES_PER_PIXEL 0x215c
+#define mmDP0_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+// base address: 0x400
+#define mmDIG1_DIG_FE_CNTL 0x2168
+#define mmDIG1_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL 0x2169
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT 0x216a
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG1_DIG_CLOCK_PATTERN 0x216b
+#define mmDIG1_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG1_DIG_TEST_PATTERN 0x216c
+#define mmDIG1_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED 0x216d
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG1_DIG_FIFO_STATUS 0x216e
+#define mmDIG1_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_METADATA_PACKET_CONTROL 0x216f
+#define mmDIG1_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL4 0x2170
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG1_HDMI_CONTROL 0x2171
+#define mmDIG1_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_STATUS 0x2172
+#define mmDIG1_HDMI_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL 0x2173
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL 0x2174
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL 0x2175
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0 0x2176
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1 0x2177
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0 0x2178
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_INTERRUPT_STATUS 0x2179
+#define mmDIG1_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_GC 0x217b
+#define mmDIG1_HDMI_GC_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2 0x217c
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_0 0x217d
+#define mmDIG1_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_1 0x217e
+#define mmDIG1_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_2 0x217f
+#define mmDIG1_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_3 0x2180
+#define mmDIG1_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_4 0x2181
+#define mmDIG1_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_0 0x2182
+#define mmDIG1_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_1 0x2183
+#define mmDIG1_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_2 0x2184
+#define mmDIG1_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_3 0x2185
+#define mmDIG1_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL2 0x2186
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL3 0x2187
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG1_HDMI_DB_CONTROL 0x2188
+#define mmDIG1_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG1_DME_CONTROL 0x2189
+#define mmDIG1_DME_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_MPEG_INFO0 0x218a
+#define mmDIG1_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG1_AFMT_MPEG_INFO1 0x218b
+#define mmDIG1_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_HDR 0x218c
+#define mmDIG1_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_0 0x218d
+#define mmDIG1_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_1 0x218e
+#define mmDIG1_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_2 0x218f
+#define mmDIG1_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_3 0x2190
+#define mmDIG1_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_4 0x2191
+#define mmDIG1_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_5 0x2192
+#define mmDIG1_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_6 0x2193
+#define mmDIG1_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_7 0x2194
+#define mmDIG1_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1 0x2195
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_32_0 0x2196
+#define mmDIG1_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_32_1 0x2197
+#define mmDIG1_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_44_0 0x2198
+#define mmDIG1_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_44_1 0x2199
+#define mmDIG1_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_48_0 0x219a
+#define mmDIG1_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_48_1 0x219b
+#define mmDIG1_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_STATUS_0 0x219c
+#define mmDIG1_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_STATUS_1 0x219d
+#define mmDIG1_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_INFO0 0x219e
+#define mmDIG1_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_INFO1 0x219f
+#define mmDIG1_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG1_AFMT_60958_0 0x21a0
+#define mmDIG1_AFMT_60958_0_BASE_IDX 2
+#define mmDIG1_AFMT_60958_1 0x21a1
+#define mmDIG1_AFMT_60958_1_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL 0x21a2
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL0 0x21a3
+#define mmDIG1_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL1 0x21a4
+#define mmDIG1_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL2 0x21a5
+#define mmDIG1_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL3 0x21a6
+#define mmDIG1_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG1_AFMT_60958_2 0x21a7
+#define mmDIG1_AFMT_60958_2_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT 0x21a8
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG1_AFMT_STATUS 0x21a9
+#define mmDIG1_AFMT_STATUS_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL 0x21aa
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL 0x21ab
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0 0x21ac
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL 0x21ad
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG1_DIG_BE_CNTL 0x21af
+#define mmDIG1_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_BE_EN_CNTL 0x21b0
+#define mmDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CNTL 0x21d3
+#define mmDIG1_TMDS_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CONTROL_CHAR 0x21d4
+#define mmDIG1_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK 0x21d5
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL 0x21d6
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1 0x21d7
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3 0x21d8
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG1_TMDS_CTL_BITS 0x21da
+#define mmDIG1_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG1_TMDS_DCBALANCER_CONTROL 0x21db
+#define mmDIG1_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_DCBALANCE_CHAR 0x21dc
+#define mmDIG1_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL 0x21dd
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL 0x21de
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_VERSION 0x21e0
+#define mmDIG1_DIG_VERSION_BASE_IDX 2
+#define mmDIG1_DIG_LANE_ENABLE 0x21e1
+#define mmDIG1_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG1_AFMT_CNTL 0x21e6
+#define mmDIG1_AFMT_CNTL_BASE_IDX 2
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL1 0x21e7
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL5 0x21f6
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+// base address: 0x400
+#define mmDP1_DP_LINK_CNTL 0x2208
+#define mmDP1_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP1_DP_PIXEL_FORMAT 0x2209
+#define mmDP1_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP1_DP_MSA_COLORIMETRY 0x220a
+#define mmDP1_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP1_DP_CONFIG 0x220b
+#define mmDP1_DP_CONFIG_BASE_IDX 2
+#define mmDP1_DP_VID_STREAM_CNTL 0x220c
+#define mmDP1_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP1_DP_STEER_FIFO 0x220d
+#define mmDP1_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP1_DP_MSA_MISC 0x220e
+#define mmDP1_DP_MSA_MISC_BASE_IDX 2
+#define mmDP1_DP_VID_TIMING 0x2210
+#define mmDP1_DP_VID_TIMING_BASE_IDX 2
+#define mmDP1_DP_VID_N 0x2211
+#define mmDP1_DP_VID_N_BASE_IDX 2
+#define mmDP1_DP_VID_M 0x2212
+#define mmDP1_DP_VID_M_BASE_IDX 2
+#define mmDP1_DP_LINK_FRAMING_CNTL 0x2213
+#define mmDP1_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP1_DP_HBR2_EYE_PATTERN 0x2214
+#define mmDP1_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP1_DP_VID_MSA_VBID 0x2215
+#define mmDP1_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP1_DP_VID_INTERRUPT_CNTL 0x2216
+#define mmDP1_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CNTL 0x2217
+#define mmDP1_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL 0x2218
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM0 0x2219
+#define mmDP1_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM1 0x221a
+#define mmDP1_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM2 0x221b
+#define mmDP1_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP1_DP_DPHY_8B10B_CNTL 0x221c
+#define mmDP1_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_PRBS_CNTL 0x221d
+#define mmDP1_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_SCRAM_CNTL 0x221e
+#define mmDP1_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_EN 0x221f
+#define mmDP1_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_CNTL 0x2220
+#define mmDP1_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_RESULT 0x2221
+#define mmDP1_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_MST_CNTL 0x2222
+#define mmDP1_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_MST_STATUS 0x2223
+#define mmDP1_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP1_DP_DPHY_FAST_TRAINING 0x2224
+#define mmDP1_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS 0x2225
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL 0x222b
+#define mmDP1_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL1 0x222c
+#define mmDP1_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING1 0x222d
+#define mmDP1_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING2 0x222e
+#define mmDP1_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING3 0x222f
+#define mmDP1_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING4 0x2230
+#define mmDP1_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_N 0x2231
+#define mmDP1_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_N_READBACK 0x2232
+#define mmDP1_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_M 0x2233
+#define mmDP1_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_M_READBACK 0x2234
+#define mmDP1_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP1_DP_SEC_TIMESTAMP 0x2235
+#define mmDP1_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP1_DP_SEC_PACKET_CNTL 0x2236
+#define mmDP1_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSE_RATE_CNTL 0x2237
+#define mmDP1_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSE_RATE_UPDATE 0x2239
+#define mmDP1_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT0 0x223a
+#define mmDP1_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT1 0x223b
+#define mmDP1_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT2 0x223c
+#define mmDP1_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT_UPDATE 0x223d
+#define mmDP1_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP1_DP_MSE_LINK_TIMING 0x223e
+#define mmDP1_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define mmDP1_DP_MSE_MISC_CNTL 0x223f
+#define mmDP1_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x2244
+#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL 0x2245
+#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT0_STATUS 0x2247
+#define mmDP1_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT1_STATUS 0x2248
+#define mmDP1_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT2_STATUS 0x2249
+#define mmDP1_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM1 0x224c
+#define mmDP1_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM2 0x224d
+#define mmDP1_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM3 0x224e
+#define mmDP1_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM4 0x224f
+#define mmDP1_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define mmDP1_DP_DSC_CNTL 0x2252
+#define mmDP1_DP_DSC_CNTL_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL2 0x2253
+#define mmDP1_DP_SEC_CNTL2_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL3 0x2254
+#define mmDP1_DP_SEC_CNTL3_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL4 0x2255
+#define mmDP1_DP_SEC_CNTL4_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL5 0x2256
+#define mmDP1_DP_SEC_CNTL5_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL6 0x2257
+#define mmDP1_DP_SEC_CNTL6_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL7 0x2258
+#define mmDP1_DP_SEC_CNTL7_BASE_IDX 2
+#define mmDP1_DP_DB_CNTL 0x2259
+#define mmDP1_DP_DB_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSA_VBID_MISC 0x225a
+#define mmDP1_DP_MSA_VBID_MISC_BASE_IDX 2
+#define mmDP1_DP_SEC_METADATA_TRANSMISSION 0x225b
+#define mmDP1_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define mmDP1_DP_DSC_BYTES_PER_PIXEL 0x225c
+#define mmDP1_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+// base address: 0x0
+#define mmDC_GENERICA 0x2868
+#define mmDC_GENERICA_BASE_IDX 2
+#define mmUNIPHYA_LINK_CNTL 0x286d
+#define mmUNIPHYA_LINK_CNTL_BASE_IDX 2
+#define mmUNIPHYA_CHANNEL_XBAR_CNTL 0x286e
+#define mmUNIPHYA_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define mmUNIPHYB_LINK_CNTL 0x286f
+#define mmUNIPHYB_LINK_CNTL_BASE_IDX 2
+#define mmUNIPHYB_CHANNEL_XBAR_CNTL 0x2870
+#define mmUNIPHYB_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define mmDCIO_WRCMD_DELAY 0x287e
+#define mmDCIO_WRCMD_DELAY_BASE_IDX 2
+#define mmDC_PINSTRAPS 0x2880
+#define mmDC_PINSTRAPS_BASE_IDX 2
+#define mmDCIO_CLOCK_CNTL 0x2895
+#define mmDCIO_CLOCK_CNTL_BASE_IDX 2
+#define mmDCIO_SOFT_RESET 0x289e
+#define mmDCIO_SOFT_RESET_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+// base address: 0x0
+#define mmDC_GPIO_DDC1_MASK 0x28d0
+#define mmDC_GPIO_DDC1_MASK_BASE_IDX 2
+#define mmDC_GPIO_DDC1_A 0x28d1
+#define mmDC_GPIO_DDC1_A_BASE_IDX 2
+#define mmDC_GPIO_DDC1_EN 0x28d2
+#define mmDC_GPIO_DDC1_EN_BASE_IDX 2
+#define mmDC_GPIO_DDC1_Y 0x28d3
+#define mmDC_GPIO_DDC1_Y_BASE_IDX 2
+#define mmDC_GPIO_DDC2_MASK 0x28d4
+#define mmDC_GPIO_DDC2_MASK_BASE_IDX 2
+#define mmDC_GPIO_DDC2_A 0x28d5
+#define mmDC_GPIO_DDC2_A_BASE_IDX 2
+#define mmDC_GPIO_DDC2_EN 0x28d6
+#define mmDC_GPIO_DDC2_EN_BASE_IDX 2
+#define mmDC_GPIO_DDC2_Y 0x28d7
+#define mmDC_GPIO_DDC2_Y_BASE_IDX 2
+#define mmDC_GPIO_HPD_MASK 0x28f4
+#define mmDC_GPIO_HPD_MASK_BASE_IDX 2
+#define mmDC_GPIO_HPD_A 0x28f5
+#define mmDC_GPIO_HPD_A_BASE_IDX 2
+#define mmDC_GPIO_HPD_EN 0x28f6
+#define mmDC_GPIO_HPD_EN_BASE_IDX 2
+#define mmDC_GPIO_HPD_Y 0x28f7
+#define mmDC_GPIO_HPD_Y_BASE_IDX 2
+#define mmDC_GPIO_PAD_STRENGTH_1 0x28fc
+#define mmDC_GPIO_PAD_STRENGTH_1_BASE_IDX 2
+#define mmPHY_AUX_CNTL 0x28ff
+#define mmPHY_AUX_CNTL_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_1 0x2917
+#define mmDC_GPIO_AUX_CTRL_1_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_2 0x2918
+#define mmDC_GPIO_AUX_CTRL_2_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_3 0x291b
+#define mmDC_GPIO_AUX_CTRL_3_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_4 0x291c
+#define mmDC_GPIO_AUX_CTRL_4_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_5 0x291d
+#define mmDC_GPIO_AUX_CTRL_5_BASE_IDX 2
+#define mmAUXI2C_PAD_ALL_PWR_OK 0x291e
+#define mmAUXI2C_PAD_ALL_PWR_OK_BASE_IDX 2
+
+// addressBlock: azf0endpoint0_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0endpoint1_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0inputendpoint0_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
new file mode 100755
index 000000000000..91969554e36a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
@@ -0,0 +1,22091 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dcn_2_0_3_SH_MASK_HEADER
+#define _dcn_2_0_3_SH_MASK_HEADER
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+//PHYPLLA_PIXCLK_RESYNC_CNTL
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+//PHYPLLB_PIXCLK_RESYNC_CNTL
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+//DP_DTO_DBUF_EN
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN__SHIFT 0x0
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN__SHIFT 0x1
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN_MASK 0x00000001L
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN_MASK 0x00000002L
+//DPREFCLK_CGTT_BLK_CTRL_REG
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//REFCLK_CNTL
+#define REFCLK_CNTL__REFCLK_CLOCK_EN__SHIFT 0x0
+#define REFCLK_CNTL__REFCLK_SRC_SEL__SHIFT 0x1
+#define REFCLK_CNTL__REFCLK_CLOCK_EN_MASK 0x00000001L
+#define REFCLK_CNTL__REFCLK_SRC_SEL_MASK 0x00000002L
+//DCCG_DS_DTO_INCR
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xFFFFFFFFL
+//DCCG_DS_DTO_MODULO
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO__SHIFT 0x0
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO_MASK 0xFFFFFFFFL
+//DCCG_DS_CNTL
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE__SHIFT 0x0
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC__SHIFT 0x4
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE__SHIFT 0x8
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS__SHIFT 0x9
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV__SHIFT 0x10
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS__SHIFT 0x18
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL__SHIFT 0x19
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE_MASK 0x00000001L
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC_MASK 0x00000030L
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE_MASK 0x00000100L
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS_MASK 0x00000200L
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV_MASK 0x00030000L
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS_MASK 0x01000000L
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL_MASK 0x02000000L
+//DCCG_DS_HW_CAL_INTERVAL
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL__SHIFT 0x0
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL_MASK 0xFFFFFFFFL
+//DPREFCLK_CNTL
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL__SHIFT 0x0
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL_MASK 0x00000007L
+//DCE_VERSION
+#define DCE_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define DCE_VERSION__MINOR_VERSION__SHIFT 0x8
+#define DCE_VERSION__MAJOR_VERSION_MASK 0x000000FFL
+#define DCE_VERSION__MINOR_VERSION_MASK 0x0000FF00L
+//DCCG_GTC_CNTL
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x0
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x00000001L
+//DCCG_GTC_DTO_INCR
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR__SHIFT 0x0
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR_MASK 0xFFFFFFFFL
+//DCCG_GTC_DTO_MODULO
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x0
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xFFFFFFFFL
+//DCCG_GTC_CURRENT
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x0
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xFFFFFFFFL
+//MILLISECOND_TIME_BASE_DIV
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x0001FFFFL
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+//DISPCLK_FREQ_CHANGE_CNTL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x0
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x10
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x14
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x19
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x1c
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x1d
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x1e
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x1f
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x00003FFFL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0x000F0000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x00100000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0x0E000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000L
+//DC_MEM_GLOBAL_PWR_REQ_CNTL
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x00000001L
+//DCCG_PERFMON_CNTL
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x5
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x6
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x7
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL__SHIFT 0x8
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV__SHIFT 0xb
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL_MASK 0x00000700L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV_MASK 0xFFFFF800L
+//DCCG_GATE_DISABLE_CNTL
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL__DMCUBCLK_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE__SHIFT 0x1d
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL__DMCUBCLK_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE_MASK 0x10000000L
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE_MASK 0x20000000L
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE_MASK 0x40000000L
+//DISPCLK_CGTT_BLK_CTRL_REG
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//SOCCLK_CGTT_BLK_CTRL_REG
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_CAC_STATUS
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x0
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xFFFFFFFFL
+//MICROSECOND_TIME_BASE_DIV
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x8
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x10
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x11
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007FL
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x00007F00L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x00010000L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x00020000L
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+//DCCG_GATE_DISABLE_CNTL2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE__SHIFT 0x10
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE_MASK 0x00010000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE_MASK 0x00400000L
+//SYMCLK_CGTT_BLK_CTRL_REG
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_DISP_CNTL_REG
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ__SHIFT 0x8
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ_MASK 0x00000100L
+//OTG0_PIXEL_RATE_CNTL
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x4
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE__SHIFT 0x5
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL__SHIFT 0x8
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL__SHIFT 0x9
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x00000010L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE_MASK 0x00000020L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL_MASK 0x00000100L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL_MASK 0x00000200L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DISPOUT_HALF_RATE_EN_MASK 0x00000800L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO0_PHASE
+#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x0
+#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO0_MODULO
+#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x0
+#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xFFFFFFFFL
+//OTG0_PHYPLL_PIXEL_RATE_CNTL
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG1_PIXEL_RATE_CNTL
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x4
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE__SHIFT 0x5
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL__SHIFT 0x8
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL__SHIFT 0x9
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x00000010L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE_MASK 0x00000020L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL_MASK 0x00000100L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL_MASK 0x00000200L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DISPOUT_HALF_RATE_EN_MASK 0x00000800L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO1_PHASE
+#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x0
+#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO1_MODULO
+#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x0
+#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xFFFFFFFFL
+//OTG1_PHYPLL_PIXEL_RATE_CNTL
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//DPPCLK_CGTT_BLK_CTRL_REG
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DPPCLK0_DTO_PARAM
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE__SHIFT 0x0
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO__SHIFT 0x10
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK1_DTO_PARAM
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE__SHIFT 0x0
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO__SHIFT 0x10
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK2_DTO_PARAM
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE__SHIFT 0x0
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO__SHIFT 0x10
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK3_DTO_PARAM
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE__SHIFT 0x0
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO__SHIFT 0x10
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO_MASK 0x00FF0000L
+//DCCG_CAC_STATUS2
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2__SHIFT 0x0
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2_MASK 0x0000000FL
+//SYMCLKA_CLOCK_ENABLE
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC_MASK 0x00000700L
+//SYMCLKB_CLOCK_ENABLE
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC_MASK 0x00000700L
+//DCCG_AUDIO_DTO_SOURCE
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x0
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x4
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL__SHIFT 0xc
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN__SHIFT 0x10
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO__SHIFT 0x14
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO__SHIFT 0x18
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO__SHIFT 0x1c
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x00000007L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x00000030L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL_MASK 0x00003000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN_MASK 0x00010000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO_MASK 0x00100000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO_MASK 0x01000000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO_MASK 0x10000000L
+//DCCG_AUDIO_DTO0_PHASE
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO0_MODULE
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO1_PHASE
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO1_MODULE
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG0_LATCH_VALUE
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG1_LATCH_VALUE
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DPPCLK_DTO_CTRL
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_ENABLE__SHIFT 0x0
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN__SHIFT 0x1
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_ENABLE__SHIFT 0x4
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN__SHIFT 0x5
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_ENABLE__SHIFT 0x8
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN__SHIFT 0x9
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_ENABLE__SHIFT 0xc
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN__SHIFT 0xd
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_ENABLE_MASK 0x00000001L
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN_MASK 0x00000002L
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_ENABLE_MASK 0x00000010L
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN_MASK 0x00000020L
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_ENABLE_MASK 0x00000100L
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN_MASK 0x00000200L
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_ENABLE_MASK 0x00001000L
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN_MASK 0x00002000L
+//DCCG_VSYNC_CNT_CTRL
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE__SHIFT 0x0
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_REFCLK_SEL__SHIFT 0x1
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET__SHIFT 0x2
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL__SHIFT 0x3
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL__SHIFT 0x4
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT__SHIFT 0x8
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN__SHIFT 0x10
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN__SHIFT 0x11
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL__SHIFT 0x18
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL__SHIFT 0x19
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_REFCLK_SEL_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL_MASK 0x000000F0L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT_MASK 0x00000F00L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN_MASK 0x00010000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN_MASK 0x00020000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL_MASK 0x01000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL_MASK 0x02000000L
+//DCCG_VSYNC_CNT_INT_CTRL
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK__SHIFT 0x8
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK__SHIFT 0x9
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK_MASK 0x00000100L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK_MASK 0x00000200L
+
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+//DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0xf
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x11
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x12
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG__SHIFT 0x15
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG__SHIFT 0x16
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x00018000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x00020000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x00040000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG_MASK 0x00200000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG_MASK 0x00400000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+//DISP_INTERRUPT_STATUS
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_GENERITE_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_GENERITE_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK 0x80000000L
+
+//AZ_CLOCK_CNTL
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS__SHIFT 0x0
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x8
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS__SHIFT 0x10
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS_MASK 0x00000001L
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x00000100L
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS_MASK 0x00010000L
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+//AZALIA_CONTROLLER_CLOCK_GATING
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING__SHIFT 0x0
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING_MASK 0x00000001L
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE_MASK 0x00000010L
+//AZALIA_AUDIO_DTO
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x0
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x10
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0x0000FFFFL
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xFFFF0000L
+//AZALIA_AUDIO_DTO_CONTROL
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x8
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x00000300L
+//AZALIA_SOCCLK_CONTROL
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN__SHIFT 0x1
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN_MASK 0x00000002L
+//AZALIA_UNDERFLOW_FILLER_SAMPLE
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x0
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xFFFFFFFFL
+//AZALIA_DATA_DMA_CONTROL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x10
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x11
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS_MASK 0x000000C0L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x00010000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x00020000L
+//AZALIA_BDL_DMA_CONTROL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS_MASK 0x000000C0L
+//AZALIA_RIRB_AND_DP_CONTROL
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x4
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER__SHIFT 0x5
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x00000010L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER_MASK 0x000001E0L
+//AZALIA_CORB_DMA_CONTROL
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+//AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x4
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x00000070L
+//AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x0
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x0000003FL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+//CC_RCU_DC_AUDIO_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//REG_DC_AUDIO_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_sdpif_dispdec
+//DCHUBBUB_SDPIF_CFG0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS__SHIFT 0x3
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS__SHIFT 0x6
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR__SHIFT 0xa
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR__SHIFT 0xb
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR__SHIFT 0xc
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN__SHIFT 0xd
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN__SHIFT 0xe
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL__SHIFT 0xf
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY__SHIFT 0x19
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS_MASK 0x00000006L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_MASK 0x000003C0L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_MASK 0x00000400L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR_MASK 0x00000800L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR_MASK 0x00001000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN_MASK 0x00002000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN_MASK 0x00004000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL_MASK 0x00008000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY_MASK 0x7E000000L
+//DCHUBBUB_SDPIF_PIPE_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL__SHIFT 0x6
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL__SHIFT 0x9
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL_MASK 0x00000007L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL_MASK 0x000001C0L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL_MASK 0x00000E00L
+//DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL__SHIFT 0x6
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL__SHIFT 0x9
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL_MASK 0x00000007L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL_MASK 0x000001C0L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL_MASK 0x00000E00L
+//DCHUBBUB_SDPIF_MEM_PWR_CTRL
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_SDPIF_MEM_PWR_STATUS
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE_MASK 0x00000003L
+//DCHUBBUB_SDPIF_CFG1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP_MASK 0x00000100L
+
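+// Editorial note (illustrative, not part of the generated header): every
+// field in this file is described by a *__SHIFT/*_MASK pair, which amdgpu's
+// generic field helpers turn into plain mask-and-shift arithmetic. A sketch
+// equivalent to the REG_GET_FIELD() helper in amdgpu, plus a hypothetical
+// use against a previously read register value v:
+//
+//   #define REG_GET_FIELD(value, reg, field) \
+//           (((value) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
+//
+//   /* extract the 2-bit port status field (bits 2:1) */
+//   u32 sts = REG_GET_FIELD(v, DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_STATUS);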
+
+// addressBlock: dce_dc_dchubbub_hubbub_ret_path_dispdec
+//DCHUBBUB_RET_PATH_DCC_CFG
+#define DCHUBBUB_RET_PATH_DCC_CFG__DCC_VIDEO_FORMAT_EN__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG__DCC_VIDEO_FORMAT_EN_MASK 0x00000001L
+//DCHUBBUB_RET_PATH_DCC_CFG0_0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_0__DCC_CFG0_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_0__DCC_CFG0_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG0_1
+#define DCHUBBUB_RET_PATH_DCC_CFG0_1__DCC_CFG0_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_1__DCC_CFG0_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG1_0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_0__DCC_CFG1_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_0__DCC_CFG1_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG1_1
+#define DCHUBBUB_RET_PATH_DCC_CFG1_1__DCC_CFG1_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_1__DCC_CFG1_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG2_0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_0__DCC_CFG2_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_0__DCC_CFG2_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG2_1
+#define DCHUBBUB_RET_PATH_DCC_CFG2_1__DCC_CFG2_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_1__DCC_CFG2_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG3_0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_0__DCC_CFG3_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_0__DCC_CFG3_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG3_1
+#define DCHUBBUB_RET_PATH_DCC_CFG3_1__DCC_CFG3_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_1__DCC_CFG3_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG4_0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_0__DCC_CFG4_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_0__DCC_CFG4_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG4_1
+#define DCHUBBUB_RET_PATH_DCC_CFG4_1__DCC_CFG4_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_1__DCC_CFG4_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG5_0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_0__DCC_CFG5_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_0__DCC_CFG5_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG5_1
+#define DCHUBBUB_RET_PATH_DCC_CFG5_1__DCC_CFG5_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_1__DCC_CFG5_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG6_0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_0__DCC_CFG6_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_0__DCC_CFG6_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG6_1
+#define DCHUBBUB_RET_PATH_DCC_CFG6_1__DCC_CFG6_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_1__DCC_CFG6_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG7_0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_0__DCC_CFG7_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_0__DCC_CFG7_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG7_1
+#define DCHUBBUB_RET_PATH_DCC_CFG7_1__DCC_CFG7_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_1__DCC_CFG7_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_MEM_PWR_CTRL
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_RET_PATH_MEM_PWR_STATUS
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE_MASK 0x00000003L
+
+// addressBlock: dce_dc_dchubbub_hubbub_dispdec
+//DCHUBBUB_ARB_DF_REQ_OUTSTAND
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND__SHIFT 0x0
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND__SHIFT 0x10
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND_MASK 0x000001FFL
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_MASK 0x01FF0000L
+//DCHUBBUB_ARB_SAT_LEVEL
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL__SHIFT 0x0
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL_MASK 0xFFFFFFFFL
+//DCHUBBUB_ARB_QOS_FORCE
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE__SHIFT 0x8
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE_MASK 0x0000000FL
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE_MASK 0x00000100L
+//DCHUBBUB_ARB_DRAM_STATE_CNTL
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE__SHIFT 0x1
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE__SHIFT 0x4
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE__SHIFT 0x5
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_AND_SELF_REFRESH_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_FORCE_URGENCY_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_REGARDLESS_OF_ALLOW_SIGNAL__SHIFT 0x9
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE_MASK 0x00000001L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE_MASK 0x00000002L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE_MASK 0x00000010L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE_MASK 0x00000020L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_AND_SELF_REFRESH_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_MASK 0x00000100L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_FORCE_URGENCY_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_REGARDLESS_OF_ALLOW_SIGNAL_MASK 0x00000200L
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT__SHIFT 0x0
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE__SHIFT 0x4
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_ACK__SHIFT 0x6
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_MASK 0x00000003L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE_MASK 0x00000010L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_ACK_MASK 0x00000040L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST_MASK 0x00000100L
+//DCHUBBUB_ARB_TIMEOUT_ENABLE
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE_MASK 0x00000001L
+//DCHUBBUB_GLOBAL_TIMER_CNTL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV__SHIFT 0x0
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE__SHIFT 0xc
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT__SHIFT 0x10
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV_MASK 0x0000000FL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE_MASK 0x00001000L
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT_MASK 0xFFFF0000L
+//SURFACE_CHECK0_ADDRESS_LSB
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK0_ADDRESS_MSB
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK1_ADDRESS_LSB
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK1_ADDRESS_MSB
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK2_ADDRESS_LSB
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK2_ADDRESS_MSB
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK3_ADDRESS_LSB
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK3_ADDRESS_MSB
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE_MASK 0x80000000L
+//VTG0_CONTROL
+#define VTG0_CONTROL__VTG0_FP2__SHIFT 0x0
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT__SHIFT 0x10
+#define VTG0_CONTROL__VTG0_ENABLE__SHIFT 0x1f
+#define VTG0_CONTROL__VTG0_FP2_MASK 0x00007FFFL
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG0_CONTROL__VTG0_ENABLE_MASK 0x80000000L
+//VTG1_CONTROL
+#define VTG1_CONTROL__VTG1_FP2__SHIFT 0x0
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT__SHIFT 0x10
+#define VTG1_CONTROL__VTG1_ENABLE__SHIFT 0x1f
+#define VTG1_CONTROL__VTG1_FP2_MASK 0x00007FFFL
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG1_CONTROL__VTG1_ENABLE_MASK 0x80000000L
+//DCHUBBUB_SOFT_RESET
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET__SHIFT 0x0
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET__SHIFT 0x1
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET__SHIFT 0x4
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET_MASK 0x00000001L
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET_MASK 0x00000002L
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET_MASK 0x00000010L
+//DCHUBBUB_CLOCK_CNTL
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x5
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x6
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000020L
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000040L
+//DCFCLK_CNTL
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS__SHIFT 0x1f
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS_MASK 0x80000000L
+//DCHUBBUB_VLINE_SNAPSHOT
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT__SHIFT 0x0
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT_MASK 0x00000001L
+//DCHUBBUB_CTRL_STATUS
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN__SHIFT 0x0
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN_MASK 0x00000001L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL1
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD__SHIFT 0x6
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS_MASK 0x0000003FL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD_MASK 0xFFFFFFC0L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL2
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN__SHIFT 0x1b
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET__SHIFT 0x1c
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD_MASK 0x07FFFFFFL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN_MASK 0x08000000L
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET_MASK 0x10000000L
+//DCHUBBUB_TIMEOUT_INTERRUPT_STATUS
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS__SHIFT 0x1
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR__SHIFT 0x2
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK__SHIFT 0x3
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE_MASK 0x00000001L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS_MASK 0x00000002L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK_MASK 0x000000F8L
+//DCHUBBUB_TEST_DEBUG_INDEX
+#define DCHUBBUB_TEST_DEBUG_INDEX__DCHUBBUB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCHUBBUB_TEST_DEBUG_INDEX__DCHUBBUB_TEST_DEBUG_INDEX_MASK 0x000000FFL
+//DCHUBBUB_TEST_DEBUG_DATA
+#define DCHUBBUB_TEST_DEBUG_DATA__DCHUBBUB_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCHUBBUB_TEST_DEBUG_DATA__DCHUBBUB_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
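+// Editorial note (illustrative): the urgency/clk-change watermark registers
+// above come in four sets (A-D); the active set is chosen through the 2-bit
+// DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT field (presumably values 0-3 select
+// sets A-D) and applied by raising DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST.
+// A hedged sketch using an amdgpu-style REG_SET_FIELD() read-modify-write:
+//
+//   v = REG_SET_FIELD(v, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+//                     DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT, 1); /* set B */
+//   v = REG_SET_FIELD(v, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+//                     DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);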
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+//HUBP0_DCSURF_SURFACE_CONFIG
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP0_DCSURF_ADDR_CONFIG
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP0_DCSURF_TILING_CONFIG
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP0_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP0_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP0_DCSURF_PRI_VIEWPORT_START
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L
+//HUBP0_DCHUBP_CNTL
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP0_HUBP_CLK_CNTL
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+//HUBP0_HUBPREQ_DEBUG_DB
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+//HUBPREQ0_DCSURF_SURFACE_PITCH
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ0_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_CONTROL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL2
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ0_DCSURF_SURFACE_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCN_EXPANSION_MODE
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ0_DCN_TTU_QOS_WM
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ0_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_BLANK_OFFSET_0
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ0_BLANK_OFFSET_1
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ0_DST_DIMENSIONS
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ0_DST_AFTER_SCALER
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ0_PREFETCH_SETTINGS
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ0_PREFETCH_SETTINGS_C
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ0_VBLANK_PARAMETERS_1
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_2
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_3
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_4
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ0_FLIP_PARAMETERS_2
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_4
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_5
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_6
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_7
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ0_PER_LINE_DELIVERY_PRE
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ0_PER_LINE_DELIVERY
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ0_CURSOR_SETTINGS
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ0_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ0_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ0_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+
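+// Editorial note (illustrative): the HUBPREQ surface addresses above are
+// split across a 32-bit *_SURFACE_ADDRESS register and a 16-bit
+// *_SURFACE_ADDRESS_HIGH register, i.e. a 48-bit GPU address. A sketch of
+// composing the two halves (variable names hypothetical, and placing the
+// HIGH bits at <<32 is an assumption about the hardware layout):
+//
+//   u64 addr = ((u64)REG_GET_FIELD(high,
+//                   HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH,
+//                   PRIMARY_SURFACE_ADDRESS_HIGH) << 32) | low;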
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+//HUBPRET0_HUBPRET_CONTROL
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET0_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET0_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET0_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET0_HUBPRET_READ_LINE0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE1
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_INTERRUPT
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET0_HUBPRET_READ_LINE_VALUE
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE_STATUS
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+//CURSOR0_0_CURSOR_CONTROL
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_0_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_0_CURSOR_SIZE
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_0_CURSOR_POSITION
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_0_CURSOR_HOT_SPOT
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_0_CURSOR_STEREO_CONTROL
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_0_CURSOR_DST_OFFSET
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_0_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_0_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_0_DMDATA_ADDRESS_HIGH
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_0_DMDATA_ADDRESS_LOW
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_0_DMDATA_CNTL
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_0_DMDATA_QOS_CNTL
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_0_DMDATA_STATUS
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_0_DMDATA_SW_CNTL
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_0_DMDATA_SW_DATA
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+//HUBP1_DCSURF_SURFACE_CONFIG
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP1_DCSURF_ADDR_CONFIG
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP1_DCSURF_TILING_CONFIG
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP1_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP1_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP1_DCSURF_PRI_VIEWPORT_START
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_START
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP1_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L
+//HUBP1_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L
+//HUBP1_DCHUBP_CNTL
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP1_HUBP_CLK_CNTL
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+//HUBP1_HUBPREQ_DEBUG_DB
+#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+//HUBPREQ1_DCSURF_SURFACE_PITCH
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ1_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
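The *_SURFACE_ADDRESS/*_SURFACE_ADDRESS_HIGH pairs above split one GPU address across two registers: the low register carries bits 0-31 (mask 0xFFFFFFFF) and the high register carries bits 32-47 (mask 0x0000FFFF), i.e. 48 usable address bits. A small sketch of that split; the function is local to this illustration, not part of the patch:

#include <stdint.h>

/* Split a 48-bit surface address into the LOW/HIGH register payloads. */
static inline void split_surface_address(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(addr & 0xFFFFFFFFULL);     /* ..._SURFACE_ADDRESS      */
	*hi = (uint32_t)((addr >> 32) & 0xFFFFULL); /* ..._SURFACE_ADDRESS_HIGH */
}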
+//HUBPREQ1_DCSURF_SURFACE_CONTROL
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ1_DCSURF_FLIP_CONTROL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ1_DCSURF_FLIP_CONTROL2
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ1_DCSURF_SURFACE_INUSE
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCN_EXPANSION_MODE
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ1_DCN_TTU_QOS_WM
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ1_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ1_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_BLANK_OFFSET_0
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ1_BLANK_OFFSET_1
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ1_DST_DIMENSIONS
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ1_DST_AFTER_SCALER
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ1_PREFETCH_SETTINGS
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ1_PREFETCH_SETTINGS_C
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ1_VBLANK_PARAMETERS_1
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_2
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_3
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_4
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ1_FLIP_PARAMETERS_2
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_4
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_5
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_6
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_7
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ1_PER_LINE_DELIVERY_PRE
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ1_PER_LINE_DELIVERY
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ1_CURSOR_SETTINGS
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ1_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ1_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ1_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+//HUBPRET1_HUBPRET_CONTROL
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET1_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET1_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET1_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET1_HUBPRET_READ_LINE0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE1
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_INTERRUPT
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET1_HUBPRET_READ_LINE_VALUE
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE_STATUS
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
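+// Usage sketch (illustrative comment, not part of the generated register set):
+// a field is decoded from a register read by masking, then shifting, e.g. for
+// a value "v" read from HUBPRET1_HUBPRET_READ_LINE_STATUS:
+//   line1_inside = (v & HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK)
+//                  >> HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT;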
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+//CURSOR0_1_CURSOR_CONTROL
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_1_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_1_CURSOR_SIZE
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_1_CURSOR_POSITION
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_1_CURSOR_HOT_SPOT
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_1_CURSOR_STEREO_CONTROL
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_1_CURSOR_DST_OFFSET
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_1_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_1_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_1_DMDATA_ADDRESS_HIGH
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_1_DMDATA_ADDRESS_LOW
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_1_DMDATA_CNTL
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_1_DMDATA_QOS_CNTL
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_1_DMDATA_STATUS
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_1_DMDATA_SW_CNTL
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_1_DMDATA_SW_DATA
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
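+// Usage sketch (illustrative comment only): a field is programmed by clearing
+// its bits with the _MASK define and OR-ing in the shifted value, e.g. to set
+// the cursor width in a value "v" destined for CURSOR0_1_CURSOR_SIZE:
+//   v &= ~CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH_MASK;
+//   v |= (width << CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH__SHIFT) &
+//        CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH_MASK;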
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+//HUBP2_DCSURF_SURFACE_CONFIG
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP2_DCSURF_ADDR_CONFIG
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP2_DCSURF_TILING_CONFIG
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP2_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP2_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP2_DCSURF_PRI_VIEWPORT_START
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_START
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP2_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L
+//HUBP2_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L
+//HUBP2_DCHUBP_CNTL
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP2_HUBP_CLK_CNTL
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+//HUBP2_HUBPREQ_DEBUG_DB
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+//HUBPREQ2_DCSURF_SURFACE_PITCH
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ2_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_CONTROL
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ2_DCSURF_FLIP_CONTROL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ2_DCSURF_FLIP_CONTROL2
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ2_DCSURF_SURFACE_INUSE
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCN_EXPANSION_MODE
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ2_DCN_TTU_QOS_WM
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ2_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ2_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_BLANK_OFFSET_0
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ2_BLANK_OFFSET_1
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ2_DST_DIMENSIONS
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ2_DST_AFTER_SCALER
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ2_PREFETCH_SETTINGS
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ2_PREFETCH_SETTINGS_C
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ2_VBLANK_PARAMETERS_1
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_2
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_3
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_4
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ2_FLIP_PARAMETERS_2
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_4
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_5
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_6
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_7
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ2_PER_LINE_DELIVERY_PRE
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ2_PER_LINE_DELIVERY
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ2_CURSOR_SETTINGS
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ2_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ2_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ2_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+//HUBPRET2_HUBPRET_CONTROL
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET2_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET2_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET2_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET2_HUBPRET_READ_LINE0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE1
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_INTERRUPT
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET2_HUBPRET_READ_LINE_VALUE
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE_STATUS
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+//CURSOR0_2_CURSOR_CONTROL
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_2_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_2_CURSOR_SIZE
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_2_CURSOR_POSITION
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_2_CURSOR_HOT_SPOT
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_2_CURSOR_STEREO_CONTROL
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_2_CURSOR_DST_OFFSET
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_2_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_2_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_2_DMDATA_ADDRESS_HIGH
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_2_DMDATA_ADDRESS_LOW
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_2_DMDATA_CNTL
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_QOS_CNTL
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_2_DMDATA_STATUS
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_2_DMDATA_SW_CNTL
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_SW_DATA
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
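+/*
+ * Editorial note, not part of the generated header: every field below is
+ * described by a <FIELD>__SHIFT / <FIELD>_MASK pair. A minimal
+ * read-modify-write sketch for CURSOR_WIDTH in CURSOR0_2_CURSOR_SIZE,
+ * assuming 'reg' holds the register offset from the matching _offset.h
+ * header and RREG32()/WREG32() are the usual amdgpu MMIO accessors:
+ *
+ *	u32 v = RREG32(reg);
+ *	v &= ~CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH_MASK;
+ *	v |= (width << CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH__SHIFT) &
+ *	     CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH_MASK;
+ *	WREG32(reg, v);
+ */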
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+//HUBP3_DCSURF_SURFACE_CONFIG
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP3_DCSURF_ADDR_CONFIG
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP3_DCSURF_TILING_CONFIG
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP3_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP3_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP3_DCSURF_PRI_VIEWPORT_START
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L
+//HUBP3_DCHUBP_CNTL
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP3_HUBP_CLK_CNTL
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+//HUBP3_HUBPREQ_DEBUG_DB
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
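+/*
+ * Editorial note: reading a field back is the inverse of the write pattern,
+ * mask first and then shift down. A minimal sketch for the 3-bit
+ * HUBP_UNDERFLOW_STATUS field of HUBP3_DCHUBP_CNTL, with 'v' assumed to be
+ * a raw register read:
+ *
+ *	u32 underflow = (v & HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK) >>
+ *			HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT;
+ */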
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+//HUBPREQ3_DCSURF_SURFACE_PITCH
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ3_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_CONTROL
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL2
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ3_DCSURF_SURFACE_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCN_EXPANSION_MODE
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ3_DCN_TTU_QOS_WM
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ3_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_BLANK_OFFSET_0
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ3_BLANK_OFFSET_1
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ3_DST_DIMENSIONS
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ3_DST_AFTER_SCALER
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ3_PREFETCH_SETTINGS
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ3_PREFETCH_SETTINGS_C
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ3_VBLANK_PARAMETERS_1
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_2
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_3
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_4
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ3_FLIP_PARAMETERS_2
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_4
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_5
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_6
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_7
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ3_PER_LINE_DELIVERY_PRE
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ3_PER_LINE_DELIVERY
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ3_CURSOR_SETTINGS
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ3_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ3_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ3_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+//HUBPRET3_HUBPRET_CONTROL
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET3_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET3_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET3_HUBPRET_READ_LINE0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE1
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_INTERRUPT
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET3_HUBPRET_READ_LINE_VALUE
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_STATUS
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
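+/*
+ * Editorial note: each _MASK above is a contiguous run of bits starting at
+ * its __SHIFT, so the two can be cross-checked at compile time. A
+ * hypothetical sanity check (not in the generated header) for the 2-bit
+ * CROSSBAR_SRC_ALPHA field of HUBPRET3_HUBPRET_CONTROL:
+ *
+ *	BUILD_BUG_ON((HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK >>
+ *		      HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT) != 0x3);
+ */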
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+//CURSOR0_3_CURSOR_CONTROL
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_3_CURSOR_SIZE
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_3_CURSOR_POSITION
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_3_CURSOR_HOT_SPOT
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_3_CURSOR_STEREO_CONTROL
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_3_CURSOR_DST_OFFSET
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_3_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_3_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_3_DMDATA_ADDRESS_HIGH
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_3_DMDATA_ADDRESS_LOW
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_3_DMDATA_CNTL
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_QOS_CNTL
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_3_DMDATA_STATUS
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_3_DMDATA_SW_CNTL
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_SW_DATA
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+//DPP_TOP0_DPP_CONTROL
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+//DPP_TOP0_DPP_SOFT_RESET
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP0_DPP_CRC_VAL_R_G
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP0_DPP_CRC_VAL_B_A
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP0_DPP_CRC_CTRL
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP0_HOST_READ_CONTROL
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG0_FORMAT_CONTROL
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG0_FCNV_FP_BIAS_R
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_G
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_B
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_R
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_G
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_B
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG0_COLOR_KEYER_CONTROL
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG0_COLOR_KEYER_ALPHA
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_RED
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_GREEN
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_BLUE
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_ALPHA_2BIT_LUT
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+//CNVC_CUR0_CURSOR0_CONTROL
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR0_CURSOR0_COLOR0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR0_CURSOR0_COLOR1
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR0_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+//DSCL0_SCL_COEF_RAM_TAP_SELECT
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL0_SCL_COEF_RAM_TAP_DATA
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL0_SCL_MODE
+#define DSCL0_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL0_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL0_SCL_TAP_CONTROL
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL0_DSCL_CONTROL
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL0_DSCL_2TAP_CONTROL
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL0_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL0_SCL_HORZ_FILTER_INIT
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL0_SCL_HORZ_FILTER_INIT_C
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL0_SCL_VERT_FILTER_INIT
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_INIT_BOT
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL0_SCL_VERT_FILTER_INIT_C
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL0_SCL_BLACK_OFFSET
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL0_DSCL_UPDATE
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL0_DSCL_AUTOCAL
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL0_OTG_H_BLANK
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL0_OTG_V_BLANK
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL0_RECOUT_START
+#define DSCL0_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL0_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL0_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL0_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL0_RECOUT_SIZE
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL0_MPC_SIZE
+#define DSCL0_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL0_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL0_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL0_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL0_LB_DATA_FORMAT
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL0_LB_MEMORY_CTRL
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL0_LB_V_COUNTER
+#define DSCL0_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL0_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL0_DSCL_MEM_PWR_CTRL
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL0_DSCL_MEM_PWR_STATUS
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL0_OBUF_CONTROL
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL0_OBUF_MEM_PWR_CTRL
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+//CM0_CM_CONTROL
+#define CM0_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM0_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM0_CM_ICSC_CONTROL
+#define CM0_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM0_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM0_CM_ICSC_C11_C12
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C13_C14
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C21_C22
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C23_C24
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C31_C32
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C33_C34
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C11_C12
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C13_C14
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C21_C22
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C23_C24
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C31_C32
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C33_C34
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_CONTROL
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM0_CM_GAMUT_REMAP_C11_C12
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C13_C14
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C21_C22
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C23_C24
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C31_C32
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C33_C34
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C11_C12
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C13_C14
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C21_C22
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C23_C24
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C31_C32
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C33_C34
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM0_CM_BIAS_CR_R
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM0_CM_BIAS_Y_G_CB_B
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM0_CM_DGAM_CONTROL
+#define CM0_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM0_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM0_CM_DGAM_LUT_INDEX
+#define CM0_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM0_CM_DGAM_LUT_DATA
+#define CM0_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM0_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM0_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM0_CM_DGAM_RAMA_START_CNTL_B
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMA_START_CNTL_G
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMA_START_CNTL_R
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL1_B
+#define CM0_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL2_B
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMA_END_CNTL1_G
+#define CM0_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL2_G
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMA_END_CNTL1_R
+#define CM0_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL2_R
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMA_REGION_0_1
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_2_3
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_4_5
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_6_7
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_8_9
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_10_11
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_12_13
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_14_15
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_START_CNTL_B
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMB_START_CNTL_G
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMB_START_CNTL_R
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL1_B
+#define CM0_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL2_B
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMB_END_CNTL1_G
+#define CM0_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL2_G
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMB_END_CNTL1_R
+#define CM0_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL2_R
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMB_REGION_0_1
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_2_3
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_4_5
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_6_7
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_8_9
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_10_11
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_12_13
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_14_15
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_CONTROL
+#define CM0_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM0_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM0_CM_BLNDGAM_LUT_INDEX
+#define CM0_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM0_CM_BLNDGAM_LUT_DATA
+#define CM0_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM0_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM0_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_HDR_MULT_COEF
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM0_CM_MEM_PWR_CTRL
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM0_CM_MEM_PWR_STATUS
+#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM0_CM_DEALPHA
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM0_CM_COEF_FORMAT
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM0_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM0_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
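Each register in this header pairs a __SHIFT define with a _MASK define so a field can be read or updated with plain bitwise arithmetic. The following is a minimal, self-contained C sketch of that pattern, not the driver's actual register accessors: field_get(), field_set(), and the standalone main() are illustrative stand-ins, and only the two CM0_CM_HDR_MULT_COEF macros are copied verbatim from the defines above.

#include <stdint.h>
#include <stdio.h>

/* Copied verbatim from this header: a 19-bit coefficient at bit 0. */
#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK   0x0007FFFFL

/* Extract a field: isolate it with the mask, then shift it down. */
static uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Update a field: clear it with the mask, then OR in the shifted value. */
static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = field_set(reg, CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK,
			CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT,
			0x1F000);
	printf("coef = 0x%x\n",
	       field_get(reg, CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK,
			 CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT));
	return 0;
}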
+//CM0_CM_SHAPER_CONTROL
+#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM0_CM_SHAPER_OFFSET_R
+#define CM0_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_OFFSET_G
+#define CM0_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_OFFSET_B
+#define CM0_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_SCALE_R
+#define CM0_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM0_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM0_CM_SHAPER_SCALE_G_B
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_SHAPER_LUT_INDEX
+#define CM0_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM0_CM_SHAPER_LUT_DATA
+#define CM0_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM0_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
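The CM_SHAPER_LUT_INDEX / CM_SHAPER_LUT_DATA pair defined just above reads like a conventional index/data window onto the shaper LUT RAM. Assuming that access pattern (an inference from the names; this header documents only the field layouts), a hedged sketch of loading entries might look like the following. The mmio_write() helper, the regs[] backing store, and the REG_* offsets are hypothetical placeholders, not real driver code; only the two _MASK macros are copied from the defines above.

#include <stdint.h>

/* Copied verbatim from this header. */
#define CM0_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
#define CM0_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK   0x00FFFFFFL

/* Hypothetical register file standing in for real MMIO access. */
enum { REG_SHAPER_LUT_INDEX, REG_SHAPER_LUT_DATA, REG_COUNT }; /* made-up */
static uint32_t regs[REG_COUNT];

static void mmio_write(int reg, uint32_t val)
{
	regs[reg] = val; /* a real driver would write the MMIO aperture */
}

/*
 * Assumed sequence: select an entry through the 8-bit index register,
 * then write the 24-bit value through the data register. Whether the
 * index auto-increments on data writes is hardware behaviour this
 * header does not state, so each entry is indexed explicitly here.
 */
static void load_shaper_lut(const uint32_t *lut, int n)
{
	for (int i = 0; i < n; i++) {
		mmio_write(REG_SHAPER_LUT_INDEX,
			   (uint32_t)i & CM0_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK);
		mmio_write(REG_SHAPER_LUT_DATA,
			   lut[i] & CM0_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK);
	}
}

int main(void)
{
	const uint32_t lut[3] = { 0x000000, 0x7FFFFF, 0xFFFFFF };

	load_shaper_lut(lut, 3);
	return 0;
}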
+//CM0_CM_SHAPER_RAMA_START_CNTL_B
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_START_CNTL_G
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_START_CNTL_R
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_B
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_G
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_R
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_REGION_0_1
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_2_3
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_4_5
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_6_7
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_8_9
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_10_11
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_12_13
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_14_15
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_16_17
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_18_19
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_20_21
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_22_23
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_24_25
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_26_27
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_28_29
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_30_31
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_32_33
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_B
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_G
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_R
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_B
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_G
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_R
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_REGION_0_1
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_2_3
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_4_5
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_6_7
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_8_9
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_10_11
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_12_13
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_14_15
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_16_17
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_18_19
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_20_21
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_22_23
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_24_25
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_26_27
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_28_29
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_30_31
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_32_33
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_MEM_PWR_CTRL2
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM0_CM_MEM_PWR_STATUS2
+#define CM0_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM0_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM0_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM0_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM0_CM_3DLUT_MODE
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM0_CM_3DLUT_INDEX
+#define CM0_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM0_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM0_CM_3DLUT_DATA
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM0_CM_3DLUT_DATA_30BIT
+#define CM0_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM0_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM0_CM_3DLUT_READ_WRITE_CONTROL
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM0_CM_3DLUT_OUT_NORM_FACTOR
+#define CM0_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM0_CM_3DLUT_OUT_OFFSET_R
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM0_CM_3DLUT_OUT_OFFSET_G
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM0_CM_3DLUT_OUT_OFFSET_B
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
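/*
 * A minimal usage sketch, not part of the generated header: every register
 * field above is emitted as a __SHIFT/_MASK pair, and driver code consumes
 * them through token-pasting helpers in the style of amdgpu's
 * REG_GET_FIELD()/REG_SET_FIELD(). The generic helpers and the packing
 * function below are illustrative assumptions built only on the
 * CM0_CM_3DLUT_MODE definitions above, not the driver's own macros.
 */
#define REG_FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define REG_FIELD_SET(val, reg, field, x) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((unsigned int)(x) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Hypothetical example: pack the 2-bit mode and 1-bit size-select fields
 * of CM0_CM_3DLUT_MODE into a single register value. */
static inline unsigned int cm0_3dlut_mode_pack(unsigned int mode,
					       unsigned int size_sel)
{
	unsigned int v = 0;

	v = REG_FIELD_SET(v, CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mode);
	v = REG_FIELD_SET(v, CM0_CM_3DLUT_MODE, CM_3DLUT_SIZE, size_sel);
	return v;
}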
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+//DPP_TOP1_DPP_CONTROL
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+//DPP_TOP1_DPP_SOFT_RESET
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP1_DPP_CRC_VAL_R_G
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP1_DPP_CRC_VAL_B_A
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP1_DPP_CRC_CTRL
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP1_HOST_READ_CONTROL
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG1_FORMAT_CONTROL
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG1_FCNV_FP_BIAS_R
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_BIAS_G
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_BIAS_B
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_R
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_G
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_B
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG1_COLOR_KEYER_CONTROL
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG1_COLOR_KEYER_ALPHA
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_RED
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_GREEN
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_BLUE
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_ALPHA_2BIT_LUT
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+//CNVC_CUR1_CURSOR0_CONTROL
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR1_CURSOR0_COLOR0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR1_CURSOR0_COLOR1
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR1_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+//DSCL1_SCL_COEF_RAM_TAP_SELECT
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL1_SCL_COEF_RAM_TAP_DATA
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL1_SCL_MODE
+#define DSCL1_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL1_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL1_SCL_TAP_CONTROL
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL1_DSCL_CONTROL
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL1_DSCL_2TAP_CONTROL
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL1_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL1_SCL_HORZ_FILTER_INIT
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL1_SCL_HORZ_FILTER_INIT_C
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL1_SCL_VERT_FILTER_INIT
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_INIT_BOT
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL1_SCL_VERT_FILTER_INIT_C
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL1_SCL_BLACK_OFFSET
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL1_DSCL_UPDATE
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL1_DSCL_AUTOCAL
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL1_OTG_H_BLANK
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL1_OTG_V_BLANK
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL1_RECOUT_START
+#define DSCL1_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL1_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL1_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL1_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL1_RECOUT_SIZE
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL1_MPC_SIZE
+#define DSCL1_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL1_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL1_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL1_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL1_LB_DATA_FORMAT
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL1_LB_MEMORY_CTRL
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL1_LB_V_COUNTER
+#define DSCL1_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL1_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL1_DSCL_MEM_PWR_CTRL
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL1_DSCL_MEM_PWR_STATUS
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL1_OBUF_CONTROL
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL1_OBUF_MEM_PWR_CTRL
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+//CM1_CM_CONTROL
+#define CM1_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM1_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM1_CM_ICSC_CONTROL
+#define CM1_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM1_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM1_CM_ICSC_C11_C12
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C13_C14
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C21_C22
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C23_C24
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C31_C32
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C33_C34
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C11_C12
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C13_C14
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C21_C22
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C23_C24
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C31_C32
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C33_C34
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_CONTROL
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM1_CM_GAMUT_REMAP_C11_C12
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C13_C14
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C21_C22
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C23_C24
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C31_C32
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C33_C34
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C11_C12
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C13_C14
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C21_C22
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C23_C24
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C31_C32
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C33_C34
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM1_CM_BIAS_CR_R
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM1_CM_BIAS_Y_G_CB_B
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM1_CM_DGAM_CONTROL
+#define CM1_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM1_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM1_CM_DGAM_LUT_INDEX
+#define CM1_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM1_CM_DGAM_LUT_DATA
+#define CM1_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM1_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM1_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM1_CM_DGAM_RAMA_START_CNTL_B
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMA_START_CNTL_G
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMA_START_CNTL_R
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL1_B
+#define CM1_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL2_B
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMA_END_CNTL1_G
+#define CM1_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL2_G
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMA_END_CNTL1_R
+#define CM1_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL2_R
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMA_REGION_0_1
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_2_3
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_4_5
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_6_7
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_8_9
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_10_11
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_12_13
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_14_15
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_START_CNTL_B
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMB_START_CNTL_G
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMB_START_CNTL_R
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL1_B
+#define CM1_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL2_B
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMB_END_CNTL1_G
+#define CM1_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL2_G
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMB_END_CNTL1_R
+#define CM1_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL2_R
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMB_REGION_0_1
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_2_3
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_4_5
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_6_7
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_8_9
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_10_11
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_12_13
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_14_15
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
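/*
 * Editorial sketch, not part of this patch: each REGION_n_m register above
 * packs two exponential-region descriptors into one 32-bit word -- a 9-bit
 * LUT offset (bits 0-8 / 16-24) and a 3-bit segment count (bits 12-14 /
 * 28-30). pack_dgam_ramb_region_0_1() below is a hypothetical helper, not a
 * driver function; it uses only the SHIFT/MASK macros defined in this hunk.
 */
#include <stdint.h>

static inline uint32_t pack_dgam_ramb_region_0_1(uint32_t off0, uint32_t seg0,
						 uint32_t off1, uint32_t seg1)
{
	uint32_t v = 0;

	/* region 0 descriptor in the low half-word */
	v |= (off0 << CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT)
	     & CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK;
	v |= (seg0 << CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT)
	     & CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK;
	/* region 1 descriptor in the high half-word */
	v |= (off1 << CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT)
	     & CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK;
	v |= (seg1 << CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT)
	     & CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK;
	return v;
}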
+//CM1_CM_BLNDGAM_CONTROL
+#define CM1_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM1_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM1_CM_BLNDGAM_LUT_INDEX
+#define CM1_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM1_CM_BLNDGAM_LUT_DATA
+#define CM1_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM1_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM1_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_HDR_MULT_COEF
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM1_CM_MEM_PWR_CTRL
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM1_CM_MEM_PWR_STATUS
+#define CM1_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM1_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
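/*
 * Editorial sketch, not part of this patch: reading a status field back out
 * with its generated MASK/SHIFT pair. field_get() is a hypothetical helper;
 * the raw register value would come from the driver's own MMIO read path,
 * which is not shown here.
 */
#include <stdint.h>

static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/*
 * Example: decode the blend-gamma memory power state from a raw
 * CM1_CM_MEM_PWR_STATUS read:
 *
 *	state = field_get(raw,
 *			  CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK,
 *			  CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT);
 */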
+//CM1_CM_DEALPHA
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM1_CM_COEF_FORMAT
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM1_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM1_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM1_CM_SHAPER_CONTROL
+#define CM1_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM1_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM1_CM_SHAPER_OFFSET_R
+#define CM1_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_OFFSET_G
+#define CM1_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_OFFSET_B
+#define CM1_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_SCALE_R
+#define CM1_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM1_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM1_CM_SHAPER_SCALE_G_B
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM1_CM_SHAPER_LUT_INDEX
+#define CM1_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM1_CM_SHAPER_LUT_DATA
+#define CM1_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM1_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
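/*
 * Editorial sketch, not part of this patch: the usual read-modify-write
 * update these SHIFT/MASK pairs are generated for. field_set() is a
 * hypothetical helper, and the field values below are illustrative only --
 * their hardware meaning is not asserted here.
 */
#include <stdint.h>

static inline uint32_t field_set(uint32_t reg, uint32_t mask,
				 uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * Example: program the shaper-LUT write-select and write-enable fields in
 * one CM1_CM_SHAPER_LUT_WRITE_EN_MASK value:
 *
 *	v = field_set(v, CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK,
 *		      CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT, 1);
 *	v = field_set(v, CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK,
 *		      CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT, 0x7);
 */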
+//CM1_CM_SHAPER_RAMA_START_CNTL_B
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_START_CNTL_G
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_START_CNTL_R
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_B
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_G
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_R
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_REGION_0_1
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_2_3
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_4_5
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_6_7
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_8_9
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_10_11
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_12_13
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_14_15
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_16_17
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_18_19
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_20_21
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_22_23
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_24_25
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_26_27
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_28_29
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_30_31
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_32_33
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_B
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_G
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_R
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_B
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_G
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_R
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_REGION_0_1
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_2_3
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_4_5
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_6_7
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_8_9
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_10_11
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_12_13
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_14_15
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_16_17
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_18_19
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_20_21
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_22_23
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_24_25
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_26_27
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_28_29
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_30_31
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_32_33
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_MEM_PWR_CTRL2
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM1_CM_MEM_PWR_STATUS2
+#define CM1_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM1_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM1_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM1_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM1_CM_3DLUT_MODE
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM1_CM_3DLUT_INDEX
+#define CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM1_CM_3DLUT_DATA
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM1_CM_3DLUT_DATA_30BIT
+#define CM1_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM1_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM1_CM_3DLUT_READ_WRITE_CONTROL
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM1_CM_3DLUT_OUT_NORM_FACTOR
+#define CM1_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM1_CM_3DLUT_OUT_OFFSET_R
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM1_CM_3DLUT_OUT_OFFSET_G
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM1_CM_3DLUT_OUT_OFFSET_B
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
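The CM1_CM_3DLUT_INDEX/CM1_CM_3DLUT_DATA pair above is the usual indirect-access interface: software programs a start index once, then streams packed entries through the data port, each 32-bit word carrying two 16-bit samples (DATA0/DATA1). A minimal sketch using only the field encodings defined above; the reg_read()/reg_write() MMIO helpers and the mm* offsets are hypothetical, and the index auto-increment on data writes is the conventional assumption for this kind of port:

#include <stdint.h>

/* Hypothetical MMIO accessors and register offsets; not part of this header. */
extern uint32_t reg_read(uint32_t offset);
extern void reg_write(uint32_t offset, uint32_t value);
extern const uint32_t mmCM1_CM_3DLUT_INDEX, mmCM1_CM_3DLUT_DATA;

/* Stream 'count' packed LUT words starting at entry 'start'.  Assumes the
 * write-enable bits in CM1_CM_3DLUT_READ_WRITE_CONTROL were set beforehand
 * and that the index auto-increments after each write to the data port. */
static void cm1_3dlut_load(uint32_t start, const uint32_t *words, int count)
{
	uint32_t idx = reg_read(mmCM1_CM_3DLUT_INDEX);

	idx &= ~CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK;
	idx |= (start << CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT) &
	       CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK;
	reg_write(mmCM1_CM_3DLUT_INDEX, idx);

	for (int i = 0; i < count; i++)	/* two 16-bit entries per word */
		reg_write(mmCM1_CM_3DLUT_DATA, words[i]);
}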
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+//DPP_TOP2_DPP_CONTROL
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+//DPP_TOP2_DPP_SOFT_RESET
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP2_DPP_CRC_VAL_R_G
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP2_DPP_CRC_VAL_B_A
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP2_DPP_CRC_CTRL
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP2_HOST_READ_CONTROL
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
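Every definition in this header comes as a __SHIFT/_MASK pair, so all field access reduces to one read-modify-write idiom; in-tree, amdgpu wraps it in its REG_GET_FIELD()/REG_SET_FIELD() token-pasting macros. A self-contained sketch of the same pattern (the REG_FIELD_GET/REG_FIELD_SET names and the example function are illustrative, not from this header):

#include <stdint.h>

/* Extract a field: mask, then shift down. */
#define REG_FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Replace a field: clear its bits, then OR in the shifted new value. */
#define REG_FIELD_SET(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Example against the DPP_TOP2_DPP_CONTROL fields defined above:
 * turn on the DPP clock. */
static inline uint32_t dpp2_enable_clock(uint32_t ctl)
{
	return REG_FIELD_SET(ctl, DPP_TOP2_DPP_CONTROL, DPP_CLOCK_ENABLE, 1);
}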
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG2_FORMAT_CONTROL
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG2_FCNV_FP_BIAS_R
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_BIAS_G
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_BIAS_B
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_R
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_G
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_B
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG2_COLOR_KEYER_CONTROL
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG2_COLOR_KEYER_ALPHA
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_RED
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_GREEN
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_BLUE
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_ALPHA_2BIT_LUT
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+//CNVC_CUR2_CURSOR0_CONTROL
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR2_CURSOR0_COLOR0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR2_CURSOR0_COLOR1
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR2_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+//DSCL2_SCL_COEF_RAM_TAP_SELECT
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL2_SCL_COEF_RAM_TAP_DATA
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL2_SCL_MODE
+#define DSCL2_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL2_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL2_SCL_TAP_CONTROL
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL2_DSCL_CONTROL
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL2_DSCL_2TAP_CONTROL
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL2_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL2_SCL_HORZ_FILTER_INIT
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL2_SCL_HORZ_FILTER_INIT_C
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL2_SCL_VERT_FILTER_INIT
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_INIT_BOT
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL2_SCL_VERT_FILTER_INIT_C
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL2_SCL_BLACK_OFFSET
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL2_DSCL_UPDATE
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL2_DSCL_AUTOCAL
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL2_OTG_H_BLANK
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL2_OTG_V_BLANK
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL2_RECOUT_START
+#define DSCL2_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL2_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL2_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL2_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL2_RECOUT_SIZE
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL2_MPC_SIZE
+#define DSCL2_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL2_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL2_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL2_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL2_LB_DATA_FORMAT
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL2_LB_MEMORY_CTRL
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL2_LB_V_COUNTER
+#define DSCL2_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL2_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL2_DSCL_MEM_PWR_CTRL
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL2_DSCL_MEM_PWR_STATUS
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
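The *_MEM_PWR_* register pairs in this file share one layout per memory instance: a 2-bit *_FORCE field, a 1-bit *_DIS gating-disable, and a read-only 2-bit *_STATE. Reading a state back with the REG_FIELD_GET helper sketched earlier (illustrative; the header does not say what the state encodings mean):

/* Returns the raw 2-bit LUT memory power state from a status readback. */
static inline uint32_t dscl2_lut_mem_pwr_state(uint32_t status)
{
	return REG_FIELD_GET(status, DSCL2_DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE);
}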
+//DSCL2_OBUF_CONTROL
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL2_OBUF_MEM_PWR_CTRL
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+//CM2_CM_CONTROL
+#define CM2_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM2_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM2_CM_ICSC_CONTROL
+#define CM2_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM2_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM2_CM_ICSC_C11_C12
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C13_C14
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C21_C22
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C23_C24
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C31_C32
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C33_C34
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C11_C12
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C13_C14
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C21_C22
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C23_C24
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C31_C32
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C33_C34
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_CONTROL
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM2_CM_GAMUT_REMAP_C11_C12
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C13_C14
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C21_C22
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C23_C24
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C31_C32
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C33_C34
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C11_C12
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C13_C14
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C21_C22
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C23_C24
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C31_C32
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C33_C34
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM2_CM_BIAS_CR_R
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM2_CM_BIAS_Y_G_CB_B
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM2_CM_DGAM_CONTROL
+#define CM2_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM2_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM2_CM_DGAM_LUT_INDEX
+#define CM2_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM2_CM_DGAM_LUT_DATA
+#define CM2_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM2_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM2_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
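/*
 * Illustrative sketch, not part of this commit: every field in this header
 * is described by a __SHIFT/_MASK pair, so a 32-bit register image can be
 * packed and unpacked generically. A minimal sketch assuming <stdint.h>;
 * in-tree, the DC reg_helper macros (REG_GET/REG_SET/REG_UPDATE) serve
 * this role:
 *
 *	static inline uint32_t field_get(uint32_t reg, uint32_t mask,
 *					 uint32_t shift)
 *	{
 *		return (reg & mask) >> shift;
 *	}
 *
 *	static inline uint32_t field_set(uint32_t reg, uint32_t mask,
 *					 uint32_t shift, uint32_t val)
 *	{
 *		return (reg & ~mask) | ((val << shift) & mask);
 *	}
 *
 *	// e.g. enable degamma LUT writes on all three channels:
 *	v = field_set(v, CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK,
 *		      CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT, 0x7);
 */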
+//CM2_CM_DGAM_RAMA_START_CNTL_B
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMA_START_CNTL_G
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMA_START_CNTL_R
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL1_B
+#define CM2_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL2_B
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMA_END_CNTL1_G
+#define CM2_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL2_G
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMA_END_CNTL1_R
+#define CM2_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL2_R
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMA_REGION_0_1
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_2_3
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_4_5
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_6_7
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_8_9
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_10_11
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_12_13
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_14_15
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
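/*
 * Illustrative sketch, not part of this commit: each RAMA/RAMB
 * _REGION_N_M word packs two gamma-curve regions, a 9-bit LUT_OFFSET and
 * a 3-bit NUM_SEGMENTS per region. Using the hypothetical field_set()
 * helper from the sketch above (the power-of-two segment-count encoding
 * is an assumption drawn from how the DC gamma code programs regions):
 *
 *	uint32_t v = 0;
 *	v = field_set(v, CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK,
 *		      CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT, 0);
 *	v = field_set(v, CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK,
 *		      CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT, 4); // 2^4 segments
 */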
+//CM2_CM_DGAM_RAMB_START_CNTL_B
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_START_CNTL_G
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_START_CNTL_R
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL1_B
+#define CM2_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_B
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_END_CNTL1_G
+#define CM2_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_G
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_END_CNTL1_R
+#define CM2_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_R
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_REGION_0_1
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_2_3
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_4_5
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_6_7
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_8_9
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_10_11
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_12_13
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_14_15
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_CONTROL
+#define CM2_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM2_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM2_CM_BLNDGAM_LUT_INDEX
+#define CM2_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM2_CM_BLNDGAM_LUT_DATA
+#define CM2_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
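/*
 * Illustrative sketch, not part of this commit: the blend-gamma LUT is
 * programmed indirectly through the INDEX/DATA pair above. Register
 * offsets live in the companion _offset header; mmio_write32() and the
 * mm* names below are placeholders, and the index auto-incrementing on
 * each DATA write is an assumption based on common DCN LUT behaviour:
 *
 *	mmio_write32(mmCM2_CM_BLNDGAM_LUT_INDEX, 0);
 *	for (i = 0; i < lut_size; i++)
 *		mmio_write32(mmCM2_CM_BLNDGAM_LUT_DATA, lut[i] &
 *			     CM2_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK);
 */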
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_HDR_MULT_COEF
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM2_CM_MEM_PWR_CTRL
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM2_CM_MEM_PWR_STATUS
+#define CM2_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM2_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM2_CM_DEALPHA
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM2_CM_COEF_FORMAT
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM2_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM2_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM2_CM_SHAPER_CONTROL
+#define CM2_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM2_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM2_CM_SHAPER_OFFSET_R
+#define CM2_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_OFFSET_G
+#define CM2_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_OFFSET_B
+#define CM2_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_SCALE_R
+#define CM2_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM2_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM2_CM_SHAPER_SCALE_G_B
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM2_CM_SHAPER_LUT_INDEX
+#define CM2_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM2_CM_SHAPER_LUT_DATA
+#define CM2_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM2_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM2_CM_SHAPER_RAMA_START_CNTL_B
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_START_CNTL_G
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_START_CNTL_R
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_B
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_G
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_R
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_REGION_0_1
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_2_3
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_4_5
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_6_7
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_8_9
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_10_11
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_12_13
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_14_15
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_16_17
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_18_19
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_20_21
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_22_23
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_24_25
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_26_27
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_28_29
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_30_31
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_32_33
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_B
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_G
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_R
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_B
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_G
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_R
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_REGION_0_1
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_2_3
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_4_5
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_6_7
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_8_9
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_10_11
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_12_13
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_14_15
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_16_17
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_18_19
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_20_21
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_22_23
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_24_25
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_26_27
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_28_29
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_30_31
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_32_33
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_MEM_PWR_CTRL2
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM2_CM_MEM_PWR_STATUS2
+#define CM2_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM2_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM2_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM2_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM2_CM_3DLUT_MODE
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM2_CM_3DLUT_INDEX
+#define CM2_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM2_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM2_CM_3DLUT_DATA
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM2_CM_3DLUT_DATA_30BIT
+#define CM2_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM2_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM2_CM_3DLUT_READ_WRITE_CONTROL
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM2_CM_3DLUT_OUT_NORM_FACTOR
+#define CM2_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM2_CM_3DLUT_OUT_OFFSET_R
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM2_CM_3DLUT_OUT_OFFSET_G
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM2_CM_3DLUT_OUT_OFFSET_B
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+
+
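/*
 * Minimal usage sketch, not part of the generated header above: each
 * register field is described by a __SHIFT/_MASK pair, so programming a
 * field is a read-modify-write of the containing register, and the
 * INDEX/DATA pair at the end of the CM2 block implies an indexed RAM
 * write sequence.  The mmio_rd32()/mmio_wr32() accessors and the
 * mm_index/mm_data offset parameters are hypothetical placeholders, as
 * is the assumption (typical for INDEX/DATA pairs, but not stated in
 * this header) that the index auto-increments on each DATA write.
 */
#include <stdint.h>

extern uint32_t mmio_rd32(uint32_t offset);              /* hypothetical */
extern void mmio_wr32(uint32_t offset, uint32_t value);  /* hypothetical */

static inline uint32_t reg_set_field(uint32_t reg, uint32_t mask,
				     uint32_t shift, uint32_t val)
{
	/* Clear the field's bits, then OR in the shifted new value. */
	return (reg & ~mask) | ((val << shift) & mask);
}

static void cm2_3dlut_write(uint32_t mm_index, uint32_t mm_data,
			    const uint32_t *entries, uint32_t count)
{
	uint32_t i, v;

	/* Point the LUT RAM index at entry 0 before streaming data. */
	v = mmio_rd32(mm_index);
	v = reg_set_field(v, CM2_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK,
			  CM2_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT, 0);
	mmio_wr32(mm_index, v);

	/*
	 * Each DATA register carries two 16-bit components (DATA0 in the
	 * low half, DATA1 in the high half); count is assumed even.
	 */
	for (i = 0; i + 1 < count; i += 2) {
		v = reg_set_field(0, CM2_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK,
				  CM2_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT,
				  entries[i]);
		v = reg_set_field(v, CM2_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK,
				  CM2_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT,
				  entries[i + 1]);
		mmio_wr32(mm_data, v);
	}
}
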
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+//DPP_TOP3_DPP_CONTROL
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+//DPP_TOP3_DPP_SOFT_RESET
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP3_DPP_CRC_VAL_R_G
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP3_DPP_CRC_VAL_B_A
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP3_DPP_CRC_CTRL
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP3_HOST_READ_CONTROL
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG3_FORMAT_CONTROL
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG3_FCNV_FP_BIAS_R
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_BIAS_G
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_BIAS_B
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_R
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_G
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_B
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG3_COLOR_KEYER_CONTROL
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG3_COLOR_KEYER_ALPHA
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_RED
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_GREEN
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_BLUE
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_ALPHA_2BIT_LUT
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
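/*
 * Companion sketch for the read direction, again not part of the
 * generated header: a field value is recovered by masking and shifting
 * down with the same generated pair.  Shown here for the low/high
 * bounds packed into CNVC_CFG3_COLOR_KEYER_RED above; mmio_rd32() and
 * the mm_keyer_red offset parameter are hypothetical placeholders,
 * redeclared so this sketch stands alone.
 */
extern uint32_t mmio_rd32(uint32_t offset);              /* hypothetical */

static inline uint32_t reg_get_field(uint32_t reg, uint32_t mask,
				     uint32_t shift)
{
	/* Isolate the field's bits and shift them down to bit 0. */
	return (reg & mask) >> shift;
}

static void cnvc_keyer_red_bounds(uint32_t mm_keyer_red,
				  uint32_t *low, uint32_t *high)
{
	uint32_t v = mmio_rd32(mm_keyer_red);

	*low  = reg_get_field(v,
		CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK,
		CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT);
	*high = reg_get_field(v,
		CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK,
		CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT);
}
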
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+//CNVC_CUR3_CURSOR0_CONTROL
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR3_CURSOR0_COLOR0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR3_CURSOR0_COLOR1
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR3_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+//DSCL3_SCL_COEF_RAM_TAP_SELECT
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL3_SCL_COEF_RAM_TAP_DATA
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL3_SCL_MODE
+#define DSCL3_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL3_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL3_SCL_TAP_CONTROL
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL3_DSCL_CONTROL
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL3_DSCL_2TAP_CONTROL
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL3_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL3_SCL_HORZ_FILTER_INIT
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL3_SCL_HORZ_FILTER_INIT_C
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL3_SCL_VERT_FILTER_INIT
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_INIT_BOT
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL3_SCL_VERT_FILTER_INIT_C
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL3_SCL_BLACK_OFFSET
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL3_DSCL_UPDATE
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL3_DSCL_AUTOCAL
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL3_OTG_H_BLANK
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL3_OTG_V_BLANK
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL3_RECOUT_START
+#define DSCL3_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL3_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL3_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL3_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL3_RECOUT_SIZE
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL3_MPC_SIZE
+#define DSCL3_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL3_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL3_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL3_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL3_LB_DATA_FORMAT
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL3_LB_MEMORY_CTRL
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL3_LB_V_COUNTER
+#define DSCL3_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL3_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL3_DSCL_MEM_PWR_CTRL
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL3_DSCL_MEM_PWR_STATUS
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL3_OBUF_CONTROL
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL3_OBUF_MEM_PWR_CTRL
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+//CM3_CM_CONTROL
+#define CM3_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM3_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM3_CM_ICSC_CONTROL
+#define CM3_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM3_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM3_CM_ICSC_C11_C12
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C13_C14
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C21_C22
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C23_C24
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C31_C32
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C33_C34
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C11_C12
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C13_C14
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C21_C22
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C23_C24
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C31_C32
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C33_C34
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_CONTROL
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM3_CM_GAMUT_REMAP_C11_C12
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C13_C14
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C21_C22
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C23_C24
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C31_C32
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C33_C34
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C11_C12
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C13_C14
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C21_C22
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C23_C24
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C31_C32
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C33_C34
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM3_CM_BIAS_CR_R
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM3_CM_BIAS_Y_G_CB_B
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_CONTROL
+#define CM3_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM3_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM3_CM_DGAM_LUT_INDEX
+#define CM3_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM3_CM_DGAM_LUT_DATA
+#define CM3_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM3_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM3_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM3_CM_DGAM_RAMA_START_CNTL_B
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMA_START_CNTL_G
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMA_START_CNTL_R
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL1_B
+#define CM3_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_B
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_END_CNTL1_G
+#define CM3_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_G
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_END_CNTL1_R
+#define CM3_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_R
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_REGION_0_1
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_2_3
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_4_5
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_6_7
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_8_9
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_10_11
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_12_13
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_14_15
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_START_CNTL_B
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_START_CNTL_G
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_START_CNTL_R
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL1_B
+#define CM3_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_B
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_END_CNTL1_G
+#define CM3_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_G
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_END_CNTL1_R
+#define CM3_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_R
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_REGION_0_1
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_2_3
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_4_5
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_6_7
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_8_9
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_10_11
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_12_13
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_14_15
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_CONTROL
+#define CM3_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM3_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM3_CM_BLNDGAM_LUT_INDEX
+#define CM3_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM3_CM_BLNDGAM_LUT_DATA
+#define CM3_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_HDR_MULT_COEF
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
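+/*
+ * Usage sketch (illustrative only; the mm* offset macro and coef value
+ * are placeholders, not defined here; RREG32()/WREG32() are the driver's
+ * usual register accessors): the <FIELD>__SHIFT / <FIELD>_MASK pairs in
+ * this file are consumed with the standard read-modify-write idiom:
+ *
+ *	u32 reg = RREG32(mmCM3_CM_HDR_MULT_COEF);
+ *	reg &= ~CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK;
+ *	reg |= (coef << CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT) &
+ *	       CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK;
+ *	WREG32(mmCM3_CM_HDR_MULT_COEF, reg);
+ */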
+//CM3_CM_MEM_PWR_CTRL
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM3_CM_MEM_PWR_STATUS
+#define CM3_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM3_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM3_CM_DEALPHA
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM3_CM_COEF_FORMAT
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM3_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM3_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM3_CM_SHAPER_CONTROL
+#define CM3_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM3_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM3_CM_SHAPER_OFFSET_R
+#define CM3_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_OFFSET_G
+#define CM3_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_OFFSET_B
+#define CM3_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_SCALE_R
+#define CM3_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM3_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM3_CM_SHAPER_SCALE_G_B
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM3_CM_SHAPER_LUT_INDEX
+#define CM3_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM3_CM_SHAPER_LUT_DATA
+#define CM3_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM3_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM3_CM_SHAPER_RAMA_START_CNTL_B
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_START_CNTL_G
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_START_CNTL_R
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_B
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_G
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_R
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_REGION_0_1
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_2_3
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_4_5
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_6_7
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_8_9
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_10_11
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_12_13
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_14_15
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_16_17
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_18_19
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_20_21
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_22_23
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_24_25
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_26_27
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_28_29
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_30_31
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_32_33
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_START_CNTL_B
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMB_START_CNTL_G
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMB_START_CNTL_R
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMB_END_CNTL_B
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMB_END_CNTL_G
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMB_END_CNTL_R
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMB_REGION_0_1
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_2_3
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_4_5
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_6_7
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_8_9
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_10_11
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_12_13
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_14_15
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_16_17
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_18_19
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_20_21
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_22_23
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_24_25
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_26_27
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_28_29
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_30_31
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_32_33
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_MEM_PWR_CTRL2
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM3_CM_MEM_PWR_STATUS2
+#define CM3_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM3_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM3_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM3_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM3_CM_3DLUT_MODE
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM3_CM_3DLUT_INDEX
+#define CM3_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM3_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM3_CM_3DLUT_DATA
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM3_CM_3DLUT_DATA_30BIT
+#define CM3_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM3_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM3_CM_3DLUT_READ_WRITE_CONTROL
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
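+/*
+ * Load sketch for the indexed 3DLUT (an assumed sequence following the
+ * generic index/data LUT pattern; mm* offsets, lut[] and lut_entries are
+ * placeholders): enable all write lanes, reset the index, then stream two
+ * 16-bit entries per 32-bit data write:
+ *
+ *	WREG32(mmCM3_CM_3DLUT_READ_WRITE_CONTROL,
+ *	       CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK);
+ *	WREG32(mmCM3_CM_3DLUT_INDEX, 0);
+ *	for (i = 0; i < lut_entries; i += 2)
+ *		WREG32(mmCM3_CM_3DLUT_DATA,
+ *		       (lut[i + 1] << CM3_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT) |
+ *		       lut[i]);
+ */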
+//CM3_CM_3DLUT_OUT_NORM_FACTOR
+#define CM3_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM3_CM_3DLUT_OUT_OFFSET_R
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM3_CM_3DLUT_OUT_OFFSET_G
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM3_CM_3DLUT_OUT_OFFSET_B
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM3_CM_TEST_DEBUG_INDEX
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM3_CM_TEST_DEBUG_DATA
+#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+//MPCC0_MPCC_TOP_SEL
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC0_MPCC_BOT_SEL
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC0_MPCC_OPP_ID
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC0_MPCC_CONTROL
+#define MPCC0_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC0_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC0_MPCC_SM_CONTROL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC0_MPCC_UPDATE_LOCK_SEL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC0_MPCC_TOP_GAIN
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC0_MPCC_BOT_GAIN_INSIDE
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC0_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC0_MPCC_BG_R_CR
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC0_MPCC_BG_G_Y
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC0_MPCC_BG_B_CB
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC0_MPCC_MEM_PWR_CTRL
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC0_MPCC_STALL_STATUS
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC0_MPCC_STATUS
+#define MPCC0_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC0_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC0_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC0_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC0_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC0_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC0_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC0_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC0_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC0_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
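+
+// Editor's note: an illustrative sketch (not part of the generated
+// header) of how these __SHIFT/_MASK pairs are conventionally consumed,
+// programming a field with a read-modify-write; 'reg' and 'alpha' are
+// hypothetical locals:
+//   reg &= ~MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK;
+//   reg |= (alpha << MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT) &
+//          MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK;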
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+//MPCC1_MPCC_TOP_SEL
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC1_MPCC_BOT_SEL
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC1_MPCC_OPP_ID
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC1_MPCC_CONTROL
+#define MPCC1_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC1_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC1_MPCC_SM_CONTROL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC1_MPCC_UPDATE_LOCK_SEL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC1_MPCC_TOP_GAIN
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_INSIDE
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_BG_R_CR
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_G_Y
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_B_CB
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC1_MPCC_MEM_PWR_CTRL
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC1_MPCC_STALL_STATUS
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC1_MPCC_STATUS
+#define MPCC1_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC1_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC1_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC1_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC1_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC1_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC1_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+//MPCC2_MPCC_TOP_SEL
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_BOT_SEL
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_OPP_ID
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC2_MPCC_CONTROL
+#define MPCC2_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC2_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC2_MPCC_SM_CONTROL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC2_MPCC_UPDATE_LOCK_SEL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC2_MPCC_TOP_GAIN
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_INSIDE
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_BG_R_CR
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_G_Y
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_B_CB
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC2_MPCC_MEM_PWR_CTRL
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC2_MPCC_STALL_STATUS
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC2_MPCC_STATUS
+#define MPCC2_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC2_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC2_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC2_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC2_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC2_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC2_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+//MPCC3_MPCC_TOP_SEL
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_BOT_SEL
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_OPP_ID
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC3_MPCC_CONTROL
+#define MPCC3_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC3_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC3_MPCC_SM_CONTROL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC3_MPCC_UPDATE_LOCK_SEL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC3_MPCC_TOP_GAIN
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC3_MPCC_BOT_GAIN_INSIDE
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC3_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC3_MPCC_BG_R_CR
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC3_MPCC_BG_G_Y
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC3_MPCC_BG_B_CB
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC3_MPCC_MEM_PWR_CTRL
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC3_MPCC_STALL_STATUS
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC3_MPCC_STATUS
+#define MPCC3_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC3_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC3_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC3_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC3_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC3_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC3_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC3_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC3_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC3_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+// addressBlock: dce_dc_mpc_mpcc4_dispdec
+//MPCC4_MPCC_TOP_SEL
+#define MPCC4_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC4_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC4_MPCC_BOT_SEL
+#define MPCC4_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC4_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC4_MPCC_OPP_ID
+#define MPCC4_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC4_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC4_MPCC_CONTROL
+#define MPCC4_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC4_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC4_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC4_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC4_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC4_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC4_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC4_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC4_MPCC_SM_CONTROL
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC4_MPCC_UPDATE_LOCK_SEL
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC4_MPCC_TOP_GAIN
+#define MPCC4_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC4_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC4_MPCC_BOT_GAIN_INSIDE
+#define MPCC4_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC4_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC4_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC4_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC4_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC4_MPCC_BG_R_CR
+#define MPCC4_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC4_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC4_MPCC_BG_G_Y
+#define MPCC4_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC4_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC4_MPCC_BG_B_CB
+#define MPCC4_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC4_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC4_MPCC_MEM_PWR_CTRL
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC4_MPCC_STALL_STATUS
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC4_MPCC_STATUS
+#define MPCC4_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC4_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC4_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC4_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC4_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC4_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC4_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC4_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC4_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC4_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC4_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC4_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+//MPC_CLOCK_CONTROL
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x1
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00000002L
+//MPC_SOFT_RESET
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET__SHIFT 0x0
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET__SHIFT 0x1
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET__SHIFT 0x2
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET__SHIFT 0x3
+#define MPC_SOFT_RESET__MPCC4_SOFT_RESET__SHIFT 0x4
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET__SHIFT 0xa
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET__SHIFT 0xb
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET__SHIFT 0xc
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET__SHIFT 0xd
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET__SHIFT 0x14
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET__SHIFT 0x15
+#define MPC_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x1f
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET_MASK 0x00000001L
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET_MASK 0x00000002L
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET_MASK 0x00000004L
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET_MASK 0x00000008L
+#define MPC_SOFT_RESET__MPCC4_SOFT_RESET_MASK 0x00000010L
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET_MASK 0x00000400L
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET_MASK 0x00000800L
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET_MASK 0x00001000L
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET_MASK 0x00002000L
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET_MASK 0x00100000L
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET_MASK 0x00200000L
+#define MPC_SOFT_RESET__MPC_SOFT_RESET_MASK 0x80000000L
+//MPC_BYPASS_BG_AR
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA__SHIFT 0x0
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR__SHIFT 0x10
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR_MASK 0xFFFF0000L
+//MPC_BYPASS_BG_GB
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y__SHIFT 0x0
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB__SHIFT 0x10
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB_MASK 0xFFFF0000L
+//MPC_STALL_GRACE_WINDOW
+#define MPC_STALL_GRACE_WINDOW__MPC_STALL_GRACE_WINDOW_PERIOD__SHIFT 0x0
+#define MPC_STALL_GRACE_WINDOW__MPC_STALL_GRACE_WINDOW_PERIOD_MASK 0x000000FFL
+//MPC_HOST_READ_CONTROL
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+//ADR_CFG_CUR_VUPDATE_LOCK_SET0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET1
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET1
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET1
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET1
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET1
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET2
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET2
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET2
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET2
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET2
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET3
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET3
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET3
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET3
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET3
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//MPC_OUT0_MUX
+#define MPC_OUT0_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR__SHIFT 0x6
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT0__SHIFT 0xb
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT1__SHIFT 0x14
+#define MPC_OUT0_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR_MASK 0x00000040L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT0_MASK 0x000FF800L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT1_MASK 0xFFF00000L
+//MPC_OUT0_DENORM_CONTROL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT0_DENORM_CLAMP_G_Y
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT0_DENORM_CLAMP_B_CB
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT1_MUX
+#define MPC_OUT1_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR__SHIFT 0x6
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT0__SHIFT 0xb
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT1__SHIFT 0x14
+#define MPC_OUT1_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR_MASK 0x00000040L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT0_MASK 0x000FF800L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT1_MASK 0xFFF00000L
+//MPC_OUT1_DENORM_CONTROL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT1_DENORM_CLAMP_G_Y
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT1_DENORM_CLAMP_B_CB
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
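+
+// Editor's note: an illustrative decode sketch (not part of the
+// generated header); a field is read back by masking then shifting,
+// with 'reg' a hypothetical raw register value:
+//   mux = (reg & MPC_OUT0_MUX__MPC_OUT_MUX_MASK) >>
+//         MPC_OUT0_MUX__MPC_OUT_MUX__SHIFT;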
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+//MPCC_OGAM0_MPCC_OGAM_MODE
+#define MPCC_OGAM0_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM0_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
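Aside (illustrative, not part of the patch): every field in these blocks is described by a __SHIFT/_MASK pair, used with the usual (value << SHIFT) & MASK pack and (reg & MASK) >> SHIFT extract idiom; in the amdgpu driver itself the REG_SET_FIELD()/REG_GET_FIELD() helpers play this role. A minimal standalone sketch, with hypothetical field_set()/field_get() helpers and one of the LUT_INDEX pairs defined above:

	/* Sketch of the shift/mask field idiom; field_set/field_get are
	 * illustrative helpers, not driver API. */
	#include <stdint.h>
	#include <stdio.h>

	#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
	#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK   0x000001FFL

	/* Insert val into the field described by (shift, mask). */
	static uint32_t field_set(uint32_t reg, uint32_t shift, uint32_t mask, uint32_t val)
	{
		return (reg & ~mask) | ((val << shift) & mask);
	}

	/* Extract the field described by (shift, mask). */
	static uint32_t field_get(uint32_t reg, uint32_t shift, uint32_t mask)
	{
		return (reg & mask) >> shift;
	}

	int main(void)
	{
		uint32_t reg = 0;

		reg = field_set(reg,
				MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT,
				MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK,
				0x1A5); /* fits in the 9-bit 0x1FF field */
		printf("LUT index = 0x%X\n",
		       (unsigned)field_get(reg,
				MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT,
				MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK));
		return 0;
	}

The same idiom applies unchanged to every __SHIFT/_MASK pair in the OGAM0, OGAM1, and OGAM2 address blocks below.
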
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+//MPCC_OGAM1_MPCC_OGAM_MODE
+#define MPCC_OGAM1_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM1_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+//MPCC_OGAM2_MPCC_OGAM_MODE
+#define MPCC_OGAM2_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM2_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM2_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
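(Editorial note, not part of the generated header: each MPCC_OGAM instance repeats the same indexed LUT access scheme — LUT_INDEX selects a 9-bit entry, LUT_DATA carries the 19-bit value, and LUT_RAM_CONTROL holds the per-channel write enables and RAM A/B select. The sketch below shows how a driver might stage those writes; write_reg() and the mm* offsets are hypothetical, since this file defines only the field layouts, and the index is rewritten per entry rather than relying on any auto-increment behavior.)

    #include <stdint.h>

    /* Hypothetical MMIO helper and register offsets -- not defined here. */
    extern void write_reg(uint32_t offset, uint32_t value);
    extern const uint32_t mmMPCC_OGAM3_MPCC_OGAM_LUT_INDEX;
    extern const uint32_t mmMPCC_OGAM3_MPCC_OGAM_LUT_DATA;
    extern const uint32_t mmMPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL;

    static void ogam3_load_lut(const uint32_t *lut, uint32_t count)
    {
            uint32_t i;

            /* Enable writes to all three channels; RAM_SEL bit left 0 (RAM A). */
            write_reg(mmMPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL,
                      (0x7u << MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT)
                      & MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK);

            /* Select each entry explicitly, then write its 19-bit value. */
            for (i = 0; i < count; i++) {
                    write_reg(mmMPCC_OGAM3_MPCC_OGAM_LUT_INDEX,
                              i & MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK);
                    write_reg(mmMPCC_OGAM3_MPCC_OGAM_LUT_DATA,
                              lut[i] & MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK);
            }
    }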
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+//MPCC_OGAM3_MPCC_OGAM_MODE
+#define MPCC_OGAM3_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM3_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM3_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam4_dispdec
+//MPCC_OGAM4_MPCC_OGAM_MODE
+#define MPCC_OGAM4_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM4_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM4_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM4_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+//MPC_OUT_CSC_COEF_FORMAT
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT__SHIFT 0x0
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT__SHIFT 0x1
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT_MASK 0x00000001L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT_MASK 0x00000002L
+//MPC_OUT0_CSC_MODE
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+//MPC_OUT0_CSC_C11_C12_A
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C13_C14_A
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C21_C22_A
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C23_C24_A
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C31_C32_A
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C33_C34_A
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C11_C12_B
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C13_C14_B
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C21_C22_B
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C23_C24_B
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C31_C32_B
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C33_C34_B
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_MODE
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+//MPC_OUT1_CSC_C11_C12_A
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C13_C14_A
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C21_C22_A
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C23_C24_A
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C31_C32_A
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C33_C34_A
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C11_C12_B
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C13_C14_B
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C21_C22_B
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C23_C24_B
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C31_C32_B
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C33_C34_B
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OCSC_TEST_DEBUG_INDEX
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+
+
+// addressBlock: dce_dc_opp_fmt0_dispdec
+//FMT0_FMT_CLAMP_COMPONENT_R
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_COMPONENT_G
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_COMPONENT_B
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT0_FMT_DYNAMIC_EXP_CNTL
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT0_FMT_CONTROL
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT0_FMT_BIT_DEPTH_CONTROL
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT0_FMT_DITHER_RAND_R_SEED
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT0_FMT_DITHER_RAND_G_SEED
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT0_FMT_DITHER_RAND_B_SEED
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_CNTL
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT0_FMT_MAP420_MEMORY_CONTROL
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT0_FMT_422_CONTROL
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+//DPG0_DPG_CONTROL
+#define DPG0_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG0_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG0_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG0_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG0_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG0_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG0_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG0_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG0_DPG_RAMP_CONTROL
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG0_DPG_DIMENSIONS
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG0_DPG_COLOUR_R_CR
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG0_DPG_COLOUR_G_Y
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG0_DPG_COLOUR_B_CB
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG0_DPG_OFFSET_SEGMENT
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG0_DPG_STATUS
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
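+
+/*
+ * Editorial sketch, not part of the generated header: the *__SHIFT and
+ * *_MASK pairs above are intended for read-modify-write access to packed
+ * register fields.  get_field()/set_field() below are hypothetical
+ * helpers for illustration only (the driver has its own accessors), and
+ * unsigned int is assumed to be 32 bits wide.
+ */
+static inline unsigned int get_field(unsigned int reg, unsigned int mask,
+				     unsigned int shift)
+{
+	/* Isolate the field bits, then right-align them. */
+	return (reg & mask) >> shift;
+}
+
+static inline unsigned int set_field(unsigned int reg, unsigned int mask,
+				     unsigned int shift, unsigned int val)
+{
+	/* Clear the field, then merge in the new, mask-clamped value. */
+	return (reg & ~mask) | ((val << shift) & mask);
+}
+
+/*
+ * Example, using the DPG0_DPG_CONTROL definitions above to enable the
+ * display pattern generator:
+ *
+ *	reg = set_field(reg, DPG0_DPG_CONTROL__DPG_EN_MASK,
+ *			DPG0_DPG_CONTROL__DPG_EN__SHIFT, 1);
+ */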
+
+
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+//OPPBUF0_OPPBUF_CONTROL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF0_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF0_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+//OPP_PIPE0_OPP_PIPE_CONTROL
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+//FMT1_FMT_CLAMP_COMPONENT_R
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_COMPONENT_G
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_COMPONENT_B
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT1_FMT_DYNAMIC_EXP_CNTL
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT1_FMT_CONTROL
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT1_FMT_BIT_DEPTH_CONTROL
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT1_FMT_DITHER_RAND_R_SEED
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT1_FMT_DITHER_RAND_G_SEED
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT1_FMT_DITHER_RAND_B_SEED
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_CNTL
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT1_FMT_MAP420_MEMORY_CONTROL
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT1_FMT_422_CONTROL
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+//DPG1_DPG_CONTROL
+#define DPG1_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG1_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG1_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG1_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG1_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG1_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG1_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG1_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG1_DPG_RAMP_CONTROL
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG1_DPG_DIMENSIONS
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG1_DPG_COLOUR_R_CR
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG1_DPG_COLOUR_G_Y
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG1_DPG_COLOUR_B_CB
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG1_DPG_OFFSET_SEGMENT
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG1_DPG_STATUS
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+//OPPBUF1_OPPBUF_CONTROL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF1_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF1_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+//OPP_PIPE1_OPP_PIPE_CONTROL
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+//OPP_TOP_CLK_CONTROL
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS__SHIFT 0x4
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON__SHIFT 0xc
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON__SHIFT 0xd
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS_MASK 0x00000010L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON_MASK 0x00001000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON_MASK 0x00002000L
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+//ODM0_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM0_OPTC_DATA_SOURCE_SELECT
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+//ODM0_OPTC_DATA_FORMAT_CONTROL
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM0_OPTC_BYTES_PER_PIXEL
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM0_OPTC_WIDTH_CONTROL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM0_OPTC_INPUT_CLOCK_CONTROL
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM0_OPTC_INPUT_SPARE_REGISTER
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+//ODM1_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM1_OPTC_DATA_SOURCE_SELECT
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+//ODM1_OPTC_DATA_FORMAT_CONTROL
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM1_OPTC_BYTES_PER_PIXEL
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM1_OPTC_WIDTH_CONTROL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM1_OPTC_INPUT_CLOCK_CONTROL
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM1_OPTC_INPUT_SPARE_REGISTER
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+//OTG0_OTG_H_TOTAL
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_H_BLANK_START_END
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A_CNTL
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG0_OTG_H_TIMING_CNTL
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG0_OTG_V_TOTAL
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MIN
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MAX
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MID
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_CONTROL
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_V_TOTAL_INT_STATUS
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK__SHIFT 0xc
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_MASK 0x00000100L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK_MASK 0x00001000L
+//OTG0_OTG_VSYNC_NOM_INT_STATUS
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG0_OTG_V_BLANK_START_END
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A_CNTL
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG0_OTG_TRIGA_CNTL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGA_MANUAL_TRIG
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_TRIGB_CNTL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGB_MANUAL_TRIG
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG0_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG0_OTG_CONTROL
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG0_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG0_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG0_OTG_BLANK_CONTROL
+#define OTG0_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG0_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG0_OTG_INTERLACE_CONTROL
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG0_OTG_INTERLACE_STATUS
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG0_OTG_PIXEL_DATA_READBACK0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG0_OTG_PIXEL_DATA_READBACK1
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG0_OTG_STATUS
+#define OTG0_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG0_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG0_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG0_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG0_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG0_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG0_OTG_STATUS_POSITION
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_NOM_VERT_POSITION
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG0_OTG_STATUS_FRAME_COUNT
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_STATUS_VF_COUNT
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_STATUS_HV_COUNT
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_COUNT_CONTROL
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG0_OTG_COUNT_RESET
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG0_OTG_VERT_SYNC_CONTROL
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG0_OTG_STEREO_STATUS
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG0_OTG_STEREO_CONTROL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG0_OTG_SNAPSHOT_STATUS
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG0_OTG_SNAPSHOT_CONTROL
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG0_OTG_SNAPSHOT_POSITION
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_SNAPSHOT_FRAME
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_INTERRUPT_CONTROL
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG0_OTG_UPDATE_LOCK
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG0_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG0_OTG_MASTER_EN
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG0_OTG_BLANK_DATA_COLOR
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG0_OTG_BLANK_DATA_COLOR_EXT
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG0_OTG_BLACK_COLOR
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG0_OTG_BLACK_COLOR_EXT
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG0_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
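+// Illustrative note, not generated content: every field in this file is
+// described by a __SHIFT/_MASK pair. A minimal sketch of arming the
+// VERTICAL_INTERRUPT0 source above, assuming hypothetical rreg32()/wreg32()
+// 32-bit MMIO accessors and a matching mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL
+// offset from the companion register-offset header:
+//
+//   u32 v = rreg32(mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL);
+//   /* enable the interrupt and ack any pending event */
+//   v |= 1u << OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT;
+//   v |= 1u << OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT;
+//   wreg32(mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, v);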
+//OTG0_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_CRC_CNTL
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG0_OTG_CRC_CNTL2
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG0_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_DATA_RG
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC0_DATA_B
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
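+// Hypothetical sketch only: packing both fields of a two-field register such
+// as OTG0_OTG_CRC0_WINDOWA_X_CONTROL from its __SHIFT/_MASK pairs. start/end
+// are caller-chosen pixel coordinates; wreg32() and the mm* offset name are
+// assumptions (the in-tree driver typically hides this pattern behind its
+// REG_SET/REG_UPDATE helpers):
+//
+//   u32 w = ((start << OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT)
+//            & OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK)
+//         | ((end << OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT)
+//            & OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK);
+//   wreg32(mmOTG0_OTG_CRC0_WINDOWA_X_CONTROL, w);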
+//OTG0_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_DATA_RG
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_DATA_B
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_STATIC_SCREEN_CONTROL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG0_OTG_3D_STRUCTURE_CONTROL
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG0_OTG_GSL_VSYNC_GAP
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG0_OTG_MASTER_UPDATE_MODE
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG0_OTG_CLOCK_CONTROL
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG0_OTG_VSTARTUP_PARAM
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG0_OTG_VUPDATE_PARAM
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG0_OTG_VREADY_PARAM
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG0_OTG_GLOBAL_SYNC_STATUS
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG0_OTG_MASTER_UPDATE_LOCK
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG0_OTG_GSL_CONTROL
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG0_OTG_GSL_WINDOW_X
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG0_OTG_GSL_WINDOW_Y
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG0_OTG_VUPDATE_KEEPOUT
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL0
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG0_OTG_GLOBAL_CONTROL1
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL2
+#define OTG0_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL3
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG0_OTG_TRIG_MANUAL_CONTROL
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG0_OTG_RANGE_TIMING_INT_STATUS
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG0_OTG_DRR_CONTROL
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG0_OTG_REQUEST_CONTROL
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG0_OTG_DSC_START_POSITION
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG0_OTG_SPARE_REGISTER
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
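+// Note (illustrative, not generated): the OTG1 block below repeats the OTG0
+// field layout verbatim; only the OTG1_ instance prefix differs. A minimal
+// hypothetical extract helper that works on any such pair via token pasting:
+//
+//   #define OTG_GET_FIELD(val, reg, field) \
+//       (((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
+//
+//   /* e.g. OTG_GET_FIELD(v, OTG1_OTG_STATUS_POSITION, OTG_VERT_COUNT) */
+//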
+// addressBlock: dce_dc_optc_otg1_dispdec
+//OTG1_OTG_H_TOTAL
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_H_BLANK_START_END
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A_CNTL
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG1_OTG_H_TIMING_CNTL
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG1_OTG_V_TOTAL
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MIN
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MAX
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MID
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_CONTROL
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_V_TOTAL_INT_STATUS
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK__SHIFT 0xc
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_MASK 0x00000100L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK_MASK 0x00001000L
+//OTG1_OTG_VSYNC_NOM_INT_STATUS
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG1_OTG_V_BLANK_START_END
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A_CNTL
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG1_OTG_TRIGA_CNTL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGA_MANUAL_TRIG
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_TRIGB_CNTL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGB_MANUAL_TRIG
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG1_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG1_OTG_CONTROL
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_CONTROL__OTG_SYNC_RESET_SEL__SHIFT 0x4
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG1_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG1_OTG_CONTROL__OTG_SYNC_RESET_SEL_MASK 0x00000010L
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG1_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG1_OTG_BLANK_CONTROL
+#define OTG1_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG1_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG1_OTG_INTERLACE_CONTROL
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG1_OTG_INTERLACE_STATUS
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG1_OTG_PIXEL_DATA_READBACK0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG1_OTG_PIXEL_DATA_READBACK1
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG1_OTG_STATUS
+#define OTG1_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG1_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG1_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG1_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG1_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG1_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG1_OTG_STATUS_POSITION
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG1_OTG_NOM_VERT_POSITION
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG1_OTG_STATUS_FRAME_COUNT
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG1_OTG_STATUS_VF_COUNT
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG1_OTG_STATUS_HV_COUNT
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG1_OTG_COUNT_CONTROL
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG1_OTG_COUNT_RESET
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG1_OTG_VERT_SYNC_CONTROL
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG1_OTG_STEREO_STATUS
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG1_OTG_STEREO_CONTROL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG1_OTG_SNAPSHOT_STATUS
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG1_OTG_SNAPSHOT_CONTROL
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG1_OTG_SNAPSHOT_POSITION
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG1_OTG_SNAPSHOT_FRAME
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG1_OTG_INTERRUPT_CONTROL
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG1_OTG_UPDATE_LOCK
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG1_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG1_OTG_MASTER_EN
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG1_OTG_BLANK_DATA_COLOR
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG1_OTG_BLANK_DATA_COLOR_EXT
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG1_OTG_BLACK_COLOR
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG1_OTG_BLACK_COLOR_EXT
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG1_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_CRC_CNTL
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG1_OTG_CRC_CNTL2
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG1_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_DATA_RG
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC0_DATA_B
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_DATA_RG
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC1_DATA_B
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_STATIC_SCREEN_CONTROL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG1_OTG_3D_STRUCTURE_CONTROL
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG1_OTG_GSL_VSYNC_GAP
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG1_OTG_MASTER_UPDATE_MODE
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG1_OTG_CLOCK_CONTROL
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG1_OTG_VSTARTUP_PARAM
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG1_OTG_VUPDATE_PARAM
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG1_OTG_VREADY_PARAM
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG1_OTG_GLOBAL_SYNC_STATUS
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG1_OTG_MASTER_UPDATE_LOCK
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG1_OTG_GSL_CONTROL
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG1_OTG_GSL_WINDOW_X
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG1_OTG_GSL_WINDOW_Y
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG1_OTG_VUPDATE_KEEPOUT
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL0
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG1_OTG_GLOBAL_CONTROL1
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL2
+#define OTG1_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL3
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG1_OTG_TRIG_MANUAL_CONTROL
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG1_OTG_RANGE_TIMING_INT_STATUS
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG1_OTG_DRR_CONTROL
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG1_OTG_REQUEST_CONTROL
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG1_OTG_DSC_START_POSITION
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+
+
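(Editorial note, not part of the patch: the OTG timing-generator block above follows the standard convention of these generated headers — every field gets a `__SHIFT` and a `_MASK` define, and drivers write a field as `(value << SHIFT) & MASK` after clearing the mask, and read one back by masking then shifting. In the amdgpu driver this pattern is wrapped by macros such as REG_SET_FIELD()/REG_GET_FIELD(). A minimal standalone sketch of the raw pattern follows; the mmio_read32()/mmio_write32() helpers and the mmOTG1_OTG_CLOCK_CONTROL offset are assumptions for illustration, not defined by this header.)

#include <stdint.h>

extern uint32_t mmio_read32(uint32_t offset);          /* assumed helper */
extern void mmio_write32(uint32_t offset, uint32_t v); /* assumed helper */
extern const uint32_t mmOTG1_OTG_CLOCK_CONTROL;        /* assumed offset */

/* Enable the OTG clock, then poll the CLOCK_ON status field. */
static void otg1_clock_enable(void)
{
        uint32_t reg = mmio_read32(mmOTG1_OTG_CLOCK_CONTROL);

        /* Write a field: clear its mask, then OR in the shifted value. */
        reg &= ~OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK;
        reg |= (1u << OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT) &
               OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK;
        mmio_write32(mmOTG1_OTG_CLOCK_CONTROL, reg);

        /* Read a field: mask first, then shift down to bit 0. */
        while (!((mmio_read32(mmOTG1_OTG_CLOCK_CONTROL) &
                  OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK) >>
                 OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT))
                ; /* spin until the clock reports on (no timeout; sketch only) */
}
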
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+//DWB_SOURCE_SELECT
+#define DWB_SOURCE_SELECT__OPTC_DWB0_SOURCE_SELECT__SHIFT 0x0
+#define DWB_SOURCE_SELECT__OPTC_DWB1_SOURCE_SELECT__SHIFT 0x3
+#define DWB_SOURCE_SELECT__OPTC_DWB2_SOURCE_SELECT__SHIFT 0x6
+#define DWB_SOURCE_SELECT__OPTC_DWB0_SOURCE_SELECT_MASK 0x00000007L
+#define DWB_SOURCE_SELECT__OPTC_DWB1_SOURCE_SELECT_MASK 0x00000038L
+#define DWB_SOURCE_SELECT__OPTC_DWB2_SOURCE_SELECT_MASK 0x000001C0L
+//GSL_SOURCE_SELECT
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL__SHIFT 0x0
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL__SHIFT 0x4
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL__SHIFT 0x8
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL__SHIFT 0x10
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL_MASK 0x00000007L
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL_MASK 0x00000070L
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL_MASK 0x00000700L
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL_MASK 0x00070000L
+//OPTC_CLOCK_CONTROL
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON__SHIFT 0x1
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON_MASK 0x00000002L
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+//DC_I2C_CONTROL
+#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x0
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x1
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x2
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x3
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x8
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x14
+#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x00000001L
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x00000002L
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x00000004L
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x00000008L
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x00000700L
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x00300000L
+//DC_I2C_ARBITRATION
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x0
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x4
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x8
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0xc
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x14
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x15
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x18
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x19
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x00000003L
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x00000010L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x00000100L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x00001000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x00100000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x00200000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x01000000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x02000000L
+//DC_I2C_INTERRUPT_CONTROL
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x0
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x1
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x2
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x4
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x5
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x6
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x8
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x9
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0xa
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x00000001L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x00000002L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x00000004L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x00000010L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x00000020L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x00000040L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x00000100L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x00000200L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x00000400L
+//DC_I2C_SW_STATUS
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x0
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x2
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x4
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x5
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x6
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x7
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x8
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0xc
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0xd
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0xe
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0xf
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x12
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x00000003L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x00000004L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x00000010L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x00000020L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x00000040L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x00000080L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x00000100L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x00001000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x00002000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x00004000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x00008000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x00040000L
+//DC_I2C_DDC1_HW_STATUS
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC2_HW_STATUS
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC1_SPEED
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC1_SETUP
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC2_SPEED
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC2_SETUP
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_TRANSACTION0
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x0
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x8
+#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0xc
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0xd
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x10
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x00000001L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x00000100L
+#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x00001000L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x00002000L
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION1
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x0
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x8
+#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0xc
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0xd
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x10
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x00000001L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x00000100L
+#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x00001000L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x00002000L
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION2
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x0
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x8
+#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0xc
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0xd
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x10
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x00000001L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x00000100L
+#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x00001000L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x00002000L
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION3
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x0
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x8
+#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0xc
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0xd
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x10
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x00000001L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x00000100L
+#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x00001000L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x00002000L
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x03FF0000L
+//DC_I2C_DATA
+#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x0
+#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x8
+#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x10
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x1f
+#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x00000001L
+#define DC_I2C_DATA__DC_I2C_DATA_MASK 0x0000FF00L
+#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x03FF0000L
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000L
+
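(Editorial note, not part of the patch: the DC_I2C block above exposes up to four queued transactions, each described by a DC_I2C_TRANSACTIONn register, with the engine kicked off through DC_I2C_CONTROL. A hedged sketch of queuing one write transaction follows; only the shift/mask names come from this header, while the MMIO helpers and register offsets are assumptions, and the exact encoding of DC_I2C_TRANSACTION_COUNT is hardware-specific and should be taken from the programming guide rather than from this sketch.)

#include <stdint.h>

extern uint32_t mmio_read32(uint32_t offset);          /* assumed helper */
extern void mmio_write32(uint32_t offset, uint32_t v); /* assumed helper */
extern const uint32_t mmDC_I2C_TRANSACTION0;           /* assumed offset */
extern const uint32_t mmDC_I2C_CONTROL;                /* assumed offset */

/* Queue a single write of 'count' bytes with START/STOP and kick the engine. */
static void dc_i2c_start_write(uint32_t count)
{
        uint32_t txn = 0;
        uint32_t ctl;

        /* RW0 stays 0 (write); request START, STOP, and stop-on-NACK. */
        txn |= DC_I2C_TRANSACTION0__DC_I2C_START0_MASK;
        txn |= DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK;
        txn |= DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK;
        txn |= (count << DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT) &
               DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK;
        mmio_write32(mmDC_I2C_TRANSACTION0, txn);

        /* Program the transaction count (encoding assumed) and set GO. */
        ctl = mmio_read32(mmDC_I2C_CONTROL);
        ctl &= ~DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK;
        ctl |= DC_I2C_CONTROL__DC_I2C_GO_MASK;
        mmio_write32(mmDC_I2C_CONTROL, ctl);
}
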
+//DIO_SCRATCH0
+#define DIO_SCRATCH0__DIO_SCRATCH0__SHIFT 0x0
+#define DIO_SCRATCH0__DIO_SCRATCH0_MASK 0xFFFFFFFFL
+//DIO_SCRATCH1
+#define DIO_SCRATCH1__DIO_SCRATCH1__SHIFT 0x0
+#define DIO_SCRATCH1__DIO_SCRATCH1_MASK 0xFFFFFFFFL
+//DIO_SCRATCH2
+#define DIO_SCRATCH2__DIO_SCRATCH2__SHIFT 0x0
+#define DIO_SCRATCH2__DIO_SCRATCH2_MASK 0xFFFFFFFFL
+//DIO_SCRATCH3
+#define DIO_SCRATCH3__DIO_SCRATCH3__SHIFT 0x0
+#define DIO_SCRATCH3__DIO_SCRATCH3_MASK 0xFFFFFFFFL
+//DIO_SCRATCH4
+#define DIO_SCRATCH4__DIO_SCRATCH4__SHIFT 0x0
+#define DIO_SCRATCH4__DIO_SCRATCH4_MASK 0xFFFFFFFFL
+//DIO_SCRATCH5
+#define DIO_SCRATCH5__DIO_SCRATCH5__SHIFT 0x0
+#define DIO_SCRATCH5__DIO_SCRATCH5_MASK 0xFFFFFFFFL
+//DIO_SCRATCH6
+#define DIO_SCRATCH6__DIO_SCRATCH6__SHIFT 0x0
+#define DIO_SCRATCH6__DIO_SCRATCH6_MASK 0xFFFFFFFFL
+//DIO_SCRATCH7
+#define DIO_SCRATCH7__DIO_SCRATCH7__SHIFT 0x0
+#define DIO_SCRATCH7__DIO_SCRATCH7_MASK 0xFFFFFFFFL
+//DIO_MEM_PWR_STATUS
+#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE__SHIFT 0x0
+#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE__SHIFT 0x3
+#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE__SHIFT 0x4
+#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE__SHIFT 0x5
+#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE__SHIFT 0x6
+#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE__SHIFT 0x7
+#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE__SHIFT 0x8
+#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE__SHIFT 0x9
+#define DIO_MEM_PWR_STATUS__HDMI0_MEM_PWR_STATE__SHIFT 0xa
+#define DIO_MEM_PWR_STATUS__HDMI1_MEM_PWR_STATE__SHIFT 0xc
+#define DIO_MEM_PWR_STATUS__HDMI2_MEM_PWR_STATE__SHIFT 0xe
+#define DIO_MEM_PWR_STATUS__HDMI3_MEM_PWR_STATE__SHIFT 0x10
+#define DIO_MEM_PWR_STATUS__HDMI4_MEM_PWR_STATE__SHIFT 0x12
+#define DIO_MEM_PWR_STATUS__HDMI5_MEM_PWR_STATE__SHIFT 0x14
+#define DIO_MEM_PWR_STATUS__HDMI6_MEM_PWR_STATE__SHIFT 0x16
+#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE_MASK 0x00000001L
+#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE_MASK 0x00000008L
+#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE_MASK 0x00000010L
+#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE_MASK 0x00000020L
+#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE_MASK 0x00000040L
+#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE_MASK 0x00000080L
+#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE_MASK 0x00000100L
+#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE_MASK 0x00000200L
+#define DIO_MEM_PWR_STATUS__HDMI0_MEM_PWR_STATE_MASK 0x00000C00L
+#define DIO_MEM_PWR_STATUS__HDMI1_MEM_PWR_STATE_MASK 0x00003000L
+#define DIO_MEM_PWR_STATUS__HDMI2_MEM_PWR_STATE_MASK 0x0000C000L
+#define DIO_MEM_PWR_STATUS__HDMI3_MEM_PWR_STATE_MASK 0x00030000L
+#define DIO_MEM_PWR_STATUS__HDMI4_MEM_PWR_STATE_MASK 0x000C0000L
+#define DIO_MEM_PWR_STATUS__HDMI5_MEM_PWR_STATE_MASK 0x00300000L
+#define DIO_MEM_PWR_STATUS__HDMI6_MEM_PWR_STATE_MASK 0x00C00000L
+//DIO_MEM_PWR_CTRL
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE__SHIFT 0x0
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS__SHIFT 0x1
+#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS__SHIFT 0x4
+#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS__SHIFT 0x5
+#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS__SHIFT 0x6
+#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS__SHIFT 0x7
+#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS__SHIFT 0x8
+#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS__SHIFT 0x9
+#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS__SHIFT 0xa
+#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_FORCE__SHIFT 0xb
+#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_DIS__SHIFT 0xd
+#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_FORCE__SHIFT 0xe
+#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_DIS__SHIFT 0x10
+#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_FORCE__SHIFT 0x11
+#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_DIS__SHIFT 0x13
+#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_FORCE__SHIFT 0x14
+#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_DIS__SHIFT 0x16
+#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_FORCE__SHIFT 0x17
+#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_DIS__SHIFT 0x19
+#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_FORCE__SHIFT 0x1a
+#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_DIS__SHIFT 0x1c
+#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_FORCE__SHIFT 0x1d
+#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_DIS__SHIFT 0x1f
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE_MASK 0x00000001L
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS_MASK 0x00000002L
+#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS_MASK 0x00000080L
+#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS_MASK 0x00000200L
+#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS_MASK 0x00000400L
+#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_FORCE_MASK 0x00001800L
+#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_DIS_MASK 0x00002000L
+#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_FORCE_MASK 0x0000C000L
+#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_DIS_MASK 0x00010000L
+#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_FORCE_MASK 0x00060000L
+#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_DIS_MASK 0x00080000L
+#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_FORCE_MASK 0x00300000L
+#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_DIS_MASK 0x00400000L
+#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_FORCE_MASK 0x01800000L
+#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_DIS_MASK 0x02000000L
+#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_FORCE_MASK 0x0C000000L
+#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_DIS_MASK 0x10000000L
+#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_FORCE_MASK 0x60000000L
+#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_DIS_MASK 0x80000000L
+//DIO_MEM_PWR_CTRL2
+#define DIO_MEM_PWR_CTRL2__HDMI_MEM_PWR_MODE_SEL__SHIFT 0x0
+#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_DIS__SHIFT 0x4
+#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_FORCE__SHIFT 0x5
+#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_DIS__SHIFT 0x6
+#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_FORCE__SHIFT 0x7
+#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_DIS__SHIFT 0x8
+#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_FORCE__SHIFT 0x9
+#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_DIS__SHIFT 0xa
+#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_FORCE__SHIFT 0xb
+#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_DIS__SHIFT 0xc
+#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_FORCE__SHIFT 0xd
+#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_DIS__SHIFT 0xe
+#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_FORCE__SHIFT 0xf
+#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE__SHIFT 0x18
+#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE__SHIFT 0x19
+#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE__SHIFT 0x1a
+#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE__SHIFT 0x1b
+#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE__SHIFT 0x1c
+#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE__SHIFT 0x1d
+#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE__SHIFT 0x1e
+#define DIO_MEM_PWR_CTRL2__HDMI_MEM_PWR_MODE_SEL_MASK 0x00000003L
+#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_FORCE_MASK 0x00000020L
+#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_FORCE_MASK 0x00000080L
+#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_FORCE_MASK 0x00000200L
+#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_DIS_MASK 0x00000400L
+#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_FORCE_MASK 0x00000800L
+#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_DIS_MASK 0x00001000L
+#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_FORCE_MASK 0x00002000L
+#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_DIS_MASK 0x00004000L
+#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_FORCE_MASK 0x00008000L
+#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE_MASK 0x01000000L
+#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE_MASK 0x02000000L
+#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE_MASK 0x04000000L
+#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE_MASK 0x08000000L
+#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE_MASK 0x10000000L
+#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE_MASK 0x20000000L
+#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE_MASK 0x40000000L
+//DIO_CLK_CNTL
+#define DIO_CLK_CNTL__DISPCLK_R_DIO_GATE_DIS__SHIFT 0x5
+#define DIO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS__SHIFT 0x8
+#define DIO_CLK_CNTL__REFCLK_R_DIO_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS__SHIFT 0x18
+#define DIO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS__SHIFT 0x19
+#define DIO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS__SHIFT 0x1a
+#define DIO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS__SHIFT 0x1b
+#define DIO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS__SHIFT 0x1c
+#define DIO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS__SHIFT 0x1d
+#define DIO_CLK_CNTL__DISPCLK_G_DIGG_GATE_DIS__SHIFT 0x1e
+#define DIO_CLK_CNTL__DISPCLK_R_DIO_GATE_DIS_MASK 0x00000020L
+#define DIO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS_MASK 0x00000100L
+#define DIO_CLK_CNTL__REFCLK_R_DIO_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS_MASK 0x01000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS_MASK 0x02000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS_MASK 0x04000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS_MASK 0x08000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS_MASK 0x10000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS_MASK 0x20000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGG_GATE_DIS_MASK 0x40000000L
+//DIO_MEM_PWR_CTRL3
+#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_DIS__SHIFT 0x0
+#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_FORCE__SHIFT 0x1
+#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_DIS__SHIFT 0x3
+#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_FORCE__SHIFT 0x4
+#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_DIS__SHIFT 0x6
+#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_FORCE__SHIFT 0x7
+#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_DIS__SHIFT 0x9
+#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_FORCE__SHIFT 0xa
+#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_DIS__SHIFT 0xc
+#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_FORCE__SHIFT 0xd
+#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_DIS__SHIFT 0xf
+#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_FORCE__SHIFT 0x10
+#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_DIS_MASK 0x00000001L
+#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_FORCE_MASK 0x00000006L
+#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_DIS_MASK 0x00000008L
+#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_DIS_MASK 0x00000040L
+#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_FORCE_MASK 0x00000180L
+#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_DIS_MASK 0x00000200L
+#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_FORCE_MASK 0x00000C00L
+#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_DIS_MASK 0x00001000L
+#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_FORCE_MASK 0x00006000L
+#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_DIS_MASK 0x00008000L
+#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_FORCE_MASK 0x00030000L
+//DIO_POWER_MANAGEMENT_CNTL
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET__SHIFT 0x0
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF__SHIFT 0x8
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET_MASK 0x00000001L
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF_MASK 0x00000100L
+//DIO_MEM_PWR_STATUS1
+#define DIO_MEM_PWR_STATUS1__AFMT0_MEM_PWR_STATE__SHIFT 0x0
+#define DIO_MEM_PWR_STATUS1__AFMT1_MEM_PWR_STATE__SHIFT 0x2
+#define DIO_MEM_PWR_STATUS1__AFMT2_MEM_PWR_STATE__SHIFT 0x4
+#define DIO_MEM_PWR_STATUS1__AFMT3_MEM_PWR_STATE__SHIFT 0x6
+#define DIO_MEM_PWR_STATUS1__AFMT4_MEM_PWR_STATE__SHIFT 0x8
+#define DIO_MEM_PWR_STATUS1__AFMT5_MEM_PWR_STATE__SHIFT 0xa
+#define DIO_MEM_PWR_STATUS1__DME0_MEM_PWR_STATE__SHIFT 0x10
+#define DIO_MEM_PWR_STATUS1__DME1_MEM_PWR_STATE__SHIFT 0x12
+#define DIO_MEM_PWR_STATUS1__DME2_MEM_PWR_STATE__SHIFT 0x14
+#define DIO_MEM_PWR_STATUS1__DME3_MEM_PWR_STATE__SHIFT 0x16
+#define DIO_MEM_PWR_STATUS1__DME4_MEM_PWR_STATE__SHIFT 0x18
+#define DIO_MEM_PWR_STATUS1__DME5_MEM_PWR_STATE__SHIFT 0x1a
+#define DIO_MEM_PWR_STATUS1__AFMT0_MEM_PWR_STATE_MASK 0x00000001L
+#define DIO_MEM_PWR_STATUS1__AFMT1_MEM_PWR_STATE_MASK 0x00000004L
+#define DIO_MEM_PWR_STATUS1__AFMT2_MEM_PWR_STATE_MASK 0x00000010L
+#define DIO_MEM_PWR_STATUS1__AFMT3_MEM_PWR_STATE_MASK 0x00000040L
+#define DIO_MEM_PWR_STATUS1__AFMT4_MEM_PWR_STATE_MASK 0x00000100L
+#define DIO_MEM_PWR_STATUS1__AFMT5_MEM_PWR_STATE_MASK 0x00000400L
+#define DIO_MEM_PWR_STATUS1__DME0_MEM_PWR_STATE_MASK 0x00030000L
+#define DIO_MEM_PWR_STATUS1__DME1_MEM_PWR_STATE_MASK 0x000C0000L
+#define DIO_MEM_PWR_STATUS1__DME2_MEM_PWR_STATE_MASK 0x00300000L
+#define DIO_MEM_PWR_STATUS1__DME3_MEM_PWR_STATE_MASK 0x00C00000L
+#define DIO_MEM_PWR_STATUS1__DME4_MEM_PWR_STATE_MASK 0x03000000L
+#define DIO_MEM_PWR_STATUS1__DME5_MEM_PWR_STATE_MASK 0x0C000000L
+//DIO_CLK_CNTL2
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTA_GATE_DIS__SHIFT 0x7
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTB_GATE_DIS__SHIFT 0x8
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTC_GATE_DIS__SHIFT 0x9
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTD_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTE_GATE_DIS__SHIFT 0xb
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTF_GATE_DIS__SHIFT 0xc
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTG_GATE_DIS__SHIFT 0xd
+#define DIO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS__SHIFT 0x11
+#define DIO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS__SHIFT 0x12
+#define DIO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS__SHIFT 0x13
+#define DIO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS__SHIFT 0x14
+#define DIO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS__SHIFT 0x15
+#define DIO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS__SHIFT 0x16
+#define DIO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS__SHIFT 0x17
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTA_GATE_DIS_MASK 0x00000080L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTB_GATE_DIS_MASK 0x00000100L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTC_GATE_DIS_MASK 0x00000200L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTD_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTE_GATE_DIS_MASK 0x00000800L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTF_GATE_DIS_MASK 0x00001000L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTG_GATE_DIS_MASK 0x00002000L
+#define DIO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS_MASK 0x00020000L
+#define DIO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS_MASK 0x00040000L
+#define DIO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS_MASK 0x00080000L
+#define DIO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS_MASK 0x00100000L
+#define DIO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS_MASK 0x00200000L
+#define DIO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS_MASK 0x00400000L
+#define DIO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS_MASK 0x00800000L
+//DIO_CLK_CNTL3
+#define DIO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS__SHIFT 0x0
+#define DIO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS__SHIFT 0x1
+#define DIO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS__SHIFT 0x2
+#define DIO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS__SHIFT 0x3
+#define DIO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS__SHIFT 0x4
+#define DIO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS__SHIFT 0x5
+#define DIO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS__SHIFT 0x6
+#define DIO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS__SHIFT 0xb
+#define DIO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS__SHIFT 0xc
+#define DIO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS__SHIFT 0xd
+#define DIO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS__SHIFT 0xe
+#define DIO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS__SHIFT 0xf
+#define DIO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS__SHIFT 0x10
+#define DIO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS_MASK 0x00000001L
+#define DIO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS_MASK 0x00000002L
+#define DIO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS_MASK 0x00000004L
+#define DIO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS_MASK 0x00000008L
+#define DIO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS_MASK 0x00000010L
+#define DIO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS_MASK 0x00000020L
+#define DIO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS_MASK 0x00000040L
+#define DIO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS_MASK 0x00000800L
+#define DIO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS_MASK 0x00001000L
+#define DIO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS_MASK 0x00002000L
+#define DIO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS_MASK 0x00004000L
+#define DIO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS_MASK 0x00008000L
+#define DIO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS_MASK 0x00010000L
+//DIO_HDMI_RXSTATUS_TIMER_CONTROL
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE__SHIFT 0x0
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE__SHIFT 0x4
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS__SHIFT 0x8
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK__SHIFT 0xc
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL__SHIFT 0x10
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE_MASK 0x00000001L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE_MASK 0x00000010L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS_MASK 0x00000100L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK_MASK 0x00001000L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK 0x0FFF0000L
+//DIO_GENERIC_INTERRUPT_MESSAGE
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_STATUS__SHIFT 0x0
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_STATUS_MASK 0x00000001L
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_MESSAGE_MASK 0xFFFFFFFEL
+//DIO_GENERIC_INTERRUPT_CLEAR
+#define DIO_GENERIC_INTERRUPT_CLEAR__DIO_GENERIC_INTERRUPT_CLEAR__SHIFT 0x0
+#define DIO_GENERIC_INTERRUPT_CLEAR__DIO_GENERIC_INTERRUPT_CLEAR_MASK 0x00000001L
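+
+/*
+ * Illustrative sketch only, not part of the generated header: every field
+ * above is described by a __SHIFT/_MASK pair, and a field update is a
+ * read-modify-write of the whole register.  read_reg()/write_reg() and the
+ * mmDIO_HDMI_RXSTATUS_TIMER_CONTROL offset are hypothetical stand-ins for
+ * the driver's real accessors; uint32_t assumes the usual kernel types.
+ */
+static inline void dio_set_hdmi_rxstatus_timer_interval(uint32_t interval)
+{
+	/* Clear the 12-bit INTERVAL field (bits 27:16), then insert the new
+	 * value at its shift and write the register back. */
+	uint32_t v = read_reg(mmDIO_HDMI_RXSTATUS_TIMER_CONTROL);
+
+	v &= ~DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK;
+	v |= (interval << DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL__SHIFT) &
+	     DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK;
+	write_reg(mmDIO_HDMI_RXSTATUS_TIMER_CONTROL, v);
+}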
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+//HPD0_DC_HPD_INT_STATUS
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD0_DC_HPD_INT_CONTROL
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD0_DC_HPD_CONTROL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD0_DC_HPD_FAST_TRAIN_CNTL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD0_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
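+
+/*
+ * Illustrative sketch only, not part of the generated header: interrupt
+ * bits for the HPD0 pin are acked/unmasked by OR-ing the _MASK values into
+ * HPD0_DC_HPD_INT_CONTROL (the pin itself is enabled separately via
+ * DC_HPD_EN in HPD0_DC_HPD_CONTROL).
+ */
+static inline uint32_t hpd0_int_control_arm(uint32_t int_control)
+{
+	/* Ack any latched interrupt, then enable the long-pulse and the
+	 * RX short-pulse interrupt sources. */
+	int_control |= HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK;
+	int_control |= HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK;
+	int_control |= HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK;
+	return int_control;
+}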
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+//HPD1_DC_HPD_INT_STATUS
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD1_DC_HPD_INT_CONTROL
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD1_DC_HPD_CONTROL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD1_DC_HPD_FAST_TRAIN_CNTL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD1_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+//DP_AUX0_AUX_CONTROL
+#define DP_AUX0_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX0_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX0_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX0_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX0_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX0_AUX_SW_CONTROL
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX0_AUX_ARB_CONTROL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX0_AUX_INTERRUPT_CONTROL
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+//DP_AUX0_AUX_SW_STATUS
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX0_AUX_LS_STATUS
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX0_AUX_SW_DATA
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX0_AUX_LS_DATA
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX0_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX0_AUX_DPHY_TX_CONTROL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX0_AUX_DPHY_RX_CONTROL0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX0_AUX_DPHY_RX_CONTROL1
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX0_AUX_DPHY_TX_STATUS
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX0_AUX_DPHY_RX_STATUS
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
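+
+/*
+ * Illustrative sketch only, not part of the generated header: a software
+ * AUX transfer on this block is typically started by programming
+ * AUX_SW_WR_BYTES and AUX_SW_GO in DP_AUX0_AUX_SW_CONTROL, then polling
+ * AUX_SW_DONE in DP_AUX0_AUX_SW_STATUS.  read_reg()/write_reg() and the
+ * mm* offsets are hypothetical accessors.
+ */
+static inline uint32_t dp_aux0_start_and_wait(uint32_t wr_bytes)
+{
+	uint32_t ctl = 0, status;
+
+	ctl |= (wr_bytes << DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT) &
+	       DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK;
+	ctl |= DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO_MASK;
+	write_reg(mmDP_AUX0_AUX_SW_CONTROL, ctl);
+
+	do {
+		status = read_reg(mmDP_AUX0_AUX_SW_STATUS);
+	} while (!(status & DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE_MASK));
+
+	/* The reply length is reported in AUX_SW_REPLY_BYTE_COUNT. */
+	return (status & DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK) >>
+	       DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT;
+}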
+
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+//DP_AUX1_AUX_CONTROL
+#define DP_AUX1_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX1_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX1_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX1_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX1_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX1_AUX_SW_CONTROL
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX1_AUX_ARB_CONTROL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX1_AUX_INTERRUPT_CONTROL
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+//DP_AUX1_AUX_SW_STATUS
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX1_AUX_LS_STATUS
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX1_AUX_SW_DATA
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX1_AUX_LS_DATA
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX1_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX1_AUX_DPHY_TX_CONTROL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX1_AUX_DPHY_RX_CONTROL0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX1_AUX_DPHY_RX_CONTROL1
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX1_AUX_DPHY_TX_STATUS
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX1_AUX_DPHY_RX_STATUS
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+//DIG0_DIG_FE_CNTL
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG0_DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13
+#define DIG0_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG0_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG0_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG0_DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L
+#define DIG0_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG0_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L
+#define DIG0_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L
+//DIG0_DIG_OUTPUT_CRC_CNTL
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG0_DIG_OUTPUT_CRC_RESULT
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG0_DIG_CLOCK_PATTERN
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG0_DIG_TEST_PATTERN
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG0_DIG_RANDOM_PATTERN_SEED
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG0_DIG_FIFO_STATUS
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
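+
+/*
+ * Illustrative sketch only, not part of the generated header: status
+ * fields are read back by masking and shifting, the inverse of how they
+ * are written.
+ */
+static inline uint32_t dig0_fifo_max_level(uint32_t fifo_status)
+{
+	/* Extract the 5-bit DIG_FIFO_MAXIMUM_LEVEL field (bits 20:16). */
+	return (fifo_status & DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK) >>
+	       DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT;
+}
+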
+//DIG0_HDMI_METADATA_PACKET_CONTROL
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_CONTROL
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG0_HDMI_STATUS
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG0_HDMI_AUDIO_PACKET_CONTROL
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+//DIG0_HDMI_ACR_PACKET_CONTROL
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG0_HDMI_VBI_PACKET_CONTROL
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+//DIG0_HDMI_INFOFRAME_CONTROL0
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG0_HDMI_INFOFRAME_CONTROL1
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+//DIG0_HDMI_GC
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG0_AFMT_AUDIO_PACKET_CONTROL2
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//DIG0_AFMT_ISRC1_0
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+//DIG0_AFMT_ISRC1_1
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_2
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_3
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_4
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_0
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_1
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_2
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_3
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_DB_CONTROL
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG0_DME_CONTROL
+#define DIG0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DIG0_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DIG0_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DIG0_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DIG0_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DIG0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DIG0_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DIG0_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DIG0_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DIG0_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+//DIG0_AFMT_MPEG_INFO0
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L
+//DIG0_AFMT_MPEG_INFO1
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+//DIG0_AFMT_GENERIC_HDR
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_0
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_1
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_2
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_3
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_4
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_5
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_6
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_7
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_ACR_32_0
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_32_1
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_44_0
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_44_1
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_48_0
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_48_1
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_STATUS_0
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_STATUS_1
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
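/*
 * A minimal sketch, separate from the generated defines above, of what the
 * HDMI_ACR_{32,44,48}_{0,1} pairs carry: the Audio Clock Regeneration N and
 * CTS values for the 32 kHz, 44.1 kHz and 48 kHz sample-rate families.  Per
 * the HDMI spec the sink recovers the audio clock from
 * 128 * fs = f_TMDS * N / CTS, so CTS can be derived from N and the TMDS
 * clock.  The helper below is illustrative only, not the driver's own code.
 */
#include <stdint.h>
#include <stdio.h>

/* CTS = f_TMDS * N / (128 * fs); 64-bit math avoids overflow. */
static uint32_t hdmi_acr_cts(uint64_t tmds_clock_hz, uint32_t n, uint32_t fs_hz)
{
	return (uint32_t)(tmds_clock_hz * n / (128ull * fs_hz));
}

int main(void)
{
	/* N = 6144 is the spec-recommended value for 48 kHz audio. */
	printf("CTS @ 148.5 MHz TMDS: %u\n",
	       hdmi_acr_cts(148500000ull, 6144, 48000));	/* prints 148500 */
	return 0;
}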
+//DIG0_AFMT_AUDIO_INFO0
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//DIG0_AFMT_AUDIO_INFO1
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//DIG0_AFMT_60958_0
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//DIG0_AFMT_60958_1
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//DIG0_AFMT_AUDIO_CRC_CONTROL
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG0_AFMT_RAMP_CONTROL0
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG0_AFMT_RAMP_CONTROL1
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG0_AFMT_RAMP_CONTROL2
+#define DIG0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG0_AFMT_RAMP_CONTROL3
+#define DIG0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG0_AFMT_60958_2
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG0_AFMT_AUDIO_CRC_RESULT
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG0_AFMT_STATUS
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG0_AFMT_AUDIO_PACKET_CONTROL
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG0_AFMT_VBI_PACKET_CONTROL
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG0_AFMT_INFOFRAME_CONTROL0
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG0_AFMT_AUDIO_SRC_CONTROL
+#define DIG0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG0_DIG_BE_CNTL
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG0_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG0_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG0_DIG_BE_EN_CNTL
+#define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+//DIG0_TMDS_CNTL
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG0_TMDS_CONTROL_CHAR
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG0_TMDS_CONTROL0_FEEDBACK
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG0_TMDS_STEREOSYNC_CTL_SEL
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG0_TMDS_CTL_BITS
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG0_TMDS_DCBALANCER_CONTROL
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG0_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG0_TMDS_CTL0_1_GEN_CNTL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG0_TMDS_CTL2_3_GEN_CNTL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG0_DIG_VERSION
+#define DIG0_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG0_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG0_DIG_LANE_ENABLE
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG0_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG0_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+//DIG0_AFMT_CNTL
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG0_AFMT_VBI_PACKET_CONTROL1
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
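/*
 * A minimal sketch, separate from the generated defines above, of how the
 * paired *__SHIFT / *_MASK macros are consumed.  The token-pasting helpers
 * below mirror the usual REG_GET_FIELD/REG_SET_FIELD idiom in the amdgpu
 * driver but are illustrative stand-ins, not the driver's own macros; the
 * two defines are copied from this header so the sketch builds standalone.
 */
#include <stdint.h>
#include <stdio.h>

#define DIG0_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_MASK   0x00000001L

/* reg##__##field##... pastes back to the macro names in this header. */
#define REG_GET_FIELD(value, reg, field) \
	(((value) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define REG_SET_FIELD(orig, reg, field, v) \
	(((orig) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(v) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t gc = 0;

	/* Set AVMUTE, then read it back through the mask/shift pair. */
	gc = REG_SET_FIELD(gc, DIG0_HDMI_GC, HDMI_GC_AVMUTE, 1);
	printf("HDMI_GC = 0x%08x AVMUTE = %ld\n", (unsigned)gc,
	       (long)REG_GET_FIELD(gc, DIG0_HDMI_GC, HDMI_GC_AVMUTE));
	return 0;
}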
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+//DP0_DP_LINK_CNTL
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP0_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP0_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP0_DP_PIXEL_FORMAT
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP0_DP_MSA_COLORIMETRY
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP0_DP_CONFIG
+#define DP0_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP0_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP0_DP_VID_STREAM_CNTL
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP0_DP_STEER_FIFO
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP0_DP_MSA_MISC
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP0_DP_VID_TIMING
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP0_DP_VID_N
+#define DP0_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP0_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP0_DP_VID_M
+#define DP0_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP0_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
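/*
 * A minimal sketch of the relationship the DP_VID_M / DP_VID_N pair above
 * encodes: the 24-bit Mvid/Nvid timestamp values carried in the DisplayPort
 * Main Stream Attributes.  In synchronous-clock mode the sink reconstructs
 * stream_clock = Mvid / Nvid * link_symbol_clock (DP_VID_M_N_GEN_EN in
 * DP_VID_TIMING above lets the hardware generate the pair itself).  The
 * values below are illustrative, not driver defaults.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t dp_stream_clock_hz(uint32_t m_vid, uint32_t n_vid,
				   uint64_t link_symbol_clock_hz)
{
	return link_symbol_clock_hz * m_vid / n_vid;
}

int main(void)
{
	/* HBR2: 5.4 Gb/s per lane -> 540 MHz symbol clock after 8b/10b. */
	printf("stream clock = %llu Hz\n",	/* prints 148500000 */
	       (unsigned long long)dp_stream_clock_hz(148500, 540000,
						      540000000ull));
	return 0;
}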
+//DP0_DP_LINK_FRAMING_CNTL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP0_DP_HBR2_EYE_PATTERN
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP0_DP_VID_MSA_VBID
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP0_DP_VID_INTERRUPT_CNTL
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP0_DP_DPHY_CNTL
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP0_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP0_DP_DPHY_SYM0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM1
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM2
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP0_DP_DPHY_8B10B_CNTL
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP0_DP_DPHY_PRBS_CNTL
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP0_DP_DPHY_SCRAM_CNTL
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP0_DP_DPHY_CRC_EN
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP0_DP_DPHY_CRC_CNTL
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP0_DP_DPHY_CRC_RESULT
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP0_DP_DPHY_CRC_MST_CNTL
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP0_DP_DPHY_CRC_MST_STATUS
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP0_DP_DPHY_FAST_TRAINING
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP0_DP_DPHY_FAST_TRAINING_STATUS
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP0_DP_SEC_CNTL
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP0_DP_SEC_CNTL1
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING1
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING2
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING3
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING4
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP0_DP_SEC_AUD_N
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_N_READBACK
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_M
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_M_READBACK
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP0_DP_SEC_TIMESTAMP
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP0_DP_SEC_PACKET_CNTL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP0_DP_MSE_RATE_CNTL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP0_DP_CP_MSE_STATUS
+//DP0_DP_MSE_RATE_UPDATE
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP0_DP_MSE_SAT0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP0_DP_MSE_SAT1
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP0_DP_MSE_SAT2
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP0_DP_MSE_SAT_UPDATE
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP0_DP_MSE_LINK_TIMING
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP0_DP_MSE_MISC_CNTL
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP0_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP0_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP0_DP_MSE_SAT0_STATUS
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP0_DP_MSE_SAT1_STATUS
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP0_DP_MSE_SAT2_STATUS
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP0_DP_MSA_TIMING_PARAM1
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP0_DP_MSA_TIMING_PARAM2
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP0_DP_MSA_TIMING_PARAM3
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP0_DP_MSA_TIMING_PARAM4
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP0_DP_DSC_CNTL
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP0_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L
+#define DP0_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//DP0_DP_SEC_CNTL2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L
+//DP0_DP_SEC_CNTL3
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL4
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL5
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL6
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+//DP0_DP_SEC_CNTL7
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP0_DP_DB_CNTL
+#define DP0_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP0_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP0_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP0_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP0_DP_MSA_VBID_MISC
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_METADATA_TRANSMISSION
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP0_DP_DSC_BYTES_PER_PIXEL
+#define DP0_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define DP0_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+//DIG1_DIG_FE_CNTL
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG1_DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13
+#define DIG1_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG1_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG1_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG1_DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L
+#define DIG1_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG1_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L
+#define DIG1_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L
+//DIG1_DIG_OUTPUT_CRC_CNTL
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG1_DIG_OUTPUT_CRC_RESULT
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG1_DIG_CLOCK_PATTERN
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG1_DIG_TEST_PATTERN
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG1_DIG_RANDOM_PATTERN_SEED
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG1_DIG_FIFO_STATUS
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG1_HDMI_METADATA_PACKET_CONTROL
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_CONTROL
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG1_HDMI_STATUS
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG1_HDMI_AUDIO_PACKET_CONTROL
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+//DIG1_HDMI_ACR_PACKET_CONTROL
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG1_HDMI_VBI_PACKET_CONTROL
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+//DIG1_HDMI_INFOFRAME_CONTROL0
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG1_HDMI_INFOFRAME_CONTROL1
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+//DIG1_HDMI_GC
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG1_AFMT_AUDIO_PACKET_CONTROL2
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//DIG1_AFMT_ISRC1_0
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+//DIG1_AFMT_ISRC1_1
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L
+//DIG1_AFMT_ISRC1_2
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L
+//DIG1_AFMT_ISRC1_3
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L
+//DIG1_AFMT_ISRC1_4
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_0
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_1
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_2
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_3
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_DB_CONTROL
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG1_DME_CONTROL
+#define DIG1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DIG1_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DIG1_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DIG1_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DIG1_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DIG1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DIG1_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DIG1_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DIG1_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DIG1_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+//DIG1_AFMT_MPEG_INFO0
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L
+//DIG1_AFMT_MPEG_INFO1
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+//DIG1_AFMT_GENERIC_HDR
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_0
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_1
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_2
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_3
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_4
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_5
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_6
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_7
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_ACR_32_0
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_32_1
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_44_0
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_44_1
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_48_0
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_48_1
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_STATUS_0
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_STATUS_1
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG1_AFMT_AUDIO_INFO0
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//DIG1_AFMT_AUDIO_INFO1
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//DIG1_AFMT_60958_0
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//DIG1_AFMT_60958_1
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//DIG1_AFMT_AUDIO_CRC_CONTROL
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG1_AFMT_RAMP_CONTROL0
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG1_AFMT_RAMP_CONTROL1
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG1_AFMT_RAMP_CONTROL2
+#define DIG1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG1_AFMT_RAMP_CONTROL3
+#define DIG1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG1_AFMT_60958_2
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG1_AFMT_AUDIO_CRC_RESULT
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG1_AFMT_STATUS
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG1_AFMT_AUDIO_PACKET_CONTROL
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG1_AFMT_VBI_PACKET_CONTROL
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG1_AFMT_INFOFRAME_CONTROL0
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG1_AFMT_AUDIO_SRC_CONTROL
+#define DIG1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG1_DIG_BE_CNTL
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG1_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG1_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG1_DIG_BE_EN_CNTL
+#define DIG1_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG1_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+//DIG1_TMDS_CNTL
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG1_TMDS_CONTROL_CHAR
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG1_TMDS_CONTROL0_FEEDBACK
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG1_TMDS_STEREOSYNC_CTL_SEL
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+
+//DIG1_TMDS_CTL_BITS
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG1_TMDS_DCBALANCER_CONTROL
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG1_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG1_TMDS_CTL0_1_GEN_CNTL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG1_TMDS_CTL2_3_GEN_CNTL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+
+//DIG1_DIG_VERSION
+#define DIG1_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG1_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG1_DIG_LANE_ENABLE
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG1_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG1_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+
+//DIG1_AFMT_CNTL
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG1_AFMT_VBI_PACKET_CONTROL1
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+
+
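+/*
+ * Illustrative sketch only -- not part of the original register header.
+ * Every field above is described by a __SHIFT/_MASK macro pair; a caller
+ * composes a read-modify-write of a single field by clearing the masked
+ * bits and OR-ing in the shifted value.  The helper name below is
+ * hypothetical; only the DIG1_DIG_BE_EN_CNTL macros defined above are real.
+ */
+static inline unsigned int dig1_dig_be_en_cntl_set_enable(unsigned int reg,
+							   unsigned int en)
+{
+	reg &= ~DIG1_DIG_BE_EN_CNTL__DIG_ENABLE_MASK;
+	reg |= (en << DIG1_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT) &
+	       DIG1_DIG_BE_EN_CNTL__DIG_ENABLE_MASK;
+	return reg;
+}
+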
+// addressBlock: dce_dc_dio_dp1_dispdec
+//DP1_DP_LINK_CNTL
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP1_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP1_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP1_DP_PIXEL_FORMAT
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP1_DP_MSA_COLORIMETRY
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP1_DP_CONFIG
+#define DP1_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP1_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP1_DP_VID_STREAM_CNTL
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP1_DP_STEER_FIFO
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP1_DP_MSA_MISC
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP1_DP_VID_TIMING
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP1_DP_VID_N
+#define DP1_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP1_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP1_DP_VID_M
+#define DP1_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP1_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP1_DP_LINK_FRAMING_CNTL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP1_DP_HBR2_EYE_PATTERN
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP1_DP_VID_MSA_VBID
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP1_DP_VID_INTERRUPT_CNTL
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP1_DP_DPHY_CNTL
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP1_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP1_DP_DPHY_SYM0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM1
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM2
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP1_DP_DPHY_8B10B_CNTL
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP1_DP_DPHY_PRBS_CNTL
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP1_DP_DPHY_SCRAM_CNTL
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP1_DP_DPHY_CRC_EN
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP1_DP_DPHY_CRC_CNTL
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP1_DP_DPHY_CRC_RESULT
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP1_DP_DPHY_CRC_MST_CNTL
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP1_DP_DPHY_CRC_MST_STATUS
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP1_DP_DPHY_FAST_TRAINING
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP1_DP_DPHY_FAST_TRAINING_STATUS
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP1_DP_SEC_CNTL
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP1_DP_SEC_CNTL1
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING1
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING2
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING3
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING4
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP1_DP_SEC_AUD_N
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_N_READBACK
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M_READBACK
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_TIMESTAMP
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP1_DP_SEC_PACKET_CNTL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP1_DP_MSE_RATE_CNTL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP1_DP_CP_MSE_STATUS
+//DP1_DP_MSE_RATE_UPDATE
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP1_DP_MSE_SAT0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP1_DP_MSE_SAT_UPDATE
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP1_DP_MSE_LINK_TIMING
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP1_DP_MSE_MISC_CNTL
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+
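+/*
+ * Illustrative sketch only -- not part of the original register header.
+ * Read-only fields are extracted with the same pair in reverse: mask
+ * first, then shift down.  DP1_DP_DPHY_CRC_RESULT (defined above) is the
+ * example; the helper name is hypothetical.
+ */
+static inline unsigned int dp1_dphy_crc_result_byte0(unsigned int reg)
+{
+	return (reg & DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK) >>
+	       DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT;
+}
+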
+//DP1_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP1_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP1_DP_MSE_SAT0_STATUS
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1_STATUS
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2_STATUS
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP1_DP_MSA_TIMING_PARAM1
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM2
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM3
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP1_DP_MSA_TIMING_PARAM4
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP1_DP_DSC_CNTL
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP1_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L
+#define DP1_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//DP1_DP_SEC_CNTL2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L
+//DP1_DP_SEC_CNTL3
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL4
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL5
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL6
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+//DP1_DP_SEC_CNTL7
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP1_DP_DB_CNTL
+#define DP1_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP1_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP1_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP1_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP1_DP_MSA_VBID_MISC
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_METADATA_TRANSMISSION
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP1_DP_DSC_BYTES_PER_PIXEL
+#define DP1_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define DP1_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
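
As a usage illustration (not part of the patch): every field above is described by a __SHIFT/_MASK pair, and reads or read-modify-writes are plain shift-and-mask arithmetic. A minimal, self-contained sketch, where get_field/set_field are illustrative helpers rather than the driver's API, programming the GSP1/GSP2 transmission lines of DP1_DP_SEC_CNTL3:

#include <stdint.h>
#include <stdio.h>

/* Values restated from the definitions above. */
#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L

/* Extract one field from a register value. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace one field, leaving all other bits intact. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t cntl3 = 0;

	/* GSP1 on line 42, GSP2 on line 100 (arbitrary example values). */
	cntl3 = set_field(cntl3, DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK,
			  DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT, 42);
	cntl3 = set_field(cntl3, DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK,
			  DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT, 100);

	printf("CNTL3 = 0x%08x, GSP1 line = %u\n", (unsigned)cntl3,
	       (unsigned)get_field(cntl3,
				   DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK,
				   DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT));
	return 0;
}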
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+//DC_GENERICA
+#define DC_GENERICA__GENERICA_EN__SHIFT 0x0
+#define DC_GENERICA__GENERICA_SEL__SHIFT 0x7
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL__SHIFT 0xc
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x10
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x14
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x18
+#define DC_GENERICA__GENERICA_EN_MASK 0x00000001L
+#define DC_GENERICA__GENERICA_SEL_MASK 0x00000F80L
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL_MASK 0x0000F000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL_MASK 0x000F0000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0x00F00000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0x0F000000L
+//UNIPHYA_LINK_CNTL
+#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L
+//UNIPHYA_CHANNEL_XBAR_CNTL
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L
+//UNIPHYB_LINK_CNTL
+#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L
+//UNIPHYB_CHANNEL_XBAR_CNTL
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0xd
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x10
+#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY__SHIFT 0x11
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x00002000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x00010000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY_MASK 0x000E0000L
+//DCIO_CLOCK_CNTL
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS__SHIFT 0x5
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS_MASK 0x00000020L
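
A side note on the naming convention: because each name is built as <REG>__<FIELD>__SHIFT and <REG>__<FIELD>_MASK, a token-pasting macro can derive both names from a (register, field) pair. The FIELD_GET_EX macro below is a sketch of that idiom, not a claim about the driver's actual REG_* helpers:

#include <stdint.h>

/* Restated from the definitions above. */
#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L

/* Build the mask and shift names from the (reg, field) pair. */
#define FIELD_GET_EX(reg, field, val) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

uint32_t decode_audio_strap(uint32_t pinstraps)
{
	/* Yields the 2-bit audio strap value (bits 15:14). */
	return FIELD_GET_EX(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, pinstraps);
}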
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+//DC_GPIO_DDC1_MASK
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x10
+#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x14
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x00100000L
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC1_A
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC1_EN
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC1_Y
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC2_MASK
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x10
+#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x14
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x00100000L
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC2_A
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC2_EN
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC2_Y
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x00000100L
+//DC_GPIO_HPD_MASK
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x0
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK__SHIFT 0x1
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS__SHIFT 0x2
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RX_SEL__SHIFT 0x3
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x4
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x6
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x8
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x9
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0xa
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x10
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x11
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x12
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x14
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x15
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x16
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x18
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x19
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x1a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x1c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x1e
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x00000001L
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK_MASK 0x00000002L
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS_MASK 0x00000004L
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RX_SEL_MASK 0x00000008L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x000000C0L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x00000100L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x00000C00L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x00010000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x000C0000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x00100000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x00C00000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x01000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x0C000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0xC0000000L
+//DC_GPIO_HPD_A
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x0
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x8
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x10
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x18
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x1a
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x1c
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x00000001L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x00000100L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x00010000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x01000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x04000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000L
+//DC_GPIO_HPD_EN
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x0
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI__SHIFT 0x1
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE__SHIFT 0x2
+#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI__SHIFT 0x3
+#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE__SHIFT 0x4
+#define DC_GPIO_HPD_EN__HPD12_SPARE0__SHIFT 0x5
+#define DC_GPIO_HPD_EN__HPD1_SEL0__SHIFT 0x6
+#define DC_GPIO_HPD_EN__RX_HPD_SEL0__SHIFT 0x7
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x8
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI__SHIFT 0x9
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x00000001L
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI_MASK 0x00000002L
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE_MASK 0x00000004L
+#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI_MASK 0x00000008L
+#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE_MASK 0x00000010L
+#define DC_GPIO_HPD_EN__HPD12_SPARE0_MASK 0x00000020L
+#define DC_GPIO_HPD_EN__HPD1_SEL0_MASK 0x00000040L
+#define DC_GPIO_HPD_EN__RX_HPD_SEL0_MASK 0x00000080L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x00000100L
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI_MASK 0x00000200L
+//DC_GPIO_HPD_Y
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x0
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x8
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x00000001L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x00000100L
+//DC_GPIO_PAD_STRENGTH_1
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x18
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x1c
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0x0000000FL
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0x000000F0L
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN_MASK 0x00000F00L
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP_MASK 0x0000F000L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN_MASK 0x000F0000L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP_MASK 0x00F00000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0x0F000000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xF0000000L
+//PHY_AUX_CNTL
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN__SHIFT 0x0
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE__SHIFT 0x1
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL__SHIFT 0x2
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE__SHIFT 0x3
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN__SHIFT 0x4
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN__SHIFT 0x5
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN__SHIFT 0x6
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN__SHIFT 0x7
+#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN__SHIFT 0x8
+#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0x9
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL__SHIFT 0xa
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL__SHIFT 0xc
+#define PHY_AUX_CNTL__AUX_CAL_RESBIASEN__SHIFT 0x17
+#define PHY_AUX_CNTL__AUX_CAL_SPARE__SHIFT 0x18
+#define PHY_AUX_CNTL__AUX_CAL_BIASENTST__SHIFT 0x1c
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN_MASK 0x00000001L
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE_MASK 0x00000002L
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL_MASK 0x00000004L
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE_MASK 0x00000008L
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN_MASK 0x00000010L
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN_MASK 0x00000020L
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN_MASK 0x00000040L
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN_MASK 0x00000080L
+#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN_MASK 0x00000100L
+#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x00000200L
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL_MASK 0x00000C00L
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL_MASK 0x00003000L
+#define PHY_AUX_CNTL__AUX_CAL_RESBIASEN_MASK 0x00800000L
+#define PHY_AUX_CNTL__AUX_CAL_SPARE_MASK 0x03000000L
+#define PHY_AUX_CNTL__AUX_CAL_BIASENTST_MASK 0x70000000L
+//DC_GPIO_AUX_CTRL_1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1__SHIFT 0x7
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9_MASK 0x00000004L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1_MASK 0x00000008L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9_MASK 0x00000010L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1_MASK 0x00000020L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9_MASK 0x00000040L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1_MASK 0x00000080L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN_MASK 0x00000800L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE_MASK 0x0000C000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL_MASK 0x00300000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL_MASK 0x40000000L
+//DC_GPIO_AUX_CTRL_2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL_MASK 0x10000000L
+//DC_GPIO_AUX_CTRL_3
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE_MASK 0x00030000L
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE_MASK 0x000C0000L
+//DC_GPIO_AUX_CTRL_4
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL_MASK 0x0000000FL
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL_MASK 0x000000F0L
+//DC_GPIO_AUX_CTRL_5
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL_MASK 0x02000000L
+//AUXI2C_PAD_ALL_PWR_OK
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK__SHIFT 0x0
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK__SHIFT 0x1
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK_MASK 0x00000001L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK_MASK 0x00000002L
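
The DDC and HPD GPIO registers above follow an A/EN/Y triple: _A is the output latch, _EN the output enable, and _Y the sampled pad level, which is what an open-drain I2C (DDC) bit-bang needs. A self-contained sketch with a fake register file standing in for MMIO; the offsets and accessors are made up for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Restated from the definitions above. */
#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x00000001L
#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x00000100L

/* Hypothetical stand-in for MMIO so the sketch runs anywhere. */
static uint32_t fake_regs[2];
enum { REG_DC_GPIO_DDC1_EN, REG_DC_GPIO_DDC1_Y }; /* made-up indices */
static uint32_t read_reg(unsigned r) { return fake_regs[r]; }
static void write_reg(unsigned r, uint32_t v) { fake_regs[r] = v; }

/* Open-drain low: enable the output driver (latch preset to 0). */
static void ddc1_clk_drive_low(void)
{
	write_reg(REG_DC_GPIO_DDC1_EN,
		  read_reg(REG_DC_GPIO_DDC1_EN) |
		  DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK);
}

/* Open-drain release: disable the driver; the pull-up raises the line. */
static void ddc1_clk_release(void)
{
	write_reg(REG_DC_GPIO_DDC1_EN,
		  read_reg(REG_DC_GPIO_DDC1_EN) &
		  ~DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK);
}

/* Sample the DDC1 data pad through the _Y register. */
static bool ddc1_data_high(void)
{
	return read_reg(REG_DC_GPIO_DDC1_Y) &
	       DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK;
}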
+
+
+// addressBlock: azf0endpoint0_endpointind
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
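
For orientation, these converter format fields line up with the HD Audio stream-format encoding (channels minus one; a 3-bit bits-per-sample code where 1 means 16 bits; base rate 0 meaning 48 kHz); treat that mapping as an assumption drawn from the HDA convention, not something this header states. A sketch packing a 48 kHz, 16-bit stereo format:

#include <stdint.h>

/* Shifts restated from the definitions above. */
#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe

uint32_t stereo_16bit_48khz_format(void)
{
	/* channels-1 = 1, bits-per-sample code 1 (16 bit), base rate 0
	 * (48 kHz); divisor, multiple and stream type stay zero. */
	return (1u << AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT) |
	       (1u << AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT) |
	       (0u << AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT);
}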
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
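The RESPONSE_CONFIGURATION_DEFAULT fields above (SEQUENCE in 3:0 up through PORT_CONNECTIVITY in 31:30) follow the HD Audio "Configuration Default" dword layout. A small standalone decode sketch is shown below; the helper macro and function are hypothetical, with the shift/mask constants taken directly from the defines above.

    #include <stdint.h>
    #include <stdio.h>

    /* Generic extract: mask out the field, then shift it down to bit 0. */
    #define CFG_FIELD(val, shift, mask) (((val) & (mask)) >> (shift))

    static void decode_config_default(uint32_t v)
    {
            /* Shifts/masks match the CONFIGURATION_DEFAULT defines above. */
            printf("sequence=%u association=%u device=%u location=%u port=%u\n",
                   (unsigned)CFG_FIELD(v, 0x0,  0x0000000FL),
                   (unsigned)CFG_FIELD(v, 0x4,  0x000000F0L),
                   (unsigned)CFG_FIELD(v, 0x14, 0x00F00000L),
                   (unsigned)CFG_FIELD(v, 0x18, 0x3F000000L),
                   (unsigned)CFG_FIELD(v, 0x1e, 0xC0000000L));
    }
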
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
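The `<REG>__<FIELD>__SHIFT` / `<REG>__<FIELD>_MASK` naming convention used throughout these headers exists so that token-pasting accessors can be built from a register name and a field name; amdgpu defines equivalents named REG_GET_FIELD()/REG_SET_FIELD() on this convention. A standalone sketch of such accessors, not the kernel's own code:

    /*
     * Token-pasting field accessors built from the naming convention of
     * this header. Standalone illustration; amdgpu's REG_GET_FIELD /
     * REG_SET_FIELD helpers follow the same pattern.
     */
    #define FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
    #define FIELD_MASK(reg, field)  reg##__##field##_MASK

    /* Read one field out of a 32-bit register value. */
    #define GET_FIELD(val, reg, field) \
            (((val) & FIELD_MASK(reg, field)) >> FIELD_SHIFT(reg, field))

    /* Rewrite one field, leaving all other bits untouched. */
    #define SET_FIELD(orig, reg, field, fval)                       \
            (((orig) & ~FIELD_MASK(reg, field)) |                   \
             (((fval) << FIELD_SHIFT(reg, field)) & FIELD_MASK(reg, field)))

    /*
     * e.g. enable HBR on endpoint 0 without disturbing other bits:
     *   v = SET_FIELD(v,
     *           AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR,
     *           HBR_ENABLE, 1);
     */
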
+
+
+// addressBlock: azf0endpoint1_endpointind
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
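Every register described in this header follows one pattern: each field gets a __SHIFT/_MASK pair, read as (reg & MASK) >> SHIFT and written back with the inverse. A minimal sketch in plain C, using the HBR_ENABLE values from the AZF0ENDPOINT1 RESPONSE_HBR pair above (the get_field/set_field helpers and the sample register value are illustrative, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Values copied from the AZF0ENDPOINT1_..._RESPONSE_HBR__HBR_ENABLE pair above. */
#define HBR_ENABLE__SHIFT 0x4
#define HBR_ENABLE_MASK   0x00000010L

/* Read a field: mask it out, then shift it down to bit 0. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Write a field: clear its bits, then OR in the shifted new value. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;	/* hypothetical register contents */

	reg = set_field(reg, HBR_ENABLE_MASK, HBR_ENABLE__SHIFT, 1);
	printf("HBR_ENABLE = %u\n",
	       get_field(reg, HBR_ENABLE_MASK, HBR_ENABLE__SHIFT));
	return 0;
}

amdgpu's own REG_GET_FIELD/REG_SET_FIELD macros have the same mask-then-shift shape; the pairs above are what those macros paste together.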
+// addressBlock: azf0inputendpoint0_inputendpointind
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
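Within MULTICHANNEL_ENABLE the layout repeats one byte-wide lane per channel pair (ENABLE at lane bit 0, MUTE at lane bit 1, CHANNEL_ID at lane bits 7:4), so the per-channel constants above differ only by an 8-bit stride. A short sketch, assuming that regular layout (the lane_* helpers are illustrative, not from this patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* One byte-wide lane per channel pair: ENABLE at lane bit 0,
 * MUTE at lane bit 1, CHANNEL_ID at lane bits 7:4. */
static uint32_t lane_enable_mask(unsigned int lane)  { return 0x01u << (8 * lane); }
static uint32_t lane_mute_mask(unsigned int lane)    { return 0x02u << (8 * lane); }
static uint32_t lane_chan_id_mask(unsigned int lane) { return 0xF0u << (8 * lane); }

int main(void)
{
	/* Lane 2 reproduces the MULTICHANNEL2_* masks listed above:
	 * 0x00010000, 0x00020000, 0x00F00000. */
	printf("0x%08" PRIX32 " 0x%08" PRIX32 " 0x%08" PRIX32 "\n",
	       lane_enable_mask(2), lane_mute_mask(2), lane_chan_id_mask(2));
	return 0;
}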
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+#endif
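
The __SHIFT/_MASK pairs defined above are plain C helpers for 32-bit register fields: a field is read by ANDing the register value with its mask and shifting right, and written with the inverse read-modify-write. A minimal sketch of reading the jack-presence bit from the input pin sense register, assuming hypothetical REG_READ/REG_WRITE accessors standing in for the driver's real MMIO helpers (names are illustrative only, not the actual amdgpu API):

#include <stdint.h>
#include <stdbool.h>

/* Stand-ins for the driver's MMIO accessors; names are illustrative only. */
extern uint32_t REG_READ(uint32_t reg);
extern void REG_WRITE(uint32_t reg, uint32_t val);

/* Extract PRESENCE_DETECT (bit 31) from an already-resolved register offset. */
static bool azalia_input_pin_present(uint32_t reg)
{
	uint32_t val = REG_READ(reg);

	return (val & AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK) >>
	       AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT;
}
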
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
index 312c50ea30f3..f268d33c4744 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
@@ -436,6 +436,8 @@
#define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regDCCG_GATE_DISABLE_CNTL3 0x005a
#define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2
+#define regHDMISTREAMCLK0_DTO_PARAM 0x005b
+#define regHDMISTREAMCLK0_DTO_PARAM_BASE_IDX 2
#define regDCCG_AUDIO_DTBCLK_DTO_PHASE 0x0061
#define regDCCG_AUDIO_DTBCLK_DTO_PHASE_BASE_IDX 2
#define regDCCG_AUDIO_DTBCLK_DTO_MODULO 0x0062
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
index a9d553ef26c0..1f21f313bd1d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
@@ -1438,6 +1438,14 @@
#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE0_GATE_DISABLE_MASK 0x00200000L
#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE1_GATE_DISABLE_MASK 0x00400000L
#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE1_GATE_DISABLE_MASK 0x00800000L
+//HDMISTREAMCLK0_DTO_PARAM
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE__SHIFT 0x0
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO__SHIFT 0x8
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN__SHIFT 0x10
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE_MASK 0x000000FFL
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO_MASK 0x0000FF00L
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN_MASK 0x00010000L
+
//DCCG_AUDIO_DTBCLK_DTO_PHASE
#define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE__SHIFT 0x0
#define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE_MASK 0xFFFFFFFFL
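
The new HDMISTREAMCLK0_DTO_PARAM fields pair the offset added in dcn_3_1_2_offset.h with the masks above: PHASE and MODULO are 8-bit fields and EN is a single bit. A minimal sketch of a read-modify-write that programs and enables the DTO, reusing the stand-in REG_READ/REG_WRITE accessors from the earlier sketch; the caller is assumed to have already combined regHDMISTREAMCLK0_DTO_PARAM with its BASE_IDX into an absolute offset:

/* Program the HDMI stream clock DTO phase/modulo, then enable it. */
static void hdmistreamclk0_dto_program(uint32_t reg, uint32_t phase, uint32_t modulo)
{
	uint32_t val = REG_READ(reg);

	val &= ~(HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE_MASK |
		 HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO_MASK);
	val |= (phase << HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE__SHIFT) &
	       HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE_MASK;
	val |= (modulo << HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO__SHIFT) &
	       HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO_MASK;
	val |= HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN_MASK;
	REG_WRITE(reg, val);
}
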
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
index bd37aa6b6560..b4b2584bbd66 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -77,4 +77,9 @@
#define smnDF_CS_UMC_AON0_DramBaseAddress0 0x1c110UL
#define smnDF_CS_UMC_AON0_DramLimitAddress0 0x1c114UL
+#define mmDF_CS_UMC_AON0_HardwareAssertMaskLow 0x067e
+#define mmDF_CS_UMC_AON0_HardwareAssertMaskLow_BASE_IDX 0
+#define mmDF_NCS_PG0_HardwareAssertMaskHigh 0x067f
+#define mmDF_NCS_PG0_HardwareAssertMaskHigh_BASE_IDX 0
+
#endif
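
The two mm-prefixed offsets above expose the DF hardware-assert mask registers whose per-bit fields are added to df_3_6_sh_mask.h below; each HWAssertMsk bit gates one assertion source. A minimal sketch of toggling a single bit, again with the stand-in accessors (whether a set bit masks or unmasks the assertion is defined by the hardware and not assumed here):

/* Set or clear HWAssertMsk0 in the low hardware-assert mask register. */
static void df_set_hw_assert_msk0(uint32_t reg, bool set)
{
	uint32_t val = REG_READ(reg);

	if (set)
		val |= DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK;
	else
		val &= ~DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK;
	REG_WRITE(reg, val);
}
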
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
index f804e13b002e..f45ec6f97ff2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
@@ -62,4 +62,136 @@
#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK 0x00000400L
#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK 0xFFFFF000L
+//DF_CS_UMC_AON0_HardwareAssertMaskLow
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0__SHIFT 0x0
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1__SHIFT 0x1
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2__SHIFT 0x2
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3__SHIFT 0x3
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4__SHIFT 0x4
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5__SHIFT 0x5
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6__SHIFT 0x6
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7__SHIFT 0x7
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8__SHIFT 0x8
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9__SHIFT 0x9
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10__SHIFT 0xa
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11__SHIFT 0xb
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12__SHIFT 0xc
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13__SHIFT 0xd
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14__SHIFT 0xe
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15__SHIFT 0xf
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16__SHIFT 0x10
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17__SHIFT 0x11
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18__SHIFT 0x12
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19__SHIFT 0x13
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20__SHIFT 0x14
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21__SHIFT 0x15
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22__SHIFT 0x16
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23__SHIFT 0x17
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24__SHIFT 0x18
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25__SHIFT 0x19
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26__SHIFT 0x1a
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27__SHIFT 0x1b
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28__SHIFT 0x1c
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29__SHIFT 0x1d
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30__SHIFT 0x1e
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31__SHIFT 0x1f
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK 0x00000001L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1_MASK 0x00000002L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2_MASK 0x00000004L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3_MASK 0x00000008L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4_MASK 0x00000010L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5_MASK 0x00000020L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6_MASK 0x00000040L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7_MASK 0x00000080L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8_MASK 0x00000100L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9_MASK 0x00000200L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10_MASK 0x00000400L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11_MASK 0x00000800L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12_MASK 0x00001000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13_MASK 0x00002000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14_MASK 0x00004000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15_MASK 0x00008000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16_MASK 0x00010000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17_MASK 0x00020000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18_MASK 0x00040000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19_MASK 0x00080000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20_MASK 0x00100000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21_MASK 0x00200000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22_MASK 0x00400000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23_MASK 0x00800000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24_MASK 0x01000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25_MASK 0x02000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26_MASK 0x04000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27_MASK 0x08000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28_MASK 0x10000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29_MASK 0x20000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30_MASK 0x40000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31_MASK 0x80000000L
+
+//DF_NCS_PG0_HardwareAssertMaskHigh
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0__SHIFT 0x0
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1__SHIFT 0x1
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2__SHIFT 0x2
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3__SHIFT 0x3
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4__SHIFT 0x4
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5__SHIFT 0x5
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6__SHIFT 0x6
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7__SHIFT 0x7
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8__SHIFT 0x8
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9__SHIFT 0x9
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10__SHIFT 0xa
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11__SHIFT 0xb
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12__SHIFT 0xc
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13__SHIFT 0xd
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14__SHIFT 0xe
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15__SHIFT 0xf
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16__SHIFT 0x10
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17__SHIFT 0x11
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18__SHIFT 0x12
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19__SHIFT 0x13
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20__SHIFT 0x14
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21__SHIFT 0x15
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22__SHIFT 0x16
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23__SHIFT 0x17
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24__SHIFT 0x18
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25__SHIFT 0x19
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26__SHIFT 0x1a
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27__SHIFT 0x1b
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28__SHIFT 0x1c
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29__SHIFT 0x1d
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30__SHIFT 0x1e
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31__SHIFT 0x1f
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0_MASK 0x00000001L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1_MASK 0x00000002L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2_MASK 0x00000004L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3_MASK 0x00000008L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4_MASK 0x00000010L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5_MASK 0x00000020L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6_MASK 0x00000040L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7_MASK 0x00000080L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8_MASK 0x00000100L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9_MASK 0x00000200L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10_MASK 0x00000400L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11_MASK 0x00000800L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12_MASK 0x00001000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13_MASK 0x00002000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14_MASK 0x00004000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15_MASK 0x00008000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16_MASK 0x00010000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17_MASK 0x00020000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18_MASK 0x00040000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19_MASK 0x00080000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20_MASK 0x00100000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21_MASK 0x00200000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22_MASK 0x00400000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23_MASK 0x00800000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24_MASK 0x01000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25_MASK 0x02000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26_MASK 0x04000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27_MASK 0x08000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28_MASK 0x10000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29_MASK 0x20000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30_MASK 0x40000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31_MASK 0x80000000L
+
#endif
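Each HWAssertMsk bit above comes as a SHIFT/MASK pair, so enabling or disabling one assert mask is a plain read-modify-write. A sketch assuming generic MMIO accessors; set_hw_assert_msk0(), rreg and wreg are hypothetical (the driver wraps this pattern in macros such as REG_SET_FIELD):

/*
 * Read-modify-write of a single field using the SHIFT/MASK pair from
 * the hunk above. rreg/wreg stand in for the driver's MMIO accessors.
 */
#include <stdint.h>

static inline void set_hw_assert_msk0(uint32_t (*rreg)(uint32_t),
				      void (*wreg)(uint32_t, uint32_t),
				      uint32_t offset, uint32_t enable)
{
	uint32_t v = rreg(offset);

	v &= ~DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK;
	v |= (enable << DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0__SHIFT) &
	     DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK;
	wreg(offset, v);
}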
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h
new file mode 100755
index 000000000000..3c2f270fb3bb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _dpcs_2_0_3_OFFSET_HEADER
+#define _dpcs_2_0_3_OFFSET_HEADER
+// addressBlock: dpcssysa_dpcs0_dpcstx0_dispdec
+// base address: 0x0
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL 0x2928
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_TX_CNTL 0x2929
+#define mmDPCSTX0_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL 0x292a
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL 0x292b
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR 0x292c
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssysa_dpcs0_rdpcstx0_dispdec
+// base address: 0x0
+#define mmRDPCSTX0_RDPCSTX_CNTL 0x2930
+#define mmRDPCSTX0_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL 0x2931
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL 0x2932
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA 0x2933
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR 0x2934
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA 0x2935
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_SCRATCH 0x2939
+#define mmRDPCSTX0_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2 0x2942
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3 0x2943
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4 0x2944
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5 0x2945
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6 0x2946
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7 0x2947
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8 0x2948
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9 0x2949
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10 0x294a
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11 0x294b
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12 0x294c
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13 0x294d
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14 0x294e
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+
+
+// addressBlock: dpcssysa_dpcs0_dpcstx1_dispdec
+// base address: 0x360
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL 0x2a00
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_TX_CNTL 0x2a01
+#define mmDPCSTX1_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL 0x2a02
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL 0x2a03
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR 0x2a04
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssysa_dpcs0_rdpcstx1_dispdec
+// base address: 0x360
+#define mmRDPCSTX1_RDPCSTX_CNTL 0x2a08
+#define mmRDPCSTX1_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL 0x2a09
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL 0x2a0a
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA 0x2a0b
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR 0x2a0c
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA 0x2a0d
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_SCRATCH 0x2a11
+#define mmRDPCSTX1_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2 0x2a1a
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3 0x2a1b
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4 0x2a1c
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5 0x2a1d
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6 0x2a1e
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7 0x2a1f
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8 0x2a20
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9 0x2a21
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10 0x2a22
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11 0x2a23
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12 0x2a24
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13 0x2a25
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14 0x2a26
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+
+#endif
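For the two instances defined above, every DPCSTX1 offset sits a fixed 0xd8 dwords past its DPCSTX0 counterpart, which matches the "base address: 0x360" comments once the dword offsets are scaled to bytes (0xd8 * 4 = 0x360). A sketch of per-instance addressing built only from that observation; dpcstx_reg() is a hypothetical helper, not driver code:

/*
 * Per-instance register addressing derived from the offsets above:
 * instance N's register is the instance-0 offset plus N times the
 * instance stride. Valid for the two instances defined in this file.
 */
#include <stdint.h>

#define DPCSTX_INST_STRIDE \
	(mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL - mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL)

static inline uint32_t dpcstx_reg(uint32_t inst0_reg, uint32_t instance)
{
	return inst0_reg + instance * DPCSTX_INST_STRIDE;
}

For example, dpcstx_reg(mmDPCSTX0_DPCSTX_TX_CNTL, 1) yields 0x2a01, i.e. mmDPCSTX1_DPCSTX_TX_CNTL.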
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h
new file mode 100755
index 000000000000..a6d076530117
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h
@@ -0,0 +1,952 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _dpcs_2_0_3_SH_MASK_HEADER
+#define _dpcs_2_0_3_SH_MASK_HEADER
+// addressBlock: dpcssysa_dpcs0_dpcstx0_dispdec
+//DPCSTX0_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX0_DPCSTX_TX_CNTL
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_CBUS_CNTL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX0_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX__SHIFT 0x18
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX_MASK 0xFF000000L
+//DPCSTX0_DPCSTX_TEST_DEBUG_DATA
+#define DPCSTX0_DPCSTX_TEST_DEBUG_DATA__DPCS_TEST_DEBUG_DATA__SHIFT 0x0
+#define DPCSTX0_DPCSTX_TEST_DEBUG_DATA__DPCS_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dpcssysa_dpcs0_rdpcstx0_dispdec
+//RDPCSTX0_RDPCSTX_CNTL
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX0_RDPCS_TX_CR_ADDR
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_CR_DATA
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCSTX_SCRATCH
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX0_RDPCSTX_PHY_CNTL2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX0_RDPCSTX_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x0000000CL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x000000E0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00E00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x03000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x0000000CL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE_MASK 0x000000E0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE_MASK 0x00E00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x03000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX0_RDPCSTX_PHY_CNTL13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE1
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE2
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE3
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+
+
+// addressBlock: dpcssysa_dpcssys_cr0_dispdec
+//DPCSSYS_CR0_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR0_DPCSSYS_CR_DATA
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssysa_dpcs0_dpcstx1_dispdec
+//DPCSTX1_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX1_DPCSTX_TX_CNTL
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_CBUS_CNTL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX1_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX1_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+// addressBlock: dpcssysa_dpcs0_rdpcstx1_dispdec
+//RDPCSTX1_RDPCSTX_CNTL
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX1_RDPCS_TX_CR_ADDR
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_CR_DATA
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX1_RDPCSTX_SCRATCH
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX1_RDPCSTX_PHY_CNTL2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX1_RDPCSTX_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x0000000CL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x000000E0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x000C0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00E00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x03000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x10000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x0000000CL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE_MASK 0x000000E0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x000C0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE_MASK 0x00E00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x03000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x10000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX1_RDPCSTX_PHY_CNTL13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+
+#endif
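
These generated __SHIFT/_MASK pairs follow the usual SoC15 header pattern: the mask isolates a field inside a 32-bit register and the shift moves it down to bit 0. A minimal sketch of how such a pair is consumed, using hand-rolled helpers for illustration rather than amdgpu's actual REG_GET_FIELD()/REG_SET_FIELD() macros (which token-paste the register and field names):

#include <stdint.h>

/* Hypothetical helpers, for illustration only. */
static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;	/* isolate the field, normalize to bit 0 */
}

static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);	/* replace the field */
}

/* e.g. pull the 4-bit FIFO read-start delay out of a DPCSTX1_DPCSTX_TX_CNTL value: */
static uint32_t tx_fifo_rd_start_delay(uint32_t tx_cntl)
{
	return get_field(tx_cntl,
			 DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK,
			 DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT);
}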
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h
new file mode 100644
index 000000000000..b7d3d0df3260
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mp_11_0_8_SH_MASK_HEADER
+#define _mp_11_0_8_SH_MASK_HEADER
+
+#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
+#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
+
+#endif
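
Every MP0/MP1 C2PMSG_* register above is a full-width scratch word (CONTENT spans all 32 bits), consistent with these registers serving as a simple message/argument mailbox between the host driver and the PSP/SMU firmware. A heavily hedged sketch of the polling pattern such registers typically serve — read_mp1_reg() and the offset constant are placeholders, since this header defines only field layouts, not addresses:

#include <stdint.h>

extern uint32_t read_mp1_reg(uint32_t offset);	/* hypothetical accessor */
#define MP1_C2PMSG_90_OFFSET 0x0	/* placeholder; real offset is per-ASIC */

/* Spin until the firmware writes a non-zero response word. */
static uint32_t wait_for_smu_response(void)
{
	uint32_t resp;

	do {
		resp = read_mp1_reg(MP1_C2PMSG_90_OFFSET) &
		       MP1_SMN_C2PMSG_90__CONTENT_MASK;
	} while (resp == 0);

	return resp;
}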
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 6a505d1b82a5..da895d1f3b4f 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -7148,7 +7148,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
#else // not __cplusplus
-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (offsetof(ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES, FieldName) / sizeof(USHORT))
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
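
The hunk above retires the classic null-pointer offsetof emulation. Taking &((T *)0)->field is undefined behavior in standard C even though it usually "works", and modern compilers may warn on it; offsetof() from <stddef.h> computes the same byte offset with defined semantics. A small standalone illustration of the index arithmetic (the demo struct is invented for the example):

#include <stddef.h>
#include <stdio.h>

struct demo {
	unsigned short a;
	unsigned short b;
	unsigned short c;
};

int main(void)
{
	/* Index of 'c' among the USHORT-sized slots -- prints 2. */
	printf("%zu\n", offsetof(struct demo, c) / sizeof(unsigned short));
	return 0;
}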
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 44955458fe38..7bd763361d6e 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -768,6 +768,10 @@ enum atom_encoder_caps_def
ATOM_ENCODER_CAP_RECORD_HBR2_EN =0x02, // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN =0x04, // HDMI2.0 6Gbps enable or not.
ATOM_ENCODER_CAP_RECORD_HBR3_EN =0x08, // DP1.3 HBR3 is supported by board.
+ ATOM_ENCODER_CAP_RECORD_DP2 =0x10, // DP2 is supported by ASIC/board.
+ ATOM_ENCODER_CAP_RECORD_UHBR10_EN =0x20, // DP2.0 UHBR10 settings are supported by board
+ ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN =0x40, // DP2.0 UHBR13.5 settings are supported by board
+ ATOM_ENCODER_CAP_RECORD_UHBR20_EN =0x80, // DP2.0 UHBR20 settings are supported by board
ATOM_ENCODER_CAP_RECORD_USB_C_TYPE =0x100, // the DP connector is a USB-C type.
};
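
The new DP2/UHBR capability records are plain bit flags alongside the existing entries, so callers test them with a bitwise AND. A minimal sketch; encoder_caps stands in for wherever the parsed ATOM capability value actually lives:

#include <stdbool.h>
#include <stdint.h>

static bool board_supports_uhbr10(uint32_t encoder_caps)
{
	/* Non-zero when the board is qualified for DP 2.0 UHBR10 link rates. */
	return (encoder_caps & ATOM_ENCODER_CAP_RECORD_UHBR10_EN) != 0;
}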
diff --git a/drivers/gpu/drm/amd/include/soc15_hw_ip.h b/drivers/gpu/drm/amd/include/soc15_hw_ip.h
index 45ca4c921a66..c1519d20596a 100644
--- a/drivers/gpu/drm/amd/include/soc15_hw_ip.h
+++ b/drivers/gpu/drm/amd/include/soc15_hw_ip.h
@@ -80,6 +80,8 @@
#define L1IMU15_HWID 65
#define WAFLC_HWID 66
#define FCH_USB_PD_HWID 67
+#define SDMA2_HWID 68
+#define SDMA3_HWID 69
#define PCIE_HWID 70
#define PCS_HWID 80
#define DDCL_HWID 89
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 249cb0aeb5ae..49fe4155c374 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -310,7 +310,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level level;
- enum amd_dpm_forced_level current_level = 0xff;
+ enum amd_dpm_forced_level current_level;
int ret = 0;
if (amdgpu_in_reset(adev))
@@ -350,6 +350,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
if (pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
+ else
+ current_level = adev->pm.dpm.forced_level;
if (current_level == level) {
pm_runtime_mark_last_busy(ddev->dev);
@@ -2019,15 +2021,15 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
.attr_update = ss_power_attr_update),
AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
@@ -2087,10 +2089,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (asic_type < CHIP_VEGA12)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
- if (!(asic_type == CHIP_VANGOGH))
+ if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
- if (!(asic_type == CHIP_VANGOGH))
+ if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
*states = ATTR_STATE_UNSUPPORTED;
}
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 8156729c370b..3557f4e7fc30 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -1008,7 +1008,9 @@ struct pptable_funcs {
/**
* @set_power_limit: Set power limit in watts.
*/
- int (*set_power_limit)(struct smu_context *smu, uint32_t n);
+ int (*set_power_limit)(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit);
/**
* @init_max_sustainable_clocks: Populate max sustainable clock speed
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index cbdae8a2c698..2d422e6a9feb 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -197,7 +197,9 @@ int smu_v11_0_notify_display_change(struct smu_context *smu);
int smu_v11_0_get_current_power_limit(struct smu_context *smu,
uint32_t *power_limit);
-int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n);
+int smu_v11_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit);
int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
index dc91eb608791..e5d3b0d1a032 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
@@ -163,7 +163,9 @@ int smu_v13_0_notify_display_change(struct smu_context *smu);
int smu_v13_0_get_current_power_limit(struct smu_context *smu,
uint32_t *power_limit);
-int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n);
+int smu_v13_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit);
int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
index b7e2651b570b..2fc1733bcdcf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
@@ -29,9 +29,9 @@
typedef enum atom_smu9_syspll0_clock_id BIOS_CLKID;
#define GetIndexIntoMasterCmdTable(FieldName) \
- (((char*)(&((struct atom_master_list_of_command_functions_v2_1*)0)->FieldName)-(char*)0)/sizeof(uint16_t))
+ (offsetof(struct atom_master_list_of_command_functions_v2_1, FieldName) / sizeof(uint16_t))
#define GetIndexIntoMasterDataTable(FieldName) \
- (((char*)(&((struct atom_master_list_of_data_tables_v2_1*)0)->FieldName)-(char*)0)/sizeof(uint16_t))
+ (offsetof(struct atom_master_list_of_data_tables_v2_1, FieldName) / sizeof(uint16_t))
#define PP_ATOMFWCTRL_MAX_VOLTAGE_ENTRIES 32
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 04863a797115..b06c59dcc1b4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -455,7 +455,11 @@ static int smu_get_power_num_states(void *handle,
bool is_support_sw_smu(struct amdgpu_device *adev)
{
- if (adev->asic_type >= CHIP_ARCTURUS)
+ /* vega20 is 11.0.2, but it's supported via the powerplay code */
+ if (adev->asic_type == CHIP_VEGA20)
+ return false;
+
+ if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
return true;
return false;
@@ -575,41 +579,43 @@ static int smu_set_funcs(struct amdgpu_device *adev)
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
navi10_set_ppt_funcs(smu);
break;
- case CHIP_ARCTURUS:
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
- arcturus_set_ppt_funcs(smu);
- /* OD is not supported on Arcturus */
- smu->od_enabled =false;
- break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
sienna_cichlid_set_ppt_funcs(smu);
break;
- case CHIP_ALDEBARAN:
- aldebaran_set_ppt_funcs(smu);
- /* Enable pp_od_clk_voltage node */
- smu->od_enabled = true;
- break;
- case CHIP_RENOIR:
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
renoir_set_ppt_funcs(smu);
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(11, 5, 0):
vangogh_set_ppt_funcs(smu);
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
yellow_carp_set_ppt_funcs(smu);
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(11, 0, 8):
cyan_skillfish_set_ppt_funcs(smu);
break;
+ case IP_VERSION(11, 0, 2):
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ arcturus_set_ppt_funcs(smu);
+ /* OD is not supported on Arcturus */
+ smu->od_enabled = false;
+ break;
+ case IP_VERSION(13, 0, 2):
+ aldebaran_set_ppt_funcs(smu);
+ /* Enable pp_od_clk_voltage node */
+ smu->od_enabled = true;
+ break;
default:
return -EINVAL;
}
@@ -694,7 +700,8 @@ static int smu_late_init(void *handle)
return ret;
}
- if (adev->asic_type == CHIP_YELLOW_CARP)
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
return 0;
if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
@@ -1140,9 +1147,16 @@ static int smu_smc_hw_setup(struct smu_context *smu)
if (adev->in_suspend && smu_is_dpm_running(smu)) {
dev_info(adev->dev, "dpm has been enabled\n");
/* this is needed specifically */
- if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
- (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 0, 12):
ret = smu_system_features_control(smu, true);
+ break;
+ default:
+ break;
+ }
return ret;
}
@@ -1284,7 +1298,7 @@ static int smu_start_smc_engine(struct smu_context *smu)
int ret = 0;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- if (adev->asic_type < CHIP_NAVI10) {
+ if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
if (smu->ppt_funcs->load_microcode) {
ret = smu->ppt_funcs->load_microcode(smu);
if (ret)
@@ -1402,23 +1416,41 @@ static int smu_disable_dpms(struct smu_context *smu)
* - SMU firmware can handle the DPM reenablement
* properly.
*/
- if (smu->uploading_custom_pp_table &&
- (adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_BEIGE_GOBY))
- return smu_disable_all_features_with_exception(smu,
- true,
- SMU_FEATURE_COUNT);
+ if (smu->uploading_custom_pp_table) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ return smu_disable_all_features_with_exception(smu,
+ true,
+ SMU_FEATURE_COUNT);
+ default:
+ break;
+ }
+ }
/*
* For Sienna_Cichlid, PMFW will handle the features disablement properly
* on BACO in. Driver involvement is unnecessary.
*/
- if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
- ((adev->asic_type >= CHIP_NAVI10) && (adev->asic_type <= CHIP_NAVI12))) &&
- use_baco)
- return smu_disable_all_features_with_exception(smu,
- true,
- SMU_FEATURE_BACO_BIT);
+ if (use_baco) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ return smu_disable_all_features_with_exception(smu,
+ true,
+ SMU_FEATURE_BACO_BIT);
+ default:
+ break;
+ }
+ }
/*
* For gpu reset, runpm and hibernation through BACO,
@@ -1436,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
dev_err(adev->dev, "Failed to disable smu features.\n");
}
- if (adev->asic_type >= CHIP_NAVI10 &&
+ if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0) &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
@@ -2229,6 +2261,7 @@ int smu_get_power_limit(void *handle,
enum pp_power_type pp_power_type)
{
struct smu_context *smu = handle;
+ struct amdgpu_device *adev = smu->adev;
enum smu_ppt_limit_level limit_level;
uint32_t limit_type;
int ret = 0;
@@ -2272,15 +2305,20 @@ int smu_get_power_limit(void *handle,
} else {
switch (limit_level) {
case SMU_PPT_LIMIT_CURRENT:
- if ((smu->adev->asic_type == CHIP_ALDEBARAN) ||
- (smu->adev->asic_type == CHIP_SIENNA_CICHLID) ||
- (smu->adev->asic_type == CHIP_NAVY_FLOUNDER) ||
- (smu->adev->asic_type == CHIP_DIMGREY_CAVEFISH) ||
- (smu->adev->asic_type == CHIP_BEIGE_GOBY))
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
NULL,
NULL);
+ break;
+ default:
+ break;
+ }
*limit = smu->current_power_limit;
break;
case SMU_PPT_LIMIT_DEFAULT:
@@ -2310,9 +2348,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
mutex_lock(&smu->mutex);
+ limit &= (1<<24)-1;
if (limit_type != SMU_DEFAULT_PPT_LIMIT)
if (smu->ppt_funcs->set_power_limit) {
- ret = smu->ppt_funcs->set_power_limit(smu, limit);
+ ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
goto out;
}
@@ -2328,7 +2367,7 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
limit = smu->current_power_limit;
if (smu->ppt_funcs->set_power_limit) {
- ret = smu->ppt_funcs->set_power_limit(smu, limit);
+ ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
smu->user_dpm_profile.power_limit = limit;
}
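
Note: a worked sketch of the power-limit encoding this hunk depends on.
The limit type rides in the top byte of the u32 (limit_type is derived
from it earlier in the function, outside this hunk), and the new mask
strips it off before the value reaches the ppt backend. The helper name
below is illustrative only:

    static uint32_t encode_ppt_limit(enum smu_ppt_limit_type type, uint32_t watts)
    {
            return ((uint32_t)type << 24) | (watts & ((1 << 24) - 1));
    }

    /* smu_set_power_limit() then unpacks:
     *     limit_type = limit >> 24;
     *     limit     &= (1 << 24) - 1;    // the line added above
     */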
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 082f01893f3d..fd1d30a93db5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -436,6 +436,19 @@ static void arcturus_check_bxco_support(struct smu_context *smu)
}
}
+static void arcturus_check_fan_support(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+
+ /* No sort of fan control possible if PPTable has it disabled */
+ smu->adev->pm.no_fan =
+ !(pptable->FeaturesToRun[0] & FEATURE_FAN_CONTROL_MASK);
+ if (smu->adev->pm.no_fan)
+ dev_info_once(smu->adev->dev,
+ "PMFW based fan control disabled");
+}
+
static int arcturus_check_powerplay_table(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -443,6 +456,7 @@ static int arcturus_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
arcturus_check_bxco_support(smu);
+ arcturus_check_fan_support(smu);
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 3d4c65bc29dc..cbc3f99e8573 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -47,7 +47,6 @@
/* unit: MHz */
#define CYAN_SKILLFISH_SCLK_MIN 1000
#define CYAN_SKILLFISH_SCLK_MAX 2000
-#define CYAN_SKILLFISH_SCLK_DEFAULT 1800
/* unit: mV */
#define CYAN_SKILLFISH_VDDC_MIN 700
@@ -59,6 +58,8 @@ static struct gfx_user_settings {
uint32_t vddc;
} cyan_skillfish_user_settings;
+static uint32_t cyan_skillfish_sclk_default;
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
@@ -365,13 +366,19 @@ static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
return false;
ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
-
if (ret)
return false;
feature_enabled = (uint64_t)feature_mask[0] |
((uint64_t)feature_mask[1] << 32);
+ /*
+ * cyan_skillfish specific, query default sclk instead of hardcoding it.
+ */
+ if (!cyan_skillfish_sclk_default)
+ cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK,
+ &cyan_skillfish_sclk_default);
+
return !!(feature_enabled & SMC_DPM_FEATURE);
}
@@ -444,14 +451,14 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
- if (input[1] <= CYAN_SKILLFISH_SCLK_MIN ||
+ if (input[1] < CYAN_SKILLFISH_SCLK_MIN ||
input[1] > CYAN_SKILLFISH_SCLK_MAX) {
dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
return -EINVAL;
}
- if (input[2] <= CYAN_SKILLFISH_VDDC_MIN ||
+ if (input[2] < CYAN_SKILLFISH_VDDC_MIN ||
input[2] > CYAN_SKILLFISH_VDDC_MAX) {
dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
@@ -468,7 +475,7 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
- cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
+ cyan_skillfish_user_settings.sclk = cyan_skillfish_sclk_default;
cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index b1ad451af06b..71161f6b78fe 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -86,21 +86,21 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 0),
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
- MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
+ MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
- MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0),
- MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
- MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable, 0),
MSG_MAP(RunBtc, PPSMC_MSG_RunBtc, 0),
MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
- MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
- MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
- MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+ MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0),
MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
@@ -345,7 +345,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
/* DPM UCLK enablement should be skipped for navi10 A0 secure board */
if (!(is_asic_secure(smu) &&
- (adev->asic_type == CHIP_NAVI10) &&
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
(adev->rev_id == 0)) &&
(adev->pm.pp_feature & PP_MCLK_DPM_MASK))
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
@@ -354,7 +354,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
/* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
if (is_asic_secure(smu) &&
- (adev->asic_type == CHIP_NAVI10) &&
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
(adev->rev_id == 0))
*(uint64_t *)feature_mask &=
~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
@@ -925,18 +925,18 @@ static int navi1x_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
- switch (adev->asic_type) {
- case CHIP_NAVI12:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 9):
if (smu_version > 0x00341C00)
ret = navi12_get_smu_metrics_data(smu, member, value);
else
ret = navi12_get_legacy_smu_metrics_data(smu, member, value);
break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
default:
- if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) ||
- ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00))
+ if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
ret = navi10_get_smu_metrics_data(smu, member, value);
else
ret = navi10_get_legacy_smu_metrics_data(smu, member, value);
@@ -1509,8 +1509,8 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
uint32_t sclk_freq;
pstate_table->gfxclk_pstate.min = gfx_table->min;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
switch (adev->pdev->revision) {
case 0xf0: /* XTX */
case 0xc0:
@@ -1525,7 +1525,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
break;
}
break;
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 5):
switch (adev->pdev->revision) {
case 0xc7: /* XT */
case 0xf4:
@@ -1548,7 +1548,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
break;
}
break;
- case CHIP_NAVI12:
+ case IP_VERSION(11, 0, 9):
sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
break;
default:
@@ -2562,8 +2562,8 @@ static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
return false;
- if (adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_NAVI14)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5))
return true;
return false;
@@ -2671,8 +2671,8 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
* - PPSMC_MSG_SetDriverDummyTableDramAddrLow
* - PPSMC_MSG_GetUMCFWWA
*/
- if (((adev->asic_type == CHIP_NAVI10) && (pmfw_version >= 0x2a3500)) ||
- ((adev->asic_type == CHIP_NAVI14) && (pmfw_version >= 0x351D00))) {
+ if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (pmfw_version >= 0x2a3500)) ||
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && (pmfw_version >= 0x351D00))) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GET_UMC_FW_WA,
0,
@@ -2691,13 +2691,13 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
return 0;
if (umc_fw_disable_cdr) {
- if (adev->asic_type == CHIP_NAVI10)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
return navi10_umc_hybrid_cdr_workaround(smu);
} else {
return navi10_set_dummy_pstates_table_location(smu);
}
} else {
- if (adev->asic_type == CHIP_NAVI10)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
return navi10_umc_hybrid_cdr_workaround(smu);
}
@@ -3151,18 +3151,18 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
return ret;
}
- switch (adev->asic_type) {
- case CHIP_NAVI12:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 9):
if (smu_version > 0x00341C00)
ret = navi12_get_gpu_metrics(smu, table);
else
ret = navi12_get_legacy_gpu_metrics(smu, table);
break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
default:
- if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) ||
- ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00))
+ if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
ret = navi10_get_gpu_metrics(smu, table);
else
ret = navi10_get_legacy_gpu_metrics(smu, table);
@@ -3180,7 +3180,7 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
uint32_t param = 0;
/* Navi12 does not support this */
- if (adev->asic_type == CHIP_NAVI12)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9))
return 0;
/*
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index ca57221e3962..a4108025fe29 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -74,7 +74,7 @@
#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
#define GET_PPTABLE_MEMBER(field, member) do {\
- if (smu->adev->asic_type == CHIP_BEIGE_GOBY)\
+ if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))\
(*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_beige_goby_t, field));\
else\
(*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\
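
Note: usage sketch for GET_PPTABLE_MEMBER, following the callers later in
this file -- the caller declares a pointer of the field's type and the
macro resolves the right offset for whichever PPTable layout the ASIC
uses:

    DpmDescriptor_t *table_member;

    GET_PPTABLE_MEMBER(DpmDescriptor, &table_member);
    /* table_member now points into PPTable_beige_goby_t on 11.0.13
     * parts and into PPTable_t everywhere else. */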
@@ -82,7 +82,7 @@
static int get_table_size(struct smu_context *smu)
{
- if (smu->adev->asic_type == CHIP_BEIGE_GOBY)
+ if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
return sizeof(PPTable_beige_goby_t);
else
return sizeof(PPTable_t);
@@ -298,7 +298,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
}
if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) &&
- (adev->asic_type > CHIP_SIENNA_CICHLID) &&
+ (adev->ip_versions[MP1_HWIP][0] > IP_VERSION(11, 0, 7)) &&
!(adev->flags & AMD_IS_APU))
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT);
@@ -496,7 +496,7 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
uint32_t throttler_status = 0;
int i;
- if ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu->smc_fw_version >= 0x3A4300)) {
for (i = 0; i < THROTTLER_COUNT; i++)
throttler_status |=
@@ -517,7 +517,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
SmuMetrics_V2_t *metrics_v2 =
&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics_V2);
- bool use_metrics_v2 = ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ bool use_metrics_v2 = ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu->smc_fw_version >= 0x3A4300)) ? true : false;
uint16_t average_gfx_activity;
int ret = 0;
@@ -670,7 +670,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_11_0_dpm_table *dpm_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0;
+ int i, ret = 0;
DpmDescriptor_t *table_member;
/* socclk dpm table setup */
@@ -746,78 +746,45 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* vclk0 dpm table setup */
- dpm_table = &dpm_context->dpm_tables.vclk_table;
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_v11_0_set_single_dpm_table(smu,
- SMU_VCLK,
- dpm_table);
- if (ret)
- return ret;
- dpm_table->is_fine_grained =
- !table_member[PPCLK_VCLK_0].SnapToDiscrete;
- } else {
- dpm_table->count = 1;
- dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
- dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
- }
+ /* vclk0/1 dpm table setup */
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
- /* vclk1 dpm table setup */
- if (adev->vcn.num_vcn_inst > 1) {
- dpm_table = &dpm_context->dpm_tables.vclk1_table;
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
- SMU_VCLK1,
+ i ? SMU_VCLK1 : SMU_VCLK,
dpm_table);
if (ret)
return ret;
dpm_table->is_fine_grained =
- !table_member[PPCLK_VCLK_1].SnapToDiscrete;
+ !table_member[i ? PPCLK_VCLK_1 : PPCLK_VCLK_0].SnapToDiscrete;
} else {
dpm_table->count = 1;
- dpm_table->dpm_levels[0].value =
- smu->smu_table.boot_values.vclk / 100;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
dpm_table->dpm_levels[0].enabled = true;
dpm_table->min = dpm_table->dpm_levels[0].value;
dpm_table->max = dpm_table->dpm_levels[0].value;
}
}
- /* dclk0 dpm table setup */
- dpm_table = &dpm_context->dpm_tables.dclk_table;
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_v11_0_set_single_dpm_table(smu,
- SMU_DCLK,
- dpm_table);
- if (ret)
- return ret;
- dpm_table->is_fine_grained =
- !table_member[PPCLK_DCLK_0].SnapToDiscrete;
- } else {
- dpm_table->count = 1;
- dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
- dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
- }
-
- /* dclk1 dpm table setup */
- if (adev->vcn.num_vcn_inst > 1) {
- dpm_table = &dpm_context->dpm_tables.dclk1_table;
+ /* dclk0/1 dpm table setup */
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
- SMU_DCLK1,
+ i ? SMU_DCLK1 : SMU_DCLK,
dpm_table);
if (ret)
return ret;
dpm_table->is_fine_grained =
- !table_member[PPCLK_DCLK_1].SnapToDiscrete;
+ !table_member[i ? PPCLK_DCLK_1 : PPCLK_DCLK_0].SnapToDiscrete;
} else {
dpm_table->count = 1;
- dpm_table->dpm_levels[0].value =
- smu->smu_table.boot_values.dclk / 100;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
dpm_table->dpm_levels[0].enabled = true;
dpm_table->min = dpm_table->dpm_levels[0].value;
dpm_table->max = dpm_table->dpm_levels[0].value;
@@ -902,32 +869,18 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
struct amdgpu_device *adev = smu->adev;
- int ret = 0;
+ int i, ret = 0;
- if (enable) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ 0x10000 * i, NULL);
if (ret)
return ret;
- if (adev->vcn.num_vcn_inst > 1) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
- 0x10000, NULL);
- if (ret)
- return ret;
- }
- }
- } else {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
- if (ret)
- return ret;
- if (adev->vcn.num_vcn_inst > 1) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
- 0x10000, NULL);
- if (ret)
- return ret;
- }
}
}
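
Note: the "0x10000 * i" argument assumes the PowerUpVcn/PowerDownVcn
message carries the VCN instance index in the upper 16 bits of its
parameter:

    uint32_t param = 0x10000 * i;  /* instance 0 -> 0x00000, instance 1 -> 0x10000 */

which reproduces the two hard-coded sends (0 and 0x10000) the removed
code issued on dual-VCN parts, while additionally skipping harvested
instances.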
@@ -1170,7 +1123,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
* and onwards SMU firmwares.
*/
smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu_version < 0x003a2900))
break;
@@ -1937,7 +1890,7 @@ static void sienna_cichlid_dump_od_table(struct smu_context *smu,
od_table->UclkFmax);
smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (!((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ if (!((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu_version < 0x003a2900)))
dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset);
}
@@ -2161,7 +2114,7 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
* and onwards SMU firmwares.
*/
smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu_version < 0x003a2900)) {
dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported "
"only by 58.41.0 and onwards SMU firmwares!\n");
@@ -2865,7 +2818,7 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
int i;
- if (smu->adev->asic_type == CHIP_BEIGE_GOBY) {
+ if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) {
beige_goby_dump_pptable(smu);
return;
}
@@ -3625,7 +3578,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
SmuMetrics_V2_t *metrics_v2 =
&(metrics_external.SmuMetrics_V2);
struct amdgpu_device *adev = smu->adev;
- bool use_metrics_v2 = ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ bool use_metrics_v2 = ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu->smc_fw_version >= 0x3A4300)) ? true : false;
uint16_t average_gfx_activity;
int ret = 0;
@@ -3706,8 +3659,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_fan_speed = use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed;
- if (((adev->asic_type == CHIP_SIENNA_CICHLID) && smu->smc_fw_version > 0x003A1E00) ||
- ((adev->asic_type == CHIP_NAVY_FLOUNDER) && smu->smc_fw_version > 0x00410400)) {
+ if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && smu->smc_fw_version > 0x003A1E00) ||
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11)) && smu->smc_fw_version > 0x00410400)) {
gpu_metrics->pcie_link_width = use_metrics_v2 ? metrics_v2->PcieWidth : metrics->PcieWidth;
gpu_metrics->pcie_link_speed = link_speed[use_metrics_v2 ? metrics_v2->PcieRate : metrics->PcieRate];
} else {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 87b055466a33..28b7c0562b99 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -90,37 +90,38 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
struct amdgpu_firmware_info *ucode = NULL;
if (amdgpu_sriov_vf(adev) &&
- ((adev->asic_type == CHIP_NAVI12) ||
- (adev->asic_type == CHIP_SIENNA_CICHLID)))
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7))))
return 0;
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
- chip_name = "arcturus";
- break;
- case CHIP_NAVI10:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
chip_name = "navi10";
break;
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 5):
chip_name = "navi14";
break;
- case CHIP_NAVI12:
+ case IP_VERSION(11, 0, 9):
chip_name = "navi12";
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(11, 0, 7):
chip_name = "sienna_cichlid";
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(11, 0, 11):
chip_name = "navy_flounder";
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(11, 0, 12):
chip_name = "dimgrey_cavefish";
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 13):
chip_name = "beige_goby";
break;
+ case IP_VERSION(11, 0, 2):
+ chip_name = "arcturus";
+ break;
default:
- dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
+ dev_err(adev->dev, "Unsupported IP version 0x%x\n",
+ adev->ip_versions[MP1_HWIP][0]);
return -EINVAL;
}
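
Note: chip_name feeds the firmware lookup a few lines past this hunk; a
sketch of the naming scheme assumed here:

    char fw_name[32];

    snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
    /* e.g. IP_VERSION(11, 0, 13) -> "amdgpu/beige_goby_smc.bin" */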
@@ -238,39 +239,40 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
if (smu->is_apu)
adev->pm.fw_version = smu_version;
- switch (smu->adev->asic_type) {
- case CHIP_ARCTURUS:
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
- break;
- case CHIP_NAVI10:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
- case CHIP_NAVI12:
+ case IP_VERSION(11, 0, 9):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
break;
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 5):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(11, 0, 7):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(11, 0, 11):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(11, 5, 0):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(11, 0, 12):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 13):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(11, 0, 8):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish;
break;
+ case IP_VERSION(11, 0, 2):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+ break;
default:
- dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
+ dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
+ adev->ip_versions[MP1_HWIP][0]);
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
break;
}
@@ -492,8 +494,9 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
int smu_v11_0_init_power(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
- size_t size = smu->adev->asic_type == CHIP_VANGOGH ?
+ size_t size = adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ?
sizeof(struct smu_11_5_power_context) :
sizeof(struct smu_11_0_power_context);
@@ -750,8 +753,10 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
/* Navy_Flounder/Dimgrey_Cavefish do not support to change
* display num currently
*/
- if (adev->asic_type >= CHIP_NAVY_FLOUNDER &&
- adev->asic_type <= CHIP_BEIGE_GOBY)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
@@ -974,10 +979,16 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
return ret;
}
-int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
+int smu_v11_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
{
int power_src;
int ret = 0;
+ uint32_t limit_param;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
@@ -997,16 +1008,16 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
* BIT 16-23: PowerSource
* BIT 0-15: PowerLimit
*/
- n &= 0xFFFF;
- n |= 0 << 24;
- n |= (power_src) << 16;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
+ limit_param = (limit & 0xFFFF);
+ limit_param |= 0 << 24;
+ limit_param |= (power_src) << 16;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
return ret;
}
- smu->current_power_limit = n;
+ smu->current_power_limit = limit;
return 0;
}
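
Note: worked example of the SetPptLimit parameter packing documented in
the comment above (the limit index living in bits 24-31 is inferred from
the "0 << 24" term): a 220 W limit on power source 0 packs as

    limit_param = (220 & 0xFFFF) | (0 << 16) | (0 << 24);  /* 0x000000dc */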
@@ -1136,15 +1147,15 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_VANGOGH:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -1630,11 +1641,11 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
mutex_lock(&smu_baco->mutex);
if (state == SMU_BACO_STATE_ENTER) {
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
if (amdgpu_runtime_pm == 2)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
@@ -1649,7 +1660,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
default:
if (!ras || !adev->ras_enabled ||
adev->gmc.xgmi.pending_reset) {
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
@@ -1931,7 +1942,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
* Separate MCLK and SOCCLK soft min/max settings are not allowed
* on Arcturus.
*/
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {
mclk_min = mclk_max = 0;
socclk_min = socclk_max = 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index f6ef0ce6e9e2..421f38e8dada 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1386,52 +1386,38 @@ static int vangogh_set_performance_level(struct smu_context *smu,
uint32_t soc_mask, mclk_mask, fclk_mask;
uint32_t vclk_mask = 0, dclk_mask = 0;
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
ret = vangogh_force_dpm_limit_value(smu, true);
+ if (ret)
+ return ret;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
ret = vangogh_force_dpm_limit_value(smu, false);
+ if (ret)
+ return ret;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
ret = vangogh_unforce_dpm_levels(smu);
- break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetHardMinGfxClk,
- VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
- if (ret)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetSoftMaxGfxClk,
- VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
if (ret)
return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
+ smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
ret = vangogh_get_profiling_clk_mask(smu, level,
&vclk_mask,
@@ -1446,32 +1432,15 @@ static int vangogh_set_performance_level(struct smu_context *smu,
vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
-
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn,
- VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
- if (ret)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn,
- VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
- if (ret)
- return ret;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
ret = vangogh_get_profiling_clk_mask(smu, level,
NULL,
NULL,
@@ -1484,29 +1453,29 @@ static int vangogh_set_performance_level(struct smu_context *smu,
vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
- VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
- if (ret)
- return ret;
+ smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
+ smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
- VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+ ret = vangogh_set_peak_clock_by_device(smu);
if (ret)
return ret;
-
- ret = vangogh_set_peak_clock_by_device(smu);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
- break;
+ return 0;
}
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq, NULL);
+ if (ret)
+ return ret;
+
return ret;
}
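
Note: after this refactor every handled level converges on the same two
messages, carrying whatever gfx_actual_* values its case selected. Trace
for AMD_DPM_FORCED_LEVEL_PROFILE_PEAK under the assignments above:

    /* gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK
     * gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK
     * -> SMU_MSG_SetHardMinGfxClk(peak), SMU_MSG_SetSoftMaxGfxClk(peak)
     */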
@@ -2144,11 +2113,12 @@ static int vangogh_get_ppt_limit(struct smu_context *smu,
return 0;
}
-static int vangogh_set_power_limit(struct smu_context *smu, uint32_t ppt_limit)
+static int vangogh_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t ppt_limit)
{
struct smu_11_5_power_context *power_context =
- smu->smu_power.power_context;
- uint32_t limit_type = ppt_limit >> 24;
+ smu->smu_power.power_context;
int ret = 0;
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 5019903db492..59a7d276541d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1241,11 +1241,13 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
return 0;
}
-static int aldebaran_set_power_limit(struct smu_context *smu, uint32_t n)
+static int aldebaran_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
{
/* Power limit can be set only through primary die */
if (aldebaran_is_primary(smu))
- return smu_v13_0_set_power_limit(smu, n);
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a0e50f23b1dd..35145db6eedf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -89,12 +89,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
chip_name = "aldebaran";
break;
default:
- dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
+ dev_err(adev->dev, "Unsupported IP version 0x%x\n",
+ adev->ip_versions[MP1_HWIP][0]);
return -EINVAL;
}
@@ -210,15 +211,17 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
- switch (smu->adev->asic_type) {
- case CHIP_ALDEBARAN:
+ switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
break;
default:
- dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
+ dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
+ smu->adev->ip_versions[MP1_HWIP][0]);
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
break;
}
@@ -740,8 +743,9 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- switch (adev->asic_type) {
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -941,22 +945,27 @@ int smu_v13_0_get_current_power_limit(struct smu_context *smu,
return ret;
}
-int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n)
+int smu_v13_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
{
int ret = 0;
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
return -EOPNOTSUPP;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
return ret;
}
- smu->current_power_limit = n;
+ smu->current_power_limit = limit;
return 0;
}
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 8c2ab3d653b7..0562bdaac00c 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -165,7 +165,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
return !malidp_hw_format_is_afbc_only(format);
}
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
+ if (!fourcc_mod_is_vendor(modifier, ARM)) {
DRM_ERROR("Unknown modifier (not Arm)\n");
return false;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 21909642ee4c..147abf1a3968 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -15,6 +15,8 @@
#include "armada_gem.h"
#include "armada_ioctlP.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
struct drm_gem_object *gobj = vmf->vma->vm_private_data;
@@ -336,7 +338,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_armada_gem_pwrite *args = data;
struct armada_gem_object *dobj;
char __user *ptr;
- int ret;
+ int ret = 0;
DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
args->handle, args->offset, args->size, args->ptr);
@@ -349,9 +351,8 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (!access_ok(ptr, args->size))
return -EFAULT;
- ret = fault_in_pages_readable(ptr, args->size);
- if (ret)
- return ret;
+ if (fault_in_readable(ptr, args->size))
+ return -EFAULT;
dobj = armada_gem_object_lookup(file, args->handle);
if (dobj == NULL)
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 39ca338eb80b..2cfce7dc95af 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -158,8 +158,6 @@ struct ast_private {
uint32_t dram_type;
uint32_t mclk;
- int fb_mtrr;
-
struct drm_plane primary_plane;
struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 7592f1b9e1f1..6e999408dda9 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -74,35 +74,28 @@ static u32 ast_get_vram_size(struct ast_private *ast)
return vram_size;
}
-static void ast_mm_release(struct drm_device *dev, void *ptr)
-{
- struct ast_private *ast = to_ast_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- arch_phys_wc_del(ast->fb_mtrr);
- arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
-}
-
int ast_mm_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ resource_size_t base, size;
u32 vram_size;
int ret;
+ base = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_io_reserve_memtype_wc(dev->dev, base, size);
+ devm_arch_phys_wc_add(dev->dev, base, size);
+
vram_size = ast_get_vram_size(ast);
- ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), vram_size);
+ ret = drmm_vram_helper_init(dev, base, vram_size);
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
return ret;
}
- arch_io_reserve_memtype_wc(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
-
- return drmm_add_action_or_reset(dev, ast_mm_release, NULL);
+ return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 6bfaefa01818..1e30eaeb0e1b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1300,18 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
return flags;
}
-static enum drm_connector_status ast_connector_detect(struct drm_connector
- *connector, bool force)
-{
- int r;
-
- r = ast_get_modes(connector);
- if (r <= 0)
- return connector_status_disconnected;
-
- return connector_status_connected;
-}
-
static void ast_connector_destroy(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
@@ -1327,7 +1315,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
static const struct drm_connector_funcs ast_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
- .detect = ast_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = ast_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1355,8 +1342,7 @@ static int ast_connector_init(struct drm_device *dev)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_attach_encoder(connector, encoder);
@@ -1425,8 +1411,6 @@ int ast_mode_config_init(struct ast_private *ast)
drm_mode_config_reset(dev);
- drm_kms_helper_poll_init(dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index a20a45c0b353..28d9becc939c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* adv7511_cec.c - Analog Devices ADV7511/33 cec driver
*
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/device.h>
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 14d73fb1dd15..1a871f6b6822 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -720,7 +720,7 @@ static int edid_read(struct anx7625_data *ctx,
ret = sp_tx_aux_rd(ctx, 0xf1);
if (ret) {
- sp_tx_rst_aux(ctx);
+ ret = sp_tx_rst_aux(ctx);
DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n");
} else {
ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
@@ -735,7 +735,7 @@ static int edid_read(struct anx7625_data *ctx,
if (cnt > EDID_TRY_CNT)
return -EIO;
- return 0;
+ return ret;
}
static int segments_edid_read(struct anx7625_data *ctx,
@@ -785,7 +785,7 @@ static int segments_edid_read(struct anx7625_data *ctx,
if (cnt > EDID_TRY_CNT)
return -EIO;
- return 0;
+ return ret;
}
static int sp_tx_edid_read(struct anx7625_data *ctx,
@@ -845,8 +845,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
if (g_edid_break == 1)
break;
- segments_edid_read(ctx, count / 2,
- pblock_buf, offset);
+ ret = segments_edid_read(ctx, count / 2,
+ pblock_buf, offset);
+ if (ret < 0)
+ return ret;
+
memcpy(&pedid_blocks_buf[edid_pos],
pblock_buf,
MAX_DPCD_BUFFER_SIZE);
@@ -863,8 +866,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
if (g_edid_break == 1)
break;
- segments_edid_read(ctx, count / 2,
- pblock_buf, offset);
+ ret = segments_edid_read(ctx, count / 2,
+ pblock_buf, offset);
+ if (ret < 0)
+ return ret;
+
memcpy(&pedid_blocks_buf[edid_pos],
pblock_buf,
MAX_DPCD_BUFFER_SIZE);
@@ -887,7 +893,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
}
/* Reset aux channel */
- sp_tx_rst_aux(ctx);
+ ret = sp_tx_rst_aux(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to reset aux channel!\n");
+ return ret;
+ }
return (blocks_num + 1);
}
@@ -1325,7 +1335,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_MODE_VIDEO_HSE;
if (mipi_dsi_attach(dsi) < 0) {
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index e6e331071a00..d8a15c459b42 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1171,7 +1171,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
{
struct cdns_dsi *dsi;
struct cdns_dsi_input *input;
- struct resource *res;
int ret, irq;
u32 val;
@@ -1183,8 +1182,7 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
input = &dsi->input;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->regs = devm_ioremap_resource(&pdev->dev, res);
+ dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 2f2a09adb4bc..06b59b422c69 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -889,7 +889,7 @@ unlock:
static int it66121_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- u32 vendor_ids[2], device_ids[2], revision_id;
+ u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
struct device_node *ep;
int ret;
struct it66121_ctx *ctx;
@@ -918,11 +918,26 @@ static int it66121_probe(struct i2c_client *client,
return -EINVAL;
ep = of_graph_get_remote_node(dev->of_node, 1, -1);
- if (!ep)
- return -EPROBE_DEFER;
+ if (!ep) {
+ dev_err(ctx->dev, "The endpoint is unconnected\n");
+ return -EINVAL;
+ }
+
+ if (!of_device_is_available(ep)) {
+ of_node_put(ep);
+ dev_err(ctx->dev, "The remote device is disabled\n");
+ return -ENODEV;
+ }
ctx->next_bridge = of_drm_find_bridge(ep);
of_node_put(ep);
+ if (!ctx->next_bridge) {
+ dev_dbg(ctx->dev, "Next bridge not found, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
i2c_set_clientdata(client, ctx);
mutex_init(&ctx->lock);
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index c916f4b8907e..b32295abd9e7 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -9,6 +9,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -332,3 +333,39 @@ struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge)
return &panel_bridge->connector;
}
EXPORT_SYMBOL(drm_panel_bridge_connector);
+
+#ifdef CONFIG_OF
+/**
+ * devm_drm_of_get_bridge - Return next bridge in the chain
+ * @dev: device to tie the bridge lifetime to
+ * @np: device tree node containing encoder output ports
+ * @port: port in the device tree node
+ * @endpoint: endpoint in the device tree node
+ *
+ * Given a DT node's port and endpoint number, finds the connected node
+ * and returns the associated bridge if any, or creates and returns a
+ * drm panel bridge instance if a panel is connected.
+ *
+ * Returns a pointer to the bridge if successful, or an error pointer
+ * otherwise.
+ */
+struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
+ struct device_node *np,
+ u32 port, u32 endpoint)
+{
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(np, port, endpoint,
+ &panel, &bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (panel)
+ bridge = devm_drm_panel_bridge_add(dev, panel);
+
+ return bridge;
+}
+EXPORT_SYMBOL(devm_drm_of_get_bridge);
+#endif
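
Note: a hypothetical caller for the helper added above; the driver name
and the port/endpoint numbers are illustrative:

    static int foo_encoder_probe(struct platform_device *pdev)
    {
            struct drm_bridge *next;

            next = devm_drm_of_get_bridge(&pdev->dev, pdev->dev.of_node, 1, 0);
            if (IS_ERR(next))
                    return PTR_ERR(next);  /* may be -EPROBE_DEFER */

            /* link "next" into this encoder's bridge chain ... */
            return 0;
    }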
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 7bd0affa057a..3aaa90913bf8 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -9,25 +9,58 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
+#define PAGE0_AUXCH_CFG3 0x76
+#define AUXCH_CFG3_RESET 0xff
+#define PAGE0_SWAUX_ADDR_7_0 0x7d
+#define PAGE0_SWAUX_ADDR_15_8 0x7e
+#define PAGE0_SWAUX_ADDR_23_16 0x7f
+#define SWAUX_ADDR_MASK GENMASK(19, 0)
+#define PAGE0_SWAUX_LENGTH 0x80
+#define SWAUX_LENGTH_MASK GENMASK(3, 0)
+#define SWAUX_NO_PAYLOAD BIT(7)
+#define PAGE0_SWAUX_WDATA 0x81
+#define PAGE0_SWAUX_RDATA 0x82
+#define PAGE0_SWAUX_CTRL 0x83
+#define SWAUX_SEND BIT(0)
+#define PAGE0_SWAUX_STATUS 0x84
+#define SWAUX_M_MASK GENMASK(4, 0)
+#define SWAUX_STATUS_MASK GENMASK(7, 5)
+#define SWAUX_STATUS_NACK (0x1 << 5)
+#define SWAUX_STATUS_DEFER (0x2 << 5)
+#define SWAUX_STATUS_ACKM (0x3 << 5)
+#define SWAUX_STATUS_INVALID (0x4 << 5)
+#define SWAUX_STATUS_I2C_NACK (0x5 << 5)
+#define SWAUX_STATUS_I2C_DEFER (0x6 << 5)
+#define SWAUX_STATUS_TIMEOUT (0x7 << 5)
+
#define PAGE2_GPIO_H 0xa7
-#define PS_GPIO9 BIT(1)
+#define PS_GPIO9 BIT(1)
#define PAGE2_I2C_BYPASS 0xea
-#define I2C_BYPASS_EN 0xd0
+#define I2C_BYPASS_EN 0xd0
#define PAGE2_MCS_EN 0xf3
-#define MCS_EN BIT(0)
+#define MCS_EN BIT(0)
+
#define PAGE3_SET_ADD 0xfe
-#define VDO_CTL_ADD 0x13
-#define VDO_DIS 0x18
-#define VDO_EN 0x1c
-#define DP_NUM_LANES 4
+#define VDO_CTL_ADD 0x13
+#define VDO_DIS 0x18
+#define VDO_EN 0x1c
+
+#define NUM_MIPI_LANES 4
+
+#define COMMON_PS8640_REGMAP_CONFIG \
+ .reg_bits = 8, \
+ .val_bits = 8, \
+ .cache_type = REGCACHE_NONE
/*
* PS8640 uses multiple addresses:
@@ -60,29 +93,197 @@ enum ps8640_vdo_control {
struct ps8640 {
struct drm_bridge bridge;
struct drm_bridge *panel_bridge;
+ struct drm_dp_aux aux;
struct mipi_dsi_device *dsi;
struct i2c_client *page[MAX_DEVS];
+ struct regmap *regmap[MAX_DEVS];
struct regulator_bulk_data supplies[2];
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_powerdown;
bool powered;
};
+static const struct regmap_config ps8640_regmap_config[] = {
+ [PAGE0_DP_CNTL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xbf,
+ },
+ [PAGE1_VDO_BDG] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE2_TOP_CNTL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE3_DSI_CNTL1] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE4_MIPI_PHY] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE5_VPLL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0x7f,
+ },
+ [PAGE6_DSI_CNTL2] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE7_SPI_CNTL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+};
+
static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)
{
return container_of(e, struct ps8640, bridge);
}
+static inline struct ps8640 *aux_to_ps8640(struct drm_dp_aux *aux)
+{
+ return container_of(aux, struct ps8640, aux);
+}
+
+static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct ps8640 *ps_bridge = aux_to_ps8640(aux);
+ struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL];
+ struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
+ unsigned int len = msg->size;
+ unsigned int data;
+ unsigned int base;
+ int ret;
+ u8 request = msg->request &
+ ~(DP_AUX_I2C_MOT | DP_AUX_I2C_WRITE_STATUS_UPDATE);
+ u8 *buf = msg->buffer;
+ u8 addr_len[PAGE0_SWAUX_LENGTH + 1 - PAGE0_SWAUX_ADDR_7_0];
+ u8 i;
+ bool is_native_aux = false;
+
+ if (len > DP_AUX_MAX_PAYLOAD_BYTES)
+ return -EINVAL;
+
+ if (msg->address & ~SWAUX_ADDR_MASK)
+ return -EINVAL;
+
+ switch (request) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_NATIVE_READ:
+ is_native_aux = true;
+ fallthrough;
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_write(map, PAGE0_AUXCH_CFG3, AUXCH_CFG3_RESET);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to write PAGE0_AUXCH_CFG3: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Assume it's good */
+ msg->reply = 0;
+
+ base = PAGE0_SWAUX_ADDR_7_0;
+ addr_len[PAGE0_SWAUX_ADDR_7_0 - base] = msg->address;
+ addr_len[PAGE0_SWAUX_ADDR_15_8 - base] = msg->address >> 8;
+ addr_len[PAGE0_SWAUX_ADDR_23_16 - base] = (msg->address >> 16) |
+ (msg->request << 4);
+ addr_len[PAGE0_SWAUX_LENGTH - base] = (len == 0) ? SWAUX_NO_PAYLOAD :
+ ((len - 1) & SWAUX_LENGTH_MASK);
+
+ regmap_bulk_write(map, PAGE0_SWAUX_ADDR_7_0, addr_len,
+ ARRAY_SIZE(addr_len));
+
+ if (len && (request == DP_AUX_NATIVE_WRITE ||
+ request == DP_AUX_I2C_WRITE)) {
+ /* Write to the internal FIFO buffer */
+ for (i = 0; i < len; i++) {
+ ret = regmap_write(map, PAGE0_SWAUX_WDATA, buf[i]);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "failed to write WDATA: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ regmap_write(map, PAGE0_SWAUX_CTRL, SWAUX_SEND);
+
+ /* Zero delay loop because i2c transactions are slow already */
+ regmap_read_poll_timeout(map, PAGE0_SWAUX_CTRL, data,
+ !(data & SWAUX_SEND), 0, 50 * 1000);
+
+	ret = regmap_read(map, PAGE0_SWAUX_STATUS, &data);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to read PAGE0_SWAUX_STATUS: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (data & SWAUX_STATUS_MASK) {
+ /* Ignore the DEFER cases as they are already handled in hardware */
+ case SWAUX_STATUS_NACK:
+ case SWAUX_STATUS_I2C_NACK:
+ /*
+	 * The programming guide is not clear about whether an I2C NACK
+ * would trigger SWAUX_STATUS_NACK or SWAUX_STATUS_I2C_NACK. So
+ * we handle both cases together.
+ */
+ if (is_native_aux)
+ msg->reply |= DP_AUX_NATIVE_REPLY_NACK;
+ else
+ msg->reply |= DP_AUX_I2C_REPLY_NACK;
+
+ fallthrough;
+ case SWAUX_STATUS_ACKM:
+ len = data & SWAUX_M_MASK;
+ break;
+ case SWAUX_STATUS_INVALID:
+ return -EOPNOTSUPP;
+ case SWAUX_STATUS_TIMEOUT:
+ return -ETIMEDOUT;
+ }
+
+ if (len && (request == DP_AUX_NATIVE_READ ||
+ request == DP_AUX_I2C_READ)) {
+ /* Read from the internal FIFO buffer */
+ for (i = 0; i < len; i++) {
+ ret = regmap_read(map, PAGE0_SWAUX_RDATA, &data);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "failed to read RDATA: %d\n",
+ ret);
+ return ret;
+ }
+
+ buf[i] = data;
+ }
+ }
+
+ return len;
+}
+
static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
const enum ps8640_vdo_control ctrl)
{
- struct i2c_client *client = ps_bridge->page[PAGE3_DSI_CNTL1];
+ struct regmap *map = ps_bridge->regmap[PAGE3_DSI_CNTL1];
u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl };
int ret;
- ret = i2c_smbus_write_i2c_block_data(client, PAGE3_SET_ADD,
- sizeof(vdo_ctrl_buf),
- vdo_ctrl_buf);
+ ret = regmap_bulk_write(map, PAGE3_SET_ADD,
+ vdo_ctrl_buf, sizeof(vdo_ctrl_buf));
+
if (ret < 0) {
DRM_ERROR("failed to %sable VDO: %d\n",
ctrl == ENABLE ? "en" : "dis", ret);
@@ -94,8 +295,7 @@ static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
{
- struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL];
- unsigned long timeout;
+ struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
int ret, status;
if (ps_bridge->powered)
@@ -119,18 +319,12 @@ static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
*/
msleep(200);
- timeout = jiffies + msecs_to_jiffies(200) + 1;
-
- while (time_is_after_jiffies(timeout)) {
- status = i2c_smbus_read_byte_data(client, PAGE2_GPIO_H);
- if (status < 0) {
- DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", status);
- goto err_regulators_disable;
- }
- if ((status & PS_GPIO9) == PS_GPIO9)
- break;
+ ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
+ status & PS_GPIO9, 20 * 1000, 200 * 1000);
- msleep(20);
+ if (ret < 0) {
+ DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", ret);
+ goto err_regulators_disable;
}
msleep(50);
@@ -142,22 +336,15 @@ static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
* disabled by the manufacturer. Once disabled, all MCS commands are
* ignored by the display interface.
*/
- status = i2c_smbus_read_byte_data(client, PAGE2_MCS_EN);
- if (status < 0) {
- DRM_ERROR("failed read PAGE2_MCS_EN: %d\n", status);
- goto err_regulators_disable;
- }
- ret = i2c_smbus_write_byte_data(client, PAGE2_MCS_EN,
- status & ~MCS_EN);
+ ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0);
if (ret < 0) {
DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret);
goto err_regulators_disable;
}
	/* Switch to access the eDP panel's EDID through I2C */
- ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS,
- I2C_BYPASS_EN);
+ ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN);
if (ret < 0) {
DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret);
goto err_regulators_disable;
@@ -254,20 +441,35 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->lanes = DP_NUM_LANES;
+ dsi->lanes = NUM_MIPI_LANES;
ret = mipi_dsi_attach(dsi);
- if (ret)
+ if (ret) {
+ dev_err(dev, "failed to attach dsi device: %d\n", ret);
goto err_dsi_attach;
+ }
+
+ ret = drm_dp_aux_register(&ps_bridge->aux);
+ if (ret) {
+ dev_err(dev, "failed to register DP AUX channel: %d\n", ret);
+ goto err_aux_register;
+ }
/* Attach the panel-bridge to the dsi bridge */
return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
&ps_bridge->bridge, flags);
+err_aux_register:
+ mipi_dsi_detach(dsi);
err_dsi_attach:
mipi_dsi_device_unregister(dsi);
return ret;
}
+static void ps8640_bridge_detach(struct drm_bridge *bridge)
+{
+ drm_dp_aux_unregister(&bridge_to_ps8640(bridge)->aux);
+}
+
static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -304,6 +506,7 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.attach = ps8640_bridge_attach,
+ .detach = ps8640_bridge_detach,
.get_edid = ps8640_bridge_get_edid,
.post_disable = ps8640_post_disable,
.pre_enable = ps8640_pre_enable,
@@ -360,19 +563,30 @@ static int ps8640_probe(struct i2c_client *client)
ps_bridge->page[PAGE0_DP_CNTL] = client;
+ ps_bridge->regmap[PAGE0_DP_CNTL] = devm_regmap_init_i2c(client, ps8640_regmap_config);
+ if (IS_ERR(ps_bridge->regmap[PAGE0_DP_CNTL]))
+ return PTR_ERR(ps_bridge->regmap[PAGE0_DP_CNTL]);
+
for (i = 1; i < ARRAY_SIZE(ps_bridge->page); i++) {
ps_bridge->page[i] = devm_i2c_new_dummy_device(&client->dev,
client->adapter,
client->addr + i);
- if (IS_ERR(ps_bridge->page[i])) {
- dev_err(dev, "failed i2c dummy device, address %02x\n",
- client->addr + i);
+ if (IS_ERR(ps_bridge->page[i]))
return PTR_ERR(ps_bridge->page[i]);
- }
+
+ ps_bridge->regmap[i] = devm_regmap_init_i2c(ps_bridge->page[i],
+ ps8640_regmap_config + i);
+ if (IS_ERR(ps_bridge->regmap[i]))
+ return PTR_ERR(ps_bridge->regmap[i]);
}
i2c_set_clientdata(client, ps_bridge);
+ ps_bridge->aux.name = "parade-ps8640-aux";
+ ps_bridge->aux.dev = dev;
+ ps_bridge->aux.transfer = ps8640_aux_transfer;
+ drm_dp_aux_init(&ps_bridge->aux);
+
drm_bridge_add(&ps_bridge->bridge);
return 0;
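Once drm_dp_aux_register() succeeds in ps8640_bridge_attach(), the DP helpers can drive the bridge's AUX channel through the transfer hook above. A minimal sketch of a consumer, assuming probe has completed (the helper name is hypothetical):

	/* Sketch: read the sink count over the registered AUX channel.
	 * drm_dp_dpcd_readb() ends up in ps8640_aux_transfer() above.
	 */
	static int ps8640_read_sink_count(struct ps8640 *ps_bridge, u8 *count)
	{
		ssize_t ret;

		ret = drm_dp_dpcd_readb(&ps_bridge->aux, DP_SINK_COUNT, count);
		if (ret < 0)
			return ret;

		*count = DP_GET_SINK_COUNT(*count);
		return 0;
	}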
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index 70ab4fbdc23e..c8f44bcb298a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -265,11 +265,9 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
/* override the module pointer */
cec->adap->owner = THIS_MODULE;
- ret = devm_add_action(&pdev->dev, dw_hdmi_cec_del, cec);
- if (ret) {
- cec_delete_adapter(cec->adap);
+ ret = devm_add_action_or_reset(&pdev->dev, dw_hdmi_cec_del, cec);
+ if (ret)
return ret;
- }
ret = devm_request_threaded_irq(&pdev->dev, cec->irq,
dw_hdmi_cec_hardirq,
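The conversion above is the standard devm_add_action_or_reset() pattern: unlike devm_add_action(), the _or_reset() variant invokes the release callback itself when registration fails, so the caller's manual cleanup goes away. A minimal sketch of the pattern (all names hypothetical):

	static void my_release(void *data)
	{
		struct my_ctx *ctx = data;

		my_ctx_teardown(ctx);	/* hypothetical teardown helper */
	}

	static int my_probe(struct device *dev, struct my_ctx *ctx)
	{
		int ret;

		/* On failure, my_release() has already been called. */
		ret = devm_add_action_or_reset(dev, my_release, ctx);
		if (ret)
			return ret;

		return 0;
	}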
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 41d48a393e7f..6154bed0af5b 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -615,20 +615,8 @@ static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
return drm_bridge_get_modes(pdata->next_bridge, connector);
}
-static enum drm_mode_status
-ti_sn_bridge_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* maximum supported resolution is 4K at 60 fps */
- if (mode->clock > 594000)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
-}
-
static struct drm_connector_helper_funcs ti_sn_bridge_connector_helper_funcs = {
.get_modes = ti_sn_bridge_connector_get_modes,
- .mode_valid = ti_sn_bridge_connector_mode_valid,
};
static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = {
@@ -766,6 +754,18 @@ static void ti_sn_bridge_detach(struct drm_bridge *bridge)
drm_dp_aux_unregister(&bridge_to_ti_sn65dsi86(bridge)->aux);
}
+static enum drm_mode_status
+ti_sn_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ /* maximum supported resolution is 4K at 60 fps */
+ if (mode->clock > 594000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
static void ti_sn_bridge_disable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1127,6 +1127,7 @@ static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
+ .mode_valid = ti_sn_bridge_mode_valid,
.pre_enable = ti_sn_bridge_pre_enable,
.enable = ti_sn_bridge_enable,
.disable = ti_sn_bridge_disable,
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index a8ed66751c2d..c96847fc0ebc 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -28,6 +28,7 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
@@ -49,12 +50,19 @@
* Chaining multiple bridges to the output of a bridge, or the same bridge to
* the output of different bridges, is not supported.
*
+ * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
+ * CRTCs, encoders or connectors and hence are not visible to userspace. They
+ * just provide additional hooks to get the desired output at the end of the
+ * encoder chain.
+ */
+
+/**
+ * DOC: display driver integration
+ *
* Display drivers are responsible for linking encoders with the first bridge
* in the chains. This is done by acquiring the appropriate bridge with
- * of_drm_find_bridge() or drm_of_find_panel_or_bridge(), or creating it for a
- * panel with drm_panel_bridge_add_typed() (or the managed version
- * devm_drm_panel_bridge_add_typed()). Once acquired, the bridge shall be
- * attached to the encoder with a call to drm_bridge_attach().
+ * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
+ * encoder with a call to drm_bridge_attach().
*
* Bridges are responsible for linking themselves with the next bridge in the
* chain, if any. This is done the same way as for encoders, with the call to
@@ -85,11 +93,63 @@
* helper to create the &drm_connector, or implement it manually on top of the
* connector-related operations exposed by the bridge (see the overview
* documentation of bridge operations for more details).
- *
- * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
- * CRTCs, encoders or connectors and hence are not visible to userspace. They
- * just provide additional hooks to get the desired output at the end of the
- * encoder chain.
+ */
+
+/**
+ * DOC: special care dsi
+ *
+ * The interaction between the bridges and other frameworks involved in
+ * the probing of the upstream driver and the bridge driver can be
+ * challenging. Indeed, there are multiple cases that need to be
+ * considered:
+ *
+ * - The upstream driver doesn't use the component framework and isn't a
+ * MIPI-DSI host. In this case, the bridge driver will probe at some
+ * point and the upstream driver should try to probe again by returning
+ * EPROBE_DEFER as long as the bridge driver hasn't probed.
+ *
+ * - The upstream driver doesn't use the component framework, but is a
+ *   MIPI-DSI host. The bridge device is controlled through MIPI-DCS
+ *   commands. In this case, the bridge device is a child of the
+ *   display device, so by the time it probes, the display device (and
+ *   MIPI-DSI host) is guaranteed to be present. The upstream driver is
+ *   guaranteed that the bridge driver is connected between the
+ * &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
+ * Therefore, it must run mipi_dsi_host_register() in its probe
+ * function, and then run drm_bridge_attach() in its
+ * &mipi_dsi_host_ops.attach hook.
+ *
+ * - The upstream driver uses the component framework and is a MIPI-DSI
+ * host. The bridge device uses the MIPI-DCS commands to be
+ *   host. The bridge device is controlled through MIPI-DCS commands.
+ *   This is the same situation as above, and the driver can run
+ *
+ * - The upstream driver uses the component framework and is a MIPI-DSI
+ *   host. The bridge device is controlled over a separate bus (such as
+ *   I2C). In this case, there's no correlation between the probe
+ * of the bridge and upstream drivers, so care must be taken to avoid
+ * an endless EPROBE_DEFER loop, with each driver waiting for the
+ * other to probe.
+ *
+ * The ideal pattern to cover the last item (and all the others in the
+ * MIPI-DSI host driver case) is to split the operations like this (a
+ * code sketch follows this comment block):
+ *
+ * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
+ * probe hook. It will make sure that the MIPI-DSI host sticks around,
+ * and that the driver's bind can be called.
+ *
+ * - In its probe hook, the bridge driver must try to find its MIPI-DSI
+ * host, register as a MIPI-DSI device and attach the MIPI-DSI device
+ * to its host. The bridge driver is now functional.
+ *
+ * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
+ *   add its component. Its bind hook will then be called, and since
+ *   the bridge driver is attached and registered, it can be looked up
+ *   and attached.
+ *
+ * At this point, we're now certain that both the upstream driver and
+ * the bridge driver are functional and we can't have a deadlock-like
+ * situation when probing.
*/
static DEFINE_MUTEX(bridge_lock);
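A minimal sketch of the recommended split for a component-based MIPI-DSI host driver with an I2C-controlled bridge; all names are hypothetical and error handling is trimmed:

	static int my_dsi_host_attach(struct mipi_dsi_host *host,
				      struct mipi_dsi_device *device)
	{
		struct my_dsi *dsi = host_to_my_dsi(host);	/* hypothetical */

		/* The bridge has probed and attached; safe to bind now. */
		return component_add(dsi->dev, &my_dsi_component_ops);
	}

	static int my_dsi_probe(struct platform_device *pdev)
	{
		struct my_dsi *dsi;

		dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
		if (!dsi)
			return -ENOMEM;

		dsi->dev = &pdev->dev;
		dsi->host.ops = &my_dsi_host_ops;
		dsi->host.dev = &pdev->dev;

		/* Register the host first so the bridge's probe can find it. */
		return mipi_dsi_host_register(&dsi->host);
	}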
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 30cc59fe6ef7..f19d9acbe959 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -31,7 +31,7 @@
#include <linux/dma-buf-map.h>
#include <linux/export.h>
#include <linux/highmem.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <xen/xen.h>
#include <drm/drm_cache.h>
@@ -204,7 +204,7 @@ bool drm_need_swiotlb(int dma_bits)
* Enforce dma_alloc_coherent when memory encryption is active as well
* for the same reasons as for Xen paravirtual hosts.
*/
- if (mem_encrypt_active())
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return true;
for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2ba257b1ae20..3bc782b630b9 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -65,6 +65,14 @@
* support can instead use e.g. drm_helper_hpd_irq_event().
*/
+/*
+ * Global connector list for drm_connector_find_by_fwnode().
+ * Note drm_connector_[un]register() first take connector->lock and then
+ * take the connector_list_lock.
+ */
+static DEFINE_MUTEX(connector_list_lock);
+static LIST_HEAD(connector_list);
+
struct drm_conn_prop_enum_list {
int type;
const char *name;
@@ -267,6 +275,7 @@ int drm_connector_init(struct drm_device *dev,
goto out_put_type_id;
}
+ INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
@@ -474,6 +483,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
drm_mode_object_unregister(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
+ fwnode_handle_put(connector->fwnode);
+ connector->fwnode = NULL;
spin_lock_irq(&dev->mode_config.connector_list_lock);
list_del(&connector->head);
dev->mode_config.num_connector--;
@@ -532,6 +543,9 @@ int drm_connector_register(struct drm_connector *connector)
/* Let userspace know we have a new connector */
drm_sysfs_hotplug_event(connector->dev);
+ mutex_lock(&connector_list_lock);
+ list_add_tail(&connector->global_connector_list_entry, &connector_list);
+ mutex_unlock(&connector_list_lock);
goto unlock;
err_debugfs:
@@ -560,6 +574,10 @@ void drm_connector_unregister(struct drm_connector *connector)
return;
}
+ mutex_lock(&connector_list_lock);
+ list_del_init(&connector->global_connector_list_entry);
+ mutex_unlock(&connector_list_lock);
+
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
@@ -1602,7 +1620,7 @@ EXPORT_SYMBOL(drm_mode_create_tv_properties);
* connectors.
*
* Atomic drivers should use drm_connector_attach_scaling_mode_property()
- * instead to correctly assign &drm_connector_state.picture_aspect_ratio
+ * instead to correctly assign &drm_connector_state.scaling_mode
* in the atomic state.
*/
int drm_mode_create_scaling_mode_property(struct drm_device *dev)
@@ -1722,7 +1740,7 @@ EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property);
* @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*).
*
* This is used to add support for scaling mode to atomic drivers.
- * The scaling mode will be set to &drm_connector_state.picture_aspect_ratio
+ * The scaling mode will be set to &drm_connector_state.scaling_mode
* and can be used from &drm_connector_helper_funcs->atomic_check for validation.
*
* This is the atomic version of drm_mode_create_scaling_mode_property().
@@ -2543,6 +2561,67 @@ out:
return ret;
}
+/**
+ * drm_connector_find_by_fwnode - Find a connector based on the associated fwnode
+ * @fwnode: fwnode for which to find the matching drm_connector
+ *
+ * This function looks up a drm_connector based on its associated fwnode. When
+ * a connector is found, a reference to the connector is returned. The caller must
+ * call drm_connector_put() to release this reference when it is done with the
+ * connector.
+ *
+ * Returns: A reference to the found connector or an ERR_PTR().
+ */
+struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct drm_connector *connector, *found = ERR_PTR(-ENODEV);
+
+ if (!fwnode)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&connector_list_lock);
+
+ list_for_each_entry(connector, &connector_list, global_connector_list_entry) {
+ if (connector->fwnode == fwnode ||
+ (connector->fwnode && connector->fwnode->secondary == fwnode)) {
+ drm_connector_get(connector);
+ found = connector;
+ break;
+ }
+ }
+
+ mutex_unlock(&connector_list_lock);
+
+ return found;
+}
+
+/**
+ * drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
+ * @connector_fwnode: fwnode_handle of the connector to report the event on
+ *
+ * On some hardware a hotplug event notification may come from outside the display
+ * driver / device. An example of this is some USB Type-C setups where the hardware
+ * muxes the DisplayPort data and aux-lines but does not pass the altmode HPD
+ * status bit to the GPU's DP HPD pin.
+ *
+ * This function can be used to report such out-of-band events; it looks up
+ * the matching drm_connector via drm_connector_find_by_fwnode() internally.
+ */
+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode)
+{
+ struct drm_connector *connector;
+
+ connector = drm_connector_find_by_fwnode(connector_fwnode);
+ if (IS_ERR(connector))
+ return;
+
+ if (connector->funcs->oob_hotplug_event)
+ connector->funcs->oob_hotplug_event(connector);
+
+ drm_connector_put(connector);
+}
+EXPORT_SYMBOL(drm_connector_oob_hotplug_event);
+
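A typical caller is a USB Type-C altmode driver that learns about HPD changes out of band; a minimal sketch, assuming the platform gave the driver a fwnode matching the connector (names hypothetical):

	/* Sketch: forward an out-of-band HPD change to DRM. The lookup of
	 * the matching connector happens inside
	 * drm_connector_oob_hotplug_event() via drm_connector_find_by_fwnode().
	 */
	static void my_altmode_hpd_changed(struct my_altmode *amode)
	{
		drm_connector_oob_hotplug_event(amode->connector_fwnode);
	}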
/**
* DOC: Tile group
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index edb772947cb4..63279e984342 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -58,6 +58,7 @@ struct drm_property;
struct edid;
struct kref;
struct work_struct;
+struct fwnode_handle;
/* drm_crtc.c */
int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
@@ -186,6 +187,7 @@ int drm_connector_set_obj_prop(struct drm_mode_object *obj,
int drm_connector_create_standard_properties(struct drm_device *dev);
const char *drm_get_connector_force_name(enum drm_connector_force force);
void drm_connector_free_work_fn(struct work_struct *work);
+struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode);
/* IOCTL */
int drm_connector_property_set_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 6d0f2c447f3b..4d0d1e8e51fa 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -130,6 +130,20 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+/* DP 2.0 128b/132b */
+u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT :
+ DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return (l >> s) & 0xf;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_tx_ffe_preset);
+
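The lane-to-byte arithmetic packs two lanes per DPCD byte, with even lanes in the low nibble and odd lanes in the high nibble. A worked example:

	/* Lane 3:
	 *   i = DP_ADJUST_REQUEST_LANE0_1 + (3 >> 1) -> DP_ADJUST_REQUEST_LANE2_3
	 *   s = DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT  (odd lane -> high nibble)
	 * so the preset is (link_status byte for lanes 2/3 >> 4) & 0xf.
	 */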
u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
unsigned int lane)
{
@@ -207,15 +221,33 @@ EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
- /* Spec says link_bw = link_rate / 0.27Gbps */
- return link_rate / 27000;
+ switch (link_rate) {
+ case 1000000:
+ return DP_LINK_BW_10;
+ case 1350000:
+ return DP_LINK_BW_13_5;
+ case 2000000:
+ return DP_LINK_BW_20;
+ default:
+ /* Spec says link_bw = link_rate / 0.27Gbps */
+ return link_rate / 27000;
+ }
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
- /* Spec says link_rate = link_bw * 0.27Gbps */
- return link_bw * 27000;
+ switch (link_bw) {
+ case DP_LINK_BW_10:
+ return 1000000;
+ case DP_LINK_BW_13_5:
+ return 1350000;
+ case DP_LINK_BW_20:
+ return 2000000;
+ default:
+ /* Spec says link_rate = link_bw * 0.27Gbps */
+ return link_bw * 27000;
+ }
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
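The added cases cover the DP 2.0 UHBR rates, whose DPCD codes do not follow the linear 0.27 Gbps formula. A few worked values (link rates in kHz, as used throughout these helpers):

	/* 8b/10b rates keep the linear mapping:
	 *   drm_dp_link_rate_to_bw_code(162000) == 0x06  (RBR,  1.62 Gbps)
	 *   drm_dp_link_rate_to_bw_code(270000) == 0x0a  (HBR,  2.70 Gbps)
	 *   drm_dp_link_rate_to_bw_code(540000) == 0x14  (HBR2, 5.40 Gbps)
	 * 128b/132b UHBR rates use dedicated codes:
	 *   drm_dp_link_rate_to_bw_code(1000000) == DP_LINK_BW_10
	 *   drm_dp_link_rate_to_bw_code(1350000) == DP_LINK_BW_13_5
	 *   drm_dp_link_rate_to_bw_code(2000000) == DP_LINK_BW_20
	 */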
@@ -590,7 +622,7 @@ static u8 drm_dp_downstream_port_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
- u8 dpcd_ext[6];
+ u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
int ret;
/*
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 86d13d6bc463..571da0c2f39f 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3355,6 +3355,10 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
/**
* drm_dp_update_payload_part1() - Execute payload update part 1
* @mgr: manager to use.
+ * @start_slot: this is the current start slot
+ *
+ * NOTE: start_slot is a temporary workaround for non-atomic drivers;
+ * it will be removed once the non-atomic MST helpers are moved out of the core helpers.
*
* This iterates over all proposed virtual channels, and tries to
* allocate space in the link for them. For 0->slots transitions,
@@ -3365,12 +3369,12 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
* after calling this the driver should generate ACT and payload
* packets.
*/
-int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot)
{
struct drm_dp_payload req_payload;
struct drm_dp_mst_port *port;
int i, j;
- int cur_slots = 1;
+ int cur_slots = start_slot;
bool skip;
mutex_lock(&mgr->payload_lock);
@@ -4334,10 +4338,6 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
{
int ret;
- /* max. time slots - one slot for MTP header */
- if (slots > 63)
- return -ENOSPC;
-
vcpi->pbn = pbn;
vcpi->aligned_pbn = slots * mgr->pbn_div;
vcpi->num_slots = slots;
@@ -4510,6 +4510,27 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
/**
+ * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
+ * @mst_state: mst_state to update
+ * @link_encoding_cap: the encoding format on the link
+ */
+void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
+{
+ if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
+ mst_state->total_avail_slots = 64;
+ mst_state->start_slot = 0;
+ } else {
+ mst_state->total_avail_slots = 63;
+ mst_state->start_slot = 1;
+ }
+
+ DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
+		      (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b" : "8b/10b",
+ mst_state);
+}
+EXPORT_SYMBOL(drm_dp_mst_update_slots);
+
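An atomic driver is expected to call this once it knows the link's channel-coding capability, before checking slot allocations; a minimal sketch (only drm_atomic_get_mst_topology_state() and drm_dp_mst_update_slots() are real API here, the wrapper is hypothetical):

	static int my_update_mst_slots(struct drm_atomic_state *state,
				       struct drm_dp_mst_topology_mgr *mgr,
				       u8 link_encoding_cap)
	{
		struct drm_dp_mst_topology_state *mst_state;

		mst_state = drm_atomic_get_mst_topology_state(state, mgr);
		if (IS_ERR(mst_state))
			return PTR_ERR(mst_state);

		/* 64 slots from 0 for 128b/132b, 63 from 1 for 8b/10b */
		drm_dp_mst_update_slots(mst_state, link_encoding_cap);

		return 0;
	}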
+/**
* drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
* @mgr: manager for this port
* @port: port to allocate a virtual channel for.
@@ -4540,7 +4561,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
if (ret) {
- drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d max=63 ret=%d\n",
+ drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n",
DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
drm_dp_mst_topology_put_port(port);
goto out;
@@ -5228,7 +5249,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state)
{
struct drm_dp_vcpi_allocation *vcpi;
- int avail_slots = 63, payload_count = 0;
+ int avail_slots = mst_state->total_avail_slots, payload_count = 0;
list_for_each_entry(vcpi, &mst_state->vcpis, next) {
/* Releasing VCPI is always OK-even if the port is gone */
@@ -5257,7 +5278,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
}
}
drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
- mgr, mst_state, avail_slots, 63 - avail_slots);
+ mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots);
return 0;
}
@@ -5534,6 +5555,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (mst_state == NULL)
return -ENOMEM;
+ mst_state->total_avail_slots = 63;
+ mst_state->start_slot = 1;
+
mst_state->mgr = mgr;
INIT_LIST_HEAD(&mst_state->vcpis);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ea9a79bc9583..12893e7be89b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -28,6 +28,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/bitfield.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
@@ -49,6 +50,11 @@
(((edid)->version > (maj)) || \
((edid)->version == (maj) && (edid)->revision > (min)))
+static int oui(u8 first, u8 second, u8 third)
+{
+ return (first << 16) | (second << 8) | third;
+}
+
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
#define EDID_DETAILED_TIMINGS 4
@@ -100,122 +106,128 @@ struct detailed_mode_closure {
#define LEVEL_GTF2 2
#define LEVEL_CVT 3
+#define EDID_QUIRK(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _quirks) \
+{ \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ .quirks = _quirks \
+}
+
static const struct edid_quirk {
- char vendor[4];
- int product_id;
+ u32 panel_id;
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
- { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK('A', 'C', 'R', 44358, EDID_QUIRK_PREFER_LARGE_60),
/* Acer F51 */
- { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK('A', 'P', 'I', 0x7602, EDID_QUIRK_PREFER_LARGE_60),
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
- { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
- { "BOE", 0x78b, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
- { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('C', 'P', 'T', 0x17df, EDID_QUIRK_FORCE_6BPC),
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
- { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('S', 'D', 'C', 0x3652, EDID_QUIRK_FORCE_6BPC),
/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
- { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('B', 'O', 'E', 0x0771, EDID_QUIRK_FORCE_6BPC),
/* Belinea 10 15 55 */
- { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
- { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK('M', 'A', 'X', 1516, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('M', 'A', 'X', 0x77e, EDID_QUIRK_PREFER_LARGE_60),
/* Envision Peripherals, Inc. EN-7100e */
- { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ EDID_QUIRK('E', 'P', 'I', 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
/* Envision EN2028 */
- { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK('E', 'P', 'I', 8232, EDID_QUIRK_PREFER_LARGE_60),
/* Funai Electronics PM36B */
- { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
- EDID_QUIRK_DETAILED_IN_CM },
+ EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ EDID_QUIRK_DETAILED_IN_CM),
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
- { "LGD", 764, EDID_QUIRK_FORCE_10BPC },
+ EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
/* LG Philips LCD LP154W01-A5 */
- { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
- { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+ EDID_QUIRK('L', 'P', 'L', 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
+ EDID_QUIRK('L', 'P', 'L', 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
/* Samsung SyncMaster 205BW. Note: irony */
- { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+ EDID_QUIRK('S', 'A', 'M', 541, EDID_QUIRK_DETAILED_SYNC_PP),
/* Samsung SyncMaster 22[5-6]BW */
- { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
- { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK('S', 'A', 'M', 596, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('S', 'A', 'M', 638, EDID_QUIRK_PREFER_LARGE_60),
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
- { "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC },
+ EDID_QUIRK('S', 'N', 'Y', 0x2541, EDID_QUIRK_FORCE_12BPC),
/* ViewSonic VA2026w */
- { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+ EDID_QUIRK('V', 'S', 'C', 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
/* Medion MD 30217 PG */
- { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+ EDID_QUIRK('M', 'E', 'D', 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
/* Lenovo G50 */
- { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('S', 'D', 'C', 18514, EDID_QUIRK_FORCE_6BPC),
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
- { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+ EDID_QUIRK('S', 'E', 'C', 0xd033, EDID_QUIRK_FORCE_8BPC),
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
- { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+ EDID_QUIRK('E', 'T', 'R', 13896, EDID_QUIRK_FORCE_8BPC),
/* Valve Index Headset */
- { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('V', 'L', 'V', 0x91a8, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b0, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b1, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b2, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b3, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b4, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b5, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b6, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b7, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b8, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b9, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91ba, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bb, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bc, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bd, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91be, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bf, EDID_QUIRK_NON_DESKTOP),
/* HTC Vive and Vive Pro VR Headsets */
- { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
- { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('H', 'V', 'R', 0xaa01, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'V', 'R', 0xaa02, EDID_QUIRK_NON_DESKTOP),
/* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
- { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
- { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
- { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
- { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('O', 'V', 'R', 0x0001, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0003, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0004, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0012, EDID_QUIRK_NON_DESKTOP),
/* Windows Mixed Reality Headsets */
- { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
- { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP },
- { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP },
- { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP },
- { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP },
- { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP },
- { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP },
- { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'P', 'N', 0x3515, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('L', 'E', 'N', 0xb800, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('A', 'U', 'S', 0xc102, EDID_QUIRK_NON_DESKTOP),
/* Sony PlayStation VR Headset */
- { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('S', 'N', 'Y', 0x0704, EDID_QUIRK_NON_DESKTOP),
/* Sensics VR Headsets */
- { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('S', 'E', 'N', 0x1019, EDID_QUIRK_NON_DESKTOP),
/* OSVR HDK and HDK2 VR Headsets */
- { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
};
/*
@@ -1914,6 +1926,45 @@ int drm_add_override_edid_modes(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_add_override_edid_modes);
+static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector,
+ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+ size_t len),
+ void *data)
+{
+ int *null_edid_counter = connector ? &connector->null_edid_counter : NULL;
+ bool *edid_corrupt = connector ? &connector->edid_corrupt : NULL;
+ void *edid;
+ int i;
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (edid == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (get_edid_block(data, edid, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(edid, 0, false, edid_corrupt))
+ break;
+ if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
+ if (null_edid_counter)
+ (*null_edid_counter)++;
+ goto carp;
+ }
+ }
+ if (i == 4)
+ goto carp;
+
+ return edid;
+
+carp:
+ if (connector)
+ connector_bad_edid(connector, edid, 1);
+out:
+ kfree(edid);
+ return NULL;
+}
+
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1947,25 +1998,11 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
if (override)
return override;
- if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ edid = (u8 *)drm_do_get_edid_base_block(connector, get_edid_block, data);
+ if (!edid)
return NULL;
- /* base block fetch */
- for (i = 0; i < 4; i++) {
- if (get_edid_block(data, edid, 0, EDID_LENGTH))
- goto out;
- if (drm_edid_block_valid(edid, 0, false,
- &connector->edid_corrupt))
- break;
- if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
- connector->null_edid_counter++;
- goto carp;
- }
- }
- if (i == 4)
- goto carp;
-
- /* if there's no extensions, we're done */
+	/* if there are no extensions or no connector, we're done */
valid_extensions = edid[0x7e];
if (valid_extensions == 0)
return (struct edid *)edid;
@@ -2019,8 +2056,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
return (struct edid *)edid;
-carp:
- connector_bad_edid(connector, edid, 1);
out:
kfree(edid);
return NULL;
@@ -2069,6 +2104,71 @@ struct edid *drm_get_edid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_get_edid);
+static u32 edid_extract_panel_id(const struct edid *edid)
+{
+ /*
+ * We represent the ID as a 32-bit number so it can easily be compared
+ * with "==".
+ *
+ * NOTE that we deal with endianness differently for the top half
+ * of this ID than for the bottom half. The bottom half (the product
+ * id) gets decoded as little endian by the EDID_PRODUCT_ID macro because
+ * that's how everyone seems to interpret it. The top half (the mfg_id)
+ * gets stored as big endian because that makes
+ * drm_edid_encode_panel_id() and drm_edid_decode_panel_id() easier
+ * to write (it's easier to extract the ASCII). It doesn't really
+ * matter, though, as long as the number here is unique.
+ */
+ return (u32)edid->mfg_id[0] << 24 |
+ (u32)edid->mfg_id[1] << 16 |
+ (u32)EDID_PRODUCT_ID(edid);
+}
+
+/**
+ * drm_edid_get_panel_id - Get a panel's ID through DDC
+ * @adapter: I2C adapter to use for DDC
+ *
+ * This function reads the first block of the EDID of a panel and (assuming
+ * that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value
+ * (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's
+ * supposed to be different for each model of panel.
+ *
+ * This function is intended to be used during early probing on devices where
+ * more than one panel might be present. Because of its intended use it must
+ * assume that the EDID of the panel is correct, at least as far as the ID
+ * is concerned (in other words, we don't process any overrides here).
+ *
+ * NOTE: it's expected that this function and drm_do_get_edid() will both
+ * read the EDID, but there is no caching between them. Since we're only
+ * reading the first block, hopefully this extra overhead won't be too big.
+ *
+ * Return: A 32-bit ID that should be different for each make/model of panel.
+ * See the functions drm_edid_encode_panel_id() and
+ * drm_edid_decode_panel_id() for some details on the structure of this
+ * ID.
+ */
+u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ u32 panel_id;
+
+ edid = drm_do_get_edid_base_block(NULL, drm_do_probe_ddc_edid, adapter);
+
+ /*
+ * There are no manufacturer IDs of 0, so if there is a problem reading
+ * the EDID then we'll just return 0.
+ */
+ if (!edid)
+ return 0;
+
+ panel_id = edid_extract_panel_id(edid);
+ kfree(edid);
+
+ return panel_id;
+}
+EXPORT_SYMBOL(drm_edid_get_panel_id);
+
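Early probe code can now branch on the panel without parsing a full EDID; a minimal sketch, reusing drm_edid_encode_panel_id() from the quirk table above (ddc is the panel's I2C adapter, names hypothetical):

	u32 panel_id = drm_edid_get_panel_id(ddc);	/* 0 if unreadable */

	if (panel_id == drm_edid_encode_panel_id('B', 'O', 'E', 0x78b)) {
		/* select per-panel timings / quirks */
	}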
/**
* drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
* @connector: connector we're probing
@@ -2113,25 +2213,6 @@ EXPORT_SYMBOL(drm_edid_duplicate);
/*** EDID parsing ***/
/**
- * edid_vendor - match a string against EDID's obfuscated vendor field
- * @edid: EDID to match
- * @vendor: vendor string
- *
- * Returns true if @vendor is in @edid, false otherwise
- */
-static bool edid_vendor(const struct edid *edid, const char *vendor)
-{
- char edid_vendor[3];
-
- edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
- edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
- ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
- edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
-
- return !strncmp(edid_vendor, vendor, 3);
-}
-
-/**
* edid_get_quirks - return quirk flags for a given EDID
* @edid: EDID to process
*
@@ -2139,14 +2220,13 @@ static bool edid_vendor(const struct edid *edid, const char *vendor)
*/
static u32 edid_get_quirks(const struct edid *edid)
{
+ u32 panel_id = edid_extract_panel_id(edid);
const struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
-
- if (edid_vendor(edid, quirk->vendor) &&
- (EDID_PRODUCT_ID(edid) == quirk->product_id))
+ if (quirk->panel_id == panel_id)
return quirk->quirks;
}
@@ -4122,32 +4202,24 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
static bool cea_db_is_hdmi_vsdb(const u8 *db)
{
- int hdmi_id;
-
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
if (cea_db_payload_len(db) < 5)
return false;
- hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
-
- return hdmi_id == HDMI_IEEE_OUI;
+ return oui(db[3], db[2], db[1]) == HDMI_IEEE_OUI;
}
static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
{
- unsigned int oui;
-
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
if (cea_db_payload_len(db) < 7)
return false;
- oui = db[3] << 16 | db[2] << 8 | db[1];
-
- return oui == HDMI_FORUM_IEEE_OUI;
+ return oui(db[3], db[2], db[1]) == HDMI_FORUM_IEEE_OUI;
}
static bool cea_db_is_vcdb(const u8 *db)
@@ -5157,6 +5229,71 @@ void drm_get_monitor_range(struct drm_connector *connector,
info->monitor_range.max_vfreq);
}
+static void drm_parse_vesa_mso_data(struct drm_connector *connector,
+ const struct displayid_block *block)
+{
+ struct displayid_vesa_vendor_specific_block *vesa =
+ (struct displayid_vesa_vendor_specific_block *)block;
+ struct drm_display_info *info = &connector->display_info;
+
+ if (block->num_bytes < 3) {
+ drm_dbg_kms(connector->dev, "Unexpected vendor block size %u\n",
+ block->num_bytes);
+ return;
+ }
+
+ if (oui(vesa->oui[0], vesa->oui[1], vesa->oui[2]) != VESA_IEEE_OUI)
+ return;
+
+ if (sizeof(*vesa) != sizeof(*block) + block->num_bytes) {
+ drm_dbg_kms(connector->dev, "Unexpected VESA vendor block size\n");
+ return;
+ }
+
+ switch (FIELD_GET(DISPLAYID_VESA_MSO_MODE, vesa->mso)) {
+ default:
+ drm_dbg_kms(connector->dev, "Reserved MSO mode value\n");
+ fallthrough;
+ case 0:
+ info->mso_stream_count = 0;
+ break;
+ case 1:
+ info->mso_stream_count = 2; /* 2 or 4 links */
+ break;
+ case 2:
+ info->mso_stream_count = 4; /* 4 links */
+ break;
+ }
+
+ if (!info->mso_stream_count) {
+ info->mso_pixel_overlap = 0;
+ return;
+ }
+
+ info->mso_pixel_overlap = FIELD_GET(DISPLAYID_VESA_MSO_OVERLAP, vesa->mso);
+ if (info->mso_pixel_overlap > 8) {
+ drm_dbg_kms(connector->dev, "Reserved MSO pixel overlap value %u\n",
+ info->mso_pixel_overlap);
+ info->mso_pixel_overlap = 8;
+ }
+
+ drm_dbg_kms(connector->dev, "MSO stream count %u, pixel overlap %u\n",
+ info->mso_stream_count, info->mso_pixel_overlap);
+}
+
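A worked example of the decode, assuming the mode field sits in bits 6:5 and the overlap field in bits 3:0 of the mso byte (the layout the DISPLAYID_VESA_MSO_* masks are taken to describe here):

	/* vesa->mso = 0x23:
	 *   mode    = (0x23 >> 5) & 0x3 = 1  ->  mso_stream_count = 2
	 *   overlap =  0x23       & 0xf = 3  ->  mso_pixel_overlap = 3 pixels
	 */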
+static void drm_update_mso(struct drm_connector *connector, const struct edid *edid)
+{
+ const struct displayid_block *block;
+ struct displayid_iter iter;
+
+ displayid_iter_edid_begin(edid, &iter);
+ displayid_iter_for_each(block, &iter) {
+ if (block->tag == DATA_BLOCK_2_VENDOR_SPECIFIC)
+ drm_parse_vesa_mso_data(connector, block);
+ }
+ displayid_iter_end(&iter);
+}
+
/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
* all of the values which would have been set from EDID
*/
@@ -5180,6 +5317,9 @@ drm_reset_display_info(struct drm_connector *connector)
info->non_desktop = 0;
memset(&info->monitor_range, 0, sizeof(info->monitor_range));
+
+ info->mso_stream_count = 0;
+ info->mso_pixel_overlap = 0;
}
u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
@@ -5258,6 +5398,9 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+
+ drm_update_mso(connector, edid);
+
return quirks;
}
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 5231104b1498..69fde60e36b3 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -135,6 +135,56 @@ void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(drm_fb_swab);
+static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, __le32 *sbuf, unsigned int pixels)
+{
+ unsigned int x;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf[x]);
+ dbuf[x] = ((pix & 0x00e00000) >> 16) |
+ ((pix & 0x0000e000) >> 11) |
+ ((pix & 0x000000c0) >> 6);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb332 - Convert XRGB8888 to RGB332 clip buffer
+ * @dst: RGB332 destination buffer
+ * @src: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for RGB332 devices that don't natively support XRGB8888.
+ *
+ * This function does not apply clipping on dst, i.e. the destination is a small buffer
+ * containing the clip rect only.
+ */
+void drm_fb_xrgb8888_to_rgb332(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t width = drm_rect_width(clip);
+ size_t src_len = width * sizeof(u32);
+ unsigned int y;
+ void *sbuf;
+
+ /* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */
+ sbuf = kmalloc(src_len, GFP_KERNEL);
+ if (!sbuf)
+ return;
+
+ src += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < drm_rect_height(clip); y++) {
+ memcpy(sbuf, src, src_len);
+ drm_fb_xrgb8888_to_rgb332_line(dst, sbuf, width);
+ src += fb->pitches[0];
+ dst += width;
+ }
+
+ kfree(sbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
+
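The per-line helper keeps the top 3/3/2 bits of each channel. A worked example for one pixel:

	/* XRGB8888 pixel 0x00ff8040 (R = 0xff, G = 0x80, B = 0x40):
	 *   (pix & 0x00e00000) >> 16 = 0xe0   R -> 0b111
	 *   (pix & 0x0000e000) >> 11 = 0x10   G -> 0b100
	 *   (pix & 0x000000c0) >>  6 = 0x01   B -> 0b01
	 * RGB332 result: 0xf1
	 */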
static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf,
unsigned int pixels,
bool swab)
@@ -251,6 +301,44 @@ static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf,
}
/**
+ * drm_fb_xrgb8888_to_rgb888 - Convert XRGB8888 to RGB888 clip buffer
+ * @dst: RGB888 destination buffer
+ * @src: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for RGB888 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function does not apply clipping on dst, i.e. the destination
+ * is a small buffer containing the clip rect only.
+ */
+void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t width = drm_rect_width(clip);
+ size_t src_len = width * sizeof(u32);
+ unsigned int y;
+ void *sbuf;
+
+ /* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */
+ sbuf = kmalloc(src_len, GFP_KERNEL);
+ if (!sbuf)
+ return;
+
+ src += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < drm_rect_height(clip); y++) {
+ memcpy(sbuf, src, src_len);
+ drm_fb_xrgb8888_to_rgb888_line(dst, sbuf, width);
+ src += fb->pitches[0];
+ dst += width * 3;
+ }
+
+ kfree(sbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
+
+/**
* drm_fb_xrgb8888_to_rgb888_dstclip - Convert XRGB8888 to RGB888 clip buffer
 * @dst: RGB888 destination buffer (iomem)
* @dst_pitch: destination buffer pitch
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index eda832f9200d..25837b1d6639 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -133,6 +133,9 @@ const struct drm_format_info *__drm_format_info(u32 format)
{
static const struct drm_format_info formats[] = {
{ .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R10, .depth = 10, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R12, .depth = 12, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 3c75d79dbb65..746fd8c73845 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -6,6 +6,7 @@
*/
#include <linux/slab.h>
+#include <linux/module.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fb_helper.h>
@@ -17,6 +18,8 @@
#include "drm_internal.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
#define AFBC_HEADER_SIZE 16
#define AFBC_TH_LAYOUT_ALIGNMENT 8
#define AFBC_HDR_ALIGN 64
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index a61946374c82..7b9f69f21f1e 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,6 +10,10 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -17,6 +21,8 @@
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
+MODULE_IMPORT_NS(DMA_BUF);
+
/**
* DOC: overview
*
@@ -162,6 +168,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
return PTR_ERR(pages);
}
+ /*
+ * TODO: Allocating WC pages which are correctly flushed is only
+ * supported on x86. Ideal solution would be a GFP_WC flag, which also
+ * ttm_pool.c could use.
+ */
+#ifdef CONFIG_X86
+ if (shmem->map_wc)
+ set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
+#endif
+
shmem->pages = pages;
return 0;
@@ -203,6 +219,11 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
if (--shmem->pages_use_count > 0)
return;
+#ifdef CONFIG_X86
+ if (shmem->map_wc)
+ set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+#endif
+
drm_gem_put_pages(obj, shmem->pages,
shmem->pages_mark_dirty_on_put,
shmem->pages_mark_accessed_on_put);
@@ -542,7 +563,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
} else {
page = shmem->pages[page_offset];
- ret = vmf_insert_page(vma, vmf->address, page);
+ ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
mutex_unlock(&shmem->pages_lock);
@@ -612,7 +633,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
return ret;
}
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 43cf7e887d1a..bfa386b98134 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -846,7 +846,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
- ttm_tt_destroy_common(bdev, tt);
ttm_tt_fini(tt);
kfree(tt);
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index be4a52dc4d6f..8b8744dcf691 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -522,19 +522,7 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
-/**
- * drm_ioctl_permit - Check ioctl permissions against caller
- *
- * @flags: ioctl permission flags.
- * @file_priv: Pointer to struct drm_file identifying the caller.
- *
- * Checks whether the caller is allowed to run an ioctl with the
- * indicated permissions.
- *
- * Returns:
- * Zero if allowed, -EACCES otherwise.
- */
-int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@@ -557,7 +545,6 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return 0;
}
-EXPORT_SYMBOL(drm_ioctl_permit);
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
[DRM_IOCTL_NR(ioctl)] = { \
@@ -725,7 +712,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER),
};
-#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE(drm_ioctls)
/**
* DOC: driver specific ioctls
@@ -834,8 +821,8 @@ long drm_ioctl(struct file *filp,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
- if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
- return -ENOTTY;
+ if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
+ return -ENOTTY;
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index f933da1656eb..47e92400548d 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -64,17 +64,6 @@ MODULE_PARM_DESC(edid_firmware,
static int __init drm_kms_helper_init(void)
{
- /*
- * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
- * but the module doesn't depend on any fb console symbols. At least
- * attempt to load fbcon to avoid leaving the system without a usable
- * console.
- */
- if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
- IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
- !IS_ENABLED(CONFIG_EXPERT))
- request_module_nowait("fbcon");
-
return drm_dp_aux_dev_init();
}
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index dee4f24a1808..d72c2fac0ff1 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -489,12 +489,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
- /* need some objects */
- if (cl->object_count == 0) {
- DRM_DEBUG_LEASE("no objects in lease\n");
- return -EINVAL;
- }
-
if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
DRM_DEBUG_LEASE("invalid flags\n");
return -EINVAL;
@@ -510,23 +504,26 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
object_count = cl->object_count;
- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
- array_size(object_count, sizeof(__u32)));
- if (IS_ERR(object_ids)) {
- ret = PTR_ERR(object_ids);
- goto out_lessor;
- }
-
+ /* Handle leased objects, if any */
idr_init(&leases);
+ if (object_count != 0) {
+ object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+ array_size(object_count, sizeof(__u32)));
+ if (IS_ERR(object_ids)) {
+ ret = PTR_ERR(object_ids);
+ idr_destroy(&leases);
+ goto out_lessor;
+ }
- /* fill and validate the object idr */
- ret = fill_object_idr(dev, lessor_priv, &leases,
- object_count, object_ids);
- kfree(object_ids);
- if (ret) {
- DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
- idr_destroy(&leases);
- goto out_lessor;
+ /* fill and validate the object idr */
+ ret = fill_object_idr(dev, lessor_priv, &leases,
+ object_count, object_ids);
+ kfree(object_ids);
+ if (ret) {
+ DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
+ idr_destroy(&leases);
+ goto out_lessor;
+ }
}
/* Allocate a file descriptor for the lease */
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 5dd475e82995..18cef04df2f2 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -246,6 +246,52 @@ void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi)
}
EXPORT_SYMBOL(mipi_dsi_device_unregister);
+static void devm_mipi_dsi_device_unregister(void *arg)
+{
+ struct mipi_dsi_device *dsi = arg;
+
+ mipi_dsi_device_unregister(dsi);
+}
+
+/**
+ * devm_mipi_dsi_device_register_full - create a managed MIPI DSI device
+ * @dev: device to tie the MIPI-DSI device lifetime to
+ * @host: DSI host to which this device is connected
+ * @info: pointer to template containing DSI device information
+ *
+ * Create a MIPI DSI device by using the device information provided by
+ * mipi_dsi_device_info template
+ *
+ * This is the managed version of mipi_dsi_device_register_full() which
+ * automatically calls mipi_dsi_device_unregister() when @dev is
+ * unbound.
+ *
+ * Returns:
+ * A pointer to the newly created MIPI DSI device, or, a pointer encoded
+ * with an error
+ */
+struct mipi_dsi_device *
+devm_mipi_dsi_device_register_full(struct device *dev,
+ struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info)
+{
+ struct mipi_dsi_device *dsi;
+ int ret;
+
+ dsi = mipi_dsi_device_register_full(host, info);
+ if (IS_ERR(dsi))
+ return dsi;
+
+ ret = devm_add_action_or_reset(dev,
+ devm_mipi_dsi_device_unregister,
+ dsi);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dsi;
+}
+EXPORT_SYMBOL_GPL(devm_mipi_dsi_device_register_full);
+
static DEFINE_MUTEX(host_lock);
static LIST_HEAD(host_list);
@@ -345,6 +391,41 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
}
EXPORT_SYMBOL(mipi_dsi_detach);
+static void devm_mipi_dsi_detach(void *arg)
+{
+ struct mipi_dsi_device *dsi = arg;
+
+ mipi_dsi_detach(dsi);
+}
+
+/**
+ * devm_mipi_dsi_attach - Attach a MIPI-DSI device to its DSI Host
+ * @dev: device to tie the MIPI-DSI device attachment lifetime to
+ * @dsi: DSI peripheral
+ *
+ * This is the managed version of mipi_dsi_attach() which automatically
+ * calls mipi_dsi_detach() when @dev is unbound.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure.
+ */
+int devm_mipi_dsi_attach(struct device *dev,
+ struct mipi_dsi_device *dsi)
+{
+ int ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, devm_mipi_dsi_detach, dsi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_mipi_dsi_attach);
+
static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
struct mipi_dsi_msg *msg)
{
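
Both new helpers follow the standard recipe for deriving a managed variant from an existing acquire/release pair: perform the unmanaged acquire, then register the release through devm_add_action_or_reset(), which runs the callback immediately if registration itself fails. A generic sketch of the recipe (the foo_* names are placeholders):

#include <linux/device.h>
#include <linux/err.h>

struct foo;

/* Placeholder unmanaged pair. */
struct foo *foo_acquire(void);
void foo_release(struct foo *f);

static void devm_foo_release(void *arg)
{
	foo_release(arg);
}

struct foo *devm_foo_acquire(struct device *dev)
{
	struct foo *f = foo_acquire();
	int ret;

	if (IS_ERR(f))
		return f;

	/* On error this calls devm_foo_release(f) before returning,
	 * so no separate cleanup path is needed here. */
	ret = devm_add_action_or_reset(dev, devm_foo_release, f);
	if (ret)
		return ERR_PTR(ret);

	return f;
}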
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index fcfe1a03c4a1..bf8a6e823a15 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -248,7 +248,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
if (ctx->trylock_only) {
lockdep_assert_held(&ctx->ww_ctx);
- if (!ww_mutex_trylock(&lock->mutex))
+ if (!ww_mutex_trylock(&lock->mutex, NULL))
return -EBUSY;
else
return 0;
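
ww_mutex_trylock() grew a second parameter here: the wait/wound acquire context. Passing NULL keeps the old context-less trylock semantics, while passing a ctx lets the trylock participate in the ww_mutex deadlock-avoidance protocol. A small sketch of both forms, assuming a lock and context supplied by the caller:

#include <linux/ww_mutex.h>

static int trylock_examples(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	if (!ww_mutex_trylock(lock, NULL))	/* plain trylock */
		return -EBUSY;
	ww_mutex_unlock(lock);

	if (!ww_mutex_trylock(lock, ctx))	/* tracked by the ctx */
		return -EBUSY;
	ww_mutex_unlock(lock);

	return 0;
}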
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 997b8827fed2..37c34146eea8 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -231,6 +231,9 @@ EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
* return either the associated struct drm_panel or drm_bridge device. Either
* @panel or @bridge must not be NULL.
*
+ * This function is deprecated and should not be used in new drivers. Use
+ * devm_drm_of_get_bridge() instead.
+ *
* Returns zero if successful, or one of the standard error codes if it fails.
*/
int drm_of_find_panel_or_bridge(const struct device_node *np,
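
For new drivers, the recommended replacement collapses the lookup and any panel-to-bridge wrapping into one managed call. A hedged usage sketch (the port/endpoint numbers are examples, and the function name is the one the comment above recommends):

#include <drm/drm_bridge.h>
#include <drm/drm_of.h>

/* Sketch: managed bridge lookup. A panel found at the endpoint is
 * wrapped in a panel-bridge automatically, and teardown is tied to
 * the lifetime of 'dev'. */
static int example_bind_bridge(struct device *dev,
			       struct drm_encoder *encoder)
{
	struct drm_bridge *bridge;

	bridge = devm_drm_of_get_bridge(dev, dev->of_node,
					0 /* port */, 0 /* endpoint */);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	return drm_bridge_attach(encoder, bridge, NULL, 0);
}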
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index f6bdec7fa925..a9359878f4ed 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -109,6 +109,12 @@ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
+ .width = 1280,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -134,6 +140,26 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO 2021 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Chuwi HiBook (CWI514) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Chuwi Hi10 Pro (CWI529) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -185,6 +211,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win2,
+ }, { /* GPD Win 3 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03"),
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
@@ -193,6 +225,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
},
.driver_data = (void *)&itworks_tw891,
+ }, { /* KD Kurio Smart C15200 2-in-1 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "KD Interactive"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Kurio Smart"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "KDM960BCP"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* Lenovo Ideapad Miix 310 laptop, only some production batches
* have a portrait screen, the resolution check makes the quirk
@@ -211,10 +250,15 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
- }, { /* Lenovo Ideapad D330 */
+ }, { /* Lenovo Ideapad D330-10IGM (HD) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo Ideapad D330-10IGM (FHD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
@@ -225,6 +269,19 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
+ }, { /* Samsung GalaxyBook 10.6 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
+ },
+ .driver_data = (void *)&lcd1280x1920_rightside_up,
+ }, { /* Valve Steam Deck */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
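
Each table entry pairs DMI match criteria with a driver_data payload giving the panel's native width, height, and orientation; drivers then query the table by mode size. A hedged sketch of how a driver consumes the table:

#include <drm/drm_connector.h>
#include <drm/drm_utils.h>

/* Sketch: look up the quirk for a connector's native mode and attach
 * the orientation property when a DMI entry matches. */
static void apply_orientation_quirk(struct drm_connector *connector,
				    int width, int height)
{
	int orientation = drm_get_panel_orientation_quirk(width, height);

	if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
		drm_connector_set_panel_orientation(connector, orientation);
}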
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index deb23dbec8b5..d8ba95744410 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -29,6 +29,7 @@
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
+#include <linux/module.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
@@ -39,6 +40,8 @@
#include "drm_internal.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
/**
* DOC: overview and lifetime rules
*
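
The dma-buf exports were moved into the DMA_BUF symbol namespace, so every module that calls them must declare the import explicitly or fail the modpost check. The two halves of the mechanism, sketched with a placeholder symbol name:

/* Exporting side (as dma-buf does): place the symbol in a namespace. */
#include <linux/export.h>
void example_ns_export(void);	/* placeholder symbol */
EXPORT_SYMBOL_NS_GPL(example_ns_export, DMA_BUF);

/* Importing side: declare the namespace once per module, as the
 * MODULE_IMPORT_NS(DMA_BUF) lines in this series do. */
#include <linux/module.h>
MODULE_IMPORT_NS(DMA_BUF);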
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 5606bca3caa8..61d5c57f23e1 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -795,6 +795,86 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+static bool check_connector_changed(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
+ u64 old_epoch_counter;
+
+ /* Only handle HPD capable connectors. */
+ drm_WARN_ON(dev, !(connector->polled & DRM_CONNECTOR_POLL_HPD));
+
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
+
+ old_status = connector->status;
+ old_epoch_counter = connector->epoch_counter;
+ connector->status = drm_helper_probe_detect(connector, NULL, false);
+
+ if (old_epoch_counter == connector->epoch_counter) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Same epoch counter %llu\n",
+ connector->base.id,
+ connector->name,
+ connector->epoch_counter);
+
+ return false;
+ }
+
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id,
+ connector->name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
+
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Changed epoch counter %llu => %llu\n",
+ connector->base.id,
+ connector->name,
+ old_epoch_counter,
+ connector->epoch_counter);
+
+ return true;
+}
+
+/**
+ * drm_connector_helper_hpd_irq_event - hotplug processing
+ * @connector: drm_connector
+ *
+ * Drivers can use this helper function to run a detect cycle on a connector
+ * which has the DRM_CONNECTOR_POLL_HPD flag set in its &polled member.
+ *
+ * This helper function is useful for drivers which can track hotplug
+ * interrupts for a single connector. Drivers that want to send a
+ * hotplug event for all connectors or can't track hotplug interrupts
+ * per connector need to use drm_helper_hpd_irq_event().
+ *
+ * This function must be called from process context with no mode
+ * setting locks held.
+ *
+ * Note that a connector can be both polled and probed from the hotplug
+ * handler, in case the hotplug interrupt is known to be unreliable.
+ *
+ * Returns:
+ * A boolean indicating whether the connector status changed or not
+ */
+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ bool changed;
+
+ mutex_lock(&dev->mode_config.mutex);
+ changed = check_connector_changed(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed) {
+ drm_kms_helper_hotplug_event(dev);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Sent hotplug event\n",
+ connector->base.id,
+ connector->name);
+ }
+
+ return changed;
+}
+EXPORT_SYMBOL(drm_connector_helper_hpd_irq_event);
+
/**
* drm_helper_hpd_irq_event - hotplug processing
* @dev: drm_device
@@ -808,23 +888,25 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
* interrupts for each connector.
*
* Drivers which support hotplug interrupts for each connector individually and
- * which have a more fine-grained detect logic should bypass this code and
- * directly call drm_kms_helper_hotplug_event() in case the connector state
- * changed.
+ * which have a more fine-grained detect logic can use
+ * drm_connector_helper_hpd_irq_event(). Alternatively, they should bypass this
+ * code and directly call drm_kms_helper_hotplug_event() in case the connector
+ * state changed.
*
* This function must be called from process context with no mode
* setting locks held.
*
* Note that a connector can be both polled and probed from the hotplug handler,
* in case the hotplug interrupt is known to be unreliable.
+ *
+ * Returns:
+ * A boolean indicating whether the connector status changed or not
*/
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- enum drm_connector_status old_status;
bool changed = false;
- u64 old_epoch_counter;
if (!dev->mode_config.poll_enabled)
return false;
@@ -836,33 +918,8 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- old_status = connector->status;
-
- old_epoch_counter = connector->epoch_counter;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Old epoch counter %llu\n", connector->base.id,
- connector->name,
- old_epoch_counter);
-
- connector->status = drm_helper_probe_detect(connector, NULL, false);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.id,
- connector->name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->status));
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] New epoch counter %llu\n",
- connector->base.id,
- connector->name,
- connector->epoch_counter);
-
- /*
- * Check if epoch counter had changed, meaning that we need
- * to send a uevent.
- */
- if (old_epoch_counter != connector->epoch_counter)
+ if (check_connector_changed(connector))
changed = true;
-
}
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
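
A driver with a dedicated per-connector hotplug interrupt can now defer entirely to the new helper. A minimal sketch of a threaded IRQ handler using it (the handler name and wiring are hypothetical):

#include <drm/drm_probe_helper.h>
#include <linux/interrupt.h>

/* Sketch: threaded IRQ handler for a single HPD line. The helper
 * re-runs detect on the connector and sends the hotplug uevent only
 * when the epoch counter shows a real change. */
static irqreturn_t example_hpd_irq_thread(int irq, void *data)
{
	struct drm_connector *connector = data;

	drm_connector_helper_hpd_irq_event(connector);

	return IRQ_HANDLED;
}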
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 6c353c9dc772..dfec479830e4 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -127,8 +127,7 @@ struct drm_property *drm_property_create(struct drm_device *dev,
property->num_values = num_values;
INIT_LIST_HEAD(&property->enum_list);
- strncpy(property->name, name, DRM_PROP_NAME_LEN);
- property->name[DRM_PROP_NAME_LEN-1] = '\0';
+ strscpy_pad(property->name, name, DRM_PROP_NAME_LEN);
list_add_tail(&property->head, &dev->mode_config.property_list);
@@ -421,8 +420,7 @@ int drm_property_add_enum(struct drm_property *property,
if (!prop_enum)
return -ENOMEM;
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ strscpy_pad(prop_enum->name, name, DRM_PROP_NAME_LEN);
prop_enum->value = value;
property->values[index] = value;
@@ -475,8 +473,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (!property)
return -ENOENT;
- strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
- out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+ strscpy_pad(out_resp->name, property->name, DRM_PROP_NAME_LEN);
out_resp->flags = property->flags;
value_count = property->num_values;
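
strncpy() required the manual NUL termination because it does not guarantee one when the source fills the buffer; strscpy_pad() always terminates and additionally zero-fills the remainder, which keeps these fixed-size name buffers free of stale bytes when they are later copied to userspace. A small comparison of the two idioms, sketched with an arbitrary buffer size:

#include <linux/string.h>

#define NAME_LEN 32

static void copy_name(char dst[NAME_LEN], const char *src)
{
	/* Old idiom: two statements, silent truncation, no padding
	 * of the unused tail. */
	strncpy(dst, src, NAME_LEN);
	dst[NAME_LEN - 1] = '\0';

	/* New idiom: one call; returns -E2BIG on truncation and pads
	 * the rest of dst with zeroes. */
	strscpy_pad(dst, src, NAME_LEN);
}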
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 968a9560b4aa..76ff6ec3421b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -10,6 +10,7 @@
* Copyright (c) 2003-2004 IBM Corp.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -50,8 +51,45 @@ static struct device_type drm_sysfs_device_minor = {
.name = "drm_minor"
};
+static struct device_type drm_sysfs_device_connector = {
+ .name = "drm_connector",
+};
+
struct class *drm_class;
+#ifdef CONFIG_ACPI
+static bool drm_connector_acpi_bus_match(struct device *dev)
+{
+ return dev->type == &drm_sysfs_device_connector;
+}
+
+static struct acpi_device *drm_connector_acpi_find_companion(struct device *dev)
+{
+ struct drm_connector *connector = to_drm_connector(dev);
+
+ return to_acpi_device_node(connector->fwnode);
+}
+
+static struct acpi_bus_type drm_connector_acpi_bus = {
+ .name = "drm_connector",
+ .match = drm_connector_acpi_bus_match,
+ .find_companion = drm_connector_acpi_find_companion,
+};
+
+static void drm_sysfs_acpi_register(void)
+{
+ register_acpi_bus_type(&drm_connector_acpi_bus);
+}
+
+static void drm_sysfs_acpi_unregister(void)
+{
+ unregister_acpi_bus_type(&drm_connector_acpi_bus);
+}
+#else
+static void drm_sysfs_acpi_register(void) { }
+static void drm_sysfs_acpi_unregister(void) { }
+#endif
+
static char *drm_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -85,6 +123,8 @@ int drm_sysfs_init(void)
}
drm_class->devnode = drm_devnode;
+
+ drm_sysfs_acpi_register();
return 0;
}
@@ -97,11 +137,17 @@ void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
+ drm_sysfs_acpi_unregister();
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
}
+static void drm_sysfs_release(struct device *dev)
+{
+ kfree(dev);
+}
+
/*
* Connector properties
*/
@@ -273,27 +319,47 @@ static const struct attribute_group *connector_dev_groups[] = {
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct device *kdev;
+ int r;
if (connector->kdev)
return 0;
- connector->kdev =
- device_create_with_groups(drm_class, dev->primary->kdev, 0,
- connector, connector_dev_groups,
- "card%d-%s", dev->primary->index,
- connector->name);
+ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ if (!kdev)
+ return -ENOMEM;
+
+ device_initialize(kdev);
+ kdev->class = drm_class;
+ kdev->type = &drm_sysfs_device_connector;
+ kdev->parent = dev->primary->kdev;
+ kdev->groups = connector_dev_groups;
+ kdev->release = drm_sysfs_release;
+ dev_set_drvdata(kdev, connector);
+
+ r = dev_set_name(kdev, "card%d-%s", dev->primary->index, connector->name);
+ if (r)
+ goto err_free;
+
DRM_DEBUG("adding \"%s\" to sysfs\n",
connector->name);
- if (IS_ERR(connector->kdev)) {
- DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
- return PTR_ERR(connector->kdev);
+ r = device_add(kdev);
+ if (r) {
+ drm_err(dev, "failed to register connector device: %d\n", r);
+ goto err_free;
}
+ connector->kdev = kdev;
+
if (connector->ddc)
return sysfs_create_link(&connector->kdev->kobj,
&connector->ddc->dev.kobj, "ddc");
return 0;
+
+err_free:
+ put_device(kdev);
+ return r;
}
void drm_sysfs_connector_remove(struct drm_connector *connector)
@@ -374,11 +440,6 @@ void drm_sysfs_connector_status_event(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_sysfs_connector_status_event);
-static void drm_sysfs_release(struct device *dev)
-{
- kfree(dev);
-}
-
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
const char *minor_str;
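
device_create_with_groups() is replaced with open-coded two-step registration so the connector device can be given its own device_type (needed for the ACPI bus match added above) before it becomes visible in sysfs. The canonical pattern, sketched with placeholder names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Sketch: two-step device registration. After device_initialize(),
 * the embedded kobject owns the memory, so every error path must use
 * put_device() (which ends up in the release callback), never a bare
 * kfree(). */
static void example_release(struct device *dev)
{
	kfree(dev);
}

static struct device *example_device_add(struct device *parent)
{
	struct device *kdev;
	int ret;

	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
	if (!kdev)
		return ERR_PTR(-ENOMEM);

	device_initialize(kdev);
	kdev->parent = parent;
	kdev->release = example_release;

	ret = dev_set_name(kdev, "example0");
	if (ret)
		goto err_put;

	ret = device_add(kdev);
	if (ret)
		goto err_put;

	return kdev;

err_put:
	put_device(kdev);
	return ERR_PTR(ret);
}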
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 6d8bed9c739d..6788ea8490d1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -5,10 +5,13 @@
#include <drm/drm_prime.h>
#include <linux/dma-buf.h>
+#include <linux/module.h>
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
static struct lock_class_key etnaviv_prime_lock_class;
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index cc5b07f86346..242a5fd8b932 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1733,7 +1733,6 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
DBG("%s", dev_name(gpu->dev));
- flush_workqueue(gpu->wq);
destroy_workqueue(gpu->wq);
etnaviv_sched_fini(gpu);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index feb6da1b6ceb..180bb633d5c5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -163,6 +163,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
if (ret)
goto out_unlock;
+ drm_sched_job_arm(&submit->sched_job);
+
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
submit->out_fence, 0,
@@ -176,7 +178,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
- drm_sched_entity_push_job(&submit->sched_job, sched_entity);
+ drm_sched_entity_push_job(&submit->sched_job);
out_unlock:
mutex_unlock(&submit->gpu->fence_lock);
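
The scheduler API split submission into distinct stages: drm_sched_job_init(), then drm_sched_job_arm(), after which the job's finished fence is valid and may be referenced, and finally drm_sched_entity_push_job(), which now takes only the job because the entity was already bound earlier. The new ordering, sketched under the assumption that the driver has already run drm_sched_job_init() on the job:

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

static struct dma_fence *example_submit(struct drm_sched_job *job)
{
	struct dma_fence *out;

	/* Arm binds the job to its entity and makes s_fence usable. */
	drm_sched_job_arm(job);

	out = dma_fence_get(&job->s_fence->finished);

	/* The entity argument is gone; the armed job already knows it. */
	drm_sched_entity_push_job(job);

	return out;
}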
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 4396224227d1..0a0c042a3155 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -9,6 +9,7 @@
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>
+#include <linux/module.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
@@ -17,6 +18,8 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
struct drm_device *dev = exynos_gem->base.dev;
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 9e90258541a4..46b9c0f13d6d 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -16,7 +16,7 @@
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
static void do_gma_backlight_set(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
backlight_update_status(dev_priv->backlight_device);
}
#endif
@@ -24,7 +24,7 @@ static void do_gma_backlight_set(struct drm_device *dev)
void gma_backlight_enable(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_enabled = true;
if (dev_priv->backlight_device) {
dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
@@ -36,7 +36,7 @@ void gma_backlight_enable(struct drm_device *dev)
void gma_backlight_disable(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_enabled = false;
if (dev_priv->backlight_device) {
dev_priv->backlight_device->props.brightness = 0;
@@ -48,7 +48,7 @@ void gma_backlight_disable(struct drm_device *dev)
void gma_backlight_set(struct drm_device *dev, int v)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_level = v;
if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
dev_priv->backlight_device->props.brightness = v;
@@ -60,7 +60,7 @@ void gma_backlight_set(struct drm_device *dev, int v)
int gma_backlight_init(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_enabled = true;
return dev_priv->ops->backlight_init(dev);
#else
@@ -71,7 +71,7 @@ int gma_backlight_init(struct drm_device *dev)
void gma_backlight_exit(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->backlight_device) {
dev_priv->backlight_device->props.brightness = 0;
backlight_update_status(dev_priv->backlight_device);
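
The gma500-wide sweep that follows replaces the untyped dev->dev_private lookup with a typed upcast. This works because struct drm_psb_private now embeds its struct drm_device, which also explains the companion changes from dev_priv->dev to dev_priv->dev.dev and &dev_priv->dev elsewhere in the series. The helper is presumably a plain container_of wrapper, along these lines:

#include <linux/kernel.h>

/* Sketch: typed upcast from the embedded drm_device. Valid only
 * because struct drm_psb_private embeds 'struct drm_device dev'. */
static inline struct drm_psb_private *to_drm_psb_private(struct drm_device *dev)
{
	return container_of(dev, struct drm_psb_private, dev);
}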
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1342e7fb382f..d7c6cca23e94 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -38,7 +38,7 @@ static void cdv_disable_vga(struct drm_device *dev)
static int cdv_output_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
drm_mode_create_scaling_mode_property(dev);
@@ -146,7 +146,7 @@ static const struct backlight_ops cdv_ops = {
static int cdv_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -206,7 +206,7 @@ static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
static void cdv_init_pm(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 pwr_cnt;
int domain = pci_domain_nr(pdev->bus);
@@ -259,7 +259,7 @@ static void cdv_errata(struct drm_device *dev)
*/
static int cdv_save_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
@@ -314,7 +314,7 @@ static int cdv_save_display_registers(struct drm_device *dev)
*/
static int cdv_restore_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
@@ -383,7 +383,7 @@ static int cdv_restore_display_registers(struct drm_device *dev)
static int cdv_power_down(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_cnt, pwr_mask, pwr_sts;
int tries = 5;
@@ -405,7 +405,7 @@ static int cdv_power_down(struct drm_device *dev)
static int cdv_power_up(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_cnt, pwr_mask, pwr_sts;
int tries = 5;
@@ -429,7 +429,7 @@ static void cdv_hotplug_work_func(struct work_struct *work)
{
struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
hotplug_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->dev;
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
@@ -440,7 +440,7 @@ static void cdv_hotplug_work_func(struct work_struct *work)
static int cdv_hotplug_event(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
schedule_work(&dev_priv->hotplug_work);
REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
return 1;
@@ -468,7 +468,7 @@ static const char *force_audio_names[] = {
void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_property *prop;
int i;
@@ -497,7 +497,7 @@ static const char *broadcast_rgb_names[] = {
void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_property *prop;
int i;
@@ -574,7 +574,7 @@ static const struct psb_offset cdv_regmap[2] = {
static int cdv_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index c3a9f6b3c848..94ebc48a4349 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -455,7 +455,7 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = NULL;
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -489,7 +489,7 @@ void cdv_disable_sr(struct drm_device *dev)
void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
/* Is only one pipe enabled? */
@@ -574,7 +574,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -829,7 +829,7 @@ static void i8xx_clock(int refclk, struct gma_clock_t *clock)
static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -910,7 +910,7 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
const struct psb_offset *map = &dev_priv->regmap[pipe];
struct drm_display_mode *mode;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 595b765ecc71..ba6ad1466374 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -515,7 +515,7 @@ cdv_intel_dp_mode_valid(struct drm_connector *connector,
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
int max_lanes = cdv_intel_dp_max_lane_count(encoder);
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -896,7 +896,7 @@ static bool
cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(encoder->dev);
struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
int lane_count, clock;
@@ -988,7 +988,7 @@ cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
@@ -1744,7 +1744,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
if (is_edp(intel_encoder)) {
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
cdv_intel_edp_panel_vdd_off(intel_encoder);
if (ret) {
@@ -1809,7 +1809,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
@@ -1908,7 +1908,7 @@ static void cdv_intel_dp_add_properties(struct drm_connector *connector)
/* check the VBT to see whether the eDP is on DP-D port */
static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct child_device_config *p_child;
int i;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8a2219fcf9b4..9e1cdb11023c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -57,7 +57,7 @@ struct cdv_intel_lvds_priv {
*/
static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 retval;
if (gma_power_begin(dev, false)) {
@@ -81,7 +81,7 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
*/
static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 blc_pwm_ctl;
if (gma_power_begin(dev, false)) {
@@ -105,7 +105,7 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
static void cdv_intel_lvds_set_power(struct drm_device *dev,
struct drm_encoder *encoder, bool on)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pp_status;
if (!gma_power_begin(dev, true))
@@ -154,7 +154,7 @@ static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *conn
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_display_mode *fixed_mode =
dev_priv->mode_dev.panel_fixed_mode;
@@ -180,7 +180,7 @@ static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct drm_encoder *tmp_encoder;
struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
@@ -227,7 +227,7 @@ static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
@@ -245,7 +245,7 @@ static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
@@ -260,7 +260,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
u32 pfit_control;
@@ -297,7 +297,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
int ret;
@@ -428,7 +428,7 @@ static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
static bool lvds_is_present_in_vbt(struct drm_device *dev,
u8 *i2c_pin)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int i;
if (!dev_priv->child_dev_num)
@@ -486,7 +486,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
struct drm_encoder *encoder;
struct drm_display_mode *scan;
struct drm_crtc *crtc;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 lvds;
int pipe;
u8 pin;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 0b8648396fb2..321e416489a9 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -81,7 +81,7 @@ static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct drm_framebuffer *fb = vma->vm_private_data;
struct drm_device *dev = fb->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
int page_num;
int i;
@@ -261,7 +261,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fb_helper->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct fb_info *info;
struct drm_framebuffer *fb;
@@ -374,7 +374,7 @@ static int psbfb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fb_helper->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned int fb_size;
int bytespp;
@@ -422,7 +422,7 @@ static int psb_fbdev_destroy(struct drm_device *dev,
int psb_fbdev_init(struct drm_device *dev)
{
struct drm_fb_helper *fb_helper;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
@@ -457,7 +457,7 @@ free:
static void psb_fbdev_fini(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (!dev_priv->fb_helper)
return;
@@ -474,7 +474,7 @@ static const struct drm_mode_config_funcs psb_mode_funcs = {
static void psb_setup_outputs(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_connector *connector;
drm_mode_create_scaling_mode_property(dev);
@@ -533,7 +533,7 @@ static void psb_setup_outputs(struct drm_device *dev)
void psb_modeset_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
int i;
@@ -566,7 +566,7 @@ void psb_modeset_init(struct drm_device *dev)
void psb_modeset_cleanup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->modeset) {
drm_kms_helper_poll_fini(dev);
psb_fbdev_fini(dev);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index fbf420051ef5..5ae54c9d2819 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -147,7 +147,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
obj = vma->vm_private_data; /* GEM object */
dev = obj->dev;
- dev_priv = dev->dev_private;
+ dev_priv = to_drm_psb_private(dev);
r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 4c91e86f4b14..954f3a275d81 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -15,7 +15,7 @@ void gma_get_core_freq(struct drm_device *dev)
struct pci_dev *pci_root =
pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, 0);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index b03f7b8241f2..cbcecbaa041b 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -51,7 +51,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct gtt_range *gtt;
@@ -136,7 +136,7 @@ gma_pipe_set_base_exit:
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
int palreg = map->palette;
@@ -189,7 +189,7 @@ int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -324,7 +324,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
@@ -553,7 +553,7 @@ int gma_crtc_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = set->crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
if (!dev_priv->rpm_enabled)
@@ -572,7 +572,7 @@ int gma_crtc_set_config(struct drm_mode_set *set,
void gma_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
@@ -615,7 +615,7 @@ void gma_crtc_save(struct drm_crtc *crtc)
void gma_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index df9b611b856a..55a2a6919533 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -53,7 +53,7 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
*/
static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long offset;
offset = r->resource.start - dev_priv->gtt_mem->start;
@@ -118,7 +118,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
*/
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 __iomem *gtt_slot;
u32 pte;
int i;
@@ -188,7 +188,7 @@ int psb_gtt_pin(struct gtt_range *gt)
{
int ret = 0;
struct drm_device *dev = gt->gem.dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gpu_base = dev_priv->gtt.gatt_start;
mutex_lock(&dev_priv->gtt_mutex);
@@ -226,7 +226,7 @@ out:
void psb_gtt_unpin(struct gtt_range *gt)
{
struct drm_device *dev = gt->gem.dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gpu_base = dev_priv->gtt.gatt_start;
mutex_lock(&dev_priv->gtt_mutex);
@@ -266,7 +266,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
const char *name, int backed, u32 align)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gtt_range *gt;
struct resource *r = dev_priv->gtt_mem;
int ret;
@@ -322,13 +322,13 @@ void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
static void psb_gtt_alloc(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
init_rwsem(&dev_priv->gtt.sem);
}
void psb_gtt_takedown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (dev_priv->gtt_map) {
@@ -347,7 +347,7 @@ void psb_gtt_takedown(struct drm_device *dev)
int psb_gtt_init(struct drm_device *dev, int resume)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned gtt_pages;
unsigned long stolen_size, vram_stolen_size;
@@ -496,7 +496,7 @@ out_err:
int psb_gtt_restore(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct resource *r = dev_priv->gtt_mem->child;
struct gtt_range *range;
unsigned int restored = 0, total = 0, size = 0;
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d838369f0119..d5ca5f241974 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -207,7 +207,7 @@ static void parse_backlight_data(struct drm_psb_private *dev_priv,
lvds_bl = kmemdup(vbt_lvds_bl, sizeof(*vbt_lvds_bl), GFP_KERNEL);
if (!lvds_bl) {
- dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
+ dev_err(dev_priv->dev.dev, "out of memory for backlight data\n");
return;
}
dev_priv->lvds_bl = lvds_bl;
@@ -248,7 +248,7 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
GFP_KERNEL);
if (panel_fixed_mode == NULL) {
- dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+ dev_err(dev_priv->dev.dev, "out of memory for fixed panel mode\n");
return;
}
@@ -259,7 +259,7 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
drm_mode_debug_printmodeline(panel_fixed_mode);
} else {
- dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
+ dev_dbg(dev_priv->dev.dev, "ignoring invalid LVDS VBT\n");
dev_priv->lvds_vbt = 0;
kfree(panel_fixed_mode);
}
@@ -515,7 +515,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
*/
int psb_intel_init_bios(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct vbt_header *vbt = NULL;
struct bdb_header *bdb = NULL;
@@ -579,7 +579,7 @@ int psb_intel_init_bios(struct drm_device *dev)
*/
void psb_intel_destroy_bios(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
kfree(dev_priv->sdvo_lvds_vbt_mode);
kfree(dev_priv->lfp_lvds_vbt_mode);
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index c17cbafa468a..09cedabf4776 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -75,7 +75,7 @@ struct intel_gpio {
void
gma_intel_i2c_reset(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
GMBUS_REG_WRITE(GMBUS0, 0);
}
@@ -196,7 +196,7 @@ intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
"gma500 GPIO%c", "?BACDE?F"[pin]);
gpio->adapter.owner = THIS_MODULE;
gpio->adapter.algo_data = &gpio->algo;
- gpio->adapter.dev.parent = dev_priv->dev->dev;
+ gpio->adapter.dev.parent = dev_priv->dev.dev;
gpio->algo.setsda = set_data;
gpio->algo.setscl = set_clock;
gpio->algo.getsda = get_data;
@@ -226,7 +226,7 @@ intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
adapter);
int ret;
- gma_intel_i2c_reset(dev_priv->dev);
+ gma_intel_i2c_reset(&dev_priv->dev);
intel_i2c_quirk_set(dev_priv, true);
set_data(gpio, 1);
@@ -394,7 +394,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
"reserved",
"dpd",
};
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret, i;
dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
@@ -432,7 +432,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
bus->force_bit = intel_gpio_create(dev_priv, i);
}
- gma_intel_i2c_reset(dev_priv->dev);
+ gma_intel_i2c_reset(&dev_priv->dev);
return 0;
@@ -480,7 +480,7 @@ void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
void gma_intel_teardown_gmbus(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int i;
if (dev_priv->gmbus == NULL)
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 68e787924ed0..7e76790c6a81 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -18,7 +18,7 @@
static void mid_get_fuse_settings(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct pci_dev *pci_root =
pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
@@ -94,7 +94,7 @@ static void mid_get_fuse_settings(struct drm_device *dev)
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
uint32_t platform_rev_id = 0;
- struct pci_dev *pdev = to_pci_dev(dev_priv->dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->dev.dev);
int domain = pci_domain_nr(pdev->bus);
struct pci_dev *pci_gfx_root =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));
@@ -106,8 +106,7 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
pci_dev_put(pci_gfx_root);
- dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
- dev_priv->platform_rev_id);
+ dev_dbg(dev_priv->dev.dev, "platform_rev_id is %x\n", dev_priv->platform_rev_id);
}
struct mid_vbt_header {
@@ -270,7 +269,7 @@ out:
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 addr;
u8 __iomem *vbt_virtual;
@@ -325,7 +324,7 @@ out:
int mid_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
mid_get_fuse_settings(dev);
mid_get_vbt_data(dev_priv);
mid_get_pci_revID(dev_priv);
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index d856580b8111..fe9ace2a7967 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -66,7 +66,7 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (atomic_read(&driver->needs_tlbflush) || force) {
uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
@@ -94,7 +94,7 @@ static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t val;
down_write(&driver->sem);
@@ -120,7 +120,7 @@ void psb_mmu_flush(struct psb_mmu_driver *driver)
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
struct drm_device *dev = pd->driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
@@ -230,7 +230,7 @@ void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
struct psb_mmu_driver *driver = pd->driver;
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_mmu_pt *pt;
int i;
@@ -409,7 +409,7 @@ struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
psb_mmu_free_pagedir(driver->default_pd);
@@ -422,7 +422,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
atomic_t *msvdx_mmu_invaldc)
{
struct psb_mmu_driver *driver;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
driver = kmalloc(sizeof(*driver), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 129f87971002..c6b115954b7d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -82,7 +82,7 @@ static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
{
const struct gma_limit_t *limit = NULL;
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
|| gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
@@ -214,7 +214,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -361,7 +361,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
@@ -589,7 +589,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
int pipe = gma_crtc->pipe;
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 454156fcbec7..5c75eae630b5 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -20,7 +20,7 @@
static int oaktrail_output_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->iLVDS_enable)
oaktrail_lvds_init(dev, &dev_priv->mode_dev);
else
@@ -51,7 +51,7 @@ static int oaktrail_brightness;
static int oaktrail_set_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int level = bd->props.brightness;
u32 blc_pwm_ctl;
u32 max_pwm_blc;
@@ -96,7 +96,7 @@ static int oaktrail_get_brightness(struct backlight_device *bd)
static int device_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long core_clock;
u16 bl_max_freq;
uint32_t value;
@@ -133,7 +133,7 @@ static const struct backlight_ops oaktrail_ops = {
static int oaktrail_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
struct backlight_properties props;
@@ -175,7 +175,7 @@ static int oaktrail_backlight_init(struct drm_device *dev)
*/
static int oaktrail_save_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_save_area *regs = &dev_priv->regs;
struct psb_pipe *p = &regs->pipe[0];
int i;
@@ -289,7 +289,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
*/
static int oaktrail_restore_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_save_area *regs = &dev_priv->regs;
struct psb_pipe *p = &regs->pipe[0];
u32 pp_stat;
@@ -404,7 +404,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
*/
static int oaktrail_power_down(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_mask;
u32 pwr_sts;
@@ -428,7 +428,7 @@ static int oaktrail_power_down(struct drm_device *dev)
*/
static int oaktrail_power_up(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
u32 pwr_sts, pwr_cnt;
@@ -500,7 +500,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
static int oaktrail_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
@@ -524,7 +524,7 @@ static int oaktrail_chip_setup(struct drm_device *dev)
static void oaktrail_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
gma_intel_teardown_gmbus(dev);
oaktrail_hdmi_teardown(dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index a097a59a9eae..6eef60a5ac27 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -130,7 +130,7 @@ static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
HDMI_WRITE(HDMI_HCR, 0x67);
@@ -145,7 +145,7 @@ static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
HDMI_WRITE(0x51a8, 0x0);
@@ -264,7 +264,7 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
int pipe = 1;
int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
@@ -494,7 +494,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
static int dpms_mode = -1;
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
u32 temp;
@@ -529,7 +529,7 @@ oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
u32 temp;
@@ -665,7 +665,7 @@ failed_connector:
void oaktrail_hdmi_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev;
struct oaktrail_hdmi_dev *hdmi_dev;
int ret;
@@ -718,7 +718,7 @@ out:
void oaktrail_hdmi_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct pci_dev *pdev;
@@ -735,7 +735,7 @@ void oaktrail_hdmi_teardown(struct drm_device *dev)
/* save HDMI register state */
void oaktrail_hdmi_save(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
@@ -788,7 +788,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
/* restore HDMI register state */
void oaktrail_hdmi_restore(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index f9b1f88c73bd..28b995ef2844 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -37,7 +37,7 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
bool on)
{
u32 pp_status;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (!gma_power_begin(dev, true))
return;
@@ -83,7 +83,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector = NULL;
@@ -155,7 +155,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
@@ -171,7 +171,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 ret;
if (gma_power_begin(dev, false)) {
@@ -191,7 +191,7 @@ static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
static void oaktrail_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
@@ -215,7 +215,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
struct drm_display_mode *mode = NULL;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
mode_dev->panel_fixed_mode = NULL;
@@ -294,7 +294,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct edid *edid;
struct i2c_adapter *i2c_adap;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
index 1d2dd6ea1c71..d1ae91fcd224 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -133,7 +133,7 @@ void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_i2c_chan *chan;
chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index a1ffc6a1c255..fef04ff8c3a9 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -147,7 +147,7 @@ static struct psb_intel_opregion *system_opregion;
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct opregion_asle *asle = dev_priv->opregion.asle;
struct backlight_device *bd = dev_priv->backlight_device;
@@ -190,7 +190,7 @@ static void psb_intel_opregion_asle_work(struct work_struct *work)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev_priv->dev, asle->bclp);
+ asle_stat |= asle_set_backlight(&dev_priv->dev, asle->bclp);
asle->aslc = asle_stat;
@@ -198,7 +198,7 @@ static void psb_intel_opregion_asle_work(struct work_struct *work)
void psb_intel_opregion_asle_intr(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
@@ -211,7 +211,7 @@ void psb_intel_opregion_asle_intr(struct drm_device *dev)
void psb_intel_opregion_enable_asle(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle && system_opregion) {
@@ -258,7 +258,7 @@ static struct notifier_block psb_intel_opregion_notifier = {
void psb_intel_opregion_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
@@ -278,7 +278,7 @@ void psb_intel_opregion_init(struct drm_device *dev)
void psb_intel_opregion_fini(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
@@ -304,7 +304,7 @@ void psb_intel_opregion_fini(struct drm_device *dev)
int psb_intel_opregion_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
u32 opregion_phy, mboxes;
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index 20ace6010f9f..d2a46d96e746 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -47,7 +47,7 @@ static DEFINE_SPINLOCK(power_ctrl_lock); /* Serialize power claim */
*/
void gma_power_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/* FIXME: Move APM/OSPM base into relevant device code */
dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
@@ -82,7 +82,7 @@ void gma_power_uninit(struct drm_device *dev)
*/
static void gma_suspend_display(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->suspended)
return;
@@ -101,7 +101,7 @@ static void gma_suspend_display(struct drm_device *dev)
static void gma_resume_display(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/* turn on the display power island */
dev_priv->ops->power_up(dev);
@@ -125,7 +125,7 @@ static void gma_resume_display(struct pci_dev *pdev)
static void gma_suspend_pci(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int bsm, vbt;
if (dev_priv->suspended)
@@ -155,7 +155,7 @@ static void gma_suspend_pci(struct pci_dev *pdev)
static bool gma_resume_pci(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
if (!dev_priv->suspended)
@@ -189,7 +189,7 @@ int gma_power_suspend(struct device *_dev)
{
struct pci_dev *pdev = to_pci_dev(_dev);
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
mutex_lock(&power_mutex);
if (!dev_priv->suspended) {
@@ -234,7 +234,7 @@ int gma_power_resume(struct device *_dev)
*/
bool gma_power_is_on(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
return dev_priv->display_power;
}
@@ -248,7 +248,7 @@ bool gma_power_is_on(struct drm_device *dev)
*/
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
unsigned long flags;
@@ -288,7 +288,7 @@ out_false:
*/
void gma_power_end(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long flags;
spin_lock_irqsave(&power_ctrl_lock, flags);
dev_priv->display_count--;
@@ -310,7 +310,7 @@ int psb_runtime_resume(struct device *dev)
int psb_runtime_idle(struct device *dev)
{
struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
- struct drm_psb_private *dev_priv = drmdev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(drmdev);
if (dev_priv->display_count)
return 0;
else
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 951725a0f7a3..3030f18ba022 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -18,7 +18,7 @@
static int psb_output_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
psb_intel_lvds_init(dev, &dev_priv->mode_dev);
psb_intel_sdvo_init(dev, SDVOB);
return 0;
@@ -55,7 +55,7 @@ static int psb_get_brightness(struct backlight_device *bd)
static int psb_backlight_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long core_clock;
/* u32 bl_max_freq; */
/* unsigned long value; */
@@ -110,7 +110,7 @@ static const struct backlight_ops psb_ops = {
static int psb_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
struct backlight_properties props;
@@ -149,7 +149,7 @@ static int psb_backlight_init(struct drm_device *dev)
static void psb_init_pm(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
gating &= ~3; /* Disable 2D clock gating */
@@ -167,7 +167,7 @@ static void psb_init_pm(struct drm_device *dev)
*/
static int psb_save_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_crtc *crtc;
struct gma_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
@@ -205,7 +205,7 @@ static int psb_save_display_registers(struct drm_device *dev)
*/
static int psb_restore_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_crtc *crtc;
struct gma_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
@@ -300,7 +300,7 @@ static const struct psb_offset psb_regmap[2] = {
static int psb_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->regmap = psb_regmap;
gma_get_core_freq(dev);
gma_intel_setup_gmbus(dev);
@@ -311,7 +311,7 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 58bce1a60a4d..7a10bb39ef0b 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -124,7 +124,7 @@ void psb_spank(struct drm_psb_private *dev_priv)
static int psb_do_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_gtt *pg = &dev_priv->gtt;
uint32_t stolen_gtt;
@@ -163,71 +163,74 @@ static int psb_do_init(struct drm_device *dev)
static void psb_driver_unload(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/* TODO: Kill vblank etc here */
- if (dev_priv) {
- if (dev_priv->backlight_device)
- gma_backlight_exit(dev);
- psb_modeset_cleanup(dev);
+ if (dev_priv->backlight_device)
+ gma_backlight_exit(dev);
+ psb_modeset_cleanup(dev);
- if (dev_priv->ops->chip_teardown)
- dev_priv->ops->chip_teardown(dev);
+ if (dev_priv->ops->chip_teardown)
+ dev_priv->ops->chip_teardown(dev);
- psb_intel_opregion_fini(dev);
+ psb_intel_opregion_fini(dev);
- if (dev_priv->pf_pd) {
- psb_mmu_free_pagedir(dev_priv->pf_pd);
- dev_priv->pf_pd = NULL;
- }
- if (dev_priv->mmu) {
- struct psb_gtt *pg = &dev_priv->gtt;
-
- down_read(&pg->sem);
- psb_mmu_remove_pfn_sequence(
- psb_mmu_get_default_pd
- (dev_priv->mmu),
- pg->mmu_gatt_start,
- dev_priv->vram_stolen_size >> PAGE_SHIFT);
- up_read(&pg->sem);
- psb_mmu_driver_takedown(dev_priv->mmu);
- dev_priv->mmu = NULL;
- }
- psb_gtt_takedown(dev);
- if (dev_priv->scratch_page) {
- set_pages_wb(dev_priv->scratch_page, 1);
- __free_page(dev_priv->scratch_page);
- dev_priv->scratch_page = NULL;
- }
- if (dev_priv->vdc_reg) {
- iounmap(dev_priv->vdc_reg);
- dev_priv->vdc_reg = NULL;
- }
- if (dev_priv->sgx_reg) {
- iounmap(dev_priv->sgx_reg);
- dev_priv->sgx_reg = NULL;
- }
- if (dev_priv->aux_reg) {
- iounmap(dev_priv->aux_reg);
- dev_priv->aux_reg = NULL;
- }
- pci_dev_put(dev_priv->aux_pdev);
- pci_dev_put(dev_priv->lpc_pdev);
+ if (dev_priv->pf_pd) {
+ psb_mmu_free_pagedir(dev_priv->pf_pd);
+ dev_priv->pf_pd = NULL;
+ }
+ if (dev_priv->mmu) {
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ down_read(&pg->sem);
+ psb_mmu_remove_pfn_sequence(
+ psb_mmu_get_default_pd
+ (dev_priv->mmu),
+ pg->mmu_gatt_start,
+ dev_priv->vram_stolen_size >> PAGE_SHIFT);
+ up_read(&pg->sem);
+ psb_mmu_driver_takedown(dev_priv->mmu);
+ dev_priv->mmu = NULL;
+ }
+ psb_gtt_takedown(dev);
+ if (dev_priv->scratch_page) {
+ set_pages_wb(dev_priv->scratch_page, 1);
+ __free_page(dev_priv->scratch_page);
+ dev_priv->scratch_page = NULL;
+ }
+ if (dev_priv->vdc_reg) {
+ iounmap(dev_priv->vdc_reg);
+ dev_priv->vdc_reg = NULL;
+ }
+ if (dev_priv->sgx_reg) {
+ iounmap(dev_priv->sgx_reg);
+ dev_priv->sgx_reg = NULL;
+ }
+ if (dev_priv->aux_reg) {
+ iounmap(dev_priv->aux_reg);
+ dev_priv->aux_reg = NULL;
+ }
+ pci_dev_put(dev_priv->aux_pdev);
+ pci_dev_put(dev_priv->lpc_pdev);
- /* Destroy VBT data */
- psb_intel_destroy_bios(dev);
+ /* Destroy VBT data */
+ psb_intel_destroy_bios(dev);
- kfree(dev_priv);
- dev->dev_private = NULL;
- }
gma_power_uninit(dev);
}
+static void psb_device_release(void *data)
+{
+ struct drm_device *dev = data;
+
+ psb_driver_unload(dev);
+}
+
static int psb_driver_load(struct drm_device *dev, unsigned long flags)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct drm_psb_private *dev_priv;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long resource_start, resource_len;
unsigned long irqflags;
int ret = -ENOMEM;
@@ -235,14 +238,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
struct gma_encoder *gma_encoder;
struct psb_gtt *pg;
- /* allocating and initializing driver private data */
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
+ /* initializing driver private data */
dev_priv->ops = (struct psb_ops *)flags;
- dev_priv->dev = dev;
- dev->dev_private = (void *) dev_priv;
pg = &dev_priv->gtt;
@@ -409,8 +407,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
pm_runtime_enable(dev->dev);
pm_runtime_set_active(dev->dev);
#endif
- /* Intel drm driver load is done, continue doing pvr load */
- return 0;
+
+ return devm_add_action_or_reset(dev->dev, psb_device_release, dev);
+
out_err:
psb_driver_unload(dev);
return ret;
@@ -431,7 +430,7 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
static unsigned int runtime_allowed;
if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
@@ -445,38 +444,30 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct drm_psb_private *dev_priv;
struct drm_device *dev;
int ret;
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- dev = drm_dev_alloc(&driver, &pdev->dev);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto err_pci_disable_device;
- }
+ dev_priv = devm_drm_dev_alloc(&pdev->dev, &driver, struct drm_psb_private, dev);
+ if (IS_ERR(dev_priv))
+ return PTR_ERR(dev_priv);
+ dev = &dev_priv->dev;
pci_set_drvdata(pdev, dev);
ret = psb_driver_load(dev, ent->driver_data);
if (ret)
- goto err_drm_dev_put;
+ return ret;
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
- goto err_psb_driver_unload;
+ return ret;
return 0;
-
-err_psb_driver_unload:
- psb_driver_unload(dev);
-err_drm_dev_put:
- drm_dev_put(dev);
-err_pci_disable_device:
- pci_disable_device(pdev);
- return ret;
}
static void psb_pci_remove(struct pci_dev *pdev)
@@ -484,8 +475,6 @@ static void psb_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
- psb_driver_unload(dev);
- drm_dev_put(dev);
}
static const struct dev_pm_ops psb_pm_ops = {
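The probe conversion above trades the manual unwind ladder for managed resources: pcim_enable_device() and devm_drm_dev_alloc() tie the PCI enable and the drm_device (now embedded in drm_psb_private) to devres, and psb_driver_load() finishes with devm_add_action_or_reset(), which queues a callback that devres runs at unbind — or, if the devres node cannot be allocated, runs the callback immediately and returns the error, so a bare return covers both outcomes. A hedged sketch of that final idiom; the example_* names are stand-ins, not driver symbols:

#include <linux/device.h>
#include <drm/drm_drv.h>

static void example_teardown(struct drm_device *dev);	/* stand-in for psb_driver_unload() */

static void example_release(void *data)
{
	struct drm_device *dev = data;

	example_teardown(dev);
}

static int example_load(struct drm_device *dev)
{
	/* ... hardware setup, each step with its own devres cleanup ... */

	/* Runs example_release(dev) at driver unbind; on devres
	 * allocation failure it runs the callback now and returns
	 * the error, so a plain return covers both paths. */
	return devm_add_action_or_reset(dev->dev, example_release, dev);
}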
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index f2bae270ca7b..0439b10d3db5 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -389,7 +389,8 @@ struct psb_ops;
struct intel_scu_ipc_dev;
struct drm_psb_private {
- struct drm_device *dev;
+ struct drm_device dev;
+
struct pci_dev *aux_pdev; /* Currently only used by mrst */
struct pci_dev *lpc_pdev; /* Currently only used by mrst */
const struct psb_ops *ops;
@@ -567,6 +568,10 @@ struct drm_psb_private {
uint8_t panel_type;
};
+static inline struct drm_psb_private *to_drm_psb_private(struct drm_device *dev)
+{
+ return container_of(dev, struct drm_psb_private, dev);
+}
/* Operations for each board type */
struct psb_ops {
@@ -618,11 +623,6 @@ struct psb_ops {
extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
extern int drm_pick_crtcs(struct drm_device *dev);
-static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
-{
- return (struct drm_psb_private *) dev->dev_private;
-}
-
/* psb_irq.c */
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
@@ -729,13 +729,13 @@ static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
return ioread32(dev_priv->vdc_reg + reg);
}
static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
return ioread32(dev_priv->aux_reg + reg);
}
@@ -761,14 +761,14 @@ static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite32((val), dev_priv->vdc_reg + (reg));
}
static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite32((val), dev_priv->aux_reg + (reg));
}
@@ -789,7 +789,7 @@ static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
static inline void REGISTER_WRITE16(struct drm_device *dev,
uint32_t reg, uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite16((val), dev_priv->vdc_reg + (reg));
}
@@ -798,7 +798,7 @@ static inline void REGISTER_WRITE16(struct drm_device *dev,
static inline void REGISTER_WRITE8(struct drm_device *dev,
uint32_t reg, uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite8((val), dev_priv->vdc_reg + (reg));
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 359606429316..f5f259fde88e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -95,7 +95,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int pipe = gma_crtc->pipe;
@@ -298,7 +298,7 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
@@ -380,7 +380,7 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
int hsync;
int vtot;
int vsync;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -451,7 +451,7 @@ const struct gma_clock_funcs psb_clock_funcs = {
static void psb_intel_cursor_init(struct drm_device *dev,
struct gma_crtc *gma_crtc)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
struct gtt_range *cursor_gt;
@@ -481,7 +481,7 @@ out:
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc;
int i;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index ace95d4bdb6f..ac97e0d3c7dd 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -59,7 +59,7 @@ struct psb_intel_lvds_priv {
*/
static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 ret;
if (gma_power_begin(dev, false)) {
@@ -88,8 +88,7 @@ static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
unsigned int level)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
u8 out_buf[2];
@@ -128,8 +127,7 @@ static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 max_pwm_blc;
u32 blc_pwm_duty_cycle;
@@ -161,7 +159,7 @@ static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
*/
void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_dbg(dev->dev, "backlight level is %d\n", level);
@@ -183,7 +181,7 @@ void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
*/
static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 blc_pwm_ctl;
if (gma_power_begin(dev, false)) {
@@ -208,7 +206,7 @@ static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
*/
static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
u32 pp_status;
@@ -254,8 +252,7 @@ static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
static void psb_intel_lvds_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv =
(struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
@@ -335,7 +332,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct drm_display_mode *fixed_mode =
dev_priv->mode_dev.panel_fixed_mode;
@@ -365,7 +362,7 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
@@ -426,7 +423,7 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
@@ -444,7 +441,7 @@ static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
static void psb_intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
@@ -459,7 +456,7 @@ static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pfit_control;
/*
@@ -493,7 +490,7 @@ static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
static int psb_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
@@ -641,7 +638,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 lvds;
int pipe;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 355da2856389..042c4392e676 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1217,7 +1217,7 @@ psb_intel_sdvo_get_edid(struct drm_connector *connector)
static struct edid *
psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
return drm_get_edid(connector,
&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
@@ -1486,7 +1486,7 @@ static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct drm_display_mode *newmode;
/*
@@ -1570,7 +1570,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
{
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
uint16_t temp_value;
uint8_t cmd;
int ret;
@@ -1878,7 +1878,7 @@ psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int devi
static u8
psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (IS_SDVOB(sdvo_reg)) {
@@ -2415,7 +2415,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder;
struct psb_intel_sdvo *psb_intel_sdvo;
int i;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index deb1fbc1f748..ccf402007beb 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -76,12 +76,12 @@ psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
u32 reg = psb_pipestat(pipe);
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
- if (gma_power_begin(dev_priv->dev, false)) {
+ if (gma_power_begin(&dev_priv->dev, false)) {
u32 writeVal = PSB_RVDC32(reg);
writeVal |= (mask | (mask >> 16));
PSB_WVDC32(writeVal, reg);
(void) PSB_RVDC32(reg);
- gma_power_end(dev_priv->dev);
+ gma_power_end(&dev_priv->dev);
}
}
}
@@ -92,12 +92,12 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
if ((dev_priv->pipestat[pipe] & mask) != 0) {
u32 reg = psb_pipestat(pipe);
dev_priv->pipestat[pipe] &= ~mask;
- if (gma_power_begin(dev_priv->dev, false)) {
+ if (gma_power_begin(&dev_priv->dev, false)) {
u32 writeVal = PSB_RVDC32(reg);
writeVal &= ~mask;
PSB_WVDC32(writeVal, reg);
(void) PSB_RVDC32(reg);
- gma_power_end(dev_priv->dev);
+ gma_power_end(&dev_priv->dev);
}
}
}
@@ -107,8 +107,7 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
*/
static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t pipe_stat_val = 0;
uint32_t pipe_stat_reg = psb_pipestat(pipe);
@@ -178,7 +177,7 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
*/
static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 val, addr;
if (stat_1 & _PSB_CE_TWOD_COMPLETE)
@@ -226,7 +225,7 @@ static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
static irqreturn_t psb_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
u32 sgx_stat_1, sgx_stat_2;
int handled = 0;
@@ -277,8 +276,7 @@ static irqreturn_t psb_irq_handler(int irq, void *arg)
void psb_irq_preinstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -307,7 +305,7 @@ void psb_irq_preinstall(struct drm_device *dev)
void psb_irq_postinstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
unsigned int i;
@@ -356,7 +354,7 @@ int psb_irq_install(struct drm_device *dev, unsigned int irq)
void psb_irq_uninstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long irqflags;
unsigned int i;
@@ -397,7 +395,7 @@ int psb_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
uint32_t reg_val = 0;
uint32_t pipeconf_reg = mid_pipeconf(pipe);
@@ -433,7 +431,7 @@ void psb_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index 97b0c52bfd8a..58a7fe392636 100644
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -14,7 +14,7 @@
static void psb_lid_timer_func(struct timer_list *t)
{
struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
- struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+ struct drm_device *dev = &dev_priv->dev;
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
u32 __iomem *lid_state = dev_priv->opregion.lid_state;
diff --git a/drivers/gpu/drm/gud/Kconfig b/drivers/gpu/drm/gud/Kconfig
index 1c8601bf4d91..9c1e61f9eec3 100644
--- a/drivers/gpu/drm/gud/Kconfig
+++ b/drivers/gpu/drm/gud/Kconfig
@@ -2,7 +2,7 @@
config DRM_GUD
tristate "GUD USB Display"
- depends on DRM && USB
+ depends on DRM && USB && MMU
select LZ4_COMPRESS
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index eb4e08846da4..3f9d4b9a1e3d 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -523,7 +523,13 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
switch (format) {
case GUD_DRM_FORMAT_R1:
fallthrough;
+ case DRM_FORMAT_R8:
+ fallthrough;
case GUD_DRM_FORMAT_XRGB1111:
+ fallthrough;
+ case DRM_FORMAT_RGB332:
+ fallthrough;
+ case DRM_FORMAT_RGB888:
if (!xrgb8888_emulation_format)
xrgb8888_emulation_format = info;
break;
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index 2a388e27d5d7..e351a1f1420d 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -80,10 +80,16 @@ static inline u8 gud_from_fourcc(u32 fourcc)
switch (fourcc) {
case GUD_DRM_FORMAT_R1:
return GUD_PIXEL_FORMAT_R1;
+ case DRM_FORMAT_R8:
+ return GUD_PIXEL_FORMAT_R8;
case GUD_DRM_FORMAT_XRGB1111:
return GUD_PIXEL_FORMAT_XRGB1111;
+ case DRM_FORMAT_RGB332:
+ return GUD_PIXEL_FORMAT_RGB332;
case DRM_FORMAT_RGB565:
return GUD_PIXEL_FORMAT_RGB565;
+ case DRM_FORMAT_RGB888:
+ return GUD_PIXEL_FORMAT_RGB888;
case DRM_FORMAT_XRGB8888:
return GUD_PIXEL_FORMAT_XRGB8888;
case DRM_FORMAT_ARGB8888:
@@ -98,10 +104,16 @@ static inline u32 gud_to_fourcc(u8 format)
switch (format) {
case GUD_PIXEL_FORMAT_R1:
return GUD_DRM_FORMAT_R1;
+ case GUD_PIXEL_FORMAT_R8:
+ return DRM_FORMAT_R8;
case GUD_PIXEL_FORMAT_XRGB1111:
return GUD_DRM_FORMAT_XRGB1111;
+ case GUD_PIXEL_FORMAT_RGB332:
+ return DRM_FORMAT_RGB332;
case GUD_PIXEL_FORMAT_RGB565:
return DRM_FORMAT_RGB565;
+ case GUD_PIXEL_FORMAT_RGB888:
+ return DRM_FORMAT_RGB888;
case GUD_PIXEL_FORMAT_XRGB8888:
return DRM_FORMAT_XRGB8888;
case GUD_PIXEL_FORMAT_ARGB8888:
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index b9b0e435ea0f..daf75c178c2b 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -189,8 +189,14 @@ retry:
ret = -ENOMEM;
goto end_cpu_access;
}
+ } else if (format->format == DRM_FORMAT_R8) {
+ drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, rect);
+ } else if (format->format == DRM_FORMAT_RGB332) {
+ drm_fb_xrgb8888_to_rgb332(buf, vaddr, fb, rect);
} else if (format->format == DRM_FORMAT_RGB565) {
drm_fb_xrgb8888_to_rgb565(buf, vaddr, fb, rect, gud_is_big_endian());
+ } else if (format->format == DRM_FORMAT_RGB888) {
+ drm_fb_xrgb8888_to_rgb888(buf, vaddr, fb, rect);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
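The two new branches route through the existing format helpers, drm_fb_xrgb8888_to_gray8() and drm_fb_xrgb8888_to_rgb332() (and the rgb888 helper just below), keeping the copy loop in one place. As a rough, self-contained sketch of what the RGB332 packing amounts to — keep the top 3/3/2 bits of each channel — with the caveat that the real helper also handles clip rectangles and destination pitch, and the function name here is a stand-in:

#include <stdint.h>
#include <stddef.h>

/* Pack little-endian XRGB8888 pixels (X[31:24] R[23:16] G[15:8] B[7:0])
 * into one RGB332 byte each (R in bits 7-5, G in 4-2, B in 1-0). */
static void xrgb8888_to_rgb332(uint8_t *dst, const uint32_t *src, size_t pixels)
{
	for (size_t i = 0; i < pixels; i++) {
		uint32_t px = src[i];

		dst[i] = ((px >> 16) & 0xe0) |	/* R bits 23-21 -> 7-5 */
			 ((px >> 11) & 0x1c) |	/* G bits 15-13 -> 4-2 */
			 ((px >> 6)  & 0x03);	/* B bits 7-6   -> 1-0 */
	}
}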
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index f960f5d7664e..84b6fc70cbf5 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -126,11 +126,23 @@ config DRM_I915_GVT_KVMGT
depends on DRM_I915_GVT
depends on KVM
depends on VFIO_MDEV
+ select KVM_EXTERNAL_WRITE_TRACKING
default n
help
Choose this option if you want to enable KVMGT support for
Intel GVT-g.
+config DRM_I915_PXP
+ bool "Enable Intel PXP support"
+ depends on DRM_I915
+ depends on INTEL_MEI && INTEL_MEI_PXP
+ default n
+ help
+ PXP (Protected Xe Path) is an i915 component, available on graphics
+ version 12 and newer GPUs, that establishes the hardware-protected
+ session and manages the status and life cycle of the software
+ session.
+
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 335ba9f43d8f..660bb03de6fc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,13 +13,11 @@
# will most likely get a sudden build breakage... Hopefully we will fix
# new warnings before CI updates!
subdir-ccflags-y := -Wall -Wextra
-subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
-subdir-ccflags-y += $(call cc-disable-warning, type-limits)
-subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
+subdir-ccflags-y += -Wno-unused-parameter
+subdir-ccflags-y += -Wno-type-limits
+subdir-ccflags-y += -Wno-missing-field-initializers
+subdir-ccflags-y += -Wno-sign-compare
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
-# clang warnings
-subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
@@ -49,13 +47,15 @@ i915-y += i915_drv.o \
intel_dram.o \
intel_memory_region.o \
intel_pch.o \
+ intel_pcode.o \
intel_pm.o \
intel_region_ttm.o \
intel_runtime_pm.o \
- intel_sideband.o \
+ intel_sbi.o \
intel_step.o \
intel_uncore.o \
intel_wakeref.o \
+ vlv_sideband.o \
vlv_suspend.o
# core library code
@@ -78,9 +78,6 @@ i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
gt-y += \
- gt/debugfs_engines.o \
- gt/debugfs_gt.o \
- gt/debugfs_gt_pm.o \
gt/gen2_engine_cs.o \
gt/gen6_engine_cs.o \
gt/gen6_ppgtt.o \
@@ -100,8 +97,11 @@ gt-y += \
gt/intel_gt.o \
gt/intel_gt_buffer_pool.o \
gt/intel_gt_clock_utils.o \
+ gt/intel_gt_debugfs.o \
+ gt/intel_gt_engines_debugfs.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
+ gt/intel_gt_pm_debugfs.o \
gt/intel_gt_pm_irq.o \
gt/intel_gt_requests.o \
gt/intel_gtt.o \
@@ -154,6 +154,7 @@ gem-y += \
gem/i915_gem_throttle.o \
gem/i915_gem_tiling.o \
gem/i915_gem_ttm.o \
+ gem/i915_gem_ttm_pm.o \
gem/i915_gem_userptr.o \
gem/i915_gem_wait.o \
gem/i915_gemfs.o
@@ -211,8 +212,11 @@ i915-y += \
display/intel_dpio_phy.o \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
+ display/intel_dpt.o \
+ display/intel_drrs.o \
display/intel_dsb.o \
display/intel_fb.o \
+ display/intel_fb_pin.o \
display/intel_fbc.o \
display/intel_fdi.o \
display/intel_fifo_underrun.o \
@@ -222,6 +226,7 @@ i915-y += \
display/intel_hotplug.o \
display/intel_lpe_audio.o \
display/intel_overlay.o \
+ display/intel_plane_initial.o \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
@@ -247,6 +252,7 @@ i915-y += \
display/g4x_dp.o \
display/g4x_hdmi.o \
display/icl_dsi.o \
+ display/intel_backlight.o \
display/intel_crt.o \
display/intel_ddi.o \
display/intel_ddi_buf_trans.o \
@@ -277,6 +283,16 @@ i915-y += \
i915-y += i915_perf.o
+# Protected execution platform (PXP) support
+i915-$(CONFIG_DRM_I915_PXP) += \
+ pxp/intel_pxp.o \
+ pxp/intel_pxp_cmd.o \
+ pxp/intel_pxp_debugfs.o \
+ pxp/intel_pxp_irq.o \
+ pxp/intel_pxp_pm.o \
+ pxp/intel_pxp_session.o \
+ pxp/intel_pxp_tee.o
+
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index de0f358184aa..dc41868d01ef 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -7,6 +7,7 @@
#include "g4x_dp.h"
#include "intel_audio.h"
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -16,9 +17,8 @@
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
-#include "intel_panel.h"
#include "intel_pps.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
struct dp_link_dpll {
int clock;
@@ -211,7 +211,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
@@ -251,7 +251,7 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
@@ -426,7 +426,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
- u32 DP = intel_dp->DP;
if (drm_WARN_ON(&dev_priv->drm,
(intel_de_read(dev_priv, intel_dp->output_reg) &
@@ -437,17 +436,17 @@ intel_dp_link_down(struct intel_encoder *encoder,
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
- DP &= ~DP_LINK_TRAIN_MASK_CPT;
- DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
- DP &= ~DP_LINK_TRAIN_MASK;
- DP |= DP_LINK_TRAIN_PAT_IDLE;
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
}
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
- DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
/*
@@ -464,14 +463,14 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
/* always enable with pattern 1 (as per spec) */
- DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
- DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
+ intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
+ intel_dp->DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
DP_LINK_TRAIN_PAT_1;
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
- DP &= ~DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_dp->DP &= ~DP_PORT_EN;
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
@@ -481,8 +480,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
msleep(intel_dp->pps.panel_power_down_delay);
- intel_dp->DP = DP;
-
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_wakeref_t wakeref;
@@ -582,19 +579,18 @@ cpt_set_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 *DP = &intel_dp->DP;
- *DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
case DP_TRAINING_PATTERN_DISABLE:
- *DP |= DP_LINK_TRAIN_OFF_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
- *DP |= DP_LINK_TRAIN_PAT_1_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
- *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
default:
MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat));
@@ -611,19 +607,18 @@ g4x_set_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 *DP = &intel_dp->DP;
- *DP &= ~DP_LINK_TRAIN_MASK;
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
case DP_TRAINING_PATTERN_DISABLE:
- *DP |= DP_LINK_TRAIN_OFF;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
- *DP |= DP_LINK_TRAIN_PAT_1;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
- *DP |= DP_LINK_TRAIN_PAT_2;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_2;
break;
default:
MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat));
@@ -642,7 +637,7 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
/* enable with pattern 1 (as per spec) */
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
- DP_TRAINING_PATTERN_1);
+ DP_PHY_DPRX, DP_TRAINING_PATTERN_1);
/*
* Magic for VLV/CHV. We _must_ first set up the register
@@ -813,10 +808,10 @@ static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
-static void vlv_set_signal_levels(struct intel_dp *intel_dp,
+static void vlv_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
u8 train_set = intel_dp->train_set[0];
@@ -899,10 +894,10 @@ static void vlv_set_signal_levels(struct intel_dp *intel_dp,
uniqtranscale_reg_value, 0);
}
-static void chv_set_signal_levels(struct intel_dp *intel_dp,
+static void chv_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 deemph_reg_value, margin_reg_value;
bool uniq_trans_scale = false;
u8 train_set = intel_dp->train_set[0];
@@ -1020,10 +1015,11 @@ static u32 g4x_signal_levels(u8 train_set)
}
static void
-g4x_set_signal_levels(struct intel_dp *intel_dp,
+g4x_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
@@ -1067,10 +1063,11 @@ static u32 snb_cpu_edp_signal_levels(u8 train_set)
}
static void
-snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
+snb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
@@ -1118,10 +1115,11 @@ static u32 ivb_cpu_edp_signal_levels(u8 train_set)
}
static void
-ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
+ivb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
@@ -1334,7 +1332,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->sync_state = intel_dp_sync_state;
intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
- intel_encoder->update_pipe = intel_panel_update_backlight;
+ intel_encoder->update_pipe = intel_backlight_update;
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->shutdown = intel_dp_encoder_shutdown;
if (IS_CHERRYVIEW(dev_priv)) {
@@ -1364,15 +1362,15 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->dp.set_link_train = g4x_set_link_train;
if (IS_CHERRYVIEW(dev_priv))
- dig_port->dp.set_signal_levels = chv_set_signal_levels;
+ intel_encoder->set_signal_levels = chv_set_signal_levels;
else if (IS_VALLEYVIEW(dev_priv))
- dig_port->dp.set_signal_levels = vlv_set_signal_levels;
+ intel_encoder->set_signal_levels = vlv_set_signal_levels;
else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
- dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
+ intel_encoder->set_signal_levels = ivb_cpu_edp_set_signal_levels;
else if (IS_SANDYBRIDGE(dev_priv) && port == PORT_A)
- dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
+ intel_encoder->set_signal_levels = snb_cpu_edp_set_signal_levels;
else
- dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+ intel_encoder->set_signal_levels = g4x_set_signal_levels;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
(HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index be352e9f0afc..88c427f3c346 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -14,8 +14,8 @@
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
-#include "intel_sideband.h"
#include "intel_sdvo.h"
+#include "vlv_sideband.h"
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index a3eae3f3eadc..168c84a74d30 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -29,6 +29,7 @@
#include <drm/drm_mipi_dsi.h>
#include "intel_atomic.h"
+#include "intel_backlight.h"
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_crtc.h"
@@ -54,20 +55,28 @@ static int payload_credits_available(struct drm_i915_private *dev_priv,
>> FREE_PLOAD_CREDIT_SHIFT;
}
-static void wait_for_header_credits(struct drm_i915_private *dev_priv,
- enum transcoder dsi_trans)
+static bool wait_for_header_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans, int hdr_credit)
{
if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
- MAX_HEADER_CREDIT, 100))
+ hdr_credit, 100)) {
drm_err(&dev_priv->drm, "DSI header credits not released\n");
+ return false;
+ }
+
+ return true;
}
-static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
- enum transcoder dsi_trans)
+static bool wait_for_payload_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans, int payld_credit)
{
if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
- MAX_PLOAD_CREDIT, 100))
+ payld_credit, 100)) {
drm_err(&dev_priv->drm, "DSI payload credits not released\n");
+ return false;
+ }
+
+ return true;
}
static enum transcoder dsi_port_to_transcoder(enum port port)
@@ -90,8 +99,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for header/payload credits to be released */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- wait_for_header_credits(dev_priv, dsi_trans);
- wait_for_payload_credits(dev_priv, dsi_trans);
+ wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT);
+ wait_for_payload_credits(dev_priv, dsi_trans, MAX_PLOAD_CREDIT);
}
/* send nop DCS command */
@@ -108,7 +117,7 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for header credits to be released */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- wait_for_header_credits(dev_priv, dsi_trans);
+ wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT);
}
/* wait for LP TX in progress bit to be cleared */
@@ -120,54 +129,52 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
}
}
-static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
- u32 len)
+static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+ const struct mipi_dsi_packet *packet)
{
struct intel_dsi *intel_dsi = host->intel_dsi;
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
- int free_credits;
+ const u8 *data = packet->payload;
+ u32 len = packet->payload_length;
int i, j;
+ /* the payload queue can accept at most 256 bytes; check the limit */
+ if (len > MAX_PLOAD_CREDIT * 4) {
+ drm_err(&i915->drm, "payload size exceeds max queue limit\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < len; i += 4) {
u32 tmp = 0;
- free_credits = payload_credits_available(dev_priv, dsi_trans);
- if (free_credits < 1) {
- drm_err(&dev_priv->drm,
- "Payload credit not available\n");
- return false;
- }
+ if (!wait_for_payload_credits(i915, dsi_trans, 1))
+ return -EBUSY;
for (j = 0; j < min_t(u32, len - i, 4); j++)
tmp |= *data++ << 8 * j;
- intel_de_write(dev_priv, DSI_CMD_TXPYLD(dsi_trans), tmp);
+ intel_de_write(i915, DSI_CMD_TXPYLD(dsi_trans), tmp);
}
- return true;
+ return 0;
}
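The rewritten dsi_send_pkt_payld() packs payload bytes little-endian, four per
32-bit register write, waiting for one payload credit per word. A minimal
userspace sketch of the same packing, where pack_payload() and the printf()
stand in for the DSI_CMD_TXPYLD register write (the names are illustrative,
not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Pack 'len' bytes into 32-bit words, least significant byte first,
 * exactly as the inner loop above builds 'tmp'. */
static void pack_payload(const uint8_t *data, uint32_t len)
{
	for (uint32_t i = 0; i < len; i += 4) {
		uint32_t tmp = 0;
		uint32_t chunk = len - i < 4 ? len - i : 4;

		for (uint32_t j = 0; j < chunk; j++)
			tmp |= (uint32_t)*data++ << (8 * j);

		printf("word %u: 0x%08x\n", i / 4, tmp);
	}
}

int main(void)
{
	const uint8_t payload[] = { 0x11, 0x22, 0x33, 0x44, 0x55 };

	pack_payload(payload, sizeof(payload));	/* 0x44332211, 0x00000055 */
	return 0;
}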
static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
- struct mipi_dsi_packet pkt, bool enable_lpdt)
+ const struct mipi_dsi_packet *packet,
+ bool enable_lpdt)
{
struct intel_dsi *intel_dsi = host->intel_dsi;
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
u32 tmp;
- int free_credits;
- /* check if header credit available */
- free_credits = header_credits_available(dev_priv, dsi_trans);
- if (free_credits < 1) {
- drm_err(&dev_priv->drm,
- "send pkt header failed, not enough hdr credits\n");
- return -1;
- }
+ if (!wait_for_header_credits(dev_priv, dsi_trans, 1))
+ return -EBUSY;
tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans));
- if (pkt.payload)
+ if (packet->payload)
tmp |= PAYLOAD_PRESENT;
else
tmp &= ~PAYLOAD_PRESENT;
@@ -178,37 +185,15 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
tmp |= LP_DATA_TRANSFER;
tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
- tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT);
- tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
- tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
- tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
+ tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT);
+ tmp |= ((packet->header[0] & DT_MASK) << DT_SHIFT);
+ tmp |= (packet->header[1] << PARAM_WC_LOWER_SHIFT);
+ tmp |= (packet->header[2] << PARAM_WC_UPPER_SHIFT);
intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp);
return 0;
}
-static int dsi_send_pkt_payld(struct intel_dsi_host *host,
- struct mipi_dsi_packet pkt)
-{
- struct intel_dsi *intel_dsi = host->intel_dsi;
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
-
- /* payload queue can accept *256 bytes*, check limit */
- if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
- drm_err(&i915->drm, "payload size exceeds max queue limit\n");
- return -1;
- }
-
- /* load data into command payload queue */
- if (!add_payld_to_queue(host, pkt.payload,
- pkt.payload_length)) {
- drm_err(&i915->drm, "adding payload to queue failed\n");
- return -1;
- }
-
- return 0;
-}
-
void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -248,7 +233,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
@@ -262,7 +247,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
tmp |= RTERM_SELECT(0x6);
intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
@@ -470,7 +455,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
@@ -485,7 +470,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp);
tmp = intel_de_read(dev_priv,
- ICL_PORT_PCS_DW1_LN0(phy));
+ ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0x1);
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy),
@@ -504,7 +489,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* clear common keeper enable bit */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~COMMON_KEEPER_EN;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
@@ -525,7 +510,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
@@ -538,7 +523,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* Set training enable to trigger update */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp |= TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
@@ -1270,6 +1255,26 @@ static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder,
IGNORE_KVMR_PIPE_A,
enable ? IGNORE_KVMR_PIPE_A : 0);
}
+
+/*
+ * Wa_16012360555:adl-p
+ * SW will have to program the "LP to HS Wakeup Guardband"
+ * to account for the repeaters on the HS Request/Ready
+ * PPI signaling between the Display engine and the DPHY.
+ */
+static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+
+ if (DISPLAY_VER(i915) == 13) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(i915, TGL_DSI_CHKN_REG(port),
+ TGL_DSI_CHKN_LSHS_GB, 0x4);
+ }
+}
+
static void gen11_dsi_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1283,11 +1288,14 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* Wa_1409054076:icl,jsl,ehl */
icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true);
+ /* Wa_16012360555:adl-p */
+ adlp_set_lp_hs_wakeup_gb(encoder);
+
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
/* step7: enable backlight */
- intel_panel_enable_backlight(crtc_state, conn_state);
+ intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
intel_crtc_vblank_on(crtc_state);
@@ -1440,7 +1448,7 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
- intel_panel_disable_backlight(old_conn_state);
+ intel_backlight_disable(old_conn_state);
/* step2d,e: disable transcoder and wait */
gen11_dsi_disable_transcoder(encoder);
@@ -1650,16 +1658,17 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- const struct drm_display_mode *fixed_mode =
- intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
int ret;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- intel_fixed_panel_mode(fixed_mode, adjusted_mode);
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
+
+ ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
@@ -1815,18 +1824,18 @@ static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host,
if (msg->flags & MIPI_DSI_MSG_USE_LPM)
enable_lpdt = true;
- /* send packet header */
- ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt);
- if (ret < 0)
- return ret;
-
/* only long packet contains payload */
if (mipi_dsi_packet_format_is_long(msg->type)) {
- ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt);
+ ret = dsi_send_pkt_payld(intel_dsi_host, &dsi_pkt);
if (ret < 0)
return ret;
}
+ /* send packet header */
+ ret = dsi_send_pkt_hdr(intel_dsi_host, &dsi_pkt, enable_lpdt);
+ if (ret < 0)
+ return ret;
+
//TODO: add payload receive code if needed
ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length;
@@ -2014,7 +2023,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->port = port;
encoder->get_config = gen11_dsi_get_config;
encoder->sync_state = gen11_dsi_sync_state;
- encoder->update_pipe = intel_panel_update_backlight;
+ encoder->update_pipe = intel_backlight_update;
encoder->compute_config = gen11_dsi_compute_config;
encoder->get_hw_state = gen11_dsi_get_hw_state;
encoder->initial_fastset_check = gen11_dsi_initial_fastset_check;
@@ -2048,7 +2057,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
}
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
- intel_panel_setup_backlight(connector, INVALID_PIPE);
+ intel_backlight_setup(intel_connector, INVALID_PIPE);
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 68abeaf2d7d4..e78430001f07 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -285,3 +285,49 @@ void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
}
drm_connector_list_iter_end(&conn_iter);
}
+
+/* NOTE: The connector order must be final before this is called. */
+void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_device *drm_dev = &i915->drm;
+ struct fwnode_handle *fwnode = NULL;
+ struct drm_connector *connector;
+ struct acpi_device *adev;
+
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ /* Always getting the next, even when the last was not used. */
+ fwnode = device_get_next_child_node(drm_dev->dev, fwnode);
+ if (!fwnode)
+ break;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_DSI:
+ /*
+ * Integrated displays have a specific address 0x1f on
+ * most Intel platforms, but not on all of them.
+ */
+ adev = acpi_find_child_device(ACPI_COMPANION(drm_dev->dev),
+ 0x1f, 0);
+ if (adev) {
+ connector->fwnode =
+ fwnode_handle_get(acpi_fwnode_handle(adev));
+ break;
+ }
+ fallthrough;
+ default:
+ connector->fwnode = fwnode_handle_get(fwnode);
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ /*
+ * device_get_next_child_node() takes a reference on the fwnode. If we
+ * stopped iterating because we ran out of connectors, we need to put
+ * it; otherwise fwnode is NULL and the put is a no-op.
+ */
+ fwnode_handle_put(fwnode);
+}
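The iteration leans on device_get_next_child_node() to manage references as it
walks: each step releases the previous node and acquires the next, leaving
only the final node (possibly NULL) for the caller to put. A small sketch of
that contract under assumed names (get_next()/put() are stand-ins, not the
kernel API):

#include <stdio.h>

struct node { int refs; };

static struct node *get_next(struct node *prev, struct node *next)
{
	if (prev)
		prev->refs--;	/* iterator drops the old reference */
	if (next)
		next->refs++;	/* and takes one on the new node */
	return next;
}

static void put(struct node *n)
{
	if (n)
		n->refs--;	/* no-op when iteration ended on NULL */
}

int main(void)
{
	struct node a = { 0 }, b = { 0 };
	struct node *cur = NULL;

	cur = get_next(cur, &a);
	cur = get_next(cur, &b);	/* releases a, holds b */
	put(cur);			/* balance the final get */
	printf("a.refs=%d b.refs=%d\n", a.refs, b.refs);	/* both 0 */
	return 0;
}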
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 9f197401c313..4a760a2baed9 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -13,6 +13,7 @@ void intel_register_dsm_handler(void);
void intel_unregister_dsm_handler(void);
void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915);
void intel_acpi_device_id_update(struct drm_i915_private *i915);
+void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
@@ -20,6 +21,8 @@ static inline
void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) { return; }
static inline
void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
+static inline
+void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { return; }
#endif /* CONFIG_ACPI */
#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 47234d898549..0be8c00e3db9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -39,8 +39,10 @@
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_fb_pin.h"
#include "intel_pm.h"
#include "intel_sprite.h"
+#include "gt/intel_rps.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
struct intel_plane *plane)
@@ -601,6 +603,213 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
return 0;
}
+struct wait_rps_boost {
+ struct wait_queue_entry wait;
+
+ struct drm_crtc *crtc;
+ struct i915_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+ unsigned mode, int sync, void *key)
+{
+ struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+ struct i915_request *rq = wait->request;
+
+ /*
+ * If we missed the vblank, but the request is already running it
+ * is reasonable to assume that it will complete before the next
+ * vblank without our intervention, so leave RPS alone.
+ */
+ if (!i915_request_started(rq))
+ intel_rps_boost(rq);
+ i915_request_put(rq);
+
+ drm_crtc_vblank_put(wait->crtc);
+
+ list_del(&wait->wait.entry);
+ kfree(wait);
+ return 1;
+}
+
+static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+ struct wait_rps_boost *wait;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ return;
+
+ if (drm_crtc_vblank_get(crtc))
+ return;
+
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ drm_crtc_vblank_put(crtc);
+ return;
+ }
+
+ wait->request = to_request(dma_fence_get(fence));
+ wait->crtc = crtc;
+
+ wait->wait.func = do_rps_boost;
+ wait->wait.flags = 0;
+
+ add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
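The boost is wired up by embedding a wait_queue_entry inside wait_rps_boost
and recovering the container from the entry pointer when the vblank wait
queue fires. A standalone sketch of that container_of pattern, with
simplified stand-in types (wait_entry/boost_waiter are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wait_entry {
	int (*func)(struct wait_entry *entry);
};

struct boost_waiter {
	struct wait_entry wait;	/* embedded, like wait_rps_boost.wait */
	int crtc_id;
};

static int on_wake(struct wait_entry *entry)
{
	/* recover the enclosing struct, as do_rps_boost() does */
	struct boost_waiter *w = container_of(entry, struct boost_waiter, wait);

	printf("woken for crtc %d\n", w->crtc_id);
	return 1;
}

int main(void)
{
	struct boost_waiter w = { .wait = { .func = on_wake }, .crtc_id = 2 };

	w.wait.func(&w.wait);	/* simulates the wait queue firing */
	return 0;
}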
+
+/**
+ * intel_prepare_plane_fb - Prepare fb for usage on plane
+ * @_plane: drm plane to prepare for
+ * @_new_plane_state: the plane state being prepared
+ *
+ * Prepares a framebuffer for usage on a display plane. Generally this
+ * involves pinning the underlying object and updating the frontbuffer tracking
+ * bits. Some older platforms need special physical address handling for
+ * cursor planes.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+intel_prepare_plane_fb(struct drm_plane *_plane,
+ struct drm_plane_state *_new_plane_state)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_plane_state *new_plane_state =
+ to_intel_plane_state(_new_plane_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_plane_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
+ int ret;
+
+ if (old_obj) {
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state,
+ to_intel_crtc(old_plane_state->hw.crtc));
+
+ /* Big Hammer: we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer. Note that we rely on userspace rendering
+ * into the buffer attached to the pipe they are waiting
+ * on. If not, userspace generates a GPU hang with IPEHR
+ * point to the MI_WAIT_FOR_EVENT.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
+ */
+ if (intel_crtc_needs_modeset(crtc_state)) {
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
+ old_obj->base.resv, NULL,
+ false, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (new_plane_state->uapi.fence) { /* explicit fencing */
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
+ ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
+ new_plane_state->uapi.fence,
+ i915_fence_timeout(dev_priv),
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!obj)
+ return 0;
+
+ ret = intel_plane_pin_fb(new_plane_state);
+ if (ret)
+ return ret;
+
+ i915_gem_object_wait_priority(obj, 0, &attr);
+
+ if (!new_plane_state->uapi.fence) { /* implicit fencing */
+ struct dma_fence *fence;
+
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
+ obj->base.resv, NULL,
+ false,
+ i915_fence_timeout(dev_priv),
+ GFP_KERNEL);
+ if (ret < 0)
+ goto unpin_fb;
+
+ fence = dma_resv_get_excl_unlocked(obj->base.resv);
+ if (fence) {
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ fence);
+ dma_fence_put(fence);
+ }
+ } else {
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
+ }
+
+ /*
+ * We declare pageflips to be interactive and so merit a small bias
+ * towards upclocking to deliver the frame on time. By only changing
+ * the RPS thresholds to sample more regularly and aim for higher
+ * clocks, we can hopefully deliver low-power workloads (like Kodi)
+ * that are not quite steady state, without resorting to forcing
+ * maximum clocks following a vblank miss (see do_rps_boost()).
+ */
+ if (!state->rps_interactive) {
+ intel_rps_mark_interactive(&dev_priv->gt.rps, true);
+ state->rps_interactive = true;
+ }
+
+ return 0;
+
+unpin_fb:
+ intel_plane_unpin_fb(new_plane_state);
+
+ return ret;
+}
+
+/**
+ * intel_cleanup_plane_fb - Cleans up an fb after plane use
+ * @plane: drm plane to clean up for
+ * @_old_plane_state: the state from the previous modeset
+ *
+ * Cleans up a framebuffer that has just been removed from a plane.
+ */
+static void
+intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_plane_state *_old_plane_state)
+{
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(_old_plane_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(old_plane_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
+
+ if (!obj)
+ return;
+
+ if (state->rps_interactive) {
+ intel_rps_mark_interactive(&dev_priv->gt.rps, false);
+ state->rps_interactive = false;
+ }
+
+ /* Should only be called after a successful intel_prepare_plane_fb()! */
+ intel_plane_unpin_fb(old_plane_state);
+}
+
static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 4e0f96bf6158..03e8c05a74f6 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -848,10 +848,10 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
- if (dev_priv->display.audio_codec_enable)
- dev_priv->display.audio_codec_enable(encoder,
- crtc_state,
- conn_state);
+ if (dev_priv->audio_funcs)
+ dev_priv->audio_funcs->audio_codec_enable(encoder,
+ crtc_state,
+ conn_state);
mutex_lock(&dev_priv->av_mutex);
encoder->audio_connector = connector;
@@ -893,10 +893,10 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
- if (dev_priv->display.audio_codec_disable)
- dev_priv->display.audio_codec_disable(encoder,
- old_crtc_state,
- old_conn_state);
+ if (dev_priv->audio_funcs)
+ dev_priv->audio_funcs->audio_codec_disable(encoder,
+ old_crtc_state,
+ old_conn_state);
mutex_lock(&dev_priv->av_mutex);
encoder->audio_connector = NULL;
@@ -915,6 +915,21 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
intel_lpe_audio_notify(dev_priv, pipe, port, NULL, 0, false);
}
+static const struct intel_audio_funcs g4x_audio_funcs = {
+ .audio_codec_enable = g4x_audio_codec_enable,
+ .audio_codec_disable = g4x_audio_codec_disable,
+};
+
+static const struct intel_audio_funcs ilk_audio_funcs = {
+ .audio_codec_enable = ilk_audio_codec_enable,
+ .audio_codec_disable = ilk_audio_codec_disable,
+};
+
+static const struct intel_audio_funcs hsw_audio_funcs = {
+ .audio_codec_enable = hsw_audio_codec_enable,
+ .audio_codec_disable = hsw_audio_codec_disable,
+};
+
/**
* intel_init_audio_hooks - Set up chip specific audio hooks
* @dev_priv: device private
@@ -922,17 +937,13 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
{
if (IS_G4X(dev_priv)) {
- dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
- dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
+ dev_priv->audio_funcs = &g4x_audio_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
- dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+ dev_priv->audio_funcs = &ilk_audio_funcs;
} else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) {
- dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
- dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
+ dev_priv->audio_funcs = &hsw_audio_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
- dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+ dev_priv->audio_funcs = &ilk_audio_funcs;
}
}
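The pattern here replaces per-callback pointers on dev_priv with one const ops
table chosen at init, so the function pointers live in read-only data. A
compact sketch under hypothetical names (audio_ops/hsw_ops are illustrative):

#include <stdio.h>

struct audio_ops {
	void (*enable)(void);
	void (*disable)(void);
};

static void hsw_enable(void) { puts("hsw codec enable"); }
static void hsw_disable(void) { puts("hsw codec disable"); }

/* one const vtable per platform, selected once at init time */
static const struct audio_ops hsw_ops = {
	.enable = hsw_enable,
	.disable = hsw_disable,
};

int main(void)
{
	const struct audio_ops *ops = &hsw_ops;	/* picked by a platform check */

	if (ops)	/* mirrors the 'if (dev_priv->audio_funcs)' guard */
		ops->enable();
	return 0;
}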
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
new file mode 100644
index 000000000000..9523411cddd8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -0,0 +1,1776 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/pwm.h>
+
+#include "intel_backlight.h"
+#include "intel_connector.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp_aux_backlight.h"
+#include "intel_dsi_dcs_backlight.h"
+#include "intel_panel.h"
+
+/**
+ * scale - scale values from one range to another
+ * @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum legal value for @source_val
+ * @source_max: maximum legal value for @source_val
+ * @target_min: corresponding target value for @source_min
+ * @target_max: corresponding target value for @source_max
+ *
+ * Return @source_val in range [@source_min..@source_max] scaled to range
+ * [@target_min..@target_max].
+ */
+static u32 scale(u32 source_val,
+ u32 source_min, u32 source_max,
+ u32 target_min, u32 target_max)
+{
+ u64 target_val;
+
+ WARN_ON(source_min > source_max);
+ WARN_ON(target_min > target_max);
+
+ /* defensive */
+ source_val = clamp(source_val, source_min, source_max);
+
+ /* avoid overflows */
+ target_val = mul_u32_u32(source_val - source_min,
+ target_max - target_min);
+ target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min);
+ target_val += target_min;
+
+ return target_val;
+}
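For instance, a user level of 128 on a 0..255 scale maps to 32896 on a
0..65535 scale. A standalone sketch of the same round-to-nearest mapping
(scale_demo is illustrative, not the driver helper):

#include <stdint.h>
#include <stdio.h>

static uint32_t scale_demo(uint32_t val, uint32_t smin, uint32_t smax,
			   uint32_t tmin, uint32_t tmax)
{
	/* widen before multiplying, then divide rounding to nearest */
	uint64_t t = (uint64_t)(val - smin) * (tmax - tmin);

	return tmin + (uint32_t)((t + (smax - smin) / 2) / (smax - smin));
}

int main(void)
{
	printf("%u\n", scale_demo(128, 0, 255, 0, 65535));	/* 32896 */
	return 0;
}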
+
+/*
+ * Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
+ * to [hw_min..hw_max].
+ */
+static u32 clamp_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
+ hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
+
+ return hw_level;
+}
+
+/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
+static u32 scale_hw_to_user(struct intel_connector *connector,
+ u32 hw_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(hw_level, panel->backlight.min, panel->backlight.max,
+ 0, user_max);
+}
+
+u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+
+ if (dev_priv->params.invert_brightness < 0)
+ return val;
+
+ if (dev_priv->params.invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+ return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
+ }
+
+ return val;
+}
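The inversion mirrors val inside [pwm_level_min..pwm_level_max], so the two
endpoints swap and everything in between reflects around the midpoint. A tiny
sketch with assumed limits:

#include <stdint.h>
#include <stdio.h>

static uint32_t invert(uint32_t val, uint32_t min, uint32_t max)
{
	return max - val + min;	/* same formula as above */
}

int main(void)
{
	/* with min=10, max=100: 10 -> 100, 100 -> 10, 30 -> 80 */
	printf("%u %u %u\n", invert(10, 10, 100),
	       invert(100, 10, 100), invert(30, 10, 100));
	return 0;
}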
+
+void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ panel->backlight.pwm_funcs->set(conn_state, val);
+}
+
+u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ val = scale(val, panel->backlight.min, panel->backlight.max,
+ panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+ return intel_backlight_invert_pwm_level(connector, val);
+}
+
+u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ if (dev_priv->params.invert_brightness > 0 ||
+ (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
+ val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
+
+ return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
+static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 val;
+
+ val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (DISPLAY_VER(dev_priv) < 4)
+ val >>= 1;
+
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
+
+ pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc);
+ val *= lbpc;
+ }
+
+ return val;
+}
+
+static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return 0;
+
+ return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ return intel_de_read(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller));
+}
+
+static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct intel_panel *panel = &connector->panel;
+ struct pwm_state state;
+
+ pwm_get_state(panel->backlight.pwm, &state);
+ return pwm_get_relative_duty_cycle(&state, 100);
+}
+
+static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
+}
+
+static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
+}
+
+static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp, mask;
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
+
+ lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
+ level /= lbpc;
+ pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
+ }
+
+ if (DISPLAY_VER(dev_priv) == 4) {
+ mask = BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
+ level <<= 1;
+ mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
+ }
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
+ intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
+}
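In combination mode the requested level is factored into the LBPC multiplier
byte written to PCI config space and the PWM duty cycle, so their product
approximates the original level. A worked sketch with a hypothetical
pwm_level_max:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max = 0x1fff;	/* assumed pwm_level_max */
	uint32_t level = 0x1000;

	uint32_t lbpc = level * 0xfe / max + 1;	/* stored in a u8 by the driver */
	uint32_t duty = level / lbpc;

	printf("lbpc=%u duty=%u product=%u\n", lbpc, duty, duty * lbpc);
	return 0;
}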
+
+static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
+}
+
+static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+}
+
+static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
+
+ pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+}
+
+static void
+intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
+
+ panel->backlight.funcs->set(conn_state, level);
+}
+
+/* set backlight brightness to level in range [0..max], assuming hw min is
+ * respected.
+ */
+void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
+ u32 user_level, u32 user_max)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ /*
+ * Lack of crtc may occur during driver init because
+ * connection_mutex isn't held across the entire backlight
+ * setup + modeset readout, and the BIOS can issue the
+ * requests at any time.
+ */
+ if (!panel->backlight.present || !conn_state->crtc)
+ return;
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+
+ hw_level = clamp_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness =
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(conn_state, hw_level);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
+static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, level);
+
+ /*
+ * Although we don't support or enable CPU PWM with LPT/SPT based
+ * systems, it may have been enabled prior to loading the
+ * driver. Disable to avoid warnings on LCPLL disable.
+ *
+ * This needs rework if we need to add support for CPU PWM on PCH split
+ * platforms.
+ */
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ if (tmp & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "cpu backlight was enabled, disabling\n");
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ tmp & ~BLM_PWM_ENABLE);
+ }
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ intel_backlight_set_pwm_level(old_conn_state, val);
+}
+
+static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+}
+
+static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ tmp & ~BLM_PWM_ENABLE);
+}
+
+static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
+
+ if (panel->backlight.controller == 1) {
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ val &= ~UTIL_PIN_ENABLE;
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
+ }
+}
+
+static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
+}
+
+static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_state.enabled = false;
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+}
+
+void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ if (!panel->backlight.present)
+ return;
+
+ /*
+ * Do not disable backlight on the vga_switcheroo path. When switching
+ * away from i915, the other client may depend on i915 to handle the
+ * backlight. This will leave the backlight on unnecessarily when
+ * another client is not activated.
+ */
+ if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Skipping backlight disable on vga switch\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ if (panel->backlight.device)
+ panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
+ panel->backlight.enabled = false;
+ panel->backlight.funcs->disable(old_conn_state, 0);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
+static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pch_ctl1, pch_ctl2, schicken;
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
+
+ if (HAS_PCH_LPT(dev_priv)) {
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ if (panel->backlight.alternate_pwm_increment)
+ schicken |= LPT_PWM_GRANULARITY;
+ else
+ schicken &= ~LPT_PWM_GRANULARITY;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
+ } else {
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
+ if (panel->backlight.alternate_pwm_increment)
+ schicken |= SPT_PWM_GRANULARITY;
+ else
+ schicken &= ~SPT_PWM_GRANULARITY;
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
+ }
+
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
+
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ /* After LPT, override is the default. */
+ if (HAS_PCH_LPT(dev_priv))
+ pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_backlight_set_pwm_level(conn_state, level);
+}
+
+static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
+
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ if (cpu_ctl2 & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
+ cpu_ctl2 &= ~BLM_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ }
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
+
+ if (cpu_transcoder == TRANSCODER_EDP)
+ cpu_ctl2 = BLM_TRANSCODER_EDP;
+ else
+ cpu_ctl2 = BLM_PIPE(cpu_transcoder);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
+
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, freq;
+
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ intel_de_write(dev_priv, BLC_PWM_CTL, 0);
+ }
+
+ freq = panel->backlight.pwm_level_max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
+
+ ctl = freq << 17;
+ if (panel->backlight.combination_mode)
+ ctl |= BLM_LEGACY_MODE;
+ if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
+ ctl |= BLM_POLARITY_PNV;
+
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL);
+
+ /* XXX: combine this into above write? */
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ /*
+ * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
+ * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
+ * that has backlight.
+ */
+ if (DISPLAY_VER(dev_priv) == 2)
+ intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+}
+
+static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
+ u32 ctl, ctl2, freq;
+
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ if (ctl2 & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ }
+
+ freq = panel->backlight.pwm_level_max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
+
+ ctl = freq << 16;
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+
+ ctl2 = BLM_PIPE(pipe);
+ if (panel->backlight.combination_mode)
+ ctl2 |= BLM_COMBINATION_MODE;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+
+ intel_backlight_set_pwm_level(conn_state, level);
+}
+
+static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ u32 ctl, ctl2;
+
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ if (ctl2 & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ }
+
+ ctl = panel->backlight.pwm_level_max << 16;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
+
+ /* XXX: combine this into above write? */
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ ctl2 = 0;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ ctl2 | BLM_PWM_ENABLE);
+}
+
+static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ u32 pwm_ctl, val;
+
+ /* Controller 1 uses the utility pin. */
+ if (panel->backlight.controller == 1) {
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ if (val & UTIL_PIN_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "util pin already enabled\n");
+ val &= ~UTIL_PIN_ENABLE;
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
+ }
+
+ val = 0;
+ if (panel->backlight.util_pin_active_low)
+ val |= UTIL_PIN_POLARITY;
+ intel_de_write(dev_priv, UTIL_PIN_CTL,
+ val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
+ }
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ }
+
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.pwm_level_max);
+
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ pwm_ctl = 0;
+ if (panel->backlight.active_low_pwm)
+ pwm_ctl |= BXT_BLC_PWM_POLARITY;
+
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
+}
+
+static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl;
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ }
+
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.pwm_level_max);
+
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ pwm_ctl = 0;
+ if (panel->backlight.active_low_pwm)
+ pwm_ctl |= BXT_BLC_PWM_POLARITY;
+
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
+}
+
+static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+ panel->backlight.pwm_state.enabled = true;
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+}
+
+static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ if (panel->backlight.level <= panel->backlight.min) {
+ panel->backlight.level = panel->backlight.max;
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness =
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
+ }
+
+ panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
+ panel->backlight.enabled = true;
+ if (panel->backlight.device)
+ panel->backlight.device->props.power = FB_BLANK_UNBLANK;
+}
+
+void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+
+ if (!panel->backlight.present)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ __intel_backlight_enable(crtc_state, conn_state);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 val = 0;
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ if (panel->backlight.enabled)
+ val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
+
+ mutex_unlock(&dev_priv->backlight_lock);
+
+ drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
+ return val;
+}
+
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static u32 scale_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(user_level, 0, user_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
+ u32 user_level, u32 user_max)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ if (!panel->backlight.present)
+ return;
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+
+ hw_level = scale_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(conn_state, hw_level);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
+static int intel_backlight_device_update_status(struct backlight_device *bd)
+{
+ struct intel_connector *connector = bl_get_data(bd);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_device *dev = connector->base.dev;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
+ bd->props.brightness, bd->props.max_brightness);
+ intel_panel_set_backlight(connector->base.state, bd->props.brightness,
+ bd->props.max_brightness);
+
+ /*
+ * Allow flipping bl_power as a sub-state of enabled. Sadly the
+ * backlight class device does not make it easy to differentiate
+ * between callbacks for brightness and bl_power, so our backlight_power
+ * callback needs to take this into account.
+ */
+ if (panel->backlight.enabled) {
+ if (panel->backlight.power) {
+ bool enable = bd->props.power == FB_BLANK_UNBLANK &&
+ bd->props.brightness != 0;
+ panel->backlight.power(connector, enable);
+ }
+ } else {
+ bd->props.power = FB_BLANK_POWERDOWN;
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return 0;
+}
+
+static int intel_backlight_device_get_brightness(struct backlight_device *bd)
+{
+ struct intel_connector *connector = bl_get_data(bd);
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ u32 hw_level;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ hw_level = intel_panel_get_backlight(connector);
+ ret = scale_hw_to_user(connector,
+ hw_level, bd->props.max_brightness);
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ }
+
+ return ret;
+}
+
+static const struct backlight_ops intel_backlight_device_ops = {
+ .update_status = intel_backlight_device_update_status,
+ .get_brightness = intel_backlight_device_get_brightness,
+};
+
+int intel_backlight_device_register(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ struct backlight_properties props;
+ struct backlight_device *bd;
+ const char *name;
+ int ret = 0;
+
+ if (WARN_ON(panel->backlight.device))
+ return -ENODEV;
+
+ if (!panel->backlight.present)
+ return 0;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+
+ /*
+ * Note: Everything should work even if the backlight device max
+ * presented to userspace is arbitrarily chosen.
+ */
+ props.max_brightness = panel->backlight.max;
+ props.brightness = scale_hw_to_user(connector,
+ panel->backlight.level,
+ props.max_brightness);
+
+ if (panel->backlight.enabled)
+ props.power = FB_BLANK_UNBLANK;
+ else
+ props.power = FB_BLANK_POWERDOWN;
+
+ name = kstrdup("intel_backlight", GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
+
+ /*
+ * Using the same name independent of the drm device or connector
+ * prevents registration of multiple backlight devices in the
+ * driver. However, we need to use the default name for backward
+ * compatibility. Use unique names for subsequent backlight devices as a
+ * fallback when the default name already exists.
+ */
+ if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
+ kfree(name);
+ name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
+ i915->drm.primary->index, connector->base.name);
+ if (!name)
+ return -ENOMEM;
+
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
+ }
+
+ if (IS_ERR(bd)) {
+ drm_err(&i915->drm,
+ "[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n",
+ connector->base.base.id, connector->base.name, name, PTR_ERR(bd));
+ ret = PTR_ERR(bd);
+ goto out;
+ }
+
+ panel->backlight.device = bd;
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] backlight device %s registered\n",
+ connector->base.base.id, connector->base.name, name);
+
+out:
+ kfree(name);
+
+ return ret;
+}
+
+void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.device) {
+ backlight_device_unregister(panel->backlight.device);
+ panel->backlight.device = NULL;
+ }
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/*
+ * CNP: PWM clock frequency is 19.2 MHz or 24 MHz.
+ * PWM increment = 1
+ */
+static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz);
+}
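
As a quick userspace check of the conversion above (the 19.2 MHz rawclk is just an example input; rawclk_freq is kept in kHz, as in the kernel):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
#define KHz(x) ((x) * 1000)

int main(void)
{
        unsigned int rawclk_khz = 19200; /* 19.2 MHz rawclk, example */
        unsigned int pwm_freq_hz = 200;  /* the default VBT fallback */

        /* period in rawclk cycles: 19,200,000 / 200 = 96,000 */
        printf("%u\n", DIV_ROUND_CLOSEST(KHz(rawclk_khz), pwm_freq_hz));
        return 0;
}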
+
+/*
+ * BXT: PWM clock frequency = 19.2 MHz.
+ */
+static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz);
+}
+
+/*
+ * SPT: This value represents the period of the PWM stream in clock periods
+ * multiplied by 16 (default increment) or 128 (alternate increment selected in
+ * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
+ */
+static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct intel_panel *panel = &connector->panel;
+ u32 mul;
+
+ if (panel->backlight.alternate_pwm_increment)
+ mul = 128;
+ else
+ mul = 16;
+
+ return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul);
+}
+
+/*
+ * LPT: This value represents the period of the PWM stream in clock periods
+ * multiplied by 128 (default increment) or 16 (alternate increment, selected in
+ * LPT SOUTH_CHICKEN2 register bit 5).
+ */
+static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 mul, clock;
+
+ if (panel->backlight.alternate_pwm_increment)
+ mul = 16;
+ else
+ mul = 128;
+
+ if (HAS_PCH_LPT_H(dev_priv))
+ clock = MHz(135); /* LPT:H */
+ else
+ clock = MHz(24); /* LPT:LP */
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
+}
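
The SPT and LPT variants differ mainly in which granularity the chicken-register bit selects; a sketch of the effect on the programmed period, under the assumed 24 MHz PWM clock:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
#define MHz(x) ((x) * 1000000)

int main(void)
{
        unsigned int pwm_freq_hz = 200;

        /* increment of 16: 24,000,000 / (200 * 16) = 7500 */
        printf("mul=16:  %u\n", DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * 16));
        /* increment of 128: 24,000,000 / (200 * 128) = 937.5 -> 938 */
        printf("mul=128: %u\n", DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * 128));
        return 0;
}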
+
+/*
+ * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH
+ * display raw clocks multiplied by 128.
+ */
+static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz * 128);
+}
+
+/*
+ * Gen2: This field determines the number of time base events (display core
+ * clock frequency/32) in total for a complete cycle of modulated backlight
+ * control.
+ *
+ * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock)
+ * divided by 32.
+ */
+static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int clock;
+
+ if (IS_PINEVIEW(dev_priv))
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ else
+ clock = KHz(dev_priv->cdclk.hw.cdclk);
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
+}
+
+/*
+ * Gen4: This value represents the period of the PWM stream in display core
+ * clocks ([DevCTG] HRAW clocks) multiplied by 128.
+ */
+static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int clock;
+
+ if (IS_G4X(dev_priv))
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ else
+ clock = KHz(dev_priv->cdclk.hw.cdclk);
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
+}
+
+/*
+ * VLV: This value represents the period of the PWM stream in display core
+ * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks
+ * multiplied by 16. CHV uses a 19.2MHz S0IX clock.
+ */
+static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int mul, clock;
+
+ if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+ if (IS_CHERRYVIEW(dev_priv))
+ clock = KHz(19200);
+ else
+ clock = MHz(25);
+ mul = 16;
+ } else {
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ mul = 128;
+ }
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
+}
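
A sketch of the same clock/multiplier selection in isolation (the boolean mux flag stands in for the CBR1_VLV register readout):

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* Pick the S0IX clock (mux bit clear) or the HRAW clock (mux bit set),
 * mirroring the CBR_PWM_CLOCK_MUX_SELECT logic above. */
static unsigned int vlv_pwm_period(bool s0ix_clock, bool is_chv,
                                   unsigned int rawclk_khz,
                                   unsigned int pwm_freq_hz)
{
        unsigned int clock, mul;

        if (s0ix_clock) {
                clock = is_chv ? 19200 * 1000 : 25 * 1000000;
                mul = 16;
        } else {
                clock = rawclk_khz * 1000;
                mul = 128;
        }

        return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
}

int main(void)
{
        /* VLV on the 200 MHz HRAW clock at 200 Hz: 200e6/(200*128) -> 7813 */
        printf("%u\n", vlv_pwm_period(false, false, 200000, 200));
        return 0;
}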
+
+static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv)
+{
+ u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
+
+ if (pwm_freq_hz) {
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT defined backlight frequency %u Hz\n",
+ pwm_freq_hz);
+ } else {
+ pwm_freq_hz = 200;
+ drm_dbg_kms(&dev_priv->drm,
+ "default backlight frequency %u Hz\n",
+ pwm_freq_hz);
+ }
+
+ return pwm_freq_hz;
+}
+
+static u32 get_backlight_max_vbt(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
+ u32 pwm;
+
+ if (!panel->backlight.pwm_funcs->hz_to_pwm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion not supported\n");
+ return 0;
+ }
+
+ pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
+ if (!pwm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion failed\n");
+ return 0;
+ }
+
+ return pwm;
+}
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
+ */
+static u32 get_backlight_min_vbt(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ int min;
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+
+ /*
+ * XXX: If the vbt value is 255, it makes min equal to max, which leads
+ * to problems. There are such machines out there. Either our
+ * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
+ * against this by letting the minimum be at most (arbitrarily chosen)
+ * 25% of the max.
+ */
+ min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
+ if (min != dev_priv->vbt.backlight.min_brightness) {
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping VBT min backlight %d/255 to %d/255\n",
+ dev_priv->vbt.backlight.min_brightness, min);
+ }
+
+ /* vbt value is a coefficient in range [0..255] */
+ return scale(min, 0, 255, 0, panel->backlight.pwm_level_max);
+}
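
For instance, under the clamp above a bogus VBT minimum of 255 becomes 64, which then scales onto the PWM range (a sketch; round-to-nearest division as in the kernel's scale(), and the 7500 hw maximum is an example value):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int clamp_int(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        int vbt_min = 255;            /* bogus: would make min == max */
        int pwm_level_max = 7500;     /* example hw PWM period */
        int min = clamp_int(vbt_min, 0, 64); /* at most ~25% of 255 */

        /* scale the [0..255] coefficient onto [0..pwm_level_max] */
        printf("%d\n", DIV_ROUND_CLOSEST(min * pwm_level_max, 255)); /* 1882 */
        return 0;
}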
+
+static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+ bool alt, cpu_mode;
+
+ if (HAS_PCH_LPT(dev_priv))
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ else
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ panel->backlight.alternate_pwm_increment = alt;
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
+
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
+ !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
+ (cpu_ctl2 & BLM_PWM_ENABLE);
+
+ if (cpu_mode) {
+ val = pch_get_backlight(connector, unused);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "CPU backlight register was enabled, switching to PCH override\n");
+
+ /* Write converted CPU PWM value to PCH override register */
+ lpt_set_backlight(connector->base.state, val);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
+
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ cpu_ctl2 & ~BLM_PWM_ENABLE);
+ }
+
+ return 0;
+}
+
+static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+ (pch_ctl1 & BLM_PCH_PWM_ENABLE);
+
+ return 0;
+}
+
+static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, val;
+
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+
+ if (DISPLAY_VER(dev_priv) == 2 || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
+
+ if (IS_PINEVIEW(dev_priv))
+ panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
+
+ panel->backlight.pwm_level_max = ctl >> 17;
+
+ if (!panel->backlight.pwm_level_max) {
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+ panel->backlight.pwm_level_max >>= 1;
+ }
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ if (panel->backlight.combination_mode)
+ panel->backlight.pwm_level_max *= 0xff;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ val = i9xx_get_backlight(connector, unused);
+ val = intel_backlight_invert_pwm_level(connector, val);
+ val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+ panel->backlight.pwm_enabled = val != 0;
+
+ return 0;
+}
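
A sketch of the gen2/gen3 max-level decode above: the period sits in the high bits of BLC_PWM_CTL, and legacy/combination mode widens the effective range by the 8-bit duty factor (register value and field naming here are illustrative, taken only from the shifts in the code above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ctl = 0x1d4c0000;       /* example BLC_PWM_CTL readout */
        uint32_t max = ctl >> 17;        /* period field, per ctl >> 17 above */
        int combination_mode = 1;        /* BLM_LEGACY_MODE set, assumed */

        /* In combination mode the range is widened by the 8-bit LBPC
         * duty factor, hence the *0xff in the setup code. */
        if (combination_mode)
                max *= 0xff;

        printf("pwm_level_max = %u\n", max); /* 3750 * 255 = 956250 */
        return 0;
}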
+
+static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, ctl2;
+
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ panel->backlight.pwm_level_max = ctl >> 16;
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ if (panel->backlight.combination_mode)
+ panel->backlight.pwm_level_max *= 0xff;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+
+ return 0;
+}
+
+static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, ctl2;
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return -ENODEV;
+
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
+ panel->backlight.pwm_level_max = ctl >> 16;
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+
+ return 0;
+}
+
+static int
+bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl, val;
+
+ panel->backlight.controller = dev_priv->vbt.backlight.controller;
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+
+ /* Controller 1 uses the utility pin. */
+ if (panel->backlight.controller == 1) {
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ panel->backlight.util_pin_active_low =
+ val & UTIL_PIN_POLARITY;
+ }
+
+ panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+
+ return 0;
+}
+
+static int
+cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl;
+
+ /*
+ * CNP has the BXT implementation of backlight, but with only one
+ * controller. TODO: ICP has multiple controllers but we only use
+ * controller 0 for now.
+ */
+ panel->backlight.controller = 0;
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+
+ panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+
+ return 0;
+}
+
+static int ext_pwm_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_panel *panel = &connector->panel;
+ const char *desc;
+ u32 level;
+
+ /* Get the right PWM chip for DSI backlight according to VBT */
+ if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
+ desc = "PMIC";
+ } else {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
+ desc = "SoC";
+ }
+
+ if (IS_ERR(panel->backlight.pwm)) {
+ drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
+ desc);
+ panel->backlight.pwm = NULL;
+ return -ENODEV;
+ }
+
+ panel->backlight.pwm_level_max = 100; /* 100% */
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ if (pwm_is_enabled(panel->backlight.pwm)) {
+ /* PWM is already enabled, use existing settings */
+ pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+
+ level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
+ 100);
+ level = intel_backlight_invert_pwm_level(connector, level);
+ panel->backlight.pwm_enabled = true;
+
+ drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
+ get_vbt_pwm_freq(dev_priv), level);
+ } else {
+ /* Set period from VBT frequency, leave other settings at 0. */
+ panel->backlight.pwm_state.period =
+ NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
+ }
+
+ drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
+ desc);
+ return 0;
+}
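
Unlike the register-based paths, the external-PWM path works in time units; the VBT frequency converts to a pwm_state period like this (sketch):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
        unsigned int pwm_freq_hz = 200; /* VBT value, or the 200 Hz default */
        unsigned long period_ns = NSEC_PER_SEC / pwm_freq_hz;

        /* 200 Hz -> 5,000,000 ns; duty is tracked relative to this,
         * with pwm_level_max fixed at 100 (i.e. a percentage). */
        printf("period = %lu ns\n", period_ns);
        return 0;
}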
+
+static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->set(conn_state,
+ intel_backlight_invert_pwm_level(connector, level));
+}
+
+static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return intel_backlight_invert_pwm_level(connector,
+ panel->backlight.pwm_funcs->get(connector, pipe));
+}
+
+static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state,
+ intel_backlight_invert_pwm_level(connector, level));
+}
+
+static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->disable(conn_state,
+ intel_backlight_invert_pwm_level(connector, level));
+}
+
+static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+ int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+
+ if (ret < 0)
+ return ret;
+
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.level = intel_pwm_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
+
+ return 0;
+}
+
+void intel_backlight_update(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ if (!panel->backlight.present)
+ return;
+
+ mutex_lock(&dev_priv->backlight_lock);
+ if (!panel->backlight.enabled)
+ __intel_backlight_enable(crtc_state, conn_state);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
+int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ if (!dev_priv->vbt.backlight.present) {
+ if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT, but present per quirk\n");
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT\n");
+ return 0;
+ }
+ }
+
+ /* ensure intel_panel has been initialized first */
+ if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
+ return -ENODEV;
+
+ /* set level and max in panel struct */
+ mutex_lock(&dev_priv->backlight_lock);
+ ret = panel->backlight.funcs->setup(connector, pipe);
+ mutex_unlock(&dev_priv->backlight_lock);
+
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to setup backlight for connector %s\n",
+ connector->base.name);
+ return ret;
+ }
+
+ panel->backlight.present = true;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s backlight initialized, %s, brightness %u/%u\n",
+ connector->base.name,
+ enableddisabled(panel->backlight.enabled),
+ panel->backlight.level, panel->backlight.max);
+
+ return 0;
+}
+
+void intel_backlight_destroy(struct intel_panel *panel)
+{
+ /* dispose of the pwm */
+ if (panel->backlight.pwm)
+ pwm_put(panel->backlight.pwm);
+
+ panel->backlight.present = false;
+}
+
+static const struct intel_panel_bl_funcs bxt_pwm_funcs = {
+ .setup = bxt_setup_backlight,
+ .enable = bxt_enable_backlight,
+ .disable = bxt_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = bxt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs cnp_pwm_funcs = {
+ .setup = cnp_setup_backlight,
+ .enable = cnp_enable_backlight,
+ .disable = cnp_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = cnp_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs lpt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = lpt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs spt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = spt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pch_pwm_funcs = {
+ .setup = pch_setup_backlight,
+ .enable = pch_enable_backlight,
+ .disable = pch_disable_backlight,
+ .set = pch_set_backlight,
+ .get = pch_get_backlight,
+ .hz_to_pwm = pch_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs ext_pwm_funcs = {
+ .setup = ext_pwm_setup_backlight,
+ .enable = ext_pwm_enable_backlight,
+ .disable = ext_pwm_disable_backlight,
+ .set = ext_pwm_set_backlight,
+ .get = ext_pwm_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs vlv_pwm_funcs = {
+ .setup = vlv_setup_backlight,
+ .enable = vlv_enable_backlight,
+ .disable = vlv_disable_backlight,
+ .set = vlv_set_backlight,
+ .get = vlv_get_backlight,
+ .hz_to_pwm = vlv_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i965_pwm_funcs = {
+ .setup = i965_setup_backlight,
+ .enable = i965_enable_backlight,
+ .disable = i965_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i965_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i9xx_pwm_funcs = {
+ .setup = i9xx_setup_backlight,
+ .enable = i9xx_enable_backlight,
+ .disable = i9xx_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i9xx_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pwm_bl_funcs = {
+ .setup = intel_pwm_setup_backlight,
+ .enable = intel_pwm_enable_backlight,
+ .disable = intel_pwm_disable_backlight,
+ .set = intel_pwm_set_backlight,
+ .get = intel_pwm_get_backlight,
+};
+
+/* Set up chip specific backlight functions */
+void intel_backlight_init_funcs(struct intel_panel *panel)
+{
+ struct intel_connector *connector =
+ container_of(panel, struct intel_connector, panel);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
+ intel_dsi_dcs_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ panel->backlight.pwm_funcs = &bxt_pwm_funcs;
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
+ panel->backlight.pwm_funcs = &cnp_pwm_funcs;
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
+ if (HAS_PCH_LPT(dev_priv))
+ panel->backlight.pwm_funcs = &lpt_pwm_funcs;
+ else
+ panel->backlight.pwm_funcs = &spt_pwm_funcs;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ panel->backlight.pwm_funcs = &pch_pwm_funcs;
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
+ panel->backlight.pwm_funcs = &ext_pwm_funcs;
+ } else {
+ panel->backlight.pwm_funcs = &vlv_pwm_funcs;
+ }
+ } else if (DISPLAY_VER(dev_priv) == 4) {
+ panel->backlight.pwm_funcs = &i965_pwm_funcs;
+ } else {
+ panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
+ }
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+ intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ /* We're using a standard PWM backlight interface */
+ panel->backlight.funcs = &pwm_bl_funcs;
+}
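
The function above layers two vtables: a chip-specific pwm_funcs table, optionally replaced entirely by DSI-DCS or eDP-AUX backlight funcs, otherwise wrapped by the generic pwm_bl_funcs. A minimal sketch of that dispatch pattern (all names invented for illustration):

#include <stdio.h>

struct bl_funcs {
        void (*set)(unsigned int level);
};

static void pwm_set(unsigned int level) { printf("PWM level %u\n", level); }
static void aux_set(unsigned int level) { printf("AUX level %u\n", level); }

static const struct bl_funcs pwm_funcs = { .set = pwm_set };
static const struct bl_funcs aux_funcs = { .set = aux_set };

/* Pick the backlight interface once at init; callers only ever see
 * the vtable, never the platform checks. */
static const struct bl_funcs *init_funcs(int has_aux_backlight)
{
        return has_aux_backlight ? &aux_funcs : &pwm_funcs;
}

int main(void)
{
        const struct bl_funcs *funcs = init_funcs(0);

        funcs->set(42); /* dispatches to pwm_set */
        return 0;
}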
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.h b/drivers/gpu/drm/i915/display/intel_backlight.h
new file mode 100644
index 000000000000..339643f63897
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_backlight.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_BACKLIGHT_H__
+#define __INTEL_BACKLIGHT_H__
+
+#include <linux/types.h>
+
+struct drm_connector_state;
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_panel;
+enum pipe;
+
+void intel_backlight_init_funcs(struct intel_panel *panel);
+int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe);
+void intel_backlight_destroy(struct intel_panel *panel);
+
+void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_backlight_update(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_backlight_disable(const struct drm_connector_state *old_conn_state);
+
+void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
+ u32 level, u32 max);
+void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state,
+ u32 level);
+u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 level);
+u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
+u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int intel_backlight_device_register(struct intel_connector *connector);
+void intel_backlight_device_unregister(struct intel_connector *connector);
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static inline void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+#endif /* __INTEL_BACKLIGHT_H__ */
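
The header closes with the usual Kconfig stub idiom: when the backlight class device is compiled out, callers link against empty inlines rather than sprinkling #ifdefs at every call site. A generic, hypothetical sketch of the same pattern (CONFIG_FOO and foo_register are made up):

#include <stdio.h>

/* With CONFIG_FOO unset, callers get a zero-cost inline no-op and the
 * code compiles unchanged either way. */
#ifdef CONFIG_FOO
int foo_register(void) { printf("registered\n"); return 0; }
#else
static inline int foo_register(void) { return 0; }
#endif

int main(void)
{
        return foo_register();
}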
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index fd71346aac7b..b99907c656bb 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -493,6 +493,9 @@ parse_lfp_backlight(struct drm_i915_private *i915,
level = 255;
}
i915->vbt.backlight.min_brightness = min_level;
+
+ i915->vbt.backlight.brightness_precision_bits =
+ backlight_data->brightness_precision_bits[panel_type];
} else {
level = backlight_data->level[panel_type];
i915->vbt.backlight.min_brightness = entry->min_brightness;
@@ -1511,39 +1514,130 @@ static u8 translate_iboost(u8 val)
return mapping[val];
}
+static const u8 cnp_ddc_pin_map[] = {
+ [0] = 0, /* N/A */
+ [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
+ [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
+ [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
+ [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
+};
+
+static const u8 icp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,
+ [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+ [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+ [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+ [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+ [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP,
+ [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
+};
+
+static const u8 rkl_pch_tgp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
+ [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
+};
+
+static const u8 adls_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP,
+ [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP,
+ [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP,
+ [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP,
+};
+
+static const u8 gen9bc_tgp_ddc_pin_map[] = {
+ [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
+ [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP,
+};
+
+static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
+{
+ const u8 *ddc_pin_map;
+ int n_entries;
+
+ if (IS_ALDERLAKE_S(i915)) {
+ ddc_pin_map = adls_ddc_pin_map;
+ n_entries = ARRAY_SIZE(adls_ddc_pin_map);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ return vbt_pin;
+ } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
+ ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
+ } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) {
+ ddc_pin_map = gen9bc_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ ddc_pin_map = icp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(icp_ddc_pin_map);
+ } else if (HAS_PCH_CNP(i915)) {
+ ddc_pin_map = cnp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
+ } else {
+ /* Assuming direct map */
+ return vbt_pin;
+ }
+
+ if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
+ return ddc_pin_map[vbt_pin];
+
+ drm_dbg_kms(&i915->drm,
+ "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+ vbt_pin);
+ return 0;
+}
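
The per-platform pin maps are sparse arrays indexed by the raw VBT value, with 0 doubling as "invalid"; a sketch of the bounds-checked lookup in isolation (example map values are arbitrary):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Sparse VBT->GMBUS map; the index is the raw VBT pin, 0 means invalid. */
static const unsigned char example_pin_map[] = {
        [1] = 0x1, [2] = 0x2, [4] = 0x4,
};

static unsigned char map_pin(unsigned char vbt_pin)
{
        if (vbt_pin < ARRAY_SIZE(example_pin_map) &&
            example_pin_map[vbt_pin] != 0)
                return example_pin_map[vbt_pin];

        return 0; /* out of range or unmapped: reject */
}

int main(void)
{
        printf("%u %u\n", map_pin(2), map_pin(3)); /* -> 2 0 */
        return 0;
}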
+
static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
{
- const struct ddi_vbt_port_info *info;
+ const struct intel_bios_encoder_data *devdata;
enum port port;
if (!ddc_pin)
return PORT_NONE;
for_each_port(port) {
- info = &i915->vbt.ddi_port_info[port];
+ devdata = i915->vbt.ports[port];
- if (info->devdata && ddc_pin == info->alternate_ddc_pin)
+ if (devdata && ddc_pin == devdata->child.ddc_pin)
return port;
}
return PORT_NONE;
}
-static void sanitize_ddc_pin(struct drm_i915_private *i915,
+static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
enum port port)
{
- struct ddi_vbt_port_info *info = &i915->vbt.ddi_port_info[port];
+ struct drm_i915_private *i915 = devdata->i915;
struct child_device_config *child;
+ u8 mapped_ddc_pin;
enum port p;
- p = get_port_by_ddc_pin(i915, info->alternate_ddc_pin);
+ if (!devdata->child.ddc_pin)
+ return;
+
+ mapped_ddc_pin = map_ddc_pin(i915, devdata->child.ddc_pin);
+ if (!intel_gmbus_is_valid_pin(i915, mapped_ddc_pin)) {
+ drm_dbg_kms(&i915->drm,
+ "Port %c has invalid DDC pin %d, "
+ "sticking to defaults\n",
+ port_name(port), mapped_ddc_pin);
+ devdata->child.ddc_pin = 0;
+ return;
+ }
+
+ p = get_port_by_ddc_pin(i915, devdata->child.ddc_pin);
if (p == PORT_NONE)
return;
drm_dbg_kms(&i915->drm,
"port %c trying to use the same DDC pin (0x%x) as port %c, "
"disabling port %c DVI/HDMI support\n",
- port_name(port), info->alternate_ddc_pin,
+ port_name(port), mapped_ddc_pin,
port_name(p), port_name(p));
/*
@@ -1555,48 +1649,47 @@ static void sanitize_ddc_pin(struct drm_i915_private *i915,
* there are real machines (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and we must pick port E :(
*/
- info = &i915->vbt.ddi_port_info[p];
- child = &info->devdata->child;
+ child = &i915->vbt.ports[p]->child;
child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
- info->alternate_ddc_pin = 0;
+ child->ddc_pin = 0;
}
static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
{
- const struct ddi_vbt_port_info *info;
+ const struct intel_bios_encoder_data *devdata;
enum port port;
if (!aux_ch)
return PORT_NONE;
for_each_port(port) {
- info = &i915->vbt.ddi_port_info[port];
+ devdata = i915->vbt.ports[port];
- if (info->devdata && aux_ch == info->alternate_aux_channel)
+ if (devdata && aux_ch == devdata->child.aux_channel)
return port;
}
return PORT_NONE;
}
-static void sanitize_aux_ch(struct drm_i915_private *i915,
+static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata,
enum port port)
{
- struct ddi_vbt_port_info *info = &i915->vbt.ddi_port_info[port];
+ struct drm_i915_private *i915 = devdata->i915;
struct child_device_config *child;
enum port p;
- p = get_port_by_aux_ch(i915, info->alternate_aux_channel);
+ p = get_port_by_aux_ch(i915, devdata->child.aux_channel);
if (p == PORT_NONE)
return;
drm_dbg_kms(&i915->drm,
"port %c trying to use the same AUX CH (0x%x) as port %c, "
"disabling port %c DP support\n",
- port_name(port), info->alternate_aux_channel,
+ port_name(port), devdata->child.aux_channel,
port_name(p), port_name(p));
/*
@@ -1608,88 +1701,10 @@ static void sanitize_aux_ch(struct drm_i915_private *i915,
* there are real machines (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and we must pick port E :(
*/
- info = &i915->vbt.ddi_port_info[p];
- child = &info->devdata->child;
+ child = &i915->vbt.ports[p]->child;
child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
- info->alternate_aux_channel = 0;
-}
-
-static const u8 cnp_ddc_pin_map[] = {
- [0] = 0, /* N/A */
- [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
- [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
- [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
- [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
-};
-
-static const u8 icp_ddc_pin_map[] = {
- [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
- [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
- [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,
- [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
- [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
- [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
- [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
- [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP,
- [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
-};
-
-static const u8 rkl_pch_tgp_ddc_pin_map[] = {
- [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
- [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
- [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
- [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
-};
-
-static const u8 adls_ddc_pin_map[] = {
- [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
- [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP,
- [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP,
- [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP,
- [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP,
-};
-
-static const u8 gen9bc_tgp_ddc_pin_map[] = {
- [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
- [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
- [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP,
-};
-
-static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
-{
- const u8 *ddc_pin_map;
- int n_entries;
-
- if (IS_ALDERLAKE_S(i915)) {
- ddc_pin_map = adls_ddc_pin_map;
- n_entries = ARRAY_SIZE(adls_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
- return vbt_pin;
- } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
- ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
- n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) {
- ddc_pin_map = gen9bc_tgp_ddc_pin_map;
- n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
- ddc_pin_map = icp_ddc_pin_map;
- n_entries = ARRAY_SIZE(icp_ddc_pin_map);
- } else if (HAS_PCH_CNP(i915)) {
- ddc_pin_map = cnp_ddc_pin_map;
- n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
- } else {
- /* Assuming direct map */
- return vbt_pin;
- }
-
- if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
- return ddc_pin_map[vbt_pin];
-
- drm_dbg_kms(&i915->drm,
- "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
- vbt_pin);
- return 0;
+ child->aux_channel = 0;
}
static enum port __dvo_port_to_port(int n_ports, int n_dvo,
@@ -1825,6 +1840,17 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
}
}
+static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->vbt.version < 216)
+ return 0;
+
+ if (devdata->i915->vbt.version >= 230)
+ return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate);
+ else
+ return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
+}
+
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -1878,6 +1904,76 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
}
+static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->vbt.version < 158)
+ return -1;
+
+ return devdata->child.hdmi_level_shifter_value;
+}
+
+static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->vbt.version < 204)
+ return 0;
+
+ switch (devdata->child.hdmi_max_data_rate) {
+ default:
+ MISSING_CASE(devdata->child.hdmi_max_data_rate);
+ fallthrough;
+ case HDMI_MAX_DATA_RATE_PLATFORM:
+ return 0;
+ case HDMI_MAX_DATA_RATE_297:
+ return 297000;
+ case HDMI_MAX_DATA_RATE_165:
+ return 165000;
+ }
+}
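
These new helpers push the VBT-version checks down next to the field decode; a sketch of the same guard-then-decode shape with a hypothetical field (the rate codes below are invented, unlike the HDMI_MAX_DATA_RATE_* values above):

#include <stdio.h>

struct vbt_data {
        int version;
        int rate_code;
};

/* Decode only when the field exists in this VBT version (204+ assumed),
 * returning 0 ("use the platform default") otherwise. */
static int max_rate_khz(const struct vbt_data *vbt)
{
        if (!vbt || vbt->version < 204)
                return 0;

        switch (vbt->rate_code) {
        case 1: return 297000;
        case 2: return 165000;
        default: return 0; /* unknown code: platform default */
        }
}

int main(void)
{
        struct vbt_data vbt = { .version = 216, .rate_code = 1 };

        printf("%d\n", max_rate_khz(&vbt)); /* -> 297000 */
        return 0;
}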
+
+static enum port get_edp_port(struct drm_i915_private *i915)
+{
+ const struct intel_bios_encoder_data *devdata;
+ enum port port;
+
+ for_each_port(port) {
+ devdata = i915->vbt.ports[port];
+
+ if (devdata && intel_bios_encoder_supports_edp(devdata))
+ return port;
+ }
+
+ return PORT_NONE;
+}
+
+/*
+ * FIXME: The power sequencer and backlight code currently do not support more
+ * than one set of registers, at least not on anything other than VLV/CHV. It will
+ * clobber the registers. As a temporary workaround, gracefully prevent more
+ * than one eDP from being registered.
+ */
+static void sanitize_dual_edp(struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ struct child_device_config *child = &devdata->child;
+ enum port p;
+
+ /* CHV might not clobber PPS registers. */
+ if (IS_CHERRYVIEW(i915))
+ return;
+
+ p = get_edp_port(i915);
+ if (p == PORT_NONE)
+ return;
+
+ drm_dbg_kms(&i915->drm, "both ports %c and %c configured as eDP, "
+ "disabling port %c eDP\n", port_name(p), port_name(port),
+ port_name(port));
+
+ child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+ child->device_type &= ~DEVICE_TYPE_INTERNAL_CONNECTOR;
+}
+
static bool is_port_valid(struct drm_i915_private *i915, enum port port)
{
/*
@@ -1895,9 +1991,8 @@ static void parse_ddi_port(struct drm_i915_private *i915,
struct intel_bios_encoder_data *devdata)
{
const struct child_device_config *child = &devdata->child;
- struct ddi_vbt_port_info *info;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
- int dp_boost_level, hdmi_boost_level;
+ int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
enum port port;
port = dvo_port_to_port(i915, child->dvo_port);
@@ -1911,9 +2006,7 @@ static void parse_ddi_port(struct drm_i915_private *i915,
return;
}
- info = &i915->vbt.ddi_port_info[port];
-
- if (info->devdata) {
+ if (i915->vbt.ports[port]) {
drm_dbg_kms(&i915->drm,
"More than one child device for port %c in VBT, using the first.\n",
port_name(port));
@@ -1938,62 +2031,27 @@ static void parse_ddi_port(struct drm_i915_private *i915,
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
- if (is_dvi) {
- u8 ddc_pin;
+ if (is_edp)
+ sanitize_dual_edp(devdata, port);
- ddc_pin = map_ddc_pin(i915, child->ddc_pin);
- if (intel_gmbus_is_valid_pin(i915, ddc_pin)) {
- info->alternate_ddc_pin = ddc_pin;
- sanitize_ddc_pin(i915, port);
- } else {
- drm_dbg_kms(&i915->drm,
- "Port %c has invalid DDC pin %d, "
- "sticking to defaults\n",
- port_name(port), ddc_pin);
- }
- }
+ if (is_dvi)
+ sanitize_ddc_pin(devdata, port);
- if (is_dp) {
- info->alternate_aux_channel = child->aux_channel;
+ if (is_dp)
+ sanitize_aux_ch(devdata, port);
- sanitize_aux_ch(i915, port);
- }
-
- if (i915->vbt.version >= 158) {
- /* The VBT HDMI level shift values match the table we have. */
- u8 hdmi_level_shift = child->hdmi_level_shifter_value;
+ hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata);
+ if (hdmi_level_shift >= 0) {
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI level shift: %d\n",
- port_name(port),
- hdmi_level_shift);
- info->hdmi_level_shift = hdmi_level_shift;
- info->hdmi_level_shift_set = true;
+ port_name(port), hdmi_level_shift);
}
- if (i915->vbt.version >= 204) {
- int max_tmds_clock;
-
- switch (child->hdmi_max_data_rate) {
- default:
- MISSING_CASE(child->hdmi_max_data_rate);
- fallthrough;
- case HDMI_MAX_DATA_RATE_PLATFORM:
- max_tmds_clock = 0;
- break;
- case HDMI_MAX_DATA_RATE_297:
- max_tmds_clock = 297000;
- break;
- case HDMI_MAX_DATA_RATE_165:
- max_tmds_clock = 165000;
- break;
- }
-
- if (max_tmds_clock)
- drm_dbg_kms(&i915->drm,
- "Port %c VBT HDMI max TMDS clock: %d kHz\n",
- port_name(port), max_tmds_clock);
- info->max_tmds_clock = max_tmds_clock;
- }
+ max_tmds_clock = _intel_bios_max_tmds_clock(devdata);
+ if (max_tmds_clock)
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT HDMI max TMDS clock: %d kHz\n",
+ port_name(port), max_tmds_clock);
/* I_boost config for SKL and above */
dp_boost_level = intel_bios_encoder_dp_boost_level(devdata);
@@ -2008,19 +2066,13 @@ static void parse_ddi_port(struct drm_i915_private *i915,
"Port %c VBT HDMI boost level: %d\n",
port_name(port), hdmi_boost_level);
- /* DP max link rate for GLK+ */
- if (i915->vbt.version >= 216) {
- if (i915->vbt.version >= 230)
- info->dp_max_link_rate = parse_bdb_230_dp_max_link_rate(child->dp_max_link_rate);
- else
- info->dp_max_link_rate = parse_bdb_216_dp_max_link_rate(child->dp_max_link_rate);
-
+ dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata);
+ if (dp_max_link_rate)
drm_dbg_kms(&i915->drm,
"Port %c VBT DP max link rate: %d\n",
- port_name(port), info->dp_max_link_rate);
- }
+ port_name(port), dp_max_link_rate);
- info->devdata = devdata;
+ i915->vbt.ports[port] = devdata;
}
static void parse_ddi_ports(struct drm_i915_private *i915)
@@ -2558,12 +2610,8 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
[PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
};
- if (HAS_DDI(i915)) {
- const struct ddi_vbt_port_info *port_info =
- &i915->vbt.ddi_port_info[port];
-
- return port_info->devdata;
- }
+ if (HAS_DDI(i915))
+ return i915->vbt.ports[port];
/* FIXME maybe deal with port A as well? */
if (drm_WARN_ON(&i915->drm,
@@ -2814,8 +2862,7 @@ bool
intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata =
- i915->vbt.ddi_port_info[port].devdata;
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
if (drm_WARN_ON_ONCE(&i915->drm,
!IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
@@ -2835,8 +2882,7 @@ bool
intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata =
- i915->vbt.ddi_port_info[port].devdata;
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
}
@@ -2852,8 +2898,7 @@ bool
intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata =
- i915->vbt.ddi_port_info[port].devdata;
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
return devdata && devdata->child.lane_reversal;
}
@@ -2861,11 +2906,10 @@ intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
enum port port)
{
- const struct ddi_vbt_port_info *info =
- &i915->vbt.ddi_port_info[port];
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
enum aux_ch aux_ch;
- if (!info->alternate_aux_channel) {
+ if (!devdata || !devdata->child.aux_channel) {
aux_ch = (enum aux_ch)port;
drm_dbg_kms(&i915->drm,
@@ -2881,7 +2925,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
* ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E
* map to DDI A,TC1,TC2,TC3,TC4 respectively.
*/
- switch (info->alternate_aux_channel) {
+ switch (devdata->child.aux_channel) {
case DP_AUX_A:
aux_ch = AUX_CH_A;
break;
@@ -2942,7 +2986,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_I;
break;
default:
- MISSING_CASE(info->alternate_aux_channel);
+ MISSING_CASE(devdata->child.aux_channel);
aux_ch = AUX_CH_A;
break;
}
@@ -2956,17 +3000,18 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
- return i915->vbt.ddi_port_info[encoder->port].max_tmds_clock;
+ return _intel_bios_max_tmds_clock(devdata);
}
+/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct ddi_vbt_port_info *info =
- &i915->vbt.ddi_port_info[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
- return info->hdmi_level_shift_set ? info->hdmi_level_shift : -1;
+ return _intel_bios_hdmi_level_shift(devdata);
}
int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
@@ -2988,15 +3033,20 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de
int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
- return i915->vbt.ddi_port_info[encoder->port].dp_max_link_rate;
+ return _intel_bios_dp_max_link_rate(devdata);
}
int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+
+ if (!devdata || !devdata->child.ddc_pin)
+ return 0;
- return i915->vbt.ddi_port_info[encoder->port].alternate_ddc_pin;
+ return map_ddc_pin(i915, devdata->child.ddc_pin);
}
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
@@ -3012,5 +3062,5 @@ bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devda
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
- return i915->vbt.ddi_port_info[port].devdata;
+ return i915->vbt.ports[port];
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 4b94256d7319..8d9d888e9316 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -9,8 +9,8 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_pcode.h"
#include "intel_pm.h"
-#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 34fa4130d5c4..9e466d829019 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -28,8 +28,9 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_pcode.h"
#include "intel_psr.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
/**
* DOC: CDCLK / RAWCLK
@@ -59,6 +60,37 @@
* dividers can be programmed correctly.
*/
+void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config);
+}
+
+int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ return dev_priv->cdclk_funcs->bw_calc_min_cdclk(state);
+}
+
+static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ dev_priv->cdclk_funcs->set_cdclk(dev_priv, cdclk_config, pipe);
+}
+
+static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_config)
+{
+ return dev_priv->cdclk_funcs->modeset_calc_cdclk(cdclk_config);
+}
+
+static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv,
+ int cdclk)
+{
+ return dev_priv->cdclk_funcs->calc_voltage_level(cdclk);
+}
+
static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
@@ -1466,7 +1498,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
* at least what the CDCLK frequency requires.
*/
cdclk_config->voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_config->cdclk);
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk_config->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
@@ -1777,7 +1809,7 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0);
cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk);
cdclk_config.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
@@ -1789,7 +1821,7 @@ static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
cdclk_config.cdclk = cdclk_config.bypass;
cdclk_config.vco = 0;
cdclk_config.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
@@ -1932,7 +1964,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.set_cdclk))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk))
return;
intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");
@@ -1956,7 +1988,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
&dev_priv->gmbus_mutex);
}
- dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe);
+ intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe);
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2140,6 +2172,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
/*
+ * When only one VDSC engine is used, the pixel clock cannot be
+ * higher than the VDSC clock (cdclk), since each VDSC engine
+ * operates with a throughput of 1 pixel per clock.
+ */
+ if (crtc_state->dsc.compression_enable && !crtc_state->dsc.dsc_split)
+ min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+
+ /*
* HACK. Currently for TGL platforms we calculate
* min_cdclk initially based on pixel_rate divided
* by 2, accounting for also plane requirements,
@@ -2414,7 +2454,7 @@ static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
cdclk_state->logical.cdclk = cdclk;
cdclk_state->logical.voltage_level =
max_t(int, min_voltage_level,
- dev_priv->display.calc_voltage_level(cdclk));
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk));
if (!cdclk_state->active_pipes) {
cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
@@ -2423,7 +2463,7 @@ static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
cdclk_state->actual.vco = vco;
cdclk_state->actual.cdclk = cdclk;
cdclk_state->actual.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk);
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk);
} else {
cdclk_state->actual = cdclk_state->logical;
}
@@ -2515,7 +2555,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
new_cdclk_state->active_pipes =
intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
- ret = dev_priv->display.modeset_calc_cdclk(new_cdclk_state);
+ ret = intel_cdclk_modeset_calc_cdclk(dev_priv, new_cdclk_state);
if (ret)
return ret;
@@ -2695,7 +2735,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
*/
void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
- dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw);
+ intel_cdclk_get_cdclk(dev_priv, &dev_priv->cdclk.hw);
/*
* 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
@@ -2845,6 +2885,157 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
return freq;
}
+static struct intel_cdclk_funcs tgl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = tgl_calc_voltage_level,
+};
+
+static struct intel_cdclk_funcs ehl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = ehl_calc_voltage_level,
+};
+
+static struct intel_cdclk_funcs icl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = icl_calc_voltage_level,
+};
+
+static struct intel_cdclk_funcs bxt_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = bxt_calc_voltage_level,
+};
+
+static struct intel_cdclk_funcs skl_cdclk_funcs = {
+ .get_cdclk = skl_get_cdclk,
+ .set_cdclk = skl_set_cdclk,
+ .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = skl_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs bdw_cdclk_funcs = {
+ .get_cdclk = bdw_get_cdclk,
+ .set_cdclk = bdw_set_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = bdw_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs chv_cdclk_funcs = {
+ .get_cdclk = vlv_get_cdclk,
+ .set_cdclk = chv_set_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = vlv_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs vlv_cdclk_funcs = {
+ .get_cdclk = vlv_get_cdclk,
+ .set_cdclk = vlv_set_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = vlv_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs hsw_cdclk_funcs = {
+ .get_cdclk = hsw_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* SNB, IVB, 965G, 945G */
+static struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = {
+ .get_cdclk = fixed_400mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs ilk_cdclk_funcs = {
+ .get_cdclk = fixed_450mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs gm45_cdclk_funcs = {
+ .get_cdclk = gm45_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* G45 uses G33 */
+
+static struct intel_cdclk_funcs i965gm_cdclk_funcs = {
+ .get_cdclk = i965gm_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* i965G uses fixed 400 */
+
+static struct intel_cdclk_funcs pnv_cdclk_funcs = {
+ .get_cdclk = pnv_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs g33_cdclk_funcs = {
+ .get_cdclk = g33_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i945gm_cdclk_funcs = {
+ .get_cdclk = i945gm_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* i945G uses fixed 400 */
+
+static struct intel_cdclk_funcs i915gm_cdclk_funcs = {
+ .get_cdclk = i915gm_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i915g_cdclk_funcs = {
+ .get_cdclk = fixed_333mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i865g_cdclk_funcs = {
+ .get_cdclk = fixed_266mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i85x_cdclk_funcs = {
+ .get_cdclk = i85x_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i845g_cdclk_funcs = {
+ .get_cdclk = fixed_200mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i830_cdclk_funcs = {
+ .get_cdclk = fixed_133mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
/**
* intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks
* @dev_priv: i915 device
@@ -2852,119 +3043,78 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (IS_DG2(dev_priv)) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
dev_priv->cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
/* Wa_22011320316:adl-p[a0] */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
dev_priv->cdclk.table = adlp_a_step_cdclk_table;
else
dev_priv->cdclk.table = adlp_cdclk_table;
} else if (IS_ROCKETLAKE(dev_priv)) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
dev_priv->cdclk.table = rkl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_JSL_EHL(dev_priv)) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &ehl_cdclk_funcs;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = icl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &icl_cdclk_funcs;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = bxt_calc_voltage_level;
+ dev_priv->cdclk_funcs = &bxt_cdclk_funcs;
if (IS_GEMINILAKE(dev_priv))
dev_priv->cdclk.table = glk_cdclk_table;
else
dev_priv->cdclk.table = bxt_cdclk_table;
} else if (DISPLAY_VER(dev_priv) == 9) {
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.set_cdclk = skl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
+ dev_priv->cdclk_funcs = &skl_cdclk_funcs;
} else if (IS_BROADWELL(dev_priv)) {
- dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
- dev_priv->display.set_cdclk = bdw_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
+ dev_priv->cdclk_funcs = &bdw_cdclk_funcs;
+ } else if (IS_HASWELL(dev_priv)) {
+ dev_priv->cdclk_funcs = &hsw_cdclk_funcs;
} else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
- dev_priv->display.set_cdclk = chv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
+ dev_priv->cdclk_funcs = &chv_cdclk_funcs;
} else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
- dev_priv->display.set_cdclk = vlv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
- } else {
- dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
+ dev_priv->cdclk_funcs = &vlv_cdclk_funcs;
+ } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+ dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ } else if (IS_IRONLAKE(dev_priv)) {
+ dev_priv->cdclk_funcs = &ilk_cdclk_funcs;
+ } else if (IS_GM45(dev_priv)) {
+ dev_priv->cdclk_funcs = &gm45_cdclk_funcs;
+ } else if (IS_G45(dev_priv)) {
+ dev_priv->cdclk_funcs = &g33_cdclk_funcs;
+ } else if (IS_I965GM(dev_priv)) {
+ dev_priv->cdclk_funcs = &i965gm_cdclk_funcs;
+ } else if (IS_I965G(dev_priv)) {
+ dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ } else if (IS_PINEVIEW(dev_priv)) {
+ dev_priv->cdclk_funcs = &pnv_cdclk_funcs;
+ } else if (IS_G33(dev_priv)) {
+ dev_priv->cdclk_funcs = &g33_cdclk_funcs;
+ } else if (IS_I945GM(dev_priv)) {
+ dev_priv->cdclk_funcs = &i945gm_cdclk_funcs;
+ } else if (IS_I945G(dev_priv)) {
+ dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ } else if (IS_I915GM(dev_priv)) {
+ dev_priv->cdclk_funcs = &i915gm_cdclk_funcs;
+ } else if (IS_I915G(dev_priv)) {
+ dev_priv->cdclk_funcs = &i915g_cdclk_funcs;
+ } else if (IS_I865G(dev_priv)) {
+ dev_priv->cdclk_funcs = &i865g_cdclk_funcs;
+ } else if (IS_I85X(dev_priv)) {
+ dev_priv->cdclk_funcs = &i85x_cdclk_funcs;
+ } else if (IS_I845G(dev_priv)) {
+ dev_priv->cdclk_funcs = &i845g_cdclk_funcs;
+ } else if (IS_I830(dev_priv)) {
+ dev_priv->cdclk_funcs = &i830_cdclk_funcs;
}
- if (DISPLAY_VER(dev_priv) >= 10 || IS_BROXTON(dev_priv))
- dev_priv->display.get_cdclk = bxt_get_cdclk;
- else if (DISPLAY_VER(dev_priv) == 9)
- dev_priv->display.get_cdclk = skl_get_cdclk;
- else if (IS_BROADWELL(dev_priv))
- dev_priv->display.get_cdclk = bdw_get_cdclk;
- else if (IS_HASWELL(dev_priv))
- dev_priv->display.get_cdclk = hsw_get_cdclk;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.get_cdclk = vlv_get_cdclk;
- else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
- dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
- else if (IS_IRONLAKE(dev_priv))
- dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
- else if (IS_GM45(dev_priv))
- dev_priv->display.get_cdclk = gm45_get_cdclk;
- else if (IS_G45(dev_priv))
- dev_priv->display.get_cdclk = g33_get_cdclk;
- else if (IS_I965GM(dev_priv))
- dev_priv->display.get_cdclk = i965gm_get_cdclk;
- else if (IS_I965G(dev_priv))
- dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
- else if (IS_PINEVIEW(dev_priv))
- dev_priv->display.get_cdclk = pnv_get_cdclk;
- else if (IS_G33(dev_priv))
- dev_priv->display.get_cdclk = g33_get_cdclk;
- else if (IS_I945GM(dev_priv))
- dev_priv->display.get_cdclk = i945gm_get_cdclk;
- else if (IS_I945G(dev_priv))
- dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
- else if (IS_I915GM(dev_priv))
- dev_priv->display.get_cdclk = i915gm_get_cdclk;
- else if (IS_I915G(dev_priv))
- dev_priv->display.get_cdclk = fixed_333mhz_get_cdclk;
- else if (IS_I865G(dev_priv))
- dev_priv->display.get_cdclk = fixed_266mhz_get_cdclk;
- else if (IS_I85X(dev_priv))
- dev_priv->display.get_cdclk = i85x_get_cdclk;
- else if (IS_I845G(dev_priv))
- dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
- else if (IS_I830(dev_priv))
- dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
-
- if (drm_WARN(&dev_priv->drm, !dev_priv->display.get_cdclk,
- "Unknown platform. Assuming 133 MHz CDCLK\n"))
- dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
+ if (drm_WARN(&dev_priv->drm, !dev_priv->cdclk_funcs,
+ "Unknown platform. Assuming i830\n"))
+ dev_priv->cdclk_funcs = &i830_cdclk_funcs;
}
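
The intel_cdclk.c hunks above are the core of this refactor: scattered per-platform assignments of individual dev_priv->display.* function pointers collapse into a single dev_priv->cdclk_funcs pointer that selects one table per platform. Below is a minimal standalone sketch of that pattern; every name in it (clk_funcs, init_clk_hooks, the frequencies) is invented for illustration and is not driver code.

#include <stdio.h>

struct device;

/* One table of hooks per platform, mirroring intel_cdclk_funcs. */
struct clk_funcs {
	int (*get_clk)(struct device *dev);
};

struct device {
	const struct clk_funcs *clk_funcs;
	int display_ver;
};

static int bxt_style_get_clk(struct device *dev) { (void)dev; return 540000; }
static int fixed_get_clk(struct device *dev)     { (void)dev; return 133000; }

static const struct clk_funcs bxt_style_funcs = { .get_clk = bxt_style_get_clk };
static const struct clk_funcs fixed_funcs     = { .get_clk = fixed_get_clk };

/* Mirrors intel_init_cdclk_hooks(): one assignment per platform branch. */
static void init_clk_hooks(struct device *dev)
{
	if (dev->display_ver >= 9)
		dev->clk_funcs = &bxt_style_funcs;
	else
		dev->clk_funcs = &fixed_funcs;
}

int main(void)
{
	struct device dev = { .display_ver = 12 };

	init_clk_hooks(&dev);
	printf("cdclk = %d kHz\n", dev.clk_funcs->get_clk(&dev));
	return 0;
}

One observable design note: grouping the hooks makes each platform's set auditable in a single table. The cdclk tables in this patch are still plain static structs, while the color tables later in the same series are already declared const.
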
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index b34eb00fb327..309b3f394e24 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -68,7 +68,9 @@ void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
const char *context);
int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
-
+void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config);
+int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
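
The header hunk above exports thin named wrappers such as intel_cdclk_get_cdclk(), which the earlier intel_cdclk.c hunks call in place of direct dev_priv->display.get_cdclk() dereferences. The snippet below sketches the shape of such a wrapper with hypothetical type names (cdclk_vtbl, sketch_cdclk_get_cdclk); it is not the driver's actual definition.

struct cdclk_config { int cdclk, vco, voltage_level; };
struct i915;

struct cdclk_vtbl {
	void (*get_cdclk)(struct i915 *dev, struct cdclk_config *cfg);
};

struct i915 {
	const struct cdclk_vtbl *cdclk_funcs;
};

/*
 * A named wrapper gives callers one greppable dispatch point instead
 * of vtable dereferences scattered across files.
 */
void sketch_cdclk_get_cdclk(struct i915 *dev, struct cdclk_config *cfg)
{
	dev->cdclk_funcs->get_cdclk(dev, cfg);
}
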
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index afcb4bf3826c..5359b7305a78 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -25,6 +25,8 @@
#include "intel_color.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dpll.h"
+#include "intel_dsi.h"
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -1137,14 +1139,14 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->display.load_luts(crtc_state);
+ dev_priv->color_funcs->load_luts(crtc_state);
}
void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->display.color_commit(crtc_state);
+ dev_priv->color_funcs->color_commit(crtc_state);
}
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
@@ -1200,15 +1202,15 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- return dev_priv->display.color_check(crtc_state);
+ return dev_priv->color_funcs->color_check(crtc_state);
}
void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (dev_priv->display.read_luts)
- dev_priv->display.read_luts(crtc_state);
+ if (dev_priv->color_funcs->read_luts)
+ dev_priv->color_funcs->read_luts(crtc_state);
}
static bool need_plane_update(struct intel_plane *plane,
@@ -2092,6 +2094,76 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
}
}
+static const struct intel_color_funcs chv_color_funcs = {
+ .color_check = chv_color_check,
+ .color_commit = i9xx_color_commit,
+ .load_luts = chv_load_luts,
+ .read_luts = chv_read_luts,
+};
+
+static const struct intel_color_funcs i965_color_funcs = {
+ .color_check = i9xx_color_check,
+ .color_commit = i9xx_color_commit,
+ .load_luts = i965_load_luts,
+ .read_luts = i965_read_luts,
+};
+
+static const struct intel_color_funcs i9xx_color_funcs = {
+ .color_check = i9xx_color_check,
+ .color_commit = i9xx_color_commit,
+ .load_luts = i9xx_load_luts,
+ .read_luts = i9xx_read_luts,
+};
+
+static const struct intel_color_funcs icl_color_funcs = {
+ .color_check = icl_color_check,
+ .color_commit = skl_color_commit,
+ .load_luts = icl_load_luts,
+ .read_luts = icl_read_luts,
+};
+
+static const struct intel_color_funcs glk_color_funcs = {
+ .color_check = glk_color_check,
+ .color_commit = skl_color_commit,
+ .load_luts = glk_load_luts,
+ .read_luts = glk_read_luts,
+};
+
+static const struct intel_color_funcs skl_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit = skl_color_commit,
+ .load_luts = bdw_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs bdw_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit = hsw_color_commit,
+ .load_luts = bdw_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs hsw_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit = hsw_color_commit,
+ .load_luts = ivb_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs ivb_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit = ilk_color_commit,
+ .load_luts = ivb_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs ilk_color_funcs = {
+ .color_check = ilk_color_check,
+ .color_commit = ilk_color_commit,
+ .load_luts = ilk_load_luts,
+ .read_luts = ilk_read_luts,
+};
+
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -2101,52 +2173,28 @@ void intel_color_init(struct intel_crtc *crtc)
if (HAS_GMCH(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.color_check = chv_color_check;
- dev_priv->display.color_commit = i9xx_color_commit;
- dev_priv->display.load_luts = chv_load_luts;
- dev_priv->display.read_luts = chv_read_luts;
+ dev_priv->color_funcs = &chv_color_funcs;
} else if (DISPLAY_VER(dev_priv) >= 4) {
- dev_priv->display.color_check = i9xx_color_check;
- dev_priv->display.color_commit = i9xx_color_commit;
- dev_priv->display.load_luts = i965_load_luts;
- dev_priv->display.read_luts = i965_read_luts;
+ dev_priv->color_funcs = &i965_color_funcs;
} else {
- dev_priv->display.color_check = i9xx_color_check;
- dev_priv->display.color_commit = i9xx_color_commit;
- dev_priv->display.load_luts = i9xx_load_luts;
- dev_priv->display.read_luts = i9xx_read_luts;
+ dev_priv->color_funcs = &i9xx_color_funcs;
}
} else {
if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->display.color_check = icl_color_check;
- else if (DISPLAY_VER(dev_priv) >= 10)
- dev_priv->display.color_check = glk_color_check;
- else if (DISPLAY_VER(dev_priv) >= 7)
- dev_priv->display.color_check = ivb_color_check;
- else
- dev_priv->display.color_check = ilk_color_check;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- dev_priv->display.color_commit = skl_color_commit;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- dev_priv->display.color_commit = hsw_color_commit;
- else
- dev_priv->display.color_commit = ilk_color_commit;
-
- if (DISPLAY_VER(dev_priv) >= 11) {
- dev_priv->display.load_luts = icl_load_luts;
- dev_priv->display.read_luts = icl_read_luts;
- } else if (DISPLAY_VER(dev_priv) == 10) {
- dev_priv->display.load_luts = glk_load_luts;
- dev_priv->display.read_luts = glk_read_luts;
- } else if (DISPLAY_VER(dev_priv) >= 8) {
- dev_priv->display.load_luts = bdw_load_luts;
- } else if (DISPLAY_VER(dev_priv) >= 7) {
- dev_priv->display.load_luts = ivb_load_luts;
- } else {
- dev_priv->display.load_luts = ilk_load_luts;
- dev_priv->display.read_luts = ilk_read_luts;
- }
+ dev_priv->color_funcs = &icl_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 10)
+ dev_priv->color_funcs = &glk_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 9)
+ dev_priv->color_funcs = &skl_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 8)
+ dev_priv->color_funcs = &bdw_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 7) {
+ if (IS_HASWELL(dev_priv))
+ dev_priv->color_funcs = &hsw_color_funcs;
+ else
+ dev_priv->color_funcs = &ivb_color_funcs;
+ } else
+ dev_priv->color_funcs = &ilk_color_funcs;
}
drm_crtc_enable_color_mgmt(&crtc->base,
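
The color tables above use an optional-hook convention: several of them (skl, bdw, hsw, ivb) leave .read_luts NULL because those platforms get no LUT readback, and intel_color_get_config() checks the pointer before dispatching. A toy sketch of that convention follows, with invented names throughout.

#include <stdio.h>

struct crtc_state { int id; };

struct color_vtbl {
	void (*load_luts)(const struct crtc_state *s); /* mandatory */
	void (*read_luts)(struct crtc_state *s);       /* optional  */
};

static void demo_load(const struct crtc_state *s)
{
	printf("load_luts on crtc %d\n", s->id);
}

static const struct color_vtbl no_readback = {
	.load_luts = demo_load,
	.read_luts = NULL, /* like the skl/bdw/hsw/ivb tables above */
};

/* Mirrors intel_color_get_config(): skip an absent optional hook. */
static void color_get_config(const struct color_vtbl *f,
			     struct crtc_state *s)
{
	if (f->read_luts)
		f->read_luts(s);
}

int main(void)
{
	struct crtc_state s = { .id = 0 };

	no_readback.load_luts(&s);
	color_get_config(&no_readback, &s); /* safely does nothing */
	return 0;
}
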
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index bacdf8a16bcb..634e8d449457 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -220,13 +220,13 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
return false;
if (DISPLAY_VER(dev_priv) >= 12) {
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN0(phy),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN(0, phy),
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN0(phy),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy),
DCC_MODE_SELECT_MASK,
DCC_MODE_SELECT_CONTINUOSLY);
}
@@ -343,13 +343,13 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
skip_phy_misc:
if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN(0, phy));
val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2;
intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val);
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
val &= ~DCC_MODE_SELECT_MASK;
val |= DCC_MODE_SELECT_CONTINUOSLY;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
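
The intel_combo_phy.c hunks above replace the dedicated ICL_PORT_TX_DW8_LN0(phy) and ICL_PORT_PCS_DW1_LN0(phy) macros with the lane-parameterized _LN(0, phy) form. The sketch below shows that generalization with invented base addresses and strides; the real register layout is not reproduced here.

#include <stdint.h>
#include <stdio.h>

#define PHY_BASE(phy)		(0x162000u + (uint32_t)(phy) * 0x1000u)
#define LANE_STRIDE		0x100u

/* Old style: one macro hardwired to lane 0. */
#define PORT_TX_DW8_LN0(phy)	(PHY_BASE(phy) + 0x8u * 4u)

/* New style: lane is a parameter; _LN(0, phy) equals the old _LN0(phy). */
#define PORT_TX_DW8_LN(ln, phy) \
	(PHY_BASE(phy) + (uint32_t)(ln) * LANE_STRIDE + 0x8u * 4u)

int main(void)
{
	printf("LN0 old=%#x new=%#x\n",
	       (unsigned)PORT_TX_DW8_LN0(1),
	       (unsigned)PORT_TX_DW8_LN(0, 1));
	printf("LN2      =%#x\n", (unsigned)PORT_TX_DW8_LN(2, 1));
	return 0;
}

Folding the lane-0 special case into the general macro lets per-lane loops (for example the "for (ln = 0; ln < 4; ln++)" loops later in this patch) and one-off lane-0 reads share a single definition.
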
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 9bed1ccecea0..c65f95a9a1ec 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -29,13 +29,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include "display/intel_panel.h"
-
#include "i915_drv.h"
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_panel.h"
int intel_connector_init(struct intel_connector *connector)
{
@@ -124,7 +124,7 @@ int intel_connector_register(struct drm_connector *connector)
goto err_backlight;
}
- intel_connector_debugfs_add(connector);
+ intel_connector_debugfs_add(intel_connector);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 408f82b0dc7d..1c161eeed82f 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -251,7 +251,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
intel_ddi_disable_transcoder_func(old_crtc_state);
@@ -314,7 +314,7 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, crtc_state);
- intel_enable_pipe(crtc_state);
+ intel_enable_transcoder(crtc_state);
lpt_pch_enable(crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index c7618fef0143..11842f212613 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -17,7 +17,7 @@
#include "intel_display_types.h"
#include "intel_display.h"
#include "intel_fb.h"
-
+#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"
#include "intel_psr.h"
@@ -536,8 +536,10 @@ static void i9xx_update_cursor(struct intel_plane *plane,
if (DISPLAY_VER(dev_priv) >= 9)
skl_write_cursor_wm(plane, crtc_state);
- if (!intel_crtc_needs_modeset(crtc_state))
+ if (plane_state)
intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+ else
+ intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
@@ -637,8 +639,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
* FIXME bigjoiner fastpath would be good
*/
if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
- crtc_state->update_pipe || crtc_state->bigjoiner ||
- crtc_state->enable_psr2_sel_fetch)
+ crtc_state->update_pipe || crtc_state->bigjoiner)
goto slow;
/*
@@ -696,7 +697,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
goto out_free;
intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
- ORIGIN_FLIP);
+ ORIGIN_CURSOR_UPDATE);
intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
to_intel_frontbuffer(new_plane_state->hw.fb),
plane->frontbuffer_bit);
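
In the i9xx_update_cursor() hunk above, the PSR2 selective-fetch programming now keys off whether plane_state is NULL (the plane is being disabled) rather than intel_crtc_needs_modeset(), taking an explicit disable path in the NULL case. The toy below shows only that enable/disable split; all names are invented and it does not model the driver's actual PSR logic.

#include <stdio.h>

struct plane_state { int x, y; };

static void sel_fetch_program(const struct plane_state *ps)
{
	printf("program sel fetch at %d,%d\n", ps->x, ps->y);
}

static void sel_fetch_disable(void)
{
	printf("disable sel fetch\n");
}

static void update_cursor(const struct plane_state *ps)
{
	if (ps)  /* enabled: reprogram the fetch area */
		sel_fetch_program(ps);
	else     /* NULL plane state: explicitly turn fetch off */
		sel_fetch_disable();
}

int main(void)
{
	struct plane_state ps = { .x = 10, .y = 20 };

	update_cursor(&ps);
	update_cursor(NULL);
	return 0;
}
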
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index bd184325d0c7..1dcfe31e6c6f 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -29,6 +29,7 @@
#include "i915_drv.h"
#include "intel_audio.h"
+#include "intel_backlight.h"
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_crtc.h"
@@ -40,6 +41,7 @@
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
+#include "intel_drrs.h"
#include "intel_dsi.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
@@ -48,7 +50,6 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
-#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_snps_phy.h"
@@ -73,24 +74,27 @@ static const u8 index_to_dp_signal_levels[] = {
};
static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_ddi_buf_trans *trans)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int n_entries, level, default_entry;
+ int level;
- n_entries = intel_ddi_hdmi_num_entries(encoder, crtc_state, &default_entry);
- if (n_entries == 0)
- return 0;
level = intel_bios_hdmi_level_shift(encoder);
if (level < 0)
- level = default_entry;
-
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
+ level = trans->hdmi_default_entry;
return level;
}
+static bool has_buf_trans_select(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915);
+}
+
+static bool has_iboost(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915);
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. This function programs the correct values for
@@ -103,22 +107,22 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
u32 iboost_bit = 0;
int i, n_entries;
enum port port = encoder->port;
- const struct intel_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *trans;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ if (has_iboost(dev_priv) &&
intel_bios_encoder_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
- ddi_translations->entries[i].hsw.trans1 | iboost_bit);
+ trans->entries[i].hsw.trans1 | iboost_bit);
intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
- ddi_translations->entries[i].hsw.trans2);
+ trans->entries[i].hsw.trans2);
}
}
@@ -128,31 +132,29 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
* HDMI/DVI use cases.
*/
static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
u32 iboost_bit = 0;
int n_entries;
enum port port = encoder->port;
- const struct intel_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *trans;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
/* If we're boosting the current, set bit 31 of trans1 */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ if (has_iboost(dev_priv) &&
intel_bios_encoder_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
- ddi_translations->entries[level].hsw.trans1 | iboost_bit);
+ trans->entries[level].hsw.trans1 | iboost_bit);
intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
- ddi_translations->entries[level].hsw.trans2);
+ trans->entries[level].hsw.trans2);
}
void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -281,13 +283,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum phy phy = intel_port_to_phy(i915, encoder->port);
+ /* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */
intel_dp->DP = dig_port->saved_port_bits |
- DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
- intel_dp->DP |= DDI_PORT_WIDTH(crtc_state->lane_count);
+ DDI_PORT_WIDTH(crtc_state->lane_count) |
+ DDI_BUF_TRANS_SELECT(0);
if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
- if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
}
@@ -407,6 +410,20 @@ static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
return master_transcoder + 1;
}
+static void
+intel_ddi_config_transcoder_dp2(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = 0;
+
+ if (intel_dp_is_uhbr(crtc_state))
+ val = TRANS_DP2_128B132B_CHANNEL_CODING;
+
+ intel_de_write(i915, TRANS_DP2_CTL(cpu_transcoder), val);
+}
+
/*
* Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
*
@@ -488,10 +505,13 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- temp |= TRANS_DDI_MODE_SELECT_FDI;
+ temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
temp |= (crtc_state->fdi_lanes - 1) << 1;
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
- temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+ if (intel_dp_is_uhbr(crtc_state))
+ temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
if (DISPLAY_VER(dev_priv) >= 12) {
@@ -678,8 +698,13 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
ret = false;
break;
- case TRANS_DDI_MODE_SELECT_FDI:
- ret = type == DRM_MODE_CONNECTOR_VGA;
+ case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
+ if (HAS_DP20(dev_priv))
+ /* 128b/132b */
+ ret = false;
+ else
+ /* FDI */
+ ret = type == DRM_MODE_CONNECTOR_VGA;
break;
default:
@@ -766,8 +791,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if ((tmp & port_mask) != ddi_select)
continue;
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
- TRANS_DDI_MODE_SELECT_DP_MST)
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST ||
+ (HAS_DP20(dev_priv) &&
+ (tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B))
mst_pipe_mask |= BIT(p);
*pipe_mask |= BIT(p);
@@ -861,8 +887,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
dig_port = enc_to_dig_port(encoder);
- if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
@@ -947,16 +972,14 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);
if (iboost == 0) {
- const struct intel_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *trans;
int n_entries;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
- iboost = ddi_translations->entries[level].hsw.i_boost;
+ iboost = trans->entries[level].hsw.i_boost;
}
/* Make sure that the requested I_boost is valid */
@@ -971,28 +994,6 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
}
-static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_ddi_buf_trans *ddi_translations;
- enum port port = encoder->port;
- int n_entries;
-
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
- return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
-
- bxt_ddi_phy_set_signal_level(dev_priv, port,
- ddi_translations->entries[level].bxt.margin,
- ddi_translations->entries[level].bxt.scale,
- ddi_translations->entries[level].bxt.enable,
- ddi_translations->entries[level].bxt.deemphasis);
-}
-
static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1022,33 +1023,43 @@ static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp)
return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
+static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_state,
+ int lane)
+{
+ if (crtc_state->port_clock > 600000)
+ return 0;
+
+ if (crtc_state->lane_count == 4)
+ return lane >= 1 ? LOADGEN_SELECT : 0;
+ else
+ return lane == 1 || lane == 2 ? LOADGEN_SELECT : 0;
+}
+
static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_ddi_buf_trans *ddi_translations;
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int n_entries, ln;
u32 val;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
- intel_dp->hobl_active = is_hobl_buf_trans(ddi_translations);
+ intel_dp->hobl_active = is_hobl_buf_trans(trans);
intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
intel_dp->hobl_active ? val : 0);
}
/* Set PORT_TX_DW5 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
@@ -1057,52 +1068,48 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
- val |= SWING_SEL_UPPER(ddi_translations->entries[level].icl.dw2_swing_sel);
- val |= SWING_SEL_LOWER(ddi_translations->entries[level].icl.dw2_swing_sel);
+ val |= SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel);
+ val |= SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
- for (ln = 0; ln <= 3; ln++) {
+ for (ln = 0; ln < 4; ln++) {
val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
- val |= POST_CURSOR_1(ddi_translations->entries[level].icl.dw4_post_cursor_1);
- val |= POST_CURSOR_2(ddi_translations->entries[level].icl.dw4_post_cursor_2);
- val |= CURSOR_COEFF(ddi_translations->entries[level].icl.dw4_cursor_coeff);
+ val |= POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1);
+ val |= POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2);
+ val |= CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff);
intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* Program PORT_TX_DW7 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN(0, phy));
val &= ~N_SCALAR_MASK;
- val |= N_SCALAR(ddi_translations->entries[level].icl.dw7_n_scalar);
+ val |= N_SCALAR(trans->entries[level].icl.dw7_n_scalar);
intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
}
-static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
+static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- int width, rate, ln;
u32 val;
-
- width = crtc_state->lane_count;
- rate = crtc_state->port_clock;
+ int ln;
/*
* 1. If port type is eDP or DP,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val &= ~COMMON_KEEPER_EN;
else
@@ -1111,19 +1118,15 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* 2. Program loadgen select */
/*
- * Program PORT_TX_DW4_LN depending on Bit rate and used lanes
+ * Program PORT_TX_DW4 depending on Bit rate and used lanes
* <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1)
* <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0)
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
- for (ln = 0; ln <= 3; ln++) {
+ for (ln = 0; ln < 4; ln++) {
val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
-
- if ((rate <= 600000 && width == 4 && ln >= 1) ||
- (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
- val |= LOADGEN_SELECT;
- }
+ val |= icl_combo_phy_loadgen_select(crtc_state, ln);
intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
@@ -1133,37 +1136,35 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
/* 4. Clear training enable to change swing values */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
- icl_ddi_combo_vswing_program(encoder, crtc_state, level);
+ icl_ddi_combo_vswing_program(encoder, crtc_state);
/* 6. Set training enable to trigger update */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val |= TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
}
-static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
+static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
- const struct intel_ddi_buf_trans *ddi_translations;
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
u32 val;
- if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
@@ -1181,13 +1182,13 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
- ddi_translations->entries[level].mg.cri_txdeemph_override_17_12);
+ trans->entries[level].mg.cri_txdeemph_override_17_12);
intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);
val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
- ddi_translations->entries[level].mg.cri_txdeemph_override_17_12);
+ trans->entries[level].mg.cri_txdeemph_override_17_12);
intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
}
@@ -1197,9 +1198,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
- ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) |
+ trans->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_11_6(
- ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) |
+ trans->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);
@@ -1207,9 +1208,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
- ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) |
+ trans->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_11_6(
- ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) |
+ trans->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);
@@ -1269,45 +1270,29 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
}
}
-static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- if (intel_phy_is_combo(dev_priv, phy))
- icl_combo_phy_ddi_vswing_sequence(encoder, crtc_state, level);
- else
- icl_mg_phy_ddi_vswing_sequence(encoder, crtc_state, level);
-}
-
-static void
-tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
+static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
- const struct intel_ddi_buf_trans *ddi_translations;
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ const struct intel_ddi_buf_trans *trans;
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
- if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK);
- dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations->entries[level].dkl.dkl_vswing_control);
- dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations->entries[level].dkl.dkl_de_emphasis_control);
- dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations->entries[level].dkl.dkl_preshoot_control);
+ dpcnt_val = DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing);
+ dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis);
+ dpcnt_val |= DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot);
for (ln = 0; ln < 2; ln++) {
intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
@@ -1329,30 +1314,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port));
val &= ~DKL_TX_DP20BITMODE;
intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val);
-
- if ((intel_crtc_has_dp_encoder(crtc_state) &&
- crtc_state->port_clock == 162000) ||
- (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
- crtc_state->port_clock == 594000))
- val |= DKL_TX_LOADGEN_SHARING_PMD_DISABLE;
- else
- val &= ~DKL_TX_LOADGEN_SHARING_PMD_DISABLE;
}
}
-static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- if (intel_phy_is_combo(dev_priv, phy))
- icl_combo_phy_ddi_vswing_sequence(encoder, crtc_state, level);
- else
- tgl_dkl_phy_ddi_vswing_sequence(encoder, crtc_state, level);
-}
-
static int translate_signal_level(struct intel_dp *intel_dp,
u8 signal_levels)
{
@@ -1371,65 +1335,63 @@ static int translate_signal_level(struct intel_dp *intel_dp,
return 0;
}
-static int intel_ddi_dp_level(struct intel_dp *intel_dp)
+static int intel_ddi_dp_level(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lane)
{
- u8 train_set = intel_dp->train_set[0];
- u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
-
- return translate_signal_level(intel_dp, signal_levels);
-}
+ u8 train_set = intel_dp->train_set[lane];
-static void
-dg2_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- int level = intel_ddi_dp_level(intel_dp);
+ if (intel_dp_is_uhbr(crtc_state)) {
+ return train_set & DP_TX_FFE_PRESET_VALUE_MASK;
+ } else {
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
- intel_snps_phy_ddi_vswing_sequence(encoder, level);
+ return translate_signal_level(intel_dp, signal_levels);
+ }
}
-static void
-tgl_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+int intel_ddi_level(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int lane)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- int level = intel_ddi_dp_level(intel_dp);
-
- tgl_ddi_vswing_sequence(encoder, crtc_state, level);
-}
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_ddi_buf_trans *trans;
+ int level, n_entries;
-static void
-icl_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- int level = intel_ddi_dp_level(intel_dp);
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&i915->drm, !trans))
+ return 0;
- icl_ddi_vswing_sequence(encoder, crtc_state, level);
-}
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ level = intel_ddi_hdmi_level(encoder, trans);
+ else
+ level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state,
+ lane);
-static void
-bxt_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- int level = intel_ddi_dp_level(intel_dp);
+ if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
+ level = n_entries - 1;
- bxt_ddi_vswing_sequence(encoder, crtc_state, level);
+ return level;
}
static void
-hsw_set_signal_levels(struct intel_dp *intel_dp,
+hsw_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int level = intel_ddi_dp_level(intel_dp);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
enum port port = encoder->port;
u32 signal_levels;
+ if (has_iboost(dev_priv))
+ skl_ddi_set_iboost(encoder, crtc_state, level);
+
+ /* HDMI ignores the rest */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return;
+
signal_levels = DDI_BUF_TRANS_SELECT(level);
drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
@@ -1438,9 +1400,6 @@ hsw_set_signal_levels(struct intel_dp *intel_dp,
intel_dp->DP &= ~DDI_BUF_EMP_MASK;
intel_dp->DP |= signal_levels;
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- skl_ddi_set_iboost(encoder, crtc_state, level);
-
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
}
@@ -2059,7 +2018,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
u8 width;
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode == TC_PORT_TBT_ALT)
+ intel_tc_port_in_tbt_alt_mode(dig_port))
return;
if (DISPLAY_VER(dev_priv) >= 12) {
@@ -2084,7 +2043,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
switch (pin_assignment) {
case 0x0:
drm_WARN_ON(&dev_priv->drm,
- dig_port->tc_mode != TC_PORT_LEGACY);
+ !intel_tc_port_in_legacy_mode(dig_port));
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
@@ -2329,15 +2288,19 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- int level = intel_ddi_dp_level(intel_dp);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count);
/*
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
+ intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+
+ /*
* 1. Enable Power Wells
*
* This was handled at the beginning of intel_atomic_commit_tail(),
@@ -2353,8 +2316,7 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
/* 4. Enable IO power */
- if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port))
dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
@@ -2374,7 +2336,8 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_ddi_enable_pipe_clock(encoder, crtc_state);
- /* 5.b Not relevant to i915 for now */
+ /* 5.b Configure transcoder for DP 2.0 128b/132b */
+ intel_ddi_config_transcoder_dp2(encoder, crtc_state);
/*
* 5.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
@@ -2391,21 +2354,12 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
/* 5.e Configure voltage swing and related IO settings */
- intel_snps_phy_ddi_vswing_sequence(encoder, level);
-
- /*
- * 5.f Configure and enable DDI_BUF_CTL
- * 5.g Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
- * after 1200 us.
- *
- * We only configure what the register value will be here. Actual
- * enabling happens during link training farther down.
- */
- intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+ encoder->set_signal_levels(encoder, crtc_state);
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
/*
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
@@ -2413,6 +2367,8 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
* training
*/
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
/*
* 5.h Follow DisplayPort specification training sequence (see notes for
@@ -2439,16 +2395,20 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- int level = intel_ddi_dp_level(intel_dp);
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
crtc_state->lane_count);
/*
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
+ intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+
+ /*
* 1. Enable Power Wells
*
* This was handled at the beginning of intel_atomic_commit_tail(),
@@ -2476,8 +2436,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
- if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
@@ -2517,7 +2476,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
/* 7.e Configure voltage swing and related IO settings */
- tgl_ddi_vswing_sequence(encoder, crtc_state, level);
+ encoder->set_signal_levels(encoder, crtc_state);
/*
* 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
@@ -2530,16 +2489,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_ddi_mso_configure(crtc_state);
- /*
- * 7.g Configure and enable DDI_BUF_CTL
- * 7.h Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
- * after 500 us.
- *
- * We only configure what the register value will be here. Actual
- * enabling happens during link training farther down.
- */
- intel_ddi_init_dp_buf_reg(encoder, crtc_state);
-
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
@@ -2582,10 +2531,8 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- int level = intel_ddi_dp_level(intel_dp);
if (DISPLAY_VER(dev_priv) < 11)
drm_WARN_ON(&dev_priv->drm,
@@ -2597,12 +2544,17 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state->port_clock,
crtc_state->lane_count);
+ /*
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
+ intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+
intel_pps_on(intel_dp);
intel_ddi_enable_clock(encoder, crtc_state);
- if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
@@ -2610,16 +2562,13 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (DISPLAY_VER(dev_priv) >= 11)
- icl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_ddi_vswing_sequence(encoder, crtc_state, level);
- else
+ if (has_buf_trans_select(dev_priv))
hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
+ encoder->set_signal_levels(encoder, crtc_state);
+
intel_ddi_power_up_lanes(encoder, crtc_state);
- intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
@@ -2772,7 +2721,6 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (!is_mst)
intel_dp_set_infoframes(encoder, false,
@@ -2815,8 +2763,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_pps_vdd_on(intel_dp);
intel_pps_off(intel_dp);
- if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_display_power_put(dev_priv,
dig_port->ddi_io_power_domain,
fetch_and_zero(&dig_port->ddi_io_wakeref));
@@ -2862,7 +2809,7 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
intel_vrr_disable(old_crtc_state);
@@ -3005,12 +2952,11 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
intel_dp_stop_link_train(intel_dp, crtc_state);
intel_edp_backlight_on(crtc_state, conn_state);
- intel_psr_enable(intel_dp, crtc_state, conn_state);
if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink)
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
- intel_edp_drrs_enable(intel_dp, crtc_state);
+ intel_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
intel_audio_codec_enable(encoder, crtc_state, conn_state);
@@ -3046,7 +2992,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
- int level = intel_ddi_hdmi_level(encoder, crtc_state);
enum port port = encoder->port;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
@@ -3056,19 +3001,10 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
"[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
- if (IS_DG2(dev_priv))
- intel_snps_phy_ddi_vswing_sequence(encoder, U32_MAX);
- else if (DISPLAY_VER(dev_priv) >= 12)
- tgl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (DISPLAY_VER(dev_priv) == 11)
- icl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_ddi_vswing_sequence(encoder, crtc_state, level);
- else
- hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state, level);
+ if (has_buf_trans_select(dev_priv))
+ hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- skl_ddi_set_iboost(encoder, crtc_state, level);
+ encoder->set_signal_levels(encoder, crtc_state);
/* Display WA #1143: skl,kbl,cfl */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
@@ -3133,7 +3069,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_vrr_enable(encoder, crtc_state);
- intel_enable_pipe(crtc_state);
+ intel_enable_transcoder(crtc_state);
intel_crtc_vblank_on(crtc_state);
@@ -3198,7 +3134,7 @@ static void intel_pre_disable_ddi(struct intel_atomic_state *state,
return;
intel_dp = enc_to_intel_dp(encoder);
- intel_edp_drrs_disable(intel_dp, old_crtc_state);
+ intel_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
}
@@ -3226,11 +3162,10 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
intel_ddi_set_dp_msa(crtc_state, conn_state);
- intel_psr_update(intel_dp, crtc_state, conn_state);
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
- intel_edp_drrs_update(intel_dp, crtc_state);
+ intel_drrs_update(intel_dp, crtc_state);
- intel_panel_update_backlight(state, encoder, crtc_state, conn_state);
+ intel_backlight_update(state, encoder, crtc_state, conn_state);
}
void intel_ddi_update_pipe(struct intel_atomic_state *state,
@@ -3293,7 +3228,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
intel_ddi_main_link_aux_domain(dig_port));
}
- if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (is_tc_port && !intel_tc_port_in_tbt_alt_mode(dig_port))
/*
* Program the lane count for static/dynamic connections on
* Type-C ports. Skip this step for TBT.
@@ -3553,9 +3488,6 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
pipe_config->lane_count = 4;
break;
- case TRANS_DDI_MODE_SELECT_FDI:
- pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- break;
case TRANS_DDI_MODE_SELECT_DP_SST:
if (encoder->type == INTEL_OUTPUT_EDP)
pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
@@ -3584,6 +3516,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->infoframes.enable |=
intel_hdmi_infoframes_enabled(encoder, pipe_config);
break;
+ case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
+ if (!HAS_DP20(dev_priv)) {
+ /* FDI */
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+ break;
+ }
+ fallthrough; /* 128b/132b */
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
pipe_config->lane_count =
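
Note: the readout hunk above gives one TRANS_DDI mode-select value two meanings. Without DP 2.0 it still means FDI (analog); with DP 2.0 it means a 128b/132b link, which then falls through to the MST handling. A standalone sketch of that decode rule, with placeholder enum values rather than real register bits.

#include <stdbool.h>
#include <stdio.h>

enum ddi_mode { MODE_HDMI, MODE_DP_SST, MODE_DP_MST, MODE_FDI_OR_128B132B };

static const char *decode_ddi_mode(enum ddi_mode mode, bool has_dp20)
{
	switch (mode) {
	case MODE_HDMI:
		return "HDMI";
	case MODE_DP_SST:
		return "DP SST";
	case MODE_DP_MST:
		return "DP MST";
	case MODE_FDI_OR_128B132B:
		/* Same field value, two meanings depending on the platform. */
		return has_dp20 ? "DP 128b/132b" : "FDI (analog)";
	}
	return "unknown";
}

int main(void)
{
	printf("%s\n", decode_ddi_mode(MODE_FDI_OR_128B132B, false));
	printf("%s\n", decode_ddi_mode(MODE_FDI_OR_128B132B, true));
	return 0;
}
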
@@ -3995,13 +3934,15 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
intel_dp_encoder_flush_work(encoder);
+ if (intel_phy_is_tc(i915, phy))
+ intel_tc_port_flush_work(dig_port);
intel_display_power_flush_work(i915);
drm_encoder_cleanup(encoder);
- if (dig_port)
- kfree(dig_port->hdcp_port_data.streams);
+ kfree(dig_port->hdcp_port_data.streams);
kfree(dig_port);
}
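
Note: in the destroy path above, pending Type-C work is now flushed before teardown, and the dead NULL check on dig_port is gone (enc_to_dig_port() cannot return NULL here). A toy C sketch of the flush-before-free ordering; the workqueue is reduced to a flag, and all names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct dig_port {
	int work_pending;	/* stand-in for queued Type-C work */
	char *streams;		/* owned allocation, freed at destroy */
};

static void flush_work(struct dig_port *p)
{
	/* Stand-in for intel_tc_port_flush_work(): make sure nothing can
	 * touch the port after the frees below. */
	p->work_pending = 0;
}

static void encoder_destroy(struct dig_port *p)
{
	flush_work(p);		/* must complete before freeing */
	free(p->streams);
	free(p);
}

int main(void)
{
	struct dig_port *p = calloc(1, sizeof(*p));

	p->streams = malloc(16);
	p->work_pending = 1;
	encoder_destroy(p);
	puts("torn down in the safe order");
	return 0;
}
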
@@ -4022,7 +3963,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static struct intel_connector *
intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_connector *connector;
enum port port = dig_port->base.port;
@@ -4035,17 +3975,6 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
dig_port->dp.set_link_train = intel_ddi_set_link_train;
dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
- if (IS_DG2(dev_priv))
- dig_port->dp.set_signal_levels = dg2_set_signal_levels;
- else if (DISPLAY_VER(dev_priv) >= 12)
- dig_port->dp.set_signal_levels = tgl_set_signal_levels;
- else if (DISPLAY_VER(dev_priv) >= 11)
- dig_port->dp.set_signal_levels = icl_set_signal_levels;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dig_port->dp.set_signal_levels = bxt_set_signal_levels;
- else
- dig_port->dp.set_signal_levels = hsw_set_signal_levels;
-
dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
@@ -4421,7 +4350,7 @@ static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
if (!intel_phy_is_tc(i915, phy))
return;
- intel_tc_port_disconnect_phy(dig_port);
+ intel_tc_port_flush_work(dig_port);
}
static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
@@ -4436,7 +4365,7 @@ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
if (!intel_phy_is_tc(i915, phy))
return;
- intel_tc_port_disconnect_phy(dig_port);
+ intel_tc_port_flush_work(dig_port);
}
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
@@ -4617,6 +4546,24 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->get_config = hsw_ddi_get_config;
}
+ if (IS_DG2(dev_priv)) {
+ encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
+ else
+ encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
+ else
+ encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ encoder->set_signal_levels = bxt_ddi_phy_set_signal_levels;
+ } else {
+ encoder->set_signal_levels = hsw_set_signal_levels;
+ }
+
intel_ddi_buf_trans_init(encoder);
if (DISPLAY_VER(dev_priv) >= 13)
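
Note: the init hunk above selects the hook along two axes: display version picks the family, and the PHY type (combo vs. Type-C) picks the variant within ICL and TGL. A compact sketch of the same decision table; the returned strings are the hook names from the diff, with the GLK/BXT branch folded in as a flag.

#include <stdbool.h>
#include <stdio.h>

static const char *pick_signal_levels_hook(int display_ver, bool is_combo_phy,
					   bool is_dg2, bool is_bxt_glk)
{
	if (is_dg2)
		return "intel_snps_phy_set_signal_levels";
	if (display_ver >= 12)
		return is_combo_phy ? "icl_combo_phy_set_signal_levels"
				    : "tgl_dkl_phy_set_signal_levels";
	if (display_ver >= 11)
		return is_combo_phy ? "icl_combo_phy_set_signal_levels"
				    : "icl_mg_phy_set_signal_levels";
	if (is_bxt_glk)
		return "bxt_ddi_phy_set_signal_levels";
	return "hsw_set_signal_levels";
}

int main(void)
{
	printf("%s\n", pick_signal_levels_hook(12, false, false, false));
	printf("%s\n", pick_signal_levels_hook(11, true, false, false));
	return 0;
}
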
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 7d448485d887..d6971717ef9c 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -59,13 +59,12 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
-u32 bxt_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-u32 ddi_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
enum transcoder cpu_transcoder,
bool enable, u32 hdcp_mask);
void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int intel_ddi_level(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int lane);
#endif /* __INTEL_DDI_H__ */
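
Note: the header diff removes the whole-port u32 signal-level helpers and exports intel_ddi_level() with a lane parameter, pointing at per-lane translation lookups in the PHY backends. A hedged sketch of the consuming shape only; the mapping body below is a placeholder, not the driver's logic.

#include <stdio.h>

/* Placeholder for intel_ddi_level(): in the driver it resolves a buf_trans
 * table index from the encoder state and the given lane. */
static int ddi_level(int lane)
{
	return lane % 4;	/* illustrative mapping only */
}

int main(void)
{
	int lane, lane_count = 4;

	for (lane = 0; lane < lane_count; lane++)
		printf("lane %d -> level %d\n", lane, ddi_level(lane));
	return 0;
}
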
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index ba2c08f1a797..78cd8f77b49d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -8,12 +8,13 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
* automatically adapt to HDMI connections as well
*/
-static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _hsw_trans_dp[] = {
{ .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } },
{ .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } },
{ .hsw = { 0x00C30FFF, 0x00040006, 0x0 } },
@@ -25,12 +26,12 @@ static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_dp[] = {
{ .hsw = { 0x80D75FFF, 0x000B0000, 0x0 } },
};
-static const struct intel_ddi_buf_trans hsw_ddi_translations_dp = {
- .entries = _hsw_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_hsw_ddi_translations_dp),
+static const struct intel_ddi_buf_trans hsw_trans_dp = {
+ .entries = _hsw_trans_dp,
+ .num_entries = ARRAY_SIZE(_hsw_trans_dp),
};
-static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_fdi[] = {
+static const union intel_ddi_buf_trans_entry _hsw_trans_fdi[] = {
{ .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } },
{ .hsw = { 0x00D75FFF, 0x000F000A, 0x0 } },
{ .hsw = { 0x00C30FFF, 0x00060006, 0x0 } },
@@ -42,12 +43,12 @@ static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_fdi[] = {
{ .hsw = { 0x00D75FFF, 0x001E0000, 0x0 } },
};
-static const struct intel_ddi_buf_trans hsw_ddi_translations_fdi = {
- .entries = _hsw_ddi_translations_fdi,
- .num_entries = ARRAY_SIZE(_hsw_ddi_translations_fdi),
+static const struct intel_ddi_buf_trans hsw_trans_fdi = {
+ .entries = _hsw_trans_fdi,
+ .num_entries = ARRAY_SIZE(_hsw_trans_fdi),
};
-static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _hsw_trans_hdmi[] = {
/* Idx NT mV d T mV d db */
{ .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } }, /* 0: 400 400 0 */
{ .hsw = { 0x00E79FFF, 0x000E000C, 0x0 } }, /* 1: 400 500 2 */
@@ -63,13 +64,13 @@ static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_hdmi[] = {
{ .hsw = { 0x80FFFFFF, 0x00030002, 0x0 } }, /* 11: 1000 1000 0 */
};
-static const struct intel_ddi_buf_trans hsw_ddi_translations_hdmi = {
- .entries = _hsw_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_hsw_ddi_translations_hdmi),
+static const struct intel_ddi_buf_trans hsw_trans_hdmi = {
+ .entries = _hsw_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_hsw_trans_hdmi),
.hdmi_default_entry = 6,
};
-static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _bdw_trans_edp[] = {
{ .hsw = { 0x00FFFFFF, 0x00000012, 0x0 } },
{ .hsw = { 0x00EBAFFF, 0x00020011, 0x0 } },
{ .hsw = { 0x00C71FFF, 0x0006000F, 0x0 } },
@@ -81,12 +82,12 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_edp[] = {
{ .hsw = { 0x00DB6FFF, 0x000A000C, 0x0 } },
};
-static const struct intel_ddi_buf_trans bdw_ddi_translations_edp = {
- .entries = _bdw_ddi_translations_edp,
- .num_entries = ARRAY_SIZE(_bdw_ddi_translations_edp),
+static const struct intel_ddi_buf_trans bdw_trans_edp = {
+ .entries = _bdw_trans_edp,
+ .num_entries = ARRAY_SIZE(_bdw_trans_edp),
};
-static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _bdw_trans_dp[] = {
{ .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } },
{ .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } },
{ .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } },
@@ -98,12 +99,12 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_dp[] = {
{ .hsw = { 0x80D75FFF, 0x001B0002, 0x0 } },
};
-static const struct intel_ddi_buf_trans bdw_ddi_translations_dp = {
- .entries = _bdw_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_bdw_ddi_translations_dp),
+static const struct intel_ddi_buf_trans bdw_trans_dp = {
+ .entries = _bdw_trans_dp,
+ .num_entries = ARRAY_SIZE(_bdw_trans_dp),
};
-static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_fdi[] = {
+static const union intel_ddi_buf_trans_entry _bdw_trans_fdi[] = {
{ .hsw = { 0x00FFFFFF, 0x0001000E, 0x0 } },
{ .hsw = { 0x00D75FFF, 0x0004000A, 0x0 } },
{ .hsw = { 0x00C30FFF, 0x00070006, 0x0 } },
@@ -115,12 +116,12 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_fdi[] = {
{ .hsw = { 0x00D75FFF, 0x000C0000, 0x0 } },
};
-static const struct intel_ddi_buf_trans bdw_ddi_translations_fdi = {
- .entries = _bdw_ddi_translations_fdi,
- .num_entries = ARRAY_SIZE(_bdw_ddi_translations_fdi),
+static const struct intel_ddi_buf_trans bdw_trans_fdi = {
+ .entries = _bdw_trans_fdi,
+ .num_entries = ARRAY_SIZE(_bdw_trans_fdi),
};
-static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _bdw_trans_hdmi[] = {
/* Idx NT mV d T mV df db */
{ .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, /* 0: 400 400 0 */
{ .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } }, /* 1: 400 600 3.5 */
@@ -134,14 +135,14 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_hdmi[] = {
{ .hsw = { 0x80FFFFFF, 0x001B0002, 0x0 } }, /* 9: 1000 1000 0 */
};
-static const struct intel_ddi_buf_trans bdw_ddi_translations_hdmi = {
- .entries = _bdw_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_bdw_ddi_translations_hdmi),
+static const struct intel_ddi_buf_trans bdw_trans_hdmi = {
+ .entries = _bdw_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_bdw_trans_hdmi),
.hdmi_default_entry = 7,
};
/* Skylake H and S */
-static const union intel_ddi_buf_trans_entry _skl_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _skl_trans_dp[] = {
{ .hsw = { 0x00002016, 0x000000A0, 0x0 } },
{ .hsw = { 0x00005012, 0x0000009B, 0x0 } },
{ .hsw = { 0x00007011, 0x00000088, 0x0 } },
@@ -153,13 +154,13 @@ static const union intel_ddi_buf_trans_entry _skl_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x1 } },
};
-static const struct intel_ddi_buf_trans skl_ddi_translations_dp = {
- .entries = _skl_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_skl_ddi_translations_dp),
+static const struct intel_ddi_buf_trans skl_trans_dp = {
+ .entries = _skl_trans_dp,
+ .num_entries = ARRAY_SIZE(_skl_trans_dp),
};
/* Skylake U */
-static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _skl_u_trans_dp[] = {
{ .hsw = { 0x0000201B, 0x000000A2, 0x0 } },
{ .hsw = { 0x00005012, 0x00000088, 0x0 } },
{ .hsw = { 0x80007011, 0x000000CD, 0x1 } },
@@ -171,13 +172,13 @@ static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x1 } },
};
-static const struct intel_ddi_buf_trans skl_u_ddi_translations_dp = {
- .entries = _skl_u_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_skl_u_ddi_translations_dp),
+static const struct intel_ddi_buf_trans skl_u_trans_dp = {
+ .entries = _skl_u_trans_dp,
+ .num_entries = ARRAY_SIZE(_skl_u_trans_dp),
};
/* Skylake Y */
-static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _skl_y_trans_dp[] = {
{ .hsw = { 0x00000018, 0x000000A2, 0x0 } },
{ .hsw = { 0x00005012, 0x00000088, 0x0 } },
{ .hsw = { 0x80007011, 0x000000CD, 0x3 } },
@@ -189,13 +190,13 @@ static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x3 } },
};
-static const struct intel_ddi_buf_trans skl_y_ddi_translations_dp = {
- .entries = _skl_y_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_dp),
+static const struct intel_ddi_buf_trans skl_y_trans_dp = {
+ .entries = _skl_y_trans_dp,
+ .num_entries = ARRAY_SIZE(_skl_y_trans_dp),
};
/* Kabylake H and S */
-static const union intel_ddi_buf_trans_entry _kbl_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _kbl_trans_dp[] = {
{ .hsw = { 0x00002016, 0x000000A0, 0x0 } },
{ .hsw = { 0x00005012, 0x0000009B, 0x0 } },
{ .hsw = { 0x00007011, 0x00000088, 0x0 } },
@@ -207,13 +208,13 @@ static const union intel_ddi_buf_trans_entry _kbl_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x1 } },
};
-static const struct intel_ddi_buf_trans kbl_ddi_translations_dp = {
- .entries = _kbl_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_kbl_ddi_translations_dp),
+static const struct intel_ddi_buf_trans kbl_trans_dp = {
+ .entries = _kbl_trans_dp,
+ .num_entries = ARRAY_SIZE(_kbl_trans_dp),
};
/* Kabylake U */
-static const union intel_ddi_buf_trans_entry _kbl_u_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _kbl_u_trans_dp[] = {
{ .hsw = { 0x0000201B, 0x000000A1, 0x0 } },
{ .hsw = { 0x00005012, 0x00000088, 0x0 } },
{ .hsw = { 0x80007011, 0x000000CD, 0x3 } },
@@ -225,13 +226,13 @@ static const union intel_ddi_buf_trans_entry _kbl_u_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x3 } },
};
-static const struct intel_ddi_buf_trans kbl_u_ddi_translations_dp = {
- .entries = _kbl_u_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_kbl_u_ddi_translations_dp),
+static const struct intel_ddi_buf_trans kbl_u_trans_dp = {
+ .entries = _kbl_u_trans_dp,
+ .num_entries = ARRAY_SIZE(_kbl_u_trans_dp),
};
/* Kabylake Y */
-static const union intel_ddi_buf_trans_entry _kbl_y_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _kbl_y_trans_dp[] = {
{ .hsw = { 0x00001017, 0x000000A1, 0x0 } },
{ .hsw = { 0x00005012, 0x00000088, 0x0 } },
{ .hsw = { 0x80007011, 0x000000CD, 0x3 } },
@@ -243,16 +244,16 @@ static const union intel_ddi_buf_trans_entry _kbl_y_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x3 } },
};
-static const struct intel_ddi_buf_trans kbl_y_ddi_translations_dp = {
- .entries = _kbl_y_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_kbl_y_ddi_translations_dp),
+static const struct intel_ddi_buf_trans kbl_y_trans_dp = {
+ .entries = _kbl_y_trans_dp,
+ .num_entries = ARRAY_SIZE(_kbl_y_trans_dp),
};
/*
* Skylake/Kabylake H and S
* eDP 1.4 low vswing translation parameters
*/
-static const union intel_ddi_buf_trans_entry _skl_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _skl_trans_edp[] = {
{ .hsw = { 0x00000018, 0x000000A8, 0x0 } },
{ .hsw = { 0x00004013, 0x000000A9, 0x0 } },
{ .hsw = { 0x00007011, 0x000000A2, 0x0 } },
@@ -265,16 +266,16 @@ static const union intel_ddi_buf_trans_entry _skl_ddi_translations_edp[] = {
{ .hsw = { 0x00000018, 0x000000DF, 0x0 } },
};
-static const struct intel_ddi_buf_trans skl_ddi_translations_edp = {
- .entries = _skl_ddi_translations_edp,
- .num_entries = ARRAY_SIZE(_skl_ddi_translations_edp),
+static const struct intel_ddi_buf_trans skl_trans_edp = {
+ .entries = _skl_trans_edp,
+ .num_entries = ARRAY_SIZE(_skl_trans_edp),
};
/*
* Skylake/Kabylake U
* eDP 1.4 low vswing translation parameters
*/
-static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _skl_u_trans_edp[] = {
{ .hsw = { 0x00000018, 0x000000A8, 0x0 } },
{ .hsw = { 0x00004013, 0x000000A9, 0x0 } },
{ .hsw = { 0x00007011, 0x000000A2, 0x0 } },
@@ -287,16 +288,16 @@ static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_edp[] = {
{ .hsw = { 0x00000018, 0x000000DF, 0x0 } },
};
-static const struct intel_ddi_buf_trans skl_u_ddi_translations_edp = {
- .entries = _skl_u_ddi_translations_edp,
- .num_entries = ARRAY_SIZE(_skl_u_ddi_translations_edp),
+static const struct intel_ddi_buf_trans skl_u_trans_edp = {
+ .entries = _skl_u_trans_edp,
+ .num_entries = ARRAY_SIZE(_skl_u_trans_edp),
};
/*
* Skylake/Kabylake Y
* eDP 1.4 low vswing translation parameters
*/
-static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _skl_y_trans_edp[] = {
{ .hsw = { 0x00000018, 0x000000A8, 0x0 } },
{ .hsw = { 0x00004013, 0x000000AB, 0x0 } },
{ .hsw = { 0x00007011, 0x000000A4, 0x0 } },
@@ -309,13 +310,13 @@ static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_edp[] = {
{ .hsw = { 0x00000018, 0x0000008A, 0x0 } },
};
-static const struct intel_ddi_buf_trans skl_y_ddi_translations_edp = {
- .entries = _skl_y_ddi_translations_edp,
- .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_edp),
+static const struct intel_ddi_buf_trans skl_y_trans_edp = {
+ .entries = _skl_y_trans_edp,
+ .num_entries = ARRAY_SIZE(_skl_y_trans_edp),
};
/* Skylake/Kabylake U, H and S */
-static const union intel_ddi_buf_trans_entry _skl_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _skl_trans_hdmi[] = {
{ .hsw = { 0x00000018, 0x000000AC, 0x0 } },
{ .hsw = { 0x00005012, 0x0000009D, 0x0 } },
{ .hsw = { 0x00007011, 0x00000088, 0x0 } },
@@ -329,14 +330,14 @@ static const union intel_ddi_buf_trans_entry _skl_ddi_translations_hdmi[] = {
{ .hsw = { 0x80000018, 0x000000C0, 0x1 } },
};
-static const struct intel_ddi_buf_trans skl_ddi_translations_hdmi = {
- .entries = _skl_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_skl_ddi_translations_hdmi),
+static const struct intel_ddi_buf_trans skl_trans_hdmi = {
+ .entries = _skl_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_skl_trans_hdmi),
.hdmi_default_entry = 8,
};
/* Skylake/Kabylake Y */
-static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _skl_y_trans_hdmi[] = {
{ .hsw = { 0x00000018, 0x000000A1, 0x0 } },
{ .hsw = { 0x00005012, 0x000000DF, 0x0 } },
{ .hsw = { 0x80007011, 0x000000CB, 0x3 } },
@@ -350,13 +351,13 @@ static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_hdmi[] = {
{ .hsw = { 0x80000018, 0x000000C0, 0x3 } },
};
-static const struct intel_ddi_buf_trans skl_y_ddi_translations_hdmi = {
- .entries = _skl_y_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_hdmi),
+static const struct intel_ddi_buf_trans skl_y_trans_hdmi = {
+ .entries = _skl_y_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_skl_y_trans_hdmi),
.hdmi_default_entry = 8,
};
-static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _bxt_trans_dp[] = {
/* Idx NT mV diff db */
{ .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */
{ .bxt = { 78, 0x9A, 0, 85, } }, /* 1: 400 3.5 */
@@ -370,12 +371,12 @@ static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_dp[] = {
{ .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */
};
-static const struct intel_ddi_buf_trans bxt_ddi_translations_dp = {
- .entries = _bxt_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_bxt_ddi_translations_dp),
+static const struct intel_ddi_buf_trans bxt_trans_dp = {
+ .entries = _bxt_trans_dp,
+ .num_entries = ARRAY_SIZE(_bxt_trans_dp),
};
-static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _bxt_trans_edp[] = {
/* Idx NT mV diff db */
{ .bxt = { 26, 0, 0, 128, } }, /* 0: 200 0 */
{ .bxt = { 38, 0, 0, 112, } }, /* 1: 200 1.5 */
@@ -389,15 +390,15 @@ static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_edp[] = {
{ .bxt = { 48, 0, 0, 128, } }, /* 9: 300 0 */
};
-static const struct intel_ddi_buf_trans bxt_ddi_translations_edp = {
- .entries = _bxt_ddi_translations_edp,
- .num_entries = ARRAY_SIZE(_bxt_ddi_translations_edp),
+static const struct intel_ddi_buf_trans bxt_trans_edp = {
+ .entries = _bxt_trans_edp,
+ .num_entries = ARRAY_SIZE(_bxt_trans_edp),
};
/* BSpec has 2 recommended values - entries 0 and 8.
* Using the entry with higher vswing.
*/
-static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _bxt_trans_hdmi[] = {
/* Idx NT mV diff db */
{ .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */
{ .bxt = { 52, 0x9A, 0, 85, } }, /* 1: 400 3.5 */
@@ -411,14 +412,14 @@ static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_hdmi[] = {
{ .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */
};
-static const struct intel_ddi_buf_trans bxt_ddi_translations_hdmi = {
- .entries = _bxt_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_bxt_ddi_translations_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_bxt_ddi_translations_hdmi) - 1,
+static const struct intel_ddi_buf_trans bxt_trans_hdmi = {
+ .entries = _bxt_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_bxt_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_bxt_trans_hdmi) - 1,
};
-/* icl_combo_phy_ddi_translations */
-static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3[] = {
+/* icl_combo_phy_trans */
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_dp_hbr2_edp_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -432,12 +433,12 @@ static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3 = {
- .entries = _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3),
+static const struct intel_ddi_buf_trans icl_combo_phy_trans_dp_hbr2_edp_hbr3 = {
+ .entries = _icl_combo_phy_trans_dp_hbr2_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_dp_hbr2_edp_hbr3),
};
-static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_edp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_edp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0x0, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
{ .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */
@@ -451,12 +452,12 @@ static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_edp
{ .icl = { 0x9, 0x7F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
};
-static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2 = {
- .entries = _icl_combo_phy_ddi_translations_edp_hbr2,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_edp_hbr2),
+static const struct intel_ddi_buf_trans icl_combo_phy_trans_edp_hbr2 = {
+ .entries = _icl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_edp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_hdmi[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */
{ .icl = { 0xB, 0x73, 0x36, 0x00, 0x09 } }, /* 450 650 3.2 */
@@ -467,13 +468,13 @@ static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_hdm
{ .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */
};
-static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi = {
- .entries = _icl_combo_phy_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_icl_combo_phy_ddi_translations_hdmi) - 1,
+static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = {
+ .entries = _icl_combo_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_icl_combo_phy_trans_hdmi) - 1,
};
-static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x47, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */
@@ -487,12 +488,12 @@ static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_dp[
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_dp = {
- .entries = _ehl_combo_phy_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_ehl_combo_phy_ddi_translations_dp),
+static const struct intel_ddi_buf_trans ehl_combo_phy_trans_dp = {
+ .entries = _ehl_combo_phy_trans_dp,
+ .num_entries = ARRAY_SIZE(_ehl_combo_phy_trans_dp),
};
-static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_edp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_edp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
{ .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */
@@ -506,12 +507,12 @@ static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_edp
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
};
-static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_edp_hbr2 = {
- .entries = _ehl_combo_phy_ddi_translations_edp_hbr2,
- .num_entries = ARRAY_SIZE(_ehl_combo_phy_ddi_translations_edp_hbr2),
+static const struct intel_ddi_buf_trans ehl_combo_phy_trans_edp_hbr2 = {
+ .entries = _ehl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_ehl_combo_phy_trans_edp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _jsl_combo_phy_trans_edp_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
{ .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */
@@ -525,12 +526,12 @@ static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
};
-static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr = {
- .entries = _jsl_combo_phy_ddi_translations_edp_hbr,
- .num_entries = ARRAY_SIZE(_jsl_combo_phy_ddi_translations_edp_hbr),
+static const struct intel_ddi_buf_trans jsl_combo_phy_trans_edp_hbr = {
+ .entries = _jsl_combo_phy_trans_edp_hbr,
+ .num_entries = ARRAY_SIZE(_jsl_combo_phy_trans_edp_hbr),
};
-static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _jsl_combo_phy_trans_edp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
{ .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */
@@ -544,12 +545,12 @@ static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
};
-static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2 = {
- .entries = _jsl_combo_phy_ddi_translations_edp_hbr2,
- .num_entries = ARRAY_SIZE(_jsl_combo_phy_ddi_translations_edp_hbr2),
+static const struct intel_ddi_buf_trans jsl_combo_phy_trans_edp_hbr2 = {
+ .entries = _jsl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_jsl_combo_phy_trans_edp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
+static const union intel_ddi_buf_trans_entry _dg1_combo_phy_trans_dp_rbr_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */
@@ -563,12 +564,12 @@ static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr = {
- .entries = _dg1_combo_phy_ddi_translations_dp_rbr_hbr,
- .num_entries = ARRAY_SIZE(_dg1_combo_phy_ddi_translations_dp_rbr_hbr),
+static const struct intel_ddi_buf_trans dg1_combo_phy_trans_dp_rbr_hbr = {
+ .entries = _dg1_combo_phy_trans_dp_rbr_hbr,
+ .num_entries = ARRAY_SIZE(_dg1_combo_phy_trans_dp_rbr_hbr),
};
-static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _dg1_combo_phy_trans_dp_hbr2_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */
@@ -582,12 +583,12 @@ static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
- .entries = _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_dg1_combo_phy_ddi_translations_dp_hbr2_hbr3),
+static const struct intel_ddi_buf_trans dg1_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _dg1_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_dg1_combo_phy_trans_dp_hbr2_hbr3),
};
-static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_rbr_hbr[] = {
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_rbr_hbr[] = {
/* Voltage swing pre-emphasis */
{ .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */
{ .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */
@@ -601,12 +602,12 @@ static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_rbr_hb
{ .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */
};
-static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr = {
- .entries = _icl_mg_phy_ddi_translations_rbr_hbr,
- .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_rbr_hbr),
+static const struct intel_ddi_buf_trans icl_mg_phy_trans_rbr_hbr = {
+ .entries = _icl_mg_phy_trans_rbr_hbr,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_rbr_hbr),
};
-static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_hbr2_hbr3[] = {
/* Voltage swing pre-emphasis */
{ .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */
{ .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */
@@ -620,12 +621,12 @@ static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hbr2_h
{ .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */
};
-static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3 = {
- .entries = _icl_mg_phy_ddi_translations_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hbr2_hbr3),
+static const struct intel_ddi_buf_trans icl_mg_phy_trans_hbr2_hbr3 = {
+ .entries = _icl_mg_phy_trans_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_hbr2_hbr3),
};
-static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_hdmi[] = {
/* HDMI Preset VS Pre-emph */
{ .mg = { 0x1A, 0x0, 0x0 } }, /* 1 400mV 0dB */
{ .mg = { 0x20, 0x0, 0x0 } }, /* 2 500mV 0dB */
@@ -639,13 +640,13 @@ static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hdmi[]
{ .mg = { 0x36, 0x0, 0x9 } }, /* 10 Full -3 dB */
};
-static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi = {
- .entries = _icl_mg_phy_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hdmi) - 1,
+static const struct intel_ddi_buf_trans icl_mg_phy_trans_hdmi = {
+ .entries = _icl_mg_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_icl_mg_phy_trans_hdmi) - 1,
};
-static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_dp_hbr[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
{ .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */
@@ -659,12 +660,12 @@ static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hb
{ .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */
};
-static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_dp_hbr = {
- .entries = _tgl_dkl_phy_ddi_translations_dp_hbr,
- .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_dp_hbr),
+static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_dp_hbr = {
+ .entries = _tgl_dkl_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_dp_hbr),
};
-static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_dp_hbr2[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
{ .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */
@@ -678,12 +679,12 @@ static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hb
{ .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */
};
-static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_dp_hbr2 = {
- .entries = _tgl_dkl_phy_ddi_translations_dp_hbr2,
- .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_dp_hbr2),
+static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_dp_hbr2 = {
+ .entries = _tgl_dkl_phy_trans_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_dp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_hdmi[] = {
/* HDMI Preset VS Pre-emph */
{ .dkl = { 0x7, 0x0, 0x0 } }, /* 1 400mV 0dB */
{ .dkl = { 0x6, 0x0, 0x0 } }, /* 2 500mV 0dB */
@@ -697,13 +698,13 @@ static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_hdmi[
{ .dkl = { 0x0, 0x0, 0xA } }, /* 10 Full -3 dB */
};
-static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_hdmi = {
- .entries = _tgl_dkl_phy_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_hdmi) - 1,
+static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_hdmi = {
+ .entries = _tgl_dkl_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_tgl_dkl_phy_trans_hdmi) - 1,
};
-static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_dp_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -717,12 +718,12 @@ static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr = {
- .entries = _tgl_combo_phy_ddi_translations_dp_hbr,
- .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_dp_hbr),
+static const struct intel_ddi_buf_trans tgl_combo_phy_trans_dp_hbr = {
+ .entries = _tgl_combo_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_dp_hbr),
};
-static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_dp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -736,12 +737,12 @@ static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2 = {
- .entries = _tgl_combo_phy_ddi_translations_dp_hbr2,
- .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_dp_hbr2),
+static const struct intel_ddi_buf_trans tgl_combo_phy_trans_dp_hbr2 = {
+ .entries = _tgl_combo_phy_trans_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_dp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_trans_dp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */
@@ -755,16 +756,16 @@ static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_ddi_translations_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2 = {
- .entries = _tgl_uy_combo_phy_ddi_translations_dp_hbr2,
- .num_entries = ARRAY_SIZE(_tgl_uy_combo_phy_ddi_translations_dp_hbr2),
+static const struct intel_ddi_buf_trans tgl_uy_combo_phy_trans_dp_hbr2 = {
+ .entries = _tgl_uy_combo_phy_trans_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_uy_combo_phy_trans_dp_hbr2),
};
/*
* Cloned the HOBL entry to comply with the voltage and pre-emphasis entries
* that DisplayPort specification requires
*/
-static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_edp_hbr2_hobl[] = {
/* VS pre-emp */
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 0 */
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 1 */
@@ -777,12 +778,12 @@ static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_edp
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 1 */
};
-static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl = {
- .entries = _tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
- .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_edp_hbr2_hobl),
+static const struct intel_ddi_buf_trans tgl_combo_phy_trans_edp_hbr2_hobl = {
+ .entries = _tgl_combo_phy_trans_edp_hbr2_hobl,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_edp_hbr2_hobl),
};
-static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _rkl_combo_phy_trans_dp_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x2F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -796,12 +797,12 @@ static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr = {
- .entries = _rkl_combo_phy_ddi_translations_dp_hbr,
- .num_entries = ARRAY_SIZE(_rkl_combo_phy_ddi_translations_dp_hbr),
+static const struct intel_ddi_buf_trans rkl_combo_phy_trans_dp_hbr = {
+ .entries = _rkl_combo_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_rkl_combo_phy_trans_dp_hbr),
};
-static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _rkl_combo_phy_trans_dp_hbr2_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x50, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */
@@ -815,12 +816,12 @@ static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
- .entries = _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_rkl_combo_phy_ddi_translations_dp_hbr2_hbr3),
+static const struct intel_ddi_buf_trans rkl_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _rkl_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_rkl_combo_phy_trans_dp_hbr2_hbr3),
};
-static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_dp_hbr2_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -834,12 +835,12 @@ static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_dp
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
- .entries = _adls_combo_phy_ddi_translations_dp_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_dp_hbr2_hbr3),
+static const struct intel_ddi_buf_trans adls_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _adls_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_dp_hbr2_hbr3),
};
-static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_edp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0x9, 0x73, 0x3D, 0x00, 0x02 } }, /* 200 200 0.0 */
{ .icl = { 0x9, 0x7A, 0x3C, 0x00, 0x03 } }, /* 200 250 1.9 */
@@ -853,12 +854,12 @@ static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_ed
{ .icl = { 0x4, 0x6C, 0x3A, 0x00, 0x05 } }, /* 350 350 0.0 */
};
-static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr2 = {
- .entries = _adls_combo_phy_ddi_translations_edp_hbr2,
- .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr2),
+static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr2 = {
+ .entries = _adls_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_edp_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -872,12 +873,12 @@ static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_ed
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr3 = {
- .entries = _adls_combo_phy_ddi_translations_edp_hbr3,
- .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr3),
+static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr3 = {
+ .entries = _adls_combo_phy_trans_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr3),
};
-static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_hdmi[] = {
/* NT mV Trans mV db */
{ .icl = { 0x6, 0x60, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */
{ .icl = { 0x6, 0x68, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
@@ -891,13 +892,13 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_hd
{ .icl = { 0xB, 0x7F, 0x33, 0x00, 0x0C } }, /* Full Red -3.0 */
};
-static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_hdmi = {
- .entries = _adlp_combo_phy_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_hdmi) - 1,
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_hdmi = {
+ .entries = _adlp_combo_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_adlp_combo_phy_trans_hdmi) - 1,
};
-static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -911,12 +912,12 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_dp_hbr = {
- .entries = _adlp_combo_phy_ddi_translations_dp_hbr,
- .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_dp_hbr),
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr = {
+ .entries = _adlp_combo_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr),
};
-static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -930,22 +931,22 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
- .entries = _adlp_combo_phy_ddi_translations_dp_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_dp_hbr2_hbr3),
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _adlp_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr2_hbr3),
};
-static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_edp_hbr3 = {
- .entries = _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3),
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_hbr3 = {
+ .entries = _icl_combo_phy_trans_dp_hbr2_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_dp_hbr2_edp_hbr3),
};
-static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_edp_up_to_hbr2 = {
- .entries = _icl_combo_phy_ddi_translations_edp_hbr2,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_edp_hbr2),
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_up_to_hbr2 = {
+ .entries = _icl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_edp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ .dkl = { 0x7, 0x0, 0x01 } }, /* 0 0 400mV 0 dB */
{ .dkl = { 0x5, 0x0, 0x06 } }, /* 0 1 400mV 3.5 dB */
@@ -959,12 +960,12 @@ static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_h
{ .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */
};
-static const struct intel_ddi_buf_trans adlp_dkl_phy_ddi_translations_dp_hbr = {
- .entries = _adlp_dkl_phy_ddi_translations_dp_hbr,
- .num_entries = ARRAY_SIZE(_adlp_dkl_phy_ddi_translations_dp_hbr),
+static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr = {
+ .entries = _adlp_dkl_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_adlp_dkl_phy_trans_dp_hbr),
};
-static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr2_hbr3[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
{ .dkl = { 0x5, 0x0, 0x04 } }, /* 0 1 400mV 3.5 dB */
@@ -978,21 +979,64 @@ static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_h
{ .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */
};
-static const struct intel_ddi_buf_trans adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3 = {
- .entries = _adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3),
+static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _adlp_dkl_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adlp_dkl_phy_trans_dp_hbr2_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _dg2_snps_trans[] = {
+ { .snps = { 26, 0, 0 } }, /* VS 0, pre-emph 0 */
+ { .snps = { 33, 0, 6 } }, /* VS 0, pre-emph 1 */
+ { .snps = { 38, 0, 12 } }, /* VS 0, pre-emph 2 */
+ { .snps = { 43, 0, 19 } }, /* VS 0, pre-emph 3 */
+ { .snps = { 39, 0, 0 } }, /* VS 1, pre-emph 0 */
+ { .snps = { 44, 0, 8 } }, /* VS 1, pre-emph 1 */
+ { .snps = { 47, 0, 15 } }, /* VS 1, pre-emph 2 */
+ { .snps = { 52, 0, 0 } }, /* VS 2, pre-emph 0 */
+ { .snps = { 51, 0, 10 } }, /* VS 2, pre-emph 1 */
+ { .snps = { 62, 0, 0 } }, /* VS 3, pre-emph 0 */
+};
+
+static const struct intel_ddi_buf_trans dg2_snps_trans = {
+ .entries = _dg2_snps_trans,
+ .num_entries = ARRAY_SIZE(_dg2_snps_trans),
+ .hdmi_default_entry = ARRAY_SIZE(_dg2_snps_trans) - 1,
+};
+
+static const union intel_ddi_buf_trans_entry _dg2_snps_trans_uhbr[] = {
+ { .snps = { 62, 0, 0 } }, /* preset 0 */
+ { .snps = { 56, 0, 6 } }, /* preset 1 */
+ { .snps = { 51, 0, 11 } }, /* preset 2 */
+ { .snps = { 48, 0, 14 } }, /* preset 3 */
+ { .snps = { 43, 0, 19 } }, /* preset 4 */
+ { .snps = { 59, 3, 0 } }, /* preset 5 */
+ { .snps = { 53, 3, 6 } }, /* preset 6 */
+ { .snps = { 49, 3, 10 } }, /* preset 7 */
+ { .snps = { 45, 3, 14 } }, /* preset 8 */
+ { .snps = { 42, 3, 17 } }, /* preset 9 */
+ { .snps = { 56, 6, 0 } }, /* preset 10 */
+ { .snps = { 50, 6, 6 } }, /* preset 11 */
+ { .snps = { 47, 6, 9 } }, /* preset 12 */
+ { .snps = { 42, 6, 14 } }, /* preset 13 */
+ { .snps = { 46, 8, 8 } }, /* preset 14 */
+ { .snps = { 56, 3, 3 } }, /* preset 15 */
+};
+
+static const struct intel_ddi_buf_trans dg2_snps_trans_uhbr = {
+ .entries = _dg2_snps_trans_uhbr,
+ .num_entries = ARRAY_SIZE(_dg2_snps_trans_uhbr),
};
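
Note: everything in intel_ddi_buf_trans.c, including the new DG2 SNPS tables above, follows one pattern: a private entries[] array wrapped in a struct that carries the array plus its length via ARRAY_SIZE(), so consumers get a bounds-checkable count for free. A standalone sketch of the pattern; field names loosely mirror the diff.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct entry { int vswing, preemph; };

struct buf_trans {
	const struct entry *entries;
	int num_entries;	/* kept in sync by ARRAY_SIZE at definition */
};

static const struct entry _demo_entries[] = {
	{ 26, 0 }, { 33, 6 }, { 38, 12 },
};

static const struct buf_trans demo_trans = {
	.entries = _demo_entries,
	.num_entries = ARRAY_SIZE(_demo_entries),
};

int main(void)
{
	int i;

	for (i = 0; i < demo_trans.num_entries; i++)
		printf("entry %d: vswing=%d preemph=%d\n",
		       i, demo_trans.entries[i].vswing,
		       demo_trans.entries[i].preemph);
	return 0;
}
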
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
{
- return table == &tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+ return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
}
static const struct intel_ddi_buf_trans *
-intel_get_buf_trans(const struct intel_ddi_buf_trans *ddi_translations, int *num_entries)
+intel_get_buf_trans(const struct intel_ddi_buf_trans *trans, int *num_entries)
{
- *num_entries = ddi_translations->num_entries;
- return ddi_translations;
+ *num_entries = trans->num_entries;
+ return trans;
}
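
Note: two idioms sit in the hunk above: is_hobl_buf_trans() identifies the special HOBL table by pointer identity against its singleton definition, and intel_get_buf_trans() returns the table while reporting its size through an out parameter. A standalone sketch of both; names are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct buf_trans { int num_entries; };

static const struct buf_trans hobl_trans = { .num_entries = 9 };
static const struct buf_trans normal_trans = { .num_entries = 10 };

static bool is_hobl(const struct buf_trans *t)
{
	return t == &hobl_trans;	/* identity, not content, comparison */
}

static const struct buf_trans *get_buf_trans(const struct buf_trans *t,
					     int *num_entries)
{
	*num_entries = t->num_entries;
	return t;
}

int main(void)
{
	int n;
	const struct buf_trans *t;

	t = get_buf_trans(&hobl_trans, &n);
	printf("entries=%d hobl=%d\n", n, is_hobl(t));

	t = get_buf_trans(&normal_trans, &n);
	printf("entries=%d hobl=%d\n", n, is_hobl(t));
	return 0;
}
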
static const struct intel_ddi_buf_trans *
@@ -1001,11 +1045,11 @@ hsw_get_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
- return intel_get_buf_trans(&hsw_ddi_translations_fdi, n_entries);
+ return intel_get_buf_trans(&hsw_trans_fdi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&hsw_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&hsw_trans_hdmi, n_entries);
else
- return intel_get_buf_trans(&hsw_ddi_translations_dp, n_entries);
+ return intel_get_buf_trans(&hsw_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1016,14 +1060,14 @@ bdw_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
- return intel_get_buf_trans(&bdw_ddi_translations_fdi, n_entries);
+ return intel_get_buf_trans(&bdw_trans_fdi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&bdw_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&bdw_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return intel_get_buf_trans(&bdw_ddi_translations_edp, n_entries);
+ return intel_get_buf_trans(&bdw_trans_edp, n_entries);
else
- return intel_get_buf_trans(&bdw_ddi_translations_dp, n_entries);
+ return intel_get_buf_trans(&bdw_trans_dp, n_entries);
}
static int skl_buf_trans_num_entries(enum port port, int n_entries)
@@ -1037,12 +1081,12 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries)
static const struct intel_ddi_buf_trans *
_skl_get_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_ddi_buf_trans *ddi_translations,
+ const struct intel_ddi_buf_trans *trans,
int *n_entries)
{
- ddi_translations = intel_get_buf_trans(ddi_translations, n_entries);
+ trans = intel_get_buf_trans(trans, n_entries);
*n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
+ return trans;
}
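
Note: _skl_get_buf_trans_dp() above layers a per-port clamp on top of the generic getter: the table pointer is passed through while the reported entry count is limited by skl_buf_trans_num_entries(). A sketch of that clamp under a stated assumption: the exact per-port limits below (10 entries on some DDIs, 9 on the rest) are illustrative, not taken from this patch.

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

static int min_int(int a, int b) { return a < b ? a : b; }

static int skl_clamp_entries(enum port port, int n_entries)
{
	/* Assumption for the sketch: some DDIs expose one extra DP entry. */
	if (port == PORT_A || port == PORT_E)
		return min_int(n_entries, 10);
	return min_int(n_entries, 9);
}

int main(void)
{
	printf("PORT_A: %d\n", skl_clamp_entries(PORT_A, 10));
	printf("PORT_B: %d\n", skl_clamp_entries(PORT_B, 10));
	return 0;
}
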
static const struct intel_ddi_buf_trans *
@@ -1053,12 +1097,12 @@ skl_y_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_y_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_y_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1069,12 +1113,12 @@ skl_u_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_u_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1085,12 +1129,12 @@ skl_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1101,12 +1145,12 @@ kbl_y_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_y_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &kbl_y_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &kbl_y_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1117,12 +1161,12 @@ kbl_u_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &kbl_u_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &kbl_u_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1133,12 +1177,12 @@ kbl_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &kbl_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &kbl_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1149,12 +1193,12 @@ bxt_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&bxt_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&bxt_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return intel_get_buf_trans(&bxt_ddi_translations_edp, n_entries);
+ return intel_get_buf_trans(&bxt_trans_edp, n_entries);
else
- return intel_get_buf_trans(&bxt_ddi_translations_dp, n_entries);
+ return intel_get_buf_trans(&bxt_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1162,7 +1206,7 @@ icl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
}
@@ -1174,10 +1218,10 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 540000) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
} else if (dev_priv->vbt.edp.low_vswing) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
}
@@ -1190,7 +1234,7 @@ icl_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1203,10 +1247,10 @@ icl_get_mg_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000) {
- return intel_get_buf_trans(&icl_mg_phy_ddi_translations_hbr2_hbr3,
+ return intel_get_buf_trans(&icl_mg_phy_trans_hbr2_hbr3,
n_entries);
} else {
- return intel_get_buf_trans(&icl_mg_phy_ddi_translations_rbr_hbr,
+ return intel_get_buf_trans(&icl_mg_phy_trans_rbr_hbr,
n_entries);
}
}
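/*
 * Annotation, not patch content: port_clock is the DP symbol clock in kHz,
 * so 162000 = RBR, 270000 = HBR, 540000 = HBR2 and 810000 = HBR3. The
 * "> 270000" checks in these helpers therefore pick the HBR2/HBR3 tables,
 * while "> 540000" picks entries rated for HBR3; on DG2 further below,
 * intel_dp_is_uhbr() similarly routes DP 2.0 ultra-high-bit-rate links to
 * their own SNPS table.
 */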
@@ -1217,7 +1261,7 @@ icl_get_mg_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_mg_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_mg_phy_trans_hdmi, n_entries);
else
return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries);
}
@@ -1228,9 +1272,9 @@ ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&ehl_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ return intel_get_buf_trans(&ehl_combo_phy_trans_edp_hbr2, n_entries);
else
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1241,12 +1285,12 @@ ehl_get_combo_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
dev_priv->vbt.edp.low_vswing)
return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
- return intel_get_buf_trans(&ehl_combo_phy_ddi_translations_dp, n_entries);
+ return intel_get_buf_trans(&ehl_combo_phy_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1255,9 +1299,9 @@ jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&jsl_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ return intel_get_buf_trans(&jsl_combo_phy_trans_edp_hbr2, n_entries);
else
- return intel_get_buf_trans(&jsl_combo_phy_ddi_translations_edp_hbr, n_entries);
+ return intel_get_buf_trans(&jsl_combo_phy_trans_edp_hbr, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1268,12 +1312,12 @@ jsl_get_combo_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
dev_priv->vbt.edp.low_vswing)
return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1285,14 +1329,14 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
if (crtc_state->port_clock > 270000) {
if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
- return intel_get_buf_trans(&tgl_uy_combo_phy_ddi_translations_dp_hbr2,
+ return intel_get_buf_trans(&tgl_uy_combo_phy_trans_dp_hbr2,
n_entries);
} else {
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr2,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr2,
n_entries);
}
} else {
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr,
n_entries);
}
}
@@ -1306,13 +1350,13 @@ tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
} else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
} else if (dev_priv->vbt.edp.low_vswing) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
}
@@ -1325,7 +1369,7 @@ tgl_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1338,10 +1382,10 @@ dg1_get_combo_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&dg1_combo_phy_ddi_translations_dp_hbr2_hbr3,
+ return intel_get_buf_trans(&dg1_combo_phy_trans_dp_hbr2_hbr3,
n_entries);
else
- return intel_get_buf_trans(&dg1_combo_phy_ddi_translations_dp_rbr_hbr,
+ return intel_get_buf_trans(&dg1_combo_phy_trans_dp_rbr_hbr,
n_entries);
}
@@ -1354,13 +1398,13 @@ dg1_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000)
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed)
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
else if (dev_priv->vbt.edp.low_vswing)
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
else
return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
@@ -1372,7 +1416,7 @@ dg1_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return dg1_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1385,9 +1429,9 @@ rkl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&rkl_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries);
+ return intel_get_buf_trans(&rkl_combo_phy_trans_dp_hbr2_hbr3, n_entries);
else
- return intel_get_buf_trans(&rkl_combo_phy_ddi_translations_dp_hbr, n_entries);
+ return intel_get_buf_trans(&rkl_combo_phy_trans_dp_hbr, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1399,13 +1443,13 @@ rkl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
} else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
} else if (dev_priv->vbt.edp.low_vswing) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
}
@@ -1418,7 +1462,7 @@ rkl_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return rkl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1431,9 +1475,9 @@ adls_get_combo_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&adls_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries);
+ return intel_get_buf_trans(&adls_combo_phy_trans_dp_hbr2_hbr3, n_entries);
else
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr, n_entries);
+ return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1445,11 +1489,11 @@ adls_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000)
- return intel_get_buf_trans(&adls_combo_phy_ddi_translations_edp_hbr3, n_entries);
+ return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr3, n_entries);
else if (i915->vbt.edp.hobl && !intel_dp->hobl_failed)
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, n_entries);
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries);
else if (i915->vbt.edp.low_vswing)
- return intel_get_buf_trans(&adls_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr2, n_entries);
else
return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
}
@@ -1460,7 +1504,7 @@ adls_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return adls_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1473,9 +1517,9 @@ adlp_get_combo_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries);
+ return intel_get_buf_trans(&adlp_combo_phy_trans_dp_hbr2_hbr3, n_entries);
else
- return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_dp_hbr, n_entries);
+ return intel_get_buf_trans(&adlp_combo_phy_trans_dp_hbr, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1487,13 +1531,13 @@ adlp_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000) {
- return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_edp_hbr3,
+ return intel_get_buf_trans(&adlp_combo_phy_trans_edp_hbr3,
n_entries);
} else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
} else if (dev_priv->vbt.edp.low_vswing) {
- return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_edp_up_to_hbr2,
+ return intel_get_buf_trans(&adlp_combo_phy_trans_edp_up_to_hbr2,
n_entries);
}
@@ -1506,7 +1550,7 @@ adlp_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&adlp_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return adlp_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1519,10 +1563,10 @@ tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000) {
- return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_dp_hbr2,
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_dp_hbr2,
n_entries);
} else {
- return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_dp_hbr,
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_dp_hbr,
n_entries);
}
}
@@ -1533,7 +1577,7 @@ tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_hdmi, n_entries);
else
return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
}
@@ -1544,10 +1588,10 @@ adlp_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000) {
- return intel_get_buf_trans(&adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3,
+ return intel_get_buf_trans(&adlp_dkl_phy_trans_dp_hbr2_hbr3,
n_entries);
} else {
- return intel_get_buf_trans(&adlp_dkl_phy_ddi_translations_dp_hbr,
+ return intel_get_buf_trans(&adlp_dkl_phy_trans_dp_hbr,
n_entries);
}
}
@@ -1558,29 +1602,21 @@ adlp_get_dkl_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_hdmi, n_entries);
else
return adlp_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
}
-int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *default_entry)
+static const struct intel_ddi_buf_trans *
+dg2_get_snps_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_ddi_buf_trans *ddi_translations;
- int n_entries;
-
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
-
- if (drm_WARN_ON(&dev_priv->drm, !ddi_translations)) {
- *default_entry = 0;
- return 0;
- }
-
- *default_entry = ddi_translations->hdmi_default_entry;
-
- return n_entries;
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ intel_dp_is_uhbr(crtc_state))
+ return intel_get_buf_trans(&dg2_snps_trans_uhbr, n_entries);
+ else
+ return intel_get_buf_trans(&dg2_snps_trans, n_entries);
}
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
@@ -1588,7 +1624,9 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(i915, encoder->port);
- if (IS_ALDERLAKE_P(i915)) {
+ if (IS_DG2(i915)) {
+ encoder->get_buf_trans = dg2_get_snps_buf_trans;
+ } else if (IS_ALDERLAKE_P(i915)) {
if (intel_phy_is_combo(i915, phy))
encoder->get_buf_trans = adlp_get_combo_buf_trans;
else
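/*
 * Illustrative usage sketch, not part of the patch: once
 * intel_ddi_buf_trans_init() has installed the per-platform hook, callers
 * reach the table through the encoder vfunc. A hypothetical caller might
 * look like:
 */
static void example_lookup(struct intel_encoder *encoder,
			   const struct intel_crtc_state *crtc_state)
{
	int n_entries;
	const struct intel_ddi_buf_trans *trans;

	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
	if (!trans)
		return;

	/* trans->entries[0 .. n_entries - 1] hold the per-level PHY settings */
}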
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 2acd720f9d4f..2133984a572b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -34,15 +34,21 @@ struct icl_ddi_buf_trans {
};
struct icl_mg_phy_ddi_buf_trans {
- u32 cri_txdeemph_override_11_6;
- u32 cri_txdeemph_override_5_0;
- u32 cri_txdeemph_override_17_12;
+ u8 cri_txdeemph_override_11_6;
+ u8 cri_txdeemph_override_5_0;
+ u8 cri_txdeemph_override_17_12;
};
struct tgl_dkl_phy_ddi_buf_trans {
- u32 dkl_vswing_control;
- u32 dkl_preshoot_control;
- u32 dkl_de_emphasis_control;
+ u8 vswing;
+ u8 preshoot;
+ u8 de_emphasis;
+};
+
+struct dg2_snps_phy_buf_trans {
+ u8 vswing;
+ u8 pre_cursor;
+ u8 post_cursor;
};
union intel_ddi_buf_trans_entry {
@@ -51,6 +57,7 @@ union intel_ddi_buf_trans_entry {
struct icl_ddi_buf_trans icl;
struct icl_mg_phy_ddi_buf_trans mg;
struct tgl_dkl_phy_ddi_buf_trans dkl;
+ struct dg2_snps_phy_buf_trans snps;
};
struct intel_ddi_buf_trans {
@@ -61,10 +68,6 @@ struct intel_ddi_buf_trans {
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table);
-int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *default_entry);
-
void intel_ddi_buf_trans_init(struct intel_encoder *encoder);
#endif
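/*
 * Illustrative only, values made up: with the fields narrowed to u8 and the
 * new SNPS member, per-PHY tables are declared through the union, e.g.
 */
static const union intel_ddi_buf_trans_entry example_dkl_entries[] = {
	{ .dkl = { .vswing = 0x7, .preshoot = 0x0, .de_emphasis = 0x0 } },
	{ .dkl = { .vswing = 0x5, .preshoot = 0x0, .de_emphasis = 0x5 } },
};

static const union intel_ddi_buf_trans_entry example_snps_entries[] = {
	{ .snps = { .vswing = 26, .pre_cursor = 0, .post_cursor = 0 } },
};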
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 17f44ffea586..ff598b6cd953 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -52,6 +52,7 @@
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
+#include "display/intel_drrs.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
@@ -67,9 +68,10 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
-#include "gt/intel_rps.h"
#include "gt/gen8_ppgtt.h"
+#include "pxp/intel_pxp.h"
+
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "i915_drv.h"
@@ -84,35 +86,37 @@
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
+#include "intel_dpt.h"
#include "intel_fbc.h"
-#include "intel_fdi.h"
#include "intel_fbdev.h"
+#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
+#include "intel_panel.h"
+#include "intel_pcode.h"
#include "intel_pipe_crc.h"
+#include "intel_plane_initial.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
-#include "intel_sideband.h"
+#include "intel_sbi.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "vlv_sideband.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static int intel_framebuffer_init(struct intel_framebuffer *ifb,
- struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -120,186 +124,105 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
-struct i915_dpt {
- struct i915_address_space vm;
-
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- void __iomem *iomem;
-};
-
-#define i915_is_dpt(vm) ((vm)->is_dpt)
-
-static inline struct i915_dpt *
-i915_vm_to_dpt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
- GEM_BUG_ON(!i915_is_dpt(vm));
- return container_of(vm, struct i915_dpt, vm);
-}
-
-#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
-
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void dpt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ * @dev_priv: i915 device
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+static void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- gen8_pte_t __iomem *base = dpt->iomem;
-
- gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
- vm->pte_encode(addr, level, flags));
+ if (dev_priv->wm_disp->update_wm)
+ dev_priv->wm_disp->update_wm(dev_priv);
}
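/*
 * Worked example of the normal-mode formula above (illustrative numbers,
 * not from the patch): at a 148500 kHz dotclock with 4 bytes per pixel and
 * an assumed 12 us latency, watermark = 148500000 * 4 * 0.000012 = 7128
 * bytes of FIFO, which is then rounded up and padded with the 2 extra
 * entries for clock crossings before being programmed.
 */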
-static void dpt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- gen8_pte_t __iomem *base = dpt->iomem;
- const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
- struct sgt_iter sgt_iter;
- dma_addr_t addr;
- int i;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- i = vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
- gen8_set_pte(&base[i++], pte_encode | addr);
-}
-
-static void dpt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
+static int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->compute_pipe_wm)
+ return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
+ return 0;
}
-static void dpt_bind_vma(struct i915_address_space *vm,
- struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_gem_object *obj = vma->obj;
- u32 pte_flags;
-
- /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
- pte_flags = 0;
- if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
- pte_flags |= PTE_READ_ONLY;
- if (i915_gem_object_is_lmem(obj))
- pte_flags |= PTE_LM;
-
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (!dev_priv->wm_disp->compute_intermediate_wm)
+ return 0;
+ if (drm_WARN_ON(&dev_priv->drm,
+ !dev_priv->wm_disp->compute_pipe_wm))
+ return 0;
+ return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
}
-static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+static bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- vm->clear_range(vm, vma->node.start, vma->size);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->initial_watermarks) {
+ dev_priv->wm_disp->initial_watermarks(state, crtc);
+ return true;
+ }
+ return false;
}
-static void dpt_cleanup(struct i915_address_space *vm)
+static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_gem_object_put(dpt->obj);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->atomic_update_watermarks)
+ dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
}
-static struct i915_address_space *
-intel_dpt_create(struct intel_framebuffer *fb)
+static void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
- struct drm_i915_private *i915 = to_i915(obj->dev);
- struct drm_i915_gem_object *dpt_obj;
- struct i915_address_space *vm;
- struct i915_dpt *dpt;
- size_t size;
- int ret;
-
- if (intel_fb_needs_pot_stride_remap(fb))
- size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
- else
- size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
-
- size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
-
- if (HAS_LMEM(i915))
- dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
- else
- dpt_obj = i915_gem_object_create_stolen(i915, size);
- if (IS_ERR(dpt_obj))
- return ERR_CAST(dpt_obj);
-
- ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
- if (ret) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(ret);
- }
-
- dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
- if (!dpt) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- vm = &dpt->vm;
-
- vm->gt = &i915->gt;
- vm->i915 = i915;
- vm->dma = i915->drm.dev;
- vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- vm->is_dpt = true;
-
- i915_address_space_init(vm, VM_CLASS_DPT);
-
- vm->insert_page = dpt_insert_page;
- vm->clear_range = dpt_clear_range;
- vm->insert_entries = dpt_insert_entries;
- vm->cleanup = dpt_cleanup;
-
- vm->vma_ops.bind_vma = dpt_bind_vma;
- vm->vma_ops.unbind_vma = dpt_unbind_vma;
- vm->vma_ops.set_pages = ggtt_set_pages;
- vm->vma_ops.clear_pages = clear_pages;
-
- vm->pte_encode = gen8_ggtt_pte_encode;
-
- dpt->obj = dpt_obj;
-
- return &dpt->vm;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->optimize_watermarks)
+ dev_priv->wm_disp->optimize_watermarks(state, crtc);
}
-static void intel_dpt_destroy(struct i915_address_space *vm)
+static int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_vm_close(&dpt->vm);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->compute_global_watermarks)
+ return dev_priv->wm_disp->compute_global_watermarks(state);
+ return 0;
}
/* returns HPLL frequency in kHz */
@@ -359,6 +282,12 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq);
}
+static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->active_planes &
+ ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
+}
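/*
 * Annotation, not patch content: the pipe counts as "HDR mode" when masking
 * out the ICL HDR-capable planes and the cursor leaves no other plane
 * active.
 */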
+
/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
@@ -384,6 +313,15 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}
+/* Wa_1604331009:icl,jsl,ehl */
+static void
+icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool enable)
+{
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
+ enable ? CURSOR_GATING_DIS : 0);
+}
+
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
@@ -464,168 +402,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
}
}
-/* Only for pre-ILK configs */
-void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- u32 val;
- bool cur_state;
-
- val = intel_de_read(dev_priv, DPLL(pipe));
- cur_state = !!(val & DPLL_VCO_ENABLE);
- I915_STATE_WARN(cur_state != state,
- "PLL state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-
-/* XXX: the dsi pll is shared between MIPI DSI ports */
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
-{
- u32 val;
- bool cur_state;
-
- vlv_cck_get(dev_priv);
- val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- vlv_cck_put(dev_priv);
-
- cur_state = val & DSI_PLL_VCO_EN;
- I915_STATE_WARN(cur_state != state,
- "DSI PLL state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-
-static void assert_fdi_tx(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- bool cur_state;
-
- if (HAS_DDI(dev_priv)) {
- /*
- * DDI does not have a specific FDI_TX register.
- *
- * FDI is never fed from EDP transcoder
- * so pipe->transcoder cast is fine here.
- */
- enum transcoder cpu_transcoder = (enum transcoder)pipe;
- u32 val = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder));
- cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
- } else {
- u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
- cur_state = !!(val & FDI_TX_ENABLE);
- }
- I915_STATE_WARN(cur_state != state,
- "FDI TX state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
-#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
-
-static void assert_fdi_rx(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- u32 val;
- bool cur_state;
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
- cur_state = !!(val & FDI_RX_ENABLE);
- I915_STATE_WARN(cur_state != state,
- "FDI RX state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
-#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
-
-static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 val;
-
- /* ILK FDI PLL is always enabled */
- if (IS_IRONLAKE(dev_priv))
- return;
-
- /* On Haswell, DDI ports are responsible for the FDI PLL setup */
- if (HAS_DDI(dev_priv))
- return;
-
- val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
- I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
-}
-
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- u32 val;
- bool cur_state;
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
- cur_state = !!(val & FDI_RX_PLL_ENABLE);
- I915_STATE_WARN(cur_state != state,
- "FDI RX PLL assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-
-void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- i915_reg_t pp_reg;
- u32 val;
- enum pipe panel_pipe = INVALID_PIPE;
- bool locked = true;
-
- if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
- return;
-
- if (HAS_PCH_SPLIT(dev_priv)) {
- u32 port_sel;
-
- pp_reg = PP_CONTROL(0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
-
- switch (port_sel) {
- case PANEL_PORT_SELECT_LVDS:
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
- break;
- case PANEL_PORT_SELECT_DPA:
- g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
- break;
- case PANEL_PORT_SELECT_DPC:
- g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
- break;
- case PANEL_PORT_SELECT_DPD:
- g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
- break;
- default:
- MISSING_CASE(port_sel);
- break;
- }
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- /* presumably write lock depends on pipe, not port select */
- pp_reg = PP_CONTROL(pipe);
- panel_pipe = pipe;
- } else {
- u32 port_sel;
-
- pp_reg = PP_CONTROL(0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
-
- drm_WARN_ON(&dev_priv->drm,
- port_sel != PANEL_PORT_SELECT_LVDS);
- intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
- }
-
- val = intel_de_read(dev_priv, pp_reg);
- if (!(val & PANEL_POWER_ON) ||
- ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
- locked = false;
-
- I915_STATE_WARN(panel_pipe == pipe && locked,
- "panel assertion failure, pipe %c regs locked\n",
- pipe_name(pipe));
-}
-
-void assert_pipe(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder, bool state)
+void assert_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, bool state)
{
bool cur_state;
enum intel_display_power_domain power_domain;
@@ -942,7 +720,7 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
return crtc->pipe;
}
-void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
+void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1003,7 +781,7 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
intel_wait_for_pipe_scanline_moving(crtc);
}
-void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
+void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1053,69 +831,6 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}
-unsigned int
-intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
- unsigned int cpp = fb->format->cpp[color_plane];
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- return intel_tile_size(dev_priv);
- case I915_FORMAT_MOD_X_TILED:
- if (DISPLAY_VER(dev_priv) == 2)
- return 128;
- else
- return 512;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- if (is_ccs_plane(fb, color_plane))
- return 128;
- fallthrough;
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (is_ccs_plane(fb, color_plane))
- return 64;
- fallthrough;
- case I915_FORMAT_MOD_Y_TILED:
- if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
- return 128;
- else
- return 512;
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (is_ccs_plane(fb, color_plane))
- return 128;
- fallthrough;
- case I915_FORMAT_MOD_Yf_TILED:
- switch (cpp) {
- case 1:
- return 64;
- case 2:
- case 4:
- return 128;
- case 8:
- case 16:
- return 256;
- default:
- MISSING_CASE(cpp);
- return cpp;
- }
- break;
- default:
- MISSING_CASE(fb->modifier);
- return cpp;
- }
-}
-
-unsigned int
-intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height)
-{
- unsigned int tile_height = intel_tile_height(fb, color_plane);
-
- return ALIGN(height, tile_height);
-}
-
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
unsigned int size = 0;
@@ -1132,82 +847,16 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info
unsigned int size = 0;
int i;
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
+ if (rem_info->plane_alignment)
+ size = ALIGN(size, rem_info->plane_alignment);
size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
-
- return size;
-}
-
-static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 9)
- return 256 * 1024;
- else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return 128 * 1024;
- else if (DISPLAY_VER(dev_priv) >= 4)
- return 4 * 1024;
- else
- return 0;
-}
-
-static bool has_async_flips(struct drm_i915_private *i915)
-{
- return DISPLAY_VER(i915) >= 5;
-}
-
-unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
- int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
-
- if (intel_fb_uses_dpt(fb))
- return 512 * 4096;
-
- /* AUX_DIST needs only 4K alignment */
- if (is_ccs_plane(fb, color_plane))
- return 4096;
-
- if (is_semiplanar_uv_plane(fb, color_plane)) {
- /*
- * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
- * alignment for linear UV planes on all platforms.
- */
- if (DISPLAY_VER(dev_priv) >= 12) {
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
- return intel_linear_alignment(dev_priv);
-
- return intel_tile_row_size(fb, color_plane);
- }
-
- return 4096;
}
- drm_WARN_ON(&dev_priv->drm, color_plane != 0);
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- return intel_linear_alignment(dev_priv);
- case I915_FORMAT_MOD_X_TILED:
- if (has_async_flips(dev_priv))
- return 256 * 1024;
- return 0;
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- return 16 * 1024;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- return 1 * 1024 * 1024;
- default:
- MISSING_CASE(fb->modifier);
- return 0;
- }
+ return size;
}
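/*
 * Annotation, not patch content: ALIGN() rounds the running size up to the
 * next multiple of plane_alignment, so e.g. with plane_alignment = 1024 a
 * running size of 3000 becomes 3072 before the next plane's
 * dst_stride * height is added.
 */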
-static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1217,198 +866,6 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
-static struct i915_vma *
-intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags,
- struct i915_address_space *vm)
-{
- struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct i915_vma *vma;
- u32 alignment;
- int ret;
-
- if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
- return ERR_PTR(-EINVAL);
-
- alignment = 4096 * 512;
-
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
-
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
-
- vma = i915_vma_instance(obj, vm, view);
- if (IS_ERR(vma))
- goto err;
-
- if (i915_vma_misplaced(vma, 0, alignment, 0)) {
- ret = i915_vma_unbind(vma);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
- }
-
- ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
-
- vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
-
- i915_gem_object_flush_if_display(obj);
-
- i915_vma_get(vma);
-err:
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
- return vma;
-}
-
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- bool phys_cursor,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags)
-{
- struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- intel_wakeref_t wakeref;
- struct i915_gem_ww_ctx ww;
- struct i915_vma *vma;
- unsigned int pinctl;
- u32 alignment;
- int ret;
-
- if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
- return ERR_PTR(-EINVAL);
-
- if (phys_cursor)
- alignment = intel_cursor_alignment(dev_priv);
- else
- alignment = intel_surf_alignment(fb, 0);
- if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
- return ERR_PTR(-EINVAL);
-
- /* Note that the w/a also requires 64 PTE of padding following the
- * bo. We currently fill all unused PTE with the shadow page and so
- * we should always have valid PTE following the scanout preventing
- * the VT-d warning.
- */
- if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
- alignment = 256 * 1024;
-
- /*
- * Global gtt pte registers are special registers which actually forward
- * writes to a chunk of system memory. Which means that there is no risk
- * that the register values disappear as soon as we call
- * intel_runtime_pm_put(), so it is correct to wrap only the
- * pin/unpin/fence and not more.
- */
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
-
- /*
- * Valleyview is definitely limited to scanning out the first
- * 512MiB. Lets presume this behaviour was inherited from the
- * g4x display engine and that all earlier gen are similarly
- * limited. Testing suggests that it is a little more
- * complicated than this. For example, Cherryview appears quite
- * happy to scanout from anywhere within its global aperture.
- */
- pinctl = 0;
- if (HAS_GMCH(dev_priv))
- pinctl |= PIN_MAPPABLE;
-
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- ret = i915_gem_object_lock(obj, &ww);
- if (!ret && phys_cursor)
- ret = i915_gem_object_attach_phys(obj, alignment);
- else if (!ret && HAS_LMEM(dev_priv))
- ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
- /* TODO: Do we need to sync when migration becomes async? */
- if (!ret)
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
- if (!ret) {
- vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
- view, pinctl);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unpin;
- }
- }
-
- if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
- /*
- * Install a fence for tiled scan-out. Pre-i965 always needs a
- * fence, whereas 965+ only requires a fence if using
- * framebuffer compression. For simplicity, we always, when
- * possible, install a fence as the cost is not that onerous.
- *
- * If we fail to fence the tiled scanout, then either the
- * modeset will reject the change (which is highly unlikely as
- * the affected systems, all but one, do not have unmappable
- * space) or we will not be able to enable full powersaving
- * techniques (also likely not to apply due to various limits
- * FBC and the like impose on the size of the buffer, which
- * presumably we violated anyway with this unmappable buffer).
- * Anyway, it is presumably better to stumble onwards with
- * something and try to run the system in a "less than optimal"
- * mode that matches the user configuration.
- */
- ret = i915_vma_pin_fence(vma);
- if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
- i915_vma_unpin(vma);
- goto err_unpin;
- }
- ret = 0;
-
- if (vma->fence)
- *out_flags |= PLANE_HAS_FENCE;
- }
-
- i915_vma_get(vma);
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
-err:
- if (ret == -EDEADLK) {
- ret = i915_gem_ww_ctx_backoff(&ww);
- if (!ret)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
- if (ret)
- vma = ERR_PTR(ret);
-
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- return vma;
-}
-
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
-{
- if (flags & PLANE_HAS_FENCE)
- i915_vma_unpin_fence(vma);
- i915_vma_unpin(vma);
- i915_vma_put(vma);
-}
-
/*
* Convert the x/y offsets into a linear offset.
* Only valid with 0/180 degree rotation, which is fine since linear
@@ -1440,22 +897,6 @@ void intel_add_fb_offsets(int *x, int *y,
*y += state->view.color_plane[color_plane].y;
}
-static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
-{
- switch (fb_modifier) {
- case I915_FORMAT_MOD_X_TILED:
- return I915_TILING_X;
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- return I915_TILING_Y;
- default:
- return I915_TILING_NONE;
- }
-}
-
/*
* From the Sky Lake PRM:
* "The Color Control Surface (CCS) contains the compression status of
@@ -1586,12 +1027,6 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
}
}
-static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
-{
- return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
- 512) * 64;
-}
-
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier)
{
@@ -1616,188 +1051,6 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0);
}
-static
-u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
- u32 pixel_format, u64 modifier)
-{
- /*
- * Arbitrary limit for gen4+ chosen to match the
- * render engine max stride.
- *
- * The new CCS hash mode makes remapping impossible
- */
- if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
- intel_modifier_uses_dpt(dev_priv, modifier))
- return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
- else if (DISPLAY_VER(dev_priv) >= 7)
- return 256 * 1024;
- else
- return 128 * 1024;
-}
-
-static u32
-intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
- u32 tile_width;
-
- if (is_surface_linear(fb, color_plane)) {
- u32 max_stride = intel_plane_fb_max_stride(dev_priv,
- fb->format->format,
- fb->modifier);
-
- /*
- * To make remapping with linear generally feasible
- * we need the stride to be page aligned.
- */
- if (fb->pitches[color_plane] > max_stride &&
- !is_ccs_modifier(fb->modifier))
- return intel_tile_size(dev_priv);
- else
- return 64;
- }
-
- tile_width = intel_tile_width_bytes(fb, color_plane);
- if (is_ccs_modifier(fb->modifier)) {
- /*
- * Display WA #0531: skl,bxt,kbl,glk
- *
- * Render decompression and plane width > 3840
- * combined with horizontal panning requires the
- * plane stride to be a multiple of 4. We'll just
- * require the entire fb to accommodate that to avoid
- * potential runtime errors at plane configuration time.
- */
- if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
- color_plane == 0 && fb->width > 3840)
- tile_width *= 4;
- /*
- * The main surface pitch must be padded to a multiple of four
- * tile widths.
- */
- else if (DISPLAY_VER(dev_priv) >= 12)
- tile_width *= 4;
- }
- return tile_width;
-}
-
-static struct i915_vma *
-initial_plane_vma(struct drm_i915_private *i915,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 base, size;
-
- if (plane_config->size == 0)
- return NULL;
-
- base = round_down(plane_config->base,
- I915_GTT_MIN_ALIGNMENT);
- size = round_up(plane_config->base + plane_config->size,
- I915_GTT_MIN_ALIGNMENT);
- size -= base;
-
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- */
- if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
- size * 2 > i915->stolen_usable_size)
- return NULL;
-
- obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
- if (IS_ERR(obj))
- return NULL;
-
- /*
- * Mark it WT ahead of time to avoid changing the
- * cache_level during fbdev initialization. The
- * unbind there would get stuck waiting for rcu.
- */
- i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
- I915_CACHE_WT : I915_CACHE_NONE);
-
- switch (plane_config->tiling) {
- case I915_TILING_NONE:
- break;
- case I915_TILING_X:
- case I915_TILING_Y:
- obj->tiling_and_stride =
- plane_config->fb->base.pitches[0] |
- plane_config->tiling;
- break;
- default:
- MISSING_CASE(plane_config->tiling);
- goto err_obj;
- }
-
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- goto err_obj;
-
- if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
- goto err_obj;
-
- if (i915_gem_object_is_tiled(obj) &&
- !i915_vma_is_map_and_fenceable(vma))
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return NULL;
-}
-
-static bool
-intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- struct drm_framebuffer *fb = &plane_config->fb->base;
- struct i915_vma *vma;
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- break;
- default:
- drm_dbg(&dev_priv->drm,
- "Unsupported modifier for initial FB: 0x%llx\n",
- fb->modifier);
- return false;
- }
-
- vma = initial_plane_vma(dev_priv, plane_config);
- if (!vma)
- return false;
-
- mode_cmd.pixel_format = fb->format->format;
- mode_cmd.width = fb->width;
- mode_cmd.height = fb->height;
- mode_cmd.pitches[0] = fb->pitches[0];
- mode_cmd.modifier[0] = fb->modifier;
- mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
-
- if (intel_framebuffer_init(to_intel_framebuffer(fb),
- vma->obj, &mode_cmd)) {
- drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
- goto err_vma;
- }
-
- plane_config->vma = vma;
- return true;
-
-err_vma:
- i915_vma_put(vma);
- return false;
-}
-
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
@@ -1833,8 +1086,8 @@ static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
}
}
-static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
- struct intel_plane *plane)
+void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
@@ -1879,168 +1132,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_wait_for_vblank(dev_priv, crtc->pipe);
}
-static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
-{
- struct drm_i915_private *i915 = vm->i915;
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- intel_wakeref_t wakeref;
- struct i915_vma *vma;
- void __iomem *iomem;
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- atomic_inc(&i915->gpu_error.pending_fb_pin);
-
- vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096,
- HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
- if (IS_ERR(vma))
- goto err;
-
- iomem = i915_vma_pin_iomap(vma);
- i915_vma_unpin(vma);
- if (IS_ERR(iomem)) {
- vma = iomem;
- goto err;
- }
-
- dpt->vma = vma;
- dpt->iomem = iomem;
-
- i915_vma_get(vma);
-
-err:
- atomic_dec(&i915->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
- return vma;
-}
-
-static void intel_dpt_unpin(struct i915_address_space *vm)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_vma_unpin_iomap(dpt->vma);
- i915_vma_put(dpt->vma);
-}
-
-static bool
-intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
- const struct intel_initial_plane_config *plane_config,
- struct drm_framebuffer **fb,
- struct i915_vma **vma)
-{
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane =
- to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (!crtc_state->uapi.active)
- continue;
-
- if (!plane_state->ggtt_vma)
- continue;
-
- if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
- *fb = plane_state->hw.fb;
- *vma = plane_state->ggtt_vma;
- return true;
- }
- }
-
- return false;
-}
-
-static void
-intel_find_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane =
- to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct drm_framebuffer *fb;
- struct i915_vma *vma;
-
- /*
- * TODO:
- * Disable planes if get_initial_plane_config() failed.
- * Make sure things work if the surface base is not page aligned.
- */
- if (!plane_config->fb)
- return;
-
- if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
- fb = &plane_config->fb->base;
- vma = plane_config->vma;
- goto valid_fb;
- }
-
- /*
- * Failed to alloc the obj, check to see if we should share
- * an fb with another CRTC instead
- */
- if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
- goto valid_fb;
-
- /*
- * We've failed to reconstruct the BIOS FB. Current display state
- * indicates that the primary plane is visible, but has a NULL FB,
- * which will lead to problems later if we don't fix it up. The
- * simplest solution is to just disable the primary plane now and
- * pretend the BIOS never had it enabled.
- */
- intel_plane_disable_noatomic(crtc, plane);
- if (crtc_state->bigjoiner) {
- struct intel_crtc *slave =
- crtc_state->bigjoiner_linked_crtc;
- intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
- }
-
- return;
-
-valid_fb:
- plane_state->uapi.rotation = plane_config->rotation;
- intel_fb_fill_view(to_intel_framebuffer(fb),
- plane_state->uapi.rotation, &plane_state->view);
-
- __i915_vma_pin(vma);
- plane_state->ggtt_vma = i915_vma_get(vma);
- if (intel_plane_uses_fence(plane_state) &&
- i915_vma_pin_fence(vma) == 0 && vma->fence)
- plane_state->flags |= PLANE_HAS_FENCE;
-
- plane_state->uapi.src_x = 0;
- plane_state->uapi.src_y = 0;
- plane_state->uapi.src_w = fb->width << 16;
- plane_state->uapi.src_h = fb->height << 16;
-
- plane_state->uapi.crtc_x = 0;
- plane_state->uapi.crtc_y = 0;
- plane_state->uapi.crtc_w = fb->width;
- plane_state->uapi.crtc_h = fb->height;
-
- if (plane_config->tiling)
- dev_priv->preserve_bios_swizzle = true;
-
- plane_state->uapi.fb = fb;
- drm_framebuffer_get(fb);
-
- plane_state->uapi.crtc = &crtc->base;
- intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
-
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
-
- atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
-}
-
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
@@ -2449,55 +1540,6 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
-static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
-{
- u32 temp;
-
- temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
- if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
- return;
-
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
- FDI_RX_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
- FDI_RX_ENABLE);
-
- temp &= ~FDI_BC_BIFURCATION_SELECT;
- if (enable)
- temp |= FDI_BC_BIFURCATION_SELECT;
-
- drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
- enable ? "en" : "dis");
- intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
- intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
-}
-
-static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- switch (crtc->pipe) {
- case PIPE_A:
- break;
- case PIPE_B:
- if (crtc_state->fdi_lanes > 2)
- cpt_set_fdi_bc_bifurcation(dev_priv, false);
- else
- cpt_set_fdi_bc_bifurcation(dev_priv, true);
-
- break;
- case PIPE_C:
- cpt_set_fdi_bc_bifurcation(dev_priv, true);
-
- break;
- default:
- BUG();
- }
-}
-
/*
* Finds the encoder associated with the given CRTC. This can only be
* used when we know that the CRTC isn't feeding multiple encoders!
@@ -2547,16 +1589,8 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, pipe);
- if (IS_IVYBRIDGE(dev_priv))
- ivb_update_fdi_bc_bifurcation(crtc_state);
-
- /* Write the TU size bits before fdi link training, so that error
- * detection works. */
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
- intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
-
/* For PCH output, training FDI link */
- dev_priv->display.fdi_link_train(crtc, crtc_state);
+ intel_fdi_link_train(crtc, crtc_state);
/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
@@ -2584,7 +1618,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
- assert_panel_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(dev_priv, pipe);
ilk_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
@@ -2842,6 +1876,46 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
return false;
}
+static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ /* Wa_1604331009:icl,jsl,ehl */
+ if (is_hdr_mode(crtc_state) &&
+ crtc_state->active_planes & BIT(PLANE_CURSOR) &&
+ DISPLAY_VER(dev_priv) == 11)
+ return true;
+
+ return false;
+}
+
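For reference, is_hdr_mode() is not defined in this hunk; judging from the
open-coded test it replaces in bdw_set_pipemisc() further down in this diff,
a plausible sketch (icl_hdr_plane_mask() and BIT(PLANE_CURSOR) are taken from
that removed code, the rest is inferred) is:

/*
 * Sketch only, inferred from the removed open-coded check: "HDR mode"
 * means nothing but HDR-capable planes (plus the cursor) is active.
 */
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}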
+static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
+ enum pipe pipe, bool enable)
+{
+ if (DISPLAY_VER(i915) == 9) {
+ /*
+		 * "Plane N stretch max must be programmed to 11b (x1)
+ * when Async flips are enabled on that plane."
+ */
+ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ SKL_PLANE1_STRETCH_MAX_MASK,
+ enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
+ } else {
+ /* Also needed on HSW/BDW albeit undocumented */
+ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ HSW_PRI_STRETCH_MAX_MASK,
+ enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
+ }
+}
+
+static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return crtc_state->uapi.async_flip && intel_vtd_active() &&
+ (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
+}
+
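Both needs_*_wa() predicates above feed the same arm/disarm pattern used in
the intel_pre_plane_update()/intel_post_plane_update() hunks below: the
workaround is switched on before the planes are reprogrammed and switched
off only once no plane needs it anymore. Schematically (needs_wa() and
apply_wa() are placeholders illustrating the pattern, not driver functions):

/* intel_pre_plane_update(): arm on the rising edge */
if (!needs_wa(old_crtc_state) && needs_wa(new_crtc_state))
	apply_wa(dev_priv, pipe, true);

/* intel_post_plane_update(): disarm on the falling edge */
if (needs_wa(old_crtc_state) && !needs_wa(new_crtc_state))
	apply_wa(dev_priv, pipe, false);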
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
@@ -2869,12 +1943,17 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
- intel_update_watermarks(crtc);
+ intel_update_watermarks(dev_priv);
if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
hsw_enable_ips(new_crtc_state);
intel_fbc_post_update(state, crtc);
+ intel_drrs_page_flip(state, crtc);
+
+ if (needs_async_flip_vtd_wa(old_crtc_state) &&
+ !needs_async_flip_vtd_wa(new_crtc_state))
+ intel_async_flip_vtd_wa(dev_priv, pipe, false);
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
@@ -2883,6 +1962,11 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_scalerclk_wa(old_crtc_state) &&
!needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, false);
+
+ if (needs_cursorclk_wa(old_crtc_state) &&
+ !needs_cursorclk_wa(new_crtc_state))
+ icl_wa_cursorclkgating(dev_priv, pipe, false);
+
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -2969,6 +2053,10 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (intel_fbc_pre_update(state, crtc))
intel_wait_for_vblank(dev_priv, pipe);
+ if (!needs_async_flip_vtd_wa(old_crtc_state) &&
+ needs_async_flip_vtd_wa(new_crtc_state))
+ intel_async_flip_vtd_wa(dev_priv, pipe, true);
+
/* Display WA 827 */
if (!needs_nv12_wa(old_crtc_state) &&
needs_nv12_wa(new_crtc_state))
@@ -2979,6 +2067,11 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, true);
+ /* Wa_1604331009:icl,jsl,ehl */
+ if (!needs_cursorclk_wa(old_crtc_state) &&
+ needs_cursorclk_wa(new_crtc_state))
+ icl_wa_cursorclkgating(dev_priv, pipe, true);
+
/*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
@@ -3022,10 +2115,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* we'll continue to update watermarks the old way, if flags tell
* us to.
*/
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
- else if (new_crtc_state->update_wm_pre)
- intel_update_watermarks(crtc);
+ if (!intel_initial_watermarks(state, crtc))
+ if (new_crtc_state->update_wm_pre)
+ intel_update_watermarks(dev_priv);
}
/*
@@ -3360,9 +2452,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- if (new_crtc_state->has_pch_encoder)
- intel_prepare_shared_dpll(new_crtc_state);
-
if (intel_crtc_has_dp_encoder(new_crtc_state))
intel_dp_set_m_n(new_crtc_state, M1_N1);
@@ -3400,9 +2489,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
- intel_enable_pipe(new_crtc_state);
+ intel_initial_watermarks(state, crtc);
+ intel_enable_transcoder(new_crtc_state);
if (new_crtc_state->has_pch_encoder)
ilk_pch_enable(state, new_crtc_state);
@@ -3578,10 +2666,9 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
&new_crtc_state->fdi_m_n, NULL);
hsw_set_frame_start_delay(new_crtc_state);
- }
- if (!transcoder_is_dsi(cpu_transcoder))
- hsw_set_pipeconf(new_crtc_state);
+ hsw_set_transconf(new_crtc_state);
+ }
crtc->active = true;
@@ -3611,8 +2698,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 11)
icl_set_pipe_chicken(new_crtc_state);
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
+ intel_initial_watermarks(state, crtc);
if (DISPLAY_VER(dev_priv) >= 11) {
const struct intel_dbuf_state *dbuf_state =
@@ -3676,7 +2762,7 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
ilk_pfit_disable(old_crtc_state);
@@ -3738,7 +2824,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
*/
drm_WARN_ON(&dev_priv->drm,
intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
intel_de_write(dev_priv, PFIT_PGM_RATIOS,
crtc_state->gmch_pfit.pgm_ratios);
@@ -3858,11 +2944,7 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
- if (intel_phy_is_tc(dev_priv, phy) &&
- dig_port->tc_mode == TC_PORT_TBT_ALT) {
+ if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
switch (dig_port->aux_ch) {
case AUX_CH_C:
return POWER_DOMAIN_AUX_C_TBT;
@@ -3923,16 +3005,16 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct drm_encoder *encoder;
enum pipe pipe = crtc->pipe;
u64 mask;
- enum transcoder transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->hw.active)
return 0;
mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
- mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
+ mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (crtc_state->pch_pfit.enabled ||
crtc_state->pch_pfit.force_thru)
mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
@@ -3951,7 +3033,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
if (crtc_state->dsc.compression_enable)
- mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
+ mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
return mask;
}
@@ -4015,13 +3097,10 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_pll_enable(state, crtc);
- if (IS_CHERRYVIEW(dev_priv)) {
- chv_prepare_pll(crtc, new_crtc_state);
- chv_enable_pll(crtc, new_crtc_state);
- } else {
- vlv_prepare_pll(crtc, new_crtc_state);
- vlv_enable_pll(crtc, new_crtc_state);
- }
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_enable_pll(new_crtc_state);
+ else
+ vlv_enable_pll(new_crtc_state);
intel_encoders_pre_enable(state, crtc);
@@ -4032,25 +3111,14 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
- dev_priv->display.initial_watermarks(state, crtc);
- intel_enable_pipe(new_crtc_state);
+ intel_initial_watermarks(state, crtc);
+ intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
intel_encoders_enable(state, crtc);
}
-static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- intel_de_write(dev_priv, FP0(crtc->pipe),
- crtc_state->dpll_hw_state.fp0);
- intel_de_write(dev_priv, FP1(crtc->pipe),
- crtc_state->dpll_hw_state.fp1);
-}
-
static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -4062,8 +3130,6 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
- i9xx_set_pll_dividers(new_crtc_state);
-
if (intel_crtc_has_dp_encoder(new_crtc_state))
intel_dp_set_m_n(new_crtc_state, M1_N1);
@@ -4079,7 +3145,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_enable(state, crtc);
- i9xx_enable_pll(crtc, new_crtc_state);
+ i9xx_enable_pll(new_crtc_state);
i9xx_pfit_enable(new_crtc_state);
@@ -4088,11 +3154,9 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
- else
- intel_update_watermarks(crtc);
- intel_enable_pipe(new_crtc_state);
+ if (!intel_initial_watermarks(state, crtc))
+ intel_update_watermarks(dev_priv);
+ intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
@@ -4111,7 +3175,7 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->gmch_pfit.control)
return;
- assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
intel_de_read(dev_priv, PFIT_CONTROL));
@@ -4137,7 +3201,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
i9xx_pfit_disable(old_crtc_state);
@@ -4157,8 +3221,8 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) != 2)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- if (!dev_priv->display.initial_watermarks)
- intel_update_watermarks(crtc);
+ if (!dev_priv->wm_disp->initial_watermarks)
+ intel_update_watermarks(dev_priv);
/* clock the pipe down to 640x480@60 to potentially save power */
if (IS_I830(dev_priv))
@@ -4211,7 +3275,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
- dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
+ dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
@@ -4234,12 +3298,11 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
encoder->base.crtc = NULL;
intel_fbc_disable(crtc);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(dev_priv);
intel_disable_shared_dpll(crtc_state);
intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
- dev_priv->active_pipes &= ~BIT(pipe);
cdclk_state->min_cdclk[pipe] = 0;
cdclk_state->min_voltage_level[pipe] = 0;
cdclk_state->active_pipes &= ~BIT(pipe);
@@ -4596,13 +3659,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
}
- /* Cantiga+ cannot handle modes with a hsync front porch of 0.
- * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
- */
- if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
- pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
- return -EINVAL;
-
intel_crtc_compute_pixel_rate(pipe_config);
if (pipe_config->has_pch_encoder)
@@ -5412,102 +4468,6 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
BUG_ON(val != final);
}
-static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
-
- if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
-
- if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
-}
-
-/* WaMPhyProgramming:hsw */
-static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
- tmp &= ~(0xFF << 24);
- tmp |= (0x12 << 24);
- intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
- tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
- tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
- tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
- tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
- tmp &= ~0xFF;
- tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
- tmp &= ~0xFF;
- tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
- tmp &= ~(0xFF << 16);
- tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
- tmp &= ~(0xFF << 16);
- tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
-}
-
/* Implements 3 different sequences from BSpec chapter "Display iCLK
* Programming" based on the parameters passed:
* - Sequence to enable CLKOUT_DP
@@ -5540,10 +4500,8 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
tmp &= ~SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- if (with_fdi) {
- lpt_reset_fdi_mphy(dev_priv);
- lpt_program_fdi_mphy(dev_priv);
- }
+ if (with_fdi)
+ lpt_fdi_program_mphy(dev_priv);
}
reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
@@ -5806,7 +4764,7 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
-static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -5870,9 +4828,7 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
val |= PIPEMISC_YUV420_ENABLE |
PIPEMISC_YUV420_MODE_FULL_BLEND;
- if (DISPLAY_VER(dev_priv) >= 11 &&
- (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
- BIT(PLANE_CURSOR))) == 0)
+ if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
val |= PIPEMISC_HDR_MODE_PRECISION;
if (DISPLAY_VER(dev_priv) >= 12)
@@ -6211,59 +5167,64 @@ out:
return ret;
}
-static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- struct intel_display_power_domain_set *power_domain_set)
+static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
- unsigned long enabled_panel_transcoders = 0;
- enum transcoder panel_transcoder;
- u32 tmp;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 tmp = 0;
- if (DISPLAY_VER(dev_priv) >= 11)
- panel_transcoder_mask |=
- BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- /*
- * The pipe->transcoder mapping is fixed with the exception of the eDP
- * and DSI transcoders handled below.
- */
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ return tmp & TRANS_DDI_FUNC_ENABLE;
+}
+
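transcoder_ddi_func_is_enabled() reads TRANS_DDI_FUNC_CTL only while the
transcoder's power well is up, otherwise tmp keeps its 0 initializer. The
with_intel_display_power_if_enabled() macro is not shown in this diff; a
plausible expansion, assuming the usual for-loop wrapper pairing a
conditional wakeref get with a put (the exact release flavor is an
assumption):

#define with_intel_display_power_if_enabled(i915, domain, wf)		\
	for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); \
	     (wf);							\
	     intel_display_power_put((i915), (domain), (wf)), (wf) = 0)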
+static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
+{
+ u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
+
+ if (DISPLAY_VER(i915) >= 11)
+ panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
+ return panel_transcoder_mask;
+}
+
+static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
+ enum transcoder cpu_transcoder;
+ u8 enabled_transcoders = 0;
/*
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
panel_transcoder_mask) {
- bool force_thru = false;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
enum pipe trans_pipe;
+ u32 tmp = 0;
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(panel_transcoder));
- if (!(tmp & TRANS_DDI_FUNC_ENABLE))
- continue;
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- /*
- * Log all enabled ones, only use the first one.
- *
- * FIXME: This won't work for two separate DSI displays.
- */
- enabled_panel_transcoders |= BIT(panel_transcoder);
- if (enabled_panel_transcoders != BIT(panel_transcoder))
+ if (!(tmp & TRANS_DDI_FUNC_ENABLE))
continue;
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
drm_WARN(dev, 1,
"unknown pipe linked to transcoder %s\n",
- transcoder_name(panel_transcoder));
+ transcoder_name(cpu_transcoder));
fallthrough;
case TRANS_DDI_EDP_INPUT_A_ONOFF:
- force_thru = true;
- fallthrough;
case TRANS_DDI_EDP_INPUT_A_ON:
trans_pipe = PIPE_A;
break;
@@ -6278,22 +5239,83 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
break;
}
- if (trans_pipe == crtc->pipe) {
- pipe_config->cpu_transcoder = panel_transcoder;
- pipe_config->pch_pfit.force_thru = force_thru;
- }
+ if (trans_pipe == crtc->pipe)
+ enabled_transcoders |= BIT(cpu_transcoder);
}
+ cpu_transcoder = (enum transcoder) crtc->pipe;
+ if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ enabled_transcoders |= BIT(cpu_transcoder);
+
+ return enabled_transcoders;
+}
+
+static bool has_edp_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & BIT(TRANSCODER_EDP);
+}
+
+static bool has_dsi_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
+ BIT(TRANSCODER_DSI_1));
+}
+
+static bool has_pipe_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
+ BIT(TRANSCODER_DSI_0) |
+ BIT(TRANSCODER_DSI_1));
+}
+
+static void assert_enabled_transcoders(struct drm_i915_private *i915,
+ u8 enabled_transcoders)
+{
+ /* Only one type of transcoder please */
+ drm_WARN_ON(&i915->drm,
+ has_edp_transcoders(enabled_transcoders) +
+ has_dsi_transcoders(enabled_transcoders) +
+ has_pipe_transcoders(enabled_transcoders) > 1);
+
+ /* Only DSI transcoders can be ganged */
+ drm_WARN_ON(&i915->drm,
+ !has_dsi_transcoders(enabled_transcoders) &&
+ !is_power_of_2(enabled_transcoders));
+}
+
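A few illustrative masks for the two assertions above; is_power_of_2() on a
bitmask means "exactly one bit set" (values hypothetical):

/*
 * BIT(TRANSCODER_A)                              OK: single transcoder
 * BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1)  OK: ganged DSI is allowed
 * BIT(TRANSCODER_A) | BIT(TRANSCODER_B)          WARN: !is_power_of_2(), no DSI
 * BIT(TRANSCODER_EDP) | BIT(TRANSCODER_A)        WARN: mixed transcoder types
 */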
+static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ struct intel_display_power_domain_set *power_domain_set)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned long enabled_transcoders;
+ u32 tmp;
+
+ enabled_transcoders = hsw_enabled_transcoders(crtc);
+ if (!enabled_transcoders)
+ return false;
+
+ assert_enabled_transcoders(dev_priv, enabled_transcoders);
+
/*
- * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+ * With the exception of DSI we should only ever have
+ * a single enabled transcoder. With DSI let's just
+ * pick the first one.
*/
- drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
- enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+ pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
+ if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+ if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
+ pipe_config->pch_pfit.force_thru = true;
+ }
+
tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
return tmp & PIPECONF_ENABLE;
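Note on the transcoder selection above: ffs() returns the 1-based index of
the least significant set bit, so ffs(enabled_transcoders) - 1 picks the
lowest-numbered enabled transcoder. For example:

/*
 * Ganged DSI case:
 *   enabled_transcoders = BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
 *   ffs(enabled_transcoders) - 1 == TRANSCODER_DSI_0
 */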
@@ -6515,7 +5537,7 @@ static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->display.get_pipe_config(crtc, crtc_state))
+ if (!i915->display->get_pipe_config(crtc, crtc_state))
return false;
crtc_state->hw.active = true;
@@ -6531,28 +5553,6 @@ static const struct drm_display_mode load_detect_mode = {
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
-struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct intel_framebuffer *intel_fb;
- int ret;
-
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb)
- return ERR_PTR(-ENOMEM);
-
- ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
- if (ret)
- goto err;
-
- return &intel_fb->base;
-
-err:
- kfree(intel_fb);
- return ERR_PTR(ret);
-}
-
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
@@ -6780,7 +5780,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
struct dpll clock;
@@ -6830,11 +5829,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
- u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
- LVDS);
- bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
+ enum pipe lvds_pipe;
+
+ if (IS_I85X(dev_priv) &&
+ intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ lvds_pipe == crtc->pipe) {
+ u32 lvds = intel_de_read(dev_priv, LVDS);
- if (is_lvds) {
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -6985,27 +5986,27 @@ static bool needs_scaling(const struct intel_plane_state *state)
}
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *crtc_state,
+ struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *plane_state)
+ struct intel_plane_state *new_plane_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool mode_changed = intel_crtc_needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
bool was_crtc_enabled = old_crtc_state->hw.active;
- bool is_crtc_enabled = crtc_state->hw.active;
+ bool is_crtc_enabled = new_crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
int ret;
if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
- ret = skl_update_scaler_plane(crtc_state, plane_state);
+ ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
}
was_visible = old_plane_state->uapi.visible;
- visible = plane_state->uapi.visible;
+ visible = new_plane_state->uapi.visible;
if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
was_visible = false;
@@ -7021,7 +6022,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- intel_plane_set_invisible(crtc_state, plane_state);
+ intel_plane_set_invisible(new_crtc_state, new_plane_state);
visible = false;
}
@@ -7040,28 +6041,28 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
if (turn_on) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- crtc_state->update_wm_pre = true;
+ new_crtc_state->update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- crtc_state->disable_cxsr = true;
+ new_crtc_state->disable_cxsr = true;
} else if (turn_off) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- crtc_state->update_wm_post = true;
+ new_crtc_state->update_wm_post = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- crtc_state->disable_cxsr = true;
- } else if (intel_wm_need_update(old_plane_state, plane_state)) {
+ new_crtc_state->disable_cxsr = true;
+ } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
- crtc_state->update_wm_pre = true;
- crtc_state->update_wm_post = true;
+ new_crtc_state->update_wm_pre = true;
+ new_crtc_state->update_wm_post = true;
}
}
if (visible || was_visible)
- crtc_state->fb_bits |= plane->frontbuffer_bit;
+ new_crtc_state->fb_bits |= plane->frontbuffer_bit;
/*
* ILK/SNB DVSACNTR/Sprite Enable
@@ -7100,8 +6101,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
(IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
IS_IVYBRIDGE(dev_priv)) &&
(turn_on || (!needs_scaling(old_plane_state) &&
- needs_scaling(plane_state))))
- crtc_state->disable_lp_wm = true;
+ needs_scaling(new_plane_state))))
+ new_crtc_state->disable_lp_wm = true;
return 0;
}
@@ -7363,10 +6364,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
crtc_state->update_wm_post = true;
if (mode_changed && crtc_state->hw.enable &&
- dev_priv->display.crtc_compute_clock &&
+ dev_priv->dpll_funcs &&
!crtc_state->bigjoiner_slave &&
!drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
- ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
+ ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
if (ret)
return ret;
}
@@ -7385,32 +6386,23 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- if (dev_priv->display.compute_pipe_wm) {
- ret = dev_priv->display.compute_pipe_wm(state, crtc);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Target pipe watermarks are invalid\n");
- return ret;
- }
-
+ ret = intel_compute_pipe_wm(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Target pipe watermarks are invalid\n");
+ return ret;
}
- if (dev_priv->display.compute_intermediate_wm) {
- if (drm_WARN_ON(&dev_priv->drm,
- !dev_priv->display.compute_pipe_wm))
- return 0;
-
- /*
- * Calculate 'intermediate' watermarks that satisfy both the
- * old state and the new state. We can program these
- * immediately.
- */
- ret = dev_priv->display.compute_intermediate_wm(state, crtc);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "No valid intermediate pipe watermarks are possible\n");
- return ret;
- }
+ /*
+ * Calculate 'intermediate' watermarks that satisfy both the
+ * old state and the new state. We can program these
+ * immediately.
+ */
+ ret = intel_compute_intermediate_wm(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No valid intermediate pipe watermarks are possible\n");
+ return ret;
}
if (DISPLAY_VER(dev_priv) >= 9) {
@@ -7439,11 +6431,9 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}
- if (!mode_changed) {
- ret = intel_psr2_sel_fetch_update(state, crtc);
- if (ret)
- return ret;
- }
+ ret = intel_psr2_sel_fetch_update(state, crtc);
+ if (ret)
+ return ret;
return 0;
}
@@ -8153,11 +7143,10 @@ encoder_retry:
ret = encoder->compute_config(encoder, pipe_config,
connector_state);
+ if (ret == -EDEADLK)
+ return ret;
if (ret < 0) {
- if (ret != -EDEADLK)
- drm_dbg_kms(&i915->drm,
- "Encoder config failure: %d\n",
- ret);
+ drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
return ret;
}
}
@@ -8171,12 +7160,7 @@ encoder_retry:
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
if (ret == -EDEADLK)
return ret;
- if (ret < 0) {
- drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
- return ret;
- }
-
- if (ret == I915_DISPLAY_CONFIG_RETRY) {
+ if (ret == -EAGAIN) {
if (drm_WARN(&i915->drm, !retry,
"loop in pipe configuration computation\n"))
return -EINVAL;
@@ -8185,6 +7169,10 @@ encoder_retry:
retry = false;
goto encoder_retry;
}
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
+ return ret;
+ }
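With this reordering the error handling in the encoder_retry loop follows a
fixed precedence; summarizing the contract as it now stands in this function:

/*
 * -EDEADLK : locking contention, propagate so the atomic core can back
 *            off and retry the commit
 * -EAGAIN  : the config wants another pass; retry the encoder loop once
 *            (a second -EAGAIN trips the "loop in pipe configuration
 *            computation" warning)
 * < 0      : hard failure, logged as "CRTC config failure"
 */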
/* Dithering seems to not pass-through bits correctly when it should, so
* only enable it on 6bpc panels and when its not a compliance
@@ -8720,10 +7708,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (bp_gamma)
PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
- PIPE_CONF_CHECK_BOOL(has_psr);
- PIPE_CONF_CHECK_BOOL(has_psr2);
- PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
- PIPE_CONF_CHECK_I(dc3co_exitline);
+ if (current_config->active_planes) {
+ PIPE_CONF_CHECK_BOOL(has_psr);
+ PIPE_CONF_CHECK_BOOL(has_psr2);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
+ PIPE_CONF_CHECK_I(dc3co_exitline);
+ }
}
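The PIPE_CONF_CHECK_* helpers used throughout this comparator are macros
defined elsewhere in this file; a plausible shape for PIPE_CONF_CHECK_BOOL,
assuming it compares the two states and records any mismatch (the
pipe_config_mismatch() call and the fastset/ret locals are assumptions
here):

#define PIPE_CONF_CHECK_BOOL(name) do {					\
	if (current_config->name != pipe_config->name) {		\
		pipe_config_mismatch(fastset, crtc, __stringify(name),	\
				     "(expected %d, found %d)",		\
				     current_config->name,		\
				     pipe_config->name);		\
		ret = false;						\
	}								\
} while (0)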
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -8780,7 +7770,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(min_voltage_level);
}
- if (fastset && (current_config->has_psr || pipe_config->has_psr))
+ if (current_config->has_psr || pipe_config->has_psr)
PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
~intel_hdmi_infoframe_enable(DP_SDP_VSC));
else
@@ -9402,7 +8392,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
struct intel_crtc *crtc;
int i;
- if (!dev_priv->display.crtc_compute_clock)
+ if (!dev_priv->dpll_funcs)
return;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -9503,23 +8493,6 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
return 0;
}
-/*
- * Handle calculation of various watermark data at the end of the atomic check
- * phase. The code here should be run after the per-crtc and per-plane 'check'
- * handlers to ensure that all derived state has been updated.
- */
-static int calc_watermark_data(struct intel_atomic_state *state)
-{
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- /* Is there platform-specific watermark information to calculate? */
- if (dev_priv->display.compute_global_watermarks)
- return dev_priv->display.compute_global_watermarks(state);
-
- return 0;
-}
-
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
@@ -9627,13 +8600,28 @@ static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
return 0;
}
+static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0;
+}
+
+static bool pxp_is_borked(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
+}
+
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_plane_state *plane_state;
struct intel_plane *plane;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane_state *old_plane_state;
struct intel_crtc *crtc;
+ const struct drm_framebuffer *fb;
int i, ret;
ret = icl_add_linked_planes(state);
@@ -9681,6 +8669,19 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state)
return ret;
}
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ new_plane_state = intel_atomic_get_new_plane_state(state, plane);
+ old_plane_state = intel_atomic_get_old_plane_state(state, plane);
+ fb = new_plane_state->hw.fb;
+ if (fb) {
+ new_plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
+ new_plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
+ } else {
+ new_plane_state->decrypt = old_plane_state->decrypt;
+ new_plane_state->force_black = old_plane_state->force_black;
+ }
+ }
+
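The per-plane outcome of the loop above, restated:

/*
 * new fb present: decrypt     = bo_has_valid_encryption(obj)
 *                 force_black = protected object whose key no longer
 *                               checks out (pxp_is_borked())
 * no fb:          both flags inherited from old_plane_state
 */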
return 0;
}
@@ -9715,7 +8716,7 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
*need_cdclk_calc = true;
- ret = dev_priv->display.bw_calc_min_cdclk(state);
+ ret = intel_cdclk_bw_calc_min_cdclk(state);
if (ret)
return ret;
@@ -9967,6 +8968,10 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
return -EINVAL;
}
+
+			/* plane decryption is allowed to change only in synchronous flips */
+ if (old_plane_state->decrypt != new_plane_state->decrypt)
+ return -EINVAL;
}
return 0;
@@ -10166,7 +9171,7 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
intel_fbc_choose_crtc(dev_priv, state);
- ret = calc_watermark_data(state);
+ ret = intel_compute_global_watermarks(state);
if (ret)
goto fail;
@@ -10336,12 +9341,11 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
if (new_crtc_state->update_pipe)
intel_pipe_fastset(old_crtc_state, new_crtc_state);
-
- intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
}
- if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(state, crtc);
+ intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
+
+ intel_atomic_update_watermarks(state, crtc);
}
static void commit_pipe_post_planes(struct intel_atomic_state *state,
@@ -10373,7 +9377,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display.crtc_enable(state, crtc);
+ dev_priv->display->crtc_enable(state, crtc);
if (new_crtc_state->bigjoiner_slave)
return;
@@ -10404,10 +9408,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_encoders_update_pipe(state, crtc);
}
- if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
- intel_fbc_disable(crtc);
- else
- intel_fbc_enable(state, crtc);
+ intel_fbc_update(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -10464,16 +9465,15 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
*/
intel_crtc_disable_pipe_crc(crtc);
- dev_priv->display.crtc_disable(state, crtc);
+ dev_priv->display->crtc_disable(state, crtc);
crtc->active = false;
intel_fbc_disable(crtc);
intel_disable_shared_dpll(old_crtc_state);
/* FIXME unify this for all platforms */
if (!new_crtc_state->hw.active &&
- !HAS_GMCH(dev_priv) &&
- dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
+ !HAS_GMCH(dev_priv))
+ intel_initial_watermarks(state, crtc);
}
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
@@ -10837,6 +9837,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_encoders_update_prepare(state);
intel_dbuf_pre_plane_update(state);
+ intel_psr_pre_plane_update(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
@@ -10844,7 +9845,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.commit_modeset_enables(state);
+ dev_priv->display->commit_modeset_enables(state);
if (state->modeset) {
intel_encoders_update_complete(state);
@@ -10895,11 +9896,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(state, crtc);
+ intel_optimize_watermarks(state, crtc);
}
intel_dbuf_post_plane_update(state);
+ intel_psr_post_plane_update(state);
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
@@ -11088,280 +10089,6 @@ static int intel_atomic_commit(struct drm_device *dev,
return 0;
}
-struct wait_rps_boost {
- struct wait_queue_entry wait;
-
- struct drm_crtc *crtc;
- struct i915_request *request;
-};
-
-static int do_rps_boost(struct wait_queue_entry *_wait,
- unsigned mode, int sync, void *key)
-{
- struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct i915_request *rq = wait->request;
-
- /*
- * If we missed the vblank, but the request is already running it
- * is reasonable to assume that it will complete before the next
- * vblank without our intervention, so leave RPS alone.
- */
- if (!i915_request_started(rq))
- intel_rps_boost(rq);
- i915_request_put(rq);
-
- drm_crtc_vblank_put(wait->crtc);
-
- list_del(&wait->wait.entry);
- kfree(wait);
- return 1;
-}
-
-static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
- struct wait_rps_boost *wait;
-
- if (!dma_fence_is_i915(fence))
- return;
-
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
- return;
-
- if (drm_crtc_vblank_get(crtc))
- return;
-
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- drm_crtc_vblank_put(crtc);
- return;
- }
-
- wait->request = to_request(dma_fence_get(fence));
- wait->crtc = crtc;
-
- wait->wait.func = do_rps_boost;
- wait->wait.flags = 0;
-
- add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
-}
-
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_framebuffer *fb = plane_state->hw.fb;
- struct i915_vma *vma;
- bool phys_cursor =
- plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->display.cursor_needs_physical;
-
- if (!intel_fb_uses_dpt(fb)) {
- vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
- &plane_state->view.gtt,
- intel_plane_uses_fence(plane_state),
- &plane_state->flags);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- plane_state->ggtt_vma = vma;
- } else {
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- vma = intel_dpt_pin(intel_fb->dpt_vm);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- plane_state->ggtt_vma = vma;
-
- vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
- &plane_state->flags, intel_fb->dpt_vm);
- if (IS_ERR(vma)) {
- intel_dpt_unpin(intel_fb->dpt_vm);
- plane_state->ggtt_vma = NULL;
- return PTR_ERR(vma);
- }
-
- plane_state->dpt_vma = vma;
-
- WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
- }
-
- return 0;
-}
-
-void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
-{
- struct drm_framebuffer *fb = old_plane_state->hw.fb;
- struct i915_vma *vma;
-
- if (!intel_fb_uses_dpt(fb)) {
- vma = fetch_and_zero(&old_plane_state->ggtt_vma);
- if (vma)
- intel_unpin_fb_vma(vma, old_plane_state->flags);
- } else {
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- vma = fetch_and_zero(&old_plane_state->dpt_vma);
- if (vma)
- intel_unpin_fb_vma(vma, old_plane_state->flags);
-
- vma = fetch_and_zero(&old_plane_state->ggtt_vma);
- if (vma)
- intel_dpt_unpin(intel_fb->dpt_vm);
- }
-}
-
-/**
- * intel_prepare_plane_fb - Prepare fb for usage on plane
- * @_plane: drm plane to prepare for
- * @_new_plane_state: the plane state being prepared
- *
- * Prepares a framebuffer for usage on a display plane. Generally this
- * involves pinning the underlying object and updating the frontbuffer tracking
- * bits. Some older platforms need special physical address handling for
- * cursor planes.
- *
- * Returns 0 on success, negative error code on failure.
- */
-int
-intel_prepare_plane_fb(struct drm_plane *_plane,
- struct drm_plane_state *_new_plane_state)
-{
- struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_plane_state *new_plane_state =
- to_intel_plane_state(_new_plane_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(new_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct intel_plane_state *old_plane_state =
- intel_atomic_get_old_plane_state(state, plane);
- struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
- int ret;
-
- if (old_obj) {
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state,
- to_intel_crtc(old_plane_state->hw.crtc));
-
- /* Big Hammer, we also need to ensure that any pending
- * MI_WAIT_FOR_EVENT inside a user batch buffer on the
- * current scanout is retired before unpinning the old
- * framebuffer. Note that we rely on userspace rendering
- * into the buffer attached to the pipe they are waiting
- * on. If not, userspace generates a GPU hang with IPEHR
-	 * pointing to the MI_WAIT_FOR_EVENT.
- *
- * This should only fail upon a hung GPU, in which case we
- * can safely continue.
- */
- if (intel_crtc_needs_modeset(crtc_state)) {
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- old_obj->base.resv, NULL,
- false, 0,
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
- }
-
- if (new_plane_state->uapi.fence) { /* explicit fencing */
- i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
- &attr);
- ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
- new_plane_state->uapi.fence,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
- if (!obj)
- return 0;
-
-
- ret = intel_plane_pin_fb(new_plane_state);
- if (ret)
- return ret;
-
- i915_gem_object_wait_priority(obj, 0, &attr);
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
-
- if (!new_plane_state->uapi.fence) { /* implicit fencing */
- struct dma_fence *fence;
-
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- obj->base.resv, NULL,
- false,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- goto unpin_fb;
-
- fence = dma_resv_get_excl_unlocked(obj->base.resv);
- if (fence) {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
- dma_fence_put(fence);
- }
- } else {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- new_plane_state->uapi.fence);
- }
-
- /*
- * We declare pageflips to be interactive and so merit a small bias
- * towards upclocking to deliver the frame on time. By only changing
- * the RPS thresholds to sample more regularly and aim for higher
- * clocks we can hopefully deliver low power workloads (like kodi)
- * that are not quite steady state without resorting to forcing
- * maximum clocks following a vblank miss (see do_rps_boost()).
- */
- if (!state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, true);
- state->rps_interactive = true;
- }
-
- return 0;
-
-unpin_fb:
- intel_plane_unpin_fb(new_plane_state);
-
- return ret;
-}
-
-/**
- * intel_cleanup_plane_fb - Cleans up an fb after plane use
- * @plane: drm plane to clean up for
- * @_old_plane_state: the state from the previous modeset
- *
- * Cleans up a framebuffer that has just been removed from a plane.
- */
-void
-intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *_old_plane_state)
-{
- struct intel_plane_state *old_plane_state =
- to_intel_plane_state(_old_plane_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(old_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
-
- if (!obj)
- return;
-
- if (state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, false);
- state->rps_interactive = false;
- }
-
- /* Should only be called after a successful intel_prepare_plane_fb()! */
- intel_plane_unpin_fb(old_plane_state);
-}
-
/**
* intel_plane_destroy - destroy a plane
* @plane: plane to destroy
@@ -11491,6 +10218,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_TC2);
intel_ddi_init(dev_priv, PORT_TC3);
intel_ddi_init(dev_priv, PORT_TC4);
+ icl_dsi_init(dev_priv);
} else if (IS_ALDERLAKE_S(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_TC1);
@@ -11708,249 +10436,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
-static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- drm_framebuffer_cleanup(fb);
-
- if (intel_fb_uses_dpt(fb))
- intel_dpt_destroy(intel_fb->dpt_vm);
-
- intel_frontbuffer_put(intel_fb->frontbuffer);
-
- kfree(intel_fb);
-}
-
-static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned int *handle)
-{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- if (i915_gem_object_is_userptr(obj)) {
- drm_dbg(&i915->drm,
- "attempting to use a userptr for a framebuffer, denied\n");
- return -EINVAL;
- }
-
- return drm_gem_handle_create(file, &obj->base, handle);
-}
-
-static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
-{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-
- i915_gem_object_flush_if_display(obj);
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
-
- return 0;
-}
-
-static const struct drm_framebuffer_funcs intel_fb_funcs = {
- .destroy = intel_user_framebuffer_destroy,
- .create_handle = intel_user_framebuffer_create_handle,
- .dirty = intel_user_framebuffer_dirty,
-};
-
-static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
- struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct drm_framebuffer *fb = &intel_fb->base;
- u32 max_stride;
- unsigned int tiling, stride;
- int ret = -EINVAL;
- int i;
-
- intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
-
- i915_gem_object_lock(obj, NULL);
- tiling = i915_gem_object_get_tiling(obj);
- stride = i915_gem_object_get_stride(obj);
- i915_gem_object_unlock(obj);
-
- if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
- /*
- * If there's a fence, enforce that
- * the fb modifier and tiling mode match.
- */
- if (tiling != I915_TILING_NONE &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode doesn't match fb modifier\n");
- goto err;
- }
- } else {
- if (tiling == I915_TILING_X) {
- mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
- } else if (tiling == I915_TILING_Y) {
- drm_dbg_kms(&dev_priv->drm,
- "No Y tiling for legacy addfb\n");
- goto err;
- }
- }
-
- if (!drm_any_plane_has_format(&dev_priv->drm,
- mode_cmd->pixel_format,
- mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "unsupported pixel format %p4cc / modifier 0x%llx\n",
- &mode_cmd->pixel_format, mode_cmd->modifier[0]);
- goto err;
- }
-
- /*
- * gen2/3 display engine uses the fence if present,
- * so the tiling mode must match the fb modifier exactly.
- */
- if (DISPLAY_VER(dev_priv) < 4 &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode must match fb modifier exactly on gen2/3\n");
- goto err;
- }
-
- max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
- if (mode_cmd->pitches[0] > max_stride) {
- drm_dbg_kms(&dev_priv->drm,
- "%s pitch (%u) must be at most %d\n",
- mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
- "tiled" : "linear",
- mode_cmd->pitches[0], max_stride);
- goto err;
- }
-
- /*
- * If there's a fence, enforce that
- * the fb pitch and fence stride match.
- */
- if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- drm_dbg_kms(&dev_priv->drm,
- "pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
- goto err;
- }
-
- /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
- if (mode_cmd->offsets[0] != 0) {
- drm_dbg_kms(&dev_priv->drm,
- "plane 0 offset (0x%08x) must be 0\n",
- mode_cmd->offsets[0]);
- goto err;
- }
-
- drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
-
- for (i = 0; i < fb->format->num_planes; i++) {
- u32 stride_alignment;
-
- if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
- drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
- i);
- goto err;
- }
-
- stride_alignment = intel_fb_stride_alignment(fb, i);
- if (fb->pitches[i] & (stride_alignment - 1)) {
- drm_dbg_kms(&dev_priv->drm,
- "plane %d pitch (%d) must be at least %u byte aligned\n",
- i, fb->pitches[i], stride_alignment);
- goto err;
- }
-
- if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
- int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
-
- if (fb->pitches[i] != ccs_aux_stride) {
- drm_dbg_kms(&dev_priv->drm,
- "ccs aux plane %d pitch (%d) must be %d\n",
- i,
- fb->pitches[i], ccs_aux_stride);
- goto err;
- }
- }
-
- /* TODO: Add POT stride remapping support for CCS formats as well. */
- if (IS_ALDERLAKE_P(dev_priv) &&
- mode_cmd->modifier[i] != DRM_FORMAT_MOD_LINEAR &&
- !intel_fb_needs_pot_stride_remap(intel_fb) &&
- !is_power_of_2(mode_cmd->pitches[i])) {
- drm_dbg_kms(&dev_priv->drm,
- "plane %d pitch (%d) must be power of two for tiled buffers\n",
- i, mode_cmd->pitches[i]);
- goto err;
- }
-
- fb->obj[i] = &obj->base;
- }
-
- ret = intel_fill_fb_info(dev_priv, intel_fb);
- if (ret)
- goto err;
-
- if (intel_fb_uses_dpt(fb)) {
- struct i915_address_space *vm;
-
- vm = intel_dpt_create(intel_fb);
- if (IS_ERR(vm)) {
- ret = PTR_ERR(vm);
- goto err;
- }
-
- intel_fb->dpt_vm = vm;
- }
-
- ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
- if (ret) {
- drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
- goto err;
- }
-
- return 0;
-
-err:
- intel_frontbuffer_put(intel_fb->frontbuffer);
- return ret;
-}
-
-static struct drm_framebuffer *
-intel_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *user_mode_cmd)
-{
- struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- struct drm_i915_private *i915;
-
- obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- /* object is backed with LMEM for discrete */
- i915 = to_i915(obj->base.dev);
- if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
- /* object is "remote", not in local memory */
- i915_gem_object_put(obj);
- return ERR_PTR(-EREMOTE);
- }
-
- fb = intel_framebuffer_create(obj, &mode_cmd);
- i915_gem_object_put(obj);
-
- return fb;
-}
-
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -12039,6 +10524,14 @@ intel_mode_valid(struct drm_device *dev,
return MODE_V_ILLEGAL;
}
+ /*
+ * Cantiga+ cannot handle modes with a hsync front porch of 0.
+ * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
+ */
+ if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+ mode->hsync_start == mode->hdisplay)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
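
The added check rejects any mode whose horizontal front porch is zero,
i.e. hsync_start == hdisplay. A minimal sketch of the two cases, with
hypothetical timings (not taken from any real EDID):

	/* rejected: front porch of 0, hsync starts on the last active pixel */
	struct drm_display_mode bad = { .hdisplay = 1920, .hsync_start = 1920 };

	/* accepted: an 88 pixel front porch */
	struct drm_display_mode good = { .hdisplay = 1920, .hsync_start = 2008 };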
@@ -12090,6 +10583,46 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.atomic_state_free = intel_atomic_state_free,
};
+static const struct drm_i915_display_funcs skl_display_funcs = {
+ .get_pipe_config = hsw_get_pipe_config,
+ .crtc_enable = hsw_crtc_enable,
+ .crtc_disable = hsw_crtc_disable,
+ .commit_modeset_enables = skl_commit_modeset_enables,
+ .get_initial_plane_config = skl_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs ddi_display_funcs = {
+ .get_pipe_config = hsw_get_pipe_config,
+ .crtc_enable = hsw_crtc_enable,
+ .crtc_disable = hsw_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs pch_split_display_funcs = {
+ .get_pipe_config = ilk_get_pipe_config,
+ .crtc_enable = ilk_crtc_enable,
+ .crtc_disable = ilk_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs vlv_display_funcs = {
+ .get_pipe_config = i9xx_get_pipe_config,
+ .crtc_enable = valleyview_crtc_enable,
+ .crtc_disable = i9xx_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs i9xx_display_funcs = {
+ .get_pipe_config = i9xx_get_pipe_config,
+ .crtc_enable = i9xx_crtc_enable,
+ .crtc_disable = i9xx_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
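The hunks above replace per-field function-pointer assignments with const
vtables selected once at init. A minimal standalone sketch of that pattern
(names hypothetical, not the i915 API):

#include <stdio.h>

struct display_funcs {
	void (*crtc_enable)(void);
};

static void hsw_enable(void) { puts("hsw crtc_enable"); }
static void i9xx_enable(void) { puts("i9xx crtc_enable"); }

static const struct display_funcs skl_funcs = { .crtc_enable = hsw_enable };
static const struct display_funcs i9xx_funcs = { .crtc_enable = i9xx_enable };

int main(void)
{
	int display_ver = 9; /* stand-in for DISPLAY_VER() */
	const struct display_funcs *display =
		display_ver >= 9 ? &skl_funcs : &i9xx_funcs;

	display->crtc_enable(); /* all call sites dispatch through the table */
	return 0;
}

Making the tables const moves them into read-only data and prevents stray
writes through the hook pointers after initialization.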
/**
* intel_init_display_hooks - initialize the display modesetting hooks
* @dev_priv: device private
@@ -12105,38 +10638,19 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
intel_dpll_init_clock_hook(dev_priv);
if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.crtc_enable = hsw_crtc_enable;
- dev_priv->display.crtc_disable = hsw_crtc_disable;
+ dev_priv->display = &skl_display_funcs;
} else if (HAS_DDI(dev_priv)) {
- dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.crtc_enable = hsw_crtc_enable;
- dev_priv->display.crtc_disable = hsw_crtc_disable;
+ dev_priv->display = &ddi_display_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.get_pipe_config = ilk_get_pipe_config;
- dev_priv->display.crtc_enable = ilk_crtc_enable;
- dev_priv->display.crtc_disable = ilk_crtc_disable;
+ dev_priv->display = &pch_split_display_funcs;
} else if (IS_CHERRYVIEW(dev_priv) ||
IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.crtc_enable = valleyview_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display = &vlv_display_funcs;
} else {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display = &i9xx_display_funcs;
}
intel_fdi_init_hook(dev_priv);
-
- if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
- dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
- } else {
- dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
- dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
- }
-
}
void intel_modeset_init_hw(struct drm_i915_private *i915)
@@ -12206,7 +10720,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.optimize_watermarks)
+ if (!dev_priv->wm_disp->optimize_watermarks)
return;
state = drm_atomic_state_alloc(&dev_priv->drm);
@@ -12239,7 +10753,7 @@ retry:
/* Write calculated watermark values back */
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
crtc_state->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(intel_state, crtc);
+ intel_optimize_watermarks(intel_state, crtc);
to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
@@ -12271,22 +10785,6 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
-static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
-{
- if (IS_IRONLAKE(dev_priv)) {
- u32 fdi_pll_clk =
- intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
-
- dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
- } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
- dev_priv->fdi_pll_freq = 270000;
- } else {
- return;
- }
-
- drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
-}
-
static int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
@@ -12381,7 +10879,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->funcs = &intel_mode_funcs;
- mode_config->async_page_flip = has_async_flips(i915);
+ mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
/*
* Maximum framebuffer dimensions, chosen to match
@@ -12420,22 +10918,6 @@ static void intel_mode_config_cleanup(struct drm_i915_private *i915)
drm_mode_config_cleanup(&i915->drm);
}
-static void plane_config_fini(struct intel_initial_plane_config *plane_config)
-{
- if (plane_config->fb) {
- struct drm_framebuffer *fb = &plane_config->fb->base;
-
- /* We may only have the stub and not a full framebuffer */
- if (drm_framebuffer_read_refcount(fb))
- drm_framebuffer_put(fb);
- else
- kfree(fb);
- }
-
- if (plane_config->vma)
- i915_vma_put(plane_config->vma);
-}
-
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
@@ -12540,7 +11022,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_plane_possible_crtcs_init(i915);
intel_shared_dpll_init(dev);
- intel_update_fdi_pll_freq(i915);
+ intel_fdi_pll_freq_update(i915);
intel_update_czclk(i915);
intel_modeset_init_hw(i915);
@@ -12564,30 +11046,13 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
+ intel_acpi_assign_connector_fwnodes(i915);
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
- struct intel_initial_plane_config plane_config = {};
-
if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
continue;
-
- /*
- * Note that reserving the BIOS fb up front prevents us
- * from stuffing other stolen allocations like the ring
- * on top. This prevents some ugliness at boot time, and
- * can even allow for smooth boot transitions if the BIOS
- * fb is large enough for the active pipe configuration.
- */
- i915->display.get_initial_plane_config(crtc, &plane_config);
-
- /*
- * If the fb is shared between multiple heads, we'll
- * just get the first one.
- */
- intel_find_initial_plane_obj(crtc, &plane_config);
-
- plane_config_fini(&plane_config);
+ intel_crtc_initial_plane_config(crtc);
}
/*
@@ -12869,13 +11334,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
intel_plane_disable_noatomic(crtc, plane);
}
- /*
- * Disable any background color set by the BIOS, but enable the
- * gamma and CSC to match how we program our planes.
- */
- if (DISPLAY_VER(dev_priv) >= 9)
- intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
- SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
+ /* Disable any background color/etc. set by the BIOS */
+ intel_color_commit(crtc_state);
}
/* Adjust the state of the output pipe according to whether we
@@ -13076,8 +11536,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
enableddisabled(crtc_state->hw.active));
}
- dev_priv->active_pipes = cdclk_state->active_pipes =
- dbuf_state->active_pipes = active_pipes;
+ cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
readout_plane_state(dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 284936f0ddab..0c76bf57f86b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -270,6 +270,7 @@ enum tc_port {
};
enum tc_port_mode {
+ TC_PORT_DISCONNECTED,
TC_PORT_TBT_ALT,
TC_PORT_DP_ALT,
TC_PORT_LEGACY,
@@ -531,8 +532,8 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
void intel_plane_destroy(struct drm_plane *plane);
-void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state);
-void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state);
+void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
+void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -548,8 +549,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int plane);
-unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height);
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
@@ -577,19 +576,9 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, bool phys_cursor,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags);
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
-int intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -620,19 +609,16 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier);
-int intel_plane_pin_fb(struct intel_plane_state *plane_state);
-void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);
-
-unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
- int color_plane);
-unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane);
+void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane);
void intel_display_driver_register(struct drm_i915_private *i915);
void intel_display_driver_unregister(struct drm_i915_private *i915);
@@ -650,23 +636,10 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
int intel_modeset_all_pipes(struct intel_atomic_state *state);
/* modesetting asserts */
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state);
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
-#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
-#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state);
-#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
-#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-void assert_pipe(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder, bool state);
-#define assert_pipe_enabled(d, t) assert_pipe(d, t, true)
-#define assert_pipe_disabled(d, t) assert_pipe(d, t, false)
+void assert_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, bool state);
+#define assert_transcoder_enabled(d, t) assert_transcoder(d, t, true)
+#define assert_transcoder_disabled(d, t) assert_transcoder(d, t, false)
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 8fdacb252bb1..e04767695530 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -7,18 +7,19 @@
#include <drm/drm_fourcc.h>
#include "i915_debugfs.h"
+#include "intel_de.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
-#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
+#include "intel_dp_mst.h"
+#include "intel_drrs.h"
#include "intel_fbc.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_pm.h"
#include "intel_psr.h"
-#include "intel_sideband.h"
#include "intel_sprite.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -1323,9 +1324,6 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
return 0;
}
-#define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
- seq_puts(m, "LPSP: disabled\n"))
-
static bool
intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
enum i915_power_well_id power_well_id)
@@ -1344,32 +1342,20 @@ intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
static int i915_lpsp_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
-
- if (DISPLAY_VER(i915) >= 13) {
- LPSP_STATUS(!intel_lpsp_power_well_enabled(i915,
- SKL_DISP_PW_2));
+ bool lpsp_enabled = false;
+
+ if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2);
+ } else if (IS_DISPLAY_VER(i915, 11, 12)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL);
+ } else {
+ seq_puts(m, "LPSP: not supported\n");
return 0;
}
- switch (DISPLAY_VER(i915)) {
- case 12:
- case 11:
- LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
- break;
- case 10:
- case 9:
- LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
- break;
- default:
- /*
- * Apart from HASWELL/BROADWELL other legacy platform doesn't
- * support lpsp.
- */
- if (IS_HASWELL(i915) || IS_BROADWELL(i915))
- LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
- else
- seq_puts(m, "LPSP: not supported\n");
- }
+ seq_printf(m, "LPSP: %s\n", enableddisabled(lpsp_enabled));
return 0;
}
@@ -1393,7 +1379,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
continue;
dig_port = enc_to_dig_port(intel_encoder);
- if (!dig_port->dp.can_mst)
+ if (!intel_dp_mst_source_support(&dig_port->dp))
continue;
seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
@@ -2044,11 +2030,9 @@ static int i915_drrs_ctl_set(void *data, u64 val)
intel_dp = enc_to_intel_dp(encoder);
if (val)
- intel_edp_drrs_enable(intel_dp,
- crtc_state);
+ intel_drrs_enable(intel_dp, crtc_state);
else
- intel_edp_drrs_disable(intel_dp,
- crtc_state);
+ intel_drrs_disable(intel_dp, crtc_state);
}
drm_connector_list_iter_end(&conn_iter);
@@ -2240,14 +2224,12 @@ static int i915_psr_status_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
-#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
- seq_puts(m, "LPSP: incapable\n"))
-
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_encoder *encoder;
+ bool lpsp_capable = false;
encoder = intel_attached_encoder(to_intel_connector(connector));
if (!encoder)
@@ -2256,35 +2238,27 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
if (connector->status != connector_status_connected)
return -ENODEV;
- if (DISPLAY_VER(i915) >= 13) {
- LPSP_CAPABLE(encoder->port <= PORT_B);
- return 0;
- }
-
- switch (DISPLAY_VER(i915)) {
- case 12:
+ if (DISPLAY_VER(i915) >= 13)
+ lpsp_capable = encoder->port <= PORT_B;
+ else if (DISPLAY_VER(i915) >= 12)
/*
* Actually TGL can drive LPSP on ports up to DDI_C,
* but there is no physically connected DDI_C on TGL SKUs,
* and the driver does not even initialize the DDI_C port for gen12.
*/
- LPSP_CAPABLE(encoder->port <= PORT_B);
- break;
- case 11:
- LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP);
- break;
- case 10:
- case 9:
- LPSP_CAPABLE(encoder->port == PORT_A &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
- break;
- default:
- if (IS_HASWELL(i915) || IS_BROADWELL(i915))
- LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
- }
+ lpsp_capable = encoder->port <= PORT_B;
+ else if (DISPLAY_VER(i915) == 11)
+ lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+ else if (IS_DISPLAY_VER(i915, 9, 10))
+ lpsp_capable = (encoder->port == PORT_A &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;
+
+ seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
return 0;
}
@@ -2468,17 +2442,16 @@ static const struct file_operations i915_dsc_bpp_fops = {
*
* Cleanup will be done by drm_connector_unregister() through a call to
* drm_debugfs_connector_remove().
- *
- * Returns 0 on success, negative error codes on error.
*/
-int intel_connector_debugfs_add(struct drm_connector *connector)
+void intel_connector_debugfs_add(struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
struct dentry *root = connector->debugfs_entry;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
/* The connector must have been registered beforehand. */
if (!root)
- return -ENODEV;
+ return;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
@@ -2511,33 +2484,23 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
connector, &i915_dsc_bpp_fops);
}
- /* Legacy panels doesn't lpsp on any platform */
- if ((DISPLAY_VER(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
debugfs_create_file("i915_lpsp_capability", 0444, root,
connector, &i915_lpsp_capability_fops);
-
- return 0;
}
/**
* intel_crtc_debugfs_add - add i915 specific crtc debugfs files
* @crtc: pointer to a drm_crtc
*
- * Returns 0 on success, negative error codes on error.
- *
* Failure to add debugfs entries should generally be ignored.
*/
-int intel_crtc_debugfs_add(struct drm_crtc *crtc)
+void intel_crtc_debugfs_add(struct drm_crtc *crtc)
{
- if (!crtc->debugfs_entry)
- return -ENODEV;
-
- crtc_updates_add(crtc);
- return 0;
+ if (crtc->debugfs_entry)
+ crtc_updates_add(crtc);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index 557901f3eb90..d3a79c07c384 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -6,18 +6,18 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_H__
#define __INTEL_DISPLAY_DEBUGFS_H__
-struct drm_connector;
struct drm_crtc;
struct drm_i915_private;
+struct intel_connector;
#ifdef CONFIG_DEBUG_FS
void intel_display_debugfs_register(struct drm_i915_private *i915);
-int intel_connector_debugfs_add(struct drm_connector *connector);
-int intel_crtc_debugfs_add(struct drm_crtc *crtc);
+void intel_connector_debugfs_add(struct intel_connector *connector);
+void intel_crtc_debugfs_add(struct drm_crtc *crtc);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
-static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
-static inline int intel_crtc_debugfs_add(struct drm_crtc *crtc) { return 0; }
+static inline void intel_connector_debugfs_add(struct intel_connector *connector) {}
+static inline void intel_crtc_debugfs_add(struct drm_crtc *crtc) {}
#endif
#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index cce1a926fcc1..1672604f9ef7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,24 +3,25 @@
* Copyright © 2019 Intel Corporation
*/
-#include "display/intel_crt.h"
-
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
-#include "intel_display_power.h"
+#include "intel_crt.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
#include "intel_hotplug.h"
+#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_pps.h"
-#include "intel_sideband.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_vga.h"
+#include "vlv_sideband.h"
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
@@ -560,7 +561,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
- if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
+ if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
return;
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
@@ -629,7 +630,7 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
* exit sequence.
*/
timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
- if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
+ if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
icl_tc_cold_exit(dev_priv);
hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
@@ -1195,7 +1196,7 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
+ intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
drm_WARN_ON(&dev_priv->drm,
intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 978531841fa3..0612e4b6e3c8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -410,6 +410,10 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+#define with_intel_display_power_if_enabled(i915, domain, wf) \
+ for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
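
The new with_intel_display_power_if_enabled() macro above scopes a wakeref
the same way the existing with_intel_display_power() does, but runs the body
only if the domain is already enabled. A usage sketch (the domain and
register are chosen purely for illustration):

	intel_wakeref_t wakeref;
	u32 val = 0;

	with_intel_display_power_if_enabled(i915, POWER_DOMAIN_AUX_A, wakeref) {
		/* body runs only while the power well is up; the reference
		 * is dropped asynchronously when the block is left */
		val = intel_de_read(i915, DP_AUX_CH_CTL(AUX_CH_A));
	}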
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 6beeeeba1bed..39e11eaec1a3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -103,8 +103,6 @@ struct intel_fb_view {
* in the rotated and remapped GTT view all no-CCS formats (up to 2
* color planes) are supported.
*
- * TODO: add support for CCS formats in the remapped GTT view.
- *
* The view information shared by all FB color planes in the FB,
* like dst x/y and src/dst width, is stored separately in
* intel_plane_state.
@@ -271,6 +269,9 @@ struct intel_encoder {
const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
+ void (*set_signal_levels)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
@@ -428,10 +429,6 @@ struct intel_hdcp_shim {
int (*hdcp_2_2_capable)(struct intel_digital_port *dig_port,
bool *capable);
- /* Detects whether a HDCP 1.4 sink connected in MST topology */
- int (*streams_type1_capable)(struct intel_connector *connector,
- bool *capable);
-
/* Write HDCP2.2 messages */
int (*write_2_2_msg)(struct intel_digital_port *dig_port,
void *buf, size_t size);
@@ -629,6 +626,12 @@ struct intel_plane_state {
struct intel_fb_view view;
+ /* Plane pxp decryption state */
+ bool decrypt;
+
+ /* Plane state to display black pixels when pxp is borked */
+ bool force_black;
+
/* plane control register */
u32 ctl;
@@ -1060,12 +1063,14 @@ struct intel_crtc_state {
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
+ /* PSR is supported but might not be enabled due to the lack of enabled planes */
bool has_psr;
bool has_psr2;
bool enable_psr2_sel_fetch;
bool req_psr2_sdp_prior_scanline;
u32 dc3co_exitline;
u16 su_y_granularity;
+ struct drm_dp_vsc_sdp psr_vsc;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1529,7 +1534,6 @@ struct intel_psr {
u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
- struct drm_dp_vsc_sdp vsc;
};
struct intel_dp {
@@ -1576,7 +1580,6 @@ struct intel_dp {
struct intel_pps pps;
- bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
@@ -1606,8 +1609,6 @@ struct intel_dp {
u8 dp_train_pat);
void (*set_idle_link_train)(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
- void (*set_signal_levels)(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
u8 (*preemph_max)(struct intel_dp *intel_dp);
u8 (*voltage_max)(struct intel_dp *intel_dp,
@@ -1667,8 +1668,11 @@ struct intel_digital_port {
enum intel_display_power_domain ddi_io_power_domain;
intel_wakeref_t ddi_io_wakeref;
intel_wakeref_t aux_wakeref;
+
struct mutex tc_lock; /* protects the TypeC port mode */
intel_wakeref_t tc_lock_wakeref;
+ enum intel_display_power_domain tc_lock_power_domain;
+ struct delayed_work tc_disconnect_phy_work;
int tc_link_refcount;
bool tc_legacy_port:1;
char tc_port_name[8];
@@ -1684,6 +1688,8 @@ struct intel_digital_port {
bool hdcp_auth_status;
/* HDCP port data need to pass to security f/w */
struct hdcp_port_data hdcp_port_data;
+ /* Whether the MST topology supports HDCP Type 1 Content */
+ bool hdcp_mst_type1_capable;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -2035,28 +2041,6 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}
-static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->params.panel_use_ssc >= 0)
- return dev_priv->params.panel_use_ssc != 0;
- return dev_priv->vbt.lvds_use_ssc
- && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
-}
-
-static inline u32 i9xx_dpll_compute_fp(struct dpll *dpll)
-{
- return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
-}
-
-static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *pipe_config)
-{
- if (HAS_DDI(dev_priv))
- return pipe_config->port_clock; /* SPLL */
- else
- return dev_priv->fdi_pll_freq;
-}
-
static inline bool is_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index b3c8e1c450ef..2dc9d632969d 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -45,8 +45,8 @@
#define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 10)
-#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 10)
+#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 12)
+#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 12)
MODULE_FIRMWARE(ADLP_DMC_PATH);
#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
@@ -255,20 +255,10 @@ intel_get_stepping_info(struct drm_i915_private *i915,
static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
- u32 val, mask;
-
- mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
-
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- mask |= DC_STATE_DEBUG_MASK_CORES;
-
/* The below bit doesn't need to be cleared ever afterwards */
- val = intel_de_read(dev_priv, DC_STATE_DEBUG);
- if ((val & mask) != mask) {
- val |= mask;
- intel_de_write(dev_priv, DC_STATE_DEBUG, val);
- intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
- }
+ intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0,
+ DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
+ intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index abe3d61b6243..23de500d56b5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -44,6 +44,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_de.h"
@@ -55,6 +56,7 @@
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
+#include "intel_drrs.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -64,7 +66,6 @@
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
-#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
@@ -100,6 +101,8 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4};
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
+ *
+ * This function is not safe to use before the encoder type has been set.
*/
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
@@ -111,6 +114,12 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
+/* Is link rate UHBR and thus 128b/132b? */
+bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->port_clock >= 1000000;
+}
+
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
@@ -130,6 +139,9 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
return;
}
+ /*
+ * Sink rates for 8b/10b.
+ */
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
if (max_lttpr_rate)
@@ -141,6 +153,41 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
intel_dp->sink_rates[i] = dp_rates[i];
}
+ /*
+ * Sink rates for 128b/132b. If set, sink should support all 8b/10b
+ * rates and 10 Gbps.
+ */
+ if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
+ u8 uhbr_rates = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);
+
+ drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);
+
+ if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
+ /* We have a repeater */
+ if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
+ intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
+ DP_PHY_REPEATER_128B132B_SUPPORTED) {
+ /* Repeater supports 128b/132b, valid UHBR rates */
+ uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ } else {
+ /* Does not support 128b/132b */
+ uhbr_rates = 0;
+ }
+ }
+
+ if (uhbr_rates & DP_UHBR10)
+ intel_dp->sink_rates[i++] = 1000000;
+ if (uhbr_rates & DP_UHBR13_5)
+ intel_dp->sink_rates[i++] = 1350000;
+ if (uhbr_rates & DP_UHBR20)
+ intel_dp->sink_rates[i++] = 2000000;
+ }
+
intel_dp->num_sink_rates = i;
}
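
The UHBR decode above turns a DPCD bitmask into explicit sink_rates[]
entries. A standalone sketch (the bit values are assumed from the DP 2.0
DPCD definitions in drm_dp_helper.h):

#include <stdio.h>

#define DP_UHBR10	(1 << 0)
#define DP_UHBR20	(1 << 1)
#define DP_UHBR13_5	(1 << 2)

int main(void)
{
	unsigned char uhbr_rates = DP_UHBR10 | DP_UHBR20; /* sample sink */
	int sink_rates[3], i = 0;

	if (uhbr_rates & DP_UHBR10)
		sink_rates[i++] = 1000000;
	if (uhbr_rates & DP_UHBR13_5)
		sink_rates[i++] = 1350000;
	if (uhbr_rates & DP_UHBR20)
		sink_rates[i++] = 2000000;

	for (int j = 0; j < i; j++)
		printf("%d\n", sink_rates[j]);
	return 0;
}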
@@ -192,6 +239,10 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp)
return intel_dp->max_link_lane_count;
}
+/*
+ * The required data bandwidth for a mode with given pixel clock and bpp. This
+ * is the required net bandwidth independent of the data bandwidth efficiency.
+ */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
@@ -199,16 +250,52 @@ intel_dp_link_required(int pixel_clock, int bpp)
return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
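
For a concrete feel for intel_dp_link_required(), a runnable sketch (the
533250 kHz pixel clock is an assumed 4k60 CVT-RB value):

#include <stdio.h>

static int link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 7) / 8; /* DIV_ROUND_UP(clock * bpp, 8) */
}

int main(void)
{
	/* 3840x2160@60, 24 bpp: 533250 * 24 / 8 = 1599750 kBps */
	printf("%d kBps required\n", link_required(533250, 24));
	return 0;
}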
+/*
+ * Given a link rate and lanes, get the data bandwidth.
+ *
+ * Data bandwidth is the actual payload rate, which depends on the data
+ * bandwidth efficiency and the link rate.
+ *
+ * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
+ * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
+ * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
+ * coincidence, the port clock in kHz matches the data bandwidth in kBps, and
+ * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
+ * longer holds for data bandwidth as soon as FEC or MST is taken into account!)
+ *
+ * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
+ * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
+ * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
+ * does not match the symbol clock, the port clock (not even if you think in
+ * terms of a byte clock), nor the data bandwidth. It only matches the link bit
+ * rate in units of 10000 bps.
+ */
int
-intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+intel_dp_max_data_rate(int max_link_rate, int max_lanes)
{
- /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
- * link rate that is generally expressed in Gbps. Since, 8 bits of data
- * is transmitted every LS_Clk per lane, there is no need to account for
- * the channel encoding that is done in the PHY layer here.
+ if (max_link_rate >= 1000000) {
+ /*
+ * UHBR rates always use 128b/132b channel encoding, and have
+ * 96.71% data bandwidth efficiency. Consider max_link_rate the
+ * link bit rate in units of 10000 bps.
+ */
+ int max_link_rate_kbps = max_link_rate * 10;
+
+ max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 9671, 10000);
+ max_link_rate = max_link_rate_kbps / 8;
+ }
+
+ /*
+ * Lower than UHBR rates always use 8b/10b channel encoding, and have
+ * 80% data bandwidth efficiency for SST non-FEC. However, this turns
+ * out to be a nop by coincidence, and can be skipped:
+ *
+ * int max_link_rate_kbps = max_link_rate * 10;
+ * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
+ * max_link_rate = max_link_rate_kbps / 8;
*/
- return max_link_clock * max_lanes;
+ return max_link_rate * max_lanes;
}
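
A runnable sketch of the arithmetic in intel_dp_max_data_rate() above,
checking the UHBR10 number from the comment:

#include <stdio.h>

static int max_data_rate(int max_link_rate, int max_lanes)
{
	if (max_link_rate >= 1000000) {
		long long kbps = (long long)max_link_rate * 10;

		kbps = (kbps * 9671 + 5000) / 10000; /* DIV_ROUND_CLOSEST_ULL */
		max_link_rate = (int)(kbps / 8);
	}
	return max_link_rate * max_lanes;
}

int main(void)
{
	/* UHBR10, 4 lanes: 1208875 * 4 = 4835500 kBps */
	printf("%d\n", max_data_rate(1000000, 4));
	/* HBR3, 4 lanes: the 8b/10b case stays a plain multiply */
	printf("%d\n", max_data_rate(810000, 4));
	return 0;
}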
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
@@ -222,6 +309,20 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
encoder->port != PORT_A);
}
+static int dg2_max_source_rate(struct intel_dp *intel_dp)
+{
+ return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
+}
+
+static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
+{
+ u32 voltage;
+
+ voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
+
+ return voltage == VOLTAGE_INFO_0_85V;
+}
+
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -229,7 +330,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
if (intel_phy_is_combo(dev_priv, phy) &&
- !intel_dp_is_edp(intel_dp))
+ (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
return 540000;
return 810000;
@@ -237,7 +338,23 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
- if (intel_dp_is_edp(intel_dp))
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+
+ if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
+ return 540000;
+
+ return 810000;
+}
+
+static int dg1_max_source_rate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
return 540000;
return 810000;
@@ -248,7 +365,8 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
/* The values must be in increasing order */
static const int icl_rates[] = {
- 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
+ 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
+ 1000000, 1350000,
};
static const int bxt_rates[] = {
162000, 216000, 243000, 270000, 324000, 432000, 540000
@@ -275,7 +393,12 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 11) {
source_rates = icl_rates;
size = ARRAY_SIZE(icl_rates);
- if (IS_JSL_EHL(dev_priv))
+ if (IS_DG2(dev_priv))
+ max_rate = dg2_max_source_rate(intel_dp);
+ else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
+ IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ max_rate = dg1_max_source_rate(intel_dp);
+ else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
@@ -461,7 +584,9 @@ u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(i915) >= 13)
+ return 17280 * 8;
+ else if (DISPLAY_VER(i915) >= 11)
return 7680 * 8;
else
return 6144 * 8;
@@ -703,6 +828,17 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
return MODE_OK;
}
+static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ int hdisplay, int clock)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!intel_dp_can_bigjoiner(intel_dp))
+ return false;
+
+ return clock > i915->max_dotclk_freq || hdisplay > 5120;
+}
+
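intel_dp_need_bigjoiner() above gates the two-pipe path on either the
dotclock cap or a 5120 pixel width limit. A tiny standalone check (the
650 MHz max_dotclk value is assumed for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool need_bigjoiner(int hdisplay, int clock_khz, int max_dotclk_khz)
{
	return clock_khz > max_dotclk_khz || hdisplay > 5120;
}

int main(void)
{
	/* 7680x4320: the hdisplay test alone forces bigjoiner */
	printf("%s\n", need_bigjoiner(7680, 1188000, 650000) ? "yes" : "no");
	return 0;
}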
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -726,11 +862,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_H_ILLEGAL;
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
- if (mode->hdisplay != fixed_mode->hdisplay)
- return MODE_PANEL;
-
- if (mode->vdisplay != fixed_mode->vdisplay)
- return MODE_PANEL;
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
target_clock = fixed_mode->clock;
}
@@ -738,8 +872,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
- if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
- intel_dp_can_bigjoiner(intel_dp)) {
+ if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
bigjoiner = true;
max_dotclk *= 2;
}
@@ -811,18 +944,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
-bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
+bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
{
- int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
-
- return max_rate >= 540000;
+ return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
}
-bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
+bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
{
- int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
-
- return max_rate >= 810000;
+ return DISPLAY_VER(i915) >= 10;
}
static void snprintf_int_array(char *str, size_t len,
@@ -1044,7 +1173,8 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
intel_dp->num_common_rates,
intel_dp->compliance.test_link_rate);
if (index >= 0)
- limits->min_clock = limits->max_clock = index;
+ limits->min_rate = limits->max_rate =
+ intel_dp->compliance.test_link_rate;
limits->min_lane_count = limits->max_lane_count =
intel_dp->compliance.test_lane_count;
}
@@ -1058,8 +1188,8 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
const struct link_config_limits *limits)
{
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int bpp, clock, lane_count;
- int mode_rate, link_clock, link_avail;
+ int bpp, i, lane_count;
+ int mode_rate, link_rate, link_avail;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
@@ -1067,18 +1197,22 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
output_bpp);
- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+ for (i = 0; i < intel_dp->num_common_rates; i++) {
+ link_rate = intel_dp->common_rates[i];
+ if (link_rate < limits->min_rate ||
+ link_rate > limits->max_rate)
+ continue;
+
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- link_clock = intel_dp->common_rates[clock];
- link_avail = intel_dp_max_data_rate(link_clock,
+ link_avail = intel_dp_max_data_rate(link_rate,
lane_count);
if (mode_rate <= link_avail) {
pipe_config->lane_count = lane_count;
pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = link_clock;
+ pipe_config->port_clock = link_rate;
return 0;
}
@@ -1212,7 +1346,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* with DSC enabled for the requested mode.
*/
pipe_config->pipe_bpp = pipe_bpp;
- pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
+ pipe_config->port_clock = limits->max_rate;
pipe_config->lane_count = limits->max_lane_count;
if (intel_dp_is_edp(intel_dp)) {
@@ -1321,8 +1455,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
/* No common link rates between source and sink */
drm_WARN_ON(encoder->base.dev, common_len <= 0);
- limits.min_clock = 0;
- limits.max_clock = common_len - 1;
+ limits.min_rate = intel_dp->common_rates[0];
+ limits.max_rate = intel_dp->common_rates[common_len - 1];
limits.min_lane_count = 1;
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
@@ -1340,20 +1474,18 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
* values correspond to the native resolution of the panel.
*/
limits.min_lane_count = limits.max_lane_count;
- limits.min_clock = limits.max_clock;
+ limits.min_rate = limits.max_rate;
}
intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
"max rate %d max bpp %d pixel clock %iKHz\n",
- limits.max_lane_count,
- intel_dp->common_rates[limits.max_clock],
+ limits.max_lane_count, limits.max_rate,
limits.max_bpp, adjusted_mode->crtc_clock);
- if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
- adjusted_mode->crtc_hdisplay > 5120) &&
- intel_dp_can_bigjoiner(intel_dp))
+ if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_clock))
pipe_config->bigjoiner = true;
/*
@@ -1553,7 +1685,7 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
{
vsc->sdp_type = DP_SDP_VSC;
- if (intel_dp->psr.psr2_enabled) {
+ if (crtc_state->has_psr2) {
if (intel_dp->psr.colorimetry_support &&
intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
/* [PSR2, +Colorimetry] */
@@ -1603,46 +1735,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}
-static void
-intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- int output_bpp, bool constant_n)
-{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int pixel_clock;
-
- if (pipe_config->vrr.enable)
- return;
-
- /*
- * DRRS and PSR can't be enable together, so giving preference to PSR
- * as it allows more power-savings by complete shutting down display,
- * so to guarantee this, intel_dp_drrs_compute_config() must be called
- * after intel_psr_compute_config().
- */
- if (pipe_config->has_psr)
- return;
-
- if (!intel_connector->panel.downclock_mode ||
- dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
- return;
-
- pipe_config->has_drrs = true;
-
- pixel_clock = intel_connector->panel.downclock_mode->clock;
- if (pipe_config->splitter.enable)
- pixel_clock /= pipe_config->splitter.link_count;
-
- intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
- pipe_config->port_clock, &pipe_config->dp_m2_n2,
- constant_n, pipe_config->fec_enable);
-
- /* FIXME: abstract this better */
- if (pipe_config->splitter.enable)
- pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
-}
-
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -1665,7 +1757,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode);
if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
}
@@ -1678,13 +1770,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
- intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
- adjusted_mode);
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
- if (HAS_GMCH(dev_priv))
- ret = intel_gmch_panel_fitting(pipe_config, conn_state);
- else
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
}
@@ -1750,9 +1840,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
g4x_dp_set_clock(encoder, pipe_config);
intel_vrr_compute_config(pipe_config, conn_state);
- intel_psr_compute_config(intel_dp, pipe_config);
- intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
- constant_n);
+ intel_psr_compute_config(intel_dp, pipe_config, conn_state);
+ intel_drrs_compute_config(intel_dp, pipe_config, output_bpp,
+ constant_n);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
@@ -1762,6 +1852,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count)
{
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp->link_trained = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
@@ -1779,7 +1870,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
drm_dbg_kms(&i915->drm, "\n");
- intel_panel_enable_backlight(crtc_state, conn_state);
+ intel_backlight_enable(crtc_state, conn_state);
intel_pps_backlight_on(intel_dp);
}
@@ -1795,7 +1886,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
drm_dbg_kms(&i915->drm, "\n");
intel_pps_backlight_off(intel_dp);
- intel_panel_disable_backlight(old_conn_state);
+ intel_backlight_disable(old_conn_state);
}
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
@@ -1916,6 +2007,9 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ if (!crtc_state)
+ return;
+
/*
* Don't clobber DPCD if it's been already read out during output
* setup (eDP) or detect.
@@ -2389,6 +2483,8 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_display_info *info = &connector->base.display_info;
u8 mso;
if (intel_dp->edp_dpcd[0] < DP_EDP_14)
@@ -2407,8 +2503,9 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
}
if (mso) {
- drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n",
- mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso);
+ drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
+ mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
+ info->mso_pixel_overlap);
if (!HAS_MSO(i915)) {
drm_err(&i915->drm, "No source MSO support, disabling\n");
mso = 0;
@@ -2416,7 +2513,7 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
}
intel_dp->mso_link_count = mso;
- intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */
+ intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}
static bool
@@ -2505,8 +2602,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
*/
intel_edp_init_source_oui(intel_dp, true);
- intel_edp_mso_init(intel_dp);
-
return true;
}
@@ -2574,7 +2669,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
return i915->params.enable_dp_mst &&
- intel_dp->can_mst &&
+ intel_dp_mst_source_support(intel_dp) &&
drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}
@@ -2589,10 +2684,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
encoder->base.base.id, encoder->base.name,
- yesno(intel_dp->can_mst), yesno(sink_can_mst),
+ yesno(intel_dp_mst_source_support(intel_dp)), yesno(sink_can_mst),
yesno(i915->params.enable_dp_mst));
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
return;
intel_dp->is_mst = sink_can_mst &&
@@ -2809,7 +2904,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- struct drm_dp_vsc_sdp *vsc)
+ const struct drm_dp_vsc_sdp *vsc)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -4588,6 +4683,17 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return intel_modeset_synced_crtcs(state, conn);
}
+static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
+{
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ i915->hotplug.event_bits |= BIT(encoder->hpd_pin);
+ spin_unlock_irq(&i915->irq_lock);
+ queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0);
+}
+
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -4598,6 +4704,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
+ .oob_hotplug_event = intel_dp_oob_hotplug_event,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -4713,432 +4820,6 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
drm_connector_attach_vrr_capable_property(connector);
}
-/**
- * intel_dp_set_drrs_state - program registers for RR switch to take effect
- * @dev_priv: i915 device
- * @crtc_state: a pointer to the active intel_crtc_state
- * @refresh_rate: RR to be programmed
- *
- * This function gets called when refresh rate (RR) has to be changed from
- * one frequency to another. Switches can be between high and low RR
- * supported by the panel or to any other RR based on media playback (in
- * this case, RR value needs to be passed from user space).
- *
- * The caller of this function needs to take a lock on dev_priv->drrs.
- */
-static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state,
- int refresh_rate)
-{
- struct intel_dp *intel_dp = dev_priv->drrs.dp;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
-
- if (refresh_rate <= 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Refresh rate should be positive non-zero.\n");
- return;
- }
-
- if (intel_dp == NULL) {
- drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
- return;
- }
-
- if (!crtc) {
- drm_dbg_kms(&dev_priv->drm,
- "DRRS: intel_crtc not initialized\n");
- return;
- }
-
- if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
- drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
- return;
- }
-
- if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
- refresh_rate)
- index = DRRS_LOW_RR;
-
- if (index == dev_priv->drrs.refresh_rate_type) {
- drm_dbg_kms(&dev_priv->drm,
- "DRRS requested for previously set RR...ignoring\n");
- return;
- }
-
- if (!crtc_state->hw.active) {
- drm_dbg_kms(&dev_priv->drm,
- "eDP encoder disabled. CRTC not Active\n");
- return;
- }
-
- if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
- switch (index) {
- case DRRS_HIGH_RR:
- intel_dp_set_m_n(crtc_state, M1_N1);
- break;
- case DRRS_LOW_RR:
- intel_dp_set_m_n(crtc_state, M2_N2);
- break;
- case DRRS_MAX_RR:
- default:
- drm_err(&dev_priv->drm,
- "Unsupported refreshrate type\n");
- }
- } else if (DISPLAY_VER(dev_priv) > 6) {
- i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
- u32 val;
-
- val = intel_de_read(dev_priv, reg);
- if (index > DRRS_HIGH_RR) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
- else
- val |= PIPECONF_EDP_RR_MODE_SWITCH;
- } else {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
- else
- val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
- }
- intel_de_write(dev_priv, reg, val);
- }
-
- dev_priv->drrs.refresh_rate_type = index;
-
- drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
- refresh_rate);
-}
-
-static void
-intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- dev_priv->drrs.busy_frontbuffer_bits = 0;
- dev_priv->drrs.dp = intel_dp;
-}
-
-/**
- * intel_edp_drrs_enable - init drrs struct if supported
- * @intel_dp: DP struct
- * @crtc_state: A pointer to the active crtc state.
- *
- * Initializes frontbuffer_bits and drrs.dp
- */
-void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!crtc_state->has_drrs)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- if (dev_priv->drrs.dp) {
- drm_warn(&dev_priv->drm, "DRRS already enabled\n");
- goto unlock;
- }
-
- intel_edp_drrs_enable_locked(intel_dp);
-
-unlock:
- mutex_unlock(&dev_priv->drrs.mutex);
-}
-
-static void
-intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
- int refresh;
-
- refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
- intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
- }
-
- dev_priv->drrs.dp = NULL;
-}
-
-/**
- * intel_edp_drrs_disable - Disable DRRS
- * @intel_dp: DP struct
- * @old_crtc_state: Pointer to old crtc_state.
- *
- */
-void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!old_crtc_state->has_drrs)
- return;
-
- mutex_lock(&dev_priv->drrs.mutex);
- if (!dev_priv->drrs.dp) {
- mutex_unlock(&dev_priv->drrs.mutex);
- return;
- }
-
- intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
- mutex_unlock(&dev_priv->drrs.mutex);
-
- cancel_delayed_work_sync(&dev_priv->drrs.work);
-}
-
-/**
- * intel_edp_drrs_update - Update DRRS state
- * @intel_dp: Intel DP
- * @crtc_state: new CRTC state
- *
- * This function will update DRRS states, disabling or enabling DRRS when
- * executing fastsets. For full modeset, intel_edp_drrs_disable() and
- * intel_edp_drrs_enable() should be called instead.
- */
-void
-intel_edp_drrs_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
- return;
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- /* New state matches current one? */
- if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
- goto unlock;
-
- if (crtc_state->has_drrs)
- intel_edp_drrs_enable_locked(intel_dp);
- else
- intel_edp_drrs_disable_locked(intel_dp, crtc_state);
-
-unlock:
- mutex_unlock(&dev_priv->drrs.mutex);
-}
-
-static void intel_edp_drrs_downclock_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), drrs.work.work);
- struct intel_dp *intel_dp;
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- intel_dp = dev_priv->drrs.dp;
-
- if (!intel_dp)
- goto unlock;
-
- /*
- * The delayed work can race with an invalidate hence we need to
- * recheck.
- */
-
- if (dev_priv->drrs.busy_frontbuffer_bits)
- goto unlock;
-
- if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
-
- intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
- }
-
-unlock:
- mutex_unlock(&dev_priv->drrs.mutex);
-}
-
-/**
- * intel_edp_drrs_invalidate - Disable Idleness DRRS
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called everytime rendering on the given planes start.
- * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
- *
- * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
- */
-void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits)
-{
- struct intel_dp *intel_dp;
- struct drm_crtc *crtc;
- enum pipe pipe;
-
- if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
- return;
-
- cancel_delayed_work(&dev_priv->drrs.work);
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- intel_dp = dev_priv->drrs.dp;
- if (!intel_dp) {
- mutex_unlock(&dev_priv->drrs.mutex);
- return;
- }
-
- crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
- dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
-
- /* invalidate means busy screen hence upclock */
- if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
-
- mutex_unlock(&dev_priv->drrs.mutex);
-}
-
-/**
- * intel_edp_drrs_flush - Restart Idleness DRRS
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called every time rendering on the given planes has
- * completed or flip on a crtc is completed. So DRRS should be upclocked
- * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
- * if no other planes are dirty.
- *
- * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
- */
-void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits)
-{
- struct intel_dp *intel_dp;
- struct drm_crtc *crtc;
- enum pipe pipe;
-
- if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
- return;
-
- cancel_delayed_work(&dev_priv->drrs.work);
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- intel_dp = dev_priv->drrs.dp;
- if (!intel_dp) {
- mutex_unlock(&dev_priv->drrs.mutex);
- return;
- }
-
- crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
- dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
-
- /* flush means busy screen hence upclock */
- if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
-
- /*
- * flush also means no more activity hence schedule downclock, if all
- * other fbs are quiescent too
- */
- if (!dev_priv->drrs.busy_frontbuffer_bits)
- schedule_delayed_work(&dev_priv->drrs.work,
- msecs_to_jiffies(1000));
- mutex_unlock(&dev_priv->drrs.mutex);
-}
-
-/**
- * DOC: Display Refresh Rate Switching (DRRS)
- *
- * Display Refresh Rate Switching (DRRS) is a power conservation feature
- * which enables swtching between low and high refresh rates,
- * dynamically, based on the usage scenario. This feature is applicable
- * for internal panels.
- *
- * Indication that the panel supports DRRS is given by the panel EDID, which
- * would list multiple refresh rates for one resolution.
- *
- * DRRS is of 2 types - static and seamless.
- * Static DRRS involves changing refresh rate (RR) by doing a full modeset
- * (may appear as a blink on screen) and is used in dock-undock scenario.
- * Seamless DRRS involves changing RR without any visual effect to the user
- * and can be used during normal system usage. This is done by programming
- * certain registers.
- *
- * Support for static/seamless DRRS may be indicated in the VBT based on
- * inputs from the panel spec.
- *
- * DRRS saves power by switching to low RR based on usage scenarios.
- *
- * The implementation is based on frontbuffer tracking implementation. When
- * there is a disturbance on the screen triggered by user activity or a periodic
- * system activity, DRRS is disabled (RR is changed to high RR). When there is
- * no movement on screen, after a timeout of 1 second, a switch to low RR is
- * made.
- *
- * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
- * and intel_edp_drrs_flush() are called.
- *
- * DRRS can be further extended to support other internal panels and also
- * the scenario of video playback wherein RR is set based on the rate
- * requested by userspace.
- */
-
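Condensed, the policy the removed DOC block describes is small enough to model: any dirty frontbuffer bit forces an immediate upclock, and only once every bit has been flushed does a one-second timer arm the downclock. A compilable toy model of that state machine (plain C, all names invented for illustration):

    #include <stdio.h>

    /* Toy model of the DRRS idleness policy described above. */
    enum rr { HIGH_RR, LOW_RR };

    struct drrs {
        unsigned int busy_bits; /* dirty frontbuffer planes */
        enum rr rate;
    };

    static void invalidate(struct drrs *d, unsigned int bits)
    {
        d->busy_bits |= bits;
        d->rate = HIGH_RR;      /* activity: upclock at once */
    }

    static void flush(struct drrs *d, unsigned int bits)
    {
        d->busy_bits &= ~bits;
        d->rate = HIGH_RR;      /* a flush/flip also counts as activity */
        if (!d->busy_bits)
            printf("all planes quiescent: downclock in 1000 ms\n");
    }

    int main(void)
    {
        struct drrs d = { .rate = LOW_RR };

        invalidate(&d, 0x1);    /* rendering starts: LOW_RR -> HIGH_RR */
        flush(&d, 0x1);         /* rendering done: schedule LOW_RR */
        return 0;
    }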
-/**
- * intel_dp_drrs_init - Init basic DRRS work and mutex.
- * @connector: eDP connector
- * @fixed_mode: preferred mode of panel
- *
- * This function is called only once at driver load to initialize basic
- * DRRS stuff.
- *
- * Returns:
- * Downclock mode if panel supports it, else return NULL.
- * DRRS support is determined by the presence of downclock mode (apart
- * from VBT setting).
- */
-static struct drm_display_mode *
-intel_dp_drrs_init(struct intel_connector *connector,
- struct drm_display_mode *fixed_mode)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct drm_display_mode *downclock_mode = NULL;
-
- INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
- mutex_init(&dev_priv->drrs.mutex);
-
- if (DISPLAY_VER(dev_priv) <= 6) {
- drm_dbg_kms(&dev_priv->drm,
- "DRRS supported for Gen7 and above\n");
- return NULL;
- }
-
- if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
- drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
- return NULL;
- }
-
- downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
- if (!downclock_mode) {
- drm_dbg_kms(&dev_priv->drm,
- "Downclock mode is not found. DRRS not supported\n");
- return NULL;
- }
-
- dev_priv->drrs.type = dev_priv->vbt.drrs_type;
-
- dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
- drm_dbg_kms(&dev_priv->drm,
- "seamless DRRS supported for eDP panel.\n");
- return downclock_mode;
-}
-
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
@@ -5197,7 +4878,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
if (fixed_mode)
- downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
+ downclock_mode = intel_drrs_init(intel_connector, fixed_mode);
+
+ /* MSO requires information from the EDID */
+ intel_edp_mso_init(intel_dp);
/* multiply the mode clock and horizontal timings for MSO */
intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
@@ -5230,7 +4914,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
intel_connector->panel.backlight.power = intel_pps_backlight_power;
- intel_panel_setup_backlight(connector, pipe);
+ intel_backlight_setup(intel_connector, pipe);
if (fixed_mode) {
drm_connector_set_panel_orientation_with_quirk(connector,
@@ -5292,8 +4976,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_encoder->base.name))
return false;
- intel_dp_set_source_rates(intel_dp);
-
intel_dp->reset_link_params = true;
intel_dp->pps.pps_pipe = INVALID_PIPE;
intel_dp->pps.active_pipe = INVALID_PIPE;
@@ -5309,28 +4991,22 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
*/
drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+
+ /* eDP only on port B and/or C on vlv/chv */
+ if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) &&
+ port != PORT_B && port != PORT_C))
+ return false;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
}
+ intel_dp_set_source_rates(intel_dp);
+
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
- /*
- * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
- * for DP the encoder type can be set by the caller to
- * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
- */
- if (type == DRM_MODE_CONNECTOR_eDP)
- intel_encoder->type = INTEL_OUTPUT_EDP;
-
- /* eDP only on port B and/or C on vlv/chv */
- if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) &&
- intel_dp_is_edp(intel_dp) &&
- port != PORT_B && port != PORT_C))
- return false;
-
drm_dbg_kms(&dev_priv->drm,
"Adding %s connector on [ENCODER:%d:%s]\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
@@ -5411,7 +5087,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
intel_dp = enc_to_intel_dp(encoder);
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
continue;
if (intel_dp->is_mst)
@@ -5435,7 +5111,7 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
intel_dp = enc_to_intel_dp(encoder);
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
continue;
ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 680631b5b437..ce229026dc91 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -26,7 +26,7 @@ struct intel_dp;
struct intel_encoder;
struct link_config_limits {
- int min_clock, max_clock;
+ int min_rate, max_rate;
int min_lane_count, max_lane_count;
int min_bpp, max_bpp;
};
@@ -58,6 +58,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
bool long_hpd);
@@ -70,25 +71,14 @@ int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
-
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
-bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
-bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
+bool intel_dp_source_supports_tps3(struct drm_i915_private *i915);
+bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
-int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
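The renamed limits and the intel_dp_max_data_rate() signature both now speak in link-rate terms. The underlying budget check is simple spec arithmetic: a mode needs pixel_clock × bpp of bandwidth, and an 8b/10b link delivers link_rate × lanes × 8/10 of payload. A worked example in plain C (this restates the DP math itself, not i915's internal unit conventions, which this sketch deliberately sidesteps):

    #include <stdio.h>

    int main(void)
    {
        /* 4k60: 594 MHz pixel clock at 24 bpp */
        long long need_kbps = 594000LL * 24;          /* 14256000 kbps */

        /* HBR3 x4: 8.1 Gbps per lane raw, 8b/10b leaves 80% payload */
        long long have_kbps = 8100000LL * 4 * 8 / 10; /* 25920000 kbps */

        printf("need %lld kbps, have %lld kbps -> %s\n", need_kbps, have_kbps,
               need_kbps <= have_kbps ? "fits" : "does not fit");
        return 0;
    }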
@@ -98,7 +88,7 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
struct drm_dp_vsc_sdp *vsc);
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- struct drm_dp_vsc_sdp *vsc);
+ const struct drm_dp_vsc_sdp *vsc);
void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index f483f479dd0b..5fbb767fcd63 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -150,9 +150,6 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
u32 unused)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
u32 ret;
/*
@@ -170,8 +167,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
- if (intel_phy_is_tc(i915, phy) &&
- dig_port->tc_mode == TC_PORT_TBT_ALT)
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
ret |= DP_AUX_CH_CTL_TBT_IO;
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 6ac568617ef3..569d17b4d00f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -34,9 +34,9 @@
* for some reason.
*/
+#include "intel_backlight.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
-#include "intel_panel.h"
/* TODO:
* Implement HDR, right now we just implement the bare minimum to bring us back into SDR mode so we
@@ -146,7 +146,7 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
if (!panel->backlight.edp.intel.sdr_uses_aux) {
u32 pwm_level = panel->backlight.pwm_funcs->get(connector, pipe);
- return intel_panel_backlight_level_from_pwm(connector, pwm_level);
+ return intel_backlight_level_from_pwm(connector, pwm_level);
}
/* Assume 100% brightness if backlight controls aren't enabled yet */
@@ -187,9 +187,9 @@ intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32
if (panel->backlight.edp.intel.sdr_uses_aux) {
intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
} else {
- const u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+ const u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
- intel_panel_set_pwm_level(conn_state, pwm_level);
+ intel_backlight_set_pwm_level(conn_state, pwm_level);
}
}
@@ -215,7 +215,7 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ctrl |= INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
} else {
- u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+ u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
@@ -238,7 +238,7 @@ intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state,
return;
/* Note we want the actual pwm_level to be 0, regardless of pwm_min */
- panel->backlight.pwm_funcs->disable(conn_state, intel_panel_invert_pwm_level(connector, 0));
+ panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0));
}
static int
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index d697d169e8c1..540a669e01dd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -446,8 +446,6 @@ static
int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
void *buf, size_t size)
{
- struct intel_dp *dp = &dig_port->dp;
- struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
@@ -463,8 +461,6 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
bytes_to_write = size - 1;
byte++;
- hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
-
while (bytes_to_write) {
len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
@@ -482,29 +478,11 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
return size;
}
-static int
-get_rxinfo_hdcp_1_dev_downstream(struct intel_digital_port *dig_port, bool *hdcp_1_x)
-{
- u8 rx_info[HDCP_2_2_RXINFO_LEN];
- int ret;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux,
- DP_HDCP_2_2_REG_RXINFO_OFFSET,
- (void *)rx_info, HDCP_2_2_RXINFO_LEN);
-
- if (ret != HDCP_2_2_RXINFO_LEN)
- return ret >= 0 ? -EIO : ret;
-
- *hdcp_1_x = HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) ? true : false;
- return 0;
-}
-
static
-ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
+ssize_t get_receiver_id_list_rx_info(struct intel_digital_port *dig_port, u32 *dev_cnt, u8 *byte)
{
- u8 rx_info[HDCP_2_2_RXINFO_LEN];
- u32 dev_cnt;
ssize_t ret;
+ u8 *rx_info = byte;
ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_HDCP_2_2_REG_RXINFO_OFFSET,
@@ -512,15 +490,11 @@ ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
if (ret != HDCP_2_2_RXINFO_LEN)
return ret >= 0 ? -EIO : ret;
- dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ *dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
- if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
- dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
-
- ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
- HDCP_2_2_RECEIVER_IDS_MAX_LEN +
- (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+ if (*dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
+ *dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
return ret;
}
@@ -530,12 +504,15 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
u8 msg_id, void *buf, size_t size)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
ktime_t msg_end = ktime_set(0, 0);
bool msg_expired;
+ u32 dev_cnt;
hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
if (!hdcp2_msg_data)
@@ -546,17 +523,24 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
if (ret < 0)
return ret;
+ hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
+
+ /* DP adaptation msgs have no msg_id */
+ byte++;
+
if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
- ret = get_receiver_id_list_size(dig_port);
+ ret = get_receiver_id_list_rx_info(dig_port, &dev_cnt, byte);
if (ret < 0)
return ret;
- size = ret;
+ byte += ret;
+ size = sizeof(struct hdcp2_rep_send_receiverid_list) -
+ HDCP_2_2_RXINFO_LEN - HDCP_2_2_RECEIVER_IDS_MAX_LEN +
+ (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+ offset += HDCP_2_2_RXINFO_LEN;
}
- bytes_to_recv = size - 1;
- /* DP adaptation msgs has no msg_id */
- byte++;
+ bytes_to_recv = size - 1;
while (bytes_to_recv) {
len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
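With RxInfo consumed up front, the remaining RECEIVER_ID_LIST read shrinks to the fixed fields plus 5 bytes per reported device. Worked arithmetic in plain C (the HDCP 2.2 field lengths are restated from the spec; the 177-byte struct size is an assumption about the drm_hdcp.h layout):

    #include <stdio.h>

    #define RXINFO_LEN           2
    #define SEQ_NUM_V_LEN        3
    #define V_PRIME_HALF_LEN     16
    #define RECEIVER_ID_LEN      5
    #define MAX_DEVICE_COUNT     31
    #define RECEIVER_IDS_MAX_LEN (RECEIVER_ID_LEN * MAX_DEVICE_COUNT) /* 155 */

    int main(void)
    {
        /* msg_id + rx_info + seq_num_V + V' half + max receiver IDs */
        int struct_size = 1 + RXINFO_LEN + SEQ_NUM_V_LEN +
                          V_PRIME_HALF_LEN + RECEIVER_IDS_MAX_LEN; /* 177 */
        int dev_cnt = 2; /* example: two downstream devices */
        int size = struct_size - RXINFO_LEN - RECEIVER_IDS_MAX_LEN +
                   dev_cnt * RECEIVER_ID_LEN;

        /* 177 - 2 - 155 + 10 = 30; minus the msg_id byte -> 29 on the wire */
        printf("read %d more bytes after msg_id and RxInfo\n", size - 1);
        return 0;
    }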
@@ -664,27 +648,6 @@ int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
return 0;
}
-static
-int intel_dp_mst_streams_type1_capable(struct intel_connector *connector,
- bool *capable)
-{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- int ret;
- bool hdcp_1_x;
-
- ret = get_rxinfo_hdcp_1_dev_downstream(dig_port, &hdcp_1_x);
- if (ret) {
- drm_dbg_kms(&i915->drm,
- "[%s:%d] failed to read RxInfo ret=%d\n",
- connector->base.name, connector->base.base.id, ret);
- return ret;
- }
-
- *capable = !hdcp_1_x;
- return 0;
-}
-
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -833,7 +796,6 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
.check_2_2_link = intel_dp_mst_hdcp2_check_link,
.hdcp_2_2_capable = intel_dp_hdcp2_capable,
- .streams_type1_capable = intel_dp_mst_streams_type1_capable,
.protocol = HDCP_PROTOCOL_DP,
};
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 508a514c5e37..85676c953e0a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -25,15 +25,6 @@
#include "intel_dp.h"
#include "intel_dp_link_training.h"
-static void
-intel_dp_dump_link_status(struct drm_device *drm,
- const u8 link_status[DP_LINK_STATUS_SIZE])
-{
- drm_dbg_kms(drm,
- "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
- link_status[0], link_status[1], link_status[2],
- link_status[3], link_status[4], link_status[5]);
-}
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
@@ -66,6 +57,7 @@ static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
char phy_name[10];
@@ -73,21 +65,22 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "failed to read the PHY caps for %s\n",
- phy_name);
+ "[ENCODER:%d:%s][%s] failed to read the PHY caps\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return;
}
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "%s PHY capabilities: %*ph\n",
- phy_name,
+ "[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n",
+ encoder->base.base.id, encoder->base.name, phy_name,
(int)sizeof(intel_dp->lttpr_phy_caps[0]),
phy_caps);
}
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_dp_is_edp(intel_dp))
return false;
@@ -104,7 +97,8 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
goto reset_caps;
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "LTTPR common capabilities: %*ph\n",
+ "[ENCODER:%d:%s] LTTPR common capabilities: %*ph\n",
+ encoder->base.base.id, encoder->base.name,
(int)sizeof(intel_dp->lttpr_common_caps),
intel_dp->lttpr_common_caps);
@@ -130,6 +124,8 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int lttpr_count;
int i;
@@ -161,8 +157,9 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
return 0;
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n",
+ encoder->base.base.id, encoder->base.name);
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
intel_dp_reset_lttpr_count(intel_dp);
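For reference, the transparent/non-transparent choice the fallback message above refers to is a single DPCD register write. A self-contained sketch of that protocol step (address and values per the DP 1.4a LTTPR definitions; the dpcd_write_byte() stub is invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define DP_PHY_REPEATER_MODE                  0xf0003
    #define DP_PHY_REPEATER_MODE_TRANSPARENT      0x55
    #define DP_PHY_REPEATER_MODE_NON_TRANSPARENT  0xaa

    /* Stub standing in for a one-byte DPCD write over AUX. */
    static bool dpcd_write_byte(unsigned int addr, unsigned char val)
    {
        printf("DPCD[0x%05x] <- 0x%02x\n", addr, val);
        return true;
    }

    static bool set_lttpr_mode(bool non_transparent)
    {
        return dpcd_write_byte(DP_PHY_REPEATER_MODE,
                               non_transparent ?
                               DP_PHY_REPEATER_MODE_NON_TRANSPARENT :
                               DP_PHY_REPEATER_MODE_TRANSPARENT);
    }

    int main(void)
    {
        if (!set_lttpr_mode(true))   /* try non-transparent LT */
            set_lttpr_mode(false);   /* fall back to transparent */
        return 0;
    }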
@@ -301,21 +298,54 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
return preemph_max;
}
-void
-intel_dp_get_adjust_train(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- enum drm_dp_phy dp_phy,
- const u8 link_status[DP_LINK_STATUS_SIZE])
+static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
+ enum drm_dp_phy dp_phy)
+{
+ return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy);
+}
+
+/* 128b/132b */
+static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ u8 tx_ffe = 0;
+
+ if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
+ lane = min(lane, crtc_state->lane_count - 1);
+ tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
+ } else {
+ for (lane = 0; lane < crtc_state->lane_count; lane++)
+ tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
+ }
+
+ return tx_ffe;
+}
+
+/* 8b/10b */
+static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
{
u8 v = 0;
u8 p = 0;
- int lane;
u8 voltage_max;
u8 preemph_max;
- for (lane = 0; lane < crtc_state->lane_count; lane++) {
- v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
- p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
+ if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
+ lane = min(lane, crtc_state->lane_count - 1);
+
+ v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+ } else {
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
+ p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
+ }
}
preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
@@ -328,8 +358,79 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+ return v | p;
+}
+
+static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ if (intel_dp_is_uhbr(crtc_state))
+ return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+ else
+ return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+}
+
+#define TRAIN_REQ_FMT "%d/%d/%d/%d"
+#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
+ (drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
+#define TRAIN_REQ_VSWING_ARGS(link_status) \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 0), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 1), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 2), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 3)
+#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
+ (drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
+#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
+#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
+ drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
+#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
+
+void
+intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ char phy_name[10];
+ int lane;
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
+ "TX FFE request: " TRAIN_REQ_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_REQ_TX_FFE_ARGS(link_status));
+ } else {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, "
+ "vswing request: " TRAIN_REQ_FMT ", "
+ "pre-emphasis request: " TRAIN_REQ_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_REQ_VSWING_ARGS(link_status),
+ TRAIN_REQ_PREEMPH_ARGS(link_status));
+ }
+
for (lane = 0; lane < 4; lane++)
- intel_dp->train_set[lane] = v | p;
+ intel_dp->train_set[lane] =
+ intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
}
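The rewritten helper asks each lane for its own request when the PHY does per-lane signal levels, and otherwise folds every lane into a common maximum. The requests arrive packed two lanes per byte in the ADJUST_REQUEST registers of the DPCD link status. A compilable demo of the 8b/10b unpacking and the two policies (bit layout restated from the DP spec):

    #include <stdio.h>

    /* ADJUST_REQUEST_LANE0_1 / LANE2_3 sit at link_status[4] and [5]:
     * bits 1:0 voltage, 3:2 pre-emphasis for the even lane,
     * bits 5:4 voltage, 7:6 pre-emphasis for the odd lane.
     */
    static unsigned char adj_voltage(const unsigned char *ls, int lane)
    {
        return (ls[4 + (lane >> 1)] >> ((lane & 1) * 4)) & 0x3;
    }

    static unsigned char adj_preemph(const unsigned char *ls, int lane)
    {
        return (ls[4 + (lane >> 1)] >> ((lane & 1) * 4 + 2)) & 0x3;
    }

    int main(void)
    {
        /* lanes 0..3 request vswing 1/2/0/1, pre-emphasis 1/0/2/1 */
        unsigned char ls[6] = { 0, 0, 0, 0, 0x25, 0x58 };
        unsigned char vmax = 0, pmax = 0;
        int lane;

        for (lane = 0; lane < 4; lane++) {
            unsigned char v = adj_voltage(ls, lane);
            unsigned char p = adj_preemph(ls, lane);

            /* per-lane policy: each lane keeps its own request */
            printf("lane %d: vswing %u, pre-emphasis %u\n", lane, v, p);
            vmax = v > vmax ? v : vmax;
            pmax = p > pmax ? p : pmax;
        }
        /* common-level policy: all lanes get the max request */
        printf("common: vswing %u, pre-emphasis %u\n", vmax, pmax);
        return 0;
    }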
static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
@@ -351,7 +452,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
int len;
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
- dp_train_pat);
+ dp_phy, dp_train_pat);
buf[0] = dp_train_pat;
/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
@@ -379,40 +480,77 @@ static char dp_training_pattern_name(u8 train_pat)
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
u8 dp_train_pat)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
+ char phy_name[10];
if (train_pat != DP_TRAINING_PATTERN_DISABLE)
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n",
encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
dp_training_pattern_name(train_pat));
intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
+#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
+#define _TRAIN_SET_VSWING_ARGS(train_set) \
+ ((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
+ (train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
+#define TRAIN_SET_VSWING_ARGS(train_set) \
+ _TRAIN_SET_VSWING_ARGS((train_set)[0]), \
+ _TRAIN_SET_VSWING_ARGS((train_set)[1]), \
+ _TRAIN_SET_VSWING_ARGS((train_set)[2]), \
+ _TRAIN_SET_VSWING_ARGS((train_set)[3])
+#define _TRAIN_SET_PREEMPH_ARGS(train_set) \
+ ((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \
+ (train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : ""
+#define TRAIN_SET_PREEMPH_ARGS(train_set) \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[3])
+#define _TRAIN_SET_TX_FFE_ARGS(train_set) \
+ ((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), ""
+#define TRAIN_SET_TX_FFE_ARGS(train_set) \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[3])
+
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
char phy_name[10];
- drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
- train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT,
- train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
- " (max)" : "",
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+ if (intel_dp_is_uhbr(crtc_state)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
+ "TX FFE presets: " TRAIN_SET_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
+ } else {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, "
+ "vswing levels: " TRAIN_SET_FMT ", "
+ "pre-emphasis levels: " TRAIN_SET_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
+ TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
+ }
if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
- intel_dp->set_signal_levels(intel_dp, crtc_state);
+ encoder->set_signal_levels(encoder, crtc_state);
}
static bool
@@ -444,15 +582,55 @@ intel_dp_update_link_train(struct intel_dp *intel_dp,
return ret == crtc_state->lane_count;
}
+/* 128b/132b */
+static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane)
+{
+ return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) ==
+ DP_TX_FFE_PRESET_VALUE_MASK;
+}
+
+/*
+ * 8b/10b
+ *
+ * FIXME: The DP spec is very confusing here, and the Link CTS spec seems to
+ * have self-contradicting tests around this area.
+ *
+ * In lieu of better ideas let's just stop when we've reached the max supported
+ * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on
+ * whether vswing level 3 is supported or not.
+ */
+static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
+{
+ u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ return false;
+
+ if (v + p != 3)
+ return false;
+
+ return true;
+}
+
static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
int lane;
- for (lane = 0; lane < crtc_state->lane_count; lane++)
- if ((intel_dp->train_set[lane] &
- DP_TRAIN_MAX_SWING_REACHED) == 0)
- return false;
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ u8 train_set_lane = intel_dp->train_set[lane];
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
+ return false;
+ } else {
+ if (!intel_dp_lane_max_vswing_reached(train_set_lane))
+ return false;
+ }
+ }
return true;
}
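The stop conditions now differ per channel coding: 8b/10b gives up once a lane sits at maximum vswing with the matching maximum pre-emphasis (the v + p == 3 heuristic the FIXME above discusses), while 128b/132b just checks for the all-ones TX FFE preset. A compilable restatement (DPCD TRAINING_LANEx_SET field masks per the DP spec):

    #include <stdbool.h>
    #include <stdio.h>

    #define VSWING_MASK          0x03  /* bits 1:0 */
    #define MAX_SWING_REACHED    0x04  /* bit 2 */
    #define PREEMPH_MASK         0x18  /* bits 4:3 */
    #define PREEMPH_SHIFT        3
    #define TX_FFE_PRESET_MASK   0x0f  /* 128b/132b, bits 3:0 */

    static bool max_vswing_8b10b(unsigned char set)      /* 8b/10b rule */
    {
        unsigned char v = set & VSWING_MASK;
        unsigned char p = (set & PREEMPH_MASK) >> PREEMPH_SHIFT;

        return (set & MAX_SWING_REACHED) && v + p == 3;
    }

    static bool max_tx_ffe_128b132b(unsigned char set)   /* 128b/132b rule */
    {
        return (set & TX_FFE_PRESET_MASK) == TX_FFE_PRESET_MASK;
    }

    int main(void)
    {
        /* vswing 2 + pre-emphasis 1, max-swing flag set: stop */
        printf("8b/10b 2+1: %d\n", max_vswing_8b10b(0x02 | 0x04 | (1 << 3)));
        /* vswing 3 + pre-emphasis 0, flag set: also stop */
        printf("8b/10b 3+0: %d\n", max_vswing_8b10b(0x03 | 0x04));
        /* TX FFE preset 15 is the ceiling for 128b/132b */
        printf("128b/132b:  %d\n", max_tx_ffe_128b132b(0x0f));
        return 0;
    }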
@@ -465,7 +643,8 @@ static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 link_config[2];
u8 link_bw, rate_select;
@@ -477,10 +656,12 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
if (link_bw)
drm_dbg_kms(&i915->drm,
- "Using LINK_BW_SET value %02x\n", link_bw);
+ "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
+ encoder->base.base.id, encoder->base.name, link_bw);
else
drm_dbg_kms(&i915->drm,
- "Using LINK_RATE_SET value %02x\n", rate_select);
+ "[ENCODER:%d:%s] Using LINK_RATE_SET value %02x\n",
+ encoder->base.base.id, encoder->base.name, rate_select);
/* Write the link configuration data */
link_config[0] = link_bw;
@@ -495,11 +676,10 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
&rate_select, 1);
link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
- link_config[1] = DP_SET_ANSI_8B10B;
+ link_config[1] = intel_dp_is_uhbr(crtc_state) ?
+ DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
- intel_dp->DP |= DP_PORT_EN;
-
return true;
}
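The two-byte write above lands on consecutive DPCD registers — DOWNSPREAD_CTRL (0x107) and MAIN_LINK_CHANNEL_CODING_SET (0x108) — so a single AUX transaction configures both the MSA handling and the 8b/10b versus 128b/132b coding select. A small sketch of the buffer layout (addresses and bit values per the DP spec):

    #include <stdio.h>

    #define DP_DOWNSPREAD_CTRL            0x107
    #define DP_MSA_TIMING_PAR_IGNORE_EN   (1 << 7)
    #define DP_SET_ANSI_8B10B             (1 << 0)  /* at 0x108 */
    #define DP_SET_ANSI_128B132B          (1 << 1)  /* at 0x108 */

    int main(void)
    {
        int vrr_enabled = 1, uhbr = 1;
        unsigned char link_config[2];

        link_config[0] = vrr_enabled ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
        link_config[1] = uhbr ? DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;

        /* one 2-byte AUX write starting at DOWNSPREAD_CTRL */
        printf("DPCD[0x%03x..0x%03x] <- %02x %02x\n",
               DP_DOWNSPREAD_CTRL, DP_DOWNSPREAD_CTRL + 1,
               link_config[0], link_config[1]);
        return 0;
    }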
@@ -512,6 +692,48 @@ static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_d
drm_dp_lttpr_link_train_clock_recovery_delay();
}
+static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
+ const u8 old_link_status[DP_LINK_STATUS_SIZE],
+ const u8 new_link_status[DP_LINK_STATUS_SIZE])
+{
+ int lane;
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ u8 old, new;
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
+ new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
+ } else {
+ old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
+ new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
+ }
+
+ if (old != new)
+ return true;
+ }
+
+ return false;
+}
+
+static void
+intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ char phy_name[10];
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ link_status[0], link_status[1], link_status[2],
+ link_status[3], link_status[4], link_status[5]);
+}
+
/*
* Perform the link training clock recovery phase on the given DP PHY using
* training pattern 1.
@@ -521,16 +743,22 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 voltage;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
int voltage_tries, cr_tries, max_cr_tries;
+ u8 link_status[DP_LINK_STATUS_SIZE];
bool max_vswing_reached = false;
+ char phy_name[10];
+
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
- drm_err(&i915->drm, "failed to enable link training\n");
+ drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
@@ -549,105 +777,118 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
- u8 link_status[DP_LINK_STATUS_SIZE];
-
intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
- drm_err(&i915->drm, "failed to get link status\n");
+ drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
- drm_dbg_kms(&i915->drm, "clock recovery OK\n");
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Clock recovery OK\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return true;
}
if (voltage_tries == 5) {
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
- "Same voltage tried 5 times\n");
+ "[ENCODER:%d:%s][%s] Same voltage tried 5 times\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
if (max_vswing_reached) {
- drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Max Voltage Swing reached\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
- voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
-
/* Update training set as requested by target */
intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
- "failed to update link training\n");
+ "[ENCODER:%d:%s][%s] Failed to update link training\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
- if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
- voltage)
+ if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
++voltage_tries;
else
voltage_tries = 1;
+ memcpy(old_link_status, link_status, sizeof(link_status));
+
if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
max_vswing_reached = true;
-
}
+
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_err(&i915->drm,
- "Failed clock recovery %d times, giving up!\n", max_cr_tries);
+ "[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n",
+ encoder->base.base.id, encoder->base.name, phy_name, max_cr_tries);
+
return false;
}
/*
- * Pick training pattern for channel equalization. Training pattern 4 for HBR3
- * or for 1.4 devices that support it, training Pattern 3 for HBR2
- * or 1.2 devices that support it, Training Pattern 2 otherwise.
+ * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
+ * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
+ * 1.2 devices that support it, TPS2 otherwise.
*/
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool source_tps3, sink_tps3, source_tps4, sink_tps4;
+ /* UHBR+ links use a separate 128b/132b TPS2 */
+ if (intel_dp_is_uhbr(crtc_state))
+ return DP_TRAINING_PATTERN_2;
+
/*
- * Intel platforms that support HBR3 also support TPS4. It is mandatory
- * for all downstream devices that support HBR3. There are no known eDP
- * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1
- * specification.
+ * TPS4 support is mandatory for all downstream devices that
+ * support HBR3. There are no known eDP panels that support
+ * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
* LTTPRs must support TPS4.
*/
- source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
+ source_tps4 = intel_dp_source_supports_tps4(i915);
sink_tps4 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps4_supported(intel_dp->dpcd);
if (source_tps4 && sink_tps4) {
return DP_TRAINING_PATTERN_4;
} else if (crtc_state->port_clock == 810000) {
if (!source_tps4)
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "8.1 Gbps link rate without source HBR3/TPS4 support\n");
+ drm_dbg_kms(&i915->drm,
+ "8.1 Gbps link rate without source TPS4 support\n");
if (!sink_tps4)
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ drm_dbg_kms(&i915->drm,
"8.1 Gbps link rate without sink TPS4 support\n");
}
+
/*
- * Intel platforms that support HBR2 also support TPS3. TPS3 support is
- * also mandatory for downstream devices that support HBR2. However, not
- * all sinks follow the spec.
+ * TPS3 support is mandatory for downstream devices that
+ * support HBR2. However, not all sinks follow the spec.
*/
- source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
+ source_tps3 = intel_dp_source_supports_tps3(i915);
sink_tps3 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps3_supported(intel_dp->dpcd);
if (source_tps3 && sink_tps3) {
return DP_TRAINING_PATTERN_3;
} else if (crtc_state->port_clock >= 540000) {
if (!source_tps3)
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
+ drm_dbg_kms(&i915->drm,
+ ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
if (!sink_tps3)
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ drm_dbg_kms(&i915->drm,
">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
}
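The pattern choice collapses to a short decision function: 128b/132b links always equalize with their own TPS2; otherwise TPS4 when both ends support it, TPS3 for the HBR2 class, TPS2 as the floor. A compilable restatement of that table:

    #include <stdbool.h>
    #include <stdio.h>

    static int pick_tps(bool uhbr, bool src_tps4, bool sink_tps4,
                        bool src_tps3, bool sink_tps3)
    {
        if (uhbr)
            return 2;   /* separate 128b/132b TPS2 */
        if (src_tps4 && sink_tps4)
            return 4;
        if (src_tps3 && sink_tps3)
            return 3;
        return 2;
    }

    int main(void)
    {
        /* HBR3-capable source, sink without TPS4: falls back to TPS3 */
        printf("TPS%d\n", pick_tps(false, true, false, true, true));
        return 0;
    }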
@@ -677,11 +918,15 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int tries;
u32 training_pattern;
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
+ char phy_name[10];
+
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
@@ -691,7 +936,10 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
training_pattern)) {
- drm_err(&i915->drm, "failed to start channel equalization\n");
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s][%s] Failed to start channel equalization\n",
+ encoder->base.base.id, encoder->base.name,
+ phy_name);
return false;
}
@@ -701,25 +949,28 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm,
- "failed to get link status\n");
+ "[ENCODER:%d:%s][%s] Failed to get link status\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
crtc_state->lane_count)) {
- intel_dp_dump_link_status(&i915->drm, link_status);
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
- "Clock recovery check failed, cannot "
- "continue channel equalization\n");
+ "[ENCODER:%d:%s][%s] Clock recovery check failed, cannot "
+ "continue channel equalization\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
if (drm_dp_channel_eq_ok(link_status,
crtc_state->lane_count)) {
channel_eq = true;
- drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
- "successful\n");
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Channel EQ done. DP Training successful\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
@@ -728,16 +979,18 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
- "failed to update link training\n");
+ "[ENCODER:%d:%s][%s] Failed to update link training\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
}
/* Try 5 times, else fail and try at lower BW */
if (tries == 5) {
- intel_dp_dump_link_status(&i915->drm, link_status);
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
- "Channel equalization failed 5 times\n");
+ "[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
}
return channel_eq;
@@ -774,7 +1027,7 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
intel_dp->link_trained = true;
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
- intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
DP_TRAINING_PATTERN_DISABLE);
}
@@ -783,7 +1036,8 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
char phy_name[10];
bool ret = false;
@@ -797,12 +1051,12 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
out:
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n",
- intel_connector->base.base.id,
- intel_connector->base.name,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
ret ? "passed" : "failed",
- crtc_state->port_clock, crtc_state->lane_count,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+ crtc_state->port_clock, crtc_state->lane_count);
return ret;
}
@@ -811,10 +1065,13 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (intel_dp->hobl_active) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "Link Training failed with HOBL active, not enabling it from now on");
+ "[ENCODER:%d:%s] Link Training failed with HOBL active, "
+ "not enabling it from now on",
+ encoder->base.base.id, encoder->base.name);
intel_dp->hobl_failed = true;
} else if (intel_dp_get_link_train_fallback_values(intel_dp,
crtc_state->port_clock,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 9d24d594368c..6a3a7b37349a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -19,6 +19,7 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const u8 link_status[DP_LINK_STATUS_SIZE]);
void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
u8 dp_train_pat);
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 8d13d7b26a25..89d701e8ae9d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -61,7 +61,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
int bpp, slots = -EINVAL;
crtc_state->lane_count = limits->max_lane_count;
- crtc_state->port_clock = limits->max_clock;
+ crtc_state->port_clock = limits->max_rate;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
crtc_state->pipe_bpp = bpp;
@@ -131,8 +131,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
- limits.min_clock =
- limits.max_clock = intel_dp_max_link_rate(intel_dp);
+ limits.min_rate =
+ limits.max_rate = intel_dp_max_link_rate(intel_dp);
limits.min_lane_count =
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
@@ -378,7 +378,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1);
if (ret) {
drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
}
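drm_dp_update_payload_part1() grew a start_slot argument, and i915 passes 1: on 8b/10b MST links timeslot 0 carries the MTP header, so VC payloads begin at slot 1, whereas 128b/132b links can start at slot 0. A sketch of how a caller might derive the value (illustrative only; the predicate name is invented):

    #include <stdbool.h>

    /* 8b/10b MST reserves timeslot 0 for the MTP header. */
    static int mst_payload_start_slot(bool is_128b132b)
    {
        return is_128b132b ? 0 : 1;
    }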
@@ -396,7 +396,6 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
bool last_mst_stream;
- u32 val;
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
@@ -406,18 +405,14 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
clear_act_sent(encoder, old_crtc_state);
- val = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
- val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
- val);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
wait_for_act_sent(encoder, old_crtc_state);
@@ -523,7 +518,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_dp->active_mst_links++;
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1);
/*
* Before Gen 12 this is not done as part of
@@ -555,6 +550,17 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
clear_act_sent(encoder, pipe_config);
+ if (intel_dp_is_uhbr(pipe_config)) {
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
+
+ intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
+ intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
+ }
+
intel_ddi_enable_transcoder_func(encoder, pipe_config);
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0,
@@ -571,7 +577,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
FECSTALL_DIS_DPTSTREAM_DPTTG);
- intel_enable_pipe(pipe_config);
+ intel_enable_transcoder(pipe_config);
intel_crtc_vblank_on(pipe_config);
@@ -971,24 +977,31 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
dig_port->max_lanes,
max_source_rate,
conn_base_id);
- if (ret)
+ if (ret) {
+ intel_dp->mst_mgr.cbs = NULL;
return ret;
-
- intel_dp->can_mst = true;
+ }
return 0;
}
+bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
+{
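+	/*
+	 * mst_mgr.cbs is set on successful topology mgr init and cleared
+	 * again on failure or cleanup, so it doubles as the source-side
+	 * MST support flag.
+	 */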
+ return intel_dp->mst_mgr.cbs;
+}
+
void
intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
{
struct intel_dp *intel_dp = &dig_port->dp;
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
return;
drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
/* encoders will get killed by normal cleanup */
+
+ intel_dp->mst_mgr.cbs = NULL;
}
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 6afda4e86b3c..f7301de6cdfb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -8,13 +8,15 @@
#include <linux/types.h>
-struct intel_digital_port;
struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
+bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 48507ed79950..44edeb2e55c0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,12 +21,13 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "display/intel_dp.h"
-
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dpio_phy.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
/**
* DOC: DPIO
@@ -266,15 +267,22 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
*ch = DPIO_CH0;
}
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
- enum port port, u32 margin, u32 scale,
- u32 enable, u32 deemphasis)
+void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- u32 val;
- enum dpio_phy phy;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ const struct intel_ddi_buf_trans *trans;
enum dpio_channel ch;
+ enum dpio_phy phy;
+ int n_entries;
+ u32 val;
- bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ bxt_port_to_phy_channel(dev_priv, encoder->port, &phy, &ch);
/*
* While we write to the group register to program all lanes at once we
@@ -286,12 +294,13 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
- val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
+ val |= trans->entries[level].bxt.margin << MARGIN_000_SHIFT |
+ trans->entries[level].bxt.scale << UNIQ_TRANS_SCALE_SHIFT;
intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
val &= ~SCALE_DCOMP_METHOD;
- if (enable)
+ if (trans->entries[level].bxt.enable)
val |= SCALE_DCOMP_METHOD;
if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
@@ -302,7 +311,7 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
val &= ~DE_EMPHASIS;
- val |= deemphasis << DEEMPH_SHIFT;
+ val |= trans->entries[level].bxt.deemphasis << DEEMPH_SHIFT;
intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index 6473440e7457..9c3d008e8e1a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -17,9 +17,8 @@ struct intel_encoder;
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch);
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
- enum port port, u32 margin, u32 scale,
- u32 enable, u32 deemphasis);
+void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 14515e62c05e..04a7af8340ca 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -2,16 +2,19 @@
/*
* Copyright © 2020 Intel Corporation
*/
+
#include <linux/kernel.h>
+
#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_display_types.h"
#include "intel_display.h"
+#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_panel.h"
-#include "intel_sideband.h"
-#include "display/intel_snps_phy.h"
+#include "intel_pps.h"
+#include "intel_snps_phy.h"
+#include "vlv_sideband.h"
struct intel_limit {
struct {
@@ -309,7 +312,7 @@ int pnv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
-static u32 i9xx_dpll_compute_m(struct dpll *dpll)
+static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
{
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
@@ -428,7 +431,8 @@ i9xx_select_p2_div(const struct intel_limit *limit,
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
+ int target, int refclk,
+ const struct dpll *match_clock,
struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
@@ -486,7 +490,8 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
+ int target, int refclk,
+ const struct dpll *match_clock,
struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
@@ -542,7 +547,8 @@ pnv_find_best_dpll(const struct intel_limit *limit,
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
+ int target, int refclk,
+ const struct dpll *match_clock,
struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
@@ -636,7 +642,8 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
+ int target, int refclk,
+ const struct dpll *match_clock,
struct dpll *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -696,7 +703,8 @@ vlv_find_best_dpll(const struct intel_limit *limit,
static bool
chv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
+ int target, int refclk,
+ const struct dpll *match_clock,
struct dpll *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -763,47 +771,45 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
NULL, best_clock);
}
-static u32 pnv_dpll_compute_fp(struct dpll *dpll)
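+/*
+ * Pack the N/M1/M2 dividers into the FP register layout
+ * (N at bit 16, M1 at bit 8, M2 at bit 0).
+ */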
+u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
+{
+ return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
+
+static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
{
return (1 << dpll->n) << 16 | dpll->m2;
}
-static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2 = 0;
+ u32 fp, fp2;
if (IS_PINEVIEW(dev_priv)) {
- fp = pnv_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = pnv_dpll_compute_fp(reduced_clock);
+ fp = pnv_dpll_compute_fp(clock);
+ fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ fp = i9xx_dpll_compute_fp(clock);
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
crtc_state->dpll_hw_state.fp0 = fp;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- reduced_clock) {
- crtc_state->dpll_hw_state.fp1 = fp2;
- } else {
- crtc_state->dpll_hw_state.fp1 = fp;
- }
+ crtc_state->dpll_hw_state.fp1 = fp2;
}
-static void i9xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+ i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
@@ -826,13 +832,17 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev_priv))
+ if (IS_G4X(dev_priv)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ } else if (IS_PINEVIEW(dev_priv)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
+ WARN_ON(reduced_clock->p1 != clock->p1);
+ } else {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev_priv) && reduced_clock)
- dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ WARN_ON(reduced_clock->p1 != clock->p1);
}
+
switch (clock->p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -847,6 +857,8 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
+ WARN_ON(reduced_clock->p2 != clock->p2);
+
if (DISPLAY_VER(dev_priv) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
@@ -868,16 +880,15 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
}
}
-static void i8xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+ i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
@@ -891,6 +902,8 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
if (clock->p2 == 4)
dpll |= PLL_P2_DIVIDE_BY_4;
}
+ WARN_ON(reduced_clock->p1 != clock->p1);
+ WARN_ON(reduced_clock->p2 != clock->p2);
/*
* Bspec:
@@ -918,42 +931,44 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.dpll = dpll;
}
-static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int hsw_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->uapi.state);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (IS_DG2(dev_priv)) {
+ if (IS_DG2(dev_priv))
return intel_mpllb_calc_state(crtc_state, encoder);
- } else if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
- DISPLAY_VER(dev_priv) >= 11) {
- if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
+
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
}
return 0;
}
-static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
+static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
{
- return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+ return dpll->m < factor * dpll->n;
}
-
-static void ilk_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll, fp, fp2;
+ u32 fp, fp2;
int factor;
/* Enable autotuning of the PLL clock (if permissible) */
@@ -968,19 +983,27 @@ static void ilk_compute_dpll(struct intel_crtc *crtc,
factor = 20;
}
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-
- if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
+ fp = i9xx_dpll_compute_fp(clock);
+ if (ilk_needs_fb_cb_tune(clock, factor))
fp |= FP_CB_TUNE;
- if (reduced_clock) {
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ if (ilk_needs_fb_cb_tune(reduced_clock, factor))
+ fp2 |= FP_CB_TUNE;
- if (reduced_clock->m < factor * reduced_clock->n)
- fp2 |= FP_CB_TUNE;
- } else {
- fp2 = fp;
- }
+ crtc_state->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp2;
+}
+
+static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+
+ ilk_update_pll_dividers(crtc_state, clock, reduced_clock);
dpll = 0;
@@ -1018,11 +1041,11 @@ static void ilk_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (crtc_state->dpll.p2) {
+ switch (clock->p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -1036,6 +1059,7 @@ static void ilk_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
+ WARN_ON(reduced_clock->p2 != clock->p2);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv))
@@ -1046,13 +1070,11 @@ static void ilk_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_VCO_ENABLE;
crtc_state->dpll_hw_state.dpll = dpll;
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
}
-static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->uapi.state);
@@ -1097,7 +1119,8 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- ilk_compute_dpll(crtc, crtc_state, NULL);
+ ilk_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
drm_dbg_kms(&dev_priv->drm,
@@ -1109,41 +1132,42 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
-void vlv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
{
- pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
DPLL_EXT_BUFFER_ENABLE_VLV;
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc_state->dpll_hw_state.dpll_md =
+ (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-void chv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void chv_compute_dpll(struct intel_crtc_state *crtc_state)
{
- pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc_state->dpll_hw_state.dpll_md =
+ (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-static int chv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_chv;
@@ -1159,13 +1183,12 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- chv_compute_dpll(crtc, crtc_state);
+ chv_compute_dpll(crtc_state);
return 0;
}
-static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_vlv;
@@ -1181,14 +1204,14 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- vlv_compute_dpll(crtc, crtc_state);
+ vlv_compute_dpll(crtc_state);
return 0;
}
-static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 96000;
@@ -1226,16 +1249,16 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- i9xx_compute_dpll(crtc, crtc_state, NULL);
+ i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
return 0;
}
-static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 96000;
@@ -1263,16 +1286,16 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- i9xx_compute_dpll(crtc, crtc_state, NULL);
+ i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
return 0;
}
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 96000;
@@ -1300,16 +1323,16 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- i9xx_compute_dpll(crtc, crtc_state, NULL);
+ i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
return 0;
}
-static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 48000;
@@ -1339,30 +1362,63 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- i8xx_compute_dpll(crtc, crtc_state, NULL);
+ i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
return 0;
}
+static const struct intel_dpll_funcs hsw_dpll_funcs = {
+ .crtc_compute_clock = hsw_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs ilk_dpll_funcs = {
+ .crtc_compute_clock = ilk_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs chv_dpll_funcs = {
+ .crtc_compute_clock = chv_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs vlv_dpll_funcs = {
+ .crtc_compute_clock = vlv_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs g4x_dpll_funcs = {
+ .crtc_compute_clock = g4x_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs pnv_dpll_funcs = {
+ .crtc_compute_clock = pnv_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs i9xx_dpll_funcs = {
+ .crtc_compute_clock = i9xx_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs i8xx_dpll_funcs = {
+ .crtc_compute_clock = i8xx_crtc_compute_clock,
+};
+
void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
- dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+ dev_priv->dpll_funcs = &hsw_dpll_funcs;
else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->display.crtc_compute_clock = ilk_crtc_compute_clock;
+ dev_priv->dpll_funcs = &ilk_dpll_funcs;
else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
+ dev_priv->dpll_funcs = &chv_dpll_funcs;
else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+ dev_priv->dpll_funcs = &vlv_dpll_funcs;
else if (IS_G4X(dev_priv))
- dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+ dev_priv->dpll_funcs = &g4x_dpll_funcs;
else if (IS_PINEVIEW(dev_priv))
- dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+ dev_priv->dpll_funcs = &pnv_dpll_funcs;
else if (DISPLAY_VER(dev_priv) != 2)
- dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+ dev_priv->dpll_funcs = &i9xx_dpll_funcs;
else
- dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+ dev_priv->dpll_funcs = &i8xx_dpll_funcs;
}
static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
@@ -1373,34 +1429,37 @@ static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
-void i9xx_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
+void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc_state->dpll_hw_state.dpll;
+ enum pipe pipe = crtc->pipe;
int i;
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
if (i9xx_has_pps(dev_priv))
- assert_panel_unlocked(dev_priv, crtc->pipe);
+ assert_pps_unlocked(dev_priv, pipe);
+
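+	/* Program the FP dividers before enabling the DPLL */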
+ intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0);
+ intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, reg, dpll);
+ intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
/* Wait for the clocks to stabilize. */
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
+ intel_de_write(dev_priv, DPLL_MD(pipe),
crtc_state->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
@@ -1408,13 +1467,13 @@ void i9xx_enable_pll(struct intel_crtc *crtc,
*
* So write it again.
*/
- intel_de_write(dev_priv, reg, dpll);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, reg, dpll);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150); /* wait for warmup */
}
}
@@ -1448,136 +1507,22 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
-static void _vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
- intel_de_posting_read(dev_priv, DPLL(pipe));
- udelay(150);
-
- if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
-}
-
-void vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
-
- /* PLL is protected by panel, make sure we can write it */
- assert_panel_unlocked(dev_priv, pipe);
-
- if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
- _vlv_enable_pll(crtc, pipe_config);
-
- intel_de_write(dev_priv, DPLL_MD(pipe),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(pipe));
-}
-
-
-static void _chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 tmp;
-
- vlv_dpio_get(dev_priv);
-
- /* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
- tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
-
- vlv_dpio_put(dev_priv);
-
- /*
- * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
- */
- udelay(1);
-
- /* Enable PLL */
- intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
-
- /* Check PLL is locked */
- if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
-}
-
-void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
+static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
-
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
-
- /* PLL is protected by panel, make sure we can write it */
- assert_panel_unlocked(dev_priv, pipe);
-
- if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
- _chv_enable_pll(crtc, pipe_config);
-
- if (pipe != PIPE_A) {
- /*
- * WaPixelRepeatModeFixForC0:chv
- *
- * DPLLCMD is AWOL. Use chicken bits to propagate
- * the value from DPLLBMD to either pipe B or C.
- */
- intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(PIPE_B),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
-
- /*
- * DPLLB VGA mode also seems to cause problems.
- * We should always have it disabled.
- */
- drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, DPLL(PIPE_B)) &
- DPLL_VGA_MODE_DIS) == 0);
- } else {
- intel_de_write(dev_priv, DPLL_MD(pipe),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(pipe));
- }
-}
-
-void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
u32 coreclk, reg_val;
- /* Enable Refclk */
- intel_de_write(dev_priv, DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
-
- /* No need to actually set up the DPLL with DSI */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
vlv_dpio_get(dev_priv);
- bestn = pipe_config->dpll.n;
- bestm1 = pipe_config->dpll.m1;
- bestm2 = pipe_config->dpll.m2;
- bestp1 = pipe_config->dpll.p1;
- bestp2 = pipe_config->dpll.p2;
+ bestn = crtc_state->dpll.n;
+ bestm1 = crtc_state->dpll.m1;
+ bestm2 = crtc_state->dpll.m2;
+ bestp1 = crtc_state->dpll.p1;
+ bestp2 = crtc_state->dpll.p2;
/* See eDP HDMI DPIO driver vbios notes doc */
@@ -1614,16 +1559,16 @@ void vlv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
- if (pipe_config->port_clock == 162000 ||
- intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
- intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ if (crtc_state->port_clock == 162000 ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
- if (intel_crtc_has_dp_encoder(pipe_config)) {
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -1643,7 +1588,7 @@ void vlv_prepare_pll(struct intel_crtc *crtc,
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_crtc_has_dp_encoder(pipe_config))
+ if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
@@ -1652,11 +1597,50 @@ void vlv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_put(dev_priv);
}
-void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
+static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+ udelay(150);
+
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
+ drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
+}
+
+void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_pps_unlocked(dev_priv, pipe);
+
+ /* Enable Refclk */
+ intel_de_write(dev_priv, DPLL(pipe),
+ crtc_state->dpll_hw_state.dpll &
+ ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+
+ if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ vlv_prepare_pll(crtc_state);
+ _vlv_enable_pll(crtc_state);
+ }
+
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ crtc_state->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
+}
+
+static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, tribuf_calcntr;
@@ -1664,21 +1648,13 @@ void chv_prepare_pll(struct intel_crtc *crtc,
u32 dpio_val;
int vco;
- /* Enable Refclk and SSC */
- intel_de_write(dev_priv, DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
-
- /* No need to actually set up the DPLL with DSI */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- bestn = pipe_config->dpll.n;
- bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
- bestm1 = pipe_config->dpll.m1;
- bestm2 = pipe_config->dpll.m2 >> 22;
- bestp1 = pipe_config->dpll.p1;
- bestp2 = pipe_config->dpll.p2;
- vco = pipe_config->dpll.vco;
+ bestn = crtc_state->dpll.n;
+ bestm2_frac = crtc_state->dpll.m2 & 0x3fffff;
+ bestm1 = crtc_state->dpll.m1;
+ bestm2 = crtc_state->dpll.m2 >> 22;
+ bestp1 = crtc_state->dpll.p1;
+ bestp2 = crtc_state->dpll.p2;
+ vco = crtc_state->dpll.vco;
dpio_val = 0;
loopfilter = 0;
@@ -1757,6 +1733,83 @@ void chv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_put(dev_priv);
}
+static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 tmp;
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable back the 10bit clock to display controller */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp |= DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+
+ vlv_dpio_put(dev_priv);
+
+ /*
+ * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
+ */
+ udelay(1);
+
+ /* Enable PLL */
+ intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+
+ /* Check PLL is locked */
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
+ drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
+}
+
+void chv_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_pps_unlocked(dev_priv, pipe);
+
+ /* Enable Refclk and SSC */
+ intel_de_write(dev_priv, DPLL(pipe),
+ crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+ if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ chv_prepare_pll(crtc_state);
+ _chv_enable_pll(crtc_state);
+ }
+
+ if (pipe != PIPE_A) {
+ /*
+ * WaPixelRepeatModeFixForC0:chv
+ *
+ * DPLLCMD is AWOL. Use chicken bits to propagate
+ * the value from DPLLBMD to either pipe B or C.
+ */
+ intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B),
+ crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, CBR4_VLV, 0);
+ dev_priv->chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
+
+ /*
+ * DPLLB VGA mode also seems to cause problems.
+ * We should always have it disabled.
+ */
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) &
+ DPLL_VGA_MODE_DIS) == 0);
+ } else {
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ crtc_state->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
+ }
+}
+
/**
* vlv_force_pll_on - forcibly enable just the PLL
* @dev_priv: i915 private structure
@@ -1771,27 +1824,26 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll)
{
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_crtc_state *pipe_config;
+ struct intel_crtc_state *crtc_state;
- pipe_config = intel_crtc_state_alloc(crtc);
- if (!pipe_config)
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state)
return -ENOMEM;
- pipe_config->cpu_transcoder = (enum transcoder)pipe;
- pipe_config->pixel_multiplier = 1;
- pipe_config->dpll = *dpll;
+ crtc_state->cpu_transcoder = (enum transcoder)pipe;
+ crtc_state->pixel_multiplier = 1;
+ crtc_state->dpll = *dpll;
+ crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
if (IS_CHERRYVIEW(dev_priv)) {
- chv_compute_dpll(crtc, pipe_config);
- chv_prepare_pll(crtc, pipe_config);
- chv_enable_pll(crtc, pipe_config);
+ chv_compute_dpll(crtc_state);
+ chv_enable_pll(crtc_state);
} else {
- vlv_compute_dpll(crtc, pipe_config);
- vlv_prepare_pll(crtc, pipe_config);
- vlv_enable_pll(crtc, pipe_config);
+ vlv_compute_dpll(crtc_state);
+ vlv_enable_pll(crtc_state);
}
- kfree(pipe_config);
+ kfree(crtc_state);
return 0;
}
@@ -1801,7 +1853,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
+ assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
val = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1818,7 +1870,7 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
+ assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
val = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1849,7 +1901,7 @@ void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
return;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
intel_de_posting_read(dev_priv, DPLL(pipe));
@@ -1871,3 +1923,25 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
else
vlv_disable_pll(dev_priv, pipe);
}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ cur_state = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
+ I915_STATE_WARN(cur_state != state,
+ "PLL state assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
+
+void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_pll(i915, pipe, true);
+}
+
+void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_pll(i915, pipe, false);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 88247027fd5a..1af0ac43cca4 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -18,29 +18,25 @@ void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
int vlv_calc_dpll_params(int refclk, struct dpll *clock);
int pnv_calc_dpll_params(int refclk, struct dpll *clock);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
-void vlv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void chv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
+u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
+void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
+void chv_compute_dpll(struct intel_crtc_state *crtc_state);
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
-void i9xx_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state);
-void vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
-void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+
+void chv_enable_pll(const struct intel_crtc_state *crtc_state);
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_enable_pll(const struct intel_crtc_state *crtc_state);
+void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i9xx_enable_pll(const struct intel_crtc_state *crtc_state);
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
-void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
-void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
+void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 5c91d125a337..0a7e04db04be 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -26,6 +26,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
+#include "intel_tc.h"
/**
* DOC: Display PLLs
@@ -185,34 +186,6 @@ intel_tc_pll_enable_reg(struct drm_i915_private *i915,
}
/**
- * intel_prepare_shared_dpll - call a dpll's prepare hook
- * @crtc_state: CRTC, and its state, which has a shared dpll
- *
- * This calls the PLL's prepare hook if it has one and if the PLL is not
- * already enabled. The prepare hook is platform specific.
- */
-void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-
- if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
- return;
-
- mutex_lock(&dev_priv->dpll.lock);
- drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask);
- if (!pll->active_mask) {
- drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
- drm_WARN_ON(&dev_priv->drm, pll->on);
- assert_shared_dpll_disabled(dev_priv, pll);
-
- pll->info->funcs->prepare(dev_priv, pll);
- }
- mutex_unlock(&dev_priv->dpll.lock);
-}
-
-/**
* intel_enable_shared_dpll - enable a CRTC's shared DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL
*
@@ -451,15 +424,6 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
return val & DPLL_VCO_ENABLE;
}
-static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- const enum intel_dpll_id id = pll->info->id;
-
- intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
- intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
-}
-
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -481,6 +445,9 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
+ intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
+ intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
+
intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
/* Wait for the clocks to stabilize. */
@@ -558,7 +525,6 @@ static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
}
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
- .prepare = ibx_pch_dpll_prepare,
.enable = ibx_pch_dpll_enable,
.disable = ibx_pch_dpll_disable,
.get_hw_state = ibx_pch_dpll_get_hw_state,
@@ -3136,8 +3102,8 @@ static void icl_update_active_dpll(struct intel_atomic_state *state,
enc_to_dig_port(encoder);
if (primary_port &&
- (primary_port->tc_mode == TC_PORT_DP_ALT ||
- primary_port->tc_mode == TC_PORT_LEGACY))
+ (intel_tc_port_in_dp_alt_mode(primary_port) ||
+ intel_tc_port_in_legacy_mode(primary_port)))
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
icl_set_active_port_dpll(crtc_state, port_dpll_id);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 30e0aa5ca109..2f59d863be4c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -256,16 +256,6 @@ struct intel_shared_dpll_state {
*/
struct intel_shared_dpll_funcs {
/**
- * @prepare:
- *
- * Optional hook to perform operations prior to enabling the PLL.
- * Called from intel_prepare_shared_dpll() function unless the PLL
- * is already enabled.
- */
- void (*prepare)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
-
- /**
* @enable:
*
* Hook for enabling the pll, called from intel_enable_shared_dpll()
@@ -404,7 +394,6 @@ int intel_dpll_get_freq(struct drm_i915_private *i915,
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
-void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
new file mode 100644
index 000000000000..8f7b1f7534a4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dpt.h"
+#include "intel_fb.h"
+#include "gt/gen8_ppgtt.h"
+
+struct i915_dpt {
+ struct i915_address_space vm;
+
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+};
+
+#define i915_is_dpt(vm) ((vm)->is_dpt)
+
+static inline struct i915_dpt *
+i915_vm_to_dpt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
+ GEM_BUG_ON(!i915_is_dpt(vm));
+ return container_of(vm, struct i915_dpt, vm);
+}
+
+#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void dpt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+
+ gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
+ vm->pte_encode(addr, level, flags));
+}
+
+static void dpt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+ const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
+ struct sgt_iter sgt_iter;
+ dma_addr_t addr;
+ int i;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
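+	/* PTE index of the vma's first page within the DPT */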
+ i = vma->node.start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+ gen8_set_pte(&base[i++], pte_encode | addr);
+}
+
+static void dpt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void dpt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ u32 pte_flags;
+
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ pte_flags = 0;
+ if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
+ pte_flags |= PTE_READ_ONLY;
+ if (i915_gem_object_is_lmem(obj))
+ pte_flags |= PTE_LM;
+
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
+ /*
+ * Without aliasing PPGTT there's no difference between
+ * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+ * upgrade to both bound if we bind either to avoid double-binding.
+ */
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+}
+
+static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+{
+ vm->clear_range(vm, vma->node.start, vma->size);
+}
+
+static void dpt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_gem_object_put(dpt->obj);
+}
+
+struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
+{
+ struct drm_i915_private *i915 = vm->i915;
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ atomic_inc(&i915->gpu_error.pending_fb_pin);
+
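+	/* for_i915_gem_ww() retries the body on ww-mutex deadlock (-EDEADLK) */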
+ for_i915_gem_ww(&ww, err, true) {
+ err = i915_gem_object_lock(dpt->obj, &ww);
+ if (err)
+ continue;
+
+ vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
+ HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ continue;
+ }
+
+ iomem = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+
+ if (IS_ERR(iomem)) {
+ err = PTR_ERR(iomem);
+ continue;
+ }
+
+ dpt->vma = vma;
+ dpt->iomem = iomem;
+
+ i915_vma_get(vma);
+ }
+
+ atomic_dec(&i915->gpu_error.pending_fb_pin);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return err ? ERR_PTR(err) : vma;
+}
+
+void intel_dpt_unpin(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_vma_unpin_iomap(dpt->vma);
+ i915_vma_put(dpt->vma);
+}
+
+struct i915_address_space *
+intel_dpt_create(struct intel_framebuffer *fb)
+{
+ struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
+ struct drm_i915_private *i915 = to_i915(obj->dev);
+ struct drm_i915_gem_object *dpt_obj;
+ struct i915_address_space *vm;
+ struct i915_dpt *dpt;
+ size_t size;
+ int ret;
+
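+	/*
+	 * Size the DPT: one gen8 PTE per GTT page of the (possibly
+	 * remapped) framebuffer, with the page table object itself
+	 * rounded up to a full GTT page.
+	 */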
+ if (intel_fb_needs_pot_stride_remap(fb))
+ size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
+ else
+ size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
+
+ size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
+
+ if (HAS_LMEM(i915))
+ dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
+ else
+ dpt_obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(dpt_obj))
+ return ERR_CAST(dpt_obj);
+
+ ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+ if (ret) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(ret);
+ }
+
+ dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
+ if (!dpt) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vm = &dpt->vm;
+
+ vm->gt = &i915->gt;
+ vm->i915 = i915;
+ vm->dma = i915->drm.dev;
+ vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ vm->is_dpt = true;
+
+ i915_address_space_init(vm, VM_CLASS_DPT);
+
+ vm->insert_page = dpt_insert_page;
+ vm->clear_range = dpt_clear_range;
+ vm->insert_entries = dpt_insert_entries;
+ vm->cleanup = dpt_cleanup;
+
+ vm->vma_ops.bind_vma = dpt_bind_vma;
+ vm->vma_ops.unbind_vma = dpt_unbind_vma;
+ vm->vma_ops.set_pages = ggtt_set_pages;
+ vm->vma_ops.clear_pages = clear_pages;
+
+ vm->pte_encode = gen8_ggtt_pte_encode;
+
+ dpt->obj = dpt_obj;
+
+ return &dpt->vm;
+}
+
+void intel_dpt_destroy(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_vm_close(&dpt->vm);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
new file mode 100644
index 000000000000..45142b8f849f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DPT_H__
+#define __INTEL_DPT_H__
+
+struct i915_address_space;
+struct i915_vma;
+struct intel_framebuffer;
+
+void intel_dpt_destroy(struct i915_address_space *vm);
+struct i915_vma *intel_dpt_pin(struct i915_address_space *vm);
+void intel_dpt_unpin(struct i915_address_space *vm);
+struct i915_address_space *
+intel_dpt_create(struct intel_framebuffer *fb);
+
+#endif /* __INTEL_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
new file mode 100644
index 000000000000..c1439fcb5a95
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_drrs.h"
+#include "intel_panel.h"
+
+/**
+ * DOC: Display Refresh Rate Switching (DRRS)
+ *
+ * Display Refresh Rate Switching (DRRS) is a power conservation feature
+ * which enables switching between low and high refresh rates,
+ * dynamically, based on the usage scenario. This feature is applicable
+ * for internal panels.
+ *
+ * Indication that the panel supports DRRS is given by the panel EDID, which
+ * lists multiple refresh rates for one resolution.
+ *
+ * DRRS is of two types: static and seamless.
+ * Static DRRS involves changing the refresh rate (RR) by doing a full modeset
+ * (may appear as a blink on screen) and is used in dock-undock scenarios.
+ * Seamless DRRS involves changing the RR without any visible effect to the
+ * user and can be used during normal system usage. This is done by
+ * programming certain registers.
+ *
+ * Support for static/seamless DRRS may be indicated in the VBT based on
+ * inputs from the panel spec.
+ *
+ * DRRS saves power by switching to low RR based on usage scenarios.
+ *
+ * The implementation builds on the frontbuffer tracking code. When there is
+ * a disturbance on the screen triggered by user activity or periodic system
+ * activity, DRRS is disabled (the RR is changed to the high RR). When there
+ * is no movement on screen, after a timeout of 1 second, a switch to the low
+ * RR is made.
+ *
+ * For integration with frontbuffer tracking code, intel_drrs_invalidate()
+ * and intel_drrs_flush() are called.
+ *
+ * DRRS can be further extended to support other internal panels and also
+ * the scenario of video playback wherein RR is set based on the rate
+ * requested by userspace.
+ */
+
+void
+intel_drrs_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ int output_bpp, bool constant_n)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int pixel_clock;
+
+ if (pipe_config->vrr.enable)
+ return;
+
+ /*
+ * DRRS and PSR can't be enabled together, so preference is given to PSR,
+ * as it allows more power savings by completely shutting down the
+ * display. To guarantee this, intel_drrs_compute_config() must be
+ * called after intel_psr_compute_config().
+ */
+ if (pipe_config->has_psr)
+ return;
+
+ if (!intel_connector->panel.downclock_mode ||
+ dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
+ return;
+
+ pipe_config->has_drrs = true;
+
+ pixel_clock = intel_connector->panel.downclock_mode->clock;
+ if (pipe_config->splitter.enable)
+ pixel_clock /= pipe_config->splitter.link_count;
+
+ intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
+ pipe_config->port_clock, &pipe_config->dp_m2_n2,
+ constant_n, pipe_config->fec_enable);
+
+ /* FIXME: abstract this better */
+ if (pipe_config->splitter.enable)
+ pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
+}
+
+static void intel_drrs_set_state(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *crtc_state,
+ enum drrs_refresh_rate_type refresh_type)
+{
+ struct intel_dp *intel_dp = dev_priv->drrs.dp;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_display_mode *mode;
+
+ if (!intel_dp) {
+ drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
+ return;
+ }
+
+ if (!crtc) {
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS: intel_crtc not initialized\n");
+ return;
+ }
+
+ if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
+ drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
+ return;
+ }
+
+ if (refresh_type == dev_priv->drrs.refresh_rate_type)
+ return;
+
+ if (!crtc_state->hw.active) {
+ drm_dbg_kms(&dev_priv->drm,
+ "eDP encoder disabled. CRTC not Active\n");
+ return;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
+ switch (refresh_type) {
+ case DRRS_HIGH_RR:
+ intel_dp_set_m_n(crtc_state, M1_N1);
+ break;
+ case DRRS_LOW_RR:
+ intel_dp_set_m_n(crtc_state, M2_N2);
+ break;
+ case DRRS_MAX_RR:
+ default:
+ drm_err(&dev_priv->drm,
+ "Unsupported refreshrate type\n");
+ }
+ } else if (DISPLAY_VER(dev_priv) > 6) {
+ i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
+ u32 val;
+
+ val = intel_de_read(dev_priv, reg);
+ if (refresh_type == DRRS_LOW_RR) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ else
+ val |= PIPECONF_EDP_RR_MODE_SWITCH;
+ } else {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ else
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
+ }
+ intel_de_write(dev_priv, reg, val);
+ }
+
+ dev_priv->drrs.refresh_rate_type = refresh_type;
+
+ if (refresh_type == DRRS_LOW_RR)
+ mode = intel_dp->attached_connector->panel.downclock_mode;
+ else
+ mode = intel_dp->attached_connector->panel.fixed_mode;
+ drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
+ drm_mode_vrefresh(mode));
+}
+
+static void
+intel_drrs_enable_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ dev_priv->drrs.busy_frontbuffer_bits = 0;
+ dev_priv->drrs.dp = intel_dp;
+}
+
+/**
+ * intel_drrs_enable - init drrs struct if supported
+ * @intel_dp: DP struct
+ * @crtc_state: A pointer to the active crtc state.
+ *
+ * Initializes frontbuffer_bits and drrs.dp
+ */
+void intel_drrs_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!crtc_state->has_drrs)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ if (dev_priv->drrs.dp) {
+ drm_warn(&dev_priv->drm, "DRRS already enabled\n");
+ goto unlock;
+ }
+
+ intel_drrs_enable_locked(intel_dp);
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+static void
+intel_drrs_disable_locked(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ intel_drrs_set_state(dev_priv, crtc_state, DRRS_HIGH_RR);
+ dev_priv->drrs.dp = NULL;
+}
+
+/**
+ * intel_drrs_disable - Disable DRRS
+ * @intel_dp: DP struct
+ * @old_crtc_state: Pointer to old crtc_state.
+ *
+ */
+void intel_drrs_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!old_crtc_state->has_drrs)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
+ intel_drrs_disable_locked(intel_dp, old_crtc_state);
+ mutex_unlock(&dev_priv->drrs.mutex);
+
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+}
+
+/**
+ * intel_drrs_update - Update DRRS state
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This function will update the DRRS state, disabling or enabling DRRS when
+ * executing fastsets. For full modeset, intel_drrs_disable() and
+ * intel_drrs_enable() should be called instead.
+ */
+void
+intel_drrs_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ /* New state matches current one? */
+ if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
+ goto unlock;
+
+ if (crtc_state->has_drrs)
+ intel_drrs_enable_locked(intel_dp);
+ else
+ intel_drrs_disable_locked(intel_dp, crtc_state);
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+static void intel_drrs_downclock_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), drrs.work.work);
+ struct intel_dp *intel_dp;
+ struct drm_crtc *crtc;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ intel_dp = dev_priv->drrs.dp;
+
+ if (!intel_dp)
+ goto unlock;
+
+ /*
+ * The delayed work can race with an invalidate, hence we need to
+ * recheck.
+ */
+
+ if (dev_priv->drrs.busy_frontbuffer_bits)
+ goto unlock;
+
+ crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+ intel_drrs_set_state(dev_priv, to_intel_crtc(crtc)->config, DRRS_LOW_RR);
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ bool invalidate)
+{
+ struct intel_dp *intel_dp;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
+ return;
+
+ cancel_delayed_work(&dev_priv->drrs.work);
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ intel_dp = dev_priv->drrs.dp;
+ if (!intel_dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
+ crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ if (invalidate)
+ dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+ else
+ dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /* flush/invalidate means busy screen, hence upclock */
+ if (frontbuffer_bits)
+ intel_drrs_set_state(dev_priv, to_intel_crtc(crtc)->config,
+ DRRS_HIGH_RR);
+
+ /*
+ * A flush also means no more activity, hence schedule the downclock
+ * work if all other fbs are quiescent too.
+ */
+ if (!invalidate && !dev_priv->drrs.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->drrs.work,
+ msecs_to_jiffies(1000));
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+/**
+ * intel_drrs_invalidate - Disable Idleness DRRS
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes
+ * starts, hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
+void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
+{
+ intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true);
+}
+
+/**
+ * intel_drrs_flush - Restart Idleness DRRS
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed or a flip on a crtc is completed, so DRRS should be upclocked
+ * (LOW_RR -> HIGH_RR). Idleness detection is also restarted if no other
+ * planes are dirty.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
+void intel_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
+{
+ intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
+}
+
+void intel_drrs_page_flip(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ unsigned int frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
+
+ intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
+}
+
+/**
+ * intel_drrs_init - Init basic DRRS work and mutex.
+ * @connector: eDP connector
+ * @fixed_mode: preferred mode of panel
+ *
+ * This function is called only once at driver load to initialize basic
+ * DRRS state.
+ *
+ * Returns:
+ * Downclock mode if the panel supports it, else NULL.
+ * DRRS support is determined by the presence of a downclock mode (apart
+ * from the VBT setting).
+ */
+struct drm_display_mode *
+intel_drrs_init(struct intel_connector *connector,
+ struct drm_display_mode *fixed_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_display_mode *downclock_mode = NULL;
+
+ INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);
+ mutex_init(&dev_priv->drrs.mutex);
+
+ if (DISPLAY_VER(dev_priv) <= 6) {
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS supported for Gen7 and above\n");
+ return NULL;
+ }
+
+ if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
+ drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
+ return NULL;
+ }
+
+ downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
+ if (!downclock_mode) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Downclock mode is not found. DRRS not supported\n");
+ return NULL;
+ }
+
+ dev_priv->drrs.type = dev_priv->vbt.drrs_type;
+
+ dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
+ drm_dbg_kms(&dev_priv->drm,
+ "seamless DRRS supported for eDP panel.\n");
+ return downclock_mode;
+}
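
A minimal sketch of how the frontbuffer tracking code is expected to drive the entry points above (example_frontbuffer_hooks() is a hypothetical caller, not part of the patch):

/* Illustrative caller of the DRRS frontbuffer hooks above. */
static void example_frontbuffer_hooks(struct drm_i915_private *i915,
				      unsigned int frontbuffer_bits)
{
	/* rendering started: upclock to the high refresh rate */
	intel_drrs_invalidate(i915, frontbuffer_bits);

	/* rendering done: upclock now, re-arm the 1 s downclock timer */
	intel_drrs_flush(i915, frontbuffer_bits);
}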
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
new file mode 100644
index 000000000000..9ec9c447211a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DRRS_H__
+#define __INTEL_DRRS_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_connector;
+struct intel_dp;
+
+void intel_drrs_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_drrs_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_drrs_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+void intel_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+void intel_drrs_page_flip(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_drrs_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ int output_bpp, bool constant_n);
+struct drm_display_mode *intel_drrs_init(struct intel_connector *connector,
+ struct drm_display_mode *fixed_mode);
+
+#endif /* __INTEL_DRRS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index f453ceb8d149..6b0301ba046e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -5,6 +5,7 @@
#include <drm/drm_mipi_dsi.h>
#include "intel_dsi.h"
+#include "intel_panel.h"
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
{
@@ -60,20 +61,19 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ enum drm_mode_status status;
drm_dbg_kms(&dev_priv->drm, "\n");
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
- if (fixed_mode->clock > max_dotclk)
- return MODE_CLOCK_HIGH;
- }
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (fixed_mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
return intel_mode_valid_max_plane_size(dev_priv, mode, false);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 50d6da0b2419..fbc40ffdc02e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -207,6 +207,9 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+void assert_dsi_pll_enabled(struct drm_i915_private *i915);
+void assert_dsi_pll_disabled(struct drm_i915_private *i915);
+
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 584c14c4cbd0..f61ed82e8867 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -47,33 +47,42 @@ static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused
{
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_panel *panel = &connector->panel;
struct mipi_dsi_device *dsi_device;
- u8 data = 0;
+ u8 data[2] = {};
enum port port;
+ size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
- /* FIXME: Need to take care of 16 bit brightness level */
for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
- &data, sizeof(data));
+ &data, len);
break;
}
- return data;
+ return (data[1] << 8) | data[0];
}
static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
- u8 data = level;
+ u8 data[2] = {};
enum port port;
+ size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
+
+ if (len == 1) {
+ data[0] = level;
+ } else {
+ data[0] = level >> 8;
+ data[1] = level;
+ }
- /* FIXME: Need to take care of 16 bit brightness level */
for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
- &data, sizeof(data));
+ &data, len);
}
}
@@ -147,10 +156,16 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
- panel->backlight.max = PANEL_PWM_MAX_VALUE;
- panel->backlight.level = PANEL_PWM_MAX_VALUE;
+ if (dev_priv->vbt.backlight.brightness_precision_bits > 8)
+ panel->backlight.max = (1 << dev_priv->vbt.backlight.brightness_precision_bits) - 1;
+ else
+ panel->backlight.max = PANEL_PWM_MAX_VALUE;
+
+ panel->backlight.level = panel->backlight.max;
return 0;
}
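
A standalone sketch of the 16-bit brightness handling introduced above; the helpers are hypothetical and simply mirror dcs_set_backlight() and dcs_setup_backlight() (PANEL_PWM_MAX_VALUE is the file-local 8-bit default):

/* Pack a brightness level for MIPI_DCS_SET_DISPLAY_BRIGHTNESS. */
static void example_pack_brightness(u32 level, u8 data[2], size_t len)
{
	if (len == 1) {
		data[0] = level;	/* legacy 8-bit panels */
	} else {
		data[0] = level >> 8;	/* 16-bit panels, MSB first */
		data[1] = level;
	}
}

/* Derive the max level from the VBT precision, as the setup hook does. */
static u32 example_backlight_max(int precision_bits)
{
	if (precision_bits > 8)
		return (1 << precision_bits) - 1;	/* e.g. 16 -> 0xffff */
	return PANEL_PWM_MAX_VALUE;			/* 8-bit default */
}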
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index c2a2cd1f84dc..f241bedb8597 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -31,7 +31,6 @@
#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
-#include <asm/intel-mid.h>
#include <asm/unaligned.h>
#include <drm/drm_crtc.h>
@@ -42,7 +41,7 @@
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 77419f8c05e9..2eeb209afc64 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -223,9 +223,10 @@ static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(intel_connector);
const struct drm_display_mode *fixed_mode =
- to_intel_connector(connector)->panel.fixed_mode;
+ intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int target_clock = mode->clock;
@@ -235,10 +236,11 @@ intel_dvo_mode_valid(struct drm_connector *connector,
/* XXX: Validate clock range */
if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
+ enum drm_mode_status status;
+
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
target_clock = fixed_mode->clock;
}
@@ -254,6 +256,7 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *fixed_mode =
intel_dvo->attached_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -264,8 +267,13 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- if (fixed_mode)
- intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ if (fixed_mode) {
+ int ret;
+
+ ret = intel_panel_compute_config(connector, adjusted_mode);
+ if (ret)
+ return ret;
+ }
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index c60a81a81c09..fa1f375e696b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -4,9 +4,11 @@
*/
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper.h>
#include "intel_display.h"
#include "intel_display_types.h"
+#include "intel_dpt.h"
#include "intel_fb.h"
#define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
@@ -61,6 +63,38 @@ int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
return ccs_plane - fb->format->num_planes / 2;
}
+static unsigned int gen12_aligned_scanout_stride(const struct intel_framebuffer *fb,
+ int color_plane)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ unsigned int stride = fb->base.pitches[color_plane];
+
+ if (IS_ALDERLAKE_P(i915))
+ return roundup_pow_of_two(max(stride,
+ 8u * intel_tile_width_bytes(&fb->base, color_plane)));
+
+ return stride;
+}
+
+static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_plane)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ int main_plane = skl_ccs_to_main_plane(&fb->base, ccs_plane);
+ unsigned int main_stride = fb->base.pitches[main_plane];
+ unsigned int main_tile_width = intel_tile_width_bytes(&fb->base, main_plane);
+
+ /*
+ * On ADL-P the AUX stride must align with a power-of-two aligned main
+ * surface stride. The stride of the allocated main surface object can
+ * be less than this POT stride, which is then autopadded to the POT
+ * size.
+ */
+ if (IS_ALDERLAKE_P(i915))
+ main_stride = gen12_aligned_scanout_stride(fb, main_plane);
+
+ return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64;
+}
+
int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
{
struct drm_i915_private *i915 = to_i915(fb->dev);
@@ -79,16 +113,70 @@ unsigned int intel_tile_size(const struct drm_i915_private *i915)
return DISPLAY_VER(i915) == 2 ? 2048 : 4096;
}
-unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
+unsigned int
+intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
- if (is_gen12_ccs_plane(fb, color_plane))
- return 1;
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ unsigned int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ return intel_tile_size(dev_priv);
+ case I915_FORMAT_MOD_X_TILED:
+ if (DISPLAY_VER(dev_priv) == 2)
+ return 128;
+ else
+ return 512;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ if (is_ccs_plane(fb, color_plane))
+ return 128;
+ fallthrough;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (is_ccs_plane(fb, color_plane))
+ return 64;
+ fallthrough;
+ case I915_FORMAT_MOD_Y_TILED:
+ if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
+ return 128;
+ else
+ return 512;
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (is_ccs_plane(fb, color_plane))
+ return 128;
+ fallthrough;
+ case I915_FORMAT_MOD_Yf_TILED:
+ switch (cpp) {
+ case 1:
+ return 64;
+ case 2:
+ case 4:
+ return 128;
+ case 8:
+ case 16:
+ return 256;
+ default:
+ MISSING_CASE(cpp);
+ return cpp;
+ }
+ break;
+ default:
+ MISSING_CASE(fb->modifier);
+ return cpp;
+ }
+}
+unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
+{
return intel_tile_size(to_i915(fb->dev)) /
intel_tile_width_bytes(fb, color_plane);
}
-/* Return the tile dimensions in pixel units */
+/*
+ * Return the tile dimensions in pixel units, based on the (2 or 4 kbyte) GTT
+ * page tile size.
+ */
static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
unsigned int *tile_width,
unsigned int *tile_height)
@@ -100,6 +188,21 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
*tile_height = intel_tile_height(fb, color_plane);
}
+/*
+ * Return the tile dimensions in pixel units, based on the tile block size.
+ * The block covers the full GTT page sized tile on all tiled surfaces and
+ * it's a 64 byte portion of the tile on TGL+ CCS surfaces.
+ */
+static void intel_tile_block_dims(const struct drm_framebuffer *fb, int color_plane,
+ unsigned int *tile_width,
+ unsigned int *tile_height)
+{
+ intel_tile_dims(fb, color_plane, tile_width, tile_height);
+
+ if (is_gen12_ccs_plane(fb, color_plane))
+ *tile_height = 1;
+}
+
unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane)
{
unsigned int tile_width, tile_height;
@@ -109,6 +212,31 @@ unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_pla
return fb->pitches[color_plane] * tile_height;
}
+unsigned int
+intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height)
+{
+ unsigned int tile_height = intel_tile_height(fb, color_plane);
+
+ return ALIGN(height, tile_height);
+}
+
+static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
+{
+ switch (fb_modifier) {
+ case I915_FORMAT_MOD_X_TILED:
+ return I915_TILING_X;
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return I915_TILING_Y;
+ default:
+ return I915_TILING_NONE;
+ }
+}
+
unsigned int intel_cursor_alignment(const struct drm_i915_private *i915)
{
if (IS_I830(i915))
@@ -121,6 +249,70 @@ unsigned int intel_cursor_alignment(const struct drm_i915_private *i915)
return 4 * 1024;
}
+static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 9)
+ return 256 * 1024;
+ else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return 128 * 1024;
+ else if (DISPLAY_VER(dev_priv) >= 4)
+ return 4 * 1024;
+ else
+ return 0;
+}
+
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+
+ if (intel_fb_uses_dpt(fb))
+ return 512 * 4096;
+
+ /* AUX_DIST needs only 4K alignment */
+ if (is_ccs_plane(fb, color_plane))
+ return 4096;
+
+ if (is_semiplanar_uv_plane(fb, color_plane)) {
+ /*
+ * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
+ * alignment for linear UV planes on all platforms.
+ */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ return intel_linear_alignment(dev_priv);
+
+ return intel_tile_row_size(fb, color_plane);
+ }
+
+ return 4096;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, color_plane != 0);
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ return intel_linear_alignment(dev_priv);
+ case I915_FORMAT_MOD_X_TILED:
+ if (HAS_ASYNC_FLIPS(dev_priv))
+ return 256 * 1024;
+ return 0;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ return 16 * 1024;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ return 1 * 1024 * 1024;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 0;
+ }
+}
+
void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
const struct drm_framebuffer *fb,
int color_plane)
@@ -165,15 +357,29 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h)
{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
int main_plane = is_ccs_plane(&fb->base, color_plane) ?
skl_ccs_to_main_plane(&fb->base, color_plane) : 0;
+ unsigned int main_width = fb->base.width;
+ unsigned int main_height = fb->base.height;
int main_hsub, main_vsub;
int hsub, vsub;
+ /*
+ * On ADL-P the CCS AUX surface layout always aligns with the
+ * power-of-two aligned main surface stride. The main surface
+ * stride in the allocated FB object may not be power-of-two
+ * sized, in which case it is auto-padded to the POT size.
+ */
+ if (IS_ALDERLAKE_P(i915) && is_ccs_plane(&fb->base, color_plane))
+ main_width = gen12_aligned_scanout_stride(fb, 0) /
+ fb->base.format->cpp[0];
+
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane);
intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane);
- *w = fb->base.width / main_hsub / hsub;
- *h = fb->base.height / main_vsub / vsub;
+
+ *w = main_width / main_hsub / hsub;
+ *h = main_height / main_vsub / vsub;
}
static u32 intel_adjust_tile_offset(int *x, int *y,
@@ -355,17 +561,8 @@ static int intel_fb_offset_to_xy(int *x, int *y,
unsigned int height;
u32 alignment;
- /*
- * All DPT color planes must be 512*4k aligned (the amount mapped by a
- * single DPT page). For ADL_P CCS FBs this only works by requiring
- * the allocated offsets to be 2MB aligned. Once supoort to remap
- * such FBs is added we can remove this requirement, as then all the
- * planes can be remapped to an aligned offset.
- */
- if (IS_ALDERLAKE_P(i915) && is_ccs_modifier(fb->modifier))
- alignment = 512 * 4096;
- else if (DISPLAY_VER(i915) >= 12 &&
- is_semiplanar_uv_plane(fb, color_plane))
+ if (DISPLAY_VER(i915) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
alignment = intel_tile_row_size(fb, color_plane);
else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
alignment = intel_tile_size(i915);
@@ -416,7 +613,12 @@ static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane
if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
return 0;
- intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
+ /*
+ * While all the tile dimensions are based on a 2k or 4k GTT page size,
+ * here the main and CCS coordinates must match only within a (64 byte
+ * on TGL+) block inside the tile.
+ */
+ intel_tile_block_dims(fb, ccs_plane, &tile_width, &tile_height);
intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
tile_width *= hsub;
@@ -491,8 +693,7 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
{
struct drm_i915_private *i915 = to_i915(fb->base.dev);
- return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR &&
- !is_ccs_modifier(fb->base.modifier);
+ return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR;
}
static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
@@ -612,14 +813,16 @@ static unsigned int
plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane,
unsigned int pitch_tiles)
{
- if (intel_fb_needs_pot_stride_remap(fb))
+ if (intel_fb_needs_pot_stride_remap(fb)) {
+ unsigned int min_stride = is_ccs_plane(&fb->base, color_plane) ? 2 : 8;
/*
* ADL_P, the only platform needing a POT stride has a minimum
- * of 8 stride tiles.
+ * of 8 main surface and 2 CCS AUX stride tiles.
*/
- return roundup_pow_of_two(max(pitch_tiles, 8u));
- else
+ return roundup_pow_of_two(max(pitch_tiles, min_stride));
+ } else {
return pitch_tiles;
+ }
}
static unsigned int
@@ -655,7 +858,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
unsigned int tile_height = dims->tile_height;
unsigned int tile_size = intel_tile_size(i915);
struct drm_rect r;
- u32 size;
+ u32 size = 0;
assign_chk_ovf(i915, remap_info->offset, obj_offset);
assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims));
@@ -680,7 +883,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
color_plane_info->stride = remap_info->dst_stride * tile_height;
- size = remap_info->dst_stride * remap_info->width;
+ size += remap_info->dst_stride * remap_info->width;
/* rotate the tile dimensions to match the GTT view */
swap(tile_width, tile_height);
@@ -689,6 +892,14 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
check_array_bounds(i915, view->gtt.remapped.plane, color_plane);
+ if (view->gtt.remapped.plane_alignment) {
+ unsigned int aligned_offset = ALIGN(gtt_offset,
+ view->gtt.remapped.plane_alignment);
+
+ size += aligned_offset - gtt_offset;
+ gtt_offset = aligned_offset;
+ }
+
assign_chk_ovf(i915, remap_info->dst_stride,
plane_view_dst_stride_tiles(fb, color_plane, remap_info->width));
@@ -698,7 +909,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
color_plane_info->stride = remap_info->dst_stride * tile_width *
fb->base.format->cpp[color_plane];
- size = remap_info->dst_stride * remap_info->height;
+ size += remap_info->dst_stride * remap_info->height;
}
/*
@@ -745,10 +956,14 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane,
return tiles;
}
-static void intel_fb_view_init(struct intel_fb_view *view, enum i915_ggtt_view_type view_type)
+static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_view *view,
+ enum i915_ggtt_view_type view_type)
{
memset(view, 0, sizeof(*view));
view->gtt.type = view_type;
+
+ if (view_type == I915_GGTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
+ view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
}
bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb)
@@ -769,16 +984,16 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
int i, num_planes = fb->base.format->num_planes;
unsigned int tile_size = intel_tile_size(i915);
- intel_fb_view_init(&fb->normal_view, I915_GGTT_VIEW_NORMAL);
+ intel_fb_view_init(i915, &fb->normal_view, I915_GGTT_VIEW_NORMAL);
drm_WARN_ON(&i915->drm,
intel_fb_supports_90_270_rotation(fb) &&
intel_fb_needs_pot_stride_remap(fb));
if (intel_fb_supports_90_270_rotation(fb))
- intel_fb_view_init(&fb->rotated_view, I915_GGTT_VIEW_ROTATED);
+ intel_fb_view_init(i915, &fb->rotated_view, I915_GGTT_VIEW_ROTATED);
if (intel_fb_needs_pot_stride_remap(fb))
- intel_fb_view_init(&fb->remapped_view, I915_GGTT_VIEW_REMAPPED);
+ intel_fb_view_init(i915, &fb->remapped_view, I915_GGTT_VIEW_REMAPPED);
for (i = 0; i < num_planes; i++) {
struct fb_plane_view_dims view_dims;
@@ -856,7 +1071,7 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
unsigned int src_w, src_h;
u32 gtt_offset = 0;
- intel_fb_view_init(&plane_state->view,
+ intel_fb_view_init(i915, &plane_state->view,
drm_rotation_90_or_270(rotation) ? I915_GGTT_VIEW_ROTATED :
I915_GGTT_VIEW_REMAPPED);
@@ -918,6 +1133,79 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio
*view = fb->normal_view;
}
+static
+u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier)
+{
+ /*
+ * Arbitrary limit for gen4+ chosen to match the
+ * render engine max stride.
+ *
+ * The new CCS hash mode makes remapping impossible.
+ */
+ if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
+ intel_modifier_uses_dpt(dev_priv, modifier))
+ return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
+ else if (DISPLAY_VER(dev_priv) >= 7)
+ return 256 * 1024;
+ else
+ return 128 * 1024;
+}
+
+static u32
+intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ u32 tile_width;
+
+ if (is_surface_linear(fb, color_plane)) {
+ u32 max_stride = intel_plane_fb_max_stride(dev_priv,
+ fb->format->format,
+ fb->modifier);
+
+ /*
+ * To make remapping with linear generally feasible
+ * we need the stride to be page aligned.
+ */
+ if (fb->pitches[color_plane] > max_stride &&
+ !is_ccs_modifier(fb->modifier))
+ return intel_tile_size(dev_priv);
+ else
+ return 64;
+ }
+
+ tile_width = intel_tile_width_bytes(fb, color_plane);
+ if (is_ccs_modifier(fb->modifier)) {
+ /*
+ * On ADL-P the stride must be either 8 tiles or a stride
+ * that is aligned to 16 tiles, required by the 16 tiles =
+ * 64 kbyte CCS AUX PTE granularity, allowing CCS FBs to be
+ * remapped.
+ */
+ if (IS_ALDERLAKE_P(dev_priv))
+ tile_width *= fb->pitches[0] <= tile_width * 8 ? 8 : 16;
+ /*
+ * On TGL the surface stride must be 4 tile aligned, mapped by
+ * one 64 byte cacheline on the CCS AUX surface.
+ */
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ tile_width *= 4;
+ /*
+ * Display WA #0531: skl,bxt,kbl,glk
+ *
+ * Render decompression and plane width > 3840
+ * combined with horizontal panning requires the
+ * plane stride to be a multiple of 4. We'll just
+ * require the entire fb to accommodate that to avoid
+ * potential runtime errors at plane configuration time.
+ */
+ else if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
+ color_plane == 0 && fb->width > 3840)
+ tile_width *= 4;
+ }
+ return tile_width;
+}
+
static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -981,3 +1269,257 @@ int intel_plane_compute_gtt(struct intel_plane_state *plane_state)
return intel_plane_check_stride(plane_state);
}
+
+static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ if (intel_fb_uses_dpt(fb))
+ intel_dpt_destroy(intel_fb->dpt_vm);
+
+ intel_frontbuffer_put(intel_fb->frontbuffer);
+
+ kfree(intel_fb);
+}
+
+static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int *handle)
+{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ if (i915_gem_object_is_userptr(obj)) {
+ drm_dbg(&i915->drm,
+ "attempting to use a userptr for a framebuffer, denied\n");
+ return -EINVAL;
+ }
+
+ return drm_gem_handle_create(file, &obj->base, handle);
+}
+
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+
+ i915_gem_object_flush_if_display(obj);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+ .destroy = intel_user_framebuffer_destroy,
+ .create_handle = intel_user_framebuffer_create_handle,
+ .dirty = intel_user_framebuffer_dirty,
+};
+
+int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_framebuffer *fb = &intel_fb->base;
+ u32 max_stride;
+ unsigned int tiling, stride;
+ int ret = -EINVAL;
+ int i;
+
+ intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ if (!intel_fb->frontbuffer)
+ return -ENOMEM;
+
+ i915_gem_object_lock(obj, NULL);
+ tiling = i915_gem_object_get_tiling(obj);
+ stride = i915_gem_object_get_stride(obj);
+ i915_gem_object_unlock(obj);
+
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /*
+ * If there's a fence, enforce that
+ * the fb modifier and tiling mode match.
+ */
+ if (tiling != I915_TILING_NONE &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode doesn't match fb modifier\n");
+ goto err;
+ }
+ } else {
+ if (tiling == I915_TILING_X) {
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ } else if (tiling == I915_TILING_Y) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No Y tiling for legacy addfb\n");
+ goto err;
+ }
+ }
+
+ if (!drm_any_plane_has_format(&dev_priv->drm,
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ drm_dbg_kms(&dev_priv->drm,
+ "unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
+ goto err;
+ }
+
+ /*
+ * gen2/3 display engine uses the fence if present,
+ * so the tiling mode must match the fb modifier exactly.
+ */
+ if (DISPLAY_VER(dev_priv) < 4 &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
+ goto err;
+ }
+
+ max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
+ if (mode_cmd->pitches[0] > max_stride) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s pitch (%u) must be at most %d\n",
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
+ "tiled" : "linear",
+ mode_cmd->pitches[0], max_stride);
+ goto err;
+ }
+
+ /*
+ * If there's a fence, enforce that
+ * the fb pitch and fence stride match.
+ */
+ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+ drm_dbg_kms(&dev_priv->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
+ goto err;
+ }
+
+ /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+ if (mode_cmd->offsets[0] != 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "plane 0 offset (0x%08x) must be 0\n",
+ mode_cmd->offsets[0]);
+ goto err;
+ }
+
+ drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ u32 stride_alignment;
+
+ if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
+ drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
+ i);
+ goto err;
+ }
+
+ stride_alignment = intel_fb_stride_alignment(fb, i);
+ if (fb->pitches[i] & (stride_alignment - 1)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "plane %d pitch (%d) must be at least %u byte aligned\n",
+ i, fb->pitches[i], stride_alignment);
+ goto err;
+ }
+
+ if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
+ int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i);
+
+ if (fb->pitches[i] != ccs_aux_stride) {
+ drm_dbg_kms(&dev_priv->drm,
+ "ccs aux plane %d pitch (%d) must be %d\n",
+ i,
+ fb->pitches[i], ccs_aux_stride);
+ goto err;
+ }
+ }
+
+ fb->obj[i] = &obj->base;
+ }
+
+ ret = intel_fill_fb_info(dev_priv, intel_fb);
+ if (ret)
+ goto err;
+
+ if (intel_fb_uses_dpt(fb)) {
+ struct i915_address_space *vm;
+
+ vm = intel_dpt_create(intel_fb);
+ if (IS_ERR(vm)) {
+ ret = PTR_ERR(vm);
+ goto err;
+ }
+
+ intel_fb->dpt_vm = vm;
+ }
+
+ ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
+ if (ret) {
+ drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ intel_frontbuffer_put(intel_fb->frontbuffer);
+ return ret;
+}
+
+struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd)
+{
+ struct drm_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
+ struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
+ struct drm_i915_private *i915;
+
+ obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ /* object is backed with LMEM for discrete */
+ i915 = to_i915(obj->base.dev);
+ if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
+ /* object is "remote", not in local memory */
+ i915_gem_object_put(obj);
+ return ERR_PTR(-EREMOTE);
+ }
+
+ fb = intel_framebuffer_create(obj, &mode_cmd);
+ i915_gem_object_put(obj);
+
+ return fb;
+}
+
+struct drm_framebuffer *
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct intel_framebuffer *intel_fb;
+ int ret;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
+ if (ret)
+ goto err;
+
+ return &intel_fb->base;
+
+err:
+ kfree(intel_fb);
+ return ERR_PTR(ret);
+}
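
For the gen12_ccs_aux_stride() helper added earlier in this file, a worked example with assumed numbers (a 4096-byte Y-tiled main stride and a 128-byte tile width; the helper below is illustrative, not part of the patch):

/* Illustrative re-statement of gen12_ccs_aux_stride() for one case. */
static unsigned int example_ccs_aux_stride(void)
{
	unsigned int main_stride = 4096;	/* bytes, assumed */
	unsigned int main_tile_width = 128;	/* Y-tile width in bytes */

	/* one 64-byte AUX cacheline per 4 main tiles: 8 * 64 = 512 */
	return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64;
}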
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 739d1b91754b..1cbdd84502bd 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -8,10 +8,12 @@
#include <linux/types.h>
+struct drm_device;
+struct drm_file;
struct drm_framebuffer;
-
+struct drm_i915_gem_object;
struct drm_i915_private;
-
+struct drm_mode_fb_cmd2;
struct intel_fb_view;
struct intel_framebuffer;
struct intel_plane_state;
@@ -28,10 +30,14 @@ int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane);
int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
unsigned int intel_tile_size(const struct drm_i915_private *i915);
+unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane);
unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane);
unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane);
-
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height);
unsigned int intel_cursor_alignment(const struct drm_i915_private *i915);
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int color_plane);
void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
const struct drm_framebuffer *fb,
@@ -53,4 +59,12 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio
struct intel_fb_view *view);
int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
+int intel_framebuffer_init(struct intel_framebuffer *ifb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+
#endif /* __INTEL_FB_H__ */
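
A one-line restatement of the ADL-P rule in intel_fb_stride_alignment() above, as a hypothetical helper: CCS fb strides must be either exactly 8 tiles or a multiple of 16 tiles, matching the 64 kbyte CCS AUX PTE granularity.

/* Illustrative: ADL-P CCS stride alignment in bytes. */
static unsigned int example_adlp_ccs_stride_align(unsigned int pitch_bytes,
						  unsigned int tile_width)
{
	/* small fbs: require exactly 8 tiles; larger: 16-tile multiples */
	return tile_width * (pitch_bytes <= tile_width * 8 ? 8 : 16);
}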
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
new file mode 100644
index 000000000000..3f77f3013584
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+/**
+ * DOC: display pinning helpers
+ */
+
+#include "intel_display_types.h"
+#include "intel_fb_pin.h"
+#include "intel_fb.h"
+
+#include "intel_dpt.h"
+
+#include "gem/i915_gem_object.h"
+
+static struct i915_vma *
+intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags,
+ struct i915_address_space *vm)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_vma *vma;
+ u32 alignment;
+ int ret;
+
+ if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
+
+ alignment = 4096 * 512;
+
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma))
+ goto err;
+
+ if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+ ret = i915_vma_unbind(vma);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+ }
+
+ ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
+ i915_gem_object_flush_if_display(obj);
+
+ i915_vma_get(vma);
+err:
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
+ return vma;
+}
+
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ intel_wakeref_t wakeref;
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ unsigned int pinctl;
+ u32 alignment;
+ int ret;
+
+ if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
+
+ if (phys_cursor)
+ alignment = intel_cursor_alignment(dev_priv);
+ else
+ alignment = intel_surf_alignment(fb, 0);
+ if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
+ return ERR_PTR(-EINVAL);
+
+ /* Note that the w/a also requires 64 PTE of padding following the
+ * bo. We currently fill all unused PTE with the shadow page and so
+ * we should always have valid PTE following the scanout preventing
+ * the VT-d warning.
+ */
+ if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
+ alignment = 256 * 1024;
+
+ /*
+ * Global gtt pte registers are special registers which actually forward
+ * writes to a chunk of system memory. Which means that there is no risk
+ * that the register values disappear as soon as we call
+ * intel_runtime_pm_put(), so it is correct to wrap only the
+ * pin/unpin/fence and not more.
+ */
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+ /*
+ * Valleyview is definitely limited to scanning out the first
+ * 512MiB. Let's presume this behaviour was inherited from the
+ * g4x display engine and that all earlier gen are similarly
+ * limited. Testing suggests that it is a little more
+ * complicated than this. For example, Cherryview appears quite
+ * happy to scanout from anywhere within its global aperture.
+ */
+ pinctl = 0;
+ if (HAS_GMCH(dev_priv))
+ pinctl |= PIN_MAPPABLE;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
+ if (!ret && phys_cursor)
+ ret = i915_gem_object_attach_phys(obj, alignment);
+ else if (!ret && HAS_LMEM(dev_priv))
+ ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
+ /* TODO: Do we need to sync when migration becomes async? */
+ if (!ret)
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
+ if (!ret) {
+ vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
+ view, pinctl);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unpin;
+ }
+ }
+
+ if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
+ /*
+ * Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always, when
+ * possible, install a fence as the cost is not that onerous.
+ *
+ * If we fail to fence the tiled scanout, then either the
+ * modeset will reject the change (which is highly unlikely as
+ * the affected systems, all but one, do not have unmappable
+ * space) or we will not be able to enable full powersaving
+ * techniques (also likely not to apply due to various limits
+ * FBC and the like impose on the size of the buffer, which
+ * presumably we violated anyway with this unmappable buffer).
+ * Anyway, it is presumably better to stumble onwards with
+ * something and try to run the system in a "less than optimal"
+ * mode that matches the user configuration.
+ */
+ ret = i915_vma_pin_fence(vma);
+ if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
+ i915_vma_unpin(vma);
+ goto err_unpin;
+ }
+ ret = 0;
+
+ if (vma->fence)
+ *out_flags |= PLANE_HAS_FENCE;
+ }
+
+ i915_vma_get(vma);
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ vma = ERR_PTR(ret);
+
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ return vma;
+}
+
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
+{
+ if (flags & PLANE_HAS_FENCE)
+ i915_vma_unpin_fence(vma);
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct i915_vma *vma;
+ bool phys_cursor =
+ plane->id == PLANE_CURSOR &&
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical;
+
+ if (!intel_fb_uses_dpt(fb)) {
+ vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
+ &plane_state->view.gtt,
+ intel_plane_uses_fence(plane_state),
+ &plane_state->flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
+ } else {
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ vma = intel_dpt_pin(intel_fb->dpt_vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
+
+ vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
+ &plane_state->flags, intel_fb->dpt_vm);
+ if (IS_ERR(vma)) {
+ intel_dpt_unpin(intel_fb->dpt_vm);
+ plane_state->ggtt_vma = NULL;
+ return PTR_ERR(vma);
+ }
+
+ plane_state->dpt_vma = vma;
+
+ WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
+ }
+
+ return 0;
+}
+
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+{
+ struct drm_framebuffer *fb = old_plane_state->hw.fb;
+ struct i915_vma *vma;
+
+ if (!intel_fb_uses_dpt(fb)) {
+ vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+ } else {
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ vma = fetch_and_zero(&old_plane_state->dpt_vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+
+ vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+ if (vma)
+ intel_dpt_unpin(intel_fb->dpt_vm);
+ }
+}
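
A hedged sketch of the pinning invariant that intel_plane_pin_fb() and intel_plane_unpin_fb() above establish (the helper is hypothetical):

/* Illustrative check of the two pin paths: GGTT-only fbs hold a single
 * vma, DPT fbs hold two distinct ones.
 */
static bool example_plane_fb_is_pinned(const struct intel_plane_state *state,
				       const struct drm_framebuffer *fb)
{
	if (!intel_fb_uses_dpt(fb))
		return state->ggtt_vma != NULL;	/* fb pages in the GGTT */

	/* DPT path: GGTT vma maps the page table, DPT vma maps the fb */
	return state->ggtt_vma && state->dpt_vma &&
	       state->ggtt_vma != state->dpt_vma;
}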
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
new file mode 100644
index 000000000000..e4fcd0218d9d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_PIN_H__
+#define __INTEL_FB_PIN_H__
+
+#include <linux/types.h>
+
+struct drm_framebuffer;
+struct i915_vma;
+struct intel_plane_state;
+struct i915_ggtt_view;
+
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags);
+
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
+
+int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index ddfc17e21668..1f66de77a6b1 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -62,19 +62,84 @@ static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *
*height = cache->plane.src_h;
}
-static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
- const struct intel_fbc_state_cache *cache)
+/* plane stride in pixels */
+static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
{
- int lines;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int stride;
+
+ stride = plane_state->view.color_plane[0].stride;
+ if (!drm_rotation_90_or_270(plane_state->hw.rotation))
+ stride /= fb->format->cpp[0];
+
+ return stride;
+}
+
+/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
+static unsigned int _intel_fbc_cfb_stride(const struct intel_fbc_state_cache *cache)
+{
+ unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
+
+ return cache->fb.stride * cpp;
+}
+
+/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
+static unsigned int skl_fbc_min_cfb_stride(struct drm_i915_private *i915,
+ const struct intel_fbc_state_cache *cache)
+{
+ unsigned int limit = 4; /* 1:4 compression limit is the worst case */
+ unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
+ unsigned int height = 4; /* FBC segment is 4 lines */
+ unsigned int stride;
+
+ /* minimum segment stride we can use */
+ stride = cache->plane.src_w * cpp * height / limit;
+
+ /*
+ * Wa_16011863758: icl+
+ * Avoid some hardware segment address miscalculation.
+ */
+ if (DISPLAY_VER(i915) >= 11)
+ stride += 64;
+
+ /*
+ * At least some of the platforms require each 4 line segment to
+ * be 512 byte aligned. Just do it always for simplicity.
+ */
+ stride = ALIGN(stride, 512);
+
+ /* convert back to single line equivalent with 1:1 compression limit */
+ return stride * limit / height;
+}
+
+/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
+static unsigned int intel_fbc_cfb_stride(struct drm_i915_private *i915,
+ const struct intel_fbc_state_cache *cache)
+{
+ unsigned int stride = _intel_fbc_cfb_stride(cache);
+
+ /*
+ * At least some of the platforms require each 4 line segment to
+ * be 512 byte aligned. Aligning each line to 512 bytes guarantees
+ * that regardless of the compression limit we choose later.
+ */
+ if (DISPLAY_VER(i915) >= 9)
+ return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(i915, cache));
+ else
+ return stride;
+}
+
+static unsigned int intel_fbc_cfb_size(struct drm_i915_private *dev_priv,
+ const struct intel_fbc_state_cache *cache)
+{
+ int lines = cache->plane.src_h;
- intel_fbc_get_plane_source_size(cache, NULL, &lines);
if (DISPLAY_VER(dev_priv) == 7)
lines = min(lines, 2048);
else if (DISPLAY_VER(dev_priv) >= 8)
lines = min(lines, 2560);
- /* Hardware needs the full buffer stride, not just the active area. */
- return lines * cache->fb.stride;
+ return lines * intel_fbc_cfb_stride(dev_priv, cache);
}
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
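
A worked example of the skl_fbc_min_cfb_stride() math above, with assumed numbers (ICL, src_w = 1920; the function below only illustrates the arithmetic):

/* Illustrative walk-through of skl_fbc_min_cfb_stride() on ICL. */
static unsigned int example_min_cfb_stride_icl(void)
{
	unsigned int stride = 1920 * 4 * 4 / 4;	/* 7680: worst-case segment */

	stride += 64;				/* 7744: Wa_16011863758 pad */
	stride = ALIGN(stride, 512);		/* 8192: segment alignment */

	return stride * 4 / 4;			/* 8192: 1:1 line equivalent */
}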
@@ -99,15 +164,13 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
- struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ const struct intel_fbc_reg_params *params = &fbc->params;
int cfb_pitch;
int i;
u32 fbc_ctl;
- /* Note: fbc.limit == 1 for i8xx */
- cfb_pitch = params->cfb_size / FBC_LL_SIZE;
- if (params->fb.stride < cfb_pitch)
- cfb_pitch = params->fb.stride;
+ cfb_pitch = params->cfb_stride / fbc->limit;
/* FBC_CTL wants 32B or 64B units */
if (DISPLAY_VER(dev_priv) == 2)
@@ -150,15 +213,9 @@ static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915)
{
- const struct intel_fbc_reg_params *params = &i915->fbc.params;
- int limit = i915->fbc.limit;
-
- if (params->fb.format->cpp[0] == 2)
- limit <<= 1;
-
- switch (limit) {
+ switch (i915->fbc.limit) {
default:
- MISSING_CASE(limit);
+ MISSING_CASE(i915->fbc.limit);
fallthrough;
case 1:
return DPFC_CTL_LIMIT_1X;
@@ -232,16 +289,16 @@ static void i965_fbc_recompress(struct drm_i915_private *dev_priv)
/* This function forces a CFB recompression through the nuke operation. */
static void snb_fbc_recompress(struct drm_i915_private *dev_priv)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- trace_intel_fbc_nuke(fbc->crtc);
-
intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
}
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ trace_intel_fbc_nuke(fbc->crtc);
+
if (DISPLAY_VER(dev_priv) >= 6)
snb_fbc_recompress(dev_priv);
else if (DISPLAY_VER(dev_priv) >= 4)
@@ -280,8 +337,6 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
params->fence_y_offset);
/* enable it... */
intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- intel_fbc_recompress(dev_priv);
}
static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
@@ -303,19 +358,29 @@ static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
- struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ const struct intel_fbc_reg_params *params = &fbc->params;
u32 dpfc_ctl;
- /* Display WA #0529: skl, kbl, bxt. */
- if (DISPLAY_VER(dev_priv) == 9) {
- u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4);
+ if (DISPLAY_VER(dev_priv) >= 10) {
+ u32 val = 0;
- val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
+ if (params->override_cfb_stride)
+ val |= FBC_STRIDE_OVERRIDE |
+ FBC_STRIDE(params->override_cfb_stride / fbc->limit);
- if (params->gen9_wa_cfb_stride)
- val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
+ intel_de_write(dev_priv, GLK_FBC_STRIDE, val);
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ u32 val = 0;
- intel_de_write(dev_priv, CHICKEN_MISC_4, val);
+ /* Display WA #0529: skl, kbl, bxt. */
+ if (params->override_cfb_stride)
+ val |= CHICKEN_FBC_STRIDE_OVERRIDE |
+ CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit);
+
+ intel_de_rmw(dev_priv, CHICKEN_MISC_4,
+ CHICKEN_FBC_STRIDE_OVERRIDE |
+ CHICKEN_FBC_STRIDE_MASK, val);
}
dpfc_ctl = 0;
@@ -339,8 +404,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
dpfc_ctl |= FBC_CTL_FALSE_COLOR;
intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- intel_fbc_recompress(dev_priv);
}
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
@@ -402,6 +465,12 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
return dev_priv->fbc.active;
}
+static void intel_fbc_activate(struct drm_i915_private *dev_priv)
+{
+ intel_fbc_hw_activate(dev_priv);
+ intel_fbc_recompress(dev_priv);
+}
+
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
const char *reason)
{
@@ -440,30 +509,32 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv)
return min(end, intel_fbc_cfb_base_max(dev_priv));
}
-static int intel_fbc_max_limit(struct drm_i915_private *dev_priv, int fb_cpp)
+static int intel_fbc_min_limit(int fb_cpp)
{
- /*
- * FIXME: FBC1 can have arbitrary cfb stride,
- * so we could support different compression ratios.
- */
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- return 1;
+ return fb_cpp == 2 ? 2 : 1;
+}
+static int intel_fbc_max_limit(struct drm_i915_private *dev_priv)
+{
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
return 1;
- /* FBC2 can only do 1:1, 1:2, 1:4 */
- return fb_cpp == 2 ? 2 : 4;
+ /*
+ * FBC2 can only do 1:1, 1:2, 1:4; we limit
+ * FBC1 to the same for convenience.
+ */
+ return 4;
}
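
The asymmetry between these two helpers follows from FBC always using 4 bytes per pixel internally: a 2-byte-per-pixel framebuffer already occupies twice its nominal size in the CFB, so a nominal limit of 2 is effectively 1:1 for it. The removed g4x code expressed this by doubling the limit for cpp == 2; the new code expresses it as a minimum limit of 2.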
static int find_compression_limit(struct drm_i915_private *dev_priv,
- unsigned int size,
- unsigned int fb_cpp)
+ unsigned int size, int min_limit)
{
struct intel_fbc *fbc = &dev_priv->fbc;
u64 end = intel_fbc_stolen_end(dev_priv);
- int ret, limit = 1;
+ int ret, limit = min_limit;
+
+ size /= limit;
/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
@@ -471,7 +542,7 @@ static int find_compression_limit(struct drm_i915_private *dev_priv,
if (ret == 0)
return limit;
- for (; limit <= intel_fbc_max_limit(dev_priv, fb_cpp); limit <<= 1) {
+ for (; limit <= intel_fbc_max_limit(dev_priv); limit <<= 1) {
ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
@@ -482,7 +553,7 @@ static int find_compression_limit(struct drm_i915_private *dev_priv,
}
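
A minimal standalone sketch of the fallback strategy, with a hypothetical allocator in place of the real i915_gem_stolen API and the initial over-allocation attempt omitted: halve the request while doubling the compression limit until the allocation fits.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for i915_gem_stolen_insert_node_in_range() */
static bool fake_alloc(unsigned int size, unsigned int avail)
{
	return size <= avail;
}

static int find_limit(unsigned int size, int min_limit, int max_limit,
		      unsigned int avail)
{
	int limit = min_limit;

	size /= limit; /* the CFB size was computed for a 1:1 limit */

	for (; limit <= max_limit; limit <<= 1) {
		if (fake_alloc(size, avail))
			return limit;
		size >>= 1;
	}

	return 0; /* even the 1:4 limit did not fit */
}

int main(void)
{
	/* 8 MiB CFB wanted, only 3 MiB of stolen memory available */
	printf("limit = %d\n", find_limit(8u << 20, 1, 4, 3u << 20)); /* 4 */
	return 0;
}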
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
- unsigned int size, unsigned int fb_cpp)
+ unsigned int size, int min_limit)
{
struct intel_fbc *fbc = &dev_priv->fbc;
int ret;
@@ -499,13 +570,12 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
goto err;
}
- ret = find_compression_limit(dev_priv, size, fb_cpp);
+ ret = find_compression_limit(dev_priv, size, min_limit);
if (!ret)
goto err_llb;
- else if (ret > 1) {
+ else if (ret > min_limit)
 drm_info_once(&dev_priv->drm,
 "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size buffer. Try to increase stolen memory size if available in BIOS.\n");
- }
fbc->limit = ret;
@@ -675,11 +745,10 @@ static bool tiling_is_valid(struct drm_i915_private *dev_priv,
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
- if (DISPLAY_VER(dev_priv) >= 9)
- return true;
- return false;
- case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ return DISPLAY_VER(dev_priv) >= 9;
+ case I915_FORMAT_MOD_X_TILED:
return true;
default:
return false;
@@ -718,11 +787,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.format = fb->format;
cache->fb.modifier = fb->modifier;
-
- /* FIXME is this correct? */
- cache->fb.stride = plane_state->view.color_plane[0].stride;
- if (drm_rotation_90_or_270(plane_state->hw.rotation))
- cache->fb.stride *= fb->format->cpp[0];
+ cache->fb.stride = intel_fbc_plane_stride(plane_state);
/* FBC1 compression interval: arbitrary choice of 1 second */
cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
@@ -745,27 +810,29 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+ return intel_fbc_cfb_size(dev_priv, &fbc->state_cache) >
fbc->compressed_fb.size * fbc->limit;
}
-static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
+static u16 intel_fbc_override_cfb_stride(struct drm_i915_private *dev_priv,
+ const struct intel_fbc_state_cache *cache)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_state_cache *cache = &fbc->state_cache;
-
- if ((DISPLAY_VER(dev_priv) == 9) &&
- cache->fb.modifier != I915_FORMAT_MOD_X_TILED)
- return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->limit) * 8;
- else
- return 0;
-}
+ unsigned int stride = _intel_fbc_cfb_stride(cache);
+ unsigned int stride_aligned = intel_fbc_cfb_stride(dev_priv, cache);
-static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv)
-{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ /*
+ * Override stride in 64 byte units per 4 line segment.
+ *
+ * Gen9 hw miscalculates cfb stride for linear as
+ * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
+ * we always need to use the override there.
+ */
+ if (stride != stride_aligned ||
+ (DISPLAY_VER(dev_priv) == 9 &&
+ cache->fb.modifier == DRM_FORMAT_MOD_LINEAR))
+ return stride_aligned * 4 / 64;
- return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv);
+ return 0;
}
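
With the hypothetical 15872-byte aligned CFB stride from earlier, the override value would be 15872 * 4 / 64 = 992, i.e. the 4-line segment stride expressed in 64-byte units; gen7_fbc_activate() above then divides that by the chosen compression limit before writing the register.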
static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
@@ -860,7 +927,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) {
+ if (!stride_is_valid(dev_priv, cache->fb.modifier,
+ cache->fb.stride * cache->fb.format->cpp[0])) {
fbc->no_fbc_reason = "framebuffer stride not supported";
return false;
}
@@ -948,9 +1016,9 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->fb.modifier = cache->fb.modifier;
params->fb.stride = cache->fb.stride;
- params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
- params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;
+ params->cfb_stride = intel_fbc_cfb_stride(dev_priv, cache);
+ params->cfb_size = intel_fbc_cfb_size(dev_priv, cache);
+ params->override_cfb_stride = intel_fbc_override_cfb_stride(dev_priv, cache);
params->plane_visible = cache->plane.visible;
}
@@ -981,10 +1049,13 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
if (params->fb.stride != cache->fb.stride)
return false;
- if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache))
+ if (params->cfb_stride != intel_fbc_cfb_stride(dev_priv, cache))
+ return false;
+
+ if (params->cfb_size != intel_fbc_cfb_size(dev_priv, cache))
return false;
- if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride)
+ if (params->override_cfb_stride != intel_fbc_override_cfb_stride(dev_priv, cache))
return false;
return true;
@@ -1090,7 +1161,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
return;
if (!fbc->busy_bits)
- intel_fbc_hw_activate(dev_priv);
+ intel_fbc_activate(dev_priv);
else
intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
@@ -1129,7 +1200,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
if (!HAS_FBC(dev_priv))
return;
- if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
return;
mutex_lock(&fbc->lock);
@@ -1150,19 +1221,11 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
if (!HAS_FBC(dev_priv))
return;
- /*
- * GTT tracking does not nuke the entire cfb
- * so don't clear busy_bits set for some other
- * reason.
- */
- if (origin == ORIGIN_GTT)
- return;
-
mutex_lock(&fbc->lock);
fbc->busy_bits &= ~frontbuffer_bits;
- if (origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
goto out;
if (!fbc->busy_bits && fbc->crtc &&
@@ -1246,8 +1309,8 @@ out:
* intel_fbc_enable multiple times for the same pipe without an
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
-void intel_fbc_enable(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void intel_fbc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
@@ -1257,16 +1320,22 @@ void intel_fbc_enable(struct intel_atomic_state *state,
intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ int min_limit;
if (!plane->has_fbc || !plane_state)
return;
+ min_limit = intel_fbc_min_limit(plane_state->hw.fb ?
+ plane_state->hw.fb->format->cpp[0] : 0);
+
mutex_lock(&fbc->lock);
if (fbc->crtc) {
- if (fbc->crtc != crtc ||
- (!intel_fbc_cfb_size_changed(dev_priv) &&
- !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv)))
+ if (fbc->crtc != crtc)
+ goto out;
+
+ if (fbc->limit >= min_limit &&
+ !intel_fbc_cfb_size_changed(dev_priv))
goto out;
__intel_fbc_disable(dev_priv);
@@ -1281,15 +1350,12 @@ void intel_fbc_enable(struct intel_atomic_state *state,
goto out;
if (intel_fbc_alloc_cfb(dev_priv,
- intel_fbc_calculate_cfb_size(dev_priv, cache),
- plane_state->hw.fb->format->cpp[0])) {
+ intel_fbc_cfb_size(dev_priv, cache), min_limit)) {
cache->plane.visible = false;
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
}
- cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv);
-
drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
pipe_name(crtc->pipe));
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
@@ -1323,6 +1389,28 @@ void intel_fbc_disable(struct intel_crtc *crtc)
}
/**
+ * intel_fbc_update: enable/disable FBC on the CRTC
+ * @state: atomic state
+ * @crtc: the CRTC
+ *
+ * This function checks if the given CRTC was chosen for FBC, then enables or
+ * disables it accordingly. Notice that it doesn't activate FBC. It is valid to call
+ * intel_fbc_update multiple times for the same pipe without an
+ * intel_fbc_disable in the middle.
+ */
+void intel_fbc_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (crtc_state->update_pipe && !crtc_state->enable_fbc)
+ intel_fbc_disable(crtc);
+ else
+ intel_fbc_enable(state, crtc);
+}
+
+/**
* intel_fbc_global_disable - globally disable FBC
* @dev_priv: i915 device instance
*
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 6dc1edefe81b..b97d908738e6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -24,7 +24,7 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state,
void intel_fbc_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_atomic_state *state,
+void intel_fbc_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index df05d285f0bd..adc3a81be9f7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -45,6 +45,8 @@
#include "i915_drv.h"
#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
@@ -229,8 +231,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unlock;
}
- intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);
-
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
drm_err(&dev_priv->drm, "Failed to allocate fb_info\n");
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index e10b9cd8e86e..dd2cf0c59921 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -2,11 +2,112 @@
/*
* Copyright © 2020 Intel Corporation
*/
+
#include "intel_atomic.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
+#include "intel_sbi.h"
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ if (HAS_DDI(dev_priv)) {
+ /*
+ * DDI does not have a specific FDI_TX register.
+ *
+ * FDI is never fed from EDP transcoder
+ * so pipe->transcoder cast is fine here.
+ */
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
+ cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
+ } else {
+ cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
+ }
+ I915_STATE_WARN(cur_state != state,
+ "FDI TX state assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
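
The cast works because enum pipe and enum transcoder share the same numbering for A/B/C; only TRANSCODER_EDP falls outside that range on the FDI-capable DDI platforms, and as the comment notes, FDI is never driven from it.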
+
+void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_tx(i915, pipe, true);
+}
+
+void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_tx(i915, pipe, false);
+}
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
+ I915_STATE_WARN(cur_state != state,
+ "FDI RX state assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
+
+void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx(i915, pipe, true);
+}
+
+void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx(i915, pipe, false);
+}
+
+void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
+ enum pipe pipe)
+{
+ bool cur_state;
+
+ /* ILK FDI PLL is always enabled */
+ if (IS_IRONLAKE(i915))
+ return;
+
+ /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+ if (HAS_DDI(i915))
+ return;
+
+ cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
+ I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll(struct drm_i915_private *i915,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
+ I915_STATE_WARN(cur_state != state,
+ "FDI RX PLL assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
+
+void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx_pll(i915, pipe, true);
+}
+
+void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx_pll(i915, pipe, false);
+}
+
+void intel_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state);
+}
/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
@@ -91,8 +192,34 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
return 0;
default:
- BUG();
+ MISSING_CASE(pipe);
+ return 0;
+ }
+}
+
+void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
+{
+ if (IS_IRONLAKE(i915)) {
+ u32 fdi_pll_clk =
+ intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+
+ i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
+ } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
+ i915->fdi_pll_freq = 270000;
+ } else {
+ return;
}
+
+ drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq);
+}
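
As a worked example with a hypothetical register value: an FDI_PLL_BIOS_0 FB-clock field of 25 yields (25 + 2) * 10000 = 270000 kHz, matching the fixed 270 MHz value used for SNB/IVB in the branch above.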
+
+int intel_fdi_link_freq(struct drm_i915_private *i915,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (HAS_DDI(i915))
+ return pipe_config->port_clock; /* SPLL */
+ else
+ return i915->fdi_pll_freq;
}
int ilk_fdi_compute_config(struct intel_crtc *crtc,
@@ -140,11 +267,60 @@ retry:
}
if (needs_recompute)
- return I915_DISPLAY_CONFIG_RETRY;
+ return -EAGAIN;
return ret;
}
+static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
+ if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
+ return;
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
+ FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
+ FDI_RX_ENABLE);
+
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ if (enable)
+ temp |= FDI_BC_BIFURCATION_SELECT;
+
+ drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
+ enable ? "en" : "dis");
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
+ intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
+}
+
+static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ switch (crtc->pipe) {
+ case PIPE_A:
+ break;
+ case PIPE_B:
+ if (crtc_state->fdi_lanes > 2)
+ cpt_set_fdi_bc_bifurcation(dev_priv, false);
+ else
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
+
+ break;
+ case PIPE_C:
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
+
+ break;
+ default:
+ MISSING_CASE(crtc->pipe);
+ }
+}
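
The switch encodes a simple rule: pipe B keeps all four shared B/C FDI lanes (bifurcation off) only when it needs more than two of them; otherwise the lanes are split so pipe C can take its half, and pipe C always requires bifurcation because it only ever sees the shared half.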
+
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -196,8 +372,15 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
i915_reg_t reg;
u32 temp, tries;
+ /*
+ * Write the TU size bits before fdi link training, so that error
+ * detection works.
+ */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
/* FDI needs bits from pipe first */
- assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
for train result */
@@ -299,6 +482,13 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
i915_reg_t reg;
u32 temp, i, retry;
+ /*
+ * Write the TU size bits before fdi link training, so that error
+ * detection works.
+ */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
for train result */
reg = FDI_RX_IMR(pipe);
@@ -436,6 +626,15 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
i915_reg_t reg;
u32 temp, i, j;
+ ivb_update_fdi_bc_bifurcation(crtc_state);
+
+ /*
+ * Write the TU size bits before fdi link training, so that error
+ * detection works.
+ */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
for train result */
reg = FDI_RX_IMR(pipe);
@@ -807,15 +1006,125 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
udelay(100);
}
+static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
+
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
+}
+
+/* WaMPhyProgramming:hsw */
+void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ lpt_fdi_reset_mphy(dev_priv);
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+static const struct intel_fdi_funcs ilk_funcs = {
+ .fdi_link_train = ilk_fdi_link_train,
+};
+
+static const struct intel_fdi_funcs gen6_funcs = {
+ .fdi_link_train = gen6_fdi_link_train,
+};
+
+static const struct intel_fdi_funcs ivb_funcs = {
+ .fdi_link_train = ivb_manual_fdi_link_train,
+};
+
void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
if (IS_IRONLAKE(dev_priv)) {
- dev_priv->display.fdi_link_train = ilk_fdi_link_train;
+ dev_priv->fdi_funcs = &ilk_funcs;
} else if (IS_SANDYBRIDGE(dev_priv)) {
- dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ dev_priv->fdi_funcs = &gen6_funcs;
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
- dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ dev_priv->fdi_funcs = &ivb_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
index af01d2c173a8..640d6585c137 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.h
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -6,12 +6,14 @@
#ifndef _INTEL_FDI_H_
#define _INTEL_FDI_H_
+enum pipe;
struct drm_i915_private;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
-#define I915_DISPLAY_CONFIG_RETRY 1
+int intel_fdi_link_freq(struct drm_i915_private *i915,
+ const struct intel_crtc_state *pipe_config);
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config);
void intel_fdi_normal_train(struct intel_crtc *crtc);
@@ -21,5 +23,18 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_fdi_pll_freq_update(struct drm_i915_private *i915);
+void lpt_fdi_program_mphy(struct drm_i915_private *i915);
+
+void intel_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+
+void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 8e75debcce1a..0492446cd04a 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -62,6 +62,7 @@
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_drrs.h"
#include "intel_psr.h"
/**
@@ -91,7 +92,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
trace_intel_frontbuffer_flush(frontbuffer_bits, origin);
might_sleep();
- intel_edp_drrs_flush(i915, frontbuffer_bits);
+ intel_drrs_flush(i915, frontbuffer_bits);
intel_psr_flush(i915, frontbuffer_bits, origin);
intel_fbc_flush(i915, frontbuffer_bits, origin);
}
@@ -180,7 +181,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
might_sleep();
intel_psr_invalidate(i915, frontbuffer_bits, origin);
- intel_edp_drrs_invalidate(i915, frontbuffer_bits);
+ intel_drrs_invalidate(i915, frontbuffer_bits);
intel_fbc_invalidate(i915, frontbuffer_bits, origin);
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 6d41f5394425..a88441edc8f9 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -33,11 +33,11 @@
struct drm_i915_private;
enum fb_op_origin {
- ORIGIN_GTT,
- ORIGIN_CPU,
+ ORIGIN_CPU = 0,
ORIGIN_CS,
ORIGIN_FLIP,
ORIGIN_DIRTYFB,
+ ORIGIN_CURSOR_UPDATE,
};
struct intel_frontbuffer {
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index ebc2e32aec0b..4509fe7438e8 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -17,12 +17,12 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_display_power.h"
+#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
-#include "intel_sideband.h"
-#include "intel_connector.h"
+#include "intel_pcode.h"
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
@@ -33,21 +33,6 @@ static int intel_conn_to_vcpi(struct intel_connector *connector)
return connector->port ? connector->port->vcpi.vcpi : 0;
}
-static bool
-intel_streams_type1_capable(struct intel_connector *connector)
-{
- const struct intel_hdcp_shim *shim = connector->hdcp.shim;
- bool capable = false;
-
- if (!shim)
- return capable;
-
- if (shim->streams_type1_capable)
- shim->streams_type1_capable(connector, &capable);
-
- return capable;
-}
-
/*
* intel_hdcp_required_content_stream selects the highest common possible HDCP
* content_type for all streams in DP MST topology because security f/w doesn't
@@ -86,7 +71,7 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
if (conn_dig_port != dig_port)
continue;
- if (!enforce_type0 && !intel_streams_type1_capable(connector))
+ if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable)
enforce_type0 = true;
data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
@@ -112,6 +97,25 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
return 0;
}
+static int intel_hdcp_prepare_streams(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
+ data->k = 1;
+ data->streams[0].stream_type = hdcp->content_type;
+ } else {
+ ret = intel_hdcp_required_content_stream(dig_port);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
@@ -1632,6 +1636,14 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
return -EINVAL;
}
+ /*
+ * MST topology is not Type 1 capable if it contains a downstream
+ * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
+ */
+ dig_port->hdcp_mst_type1_capable =
+ !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
+ !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
+
/* Converting and Storing the seq_num_v to local variable as DWORD */
seq_num_v =
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
@@ -1876,6 +1888,14 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret) {
+ ret = intel_hdcp_prepare_streams(connector);
+ if (ret) {
+ drm_dbg_kms(&i915->drm,
+ "Prepare streams failed.(%d)\n",
+ ret);
+ break;
+ }
+
ret = hdcp2_propagate_stream_management_info(connector);
if (ret) {
drm_dbg_kms(&i915->drm,
@@ -1921,9 +1941,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
@@ -1931,16 +1949,6 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
hdcp->content_type);
- /* Stream which requires encryption */
- if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
- data->k = 1;
- data->streams[0].stream_type = hdcp->content_type;
- } else {
- ret = intel_hdcp_required_content_stream(dig_port);
- if (ret)
- return ret;
- }
-
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index b04685bb6439..d2e61f6c6e08 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -53,21 +53,20 @@
#include "intel_panel.h"
#include "intel_snps_phy.h"
-static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+static struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi)
{
- return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+ return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev);
}
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
- struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(intel_hdmi);
u32 enabled_bits;
enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
- drm_WARN(dev,
+ drm_WARN(&dev_priv->drm,
intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
}
@@ -1246,7 +1245,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -1703,7 +1702,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
drm_dbg_kms(&i915->drm,
"msg_sz(%zd) is more than exp size(%zu)\n",
ret, size);
- return -1;
+ return -EINVAL;
}
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
@@ -1830,7 +1829,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits,
bool has_hdmi_sink)
{
- struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
if (clock < 25000)
return MODE_CLOCK_LOW;
@@ -1946,8 +1945,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
enum drm_mode_status status;
int clock = mode->clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -2210,7 +2208,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return ret;
if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 47c85ac97c87..955f6d07b0e1 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -215,8 +215,8 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
static void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display_irqs_enabled && i915->display.hpd_irq_setup)
- i915->display.hpd_irq_setup(i915);
+ if (i915->display_irqs_enabled && i915->hotplug_funcs)
+ i915->hotplug_funcs->hpd_irq_setup(i915);
}
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index e0381b0fce91..9fced37bed70 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -40,9 +40,12 @@
#include "i915_drv.h"
#include "intel_atomic.h"
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dpll.h"
+#include "intel_fdi.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
#include "intel_panel.h"
@@ -323,7 +326,7 @@ static void intel_enable_lvds(struct intel_atomic_state *state,
drm_err(&dev_priv->drm,
"timed out waiting for panel to power on\n");
- intel_panel_enable_backlight(pipe_config, conn_state);
+ intel_backlight_enable(pipe_config, conn_state);
}
static void intel_disable_lvds(struct intel_atomic_state *state,
@@ -351,7 +354,7 @@ static void gmch_disable_lvds(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
- intel_panel_disable_backlight(old_conn_state);
+ intel_backlight_disable(old_conn_state);
intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}
@@ -361,7 +364,7 @@ static void pch_disable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_panel_disable_backlight(old_conn_state);
+ intel_backlight_disable(old_conn_state);
}
static void pch_post_disable_lvds(struct intel_atomic_state *state,
@@ -388,13 +391,15 @@ intel_lvds_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ enum drm_mode_status status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
+
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
+
if (fixed_mode->clock > max_pixclk)
return MODE_CLOCK_HIGH;
@@ -441,8 +446,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
- adjusted_mode);
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -450,10 +456,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;
- if (HAS_GMCH(dev_priv))
- ret = intel_gmch_panel_fitting(pipe_config, conn_state);
- else
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
@@ -906,7 +909,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
- intel_encoder->update_pipe = intel_panel_update_backlight;
+ intel_encoder->update_pipe = intel_backlight_update;
intel_encoder->shutdown = intel_lvds_shutdown;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -999,7 +1002,7 @@ out:
mutex_unlock(&dev->mode_config.mutex);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_panel_setup_backlight(connector, INVALID_PIPE);
+ intel_backlight_setup(intel_connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 3855fba70980..0065111593a6 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -30,10 +30,9 @@
#include <linux/firmware.h>
#include <acpi/video.h>
-#include "display/intel_panel.h"
-
#include "i915_drv.h"
#include "intel_acpi.h"
+#include "intel_backlight.h"
#include "intel_display_types.h"
#include "intel_opregion.h"
@@ -450,7 +449,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
bclp);
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter)
- intel_panel_set_backlight_acpi(connector->base.state, bclp, 255);
+ intel_backlight_set_acpi(connector->base.state, bclp, 255);
drm_connector_list_iter_end(&conn_iter);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 7d7a60b4d2de..a0c8e43db5eb 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -28,26 +28,51 @@
* Chris Wilson <chris@chris-wilson.co.uk>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
-#include <linux/moduleparam.h>
#include <linux/pwm.h>
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
-#include "intel_dp_aux_backlight.h"
-#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
-void
-intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode)
+bool intel_panel_use_ssc(struct drm_i915_private *i915)
+{
+ if (i915->params.panel_use_ssc >= 0)
+ return i915->params.panel_use_ssc != 0;
+ return i915->vbt.lvds_use_ssc
+ && !(i915->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+int intel_panel_compute_config(struct intel_connector *connector,
+ struct drm_display_mode *adjusted_mode)
{
+ const struct drm_display_mode *fixed_mode = connector->panel.fixed_mode;
+
+ if (!fixed_mode)
+ return 0;
+
+ /*
+ * We don't want to lie too much to the user about the refresh
+ * rate they're going to get. But we have to allow a bit of latitude
+ * for Xorg since it likes to automagically cook up modes with slightly
+ * off refresh rates.
+ */
+ if (abs(drm_mode_vrefresh(adjusted_mode) - drm_mode_vrefresh(fixed_mode)) > 1) {
+ drm_dbg_kms(connector->base.dev,
+ "[CONNECTOR:%d:%s] Requested mode vrefresh (%d Hz) does not match fixed mode vrefresh (%d Hz)\n",
+ connector->base.base.id, connector->base.name,
+ drm_mode_vrefresh(adjusted_mode), drm_mode_vrefresh(fixed_mode));
+
+ return -EINVAL;
+ }
+
drm_mode_copy(adjusted_mode, fixed_mode);
drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ return 0;
}
static bool is_downclock_mode(const struct drm_display_mode *downclock_mode,
@@ -175,8 +200,8 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
}
/* adjusted_mode has been preset to be the panel's fixed mode */
-int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static int pch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -381,8 +406,8 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
}
}
-int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static int gmch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -456,1783 +481,55 @@ out:
return 0;
}
-/**
- * scale - scale values from one range to another
- * @source_val: value in range [@source_min..@source_max]
- * @source_min: minimum legal value for @source_val
- * @source_max: maximum legal value for @source_val
- * @target_min: corresponding target value for @source_min
- * @target_max: corresponding target value for @source_max
- *
- * Return @source_val in range [@source_min..@source_max] scaled to range
- * [@target_min..@target_max].
- */
-static u32 scale(u32 source_val,
- u32 source_min, u32 source_max,
- u32 target_min, u32 target_max)
-{
- u64 target_val;
-
- WARN_ON(source_min > source_max);
- WARN_ON(target_min > target_max);
-
- /* defensive */
- source_val = clamp(source_val, source_min, source_max);
-
- /* avoid overflows */
- target_val = mul_u32_u32(source_val - source_min,
- target_max - target_min);
- target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min);
- target_val += target_min;
-
- return target_val;
-}
-
-/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
- * to [hw_min..hw_max]. */
-static u32 clamp_user_to_hw(struct intel_connector *connector,
- u32 user_level, u32 user_max)
-{
- struct intel_panel *panel = &connector->panel;
- u32 hw_level;
-
- hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
- hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
-
- return hw_level;
-}
-
-/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
-static u32 scale_hw_to_user(struct intel_connector *connector,
- u32 hw_level, u32 user_max)
-{
- struct intel_panel *panel = &connector->panel;
-
- return scale(hw_level, panel->backlight.min, panel->backlight.max,
- 0, user_max);
-}
-
-u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 val)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
-
- if (dev_priv->params.invert_brightness < 0)
- return val;
-
- if (dev_priv->params.invert_brightness > 0 ||
- dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
- }
-
- return val;
-}
-
-void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
- panel->backlight.pwm_funcs->set(conn_state, val);
-}
-
-u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- drm_WARN_ON_ONCE(&dev_priv->drm,
- panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
-
- val = scale(val, panel->backlight.min, panel->backlight.max,
- panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
-
- return intel_panel_invert_pwm_level(connector, val);
-}
-
-u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- drm_WARN_ON_ONCE(&dev_priv->drm,
- panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
-
- if (dev_priv->params.invert_brightness > 0 ||
- (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
- val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
-
- return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
- panel->backlight.min, panel->backlight.max);
-}
-
-static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
-}
-
-static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-}
-
-static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 val;
-
- val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (DISPLAY_VER(dev_priv) < 4)
- val >>= 1;
-
- if (panel->backlight.combination_mode) {
- u8 lbpc;
-
- pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc);
- val *= lbpc;
- }
-
- return val;
-}
-
-static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
- return 0;
-
- return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
-}
-
-static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- return intel_de_read(dev_priv,
- BXT_BLC_PWM_DUTY(panel->backlight.controller));
-}
-
-static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct intel_panel *panel = &connector->panel;
- struct pwm_state state;
-
- pwm_get_state(panel->backlight.pwm, &state);
- return pwm_get_relative_duty_cycle(&state, 100);
-}
-
-static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
-}
-
-static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
-}
-
-static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 tmp, mask;
-
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
-
- if (panel->backlight.combination_mode) {
- u8 lbpc;
-
- lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
- level /= lbpc;
- pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
- }
-
- if (DISPLAY_VER(dev_priv) == 4) {
- mask = BACKLIGHT_DUTY_CYCLE_MASK;
- } else {
- level <<= 1;
- mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
- }
-
- tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
- intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
-}
-
-static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
-}
-
-static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- intel_de_write(dev_priv,
- BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
-}
-
-static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
-
- pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
-}
-
-static void
-intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
-
- panel->backlight.funcs->set(conn_state, level);
-}
-
-/* set backlight brightness to level in range [0..max], assuming hw min is
- * respected.
- */
-void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
- u32 user_level, u32 user_max)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 hw_level;
-
- /*
- * Lack of crtc may occur during driver init because
- * connection_mutex isn't held across the entire backlight
- * setup + modeset readout, and the BIOS can issue the
- * requests at any time.
- */
- if (!panel->backlight.present || !conn_state->crtc)
- return;
-
- mutex_lock(&dev_priv->backlight_lock);
-
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
-
- hw_level = clamp_user_to_hw(connector, user_level, user_max);
- panel->backlight.level = hw_level;
-
- if (panel->backlight.device)
- panel->backlight.device->props.brightness =
- scale_hw_to_user(connector,
- panel->backlight.level,
- panel->backlight.device->props.max_brightness);
-
- if (panel->backlight.enabled)
- intel_panel_actually_set_backlight(conn_state, hw_level);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
-static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, level);
-
- /*
- * Although we don't support or enable CPU PWM with LPT/SPT based
- * systems, it may have been enabled prior to loading the
- * driver. Disable to avoid warnings on LCPLL disable.
- *
- * This needs rework if we need to add support for CPU PWM on PCH split
- * platforms.
- */
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- if (tmp & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
- "cpu backlight was enabled, disabling\n");
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
- tmp & ~BLM_PWM_ENABLE);
- }
-
- tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
-}
-
-static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, val);
-
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
-
- tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
-}
-
-static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- intel_panel_set_pwm_level(old_conn_state, val);
-}
-
-static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, val);
-
- tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
-}
-
-static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, val);
-
- tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
- tmp & ~BLM_PWM_ENABLE);
-}
-
-static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, val);
-
- tmp = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
-
- if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
- val &= ~UTIL_PIN_ENABLE;
- intel_de_write(dev_priv, UTIL_PIN_CTL, val);
- }
-}
-
-static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, val);
-
- tmp = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
-}
-
-static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- panel->backlight.pwm_state.enabled = false;
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
-}
-
-void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- if (!panel->backlight.present)
- return;
-
- /*
- * Do not disable backlight on the vga_switcheroo path. When switching
- * away from i915, the other client may depend on i915 to handle the
- * backlight. This will leave the backlight on unnecessarily when
- * another client is not activated.
- */
- if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg_kms(&dev_priv->drm,
- "Skipping backlight disable on vga switch\n");
- return;
- }
-
- mutex_lock(&dev_priv->backlight_lock);
-
- if (panel->backlight.device)
- panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
- panel->backlight.enabled = false;
- panel->backlight.funcs->disable(old_conn_state, 0);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
-static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 pch_ctl1, pch_ctl2, schicken;
-
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
- pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- }
-
- if (HAS_PCH_LPT(dev_priv)) {
- schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= LPT_PWM_GRANULARITY;
- else
- schicken &= ~LPT_PWM_GRANULARITY;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
- } else {
- schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= SPT_PWM_GRANULARITY;
- else
- schicken &= ~SPT_PWM_GRANULARITY;
- intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
- }
-
- pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
-
- pch_ctl1 = 0;
- if (panel->backlight.active_low_pwm)
- pch_ctl1 |= BLM_PCH_POLARITY;
-
- /* After LPT, override is the default. */
- if (HAS_PCH_LPT(dev_priv))
- pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
-
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
- pch_ctl1 | BLM_PCH_PWM_ENABLE);
-
- /* This won't stick until the above enable. */
- intel_panel_set_pwm_level(conn_state, level);
-}
-
-static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 cpu_ctl2, pch_ctl1, pch_ctl2;
-
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- if (cpu_ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
- cpu_ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
- }
-
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
- pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- }
-
- if (cpu_transcoder == TRANSCODER_EDP)
- cpu_ctl2 = BLM_TRANSCODER_EDP;
- else
- cpu_ctl2 = BLM_PIPE(cpu_transcoder);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
- intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
-
- /* This won't stick until the above enable. */
- intel_panel_set_pwm_level(conn_state, level);
-
- pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
-
- pch_ctl1 = 0;
- if (panel->backlight.active_low_pwm)
- pch_ctl1 |= BLM_PCH_POLARITY;
-
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
- pch_ctl1 | BLM_PCH_PWM_ENABLE);
-}
-
-static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 ctl, freq;
-
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
- if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- intel_de_write(dev_priv, BLC_PWM_CTL, 0);
- }
-
- freq = panel->backlight.pwm_level_max;
- if (panel->backlight.combination_mode)
- freq /= 0xff;
-
- ctl = freq << 17;
- if (panel->backlight.combination_mode)
- ctl |= BLM_LEGACY_MODE;
- if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
- ctl |= BLM_POLARITY_PNV;
-
- intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
- intel_de_posting_read(dev_priv, BLC_PWM_CTL);
-
- /* XXX: combine this into above write? */
- intel_panel_set_pwm_level(conn_state, level);
-
- /*
- * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
- * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
- * that has backlight.
- */
- if (DISPLAY_VER(dev_priv) == 2)
- intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
-}
-
-static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
- u32 ctl, ctl2, freq;
-
- ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
- if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
- }
-
- freq = panel->backlight.pwm_level_max;
- if (panel->backlight.combination_mode)
- freq /= 0xff;
-
- ctl = freq << 16;
- intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
-
- ctl2 = BLM_PIPE(pipe);
- if (panel->backlight.combination_mode)
- ctl2 |= BLM_COMBINATION_MODE;
- if (panel->backlight.active_low_pwm)
- ctl2 |= BLM_POLARITY_I965;
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
- intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
-
- intel_panel_set_pwm_level(conn_state, level);
-}
-
-static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- u32 ctl, ctl2;
-
- ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
- }
-
- ctl = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
-
- /* XXX: combine this into above write? */
- intel_panel_set_pwm_level(conn_state, level);
-
- ctl2 = 0;
- if (panel->backlight.active_low_pwm)
- ctl2 |= BLM_POLARITY_I965;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
- intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
- ctl2 | BLM_PWM_ENABLE);
-}
-
-static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- u32 pwm_ctl, val;
-
- /* Controller 1 uses the utility pin. */
- if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
- if (val & UTIL_PIN_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
- "util pin already enabled\n");
- val &= ~UTIL_PIN_ENABLE;
- intel_de_write(dev_priv, UTIL_PIN_CTL, val);
- }
-
- val = 0;
- if (panel->backlight.util_pin_active_low)
- val |= UTIL_PIN_POLARITY;
- intel_de_write(dev_priv, UTIL_PIN_CTL,
- val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
- }
-
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- }
-
- intel_de_write(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.pwm_level_max);
-
- intel_panel_set_pwm_level(conn_state, level);
-
- pwm_ctl = 0;
- if (panel->backlight.active_low_pwm)
- pwm_ctl |= BXT_BLC_PWM_POLARITY;
-
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- intel_de_posting_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl | BXT_BLC_PWM_ENABLE);
-}
-
-static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 pwm_ctl;
-
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- }
-
- intel_de_write(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.pwm_level_max);
-
- intel_panel_set_pwm_level(conn_state, level);
-
- pwm_ctl = 0;
- if (panel->backlight.active_low_pwm)
- pwm_ctl |= BXT_BLC_PWM_POLARITY;
-
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- intel_de_posting_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl | BXT_BLC_PWM_ENABLE);
-}
-
-static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
- panel->backlight.pwm_state.enabled = true;
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
-}
-
-static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- WARN_ON(panel->backlight.max == 0);
-
- if (panel->backlight.level <= panel->backlight.min) {
- panel->backlight.level = panel->backlight.max;
- if (panel->backlight.device)
- panel->backlight.device->props.brightness =
- scale_hw_to_user(connector,
- panel->backlight.level,
- panel->backlight.device->props.max_brightness);
- }
-
- panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
- panel->backlight.enabled = true;
- if (panel->backlight.device)
- panel->backlight.device->props.power = FB_BLANK_UNBLANK;
-}
-
-void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
-
- if (!panel->backlight.present)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
-
- mutex_lock(&dev_priv->backlight_lock);
-
- __intel_panel_enable_backlight(crtc_state, conn_state);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
-#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-static u32 intel_panel_get_backlight(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 val = 0;
-
- mutex_lock(&dev_priv->backlight_lock);
-
- if (panel->backlight.enabled)
- val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
-
- mutex_unlock(&dev_priv->backlight_lock);
-
- drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
- return val;
-}
-
-/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
-static u32 scale_user_to_hw(struct intel_connector *connector,
- u32 user_level, u32 user_max)
-{
- struct intel_panel *panel = &connector->panel;
-
- return scale(user_level, 0, user_max,
- panel->backlight.min, panel->backlight.max);
-}
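
For illustration, a userspace sketch of the same remap with a made-up hw range, assuming the file's scale() helper truncates like do_div():

    #include <stdint.h>

    /* Truncating linear remap from [smin..smax] to [dmin..dmax]. */
    static uint32_t scale_sketch(uint32_t v, uint32_t smin, uint32_t smax,
                                 uint32_t dmin, uint32_t dmax)
    {
            uint64_t t = (uint64_t)(v - smin) * (dmax - dmin);
            return dmin + (uint32_t)(t / (smax - smin));
    }

    /* scale_sketch(128, 0, 255, 100, 4648) == 2383 */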
-
-/* set backlight brightness to level in range [0..max], scaling wrt hw min */
-static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
- u32 user_level, u32 user_max)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 hw_level;
-
- if (!panel->backlight.present)
- return;
-
- mutex_lock(&dev_priv->backlight_lock);
-
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
-
- hw_level = scale_user_to_hw(connector, user_level, user_max);
- panel->backlight.level = hw_level;
-
- if (panel->backlight.enabled)
- intel_panel_actually_set_backlight(conn_state, hw_level);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
-static int intel_backlight_device_update_status(struct backlight_device *bd)
-{
- struct intel_connector *connector = bl_get_data(bd);
- struct intel_panel *panel = &connector->panel;
- struct drm_device *dev = connector->base.dev;
-
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
- bd->props.brightness, bd->props.max_brightness);
- intel_panel_set_backlight(connector->base.state, bd->props.brightness,
- bd->props.max_brightness);
-
- /*
- * Allow flipping bl_power as a sub-state of enabled. Sadly the
- * backlight class device does not make it easy to differentiate
- * between callbacks for brightness and bl_power, so our backlight_power
- * callback needs to take this into account.
- */
- if (panel->backlight.enabled) {
- if (panel->backlight.power) {
- bool enable = bd->props.power == FB_BLANK_UNBLANK &&
- bd->props.brightness != 0;
- panel->backlight.power(connector, enable);
- }
- } else {
- bd->props.power = FB_BLANK_POWERDOWN;
- }
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- return 0;
-}
-
-static int intel_backlight_device_get_brightness(struct backlight_device *bd)
-{
- struct intel_connector *connector = bl_get_data(bd);
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- intel_wakeref_t wakeref;
- int ret = 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- u32 hw_level;
-
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- hw_level = intel_panel_get_backlight(connector);
- ret = scale_hw_to_user(connector,
- hw_level, bd->props.max_brightness);
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- }
-
- return ret;
-}
-
-static const struct backlight_ops intel_backlight_device_ops = {
- .update_status = intel_backlight_device_update_status,
- .get_brightness = intel_backlight_device_get_brightness,
-};
-
-int intel_backlight_device_register(struct intel_connector *connector)
-{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- struct backlight_properties props;
- struct backlight_device *bd;
- const char *name;
- int ret = 0;
-
- if (WARN_ON(panel->backlight.device))
- return -ENODEV;
-
- if (!panel->backlight.present)
- return 0;
-
- WARN_ON(panel->backlight.max == 0);
-
- memset(&props, 0, sizeof(props));
- props.type = BACKLIGHT_RAW;
-
- /*
- * Note: Everything should work even if the backlight device max
- * presented to the userspace is arbitrarily chosen.
- */
- props.max_brightness = panel->backlight.max;
- props.brightness = scale_hw_to_user(connector,
- panel->backlight.level,
- props.max_brightness);
-
- if (panel->backlight.enabled)
- props.power = FB_BLANK_UNBLANK;
- else
- props.power = FB_BLANK_POWERDOWN;
-
- name = kstrdup("intel_backlight", GFP_KERNEL);
- if (!name)
- return -ENOMEM;
-
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
-
- /*
- * Using the same name independent of the drm device or connector
- * prevents registration of multiple backlight devices in the
- * driver. However, we need to use the default name for backward
- * compatibility. Use unique names for subsequent backlight devices as a
- * fallback when the default name already exists.
- */
- if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
- kfree(name);
- name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
- i915->drm.primary->index, connector->base.name);
- if (!name)
- return -ENOMEM;
-
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
- }
-
- if (IS_ERR(bd)) {
- drm_err(&i915->drm,
- "[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n",
- connector->base.base.id, connector->base.name, name, PTR_ERR(bd));
- ret = PTR_ERR(bd);
- goto out;
- }
-
- panel->backlight.device = bd;
-
- drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] backlight device %s registered\n",
- connector->base.base.id, connector->base.name, name);
-
-out:
- kfree(name);
-
- return ret;
-}
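
In practice the two registration attempts above produce names along these lines (card index and connector name are hypothetical):

    /* first panel:               "intel_backlight"
     * on -EEXIST (second panel): "card0-eDP-2-backlight"
     */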
-
-void intel_backlight_device_unregister(struct intel_connector *connector)
-{
- struct intel_panel *panel = &connector->panel;
-
- if (panel->backlight.device) {
- backlight_device_unregister(panel->backlight.device);
- panel->backlight.device = NULL;
- }
-}
-#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-
-/*
- * CNP: PWM clock frequency is 19.2 MHz or 24 MHz.
- * PWM increment = 1
- */
-static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
- pwm_freq_hz);
-}
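
A worked example, assuming a 19.2 MHz raw clock and the 200 Hz VBT default frequency:

    /* rawclk_freq = 19200 (kHz), so KHz(19200) = 19,200,000 Hz:
     *   DIV_ROUND_CLOSEST(19200000, 200) = 96000 PWM clock periods
     */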
-
-/*
- * BXT: PWM clock frequency = 19.2 MHz.
- */
-static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz);
-}
-
-/*
- * SPT: This value represents the period of the PWM stream in clock periods
- * multiplied by 16 (default increment) or 128 (alternate increment selected in
- * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
- */
-static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct intel_panel *panel = &connector->panel;
- u32 mul;
-
- if (panel->backlight.alternate_pwm_increment)
- mul = 128;
- else
- mul = 16;
-
- return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul);
-}
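
Worked numbers for the default increment, again assuming the 200 Hz VBT default:

    /* mul = 16 (default), 24 MHz clock:
     *   DIV_ROUND_CLOSEST(24000000, 200 * 16) = 7500
     */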
-
-/*
- * LPT: This value represents the period of the PWM stream in clock periods
- * multiplied by 128 (default increment) or 16 (alternate increment, selected in
- * LPT SOUTH_CHICKEN2 register bit 5).
- */
-static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 mul, clock;
-
- if (panel->backlight.alternate_pwm_increment)
- mul = 16;
- else
- mul = 128;
-
- if (HAS_PCH_LPT_H(dev_priv))
- clock = MHz(135); /* LPT:H */
- else
- clock = MHz(24); /* LPT:LP */
-
- return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
-}
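
The same calculation for both LPT variants (200 Hz target, default 128x increment):

    /* LPT:H,  135 MHz: DIV_ROUND_CLOSEST(135000000, 200 * 128) = 5273
     * LPT:LP,  24 MHz: DIV_ROUND_CLOSEST(24000000,  200 * 128) =  938
     */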
-
-/*
- * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH
- * display raw clocks multiplied by 128.
- */
-static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
- pwm_freq_hz * 128);
-}
-
-/*
- * Gen2: This field determines the number of time base events (display core
- * clock frequency/32) in total for a complete cycle of modulated backlight
- * control.
- *
- * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock)
- * divided by 32.
- */
-static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- int clock;
-
- if (IS_PINEVIEW(dev_priv))
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
- else
- clock = KHz(dev_priv->cdclk.hw.cdclk);
-
- return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
-}
-
-/*
- * Gen4: This value represents the period of the PWM stream in display core
- * clocks ([DevCTG] HRAW clocks) multiplied by 128.
- */
-static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+int intel_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- int clock;
-
- if (IS_G4X(dev_priv))
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
- else
- clock = KHz(dev_priv->cdclk.hw.cdclk);
-
- return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
-}
-
-/*
- * VLV: This value represents the period of the PWM stream in display core
- * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks
- * multiplied by 16. CHV uses a 19.2MHz S0IX clock.
- */
-static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- int mul, clock;
-
- if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
- if (IS_CHERRYVIEW(dev_priv))
- clock = KHz(19200);
- else
- clock = MHz(25);
- mul = 16;
- } else {
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
- mul = 128;
- }
-
- return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
-}
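
For example, on VLV with CBR_PWM_CLOCK_MUX_SELECT clear (the S0IX clock path):

    /* clock = 25 MHz, mul = 16, 200 Hz target:
     *   DIV_ROUND_CLOSEST(25000000, 200 * 16) = 7813
     */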
-
-static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv)
-{
- u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
-
- if (pwm_freq_hz) {
- drm_dbg_kms(&dev_priv->drm,
- "VBT defined backlight frequency %u Hz\n",
- pwm_freq_hz);
- } else {
- pwm_freq_hz = 200;
- drm_dbg_kms(&dev_priv->drm,
- "default backlight frequency %u Hz\n",
- pwm_freq_hz);
- }
-
- return pwm_freq_hz;
-}
-
-static u32 get_backlight_max_vbt(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
- u32 pwm;
-
- if (!panel->backlight.pwm_funcs->hz_to_pwm) {
- drm_dbg_kms(&dev_priv->drm,
- "backlight frequency conversion not supported\n");
- return 0;
- }
-
- pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
- if (!pwm) {
- drm_dbg_kms(&dev_priv->drm,
- "backlight frequency conversion failed\n");
- return 0;
- }
-
- return pwm;
-}
-
-/*
- * Note: The setup hooks can't assume pipe is set!
- */
-static u32 get_backlight_min_vbt(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- int min;
-
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
-
- /*
- * XXX: If the vbt value is 255, it makes min equal to max, which leads
- * to problems. There are such machines out there. Either our
- * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
- * against this by letting the minimum be at most (arbitrarily chosen)
- * 25% of the max.
- */
- min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
- if (min != dev_priv->vbt.backlight.min_brightness) {
- drm_dbg_kms(&dev_priv->drm,
- "clamping VBT min backlight %d/255 to %d/255\n",
- dev_priv->vbt.backlight.min_brightness, min);
- }
-
- /* vbt value is a coefficient in range [0..255] */
- return scale(min, 0, 255, 0, panel->backlight.pwm_level_max);
-}
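
A worked example with made-up values, assuming scale() truncates:

    /* VBT min_brightness = 100 -> clamped to 64;
     * with pwm_level_max = 4648:
     *   scale(64, 0, 255, 0, 4648) = (64 * 4648) / 255 = 1166
     */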
-
-static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
- bool alt, cpu_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (HAS_PCH_LPT(dev_priv))
- alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ if (HAS_GMCH(i915))
+ return gmch_panel_fitting(crtc_state, conn_state);
else
- alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
- panel->backlight.alternate_pwm_increment = alt;
-
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
-
- pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.pwm_level_max = pch_ctl2 >> 16;
-
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
-
- cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
- !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
- (cpu_ctl2 & BLM_PWM_ENABLE);
-
- if (cpu_mode) {
- val = pch_get_backlight(connector, unused);
-
- drm_dbg_kms(&dev_priv->drm,
- "CPU backlight register was enabled, switching to PCH override\n");
-
- /* Write converted CPU PWM value to PCH override register */
- lpt_set_backlight(connector->base.state, val);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
- pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
-
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
- cpu_ctl2 & ~BLM_PWM_ENABLE);
- }
-
- return 0;
-}
-
-static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 cpu_ctl2, pch_ctl1, pch_ctl2;
-
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
-
- pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.pwm_level_max = pch_ctl2 >> 16;
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
- (pch_ctl1 & BLM_PCH_PWM_ENABLE);
-
- return 0;
-}
-
-static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 ctl, val;
-
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
-
- if (DISPLAY_VER(dev_priv) == 2 || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
- panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
-
- if (IS_PINEVIEW(dev_priv))
- panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
-
- panel->backlight.pwm_level_max = ctl >> 17;
-
- if (!panel->backlight.pwm_level_max) {
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- panel->backlight.pwm_level_max >>= 1;
- }
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- if (panel->backlight.combination_mode)
- panel->backlight.pwm_level_max *= 0xff;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- val = i9xx_get_backlight(connector, unused);
- val = intel_panel_invert_pwm_level(connector, val);
- val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
-
- panel->backlight.pwm_enabled = val != 0;
-
- return 0;
-}
-
-static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2;
-
- ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
- panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
- panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
-
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
- panel->backlight.pwm_level_max = ctl >> 16;
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- if (panel->backlight.combination_mode)
- panel->backlight.pwm_level_max *= 0xff;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
-
- return 0;
-}
-
-static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2;
-
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
- return -ENODEV;
-
- ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
-
- ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
- panel->backlight.pwm_level_max = ctl >> 16;
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
-
- return 0;
-}
-
-static int
-bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 pwm_ctl, val;
-
- panel->backlight.controller = dev_priv->vbt.backlight.controller;
-
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
-
- /* Controller 1 uses the utility pin. */
- if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
- panel->backlight.util_pin_active_low =
- val & UTIL_PIN_POLARITY;
- }
-
- panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.pwm_level_max =
- intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
-
- return 0;
-}
-
-static int
-cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 pwm_ctl;
-
- /*
- * CNP has the BXT implementation of backlight, but with only one
- * controller. TODO: ICP has multiple controllers but we only use
- * controller 0 for now.
- */
- panel->backlight.controller = 0;
-
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
-
- panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.pwm_level_max =
- intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
-
- return 0;
-}
-
-static int ext_pwm_setup_backlight(struct intel_connector *connector,
- enum pipe pipe)
-{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_panel *panel = &connector->panel;
- const char *desc;
- u32 level;
-
- /* Get the right PWM chip for DSI backlight according to VBT */
- if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
- desc = "PMIC";
- } else {
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
- desc = "SoC";
- }
-
- if (IS_ERR(panel->backlight.pwm)) {
- drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
- desc);
- panel->backlight.pwm = NULL;
- return -ENODEV;
- }
-
- panel->backlight.pwm_level_max = 100; /* 100% */
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- if (pwm_is_enabled(panel->backlight.pwm)) {
- /* PWM is already enabled, use existing settings */
- pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state);
-
- level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
- 100);
- level = intel_panel_invert_pwm_level(connector, level);
- panel->backlight.pwm_enabled = true;
-
- drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
- NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
- get_vbt_pwm_freq(dev_priv), level);
- } else {
- /* Set period from VBT frequency, leave other settings at 0. */
- panel->backlight.pwm_state.period =
- NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
- }
-
- drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
- desc);
- return 0;
-}
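
For the VBT-frequency branch above, the period works out as, e.g.:

    /* 200 Hz VBT frequency:
     *   period = NSEC_PER_SEC / 200 = 5,000,000 ns (5 ms)
     */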
-
-static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- panel->backlight.pwm_funcs->set(conn_state,
- intel_panel_invert_pwm_level(connector, level));
+ return pch_panel_fitting(crtc_state, conn_state);
}
-static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe)
-{
- struct intel_panel *panel = &connector->panel;
-
- return intel_panel_invert_pwm_level(connector,
- panel->backlight.pwm_funcs->get(connector, pipe));
-}
-
-static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- panel->backlight.pwm_funcs->enable(crtc_state, conn_state,
- intel_panel_invert_pwm_level(connector, level));
-}
-
-static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- panel->backlight.pwm_funcs->disable(conn_state,
- intel_panel_invert_pwm_level(connector, level));
-}
-
-static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
-{
- struct intel_panel *panel = &connector->panel;
- int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
-
- if (ret < 0)
- return ret;
-
- panel->backlight.min = panel->backlight.pwm_level_min;
- panel->backlight.max = panel->backlight.pwm_level_max;
- panel->backlight.level = intel_pwm_get_backlight(connector, pipe);
- panel->backlight.enabled = panel->backlight.pwm_enabled;
-
- return 0;
-}
-
-void intel_panel_update_backlight(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- if (!panel->backlight.present)
- return;
-
- mutex_lock(&dev_priv->backlight_lock);
- if (!panel->backlight.enabled)
- __intel_panel_enable_backlight(crtc_state, conn_state);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
-int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
+enum drm_connector_status
+intel_panel_detect(struct drm_connector *connector, bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_panel *panel = &intel_connector->panel;
- int ret;
-
- if (!dev_priv->vbt.backlight.present) {
- if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
- drm_dbg_kms(&dev_priv->drm,
- "no backlight present per VBT, but present per quirk\n");
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "no backlight present per VBT\n");
- return 0;
- }
- }
-
- /* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
- return -ENODEV;
-
- /* set level and max in panel struct */
- mutex_lock(&dev_priv->backlight_lock);
- ret = panel->backlight.funcs->setup(intel_connector, pipe);
- mutex_unlock(&dev_priv->backlight_lock);
-
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to setup backlight for connector %s\n",
- connector->name);
- return ret;
- }
-
- panel->backlight.present = true;
-
- drm_dbg_kms(&dev_priv->drm,
- "Connector %s backlight initialized, %s, brightness %u/%u\n",
- connector->name,
- enableddisabled(panel->backlight.enabled),
- panel->backlight.level, panel->backlight.max);
-
- return 0;
-}
+ struct drm_i915_private *i915 = to_i915(connector->dev);
-static void intel_panel_destroy_backlight(struct intel_panel *panel)
-{
- /* dispose of the pwm */
- if (panel->backlight.pwm)
- pwm_put(panel->backlight.pwm);
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
- panel->backlight.present = false;
+ return connector_status_connected;
}
-static const struct intel_panel_bl_funcs bxt_pwm_funcs = {
- .setup = bxt_setup_backlight,
- .enable = bxt_enable_backlight,
- .disable = bxt_disable_backlight,
- .set = bxt_set_backlight,
- .get = bxt_get_backlight,
- .hz_to_pwm = bxt_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs cnp_pwm_funcs = {
- .setup = cnp_setup_backlight,
- .enable = cnp_enable_backlight,
- .disable = cnp_disable_backlight,
- .set = bxt_set_backlight,
- .get = bxt_get_backlight,
- .hz_to_pwm = cnp_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs lpt_pwm_funcs = {
- .setup = lpt_setup_backlight,
- .enable = lpt_enable_backlight,
- .disable = lpt_disable_backlight,
- .set = lpt_set_backlight,
- .get = lpt_get_backlight,
- .hz_to_pwm = lpt_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs spt_pwm_funcs = {
- .setup = lpt_setup_backlight,
- .enable = lpt_enable_backlight,
- .disable = lpt_disable_backlight,
- .set = lpt_set_backlight,
- .get = lpt_get_backlight,
- .hz_to_pwm = spt_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs pch_pwm_funcs = {
- .setup = pch_setup_backlight,
- .enable = pch_enable_backlight,
- .disable = pch_disable_backlight,
- .set = pch_set_backlight,
- .get = pch_get_backlight,
- .hz_to_pwm = pch_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs ext_pwm_funcs = {
- .setup = ext_pwm_setup_backlight,
- .enable = ext_pwm_enable_backlight,
- .disable = ext_pwm_disable_backlight,
- .set = ext_pwm_set_backlight,
- .get = ext_pwm_get_backlight,
-};
-
-static const struct intel_panel_bl_funcs vlv_pwm_funcs = {
- .setup = vlv_setup_backlight,
- .enable = vlv_enable_backlight,
- .disable = vlv_disable_backlight,
- .set = vlv_set_backlight,
- .get = vlv_get_backlight,
- .hz_to_pwm = vlv_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs i965_pwm_funcs = {
- .setup = i965_setup_backlight,
- .enable = i965_enable_backlight,
- .disable = i965_disable_backlight,
- .set = i9xx_set_backlight,
- .get = i9xx_get_backlight,
- .hz_to_pwm = i965_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs i9xx_pwm_funcs = {
- .setup = i9xx_setup_backlight,
- .enable = i9xx_enable_backlight,
- .disable = i9xx_disable_backlight,
- .set = i9xx_set_backlight,
- .get = i9xx_get_backlight,
- .hz_to_pwm = i9xx_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs pwm_bl_funcs = {
- .setup = intel_pwm_setup_backlight,
- .enable = intel_pwm_enable_backlight,
- .disable = intel_pwm_disable_backlight,
- .set = intel_pwm_set_backlight,
- .get = intel_pwm_get_backlight,
-};
-
-/* Set up chip specific backlight functions */
-static void
-intel_panel_init_backlight_funcs(struct intel_panel *panel)
+enum drm_mode_status
+intel_panel_mode_valid(struct intel_connector *connector,
+ const struct drm_display_mode *mode)
{
- struct intel_connector *connector =
- container_of(panel, struct intel_connector, panel);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode = connector->panel.fixed_mode;
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
- intel_dsi_dcs_init_backlight_funcs(connector) == 0)
- return;
-
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- panel->backlight.pwm_funcs = &bxt_pwm_funcs;
- } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
- panel->backlight.pwm_funcs = &cnp_pwm_funcs;
- } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
- if (HAS_PCH_LPT(dev_priv))
- panel->backlight.pwm_funcs = &lpt_pwm_funcs;
- else
- panel->backlight.pwm_funcs = &spt_pwm_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- panel->backlight.pwm_funcs = &pch_pwm_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
- panel->backlight.pwm_funcs = &ext_pwm_funcs;
- } else {
- panel->backlight.pwm_funcs = &vlv_pwm_funcs;
- }
- } else if (DISPLAY_VER(dev_priv) == 4) {
- panel->backlight.pwm_funcs = &i965_pwm_funcs;
- } else {
- panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
- }
-
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
+ if (!fixed_mode)
+ return MODE_OK;
- /* We're using a standard PWM backlight interface */
- panel->backlight.funcs = &pwm_bl_funcs;
-}
+ if (mode->hdisplay != fixed_mode->hdisplay)
+ return MODE_PANEL;
-enum drm_connector_status
-intel_panel_detect(struct drm_connector *connector, bool force)
-{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ if (mode->vdisplay != fixed_mode->vdisplay)
+ return MODE_PANEL;
- if (!INTEL_DISPLAY_ENABLED(i915))
- return connector_status_disconnected;
+ if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(fixed_mode))
+ return MODE_PANEL;
- return connector_status_connected;
+ return MODE_OK;
}
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode,
struct drm_display_mode *downclock_mode)
{
- intel_panel_init_backlight_funcs(panel);
+ intel_backlight_init_funcs(panel);
panel->fixed_mode = fixed_mode;
panel->downclock_mode = downclock_mode;
@@ -2245,7 +542,7 @@ void intel_panel_fini(struct intel_panel *panel)
struct intel_connector *intel_connector =
container_of(panel, struct intel_connector, panel);
- intel_panel_destroy_backlight(panel);
+ intel_backlight_destroy(panel);
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 1d340f77bffc..d50b3f7e9e58 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -8,15 +8,13 @@
#include <linux/types.h>
-#include "intel_display.h"
-
+enum drm_connector_status;
struct drm_connector;
struct drm_connector_state;
struct drm_display_mode;
+struct drm_i915_private;
struct intel_connector;
-struct intel_crtc;
struct intel_crtc_state;
-struct intel_encoder;
struct intel_panel;
int intel_panel_init(struct intel_panel *panel,
@@ -25,23 +23,16 @@ int intel_panel_init(struct intel_panel *panel,
void intel_panel_fini(struct intel_panel *panel);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
-void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+bool intel_panel_use_ssc(struct drm_i915_private *i915);
+void intel_panel_fixed_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
-int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
- u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector,
- enum pipe pipe);
-void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_update_backlight(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
+enum drm_mode_status
+intel_panel_mode_valid(struct intel_connector *connector,
+ const struct drm_display_mode *mode);
+int intel_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+int intel_panel_compute_config(struct intel_connector *connector,
+ struct drm_display_mode *adjusted_mode);
struct drm_display_mode *
intel_panel_edid_downclock_mode(struct intel_connector *connector,
const struct drm_display_mode *fixed_mode);
@@ -49,22 +40,5 @@ struct drm_display_mode *
intel_panel_edid_fixed_mode(struct intel_connector *connector);
struct drm_display_mode *
intel_panel_vbt_fixed_mode(struct intel_connector *connector);
-void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 level);
-u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 level);
-u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
-u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);
-
-#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-int intel_backlight_device_register(struct intel_connector *connector);
-void intel_backlight_device_unregister(struct intel_connector *connector);
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static inline int intel_backlight_device_register(struct intel_connector *connector)
-{
- return 0;
-}
-static inline void intel_backlight_device_unregister(struct intel_connector *connector)
-{
-}
-#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
#endif /* __INTEL_PANEL_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
new file mode 100644
index 000000000000..dcd698a02da2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "intel_display_types.h"
+#include "intel_plane_initial.h"
+#include "intel_atomic_plane.h"
+#include "intel_display.h"
+#include "intel_fb.h"
+
+static bool
+intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
+ const struct intel_initial_plane_config *plane_config,
+ struct drm_framebuffer **fb,
+ struct i915_vma **vma)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (!crtc_state->uapi.active)
+ continue;
+
+ if (!plane_state->ggtt_vma)
+ continue;
+
+ if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+ *fb = plane_state->hw.fb;
+ *vma = plane_state->ggtt_vma;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static struct i915_vma *
+initial_plane_vma(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 base, size;
+
+ if (plane_config->size == 0)
+ return NULL;
+
+ base = round_down(plane_config->base,
+ I915_GTT_MIN_ALIGNMENT);
+ size = round_up(plane_config->base + plane_config->size,
+ I915_GTT_MIN_ALIGNMENT);
+ size -= base;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ size * 2 > i915->stolen_usable_size)
+ return NULL;
+
+ obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
+ if (IS_ERR(obj))
+ return NULL;
+
+ /*
+ * Mark it WT ahead of time to avoid changing the
+ * cache_level during fbdev initialization. The
+ * unbind there would get stuck waiting for rcu.
+ */
+ i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
+
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ break;
+ case I915_TILING_X:
+ case I915_TILING_Y:
+ obj->tiling_and_stride =
+ plane_config->fb->base.pitches[0] |
+ plane_config->tiling;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma))
+ goto err_obj;
+
+ if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+ goto err_obj;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ !i915_vma_is_map_and_fenceable(vma))
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return NULL;
+}
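
The rounding above expands the range to alignment boundaries; assuming I915_GTT_MIN_ALIGNMENT is 4 KiB and made-up firmware values:

    /* plane_config->base = 0x12345100, ->size = 0x1800:
     *   base = round_down(0x12345100, 0x1000)            = 0x12345000
     *   size = round_up(0x12346900, 0x1000) - 0x12345000 = 0x2000
     */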
+
+static bool
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+ struct i915_vma *vma;
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ break;
+ default:
+ drm_dbg(&dev_priv->drm,
+ "Unsupported modifier for initial FB: 0x%llx\n",
+ fb->modifier);
+ return false;
+ }
+
+ vma = initial_plane_vma(dev_priv, plane_config);
+ if (!vma)
+ return false;
+
+ mode_cmd.pixel_format = fb->format->format;
+ mode_cmd.width = fb->width;
+ mode_cmd.height = fb->height;
+ mode_cmd.pitches[0] = fb->pitches[0];
+ mode_cmd.modifier[0] = fb->modifier;
+ mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
+
+ if (intel_framebuffer_init(to_intel_framebuffer(fb),
+ vma->obj, &mode_cmd)) {
+ drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ goto err_vma;
+ }
+
+ plane_config->vma = vma;
+ return true;
+
+err_vma:
+ i915_vma_put(vma);
+ return false;
+}
+
+static void
+intel_find_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct drm_framebuffer *fb;
+ struct i915_vma *vma;
+
+ /*
+ * TODO:
+ * Disable planes if get_initial_plane_config() failed.
+ * Make sure things work if the surface base is not page aligned.
+ */
+ if (!plane_config->fb)
+ return;
+
+ if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
+ fb = &plane_config->fb->base;
+ vma = plane_config->vma;
+ goto valid_fb;
+ }
+
+ /*
+ * Failed to alloc the obj, check to see if we should share
+ * an fb with another CRTC instead
+ */
+ if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
+ goto valid_fb;
+
+ /*
+ * We've failed to reconstruct the BIOS FB. Current display state
+ * indicates that the primary plane is visible, but has a NULL FB,
+ * which will lead to problems later if we don't fix it up. The
+ * simplest solution is to just disable the primary plane now and
+ * pretend the BIOS never had it enabled.
+ */
+ intel_plane_disable_noatomic(crtc, plane);
+ if (crtc_state->bigjoiner) {
+ struct intel_crtc *slave =
+ crtc_state->bigjoiner_linked_crtc;
+ intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
+ }
+
+ return;
+
+valid_fb:
+ plane_state->uapi.rotation = plane_config->rotation;
+ intel_fb_fill_view(to_intel_framebuffer(fb),
+ plane_state->uapi.rotation, &plane_state->view);
+
+ __i915_vma_pin(vma);
+ plane_state->ggtt_vma = i915_vma_get(vma);
+ if (intel_plane_uses_fence(plane_state) &&
+ i915_vma_pin_fence(vma) == 0 && vma->fence)
+ plane_state->flags |= PLANE_HAS_FENCE;
+
+ plane_state->uapi.src_x = 0;
+ plane_state->uapi.src_y = 0;
+ plane_state->uapi.src_w = fb->width << 16;
+ plane_state->uapi.src_h = fb->height << 16;
+
+ plane_state->uapi.crtc_x = 0;
+ plane_state->uapi.crtc_y = 0;
+ plane_state->uapi.crtc_w = fb->width;
+ plane_state->uapi.crtc_h = fb->height;
+
+ if (plane_config->tiling)
+ dev_priv->preserve_bios_swizzle = true;
+
+ plane_state->uapi.fb = fb;
+ drm_framebuffer_get(fb);
+
+ plane_state->uapi.crtc = &crtc->base;
+ intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
+
+ atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
+}
+
+static void plane_config_fini(struct intel_initial_plane_config *plane_config)
+{
+ if (plane_config->fb) {
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+
+ /* We may only have the stub and not a full framebuffer */
+ if (drm_framebuffer_read_refcount(fb))
+ drm_framebuffer_put(fb);
+ else
+ kfree(fb);
+ }
+
+ if (plane_config->vma)
+ i915_vma_put(plane_config->vma);
+}
+
+void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_initial_plane_config plane_config = {};
+
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ dev_priv->display->get_initial_plane_config(crtc, &plane_config);
+
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, &plane_config);
+
+ plane_config_fini(&plane_config);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h
new file mode 100644
index 000000000000..c7e35ab3182b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_PLANE_INITIAL_H__
+#define __INTEL_PLANE_INITIAL_H__
+
+struct intel_crtc;
+
+void intel_crtc_initial_plane_config(struct intel_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index a36ec4a818ff..e9c679bb1b2e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -9,6 +9,7 @@
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpll.h"
+#include "intel_lvds.h"
#include "intel_pps.h"
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
@@ -1408,3 +1409,61 @@ void intel_pps_setup(struct drm_i915_private *i915)
else
i915->pps_mmio_base = PPS_BASE;
}
+
+void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ i915_reg_t pp_reg;
+ u32 val;
+ enum pipe panel_pipe = INVALID_PIPE;
+ bool locked = true;
+
+ if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
+ return;
+
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ u32 port_sel;
+
+ pp_reg = PP_CONTROL(0);
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+ switch (port_sel) {
+ case PANEL_PORT_SELECT_LVDS:
+ intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPA:
+ g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPC:
+ g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPD:
+ g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+ break;
+ default:
+ MISSING_CASE(port_sel);
+ break;
+ }
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ /* presumably write lock depends on pipe, not port select */
+ pp_reg = PP_CONTROL(pipe);
+ panel_pipe = pipe;
+ } else {
+ u32 port_sel;
+
+ pp_reg = PP_CONTROL(0);
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+ drm_WARN_ON(&dev_priv->drm,
+ port_sel != PANEL_PORT_SELECT_LVDS);
+ intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
+ }
+
+ val = intel_de_read(dev_priv, pp_reg);
+ if (!(val & PANEL_POWER_ON) ||
+ ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
+ locked = false;
+
+ I915_STATE_WARN(panel_pipe == pipe && locked,
+ "panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
+}
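A stand-alone sketch of the final lock test above, assuming the PP_CONTROL bit layout from i915_reg.h (power-on in bit 0, unlock magic 0xabcd in bits 31:16); the helper name is hypothetical:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PANEL_POWER_ON    (1u << 0)
#define PANEL_UNLOCK_MASK (0xffffu << 16)
#define PANEL_UNLOCK_REGS (0xabcdu << 16)

/* Mirrors the check above: registers count as locked only while the panel
 * is powered on and the unlock magic is absent. */
static bool pps_locked(uint32_t val)
{
	if (!(val & PANEL_POWER_ON) ||
	    (val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)
		return false;
	return true;
}

int main(void)
{
	assert(!pps_locked(0));					/* panel off */
	assert(!pps_locked(PANEL_POWER_ON | PANEL_UNLOCK_REGS));/* unlocked */
	assert(pps_locked(PANEL_POWER_ON));			/* on, locked */
	return 0;
}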
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index fbbcca782e7b..fbb47f6f453e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -10,6 +10,7 @@
#include "intel_wakeref.h"
+enum pipe;
struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
@@ -49,4 +50,6 @@ void vlv_pps_init(struct intel_encoder *encoder,
void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
void intel_pps_setup(struct drm_i915_private *i915);
+void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe);
+
#endif /* __INTEL_PPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 1b0daf649e82..7a205fd5023b 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -22,6 +22,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
#include "display/intel_dp.h"
@@ -364,41 +365,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
}
}
-static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 aux_clock_divider, aux_ctl;
- int i;
- static const u8 aux_msg[] = {
- [0] = DP_AUX_NATIVE_WRITE << 4,
- [1] = DP_SET_POWER >> 8,
- [2] = DP_SET_POWER & 0xff,
- [3] = 1 - 1,
- [4] = DP_SET_POWER_D0,
- };
- u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
- EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
- EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
- EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
-
- BUILD_BUG_ON(sizeof(aux_msg) > 20);
- for (i = 0; i < sizeof(aux_msg); i += 4)
- intel_de_write(dev_priv,
- EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
- intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
-
- aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
-
- /* Start with bits set for DDI_AUX_CTL register */
- aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
- aux_clock_divider);
-
- /* Select only valid bits for SRD_AUX_CTL */
- aux_ctl &= psr_aux_mask;
- intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
- aux_ctl);
-}
-
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -460,7 +426,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
val |= EDP_PSR_TP2_TP3_TIME_2500us;
check_tp3_sel:
- if (intel_dp_source_supports_hbr2(intel_dp) &&
+ if (intel_dp_source_supports_tps3(dev_priv) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP1_TP3_SEL;
else
@@ -545,7 +511,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
val |= EDP_Y_COORDINATE_ENABLE;
- val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
+ val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
val |= intel_psr2_get_tp_time(intel_dp);
/* Wa_22012278275:adl-p */
@@ -595,15 +561,16 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_SU_SDP_SCANLINE;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ u32 tmp;
+
/* Wa_1408330847 */
if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
DIS_RAM_BYPASS_PSR2_MAN_TRACK,
DIS_RAM_BYPASS_PSR2_MAN_TRACK);
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
- PSR2_MAN_TRK_CTL_ENABLE);
+ tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
intel_de_write(dev_priv,
PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
@@ -621,9 +588,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
- if (DISPLAY_VER(dev_priv) < 9)
- return false;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
return trans == TRANSCODER_A;
else
return trans == TRANSCODER_EDP;
@@ -755,11 +720,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- int i;
if (!dev_priv->params.enable_psr2_sel_fetch &&
intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
@@ -774,14 +735,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 sel fetch not enabled, plane rotated\n");
- return false;
- }
- }
-
/* Wa_14010254185 Wa_14010103792 */
if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
drm_dbg_kms(&dev_priv->drm,
@@ -877,12 +830,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- /*
- * We are missing the implementation of some workarounds to enabled PSR2
- * in Alderlake_P, until ready PSR2 should be kept disabled.
- */
- if (IS_ALDERLAKE_P(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 is missing the implementation of workarounds\n");
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
return false;
}
@@ -985,7 +934,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
void intel_psr_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const struct drm_display_mode *adjusted_mode =
@@ -1037,7 +987,10 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+ intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
+ &crtc_state->psr_vsc);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1114,12 +1067,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
- /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
- * use hardcoded values PSR AUX transactions
- */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hsw_psr_setup_aux(intel_dp);
-
if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) {
i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 chicken = intel_de_read(dev_priv, reg);
@@ -1130,6 +1077,16 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
}
/*
+ * Wa_16014451276:adlp
+ * All supported adlp panels have 1-based X granularity; this may
+ * cause issues if unsupported panels are used.
+ */
+ if (IS_ALDERLAKE_P(dev_priv) &&
+ intel_dp->psr.psr2_enabled)
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ ADLP_1_BASED_X_GRANULARITY);
+
+ /*
* Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
* mask LPSP to avoid dependency on other drivers that might block
* runtime_pm besides preventing other hw tracking issues now we
@@ -1174,6 +1131,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
TRANS_SET_CONTEXT_LATENCY_MASK,
TRANS_SET_CONTEXT_LATENCY_VALUE(1));
+
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled)
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS);
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -1208,8 +1170,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
}
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1236,9 +1197,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
intel_dp->psr.psr2_enabled ? "2" : "1");
- intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
- &intel_dp->psr.vsc);
- intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
+ intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp);
@@ -1248,33 +1207,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_psr_activate(intel_dp);
}
-/**
- * intel_psr_enable - Enable PSR
- * @intel_dp: Intel DP
- * @crtc_state: new CRTC state
- * @conn_state: new CONNECTOR state
- *
- * This function can only be called after the pipe is fully trained and enabled.
- */
-void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!CAN_PSR(intel_dp))
- return;
-
- if (!crtc_state->has_psr)
- return;
-
- drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
-
- mutex_lock(&intel_dp->psr.lock);
- intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
- mutex_unlock(&intel_dp->psr.lock);
-}
-
static void intel_psr_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1363,6 +1295,11 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
TRANS_SET_CONTEXT_LATENCY_MASK, 0);
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled)
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
+
intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
/* Disable PSR on Sink */
@@ -1456,27 +1393,48 @@ unlock:
mutex_unlock(&psr->lock);
}
+static inline u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ?
+ ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
+ PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
+}
+
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 9)
- /*
- * Display WA #0884: skl+
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
- else
- /*
- * A write to CURSURFLIVE do not cause HW tracking to exit PSR
- * on older gens so doing the manual exit instead.
- */
- intel_psr_exit(intel_dp);
+ if (intel_dp->psr.psr2_sel_fetch_enabled)
+ intel_de_rmw(dev_priv,
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0,
+ man_trk_ctl_single_full_frame_bit_get(dev_priv));
+
+ /*
+ * Display WA #0884: skl+
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * Workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the current active
+ * pipe.
+ *
+ * This workaround does not exist for platforms with display version 10
+ * or newer, but testing proved that it works up to display version 13;
+ * newer platforms will need additional testing.
+ */
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+}
+
+void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}
void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
@@ -1487,17 +1445,17 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
const struct drm_rect *clip;
- u32 val, offset;
- int ret, x, y;
+ u32 val;
+ int x, y;
if (!crtc_state->enable_psr2_sel_fetch)
return;
- val = plane_state ? plane_state->ctl : 0;
- val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE;
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val);
- if (!val || plane->id == PLANE_CURSOR)
+ if (plane->id == PLANE_CURSOR) {
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ plane_state->ctl);
return;
+ }
clip = &plane_state->psr2_sel_fetch_area;
@@ -1508,10 +1466,6 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
/* TODO: consider auxiliary surfaces */
x = plane_state->uapi.src.x1 >> 16;
y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
- ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
- if (ret)
- drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
- ret);
val = y << 16 | x;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
val);
@@ -1520,14 +1474,16 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ PLANE_SEL_FETCH_CTL_ENABLE);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
- !crtc_state->enable_psr2_sel_fetch)
+ if (!crtc_state->enable_psr2_sel_fetch)
return;
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
@@ -1542,11 +1498,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
u32 val = PSR2_MAN_TRK_CTL_ENABLE;
if (full_update) {
- if (IS_ALDERLAKE_P(dev_priv))
- val |= ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
- else
- val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
-
+ /*
+ * Not applying Wa_14014971508:adlp as we do not support the
+ * feature that requires this workaround.
+ */
+ val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
goto exit;
}
@@ -1555,7 +1511,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (IS_ALDERLAKE_P(dev_priv)) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
- val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2);
+ val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
} else {
drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
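The clip->y2 - 1 above converts drm_rect's exclusive end coordinate into the inclusive scanline address the ADL-P register field expects. A tiny worked check, with a plain struct standing in for drm_rect:

#include <assert.h>

struct rect { int y1, y2; };	/* y2 is exclusive, as in drm_rect */

int main(void)
{
	struct rect clip = { .y1 = 0, .y2 = 64 };	/* scanlines 0..63 */
	int start = clip.y1;		/* SU region start address */
	int end = clip.y2 - 1;		/* SU region end address, inclusive */

	assert(end - start + 1 == 64);	/* covers all 64 scanlines */
	return 0;
}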
@@ -1597,6 +1553,45 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
drm_warn(&dev_priv->drm, "Missing PSR2 sel fetch alignment with DSC\n");
}
+/*
+ * TODO: Not clear how to handle planes with negative position; also,
+ * planes are not updated if they have a negative X position, so for
+ * now we do a full update in these cases.
+ *
+ * TODO: We are missing multi-planar format handling; until it is
+ * implemented we will send full frame updates.
+ *
+ * Plane scaling and rotation are not supported by selective fetch and both
+ * properties can change without a modeset, so they need to be checked at
+ * every atomic commit.
+ */
+static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
+{
+ if (plane_state->uapi.dst.y1 < 0 ||
+ plane_state->uapi.dst.x1 < 0 ||
+ plane_state->scaler_id >= 0 ||
+ plane_state->hw.fb->format->num_planes > 1 ||
+ plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
+ return false;
+
+ return true;
+}
+
+/*
+ * Check for pipe properties that are not supported by selective fetch.
+ *
+ * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
+ * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
+ * enabled and going to the full update path.
+ */
+static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->scaler_state.scaler_id >= 0)
+ return false;
+
+ return true;
+}
+
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1610,9 +1605,10 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (!crtc_state->enable_psr2_sel_fetch)
return 0;
- ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
- if (ret)
- return ret;
+ if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
+ full_update = true;
+ goto skip_sel_fetch_set_loop;
+ }
/*
* Calculate minimal selective fetch area of each plane and calculate
@@ -1623,8 +1619,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
struct drm_rect src, damaged_area = { .y1 = -1 };
- struct drm_mode_rect *damaged_clips;
- u32 num_clips, j;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
@@ -1633,19 +1629,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
!old_plane_state->uapi.visible)
continue;
- /*
- * TODO: Not clear how to handle planes with negative position,
- * also planes are not updated if they have a negative X
- * position so for now doing a full update in this cases
- */
- if (new_plane_state->uapi.dst.y1 < 0 ||
- new_plane_state->uapi.dst.x1 < 0) {
+ if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
full_update = true;
break;
}
- num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
-
/*
* If visibility or plane moved, mark the whole plane area as
* damaged as it needs to be completely redrawn in the new and old
@@ -1666,14 +1654,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
clip_area_update(&pipe_clip, &damaged_area);
}
continue;
- } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
- (!num_clips &&
- new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
- /*
- * If the plane don't have damaged areas but the
- * framebuffer changed or alpha changed, mark the whole
- * plane area as damaged.
- */
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
+ /* If alpha changed, mark the whole plane area as damaged */
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
clip_area_update(&pipe_clip, &damaged_area);
@@ -1681,15 +1663,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
}
drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
- damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
- for (j = 0; j < num_clips; j++) {
- struct drm_rect clip;
-
- clip.x1 = damaged_clips[j].x1;
- clip.y1 = damaged_clips[j].y1;
- clip.x2 = damaged_clips[j].x2;
- clip.y2 = damaged_clips[j].y2;
+ drm_atomic_helper_damage_iter_init(&iter,
+ &old_plane_state->uapi,
+ &new_plane_state->uapi);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
if (drm_rect_intersect(&clip, &src))
clip_area_update(&damaged_area, &clip);
}
@@ -1705,6 +1683,10 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (full_update)
goto skip_sel_fetch_set_loop;
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
/*
@@ -1723,6 +1705,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
continue;
+ if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
+ full_update = true;
+ break;
+ }
+
sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
@@ -1734,58 +1721,92 @@ skip_sel_fetch_set_loop:
return 0;
}
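The per-pipe selective-update region computed in the loop above is a union of damage rectangles tracked only in y, because the hardware region is a band of full-width scanlines. A stand-alone sketch of that accumulation, with a plain struct standing in for drm_rect and the damage iterator:

#include <assert.h>

struct rect { int y1, y2; };	/* y2 exclusive, as drm_rect */

/* Grow the pipe region to cover another damaged area; y1 == -1 marks an
 * empty region, matching the driver's clip_area_update(). */
static void clip_area_update(struct rect *pipe_clip, const struct rect *area)
{
	if (pipe_clip->y1 == -1) {
		*pipe_clip = *area;
		return;
	}
	if (area->y1 < pipe_clip->y1)
		pipe_clip->y1 = area->y1;
	if (area->y2 > pipe_clip->y2)
		pipe_clip->y2 = area->y2;
}

int main(void)
{
	struct rect pipe_clip = { .y1 = -1 };
	struct rect clip_a = { .y1 = 100, .y2 = 200 };
	struct rect clip_b = { .y1 = 50, .y2 = 120 };

	clip_area_update(&pipe_clip, &clip_a);
	clip_area_update(&pipe_clip, &clip_b);

	/* The resulting region is the y-union of both clips. */
	assert(pipe_clip.y1 == 50 && pipe_clip.y2 == 200);
	return 0;
}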
-/**
- * intel_psr_update - Update PSR state
- * @intel_dp: Intel DP
- * @crtc_state: new CRTC state
- * @conn_state: new CONNECTOR state
- *
- * This functions will update PSR states, disabling, enabling or switching PSR
- * version when executing fastsets. For full modeset, intel_psr_disable() and
- * intel_psr_enable() should be called instead.
- */
-void intel_psr_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_psr *psr = &intel_dp->psr;
- bool enable, psr2_enable;
+ struct intel_encoder *encoder;
- if (!CAN_PSR(intel_dp))
+ for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_psr *psr = &intel_dp->psr;
+ bool needs_to_disable = false;
+
+ mutex_lock(&psr->lock);
+
+ /*
+ * Reasons to disable:
+ * - PSR disabled in new state
+ * - All planes will go inactive
+ * - Changing between PSR versions
+ */
+ needs_to_disable |= !crtc_state->has_psr;
+ needs_to_disable |= !crtc_state->active_planes;
+ needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled;
+
+ if (psr->enabled && needs_to_disable)
+ intel_psr_disable_locked(intel_dp);
+
+ mutex_unlock(&psr->lock);
+ }
+}
+
+void intel_psr_pre_plane_update(const struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ if (!HAS_PSR(dev_priv))
return;
- mutex_lock(&intel_dp->psr.lock);
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
+ _intel_psr_pre_plane_update(state, crtc_state);
+}
+
+static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_encoder *encoder;
+
+ if (!crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_psr *psr = &intel_dp->psr;
+
+ mutex_lock(&psr->lock);
+
+ drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
- enable = crtc_state->has_psr;
- psr2_enable = crtc_state->has_psr2;
+ /* Only enable if there are active planes */
+ if (!psr->enabled && crtc_state->active_planes)
+ intel_psr_enable_locked(intel_dp, crtc_state);
- if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
- crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
psr_force_hw_tracking_exit(intel_dp);
- else if (DISPLAY_VER(dev_priv) < 9 && psr->enabled) {
- /*
- * Activate PSR again after a force exit when enabling
- * CRC in older gens
- */
- if (!intel_dp->psr.active &&
- !intel_dp->psr.busy_frontbuffer_bits)
- schedule_work(&intel_dp->psr.work);
- }
- goto unlock;
+ mutex_unlock(&psr->lock);
}
+}
- if (psr->enabled)
- intel_psr_disable_locked(intel_dp);
+void intel_psr_post_plane_update(const struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
- if (enable)
- intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
+ if (!HAS_PSR(dev_priv))
+ return;
-unlock:
- mutex_unlock(&intel_dp->psr.lock);
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
+ _intel_psr_post_plane_update(state, crtc_state);
}
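A compact sketch of the pre-plane-update disable decision above; struct and function names here are illustrative, not i915 API:

#include <assert.h>
#include <stdbool.h>

struct psr { bool enabled; bool psr2_enabled; };
struct crtc_state { bool has_psr; bool has_psr2; int active_planes; };

/* Disable if PSR is off in the new state, all planes go inactive, or the
 * PSR version changes - the three reasons listed in the comment above. */
static bool needs_to_disable(const struct psr *psr, const struct crtc_state *s)
{
	return !s->has_psr || !s->active_planes ||
	       s->has_psr2 != psr->psr2_enabled;
}

int main(void)
{
	struct psr psr = { .enabled = true, .psr2_enabled = false };
	struct crtc_state fastset = { .has_psr = true, .has_psr2 = true,
				      .active_planes = 1 };

	/* The driver only acts on this when PSR is currently enabled;
	 * here a PSR1 -> PSR2 switch forces a disable before re-enabling. */
	assert(psr.enabled && needs_to_disable(&psr, &fastset));
	return 0;
}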
/**
@@ -2065,20 +2086,16 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
/*
* When we completely rely on PSR2 S/W tracking in the future,
* intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
- * event also therefore tgl_dc3co_flush() require to be changed
+ * events as well; therefore tgl_dc3co_flush_locked() will need to be changed
* accordingly in the future.
*/
static void
-tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
- enum fb_op_origin origin)
+tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
{
- mutex_lock(&intel_dp->psr.lock);
-
- if (!intel_dp->psr.dc3co_exitline)
- goto unlock;
-
- if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
- goto unlock;
+ if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
+ !intel_dp->psr.active)
+ return;
/*
* At every frontbuffer flush flip event modified delay of delayed work,
@@ -2086,14 +2103,11 @@ tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
*/
if (!(frontbuffer_bits &
INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
- goto unlock;
+ return;
tgl_psr2_enable_dc3co(intel_dp);
mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
intel_dp->psr.dc3co_exit_delay);
-
-unlock:
- mutex_unlock(&intel_dp->psr.lock);
}
/**
@@ -2118,11 +2132,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (origin == ORIGIN_FLIP) {
- tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
- continue;
- }
-
mutex_lock(&intel_dp->psr.lock);
if (!intel_dp->psr.enabled) {
mutex_unlock(&intel_dp->psr.lock);
@@ -2143,6 +2152,14 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
continue;
}
+ if (origin == ORIGIN_FLIP ||
+ (origin == ORIGIN_CURSOR_UPDATE &&
+ !intel_dp->psr.psr2_sel_fetch_enabled)) {
+ tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
/* By definition flush = invalidate + flush */
if (pipe_frontbuffer_bits)
psr_force_hw_tracking_exit(intel_dp);
@@ -2186,23 +2203,12 @@ void intel_psr_init(struct intel_dp *intel_dp)
intel_dp->psr.source_support = true;
- if (IS_HASWELL(dev_priv))
- /*
- * HSW don't have PSR registers on the same space as transcoder
- * so set this to a value that when subtract to the register
- * in transcoder space results in the right offset for HSW
- */
- dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
-
if (dev_priv->params.enable_psr == -1)
- if (DISPLAY_VER(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+ if (!dev_priv->vbt.psr.enable)
dev_priv->params.enable_psr = 0;
/* Set link_standby x link_off defaults */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- /* HSW and BDW require workarounds that we don't implement. */
- intel_dp->psr.link_standby = false;
- else if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 641521b101c8..facffbacd357 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -20,14 +20,10 @@ struct intel_plane;
struct intel_encoder;
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
-void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
+void intel_psr_pre_plane_update(const struct intel_atomic_state *state);
+void intel_psr_post_plane_update(const struct intel_atomic_state *state);
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
-void intel_psr_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
@@ -37,7 +33,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
enum fb_op_origin origin);
void intel_psr_init(struct intel_dp *intel_dp);
void intel_psr_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state);
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
void intel_psr_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
@@ -51,6 +48,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int color_plane);
+void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 6cb27599ea03..2dc6c3742ba2 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1335,6 +1335,13 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
adjusted_mode);
pipe_config->sdvo_tv_clock = true;
} else if (IS_LVDS(intel_sdvo_connector)) {
+ int ret;
+
+ ret = intel_panel_compute_config(&intel_sdvo_connector->base,
+ adjusted_mode);
+ if (ret)
+ return ret;
+
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo_connector->base.panel.fixed_mode))
return -EINVAL;
@@ -1873,7 +1880,6 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
-
if (clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -1890,14 +1896,11 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
if (IS_LVDS(intel_sdvo_connector)) {
- const struct drm_display_mode *fixed_mode =
- intel_sdvo_connector->base.panel.fixed_mode;
-
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
+ enum drm_mode_status status;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
+ status = intel_panel_mode_valid(&intel_sdvo_connector->base, mode);
+ if (status != MODE_OK)
+ return status;
}
return MODE_OK;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 18b52b64af95..5e20f340730f 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,6 +5,8 @@
#include <linux/util_macros.h>
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_snps_phy.h"
@@ -50,58 +52,28 @@ void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val);
}
-static const u32 dg2_ddi_translations[] = {
- /* VS 0, pre-emph 0 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 26),
-
- /* VS 0, pre-emph 1 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 33) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 6),
-
- /* VS 0, pre-emph 2 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 38) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 12),
-
- /* VS 0, pre-emph 3 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 43) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 19),
-
- /* VS 1, pre-emph 0 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 39),
-
- /* VS 1, pre-emph 1 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 44) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 8),
-
- /* VS 1, pre-emph 2 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 47) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 15),
-
- /* VS 2, pre-emph 0 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 52),
-
- /* VS 2, pre-emph 1 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 51) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 10),
-
- /* VS 3, pre-emph 0 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 62),
-};
-
-void intel_snps_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- u32 level)
+void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
int n_entries, ln;
- n_entries = ARRAY_SIZE(dg2_ddi_translations);
- if (level >= n_entries)
- level = n_entries - 1;
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ for (ln = 0; ln < 4; ln++) {
+ u32 val = 0;
+
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing);
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_PRE, trans->entries[level].snps.pre_cursor);
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, trans->entries[level].snps.post_cursor);
- for (ln = 0; ln < 4; ln++)
- intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy),
- dg2_ddi_translations[level]);
+ intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy), val);
+ }
}
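REG_FIELD_PREP() shifts a value into position under a register mask. A stand-alone sketch of packing one buf-trans entry this way, using illustrative masks rather than the real SNPS_PHY_TX_EQ layout:

#include <assert.h>
#include <stdint.h>

/* Minimal stand-in for REG_FIELD_PREP(): shift val to the mask's low bit. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

/* Illustrative field layout only, not the real TX EQ register masks. */
#define TX_EQ_MAIN 0x0000003fu
#define TX_EQ_PRE  0x00003f00u
#define TX_EQ_POST 0x003f0000u

int main(void)
{
	/* Pack vswing / pre-cursor / post-cursor into one register value. */
	uint32_t val = field_prep(TX_EQ_MAIN, 26) |
		       field_prep(TX_EQ_PRE, 4) |
		       field_prep(TX_EQ_POST, 19);

	assert(((val & TX_EQ_MAIN) >> 0) == 26);
	assert(((val & TX_EQ_PRE) >> 8) == 4);
	assert(((val & TX_EQ_POST) >> 16) == 19);
	return 0;
}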
/*
@@ -198,11 +170,81 @@ static const struct intel_mpllb_state dg2_dp_hbr3_100 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
};
-static const struct intel_mpllb_state *dg2_dp_100_tables[] = {
+static const struct intel_mpllb_state dg2_dp_uhbr10_100 = {
+ .clock = 1000000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 21) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 368),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+
+ /*
+ * SSC will be enabled; DP UHBR has a minimum SSC requirement.
+ */
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 58982),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 76101),
+};
+
+static const struct intel_mpllb_state dg2_dp_uhbr13_100 = {
+ .clock = 1350000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 45) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 508),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+
+ /*
+ * SSC will be enabled; DP UHBR has a minimum SSC requirement.
+ */
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 79626),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 102737),
+};
+
+static const struct intel_mpllb_state * const dg2_dp_100_tables[] = {
&dg2_dp_rbr_100,
&dg2_dp_hbr1_100,
&dg2_dp_hbr2_100,
&dg2_dp_hbr3_100,
+ &dg2_dp_uhbr10_100,
+ &dg2_dp_uhbr13_100,
NULL,
};
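These PLL tables are NULL-terminated arrays of pointers to const state, and the constification in this patch (* const *) also freezes the array itself. A stand-alone sketch of scanning such a table for a clock, as intel_mpllb_calc_state() does; names and values are illustrative:

#include <assert.h>
#include <stddef.h>

struct pll_state { int clock; };

static const struct pll_state rbr  = { .clock = 162000 };
static const struct pll_state hbr1 = { .clock = 270000 };

/* Both the pointed-to states and the table itself are immutable. */
static const struct pll_state * const table[] = { &rbr, &hbr1, NULL };

static const struct pll_state *
lookup(const struct pll_state * const *tables, int clock)
{
	for (int i = 0; tables[i]; i++)	/* NULL entry terminates the scan */
		if (tables[i]->clock == clock)
			return tables[i];
	return NULL;
}

int main(void)
{
	assert(lookup(table, 270000) == &hbr1);
	assert(lookup(table, 540000) == NULL);
	return 0;
}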
@@ -311,11 +353,88 @@ static const struct intel_mpllb_state dg2_dp_hbr3_38_4 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 61440),
};
-static const struct intel_mpllb_state *dg2_dp_38_4_tables[] = {
+static const struct intel_mpllb_state dg2_dp_uhbr10_38_4 = {
+ .clock = 1000000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 26) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 488),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 3),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 27306),
+
+ /*
+ * SSC will be enabled; DP UHBR has a minimum SSC requirement.
+ */
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 76800),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 129024),
+};
+
+static const struct intel_mpllb_state dg2_dp_uhbr13_38_4 = {
+ .clock = 1350000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 56) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 670),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36864),
+
+ /*
+ * SSC will be enabled; DP UHBR has a minimum SSC requirement.
+ */
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 103680),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 174182),
+};
+
+static const struct intel_mpllb_state * const dg2_dp_38_4_tables[] = {
&dg2_dp_rbr_38_4,
&dg2_dp_hbr1_38_4,
&dg2_dp_hbr2_38_4,
&dg2_dp_hbr3_38_4,
+ &dg2_dp_uhbr10_38_4,
+ &dg2_dp_uhbr13_38_4,
NULL,
};
@@ -448,7 +567,7 @@ static const struct intel_mpllb_state dg2_edp_r432 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 65752),
};
-static const struct intel_mpllb_state *dg2_edp_tables[] = {
+static const struct intel_mpllb_state * const dg2_edp_tables[] = {
&dg2_dp_rbr_100,
&dg2_edp_r216,
&dg2_edp_r243,
@@ -611,7 +730,7 @@ static const struct intel_mpllb_state dg2_hdmi_594 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
-static const struct intel_mpllb_state *dg2_hdmi_tables[] = {
+static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_25_175,
&dg2_hdmi_27_0,
&dg2_hdmi_74_25,
@@ -620,7 +739,7 @@ static const struct intel_mpllb_state *dg2_hdmi_tables[] = {
NULL,
};
-static const struct intel_mpllb_state **
+static const struct intel_mpllb_state * const *
intel_mpllb_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
@@ -654,7 +773,7 @@ intel_mpllb_tables_get(struct intel_crtc_state *crtc_state,
int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_mpllb_state **tables;
+ const struct intel_mpllb_state * const *tables;
int i;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -850,7 +969,7 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
int intel_snps_phy_check_hdmi_link_rate(int clock)
{
- const struct intel_mpllb_state **tables = dg2_hdmi_tables;
+ const struct intel_mpllb_state * const *tables = dg2_hdmi_tables;
int i;
for (i = 0; tables[i]; i++) {
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
index 6261ff88ef5c..11dcd6deb070 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -29,7 +29,7 @@ int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
const struct intel_mpllb_state *pll_state);
int intel_snps_phy_check_hdmi_link_rate(int clock);
-void intel_snps_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- u32 level);
+void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_SNPS_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 3ffece568ed9..40faa18947c9 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -12,44 +12,81 @@
static const char *tc_port_mode_name(enum tc_port_mode mode)
{
static const char * const names[] = {
+ [TC_PORT_DISCONNECTED] = "disconnected",
[TC_PORT_TBT_ALT] = "tbt-alt",
[TC_PORT_DP_ALT] = "dp-alt",
[TC_PORT_LEGACY] = "legacy",
};
if (WARN_ON(mode >= ARRAY_SIZE(names)))
- mode = TC_PORT_TBT_ALT;
+ mode = TC_PORT_DISCONNECTED;
return names[mode];
}
+static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
+ enum tc_port_mode mode)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode;
+}
+
+bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
+}
+
+bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
+}
+
+bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
+}
+
+bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
+ IS_ALDERLAKE_P(i915);
+}
+
static enum intel_display_power_domain
-tc_cold_get_power_domain(struct intel_digital_port *dig_port)
+tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
{
- if (intel_tc_cold_requires_aux_pw(dig_port))
- return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
- else
+ if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
return POWER_DOMAIN_TC_COLD_OFF;
+
+ return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
static intel_wakeref_t
-tc_cold_block(struct intel_digital_port *dig_port)
+tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode,
+ enum intel_display_power_domain *domain)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum intel_display_power_domain domain;
- if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port)
- return 0;
+ *domain = tc_cold_get_power_domain(dig_port, mode);
- domain = tc_cold_get_power_domain(dig_port);
- return intel_display_power_get(i915, domain);
+ return intel_display_power_get(i915, *domain);
+}
+
+static intel_wakeref_t
+tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain)
+{
+ return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain);
}
static void
-tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
+tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum intel_display_power_domain domain;
/*
* wakeref == -1, means some error happened saving save_depot_stack but
@@ -59,8 +96,7 @@ tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
if (wakeref == 0)
return;
- domain = tc_cold_get_power_domain(dig_port);
- intel_display_power_put_async(i915, domain, wakeref);
+ intel_display_power_put(i915, domain, wakeref);
}
static void
@@ -69,11 +105,9 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
bool enabled;
- if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port)
- return;
-
enabled = intel_display_power_is_enabled(i915,
- tc_cold_get_power_domain(dig_port));
+ tc_cold_get_power_domain(dig_port,
+ dig_port->tc_mode));
drm_WARN_ON(&i915->drm, !enabled);
}
@@ -244,6 +278,11 @@ static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
struct intel_uncore *uncore = &i915->uncore;
u32 val, mask = 0;
+ /*
+ * On ADL-P the HW/FW will wake from TCCOLD to complete the read access of
+ * registers in IOM. Note that this doesn't apply to PHY and FIA
+ * registers.
+ */
val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
mask |= BIT(TC_PORT_DP_ALT);
@@ -270,6 +309,14 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
return icl_tc_port_live_status_mask(dig_port);
}
+/*
+ * Return the PHY status complete flag indicating that display can acquire the
+ * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
+ * is connected and it's ready to switch the ownership to display. The flag
+ * will be left cleared when a TBT-alt sink is connected, where the PHY is
+ * owned by the TBT subsystem and so switching the ownership to display is not
+ * required.
+ */
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -288,6 +335,13 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
+/*
+ * Return the PHY status complete flag indicating that display can acquire the
+ * PHY ownership. The IOM firmware sets this flag when it's ready to switch
+ * the ownership to display, regardless of what sink is connected (TBT-alt,
+ * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
+ * subsystem and so switching the ownership to display is not required.
+ */
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -339,11 +393,6 @@ static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
intel_uncore_write(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
- if (!take && wait_for(!tc_phy_status_complete(dig_port), 10))
- drm_dbg_kms(&i915->drm,
- "Port %s: PHY complete clear timed out\n",
- dig_port->tc_port_name);
-
return true;
}
@@ -429,6 +478,7 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ u32 live_status_mask;
int max_lanes;
if (!tc_phy_status_complete(dig_port)) {
@@ -437,6 +487,13 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
goto out_set_tbt_alt_mode;
}
+ live_status_mask = tc_port_live_status_mask(dig_port);
+ if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY)))) {
+ drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
+ dig_port->tc_port_name, live_status_mask);
+ goto out_set_tbt_alt_mode;
+ }
+
if (!tc_phy_take_ownership(dig_port, true) &&
!drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
goto out_set_tbt_alt_mode;
@@ -485,14 +542,13 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
switch (dig_port->tc_mode) {
case TC_PORT_LEGACY:
- /* Nothing to do, we never disconnect from legacy mode */
- break;
case TC_PORT_DP_ALT:
tc_phy_take_ownership(dig_port, false);
- dig_port->tc_mode = TC_PORT_TBT_ALT;
- break;
+ fallthrough;
case TC_PORT_TBT_ALT:
- /* Nothing to do, we stay in TBT-alt mode */
+ dig_port->tc_mode = TC_PORT_DISCONNECTED;
+ fallthrough;
+ case TC_PORT_DISCONNECTED:
break;
default:
MISSING_CASE(dig_port->tc_mode);
@@ -509,6 +565,10 @@ static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
return dig_port->tc_mode == TC_PORT_TBT_ALT;
}
+ /* On ADL-P the PHY complete flag is set in TBT mode as well. */
+ if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
+ return true;
+
if (!tc_phy_is_owned(dig_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
dig_port->tc_port_name);
@@ -550,9 +610,7 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
if (live_status_mask)
return fls(live_status_mask) - 1;
- return tc_phy_status_complete(dig_port) &&
- dig_port->tc_legacy_port ? TC_PORT_LEGACY :
- TC_PORT_TBT_ALT;
+ return TC_PORT_TBT_ALT;
}
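fls() returns the 1-based position of the highest set bit, so fls(live_status_mask) - 1 picks the highest-numbered (highest-priority) live mode. A stand-alone sketch with a user-space fls and the mode ordering implied by the enum:

#include <assert.h>

/* User-space stand-in for the kernel's fls(): 1-based, 0 when x == 0. */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Ordering as in enum tc_port_mode: higher value = higher priority. */
enum tc_mode { DISCONNECTED, TBT_ALT, DP_ALT, LEGACY };

int main(void)
{
	unsigned int live = (1u << TBT_ALT) | (1u << DP_ALT);

	/* DP-alt outranks TBT-alt when both report live status. */
	assert(fls32(live) - 1 == DP_ALT);
	return 0;
}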
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
@@ -581,6 +639,43 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
tc_port_mode_name(dig_port->tc_mode));
}
+static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
+}
+
+static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
+ int required_lanes, bool force_disconnect)
+{
+ enum intel_display_power_domain domain;
+ intel_wakeref_t wref;
+ bool needs_reset = force_disconnect;
+
+ if (!needs_reset) {
+ /* Get power domain required to check the hotplug live status. */
+ wref = tc_cold_block(dig_port, &domain);
+ needs_reset = intel_tc_port_needs_reset(dig_port);
+ tc_cold_unblock(dig_port, domain, wref);
+ }
+
+ if (!needs_reset)
+ return;
+
+ /* Get power domain required for resetting the mode. */
+ wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain);
+
+ intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect);
+
+ /* Get power domain matching the new mode after reset. */
+ tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
+ fetch_and_zero(&dig_port->tc_lock_wakeref));
+ if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
+ dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
+ &dig_port->tc_lock_power_domain);
+
+ tc_cold_unblock(dig_port, domain, wref);
+}
+
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
int refcount)
@@ -595,45 +690,42 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
- intel_wakeref_t tc_cold_wref;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
- tc_cold_wref = tc_cold_block(dig_port);
- dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
if (dig_port->dp.is_mst)
active_links = intel_dp_mst_encoder_active_links(dig_port);
else if (encoder->base.crtc)
active_links = to_intel_crtc(encoder->base.crtc)->active;
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
if (active_links) {
+ enum intel_display_power_domain domain;
+ intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain);
+
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+
if (!icl_tc_phy_is_connected(dig_port))
drm_dbg_kms(&i915->drm,
"Port %s: PHY disconnected with %d active link(s)\n",
dig_port->tc_port_name, active_links);
intel_tc_port_link_init_refcount(dig_port, active_links);
- goto out;
- }
+ dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
+ &dig_port->tc_lock_power_domain);
- if (dig_port->tc_legacy_port)
- icl_tc_phy_connect(dig_port, 1);
+ tc_cold_unblock(dig_port, domain, tc_cold_wref);
+ }
-out:
drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
- tc_cold_unblock(dig_port, tc_cold_wref);
mutex_unlock(&dig_port->tc_lock);
}
-static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
-{
- return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
-}
-
/*
* The type-C ports are different because even when they are connected, they may
* not be available/usable by the graphics driver: see the comment on
@@ -648,78 +740,79 @@ bool intel_tc_port_connected(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_connected;
- intel_wakeref_t tc_cold_wref;
intel_tc_port_lock(dig_port);
- tc_cold_wref = tc_cold_block(dig_port);
is_connected = tc_port_live_status_mask(dig_port) &
BIT(dig_port->tc_mode);
- tc_cold_unblock(dig_port, tc_cold_wref);
intel_tc_port_unlock(dig_port);
return is_connected;
}
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
- int required_lanes, bool force_disconnect)
+ int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- intel_wakeref_t wakeref;
-
- wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);
mutex_lock(&dig_port->tc_lock);
- if (!dig_port->tc_link_refcount) {
- intel_wakeref_t tc_cold_wref;
-
- tc_cold_wref = tc_cold_block(dig_port);
+ cancel_delayed_work(&dig_port->tc_disconnect_phy_work);
- if (force_disconnect || intel_tc_port_needs_reset(dig_port))
- intel_tc_port_reset_mode(dig_port, required_lanes,
- force_disconnect);
-
- tc_cold_unblock(dig_port, tc_cold_wref);
- }
+ if (!dig_port->tc_link_refcount)
+ intel_tc_port_update_mode(dig_port, required_lanes,
+ false);
- drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
- dig_port->tc_lock_wakeref = wakeref;
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED);
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT &&
+ !tc_phy_is_owned(dig_port));
}
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
- __intel_tc_port_lock(dig_port, 1, false);
+ __intel_tc_port_lock(dig_port, 1);
}
-void intel_tc_port_unlock(struct intel_digital_port *dig_port)
+/**
+ * intel_tc_port_disconnect_phy_work: disconnect TypeC PHY from display port
+ * @dig_port: digital port
+ *
+ * Disconnect the given digital port from its TypeC PHY (handing back the
+ * control of the PHY to the TypeC subsystem). This happens in a delayed
+ * manner after each AUX transaction and modeset disable.
+ */
+static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);
+ struct intel_digital_port *dig_port =
+ container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work);
- mutex_unlock(&dig_port->tc_lock);
+ mutex_lock(&dig_port->tc_lock);
+
+ if (!dig_port->tc_link_refcount)
+ intel_tc_port_update_mode(dig_port, 1, true);
- intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
- wakeref);
+ mutex_unlock(&dig_port->tc_lock);
}
/**
- * intel_tc_port_disconnect_phy: disconnect TypeC PHY from display port
+ * intel_tc_port_flush_work: flush the work disconnecting the PHY
* @dig_port: digital port
*
- * Disconnect the given digital port from its TypeC PHY (handing back the
- * control of the PHY to the TypeC subsystem). The only purpose of this
- * function is to force the disconnect even with a TypeC display output still
- * plugged to the TypeC connector, which is required by the TypeC firmwares
- * during system suspend and shutdown. Otherwise - during the unplug event
- * handling - the PHY ownership is released automatically by
- * intel_tc_port_reset_mode(), when calling this function is not required.
+ * Flush the delayed work disconnecting an idle PHY.
*/
-void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port)
+void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
- __intel_tc_port_lock(dig_port, 1, true);
- intel_tc_port_unlock(dig_port);
+ flush_delayed_work(&dig_port->tc_disconnect_phy_work);
+}
+
+void intel_tc_port_unlock(struct intel_digital_port *dig_port)
+{
+ if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED)
+ queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work,
+ msecs_to_jiffies(1000));
+
+ mutex_unlock(&dig_port->tc_lock);
}
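
The lock/unlock pair above implements a lazy-release idiom: taking the lock cancels any pending teardown, while dropping the last user arms a delayed disconnect instead of tearing down immediately. A generic sketch of the idiom, with a hypothetical struct resource standing in for the digital port:

    struct resource {
        struct mutex lock;
        int refcount;
        struct delayed_work idle_release_work;
    };

    static void resource_lock(struct resource *res)
    {
        mutex_lock(&res->lock);
        /* A user showed up: the resource must stay connected. */
        cancel_delayed_work(&res->idle_release_work);
    }

    static void resource_unlock(struct resource *res)
    {
        /* Idle: release after a grace period instead of immediately. */
        if (!res->refcount)
            queue_delayed_work(system_unbound_wq,
                               &res->idle_release_work,
                               msecs_to_jiffies(1000));
        mutex_unlock(&res->lock);
    }

The grace period avoids a disconnect/reconnect cycle for back-to-back AUX transfers, and the work item retakes the mutex and rechecks the refcount before acting, mirroring intel_tc_port_disconnect_phy_work() above.
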
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
@@ -731,21 +824,30 @@ bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes)
{
- __intel_tc_port_lock(dig_port, required_lanes, false);
+ __intel_tc_port_lock(dig_port, required_lanes);
dig_port->tc_link_refcount++;
intel_tc_port_unlock(dig_port);
}
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
- mutex_lock(&dig_port->tc_lock);
- dig_port->tc_link_refcount--;
- mutex_unlock(&dig_port->tc_lock);
+ intel_tc_port_lock(dig_port);
+ --dig_port->tc_link_refcount;
+ intel_tc_port_unlock(dig_port);
+
+ /*
+ * Disconnecting the PHY after the PHY's PLL gets disabled may
+ * hang the system on ADL-P, so disconnect the PHY here synchronously.
+ * TODO: remove this once the root cause of the ordering requirement
+ * is found/fixed.
+ */
+ intel_tc_port_flush_work(dig_port);
}
static bool
tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
+ enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
u32 val;
@@ -753,9 +855,9 @@ tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig
return false;
mutex_lock(&dig_port->tc_lock);
- wakeref = tc_cold_block(dig_port);
+ wakeref = tc_cold_block(dig_port, &domain);
val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
- tc_cold_unblock(dig_port, wakeref);
+ tc_cold_unblock(dig_port, domain, wakeref);
mutex_unlock(&dig_port->tc_lock);
drm_WARN_ON(&i915->drm, val == 0xffffffff);
@@ -795,15 +897,9 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
"%c/TC#%d", port_name(port), tc_port + 1);
mutex_init(&dig_port->tc_lock);
+ INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work);
dig_port->tc_legacy_port = is_legacy;
+ dig_port->tc_mode = TC_PORT_DISCONNECTED;
dig_port->tc_link_refcount = 0;
tc_port_load_fia_params(i915, dig_port);
}
-
-bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-
- return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
- IS_ALDERLAKE_P(i915);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 0c881f645e27..6b47b29f551c 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -12,8 +12,11 @@
struct intel_digital_port;
struct intel_encoder;
+bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port);
+bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
+bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
+
bool intel_tc_port_connected(struct intel_encoder *encoder);
-void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
@@ -24,6 +27,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
void intel_tc_port_sanitize(struct intel_digital_port *dig_port);
void intel_tc_port_lock(struct intel_digital_port *dig_port);
void intel_tc_port_unlock(struct intel_digital_port *dig_port);
+void intel_tc_port_flush_work(struct intel_digital_port *dig_port);
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes);
void intel_tc_port_put_link(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index d02f09f7e750..88a398df9621 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1529,7 +1529,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
intel_de_write(dev_priv, TV_CLR_LEVEL,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
/* Filter ctl must be set before TV_WIN_SIZE */
tv_filter_ctl = TV_AUTO_SCALE;
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index df3286aa6999..2275f99ce9d7 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -357,11 +357,9 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
return false;
}
-static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
+static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (DISPLAY_VER(i915) >= 12)
return true;
@@ -547,9 +545,8 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
}
enum intel_display_power_domain
-intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
+intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -566,7 +563,7 @@ intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
*/
if (DISPLAY_VER(i915) == 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
- else if (is_pipe_dsc(crtc_state))
+ else if (is_pipe_dsc(crtc, cpu_transcoder))
return POWER_DOMAIN_PIPE(pipe);
else
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
@@ -577,6 +574,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
u32 pps_val = 0;
u32 rc_buf_thresh_dword[4];
@@ -601,7 +599,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0,
pps_val);
/*
@@ -625,7 +623,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1,
pps_val);
/*
@@ -650,7 +648,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2,
pps_val);
/*
@@ -675,7 +673,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3,
pps_val);
/*
@@ -700,7 +698,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4,
pps_val);
/*
@@ -725,7 +723,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5,
pps_val);
/*
@@ -752,7 +750,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6,
pps_val);
/*
@@ -777,7 +775,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7,
pps_val);
/*
@@ -802,7 +800,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8,
pps_val);
/*
@@ -827,7 +825,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9,
pps_val);
/*
@@ -854,7 +852,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10,
pps_val);
/*
@@ -882,7 +880,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16,
pps_val);
/*
@@ -911,7 +909,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i,
rc_buf_thresh_dword[i / 4]);
}
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW,
@@ -968,7 +966,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i,
rc_range_params_dword[i / 2]);
}
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW,
@@ -1095,18 +1093,16 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
sizeof(dp_dsc_pps_sdp));
}
-static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state)
+static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
-
- return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1;
+ return is_pipe_dsc(crtc, cpu_transcoder) ?
+ ICL_PIPE_DSS_CTL1(crtc->pipe) : DSS_CTL1;
}
-static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state)
+static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
-
- return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2;
+ return is_pipe_dsc(crtc, cpu_transcoder) ?
+ ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2;
}
static struct intel_crtc *
@@ -1142,7 +1138,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
else
dss_ctl1_val |= UNCOMPRESSED_JOINER_MASTER;
- intel_de_write(dev_priv, dss_ctl1_reg(crtc_state), dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
}
}
@@ -1176,8 +1172,8 @@ void intel_dsc_enable(struct intel_encoder *encoder,
if (!crtc_state->bigjoiner_slave)
dss_ctl1_val |= MASTER_BIG_JOINER_ENABLE;
}
- intel_de_write(dev_priv, dss_ctl1_reg(crtc_state), dss_ctl1_val);
- intel_de_write(dev_priv, dss_ctl2_reg(crtc_state), dss_ctl2_val);
+ intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl2_reg(crtc, crtc_state->cpu_transcoder), dss_ctl2_val);
}
void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
@@ -1188,8 +1184,8 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
/* Disable only if either of them is enabled */
if (old_crtc_state->dsc.compression_enable ||
old_crtc_state->bigjoiner) {
- intel_de_write(dev_priv, dss_ctl1_reg(old_crtc_state), 0);
- intel_de_write(dev_priv, dss_ctl2_reg(old_crtc_state), 0);
+ intel_de_write(dev_priv, dss_ctl1_reg(crtc, old_crtc_state->cpu_transcoder), 0);
+ intel_de_write(dev_priv, dss_ctl2_reg(crtc, old_crtc_state->cpu_transcoder), 0);
}
}
@@ -1199,7 +1195,7 @@ void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dss_ctl1;
- dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc_state));
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder));
if (dss_ctl1 & UNCOMPRESSED_JOINER_MASTER) {
crtc_state->bigjoiner = true;
crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
@@ -1214,9 +1210,10 @@ void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state)
void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
{
- struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -1225,14 +1222,14 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
if (!intel_dsc_source_support(crtc_state))
return;
- power_domain = intel_dsc_power_domain(crtc_state);
+ power_domain = intel_dsc_power_domain(crtc, cpu_transcoder);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return;
- dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc_state));
- dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc_state));
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, cpu_transcoder));
+ dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc, cpu_transcoder));
crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
if (!crtc_state->dsc.compression_enable)
@@ -1256,7 +1253,7 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
/* FIXME: add more state readout as needed */
/* PPS1 */
- if (!is_pipe_dsc(crtc_state))
+ if (!is_pipe_dsc(crtc, cpu_transcoder))
val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1);
else
val = intel_de_read(dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index dfb1fd38deb4..0c5d80a572da 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -8,8 +8,10 @@
#include <linux/types.h>
-struct intel_encoder;
+enum transcoder;
+struct intel_crtc;
struct intel_crtc_state;
+struct intel_encoder;
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state);
@@ -21,7 +23,7 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
-intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
+intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 724e7b04f3b6..a0e53a3b267a 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -18,6 +18,7 @@
#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
DRM_FORMAT_C8,
@@ -656,6 +657,7 @@ skl_disable_plane(struct intel_plane *plane,
skl_write_plane_wm(plane, crtc_state);
+ intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
@@ -993,6 +995,11 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
u32 offset = plane_state->view.color_plane[color_plane].offset;
if (intel_fb_uses_dpt(fb)) {
+ /*
+ * The DPT object contains only one vma, so the VMA's offset
+ * within the DPT is always 0.
+ */
+ WARN_ON(plane_state->dpt_vma->node.start);
WARN_ON(offset & 0x1fffff);
return offset >> 9;
} else {
@@ -1001,6 +1008,33 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
}
}
+static void intel_load_plane_csc_black(struct intel_plane *intel_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+ enum pipe pipe = intel_plane->pipe;
+ enum plane_id plane = intel_plane->id;
+ u16 postoff = 0;
+
+ drm_dbg_kms(&dev_priv->drm, "plane color CTM to black %s:%d\n",
+ intel_plane->base.name, plane);
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 0), 0);
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 1), 0);
+
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 2), 0);
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 3), 0);
+
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 4), 0);
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 5), 0);
+
+ intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
+ intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
+ intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
+
+ intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 0), postoff);
+ intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 1), postoff);
+ intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 2), postoff);
+}
+
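
Zeroing every coefficient and offset forces black regardless of the input pixel. Assuming the usual plane CSC model of a 3x3 matrix with pre- and post-offsets, per channel:

    out[ch] = c[ch][0]*R' + c[ch][1]*G' + c[ch][2]*B' + postoff[ch]

    with all c[ch][i] = 0 and postoff[ch] = 0  =>  out[ch] = 0 for any input

so enabling the plane CSC with this all-zero programming blanks the plane without touching its surface or scanout state.
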
static void
skl_program_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1024,7 +1058,7 @@ skl_program_plane(struct intel_plane *plane,
u8 alpha = plane_state->hw.alpha >> 8;
u32 plane_color_ctl = 0, aux_dist = 0;
unsigned long irqflags;
- u32 keymsk, keymax;
+ u32 keymsk, keymax, plane_surf;
u32 plane_ctl = plane_state->ctl;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
@@ -1096,8 +1130,7 @@ skl_program_plane(struct intel_plane *plane,
(plane_state->view.color_plane[1].y << 16) |
plane_state->view.color_plane[1].x);
- if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
/*
* Enable the scaler before the plane so that we don't
@@ -1113,8 +1146,23 @@ skl_program_plane(struct intel_plane *plane,
* the control register just before the surface register.
*/
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
+ plane_surf = intel_plane_ggtt_offset(plane_state) + surf_addr;
+ plane_color_ctl = intel_de_read_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id));
+
+ /*
+ * FIXME: PXP session invalidation can hit at any time, even during or
+ * after the commit; the display content will then be garbage.
+ */
+ if (plane_state->decrypt) {
+ plane_surf |= PLANE_SURF_DECRYPT;
+ } else if (plane_state->force_black) {
+ intel_load_plane_csc_black(plane);
+ plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+ }
+
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
+ plane_color_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), plane_surf);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 0ee4ff341e25..07584695514b 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -39,8 +40,8 @@
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
-#include "intel_sideband.h"
#include "skl_scaler.h"
+#include "vlv_sideband.h"
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
@@ -270,23 +271,19 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
drm_dbg_kms(&dev_priv->drm, "\n");
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- if (fixed_mode) {
- intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
- if (HAS_GMCH(dev_priv))
- ret = intel_gmch_panel_fitting(pipe_config, conn_state);
- else
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
- if (ret)
- return ret;
- }
+ ret = intel_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -883,7 +880,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_port_enable(encoder, pipe_config);
}
- intel_panel_enable_backlight(pipe_config, conn_state);
+ intel_backlight_enable(pipe_config, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
@@ -913,7 +910,7 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
drm_dbg_kms(&i915->drm, "\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
- intel_panel_disable_backlight(old_conn_state);
+ intel_backlight_disable(old_conn_state);
/*
* According to the spec we should send SHUTDOWN before
@@ -1633,25 +1630,21 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 allowed_scalers;
- if (connector->panel.fixed_mode) {
- u32 allowed_scalers;
-
- allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
- if (!HAS_GMCH(dev_priv))
- allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
+ allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
+ if (!HAS_GMCH(dev_priv))
+ allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
- drm_connector_attach_scaling_mode_property(&connector->base,
- allowed_scalers);
+ drm_connector_attach_scaling_mode_property(&connector->base,
+ allowed_scalers);
- connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+ connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- drm_connector_set_panel_orientation_with_quirk(
- &connector->base,
- intel_dsi_get_panel_orientation(connector),
- connector->panel.fixed_mode->hdisplay,
- connector->panel.fixed_mode->vdisplay);
- }
+ drm_connector_set_panel_orientation_with_quirk(&connector->base,
+ intel_dsi_get_panel_orientation(connector),
+ connector->panel.fixed_mode->hdisplay,
+ connector->panel.fixed_mode->vdisplay);
}
#define NS_KHZ_RATIO 1000000
@@ -1876,7 +1869,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
intel_encoder->get_config = intel_dsi_get_config;
- intel_encoder->update_pipe = intel_panel_update_backlight;
+ intel_encoder->update_pipe = intel_backlight_update;
intel_encoder->shutdown = intel_dsi_shutdown;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1964,7 +1957,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
- intel_panel_setup_backlight(connector, INVALID_PIPE);
+ intel_backlight_setup(intel_connector, INVALID_PIPE);
vlv_dsi_add_properties(intel_connector);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 90185b219447..5413b52ab6ba 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -31,7 +31,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
static const u16 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
@@ -568,3 +568,26 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
}
intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
+
+static void assert_dsi_pll(struct drm_i915_private *i915, bool state)
+{
+ bool cur_state;
+
+ vlv_cck_get(i915);
+ cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN;
+ vlv_cck_put(i915);
+
+ I915_STATE_WARN(cur_state != state,
+ "DSI PLL state assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
+
+void assert_dsi_pll_enabled(struct drm_i915_private *i915)
+{
+ assert_dsi_pll(i915, true);
+}
+
+void assert_dsi_pll_disabled(struct drm_i915_private *i915)
+{
+ assert_dsi_pll(i915, false);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 6234e17259c1..7358bebef15c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -4,6 +4,8 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include <linux/dma-fence-array.h>
+
#include "gt/intel_engine.h"
#include "i915_gem_ioctls.h"
@@ -36,7 +38,7 @@ static __always_inline u32 __busy_write_id(u16 id)
}
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
+__busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id))
{
const struct i915_request *rq;
@@ -46,29 +48,60 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
* to eventually flush us, but to minimise latency just ask the
* hardware.
*
- * Note we only report on the status of native fences.
+ * Note we only report on the status of native fences and we currently
+ * have two kinds of native fence:
+ *
+ * 1. A composite fence (dma_fence_array) constructed of i915 requests
+ * created during a parallel submission. In this case we deconstruct the
+ * composite fence into individual i915 requests and check the status of
+ * each request.
+ *
+ * 2. A single i915 request.
*/
- if (!dma_fence_is_i915(fence))
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ struct dma_fence **child = array->fences;
+ unsigned int nchild = array->num_fences;
+
+ do {
+ struct dma_fence *current_fence = *child++;
+
+ /* Not an i915 fence or not a composite member: not busy, per above */
+ if (!dma_fence_is_i915(current_fence) ||
+ !test_bit(I915_FENCE_FLAG_COMPOSITE,
+ &current_fence->flags)) {
+ return 0;
+ }
+
+ rq = to_request(current_fence);
+ if (!i915_request_completed(rq))
+ return flag(rq->engine->uabi_class);
+ } while (--nchild);
+
+ /* All requests in array complete, not busy */
return 0;
+ } else {
+ if (!dma_fence_is_i915(fence))
+ return 0;
- /* opencode to_request() in order to avoid const warnings */
- rq = container_of(fence, const struct i915_request, fence);
- if (i915_request_completed(rq))
- return 0;
+ rq = to_request(fence);
+ if (i915_request_completed(rq))
+ return 0;
- /* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
- return flag(rq->engine->uabi_class);
+ /* Beware type-expansion follies! */
+ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
+ return flag(rq->engine->uabi_class);
+ }
}
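
The composite-fence branch above deconstructs a dma_fence_array into its leaf fences. A self-contained sketch of the same traversal pattern, using the existing dma-fence-array helpers and a hypothetical callback-based wrapper:

    #include <linux/dma-fence.h>
    #include <linux/dma-fence-array.h>

    /* Invoke cb() on each leaf of a possibly-composite fence. */
    static void for_each_leaf_fence(struct dma_fence *fence,
                                    void (*cb)(struct dma_fence *leaf))
    {
        if (dma_fence_is_array(fence)) {
            struct dma_fence_array *array = to_dma_fence_array(fence);
            unsigned int i;

            /* Walk the children the array was built from. */
            for (i = 0; i < array->num_fences; i++)
                cb(array->fences[i]);
        } else {
            cb(fence);
        }
    }
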
static __always_inline unsigned int
-busy_check_reader(const struct dma_fence *fence)
+busy_check_reader(struct dma_fence *fence)
{
return __busy_set_if_active(fence, __busy_read_flag);
}
static __always_inline unsigned int
-busy_check_writer(const struct dma_fence *fence)
+busy_check_writer(struct dma_fence *fence)
{
if (!fence)
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 166bb46408a9..fb33d0322960 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -77,6 +77,8 @@
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
+#include "pxp/intel_pxp.h"
+
#include "i915_gem_context.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
@@ -186,10 +188,13 @@ static int validate_priority(struct drm_i915_private *i915,
return 0;
}
-static void proto_context_close(struct i915_gem_proto_context *pc)
+static void proto_context_close(struct drm_i915_private *i915,
+ struct i915_gem_proto_context *pc)
{
int i;
+ if (pc->pxp_wakeref)
+ intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
if (pc->vm)
i915_vm_put(pc->vm);
if (pc->user_engines) {
@@ -241,6 +246,35 @@ static int proto_context_set_persistence(struct drm_i915_private *i915,
return 0;
}
+static int proto_context_set_protected(struct drm_i915_private *i915,
+ struct i915_gem_proto_context *pc,
+ bool protected)
+{
+ int ret = 0;
+
+ if (!protected) {
+ pc->uses_protected_content = false;
+ } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) {
+ ret = -ENODEV;
+ } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
+ !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
+ ret = -EPERM;
+ } else {
+ pc->uses_protected_content = true;
+
+ /*
+ * protected context usage requires the PXP session to be up,
+ * which in turn requires the device to be active.
+ */
+ pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ if (!intel_pxp_is_active(&i915->gt.pxp))
+ ret = intel_pxp_start(&i915->gt.pxp);
+ }
+
+ return ret;
+}
+
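
proto_context_set_protected() above rejects protected contexts that are recoverable or non-bannable, so userspace has to clear RECOVERABLE in the same create-time extension chain (BANNABLE defaults to set). A sketch of that chain using the uAPI names from this series; error handling trimmed, and the helper name is ours:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Returns the new context id, or 0 on failure (fd is an open i915 fd). */
    static uint32_t create_protected_context(int fd)
    {
        struct drm_i915_gem_context_create_ext_setparam p_protected = {
            .base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
            .param = {
                .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
                .value = 1,
            },
        };
        struct drm_i915_gem_context_create_ext_setparam p_norecover = {
            .base = {
                .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
                .next_extension = (uintptr_t)&p_protected,
            },
            .param = {
                .param = I915_CONTEXT_PARAM_RECOVERABLE,
                .value = 0, /* must be cleared before PROTECTED_CONTENT */
            },
        };
        struct drm_i915_gem_context_create_ext create = {
            .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
            .extensions = (uintptr_t)&p_norecover,
        };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
            return 0; /* e.g. -ENODEV when PXP is unavailable */
        return create.ctx_id;
    }

The chain is processed head first, so RECOVERABLE is cleared before the PROTECTED_CONTENT check runs, satisfying the -EPERM conditions above.
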
static struct i915_gem_proto_context *
proto_context_create(struct drm_i915_private *i915, unsigned int flags)
{
@@ -269,7 +303,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags)
return pc;
proto_close:
- proto_context_close(pc);
+ proto_context_close(i915, pc);
return err;
}
@@ -442,6 +476,13 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
u16 idx, num_bonds;
int err, n;
+ if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
+ !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
+ drm_dbg(&i915->drm,
+ "Bonding on gen12+ aside from TGL, RKL, and ADL_S not supported\n");
+ return -ENODEV;
+ }
+
if (get_user(idx, &ext->virtual_index))
return -EFAULT;
@@ -515,9 +556,147 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
return 0;
}
+static int
+set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
+ void *data)
+{
+ struct i915_context_engines_parallel_submit __user *ext =
+ container_of_user(base, typeof(*ext), base);
+ const struct set_proto_ctx_engines *set = data;
+ struct drm_i915_private *i915 = set->i915;
+ u64 flags;
+ int err = 0, n, i, j;
+ u16 slot, width, num_siblings;
+ struct intel_engine_cs **siblings = NULL;
+ intel_engine_mask_t prev_mask;
+
+ /* FIXME: This is NIY for execlists */
+ if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
+ return -ENODEV;
+
+ if (get_user(slot, &ext->engine_index))
+ return -EFAULT;
+
+ if (get_user(width, &ext->width))
+ return -EFAULT;
+
+ if (get_user(num_siblings, &ext->num_siblings))
+ return -EFAULT;
+
+ if (slot >= set->num_engines) {
+ drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
+ slot, set->num_engines);
+ return -EINVAL;
+ }
+
+ if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
+ drm_dbg(&i915->drm,
+ "Invalid placement[%d], already occupied\n", slot);
+ return -EINVAL;
+ }
+
+ if (get_user(flags, &ext->flags))
+ return -EFAULT;
+
+ if (flags) {
+ drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
+ return -EINVAL;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
+ err = check_user_mbz(&ext->mbz64[n]);
+ if (err)
+ return err;
+ }
+
+ if (width < 2) {
+ drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
+ return -EINVAL;
+ }
+
+ if (num_siblings < 1) {
+ drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
+ num_siblings);
+ return -EINVAL;
+ }
+
+ siblings = kmalloc_array(num_siblings * width,
+ sizeof(*siblings),
+ GFP_KERNEL);
+ if (!siblings)
+ return -ENOMEM;
+
+ /* Create contexts / engines */
+ for (i = 0; i < width; ++i) {
+ intel_engine_mask_t current_mask = 0;
+ struct i915_engine_class_instance prev_engine;
+
+ for (j = 0; j < num_siblings; ++j) {
+ struct i915_engine_class_instance ci;
+
+ n = i * num_siblings + j;
+ if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
+ err = -EFAULT;
+ goto out_err;
+ }
+
+ siblings[n] =
+ intel_engine_lookup_user(i915, ci.engine_class,
+ ci.engine_instance);
+ if (!siblings[n]) {
+ drm_dbg(&i915->drm,
+ "Invalid sibling[%d]: { class:%d, inst:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ if (n) {
+ if (prev_engine.engine_class !=
+ ci.engine_class) {
+ drm_dbg(&i915->drm,
+ "Mismatched class %d, %d\n",
+ prev_engine.engine_class,
+ ci.engine_class);
+ err = -EINVAL;
+ goto out_err;
+ }
+ }
+
+ prev_engine = ci;
+ current_mask |= siblings[n]->logical_mask;
+ }
+
+ if (i > 0) {
+ if (current_mask != prev_mask << 1) {
+ drm_dbg(&i915->drm,
+ "Non contiguous logical mask 0x%x, 0x%x\n",
+ prev_mask, current_mask);
+ err = -EINVAL;
+ goto out_err;
+ }
+ }
+ prev_mask = current_mask;
+ }
+
+ set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
+ set->engines[slot].num_siblings = num_siblings;
+ set->engines[slot].width = width;
+ set->engines[slot].siblings = siblings;
+
+ return 0;
+
+out_err:
+ kfree(siblings);
+
+ return err;
+}
+
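
The validation above expects ext->engines[] laid out as engines[i * num_siblings + j]: width consecutive groups of num_siblings placements, with matching engine classes and contiguous logical masks across slots. A userspace sketch of that layout, assuming the I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT helper macro that accompanied this uAPI:

    /* width = 2 parallel slots, num_siblings = 1 placement per slot. */
    I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel, 2) = {
        .base = { .name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT },
        .engine_index = 0,
        .width = 2,
        .num_siblings = 1,
        /* engines[i * num_siblings + j]: slot 0's sibling, then slot 1's */
        .engines = {
            { .engine_class = I915_ENGINE_CLASS_VIDEO, .engine_instance = 0 },
            { .engine_class = I915_ENGINE_CLASS_VIDEO, .engine_instance = 1 },
        },
    };

The extension then chains into an I915_CONTEXT_PARAM_ENGINES setparam, the same way as the LOAD_BALANCE and BOND extensions handled earlier in this table.
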
static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
+ [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
+ set_proto_ctx_engines_parallel_submit,
};
static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
@@ -686,6 +865,8 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
ret = -EPERM;
else if (args->value)
pc->user_flags |= BIT(UCONTEXT_BANNABLE);
+ else if (pc->uses_protected_content)
+ ret = -EPERM;
else
pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
break;
@@ -693,10 +874,12 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
case I915_CONTEXT_PARAM_RECOVERABLE:
if (args->size)
ret = -EINVAL;
- else if (args->value)
- pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
- else
+ else if (!args->value)
pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
+ else if (pc->uses_protected_content)
+ ret = -EPERM;
+ else
+ pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
break;
case I915_CONTEXT_PARAM_PRIORITY:
@@ -724,6 +907,11 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
args->value);
break;
+ case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ ret = proto_context_set_protected(fpriv->dev_priv, pc,
+ args->value);
+ break;
+
case I915_CONTEXT_PARAM_NO_ZEROMAP:
case I915_CONTEXT_PARAM_BAN_PERIOD:
case I915_CONTEXT_PARAM_RINGSIZE:
@@ -735,44 +923,6 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
return ret;
}
-static struct i915_address_space *
-context_get_vm_rcu(struct i915_gem_context *ctx)
-{
- GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
-
- do {
- struct i915_address_space *vm;
-
- /*
- * We do not allow downgrading from full-ppgtt [to a shared
- * global gtt], so ctx->vm cannot become NULL.
- */
- vm = rcu_dereference(ctx->vm);
- if (!kref_get_unless_zero(&vm->ref))
- continue;
-
- /*
- * This ppgtt may have be reallocated between
- * the read and the kref, and reassigned to a third
- * context. In order to avoid inadvertent sharing
- * of this ppgtt with that third context (and not
- * src), we have to confirm that we have the same
- * ppgtt after passing through the strong memory
- * barrier implied by a successful
- * kref_get_unless_zero().
- *
- * Once we have acquired the current ppgtt of ctx,
- * we no longer care if it is released from ctx, as
- * it cannot be reallocated elsewhere.
- */
-
- if (vm == rcu_access_pointer(ctx->vm))
- return rcu_pointer_handoff(vm);
-
- i915_vm_put(vm);
- } while (1);
-}
-
static int intel_context_set_gem(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_sseu sseu)
@@ -782,25 +932,18 @@ static int intel_context_set_gem(struct intel_context *ce,
GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
RCU_INIT_POINTER(ce->gem_context, ctx);
+ GEM_BUG_ON(intel_context_is_pinned(ce));
ce->ring_size = SZ_16K;
- if (rcu_access_pointer(ctx->vm)) {
- struct i915_address_space *vm;
-
- rcu_read_lock();
- vm = context_get_vm_rcu(ctx); /* hmm */
- rcu_read_unlock();
-
- i915_vm_put(ce->vm);
- ce->vm = vm;
- }
+ i915_vm_put(ce->vm);
+ ce->vm = i915_gem_context_get_eb_vm(ctx);
if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
intel_engine_has_timeslices(ce->engine) &&
intel_engine_has_semaphores(ce->engine))
__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
- if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
+ if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
ctx->i915->params.request_timeout_ms) {
unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
@@ -814,6 +957,25 @@ static int intel_context_set_gem(struct intel_context *ce,
return ret;
}
+static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
+{
+ while (count--) {
+ struct intel_context *ce = e->engines[count], *child;
+
+ if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
+ continue;
+
+ for_each_child(ce, child)
+ intel_context_unpin(child);
+ intel_context_unpin(ce);
+ }
+}
+
+static void unpin_engines(struct i915_gem_engines *e)
+{
+ __unpin_engines(e, e->num_engines);
+}
+
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
while (count--) {
@@ -929,6 +1091,40 @@ free_engines:
return err;
}
+static int perma_pin_contexts(struct intel_context *ce)
+{
+ struct intel_context *child;
+ int i = 0, j = 0, ret;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ ret = intel_context_pin(ce);
+ if (unlikely(ret))
+ return ret;
+
+ for_each_child(ce, child) {
+ ret = intel_context_pin(child);
+ if (unlikely(ret))
+ goto unwind;
+ ++i;
+ }
+
+ set_bit(CONTEXT_PERMA_PIN, &ce->flags);
+
+ return 0;
+
+unwind:
+ intel_context_unpin(ce);
+ for_each_child(ce, child) {
+ if (j++ < i)
+ intel_context_unpin(child);
+ else
+ break;
+ }
+
+ return ret;
+}
+
static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
unsigned int num_engines,
struct i915_gem_proto_engine *pe)
@@ -942,7 +1138,7 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
e->num_engines = num_engines;
for (n = 0; n < num_engines; n++) {
- struct intel_context *ce;
+ struct intel_context *ce, *child;
int ret;
switch (pe[n].type) {
@@ -952,7 +1148,13 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
case I915_GEM_ENGINE_TYPE_BALANCED:
ce = intel_engine_create_virtual(pe[n].siblings,
- pe[n].num_siblings);
+ pe[n].num_siblings, 0);
+ break;
+
+ case I915_GEM_ENGINE_TYPE_PARALLEL:
+ ce = intel_engine_create_parallel(pe[n].siblings,
+ pe[n].num_siblings,
+ pe[n].width);
break;
case I915_GEM_ENGINE_TYPE_INVALID:
@@ -973,6 +1175,30 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
err = ERR_PTR(ret);
goto free_engines;
}
+ for_each_child(ce, child) {
+ ret = intel_context_set_gem(child, ctx, pe->sseu);
+ if (ret) {
+ err = ERR_PTR(ret);
+ goto free_engines;
+ }
+ }
+
+ /*
+ * XXX: Must be done after calling intel_context_set_gem as that
+ * function changes the ring size. The ring is allocated when
+ * the context is pinned. If the ring size is changed after
+ * allocation, the resulting size mismatch will cause
+ * the context to hang. Presumably with a bit of reordering we
+ * could move the perma-pin step to the backend function
+ * intel_engine_create_parallel.
+ */
+ if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
+ ret = perma_pin_contexts(ce);
+ if (ret) {
+ err = ERR_PTR(ret);
+ goto free_engines;
+ }
+ }
}
return e;
@@ -982,9 +1208,11 @@ free_engines:
return err;
}
-void i915_gem_context_release(struct kref *ref)
+static void i915_gem_context_release_work(struct work_struct *work)
{
- struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+ struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
+ release_work);
+ struct i915_address_space *vm;
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
@@ -992,6 +1220,13 @@ void i915_gem_context_release(struct kref *ref)
if (ctx->syncobj)
drm_syncobj_put(ctx->syncobj);
+ vm = ctx->vm;
+ if (vm)
+ i915_vm_put(vm);
+
+ if (ctx->pxp_wakeref)
+ intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
+
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -1001,6 +1236,13 @@ void i915_gem_context_release(struct kref *ref)
kfree_rcu(ctx, rcu);
}
+void i915_gem_context_release(struct kref *ref)
+{
+ struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+
+ queue_work(ctx->i915->wq, &ctx->release_work);
+}
+
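
i915_gem_context_release() above defers the real teardown to a workqueue because the final kref_put() can happen in hardirq context, where mutex_destroy() and the vm/wakeref puts would be illegal. The general idiom, as a self-contained sketch with hypothetical names (the driver uses its own i915->wq rather than system_wq):

    struct obj {
        struct kref ref;
        struct work_struct release_work;
    };

    static void obj_release_work(struct work_struct *work)
    {
        struct obj *obj = container_of(work, struct obj, release_work);

        /* Teardown that may sleep: mutex_destroy(), dropping refs, ... */
        kfree(obj);
    }

    static void obj_release(struct kref *ref)
    {
        struct obj *obj = container_of(ref, struct obj, ref);

        /* May run in hardirq context: defer everything that can sleep. */
        queue_work(system_wq, &obj->release_work);
    }

    /* Usage: INIT_WORK() at creation, kref_put(&obj->ref, obj_release). */
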
static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
@@ -1199,6 +1441,7 @@ static void context_close(struct i915_gem_context *ctx)
/* Flush any concurrent set_engines() */
mutex_lock(&ctx->engines_mutex);
+ unpin_engines(__context_engines_static(ctx));
engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
i915_gem_context_set_closed(ctx);
mutex_unlock(&ctx->engines_mutex);
@@ -1207,9 +1450,16 @@ static void context_close(struct i915_gem_context *ctx)
set_closed_name(ctx);
- vm = i915_gem_context_vm(ctx);
- if (vm)
+ vm = ctx->vm;
+ if (vm) {
+ /* i915_vm_close drops the final reference, which is a bit too
+ * early and could result in surprises with concurrent
+ * operations racing with this ctx close. Keep a full reference
+ * until the end.
+ */
+ i915_vm_get(vm);
i915_vm_close(vm);
+ }
ctx->file_priv = ERR_PTR(-EBADF);
@@ -1280,49 +1530,6 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
return 0;
}
-static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx,
- bool *user_engines)
-{
- struct i915_gem_engines *engines;
-
- rcu_read_lock();
- do {
- engines = rcu_dereference(ctx->engines);
- GEM_BUG_ON(!engines);
-
- if (user_engines)
- *user_engines = i915_gem_context_user_engines(ctx);
-
- /* successful await => strong mb */
- if (unlikely(!i915_sw_fence_await(&engines->fence)))
- continue;
-
- if (likely(engines == rcu_access_pointer(ctx->engines)))
- break;
-
- i915_sw_fence_complete(&engines->fence);
- } while (1);
- rcu_read_unlock();
-
- return engines;
-}
-
-static void
-context_apply_all(struct i915_gem_context *ctx,
- void (*fn)(struct intel_context *ce, void *data),
- void *data)
-{
- struct i915_gem_engines_iter it;
- struct i915_gem_engines *e;
- struct intel_context *ce;
-
- e = __context_engines_await(ctx, NULL);
- for_each_gem_engine(ce, e, it)
- fn(ce, data);
- i915_sw_fence_complete(&e->fence);
-}
-
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *i915,
const struct i915_gem_proto_context *pc)
@@ -1342,6 +1549,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
ctx->sched = pc->sched;
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->link);
+ INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
spin_lock_init(&ctx->stale.lock);
INIT_LIST_HEAD(&ctx->stale.engines);
@@ -1351,7 +1559,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
} else if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(&i915->gt);
+ ppgtt = i915_ppgtt_create(&i915->gt, 0);
if (IS_ERR(ppgtt)) {
drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -1361,7 +1569,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
vm = &ppgtt->vm;
}
if (vm) {
- RCU_INIT_POINTER(ctx->vm, i915_vm_open(vm));
+ ctx->vm = i915_vm_open(vm);
/* i915_vm_open() takes a reference */
i915_vm_put(vm);
@@ -1402,6 +1610,11 @@ i915_gem_create_context(struct drm_i915_private *i915,
goto err_engines;
}
+ if (pc->uses_protected_content) {
+ ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ ctx->uses_protected_content = true;
+ }
+
trace_i915_context_create(ctx);
return ctx;
@@ -1473,7 +1686,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
}
ctx = i915_gem_create_context(i915, pc);
- proto_context_close(pc);
+ proto_context_close(i915, pc);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err;
@@ -1500,7 +1713,7 @@ void i915_gem_context_close(struct drm_file *file)
unsigned long idx;
xa_for_each(&file_priv->proto_context_xa, idx, pc)
- proto_context_close(pc);
+ proto_context_close(file_priv->dev_priv, pc);
xa_destroy(&file_priv->proto_context_xa);
mutex_destroy(&file_priv->proto_context_lock);
@@ -1529,7 +1742,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (args->flags)
return -EINVAL;
- ppgtt = i915_ppgtt_create(&i915->gt);
+ ppgtt = i915_ppgtt_create(&i915->gt, 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -1584,18 +1797,15 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
int err;
u32 id;
- if (!rcu_access_pointer(ctx->vm))
+ if (!i915_gem_context_has_full_ppgtt(ctx))
return -ENODEV;
- rcu_read_lock();
- vm = context_get_vm_rcu(ctx);
- rcu_read_unlock();
- if (!vm)
- return -ENODEV;
+ vm = ctx->vm;
+ GEM_BUG_ON(!vm);
err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
if (err)
- goto err_put;
+ return err;
i915_vm_open(vm);
@@ -1603,8 +1813,6 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
args->value = id;
args->size = 0;
-err_put:
- i915_vm_put(vm);
return err;
}
@@ -1772,23 +1980,11 @@ set_persistence(struct i915_gem_context *ctx,
return __context_set_persistence(ctx, args->value);
}
-static void __apply_priority(struct intel_context *ce, void *arg)
-{
- struct i915_gem_context *ctx = arg;
-
- if (!intel_engine_has_timeslices(ce->engine))
- return;
-
- if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
- intel_engine_has_semaphores(ce->engine))
- intel_context_set_use_semaphores(ce);
- else
- intel_context_clear_use_semaphores(ce);
-}
-
static int set_priority(struct i915_gem_context *ctx,
const struct drm_i915_gem_context_param *args)
{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
int err;
err = validate_priority(ctx->i915, args);
@@ -1796,7 +1992,27 @@ static int set_priority(struct i915_gem_context *ctx,
return err;
ctx->sched.priority = args->value;
- context_apply_all(ctx, __apply_priority, ctx);
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_has_timeslices(ce->engine))
+ continue;
+
+ if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+ intel_engine_has_semaphores(ce->engine))
+ intel_context_set_use_semaphores(ce);
+ else
+ intel_context_clear_use_semaphores(ce);
+ }
+ i915_gem_context_unlock_engines(ctx);
+
+ return 0;
+}
+
+static int get_protected(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ args->size = 0;
+ args->value = i915_gem_context_uses_protected_content(ctx);
return 0;
}
@@ -1824,6 +2040,8 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = -EPERM;
else if (args->value)
i915_gem_context_set_bannable(ctx);
+ else if (i915_gem_context_uses_protected_content(ctx))
+ ret = -EPERM; /* can't clear this for protected contexts */
else
i915_gem_context_clear_bannable(ctx);
break;
@@ -1831,10 +2049,12 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
case I915_CONTEXT_PARAM_RECOVERABLE:
if (args->size)
ret = -EINVAL;
- else if (args->value)
- i915_gem_context_set_recoverable(ctx);
- else
+ else if (!args->value)
i915_gem_context_clear_recoverable(ctx);
+ else if (i915_gem_context_uses_protected_content(ctx))
+ ret = -EPERM; /* can't set this for protected contexts */
+ else
+ i915_gem_context_set_recoverable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
@@ -1849,6 +2069,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_persistence(ctx, args);
break;
+ case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
case I915_CONTEXT_PARAM_NO_ZEROMAP:
case I915_CONTEXT_PARAM_BAN_PERIOD:
case I915_CONTEXT_PARAM_RINGSIZE:
@@ -1927,7 +2148,7 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
old = xa_erase(&file_priv->proto_context_xa, id);
GEM_BUG_ON(old != pc);
- proto_context_close(pc);
+ proto_context_close(file_priv->dev_priv, pc);
/* One for the xarray and one for the caller */
return i915_gem_context_get(ctx);
@@ -2013,7 +2234,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
goto err_pc;
}
- proto_context_close(ext_data.pc);
+ proto_context_close(i915, ext_data.pc);
gem_context_register(ctx, ext_data.fpriv, id);
} else {
ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
@@ -2027,7 +2248,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return 0;
err_pc:
- proto_context_close(ext_data.pc);
+ proto_context_close(i915, ext_data.pc);
return ret;
}
@@ -2058,7 +2279,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
GEM_WARN_ON(ctx && pc);
if (pc)
- proto_context_close(pc);
+ proto_context_close(file_priv->dev_priv, pc);
if (ctx)
context_close(ctx);
@@ -2127,6 +2348,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
struct i915_gem_context *ctx;
+ struct i915_address_space *vm;
int ret = 0;
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
@@ -2136,12 +2358,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
switch (args->param) {
case I915_CONTEXT_PARAM_GTT_SIZE:
args->size = 0;
- rcu_read_lock();
- if (rcu_access_pointer(ctx->vm))
- args->value = rcu_dereference(ctx->vm)->total;
- else
- args->value = to_i915(dev)->ggtt.vm.total;
- rcu_read_unlock();
+ vm = i915_gem_context_get_eb_vm(ctx);
+ args->value = vm->total;
+ i915_vm_put(vm);
+
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
@@ -2177,6 +2397,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_persistent(ctx);
break;
+ case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ ret = get_protected(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_NO_ZEROMAP:
case I915_CONTEXT_PARAM_BAN_PERIOD:
case I915_CONTEXT_PARAM_ENGINES:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 18060536b0c2..babfecb17ad1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -108,6 +108,12 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
+static inline bool
+i915_gem_context_uses_protected_content(const struct i915_gem_context *ctx)
+{
+ return ctx->uses_protected_content;
+}
+
/* i915_gem_context.c */
void i915_gem_init__contexts(struct drm_i915_private *i915);
@@ -154,17 +160,22 @@ i915_gem_context_vm(struct i915_gem_context *ctx)
return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}
+static inline bool i915_gem_context_has_full_ppgtt(struct i915_gem_context *ctx)
+{
+ GEM_BUG_ON(!!ctx->vm != HAS_FULL_PPGTT(ctx->i915));
+
+ return !!ctx->vm;
+}
+
static inline struct i915_address_space *
-i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
+i915_gem_context_get_eb_vm(struct i915_gem_context *ctx)
{
struct i915_address_space *vm;
- rcu_read_lock();
- vm = rcu_dereference(ctx->vm);
+ vm = ctx->vm;
if (!vm)
vm = &ctx->i915->ggtt.vm;
vm = i915_vm_get(vm);
- rcu_read_unlock();
return vm;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 94c03a97cb77..282cdb8a5c5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -78,13 +78,16 @@ enum i915_gem_engine_type {
/** @I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set */
I915_GEM_ENGINE_TYPE_BALANCED,
+
+ /** @I915_GEM_ENGINE_TYPE_PARALLEL: A parallel engine set */
+ I915_GEM_ENGINE_TYPE_PARALLEL,
};
/**
* struct i915_gem_proto_engine - prototype engine
*
* This struct describes an engine that a context may contain. Engines
- * have three types:
+ * have four types:
*
* - I915_GEM_ENGINE_TYPE_INVALID: Invalid engines can be created but they
* show up as a NULL in i915_gem_engines::engines[i] and any attempt to
@@ -97,6 +100,10 @@ enum i915_gem_engine_type {
*
 * - I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set, described by
* i915_gem_proto_engine::num_siblings and i915_gem_proto_engine::siblings.
+ *
+ * - I915_GEM_ENGINE_TYPE_PARALLEL: A parallel submission engine set, described by
+ * i915_gem_proto_engine::width, i915_gem_proto_engine::num_siblings, and
+ * i915_gem_proto_engine::siblings.
*/
struct i915_gem_proto_engine {
/** @type: Type of this engine */
@@ -105,10 +112,13 @@ struct i915_gem_proto_engine {
/** @engine: Engine, for physical */
struct intel_engine_cs *engine;
- /** @num_siblings: Number of balanced siblings */
+ /** @num_siblings: Number of balanced or parallel siblings */
unsigned int num_siblings;
- /** @siblings: Balanced siblings */
+ /** @width: Width of each sibling */
+ unsigned int width;
+
+ /** @siblings: Balanced siblings, or num_siblings * width entries for parallel */
struct intel_engine_cs **siblings;
/** @sseu: Client-set SSEU parameters */
@@ -198,6 +208,12 @@ struct i915_gem_proto_context {
/** @single_timeline: See &i915_gem_context.syncobj */
bool single_timeline;
+
+ /** @uses_protected_content: See &i915_gem_context.uses_protected_content */
+ bool uses_protected_content;
+
+ /** @pxp_wakeref: See &i915_gem_context.pxp_wakeref */
+ intel_wakeref_t pxp_wakeref;
};
/**
@@ -262,7 +278,7 @@ struct i915_gem_context {
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
- struct i915_address_space __rcu *vm;
+ struct i915_address_space *vm;
/**
* @pid: process id of creator
@@ -289,6 +305,18 @@ struct i915_gem_context {
struct kref ref;
/**
+ * @release_work:
+ *
+ * Work item for deferred cleanup, since i915_gem_context_put() tends to
+ * be called from hardirq context.
+ *
+ * FIXME: The only real reason for this is &i915_gem_engines.fence, all
+ * other callers are from process context and need at most some mild
+ * shuffling to pull the i915_gem_context_put() call out of a spinlock.
+ */
+ struct work_struct release_work;
+
+ /**
* @rcu: rcu_head for deferred freeing.
*/
struct rcu_head rcu;
@@ -309,6 +337,28 @@ struct i915_gem_context {
#define CONTEXT_CLOSED 0
#define CONTEXT_USER_ENGINES 1
+ /**
+ * @uses_protected_content: context uses PXP-encrypted objects.
+ *
+ * This flag can only be set at ctx creation time and it's immutable for
+ * the lifetime of the context. See I915_CONTEXT_PARAM_PROTECTED_CONTENT
+ * in uapi/drm/i915_drm.h for more info on setting restrictions and
+ * expected behaviour of marked contexts.
+ */
+ bool uses_protected_content;
+
+ /**
+ * @pxp_wakeref: wakeref to keep the device awake when PXP is in use
+ *
+ * PXP sessions are invalidated when the device is suspended, which in
+ * turn invalidates all contexts and objects using it. To keep the
+ * flow simple, we keep the device awake when contexts using PXP objects
+ * are in use. It is expected that the userspace application only uses
+ * PXP when the display is on, so taking a wakeref here shouldn't worsen
+ * our power metrics.
+ */
+ intel_wakeref_t pxp_wakeref;
+
/** @mutex: guards everything that isn't engines or handles_vma */
struct mutex mutex;
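
For I915_GEM_ENGINE_TYPE_PARALLEL the @siblings array holds num_siblings * width entries; a sketch of one plausible flat indexing scheme (the slot-major layout is an assumption for illustration, the hunk above only defines the array size):

static struct intel_engine_cs *
proto_sibling_sketch(const struct i915_gem_proto_engine *pe,
		     unsigned int slot, unsigned int n)
{
	/* ASSUMPTION: slot-major layout; slot < width, n < num_siblings */
	return pe->siblings[slot * pe->num_siblings + n];
}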
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 23fee13a3384..8955d6abcef1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -6,6 +6,7 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
+#include "pxp/intel_pxp.h"
#include "i915_drv.h"
#include "i915_trace.h"
@@ -82,21 +83,11 @@ static int i915_gem_publish(struct drm_i915_gem_object *obj,
return 0;
}
-/**
- * Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT
- * @i915: i915 private
- * @size: size of the buffer, in bytes
- * @placements: possible placement regions, in priority order
- * @n_placements: number of possible placement regions
- *
- * This function is exposed primarily for selftests and does very little
- * error checking. It is assumed that the set of placement regions has
- * already been verified to be valid.
- */
-struct drm_i915_gem_object *
-__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
- struct intel_memory_region **placements,
- unsigned int n_placements)
+static struct drm_i915_gem_object *
+__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
+ struct intel_memory_region **placements,
+ unsigned int n_placements,
+ unsigned int ext_flags)
{
struct intel_memory_region *mr = placements[0];
struct drm_i915_gem_object *obj;
@@ -135,6 +126,9 @@ __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
GEM_BUG_ON(size != obj->base.size);
+ /* Add any flag set by create_ext options */
+ obj->flags |= ext_flags;
+
trace_i915_gem_object_create(obj);
return obj;
@@ -145,6 +139,26 @@ object_free:
return ERR_PTR(ret);
}
+/**
+ * Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT
+ * @i915: i915 private
+ * @size: size of the buffer, in bytes
+ * @placements: possible placement regions, in priority order
+ * @n_placements: number of possible placement regions
+ *
+ * This function is exposed primarily for selftests and does very little
+ * error checking. It is assumed that the set of placement regions has
+ * already been verified to be valid.
+ */
+struct drm_i915_gem_object *
+__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
+ struct intel_memory_region **placements,
+ unsigned int n_placements)
+{
+ return __i915_gem_object_create_user_ext(i915, size, placements,
+ n_placements, 0);
+}
+
int
i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
@@ -224,6 +238,7 @@ struct create_ext {
struct drm_i915_private *i915;
struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
unsigned int n_placements;
+ unsigned long flags;
};
static void repr_placements(char *buf, size_t size,
@@ -347,17 +362,34 @@ static int ext_set_placements(struct i915_user_extension __user *base,
{
struct drm_i915_gem_create_ext_memory_regions ext;
- if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM))
- return -ENODEV;
-
if (copy_from_user(&ext, base, sizeof(ext)))
return -EFAULT;
return set_placements(&ext, data);
}
+static int ext_set_protected(struct i915_user_extension __user *base, void *data)
+{
+ struct drm_i915_gem_create_ext_protected_content ext;
+ struct create_ext *ext_data = data;
+
+ if (copy_from_user(&ext, base, sizeof(ext)))
+ return -EFAULT;
+
+ if (ext.flags)
+ return -EINVAL;
+
+ if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp))
+ return -ENODEV;
+
+ ext_data->flags |= I915_BO_PROTECTED;
+
+ return 0;
+}
+
static const i915_user_extension_fn create_extensions[] = {
[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
+ [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
};
/**
@@ -392,9 +424,10 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
ext_data.n_placements = 1;
}
- obj = __i915_gem_object_create_user(i915, args->size,
- ext_data.placements,
- ext_data.n_placements);
+ obj = __i915_gem_object_create_user_ext(i915, args->size,
+ ext_data.placements,
+ ext_data.n_placements,
+ ext_data.flags);
if (IS_ERR(obj))
return PTR_ERR(obj);
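
From userspace the new extension chains into DRM_IOCTL_I915_GEM_CREATE_EXT; a hedged sketch using the uapi names this series adds (fd setup and full error handling elided):

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int create_protected_bo_sketch(int fd, __u64 size, __u32 *handle)
{
	struct drm_i915_gem_create_ext_protected_content protected_ext = {
		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
		.flags = 0, /* must be zero, see ext_set_protected() above */
	};
	struct drm_i915_gem_create_ext create_ext = {
		.size = size,
		.extensions = (__u64)(uintptr_t)&protected_ext,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext))
		return -errno;

	*handle = create_ext.handle;
	return 0;
}

With PXP disabled the ioctl fails with -ENODEV, matching ext_set_protected() above.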
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index afa34111de02..eae09d323049 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -7,11 +7,14 @@
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>
+#include <linux/module.h>
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
I915_SELFTEST_DECLARE(static bool force_different_devices;)
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
@@ -232,6 +235,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
unsigned int sg_page_sizes;
@@ -242,8 +246,11 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
if (IS_ERR(pages))
return PTR_ERR(pages);
- sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
+ /* XXX: consider doing a vmap flush or something */
+ if (!HAS_LLC(i915) || i915_gem_object_can_bypass_llc(obj))
+ wbinvd_on_all_cpus();
+ sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
return 0;
@@ -301,7 +308,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
}
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class,
+ I915_BO_ALLOC_USER);
obj->base.import_attach = attach;
obj->base.resv = dma_buf->resv;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1aa249908b64..4d7da07442f2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -21,6 +21,8 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
+#include "pxp/intel_pxp.h"
+
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
@@ -244,17 +246,25 @@ struct i915_execbuffer {
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
struct eb_vma *vma;
- struct intel_engine_cs *engine; /** engine to queue the request to */
+ struct intel_gt *gt; /* gt for the execbuf */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
- struct i915_request *request; /** our request to build */
- struct eb_vma *batch; /** identity of the batch obj/vma */
+ /** our requests to build */
+ struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
+ /** identity of the batch obj/vma */
+ struct eb_vma *batches[MAX_ENGINE_INSTANCE + 1];
struct i915_vma *trampoline; /** trampoline used for chaining */
+ /** used for excl fence in dma_resv objects when > 1 BB submitted */
+ struct dma_fence *composite_fence;
+
/** actual size of execobj[] as we may extend it for the cmdparser */
unsigned int buffer_count;
+ /* number of batches in execbuf IOCTL */
+ unsigned int num_batches;
+
/** list of vma not yet bound during reservation phase */
struct list_head unbound;
@@ -281,7 +291,8 @@ struct i915_execbuffer {
u64 invalid_flags; /** Set of execobj.flags that are invalid */
- u64 batch_len; /** Length of batch within object */
+ /** Length of batch within object */
+ u64 batch_len[MAX_ENGINE_INSTANCE + 1];
u32 batch_start_offset; /** Location within object of batch */
u32 batch_flags; /** Flags composed for emit_bb_start() */
struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
@@ -299,14 +310,13 @@ struct i915_execbuffer {
};
static int eb_parse(struct i915_execbuffer *eb);
-static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb,
- bool throttle);
+static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
static void eb_unpin_engine(struct i915_execbuffer *eb);
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
- return intel_engine_requires_cmd_parser(eb->engine) ||
- (intel_engine_using_cmd_parser(eb->engine) &&
+ return intel_engine_requires_cmd_parser(eb->context->engine) ||
+ (intel_engine_using_cmd_parser(eb->context->engine) &&
eb->args->batch_len);
}
@@ -533,11 +543,21 @@ eb_validate_vma(struct i915_execbuffer *eb,
return 0;
}
-static void
+static inline bool
+is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx)
+{
+ return eb->args->flags & I915_EXEC_BATCH_FIRST ?
+ buffer_idx < eb->num_batches :
+ buffer_idx >= eb->args->buffer_count - eb->num_batches;
+}
+
+static int
eb_add_vma(struct i915_execbuffer *eb,
- unsigned int i, unsigned batch_idx,
+ unsigned int *current_batch,
+ unsigned int i,
struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = eb->i915;
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct eb_vma *ev = &eb->vma[i];
@@ -564,15 +584,43 @@ eb_add_vma(struct i915_execbuffer *eb,
* Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere.
*/
- if (i == batch_idx) {
+ if (is_batch_buffer(eb, i)) {
if (entry->relocation_count &&
!(ev->flags & EXEC_OBJECT_PINNED))
ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
- eb->batch = ev;
+ eb->batches[*current_batch] = ev;
+
+ if (unlikely(ev->flags & EXEC_OBJECT_WRITE)) {
+ drm_dbg(&i915->drm,
+ "Attempting to use self-modifying batch buffer\n");
+ return -EINVAL;
+ }
+
+ if (range_overflows_t(u64,
+ eb->batch_start_offset,
+ eb->args->batch_len,
+ ev->vma->size)) {
+ drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
+ return -EINVAL;
+ }
+
+ if (eb->args->batch_len == 0)
+ eb->batch_len[*current_batch] = ev->vma->size -
+ eb->batch_start_offset;
+ else
+ eb->batch_len[*current_batch] = eb->args->batch_len;
+ if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! */
+ drm_dbg(&i915->drm, "Invalid batch length\n");
+ return -EINVAL;
+ }
+
+ ++*current_batch;
}
+
+ return 0;
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -716,14 +764,6 @@ static int eb_reserve(struct i915_execbuffer *eb)
} while (1);
}
-static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
-{
- if (eb->args->flags & I915_EXEC_BATCH_FIRST)
- return 0;
- else
- return eb->buffer_count - 1;
-}
-
static int eb_select_context(struct i915_execbuffer *eb)
{
struct i915_gem_context *ctx;
@@ -733,7 +773,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return PTR_ERR(ctx);
eb->gem_context = ctx;
- if (rcu_access_pointer(ctx->vm))
+ if (i915_gem_context_has_full_ppgtt(ctx))
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
return 0;
@@ -759,11 +799,7 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
/* Check that the context hasn't been closed in the meantime */
err = -EINTR;
if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
- struct i915_address_space *vm = rcu_access_pointer(ctx->vm);
-
- if (unlikely(vm && vma->vm != vm))
- err = -EAGAIN; /* user racing with ctx set-vm */
- else if (likely(!i915_gem_context_is_closed(ctx)))
+ if (likely(!i915_gem_context_is_closed(ctx)))
err = radix_tree_insert(&ctx->handles_vma, handle, vma);
else
err = -ENOENT;
@@ -814,6 +850,22 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
if (unlikely(!obj))
return ERR_PTR(-ENOENT);
+ /*
+ * If the user has opted-in for protected-object tracking, make
+ * sure the object encryption can be used.
+ * We only need to do this when the object is first used with
+ * this context, because the context itself will be banned when
+ * the protected objects become invalid.
+ */
+ if (i915_gem_context_uses_protected_content(eb->gem_context) &&
+ i915_gem_object_is_protected(obj)) {
+ err = intel_pxp_key_check(&vm->gt->pxp, obj, true);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+ }
+
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
@@ -832,9 +884,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
- struct drm_i915_private *i915 = eb->i915;
- unsigned int batch = eb_batch_index(eb);
- unsigned int i;
+ unsigned int i, current_batch = 0;
int err = 0;
INIT_LIST_HEAD(&eb->relocs);
@@ -854,7 +904,9 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err;
}
- eb_add_vma(eb, i, batch, vma);
+ err = eb_add_vma(eb, &current_batch, i, vma);
+ if (err)
+ return err;
if (i915_gem_object_is_userptr(vma->obj)) {
err = i915_gem_object_userptr_submit_init(vma->obj);
@@ -877,26 +929,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
}
- if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) {
- drm_dbg(&i915->drm,
- "Attempting to use self-modifying batch buffer\n");
- return -EINVAL;
- }
-
- if (range_overflows_t(u64,
- eb->batch_start_offset, eb->batch_len,
- eb->batch->vma->size)) {
- drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
- return -EINVAL;
- }
-
- if (eb->batch_len == 0)
- eb->batch_len = eb->batch->vma->size - eb->batch_start_offset;
- if (unlikely(eb->batch_len == 0)) { /* impossible! */
- drm_dbg(&i915->drm, "Invalid batch length\n");
- return -EINVAL;
- }
-
return 0;
err:
@@ -1629,8 +1661,7 @@ static int eb_reinit_userptr(struct i915_execbuffer *eb)
return 0;
}
-static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
- struct i915_request *rq)
+static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb)
{
bool have_copy = false;
struct eb_vma *ev;
@@ -1646,21 +1677,6 @@ repeat:
eb_release_vmas(eb, false);
i915_gem_ww_ctx_fini(&eb->ww);
- if (rq) {
- /* nonblocking is always false */
- if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- i915_request_put(rq);
- rq = NULL;
-
- err = -EINTR;
- goto err_relock;
- }
-
- i915_request_put(rq);
- rq = NULL;
- }
-
/*
 * We take 3 passes through the slowpath.
*
@@ -1687,28 +1703,21 @@ repeat:
if (!err)
err = eb_reinit_userptr(eb);
-err_relock:
i915_gem_ww_ctx_init(&eb->ww, true);
if (err)
goto out;
/* reacquire the objects */
repeat_validate:
- rq = eb_pin_engine(eb, false);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- rq = NULL;
+ err = eb_pin_engine(eb, false);
+ if (err)
goto err;
- }
-
- /* We didn't throttle, should be NULL */
- GEM_WARN_ON(rq);
err = eb_validate_vmas(eb);
if (err)
goto err;
- GEM_BUG_ON(!eb->batch);
+ GEM_BUG_ON(!eb->batches[0]);
list_for_each_entry(ev, &eb->relocs, reloc_link) {
if (!have_copy) {
@@ -1772,46 +1781,23 @@ out:
}
}
- if (rq)
- i915_request_put(rq);
-
return err;
}
static int eb_relocate_parse(struct i915_execbuffer *eb)
{
int err;
- struct i915_request *rq = NULL;
bool throttle = true;
retry:
- rq = eb_pin_engine(eb, throttle);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- rq = NULL;
+ err = eb_pin_engine(eb, throttle);
+ if (err) {
if (err != -EDEADLK)
return err;
goto err;
}
- if (rq) {
- bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
-
- /* Need to drop all locks now for throttling, take slowpath */
- err = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, 0);
- if (err == -ETIME) {
- if (nonblock) {
- err = -EWOULDBLOCK;
- i915_request_put(rq);
- goto err;
- }
- goto slow;
- }
- i915_request_put(rq);
- rq = NULL;
- }
-
/* only throttle once, even if we didn't need to throttle */
throttle = false;
@@ -1851,7 +1837,7 @@ err:
return err;
slow:
- err = eb_relocate_parse_slow(eb, rq);
+ err = eb_relocate_parse_slow(eb);
if (err)
/*
* If the user expects the execobject.offset and
@@ -1865,11 +1851,40 @@ slow:
return err;
}
+/*
+ * We use two helper loops to control the order in which requests / batches are
+ * created and added to the backend. Requests are created in order from the
+ * parent to the last child. Requests are added in the reverse order, from the
+ * last child to the parent. This is done for locking reasons: the timeline lock is acquired
+ * during request creation and released when the request is added to the
+ * backend. To make lockdep happy (see intel_context_timeline_lock) this must be
+ * the ordering.
+ */
+#define for_each_batch_create_order(_eb, _i) \
+ for ((_i) = 0; (_i) < (_eb)->num_batches; ++(_i))
+#define for_each_batch_add_order(_eb, _i) \
+ BUILD_BUG_ON(!typecheck(int, _i)); \
+ for ((_i) = (_eb)->num_batches - 1; (_i) >= 0; --(_i))
+
+static struct i915_request *
+eb_find_first_request_added(struct i915_execbuffer *eb)
+{
+ int i;
+
+ for_each_batch_add_order(eb, i)
+ if (eb->requests[i])
+ return eb->requests[i];
+
+ GEM_BUG_ON("Request not found");
+
+ return NULL;
+}
+
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
unsigned int i = count;
- int err = 0;
+ int err = 0, j;
while (i--) {
struct eb_vma *ev = &eb->vma[i];
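
A sketch of how the two ordering macros above pair up in practice, assuming eb->num_batches and the contexts are already set up (error handling elided; condensed from the call sites later in this diff):

static void ordering_sketch(struct i915_execbuffer *eb)
{
	unsigned int i;
	int j;

	for_each_batch_create_order(eb, i)	/* parent -> last child */
		eb->requests[i] = i915_request_create(eb_find_context(eb, i));

	for_each_batch_add_order(eb, j)		/* last child -> parent */
		__i915_request_queue(eb->requests[j], NULL);
}

Note that for_each_batch_add_order() requires a signed counter, which the embedded BUILD_BUG_ON(!typecheck(int, _i)) enforces.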
@@ -1882,11 +1897,17 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
if (flags & EXEC_OBJECT_CAPTURE) {
struct i915_capture_list *capture;
- capture = kmalloc(sizeof(*capture), GFP_KERNEL);
- if (capture) {
- capture->next = eb->request->capture_list;
- capture->vma = vma;
- eb->request->capture_list = capture;
+ for_each_batch_create_order(eb, j) {
+ if (!eb->requests[j])
+ break;
+
+ capture = kmalloc(sizeof(*capture), GFP_KERNEL);
+ if (capture) {
+ capture->next =
+ eb->requests[j]->capture_list;
+ capture->vma = vma;
+ eb->requests[j]->capture_list = capture;
+ }
}
}
@@ -1901,20 +1922,43 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
* !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
* but gcc's optimiser doesn't handle that as well and emits
* two jumps instead of one. Maybe one day...
+ *
+ * FIXME: There is also sync flushing in set_pages(), which
+ * serves a different purpose(some of the time at least).
+ *
+ * We should consider:
+ *
+ * 1. Rip out the async flush code.
+ *
+ * 2. Or make the sync flushing use the async clflush path
+ * using mandatory fences underneath. Currently the below
+ * async flush happens after we bind the object.
*/
if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
if (i915_gem_clflush_object(obj, 0))
flags &= ~EXEC_OBJECT_ASYNC;
}
+ /* We only need to await on the first request */
if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
err = i915_request_await_object
- (eb->request, obj, flags & EXEC_OBJECT_WRITE);
+ (eb_find_first_request_added(eb), obj,
+ flags & EXEC_OBJECT_WRITE);
}
- if (err == 0)
- err = i915_vma_move_to_active(vma, eb->request,
- flags | __EXEC_OBJECT_NO_RESERVE);
+ for_each_batch_add_order(eb, j) {
+ if (err)
+ break;
+ if (!eb->requests[j])
+ continue;
+
+ err = _i915_vma_move_to_active(vma, eb->requests[j],
+ j ? NULL :
+ eb->composite_fence ?
+ eb->composite_fence :
+ &eb->requests[j]->fence,
+ flags | __EXEC_OBJECT_NO_RESERVE);
+ }
}
#ifdef CONFIG_MMU_NOTIFIER
@@ -1945,11 +1989,16 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
goto err_skip;
/* Unconditionally flush any chipset caches (for streaming writes). */
- intel_gt_chipset_flush(eb->engine->gt);
+ intel_gt_chipset_flush(eb->gt);
return 0;
err_skip:
- i915_request_set_error_once(eb->request, err);
+ for_each_batch_create_order(eb, j) {
+ if (!eb->requests[j])
+ break;
+
+ i915_request_set_error_once(eb->requests[j], err);
+ }
return err;
}
@@ -2044,14 +2093,17 @@ static int eb_parse(struct i915_execbuffer *eb)
int err;
if (!eb_use_cmdparser(eb)) {
- batch = eb_dispatch_secure(eb, eb->batch->vma);
+ batch = eb_dispatch_secure(eb, eb->batches[0]->vma);
if (IS_ERR(batch))
return PTR_ERR(batch);
goto secure_batch;
}
- len = eb->batch_len;
+ if (intel_context_is_parallel(eb->context))
+ return -EINVAL;
+
+ len = eb->batch_len[0];
if (!CMDPARSER_USES_GGTT(eb->i915)) {
/*
* ppGTT backed shadow buffers must be mapped RO, to prevent
@@ -2065,11 +2117,11 @@ static int eb_parse(struct i915_execbuffer *eb)
} else {
len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
}
- if (unlikely(len < eb->batch_len)) /* last paranoid check of overflow */
+ if (unlikely(len < eb->batch_len[0])) /* last paranoid check of overflow */
return -EINVAL;
if (!pool) {
- pool = intel_gt_get_buffer_pool(eb->engine->gt, len,
+ pool = intel_gt_get_buffer_pool(eb->gt, len,
I915_MAP_WB);
if (IS_ERR(pool))
return PTR_ERR(pool);
@@ -2094,7 +2146,7 @@ static int eb_parse(struct i915_execbuffer *eb)
trampoline = shadow;
shadow = shadow_batch_pin(eb, pool->obj,
- &eb->engine->gt->ggtt->vm,
+ &eb->gt->ggtt->vm,
PIN_GLOBAL);
if (IS_ERR(shadow)) {
err = PTR_ERR(shadow);
@@ -2116,26 +2168,29 @@ static int eb_parse(struct i915_execbuffer *eb)
if (err)
goto err_trampoline;
- err = intel_engine_cmd_parser(eb->engine,
- eb->batch->vma,
+ err = intel_engine_cmd_parser(eb->context->engine,
+ eb->batches[0]->vma,
eb->batch_start_offset,
- eb->batch_len,
+ eb->batch_len[0],
shadow, trampoline);
if (err)
goto err_unpin_batch;
- eb->batch = &eb->vma[eb->buffer_count++];
- eb->batch->vma = i915_vma_get(shadow);
- eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
+ eb->batches[0] = &eb->vma[eb->buffer_count++];
+ eb->batches[0]->vma = i915_vma_get(shadow);
+ eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN;
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
secure_batch:
if (batch) {
- eb->batch = &eb->vma[eb->buffer_count++];
- eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
- eb->batch->vma = i915_vma_get(batch);
+ if (intel_context_is_parallel(eb->context))
+ return -EINVAL;
+
+ eb->batches[0] = &eb->vma[eb->buffer_count++];
+ eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN;
+ eb->batches[0]->vma = i915_vma_get(batch);
}
return 0;
@@ -2151,19 +2206,18 @@ err:
return err;
}
-static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
+static int eb_request_submit(struct i915_execbuffer *eb,
+ struct i915_request *rq,
+ struct i915_vma *batch,
+ u64 batch_len)
{
int err;
- if (intel_context_nopreempt(eb->context))
- __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
-
- err = eb_move_to_gpu(eb);
- if (err)
- return err;
+ if (intel_context_nopreempt(rq->context))
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
- err = i915_reset_gen7_sol_offsets(eb->request);
+ err = i915_reset_gen7_sol_offsets(rq);
if (err)
return err;
}
@@ -2174,26 +2228,26 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
* allows us to determine if the batch is still waiting on the GPU
* or actually running by checking the breadcrumb.
*/
- if (eb->engine->emit_init_breadcrumb) {
- err = eb->engine->emit_init_breadcrumb(eb->request);
+ if (rq->context->engine->emit_init_breadcrumb) {
+ err = rq->context->engine->emit_init_breadcrumb(rq);
if (err)
return err;
}
- err = eb->engine->emit_bb_start(eb->request,
- batch->node.start +
- eb->batch_start_offset,
- eb->batch_len,
- eb->batch_flags);
+ err = rq->context->engine->emit_bb_start(rq,
+ batch->node.start +
+ eb->batch_start_offset,
+ batch_len,
+ eb->batch_flags);
if (err)
return err;
if (eb->trampoline) {
+ GEM_BUG_ON(intel_context_is_parallel(rq->context));
GEM_BUG_ON(eb->batch_start_offset);
- err = eb->engine->emit_bb_start(eb->request,
- eb->trampoline->node.start +
- eb->batch_len,
- 0, 0);
+ err = rq->context->engine->emit_bb_start(rq,
+ eb->trampoline->node.start +
+ batch_len, 0, 0);
if (err)
return err;
}
@@ -2201,6 +2255,27 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
return 0;
}
+static int eb_submit(struct i915_execbuffer *eb)
+{
+ unsigned int i;
+ int err;
+
+ err = eb_move_to_gpu(eb);
+
+ for_each_batch_create_order(eb, i) {
+ if (!eb->requests[i])
+ break;
+
+ trace_i915_request_queue(eb->requests[i], eb->batch_flags);
+ if (!err)
+ err = eb_request_submit(eb, eb->requests[i],
+ eb->batches[i]->vma,
+ eb->batch_len[i]);
+ }
+
+ return err;
+}
+
static int num_vcs_engines(const struct drm_i915_private *i915)
{
return hweight_long(VDBOX_MASK(&i915->gt));
@@ -2266,26 +2341,11 @@ static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel
return i915_request_get(rq);
}
-static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
+static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
+ bool throttle)
{
- struct intel_context *ce = eb->context;
struct intel_timeline *tl;
struct i915_request *rq = NULL;
- int err;
-
- GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED);
-
- if (unlikely(intel_context_is_banned(ce)))
- return ERR_PTR(-EIO);
-
- /*
- * Pinning the contexts may generate requests in order to acquire
- * GGTT space, so do this first before we reserve a seqno for
- * ourselves.
- */
- err = intel_context_pin_ww(ce, &eb->ww);
- if (err)
- return ERR_PTR(err);
/*
* Take a local wakeref for preparing to dispatch the execbuf as
@@ -2296,33 +2356,108 @@ static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throt
* taken on the engine, and the parent device.
*/
tl = intel_context_timeline_lock(ce);
- if (IS_ERR(tl)) {
- intel_context_unpin(ce);
- return ERR_CAST(tl);
- }
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
intel_context_enter(ce);
if (throttle)
rq = eb_throttle(eb, ce);
intel_context_timeline_unlock(tl);
+ if (rq) {
+ bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+ long timeout = nonblock ? 0 : MAX_SCHEDULE_TIMEOUT;
+
+ if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
+ timeout) < 0) {
+ i915_request_put(rq);
+
+ tl = intel_context_timeline_lock(ce);
+ intel_context_exit(ce);
+ intel_context_timeline_unlock(tl);
+
+ if (nonblock)
+ return -EWOULDBLOCK;
+ else
+ return -EINTR;
+ }
+ i915_request_put(rq);
+ }
+
+ return 0;
+}
+
+static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
+{
+ struct intel_context *ce = eb->context, *child;
+ int err;
+ int i = 0, j = 0;
+
+ GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED);
+
+ if (unlikely(intel_context_is_banned(ce)))
+ return -EIO;
+
+ /*
+ * Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
+ */
+ err = intel_context_pin_ww(ce, &eb->ww);
+ if (err)
+ return err;
+ for_each_child(ce, child) {
+ err = intel_context_pin_ww(child, &eb->ww);
+ GEM_BUG_ON(err); /* perma-pinned should incr a counter */
+ }
+
+ for_each_child(ce, child) {
+ err = eb_pin_timeline(eb, child, throttle);
+ if (err)
+ goto unwind;
+ ++i;
+ }
+ err = eb_pin_timeline(eb, ce, throttle);
+ if (err)
+ goto unwind;
+
eb->args->flags |= __EXEC_ENGINE_PINNED;
- return rq;
+ return 0;
+
+unwind:
+ for_each_child(ce, child) {
+ if (j++ < i) {
+ mutex_lock(&child->timeline->mutex);
+ intel_context_exit(child);
+ mutex_unlock(&child->timeline->mutex);
+ }
+ }
+ for_each_child(ce, child)
+ intel_context_unpin(child);
+ intel_context_unpin(ce);
+ return err;
}
static void eb_unpin_engine(struct i915_execbuffer *eb)
{
- struct intel_context *ce = eb->context;
- struct intel_timeline *tl = ce->timeline;
+ struct intel_context *ce = eb->context, *child;
if (!(eb->args->flags & __EXEC_ENGINE_PINNED))
return;
eb->args->flags &= ~__EXEC_ENGINE_PINNED;
- mutex_lock(&tl->mutex);
+ for_each_child(ce, child) {
+ mutex_lock(&child->timeline->mutex);
+ intel_context_exit(child);
+ mutex_unlock(&child->timeline->mutex);
+
+ intel_context_unpin(child);
+ }
+
+ mutex_lock(&ce->timeline->mutex);
intel_context_exit(ce);
- mutex_unlock(&tl->mutex);
+ mutex_unlock(&ce->timeline->mutex);
intel_context_unpin(ce);
}
@@ -2373,7 +2508,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb)
static int
eb_select_engine(struct i915_execbuffer *eb)
{
- struct intel_context *ce;
+ struct intel_context *ce, *child;
unsigned int idx;
int err;
@@ -2386,6 +2521,20 @@ eb_select_engine(struct i915_execbuffer *eb)
if (IS_ERR(ce))
return PTR_ERR(ce);
+ if (intel_context_is_parallel(ce)) {
+ if (eb->buffer_count < ce->parallel.number_children + 1) {
+ intel_context_put(ce);
+ return -EINVAL;
+ }
+ if (eb->batch_start_offset || eb->args->batch_len) {
+ intel_context_put(ce);
+ return -EINVAL;
+ }
+ }
+ eb->num_batches = ce->parallel.number_children + 1;
+
+ for_each_child(ce, child)
+ intel_context_get(child);
intel_gt_pm_get(ce->engine->gt);
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
@@ -2393,6 +2542,13 @@ eb_select_engine(struct i915_execbuffer *eb)
if (err)
goto err;
}
+ for_each_child(ce, child) {
+ if (!test_bit(CONTEXT_ALLOC_BIT, &child->flags)) {
+ err = intel_context_alloc_state(child);
+ if (err)
+ goto err;
+ }
+ }
/*
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
@@ -2403,7 +2559,7 @@ eb_select_engine(struct i915_execbuffer *eb)
goto err;
eb->context = ce;
- eb->engine = ce->engine;
+ eb->gt = ce->engine->gt;
/*
* Make sure engine pool stays alive even if we call intel_context_put
@@ -2414,6 +2570,8 @@ eb_select_engine(struct i915_execbuffer *eb)
err:
intel_gt_pm_put(ce->engine->gt);
+ for_each_child(ce, child)
+ intel_context_put(child);
intel_context_put(ce);
return err;
}
@@ -2421,7 +2579,11 @@ err:
static void
eb_put_engine(struct i915_execbuffer *eb)
{
- intel_gt_pm_put(eb->engine->gt);
+ struct intel_context *child;
+
+ intel_gt_pm_put(eb->gt);
+ for_each_child(eb->context, child)
+ intel_context_put(child);
intel_context_put(eb->context);
}
@@ -2644,7 +2806,8 @@ static void put_fence_array(struct eb_fence *fences, int num_fences)
}
static int
-await_fence_array(struct i915_execbuffer *eb)
+await_fence_array(struct i915_execbuffer *eb,
+ struct i915_request *rq)
{
unsigned int n;
int err;
@@ -2658,8 +2821,7 @@ await_fence_array(struct i915_execbuffer *eb)
if (!eb->fences[n].dma_fence)
continue;
- err = i915_request_await_dma_fence(eb->request,
- eb->fences[n].dma_fence);
+ err = i915_request_await_dma_fence(rq, eb->fences[n].dma_fence);
if (err < 0)
return err;
}
@@ -2667,9 +2829,9 @@ await_fence_array(struct i915_execbuffer *eb)
return 0;
}
-static void signal_fence_array(const struct i915_execbuffer *eb)
+static void signal_fence_array(const struct i915_execbuffer *eb,
+ struct dma_fence * const fence)
{
- struct dma_fence * const fence = &eb->request->fence;
unsigned int n;
for (n = 0; n < eb->num_fences; n++) {
@@ -2717,9 +2879,9 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
break;
}
-static int eb_request_add(struct i915_execbuffer *eb, int err)
+static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq,
+ int err, bool last_parallel)
{
- struct i915_request *rq = eb->request;
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_sched_attr attr = {};
struct i915_request *prev;
@@ -2741,6 +2903,17 @@ static int eb_request_add(struct i915_execbuffer *eb, int err)
err = -ENOENT; /* override any transient errors */
}
+ if (intel_context_is_parallel(eb->context)) {
+ if (err) {
+ __i915_request_skip(rq);
+ set_bit(I915_FENCE_FLAG_SKIP_PARALLEL,
+ &rq->fence.flags);
+ }
+ if (last_parallel)
+ set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL,
+ &rq->fence.flags);
+ }
+
__i915_request_queue(rq, &attr);
/* Try to clean up the client's timeline after submitting the request */
@@ -2752,6 +2925,25 @@ static int eb_request_add(struct i915_execbuffer *eb, int err)
return err;
}
+static int eb_requests_add(struct i915_execbuffer *eb, int err)
+{
+ int i;
+
+ /*
+ * We iterate in reverse order of creation so that the timeline mutexes are
+ * released in the reverse of the order in which they were acquired.
+ */
+ for_each_batch_add_order(eb, i) {
+ struct i915_request *rq = eb->requests[i];
+
+ if (!rq)
+ continue;
+ err |= eb_request_add(eb, rq, err, i == 0);
+ }
+
+ return err;
+}
+
static const i915_user_extension_fn execbuf_extensions[] = {
[DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences,
};
@@ -2778,6 +2970,185 @@ parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args,
eb);
}
+static void eb_requests_get(struct i915_execbuffer *eb)
+{
+ unsigned int i;
+
+ for_each_batch_create_order(eb, i) {
+ if (!eb->requests[i])
+ break;
+
+ i915_request_get(eb->requests[i]);
+ }
+}
+
+static void eb_requests_put(struct i915_execbuffer *eb)
+{
+ unsigned int i;
+
+ for_each_batch_create_order(eb, i) {
+ if (!eb->requests[i])
+ break;
+
+ i915_request_put(eb->requests[i]);
+ }
+}
+
+static struct sync_file *
+eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
+{
+ struct sync_file *out_fence = NULL;
+ struct dma_fence_array *fence_array;
+ struct dma_fence **fences;
+ unsigned int i;
+
+ GEM_BUG_ON(!intel_context_is_parent(eb->context));
+
+ fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_batch_create_order(eb, i) {
+ fences[i] = &eb->requests[i]->fence;
+ __set_bit(I915_FENCE_FLAG_COMPOSITE,
+ &eb->requests[i]->fence.flags);
+ }
+
+ fence_array = dma_fence_array_create(eb->num_batches,
+ fences,
+ eb->context->parallel.fence_context,
+ eb->context->parallel.seqno,
+ false);
+ if (!fence_array) {
+ kfree(fences);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Move ownership to the dma_fence_array created above */
+ for_each_batch_create_order(eb, i)
+ dma_fence_get(fences[i]);
+
+ if (out_fence_fd != -1) {
+ out_fence = sync_file_create(&fence_array->base);
+ /* sync_file now owns fence_array, drop creation ref */
+ dma_fence_put(&fence_array->base);
+ if (!out_fence)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ eb->composite_fence = &fence_array->base;
+
+ return out_fence;
+}
+
+static struct sync_file *
+eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq,
+ struct dma_fence *in_fence, int out_fence_fd)
+{
+ struct sync_file *out_fence = NULL;
+ int err;
+
+ if (unlikely(eb->gem_context->syncobj)) {
+ struct dma_fence *fence;
+
+ fence = drm_syncobj_fence_get(eb->gem_context->syncobj);
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ if (in_fence) {
+ if (eb->args->flags & I915_EXEC_FENCE_SUBMIT)
+ err = i915_request_await_execution(rq, in_fence);
+ else
+ err = i915_request_await_dma_fence(rq, in_fence);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ if (eb->fences) {
+ err = await_fence_array(eb, rq);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ if (intel_context_is_parallel(eb->context)) {
+ out_fence = eb_composite_fence_create(eb, out_fence_fd);
+ if (IS_ERR(out_fence))
+ return out_fence;
+ } else if (out_fence_fd != -1) {
+ out_fence = sync_file_create(&rq->fence);
+ if (!out_fence)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return out_fence;
+}
+
+static struct intel_context *
+eb_find_context(struct i915_execbuffer *eb, unsigned int context_number)
+{
+ struct intel_context *child;
+
+ if (likely(context_number == 0))
+ return eb->context;
+
+ for_each_child(eb->context, child)
+ if (!--context_number)
+ return child;
+
+ GEM_BUG_ON("Context not found");
+
+ return NULL;
+}
+
+static struct sync_file *
+eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
+ int out_fence_fd)
+{
+ struct sync_file *out_fence = NULL;
+ unsigned int i;
+
+ for_each_batch_create_order(eb, i) {
+ /* Allocate a request for this batch buffer nice and early. */
+ eb->requests[i] = i915_request_create(eb_find_context(eb, i));
+ if (IS_ERR(eb->requests[i])) {
+ out_fence = ERR_CAST(eb->requests[i]);
+ eb->requests[i] = NULL;
+ return out_fence;
+ }
+
+ /*
+ * Only the first request added (committed to backend) has to
+ * take the in fences into account as all subsequent requests
+ * will have fences inserted in between them.
+ */
+ if (i + 1 == eb->num_batches) {
+ out_fence = eb_fences_add(eb, eb->requests[i],
+ in_fence, out_fence_fd);
+ if (IS_ERR(out_fence))
+ return out_fence;
+ }
+
+ /*
+ * Whilst this request exists, batch_obj will be on the
+ * active_list, and so will hold the active reference. Only when
+ * this request is retired will the batch_obj be moved onto
+ * the inactive_list and lose its active reference. Hence we do
+ * not need to explicitly hold another reference here.
+ */
+ eb->requests[i]->batch = eb->batches[i]->vma;
+ if (eb->batch_pool) {
+ GEM_BUG_ON(intel_context_is_parallel(eb->context));
+ intel_gt_buffer_pool_mark_active(eb->batch_pool,
+ eb->requests[i]);
+ }
+ }
+
+ return out_fence;
+}
+
static int
i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_file *file,
@@ -2788,7 +3159,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct sync_file *out_fence = NULL;
- struct i915_vma *batch;
int out_fence_fd = -1;
int err;
@@ -2812,12 +3182,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.buffer_count = args->buffer_count;
eb.batch_start_offset = args->batch_start_offset;
- eb.batch_len = args->batch_len;
eb.trampoline = NULL;
eb.fences = NULL;
eb.num_fences = 0;
+ memset(eb.requests, 0, sizeof(struct i915_request *) *
+ ARRAY_SIZE(eb.requests));
+ eb.composite_fence = NULL;
+
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
if (GRAPHICS_VER(i915) >= 11)
@@ -2901,70 +3274,25 @@ i915_gem_do_execbuffer(struct drm_device *dev,
ww_acquire_done(&eb.ww.ctx);
- batch = eb.batch->vma;
-
- /* Allocate a request for this batch buffer nice and early. */
- eb.request = i915_request_create(eb.context);
- if (IS_ERR(eb.request)) {
- err = PTR_ERR(eb.request);
- goto err_vma;
- }
-
- if (unlikely(eb.gem_context->syncobj)) {
- struct dma_fence *fence;
-
- fence = drm_syncobj_fence_get(eb.gem_context->syncobj);
- err = i915_request_await_dma_fence(eb.request, fence);
- dma_fence_put(fence);
- if (err)
- goto err_ext;
- }
-
- if (in_fence) {
- if (args->flags & I915_EXEC_FENCE_SUBMIT)
- err = i915_request_await_execution(eb.request,
- in_fence);
- else
- err = i915_request_await_dma_fence(eb.request,
- in_fence);
- if (err < 0)
- goto err_request;
- }
-
- if (eb.fences) {
- err = await_fence_array(&eb);
- if (err)
- goto err_request;
- }
-
- if (out_fence_fd != -1) {
- out_fence = sync_file_create(&eb.request->fence);
- if (!out_fence) {
- err = -ENOMEM;
+ out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
+ if (IS_ERR(out_fence)) {
+ err = PTR_ERR(out_fence);
+ if (eb.requests[0])
goto err_request;
- }
+ else
+ goto err_vma;
}
- /*
- * Whilst this request exists, batch_obj will be on the
- * active_list, and so will hold the active reference. Only when this
- * request is retired will the the batch_obj be moved onto the
- * inactive_list and lose its active reference. Hence we do not need
- * to explicitly hold another reference here.
- */
- eb.request->batch = batch;
- if (eb.batch_pool)
- intel_gt_buffer_pool_mark_active(eb.batch_pool, eb.request);
-
- trace_i915_request_queue(eb.request, eb.batch_flags);
- err = eb_submit(&eb, batch);
+ err = eb_submit(&eb);
err_request:
- i915_request_get(eb.request);
- err = eb_request_add(&eb, err);
+ eb_requests_get(&eb);
+ err = eb_requests_add(&eb, err);
if (eb.fences)
- signal_fence_array(&eb);
+ signal_fence_array(&eb, eb.composite_fence ?
+ eb.composite_fence :
+ &eb.requests[0]->fence);
if (out_fence) {
if (err == 0) {
@@ -2979,10 +3307,15 @@ err_request:
if (unlikely(eb.gem_context->syncobj)) {
drm_syncobj_replace_fence(eb.gem_context->syncobj,
- &eb.request->fence);
+ eb.composite_fence ?
+ eb.composite_fence :
+ &eb.requests[0]->fence);
}
- i915_request_put(eb.request);
+ if (!out_fence && eb.composite_fence)
+ dma_fence_put(eb.composite_fence);
+
+ eb_requests_put(&eb);
err_vma:
eb_release_vmas(&eb, true);
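
Condensing the hunks in this file, the new top-level submission flow reads roughly as follows (a summary sketch with error paths elided; every name comes from the code above):

static int do_execbuffer_flow_sketch(struct i915_execbuffer *eb,
				     struct dma_fence *in_fence,
				     int out_fence_fd)
{
	struct sync_file *out_fence;
	int err;

	out_fence = eb_requests_create(eb, in_fence, out_fence_fd);
	if (IS_ERR(out_fence))
		return PTR_ERR(out_fence);

	err = eb_submit(eb);		/* eb_move_to_gpu() + emit_bb_start per request */

	eb_requests_get(eb);
	err = eb_requests_add(eb, err);	/* commits in reverse (add) order */

	if (eb->fences)
		signal_fence_array(eb, eb->composite_fence ?
				       eb->composite_fence :
				       &eb->requests[0]->fence);
	eb_requests_put(eb);

	return err;
}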
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index e5ae9c06510c..a57a6b7013c2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -134,6 +134,8 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
internal_free_pages(pages);
obj->mm.dirty = false;
+
+ __start_cpu_write(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index eb345305dc52..444f8268b9c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -56,8 +56,8 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
* @obj: The object to check.
*
* This function is intended to be called from within the fence signaling
- * path where the fence keeps the object from being migrated. For example
- * during gpu reset or similar.
+ * path where the fence, or a pin, keeps the object from being migrated. For
+ * example during gpu reset or similar.
*
* Return: Whether the object is resident in lmem.
*/
@@ -66,7 +66,8 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
#ifdef CONFIG_LOCKDEP
- GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true));
+ GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) &&
+ i915_gem_object_evictable(obj));
#endif
return mr && (mr->type == INTEL_MEMORY_LOCAL ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
@@ -104,6 +105,32 @@ __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
}
struct drm_i915_gem_object *
+i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
+ const void *data, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ void *map;
+
+ obj = i915_gem_object_create_lmem(i915,
+ round_up(size, PAGE_SIZE),
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return obj;
+
+ map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ i915_gem_object_put(obj);
+ return map;
+ }
+
+ memcpy(map, data, size);
+
+ i915_gem_object_unpin_map(obj);
+
+ return obj;
+}
+
+struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags)
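
A hedged usage sketch for the new helper; the firmware-blob caller is an assumption about the intended use (likely PXP/GSC firmware), not something this diff shows:

#include <linux/firmware.h>

static struct drm_i915_gem_object *
upload_fw_sketch(struct drm_i915_private *i915, const struct firmware *fw)
{
	/* copies fw->data into a fresh contiguous lmem object via a WC map */
	return i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
}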
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 4ee81fc66302..1b88ea13435c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -24,6 +24,10 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
+i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
+ const void *data, size_t size);
+
+struct drm_i915_gem_object *
__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
resource_size_t size,
resource_size_t page_size,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 5130e8ed9564..65fc6ff5f59d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -395,7 +395,7 @@ retry:
/* Track the mmo associated with the fenced vma */
vma->mmo = mmo;
- if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
+ if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 6fb9afb65034..1e426a42a36c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -25,6 +25,7 @@
#include <linux/sched/mm.h>
#include "display/intel_frontbuffer.h"
+#include "pxp/intel_pxp.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
@@ -90,6 +91,22 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
}
/**
+ * __i915_gem_object_fini - Clean up a GEM object initialization
+ * @obj: The gem object to cleanup
+ *
+ * This function cleans up gem object fields that are set up by
+ * drm_gem_private_object_init() and i915_gem_object_init().
+ * It's primarily intended as a helper for backends that need to
+ * clean up the gem object in separate steps.
+ */
+void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
+{
+ mutex_destroy(&obj->mm.get_page.lock);
+ mutex_destroy(&obj->mm.get_dma_page.lock);
+ dma_resv_fini(&obj->base._resv);
+}
+
+/**
* Mark up the object's coherency levels for a given cache_level
* @obj: #drm_i915_gem_object
* @cache_level: cache level
@@ -111,6 +128,32 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
+bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ /*
+ * This is purely from a security perspective, so we simply don't care
+ * about non-userspace objects being able to bypass the LLC.
+ */
+ if (!(obj->flags & I915_BO_ALLOC_USER))
+ return false;
+
+ /*
+ * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
+ * possible for userspace to bypass the GTT caching bits set by the
+ * kernel, as per the given object cache_level. This is troublesome
+ * since the heavy flush we apply when first gathering the pages is
+ * skipped if the kernel thinks the object is coherent with the GPU. As
+ * a result it might be possible to bypass the cache and read the
+ * contents of the page directly, which could be stale data. If it's
+ * just a case of userspace shooting themselves in the foot then so be
+ * it, but since i915 takes the stance of always zeroing memory before
+ * handing it to userspace, we need to prevent this.
+ */
+ return IS_JSL_EHL(i915);
+}
+
static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
@@ -174,7 +217,6 @@ void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- dma_resv_fini(&obj->base._resv);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -204,10 +246,17 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
}
}
-void __i915_gem_free_object(struct drm_i915_gem_object *obj)
+/**
+ * __i915_gem_object_pages_fini - Clean up the page usage of a gem object
+ * @obj: The gem object to clean up
+ *
+ * This function cleans up usage of the object mm.pages member. It
+ * is intended for backends that need to clean up a gem object in
+ * separate steps and needs to be called when the object is idle before
+ * the object's backing memory is freed.
+ */
+void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
- trace_i915_gem_object_destroy(obj);
-
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -233,11 +282,17 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
__i915_gem_object_free_mmaps(obj);
- GEM_BUG_ON(!list_empty(&obj->lut_list));
-
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
+}
+
+void __i915_gem_free_object(struct drm_i915_gem_object *obj)
+{
+ trace_i915_gem_object_destroy(obj);
+
+ GEM_BUG_ON(!list_empty(&obj->lut_list));
+
bitmap_free(obj->bit_17);
if (obj->base.import_attach)
@@ -253,6 +308,8 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
+
+ __i915_gem_object_fini(obj);
}
static void __i915_gem_free_objects(struct drm_i915_private *i915,
@@ -266,6 +323,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
obj->ops->delayed_free(obj);
continue;
}
+ __i915_gem_object_pages_fini(obj);
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
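
The split into __i915_gem_object_pages_fini() and __i915_gem_free_object() lets backends with a delayed_free hook tear the object down in stages; a sketch of the expected call pattern, mirroring __i915_gem_free_objects() above (the backend function name is hypothetical):

static void backend_delayed_free_sketch(struct drm_i915_gem_object *obj)
{
	/* must run with the object idle: unbinds vmas, mmaps and pages */
	__i915_gem_object_pages_fini(obj);

	/* ...backend-specific cleanup (e.g. TTM state) goes here... */

	/* frees the rest, ending in __i915_gem_object_fini() */
	__i915_gem_free_object(obj);
}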
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 48112b9d76df..59201801cec5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -58,6 +58,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops,
struct lock_class_key *key,
unsigned alloc_flags);
+
+void __i915_gem_object_fini(struct drm_i915_gem_object *obj);
+
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
resource_size_t size);
@@ -270,6 +273,12 @@ i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_PROTECTED;
+}
+
+static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
unsigned long flags)
{
@@ -503,25 +512,9 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-static inline struct intel_engine_cs *
-i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
-{
- struct intel_engine_cs *engine = NULL;
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = dma_resv_get_excl_unlocked(obj->base.resv);
- rcu_read_unlock();
-
- if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
- engine = to_request(fence)->engine;
- dma_fence_put(fence);
-
- return engine;
-}
-
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
+bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
@@ -599,6 +592,8 @@ bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
void __i915_gem_free_object_rcu(struct rcu_head *head);
+void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);
+
void __i915_gem_free_object(struct drm_i915_gem_object *obj);
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 2471f36aaff3..da85169006d4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -288,17 +288,23 @@ struct drm_i915_gem_object {
I915_SELFTEST_DECLARE(struct list_head st_link);
unsigned long flags;
-#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
-#define I915_BO_ALLOC_VOLATILE BIT(1)
-#define I915_BO_ALLOC_CPU_CLEAR BIT(2)
-#define I915_BO_ALLOC_USER BIT(3)
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_VOLATILE BIT(1)
+#define I915_BO_ALLOC_CPU_CLEAR BIT(2)
+#define I915_BO_ALLOC_USER BIT(3)
+/* Object is allowed to lose its contents on suspend / resume, even if pinned */
+#define I915_BO_ALLOC_PM_VOLATILE BIT(4)
+/* Object needs to be restored early using memcpy during resume */
+#define I915_BO_ALLOC_PM_EARLY BIT(5)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_CPU_CLEAR | \
- I915_BO_ALLOC_USER)
-#define I915_BO_READONLY BIT(4)
-#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
-
+ I915_BO_ALLOC_USER | \
+ I915_BO_ALLOC_PM_VOLATILE | \
+ I915_BO_ALLOC_PM_EARLY)
+#define I915_BO_READONLY BIT(6)
+#define I915_TILING_QUIRK_BIT 7 /* unknown swizzling; do not release! */
+#define I915_BO_PROTECTED BIT(8)
/**
* @mem_flags - Mutable placement-related flags
*
@@ -421,6 +427,33 @@ struct drm_i915_gem_object {
* can freely bypass the CPU cache when touching the pages with the GPU,
 * where the kernel is completely unaware. On such platforms we need to
 * apply the sledgehammer-on-acquire regardless of the @cache_coherent.
+ *
+ * Special care is taken on non-LLC platforms, to prevent potential
+ * information leak. The driver currently ensures:
+ *
+ * 1. All userspace objects, by default, have @cache_level set as
+ * I915_CACHE_NONE. The only exception is userptr objects, where we
+ * instead force I915_CACHE_LLC, but we also don't allow userspace to
+ * ever change the @cache_level for such objects. Another special case
+ * is dma-buf, which doesn't rely on @cache_dirty, but there we
+ * always do a forced flush when acquiring the pages, if there is a
+ * chance that the pages can be read directly from main memory with
+ * the GPU.
+ *
+ * 2. All I915_CACHE_NONE objects have @cache_dirty initially true.
+ *
+ * 3. All swapped-out objects (i.e. shmem) have @cache_dirty set to
+ * true.
+ *
+ * 4. The @cache_dirty is never freely reset before the initial
+ * flush, even if userspace adjusts the @cache_level through the
+ * i915_gem_set_caching_ioctl.
+ *
+ * 5. All @cache_dirty objects (including swapped-in) are initially
+ * flushed with a synchronous call to drm_clflush_sg in
+ * __i915_gem_object_set_pages. The @cache_dirty can be freely reset
+ * at this point. All further asynchronous clflushes are never security
+ * critical, i.e. userspace is free to race against itself.
*/
unsigned int cache_dirty:1;
@@ -534,9 +567,17 @@ struct drm_i915_gem_object {
struct {
struct sg_table *cached_io_st;
struct i915_gem_object_page_iter get_io_page;
+ struct drm_i915_gem_object *backup;
bool created:1;
} ttm;
+ /*
+ * Record which PXP key instance this object was created against (if
+ * any), so we can use it to determine if the encryption is valid by
+ * comparing against the current key instance.
+ */
+ u32 pxp_key_instance;
+
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
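
A sketch of how the new PM flags are meant to combine at allocation time; the call site is illustrative (the real users live elsewhere in the series), only the flag names come from the hunk above:

static struct drm_i915_gem_object *
alloc_early_restore_sketch(struct drm_i915_private *i915, resource_size_t size)
{
	/* contents must survive suspend and be memcpy-restored early */
	return i915_gem_object_create_lmem(i915, size,
					   I915_BO_ALLOC_CONTIGUOUS |
					   I915_BO_ALLOC_PM_EARLY);
}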
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 8b9d7d14c4bd..726b40e1fbb0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -5,6 +5,7 @@
*/
#include "gem/i915_gem_pm.h"
+#include "gem/i915_gem_ttm_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
@@ -39,6 +40,88 @@ void i915_gem_suspend(struct drm_i915_private *i915)
i915_gem_drain_freed_objects(i915);
}
+static int lmem_restore(struct drm_i915_private *i915, u32 flags)
+{
+ struct intel_memory_region *mr;
+ int ret = 0, id;
+
+ for_each_memory_region(mr, i915, id) {
+ if (mr->type == INTEL_MEMORY_LOCAL) {
+ ret = i915_ttm_restore_region(mr, flags);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int lmem_suspend(struct drm_i915_private *i915, u32 flags)
+{
+ struct intel_memory_region *mr;
+ int ret = 0, id;
+
+ for_each_memory_region(mr, i915, id) {
+ if (mr->type == INTEL_MEMORY_LOCAL) {
+ ret = i915_ttm_backup_region(mr, flags);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void lmem_recover(struct drm_i915_private *i915)
+{
+ struct intel_memory_region *mr;
+ int id;
+
+ for_each_memory_region(mr, i915, id)
+ if (mr->type == INTEL_MEMORY_LOCAL)
+ i915_ttm_recover_region(mr);
+}
+
+int i915_gem_backup_suspend(struct drm_i915_private *i915)
+{
+ int ret;
+
+ /* Opportunistically try to evict unpinned objects */
+ ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU);
+ if (ret)
+ goto out_recover;
+
+ i915_gem_suspend(i915);
+
+ /*
+ * More objects may have become unpinned as requests were
+ * retired. Now try to evict again. The gt may be wedged here
+ * in which case we automatically fall back to memcpy.
+ * We also allow backing up pinned objects that have not been
+ * marked for early recovery, and that may contain, for example,
+ * page-tables for the migrate context.
+ */
+ ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU |
+ I915_TTM_BACKUP_PINNED);
+ if (ret)
+ goto out_recover;
+
+ /*
+ * Remaining objects are backed up using memcpy once we've stopped
+ * using the migrate context.
+ */
+ ret = lmem_suspend(i915, I915_TTM_BACKUP_PINNED);
+ if (ret)
+ goto out_recover;
+
+ return 0;
+
+out_recover:
+ lmem_recover(i915);
+
+ return ret;
+}
+
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -128,12 +211,20 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
void i915_gem_resume(struct drm_i915_private *i915)
{
+ int ret;
+
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
+ ret = lmem_restore(i915, 0);
+ GEM_WARN_ON(ret);
+
/*
* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
intel_gt_resume(&i915->gt);
+
+ ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
+ GEM_WARN_ON(ret);
}
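The new entry point presumably replaces a plain i915_gem_suspend() call in the driver's suspend handler; the hunk wiring it up is outside this excerpt. A hedged sketch of the intended call order (the function name and surrounding flow are assumptions, not part of this patch):

static int example_drm_suspend(struct drm_i915_private *i915)
{
	int ret;

	/* Evict or back up LMEM while the GPU can still assist. */
	ret = i915_gem_backup_suspend(i915);
	if (ret)
		return ret;

	/* Then the usual late suspend steps. */
	i915_gem_suspend_late(i915);
	return 0;
}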
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index c9a66630e92e..bedf1e95941a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -18,6 +18,7 @@ void i915_gem_idle_work_handler(struct work_struct *work);
void i915_gem_suspend(struct drm_i915_private *i915);
void i915_gem_suspend_late(struct drm_i915_private *i915);
+int i915_gem_backup_suspend(struct drm_i915_private *i915);
int i915_gem_freeze(struct drm_i915_private *i915);
int i915_gem_freeze_late(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 1f557b2178ed..a016ccec36f3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -80,3 +80,73 @@ err_object_free:
i915_gem_object_free(obj);
return ERR_PTR(err);
}
+
+/**
+ * i915_gem_process_region - Iterate over all objects of a region using ops
+ * to process and optionally skip objects
+ * @mr: The memory region
+ * @apply: ops and private data
+ *
+ * This function can be used to iterate over the regions object list,
+ * checking whether to skip objects, and, if not, lock the objects and
+ * process them using the supplied ops. Note that this function temporarily
+ * removes objects from the region list while iterating, so that if run
+ * concurrently with itself may not iterate over all objects.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
+int i915_gem_process_region(struct intel_memory_region *mr,
+ struct i915_gem_apply_to_region *apply)
+{
+ const struct i915_gem_apply_to_region_ops *ops = apply->ops;
+ struct drm_i915_gem_object *obj;
+ struct list_head still_in_list;
+ int ret = 0;
+
+ /*
+ * In the future, a non-NULL apply->ww could mean the caller is
+ * already in a locking transaction and provides its own context.
+ */
+ GEM_WARN_ON(apply->ww);
+
+ INIT_LIST_HEAD(&still_in_list);
+ mutex_lock(&mr->objects.lock);
+ for (;;) {
+ struct i915_gem_ww_ctx ww;
+
+ obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj),
+ mm.region_link);
+ if (!obj)
+ break;
+
+ list_move_tail(&obj->mm.region_link, &still_in_list);
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ /*
+ * Note: Someone else might be migrating the object at this
+ * point. The object's region is not stable until we lock
+ * the object.
+ */
+ mutex_unlock(&mr->objects.lock);
+ apply->ww = &ww;
+ for_i915_gem_ww(&ww, ret, apply->interruptible) {
+ ret = i915_gem_object_lock(obj, apply->ww);
+ if (ret)
+ continue;
+
+ if (obj->mm.region == mr)
+ ret = ops->process_obj(apply, obj);
+ /* Implicit object unlock */
+ }
+
+ i915_gem_object_put(obj);
+ mutex_lock(&mr->objects.lock);
+ if (ret)
+ break;
+ }
+ list_splice_tail(&still_in_list, &mr->objects.list);
+ mutex_unlock(&mr->objects.lock);
+
+ return ret;
+}
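As a usage sketch, an ops implementation can be as small as an object counter. The struct and function names below are invented; i915_gem_process_region() and the apply/ops types come from this patch:

struct count_apply {
	struct i915_gem_apply_to_region base;
	unsigned long count;
};

static int count_obj(struct i915_gem_apply_to_region *apply,
		     struct drm_i915_gem_object *obj)
{
	struct count_apply *ca = container_of(apply, typeof(*ca), base);

	ca->count++; /* the object is locked here, so it is safe to inspect */
	return 0;
}

static unsigned long count_region_objects(struct intel_memory_region *mr)
{
	static const struct i915_gem_apply_to_region_ops ops = {
		.process_obj = count_obj,
	};
	struct count_apply ca = { .base = { .ops = &ops }, };

	return i915_gem_process_region(mr, &ca.base) ? 0 : ca.count;
}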
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index 1008e580a89a..fcaa12d657d4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -12,6 +12,41 @@ struct intel_memory_region;
struct drm_i915_gem_object;
struct sg_table;
+struct i915_gem_apply_to_region;
+
+/**
+ * struct i915_gem_apply_to_region_ops - ops to use when iterating over all
+ * region objects.
+ */
+struct i915_gem_apply_to_region_ops {
+ /**
+ * process_obj - Process the current object
+ * @apply: Embed this for private data.
+ * @obj: The current object.
+ *
+ * Note that if this function is part of a ww transaction, and
+ * if it returns -EDEADLK for one of the objects, it may be
+ * rerun for that same object in the same pass.
+ */
+ int (*process_obj)(struct i915_gem_apply_to_region *apply,
+ struct drm_i915_gem_object *obj);
+};
+
+/**
+ * struct i915_gem_apply_to_region - Argument to the struct
+ * i915_gem_apply_to_region_ops functions.
+ * @ops: The ops for the operation.
+ * @ww: Locking context used for the transaction.
+ * @interruptible: Whether to perform object locking interruptible.
+ *
+ * This structure is intended to be embedded in a private struct if needed.
+ */
+struct i915_gem_apply_to_region {
+ const struct i915_gem_apply_to_region_ops *ops;
+ struct i915_gem_ww_ctx *ww;
+ u32 interruptible:1;
+};
+
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
struct intel_memory_region *mem);
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
@@ -22,4 +57,6 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
resource_size_t page_size,
unsigned int flags);
+int i915_gem_process_region(struct intel_memory_region *mr,
+ struct i915_gem_apply_to_region *apply);
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 11f072193f3b..d77da59fae04 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -182,22 +182,7 @@ rebuild_st:
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
- /*
- * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
- * possible for userspace to bypass the GTT caching bits set by the
- * kernel, as per the given object cache_level. This is troublesome
- * since the heavy flush we apply when first gathering the pages is
- * skipped if the kernel thinks the object is coherent with the GPU. As
- * a result it might be possible to bypass the cache and read the
- * contents of the page directly, which could be stale data. If it's
- * just a case of userspace shooting themselves in the foot then so be
- * it, but since i915 takes the stance of always zeroing memory before
- * handing it to userspace, we need to prevent this.
- *
- * By setting cache_dirty here we make the clflush in set_pages
- * unconditional on such platforms.
- */
- if (IS_JSL_EHL(i915) && obj->flags & I915_BO_ALLOC_USER)
+ if (i915_gem_object_can_bypass_llc(obj))
obj->cache_dirty = true;
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
@@ -301,6 +286,8 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
bool needs_clflush)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
if (obj->mm.madv == I915_MADV_DONTNEED)
@@ -312,6 +299,16 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
drm_clflush_sg(pages);
__start_cpu_write(obj);
+ /*
+ * On non-LLC platforms, force the flush-on-acquire if this is ever
+ * swapped-in. Our async flush path is not trustworthy enough yet (and
+ * happens in the wrong order), and with some tricks it's conceivable
+ * for userspace to change the cache-level to I915_CACHE_NONE after the
+ * pages are swapped-in, and since execbuf binds the object before doing
+ * the async flush, we have a race window.
+ */
+ if (!HAS_LLC(i915))
+ obj->cache_dirty = true;
}
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
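The IS_JSL_EHL special case removed above is folded into the new i915_gem_object_can_bypass_llc() helper, defined outside this excerpt. Judging from the removed check and comment, its shape is presumably close to:

bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* Only userspace-visible objects are a security concern. */
	if (!(obj->flags & I915_BO_ALLOC_USER))
		return false;

	/*
	 * EHL/JSL expose a 'Bypass LLC' MOCS entry that lets userspace
	 * skip the GTT caching bits, so the heavy first-acquire flush
	 * must not be elided on those platforms.
	 */
	return IS_JSL_EHL(i915);
}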
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 6ea13159bffc..74a1ffd0d7dd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -10,18 +10,16 @@
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
+#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
-#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_ttm_pm.h"
-#include "gt/intel_migrate.h"
-#include "gt/intel_engine_pm.h"
-#define I915_PL_LMEM0 TTM_PL_PRIV
-#define I915_PL_SYSTEM TTM_PL_SYSTEM
-#define I915_PL_STOLEN TTM_PL_VRAM
-#define I915_PL_GGTT TTM_PL_TT
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_migrate.h"
#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
@@ -64,6 +62,20 @@ static struct ttm_placement i915_sys_placement = {
.busy_placement = &sys_placement_flags,
};
+/**
+ * i915_ttm_sys_placement - Return the struct ttm_placement to be
+ * used for an object in system memory.
+ *
+ * Rather than making the static struct extern, use this
+ * accessor function.
+ *
+ * Return: A pointer to a static variable for sys placement.
+ */
+struct ttm_placement *i915_ttm_sys_placement(void)
+{
+ return &i915_sys_placement;
+}
+
static int i915_ttm_err_to_gem(int err)
{
/* Fastpath */
@@ -182,7 +194,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
man->use_tt)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
i915_ttm_select_tt_caching(obj));
@@ -214,7 +226,6 @@ static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
kfree(i915_tt);
}
@@ -356,8 +367,10 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (likely(obj))
+ if (likely(obj)) {
+ __i915_gem_object_pages_fini(obj);
i915_ttm_free_cached_io_st(obj);
+ }
}
static struct intel_memory_region *
@@ -427,7 +440,9 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
}
static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
+ bool clear,
struct ttm_resource *dst_mem,
+ struct ttm_tt *dst_ttm,
struct sg_table *dst_st)
{
struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
@@ -437,21 +452,18 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct sg_table *src_st;
struct i915_request *rq;
- struct ttm_tt *ttm = bo->ttm;
+ struct ttm_tt *src_ttm = bo->ttm;
enum i915_cache_level src_level, dst_level;
int ret;
- if (!i915->gt.migrate.context)
+ if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt))
return -EINVAL;
- dst_level = i915_ttm_cache_level(i915, dst_mem, ttm);
- if (!ttm || !ttm_tt_is_populated(ttm)) {
+ dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
+ if (clear) {
if (bo->type == ttm_bo_type_kernel)
return -EINVAL;
- if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
- return 0;
-
intel_engine_pm_get(i915->gt.migrate.context->engine);
ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
dst_st->sgl, dst_level,
@@ -464,10 +476,10 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
}
intel_engine_pm_put(i915->gt.migrate.context->engine);
} else {
- src_st = src_man->use_tt ? i915_ttm_tt_get_st(ttm) :
+ src_st = src_man->use_tt ? i915_ttm_tt_get_st(src_ttm) :
obj->ttm.cached_io_st;
- src_level = i915_ttm_cache_level(i915, bo->resource, ttm);
+ src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
intel_engine_pm_get(i915->gt.migrate.context->engine);
ret = intel_context_migrate_copy(i915->gt.migrate.context,
NULL, src_st->sgl, src_level,
@@ -485,6 +497,44 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
return ret;
}
+static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
+ struct ttm_resource *dst_mem,
+ struct ttm_tt *dst_ttm,
+ struct sg_table *dst_st,
+ bool allow_accel)
+{
+ int ret = -EINVAL;
+
+ if (allow_accel)
+ ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, dst_st);
+ if (ret) {
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct intel_memory_region *dst_reg, *src_reg;
+ union {
+ struct ttm_kmap_iter_tt tt;
+ struct ttm_kmap_iter_iomap io;
+ } _dst_iter, _src_iter;
+ struct ttm_kmap_iter *dst_iter, *src_iter;
+
+ dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
+ src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
+ GEM_BUG_ON(!dst_reg || !src_reg);
+
+ dst_iter = !cpu_maps_iomem(dst_mem) ?
+ ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) :
+ ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
+ dst_st, dst_reg->region.start);
+
+ src_iter = !cpu_maps_iomem(bo->resource) ?
+ ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
+ ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
+ obj->ttm.cached_io_st,
+ src_reg->region.start);
+
+ ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
+ }
+}
+
static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *dst_mem,
@@ -493,19 +543,11 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct ttm_resource_manager *dst_man =
ttm_manager_type(bo->bdev, dst_mem->mem_type);
- struct intel_memory_region *dst_reg, *src_reg;
- union {
- struct ttm_kmap_iter_tt tt;
- struct ttm_kmap_iter_iomap io;
- } _dst_iter, _src_iter;
- struct ttm_kmap_iter *dst_iter, *src_iter;
+ struct ttm_tt *ttm = bo->ttm;
struct sg_table *dst_st;
+ bool clear;
int ret;
- dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
- src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
- GEM_BUG_ON(!dst_reg || !src_reg);
-
/* Sync for now. We could do the actual copy async. */
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
@@ -522,9 +564,8 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
}
/* Populate ttm with pages if needed. Typically system memory. */
- if (bo->ttm && (dst_man->use_tt ||
- (bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
+ ret = ttm_tt_populate(bo->bdev, ttm, ctx);
if (ret)
return ret;
}
@@ -533,23 +574,10 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
if (IS_ERR(dst_st))
return PTR_ERR(dst_st);
- ret = i915_ttm_accel_move(bo, dst_mem, dst_st);
- if (ret) {
- /* If we start mapping GGTT, we can no longer use man::use_tt here. */
- dst_iter = !cpu_maps_iomem(dst_mem) ?
- ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
- ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
- dst_st, dst_reg->region.start);
+ clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
+ if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
+ __i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_st, true);
- src_iter = !cpu_maps_iomem(bo->resource) ?
- ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
- ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
- obj->ttm.cached_io_st,
- src_reg->region.start);
-
- ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
- }
- /* Below dst_mem becomes bo->resource. */
ttm_bo_move_sync_cleanup(bo, dst_mem);
i915_ttm_adjust_domains_after_move(obj);
i915_ttm_free_cached_io_st(obj);
@@ -787,12 +815,9 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
*/
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
- if (obj->ttm.created) {
- ttm_bo_put(i915_gem_to_ttm(obj));
- } else {
- __i915_gem_free_object(obj);
- call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
- }
+ GEM_BUG_ON(!obj->ttm.created);
+
+ ttm_bo_put(i915_gem_to_ttm(obj));
}
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
@@ -872,14 +897,19 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- /* This releases all gem object bindings to the backend. */
- __i915_gem_free_object(obj);
-
i915_gem_object_release_memory_region(obj);
mutex_destroy(&obj->ttm.get_io_page.lock);
- if (obj->ttm.created)
+ if (obj->ttm.created) {
+ i915_ttm_backup_free(obj);
+
+ /* This releases all gem object bindings to the backend. */
+ __i915_gem_free_object(obj);
+
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+ } else {
+ __i915_gem_object_fini(obj);
+ }
}
/**
@@ -908,7 +938,11 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
- i915_gem_object_init_memory_region(obj, mem);
+
+ /* Don't put on a region list until we're either locked or fully initialized. */
+ obj->mm.region = intel_memory_region_get(mem);
+ INIT_LIST_HEAD(&obj->mm.region_link);
+
i915_gem_object_make_unshrinkable(obj);
INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->ttm.get_io_page.lock);
@@ -935,6 +969,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
return i915_ttm_err_to_gem(ret);
obj->ttm.created = true;
+ i915_gem_object_release_memory_region(obj);
+ i915_gem_object_init_memory_region(obj, mem);
i915_ttm_adjust_domains_after_move(obj);
i915_ttm_adjust_gem_after_move(obj);
i915_gem_object_unlock(obj);
@@ -963,3 +999,50 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915,
intel_memory_region_set_name(mr, "system-ttm");
return mr;
}
+
+/**
+ * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
+ * another
+ * @dst: The destination object
+ * @src: The source object
+ * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
+ * @intr: Whether to perform waits interruptibly.
+ *
+ * Note: The caller is responsible for ensuring that the underlying
+ * TTM objects are populated if needed and locked.
+ *
+ * Return: Zero on success. Negative error code on error. If @intr == true,
+ * then it may return -ERESTARTSYS or -EINTR.
+ */
+int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
+ struct drm_i915_gem_object *src,
+ bool allow_accel, bool intr)
+{
+ struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
+ struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = intr,
+ };
+ struct sg_table *dst_st;
+ int ret;
+
+ assert_object_held(dst);
+ assert_object_held(src);
+
+ /*
+ * Sync for now. This will change with async moves.
+ */
+ ret = ttm_bo_wait_ctx(dst_bo, &ctx);
+ if (!ret)
+ ret = ttm_bo_wait_ctx(src_bo, &ctx);
+ if (ret)
+ return ret;
+
+ dst_st = gpu_binds_iomem(dst_bo->resource) ?
+ dst->ttm.cached_io_st : i915_ttm_tt_get_st(dst_bo->ttm);
+
+ __i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm,
+ dst_st, allow_accel);
+
+ return 0;
+}
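A hedged usage sketch honouring the locking contract in the kernel-doc above; the ww loop is standard i915 idiom rather than part of this patch, and src/dst stand for two already-populated TTM-based objects:

struct i915_gem_ww_ctx ww;
int err;

for_i915_gem_ww(&ww, err, true) {
	err = i915_gem_object_lock(src, &ww);
	if (err)
		continue;
	err = i915_gem_object_lock(dst, &ww);
	if (err)
		continue;

	/* Prefer the blitter; the helper falls back to memcpy. */
	err = i915_gem_obj_copy_ttm(dst, src, true, true);
}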
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 40927f67b6d9..0b7291dd897c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -46,4 +46,18 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
resource_size_t size,
resource_size_t page_size,
unsigned int flags);
+
+int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
+ struct drm_i915_gem_object *src,
+ bool allow_accel, bool intr);
+
+/* Internal I915 TTM declarations and definitions below. */
+
+#define I915_PL_LMEM0 TTM_PL_PRIV
+#define I915_PL_SYSTEM TTM_PL_SYSTEM
+#define I915_PL_STOLEN TTM_PL_VRAM
+#define I915_PL_GGTT TTM_PL_TT
+
+struct ttm_placement *i915_ttm_sys_placement(void);
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
new file mode 100644
index 000000000000..3b6d14b5c604
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
+
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_ttm_pm.h"
+
+/**
+ * i915_ttm_backup_free - Free any backup attached to this object
+ * @obj: The object whose backup is to be freed.
+ */
+void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
+{
+ if (obj->ttm.backup) {
+ i915_gem_object_put(obj->ttm.backup);
+ obj->ttm.backup = NULL;
+ }
+}
+
+/**
+ * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for backup and restore
+ * @base: The i915_gem_apply_to_region we derive from.
+ * @allow_gpu: Whether using the gpu blitter is allowed.
+ * @backup_pinned: On backup, backup also pinned objects.
+ */
+struct i915_gem_ttm_pm_apply {
+ struct i915_gem_apply_to_region base;
+ bool allow_gpu : 1;
+ bool backup_pinned : 1;
+};
+
+static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_ttm_pm_apply *pm_apply =
+ container_of(apply, typeof(*pm_apply), base);
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct ttm_buffer_object *backup_bo;
+ struct drm_i915_private *i915 =
+ container_of(bo->bdev, typeof(*i915), bdev);
+ struct drm_i915_gem_object *backup;
+ struct ttm_operation_ctx ctx = {};
+ int err = 0;
+
+ if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
+ return 0;
+
+ if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
+ return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+
+ if (!pm_apply->backup_pinned ||
+ (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
+ return 0;
+
+ if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
+ return 0;
+
+ backup = i915_gem_object_create_shmem(i915, obj->base.size);
+ if (IS_ERR(backup))
+ return PTR_ERR(backup);
+
+ err = i915_gem_object_lock(backup, apply->ww);
+ if (err)
+ goto out_no_lock;
+
+ backup_bo = i915_gem_to_ttm(backup);
+ err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ if (err)
+ goto out_no_populate;
+
+ err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
+ GEM_WARN_ON(err);
+
+ obj->ttm.backup = backup;
+ return 0;
+
+out_no_populate:
+ i915_gem_ww_unlock_single(backup);
+out_no_lock:
+ i915_gem_object_put(backup);
+
+ return err;
+}
+
+static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
+ struct drm_i915_gem_object *obj)
+{
+ i915_ttm_backup_free(obj);
+ return 0;
+}
+
+/**
+ * i915_ttm_recover_region - Free the backup of all objects of a region
+ * @mr: The memory region
+ *
+ * Checks each object of a region for an attached backup and, if one
+ * exists, frees it. Typically this is called to recover after a partially
+ * performed backup.
+ */
+void i915_ttm_recover_region(struct intel_memory_region *mr)
+{
+ static const struct i915_gem_apply_to_region_ops recover_ops = {
+ .process_obj = i915_ttm_recover,
+ };
+ struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
+ int ret;
+
+ ret = i915_gem_process_region(mr, &apply);
+ GEM_WARN_ON(ret);
+}
+
+/**
+ * i915_ttm_backup_region - Back up all objects of a region to smem.
+ * @mr: The memory region
+ * @flags: I915_TTM_BACKUP_* flags. I915_TTM_BACKUP_ALLOW_GPU allows
+ * using the gpu blitter for this backup; I915_TTM_BACKUP_PINNED also
+ * backs up pinned objects.
+ *
+ * Loops over all objects of a region and either evicts them if they are
+ * evictable or backs them up using a backup object if they are pinned.
+ *
+ * Return: Zero on success. Negative error code on error.
+ */
+int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
+{
+ static const struct i915_gem_apply_to_region_ops backup_ops = {
+ .process_obj = i915_ttm_backup,
+ };
+ struct i915_gem_ttm_pm_apply pm_apply = {
+ .base = {.ops = &backup_ops},
+ .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
+ .backup_pinned = flags & I915_TTM_BACKUP_PINNED,
+ };
+
+ return i915_gem_process_region(mr, &pm_apply.base);
+}
+
+static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_ttm_pm_apply *pm_apply =
+ container_of(apply, typeof(*pm_apply), base);
+ struct drm_i915_gem_object *backup = obj->ttm.backup;
+ struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
+ struct ttm_operation_ctx ctx = {};
+ int err;
+
+ if (!backup)
+ return 0;
+
+ if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
+ return 0;
+
+ err = i915_gem_object_lock(backup, apply->ww);
+ if (err)
+ return err;
+
+ /* Content may have been swapped. */
+ err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ if (!err) {
+ err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
+ false);
+ GEM_WARN_ON(err);
+
+ obj->ttm.backup = NULL;
+ err = 0;
+ }
+
+ i915_gem_ww_unlock_single(backup);
+
+ if (!err)
+ i915_gem_object_put(backup);
+
+ return err;
+}
+
+/**
+ * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
+ * @mr: The memory region
+ * @flags: I915_TTM_BACKUP_* flags. I915_TTM_BACKUP_ALLOW_GPU allows
+ * using the gpu blitter for the restore.
+ *
+ * Loops over all objects of a region and, if they are backed up, restores
+ * them from smem.
+ *
+ * Return: Zero on success. Negative error code on error.
+ */
+int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
+{
+ static const struct i915_gem_apply_to_region_ops restore_ops = {
+ .process_obj = i915_ttm_restore,
+ };
+ struct i915_gem_ttm_pm_apply pm_apply = {
+ .base = {.ops = &restore_ops},
+ .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
+ };
+
+ return i915_gem_process_region(mr, &pm_apply.base);
+}
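For readability, the per-object skip rules applied by i915_ttm_backup() above can be restated as a predicate (a hypothetical helper, not in the patch):

static bool backup_should_skip(const struct drm_i915_gem_object *obj,
			       bool allow_gpu, bool backup_pinned)
{
	if (!backup_pinned)
		return true;	/* this pass only evicts unpinned objects */
	if (allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY))
		return true;	/* deferred to the final memcpy-only pass */
	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
		return true;	/* contents are allowed to be lost */
	return false;
}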
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h
new file mode 100644
index 000000000000..25ed67a31571
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _I915_GEM_TTM_PM_H_
+#define _I915_GEM_TTM_PM_H_
+
+#include <linux/types.h>
+
+struct intel_memory_region;
+struct drm_i915_gem_object;
+
+#define I915_TTM_BACKUP_ALLOW_GPU BIT(0)
+#define I915_TTM_BACKUP_PINNED BIT(1)
+
+int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags);
+
+void i915_ttm_recover_region(struct intel_memory_region *mr);
+
+int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags);
+
+/* Internal I915 TTM functions below. */
+void i915_ttm_backup_free(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 8ea0fa665e53..3173c9f9a040 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -165,8 +165,11 @@ alloc_table:
goto err;
}
- sg_page_sizes = i915_sg_dma_sizes(st->sgl);
+ WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
+ if (i915_gem_object_can_bypass_llc(obj))
+ obj->cache_dirty = true;
+ sg_page_sizes = i915_sg_dma_sizes(st->sgl);
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -546,7 +549,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
- i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
+ I915_BO_ALLOC_USER);
obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 5e6e8c91ab38..dbdbdc344d87 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -6,7 +6,6 @@
#include <linux/fs.h>
#include <linux/mount.h>
-#include <linux/pagemap.h>
#include "i915_drv.h"
#include "i915_gemfs.h"
@@ -15,6 +14,7 @@ int i915_gemfs_init(struct drm_i915_private *i915)
{
struct file_system_type *type;
struct vfsmount *gemfs;
+ char *opts;
type = get_fs_type("tmpfs");
if (!type)
@@ -26,10 +26,26 @@ int i915_gemfs_init(struct drm_i915_private *i915)
*
* One example, although it is probably better with a per-file
* control, is selecting huge page allocations ("huge=within_size").
- * Currently unused due to bandwidth issues (slow reads) on Broadwell+.
+ * However, we only do so when the IOMMU is active, to offset the
+ * overhead of iommu lookups; huge pages are otherwise left disabled
+ * due to bandwidth issues (slow reads) on Broadwell+.
*/
- gemfs = kern_mount(type);
+ opts = NULL;
+ if (intel_vtd_active()) {
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ static char huge_opt[] = "huge=within_size"; /* r/w */
+
+ opts = huge_opt;
+ drm_info(&i915->drm,
+ "Transparent Hugepage mode '%s'\n",
+ opts);
+ } else {
+ drm_notice(&i915->drm,
+ "Transparent Hugepage support is recommended for optimal performance when IOMMU is enabled!\n");
+ }
+ }
+
+ gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, opts);
if (IS_ERR(gemfs))
return PTR_ERR(gemfs);
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index a094f3ce1a90..b2003133deaf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -136,6 +136,8 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
huge_pages_free_pages(pages);
obj->mm.dirty = false;
+
+ __start_cpu_write(obj);
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
@@ -152,6 +154,7 @@ huge_pages_object(struct drm_i915_private *i915,
{
static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
+ unsigned int cache_level;
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));
@@ -173,7 +176,9 @@ huge_pages_object(struct drm_i915_private *i915,
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->cache_level = I915_CACHE_NONE;
+
+ cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ i915_gem_object_set_cache_coherency(obj, cache_level);
obj->mm.page_mask = page_mask;
@@ -1456,7 +1461,7 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
+ struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1512,13 +1517,14 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
+ struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
struct drm_i915_gem_object *obj;
struct i915_gem_engines_iter it;
struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
unsigned int n;
+ bool should_swap;
int err = 0;
/*
@@ -1567,23 +1573,39 @@ static int igt_shrink_thp(void *arg)
break;
}
i915_gem_context_unlock_engines(ctx);
+ /*
+ * Nuke everything *before* we unpin the pages so we can be reasonably
+ * sure that, when we later check get_nr_swap_pages(), some random
+ * leftover object hasn't stolen the remaining swap space.
+ */
+ i915_gem_shrink(NULL, i915, -1UL, NULL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE);
i915_vma_unpin(vma);
if (err)
goto out_put;
/*
- * Now that the pages are *unpinned* shrink-all should invoke
- * shmem to truncate our pages.
+ * Now that the pages are *unpinned* shrinking should invoke
+ * shmem to truncate our pages, if we have available swap.
*/
- i915_gem_shrink_all(i915);
- if (i915_gem_object_has_pages(obj)) {
- pr_err("shrink-all didn't truncate the pages\n");
+ should_swap = get_nr_swap_pages() > 0;
+ i915_gem_shrink(NULL, i915, -1UL, NULL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE |
+ I915_SHRINK_WRITEBACK);
+ if (should_swap == i915_gem_object_has_pages(obj)) {
+ pr_err("unexpected pages mismatch, should_swap=%s\n",
+ yesno(should_swap));
err = -EINVAL;
goto out_put;
}
- if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
- pr_err("residual page-size bits left\n");
+ if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
+ pr_err("unexpected residual page-size bits, should_swap=%s\n",
+ yesno(should_swap));
err = -EINVAL;
goto out_put;
}
@@ -1629,7 +1651,7 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
- ppgtt = i915_ppgtt_create(&dev_priv->gt);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1688,11 +1710,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
goto out_file;
}
- mutex_lock(&ctx->mutex);
- vm = i915_gem_context_vm(ctx);
+ vm = ctx->vm;
if (vm)
WRITE_ONCE(vm->scrub_64K, true);
- mutex_unlock(&ctx->mutex);
err = i915_subtests(tests, ctx);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index ecbcbb86ae1e..8402ed925a69 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -17,13 +17,20 @@
#include "huge_gem_object.h"
#include "mock_context.h"
+enum client_tiling {
+ CLIENT_TILING_LINEAR,
+ CLIENT_TILING_X,
+ CLIENT_TILING_Y,
+ CLIENT_NUM_TILING_TYPES
+};
+
#define WIDTH 512
#define HEIGHT 32
struct blit_buffer {
struct i915_vma *vma;
u32 start_val;
- u32 tiling;
+ enum client_tiling tiling;
};
struct tiled_blits {
@@ -53,9 +60,9 @@ static int prepare_blit(const struct tiled_blits *t,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
- if (src->tiling == I915_TILING_Y)
+ if (src->tiling == CLIENT_TILING_Y)
cmd |= BCS_SRC_Y;
- if (dst->tiling == I915_TILING_Y)
+ if (dst->tiling == CLIENT_TILING_Y)
cmd |= BCS_DST_Y;
*cs++ = cmd;
@@ -172,7 +179,7 @@ static int tiled_blits_create_buffers(struct tiled_blits *t,
t->buffers[i].vma = vma;
t->buffers[i].tiling =
- i915_prandom_u32_max_state(I915_TILING_Y + 1, prng);
+ i915_prandom_u32_max_state(CLIENT_TILING_Y + 1, prng);
}
return 0;
@@ -197,17 +204,17 @@ static u64 swizzle_bit(unsigned int bit, u64 offset)
static u64 tiled_offset(const struct intel_gt *gt,
u64 v,
unsigned int stride,
- unsigned int tiling)
+ enum client_tiling tiling)
{
unsigned int swizzle;
u64 x, y;
- if (tiling == I915_TILING_NONE)
+ if (tiling == CLIENT_TILING_LINEAR)
return v;
y = div64_u64_rem(v, stride, &x);
- if (tiling == I915_TILING_X) {
+ if (tiling == CLIENT_TILING_X) {
v = div64_u64_rem(y, 8, &y) * stride * 8;
v += y * 512;
v += div64_u64_rem(x, 512, &x) << 12;
@@ -244,12 +251,12 @@ static u64 tiled_offset(const struct intel_gt *gt,
return v;
}
-static const char *repr_tiling(int tiling)
+static const char *repr_tiling(enum client_tiling tiling)
{
switch (tiling) {
- case I915_TILING_NONE: return "linear";
- case I915_TILING_X: return "X";
- case I915_TILING_Y: return "Y";
+ case CLIENT_TILING_LINEAR: return "linear";
+ case CLIENT_TILING_X: return "X";
+ case CLIENT_TILING_Y: return "Y";
default: return "unknown";
}
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 8eb5050f8cb3..b32f7fed2d9c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -27,12 +27,6 @@
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
-static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
-{
- /* single threaded, private ctx */
- return rcu_dereference_protected(ctx->vm, true);
-}
-
static int live_nop_switch(void *arg)
{
const unsigned int nctx = 1024;
@@ -94,7 +88,7 @@ static int live_nop_switch(void *arg)
rq = i915_request_get(this);
i915_request_add(this);
}
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
intel_gt_set_wedged(&i915->gt);
i915_request_put(rq);
@@ -704,7 +698,7 @@ static int igt_ctx_exec(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name,
- yesno(!!rcu_access_pointer(ctx->vm)),
+ yesno(i915_gem_context_has_full_ppgtt(ctx)),
err);
intel_context_put(ce);
kernel_context_close(ctx);
@@ -813,7 +807,7 @@ static int igt_shared_ctx_exec(void *arg)
struct i915_gem_context *ctx;
struct intel_context *ce;
- ctx = kernel_context(i915, ctx_vm(parent));
+ ctx = kernel_context(i915, parent->vm);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_test;
@@ -823,7 +817,7 @@ static int igt_shared_ctx_exec(void *arg)
GEM_BUG_ON(IS_ERR(ce));
if (!obj) {
- obj = create_test_object(ctx_vm(parent),
+ obj = create_test_object(parent->vm,
file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -838,7 +832,7 @@ static int igt_shared_ctx_exec(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name,
- yesno(!!rcu_access_pointer(ctx->vm)),
+ yesno(i915_gem_context_has_full_ppgtt(ctx)),
err);
intel_context_put(ce);
kernel_context_close(ctx);
@@ -1380,7 +1374,7 @@ static int igt_ctx_readonly(void *arg)
goto out_file;
}
- vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
+ vm = ctx->vm ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
err = 0;
goto out_file;
@@ -1417,7 +1411,7 @@ static int igt_ctx_readonly(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
ce->engine->name,
- yesno(!!ctx_vm(ctx)),
+ yesno(i915_gem_context_has_full_ppgtt(ctx)),
err);
i915_gem_context_unlock_engines(ctx);
goto out_file;
@@ -1499,7 +1493,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
- err = check_scratch(ctx_vm(ctx), offset);
+ err = check_scratch(ctx->vm, offset);
if (err)
return err;
@@ -1528,7 +1522,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
intel_gt_chipset_flush(engine->gt);
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -1596,7 +1590,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
- err = check_scratch(ctx_vm(ctx), offset);
+ err = check_scratch(ctx->vm, offset);
if (err)
return err;
@@ -1607,7 +1601,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (GRAPHICS_VER(i915) >= 8) {
const u32 GPR0 = engine->mmio_base + 0x600;
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -1739,7 +1733,7 @@ static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
u32 *vaddr;
int err = 0;
- vm = ctx_vm(ctx);
+ vm = ctx->vm;
if (!vm)
return -ENODEV;
@@ -1801,7 +1795,7 @@ static int igt_vm_isolation(void *arg)
}
/* We can only test vm isolation, if the vm are distinct */
- if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
+ if (ctx_a->vm == ctx_b->vm)
goto out_file;
/* Read the initial state of the scratch page */
@@ -1813,8 +1807,8 @@ static int igt_vm_isolation(void *arg)
if (err)
goto out_file;
- vm_total = ctx_vm(ctx_a)->total;
- GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
+ vm_total = ctx_a->vm->total;
+ GEM_BUG_ON(ctx_b->vm->total != vm_total);
count = 0;
num_engines = 0;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
deleted file mode 100644
index 16162fc2782d..000000000000
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ /dev/null
@@ -1,190 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2020 Intel Corporation
- */
-
-#include "i915_selftest.h"
-
-#include "gt/intel_engine_pm.h"
-#include "selftests/igt_flush_test.h"
-
-static u64 read_reloc(const u32 *map, int x, const u64 mask)
-{
- u64 reloc;
-
- memcpy(&reloc, &map[x], sizeof(reloc));
- return reloc & mask;
-}
-
-static int __igt_gpu_reloc(struct i915_execbuffer *eb,
- struct drm_i915_gem_object *obj)
-{
- const unsigned int offsets[] = { 8, 3, 0 };
- const u64 mask =
- GENMASK_ULL(eb->reloc_cache.use_64bit_reloc ? 63 : 31, 0);
- const u32 *map = page_mask_bits(obj->mm.mapping);
- struct i915_request *rq;
- struct i915_vma *vma;
- int err;
- int i;
-
- vma = i915_vma_instance(obj, eb->context->vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- err = i915_gem_object_lock(obj, &eb->ww);
- if (err)
- return err;
-
- err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH);
- if (err)
- return err;
-
- /* 8-Byte aligned */
- err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
- if (err <= 0)
- goto reloc_err;
-
- /* !8-Byte aligned */
- err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
- if (err <= 0)
- goto reloc_err;
-
- /* Skip to the end of the cmd page */
- i = PAGE_SIZE / sizeof(u32) - 1;
- i -= eb->reloc_cache.rq_size;
- memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
- MI_NOOP, i);
- eb->reloc_cache.rq_size += i;
-
- /* Force next batch */
- err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
- if (err <= 0)
- goto reloc_err;
-
- GEM_BUG_ON(!eb->reloc_cache.rq);
- rq = i915_request_get(eb->reloc_cache.rq);
- reloc_gpu_flush(eb, &eb->reloc_cache);
- GEM_BUG_ON(eb->reloc_cache.rq);
-
- err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
- if (err) {
- intel_gt_set_wedged(eb->engine->gt);
- goto put_rq;
- }
-
- if (!i915_request_completed(rq)) {
- pr_err("%s: did not wait for relocations!\n", eb->engine->name);
- err = -EINVAL;
- goto put_rq;
- }
-
- for (i = 0; i < ARRAY_SIZE(offsets); i++) {
- u64 reloc = read_reloc(map, offsets[i], mask);
-
- if (reloc != i) {
- pr_err("%s[%d]: map[%d] %llx != %x\n",
- eb->engine->name, i, offsets[i], reloc, i);
- err = -EINVAL;
- }
- }
- if (err)
- igt_hexdump(map, 4096);
-
-put_rq:
- i915_request_put(rq);
-unpin_vma:
- i915_vma_unpin(vma);
- return err;
-
-reloc_err:
- if (!err)
- err = -EIO;
- goto unpin_vma;
-}
-
-static int igt_gpu_reloc(void *arg)
-{
- struct i915_execbuffer eb;
- struct drm_i915_gem_object *scratch;
- int err = 0;
- u32 *map;
-
- eb.i915 = arg;
-
- scratch = i915_gem_object_create_internal(eb.i915, 4096);
- if (IS_ERR(scratch))
- return PTR_ERR(scratch);
-
- map = i915_gem_object_pin_map_unlocked(scratch, I915_MAP_WC);
- if (IS_ERR(map)) {
- err = PTR_ERR(map);
- goto err_scratch;
- }
-
- intel_gt_pm_get(&eb.i915->gt);
-
- for_each_uabi_engine(eb.engine, eb.i915) {
- if (intel_engine_requires_cmd_parser(eb.engine) ||
- intel_engine_using_cmd_parser(eb.engine))
- continue;
-
- reloc_cache_init(&eb.reloc_cache, eb.i915);
- memset(map, POISON_INUSE, 4096);
-
- intel_engine_pm_get(eb.engine);
- eb.context = intel_context_create(eb.engine);
- if (IS_ERR(eb.context)) {
- err = PTR_ERR(eb.context);
- goto err_pm;
- }
- eb.reloc_pool = NULL;
- eb.reloc_context = NULL;
-
- i915_gem_ww_ctx_init(&eb.ww, false);
-retry:
- err = intel_context_pin_ww(eb.context, &eb.ww);
- if (!err) {
- err = __igt_gpu_reloc(&eb, scratch);
-
- intel_context_unpin(eb.context);
- }
- if (err == -EDEADLK) {
- err = i915_gem_ww_ctx_backoff(&eb.ww);
- if (!err)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&eb.ww);
-
- if (eb.reloc_pool)
- intel_gt_buffer_pool_put(eb.reloc_pool);
- if (eb.reloc_context)
- intel_context_put(eb.reloc_context);
-
- intel_context_put(eb.context);
-err_pm:
- intel_engine_pm_put(eb.engine);
- if (err)
- break;
- }
-
- if (igt_flush_test(eb.i915))
- err = -EIO;
-
- intel_gt_pm_put(&eb.i915->gt);
-err_scratch:
- i915_gem_object_put(scratch);
- return err;
-}
-
-int i915_gem_execbuffer_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_gpu_reloc),
- };
-
- if (intel_gt_is_wedged(&i915->gt))
- return 0;
-
- return i915_live_subtests(tests, i915);
-}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index a2c34e5a1c54..6d30cdfa80f3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -903,7 +903,9 @@ static int __igt_mmap(struct drm_i915_private *i915,
pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
+ mmap_read_lock(current->mm);
area = vma_lookup(current->mm, addr);
+ mmap_read_unlock(current->mm);
if (!area) {
pr_err("%s: Did not create a vm_area_struct for the mmap\n",
obj->mm.region->name);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index fee070df1c97..c0a8ef368044 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -23,6 +23,7 @@ mock_context(struct drm_i915_private *i915,
kref_init(&ctx->ref);
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
mutex_init(&ctx->mutex);
@@ -87,7 +88,7 @@ live_context(struct drm_i915_private *i915, struct file *file)
return ERR_CAST(pc);
ctx = i915_gem_create_context(i915, pc);
- proto_context_close(pc);
+ proto_context_close(i915, pc);
if (IS_ERR(ctx))
return ctx;
@@ -162,7 +163,7 @@ kernel_context(struct drm_i915_private *i915,
}
ctx = i915_gem_create_context(i915, pc);
- proto_context_close(pc);
+ proto_context_close(i915, pc);
if (IS_ERR(ctx))
return ctx;
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.h b/drivers/gpu/drm/i915/gt/debugfs_engines.h
deleted file mode 100644
index f69257eaa1cc..000000000000
--- a/drivers/gpu/drm/i915/gt/debugfs_engines.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef DEBUGFS_ENGINES_H
-#define DEBUGFS_ENGINES_H
-
-struct intel_gt;
-struct dentry;
-
-void debugfs_engines_register(struct intel_gt *gt, struct dentry *root);
-
-#endif /* DEBUGFS_ENGINES_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
deleted file mode 100644
index 591eb60785db..000000000000
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include <linux/debugfs.h>
-
-#include "debugfs_engines.h"
-#include "debugfs_gt.h"
-#include "debugfs_gt_pm.h"
-#include "intel_sseu_debugfs.h"
-#include "uc/intel_uc_debugfs.h"
-#include "i915_drv.h"
-
-void debugfs_gt_register(struct intel_gt *gt)
-{
- struct dentry *root;
-
- if (!gt->i915->drm.primary->debugfs_root)
- return;
-
- root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root);
- if (IS_ERR(root))
- return;
-
- debugfs_engines_register(gt, root);
- debugfs_gt_pm_register(gt, root);
- intel_sseu_debugfs_register(gt, root);
-
- intel_uc_debugfs_register(&gt->uc, root);
-}
-
-void intel_gt_debugfs_register_files(struct dentry *root,
- const struct debugfs_gt_file *files,
- unsigned long count, void *data)
-{
- while (count--) {
- umode_t mode = files->fops->write ? 0644 : 0444;
-
- if (!files->eval || files->eval(data))
- debugfs_create_file(files->name,
- mode, root, data,
- files->fops);
-
- files++;
- }
-}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
deleted file mode 100644
index 4cf5f5c9da7d..000000000000
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef DEBUGFS_GT_PM_H
-#define DEBUGFS_GT_PM_H
-
-struct intel_gt;
-struct dentry;
-
-void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root);
-
-#endif /* DEBUGFS_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 1aee5e6b1b23..890191f286e3 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -429,7 +429,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
mutex_init(&ppgtt->flush);
mutex_init(&ppgtt->pin_mutex);
- ppgtt_init(&ppgtt->base, gt);
+ ppgtt_init(&ppgtt->base, gt, 0);
ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
ppgtt->base.vm.top = 1;
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 6e0e52eeb87a..037a9a6e4889 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -548,6 +548,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
I915_GTT_PAGE_SIZE_2M)))) {
vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
+ clflush_cache_range(vaddr, PAGE_SIZE);
page_size = I915_GTT_PAGE_SIZE_64K;
/*
@@ -568,6 +569,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
+ clflush_cache_range(vaddr, PAGE_SIZE);
}
}
@@ -751,7 +753,8 @@ err_pd:
* space.
*
*/
-struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags)
{
struct i915_ppgtt *ppgtt;
int err;
@@ -760,7 +763,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ppgtt_init(ppgtt, gt);
+ ppgtt_init(ppgtt, gt, lmem_pt_obj_flags);
ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
index b9028c2ad3c7..f541d19264b4 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -12,7 +12,9 @@ struct i915_address_space;
struct intel_gt;
enum i915_cache_level;
-struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags);
+
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 17ca4dc4d0cb..5634d14052bc 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -240,6 +240,8 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
if (err)
goto err_post_unpin;
+ intel_engine_pm_might_get(ce->engine);
+
if (unlikely(intel_context_is_closed(ce))) {
err = -ENOENT;
goto err_unlock;
@@ -395,19 +397,22 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
spin_lock_init(&ce->guc_state.lock);
INIT_LIST_HEAD(&ce->guc_state.fences);
+ INIT_LIST_HEAD(&ce->guc_state.requests);
+
+ ce->guc_id.id = GUC_INVALID_LRC_ID;
+ INIT_LIST_HEAD(&ce->guc_id.link);
- spin_lock_init(&ce->guc_active.lock);
- INIT_LIST_HEAD(&ce->guc_active.requests);
+ INIT_LIST_HEAD(&ce->destroyed_link);
- ce->guc_id = GUC_INVALID_LRC_ID;
- INIT_LIST_HEAD(&ce->guc_id_link);
+ INIT_LIST_HEAD(&ce->parallel.child_list);
/*
* Initialize fence to be complete as this is expected to be complete
* unless there is a pending schedule disable outstanding.
*/
- i915_sw_fence_init(&ce->guc_blocked, sw_fence_dummy_notify);
- i915_sw_fence_commit(&ce->guc_blocked);
+ i915_sw_fence_init(&ce->guc_state.blocked,
+ sw_fence_dummy_notify);
+ i915_sw_fence_commit(&ce->guc_state.blocked);
i915_active_init(&ce->active,
__intel_context_active, __intel_context_retire, 0);
@@ -415,13 +420,20 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
void intel_context_fini(struct intel_context *ce)
{
+ struct intel_context *child, *next;
+
if (ce->timeline)
intel_timeline_put(ce->timeline);
i915_vm_put(ce->vm);
+ /* Need to put the creation ref for the children */
+ if (intel_context_is_parent(ce))
+ for_each_child_safe(ce, child, next)
+ intel_context_put(child);
+
mutex_destroy(&ce->pin_mutex);
i915_active_fini(&ce->active);
- i915_sw_fence_fini(&ce->guc_blocked);
+ i915_sw_fence_fini(&ce->guc_state.blocked);
}
void i915_context_module_exit(void)
@@ -517,24 +529,53 @@ retry:
struct i915_request *intel_context_find_active_request(struct intel_context *ce)
{
+ struct intel_context *parent = intel_context_to_parent(ce);
struct i915_request *rq, *active = NULL;
unsigned long flags;
GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));
- spin_lock_irqsave(&ce->guc_active.lock, flags);
- list_for_each_entry_reverse(rq, &ce->guc_active.requests,
+ /*
+ * We search the parent list to find an active request on the submitted
+ * context. The parent list contains the requests for all the contexts
+ * in the relationship so we have to do a compare of each request's
+ * context.
+ */
+ spin_lock_irqsave(&parent->guc_state.lock, flags);
+ list_for_each_entry_reverse(rq, &parent->guc_state.requests,
sched.link) {
+ if (rq->context != ce)
+ continue;
if (i915_request_completed(rq))
break;
active = rq;
}
- spin_unlock_irqrestore(&ce->guc_active.lock, flags);
+ spin_unlock_irqrestore(&parent->guc_state.lock, flags);
return active;
}
+void intel_context_bind_parent_child(struct intel_context *parent,
+ struct intel_context *child)
+{
+ /*
+ * It is the caller's responsibility to validate that this function is
+ * used correctly, but we use GEM_BUG_ON here to ensure that they do.
+ */
+ GEM_BUG_ON(!intel_engine_uses_guc(parent->engine));
+ GEM_BUG_ON(intel_context_is_pinned(parent));
+ GEM_BUG_ON(intel_context_is_child(parent));
+ GEM_BUG_ON(intel_context_is_pinned(child));
+ GEM_BUG_ON(intel_context_is_child(child));
+ GEM_BUG_ON(intel_context_is_parent(child));
+
+ parent->parallel.child_index = parent->parallel.number_children++;
+ list_add_tail(&child->parallel.child_link,
+ &parent->parallel.child_list);
+ child->parallel.parent = parent;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif
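A hedged sketch of how a submission backend might assemble a parallel context of `width` siblings; create_ctx_for() is a made-up stand-in, while intel_context_bind_parent_child() and the child list come from this patch:

struct intel_context *parent, *child;
int i;

parent = create_ctx_for(engines[0]);
for (i = 1; i < width; i++) {
	child = create_ctx_for(engines[i]);
	intel_context_bind_parent_child(parent, child);
}

/* The parent now owns the creation ref on each child; the refs are
 * dropped again in intel_context_fini() via for_each_child_safe(). */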
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index c41098950746..246c37d72cd7 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -44,6 +44,54 @@ void intel_context_free(struct intel_context *ce);
int intel_context_reconfigure_sseu(struct intel_context *ce,
const struct intel_sseu sseu);
+#define PARENT_SCRATCH_SIZE PAGE_SIZE
+
+static inline bool intel_context_is_child(struct intel_context *ce)
+{
+ return !!ce->parallel.parent;
+}
+
+static inline bool intel_context_is_parent(struct intel_context *ce)
+{
+ return !!ce->parallel.number_children;
+}
+
+static inline bool intel_context_is_pinned(struct intel_context *ce);
+
+static inline struct intel_context *
+intel_context_to_parent(struct intel_context *ce)
+{
+ if (intel_context_is_child(ce)) {
+ /*
+ * The parent holds a reference count on the child, so it is
+ * always safe for the parent to access the child; but the child
+ * has a pointer to the parent without a ref. To ensure this is
+ * safe, the child should only access the parent pointer while
+ * the parent is pinned.
+ */
+ GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
+
+ return ce->parallel.parent;
+ } else {
+ return ce;
+ }
+}
+
+static inline bool intel_context_is_parallel(struct intel_context *ce)
+{
+ return intel_context_is_child(ce) || intel_context_is_parent(ce);
+}
+
+void intel_context_bind_parent_child(struct intel_context *parent,
+ struct intel_context *child);
+
+#define for_each_child(parent, ce)\
+ list_for_each_entry(ce, &(parent)->parallel.child_list,\
+ parallel.child_link)
+#define for_each_child_safe(parent, ce, cn)\
+ list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\
+ parallel.child_link)
+
/**
* intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
* @ce: the context
@@ -193,7 +241,13 @@ intel_context_timeline_lock(struct intel_context *ce)
struct intel_timeline *tl = ce->timeline;
int err;
- err = mutex_lock_interruptible(&tl->mutex);
+ if (intel_context_is_parent(ce))
+ err = mutex_lock_interruptible_nested(&tl->mutex, 0);
+ else if (intel_context_is_child(ce))
+ err = mutex_lock_interruptible_nested(&tl->mutex,
+ ce->parallel.child_index + 1);
+ else
+ err = mutex_lock_interruptible(&tl->mutex);
if (err)
return ERR_PTR(err);
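
The lock nesting above exists because parallel submission may hold the parent's timeline mutex and every child's at once; giving the parent subclass 0 and child N subclass N + 1 stops lockdep from reporting recursive locking of a single class. A minimal sketch of the resulting lock order, with a hypothetical helper:

/* Hypothetical sketch: take every timeline lock in the relationship. */
static void example_lock_relationship(struct intel_context *parent)
{
	struct intel_context *child;

	mutex_lock_nested(&parent->timeline->mutex, 0);
	for_each_child(parent, child)
		mutex_lock_nested(&child->timeline->mutex,
				  child->parallel.child_index + 1);
	/* ... build requests; unlock in the reverse order when done */
}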
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index e54351a170e2..9e0177dc5484 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -55,9 +55,13 @@ struct intel_context_ops {
void (*reset)(struct intel_context *ce);
void (*destroy)(struct kref *kref);
- /* virtual engine/context interface */
+ /* virtual/parallel engine/context interface */
struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
- unsigned int count);
+ unsigned int count,
+ unsigned long flags);
+ struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width);
struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
unsigned int sibling);
};
@@ -112,6 +116,8 @@ struct intel_context {
#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
#define CONTEXT_NOPREEMPT 8
#define CONTEXT_LRCA_DIRTY 9
+#define CONTEXT_GUC_INIT 10
+#define CONTEXT_PERMA_PIN 11
struct {
u64 timeout_us;
@@ -152,52 +158,141 @@ struct intel_context {
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
+ /**
+ * pinned_contexts_link: List link for the engine's pinned contexts.
+ * This is only used if this is a perma-pinned kernel context; the
+ * list is assumed to be manipulated only during driver load or
+ * unload, so there is currently no mutex protection.
+ */
+ struct list_head pinned_contexts_link;
+
u8 wa_bb_page; /* if set, page num reserved for context workarounds */
struct {
- /** lock: protects everything in guc_state */
+ /** @lock: protects everything in guc_state */
spinlock_t lock;
/**
- * sched_state: scheduling state of this context using GuC
+ * @sched_state: scheduling state of this context using GuC
* submission
*/
- u16 sched_state;
+ u32 sched_state;
/*
- * fences: maintains of list of requests that have a submit
- * fence related to GuC submission
+ * @fences: maintains a list of requests that are currently
+ * being fenced until a GuC operation completes
*/
struct list_head fences;
+ /**
+ * @blocked: fence used to signal when the blocking of a
+ * context's submissions is complete.
+ */
+ struct i915_sw_fence blocked;
+ /** @number_committed_requests: number of committed requests */
+ int number_committed_requests;
+ /** @requests: list of active requests on this context */
+ struct list_head requests;
+ /** @prio: the context's current guc priority */
+ u8 prio;
+ /**
+ * @prio_count: a count of the number of requests in flight in
+ * each priority bucket
+ */
+ u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
} guc_state;
struct {
- /** lock: protects everything in guc_active */
- spinlock_t lock;
- /** requests: active requests on this context */
- struct list_head requests;
- } guc_active;
-
- /* GuC scheduling state flags that do not require a lock. */
- atomic_t guc_sched_state_no_lock;
-
- /* GuC LRC descriptor ID */
- u16 guc_id;
+ /**
+ * @id: handle which is used to uniquely identify this context
+ * with the GuC, protected by guc->submission_state.lock
+ */
+ u16 id;
+ /**
+ * @ref: the number of references to the guc_id, when
+ * transitioning in and out of zero protected by
+ * guc->submission_state.lock
+ */
+ atomic_t ref;
+ /**
+ * @link: in guc->guc_id_list when the guc_id has no refs but is
+ * still valid, protected by guc->submission_state.lock
+ */
+ struct list_head link;
+ } guc_id;
- /* GuC LRC descriptor reference count */
- atomic_t guc_id_ref;
+ /**
+ * @destroyed_link: link in guc->submission_state.destroyed_contexts; on
+ * the list when the context is pending destruction (deregistration with
+ * the GuC), protected by guc->submission_state.lock
+ */
+ struct list_head destroyed_link;
- /*
- * GuC ID link - in list when unpinned but guc_id still valid in GuC
+ /** @parallel: sub-structure for parallel submission members */
+ struct {
+ union {
+ /**
+ * @child_list: parent's list of child contexts;
+ * no protection needed as the list is immutable
+ * after context creation
+ */
+ struct list_head child_list;
+ /**
+ * @child_link: child's link into parent's list of
+ * children
+ */
+ struct list_head child_link;
+ };
+ /** @parent: pointer to parent if child */
+ struct intel_context *parent;
+ /**
+ * @last_rq: last request submitted on a parallel context, used
+ * to insert submit fences between requests in the parallel
+ * context
+ */
+ struct i915_request *last_rq;
+ /**
+ * @fence_context: fence context of the composite fence when doing
+ * parallel submission
+ */
+ u64 fence_context;
+ /**
+ * @seqno: seqno for composite fence when doing parallel
+ * submission
+ */
+ u32 seqno;
+ /** @number_children: number of children if parent */
+ u8 number_children;
+ /** @child_index: index into child_list if child */
+ u8 child_index;
+ /** @guc: GuC specific members for parallel submission */
+ struct {
+ /** @wqi_head: head pointer in work queue */
+ u16 wqi_head;
+ /** @wqi_tail: tail pointer in work queue */
+ u16 wqi_tail;
+ /**
+ * @parent_page: page in context state (ce->state) used
+ * by parent for work queue, process descriptor
+ */
+ u8 parent_page;
+ } guc;
+ } parallel;
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ /**
+ * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
*/
- struct list_head guc_id_link;
+ bool drop_schedule_enable;
- /* GuC context blocked fence */
- struct i915_sw_fence guc_blocked;
+ /**
+ * @drop_schedule_disable: Force drop of schedule disable G2H for
+ * selftest
+ */
+ bool drop_schedule_disable;
- /*
- * GuC priority management
+ /**
+ * @drop_deregister: Force drop of deregister G2H for selftest
*/
- u8 guc_prio;
- u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
+ bool drop_deregister;
+#endif
};
#endif /* __INTEL_CONTEXT_TYPES__ */
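
As a reading aid for the guc_id kerneldoc above, here is a hedged sketch of the unpin path those comments imply: guc_id.ref transitions through zero under guc->submission_state.lock, and an unreferenced-but-still-valid id parks on guc->guc_id_list. The helper and exact list naming follow the comments, not code in this patch.

/* Hypothetical sketch of the guc_id locking scheme described above. */
static void example_unpin_guc_id(struct intel_guc *guc,
				 struct intel_context *ce)
{
	unsigned long flags;

	spin_lock_irqsave(&guc->submission_state.lock, flags);
	if (atomic_dec_and_test(&ce->guc_id.ref))
		list_add_tail(&ce->guc_id.link, &guc->guc_id_list);
	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}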
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 87579affb952..08559ace0ada 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -2,6 +2,7 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+#include <asm/cacheflush.h>
#include <drm/drm_util.h>
#include <linux/hashtable.h>
@@ -175,6 +176,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32))
+#define I915_GEM_HWS_PXP 0x60
+#define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
#define I915_HWS_CSB_BUF0_INDEX 0x10
@@ -273,15 +276,25 @@ static inline bool intel_engine_uses_guc(const struct intel_engine_cs *engine)
static inline bool
intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
{
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return false;
return intel_engine_has_preemption(engine);
}
+#define FORCE_VIRTUAL BIT(0)
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count);
+ unsigned int count, unsigned long flags);
+
+static inline struct intel_context *
+intel_engine_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_engines,
+ unsigned int width)
+{
+ GEM_BUG_ON(!engines[0]->cops->create_parallel);
+ return engines[0]->cops->create_parallel(engines, num_engines, width);
+}
static inline bool
intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine)
@@ -300,7 +313,7 @@ intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine)
static inline bool
intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
{
- if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
return false;
if (intel_engine_is_virtual(engine))
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 0d9105a31d84..ff6753ccb129 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -290,7 +290,8 @@ static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
GEM_DEBUG_WARN_ON(iir);
}
-static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
+static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ u8 logical_instance)
{
const struct engine_info *info = &intel_engines[id];
struct drm_i915_private *i915 = gt->i915;
@@ -320,6 +321,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
+ INIT_LIST_HEAD(&engine->pinned_contexts_list);
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
@@ -334,6 +336,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->class = info->class;
engine->instance = info->instance;
+ engine->logical_mask = BIT(logical_instance);
__sprint_engine_name(engine);
engine->props.heartbeat_interval_ms =
@@ -398,7 +401,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
- if (GRAPHICS_VER(i915) >= 9)
+ if (GRAPHICS_VER(i915) >= 9 &&
+ engine->gt->info.sfc_mask & BIT(engine->instance))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
}
@@ -474,18 +478,25 @@ void intel_engines_free(struct intel_gt *gt)
}
static
-bool gen11_vdbox_has_sfc(struct drm_i915_private *i915,
+bool gen11_vdbox_has_sfc(struct intel_gt *gt,
unsigned int physical_vdbox,
unsigned int logical_vdbox, u16 vdbox_mask)
{
+ struct drm_i915_private *i915 = gt->i915;
+
/*
* In Gen11, only even numbered logical VDBOXes are hooked
* up to an SFC (Scaler & Format Converter) unit.
* In Gen12, even numbered physical instances are always connected
* to an SFC. Odd numbered physical instances have an SFC only if
* the previous even instance is fused off.
+ *
+ * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
+ * in the fuse register that tells us whether a specific SFC is present.
*/
- if (GRAPHICS_VER(i915) == 12)
+ if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
+ return false;
+ else if (GRAPHICS_VER(i915) == 12)
return (physical_vdbox % 2 == 0) ||
!(BIT(physical_vdbox - 1) & vdbox_mask);
else if (GRAPHICS_VER(i915) == 11)
@@ -512,7 +523,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
unsigned int logical_vdbox = 0;
unsigned int i;
- u32 media_fuse;
+ u32 media_fuse, fuse1;
u16 vdbox_mask;
u16 vebox_mask;
@@ -534,6 +545,13 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1);
+ gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
+ } else {
+ gt->info.sfc_mask = ~0;
+ }
+
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(gt, _VCS(i))) {
vdbox_mask &= ~BIT(i);
@@ -546,7 +564,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
continue;
}
- if (gen11_vdbox_has_sfc(i915, i, logical_vdbox, vdbox_mask))
+ if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
gt->info.vdbox_sfc_access |= BIT(i);
logical_vdbox++;
}
@@ -572,6 +590,37 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
return info->engine_mask;
}
+static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
+ u8 class, const u8 *map, u8 num_instances)
+{
+ int i, j;
+ u8 current_logical_id = 0;
+
+ for (j = 0; j < num_instances; ++j) {
+ for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
+ if (!HAS_ENGINE(gt, i) ||
+ intel_engines[i].class != class)
+ continue;
+
+ if (intel_engines[i].instance == map[j]) {
+ logical_ids[intel_engines[i].instance] =
+ current_logical_id++;
+ break;
+ }
+ }
+ }
+}
+
+static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
+{
+ int i;
+ u8 map[MAX_ENGINE_INSTANCE + 1];
+
+ for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
+ map[i] = i;
+ populate_logical_ids(gt, logical_ids, class, map, ARRAY_SIZE(map));
+}
+
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
* @gt: pointer to struct intel_gt
@@ -583,7 +632,8 @@ int intel_engines_init_mmio(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
const unsigned int engine_mask = init_engine_mask(gt);
unsigned int mask = 0;
- unsigned int i;
+ unsigned int i, class;
+ u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
int err;
drm_WARN_ON(&i915->drm, engine_mask == 0);
@@ -593,15 +643,23 @@ int intel_engines_init_mmio(struct intel_gt *gt)
if (i915_inject_probe_failure(i915))
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
- if (!HAS_ENGINE(gt, i))
- continue;
+ for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
+ setup_logical_ids(gt, logical_ids, class);
- err = intel_engine_setup(gt, i);
- if (err)
- goto cleanup;
+ for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
+ u8 instance = intel_engines[i].instance;
- mask |= BIT(i);
+ if (intel_engines[i].class != class ||
+ !HAS_ENGINE(gt, i))
+ continue;
+
+ err = intel_engine_setup(gt, i,
+ logical_ids[instance]);
+ if (err)
+ goto cleanup;
+
+ mask |= BIT(i);
+ }
}
/*
@@ -875,6 +933,8 @@ intel_engine_create_pinned_context(struct intel_engine_cs *engine,
return ERR_PTR(err);
}
+ list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);
+
/*
* Give our perma-pinned kernel timelines a separate lockdep class,
* so that we can use them from within the normal user timelines
@@ -897,6 +957,7 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce)
list_del(&ce->timeline->engine_link);
mutex_unlock(&hwsp->vm->mutex);
+ list_del(&ce->pinned_contexts_link);
intel_context_unpin(ce);
intel_context_put(ce);
}
@@ -1163,16 +1224,16 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
u32 mmio_base = engine->mmio_base;
int slice;
int subslice;
+ int iter;
memset(instdone, 0, sizeof(*instdone));
- switch (GRAPHICS_VER(i915)) {
- default:
+ if (GRAPHICS_VER(i915) >= 8) {
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id != RCS0)
- break;
+ return;
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
@@ -1182,21 +1243,39 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
instdone->slice_common_extra[1] =
intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
}
- for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
- instdone->sampler[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_SAMPLER_INSTDONE);
- instdone->row[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_ROW_INSTDONE);
+
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
+ instdone->sampler[slice][subslice] =
+ read_subslice_reg(engine, slice, subslice,
+ GEN7_SAMPLER_INSTDONE);
+ instdone->row[slice][subslice] =
+ read_subslice_reg(engine, slice, subslice,
+ GEN7_ROW_INSTDONE);
+ }
+ } else {
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
+ instdone->sampler[slice][subslice] =
+ read_subslice_reg(engine, slice, subslice,
+ GEN7_SAMPLER_INSTDONE);
+ instdone->row[slice][subslice] =
+ read_subslice_reg(engine, slice, subslice,
+ GEN7_ROW_INSTDONE);
+ }
}
- break;
- case 7:
+
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
+ for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice)
+ instdone->geom_svg[slice][subslice] =
+ read_subslice_reg(engine, slice, subslice,
+ XEHPG_INSTDONE_GEOM_SVG);
+ }
+ } else if (GRAPHICS_VER(i915) >= 7) {
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id != RCS0)
- break;
+ return;
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
@@ -1204,22 +1283,15 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
instdone->row[0][0] =
intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
-
- break;
- case 6:
- case 5:
- case 4:
+ } else if (GRAPHICS_VER(i915) >= 4) {
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id == RCS0)
/* HACK: Using the wrong struct member */
instdone->slice_common =
intel_uncore_read(uncore, GEN4_INSTDONE1);
- break;
- case 3:
- case 2:
+ } else {
instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
- break;
}
}
@@ -1881,16 +1953,16 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count)
+ unsigned int count, unsigned long flags)
{
if (count == 0)
return ERR_PTR(-EINVAL);
- if (count == 1)
+ if (count == 1 && !(flags & FORCE_VIRTUAL))
return intel_context_create(siblings[0]);
GEM_BUG_ON(!siblings[0]->cops->create_virtual);
- return siblings[0]->cops->create_virtual(siblings, count);
+ return siblings[0]->cops->create_virtual(siblings, count, flags);
}
struct i915_request *
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 74775ae961b2..a3698f611f45 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -207,7 +207,7 @@ out:
void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
{
- if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
return;
next_heartbeat(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 1f07ac4e0672..a1334b48dde7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -162,6 +162,19 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
unsigned long flags;
bool result = true;
+ /*
+ * This is execlists-specific behaviour intended to ensure the GPU is
+ * idle by switching to a known 'safe' context. With GuC submission, the
+ * same idle guarantee is achieved by other means (disabling
+ * scheduling). Further, switching to a 'safe' context has no effect
+ * with GuC submission as the scheduler can just switch back again.
+ *
+ * FIXME: Move this backend scheduler specific behaviour into the
+ * scheduler backend.
+ */
+ if (intel_engine_uses_guc(engine))
+ return true;
+
/* GPU is pointing to the void, as good as in the kernel context. */
if (intel_gt_is_wedged(engine->gt))
return true;
@@ -298,6 +311,29 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
intel_engine_init_heartbeat(engine);
}
+/**
+ * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
+ * an engine.
+ * @engine: The engine whose pinned contexts we want to reset.
+ *
+ * Typically the pinned context LMEM images lose their content or have
+ * it corrupted on suspend. This function resets their images.
+ */
+void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ list_for_each_entry(ce, &engine->pinned_contexts_list,
+ pinned_contexts_link) {
+ /* kernel context gets reset at __engine_unpark() */
+ if (ce == engine->kernel_context)
+ continue;
+
+ dbg_poison_ce(ce);
+ ce->ops->reset(ce);
+ }
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 70ea46d6cfb0..d68675925b79 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -6,9 +6,11 @@
#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H
+#include "i915_drv.h"
#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
+#include "intel_gt_pm.h"
static inline bool
intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
@@ -16,6 +18,11 @@ intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
return intel_wakeref_is_active(&engine->wakeref);
}
+static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+ __intel_wakeref_get(&engine->wakeref);
+}
+
static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
{
intel_wakeref_get(&engine->wakeref);
@@ -26,6 +33,21 @@ static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
return intel_wakeref_get_if_active(&engine->wakeref);
}
+static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_is_virtual(engine)) {
+ intel_wakeref_might_get(&engine->wakeref);
+ } else {
+ struct intel_gt *gt = engine->gt;
+ struct intel_engine_cs *tengine;
+ intel_engine_mask_t tmp, mask = engine->mask;
+
+ for_each_engine_masked(tengine, gt, mask, tmp)
+ intel_wakeref_might_get(&tengine->wakeref);
+ }
+ intel_gt_pm_might_get(engine->gt);
+}
+
static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
{
intel_wakeref_put(&engine->wakeref);
@@ -47,6 +69,21 @@ static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
intel_wakeref_unlock_wait(&engine->wakeref);
}
+static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_is_virtual(engine)) {
+ intel_wakeref_might_put(&engine->wakeref);
+ } else {
+ struct intel_gt *gt = engine->gt;
+ struct intel_engine_cs *tengine;
+ intel_engine_mask_t tmp, mask = engine->mask;
+
+ for_each_engine_masked(tengine, gt, mask, tmp)
+ intel_wakeref_might_put(&tengine->wakeref);
+ }
+ intel_gt_pm_might_put(engine->gt);
+}
+
static inline struct i915_request *
intel_engine_create_kernel_request(struct intel_engine_cs *engine)
{
@@ -69,4 +106,6 @@ intel_engine_create_kernel_request(struct intel_engine_cs *engine)
void intel_engine_init__pm(struct intel_engine_cs *engine);
+void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine);
+
#endif /* INTEL_ENGINE_PM_H */
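
The new might_get()/might_put() helpers are pure lockdep annotations: they record the wakeref dependency even on paths that never take it, so a rarely exercised branch still trains lockdep. A hedged sketch with a hypothetical caller:

/* Hypothetical sketch: annotate a conditional engine-PM acquisition. */
static void example_maybe_touch_hw(struct intel_engine_cs *engine,
				   bool need_hw)
{
	intel_engine_pm_might_get(engine);

	if (need_hw) {
		intel_engine_pm_get(engine);
		/* ... program the engine ... */
		intel_engine_pm_put(engine);
	}
}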
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index ed91bcff20eb..e0f773585c29 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -67,8 +67,11 @@ struct intel_instdone {
/* The following exist only in the RCS engine */
u32 slice_common;
u32 slice_common_extra[2];
- u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
- u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+ u32 sampler[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
+ u32 row[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
+
+ /* Added in XeHPG */
+ u32 geom_svg[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
};
/*
@@ -266,6 +269,13 @@ struct intel_engine_cs {
unsigned int guc_id;
intel_engine_mask_t mask;
+ /**
+ * @logical_mask: logical mask of engine, reported to user space via
+ * query IOCTL and used to communicate with the GuC in logical space.
+ * The logical instance of a physical engine can change based on product
+ * and fusing.
+ */
+ intel_engine_mask_t logical_mask;
u8 class;
u8 instance;
@@ -304,6 +314,13 @@ struct intel_engine_cs {
struct intel_context *kernel_context; /* pinned */
+ /**
+ * pinned_contexts_list: List of pinned contexts. This list is
+ * assumed to be manipulated only during driver load or unload time
+ * and therefore has no additional protection.
+ */
+ struct list_head pinned_contexts_list;
+
intel_engine_mask_t saturated; /* submitting semaphores too late? */
struct {
@@ -546,7 +563,7 @@ intel_engine_has_semaphores(const struct intel_engine_cs *engine)
static inline bool
intel_engine_has_timeslices(const struct intel_engine_cs *engine)
{
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return false;
return engine->flags & I915_ENGINE_HAS_TIMESLICES;
@@ -578,4 +595,12 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
(instdone_has_subslice(dev_priv_, sseu_, slice_, \
subslice_)))
+
+#define for_each_instdone_gslice_dss_xehp(dev_priv_, sseu_, iter_, gslice_, dss_) \
+ for ((iter_) = 0, (gslice_) = 0, (dss_) = 0; \
+ (iter_) < GEN_MAX_SUBSLICES; \
+ (iter_)++, (gslice_) = (iter_) / GEN_DSS_PER_GSLICE, \
+ (dss_) = (iter_) % GEN_DSS_PER_GSLICE) \
+ for_each_if(intel_sseu_has_subslice((sseu_), 0, (iter_)))
+
#endif /* __INTEL_ENGINE_TYPES_H__ */
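
The new iterator pairs with the widened instdone arrays above. A hedged sketch of a consumer; the function is hypothetical, while the macro and fields come from this patch:

/* Hypothetical sketch: walk every enabled DSS on a XeHP part. */
static void example_dump_sampler(struct intel_engine_cs *engine,
				 const struct intel_instdone *instdone)
{
	const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
	struct drm_i915_private *i915 = engine->i915;
	int iter, gslice, dss;

	for_each_instdone_gslice_dss_xehp(i915, sseu, iter, gslice, dss)
		pr_info("SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			gslice, dss, instdone->sampler[gslice][dss]);
}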
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index de5f9c86b9a4..bedb80057046 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -201,7 +201,8 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
}
static struct intel_context *
-execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
+execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags);
static struct i915_request *
__active_request(const struct intel_timeline * const tl,
@@ -2140,10 +2141,6 @@ static void __execlists_unhold(struct i915_request *rq)
if (p->flags & I915_DEPENDENCY_WEAK)
continue;
- /* Propagate any change in error status */
- if (rq->fence.error)
- i915_request_set_error_once(w, rq->fence.error);
-
if (w->engine != rq->engine)
continue;
@@ -2565,7 +2562,7 @@ __execlists_context_pre_pin(struct intel_context *ce,
if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags)) {
lrc_init_state(ce, engine, *vaddr);
- __i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size);
+ __i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size);
}
return 0;
@@ -2791,6 +2788,8 @@ static void execlists_sanitize(struct intel_engine_cs *engine)
/* And scrub the dirty cachelines for the HWSP */
clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+
+ intel_engine_reset_pinned_contexts(engine);
}
static void enable_error_interrupt(struct intel_engine_cs *engine)
@@ -3341,7 +3340,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
if (can_preempt(engine)) {
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
- if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (CONFIG_DRM_I915_TIMESLICE_DURATION)
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
}
}
@@ -3786,7 +3785,8 @@ unlock:
}
static struct intel_context *
-execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
+execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags)
{
struct virtual_engine *ve;
unsigned int n;
@@ -3879,6 +3879,7 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
ve->siblings[ve->num_siblings++] = sibling;
ve->base.mask |= sibling->mask;
+ ve->base.logical_mask |= sibling->logical_mask;
/*
* All physical engines must be compatible for their emission
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index de3ac58fceec..f17383e76eb7 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -644,7 +644,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
struct i915_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(ggtt->vm.gt);
+ ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -727,7 +727,6 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
atomic_set(&ggtt->vm.open, 0);
- rcu_barrier(); /* flush the RCU'ed__i915_vm_release */
flush_workqueue(ggtt->vm.i915->wq);
mutex_lock(&ggtt->vm.mutex);
@@ -814,6 +813,21 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
return 0;
}
+static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
+{
+ /*
+ * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
+ * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
+ */
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
+ return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
+}
+
+static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
+{
+ return gen6_gttmmadr_size(i915) / 2;
+}
+
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
@@ -822,8 +836,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
u32 pte_flags;
int ret;
- /* For Modern GENs the PTEs and register space are split in the BAR */
- phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
+ GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
/*
* On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
@@ -910,6 +924,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
size = gen8_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.cleanup = gen6_gmch_remove;
@@ -1373,13 +1388,28 @@ err_st_alloc:
}
static struct scatterlist *
-remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+remap_pages(struct drm_i915_gem_object *obj,
+ unsigned int offset, unsigned int alignment_pad,
unsigned int width, unsigned int height,
unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int row;
+ if (alignment_pad) {
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles; the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, alignment_pad * 4096, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = alignment_pad * 4096;
+ sg = sg_next(sg);
+ }
+
for (row = 0; row < height; row++) {
unsigned int left = width * I915_GTT_PAGE_SIZE;
@@ -1439,6 +1469,7 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
+ unsigned int gtt_offset = 0;
int ret = -ENOMEM;
int i;
@@ -1455,10 +1486,19 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
sg = st->sgl;
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- sg = remap_pages(obj, rem_info->plane[i].offset,
+ unsigned int alignment_pad = 0;
+
+ if (rem_info->plane_alignment)
+ alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;
+
+ sg = remap_pages(obj,
+ rem_info->plane[i].offset, alignment_pad,
rem_info->plane[i].width, rem_info->plane[i].height,
rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
st, sg);
+
+ gtt_offset += alignment_pad +
+ rem_info->plane[i].dst_stride * rem_info->plane[i].height;
}
i915_sg_trim(st);
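
Worked example of the padding math above, with hypothetical numbers: given rem_info->plane_alignment of 256 pages and a running gtt_offset of 1000 pages, ALIGN(1000, 256) = 1024, so alignment_pad is 24 pages; remap_pages() then emits a single 24 * 4096 byte scatterlist entry of padding PTEs ahead of that plane's rows, and gtt_offset advances by the pad plus dst_stride * height.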
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 1c3af0fc0456..f8253012d166 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -28,10 +28,13 @@
#define INSTR_26_TO_24_MASK 0x7000000
#define INSTR_26_TO_24_SHIFT 24
+#define __INSTR(client) ((client) << INSTR_CLIENT_SHIFT)
+
/*
* Memory interface instructions used by the kernel
*/
-#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+#define MI_INSTR(opcode, flags) \
+ (__INSTR(INSTR_MI_CLIENT) | (opcode) << 23 | (flags))
/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
#define MI_GLOBAL_GTT (1<<22)
@@ -57,6 +60,7 @@
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
#define MI_SET_APPID MI_INSTR(0x0e, 0)
+#define MI_SET_APPID_SESSION_ID(x) ((x) << 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
@@ -146,6 +150,7 @@
#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_FLUSH_DW_PROTECTED_MEM_EN (1 << 22)
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
#define MI_FLUSH_DW_OP_STOREDW (1<<14)
@@ -273,6 +278,19 @@
#define MI_MATH_REG_CF 0x33
/*
+ * Media instructions used by the kernel
+ */
+#define MEDIA_INSTR(pipe, op, sub_op, flags) \
+ (__INSTR(INSTR_RC_CLIENT) | (pipe) << INSTR_SUBCLIENT_SHIFT | \
+ (op) << INSTR_26_TO_24_SHIFT | (sub_op) << 16 | (flags))
+
+#define MFX_WAIT MEDIA_INSTR(1, 0, 0, 0)
+#define MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG REG_BIT(8)
+#define MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG REG_BIT(9)
+
+#define CRYPTO_KEY_EXCHANGE MEDIA_INSTR(2, 6, 9, 0)
+
+/*
* Commands used only by the command parser
*/
#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
@@ -328,8 +346,6 @@
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
-#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16))
-
#define COLOR_BLT ((0x2<<29)|(0x40<<22))
#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
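
As a sanity check on the new encoding: assuming the usual definitions earlier in this header (INSTR_CLIENT_SHIFT 29, INSTR_SUBCLIENT_SHIFT 27, INSTR_RC_CLIENT 0x3), MEDIA_INSTR(1, 0, 0, 0) expands to (0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0x0 << 16), which matches the open-coded MFX_WAIT literal removed above.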
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 62d40c986642..1cb1948ac959 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -3,7 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
-#include "debugfs_gt.h"
+#include "intel_gt_debugfs.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
@@ -15,12 +15,13 @@
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
+#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
-#include "intel_pm.h"
#include "shmem_utils.h"
+#include "pxp/intel_pxp.h"
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
@@ -434,7 +435,7 @@ void intel_gt_driver_register(struct intel_gt *gt)
{
intel_rps_driver_register(&gt->rps);
- debugfs_gt_register(gt);
+ intel_gt_debugfs_register(gt);
}
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
@@ -481,7 +482,7 @@ static void intel_gt_fini_scratch(struct intel_gt *gt)
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
- return &i915_ppgtt_create(gt)->vm;
+ return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
else
return i915_vm_get(&gt->ggtt->vm);
}
@@ -660,6 +661,8 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
return err;
+ intel_gt_init_workarounds(gt);
+
/*
* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
@@ -682,6 +685,8 @@ int intel_gt_init(struct intel_gt *gt)
goto err_pm;
}
+ intel_set_mocs_index(gt);
+
err = intel_engines_init(gt);
if (err)
goto err_engines;
@@ -710,6 +715,8 @@ int intel_gt_init(struct intel_gt *gt)
intel_migrate_init(&gt->migrate, gt);
+ intel_pxp_init(&gt->pxp);
+
goto out_fw;
err_gt:
__intel_gt_disable(gt);
@@ -737,6 +744,8 @@ void intel_gt_driver_remove(struct intel_gt *gt)
intel_uc_driver_remove(&gt->uc);
intel_engines_release(gt);
+
+ intel_gt_flush_buffer_pool(gt);
}
void intel_gt_driver_unregister(struct intel_gt *gt)
@@ -745,12 +754,14 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
intel_rps_driver_unregister(&gt->rps);
+ intel_pxp_fini(&gt->pxp);
+
/*
* Upon unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
* resources.
*/
- intel_gt_set_wedged(gt);
+ intel_gt_set_wedged_on_fini(gt);
/* Scrub all HW state upon release */
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
@@ -765,6 +776,7 @@ void intel_gt_driver_release(struct intel_gt *gt)
if (vm) /* FIXME being called twice on error paths :( */
i915_vm_put(vm);
+ intel_wa_list_free(&gt->wa_list);
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
intel_gt_fini_buffer_pool(gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index aa0a59c5b614..acc49c56a9f3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -245,8 +245,6 @@ void intel_gt_fini_buffer_pool(struct intel_gt *gt)
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
int n;
- intel_gt_flush_buffer_pool(gt);
-
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
new file mode 100644
index 000000000000..f103664b71d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "i915_drv.h"
+#include "intel_gt_debugfs.h"
+#include "intel_gt_engines_debugfs.h"
+#include "intel_gt_pm_debugfs.h"
+#include "intel_sseu_debugfs.h"
+#include "pxp/intel_pxp_debugfs.h"
+#include "uc/intel_uc_debugfs.h"
+
+int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val)
+{
+ int ret = intel_gt_terminally_wedged(gt);
+
+ switch (ret) {
+ case -EIO:
+ *val = 1;
+ return 0;
+ case 0:
+ *val = 0;
+ return 0;
+ default:
+ return ret;
+ }
+}
+
+int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
+{
+ /* Flush any previous reset before applying for a new one */
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+
+ intel_gt_handle_error(gt, val, I915_ERROR_CAPTURE,
+ "Manually reset engine mask to %llx", val);
+ return 0;
+}
+
+/*
+ * Keep the exported interface clean (a 'struct intel_gt *' first
+ * parameter instead of 'void *') by adapting via these wrappers.
+ */
+static int __intel_gt_debugfs_reset_show(void *data, u64 *val)
+{
+ return intel_gt_debugfs_reset_show(data, val);
+}
+
+static int __intel_gt_debugfs_reset_store(void *data, u64 val)
+{
+ return intel_gt_debugfs_reset_store(data, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_fops, __intel_gt_debugfs_reset_show,
+ __intel_gt_debugfs_reset_store, "%llu\n");
+
+static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "reset", &reset_fops, NULL },
+ };
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+}
+
+void intel_gt_debugfs_register(struct intel_gt *gt)
+{
+ struct dentry *root;
+
+ if (!gt->i915->drm.primary->debugfs_root)
+ return;
+
+ root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root);
+ if (IS_ERR(root))
+ return;
+
+ gt_debugfs_register(gt, root);
+
+ intel_gt_engines_debugfs_register(gt, root);
+ intel_gt_pm_debugfs_register(gt, root);
+ intel_sseu_debugfs_register(gt, root);
+
+ intel_uc_debugfs_register(&gt->uc, root);
+ intel_pxp_debugfs_register(&gt->pxp, root);
+}
+
+void intel_gt_debugfs_register_files(struct dentry *root,
+ const struct intel_gt_debugfs_file *files,
+ unsigned long count, void *data)
+{
+ while (count--) {
+ umode_t mode = files->fops->write ? 0644 : 0444;
+
+ if (!files->eval || files->eval(data))
+ debugfs_create_file(files->name,
+ mode, root, data,
+ files->fops);
+
+ files++;
+ }
+}
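
A hedged sketch of gating an entry with the eval() hook; this is an illustrative variant of gt_debugfs_register() above, and only the registration helper and reset_fops come from this file:

/* Hypothetical sketch: register "reset" only on GRAPHICS_VER >= 6. */
static bool example_eval(void *data)
{
	struct intel_gt *gt = data;

	return GRAPHICS_VER(gt->i915) >= 6;
}

static void example_register(struct intel_gt *gt, struct dentry *root)
{
	static const struct intel_gt_debugfs_file files[] = {
		{ "reset", &reset_fops, example_eval },
	};

	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}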
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
index f77540f727e9..e307ceb99031 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
@@ -3,14 +3,14 @@
* Copyright © 2019 Intel Corporation
*/
-#ifndef DEBUGFS_GT_H
-#define DEBUGFS_GT_H
+#ifndef INTEL_GT_DEBUGFS_H
+#define INTEL_GT_DEBUGFS_H
#include <linux/file.h>
struct intel_gt;
-#define DEFINE_GT_DEBUGFS_ATTRIBUTE(__name) \
+#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __name ## _show, inode->i_private); \
@@ -23,16 +23,20 @@ static const struct file_operations __name ## _fops = { \
.release = single_release, \
}
-void debugfs_gt_register(struct intel_gt *gt);
+void intel_gt_debugfs_register(struct intel_gt *gt);
-struct debugfs_gt_file {
+struct intel_gt_debugfs_file {
const char *name;
const struct file_operations *fops;
bool (*eval)(void *data);
};
void intel_gt_debugfs_register_files(struct dentry *root,
- const struct debugfs_gt_file *files,
+ const struct intel_gt_debugfs_file *files,
unsigned long count, void *data);
-#endif /* DEBUGFS_GT_H */
+/* functions that need to be accessed by the upper level non-gt interfaces */
+int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val);
+int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val);
+
+#endif /* INTEL_GT_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
index 5e3725e62241..8f9b874fdc9c 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
@@ -6,10 +6,10 @@
#include <drm/drm_print.h>
-#include "debugfs_engines.h"
-#include "debugfs_gt.h"
#include "i915_drv.h" /* for_each_engine! */
#include "intel_engine.h"
+#include "intel_gt_debugfs.h"
+#include "intel_gt_engines_debugfs.h"
static int engines_show(struct seq_file *m, void *data)
{
@@ -24,11 +24,11 @@ static int engines_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(engines);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(engines);
-void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
+void intel_gt_engines_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "engines", &engines_fops },
};
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h
new file mode 100644
index 000000000000..dda113452da9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_ENGINES_DEBUGFS_H
+#define INTEL_GT_ENGINES_DEBUGFS_H
+
+struct intel_gt;
+struct dentry;
+
+void intel_gt_engines_debugfs_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* INTEL_GT_ENGINES_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index b2de83be4d97..699a74582d32 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -13,6 +13,7 @@
#include "intel_lrc_reg.h"
#include "intel_uncore.h"
#include "intel_rps.h"
+#include "pxp/intel_pxp_irq.h"
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
@@ -64,6 +65,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(&gt->rps, iir);
+ if (instance == OTHER_KCR_INSTANCE)
+ return intel_pxp_irq_handler(&gt->pxp, iir);
+
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
}
@@ -196,6 +200,9 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~0);
}
void gen11_gt_irq_postinstall(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index dea8e2479897..524eaf678790 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -18,6 +18,9 @@
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
+#include "pxp/intel_pxp_pm.h"
+
+#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
@@ -262,6 +265,8 @@ int intel_gt_resume(struct intel_gt *gt)
intel_uc_resume(&gt->uc);
+ intel_pxp_resume(&gt->pxp);
+
user_forcewake(gt, false);
out_fw:
@@ -279,7 +284,7 @@ static void wait_for_suspend(struct intel_gt *gt)
if (!intel_gt_pm_is_awake(gt))
return;
- if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ if (intel_gt_wait_for_idle(gt, I915_GT_SUSPEND_IDLE_TIMEOUT) == -ETIME) {
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
@@ -296,7 +301,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)
user_forcewake(gt, true);
wait_for_suspend(gt);
- intel_uc_suspend(&gt->uc);
+ intel_pxp_suspend(&gt->pxp, false);
}
static suspend_state_t pm_suspend_target(void)
@@ -320,6 +325,8 @@ void intel_gt_suspend_late(struct intel_gt *gt)
GEM_BUG_ON(gt->awake);
+ intel_uc_suspend(&gt->uc);
+
/*
* On disabling the device, we want to turn off HW access to memory
* that we no longer own.
@@ -346,6 +353,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
+ intel_pxp_suspend(&gt->pxp, true);
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@@ -353,11 +361,19 @@ void intel_gt_runtime_suspend(struct intel_gt *gt)
int intel_gt_runtime_resume(struct intel_gt *gt)
{
+ int ret;
+
GT_TRACE(gt, "\n");
intel_gt_init_swizzling(gt);
intel_ggtt_restore_fences(gt->ggtt);
- return intel_uc_runtime_resume(&gt->uc);
+ ret = intel_uc_runtime_resume(&gt->uc);
+ if (ret)
+ return ret;
+
+ intel_pxp_resume(&gt->pxp);
+
+ return 0;
}
static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index d0588d8aaa44..bc898df7a48c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -31,6 +31,11 @@ static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
return intel_wakeref_get_if_active(&gt->wakeref);
}
+static inline void intel_gt_pm_might_get(struct intel_gt *gt)
+{
+ intel_wakeref_might_get(&gt->wakeref);
+}
+
static inline void intel_gt_pm_put(struct intel_gt *gt)
{
intel_wakeref_put(&gt->wakeref);
@@ -41,6 +46,15 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt)
intel_wakeref_put_async(&gt->wakeref);
}
+static inline void intel_gt_pm_might_put(struct intel_gt *gt)
+{
+ intel_wakeref_might_put(&gt->wakeref);
+}
+
+#define with_intel_gt_pm(gt, tmp) \
+ for (tmp = 1, intel_gt_pm_get(gt); tmp; \
+ intel_gt_pm_put(gt), tmp = 0)
+
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
return intel_wakeref_wait_for_idle(&gt->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index d6f5836396f8..404dfa7673c6 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -6,18 +6,59 @@
#include <linux/seq_file.h>
-#include "debugfs_gt.h"
-#include "debugfs_gt_pm.h"
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_debugfs.h"
#include "intel_gt_pm.h"
+#include "intel_gt_pm_debugfs.h"
#include "intel_llc.h"
+#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_runtime_pm.h"
-#include "intel_sideband.h"
#include "intel_uncore.h"
+#include "vlv_sideband.h"
+
+int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
+{
+ atomic_inc(&gt->user_wakeref);
+ intel_gt_pm_get(gt);
+ if (GRAPHICS_VER(gt->i915) >= 6)
+ intel_uncore_forcewake_user_get(gt->uncore);
+
+ return 0;
+}
+
+int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
+{
+ if (GRAPHICS_VER(gt->i915) >= 6)
+ intel_uncore_forcewake_user_put(gt->uncore);
+ intel_gt_pm_put(gt);
+ atomic_dec(&gt->user_wakeref);
+
+ return 0;
+}
+
+static int forcewake_user_open(struct inode *inode, struct file *file)
+{
+ struct intel_gt *gt = inode->i_private;
+
+ return intel_gt_pm_debugfs_forcewake_user_open(gt);
+}
+
+static int forcewake_user_release(struct inode *inode, struct file *file)
+{
+ struct intel_gt *gt = inode->i_private;
+
+ return intel_gt_pm_debugfs_forcewake_user_release(gt);
+}
+
+static const struct file_operations forcewake_user_fops = {
+ .owner = THIS_MODULE,
+ .open = forcewake_user_open,
+ .release = forcewake_user_release,
+};
static int fw_domains_show(struct seq_file *m, void *data)
{
@@ -36,7 +77,7 @@ static int fw_domains_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(fw_domains);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains);
static void print_rc6_res(struct seq_file *m,
const char *title,
@@ -238,11 +279,10 @@ static int drpc_show(struct seq_file *m, void *unused)
return err;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(drpc);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(drpc);
-static int frequency_show(struct seq_file *m, void *unused)
+void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
{
- struct intel_gt *gt = m->private;
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_rps *rps = &gt->rps;
@@ -254,21 +294,21 @@ static int frequency_show(struct seq_file *m, void *unused)
u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
- seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
- seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
- seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ drm_printf(p, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ drm_printf(p, "Requested VID: %d\n", rgvswctl & 0x3f);
+ drm_printf(p, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
MEMSTAT_VID_SHIFT);
- seq_printf(m, "Current P-state: %d\n",
+ drm_printf(p, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
u32 rpmodectl, freq_sts;
rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
- seq_printf(m, "Video Turbo Mode: %s\n",
+ drm_printf(p, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
+ drm_printf(p, "HW control enabled: %s\n",
yesno(rpmodectl & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
+ drm_printf(p, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
@@ -276,25 +316,25 @@ static int frequency_show(struct seq_file *m, void *unused)
freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
- seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
- seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq);
+ drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
+ drm_printf(p, "DDR freq: %d MHz\n", i915->mem_freq);
- seq_printf(m, "actual GPU freq: %d MHz\n",
+ drm_printf(p, "actual GPU freq: %d MHz\n",
intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
- seq_printf(m, "current GPU freq: %d MHz\n",
+ drm_printf(p, "current GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->cur_freq));
- seq_printf(m, "max GPU freq: %d MHz\n",
+ drm_printf(p, "max GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->max_freq));
- seq_printf(m, "min GPU freq: %d MHz\n",
+ drm_printf(p, "min GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->min_freq));
- seq_printf(m, "idle GPU freq: %d MHz\n",
+ drm_printf(p, "idle GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->idle_freq));
- seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+ drm_printf(p, "efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(rps, rps->efficient_freq));
} else if (GRAPHICS_VER(i915) >= 6) {
u32 rp_state_limits;
@@ -309,13 +349,11 @@ static int frequency_show(struct seq_file *m, void *unused)
int max_freq;
rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
- if (IS_GEN9_LP(i915)) {
- rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+ rp_state_cap = intel_rps_read_state_cap(rps);
+ if (IS_GEN9_LP(i915))
gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
- } else {
- rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+ else
gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);
- }
/* RPSTAT1 is in the GT power well */
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
@@ -376,113 +414,121 @@ static int frequency_show(struct seq_file *m, void *unused)
}
pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);
- seq_printf(m, "Video Turbo Mode: %s\n",
+ drm_printf(p, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
+ drm_printf(p, "HW control enabled: %s\n",
yesno(rpmodectl & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
+ drm_printf(p, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
- seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_mask);
if (GRAPHICS_VER(i915) <= 10)
- seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+ drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n",
pm_isr, pm_iir);
- seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
+ drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
- seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
- seq_printf(m, "Render p-state ratio: %d\n",
+ drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ drm_printf(p, "Render p-state ratio: %d\n",
(gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
- seq_printf(m, "Render p-state VID: %d\n",
+ drm_printf(p, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
- seq_printf(m, "Render p-state limit: %d\n",
+ drm_printf(p, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
- seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
- seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
- seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
- seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
- seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
- seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
+ drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat);
+ drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl);
+ drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+ drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
+ drm_printf(p, "RPNSWREQ: %dMHz\n", reqf);
+ drm_printf(p, "CAGF: %dMHz\n", cagf);
+ drm_printf(p, "RP CUR UP EI: %d (%lldns)\n",
rpcurupei,
intel_gt_pm_interval_to_ns(gt, rpcurupei));
- seq_printf(m, "RP CUR UP: %d (%lldns)\n",
+ drm_printf(p, "RP CUR UP: %d (%lldns)\n",
rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%lldns)\n",
+ drm_printf(p, "RP PREV UP: %d (%lldns)\n",
rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
- seq_printf(m, "Up threshold: %d%%\n",
+ drm_printf(p, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP UP EI: %d (%lldns)\n",
+ drm_printf(p, "RP UP EI: %d (%lldns)\n",
rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
- seq_printf(m, "RP UP THRESHOLD: %d (%lldns)\n",
+ drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n",
rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
- seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
+ drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n",
rpcurdownei,
intel_gt_pm_interval_to_ns(gt, rpcurdownei));
- seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
+ drm_printf(p, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(gt, rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
+ drm_printf(p, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(gt, rpprevdown));
- seq_printf(m, "Down threshold: %d%%\n",
+ drm_printf(p, "Down threshold: %d%%\n",
rps->power.down_threshold);
- seq_printf(m, "RP DOWN EI: %d (%lldns)\n",
+ drm_printf(p, "RP DOWN EI: %d (%lldns)\n",
rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
- seq_printf(m, "RP DOWN THRESHOLD: %d (%lldns)\n",
+ drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n",
rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
max_freq *= (IS_GEN9_BC(i915) ||
GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_GEN9_BC(i915) ||
GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_GEN9_BC(i915) ||
GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
- seq_printf(m, "Max overclocked frequency: %dMHz\n",
+ drm_printf(p, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(rps, rps->max_freq));
- seq_printf(m, "Current freq: %d MHz\n",
+ drm_printf(p, "Current freq: %d MHz\n",
intel_gpu_freq(rps, rps->cur_freq));
- seq_printf(m, "Actual freq: %d MHz\n", cagf);
- seq_printf(m, "Idle freq: %d MHz\n",
+ drm_printf(p, "Actual freq: %d MHz\n", cagf);
+ drm_printf(p, "Idle freq: %d MHz\n",
intel_gpu_freq(rps, rps->idle_freq));
- seq_printf(m, "Min freq: %d MHz\n",
+ drm_printf(p, "Min freq: %d MHz\n",
intel_gpu_freq(rps, rps->min_freq));
- seq_printf(m, "Boost freq: %d MHz\n",
+ drm_printf(p, "Boost freq: %d MHz\n",
intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Max freq: %d MHz\n",
+ drm_printf(p, "Max freq: %d MHz\n",
intel_gpu_freq(rps, rps->max_freq));
- seq_printf(m,
+ drm_printf(p,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(rps, rps->efficient_freq));
} else {
- seq_puts(m, "no P-state info available\n");
+ drm_puts(p, "no P-state info available\n");
}
- seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
- seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
- seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+ drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
+ drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
+ drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
intel_runtime_pm_put(uncore->rpm, wakeref);
+}
+
+static int frequency_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_gt_pm_frequency_dump(gt, &p);
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(frequency);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(frequency);
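With the dump logic split out behind a struct drm_printer, any caller that can construct a printer can reuse it outside debugfs. A minimal sketch of a hypothetical caller (not part of this patch; it assumes the intel_gt_pm_debugfs.h header added below), dumping the same state to the kernel log:

#include <drm/drm_print.h>

#include "intel_gt_pm_debugfs.h"

static void example_log_frequency_state(struct intel_gt *gt)
{
	/* drm_info_printer() routes drm_printf() output to dev_info() */
	struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

	intel_gt_pm_frequency_dump(gt, &p);
}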
static int llc_show(struct seq_file *m, void *data)
{
@@ -535,7 +581,7 @@ static bool llc_eval(void *data)
return HAS_LLC(gt->i915);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(llc);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(llc);
static const char *rps_power_to_str(unsigned int power)
{
@@ -614,14 +660,15 @@ static bool rps_eval(void *data)
return HAS_RPS(gt->i915);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(rps_boost);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost);
-void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
+void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "drpc", &drpc_fops, NULL },
{ "frequency", &frequency_fops, NULL },
{ "forcewake", &fw_domains_fops, NULL },
+ { "forcewake_user", &forcewake_user_fops, NULL},
{ "llc", &llc_fops, llc_eval },
{ "rps_boost", &rps_boost_fops, rps_eval },
};
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
new file mode 100644
index 000000000000..a8457887ec65
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_DEBUGFS_H
+#define INTEL_GT_PM_DEBUGFS_H
+
+struct intel_gt;
+struct dentry;
+struct drm_printer;
+
+void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root);
+void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p);
+
+/* functions that need to be accessed by the upper level non-gt interfaces */
+int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt);
+int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt);
+
+#endif /* INTEL_GT_PM_DEBUGFS_H */
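The two forcewake_user prototypes are exported so that top-level (non-gt) debugfs code can forward a user-facing forcewake file to this implementation. A hedged sketch of such a forwarder (function and field names are illustrative, not shown in this patch):

static int example_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	return intel_gt_pm_debugfs_forcewake_user_open(&i915->gt);
}

static int example_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	return intel_gt_pm_debugfs_forcewake_user_release(&i915->gt);
}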
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index a81e21bf1bd1..14216cc471b1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -26,6 +26,7 @@
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"
+#include "pxp/intel_pxp_types.h"
struct drm_i915_private;
struct i915_ggtt;
@@ -72,6 +73,8 @@ struct intel_gt {
struct intel_uc uc;
+ struct i915_wa_list wa_list;
+
struct intel_gt_timelines {
spinlock_t lock; /* protects active_list */
struct list_head active_list;
@@ -184,6 +187,9 @@ struct intel_gt {
u8 num_engines;
+ /* General presence of SFC units */
+ u8 sfc_mask;
+
/* Media engine access to SFC per instance */
u8 vdbox_sfc_access;
@@ -192,6 +198,12 @@ struct intel_gt {
unsigned long mslice_mask;
} info;
+
+ struct {
+ u8 uc_index;
+ } mocs;
+
+ struct intel_pxp pxp;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index e137dd32b5b8..67d14afa6623 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -28,7 +28,8 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
* used the passed in size for the page size, which should ensure it
* also has the same alignment.
*/
- obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz, 0);
+ obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
+ vm->lmem_pt_obj_flags);
/*
* Ensure all paging structures for this vm share the same dma-resv
* object underneath, with the idea that one object_lock() will lock
@@ -155,7 +156,7 @@ void i915_vm_resv_release(struct kref *kref)
static void __i915_vm_release(struct work_struct *work)
{
struct i915_address_space *vm =
- container_of(work, struct i915_address_space, rcu.work);
+ container_of(work, struct i915_address_space, release_work);
vm->cleanup(vm);
i915_address_space_fini(vm);
@@ -171,7 +172,7 @@ void i915_vm_release(struct kref *kref)
GEM_BUG_ON(i915_is_ggtt(vm));
trace_i915_ppgtt_release(vm);
- queue_rcu_work(vm->i915->wq, &vm->rcu);
+ queue_work(vm->i915->wq, &vm->release_work);
}
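The two hunks above replace the RCU-deferred release with a plain work item, so vm teardown no longer waits for an RCU grace period before running. The pattern in isolation, as a generic sketch with illustrative names (not i915 code):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_vm {
	struct kref ref;
	struct work_struct release_work;
};

static void example_release_worker(struct work_struct *work)
{
	struct example_vm *vm =
		container_of(work, struct example_vm, release_work);

	kfree(vm);	/* the actual cleanup runs here */
}

static void example_init(struct example_vm *vm)
{
	kref_init(&vm->ref);
	/* was INIT_RCU_WORK(&vm->rcu, ...) before this change */
	INIT_WORK(&vm->release_work, example_release_worker);
}

static void example_release(struct kref *kref)
{
	struct example_vm *vm = container_of(kref, struct example_vm, ref);

	/* was queue_rcu_work(wq, &vm->rcu) before this change */
	queue_work(system_wq, &vm->release_work);
}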
void i915_address_space_init(struct i915_address_space *vm, int subclass)
@@ -185,7 +186,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
if (!kref_read(&vm->resv_ref))
kref_init(&vm->resv_ref);
- INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
+ INIT_WORK(&vm->release_work, __i915_vm_release);
atomic_set(&vm->open, 1);
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index bc7153018ebd..bc6750263359 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -213,7 +213,7 @@ struct i915_vma_ops {
struct i915_address_space {
struct kref ref;
- struct rcu_work rcu;
+ struct work_struct release_work;
struct drm_mm mm;
struct intel_gt *gt;
@@ -260,6 +260,9 @@ struct i915_address_space {
u8 pd_shift;
u8 scratch_order;
+ /* Flags used when creating page-table objects for this vm */
+ unsigned long lmem_pt_obj_flags;
+
struct drm_i915_gem_object *
(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
@@ -519,7 +522,8 @@ i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}
-void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags);
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
@@ -537,7 +541,8 @@ static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
int i915_ppgtt_init_hw(struct intel_gt *gt);
-struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index eb1a15deed22..08d7d5ae263a 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -3,12 +3,13 @@
* Copyright © 2019 Intel Corporation
*/
+#include <asm/tsc.h>
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_llc.h"
-#include "intel_sideband.h"
+#include "intel_pcode.h"
struct ia_constants {
unsigned int min_gpu_freq;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index bb4af4977920..56156cf18c41 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -226,6 +226,40 @@ static const u8 gen12_xcs_offsets[] = {
END
};
+static const u8 dg2_xcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+ REG(0x120),
+ REG(0x124),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ END
+};
+
static const u8 gen8_rcs_offsets[] = {
NOP(1),
LRI(14, POSTED),
@@ -525,6 +559,49 @@ static const u8 xehp_rcs_offsets[] = {
END
};
+static const u8 dg2_rcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+ REG(0x120),
+ REG(0x124),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END
+};
+
#undef END
#undef REG16
#undef REG
@@ -543,7 +620,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
!intel_engine_has_relative_mmio(engine));
if (engine->class == RENDER_CLASS) {
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ return dg2_rcs_offsets;
+ else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
return xehp_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_rcs_offsets;
@@ -554,7 +633,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
else
return gen8_rcs_offsets;
} else {
- if (GRAPHICS_VER(engine->i915) >= 12)
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ return dg2_xcs_offsets;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_xcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 9)
return gen9_xcs_offsets;
@@ -861,7 +942,13 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
context_size += PAGE_SIZE;
}
- obj = i915_gem_object_create_lmem(engine->i915, context_size, 0);
+ if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
+ ce->parallel.guc.parent_page = context_size / PAGE_SIZE;
+ context_size += PARENT_SCRATCH_SIZE;
+ }
+
+ obj = i915_gem_object_create_lmem(engine->i915, context_size,
+ I915_BO_ALLOC_PM_VOLATILE);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 1dac21aa7e5c..afb1cce9a352 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -78,7 +78,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
* TODO: Add support for huge LMEM PTEs
*/
- vm = i915_ppgtt_create(gt);
+ vm = i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY);
if (IS_ERR(vm))
return ERR_CAST(vm);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 582c4423b95d..15f9ada28a7a 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -22,6 +22,8 @@ struct drm_i915_mocs_table {
unsigned int size;
unsigned int n_entries;
const struct drm_i915_mocs_entry *table;
+ u8 uc_index;
+ u8 unused_entries_index;
};
/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
@@ -40,6 +42,8 @@ struct drm_i915_mocs_table {
#define L3_ESC(value) ((value) << 0)
#define L3_SCC(value) ((value) << 1)
#define _L3_CACHEABILITY(value) ((value) << 4)
+#define L3_GLBGO(value) ((value) << 6)
+#define L3_LKUP(value) ((value) << 7)
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
@@ -88,18 +92,25 @@ struct drm_i915_mocs_table {
*
* Entries not part of the following tables are undefined as far as
* userspace is concerned and shouldn't be relied upon. For Gen < 12
- * they will be initialized to PTE. Gen >= 12 onwards don't have a setting for
- * PTE and will be initialized to an invalid value.
+ * they will be initialized to PTE. Gen >= 12 has no PTE setting, and
+ * on those platforms (except TGL/RKL) unused entries will be initialized
+ * to L3 WB to catch accidental use of reserved and unused mocs indexes.
*
* The last few entries are reserved by the hardware. For ICL+ they
* should be initialized according to bspec and never used, for older
* platforms they should never be written to.
*
- * NOTE: These tables are part of bspec and defined as part of hardware
+ * NOTE1: These tables are part of bspec and defined as part of hardware
* interface for ICL+. For older platforms, they are part of kernel
* ABI. It is expected that, for specific hardware platform, existing
* entries will remain constant and the table will only be updated by
* adding new entries, filling unused positions.
+ *
+ * NOTE2: For GEN >= 12 except TGL and RKL, reserved and unspecified MOCS
+ * indices have been set to L3 WB. These reserved entries should never
+ * be used; they may be changed to lower-performance variants with better
+ * coherency in the future if more entries are needed.
+ * For TGL/RKL, all the unspecified MOCS indexes are mapped to L3 UC.
*/
#define GEN9_MOCS_ENTRIES \
MOCS_ENTRY(I915_MOCS_UNCACHED, \
@@ -282,17 +293,9 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
};
static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
- /* Error */
- MOCS_ENTRY(0, 0, L3_0_DIRECT),
/* UC */
MOCS_ENTRY(1, 0, L3_1_UC),
-
- /* Reserved */
- MOCS_ENTRY(2, 0, L3_0_DIRECT),
- MOCS_ENTRY(3, 0, L3_0_DIRECT),
- MOCS_ENTRY(4, 0, L3_0_DIRECT),
-
/* WB - L3 */
MOCS_ENTRY(5, 0, L3_3_WB),
/* WB - L3 50% */
@@ -314,6 +317,83 @@ static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
MOCS_ENTRY(63, 0, L3_1_UC),
};
+static const struct drm_i915_mocs_entry gen12_mocs_table[] = {
+ GEN11_MOCS_ENTRIES,
+ /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+ MOCS_ENTRY(48,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + L3 */
+ MOCS_ENTRY(49,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + LLC */
+ MOCS_ENTRY(50,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* Implicitly enable L1 - HDC:L1 */
+ MOCS_ENTRY(51,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* HW Special Case (CCS) */
+ MOCS_ENTRY(60,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* HW Special Case (Displayable) */
+ MOCS_ENTRY(61,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+};
+
+static const struct drm_i915_mocs_entry xehpsdv_mocs_table[] = {
+ /* wa_1608975824 */
+ MOCS_ENTRY(0, 0, L3_3_WB | L3_LKUP(1)),
+
+ /* UC - Coherent; GO:L3 */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(3, 0, L3_1_UC | L3_GLBGO(1)),
+ /* UC - Non-Coherent; GO:L3 */
+ MOCS_ENTRY(4, 0, L3_1_UC),
+
+ /* WB */
+ MOCS_ENTRY(5, 0, L3_3_WB | L3_LKUP(1)),
+
+ /* HW Reserved - SW program but never use. */
+ MOCS_ENTRY(48, 0, L3_3_WB | L3_LKUP(1)),
+ MOCS_ENTRY(49, 0, L3_1_UC | L3_LKUP(1)),
+ MOCS_ENTRY(60, 0, L3_1_UC),
+ MOCS_ENTRY(61, 0, L3_1_UC),
+ MOCS_ENTRY(62, 0, L3_1_UC),
+ MOCS_ENTRY(63, 0, L3_1_UC),
+};
+
+static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
+ /* UC - Coherent; GO:L3 */
+ MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
+
+ /* WB - LC */
+ MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
+};
+
+static const struct drm_i915_mocs_entry dg2_mocs_table_g10_ax[] = {
+ /* Wa_14011441408: Set Go to Memory for MOCS#0 */
+ MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
+
+ /* WB - LC */
+ MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
+};
+
enum {
HAS_GLOBAL_MOCS = BIT(0),
HAS_ENGINE_MOCS = BIT(1),
@@ -340,14 +420,45 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
{
unsigned int flags;
- if (IS_DG1(i915)) {
+ memset(table, 0, sizeof(*table));
+
+ table->unused_entries_index = I915_MOCS_PTE;
+ if (IS_DG2(i915)) {
+ if (IS_DG2_GT_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax);
+ table->table = dg2_mocs_table_g10_ax;
+ } else {
+ table->size = ARRAY_SIZE(dg2_mocs_table);
+ table->table = dg2_mocs_table;
+ }
+ table->uc_index = 1;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->unused_entries_index = 3;
+ } else if (IS_XEHPSDV(i915)) {
+ table->size = ARRAY_SIZE(xehpsdv_mocs_table);
+ table->table = xehpsdv_mocs_table;
+ table->uc_index = 2;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->unused_entries_index = 5;
+ } else if (IS_DG1(i915)) {
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
+ table->uc_index = 1;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
- } else if (GRAPHICS_VER(i915) >= 12) {
+ table->unused_entries_index = 5;
+ } else if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
+ /* For TGL/RKL, the table can't be changed now for ABI reasons */
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->uc_index = 3;
+ } else if (GRAPHICS_VER(i915) >= 12) {
+ table->size = ARRAY_SIZE(gen12_mocs_table);
+ table->table = gen12_mocs_table;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->uc_index = 3;
+ table->unused_entries_index = 2;
} else if (GRAPHICS_VER(i915) == 11) {
table->size = ARRAY_SIZE(icl_mocs_table);
table->table = icl_mocs_table;
@@ -393,16 +504,16 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
}
/*
- * Get control_value from MOCS entry taking into account when it's not used:
- * I915_MOCS_PTE's value is returned in this case.
+ * Get control_value from the MOCS entry. If the entry is not used, the
+ * value at unused_entries_index is returned instead; that index defaults
+ * to I915_MOCS_PTE for platforms that don't override it.
*/
static u32 get_entry_control(const struct drm_i915_mocs_table *table,
unsigned int index)
{
if (index < table->size && table->table[index].used)
return table->table[index].control_value;
-
- return table->table[I915_MOCS_PTE].control_value;
+ return table->table[table->unused_entries_index].control_value;
}
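Concretely, with the DG2 settings from get_mocs_settings() above (unused_entries_index = 3 and a 4-entry table), any unused or out-of-range lookup now resolves to the table's WB entry rather than I915_MOCS_PTE. An illustrative call, assuming a table already populated for DG2:

/* index 7 is beyond dg2_mocs_table's 4 entries, so this returns
 * dg2_mocs_table[3].control_value (the "WB - LC" entry): */
u32 ctrl = get_entry_control(&table, 7);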
#define for_each_mocs(mocs, t, i) \
@@ -417,6 +528,8 @@ static void __init_mocs_table(struct intel_uncore *uncore,
unsigned int i;
u32 mocs;
+ drm_WARN_ONCE(&uncore->i915->drm, !table->unused_entries_index,
+ "Unused entries index should have been defined\n");
for_each_mocs(mocs, table, i)
intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs);
}
@@ -443,16 +556,16 @@ static void init_mocs_table(struct intel_engine_cs *engine,
}
/*
- * Get l3cc_value from MOCS entry taking into account when it's not used:
- * I915_MOCS_PTE's value is returned in this case.
+ * Get l3cc_value from the MOCS entry. If the entry is not used, the
+ * value at unused_entries_index is returned instead; that index defaults
+ * to I915_MOCS_PTE for platforms that don't override it.
*/
static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
unsigned int index)
{
if (index < table->size && table->table[index].used)
return table->table[index].l3cc_value;
-
- return table->table[I915_MOCS_PTE].l3cc_value;
+ return table->table[table->unused_entries_index].l3cc_value;
}
static u32 l3cc_combine(u16 low, u16 high)
@@ -468,10 +581,9 @@ static u32 l3cc_combine(u16 low, u16 high)
0; \
i++)
-static void init_l3cc_table(struct intel_engine_cs *engine,
+static void init_l3cc_table(struct intel_uncore *uncore,
const struct drm_i915_mocs_table *table)
{
- struct intel_uncore *uncore = engine->uncore;
unsigned int i;
u32 l3cc;
@@ -496,7 +608,7 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
init_mocs_table(engine, &table);
if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS)
- init_l3cc_table(engine, &table);
+ init_l3cc_table(engine->uncore, &table);
}
static u32 global_mocs_offset(void)
@@ -504,6 +616,14 @@ static u32 global_mocs_offset(void)
return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
}
+void intel_set_mocs_index(struct intel_gt *gt)
+{
+ struct drm_i915_mocs_table table;
+
+ get_mocs_settings(gt->i915, &table);
+ gt->mocs.uc_index = table.uc_index;
+}
+
void intel_mocs_init(struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
@@ -515,6 +635,14 @@ void intel_mocs_init(struct intel_gt *gt)
flags = get_mocs_settings(gt->i915, &table);
if (flags & HAS_GLOBAL_MOCS)
__init_mocs_table(gt->uncore, &table, global_mocs_offset());
+
+ /*
+ * Initialize the L3CC table as part of MOCS initialization to make
+ * sure the LNCFCMOCSx registers are programmed before any subsequent
+ * memory transactions, including GuC transactions.
+ */
+ if (flags & HAS_RENDER_L3CC)
+ init_l3cc_table(gt->uncore, &table);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index d83274f5163b..76db827210c0 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -36,5 +36,6 @@ struct intel_gt;
void intel_mocs_init(struct intel_gt *gt);
void intel_mocs_init_engine(struct intel_engine_cs *engine);
+void intel_set_mocs_index(struct intel_gt *gt);
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 886060f7e6fc..4396bfd630d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -155,19 +155,20 @@ int i915_ppgtt_init_hw(struct intel_gt *gt)
}
static struct i915_ppgtt *
-__ppgtt_create(struct intel_gt *gt)
+__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags)
{
if (GRAPHICS_VER(gt->i915) < 8)
return gen6_ppgtt_create(gt);
else
- return gen8_ppgtt_create(gt);
+ return gen8_ppgtt_create(gt, lmem_pt_obj_flags);
}
-struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags)
{
struct i915_ppgtt *ppgtt;
- ppgtt = __ppgtt_create(gt);
+ ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags);
if (IS_ERR(ppgtt))
return ppgtt;
@@ -298,7 +299,8 @@ int ppgtt_set_pages(struct i915_vma *vma)
return 0;
}
-void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags)
{
struct drm_i915_private *i915 = gt->i915;
@@ -306,6 +308,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = i915->drm.dev;
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+ ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;
dma_resv_init(&ppgtt->vm._resv);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 799d382eea79..43093dd2d0c9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -9,8 +9,8 @@
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_pcode.h"
#include "intel_rc6.h"
-#include "intel_sideband.h"
/**
* DOC: RC6
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index a74b72f50cc9..afb35d2e5c73 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -32,7 +32,7 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem)
mem->remap_addr = dma_map_resource(i915->drm.dev,
mem->region.start,
mem->fake_mappable.size,
- PCI_DMA_BIDIRECTIONAL,
+ DMA_BIDIRECTIONAL,
DMA_ATTR_FORCE_CONTIGUOUS);
if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
drm_mm_remove_node(&mem->fake_mappable);
@@ -62,7 +62,7 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
dma_unmap_resource(mem->i915->drm.dev,
mem->remap_addr,
mem->fake_mappable.size,
- PCI_DMA_BIDIRECTIONAL,
+ DMA_BIDIRECTIONAL,
DMA_ATTR_FORCE_CONTIGUOUS);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 7c4d5158e03b..2fdd52b62092 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -112,7 +112,8 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
+ obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
+ I915_BO_ALLOC_PM_VOLATILE);
if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 2958e2fae380..586dca1731ce 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -17,6 +17,7 @@
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
@@ -291,7 +292,9 @@ static void xcs_sanitize(struct intel_engine_cs *engine)
sanitize_hwsp(engine);
/* And scrub the dirty cachelines for the HWSP */
- clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+ drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
+
+ intel_engine_reset_pinned_contexts(engine);
}
static void reset_prepare(struct intel_engine_cs *engine)
@@ -1265,7 +1268,7 @@ static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
int size, err;
if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
- return 0;
+ return NULL;
err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
if (err < 0)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 0a03fbed9f9b..5e275f8dda8c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -11,8 +11,9 @@
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
+#include "intel_pcode.h"
#include "intel_rps.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"
#define BUSY_MAX_EI 20u /* ms */
@@ -994,20 +995,16 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
static void gen6_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 rp_state_cap = intel_rps_read_state_cap(rps);
/* All of these values are in units of 50MHz */
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
if (IS_GEN9_LP(i915)) {
- u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
-
rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
rps->min_freq = (rp_state_cap >> 0) & 0xff;
} else {
- u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
-
rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
rps->min_freq = (rp_state_cap >> 16) & 0xff;
@@ -2144,6 +2141,19 @@ int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
return set_min_freq(rps, val);
}
+u32 intel_rps_read_state_cap(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ if (IS_XEHPSDV(i915))
+ return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
+ else if (IS_GEN9_LP(i915))
+ return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+ else
+ return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+}
+
/* External interface for intel_ips.ko */
static struct drm_i915_private __rcu *ips_mchdev;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 4213bcce1667..11960d64ca82 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -41,6 +41,7 @@ u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
u32 intel_rps_read_punit_req(struct intel_rps *rps);
u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
+u32 intel_rps_read_state_cap(struct intel_rps *rps);
void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index bbd272943c3f..bdf09051b8a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -46,11 +46,11 @@ u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
}
void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
- u32 ss_mask)
+ u8 *subslice_mask, u32 ss_mask)
{
int offset = slice * sseu->ss_stride;
- memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride);
+ memcpy(&subslice_mask[offset], &ss_mask, sseu->ss_stride);
}
unsigned int
@@ -100,14 +100,24 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
return total;
}
-static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
- u8 s_en, u32 ss_en, u16 eu_en)
+static u32 get_ss_stride_mask(struct sseu_dev_info *sseu, u8 s, u32 ss_en)
+{
+ u32 ss_mask;
+
+ ss_mask = ss_en >> (s * sseu->max_subslices);
+ ss_mask &= GENMASK(sseu->max_subslices - 1, 0);
+
+ return ss_mask;
+}
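A quick worked example of the helper just added, with illustrative values:

/* With sseu->max_subslices = 4 and ss_en = 0b10110101, slice s = 1 gives:
 *   ss_mask = (0b10110101 >> 4) & GENMASK(3, 0) = 0b1011
 * i.e. subslices 0, 1 and 3 of slice 1 are enabled. */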
+
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, u8 s_en,
+ u32 g_ss_en, u32 c_ss_en, u16 eu_en)
{
int s, ss;
- /* ss_en represents entire subslice mask across all slices */
+ /* g_ss_en/c_ss_en represent entire subslice mask across all slices */
GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
- sizeof(ss_en) * BITS_PER_BYTE);
+ sizeof(g_ss_en) * BITS_PER_BYTE);
for (s = 0; s < sseu->max_slices; s++) {
if ((s_en & BIT(s)) == 0)
@@ -115,7 +125,22 @@ static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
sseu->slice_mask |= BIT(s);
- intel_sseu_set_subslices(sseu, s, ss_en);
+ /*
+ * XeHP introduces the concept of compute vs geometry DSS. To
+ * reduce variation between GENs around subslice usage, store
+ * separate masks for the geometry and compute enabled DSS since
+ * userspace will need to be able to query these masks
+ * independently. Also compute a total enabled subslice count
+ * for the purposes of selecting subslices to use in a
+ * particular GEM context.
+ */
+ intel_sseu_set_subslices(sseu, s, sseu->compute_subslice_mask,
+ get_ss_stride_mask(sseu, s, c_ss_en));
+ intel_sseu_set_subslices(sseu, s, sseu->geometry_subslice_mask,
+ get_ss_stride_mask(sseu, s, g_ss_en));
+ intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
+ get_ss_stride_mask(sseu, s,
+ g_ss_en | c_ss_en));
for (ss = 0; ss < sseu->max_subslices; ss++)
if (intel_sseu_has_subslice(sseu, s, ss))
@@ -129,7 +154,7 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
{
struct sseu_dev_info *sseu = &gt->info.sseu;
struct intel_uncore *uncore = gt->uncore;
- u32 dss_en;
+ u32 g_dss_en, c_dss_en = 0;
u16 eu_en = 0;
u8 eu_en_fuse;
u8 s_en;
@@ -160,7 +185,9 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
GEN11_GT_S_ENA_MASK;
- dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE);
+ g_dss_en = intel_uncore_read(uncore, GEN12_GT_GEOMETRY_DSS_ENABLE);
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
+ c_dss_en = intel_uncore_read(uncore, GEN12_GT_COMPUTE_DSS_ENABLE);
/* one bit per pair of EUs */
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
@@ -173,7 +200,7 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
if (eu_en_fuse & BIT(eu))
eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
- gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
+ gen11_compute_sseu_info(sseu, s_en, g_dss_en, c_dss_en, eu_en);
/* TGL only supports slice-level power gating */
sseu->has_slice_pg = 1;
@@ -199,7 +226,7 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
GEN11_EU_DIS_MASK);
- gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
+ gen11_compute_sseu_info(sseu, s_en, ss_en, 0, eu_en);
/* ICL has no power gating restrictions. */
sseu->has_slice_pg = 1;
@@ -240,7 +267,7 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
sseu_set_eus(sseu, 0, 1, ~disabled_mask);
}
- intel_sseu_set_subslices(sseu, 0, subslice_mask);
+ intel_sseu_set_subslices(sseu, 0, sseu->subslice_mask, subslice_mask);
sseu->eu_total = compute_eu_total(sseu);
@@ -296,7 +323,8 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
/* skip disabled slice */
continue;
- intel_sseu_set_subslices(sseu, s, subslice_mask);
+ intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
+ subslice_mask);
eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
@@ -408,7 +436,8 @@ static void bdw_sseu_info_init(struct intel_gt *gt)
/* skip disabled slice */
continue;
- intel_sseu_set_subslices(sseu, s, subslice_mask);
+ intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
+ subslice_mask);
for (ss = 0; ss < sseu->max_subslices; ss++) {
u8 eu_disabled_mask;
@@ -485,10 +514,9 @@ static void hsw_sseu_info_init(struct intel_gt *gt)
}
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
- switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
+ switch (REG_FIELD_GET(HSW_F1_EU_DIS_MASK, fuse1)) {
default:
- MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
- HSW_F1_EU_DIS_SHIFT);
+ MISSING_CASE(REG_FIELD_GET(HSW_F1_EU_DIS_MASK, fuse1));
fallthrough;
case HSW_F1_EU_DIS_10EUS:
sseu->eu_per_subslice = 10;
@@ -506,7 +534,8 @@ static void hsw_sseu_info_init(struct intel_gt *gt)
sseu->eu_per_subslice);
for (s = 0; s < sseu->max_slices; s++) {
- intel_sseu_set_subslices(sseu, s, subslice_mask);
+ intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
+ subslice_mask);
for (ss = 0; ss < sseu->max_subslices; ss++) {
sseu_set_eus(sseu, s, ss,
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index 22fef98887c0..60882a74741e 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -26,9 +26,14 @@ struct drm_printer;
#define GEN_DSS_PER_CSLICE 8
#define GEN_DSS_PER_MSLICE 8
+#define GEN_MAX_GSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_GSLICE)
+#define GEN_MAX_CSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_CSLICE)
+
struct sseu_dev_info {
u8 slice_mask;
u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
+ u8 geometry_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
+ u8 compute_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE];
u16 eu_total;
u8 eu_per_subslice;
@@ -78,6 +83,10 @@ intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
u8 mask;
int ss_idx = subslice / BITS_PER_BYTE;
+ if (slice >= sseu->max_slices ||
+ subslice >= sseu->max_subslices)
+ return false;
+
GEM_BUG_ON(ss_idx >= sseu->ss_stride);
mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx];
@@ -97,7 +106,7 @@ intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
- u32 ss_mask);
+ u8 *subslice_mask, u32 ss_mask);
void intel_sseu_info_init(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
index 1ba8b7da9d37..8bb3a91dad82 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -4,9 +4,9 @@
* Copyright © 2020 Intel Corporation
*/
-#include "debugfs_gt.h"
-#include "intel_sseu_debugfs.h"
#include "i915_drv.h"
+#include "intel_gt_debugfs.h"
+#include "intel_sseu_debugfs.h"
static void sseu_copy_subslices(const struct sseu_dev_info *sseu,
int slice, u8 *to_mask)
@@ -282,7 +282,7 @@ static int sseu_status_show(struct seq_file *m, void *unused)
return intel_sseu_status(m, gt);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(sseu_status);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_status);
static int rcs_topology_show(struct seq_file *m, void *unused)
{
@@ -293,11 +293,11 @@ static int rcs_topology_show(struct seq_file *m, void *unused)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(rcs_topology);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rcs_topology);
void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "sseu_status", &sseu_status_fops, NULL },
{ "rcs_topology", &rcs_topology_fops, NULL },
};
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 1257f4f11e66..438bbc7b8147 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -64,7 +64,7 @@ intel_timeline_pin_map(struct intel_timeline *timeline)
timeline->hwsp_map = vaddr;
timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
- clflush(vaddr + ofs);
+ drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);
return 0;
}
@@ -225,7 +225,7 @@ void intel_timeline_reset_seqno(const struct intel_timeline *tl)
memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
WRITE_ONCE(*hwsp_seqno, tl->seqno);
- clflush(hwsp_seqno);
+ drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
}
void intel_timeline_enter(struct intel_timeline *tl)
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index aae609d7d85d..e1f362530889 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -644,6 +644,72 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}
+static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ /*
+ * This is a "fake" workaround defined by software to ensure we
+ * maintain reliable, backward-compatible behavior for userspace with
+ * regard to how nested MI_BATCH_BUFFER_START commands are handled.
+ *
+ * The per-context setting of MI_MODE[12] determines whether the bits
+ * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
+ * in the traditional manner or whether they should instead use a new
+ * tgl+ meaning that breaks backward compatibility, but allows nesting
+ * into 3rd-level batchbuffers. When this new capability was first
+ * added in TGL, it remained off by default unless a context
+ * intentionally opted in to the new behavior. However Xe_HPG now
+ * flips this on by default and requires that we explicitly opt out if
+ * we don't want the new behavior.
+ *
+ * From a SW perspective, we want to maintain the backward-compatible
+ * behavior for userspace, so we'll apply a fake workaround to set it
+ * back to the legacy behavior on platforms where the hardware default
+ * is to break compatibility. At the moment there is no Linux
+ * userspace that utilizes third-level batchbuffers, so this avoids
+ * userspace needing to make any changes; using the legacy
+ * meaning is the correct thing to do. If/when we have userspace
+ * consumers that want to utilize third-level batch nesting, we can
+ * provide a context parameter to allow them to opt-in.
+ */
+ wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
+}
+
+static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ u8 mocs;
+
+ /*
+ * Some blitter commands do not have a field for MOCS; those
+ * commands use the MOCS index pointed to by BLIT_CCTL. The
+ * BLIT_CCTL registers need to be programmed to un-cached.
+ */
+ if (engine->class == COPY_ENGINE_CLASS) {
+ mocs = engine->gt->mocs.uc_index;
+ wa_write_clr_set(wal,
+ BLIT_CCTL(engine->mmio_base),
+ BLIT_CCTL_MASK,
+ BLIT_CCTL_MOCS(mocs, mocs));
+ }
+}
+
+/*
+ * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
+ * defined by the hardware team; it programs general context registers.
+ * Routing that context register programming through the context workaround
+ * list allows us to use the wa framework for proper application and validation.
+ */
+static void
+gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ fakewa_disable_nestedbb_mode(engine, wal);
+
+ gen12_ctx_gt_mocs_init(engine, wal);
+}
+
static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
struct i915_wa_list *wal,
@@ -651,11 +717,19 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
{
struct drm_i915_private *i915 = engine->i915;
- if (engine->class != RENDER_CLASS)
- return;
-
wa_init_start(wal, name, engine->name);
+ /* Applies to all engines */
+ /*
+ * Fake workarounds are not actual workarounds, but programming of
+ * context registers done through the workaround framework.
+ */
+ if (GRAPHICS_VER(i915) >= 12)
+ gen12_ctx_gt_fake_wa_init(engine, wal);
+
+ if (engine->class != RENDER_CLASS)
+ goto done;
+
if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 12)
@@ -685,6 +759,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
else
MISSING_CASE(GRAPHICS_VER(i915));
+done:
wa_init_finish(wal);
}
@@ -729,7 +804,7 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
}
static void
-gen4_gt_workarounds_init(struct drm_i915_private *i915,
+gen4_gt_workarounds_init(struct intel_gt *gt,
struct i915_wa_list *wal)
{
/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
@@ -737,29 +812,29 @@ gen4_gt_workarounds_init(struct drm_i915_private *i915,
}
static void
-g4x_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen4_gt_workarounds_init(i915, wal);
+ gen4_gt_workarounds_init(gt, wal);
/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}
static void
-ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- g4x_gt_workarounds_init(i915, wal);
+ g4x_gt_workarounds_init(gt, wal);
wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}
static void
-snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
}
static void
-ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
wa_masked_dis(wal,
@@ -775,7 +850,7 @@ ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* WaForceL3Serialization:vlv */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
@@ -788,7 +863,7 @@ vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* L3 caching of data atomics doesn't work -- disable it. */
wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
@@ -803,8 +878,10 @@ hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ struct drm_i915_private *i915 = gt->i915;
+
/* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
wa_write_or(wal,
@@ -829,9 +906,9 @@ gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal
}
static void
-skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
/* WaDisableGafsUnitClkGating:skl */
wa_write_or(wal,
@@ -839,19 +916,19 @@ skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_GT_STEP(i915, STEP_A0, STEP_H0))
+ if (IS_SKL_GT_STEP(gt->i915, STEP_A0, STEP_H0))
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void
-kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_GT_STEP(i915, 0, STEP_C0))
+ if (IS_KBL_GT_STEP(gt->i915, 0, STEP_C0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -868,15 +945,15 @@ kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
}
static void
-cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
/* WaDisableGafsUnitClkGating:cfl */
wa_write_or(wal,
@@ -901,21 +978,21 @@ static void __set_mcr_steering(struct i915_wa_list *wal,
wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
}
-static void __add_mcr_wa(struct drm_i915_private *i915, struct i915_wa_list *wal,
+static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
unsigned int slice, unsigned int subslice)
{
- drm_dbg(&i915->drm, "MCR slice=0x%x, subslice=0x%x\n", slice, subslice);
+ drm_dbg(&gt->i915->drm, "MCR slice=0x%x, subslice=0x%x\n", slice, subslice);
__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
}
static void
-icl_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
+icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
- const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
+ const struct sseu_dev_info *sseu = &gt->info.sseu;
unsigned int slice, subslice;
- GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
+ GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
slice = 0;
@@ -935,16 +1012,15 @@ icl_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* then we can just rely on the default steering and won't need to
* worry about explicitly re-steering L3BANK reads later.
*/
- if (i915->gt.info.l3bank_mask & BIT(subslice))
- i915->gt.steering_table[L3BANK] = NULL;
+ if (gt->info.l3bank_mask & BIT(subslice))
+ gt->steering_table[L3BANK] = NULL;
- __add_mcr_wa(i915, wal, slice, subslice);
+ __add_mcr_wa(gt, wal, slice, subslice);
}
static void
xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = gt->i915;
const struct sseu_dev_info *sseu = &gt->info.sseu;
unsigned long slice, subslice = 0, slice_mask = 0;
u64 dss_mask = 0;
@@ -1008,7 +1084,7 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
WARN_ON(subslice > GEN_DSS_PER_GSLICE);
WARN_ON(dss_mask >> (slice * GEN_DSS_PER_GSLICE) == 0);
- __add_mcr_wa(i915, wal, slice, subslice);
+ __add_mcr_wa(gt, wal, slice, subslice);
/*
* SQIDI ranges are special because they use different steering
@@ -1024,9 +1100,11 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- icl_wa_init_mcr(i915, wal);
+ struct drm_i915_private *i915 = gt->i915;
+
+ icl_wa_init_mcr(gt, wal);
/* WaModifyGamTlbPartitioning:icl */
wa_write_clr_set(wal,
@@ -1077,10 +1155,9 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
* the engine-specific workaround list.
*/
static void
-wa_14011060649(struct drm_i915_private *i915, struct i915_wa_list *wal)
+wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct intel_engine_cs *engine;
- struct intel_gt *gt = &i915->gt;
int id;
for_each_engine(engine, gt, id) {
@@ -1094,22 +1171,23 @@ wa_14011060649(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-gen12_gt_workarounds_init(struct drm_i915_private *i915,
- struct i915_wa_list *wal)
+gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- icl_wa_init_mcr(i915, wal);
+ icl_wa_init_mcr(gt, wal);
/* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
- wa_14011060649(i915, wal);
+ wa_14011060649(gt, wal);
/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
wa_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}
static void
-tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen12_gt_workarounds_init(i915, wal);
+ struct drm_i915_private *i915 = gt->i915;
+
+ gen12_gt_workarounds_init(gt, wal);
/* Wa_1409420604:tgl */
if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
@@ -1130,9 +1208,11 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen12_gt_workarounds_init(i915, wal);
+ struct drm_i915_private *i915 = gt->i915;
+
+ gen12_gt_workarounds_init(gt, wal);
/* Wa_1607087056:dg1 */
if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0))
@@ -1154,60 +1234,62 @@ dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-xehpsdv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- xehp_init_mcr(&i915->gt, wal);
+ xehp_init_mcr(gt, wal);
}
static void
-gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
+gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ struct drm_i915_private *i915 = gt->i915;
+
if (IS_XEHPSDV(i915))
- xehpsdv_gt_workarounds_init(i915, wal);
+ xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
- dg1_gt_workarounds_init(i915, wal);
+ dg1_gt_workarounds_init(gt, wal);
else if (IS_TIGERLAKE(i915))
- tgl_gt_workarounds_init(i915, wal);
+ tgl_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 12)
- gen12_gt_workarounds_init(i915, wal);
+ gen12_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 11)
- icl_gt_workarounds_init(i915, wal);
+ icl_gt_workarounds_init(gt, wal);
else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
- cfl_gt_workarounds_init(i915, wal);
+ cfl_gt_workarounds_init(gt, wal);
else if (IS_GEMINILAKE(i915))
- glk_gt_workarounds_init(i915, wal);
+ glk_gt_workarounds_init(gt, wal);
else if (IS_KABYLAKE(i915))
- kbl_gt_workarounds_init(i915, wal);
+ kbl_gt_workarounds_init(gt, wal);
else if (IS_BROXTON(i915))
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
else if (IS_SKYLAKE(i915))
- skl_gt_workarounds_init(i915, wal);
+ skl_gt_workarounds_init(gt, wal);
else if (IS_HASWELL(i915))
- hsw_gt_workarounds_init(i915, wal);
+ hsw_gt_workarounds_init(gt, wal);
else if (IS_VALLEYVIEW(i915))
- vlv_gt_workarounds_init(i915, wal);
+ vlv_gt_workarounds_init(gt, wal);
else if (IS_IVYBRIDGE(i915))
- ivb_gt_workarounds_init(i915, wal);
+ ivb_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 6)
- snb_gt_workarounds_init(i915, wal);
+ snb_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 5)
- ilk_gt_workarounds_init(i915, wal);
+ ilk_gt_workarounds_init(gt, wal);
else if (IS_G4X(i915))
- g4x_gt_workarounds_init(i915, wal);
+ g4x_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 4)
- gen4_gt_workarounds_init(i915, wal);
+ gen4_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) <= 8)
;
else
MISSING_CASE(GRAPHICS_VER(i915));
}
-void intel_gt_init_workarounds(struct drm_i915_private *i915)
+void intel_gt_init_workarounds(struct intel_gt *gt)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
+ struct i915_wa_list *wal = &gt->wa_list;
wa_init_start(wal, "GT", "global");
- gt_init_workarounds(i915, wal);
+ gt_init_workarounds(gt, wal);
wa_init_finish(wal);
}
@@ -1278,7 +1360,7 @@ wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal)
void intel_gt_apply_workarounds(struct intel_gt *gt)
{
- wa_list_apply(gt, &gt->i915->gt_wa_list);
+ wa_list_apply(gt, &gt->wa_list);
}
static bool wa_list_verify(struct intel_gt *gt,
@@ -1310,7 +1392,7 @@ static bool wa_list_verify(struct intel_gt *gt,
bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
- return wa_list_verify(gt, &gt->i915->gt_wa_list, from);
+ return wa_list_verify(gt, &gt->wa_list, from);
}
__maybe_unused
@@ -1604,6 +1686,31 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
i915_mmio_reg_offset(RING_NOPID(base)));
}
+/*
+ * engine_fake_wa_init() is a placeholder for programming registers that
+ * are not part of an official workaround defined by the hardware team.
+ * Doing that programming inside the workaround framework allows proper
+ * application and verification of those registers.
+ */
+static void
+engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ u8 mocs;
+
+ /*
+ * RING_CMD_CCTL needs to be programmed to un-cached for memory
+ * writes and reads issued by the Command Streamers on Gen12
+ * onward platforms.
+ */
+ if (GRAPHICS_VER(engine->i915) >= 12) {
+ mocs = engine->gt->mocs.uc_index;
+ wa_masked_field_set(wal,
+ RING_CMD_CCTL(engine->mmio_base),
+ CMD_CCTL_MOCS_MASK,
+ CMD_CCTL_MOCS_OVERRIDE(mocs, mocs));
+ }
+}
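For reference, RING_CMD_CCTL is programmed via wa_masked_field_set() because it is a masked register: the written value carries a write-enable mask in its upper 16 bits, so only the MOCS field is modified. A sketch of the underlying idiom (assuming the _MASKED_FIELD() helper from i915_reg.h):

/* only the bits covered by CMD_CCTL_MOCS_MASK are updated by this write */
u32 val = _MASKED_FIELD(CMD_CCTL_MOCS_MASK,
			CMD_CCTL_MOCS_OVERRIDE(mocs, mocs));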
static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
@@ -2044,6 +2151,8 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4))
return;
+ engine_fake_wa_init(engine, wal);
+
if (engine->class == RENDER_CLASS)
rcs_engine_wa_init(engine, wal);
else
@@ -2067,12 +2176,7 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->gt, &engine->wa_list);
}
-struct mcr_range {
- u32 start;
- u32 end;
-};
-
-static const struct mcr_range mcr_ranges_gen8[] = {
+static const struct i915_range mcr_ranges_gen8[] = {
{ .start = 0x5500, .end = 0x55ff },
{ .start = 0x7000, .end = 0x7fff },
{ .start = 0x9400, .end = 0x97ff },
@@ -2081,7 +2185,7 @@ static const struct mcr_range mcr_ranges_gen8[] = {
{},
};
-static const struct mcr_range mcr_ranges_gen12[] = {
+static const struct i915_range mcr_ranges_gen12[] = {
{ .start = 0x8150, .end = 0x815f },
{ .start = 0x9520, .end = 0x955f },
{ .start = 0xb100, .end = 0xb3ff },
@@ -2090,7 +2194,7 @@ static const struct mcr_range mcr_ranges_gen12[] = {
{},
};
-static const struct mcr_range mcr_ranges_xehp[] = {
+static const struct i915_range mcr_ranges_xehp[] = {
{ .start = 0x4000, .end = 0x4aff },
{ .start = 0x5200, .end = 0x52ff },
{ .start = 0x5400, .end = 0x7fff },
@@ -2109,7 +2213,7 @@ static const struct mcr_range mcr_ranges_xehp[] = {
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
- const struct mcr_range *mcr_ranges;
+ const struct i915_range *mcr_ranges;
int i;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
index 15abb68b6c00..9beaab77c7f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -24,7 +24,7 @@ static inline void intel_wa_list_free(struct i915_wa_list *wal)
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
int intel_engine_emit_ctx_wa(struct i915_request *rq);
-void intel_gt_init_workarounds(struct drm_i915_private *i915);
+void intel_gt_init_workarounds(struct intel_gt *gt);
void intel_gt_apply_workarounds(struct intel_gt *gt);
bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 2c1af030310c..8b89215afe46 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -376,6 +376,8 @@ int mock_engine_init(struct intel_engine_cs *engine)
{
struct intel_context *ce;
+ INIT_LIST_HEAD(&engine->pinned_contexts_list);
+
engine->sched_engine = i915_sched_engine_create(ENGINE_MOCK);
if (!engine->sched_engine)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 317eebf086c3..6e6e4d747cca 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -290,7 +290,7 @@ static int live_heartbeat_fast(void *arg)
int err = 0;
/* Check that the heartbeat ticks at the desired rate. */
- if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
return 0;
for_each_engine(engine, gt, id) {
@@ -352,7 +352,7 @@ static int live_heartbeat_off(void *arg)
int err = 0;
/* Check that we can turn off heartbeat and not interrupt VIP */
- if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
return 0;
for_each_engine(engine, gt, id) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index f12ffe797639..b367ecfa42de 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -992,7 +992,7 @@ static int live_timeslice_preempt(void *arg)
* need to preempt the current task and replace it with another
* ready task.
*/
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
@@ -1122,7 +1122,7 @@ static int live_timeslice_rewind(void *arg)
* but only a few of those requests, forcing us to rewind the
* RING_TAIL of the original request.
*/
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
for_each_engine(engine, gt, id) {
@@ -1299,7 +1299,7 @@ static int live_timeslice_queue(void *arg)
* ELSP[1] is already occupied, so must rely on timeslicing to
* eject ELSP[0] in favour of the queue.)
*/
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
@@ -1420,7 +1420,7 @@ static int live_timeslice_nopreempt(void *arg)
* We should not timeslice into a request that is marked with
* I915_REQUEST_NOPREEMPT.
*/
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
if (igt_spinner_init(&spin, gt))
@@ -2260,7 +2260,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
int err;
/* Preempt cancel non-preemptible spinner in ELSP0 */
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return 0;
if (!intel_has_reset_engine(arg->engine->gt))
@@ -2316,7 +2316,7 @@ static int __cancel_fail(struct live_preempt_cancel *arg)
struct i915_request *rq;
int err;
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return 0;
if (!intel_has_reset_engine(engine->gt))
@@ -3375,7 +3375,7 @@ static int live_preempt_timeout(void *arg)
* Check that we force preemption to occur by cancelling the previous
* context if it refuses to yield the GPU.
*/
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return 0;
if (!intel_has_reset_engine(gt))
@@ -3493,7 +3493,7 @@ static int smoke_submit(struct preempt_smoke *smoke,
if (batch) {
struct i915_address_space *vm;
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
vma = i915_vma_instance(batch, vm, NULL);
i915_vm_put(vm);
if (IS_ERR(vma))
@@ -3733,7 +3733,7 @@ static int nop_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
for (n = 0; n < nctx; n++) {
- ve[n] = intel_engine_create_virtual(siblings, nsibling);
+ ve[n] = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve[n])) {
err = PTR_ERR(ve[n]);
nctx = n;
@@ -3929,7 +3929,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
* restrict it to our desired engine within the virtual engine.
*/
- ve = intel_engine_create_virtual(siblings, nsibling);
+ ve = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_close;
@@ -4060,7 +4060,7 @@ static int slicein_virtual_engine(struct intel_gt *gt,
i915_request_add(rq);
}
- ce = intel_engine_create_virtual(siblings, nsibling);
+ ce = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
@@ -4112,7 +4112,7 @@ static int sliceout_virtual_engine(struct intel_gt *gt,
/* XXX We do not handle oversubscription and fairness with normal rq */
for (n = 0; n < nsibling; n++) {
- ce = intel_engine_create_virtual(siblings, nsibling);
+ ce = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
@@ -4214,7 +4214,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
if (err)
goto out_scratch;
- ve = intel_engine_create_virtual(siblings, nsibling);
+ ve = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_scratch;
@@ -4354,7 +4354,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
- ve = intel_engine_create_virtual(siblings, nsibling);
+ ve = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_spin;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 2c1ed32ca5ac..7e2d99dd012d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -117,7 +117,7 @@ static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct intel_gt *gt = h->gt;
- struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
+ struct i915_address_space *vm = i915_gem_context_get_eb_vm(h->ctx);
struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
@@ -789,7 +789,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (err)
pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
engine->name, rq->fence.context,
- rq->fence.seqno, rq->context->guc_id, err);
+ rq->fence.seqno, rq->context->guc_id.id, err);
}
skip:
@@ -1098,7 +1098,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
if (err)
pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
engine->name, rq->fence.context,
- rq->fence.seqno, rq->context->guc_id, err);
+ rq->fence.seqno, rq->context->guc_id.id, err);
}
count++;
@@ -1108,7 +1108,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n",
engine->name, test_name,
rq->fence.context,
- rq->fence.seqno, rq->context->guc_id);
+ rq->fence.seqno, rq->context->guc_id.id);
i915_request_put(rq);
GEM_TRACE_DUMP();
@@ -1596,7 +1596,7 @@ static int igt_reset_evict_ppgtt(void *arg)
if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
return 0;
- ppgtt = i915_ppgtt_create(gt);
+ ppgtt = i915_ppgtt_create(gt, 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index e623ac45f4aa..962e91ba3be4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -66,7 +66,7 @@ reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
memset(lists, 0, sizeof(*lists));
wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
- gt_init_workarounds(gt->i915, &lists->gt_wa_list);
+ gt_init_workarounds(gt, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list);
for_each_engine(engine, gt, id) {
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 8ff582222aff..ba10bd374cee 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -142,6 +142,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
+ INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index fbfcae727d7f..6e228343e8cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -3,6 +3,7 @@
* Copyright © 2014-2019 Intel Corporation
*/
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
@@ -647,7 +648,14 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
u64 flags;
int ret;
- obj = i915_gem_object_create_shmem(gt->i915, size);
+ if (HAS_LMEM(gt->i915))
+ obj = i915_gem_object_create_lmem(gt->i915, size,
+ I915_BO_ALLOC_CPU_CLEAR |
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_PM_EARLY);
+ else
+ obj = i915_gem_object_create_shmem(gt->i915, size);
+
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -748,3 +756,32 @@ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
}
}
}
+
+void intel_guc_write_barrier(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
+ /*
+ * Ensure intel_uncore_write_fw can be used rather than
+ * intel_uncore_write.
+ */
+ GEM_BUG_ON(guc->send_regs.fw_domains);
+
+ /*
+ * This register is used by the i915 and GuC for MMIO based
+ * communication. Once we are in this code CTBs are the only
+ * method the i915 uses to communicate with the GuC so it is
+ * safe to write to this register (a value of 0 is NOP for MMIO
+ * communication). If we ever start mixing CTBs and MMIOs a new
+ * register will have to be chosen. This function is also used
+ * to enforce ordering of a work queue item write and an update
+ * to the process descriptor. When a work queue is being used,
+ * CTBs are also the only mechanism of communication.
+ */
+ intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
+ } else {
+ /* wmb() sufficient for a barrier if in smem */
+ wmb();
+ }
+}
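The ordering contract intel_guc_write_barrier() provides is consumed below in ct_write(); with hypothetical buffer and descriptor names, a caller is expected to look roughly like:

	/* Sketch with hypothetical names, not a real call site: */
	memcpy(cmds + tail, msg, len);            /* 1) write the H2G payload  */
	intel_guc_write_barrier(guc);             /* 2) order payload vs. tail */
	WRITE_ONCE(desc->tail, new_tail);         /* 3) publish to the GuC     */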
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2e27fe59786b..31cf9fb48c7e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -22,74 +22,155 @@
struct __guc_ads_blob;
-/*
- * Top level structure of GuC. It handles firmware loading and manages client
- * pool. intel_guc owns a intel_guc_client to replace the legacy ExecList
- * submission.
+/**
+ * struct intel_guc - Top level structure of GuC.
+ *
+ * It handles firmware loading and manages the client pool. intel_guc owns an
+ * i915_sched_engine for submission.
*/
struct intel_guc {
+ /** @fw: the GuC firmware */
struct intel_uc_fw fw;
+ /** @log: sub-structure containing GuC log related data and objects */
struct intel_guc_log log;
+ /** @ct: the command transport communication channel */
struct intel_guc_ct ct;
+ /** @slpc: sub-structure containing SLPC related data and objects */
struct intel_guc_slpc slpc;
- /* Global engine used to submit requests to GuC */
+ /** @sched_engine: Global engine used to submit requests to GuC */
struct i915_sched_engine *sched_engine;
+ /**
+ * @stalled_request: if GuC can't process a request for any reason, we
+ * save it until GuC restarts processing. No other request can be
+ * submitted until the stalled request is processed.
+ */
struct i915_request *stalled_request;
+ /**
+ * @submission_stall_reason: reason why submission is stalled
+ */
+ enum {
+ STALL_NONE,
+ STALL_REGISTER_CONTEXT,
+ STALL_MOVE_LRC_TAIL,
+ STALL_ADD_REQUEST,
+ } submission_stall_reason;
/* intel_guc_recv interrupt related state */
+ /** @irq_lock: protects GuC irq state */
spinlock_t irq_lock;
+ /**
+ * @msg_enabled_mask: mask of events that are processed when receiving
+ * an INTEL_GUC_ACTION_DEFAULT G2H message.
+ */
unsigned int msg_enabled_mask;
+ /**
+ * @outstanding_submission_g2h: number of outstanding GuC to Host
+ * responses related to GuC submission, used to determine if the GT is
+ * idle
+ */
atomic_t outstanding_submission_g2h;
+ /** @interrupts: pointers to GuC interrupt-managing functions. */
struct {
void (*reset)(struct intel_guc *guc);
void (*enable)(struct intel_guc *guc);
void (*disable)(struct intel_guc *guc);
} interrupts;
- /*
- * contexts_lock protects the pool of free guc ids and a linked list of
- * guc ids available to be stolen
+ /**
+ * @submission_state: sub-structure for submission state protected by
+ * single lock
+ */
+ struct {
+ /**
+ * @lock: protects everything in submission_state,
+ * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
+ * out of zero
+ */
+ spinlock_t lock;
+ /**
+ * @guc_ids: used to allocate new guc_ids, single-lrc
+ */
+ struct ida guc_ids;
+ /**
+ * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+ */
+ unsigned long *guc_ids_bitmap;
+ /**
+ * @guc_id_list: list of intel_context with valid guc_ids but no
+ * refs
+ */
+ struct list_head guc_id_list;
+ /**
+ * @destroyed_contexts: list of contexts waiting to be destroyed
+ * (deregistered with the GuC)
+ */
+ struct list_head destroyed_contexts;
+ /**
+ * @destroyed_worker: worker to deregister contexts, needed because we
+ * must take a GT PM reference and can't do that from the destroy
+ * function as it might run in an atomic context (no sleeping)
+ */
+ struct work_struct destroyed_worker;
+ } submission_state;
+
+ /**
+ * @submission_supported: tracks whether we support GuC submission on
+ * the current platform
*/
- spinlock_t contexts_lock;
- struct ida guc_ids;
- struct list_head guc_id_list;
-
bool submission_supported;
+ /** @submission_selected: tracks whether the user enabled GuC submission */
bool submission_selected;
+ /**
+ * @rc_supported: tracks whether we support GuC rc on the current platform
+ */
bool rc_supported;
+ /** @rc_selected: tracks whether the user enabled GuC rc */
bool rc_selected;
+ /** @ads_vma: object allocated to hold the GuC ADS */
struct i915_vma *ads_vma;
+ /** @ads_blob: contents of the GuC ADS */
struct __guc_ads_blob *ads_blob;
+ /** @ads_regset_size: size of the save/restore regsets in the ADS */
u32 ads_regset_size;
+ /** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
struct i915_vma *lrc_desc_pool;
+ /** @lrc_desc_pool_vaddr: contents of the GuC LRC descriptor pool */
void *lrc_desc_pool_vaddr;
- /* guc_id to intel_context lookup */
+ /**
+ * @context_lookup: used to resolve intel_context from guc_id, if a
+ * context is present in this structure it is registered with the GuC
+ */
struct xarray context_lookup;
- /* Control params for fw initialization */
+ /** @params: Control params for fw initialization */
u32 params[GUC_CTL_MAX_DWORDS];
- /* GuC's FW specific registers used in MMIO send */
+ /** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
struct {
u32 base;
unsigned int count;
enum forcewake_domains fw_domains;
} send_regs;
- /* register used to send interrupts to the GuC FW */
+ /** @notify_reg: register used to send interrupts to the GuC FW */
i915_reg_t notify_reg;
- /* Store msg (e.g. log flush) that we see while CTBs are disabled */
+ /**
+ * @mmio_msg: notification bitmask that the GuC writes in one of its
+ * registers when the CT channel is disabled, to be processed when the
+ * channel is back up.
+ */
u32 mmio_msg;
- /* To serialize the intel_guc_send actions */
+ /** @send_mutex: used to serialize the intel_guc_send actions */
struct mutex send_mutex;
};
@@ -295,4 +376,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc);
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
+void intel_guc_write_barrier(struct intel_guc *guc);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 6926919bcac6..621c893a009f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -176,7 +176,7 @@ static void guc_mapping_table_init(struct intel_gt *gt,
for_each_engine(engine, gt, id) {
u8 guc_class = engine_class_to_guc_class(engine->class);
- system_info->mapping_table[guc_class][engine->instance] =
+ system_info->mapping_table[guc_class][ilog2(engine->logical_mask)] =
engine->instance;
}
}
@@ -349,6 +349,8 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
info->engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt);
}
+#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
+#define LRC_SKIP_SIZE (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE)
static int guc_prep_golden_context(struct intel_guc *guc,
struct __guc_ads_blob *blob)
{
@@ -396,7 +398,18 @@ static int guc_prep_golden_context(struct intel_guc *guc,
if (!blob)
continue;
- blob->ads.eng_state_size[guc_class] = real_size;
+ /*
+ * This interface is slightly confusing. We need to pass the
+ * base address of the full golden context and the size of just
+ * the engine state, which is the section of the context image
+ * that starts after the execlists context. This is required to
+ * allow the GuC to restore just the engine state when a
+ * watchdog reset occurs.
+ * We calculate the engine state size by removing the size of
+ * what comes before it in the context image (which is identical
+ * on all engines).
+ */
+ blob->ads.eng_state_size[guc_class] = real_size - LRC_SKIP_SIZE;
blob->ads.golden_context_lrca[guc_class] = addr_ggtt;
addr_ggtt += alloc_size;
}
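As a worked example of the new constants (assuming the usual 4 KiB PAGE_SIZE and an LRC_PPHWSP_SZ of one page):

	/*
	 * LRC_SKIP_SIZE = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE
	 *               = 1 * 4096 + 80 * 4
	 *               = 4416 bytes
	 * so eng_state_size = real_size - 4416 for each engine class.
	 */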
@@ -436,11 +449,6 @@ static void guc_init_golden_context(struct intel_guc *guc)
u8 engine_class, guc_class;
u8 *ptr;
- /* Skip execlist and PPGTT registers + HWSP */
- const u32 lr_hw_context_size = 80 * sizeof(u32);
- const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE +
- lr_hw_context_size;
-
if (!intel_uc_uses_guc_submission(&gt->uc))
return;
@@ -476,12 +484,12 @@ static void guc_init_golden_context(struct intel_guc *guc)
continue;
}
- GEM_BUG_ON(blob->ads.eng_state_size[guc_class] != real_size);
+ GEM_BUG_ON(blob->ads.eng_state_size[guc_class] !=
+ real_size - LRC_SKIP_SIZE);
GEM_BUG_ON(blob->ads.golden_context_lrca[guc_class] != addr_ggtt);
addr_ggtt += alloc_size;
- shmem_read(engine->default_state, skip_size, ptr + skip_size,
- real_size - skip_size);
+ shmem_read(engine->default_state, 0, ptr, real_size);
ptr += alloc_size;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 22b4733b55e2..a0cc34be7b56 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -168,12 +168,15 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc, u32 type,
FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR, desc_addr),
FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR, buff_addr),
};
+ int ret;
GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
GEM_BUG_ON(size % SZ_4K);
/* CT registration must go over MMIO */
- return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+ ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+
+ return ret > 0 ? -EPROTO : ret;
}
static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
@@ -188,8 +191,8 @@ static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
err = guc_action_register_ct_buffer(ct_to_guc(ct), type,
desc_addr, buff_addr, size);
if (unlikely(err))
- CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
- guc_ct_buffer_type_to_str(type), err);
+ CT_ERROR(ct, "Failed to register %s buffer (%pe)\n",
+ guc_ct_buffer_type_to_str(type), ERR_PTR(err));
return err;
}
@@ -201,11 +204,14 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_DEREGISTER_CTB),
FIELD_PREP(HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE, type),
};
+ int ret;
GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
/* CT deregistration must go over MMIO */
- return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+ ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+
+ return ret > 0 ? -EPROTO : ret;
}
static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
@@ -213,8 +219,8 @@ static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
if (unlikely(err))
- CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n",
- guc_ct_buffer_type_to_str(type), err);
+ CT_ERROR(ct, "Failed to deregister %s buffer (%pe)\n",
+ guc_ct_buffer_type_to_str(type), ERR_PTR(err));
return err;
}
@@ -377,28 +383,6 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
return ++ct->requests.last_fence;
}
-static void write_barrier(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_gt *gt = guc_to_gt(guc);
-
- if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
- GEM_BUG_ON(guc->send_regs.fw_domains);
- /*
- * This register is used by the i915 and GuC for MMIO based
- * communication. Once we are in this code CTBs are the only
- * method the i915 uses to communicate with the GuC so it is
- * safe to write to this register (a value of 0 is NOP for MMIO
- * communication). If we ever start mixing CTBs and MMIOs a new
- * register will have to be chosen.
- */
- intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
- } else {
- /* wmb() sufficient for a barrier if in smem */
- wmb();
- }
-}
-
static int ct_write(struct intel_guc_ct *ct,
const u32 *action,
u32 len /* in dwords */,
@@ -468,7 +452,7 @@ static int ct_write(struct intel_guc_ct *ct,
* make sure H2G buffer update and LRC tail update (if this triggering a
* submission) are visible before updating the descriptor tail
*/
- write_barrier(ct);
+ intel_guc_write_barrier(ct_to_guc(ct));
/* update local copies */
ctb->tail = tail;
@@ -522,9 +506,6 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
#undef done
- if (unlikely(err))
- DRM_ERROR("CT: fence %u err %d\n", req->fence, err);
-
*status = req->status;
return err;
}
@@ -722,8 +703,11 @@ retry:
err = wait_for_ct_request_update(&request, status);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
- if (unlikely(err))
+ if (unlikely(err)) {
+ CT_ERROR(ct, "No response for request %#x (fence %u)\n",
+ action[0], request.fence);
goto unlink;
+ }
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
err = -EIO;
@@ -775,8 +759,8 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
- CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
- action[0], ret, status);
+ CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
+ action[0], ERR_PTR(ret), status);
} else if (unlikely(ret)) {
CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
action[0], ret, ret);
@@ -1042,9 +1026,9 @@ static void ct_incoming_request_worker_func(struct work_struct *w)
container_of(w, struct intel_guc_ct, requests.worker);
bool done;
- done = ct_process_incoming_requests(ct);
- if (!done)
- queue_work(system_unbound_wq, &ct->requests.worker);
+ do {
+ done = ct_process_incoming_requests(ct);
+ } while (!done);
}
static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
index 887c8c8f35db..25f09a420561 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
@@ -5,14 +5,14 @@
#include <drm/drm_print.h>
-#include "gt/debugfs_gt.h"
+#include "gt/intel_gt_debugfs.h"
+#include "gt/uc/intel_guc_ads.h"
+#include "gt/uc/intel_guc_ct.h"
+#include "gt/uc/intel_guc_slpc.h"
+#include "gt/uc/intel_guc_submission.h"
#include "intel_guc.h"
#include "intel_guc_debugfs.h"
#include "intel_guc_log_debugfs.h"
-#include "gt/uc/intel_guc_ct.h"
-#include "gt/uc/intel_guc_ads.h"
-#include "gt/uc/intel_guc_submission.h"
-#include "gt/uc/intel_guc_slpc.h"
static int guc_info_show(struct seq_file *m, void *data)
{
@@ -35,7 +35,7 @@ static int guc_info_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_info);
static int guc_registered_contexts_show(struct seq_file *m, void *data)
{
@@ -49,7 +49,7 @@ static int guc_registered_contexts_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_registered_contexts);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_registered_contexts);
static int guc_slpc_info_show(struct seq_file *m, void *unused)
{
@@ -62,7 +62,7 @@ static int guc_slpc_info_show(struct seq_file *m, void *unused)
return intel_guc_slpc_print_info(slpc, &p);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_slpc_info);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_slpc_info);
static bool intel_eval_slpc_support(void *data)
{
@@ -73,7 +73,7 @@ static bool intel_eval_slpc_support(void *data)
void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "guc_info", &guc_info_fops, NULL },
{ "guc_registered_contexts", &guc_registered_contexts_fops, NULL },
{ "guc_slpc_info", &guc_slpc_info_fops, &intel_eval_slpc_support},
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 76fe766ad1bc..196424be0998 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -41,18 +41,21 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
}
/* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
- struct intel_uncore *uncore)
+static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
{
u32 rsa[UOS_RSA_SCRATCH_COUNT];
size_t copied;
int i;
copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
- GEM_BUG_ON(copied < sizeof(rsa));
+ if (copied < sizeof(rsa))
+ return -ENOMEM;
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
+
+ return 0;
}
/*
@@ -141,7 +144,9 @@ int intel_guc_fw_upload(struct intel_guc *guc)
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
- guc_xfer_rsa(&guc->fw, uncore);
+ ret = guc_xfer_rsa(&guc->fw, uncore);
+ if (ret)
+ goto out;
/*
* Current uCode expects the code to be loaded at 8k; locations below
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index fa4be13c8854..722933e26347 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -52,27 +52,27 @@
#define GUC_DOORBELL_INVALID 256
-#define GUC_WQ_SIZE (PAGE_SIZE * 2)
-
-/* Work queue item header definitions */
+/*
+ * Work queue item header definitions
+ *
+ * The work queue is a circular buffer used to submit complex (multi-lrc)
+ * workloads to the GuC. A work queue item is an entry in the circular buffer.
+ */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
#define WQ_STATUS_CMD_ERROR 3
#define WQ_STATUS_ENGINE_ID_NOT_USED 4
#define WQ_STATUS_SUSPENDED_FROM_RESET 5
-#define WQ_TYPE_SHIFT 0
-#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT)
-#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT)
-#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT)
-#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT)
-#define WQ_TARGET_SHIFT 10
-#define WQ_LEN_SHIFT 16
-#define WQ_NO_WCFLUSH_WAIT (1 << 27)
-#define WQ_PRESENT_WORKLOAD (1 << 28)
-
-#define WQ_RING_TAIL_SHIFT 20
-#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
-#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
+#define WQ_TYPE_BATCH_BUF 0x1
+#define WQ_TYPE_PSEUDO 0x2
+#define WQ_TYPE_INORDER 0x3
+#define WQ_TYPE_NOOP 0x4
+#define WQ_TYPE_MULTI_LRC 0x5
+#define WQ_TYPE_MASK GENMASK(7, 0)
+#define WQ_LEN_MASK GENMASK(26, 16)
+
+#define WQ_GUC_ID_MASK GENMASK(15, 0)
+#define WQ_RING_TAIL_MASK GENMASK(28, 18)
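These masks pair with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>; for example, the header dword of a multi-lrc work queue item is assembled later in this patch as:

	u32 header = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
		     FIELD_PREP(WQ_LEN_MASK, len_dw);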
#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
@@ -186,7 +186,7 @@ struct guc_process_desc {
u32 wq_status;
u32 engine_presence;
u32 priority;
- u32 reserved[30];
+ u32 reserved[36];
} __packed;
#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
index 64e0b86bf258..46026c2c1722 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
@@ -6,7 +6,7 @@
#include <linux/fs.h>
#include <drm/drm_print.h>
-#include "gt/debugfs_gt.h"
+#include "gt/intel_gt_debugfs.h"
#include "intel_guc.h"
#include "intel_guc_log.h"
#include "intel_guc_log_debugfs.h"
@@ -17,7 +17,7 @@ static int guc_log_dump_show(struct seq_file *m, void *data)
return intel_guc_log_dump(m->private, &p, false);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_log_dump);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_log_dump);
static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
{
@@ -25,7 +25,7 @@ static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
return intel_guc_log_dump(m->private, &p, true);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump);
static int guc_log_level_get(void *data, u64 *val)
{
@@ -109,7 +109,7 @@ static const struct file_operations guc_log_relay_fops = {
void intel_guc_log_debugfs_register(struct intel_guc_log *log,
struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "guc_log_dump", &guc_log_dump_fops, NULL },
{ "guc_load_err_log_dump", &guc_load_err_log_dump_fops, NULL },
{ "guc_log_level", &guc_log_level_fops, NULL },
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 87d8dc8f51b9..d7710debcd47 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -11,6 +11,7 @@
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm.h"
@@ -28,21 +29,6 @@
/**
* DOC: GuC-based command submission
*
- * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
- * firmware is moving to an updated submission interface and we plan to
- * turn submission back on when that lands. The below documentation (and related
- * code) matches the old submission model and will be updated as part of the
- * upgrade to the new flow.
- *
- * GuC stage descriptor:
- * During initialization, the driver allocates a static pool of 1024 such
- * descriptors, and shares them with the GuC. Currently, we only use one
- * descriptor. This stage descriptor lets the GuC know about the workqueue and
- * process descriptor. Theoretically, it also lets the GuC know about our HW
- * contexts (context ID, etc...), but we actually employ a kind of submission
- * where the GuC uses the LRCA sent via the work item instead. This is called
- * a "proxy" submission.
- *
* The Scratch registers:
* There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
* a value to the action register (SOFT_SCRATCH_0) along with any data. It then
@@ -51,14 +37,85 @@
* processes the request. The kernel driver polls waiting for this update and
* then proceeds.
*
- * Work Items:
- * There are several types of work items that the host may place into a
- * workqueue, each with its own requirements and limitations. Currently only
- * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
- * represents in-order queue. The kernel driver packs ring tail pointer and an
- * ELSP context descriptor dword into Work Item.
- * See guc_add_request()
+ * Command Transport buffers (CTBs):
+ * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host
+ * - G2H) are a message interface between the i915 and GuC.
+ *
+ * Context registration:
+ * Before a context can be submitted it must be registered with the GuC via a
+ * H2G. A unique guc_id is associated with each context. The context is either
+ * registered at request creation time (normal operation) or at submission time
+ * (abnormal operation, e.g. after a reset).
+ *
+ * Context submission:
+ * The i915 updates the LRC tail value in memory. The i915 must enable the
+ * scheduling of the context within the GuC for the GuC to actually consider it.
+ * Therefore, the first time a disabled context is submitted we use a schedule
+ * enable H2G, while follow up submissions are done via the context submit H2G,
+ * which informs the GuC that a previously enabled context has new work
+ * available.
+ *
+ * Context unpin:
+ * To unpin a context a H2G is used to disable scheduling. When the
+ * corresponding G2H returns indicating the scheduling disable operation has
+ * completed it is safe to unpin the context. While a disable is in flight it
+ * isn't safe to resubmit the context so a fence is used to stall all future
+ * requests of that context until the G2H is returned.
+ *
+ * Context deregistration:
+ * Before a context can be destroyed or if we steal its guc_id we must
+ * deregister the context with the GuC via H2G. If stealing the guc_id it isn't
+ * safe to submit anything to this guc_id until the deregister completes so a
+ * fence is used to stall all requests associated with this guc_id until the
+ * corresponding G2H returns indicating the guc_id has been deregistered.
+ *
+ * submission_state.guc_ids:
+ * Unique number associated with private GuC context data passed in during
+ * context registration / submission / deregistration. 64k are available. A
+ * simple ida is used for allocation.
+ *
+ * Stealing guc_ids:
+ * If no guc_ids are available they can be stolen from another context at
+ * request creation time if that context is unpinned. If a guc_id can't be found
+ * we punt this problem to the user as we believe this is near impossible to hit
+ * during normal use cases.
+ *
+ * Locking:
+ * In the GuC submission code we have 3 basic spin locks which protect
+ * everything. Details about each below.
+ *
+ * sched_engine->lock
+ * This is the submission lock for all contexts that share an i915 schedule
+ * engine (sched_engine), thus only one of the contexts which share a
+ * sched_engine can be submitting at a time. Currently only one sched_engine is
+ * used for all of GuC submission but that could change in the future.
+ *
+ * guc->submission_state.lock
+ * Global lock for GuC submission state. Protects guc_ids and destroyed contexts
+ * list.
+ *
+ * ce->guc_state.lock
+ * Protects everything under ce->guc_state. Ensures that a context is in the
+ * correct state before issuing a H2G. e.g. We don't issue a schedule disable
+ * on a disabled context (bad idea), we don't issue a schedule enable when a
+ * schedule disable is in flight, etc... Also protects list of inflight requests
+ * on the context and the priority management state. Lock is individual to each
+ * context.
+ *
+ * Lock ordering rules:
+ * sched_engine->lock -> ce->guc_state.lock
+ * guc->submission_state.lock -> ce->guc_state.lock
*
+ * Reset races:
+ * When a full GT reset is triggered it is assumed that some G2H responses to
+ * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
+ * fatal as we do certain operations upon receiving a G2H (e.g. destroy
+ * contexts, release guc_ids, etc...). When this occurs we can scrub the
+ * context state and clean up appropriately; however, this is quite racy.
+ * To avoid races, the reset code must disable submission before scrubbing for
+ * the missing G2H, while the submission code must check for submission being
+ * disabled and skip sending H2Gs and updating context states when it is. Both
+ * sides must also make sure to hold the relevant locks.
*/
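The ordering rules above imply lock nesting of the following shape (an illustrative sketch, not a real call site; real callers use the irqsave variants where required):

	spin_lock(&sched_engine->lock);         /* outer: submission path   */
	spin_lock(&ce->guc_state.lock);         /* inner: per-context state */
	/* ... check state, issue H2G, update sched_state ... */
	spin_unlock(&ce->guc_state.lock);
	spin_unlock(&sched_engine->lock);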
/* GuC Virtual Engine */
@@ -68,91 +125,56 @@ struct guc_virtual_engine {
};
static struct intel_context *
-guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
+guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags);
+
+static struct intel_context *
+guc_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width);
#define GUC_REQUEST_SIZE 64 /* bytes */
/*
- * Below is a set of functions which control the GuC scheduling state which do
- * not require a lock as all state transitions are mutually exclusive. i.e. It
- * is not possible for the context pinning code and submission, for the same
- * context, to be executing simultaneously. We still need an atomic as it is
- * possible for some of the bits to changing at the same time though.
+ * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
+ * per the GuC submission interface. A different allocation algorithm is used
+ * (bitmap vs. ida) between multi-lrc and single-lrc, hence the need to
+ * partition the guc_id space. We believe the number of multi-lrc contexts in
+ * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
+ * multi-lrc.
*/
-#define SCHED_STATE_NO_LOCK_ENABLED BIT(0)
-#define SCHED_STATE_NO_LOCK_PENDING_ENABLE BIT(1)
-#define SCHED_STATE_NO_LOCK_REGISTERED BIT(2)
-static inline bool context_enabled(struct intel_context *ce)
-{
- return (atomic_read(&ce->guc_sched_state_no_lock) &
- SCHED_STATE_NO_LOCK_ENABLED);
-}
-
-static inline void set_context_enabled(struct intel_context *ce)
-{
- atomic_or(SCHED_STATE_NO_LOCK_ENABLED, &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_enabled(struct intel_context *ce)
-{
- atomic_and((u32)~SCHED_STATE_NO_LOCK_ENABLED,
- &ce->guc_sched_state_no_lock);
-}
-
-static inline bool context_pending_enable(struct intel_context *ce)
-{
- return (atomic_read(&ce->guc_sched_state_no_lock) &
- SCHED_STATE_NO_LOCK_PENDING_ENABLE);
-}
-
-static inline void set_context_pending_enable(struct intel_context *ce)
-{
- atomic_or(SCHED_STATE_NO_LOCK_PENDING_ENABLE,
- &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_pending_enable(struct intel_context *ce)
-{
- atomic_and((u32)~SCHED_STATE_NO_LOCK_PENDING_ENABLE,
- &ce->guc_sched_state_no_lock);
-}
-
-static inline bool context_registered(struct intel_context *ce)
-{
- return (atomic_read(&ce->guc_sched_state_no_lock) &
- SCHED_STATE_NO_LOCK_REGISTERED);
-}
-
-static inline void set_context_registered(struct intel_context *ce)
-{
- atomic_or(SCHED_STATE_NO_LOCK_REGISTERED,
- &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_registered(struct intel_context *ce)
-{
- atomic_and((u32)~SCHED_STATE_NO_LOCK_REGISTERED,
- &ce->guc_sched_state_no_lock);
-}
+#define NUMBER_MULTI_LRC_GUC_ID (GUC_MAX_LRC_DESCRIPTORS / 16)
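Assuming GUC_MAX_LRC_DESCRIPTORS is 64k (as the DOC comment above states), the reservation works out as follows:

	/* ~65536 / 16 = ~4096 guc_ids for multi-lrc (bitmap allocator),
	 * leaving ~60k single-lrc guc_ids for the ida allocator. */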
/*
* Below is a set of functions which control the GuC scheduling state which
- * require a lock, aside from the special case where the functions are called
- * from guc_lrc_desc_pin(). In that case it isn't possible for any other code
- * path to be executing on the context.
+ * require a lock.
*/
#define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0)
#define SCHED_STATE_DESTROYED BIT(1)
#define SCHED_STATE_PENDING_DISABLE BIT(2)
#define SCHED_STATE_BANNED BIT(3)
-#define SCHED_STATE_BLOCKED_SHIFT 4
+#define SCHED_STATE_ENABLED BIT(4)
+#define SCHED_STATE_PENDING_ENABLE BIT(5)
+#define SCHED_STATE_REGISTERED BIT(6)
+#define SCHED_STATE_BLOCKED_SHIFT 7
#define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
#define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
+
static inline void init_sched_state(struct intel_context *ce)
{
- /* Only should be called from guc_lrc_desc_pin() */
- atomic_set(&ce->guc_sched_state_no_lock, 0);
- ce->guc_state.sched_state = 0;
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
+}
+
+__maybe_unused
+static bool sched_state_is_init(struct intel_context *ce)
+{
+ /*
+ * XXX: Kernel contexts can have SCHED_STATE_REGISTERED set after
+ * suspend.
+ */
+ return !(ce->guc_state.sched_state &
+ ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
}
static inline bool
@@ -165,7 +187,7 @@ context_wait_for_deregister_to_register(struct intel_context *ce)
static inline void
set_context_wait_for_deregister_to_register(struct intel_context *ce)
{
- /* Only should be called from guc_lrc_desc_pin() without lock */
+ lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |=
SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
}
@@ -225,6 +247,57 @@ static inline void clr_context_banned(struct intel_context *ce)
ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
}
+static inline bool context_enabled(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
+}
+
+static inline void set_context_enabled(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
+}
+
+static inline void clr_context_enabled(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
+}
+
+static inline bool context_pending_enable(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline void set_context_pending_enable(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline void clr_context_pending_enable(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline bool context_registered(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
+}
+
+static inline void set_context_registered(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
+}
+
+static inline void clr_context_registered(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
+}
+
static inline u32 context_blocked(struct intel_context *ce)
{
return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
@@ -233,7 +306,6 @@ static inline u32 context_blocked(struct intel_context *ce)
static inline void incr_context_blocked(struct intel_context *ce)
{
- lockdep_assert_held(&ce->engine->sched_engine->lock);
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
@@ -243,7 +315,6 @@ static inline void incr_context_blocked(struct intel_context *ce)
static inline void decr_context_blocked(struct intel_context *ce)
{
- lockdep_assert_held(&ce->engine->sched_engine->lock);
lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
@@ -251,14 +322,39 @@ static inline void decr_context_blocked(struct intel_context *ce)
ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
}
+static inline bool context_has_committed_requests(struct intel_context *ce)
+{
+ return !!ce->guc_state.number_committed_requests;
+}
+
+static inline void incr_context_committed_requests(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ++ce->guc_state.number_committed_requests;
+ GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static inline void decr_context_committed_requests(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ --ce->guc_state.number_committed_requests;
+ GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static struct intel_context *
+request_to_scheduling_context(struct i915_request *rq)
+{
+ return intel_context_to_parent(rq->context);
+}
+
static inline bool context_guc_id_invalid(struct intel_context *ce)
{
- return ce->guc_id == GUC_INVALID_LRC_ID;
+ return ce->guc_id.id == GUC_INVALID_LRC_ID;
}
static inline void set_context_guc_id_invalid(struct intel_context *ce)
{
- ce->guc_id = GUC_INVALID_LRC_ID;
+ ce->guc_id.id = GUC_INVALID_LRC_ID;
}
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
@@ -271,6 +367,104 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
return rb_entry(rb, struct i915_priolist, node);
}
+/*
+ * When using multi-lrc submission a scratch memory area is reserved in the
+ * parent's context state for the process descriptor, work queue, and handshake
+ * between the parent + children contexts to insert safe preemption points
+ * between each of the BBs. Currently the scratch area is sized to a page.
+ *
+ * The layout of this scratch area is below:
+ * 0 guc_process_desc
+ * + sizeof(struct guc_process_desc) child go
+ * + CACHELINE_BYTES child join[0]
+ * ...
+ * + CACHELINE_BYTES child join[n - 1]
+ * ... unused
+ * PARENT_SCRATCH_SIZE / 2 work queue start
+ * ... work queue
+ * PARENT_SCRATCH_SIZE - 1 work queue end
+ */
+#define WQ_SIZE (PARENT_SCRATCH_SIZE / 2)
+#define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE)
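Assuming PARENT_SCRATCH_SIZE is a single 4 KiB page (the layout comment above says the scratch area is currently sized to a page):

	/*
	 * WQ_SIZE   = 4096 / 2    = 2048 bytes
	 * WQ_OFFSET = 4096 - 2048 = 2048 bytes
	 * i.e. the work queue occupies the second half of the scratch page.
	 */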
+
+struct sync_semaphore {
+ u32 semaphore;
+ u8 unused[CACHELINE_BYTES - sizeof(u32)];
+};
+
+struct parent_scratch {
+ struct guc_process_desc pdesc;
+
+ struct sync_semaphore go;
+ struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
+
+ u8 unused[WQ_OFFSET - sizeof(struct guc_process_desc) -
+ sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
+
+ u32 wq[WQ_SIZE / sizeof(u32)];
+};
+
+static u32 __get_parent_scratch_offset(struct intel_context *ce)
+{
+ GEM_BUG_ON(!ce->parallel.guc.parent_page);
+
+ return ce->parallel.guc.parent_page * PAGE_SIZE;
+}
+
+static u32 __get_wq_offset(struct intel_context *ce)
+{
+ BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
+
+ return __get_parent_scratch_offset(ce) + WQ_OFFSET;
+}
+
+static struct parent_scratch *
+__get_parent_scratch(struct intel_context *ce)
+{
+ BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
+ BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
+
+ /*
+ * Need to subtract LRC_STATE_OFFSET here as the
+ * parallel.guc.parent_page is the offset into ce->state while
+ * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
+ */
+ return (struct parent_scratch *)
+ (ce->lrc_reg_state +
+ ((__get_parent_scratch_offset(ce) -
+ LRC_STATE_OFFSET) / sizeof(u32)));
+}
+
+static struct guc_process_desc *
+__get_process_desc(struct intel_context *ce)
+{
+ struct parent_scratch *ps = __get_parent_scratch(ce);
+
+ return &ps->pdesc;
+}
+
+static u32 *get_wq_pointer(struct guc_process_desc *desc,
+ struct intel_context *ce,
+ u32 wqi_size)
+{
+ /*
+ * Check for space in the work queue. We cache the head pointer in the
+ * intel_context structure in order to reduce the number of accesses to
+ * shared GPU memory, which may be across a PCIe bus.
+ */
+#define AVAILABLE_SPACE \
+ CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
+ if (wqi_size > AVAILABLE_SPACE) {
+ ce->parallel.guc.wqi_head = READ_ONCE(desc->head);
+
+ if (wqi_size > AVAILABLE_SPACE)
+ return NULL;
+ }
+#undef AVAILABLE_SPACE
+
+ return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
+}
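CIRC_SPACE() from <linux/circ_buf.h> yields how many bytes can be written into the ring before the write index catches up with the read index, so the caching strategy above amounts to a two-step check (illustrative sketch):

	space = CIRC_SPACE(wqi_tail, cached_head, WQ_SIZE);
	if (wqi_size > space) {
		cached_head = READ_ONCE(desc->head);  /* refresh from shared memory */
		space = CIRC_SPACE(wqi_tail, cached_head, WQ_SIZE);
		if (wqi_size > space)
			return NULL;                  /* still full, caller backs off */
	}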
+
static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
{
struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
@@ -352,20 +546,29 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
+static void decr_outstanding_submission_g2h(struct intel_guc *guc)
+{
+ if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
+ wake_up_all(&guc->ct.wq);
+}
+
static int guc_submission_send_busy_loop(struct intel_guc *guc,
const u32 *action,
u32 len,
u32 g2h_len_dw,
bool loop)
{
- int err;
-
- err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+ /*
+ * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
+ * so we don't handle the case where we don't get a reply because we
+ * aborted the send due to the channel being busy.
+ */
+ GEM_BUG_ON(g2h_len_dw && !loop);
- if (!err && g2h_len_dw)
+ if (g2h_len_dw)
atomic_inc(&guc->outstanding_submission_g2h);
- return err;
+ return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
}
int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
@@ -421,15 +624,17 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
-static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
+static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
int err = 0;
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
u32 action[3];
int len = 0;
u32 g2h_len_dw = 0;
bool enabled;
+ lockdep_assert_held(&rq->engine->sched_engine->lock);
+
/*
* Corner case where requests were sitting in the priority list or a
* request resubmitted after the context was banned.
@@ -437,41 +642,34 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
if (unlikely(intel_context_is_banned(ce))) {
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(ce->engine);
- goto out;
+ return 0;
}
- GEM_BUG_ON(!atomic_read(&ce->guc_id_ref));
+ GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(ce));
- /*
- * Corner case where the GuC firmware was blown away and reloaded while
- * this context was pinned.
- */
- if (unlikely(!lrc_desc_registered(guc, ce->guc_id))) {
- err = guc_lrc_desc_pin(ce, false);
- if (unlikely(err))
- goto out;
- }
+ spin_lock(&ce->guc_state.lock);
/*
* The request / context will be run on the hardware when scheduling
- * gets enabled in the unblock.
+ * gets enabled in the unblock. For multi-lrc we still submit the
+ * context to move the LRC tails.
*/
- if (unlikely(context_blocked(ce)))
+ if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
goto out;
- enabled = context_enabled(ce);
+ enabled = context_enabled(ce) || context_blocked(ce);
if (!enabled) {
action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
- action[len++] = ce->guc_id;
+ action[len++] = ce->guc_id.id;
action[len++] = GUC_CONTEXT_ENABLE;
set_context_pending_enable(ce);
intel_context_get(ce);
g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
} else {
action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
- action[len++] = ce->guc_id;
+ action[len++] = ce->guc_id.id;
}
err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
@@ -479,6 +677,18 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
trace_intel_context_sched_enable(ce);
atomic_inc(&guc->outstanding_submission_g2h);
set_context_enabled(ce);
+
+ /*
+ * Without multi-lrc KMD does the submission step (moving the
+ * lrc tail) so enabling scheduling is sufficient to submit the
+ * context. This isn't the case in multi-lrc submission as the
+ * GuC needs to move the tails, hence the need for another H2G
+ * to submit a multi-lrc context after enabling scheduling.
+ */
+ if (intel_context_is_parent(ce)) {
+ action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
+ err = intel_guc_send_nb(guc, action, len - 1, 0);
+ }
} else if (!enabled) {
clr_context_pending_enable(ce);
intel_context_put(ce);
@@ -487,9 +697,22 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
trace_i915_request_guc_submit(rq);
out:
+ spin_unlock(&ce->guc_state.lock);
return err;
}
+static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
+{
+ int ret = __guc_add_request(guc, rq);
+
+ if (unlikely(ret == -EBUSY)) {
+ guc->stalled_request = rq;
+ guc->submission_stall_reason = STALL_ADD_REQUEST;
+ }
+
+ return ret;
+}
+
static inline void guc_set_lrc_tail(struct i915_request *rq)
{
rq->context->lrc_reg_state[CTX_RING_TAIL] =
@@ -501,6 +724,135 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority;
}
+static bool is_multi_lrc_rq(struct i915_request *rq)
+{
+ return intel_context_is_parallel(rq->context);
+}
+
+static bool can_merge_rq(struct i915_request *rq,
+ struct i915_request *last)
+{
+ return request_to_scheduling_context(rq) ==
+ request_to_scheduling_context(last);
+}
+
+static u32 wq_space_until_wrap(struct intel_context *ce)
+{
+ return (WQ_SIZE - ce->parallel.guc.wqi_tail);
+}
+
+static void write_wqi(struct guc_process_desc *desc,
+ struct intel_context *ce,
+ u32 wqi_size)
+{
+ BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
+
+ /*
+ * Ensure WQIs are visible before updating the tail
+ */
+ intel_guc_write_barrier(ce_to_guc(ce));
+
+ ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
+ (WQ_SIZE - 1);
+ WRITE_ONCE(desc->tail, ce->parallel.guc.wqi_tail);
+}
+
+static int guc_wq_noop_append(struct intel_context *ce)
+{
+ struct guc_process_desc *desc = __get_process_desc(ce);
+ u32 *wqi = get_wq_pointer(desc, ce, wq_space_until_wrap(ce));
+ u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
+
+ if (!wqi)
+ return -EBUSY;
+
+ GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
+
+ *wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
+ FIELD_PREP(WQ_LEN_MASK, len_dw);
+ ce->parallel.guc.wqi_tail = 0;
+
+ return 0;
+}
+
+static int __guc_wq_item_append(struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+ struct intel_context *child;
+ struct guc_process_desc *desc = __get_process_desc(ce);
+ unsigned int wqi_size = (ce->parallel.number_children + 4) *
+ sizeof(u32);
+ u32 *wqi;
+ u32 len_dw = (wqi_size / sizeof(u32)) - 1;
+ int ret;
+
+ /* Ensure the context is in the correct state before updating the work queue */
+ GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
+ GEM_BUG_ON(context_guc_id_invalid(ce));
+ GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
+ GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
+
+ /* Insert NOOP if this work queue item will wrap the tail pointer. */
+ if (wqi_size > wq_space_until_wrap(ce)) {
+ ret = guc_wq_noop_append(ce);
+ if (ret)
+ return ret;
+ }
+
+ wqi = get_wq_pointer(desc, ce, wqi_size);
+ if (!wqi)
+ return -EBUSY;
+
+ GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
+
+ *wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
+ FIELD_PREP(WQ_LEN_MASK, len_dw);
+ *wqi++ = ce->lrc.lrca;
+ *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
+ FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
+ *wqi++ = 0; /* fence_id */
+ for_each_child(ce, child)
+ *wqi++ = child->ring->tail / sizeof(u64);
+
+ write_wqi(desc, ce, wqi_size);
+
+ return 0;
+}
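
For reference, the dword layout of the MULTI_LRC work queue item emitted above, as read off the code (not taken from a GuC interface spec), for a parent with N children:

/*
 *   dw0:            WQ_TYPE_MULTI_LRC | len_dw (N + 3 dwords follow)
 *   dw1:            parent LRC address (ce->lrc.lrca)
 *   dw2:            parent guc_id | parent ring tail, in u64 words
 *   dw3:            fence_id (currently 0)
 *   dw4..dw4+N-1:   child ring tails, in u64 words
 */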
+
+static int guc_wq_item_append(struct intel_guc *guc,
+ struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+ int ret = 0;
+
+ if (likely(!intel_context_is_banned(ce))) {
+ ret = __guc_wq_item_append(rq);
+
+ if (unlikely(ret == -EBUSY)) {
+ guc->stalled_request = rq;
+ guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
+ }
+ }
+
+ return ret;
+}
+
+static bool multi_lrc_submit(struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+
+ intel_ring_set_tail(rq->ring, rq->tail);
+
+ /*
+ * We expect the front end (execbuf IOCTL) to set this flag on the last
+ * request generated from a multi-BB submission. This indicates to the
+	 * backend (GuC interface) that we should submit this context, thus
+ * submitting all the requests generated in parallel.
+ */
+ return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
+ intel_context_is_banned(ce);
+}
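
A hedged sketch of the front-end side of this contract; the execbuf path (not shown in this hunk) would tag only the last request of a parallel submission roughly like so:

static void example_tag_parallel_request(struct i915_request *rq, bool is_last)
{
	if (is_last)
		set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags);
}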
+
static int guc_dequeue_one_context(struct intel_guc *guc)
{
struct i915_sched_engine * const sched_engine = guc->sched_engine;
@@ -514,7 +866,17 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
if (guc->stalled_request) {
submit = true;
last = guc->stalled_request;
- goto resubmit;
+
+ switch (guc->submission_stall_reason) {
+ case STALL_REGISTER_CONTEXT:
+ goto register_context;
+ case STALL_MOVE_LRC_TAIL:
+ goto move_lrc_tail;
+ case STALL_ADD_REQUEST:
+ goto add_request;
+ default:
+ MISSING_CASE(guc->submission_stall_reason);
+ }
}
while ((rb = rb_first_cached(&sched_engine->queue))) {
@@ -522,8 +884,8 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
struct i915_request *rq, *rn;
priolist_for_each_request_consume(rq, rn, p) {
- if (last && rq->context != last->context)
- goto done;
+ if (last && !can_merge_rq(rq, last))
+ goto register_context;
list_del_init(&rq->sched.link);
@@ -531,33 +893,84 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
trace_i915_request_in(rq, 0);
last = rq;
- submit = true;
+
+ if (is_multi_lrc_rq(rq)) {
+ /*
+ * We need to coalesce all multi-lrc requests in
+ * a relationship into a single H2G. We are
+ * guaranteed that all of these requests will be
+ * submitted sequentially.
+ */
+ if (multi_lrc_submit(rq)) {
+ submit = true;
+ goto register_context;
+ }
+ } else {
+ submit = true;
+ }
}
rb_erase_cached(&p->node, &sched_engine->queue);
i915_priolist_free(p);
}
-done:
+
+register_context:
if (submit) {
- guc_set_lrc_tail(last);
-resubmit:
+ struct intel_context *ce = request_to_scheduling_context(last);
+
+ if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
+ !intel_context_is_banned(ce))) {
+ ret = guc_lrc_desc_pin(ce, false);
+ if (unlikely(ret == -EPIPE)) {
+ goto deadlk;
+ } else if (ret == -EBUSY) {
+ guc->stalled_request = last;
+ guc->submission_stall_reason =
+ STALL_REGISTER_CONTEXT;
+ goto schedule_tasklet;
+ } else if (ret != 0) {
+ GEM_WARN_ON(ret); /* Unexpected */
+ goto deadlk;
+ }
+ }
+
+move_lrc_tail:
+ if (is_multi_lrc_rq(last)) {
+ ret = guc_wq_item_append(guc, last);
+ if (ret == -EBUSY) {
+ goto schedule_tasklet;
+ } else if (ret != 0) {
+ GEM_WARN_ON(ret); /* Unexpected */
+ goto deadlk;
+ }
+ } else {
+ guc_set_lrc_tail(last);
+ }
+
+add_request:
ret = guc_add_request(guc, last);
- if (unlikely(ret == -EPIPE))
+ if (unlikely(ret == -EPIPE)) {
+ goto deadlk;
+ } else if (ret == -EBUSY) {
+ goto schedule_tasklet;
+ } else if (ret != 0) {
+ GEM_WARN_ON(ret); /* Unexpected */
goto deadlk;
- else if (ret == -EBUSY) {
- tasklet_schedule(&sched_engine->tasklet);
- guc->stalled_request = last;
- return false;
}
}
guc->stalled_request = NULL;
+ guc->submission_stall_reason = STALL_NONE;
return submit;
deadlk:
sched_engine->tasklet.callback = NULL;
tasklet_disable_nosync(&sched_engine->tasklet);
return false;
+
+schedule_tasklet:
+ tasklet_schedule(&sched_engine->tasklet);
+ return false;
}
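
The switch at the top of guc_dequeue_one_context() assumes a stall-reason enum defined alongside struct intel_guc; a plausible shape, matching the names used above:

/* Sketch only: the real definition lives with struct intel_guc. */
enum {
	STALL_NONE,
	STALL_REGISTER_CONTEXT,
	STALL_MOVE_LRC_TAIL,
	STALL_ADD_REQUEST,
};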
static void guc_submission_tasklet(struct tasklet_struct *t)
@@ -596,10 +1009,18 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
unsigned long index, flags;
bool pending_disable, pending_enable, deregister, destroyed, banned;
+ xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
- /* Flush context */
- spin_lock_irqsave(&ce->guc_state.lock, flags);
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ /*
+		 * Corner case where the ref count on the object is zero but a
+		 * deregister G2H was lost. In this case we don't touch the ref
+ * count and finish the destroy of the context.
+ */
+ bool do_put = kref_get_unless_zero(&ce->ref);
+
+ xa_unlock(&guc->context_lookup);
+
+ spin_lock(&ce->guc_state.lock);
/*
* Once we are at this point submission_disabled() is guaranteed
@@ -615,11 +1036,16 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
banned = context_banned(ce);
init_sched_state(ce);
+ spin_unlock(&ce->guc_state.lock);
+
+ GEM_BUG_ON(!do_put && !destroyed);
+
if (pending_enable || destroyed || deregister) {
- atomic_dec(&guc->outstanding_submission_g2h);
+ decr_outstanding_submission_g2h(guc);
if (deregister)
guc_signal_context_fence(ce);
if (destroyed) {
+ intel_gt_pm_put_async(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@@ -635,14 +1061,20 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
intel_engine_signal_breadcrumbs(ce->engine);
}
intel_context_sched_disable_unpin(ce);
- atomic_dec(&guc->outstanding_submission_g2h);
- spin_lock_irqsave(&ce->guc_state.lock, flags);
+ decr_outstanding_submission_g2h(guc);
+
+ spin_lock(&ce->guc_state.lock);
guc_blocked_fence_complete(ce);
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ spin_unlock(&ce->guc_state.lock);
intel_context_put(ce);
}
+
+ if (do_put)
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
}
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
}
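
The drop-and-relock pattern above generalizes to any xarray whose entries may be freed concurrently: pin the entry with a reference before dropping the xa lock, then re-take the lock to continue the walk. Minimal sketch of the generic form, as also used in intel_guc_submission_reset() below:

static void example_walk(struct intel_guc *guc)
{
	struct intel_context *ce;
	unsigned long index, flags;

	xa_lock_irqsave(&guc->context_lookup, flags);
	xa_for_each(&guc->context_lookup, index, ce) {
		if (!kref_get_unless_zero(&ce->ref))
			continue;	/* entry is already on its way out */
		xa_unlock(&guc->context_lookup);

		/* safe to sleep or take other locks against ce here */

		intel_context_put(ce);
		xa_lock(&guc->context_lookup);
	}
	xa_unlock_irqrestore(&guc->context_lookup, flags);
}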
static inline bool
@@ -692,6 +1124,8 @@ static void guc_flush_submissions(struct intel_guc *guc)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
+static void guc_flush_destroyed_contexts(struct intel_guc *guc);
+
void intel_guc_submission_reset_prepare(struct intel_guc *guc)
{
int i;
@@ -710,6 +1144,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
guc_flush_submissions(guc);
+ guc_flush_destroyed_contexts(guc);
/*
* Handle any outstanding G2Hs before reset. Call IRQ handler directly
@@ -725,6 +1160,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
wait_for_reset(guc, &guc->outstanding_submission_g2h);
} while (!list_empty(&guc->ct.requests.incoming));
}
+
scrub_guc_desc_for_outstanding_g2h(guc);
}
@@ -796,16 +1232,14 @@ __unwind_incomplete_requests(struct intel_context *ce)
unsigned long flags;
spin_lock_irqsave(&sched_engine->lock, flags);
- spin_lock(&ce->guc_active.lock);
- list_for_each_entry_safe(rq, rn,
- &ce->guc_active.requests,
- sched.link) {
+ spin_lock(&ce->guc_state.lock);
+ list_for_each_entry_safe_reverse(rq, rn,
+ &ce->guc_state.requests,
+ sched.link) {
if (i915_request_completed(rq))
continue;
list_del_init(&rq->sched.link);
- spin_unlock(&ce->guc_active.lock);
-
__i915_request_unsubmit(rq);
/* Push the request back into the queue for later resubmission. */
@@ -816,64 +1250,111 @@ __unwind_incomplete_requests(struct intel_context *ce)
}
GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
- list_add_tail(&rq->sched.link, pl);
+ list_add(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
- spin_lock(&ce->guc_active.lock);
}
- spin_unlock(&ce->guc_active.lock);
+ spin_unlock(&ce->guc_state.lock);
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
static void __guc_reset_context(struct intel_context *ce, bool stalled)
{
+ bool local_stalled;
struct i915_request *rq;
+ unsigned long flags;
u32 head;
+ int i, number_children = ce->parallel.number_children;
+ bool skip = false;
+ struct intel_context *parent = ce;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
intel_context_get(ce);
/*
- * GuC will implicitly mark the context as non-schedulable
- * when it sends the reset notification. Make sure our state
- * reflects this change. The context will be marked enabled
- * on resubmission.
+ * GuC will implicitly mark the context as non-schedulable when it sends
+ * the reset notification. Make sure our state reflects this change. The
+ * context will be marked enabled on resubmission.
+ *
+ * XXX: If the context is reset as a result of the request cancellation
+ * this G2H is received after the schedule disable complete G2H which is
+ * wrong as this creates a race between the request cancellation code
+ * re-submitting the context and this G2H handler. This is a bug in the
+	 * GuC but can be worked around in the meantime by converting this to a
+ * NOP if a pending enable is in flight as this indicates that a request
+ * cancellation has occurred.
*/
- clr_context_enabled(ce);
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ if (likely(!context_pending_enable(ce)))
+ clr_context_enabled(ce);
+ else
+ skip = true;
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ if (unlikely(skip))
+ goto out_put;
- rq = intel_context_find_active_request(ce);
- if (!rq) {
- head = ce->ring->tail;
- stalled = false;
- goto out_replay;
- }
+ /*
+	 * For each context in the relationship, find the hanging request,
+	 * resetting each context / request as needed
+ */
+ for (i = 0; i < number_children + 1; ++i) {
+ if (!intel_context_is_pinned(ce))
+ goto next_context;
+
+ local_stalled = false;
+ rq = intel_context_find_active_request(ce);
+ if (!rq) {
+ head = ce->ring->tail;
+ goto out_replay;
+ }
- if (!i915_request_started(rq))
- stalled = false;
+ if (i915_request_started(rq))
+ local_stalled = true;
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
- head = intel_ring_wrap(ce->ring, rq->head);
- __i915_request_reset(rq, stalled);
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ head = intel_ring_wrap(ce->ring, rq->head);
+ __i915_request_reset(rq, local_stalled && stalled);
out_replay:
- guc_reset_state(ce, head, stalled);
- __unwind_incomplete_requests(ce);
- intel_context_put(ce);
+ guc_reset_state(ce, head, local_stalled && stalled);
+next_context:
+ if (i != number_children)
+ ce = list_next_entry(ce, parallel.child_link);
+ }
+
+ __unwind_incomplete_requests(parent);
+out_put:
+ intel_context_put(parent);
}
void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
{
struct intel_context *ce;
unsigned long index;
+ unsigned long flags;
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
return;
}
- xa_for_each(&guc->context_lookup, index, ce)
- if (intel_context_is_pinned(ce))
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ xa_for_each(&guc->context_lookup, index, ce) {
+ if (!kref_get_unless_zero(&ce->ref))
+ continue;
+
+ xa_unlock(&guc->context_lookup);
+
+ if (intel_context_is_pinned(ce) &&
+ !intel_context_is_child(ce))
__guc_reset_context(ce, stalled);
+ intel_context_put(ce);
+
+ xa_lock(&guc->context_lookup);
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+
/* GuC is blown away, drop all references to contexts */
xa_destroy(&guc->context_lookup);
}
@@ -886,10 +1367,10 @@ static void guc_cancel_context_requests(struct intel_context *ce)
/* Mark all executing requests as skipped. */
spin_lock_irqsave(&sched_engine->lock, flags);
- spin_lock(&ce->guc_active.lock);
- list_for_each_entry(rq, &ce->guc_active.requests, sched.link)
+ spin_lock(&ce->guc_state.lock);
+ list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
i915_request_put(i915_request_mark_eio(rq));
- spin_unlock(&ce->guc_active.lock);
+ spin_unlock(&ce->guc_state.lock);
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
@@ -948,11 +1429,25 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
{
struct intel_context *ce;
unsigned long index;
+ unsigned long flags;
+
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ xa_for_each(&guc->context_lookup, index, ce) {
+ if (!kref_get_unless_zero(&ce->ref))
+ continue;
- xa_for_each(&guc->context_lookup, index, ce)
- if (intel_context_is_pinned(ce))
+ xa_unlock(&guc->context_lookup);
+
+ if (intel_context_is_pinned(ce) &&
+ !intel_context_is_child(ce))
guc_cancel_context_requests(ce);
+ intel_context_put(ce);
+
+ xa_lock(&guc->context_lookup);
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+
guc_cancel_sched_engine_requests(guc->sched_engine);
/* GuC is blown away, drop all references to contexts */
@@ -981,6 +1476,8 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
intel_gt_unpark_heartbeats(guc_to_gt(guc));
}
+static void destroyed_worker_func(struct work_struct *w);
+
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
@@ -1003,9 +1500,17 @@ int intel_guc_submission_init(struct intel_guc *guc)
xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
- spin_lock_init(&guc->contexts_lock);
- INIT_LIST_HEAD(&guc->guc_id_list);
- ida_init(&guc->guc_ids);
+ spin_lock_init(&guc->submission_state.lock);
+ INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
+ ida_init(&guc->submission_state.guc_ids);
+ INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
+ INIT_WORK(&guc->submission_state.destroyed_worker,
+ destroyed_worker_func);
+
+ guc->submission_state.guc_ids_bitmap =
+ bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID, GFP_KERNEL);
+ if (!guc->submission_state.guc_ids_bitmap)
+ return -ENOMEM;
return 0;
}
@@ -1015,8 +1520,10 @@ void intel_guc_submission_fini(struct intel_guc *guc)
if (!guc->lrc_desc_pool)
return;
+ guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy(guc);
i915_sched_engine_put(guc->sched_engine);
+ bitmap_free(guc->submission_state.guc_ids_bitmap);
}
static inline void queue_request(struct i915_sched_engine *sched_engine,
@@ -1027,21 +1534,28 @@ static inline void queue_request(struct i915_sched_engine *sched_engine,
list_add_tail(&rq->sched.link,
i915_sched_lookup_priolist(sched_engine, prio));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ tasklet_hi_schedule(&sched_engine->tasklet);
}
static int guc_bypass_tasklet_submit(struct intel_guc *guc,
struct i915_request *rq)
{
- int ret;
+ int ret = 0;
__i915_request_submit(rq);
trace_i915_request_in(rq, 0);
- guc_set_lrc_tail(rq);
- ret = guc_add_request(guc, rq);
- if (ret == -EBUSY)
- guc->stalled_request = rq;
+ if (is_multi_lrc_rq(rq)) {
+ if (multi_lrc_submit(rq)) {
+ ret = guc_wq_item_append(guc, rq);
+ if (!ret)
+ ret = guc_add_request(guc, rq);
+ }
+ } else {
+ guc_set_lrc_tail(rq);
+ ret = guc_add_request(guc, rq);
+ }
if (unlikely(ret == -EPIPE))
disable_submission(guc);
@@ -1049,6 +1563,16 @@ static int guc_bypass_tasklet_submit(struct intel_guc *guc,
return ret;
}
+static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
+{
+ struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
+ struct intel_context *ce = request_to_scheduling_context(rq);
+
+ return submission_disabled(guc) || guc->stalled_request ||
+ !i915_sched_engine_is_empty(sched_engine) ||
+ !lrc_desc_registered(guc, ce->guc_id.id);
+}
+
static void guc_submit_request(struct i915_request *rq)
{
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
@@ -1058,8 +1582,7 @@ static void guc_submit_request(struct i915_request *rq)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&sched_engine->lock, flags);
- if (submission_disabled(guc) || guc->stalled_request ||
- !i915_sched_engine_is_empty(sched_engine))
+ if (need_tasklet(guc, rq))
queue_request(sched_engine, rq, rq_prio(rq));
else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
tasklet_hi_schedule(&sched_engine->tasklet);
@@ -1067,72 +1590,117 @@ static void guc_submit_request(struct i915_request *rq)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
-static int new_guc_id(struct intel_guc *guc)
+static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
- return ida_simple_get(&guc->guc_ids, 0,
- GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL |
- __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ int ret;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ if (intel_context_is_parent(ce))
+ ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
+ NUMBER_MULTI_LRC_GUC_ID,
+ order_base_2(ce->parallel.number_children
+ + 1));
+ else
+ ret = ida_simple_get(&guc->submission_state.guc_ids,
+ NUMBER_MULTI_LRC_GUC_ID,
+ GUC_MAX_LRC_DESCRIPTORS,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ce->guc_id.id = ret;
+ return 0;
}
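
A worked example of the sizing above: a parent with 3 children needs 4 consecutive guc_ids, and order_base_2(3 + 1) == 2, so a naturally aligned 2^2-id region is carved out of the multi-lrc bitmap:

/* Sketch only: parent + children reserve one contiguous id region. */
static int example_reserve_parallel_ids(unsigned long *bitmap,
					int number_children)
{
	/* returns the first id of the region, or a negative error */
	return bitmap_find_free_region(bitmap, NUMBER_MULTI_LRC_GUC_ID,
				       order_base_2(number_children + 1));
}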
static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
+ GEM_BUG_ON(intel_context_is_child(ce));
+
if (!context_guc_id_invalid(ce)) {
- ida_simple_remove(&guc->guc_ids, ce->guc_id);
- reset_lrc_desc(guc, ce->guc_id);
+ if (intel_context_is_parent(ce))
+ bitmap_release_region(guc->submission_state.guc_ids_bitmap,
+ ce->guc_id.id,
+ order_base_2(ce->parallel.number_children
+ + 1));
+ else
+ ida_simple_remove(&guc->submission_state.guc_ids,
+ ce->guc_id.id);
+ reset_lrc_desc(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
}
- if (!list_empty(&ce->guc_id_link))
- list_del_init(&ce->guc_id_link);
+ if (!list_empty(&ce->guc_id.link))
+ list_del_init(&ce->guc_id.link);
}
static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
unsigned long flags;
- spin_lock_irqsave(&guc->contexts_lock, flags);
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
__release_guc_id(guc, ce);
- spin_unlock_irqrestore(&guc->contexts_lock, flags);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
-static int steal_guc_id(struct intel_guc *guc)
+static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
- struct intel_context *ce;
- int guc_id;
+ struct intel_context *cn;
- lockdep_assert_held(&guc->contexts_lock);
+ lockdep_assert_held(&guc->submission_state.lock);
+ GEM_BUG_ON(intel_context_is_child(ce));
+ GEM_BUG_ON(intel_context_is_parent(ce));
- if (!list_empty(&guc->guc_id_list)) {
- ce = list_first_entry(&guc->guc_id_list,
+ if (!list_empty(&guc->submission_state.guc_id_list)) {
+ cn = list_first_entry(&guc->submission_state.guc_id_list,
struct intel_context,
- guc_id_link);
+ guc_id.link);
- GEM_BUG_ON(atomic_read(&ce->guc_id_ref));
- GEM_BUG_ON(context_guc_id_invalid(ce));
+ GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
+ GEM_BUG_ON(context_guc_id_invalid(cn));
+ GEM_BUG_ON(intel_context_is_child(cn));
+ GEM_BUG_ON(intel_context_is_parent(cn));
- list_del_init(&ce->guc_id_link);
- guc_id = ce->guc_id;
- clr_context_registered(ce);
- set_context_guc_id_invalid(ce);
- return guc_id;
+ list_del_init(&cn->guc_id.link);
+ ce->guc_id = cn->guc_id;
+
+ spin_lock(&ce->guc_state.lock);
+ clr_context_registered(cn);
+ spin_unlock(&ce->guc_state.lock);
+
+ set_context_guc_id_invalid(cn);
+
+ return 0;
} else {
return -EAGAIN;
}
}
-static int assign_guc_id(struct intel_guc *guc, u16 *out)
+static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
int ret;
- lockdep_assert_held(&guc->contexts_lock);
+ lockdep_assert_held(&guc->submission_state.lock);
+ GEM_BUG_ON(intel_context_is_child(ce));
- ret = new_guc_id(guc);
+ ret = new_guc_id(guc, ce);
if (unlikely(ret < 0)) {
- ret = steal_guc_id(guc);
+ if (intel_context_is_parent(ce))
+ return -ENOSPC;
+
+ ret = steal_guc_id(guc, ce);
if (ret < 0)
return ret;
}
- *out = ret;
+ if (intel_context_is_parent(ce)) {
+ struct intel_context *child;
+ int i = 1;
+
+ for_each_child(ce, child)
+ child->guc_id.id = ce->guc_id.id + i++;
+ }
+
return 0;
}
@@ -1142,26 +1710,28 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
int ret = 0;
unsigned long flags, tries = PIN_GUC_ID_TRIES;
- GEM_BUG_ON(atomic_read(&ce->guc_id_ref));
+ GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
try_again:
- spin_lock_irqsave(&guc->contexts_lock, flags);
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+
+ might_lock(&ce->guc_state.lock);
if (context_guc_id_invalid(ce)) {
- ret = assign_guc_id(guc, &ce->guc_id);
+ ret = assign_guc_id(guc, ce);
if (ret)
goto out_unlock;
ret = 1; /* Indicates newly assigned guc_id */
}
- if (!list_empty(&ce->guc_id_link))
- list_del_init(&ce->guc_id_link);
- atomic_inc(&ce->guc_id_ref);
+ if (!list_empty(&ce->guc_id.link))
+ list_del_init(&ce->guc_id.link);
+ atomic_inc(&ce->guc_id.ref);
out_unlock:
- spin_unlock_irqrestore(&guc->contexts_lock, flags);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
/*
- * -EAGAIN indicates no guc_ids are available, let's retire any
+	 * -EAGAIN indicates no guc_id is available, let's retire any
* outstanding requests to see if that frees up a guc_id. If the first
* retire didn't help, insert a sleep with the timeslice duration before
* attempting to retire more requests. Double the sleep period each
@@ -1189,16 +1759,43 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
unsigned long flags;
- GEM_BUG_ON(atomic_read(&ce->guc_id_ref) < 0);
+ GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
+ GEM_BUG_ON(intel_context_is_child(ce));
- if (unlikely(context_guc_id_invalid(ce)))
+ if (unlikely(context_guc_id_invalid(ce) ||
+ intel_context_is_parent(ce)))
return;
- spin_lock_irqsave(&guc->contexts_lock, flags);
- if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id_link) &&
- !atomic_read(&ce->guc_id_ref))
- list_add_tail(&ce->guc_id_link, &guc->guc_id_list);
- spin_unlock_irqrestore(&guc->contexts_lock, flags);
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
+ !atomic_read(&ce->guc_id.ref))
+ list_add_tail(&ce->guc_id.link,
+ &guc->submission_state.guc_id_list);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+}
+
+static int __guc_action_register_multi_lrc(struct intel_guc *guc,
+ struct intel_context *ce,
+ u32 guc_id,
+ u32 offset,
+ bool loop)
+{
+ struct intel_context *child;
+ u32 action[4 + MAX_ENGINE_INSTANCE];
+ int len = 0;
+
+ GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+ action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+ action[len++] = guc_id;
+ action[len++] = ce->parallel.number_children + 1;
+ action[len++] = offset;
+ for_each_child(ce, child) {
+ offset += sizeof(struct guc_lrc_desc);
+ action[len++] = offset;
+ }
+
+ return guc_submission_send_busy_loop(guc, action, len, 0, loop);
}
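
The resulting H2G payload, as built above, for a parent with N children (the child descriptors sit in consecutive guc_lrc_desc slots after the parent's):

/*
 *   action[0]        INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC
 *   action[1]        parent guc_id
 *   action[2]        N + 1 (total number of contexts)
 *   action[3]        GGTT offset of the parent's LRC descriptor
 *   action[4..3+N]   GGTT offset of each child's descriptor, each
 *                    sizeof(struct guc_lrc_desc) after the previous
 */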
static int __guc_action_register_context(struct intel_guc *guc,
@@ -1220,21 +1817,31 @@ static int register_context(struct intel_context *ce, bool loop)
{
struct intel_guc *guc = ce_to_guc(ce);
u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
- ce->guc_id * sizeof(struct guc_lrc_desc);
+ ce->guc_id.id * sizeof(struct guc_lrc_desc);
int ret;
+ GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
- ret = __guc_action_register_context(guc, ce->guc_id, offset, loop);
- if (likely(!ret))
+ if (intel_context_is_parent(ce))
+ ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id,
+ offset, loop);
+ else
+ ret = __guc_action_register_context(guc, ce->guc_id.id, offset,
+ loop);
+ if (likely(!ret)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_registered(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ }
return ret;
}
static int __guc_action_deregister_context(struct intel_guc *guc,
- u32 guc_id,
- bool loop)
+ u32 guc_id)
{
u32 action[] = {
INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
@@ -1243,33 +1850,38 @@ static int __guc_action_deregister_context(struct intel_guc *guc,
return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
G2H_LEN_DW_DEREGISTER_CONTEXT,
- loop);
+ true);
}
-static int deregister_context(struct intel_context *ce, u32 guc_id, bool loop)
+static int deregister_context(struct intel_context *ce, u32 guc_id)
{
struct intel_guc *guc = ce_to_guc(ce);
+ GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_deregister(ce);
- return __guc_action_deregister_context(guc, guc_id, loop);
+ return __guc_action_deregister_context(guc, guc_id);
}
-static intel_engine_mask_t adjust_engine_mask(u8 class, intel_engine_mask_t mask)
+static inline void clear_children_join_go_memory(struct intel_context *ce)
{
- switch (class) {
- case RENDER_CLASS:
- return mask >> RCS0;
- case VIDEO_ENHANCEMENT_CLASS:
- return mask >> VECS0;
- case VIDEO_DECODE_CLASS:
- return mask >> VCS0;
- case COPY_ENGINE_CLASS:
- return mask >> BCS0;
- default:
- MISSING_CASE(class);
- return 0;
- }
+ struct parent_scratch *ps = __get_parent_scratch(ce);
+ int i;
+
+ ps->go.semaphore = 0;
+ for (i = 0; i < ce->parallel.number_children + 1; ++i)
+ ps->join[i].semaphore = 0;
+}
+
+static inline u32 get_children_go_value(struct intel_context *ce)
+{
+ return __get_parent_scratch(ce)->go.semaphore;
+}
+
+static inline u32 get_children_join_value(struct intel_context *ce,
+ u8 child_index)
+{
+ return __get_parent_scratch(ce)->join[child_index].semaphore;
}
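
These helpers imply a parent scratch layout roughly as follows; the field order here is a sketch (the real struct parent_scratch is defined with the WQ definitions), but the semantics match the accessors above:

struct example_parent_scratch {
	struct { u32 semaphore; } join[MAX_ENGINE_INSTANCE + 1];
	struct { u32 semaphore; } go;
	/* the work queue follows, at __get_wq_offset(ce) */
};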
static void guc_context_policy_init(struct intel_engine_cs *engine,
@@ -1285,22 +1897,20 @@ static void guc_context_policy_init(struct intel_engine_cs *engine,
desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
}
-static inline u8 map_i915_prio_to_guc_prio(int prio);
-
static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
struct intel_guc *guc = &engine->gt->uc.guc;
- u32 desc_idx = ce->guc_id;
+ u32 desc_idx = ce->guc_id.id;
struct guc_lrc_desc *desc;
- const struct i915_gem_context *ctx;
- int prio = I915_CONTEXT_DEFAULT_PRIORITY;
bool context_registered;
intel_wakeref_t wakeref;
+ struct intel_context *child;
int ret = 0;
GEM_BUG_ON(!engine->mask);
+ GEM_BUG_ON(!sched_state_is_init(ce));
/*
* Ensure LRC + CT vmas are in the same region as the write barrier is done
@@ -1311,25 +1921,53 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
context_registered = lrc_desc_registered(guc, desc_idx);
- rcu_read_lock();
- ctx = rcu_dereference(ce->gem_context);
- if (ctx)
- prio = ctx->sched.priority;
- rcu_read_unlock();
-
reset_lrc_desc(guc, desc_idx);
set_lrc_desc_registered(guc, desc_idx, ce);
desc = __get_lrc_desc(guc, desc_idx);
desc->engine_class = engine_class_to_guc_class(engine->class);
- desc->engine_submit_mask = adjust_engine_mask(engine->class,
- engine->mask);
+ desc->engine_submit_mask = engine->logical_mask;
desc->hw_context_desc = ce->lrc.lrca;
- ce->guc_prio = map_i915_prio_to_guc_prio(prio);
- desc->priority = ce->guc_prio;
+ desc->priority = ce->guc_state.prio;
desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
guc_context_policy_init(engine, desc);
- init_sched_state(ce);
+
+ /*
+	 * If the context is a parent, we need to register a process descriptor
+	 * describing its work queue and register all child contexts.
+ */
+ if (intel_context_is_parent(ce)) {
+ struct guc_process_desc *pdesc;
+
+ ce->parallel.guc.wqi_tail = 0;
+ ce->parallel.guc.wqi_head = 0;
+
+ desc->process_desc = i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+ desc->wq_addr = i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ desc->wq_size = WQ_SIZE;
+
+ pdesc = __get_process_desc(ce);
+ memset(pdesc, 0, sizeof(*(pdesc)));
+ pdesc->stage_id = ce->guc_id.id;
+ pdesc->wq_base_addr = desc->wq_addr;
+ pdesc->wq_size_bytes = desc->wq_size;
+ pdesc->wq_status = WQ_STATUS_ACTIVE;
+
+ for_each_child(ce, child) {
+ desc = __get_lrc_desc(guc, child->guc_id.id);
+
+ desc->engine_class =
+ engine_class_to_guc_class(engine->class);
+ desc->hw_context_desc = child->lrc.lrca;
+ desc->priority = ce->guc_state.prio;
+ desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ guc_context_policy_init(engine, desc);
+ }
+
+ clear_children_join_go_memory(ce);
+ }
/*
* The context_lookup xarray is used to determine if the hardware
@@ -1340,26 +1978,23 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
* registering this context.
*/
if (context_registered) {
+ bool disabled;
+ unsigned long flags;
+
trace_intel_context_steal_guc_id(ce);
- if (!loop) {
+ GEM_BUG_ON(!loop);
+
+ /* Seal race with Reset */
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ disabled = submission_disabled(guc);
+ if (likely(!disabled)) {
set_context_wait_for_deregister_to_register(ce);
intel_context_get(ce);
- } else {
- bool disabled;
- unsigned long flags;
-
- /* Seal race with Reset */
- spin_lock_irqsave(&ce->guc_state.lock, flags);
- disabled = submission_disabled(guc);
- if (likely(!disabled)) {
- set_context_wait_for_deregister_to_register(ce);
- intel_context_get(ce);
- }
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- if (unlikely(disabled)) {
- reset_lrc_desc(guc, desc_idx);
- return 0; /* Will get registered later */
- }
+ }
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ if (unlikely(disabled)) {
+ reset_lrc_desc(guc, desc_idx);
+ return 0; /* Will get registered later */
}
/*
@@ -1367,20 +2002,18 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
* context whose guc_id was stolen.
*/
with_intel_runtime_pm(runtime_pm, wakeref)
- ret = deregister_context(ce, ce->guc_id, loop);
- if (unlikely(ret == -EBUSY)) {
- clr_context_wait_for_deregister_to_register(ce);
- intel_context_put(ce);
- } else if (unlikely(ret == -ENODEV)) {
+ ret = deregister_context(ce, ce->guc_id.id);
+ if (unlikely(ret == -ENODEV))
ret = 0; /* Will get registered later */
- }
} else {
with_intel_runtime_pm(runtime_pm, wakeref)
ret = register_context(ce, loop);
- if (unlikely(ret == -EBUSY))
+ if (unlikely(ret == -EBUSY)) {
+ reset_lrc_desc(guc, desc_idx);
+ } else if (unlikely(ret == -ENODEV)) {
reset_lrc_desc(guc, desc_idx);
- else if (unlikely(ret == -ENODEV))
ret = 0; /* Will get registered later */
+ }
}
return ret;
@@ -1419,7 +2052,12 @@ static int guc_context_pre_pin(struct intel_context *ce,
static int guc_context_pin(struct intel_context *ce, void *vaddr)
{
- return __guc_context_pin(ce, ce->engine, vaddr);
+ int ret = __guc_context_pin(ce, ce->engine, vaddr);
+
+ if (likely(!ret && !intel_context_is_barrier(ce)))
+ intel_engine_pm_get(ce->engine);
+
+ return ret;
}
static void guc_context_unpin(struct intel_context *ce)
@@ -1428,6 +2066,9 @@ static void guc_context_unpin(struct intel_context *ce)
unpin_guc_id(guc, ce);
lrc_unpin(ce);
+
+ if (likely(!intel_context_is_barrier(ce)))
+ intel_engine_pm_put_async(ce->engine);
}
static void guc_context_post_unpin(struct intel_context *ce)
@@ -1440,7 +2081,7 @@ static void __guc_context_sched_enable(struct intel_guc *guc,
{
u32 action[] = {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
- ce->guc_id,
+ ce->guc_id.id,
GUC_CONTEXT_ENABLE
};
@@ -1456,12 +2097,13 @@ static void __guc_context_sched_disable(struct intel_guc *guc,
{
u32 action[] = {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
- guc_id, /* ce->guc_id not stable */
+ guc_id, /* ce->guc_id.id not stable */
GUC_CONTEXT_DISABLE
};
GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
+ GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_sched_disable(ce);
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
@@ -1472,24 +2114,24 @@ static void guc_blocked_fence_complete(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
- if (!i915_sw_fence_done(&ce->guc_blocked))
- i915_sw_fence_complete(&ce->guc_blocked);
+ if (!i915_sw_fence_done(&ce->guc_state.blocked))
+ i915_sw_fence_complete(&ce->guc_state.blocked);
}
static void guc_blocked_fence_reinit(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
- GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_blocked));
+ GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
/*
* This fence is always complete unless a pending schedule disable is
* outstanding. We arm the fence here and complete it when we receive
* the pending schedule disable complete message.
*/
- i915_sw_fence_fini(&ce->guc_blocked);
- i915_sw_fence_reinit(&ce->guc_blocked);
- i915_sw_fence_await(&ce->guc_blocked);
- i915_sw_fence_commit(&ce->guc_blocked);
+ i915_sw_fence_fini(&ce->guc_state.blocked);
+ i915_sw_fence_reinit(&ce->guc_state.blocked);
+ i915_sw_fence_await(&ce->guc_state.blocked);
+ i915_sw_fence_commit(&ce->guc_state.blocked);
}
static u16 prep_context_pending_disable(struct intel_context *ce)
@@ -1501,35 +2143,30 @@ static u16 prep_context_pending_disable(struct intel_context *ce)
guc_blocked_fence_reinit(ce);
intel_context_get(ce);
- return ce->guc_id;
+ return ce->guc_id.id;
}
static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
- struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
unsigned long flags;
struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
intel_wakeref_t wakeref;
u16 guc_id;
bool enabled;
+ GEM_BUG_ON(intel_context_is_child(ce));
+
spin_lock_irqsave(&ce->guc_state.lock, flags);
- /*
- * Sync with submission path, increment before below changes to context
- * state.
- */
- spin_lock(&sched_engine->lock);
incr_context_blocked(ce);
- spin_unlock(&sched_engine->lock);
enabled = context_enabled(ce);
if (unlikely(!enabled || submission_disabled(guc))) {
if (enabled)
clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- return &ce->guc_blocked;
+ return &ce->guc_state.blocked;
}
/*
@@ -1545,26 +2182,41 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_sched_disable(guc, ce, guc_id);
- return &ce->guc_blocked;
+ return &ce->guc_state.blocked;
+}
+
+#define SCHED_STATE_MULTI_BLOCKED_MASK \
+ (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
+#define SCHED_STATE_NO_UNBLOCK \
+ (SCHED_STATE_MULTI_BLOCKED_MASK | \
+ SCHED_STATE_PENDING_DISABLE | \
+ SCHED_STATE_BANNED)
+
+static bool context_cant_unblock(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
+ context_guc_id_invalid(ce) ||
+ !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
+ !intel_context_is_pinned(ce);
}
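
SCHED_STATE_MULTI_BLOCKED_MASK is the blocked-count field with its lowest bit (SCHED_STATE_BLOCKED itself) cleared, so it is non-zero exactly when the block count is at least 2, that is, when someone else still holds a block. Illustrative check:

static bool example_multi_blocked(u32 sched_state)
{
	/* true when more than one block is still outstanding */
	return sched_state & SCHED_STATE_MULTI_BLOCKED_MASK;
}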
static void guc_context_unblock(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
- struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
unsigned long flags;
struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
intel_wakeref_t wakeref;
bool enable;
GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_child(ce));
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (unlikely(submission_disabled(guc) ||
- !intel_context_is_pinned(ce) ||
- context_pending_disable(ce) ||
- context_blocked(ce) > 1)) {
+ context_cant_unblock(ce))) {
enable = false;
} else {
enable = true;
@@ -1573,13 +2225,7 @@ static void guc_context_unblock(struct intel_context *ce)
intel_context_get(ce);
}
- /*
- * Sync with submission path, decrement after above changes to context
- * state.
- */
- spin_lock(&sched_engine->lock);
decr_context_blocked(ce);
- spin_unlock(&sched_engine->lock);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
@@ -1592,16 +2238,29 @@ static void guc_context_unblock(struct intel_context *ce)
static void guc_context_cancel_request(struct intel_context *ce,
struct i915_request *rq)
{
+ struct intel_context *block_context =
+ request_to_scheduling_context(rq);
+
if (i915_sw_fence_signaled(&rq->submit)) {
- struct i915_sw_fence *fence = guc_context_block(ce);
+ struct i915_sw_fence *fence;
+ intel_context_get(ce);
+ fence = guc_context_block(block_context);
i915_sw_fence_wait(fence);
if (!i915_request_completed(rq)) {
__i915_request_skip(rq);
guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
true);
}
- guc_context_unblock(ce);
+
+ /*
+		 * XXX: Racy if the context is reset, see the comment in
+ * __guc_reset_context().
+ */
+ flush_work(&ce_to_guc(ce)->ct.requests.worker);
+
+ guc_context_unblock(block_context);
+ intel_context_put(ce);
}
}
@@ -1626,6 +2285,8 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
intel_wakeref_t wakeref;
unsigned long flags;
+ GEM_BUG_ON(intel_context_is_child(ce));
+
guc_flush_submissions(guc);
spin_lock_irqsave(&ce->guc_state.lock, flags);
@@ -1662,7 +2323,7 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
if (!context_guc_id_invalid(ce))
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_set_preemption_timeout(guc,
- ce->guc_id,
+ ce->guc_id.id,
1);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
@@ -1675,40 +2336,24 @@ static void guc_context_sched_disable(struct intel_context *ce)
struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
intel_wakeref_t wakeref;
u16 guc_id;
- bool enabled;
- if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
- !lrc_desc_registered(guc, ce->guc_id)) {
- clr_context_enabled(ce);
- goto unpin;
- }
-
- if (!context_enabled(ce))
- goto unpin;
+ GEM_BUG_ON(intel_context_is_child(ce));
spin_lock_irqsave(&ce->guc_state.lock, flags);
/*
- * We have to check if the context has been disabled by another thread.
- * We also have to check if the context has been pinned again as another
- * pin operation is allowed to pass this function. Checking the pin
- * count, within ce->guc_state.lock, synchronizes this function with
- * guc_request_alloc ensuring a request doesn't slip through the
- * 'context_pending_disable' fence. Checking within the spin lock (can't
- * sleep) ensures another process doesn't pin this context and generate
- * a request before we set the 'context_pending_disable' flag here.
+ * We have to check if the context has been disabled by another thread,
+	 * check if submission has been disabled to seal a race with reset and
+	 * finally check if any more requests have been committed to the
+	 * context, ensuring that a request doesn't slip through the
+ * 'context_pending_disable' fence.
*/
- enabled = context_enabled(ce);
- if (unlikely(!enabled || submission_disabled(guc))) {
- if (enabled)
- clr_context_enabled(ce);
+ if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
+ context_has_committed_requests(ce))) {
+ clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
goto unpin;
}
- if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- return;
- }
guc_id = prep_context_pending_disable(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
@@ -1724,21 +2369,40 @@ unpin:
static inline void guc_lrc_desc_unpin(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
+ struct intel_gt *gt = guc_to_gt(guc);
+ unsigned long flags;
+ bool disabled;
- GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id));
- GEM_BUG_ON(ce != __get_context(guc, ce->guc_id));
+ GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
+ GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
+ GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
GEM_BUG_ON(context_enabled(ce));
- clr_context_registered(ce);
- deregister_context(ce, ce->guc_id, true);
+ /* Seal race with Reset */
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ disabled = submission_disabled(guc);
+ if (likely(!disabled)) {
+ __intel_gt_pm_get(gt);
+ set_context_destroyed(ce);
+ clr_context_registered(ce);
+ }
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ if (unlikely(disabled)) {
+ release_guc_id(guc, ce);
+ __guc_context_destroy(ce);
+ return;
+ }
+
+ deregister_context(ce, ce->guc_id.id);
}
static void __guc_context_destroy(struct intel_context *ce)
{
- GEM_BUG_ON(ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
- ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
- ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
- ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+ GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
+ ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
+ ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
+ ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+ GEM_BUG_ON(ce->guc_state.number_committed_requests);
lrc_fini(ce);
intel_context_fini(ce);
@@ -1756,76 +2420,86 @@ static void __guc_context_destroy(struct intel_context *ce)
}
}
+static void guc_flush_destroyed_contexts(struct intel_guc *guc)
+{
+ struct intel_context *ce, *cn;
+ unsigned long flags;
+
+ GEM_BUG_ON(!submission_disabled(guc) &&
+ guc_submission_initialized(guc));
+
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ list_for_each_entry_safe(ce, cn,
+ &guc->submission_state.destroyed_contexts,
+ destroyed_link) {
+ list_del_init(&ce->destroyed_link);
+ __release_guc_id(guc, ce);
+ __guc_context_destroy(ce);
+ }
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+}
+
+static void deregister_destroyed_contexts(struct intel_guc *guc)
+{
+ struct intel_context *ce, *cn;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ list_for_each_entry_safe(ce, cn,
+ &guc->submission_state.destroyed_contexts,
+ destroyed_link) {
+ list_del_init(&ce->destroyed_link);
+ guc_lrc_desc_unpin(ce);
+ }
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+}
+
+static void destroyed_worker_func(struct work_struct *w)
+{
+ struct intel_guc *guc = container_of(w, struct intel_guc,
+ submission_state.destroyed_worker);
+ struct intel_gt *gt = guc_to_gt(guc);
+ int tmp;
+
+ with_intel_gt_pm(gt, tmp)
+ deregister_destroyed_contexts(guc);
+}
+
static void guc_context_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
- struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
struct intel_guc *guc = ce_to_guc(ce);
- intel_wakeref_t wakeref;
unsigned long flags;
- bool disabled;
+ bool destroy;
/*
* If the guc_id is invalid this context has been stolen and we can free
* it immediately. It can also be freed immediately if the context is not
* registered with the GuC or the GuC is in the middle of a reset.
*/
- if (context_guc_id_invalid(ce)) {
- __guc_context_destroy(ce);
- return;
- } else if (submission_disabled(guc) ||
- !lrc_desc_registered(guc, ce->guc_id)) {
- release_guc_id(guc, ce);
- __guc_context_destroy(ce);
- return;
- }
-
- /*
- * We have to acquire the context spinlock and check guc_id again, if it
- * is valid it hasn't been stolen and needs to be deregistered. We
- * delete this context from the list of unpinned guc_ids available to
- * steal to seal a race with guc_lrc_desc_pin(). When the G2H CTB
- * returns indicating this context has been deregistered the guc_id is
- * returned to the pool of available guc_ids.
- */
- spin_lock_irqsave(&guc->contexts_lock, flags);
- if (context_guc_id_invalid(ce)) {
- spin_unlock_irqrestore(&guc->contexts_lock, flags);
- __guc_context_destroy(ce);
- return;
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
+ !lrc_desc_registered(guc, ce->guc_id.id);
+ if (likely(!destroy)) {
+ if (!list_empty(&ce->guc_id.link))
+ list_del_init(&ce->guc_id.link);
+ list_add_tail(&ce->destroyed_link,
+ &guc->submission_state.destroyed_contexts);
+ } else {
+ __release_guc_id(guc, ce);
}
-
- if (!list_empty(&ce->guc_id_link))
- list_del_init(&ce->guc_id_link);
- spin_unlock_irqrestore(&guc->contexts_lock, flags);
-
- /* Seal race with Reset */
- spin_lock_irqsave(&ce->guc_state.lock, flags);
- disabled = submission_disabled(guc);
- if (likely(!disabled))
- set_context_destroyed(ce);
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- if (unlikely(disabled)) {
- release_guc_id(guc, ce);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+ if (unlikely(destroy)) {
__guc_context_destroy(ce);
return;
}
/*
- * We defer GuC context deregistration until the context is destroyed
- * in order to save on CTBs. With this optimization ideally we only need
- * 1 CTB to register the context during the first pin and 1 CTB to
- * deregister the context when the context is destroyed. Without this
- * optimization, a CTB would be needed every pin & unpin.
- *
- * XXX: Need to acqiure the runtime wakeref as this can be triggered
- * from context_free_worker when runtime wakeref is not held.
- * guc_lrc_desc_unpin requires the runtime as a GuC register is written
- * in H2G CTB to deregister the context. A future patch may defer this
- * H2G CTB if the runtime wakeref is zero.
+	 * We use a worker to issue the H2G to deregister the context, as this
+	 * may take the GT PM for the first time, which isn't allowed from an
+	 * atomic context.
*/
- with_intel_runtime_pm(runtime_pm, wakeref)
- guc_lrc_desc_unpin(ce);
+ queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
}
static int guc_context_alloc(struct intel_context *ce)
@@ -1839,20 +2513,23 @@ static void guc_context_set_prio(struct intel_guc *guc,
{
u32 action[] = {
INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
- ce->guc_id,
+ ce->guc_id.id,
prio,
};
GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
prio > GUC_CLIENT_PRIORITY_NORMAL);
+ lockdep_assert_held(&ce->guc_state.lock);
- if (ce->guc_prio == prio || submission_disabled(guc) ||
- !context_registered(ce))
+ if (ce->guc_state.prio == prio || submission_disabled(guc) ||
+ !context_registered(ce)) {
+ ce->guc_state.prio = prio;
return;
+ }
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
- ce->guc_prio = prio;
+ ce->guc_state.prio = prio;
trace_intel_context_set_prio(ce);
}
@@ -1871,25 +2548,25 @@ static inline u8 map_i915_prio_to_guc_prio(int prio)
static inline void add_context_inflight_prio(struct intel_context *ce,
u8 guc_prio)
{
- lockdep_assert_held(&ce->guc_active.lock);
- GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+ lockdep_assert_held(&ce->guc_state.lock);
+ GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
- ++ce->guc_prio_count[guc_prio];
+ ++ce->guc_state.prio_count[guc_prio];
/* Overflow protection */
- GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+ GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
}
static inline void sub_context_inflight_prio(struct intel_context *ce,
u8 guc_prio)
{
- lockdep_assert_held(&ce->guc_active.lock);
- GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+ lockdep_assert_held(&ce->guc_state.lock);
+ GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
/* Underflow protection */
- GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+ GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
- --ce->guc_prio_count[guc_prio];
+ --ce->guc_state.prio_count[guc_prio];
}
static inline void update_context_prio(struct intel_context *ce)
@@ -1900,10 +2577,10 @@ static inline void update_context_prio(struct intel_context *ce)
BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
- lockdep_assert_held(&ce->guc_active.lock);
+ lockdep_assert_held(&ce->guc_state.lock);
- for (i = 0; i < ARRAY_SIZE(ce->guc_prio_count); ++i) {
- if (ce->guc_prio_count[i]) {
+ for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
+ if (ce->guc_state.prio_count[i]) {
guc_context_set_prio(guc, ce, i);
break;
}
@@ -1918,13 +2595,14 @@ static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
static void add_to_context(struct i915_request *rq)
{
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
+ GEM_BUG_ON(intel_context_is_child(ce));
GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
- spin_lock(&ce->guc_active.lock);
- list_move_tail(&rq->sched.link, &ce->guc_active.requests);
+ spin_lock(&ce->guc_state.lock);
+ list_move_tail(&rq->sched.link, &ce->guc_state.requests);
if (rq->guc_prio == GUC_PRIO_INIT) {
rq->guc_prio = new_guc_prio;
@@ -1936,12 +2614,12 @@ static void add_to_context(struct i915_request *rq)
}
update_context_prio(ce);
- spin_unlock(&ce->guc_active.lock);
+ spin_unlock(&ce->guc_state.lock);
}
static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
{
- lockdep_assert_held(&ce->guc_active.lock);
+ lockdep_assert_held(&ce->guc_state.lock);
if (rq->guc_prio != GUC_PRIO_INIT &&
rq->guc_prio != GUC_PRIO_FINI) {
@@ -1953,9 +2631,11 @@ static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
static void remove_from_context(struct i915_request *rq)
{
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
- spin_lock_irq(&ce->guc_active.lock);
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ spin_lock_irq(&ce->guc_state.lock);
list_del_init(&rq->sched.link);
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
@@ -1965,9 +2645,11 @@ static void remove_from_context(struct i915_request *rq)
guc_prio_fini(rq, ce);
- spin_unlock_irq(&ce->guc_active.lock);
+ decr_context_committed_requests(ce);
+
+ spin_unlock_irq(&ce->guc_state.lock);
- atomic_dec(&ce->guc_id_ref);
+ atomic_dec(&ce->guc_id.ref);
i915_request_notify_execute_cb_imm(rq);
}
@@ -1992,19 +2674,35 @@ static const struct intel_context_ops guc_context_ops = {
.destroy = guc_context_destroy,
.create_virtual = guc_create_virtual,
+ .create_parallel = guc_create_parallel,
};
+static void submit_work_cb(struct irq_work *wrk)
+{
+ struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+ might_lock(&rq->engine->sched_engine->lock);
+ i915_sw_fence_complete(&rq->submit);
+}
+
static void __guc_signal_context_fence(struct intel_context *ce)
{
- struct i915_request *rq;
+ struct i915_request *rq, *rn;
lockdep_assert_held(&ce->guc_state.lock);
if (!list_empty(&ce->guc_state.fences))
trace_intel_context_fence_release(ce);
- list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
- i915_sw_fence_complete(&rq->submit);
+ /*
+	 * Use irq_work to ensure the locking order of sched_engine->lock ->
+	 * ce->guc_state.lock is preserved.
+ */
+ list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
+ guc_fence_link) {
+ list_del(&rq->guc_fence_link);
+ irq_work_queue(&rq->submit_work);
+ }
INIT_LIST_HEAD(&ce->guc_state.fences);
}
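
The irq_work indirection above is the standard pattern for deferring work out of a spinlock whose ordering would otherwise invert; the callback is armed per-request in guc_request_alloc() below. Generic shape of the mechanism:

static void example_submit_cb(struct irq_work *wrk)
{
	/* runs later, in hard-irq context, outside guc_state.lock */
}

static void example_arm_and_fire(struct irq_work *w)
{
	init_irq_work(w, example_submit_cb);
	irq_work_queue(w);	/* safe to call from any context */
}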
@@ -2013,6 +2711,8 @@ static void guc_signal_context_fence(struct intel_context *ce)
{
unsigned long flags;
+ GEM_BUG_ON(intel_context_is_child(ce));
+
spin_lock_irqsave(&ce->guc_state.lock, flags);
clr_context_wait_for_deregister_to_register(ce);
__guc_signal_context_fence(ce);
@@ -2022,13 +2722,28 @@ static void guc_signal_context_fence(struct intel_context *ce)
static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
{
return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
- !lrc_desc_registered(ce_to_guc(ce), ce->guc_id)) &&
+ !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
!submission_disabled(ce_to_guc(ce));
}
+static void guc_context_init(struct intel_context *ce)
+{
+ const struct i915_gem_context *ctx;
+ int prio = I915_CONTEXT_DEFAULT_PRIORITY;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(ce->gem_context);
+ if (ctx)
+ prio = ctx->sched.priority;
+ rcu_read_unlock();
+
+ ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
+ set_bit(CONTEXT_GUC_INIT, &ce->flags);
+}
+
static int guc_request_alloc(struct i915_request *rq)
{
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
struct intel_guc *guc = ce_to_guc(ce);
unsigned long flags;
int ret;
@@ -2057,14 +2772,17 @@ static int guc_request_alloc(struct i915_request *rq)
rq->reserved_space -= GUC_REQUEST_SIZE;
+ if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
+ guc_context_init(ce);
+
/*
* Call pin_guc_id here rather than in the pinning step as with
* dma_resv, contexts can be repeatedly pinned / unpinned trashing the
- * guc_ids and creating horrible race conditions. This is especially bad
- * when guc_ids are being stolen due to over subscription. By the time
+ * guc_id and creating horrible race conditions. This is especially bad
+	 * when a guc_id is being stolen due to over subscription. By the time
* this function is reached, it is guaranteed that the guc_id will be
* persistent until the generated request is retired. Thus, sealing these
- * race conditions. It is still safe to fail here if guc_ids are
+	 * race conditions. It is still safe to fail here if the guc_id space is
* exhausted and return -EAGAIN to the user indicating that they can try
* again in the future.
*
@@ -2074,7 +2792,7 @@ static int guc_request_alloc(struct i915_request *rq)
* decremented on each retire. When it is zero, a lock around the
* increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
*/
- if (atomic_add_unless(&ce->guc_id_ref, 1, 0))
+ if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
goto out;
ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */
@@ -2087,7 +2805,7 @@ static int guc_request_alloc(struct i915_request *rq)
disable_submission(guc);
goto out; /* GPU will be reset */
}
- atomic_dec(&ce->guc_id_ref);
+ atomic_dec(&ce->guc_id.ref);
unpin_guc_id(guc, ce);
return ret;
}
@@ -2102,22 +2820,16 @@ out:
* schedule enable or context registration if either G2H is pending
* respectively. Once a G2H returns, the fence that is blocking these
* requests is released (see guc_signal_context_fence).
- *
- * We can safely check the below fields outside of the lock as it isn't
- * possible for these fields to transition from being clear to set but
- * converse is possible, hence the need for the check within the lock.
*/
- if (likely(!context_wait_for_deregister_to_register(ce) &&
- !context_pending_disable(ce)))
- return 0;
-
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (context_wait_for_deregister_to_register(ce) ||
context_pending_disable(ce)) {
+ init_irq_work(&rq->submit_work, submit_work_cb);
i915_sw_fence_await(&rq->submit);
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
}
+ incr_context_committed_requests(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return 0;
@@ -2135,8 +2847,30 @@ static int guc_virtual_context_pre_pin(struct intel_context *ce,
static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
{
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+ int ret = __guc_context_pin(ce, engine, vaddr);
+ intel_engine_mask_t tmp, mask = ce->engine->mask;
- return __guc_context_pin(ce, engine, vaddr);
+ if (likely(!ret))
+ for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+ intel_engine_pm_get(engine);
+
+ return ret;
+}
+
+static void guc_virtual_context_unpin(struct intel_context *ce)
+{
+ intel_engine_mask_t tmp, mask = ce->engine->mask;
+ struct intel_engine_cs *engine;
+ struct intel_guc *guc = ce_to_guc(ce);
+
+ GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_barrier(ce));
+
+ unpin_guc_id(guc, ce);
+ lrc_unpin(ce);
+
+ for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+ intel_engine_pm_put_async(engine);
}
static void guc_virtual_context_enter(struct intel_context *ce)
@@ -2173,7 +2907,98 @@ static const struct intel_context_ops virtual_guc_context_ops = {
.pre_pin = guc_virtual_context_pre_pin,
.pin = guc_virtual_context_pin,
- .unpin = guc_context_unpin,
+ .unpin = guc_virtual_context_unpin,
+ .post_unpin = guc_context_post_unpin,
+
+ .ban = guc_context_ban,
+
+ .cancel_request = guc_context_cancel_request,
+
+ .enter = guc_virtual_context_enter,
+ .exit = guc_virtual_context_exit,
+
+ .sched_disable = guc_context_sched_disable,
+
+ .destroy = guc_context_destroy,
+
+ .get_sibling = guc_virtual_get_sibling,
+};
+
+static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+ struct intel_guc *guc = ce_to_guc(ce);
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ ret = pin_guc_id(guc, ce);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return __guc_context_pin(ce, engine, vaddr);
+}
+
+static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ __intel_context_pin(ce->parallel.parent);
+ return __guc_context_pin(ce, engine, vaddr);
+}
+
+static void guc_parent_context_unpin(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+
+ GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_barrier(ce));
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ if (ce->parallel.last_rq)
+ i915_request_put(ce->parallel.last_rq);
+ unpin_guc_id(guc, ce);
+ lrc_unpin(ce);
+}
+
+static void guc_child_context_unpin(struct intel_context *ce)
+{
+ GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_barrier(ce));
+ GEM_BUG_ON(!intel_context_is_child(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ lrc_unpin(ce);
+}
+
+static void guc_child_context_post_unpin(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_child(ce));
+ GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ lrc_post_unpin(ce);
+ intel_context_unpin(ce->parallel.parent);
+}
+
+static void guc_child_context_destroy(struct kref *kref)
+{
+ struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+
+ __guc_context_destroy(ce);
+}
+
+static const struct intel_context_ops virtual_parent_context_ops = {
+ .alloc = guc_virtual_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_parent_context_pin,
+ .unpin = guc_parent_context_unpin,
.post_unpin = guc_context_post_unpin,
.ban = guc_context_ban,
@@ -2190,6 +3015,110 @@ static const struct intel_context_ops virtual_guc_context_ops = {
.get_sibling = guc_virtual_get_sibling,
};
+static const struct intel_context_ops virtual_child_context_ops = {
+ .alloc = guc_virtual_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_child_context_pin,
+ .unpin = guc_child_context_unpin,
+ .post_unpin = guc_child_context_post_unpin,
+
+ .cancel_request = guc_context_cancel_request,
+
+ .enter = guc_virtual_context_enter,
+ .exit = guc_virtual_context_exit,
+
+ .destroy = guc_child_context_destroy,
+
+ .get_sibling = guc_virtual_get_sibling,
+};
+
+/*
+ * The below override of the breadcrumbs is enabled when the user configures a
+ * context for parallel submission (multi-lrc, parent-child).
+ *
+ * The overridden breadcrumbs implement an algorithm which allows the GuC to
+ * safely preempt all the hw contexts configured for parallel submission
+ * between each BB. The contract between the i915 and the GuC is that if the
+ * parent context can be preempted, all the children can be preempted, and
+ * the GuC will always try to preempt the parent before the children. A
+ * handshake between the parent / children breadcrumbs ensures the i915 holds
+ * up its end of the deal, creating a window to preempt between each set of
+ * BBs.
+ */
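As an illustrative sketch of that handshake (not the literal dword stream; the exact emits are in the emit_bb_start_* and fini-breadcrumb functions below):

	/*
	 * BB start (preemption closed across the handshake):
	 *
	 *   Child i:                          Parent:
	 *     write join[i] = PARENT_GO_BB      wait join[0..n-1] == PARENT_GO_BB
	 *     wait go == CHILD_GO_BB            MI_ARB_OFF
	 *     MI_ARB_OFF                        write go = CHILD_GO_BB
	 *     batch buffer start                batch buffer start
	 *
	 * The fini breadcrumbs mirror this with the *_FINI_BREADCRUMB values and
	 * MI_ARB_ON, re-opening the preemption window between each set of BBs.
	 */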
+static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+static u32 *
+emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs);
+static u32 *
+emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs);
+
+static struct intel_context *
+guc_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width)
+{
+ struct intel_engine_cs **siblings = NULL;
+ struct intel_context *parent = NULL, *ce, *err;
+ int i, j;
+
+ siblings = kmalloc_array(num_siblings,
+ sizeof(*siblings),
+ GFP_KERNEL);
+ if (!siblings)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < width; ++i) {
+ for (j = 0; j < num_siblings; ++j)
+ siblings[j] = engines[i * num_siblings + j];
+
+ ce = intel_engine_create_virtual(siblings, num_siblings,
+ FORCE_VIRTUAL);
+		if (IS_ERR(ce)) {
+			err = ERR_CAST(ce);
+ goto unwind;
+ }
+
+ if (i == 0) {
+ parent = ce;
+ parent->ops = &virtual_parent_context_ops;
+ } else {
+ ce->ops = &virtual_child_context_ops;
+ intel_context_bind_parent_child(parent, ce);
+ }
+ }
+
+ parent->parallel.fence_context = dma_fence_context_alloc(1);
+
+ parent->engine->emit_bb_start =
+ emit_bb_start_parent_no_preempt_mid_batch;
+ parent->engine->emit_fini_breadcrumb =
+ emit_fini_breadcrumb_parent_no_preempt_mid_batch;
+ parent->engine->emit_fini_breadcrumb_dw =
+ 12 + 4 * parent->parallel.number_children;
+ for_each_child(parent, ce) {
+ ce->engine->emit_bb_start =
+ emit_bb_start_child_no_preempt_mid_batch;
+ ce->engine->emit_fini_breadcrumb =
+ emit_fini_breadcrumb_child_no_preempt_mid_batch;
+ ce->engine->emit_fini_breadcrumb_dw = 16;
+ }
+
+ kfree(siblings);
+ return parent;
+
+unwind:
+ if (parent)
+ intel_context_put(parent);
+ kfree(siblings);
+ return err;
+}
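For reference, the engines array handed in is laid out width-major (slot i * num_siblings + j holds sibling j of lane i), and the fini-breadcrumb dword budgets chosen above can be checked against the emitters later in this file (assuming gen8_emit_ggtt_write() costs 4 dwords, which matches its usage below):

	/*
	 * Parent fini breadcrumb, per emit_fini_breadcrumb_parent_*:
	 *   4 * n  child join waits (MI_SEMAPHORE_WAIT, 4 dwords each)
	 *   2      MI_ARB_ON_OFF + MI_NOOP
	 *   4      ggtt write of the children-go semaphore
	 *   4      ggtt write of the fini seqno
	 *   2      MI_USER_INTERRUPT + MI_NOOP
	 *   = 12 + 4 * n
	 *
	 * Child fini breadcrumb: 2 + 4 + 4 + 4 + 2 = 16 dwords
	 *   (arb enable, signal parent, wait on parent, seqno write,
	 *    user interrupt).
	 */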
+
static bool
guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
{
@@ -2249,7 +3178,7 @@ static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
static void guc_bump_inflight_request_prio(struct i915_request *rq,
int prio)
{
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
/* Short circuit function */
@@ -2259,7 +3188,7 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq,
!new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
return;
- spin_lock(&ce->guc_active.lock);
+ spin_lock(&ce->guc_state.lock);
if (rq->guc_prio != GUC_PRIO_FINI) {
if (rq->guc_prio != GUC_PRIO_INIT)
sub_context_inflight_prio(ce, rq->guc_prio);
@@ -2267,16 +3196,16 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq,
add_context_inflight_prio(ce, rq->guc_prio);
update_context_prio(ce);
}
- spin_unlock(&ce->guc_active.lock);
+ spin_unlock(&ce->guc_state.lock);
}
static void guc_retire_inflight_request_prio(struct i915_request *rq)
{
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
- spin_lock(&ce->guc_active.lock);
+ spin_lock(&ce->guc_state.lock);
guc_prio_fini(rq, ce);
- spin_unlock(&ce->guc_active.lock);
+ spin_unlock(&ce->guc_state.lock);
}
static void sanitize_hwsp(struct intel_engine_cs *engine)
@@ -2310,6 +3239,8 @@ static void guc_sanitize(struct intel_engine_cs *engine)
/* And scrub the dirty cachelines for the HWSP */
clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+
+ intel_engine_reset_pinned_contexts(engine);
}
static void setup_hwsp(struct intel_engine_cs *engine)
@@ -2385,9 +3316,13 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
 * and even if it did, this code would be run again.
*/
- for_each_engine(engine, gt, id)
- if (engine->kernel_context)
- guc_kernel_context_pin(guc, engine->kernel_context);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ list_for_each_entry(ce, &engine->pinned_contexts_list,
+ pinned_contexts_link)
+ guc_kernel_context_pin(guc, ce);
+ }
}
static void guc_release(struct intel_engine_cs *engine)
@@ -2580,13 +3515,13 @@ g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
return NULL;
}
- return ce;
-}
+ if (unlikely(intel_context_is_child(ce))) {
+ drm_err(&guc_to_gt(guc)->i915->drm,
+ "Context is child, desc_idx %u", desc_idx);
+ return NULL;
+ }
-static void decr_outstanding_submission_g2h(struct intel_guc *guc)
-{
- if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
- wake_up_all(&guc->ct.wq);
+ return ce;
}
int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
@@ -2607,6 +3542,13 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
trace_intel_context_deregister_done(ce);
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (unlikely(ce->drop_deregister)) {
+ ce->drop_deregister = false;
+ return 0;
+ }
+#endif
+
if (context_wait_for_deregister_to_register(ce)) {
struct intel_runtime_pm *runtime_pm =
&ce->engine->gt->i915->runtime_pm;
@@ -2622,6 +3564,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
intel_context_put(ce);
} else if (context_destroyed(ce)) {
/* Context has been destroyed */
+ intel_gt_pm_put_async(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@@ -2652,8 +3595,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
(!context_pending_enable(ce) &&
!context_pending_disable(ce)))) {
drm_err(&guc_to_gt(guc)->i915->drm,
- "Bad context sched_state 0x%x, 0x%x, desc_idx %u",
- atomic_read(&ce->guc_sched_state_no_lock),
+ "Bad context sched_state 0x%x, desc_idx %u",
ce->guc_state.sched_state, desc_idx);
return -EPROTO;
}
@@ -2661,10 +3603,26 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
trace_intel_context_sched_done(ce);
if (context_pending_enable(ce)) {
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (unlikely(ce->drop_schedule_enable)) {
+ ce->drop_schedule_enable = false;
+ return 0;
+ }
+#endif
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
clr_context_pending_enable(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
} else if (context_pending_disable(ce)) {
bool banned;
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (unlikely(ce->drop_schedule_disable)) {
+ ce->drop_schedule_disable = false;
+ return 0;
+ }
+#endif
+
/*
* Unpin must be done before __guc_signal_context_fence,
* otherwise a race exists between the requests getting
@@ -2721,7 +3679,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- if (likely(!intel_context_is_banned(ce))) {
+ /*
+ * XXX: Racey if request cancellation has occurred, see comment in
+ * __guc_reset_context().
+ */
+ if (likely(!intel_context_is_banned(ce) &&
+ !context_blocked(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
}
@@ -2797,33 +3760,47 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
struct intel_context *ce;
struct i915_request *rq;
unsigned long index;
+ unsigned long flags;
/* Reset called during driver load? GuC not yet initialised! */
if (unlikely(!guc_submission_initialized(guc)))
return;
+ xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
- if (!intel_context_is_pinned(ce))
+ if (!kref_get_unless_zero(&ce->ref))
continue;
+ xa_unlock(&guc->context_lookup);
+
+ if (!intel_context_is_pinned(ce))
+ goto next;
+
if (intel_engine_is_virtual(ce->engine)) {
if (!(ce->engine->mask & engine->mask))
- continue;
+ goto next;
} else {
if (ce->engine != engine)
- continue;
+ goto next;
}
- list_for_each_entry(rq, &ce->guc_active.requests, sched.link) {
+ list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
continue;
intel_engine_set_hung_context(engine, ce);
/* Can only cope with one hang at a time... */
- return;
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
+ goto done;
}
+next:
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
}
+done:
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
}
void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
@@ -2839,23 +3816,34 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
if (unlikely(!guc_submission_initialized(guc)))
return;
+ xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
- if (!intel_context_is_pinned(ce))
+ if (!kref_get_unless_zero(&ce->ref))
continue;
+ xa_unlock(&guc->context_lookup);
+
+ if (!intel_context_is_pinned(ce))
+ goto next;
+
if (intel_engine_is_virtual(ce->engine)) {
if (!(ce->engine->mask & engine->mask))
- continue;
+ goto next;
} else {
if (ce->engine != engine)
- continue;
+ goto next;
}
- spin_lock_irqsave(&ce->guc_active.lock, flags);
- intel_engine_dump_active_requests(&ce->guc_active.requests,
+ spin_lock(&ce->guc_state.lock);
+ intel_engine_dump_active_requests(&ce->guc_state.requests,
hung_rq, m);
- spin_unlock_irqrestore(&ce->guc_active.lock, flags);
+ spin_unlock(&ce->guc_state.lock);
+
+next:
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
}
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
}
void intel_guc_submission_print_info(struct intel_guc *guc,
@@ -2881,7 +3869,7 @@ void intel_guc_submission_print_info(struct intel_guc *guc,
priolist_for_each_request(rq, pl)
drm_printf(p, "guc_id=%u, seqno=%llu\n",
- rq->context->guc_id,
+ rq->context->guc_id.id,
rq->fence.seqno);
}
spin_unlock_irqrestore(&sched_engine->lock, flags);
@@ -2893,46 +3881,348 @@ static inline void guc_log_context_priority(struct drm_printer *p,
{
int i;
- drm_printf(p, "\t\tPriority: %d\n",
- ce->guc_prio);
+ drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
i < GUC_CLIENT_PRIORITY_NUM; ++i) {
drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
- i, ce->guc_prio_count[i]);
+ i, ce->guc_state.prio_count[i]);
}
drm_printf(p, "\n");
}
+static inline void guc_log_context(struct drm_printer *p,
+ struct intel_context *ce)
+{
+ drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
+ drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+ ce->ring->head,
+ ce->lrc_reg_state[CTX_RING_HEAD]);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+ ce->ring->tail,
+ ce->lrc_reg_state[CTX_RING_TAIL]);
+ drm_printf(p, "\t\tContext Pin Count: %u\n",
+ atomic_read(&ce->pin_count));
+ drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
+ atomic_read(&ce->guc_id.ref));
+ drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
+ ce->guc_state.sched_state);
+}
+
void intel_guc_submission_print_context_info(struct intel_guc *guc,
struct drm_printer *p)
{
struct intel_context *ce;
unsigned long index;
+ unsigned long flags;
+ xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
- drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id);
- drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
- drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
- ce->ring->head,
- ce->lrc_reg_state[CTX_RING_HEAD]);
- drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
- ce->ring->tail,
- ce->lrc_reg_state[CTX_RING_TAIL]);
- drm_printf(p, "\t\tContext Pin Count: %u\n",
- atomic_read(&ce->pin_count));
- drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
- atomic_read(&ce->guc_id_ref));
- drm_printf(p, "\t\tSchedule State: 0x%x, 0x%x\n\n",
- ce->guc_state.sched_state,
- atomic_read(&ce->guc_sched_state_no_lock));
+ GEM_BUG_ON(intel_context_is_child(ce));
+ guc_log_context(p, ce);
guc_log_context_priority(p, ce);
+
+ if (intel_context_is_parent(ce)) {
+ struct guc_process_desc *desc = __get_process_desc(ce);
+ struct intel_context *child;
+
+ drm_printf(p, "\t\tNumber children: %u\n",
+ ce->parallel.number_children);
+ drm_printf(p, "\t\tWQI Head: %u\n",
+ READ_ONCE(desc->head));
+ drm_printf(p, "\t\tWQI Tail: %u\n",
+ READ_ONCE(desc->tail));
+ drm_printf(p, "\t\tWQI Status: %u\n\n",
+ READ_ONCE(desc->wq_status));
+
+ if (ce->engine->emit_bb_start ==
+ emit_bb_start_parent_no_preempt_mid_batch) {
+ u8 i;
+
+ drm_printf(p, "\t\tChildren Go: %u\n\n",
+ get_children_go_value(ce));
+ for (i = 0; i < ce->parallel.number_children; ++i)
+ drm_printf(p, "\t\tChildren Join: %u\n",
+ get_children_join_value(ce, i));
+ }
+
+ for_each_child(ce, child)
+ guc_log_context(p, child);
+ }
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
+static inline u32 get_children_go_addr(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ return i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce) +
+ offsetof(struct parent_scratch, go.semaphore);
+}
+
+static inline u32 get_children_join_addr(struct intel_context *ce,
+ u8 child_index)
+{
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ return i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce) +
+ offsetof(struct parent_scratch, join[child_index].semaphore);
+}
+
+#define PARENT_GO_BB 1
+#define PARENT_GO_FINI_BREADCRUMB 0
+#define CHILD_GO_BB 1
+#define CHILD_GO_FINI_BREADCRUMB 0
+static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ struct intel_context *ce = rq->context;
+ u32 *cs;
+ u8 i;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Wait on children */
+ for (i = 0; i < ce->parallel.number_children; ++i) {
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = PARENT_GO_BB;
+ *cs++ = get_children_join_addr(ce, i);
+ *cs++ = 0;
+ }
+
+ /* Turn off preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+
+ /* Tell children go */
+ cs = gen8_emit_ggtt_write(cs,
+ CHILD_GO_BB,
+ get_children_go_addr(ce),
+ 0);
+
+ /* Jump to batch */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
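A quick check of the ring budget requested above: each child semaphore wait is 4 dwords, the arbitration toggle plus pad is 2, the children-go write is 4, and the batch start is 4 (three dwords of MI_BATCH_BUFFER_START_GEN8 plus a trailing MI_NOOP), i.e. 4 * number_children + 10, matching the intel_ring_begin() argument.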
+
+static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ struct intel_context *ce = rq->context;
+ struct intel_context *parent = intel_context_to_parent(ce);
+ u32 *cs;
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Signal parent */
+ cs = gen8_emit_ggtt_write(cs,
+ PARENT_GO_BB,
+ get_children_join_addr(parent,
+ ce->parallel.child_index),
+ 0);
+
+ /* Wait on parent for go */
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = CHILD_GO_BB;
+ *cs++ = get_children_go_addr(parent);
+ *cs++ = 0;
+
+ /* Turn off preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* Jump to batch */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static u32 *
+__emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+ u8 i;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ /* Wait on children */
+ for (i = 0; i < ce->parallel.number_children; ++i) {
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = PARENT_GO_FINI_BREADCRUMB;
+ *cs++ = get_children_join_addr(ce, i);
+ *cs++ = 0;
+ }
+
+ /* Turn on preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ /* Tell children go */
+ cs = gen8_emit_ggtt_write(cs,
+ CHILD_GO_FINI_BREADCRUMB,
+ get_children_go_addr(ce),
+ 0);
+
+ return cs;
+}
+
+/*
+ * If this is true, a submission of multi-lrc requests had an error and the
+ * requests need to be skipped. The front end (execbuf IOCTL) should've called
+ * i915_request_skip, which squashes the BB, but we still need to emit the
+ * fini breadcrumb seqno write. At this point we don't know how many of the
+ * requests in the multi-lrc submission were generated, so we can't do the
+ * handshake between the parent and children (e.g. if 4 requests should be
+ * generated but the 2nd hit an error, only 1 would be seen by the GuC
+ * backend). Simply skip the handshake, but still emit the breadcrumb seqno,
+ * if an error has occurred on any of the requests in the submission /
+ * relationship.
+ */
+static inline bool skip_handshake(struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
+}
+
+static u32 *
+emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ if (unlikely(skip_handshake(rq))) {
+ /*
+		 * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch;
+		 * the -6 comes from the length of the emits below.
+ */
+ memset(cs, 0, sizeof(u32) *
+ (ce->engine->emit_fini_breadcrumb_dw - 6));
+ cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+ } else {
+ cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
+ }
+
+ /* Emit fini breadcrumb */
+ cs = gen8_emit_ggtt_write(cs,
+ rq->fence.seqno,
+ i915_request_active_timeline(rq)->hwsp_offset,
+ 0);
+
+ /* User interrupt */
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+
+ return cs;
+}
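The -6 in the skip path above accounts for exactly what is still emitted after it: the fini seqno ggtt write (4 dwords) plus MI_USER_INTERRUPT and MI_NOOP (2 dwords), 4 + 2 = 6. The same arithmetic applies to the child variant below.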
+
+static u32 *
+__emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+ struct intel_context *parent = intel_context_to_parent(ce);
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+
+ /* Turn on preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ /* Signal parent */
+ cs = gen8_emit_ggtt_write(cs,
+ PARENT_GO_FINI_BREADCRUMB,
+ get_children_join_addr(parent,
+ ce->parallel.child_index),
+ 0);
+
+	/* Wait on parent for go */
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = CHILD_GO_FINI_BREADCRUMB;
+ *cs++ = get_children_go_addr(parent);
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *
+emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+
+ if (unlikely(skip_handshake(rq))) {
+ /*
+		 * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch;
+		 * the -6 comes from the length of the emits below.
+ */
+ memset(cs, 0, sizeof(u32) *
+ (ce->engine->emit_fini_breadcrumb_dw - 6));
+ cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+ } else {
+ cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
}
+
+ /* Emit fini breadcrumb */
+ cs = gen8_emit_ggtt_write(cs,
+ rq->fence.seqno,
+ i915_request_active_timeline(rq)->hwsp_offset,
+ 0);
+
+ /* User interrupt */
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+
+ return cs;
}
static struct intel_context *
-guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
+guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags)
{
struct guc_virtual_engine *ve;
struct intel_guc *guc;
@@ -2981,6 +4271,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
}
ve->base.mask |= sibling->mask;
+ ve->base.logical_mask |= sibling->logical_mask;
if (n != 0 && ve->base.class != sibling->class) {
DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
@@ -3036,3 +4327,8 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
return false;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_guc.c"
+#include "selftest_guc_multi_lrc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index fc5387b410a2..ff4b6869b80b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -87,17 +87,25 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
+ err = PTR_ERR(vaddr);
+ goto unpin_out;
}
copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
- GEM_BUG_ON(copied < huc->fw.rsa_size);
-
i915_gem_object_unpin_map(vma->obj);
+ if (copied < huc->fw.rsa_size) {
+ err = -ENOMEM;
+ goto unpin_out;
+ }
+
huc->rsa_data = vma;
return 0;
+
+unpin_out:
+ i915_vma_unpin_and_release(&vma, 0);
+ return err;
}
static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
index 5733c15fd123..15998963b863 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
@@ -5,7 +5,7 @@
#include <drm/drm_print.h>
-#include "gt/debugfs_gt.h"
+#include "gt/intel_gt_debugfs.h"
#include "intel_huc.h"
#include "intel_huc_debugfs.h"
@@ -21,11 +21,11 @@ static int huc_info_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(huc_info);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(huc_info);
void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "huc_info", &huc_info_fops, NULL },
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 86c318516e14..2fef3b0bbe95 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -35,7 +35,7 @@ static void uc_expand_default_options(struct intel_uc *uc)
}
/* Intermediate platforms are HuC authentication only */
- if (IS_DG1(i915) || IS_ALDERLAKE_S(i915)) {
+ if (IS_ALDERLAKE_S(i915)) {
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
return;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
index 089d98662f46..c2f7924295e7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -6,7 +6,7 @@
#include <linux/debugfs.h>
#include <drm/drm_print.h>
-#include "gt/debugfs_gt.h"
+#include "gt/intel_gt_debugfs.h"
#include "intel_guc_debugfs.h"
#include "intel_huc_debugfs.h"
#include "intel_uc.h"
@@ -32,11 +32,11 @@ static int uc_usage_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(uc_usage);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(uc_usage);
void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "usage", &uc_usage_fops, NULL },
};
struct dentry *root;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 3a16d08608a5..3aa87be4f2e4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -7,6 +7,7 @@
#include <linux/firmware.h>
#include <drm/drm_print.h>
+#include "gem/i915_gem_lmem.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
@@ -50,6 +51,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3), huc_def(tgl, 7, 9, 3)) \
fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
+ fw_def(DG1, 0, guc_def(dg1, 62, 0, 0), huc_def(dg1, 7, 9, 3)) \
fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0), huc_def(ehl, 9, 0, 0)) \
@@ -370,7 +372,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
uc_fw->private_data_size = css->private_data_size;
- obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+ if (HAS_LMEM(i915)) {
+ obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
+ if (!IS_ERR(obj))
+ obj->flags |= I915_BO_ALLOC_PM_EARLY;
+ } else {
+ obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+ }
+
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto fail;
@@ -413,20 +422,25 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
- struct i915_vma dummy = {
- .node.start = uc_fw_ggtt_offset(uc_fw),
- .node.size = obj->base.size,
- .pages = obj->mm.pages,
- .vm = &ggtt->vm,
- };
+ struct i915_vma *dummy = &uc_fw->dummy;
+ u32 pte_flags = 0;
+
+ dummy->node.start = uc_fw_ggtt_offset(uc_fw);
+ dummy->node.size = obj->base.size;
+ dummy->pages = obj->mm.pages;
+ dummy->vm = &ggtt->vm;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
+ GEM_BUG_ON(dummy->node.size > ggtt->uc_fw.size);
/* uc_fw->obj cache domains were not controlled across suspend */
- drm_clflush_sg(dummy.pages);
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(dummy->pages);
- ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
+ if (i915_gem_object_is_lmem(obj))
+ pte_flags |= PTE_LM;
+
+ ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
@@ -585,13 +599,68 @@ void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
*/
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
- struct sg_table *pages = uc_fw->obj->mm.pages;
+ struct intel_memory_region *mr = uc_fw->obj->mm.region;
u32 size = min_t(u32, uc_fw->rsa_size, max_len);
u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
+ struct sgt_iter iter;
+ size_t count = 0;
+ int idx;
+ /* Called during reset handling, must be atomic [no fs_reclaim] */
GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
- return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
+ idx = offset >> PAGE_SHIFT;
+ offset = offset_in_page(offset);
+ if (i915_gem_object_has_struct_page(uc_fw->obj)) {
+ struct page *page;
+
+ for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
+ u32 len = min_t(u32, size, PAGE_SIZE - offset);
+ void *vaddr;
+
+ if (idx > 0) {
+ idx--;
+ continue;
+ }
+
+ vaddr = kmap_atomic(page);
+ memcpy(dst, vaddr + offset, len);
+ kunmap_atomic(vaddr);
+
+ offset = 0;
+ dst += len;
+ size -= len;
+ count += len;
+ if (!size)
+ break;
+ }
+ } else {
+ dma_addr_t addr;
+
+ for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
+ u32 len = min_t(u32, size, PAGE_SIZE - offset);
+ void __iomem *vaddr;
+
+ if (idx > 0) {
+ idx--;
+ continue;
+ }
+
+ vaddr = io_mapping_map_atomic_wc(&mr->iomap,
+ addr - mr->region.start);
+ memcpy_fromio(dst, vaddr + offset, len);
+ io_mapping_unmap_atomic(vaddr);
+
+ offset = 0;
+ dst += len;
+ size -= len;
+ count += len;
+ if (!size)
+ break;
+ }
+ }
+
+ return count;
}
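A worked example of the offset split above, assuming 4 KiB pages: if the RSA blob starts at byte 0x1234 into the object, idx = 0x1234 >> PAGE_SHIFT = 1 and offset = offset_in_page(0x1234) = 0x234, so the loops skip one full page and then copy starting 0x234 bytes into the next one. The atomic mapping helpers (kmap_atomic() / io_mapping_map_atomic_wc()) are what keep this safe in the reset path noted above, where the copy must not sleep.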
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 99bb1fe1af66..1e00bf65639e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -10,6 +10,7 @@
#include "intel_uc_fw_abi.h"
#include "intel_device_info.h"
#include "i915_gem.h"
+#include "i915_vma.h"
struct drm_printer;
struct drm_i915_private;
@@ -75,6 +76,14 @@ struct intel_uc_fw {
bool user_overridden;
size_t size;
struct drm_i915_gem_object *obj;
+ /**
+ * @dummy: A vma used in binding the uc fw to ggtt. We can't define this
+ * vma on the stack as it can lead to a stack overflow, so we define it
+ * here. Safe to have 1 copy per uc fw because the binding is single
+	 * threaded as it is done during driver load (inherently single threaded)
+ * or during a GT reset (mutex guarantees single threaded).
+ */
+ struct i915_vma dummy;
/*
* The firmware build process will generate a version header file with major and
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
new file mode 100644
index 000000000000..fb0e4a7bd8ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "selftests/intel_scheduler_helpers.h"
+
+static struct i915_request *nop_user_request(struct intel_context *ce,
+ struct i915_request *from)
+{
+ struct i915_request *rq;
+ int ret;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ if (from) {
+ ret = i915_sw_fence_await_dma_fence(&rq->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+ if (ret < 0) {
+ i915_request_put(rq);
+ return ERR_PTR(ret);
+ }
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static int intel_guc_scrub_ctbs(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int ret = 0;
+ int i;
+ struct i915_request *last[3] = {NULL, NULL, NULL}, *rq;
+ intel_wakeref_t wakeref;
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ engine = intel_selftest_find_any_engine(gt);
+
+ /* Submit requests and inject errors forcing G2H to be dropped */
+ for (i = 0; i < 3; ++i) {
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ pr_err("Failed to create context, %d: %d\n", i, ret);
+ goto err;
+ }
+
+ switch (i) {
+ case 0:
+ ce->drop_schedule_enable = true;
+ break;
+ case 1:
+ ce->drop_schedule_disable = true;
+ break;
+ case 2:
+ ce->drop_deregister = true;
+ break;
+ }
+
+ rq = nop_user_request(ce, NULL);
+ intel_context_put(ce);
+
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ pr_err("Failed to create request, %d: %d\n", i, ret);
+ goto err;
+ }
+
+ last[i] = rq;
+ }
+
+ for (i = 0; i < 3; ++i) {
+ ret = i915_request_wait(last[i], 0, HZ);
+ if (ret < 0) {
+ pr_err("Last request failed to complete: %d\n", ret);
+ goto err;
+ }
+ i915_request_put(last[i]);
+ last[i] = NULL;
+ }
+
+ /* Force all H2G / G2H to be submitted / processed */
+ intel_gt_retire_requests(gt);
+ msleep(500);
+
+ /* Scrub missing G2H */
+ intel_gt_handle_error(engine->gt, -1, 0, "selftest reset");
+
+ /* GT will not idle if G2H are lost */
+ ret = intel_gt_wait_for_idle(gt, HZ);
+ if (ret < 0) {
+ pr_err("GT failed to idle: %d\n", ret);
+ goto err;
+ }
+
+err:
+ for (i = 0; i < 3; ++i)
+ if (last[i])
+ i915_request_put(last[i]);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+ return ret;
+}
+
+int intel_guc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_guc_scrub_ctbs),
+ };
+ struct intel_gt *gt = &i915->gt;
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
new file mode 100644
index 000000000000..50953c8e8b53
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_reset.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gem/selftests/mock_context.h"
+
+static void logical_sort(struct intel_engine_cs **engines, int num_engines)
+{
+ struct intel_engine_cs *sorted[MAX_ENGINE_INSTANCE + 1];
+ int i, j;
+
+ for (i = 0; i < num_engines; ++i)
+		for (j = 0; j < num_engines; ++j) {
+ if (engines[j]->logical_mask & BIT(i)) {
+ sorted[i] = engines[j];
+ break;
+ }
+ }
+
+ memcpy(*engines, *sorted,
+ sizeof(struct intel_engine_cs *) * num_engines);
+}
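For example (hypothetical instances): if engines[] arrives as { vcs1 with logical_mask BIT(1), vcs0 with logical_mask BIT(0) }, the sort places vcs0 at index 0 and vcs1 at index 1, so the array ends up ordered by logical instance rather than by physical enumeration, matching the ordering multi_lrc_create_parent() below feeds to intel_engine_create_parallel().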
+
+static struct intel_context *
+multi_lrc_create_parent(struct intel_gt *gt, u8 class,
+ unsigned long flags)
+{
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int i = 0;
+
+ for_each_engine(engine, gt, id) {
+ if (engine->class != class)
+ continue;
+
+ siblings[i++] = engine;
+ }
+
+ if (i <= 1)
+ return ERR_PTR(0);
+
+ logical_sort(siblings, i);
+
+ return intel_engine_create_parallel(siblings, 1, i);
+}
+
+static void multi_lrc_context_unpin(struct intel_context *ce)
+{
+ struct intel_context *child;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ for_each_child(ce, child)
+ intel_context_unpin(child);
+ intel_context_unpin(ce);
+}
+
+static void multi_lrc_context_put(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ /*
+	 * Only the parent's creation reference is put via the uAPI; the
+	 * parent itself is responsible for putting the creation references
+	 * of its children.
+ */
+ intel_context_put(ce);
+}
+
+static struct i915_request *
+multi_lrc_nop_request(struct intel_context *ce)
+{
+ struct intel_context *child;
+ struct i915_request *rq, *child_rq;
+ int i = 0;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ for_each_child(ce, child) {
+ child_rq = intel_context_create_request(child);
+ if (IS_ERR(child_rq))
+ goto child_error;
+
+ if (++i == ce->parallel.number_children)
+ set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL,
+ &child_rq->fence.flags);
+ i915_request_add(child_rq);
+ }
+
+ return rq;
+
+child_error:
+ i915_request_put(rq);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
+{
+ struct intel_context *parent;
+ struct i915_request *rq;
+ int ret;
+
+ parent = multi_lrc_create_parent(gt, class, 0);
+ if (IS_ERR(parent)) {
+ pr_err("Failed creating contexts: %ld", PTR_ERR(parent));
+ return PTR_ERR(parent);
+ } else if (!parent) {
+ pr_debug("Not enough engines in class: %d", class);
+ return 0;
+ }
+
+ rq = multi_lrc_nop_request(parent);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ pr_err("Failed creating requests: %d", ret);
+ goto out;
+ }
+
+ ret = intel_selftest_wait_for_rq(rq);
+ if (ret)
+ pr_err("Failed waiting on request: %d", ret);
+
+ i915_request_put(rq);
+
+ if (ret >= 0) {
+ ret = intel_gt_wait_for_idle(gt, HZ * 5);
+ if (ret < 0)
+ pr_err("GT failed to idle: %d\n", ret);
+ }
+
+out:
+ multi_lrc_context_unpin(parent);
+ multi_lrc_context_put(parent);
+ return ret;
+}
+
+static int intel_guc_multi_lrc_basic(void *arg)
+{
+ struct intel_gt *gt = arg;
+ unsigned int class;
+ int ret;
+
+ for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
+ ret = __intel_guc_multi_lrc_basic(gt, class);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_guc_multi_lrc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_guc_multi_lrc_basic),
+ };
+ struct intel_gt *gt = &i915->gt;
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index e5c2fdfc20e3..53d0cb327539 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -745,7 +745,7 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
@@ -849,7 +849,7 @@ retry:
*/
spt->shadow_page.type = type;
daddr = dma_map_page(kdev, spt->shadow_page.page,
- 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ 0, 4096, DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev, daddr)) {
gvt_vgpu_err("fail to map dma addr\n");
ret = -EINVAL;
@@ -865,7 +865,7 @@ retry:
return spt;
err_unmap_dma:
- dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
err_free_spt:
free_spt(spt);
return ERR_PTR(ret);
@@ -2409,8 +2409,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
return -ENOMEM;
}
- daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
- 4096, PCI_DMA_BIDIRECTIONAL);
+ daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr)) {
gvt_vgpu_err("fail to dmamap scratch_pt\n");
__free_page(virt_to_page(scratch_pt));
@@ -2461,7 +2460,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu)
if (vgpu->gtt.scratch_pt[i].page != NULL) {
daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
I915_GTT_PAGE_SHIFT);
- dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
__free_page(vgpu->gtt.scratch_pt[i].page);
vgpu->gtt.scratch_pt[i].page = NULL;
vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2741,7 +2740,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
}
daddr = dma_map_page(dev, virt_to_page(page), 0,
- 4096, PCI_DMA_BIDIRECTIONAL);
+ 4096, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr)) {
gvt_err("fail to dmamap scratch ggtt page\n");
__free_page(virt_to_page(page));
@@ -2755,7 +2754,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
- dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
__free_page(gvt->gtt.scratch_page);
return ret;
}
@@ -2779,7 +2778,7 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
I915_GTT_PAGE_SHIFT);
- dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
__free_page(gvt->gtt.scratch_page);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 7efa386449d1..20b82fb036f8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -328,7 +328,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
return ret;
/* Setup DMA mapping. */
- *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
+ *dma_addr = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr)) {
gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
page_to_pfn(page), ret);
@@ -344,7 +344,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
{
struct device *dev = vgpu->gvt->gt->i915->drm.dev;
- dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL);
gvt_unpin_guest_page(vgpu, gfn, size);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bb1be5c48c8..6c804102528b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1386,7 +1386,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
enum intel_engine_id i;
int ret;
- ppgtt = i915_ppgtt_create(&i915->gt);
+ ppgtt = i915_ppgtt_create(&i915->gt, I915_BO_ALLOC_PM_EARLY);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index 7b274c51cac0..6e2ad68f8f3f 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -4,6 +4,7 @@
*/
#include <linux/kmemleak.h>
+#include <linux/sizes.h>
#include "i915_buddy.h"
@@ -82,6 +83,7 @@ int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
size = round_down(size, chunk_size);
mm->size = size;
+ mm->avail = size;
mm->chunk_size = chunk_size;
mm->max_order = ilog2(size) - ilog2(chunk_size);
@@ -155,6 +157,8 @@ void i915_buddy_fini(struct i915_buddy_mm *mm)
i915_block_free(mm, mm->roots[i]);
}
+ GEM_WARN_ON(mm->avail != mm->size);
+
kfree(mm->roots);
kfree(mm->free_list);
}
@@ -230,6 +234,7 @@ void i915_buddy_free(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
+ mm->avail += i915_buddy_block_size(mm, block);
__i915_buddy_free(mm, block);
}
@@ -283,6 +288,7 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
}
mark_allocated(block);
+ mm->avail -= i915_buddy_block_size(mm, block);
kmemleak_update_trace(block);
return block;
@@ -368,6 +374,7 @@ int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
}
mark_allocated(block);
+ mm->avail -= i915_buddy_block_size(mm, block);
list_add_tail(&block->link, &allocated);
continue;
}
@@ -402,6 +409,44 @@ err_free:
return err;
}
+void i915_buddy_block_print(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block,
+ struct drm_printer *p)
+{
+ u64 start = i915_buddy_block_offset(block);
+ u64 size = i915_buddy_block_size(mm, block);
+
+ drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
+}
+
+void i915_buddy_print(struct i915_buddy_mm *mm, struct drm_printer *p)
+{
+ int order;
+
+ drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
+ mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);
+
+ for (order = mm->max_order; order >= 0; order--) {
+ struct i915_buddy_block *block;
+ u64 count = 0, free;
+
+ list_for_each_entry(block, &mm->free_list[order], link) {
+ GEM_BUG_ON(!i915_buddy_block_is_free(block));
+ count++;
+ }
+
+ drm_printf(p, "order-%d ", order);
+
+ free = count * (mm->chunk_size << order);
+ if (free < SZ_1M)
+ drm_printf(p, "free: %lluKiB", free >> 10);
+ else
+ drm_printf(p, "free: %lluMiB", free >> 20);
+
+ drm_printf(p, ", pages: %llu\n", count);
+ }
+}
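A minimal usage sketch for the new printer helper, assuming a debugfs seq_file context (buddy_info_show is a hypothetical caller; drm_seq_file_printer() is the stock helper from <drm/drm_print.h>):

	static int buddy_info_show(struct seq_file *m, void *data)
	{
		struct i915_buddy_mm *mm = m->private;
		struct drm_printer p = drm_seq_file_printer(m);

		/* Dumps chunk size, total/avail and per-order free counts */
		i915_buddy_print(mm, &p);
		return 0;
	}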
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_buddy.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
index 3940d632f208..7077742112ac 100644
--- a/drivers/gpu/drm/i915/i915_buddy.h
+++ b/drivers/gpu/drm/i915/i915_buddy.h
@@ -10,6 +10,8 @@
#include <linux/list.h>
#include <linux/slab.h>
+#include <drm/drm_print.h>
+
struct i915_buddy_block {
#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
@@ -69,6 +71,7 @@ struct i915_buddy_mm {
/* Must be at least PAGE_SIZE */
u64 chunk_size;
u64 size;
+ u64 avail;
};
static inline u64
@@ -129,6 +132,11 @@ void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
+void i915_buddy_print(struct i915_buddy_mm *mm, struct drm_printer *p);
+void i915_buddy_block_print(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block,
+ struct drm_printer *p);
+
void i915_buddy_module_exit(void);
int i915_buddy_module_init(void);
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
index b79b5f6d2cfa..afb828dab53b 100644
--- a/drivers/gpu/drm/i915/i915_config.c
+++ b/drivers/gpu/drm/i915/i915_config.c
@@ -8,7 +8,7 @@
unsigned long
i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
{
- if (context && IS_ACTIVE(CONFIG_DRM_I915_FENCE_TIMEOUT))
+ if (CONFIG_DRM_I915_FENCE_TIMEOUT && context)
return msecs_to_jiffies_timeout(CONFIG_DRM_I915_FENCE_TIMEOUT);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 44969f5dde50..fe638b5da7c0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,13 +32,15 @@
#include <drm/drm_debugfs.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
-#include "gt/intel_gt.h"
+#include "gt/intel_gt_debugfs.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_pm_debugfs.h"
#include "gt/intel_gt_requests.h"
-#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
+#include "gt/intel_reset.h"
#include "gt/intel_rps.h"
#include "gt/intel_sseu_debugfs.h"
@@ -48,7 +50,6 @@
#include "i915_scheduler.h"
#include "i915_trace.h"
#include "intel_pm.h"
-#include "intel_sideband.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -139,7 +140,6 @@ void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct intel_engine_cs *engine;
struct i915_vma *vma;
int pin_count = 0;
@@ -229,15 +229,12 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
if (i915_gem_object_is_framebuffer(obj))
seq_printf(m, " (fb)");
-
- engine = i915_gem_object_last_write_engine(obj);
- if (engine)
- seq_printf(m, " (%s)", engine->name);
}
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
struct intel_memory_region *mr;
enum intel_region_id id;
@@ -246,8 +243,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
atomic_read(&i915->mm.free_count),
i915->mm.shrink_memory);
for_each_memory_region(mr, i915, id)
- seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
- mr->name, &mr->total, &mr->avail);
+ intel_memory_region_debug(mr, &p);
return 0;
}
@@ -354,232 +350,12 @@ static const struct file_operations i915_error_state_fops = {
static int i915_frequency_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uncore *uncore = &dev_priv->uncore;
- struct intel_rps *rps = &dev_priv->gt.rps;
- intel_wakeref_t wakeref;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- if (GRAPHICS_VER(dev_priv) == 5) {
- u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
- u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
-
- seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
- seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
- seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
- MEMSTAT_VID_SHIFT);
- seq_printf(m, "Current P-state: %d\n",
- (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- u32 rpmodectl, freq_sts;
-
- rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
- seq_printf(m, "Video Turbo Mode: %s\n",
- yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rpmodectl & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
- yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
-
- vlv_punit_get(dev_priv);
- freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(dev_priv);
-
- seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
- seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
-
- seq_printf(m, "actual GPU freq: %d MHz\n",
- intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
-
- seq_printf(m, "current GPU freq: %d MHz\n",
- intel_gpu_freq(rps, rps->cur_freq));
-
- seq_printf(m, "max GPU freq: %d MHz\n",
- intel_gpu_freq(rps, rps->max_freq));
-
- seq_printf(m, "min GPU freq: %d MHz\n",
- intel_gpu_freq(rps, rps->min_freq));
-
- seq_printf(m, "idle GPU freq: %d MHz\n",
- intel_gpu_freq(rps, rps->idle_freq));
-
- seq_printf(m,
- "efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(rps, rps->efficient_freq));
- } else if (GRAPHICS_VER(dev_priv) >= 6) {
- u32 rp_state_limits;
- u32 gt_perf_status;
- u32 rp_state_cap;
- u32 rpmodectl, rpinclimit, rpdeclimit;
- u32 rpstat, cagf, reqf;
- u32 rpupei, rpcurup, rpprevup;
- u32 rpdownei, rpcurdown, rpprevdown;
- u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
- int max_freq;
-
- rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS);
- if (IS_GEN9_LP(dev_priv)) {
- rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP);
- gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS);
- } else {
- rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP);
- gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS);
- }
-
- /* RPSTAT1 is in the GT power well */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
- if (GRAPHICS_VER(dev_priv) >= 9)
- reqf >>= 23;
- else {
- reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- reqf >>= 24;
- else
- reqf >>= 25;
- }
- reqf = intel_gpu_freq(rps, reqf);
-
- rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
- rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD);
- rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD);
-
- rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1);
- rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
- rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
- rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
- rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
- rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
- rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- cagf = intel_rps_read_actual_frequency(rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- if (GRAPHICS_VER(dev_priv) >= 11) {
- pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
- /*
- * The equivalent to the PM ISR & IIR cannot be read
- * without affecting the current state of the system
- */
- pm_isr = 0;
- pm_iir = 0;
- } else if (GRAPHICS_VER(dev_priv) >= 8) {
- pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
- pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
- pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
- pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2));
- } else {
- pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER);
- pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR);
- pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR);
- pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
- }
- pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK);
-
- seq_printf(m, "Video Turbo Mode: %s\n",
- yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rpmodectl & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
- yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
-
- seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
- pm_ier, pm_imr, pm_mask);
- if (GRAPHICS_VER(dev_priv) <= 10)
- seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
- pm_isr, pm_iir);
- seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
- rps->pm_intrmsk_mbz);
- seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
- seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & (GRAPHICS_VER(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
- seq_printf(m, "Render p-state VID: %d\n",
- gt_perf_status & 0xff);
- seq_printf(m, "Render p-state limit: %d\n",
- rp_state_limits & 0xff);
- seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
- seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
- seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
- seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
- seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
- seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
- rpupei,
- intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
- seq_printf(m, "RP CUR UP: %d (%lldun)\n",
- rpcurup,
- intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%lldns)\n",
- rpprevup,
- intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
- seq_printf(m, "Up threshold: %d%%\n",
- rps->power.up_threshold);
-
- seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
- rpdownei,
- intel_gt_pm_interval_to_ns(&dev_priv->gt,
- rpdownei));
- seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
- rpcurdown,
- intel_gt_pm_interval_to_ns(&dev_priv->gt,
- rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
- rpprevdown,
- intel_gt_pm_interval_to_ns(&dev_priv->gt,
- rpprevdown));
- seq_printf(m, "Down threshold: %d%%\n",
- rps->power.down_threshold);
-
- max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
- rp_state_cap >> 16) & 0xff;
- max_freq *= (IS_GEN9_BC(dev_priv) ||
- GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
-
- max_freq = (rp_state_cap & 0xff00) >> 8;
- max_freq *= (IS_GEN9_BC(dev_priv) ||
- GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
-
- max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
- rp_state_cap >> 0) & 0xff;
- max_freq *= (IS_GEN9_BC(dev_priv) ||
- GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
- seq_printf(m, "Max overclocked frequency: %dMHz\n",
- intel_gpu_freq(rps, rps->max_freq));
-
- seq_printf(m, "Current freq: %d MHz\n",
- intel_gpu_freq(rps, rps->cur_freq));
- seq_printf(m, "Actual freq: %d MHz\n", cagf);
- seq_printf(m, "Idle freq: %d MHz\n",
- intel_gpu_freq(rps, rps->idle_freq));
- seq_printf(m, "Min freq: %d MHz\n",
- intel_gpu_freq(rps, rps->min_freq));
- seq_printf(m, "Boost freq: %d MHz\n",
- intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Max freq: %d MHz\n",
- intel_gpu_freq(rps, rps->max_freq));
- seq_printf(m,
- "efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(rps, rps->efficient_freq));
- } else {
- seq_puts(m, "no P-state info available\n");
- }
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_gt *gt = &i915->gt;
+ struct drm_printer p = drm_seq_file_printer(m);
- seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
- seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
- seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
+ intel_gt_pm_frequency_dump(gt, &p);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -778,36 +554,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
-static int
-i915_wedged_get(void *data, u64 *val)
+static int i915_wedged_get(void *data, u64 *val)
{
struct drm_i915_private *i915 = data;
- int ret = intel_gt_terminally_wedged(&i915->gt);
- switch (ret) {
- case -EIO:
- *val = 1;
- return 0;
- case 0:
- *val = 0;
- return 0;
- default:
- return ret;
- }
+ return intel_gt_debugfs_reset_show(&i915->gt, val);
}
-static int
-i915_wedged_set(void *data, u64 val)
+static int i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- /* Flush any previous reset before applying for a new one */
- wait_event(i915->gt.reset.queue,
- !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
-
- intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
- "Manually set wedged engine mask = %llx", val);
- return 0;
+ return intel_gt_debugfs_reset_store(&i915->gt, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
@@ -952,27 +710,15 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- struct intel_gt *gt = &i915->gt;
- atomic_inc(&gt->user_wakeref);
- intel_gt_pm_get(gt);
- if (GRAPHICS_VER(i915) >= 6)
- intel_uncore_forcewake_user_get(gt->uncore);
-
- return 0;
+ return intel_gt_pm_debugfs_forcewake_user_open(&i915->gt);
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- struct intel_gt *gt = &i915->gt;
- if (GRAPHICS_VER(i915) >= 6)
- intel_uncore_forcewake_user_put(&i915->uncore);
- intel_gt_pm_put(gt);
- atomic_dec(&gt->user_wakeref);
-
- return 0;
+ return intel_gt_pm_debugfs_forcewake_user_release(&i915->gt);
}
static const struct file_operations i915_forcewake_fops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 59fb4c710c8c..b18a250e5d2e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,6 +67,8 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
+#include "pxp/intel_pxp_pm.h"
+
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_ioc32.h"
@@ -82,9 +84,9 @@
#include "intel_dram.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
+#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_region_ttm.h"
-#include "intel_sideband.h"
#include "vlv_suspend.h"
static const struct drm_driver driver;
@@ -97,7 +99,7 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
drm_err(&dev_priv->drm, "bridge device not found\n");
- return -1;
+ return -EIO;
}
return 0;
}
@@ -409,8 +411,9 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- if (i915_get_bridge_dev(dev_priv))
- return -EIO;
+ ret = i915_get_bridge_dev(dev_priv);
+ if (ret < 0)
+ return ret;
ret = intel_uncore_init_mmio(&dev_priv->uncore);
if (ret < 0)
@@ -588,8 +591,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
- intel_gt_init_workarounds(dev_priv);
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -1096,9 +1097,7 @@ static int i915_drm_prepare(struct drm_device *dev)
* split out that work and pull it forward so that after point,
* the GPU is not woken again.
*/
- i915_gem_suspend(i915);
-
- return 0;
+ return i915_gem_backup_suspend(i915);
}
static int i915_drm_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 005b1cec7007..12256218634f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -323,15 +323,15 @@ struct intel_crtc;
struct intel_limit;
struct dpll;
-struct drm_i915_display_funcs {
- void (*get_cdclk)(struct drm_i915_private *dev_priv,
- struct intel_cdclk_config *cdclk_config);
- void (*set_cdclk)(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_config *cdclk_config,
- enum pipe pipe);
- int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
- int (*get_fifo_size)(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane);
+/* functions used internally by intel_pm.c */
+struct drm_i915_clock_gating_funcs {
+ void (*init_clock_gating)(struct drm_i915_private *dev_priv);
+};
+
+/* functions used for watermark calcs for display. */
+struct drm_i915_wm_disp_funcs {
+ /* update_wm is for legacy wm management */
+ void (*update_wm)(struct drm_i915_private *dev_priv);
int (*compute_pipe_wm)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_intermediate_wm)(struct intel_atomic_state *state,
@@ -343,39 +343,9 @@ struct drm_i915_display_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
- void (*update_wm)(struct intel_crtc *crtc);
- int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
- u8 (*calc_voltage_level)(int cdclk);
- /* Returns the active state of the crtc, and if the crtc is active,
- * fills out the pipe-config with the hw state. */
- bool (*get_pipe_config)(struct intel_crtc *,
- struct intel_crtc_state *);
- void (*get_initial_plane_config)(struct intel_crtc *,
- struct intel_initial_plane_config *);
- int (*crtc_compute_clock)(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
- void (*crtc_enable)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*crtc_disable)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*commit_modeset_enables)(struct intel_atomic_state *state);
- void (*commit_modeset_disables)(struct intel_atomic_state *state);
- void (*audio_codec_enable)(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
- void (*audio_codec_disable)(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state);
- void (*fdi_link_train)(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state);
- void (*init_clock_gating)(struct drm_i915_private *dev_priv);
- void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
- /* clock updates for mode set */
- /* cursor updates */
- /* render clock increase/decrease */
- /* display clock increase/decrease */
- /* pll clock increase/decrease */
+};
+struct intel_color_funcs {
int (*color_check)(struct intel_crtc_state *crtc_state);
/*
* Program double buffered color management registers during
@@ -394,6 +364,53 @@ struct drm_i915_display_funcs {
void (*read_luts)(struct intel_crtc_state *crtc_state);
};
+struct intel_audio_funcs {
+ void (*audio_codec_enable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ void (*audio_codec_disable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+};
+
+struct intel_cdclk_funcs {
+ void (*get_cdclk)(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config);
+ void (*set_cdclk)(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe);
+ int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
+ int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
+ u8 (*calc_voltage_level)(int cdclk);
+};
+
+struct intel_hotplug_funcs {
+ void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
+};
+
+struct intel_fdi_funcs {
+ void (*fdi_link_train)(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+};
+
+struct intel_dpll_funcs {
+ int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
+};
+
+struct drm_i915_display_funcs {
+ /* Returns the active state of the crtc, and if the crtc is active,
+ * fills out the pipe-config with the hw state. */
+ bool (*get_pipe_config)(struct intel_crtc *,
+ struct intel_crtc_state *);
+ void (*get_initial_plane_config)(struct intel_crtc *,
+ struct intel_initial_plane_config *);
+ void (*crtc_enable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*crtc_disable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*commit_modeset_enables)(struct intel_atomic_state *state);
+};
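With the old monolithic vtable now split by functional area, each group is reached through its own const pointer on drm_i915_private (see the new members further down in this hunk). A minimal caller-side sketch of the resulting pattern (illustrative only, not code from this patch; the NULL check is an assumption for platforms that never assign a table):

	static inline void intel_hpd_irq_setup(struct drm_i915_private *i915)
	{
		/* hotplug_funcs is assigned per-platform in intel_irq_init() */
		if (i915->hotplug_funcs)
			i915->hotplug_funcs->hpd_irq_setup(i915);
	}

Making the pointers const also lets the tables themselves live in rodata, which appears to be the point of the refactor.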
+
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
@@ -454,7 +471,6 @@ struct intel_fbc {
} fb;
unsigned int fence_y_offset;
- u16 gen9_wa_cfb_stride;
u16 interval;
s8 fence_id;
bool psr2_active;
@@ -479,9 +495,10 @@ struct intel_fbc {
u64 modifier;
} fb;
- int cfb_size;
+ unsigned int cfb_stride;
+ unsigned int cfb_size;
unsigned int fence_y_offset;
- u16 gen9_wa_cfb_stride;
+ u16 override_cfb_stride;
u16 interval;
s8 fence_id;
bool plane_visible;
@@ -636,22 +653,6 @@ i915_fence_timeout(const struct drm_i915_private *i915)
/* Amount of PSF GV points, BSpec precisely defines this */
#define I915_NUM_PSF_GV_POINTS 3
-struct ddi_vbt_port_info {
- /* Non-NULL if port present. */
- struct intel_bios_encoder_data *devdata;
-
- int max_tmds_clock;
-
- /* This is an index in the HDMI/DVI DDI buffer translation table. */
- u8 hdmi_level_shift;
- u8 hdmi_level_shift_set:1;
-
- u8 alternate_aux_channel;
- u8 alternate_ddc_pin;
-
- int dp_max_link_rate; /* 0 for not limited by VBT */
-};
-
enum psr_lines_to_wait {
PSR_0_LINES_TO_WAIT = 0,
PSR_1_LINE_TO_WAIT,
@@ -706,6 +707,7 @@ struct intel_vbt_data {
struct {
u16 pwm_freq_hz;
+ u16 brightness_precision_bits;
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
@@ -732,7 +734,7 @@ struct intel_vbt_data {
struct list_head display_devices;
- struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
+ struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
struct sdvo_device_mapping sdvo_mappings[2];
};
@@ -886,8 +888,6 @@ struct drm_i915_private {
*/
u32 gpio_mmio_base;
- u32 hsw_psr_mmio_adjust;
-
/* MMIO base address for MIPI regs */
u32 mipi_mmio_base;
@@ -974,8 +974,32 @@ struct drm_i915_private {
/* unbound hipri wq for page flips/plane updates */
struct workqueue_struct *flip_wq;
+ /* pm private clock gating functions */
+ const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
+
+ /* pm display functions */
+ const struct drm_i915_wm_disp_funcs *wm_disp;
+
+ /* irq display functions */
+ const struct intel_hotplug_funcs *hotplug_funcs;
+
+ /* fdi display functions */
+ const struct intel_fdi_funcs *fdi_funcs;
+
+ /* display pll funcs */
+ const struct intel_dpll_funcs *dpll_funcs;
+
/* Display functions */
- struct drm_i915_display_funcs display;
+ const struct drm_i915_display_funcs *display;
+
+ /* Display internal color functions */
+ const struct intel_color_funcs *color_funcs;
+
+ /* Display internal audio functions */
+ const struct intel_audio_funcs *audio_funcs;
+
+ /* Display CDCLK functions */
+ const struct intel_cdclk_funcs *cdclk_funcs;
/* PCH chipset type */
enum intel_pch pch_type;
@@ -1022,8 +1046,6 @@ struct drm_i915_private {
*/
u8 active_pipes;
- struct i915_wa_list gt_wa_list;
-
struct i915_frontbuffer_tracking fb_tracking;
struct intel_atomic_helper {
@@ -1665,6 +1687,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
+#define HAS_DP20(dev_priv) (IS_DG2(dev_priv))
#define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
@@ -1702,6 +1725,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
+#define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
+ INTEL_INFO(dev_priv)->has_pxp) && \
+ VDBOX_MASK(&dev_priv->gt))
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
@@ -1721,6 +1747,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12)
+#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
+
/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
@@ -1881,11 +1909,11 @@ i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
struct i915_address_space *vm;
- rcu_read_lock();
+ xa_lock(&file_priv->vm_xa);
vm = xa_load(&file_priv->vm_xa, id);
- if (vm && !kref_get_unless_zero(&vm->ref))
- vm = NULL;
- rcu_read_unlock();
+ if (vm)
+ kref_get(&vm->ref);
+ xa_unlock(&file_priv->vm_xa);
return vm;
}
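The lookup above now serialises against removal with xa_lock() instead of relying on RCU plus kref_get_unless_zero(); presumably the xarray's own reference is dropped under the same lock, so an entry found while holding it can never have a zero refcount and a plain kref_get() suffices. A hypothetical caller sketch (names follow typical i915 usage and are not taken from this patch):

	struct i915_address_space *vm;

	vm = i915_gem_vm_lookup(file_priv, args->vm_id);
	if (!vm)
		return -ENOENT;

	/* ... use the vm; drop the reference taken by the lookup ... */
	i915_vm_put(vm);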
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 590efc8b0265..981e383d1a5d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1139,8 +1139,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
intel_gt_driver_release(&dev_priv->gt);
- intel_wa_list_free(&dev_priv->gt_wa_list);
-
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_drain_freed_objects(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 36489be4896b..cd5f2348a187 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,7 +30,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
do {
if (dma_map_sg_attrs(obj->base.dev->dev,
pages->sgl, pages->nents,
- PCI_DMA_BIDIRECTIONAL,
+ DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC |
DMA_ATTR_NO_KERNEL_MAPPING |
DMA_ATTR_NO_WARN))
@@ -64,7 +64,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
usleep_range(100, 250);
dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_ww.h b/drivers/gpu/drm/i915/i915_gem_ww.h
index f6b1a796667b..86f0fe343de6 100644
--- a/drivers/gpu/drm/i915/i915_gem_ww.h
+++ b/drivers/gpu/drm/i915/i915_gem_ww.h
@@ -11,8 +11,7 @@ struct i915_gem_ww_ctx {
struct ww_acquire_ctx ctx;
struct list_head obj_list;
struct drm_i915_gem_object *contended;
- unsigned short intr;
- unsigned short loop;
+ bool intr;
};
void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ctx, bool intr);
@@ -20,31 +19,23 @@ void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ctx);
int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ctx);
void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj);
-/* Internal functions used by the inlines! Don't use. */
+/* Internal function used by the inlines! Don't use. */
static inline int __i915_gem_ww_fini(struct i915_gem_ww_ctx *ww, int err)
{
- ww->loop = 0;
if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(ww);
if (!err)
- ww->loop = 1;
+ err = -EDEADLK;
}
- if (!ww->loop)
+ if (err != -EDEADLK)
i915_gem_ww_ctx_fini(ww);
return err;
}
-static inline void
-__i915_gem_ww_init(struct i915_gem_ww_ctx *ww, bool intr)
-{
- i915_gem_ww_ctx_init(ww, intr);
- ww->loop = 1;
-}
-
-#define for_i915_gem_ww(_ww, _err, _intr) \
- for (__i915_gem_ww_init(_ww, _intr); (_ww)->loop; \
- _err = __i915_gem_ww_fini(_ww, _err))
-
+#define for_i915_gem_ww(_ww, _err, _intr) \
+ for (i915_gem_ww_ctx_init(_ww, _intr), (_err) = -EDEADLK; \
+ (_err) == -EDEADLK; \
+ (_err) = __i915_gem_ww_fini(_ww, _err))
#endif
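The rewritten for_i915_gem_ww() drives its retry loop purely through the error value: initialisation seeds _err with -EDEADLK so the body runs at least once, and __i915_gem_ww_fini() re-arms -EDEADLK only after a successful backoff, ending the loop on any other result. A usage sketch under those semantics (obj and the work done under the lock are placeholders):

	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;	/* -EDEADLK backs off and retries */

		/* ... operate on obj while it is ww-locked ... */
	}
	return err;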
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9cf6ac575de1..2a2d7643b551 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -431,6 +431,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
int slice;
int subslice;
+ int iter;
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
@@ -444,19 +445,38 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
if (GRAPHICS_VER(m->i915) <= 6)
return;
- for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
- err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice,
- ee->instdone.sampler[slice][subslice]);
+ if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 50)) {
+ for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
+ err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.sampler[slice][subslice]);
- for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
- err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice,
- ee->instdone.row[slice][subslice]);
+ for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
+ err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.row[slice][subslice]);
+ } else {
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
+ err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.sampler[slice][subslice]);
+
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
+ err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.row[slice][subslice]);
+ }
if (GRAPHICS_VER(m->i915) < 12)
return;
+ if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
+ for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
+ err_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.geom_svg[slice][subslice]);
+ }
+
err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
ee->instdone.slice_common_extra[0]);
err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",
@@ -733,7 +753,8 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
* only exists if the corresponding VCS engine is
* present.
*/
- if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+ if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
+ !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
continue;
err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
@@ -1612,7 +1633,8 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
* only exists if the corresponding VCS engine is
* present.
*/
- if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+ if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
+ !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
continue;
gt->sfc_done[i] =
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9bc4f4a8e12e..77680bca46ee 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -359,9 +359,8 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
+static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+ u32 interrupt_mask, u32 enabled_irq_mask)
{
u32 new_val;
@@ -380,6 +379,16 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
}
}
+void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
+{
+ ilk_update_display_irq(i915, bits, bits);
+}
+
+void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
+{
+ ilk_update_display_irq(i915, bits, 0);
+}
+
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -419,10 +428,9 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
+static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
u32 new_val;
@@ -444,15 +452,27 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
}
}
+void bdw_enable_pipe_irq(struct drm_i915_private *i915,
+ enum pipe pipe, u32 bits)
+{
+ bdw_update_pipe_irq(i915, pipe, bits, bits);
+}
+
+void bdw_disable_pipe_irq(struct drm_i915_private *i915,
+ enum pipe pipe, u32 bits)
+{
+ bdw_update_pipe_irq(i915, pipe, bits, 0);
+}
+
/**
* ibx_display_interrupt_update - update SDEIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
+static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
sdeimr &= ~interrupt_mask;
@@ -469,6 +489,16 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}
+void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+{
+ ibx_display_interrupt_update(i915, bits, bits);
+}
+
+void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+{
+ ibx_display_interrupt_update(i915, bits, 0);
+}
+
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -2093,22 +2123,6 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev_priv);
- if (de_iir & DE_EDP_PSR_INT_HSW) {
- struct intel_encoder *encoder;
-
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- u32 psr_iir = intel_uncore_read(&dev_priv->uncore,
- EDP_PSR_IIR);
-
- intel_psr_irq_handler(intel_dp, psr_iir);
- intel_uncore_write(&dev_priv->uncore,
- EDP_PSR_IIR, psr_iir);
- break;
- }
- }
-
if (de_iir & DE_AUX_CHANNEL_A_IVB)
dp_aux_irq_handler(dev_priv);
@@ -4331,6 +4345,20 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
return ret;
}
+#define HPD_FUNCS(platform) \
+static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
+ .hpd_irq_setup = platform##_hpd_irq_setup, \
+}
+
+HPD_FUNCS(i915);
+HPD_FUNCS(dg1);
+HPD_FUNCS(gen11);
+HPD_FUNCS(bxt);
+HPD_FUNCS(icp);
+HPD_FUNCS(spt);
+HPD_FUNCS(ilk);
+#undef HPD_FUNCS
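Each HPD_FUNCS() invocation above expands to a one-member const vtable; for example, HPD_FUNCS(ilk) produces:

	static const struct intel_hotplug_funcs ilk_hpd_funcs = {
		.hpd_irq_setup = ilk_hpd_irq_setup,
	};

These tables replace the per-platform function-pointer assignments below.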
+
/**
* intel_irq_init - initializes irq support
* @dev_priv: i915 device instance
@@ -4381,20 +4409,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (HAS_GMCH(dev_priv)) {
if (I915_HAS_HOTPLUG(dev_priv))
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &i915_hpd_funcs;
} else {
if (HAS_PCH_DG1(dev_priv))
- dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &dg1_hpd_funcs;
else if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &gen11_hpd_funcs;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &bxt_hpd_funcs;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- dev_priv->display.hpd_irq_setup = icp_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &icp_hpd_funcs;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &spt_hpd_funcs;
else
- dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &ilk_hpd_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index e43b6734f21b..0eb90d271fa7 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -9,9 +9,9 @@
#include <linux/ktime.h>
#include <linux/types.h>
-#include "display/intel_display.h"
#include "i915_reg.h"
+enum pipe;
struct drm_crtc;
struct drm_device;
struct drm_display_mode;
@@ -40,46 +40,15 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
u32 mask,
u32 bits);
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void
-ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
-{
- ilk_update_display_irq(dev_priv, bits, bits);
-}
-static inline void
-ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
-{
- ilk_update_display_irq(dev_priv, bits, 0);
-}
-void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 bits)
-{
- bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
-}
-static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 bits)
-{
- bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
-}
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void
-ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
-{
- ibx_display_interrupt_update(dev_priv, bits, bits);
-}
-static inline void
-ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
-{
- ibx_display_interrupt_update(dev_priv, bits, 0);
-}
+
+void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits);
+void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits);
+
+void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
+void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
+
+void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits);
+void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits);
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index d8b4482c69d0..ab2295dd4500 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -67,8 +67,8 @@ static const struct {
{ .init = i915_mock_selftests },
{ .init = i915_pmu_init,
.exit = i915_pmu_exit },
- { .init = i915_register_pci_driver,
- .exit = i915_unregister_pci_driver },
+ { .init = i915_pci_register_driver,
+ .exit = i915_pci_unregister_driver },
{ .init = i915_perf_sysctl_register,
.exit = i915_perf_sysctl_unregister },
};
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index f27eceb82c0f..8d725b64592d 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -55,7 +55,7 @@ struct drm_printer;
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
- param(bool, enable_psr2_sel_fetch, false, 0400) \
+ param(bool, enable_psr2_sel_fetch, true, 0400) \
param(int, disable_power_well, -1, 0400) \
param(int, enable_ips, 1, 0600) \
param(int, invert_brightness, 0, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1bbd09ad5287..169837de395d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -537,8 +537,6 @@ static const struct intel_device_info vlv_info = {
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
- .display.has_psr = 1, \
- .display.has_psr_hw_tracking = 1, \
.display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed-by HSW */, \
HSW_PIPE_OFFSETS, \
@@ -642,6 +640,8 @@ static const struct intel_device_info chv_info = {
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
.display.has_ipc = 1, \
+ .display.has_psr = 1, \
+ .display.has_psr_hw_tracking = 1, \
.dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \
.dbuf.slice_mask = BIT(DBUF_S1)
@@ -865,6 +865,7 @@ static const struct intel_device_info jsl_info = {
}, \
TGL_CURSOR_OFFSETS, \
.has_global_mocs = 1, \
+ .has_pxp = 1, \
.display.has_dsb = 1
static const struct intel_device_info tgl_info = {
@@ -891,10 +892,11 @@ static const struct intel_device_info rkl_info = {
#define DGFX_FEATURES \
.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
.has_llc = 0, \
+ .has_pxp = 0, \
.has_snoop = 1, \
.is_dgfx = 1
-static const struct intel_device_info dg1_info __maybe_unused = {
+static const struct intel_device_info dg1_info = {
GEN12_FEATURES,
DGFX_FEATURES,
.graphics_rel = 10,
@@ -912,7 +914,6 @@ static const struct intel_device_info adl_s_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ALDERLAKE_S),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .require_force_probe = 1,
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
.platform_engine_mask =
@@ -1115,6 +1116,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_RKL_IDS(&rkl_info),
INTEL_ADLS_IDS(&adl_s_info),
INTEL_ADLP_IDS(&adl_p_info),
+ INTEL_DG1_IDS(&dg1_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -1234,12 +1236,12 @@ static struct pci_driver i915_pci_driver = {
.driver.pm = &i915_pm_ops,
};
-int i915_register_pci_driver(void)
+int i915_pci_register_driver(void)
{
return pci_register_driver(&i915_pci_driver);
}
-void i915_unregister_pci_driver(void)
+void i915_pci_unregister_driver(void)
{
pci_unregister_driver(&i915_pci_driver);
}
diff --git a/drivers/gpu/drm/i915/i915_pci.h b/drivers/gpu/drm/i915/i915_pci.h
index b386f319f52e..ee048c238174 100644
--- a/drivers/gpu/drm/i915/i915_pci.h
+++ b/drivers/gpu/drm/i915/i915_pci.h
@@ -1,8 +1,12 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2021 Intel Corporation
*/
-int i915_register_pci_driver(void);
-void i915_unregister_pci_driver(void);
+#ifndef __I915_PCI_H__
+#define __I915_PCI_H__
+
+int i915_pci_register_driver(void);
+void i915_pci_unregister_driver(void);
+
+#endif /* __I915_PCI_H__ */
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index e49da36c62fb..51b368be0fc4 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -124,7 +124,9 @@ query_engine_info(struct drm_i915_private *i915,
for_each_uabi_engine(engine, i915) {
info.engine.engine_class = engine->uabi_class;
info.engine.engine_instance = engine->uabi_instance;
+ info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
info.capabilities = engine->uabi_capabilities;
+ info.logical_instance = ilog2(engine->logical_mask);
if (copy_to_user(info_ptr, &info, sizeof(info)))
return -EFAULT;
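With the flag set for every reported engine, userspace can detect and read the new field. A hypothetical consumer sketch for DRM_I915_QUERY_ENGINE_INFO (the surrounding query plumbing is assumed and omitted):

	const struct drm_i915_engine_info *info = &engines->engines[i];

	if (info->flags & I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE)
		printf("engine %u:%u -> logical instance %u\n",
		       info->engine.engine_class,
		       info->engine.engine_instance,
		       info->logical_instance);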
@@ -432,9 +434,6 @@ static int query_memregion_info(struct drm_i915_private *i915,
u32 total_length;
int ret, id, i;
- if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM))
- return -ENODEV;
-
if (query_item->flags != 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4037030f0984..da9055c3ebf0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1968,7 +1968,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
_ICL_PORT_PCS_LN(ln) + 4 * (dw))
#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
-#define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy))
+#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
#define DCC_MODE_SELECT_MASK (0x3 << 20)
#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20)
#define COMMON_KEEPER_EN (1 << 26)
@@ -1989,7 +1989,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
-#define ICL_PORT_TX_DW2_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, phy))
+#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy))
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
@@ -2001,7 +2001,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
-#define ICL_PORT_TX_DW4_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, phy))
#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
@@ -2013,7 +2012,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
-#define ICL_PORT_TX_DW5_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, phy))
+#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy))
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -2024,14 +2023,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
-#define ICL_PORT_TX_DW7_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, phy))
#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
-#define ICL_PORT_TX_DW8_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(8, 0, phy))
+#define ICL_PORT_TX_DW8_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(8, ln, phy))
#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31)
#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29)
#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1)
@@ -2237,10 +2235,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define SNPS_PHY_MPLLB_DIV(phy) _MMIO_SNPS(phy, 0x168004)
#define SNPS_PHY_MPLLB_FORCE_EN REG_BIT(31)
+#define SNPS_PHY_MPLLB_DIV_CLK_EN REG_BIT(30)
#define SNPS_PHY_MPLLB_DIV5_CLK_EN REG_BIT(29)
#define SNPS_PHY_MPLLB_V2I REG_GENMASK(27, 26)
#define SNPS_PHY_MPLLB_FREQ_VCO REG_GENMASK(25, 24)
+#define SNPS_PHY_MPLLB_DIV_MULTIPLIER REG_GENMASK(23, 16)
#define SNPS_PHY_MPLLB_PMIX_EN REG_BIT(10)
+#define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9)
+#define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8)
#define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5)
#define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008)
@@ -2551,6 +2553,32 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
#define RING_ID(base) _MMIO((base) + 0x8c)
#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
+
+#define RING_CMD_CCTL(base) _MMIO((base) + 0xc4)
+/*
+ * CMD_CCTL read/write fields take a MOCS value and _not_ a table index.
+ * The lsb of each can be considered a separate enabling bit for encryption.
+ * 6:0 == default MOCS value for reads => 6:1 == table index for reads.
+ * 13:7 == default MOCS value for writes => 13:8 == table index for writes.
+ * 15:14 == Reserved => 31:30 are set to 0.
+ */
+#define CMD_CCTL_WRITE_OVERRIDE_MASK REG_GENMASK(13, 7)
+#define CMD_CCTL_READ_OVERRIDE_MASK REG_GENMASK(6, 0)
+#define CMD_CCTL_MOCS_MASK (CMD_CCTL_WRITE_OVERRIDE_MASK | \
+ CMD_CCTL_READ_OVERRIDE_MASK)
+#define CMD_CCTL_MOCS_OVERRIDE(write, read) \
+ (REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 1) | \
+ REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1))
+
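As a worked example of the encoding described in the comment above (illustrative arithmetic, not from the patch): a write table index of 3 and a read table index of 2 become MOCS field values 3 << 1 == 6 and 2 << 1 == 4, so

	CMD_CCTL_MOCS_OVERRIDE(3, 2)
		== REG_FIELD_PREP(REG_GENMASK(13, 7), 6) |
		   REG_FIELD_PREP(REG_GENMASK(6, 0), 4)
		== (6 << 7) | 4
		== 0x304

with bit 0 of each field (the encryption-enable lsb) left clear.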
+#define BLIT_CCTL(base) _MMIO((base) + 0x204)
+#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8)
+#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0)
+#define BLIT_CCTL_MASK (BLIT_CCTL_DST_MOCS_MASK | \
+ BLIT_CCTL_SRC_MOCS_MASK)
+#define BLIT_CCTL_MOCS(dst, src) \
+ (REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (dst) << 1) | \
+ REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (src) << 1))
+
#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
#define RESET_CTL_CAT_ERROR REG_BIT(2)
#define RESET_CTL_READY_TO_RESET REG_BIT(1)
@@ -2686,6 +2714,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
+#define XEHPG_INSTDONE_GEOM_SVG _MMIO(0x666c)
#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
#define SF_MCR_SELECTOR _MMIO(0xfd8)
#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
@@ -2820,6 +2849,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_MODE _MMIO(0x209c)
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
+# define TGL_NESTED_BB_EN (1 << 12)
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
# define MODE_IDLE (1 << 9)
# define STOP_RING (1 << 8)
@@ -3080,8 +3110,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Fuse readout registers for GT */
#define HSW_PAVP_FUSE1 _MMIO(0x911C)
-#define HSW_F1_EU_DIS_SHIFT 16
-#define HSW_F1_EU_DIS_MASK (0x3 << HSW_F1_EU_DIS_SHIFT)
+#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24)
+#define HSW_F1_EU_DIS_MASK REG_GENMASK(17, 16)
#define HSW_F1_EU_DIS_10EUS 0
#define HSW_F1_EU_DIS_8EUS 1
#define HSW_F1_EU_DIS_6EUS 2
@@ -3150,7 +3180,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C)
-#define GEN12_GT_DSS_ENABLE _MMIO(0x913C)
+#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913C)
+#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
#define XEHP_EU_ENABLE _MMIO(0x9134)
#define XEHP_EU_ENA_MASK 0xFF
@@ -3356,6 +3387,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14)
#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
+#define GLK_FBC_STRIDE _MMIO(0x43228)
+#define FBC_STRIDE_OVERRIDE REG_BIT(15)
+#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
+#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1 << 0)
#define SNB_FBC_FRONT_BUFFER (1 << 1)
@@ -4113,6 +4148,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RPN_CAP_MASK REG_GENMASK(23, 16)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
+#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
/*
* Logical Context regs
@@ -4231,6 +4267,7 @@ enum {
#define DUPS1_GATING_DIS (1 << 15)
#define DUPS2_GATING_DIS (1 << 19)
#define DUPS3_GATING_DIS (1 << 23)
+#define CURSOR_GATING_DIS REG_BIT(28)
#define DPF_GATING_DIS (1 << 10)
#define DPF_RAM_GATING_DIS (1 << 9)
#define DPFR_GATING_DIS (1 << 8)
@@ -4509,11 +4546,9 @@ enum {
* HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one
* instance of it
*/
-#define _HSW_EDP_PSR_BASE 0x64800
#define _SRD_CTL_A 0x60800
#define _SRD_CTL_EDP 0x6f800
-#define _PSR_ADJ(tran, reg) (_TRANS2(tran, reg) - dev_priv->hsw_psr_mmio_adjust)
-#define EDP_PSR_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_CTL_A))
+#define EDP_PSR_CTL(tran) _MMIO(_TRANS2(tran, _SRD_CTL_A))
#define EDP_PSR_ENABLE (1 << 31)
#define BDW_PSR_SINGLE_FRAME (1 << 30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
@@ -4557,22 +4592,13 @@ enum {
#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
-#define _SRD_AUX_CTL_A 0x60810
-#define _SRD_AUX_CTL_EDP 0x6f810
-#define EDP_PSR_AUX_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_AUX_CTL_A))
-#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
-#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
-#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16)
-#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11)
-#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff)
-
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
-#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_PSR_ADJ(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_TRANS2(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */
#define _SRD_STATUS_A 0x60840
#define _SRD_STATUS_EDP 0x6f840
-#define EDP_PSR_STATUS(tran) _MMIO(_PSR_ADJ(tran, _SRD_STATUS_A))
+#define EDP_PSR_STATUS(tran) _MMIO(_TRANS2(tran, _SRD_STATUS_A))
#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
#define EDP_PSR_STATUS_STATE_SHIFT 29
#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
@@ -4599,13 +4625,13 @@ enum {
#define _SRD_PERF_CNT_A 0x60844
#define _SRD_PERF_CNT_EDP 0x6f844
-#define EDP_PSR_PERF_CNT(tran) _MMIO(_PSR_ADJ(tran, _SRD_PERF_CNT_A))
+#define EDP_PSR_PERF_CNT(tran) _MMIO(_TRANS2(tran, _SRD_PERF_CNT_A))
#define EDP_PSR_PERF_CNT_MASK 0xffffff
/* PSR_MASK on SKL+ */
#define _SRD_DEBUG_A 0x60860
#define _SRD_DEBUG_EDP 0x6f860
-#define EDP_PSR_DEBUG(tran) _MMIO(_PSR_ADJ(tran, _SRD_DEBUG_A))
+#define EDP_PSR_DEBUG(tran) _MMIO(_TRANS2(tran, _SRD_DEBUG_A))
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
@@ -7230,6 +7256,7 @@ enum {
#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */
#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
+#define PLANE_COLOR_PLANE_CSC_ENABLE REG_BIT(21) /* ICL+ */
#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */
#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
@@ -7353,6 +7380,7 @@ enum {
#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
#define PLANE_SURF(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
+#define PLANE_SURF_DECRYPT REG_BIT(2)
#define _PLANE_OFFSET_1_B 0x711a4
#define _PLANE_OFFSET_2_B 0x712a4
@@ -8094,6 +8122,7 @@ enum {
/* irq instances for OTHER_CLASS */
#define OTHER_GUC_INSTANCE 0
#define OTHER_GTPM_INSTANCE 1
+#define OTHER_KCR_INSTANCE 4
#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
@@ -8176,8 +8205,9 @@ enum {
#define GLK_CL0_PWR_DOWN (1 << 10)
#define CHICKEN_MISC_4 _MMIO(0x4208c)
-#define FBC_STRIDE_OVERRIDE (1 << 13)
-#define FBC_STRIDE_MASK 0x1FFF
+#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
+#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
+#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
@@ -8216,6 +8246,7 @@ enum {
#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
+#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
@@ -9101,6 +9132,29 @@ enum {
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
#define TRANS_DP_SYNC_MASK (3 << 3)
+#define _TRANS_DP2_CTL_A 0x600a0
+#define _TRANS_DP2_CTL_B 0x610a0
+#define _TRANS_DP2_CTL_C 0x620a0
+#define _TRANS_DP2_CTL_D 0x630a0
+#define TRANS_DP2_CTL(trans) _MMIO_TRANS(trans, _TRANS_DP2_CTL_A, _TRANS_DP2_CTL_B)
+#define TRANS_DP2_128B132B_CHANNEL_CODING REG_BIT(31)
+#define TRANS_DP2_PANEL_REPLAY_ENABLE REG_BIT(30)
+#define TRANS_DP2_DEBUG_ENABLE REG_BIT(23)
+
+#define _TRANS_DP2_VFREQHIGH_A 0x600a4
+#define _TRANS_DP2_VFREQHIGH_B 0x610a4
+#define _TRANS_DP2_VFREQHIGH_C 0x620a4
+#define _TRANS_DP2_VFREQHIGH_D 0x630a4
+#define TRANS_DP2_VFREQHIGH(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQHIGH_A, _TRANS_DP2_VFREQHIGH_B)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK REG_GENMASK(31, 8)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK(clk_hz) REG_FIELD_PREP(TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK, (clk_hz))
+
+#define _TRANS_DP2_VFREQLOW_A 0x600a8
+#define _TRANS_DP2_VFREQLOW_B 0x610a8
+#define _TRANS_DP2_VFREQLOW_C 0x620a8
+#define _TRANS_DP2_VFREQLOW_D 0x630a8
+#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
+
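The VFREQHIGH/VFREQLOW pair appears to split a pixel rate across two registers, 24 bits apiece. A hypothetical programming sketch, assuming the rate is expressed in Hz, the high register carries the upper 24 bits, and intel_de_write()/cpu_transcoder come from the surrounding display code:

	u64 pixel_rate = (u64)crtc_state->port_clock * 1000; /* port_clock assumed in kHz */

	intel_de_write(i915, TRANS_DP2_VFREQHIGH(cpu_transcoder),
		       TRANS_DP2_VFREQ_PIXEL_CLOCK(pixel_rate >> 24));
	intel_de_write(i915, TRANS_DP2_VFREQLOW(cpu_transcoder),
		       TRANS_DP2_VFREQ_PIXEL_CLOCK(pixel_rate & 0xffffff));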
/* SNB eDP training params */
/* SNB A-stepping */
#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
@@ -9715,6 +9769,11 @@ enum {
#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
+#define _AUD_TCA_DP_2DOT0_CTRL 0x650bc
+#define _AUD_TCB_DP_2DOT0_CTRL 0x651bc
+#define AUD_DP_2DOT0_CTRL(trans) _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL)
+#define AUD_ENABLE_SDP_SPLIT REG_BIT(31)
+
#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
@@ -10160,7 +10219,7 @@ enum skl_power_gate {
#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
-#define TRANS_DDI_MODE_SELECT_FDI (4 << 24)
+#define TRANS_DDI_MODE_SELECT_FDI_OR_128B132B (4 << 24)
#define TRANS_DDI_BPC_MASK (7 << 20)
#define TRANS_DDI_BPC_8 (0 << 20)
#define TRANS_DDI_BPC_10 (1 << 20)
@@ -10963,7 +11022,6 @@ enum skl_power_gate {
_DKL_TX_DPCNTL1)
#define _DKL_TX_DPCNTL2 0x2C8
-#define DKL_TX_LOADGEN_SHARING_PMD_DISABLE REG_BIT(12)
#define DKL_TX_DP20BITMODE (1 << 2)
#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
_DKL_PHY1_BASE, \
@@ -11048,12 +11106,6 @@ enum skl_power_gate {
#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
-#define BXT_P_CR_MC_BIOS_REQ_0_0_0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7114)
-#define BXT_REQ_DATA_MASK 0x3F
-#define BXT_DRAM_CHANNEL_ACTIVE_SHIFT 12
-#define BXT_DRAM_CHANNEL_ACTIVE_MASK (0xF << 12)
-#define BXT_MEMORY_FREQ_MULTIPLIER_HZ 133333333
-
#define BXT_D_CR_DRP0_DUNIT8 0x1000
#define BXT_D_CR_DRP0_DUNIT9 0x1200
#define BXT_D_CR_DRP0_DUNIT_START 8
@@ -11084,9 +11136,7 @@ enum skl_power_gate {
#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
-#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666
#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
-#define SKL_REQ_DATA_MASK (0xF << 0)
#define DG1_GEAR_TYPE REG_BIT(16)
#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
@@ -11350,6 +11400,51 @@ enum skl_power_gate {
_PAL_PREC_MULTI_SEG_DATA_A, \
_PAL_PREC_MULTI_SEG_DATA_B)
+#define _MMIO_PLANE_GAMC(plane, i, a, b) _MMIO(_PIPE(plane, a, b) + (i) * 4)
+
+/* Plane CSC Registers */
+#define _PLANE_CSC_RY_GY_1_A 0x70210
+#define _PLANE_CSC_RY_GY_2_A 0x70310
+
+#define _PLANE_CSC_RY_GY_1_B 0x71210
+#define _PLANE_CSC_RY_GY_2_B 0x71310
+
+#define _PLANE_CSC_RY_GY_1(pipe) _PIPE(pipe, _PLANE_CSC_RY_GY_1_A, \
+ _PLANE_CSC_RY_GY_1_B)
+#define _PLANE_CSC_RY_GY_2(pipe)	_PIPE(pipe, _PLANE_CSC_RY_GY_2_A, \
+					      _PLANE_CSC_RY_GY_2_B)
+#define PLANE_CSC_COEFF(pipe, plane, index) _MMIO_PLANE(plane, \
+ _PLANE_CSC_RY_GY_1(pipe) + (index) * 4, \
+ _PLANE_CSC_RY_GY_2(pipe) + (index) * 4)
+
+#define _PLANE_CSC_PREOFF_HI_1_A 0x70228
+#define _PLANE_CSC_PREOFF_HI_2_A 0x70328
+
+#define _PLANE_CSC_PREOFF_HI_1_B 0x71228
+#define _PLANE_CSC_PREOFF_HI_2_B 0x71328
+
+#define _PLANE_CSC_PREOFF_HI_1(pipe) _PIPE(pipe, _PLANE_CSC_PREOFF_HI_1_A, \
+ _PLANE_CSC_PREOFF_HI_1_B)
+#define _PLANE_CSC_PREOFF_HI_2(pipe) _PIPE(pipe, _PLANE_CSC_PREOFF_HI_2_A, \
+ _PLANE_CSC_PREOFF_HI_2_B)
+#define PLANE_CSC_PREOFF(pipe, plane, index) _MMIO_PLANE(plane, _PLANE_CSC_PREOFF_HI_1(pipe) + \
+ (index) * 4, _PLANE_CSC_PREOFF_HI_2(pipe) + \
+ (index) * 4)
+
+#define _PLANE_CSC_POSTOFF_HI_1_A 0x70234
+#define _PLANE_CSC_POSTOFF_HI_2_A 0x70334
+
+#define _PLANE_CSC_POSTOFF_HI_1_B 0x71234
+#define _PLANE_CSC_POSTOFF_HI_2_B 0x71334
+
+#define _PLANE_CSC_POSTOFF_HI_1(pipe) _PIPE(pipe, _PLANE_CSC_POSTOFF_HI_1_A, \
+ _PLANE_CSC_POSTOFF_HI_1_B)
+#define _PLANE_CSC_POSTOFF_HI_2(pipe) _PIPE(pipe, _PLANE_CSC_POSTOFF_HI_2_A, \
+ _PLANE_CSC_POSTOFF_HI_2_B)
+#define PLANE_CSC_POSTOFF(pipe, plane, index) _MMIO_PLANE(plane, _PLANE_CSC_POSTOFF_HI_1(pipe) + \
+ (index) * 4, _PLANE_CSC_POSTOFF_HI_2(pipe) + \
+ (index) * 4)
+
/* pipe CSC & degamma/gamma LUTs on CHV */
#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
#define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904)
@@ -11616,6 +11711,14 @@ enum skl_power_gate {
_ICL_DSI_IO_MODECTL_1)
#define COMBO_PHY_MODE_DSI (1 << 0)
+/* TGL DSI Chicken register */
+#define _TGL_DSI_CHKN_REG_0 0x6B0C0
+#define _TGL_DSI_CHKN_REG_1 0x6B8C0
+#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \
+ _TGL_DSI_CHKN_REG_0, \
+ _TGL_DSI_CHKN_REG_1)
+#define TGL_DSI_CHKN_LSHS_GB REG_GENMASK(15, 12)
+
/* Display Stream Splitter Control */
#define DSS_CTL1 _MMIO(0x67400)
#define SPLITTER_ENABLE (1 << 31)
@@ -12739,4 +12842,7 @@ enum skl_power_gate {
#define CLKREQ_POLICY _MMIO(0x101038)
#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
+#define CLKGATE_DIS_MISC _MMIO(0x46534)
+#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 79da5eca60af..2c3cd6e635b5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1145,6 +1145,12 @@ __emit_semaphore_wait(struct i915_request *to,
return 0;
}
+static bool
+can_use_semaphore_wait(struct i915_request *to, struct i915_request *from)
+{
+ return to->engine->gt->ggtt == from->engine->gt->ggtt;
+}
+
static int
emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
@@ -1153,6 +1159,9 @@ emit_semaphore_wait(struct i915_request *to,
const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
struct i915_sw_fence *wait = &to->submit;
+ if (!can_use_semaphore_wait(to, from))
+ goto await_fence;
+
if (!intel_context_use_semaphores(to->context))
goto await_fence;
@@ -1256,7 +1265,8 @@ __i915_request_await_execution(struct i915_request *to,
* immediate execution, and so we must wait until it reaches the
* active slot.
*/
- if (intel_engine_has_semaphores(to->engine) &&
+ if (can_use_semaphore_wait(to, from) &&
+ intel_engine_has_semaphores(to->engine) &&
!i915_request_has_initial_breadcrumb(to)) {
err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
if (err < 0)
@@ -1325,6 +1335,25 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
return err;
}
+static inline bool is_parallel_rq(struct i915_request *rq)
+{
+ return intel_context_is_parallel(rq->context);
+}
+
+static inline struct intel_context *request_to_parent(struct i915_request *rq)
+{
+ return intel_context_to_parent(rq->context);
+}
+
+static bool is_same_parallel_context(struct i915_request *to,
+ struct i915_request *from)
+{
+ if (is_parallel_rq(to))
+ return request_to_parent(to) == request_to_parent(from);
+
+ return false;
+}
+
int
i915_request_await_execution(struct i915_request *rq,
struct dma_fence *fence)
@@ -1356,11 +1385,14 @@ i915_request_await_execution(struct i915_request *rq,
* want to run our callback in all cases.
*/
- if (dma_fence_is_i915(fence))
+ if (dma_fence_is_i915(fence)) {
+ if (is_same_parallel_context(rq, to_request(fence)))
+ continue;
ret = __i915_request_await_execution(rq,
to_request(fence));
- else
+ } else {
ret = i915_request_await_external(rq, fence);
+ }
if (ret < 0)
return ret;
} while (--nchild);
@@ -1461,10 +1493,13 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
fence))
continue;
- if (dma_fence_is_i915(fence))
+ if (dma_fence_is_i915(fence)) {
+ if (is_same_parallel_context(rq, to_request(fence)))
+ continue;
ret = i915_request_await_request(rq, to_request(fence));
- else
+ } else {
ret = i915_request_await_external(rq, fence);
+ }
if (ret < 0)
return ret;
@@ -1540,35 +1575,51 @@ i915_request_await_object(struct i915_request *to,
}
static struct i915_request *
-__i915_request_add_to_timeline(struct i915_request *rq)
+__i915_request_ensure_parallel_ordering(struct i915_request *rq,
+ struct intel_timeline *timeline)
{
- struct intel_timeline *timeline = i915_request_timeline(rq);
struct i915_request *prev;
- /*
- * Dependency tracking and request ordering along the timeline
- * is special cased so that we can eliminate redundant ordering
- * operations while building the request (we know that the timeline
- * itself is ordered, and here we guarantee it).
- *
- * As we know we will need to emit tracking along the timeline,
- * we embed the hooks into our request struct -- at the cost of
- * having to have specialised no-allocation interfaces (which will
- * be beneficial elsewhere).
- *
- * A second benefit to open-coding i915_request_await_request is
- * that we can apply a slight variant of the rules specialised
- * for timelines that jump between engines (such as virtual engines).
- * If we consider the case of virtual engine, we must emit a dma-fence
- * to prevent scheduling of the second request until the first is
- * complete (to maximise our greedy late load balancing) and this
- * precludes optimising to use semaphores serialisation of a single
- * timeline across engines.
- */
+ GEM_BUG_ON(!is_parallel_rq(rq));
+
+ prev = request_to_parent(rq)->parallel.last_rq;
+ if (prev) {
+ if (!__i915_request_is_complete(prev)) {
+ i915_sw_fence_await_sw_fence(&rq->submit,
+ &prev->submit,
+ &rq->submitq);
+
+ if (rq->engine->sched_engine->schedule)
+ __i915_sched_node_add_dependency(&rq->sched,
+ &prev->sched,
+ &rq->dep,
+ 0);
+ }
+ i915_request_put(prev);
+ }
+
+ request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
+
+ return to_request(__i915_active_fence_set(&timeline->last_request,
+ &rq->fence));
+}
+
+static struct i915_request *
+__i915_request_ensure_ordering(struct i915_request *rq,
+ struct intel_timeline *timeline)
+{
+ struct i915_request *prev;
+
+ GEM_BUG_ON(is_parallel_rq(rq));
+
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
+
if (prev && !__i915_request_is_complete(prev)) {
bool uses_guc = intel_engine_uses_guc(rq->engine);
+ bool pow2 = is_power_of_2(READ_ONCE(prev->engine)->mask |
+ rq->engine->mask);
+ bool same_context = prev->context == rq->context;
/*
* The requests are supposed to be kept in order. However,
@@ -1576,13 +1627,11 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* is used as a barrier for external modification to this
* context.
*/
- GEM_BUG_ON(prev->context == rq->context &&
+ GEM_BUG_ON(same_context &&
i915_seqno_passed(prev->fence.seqno,
rq->fence.seqno));
- if ((!uses_guc &&
- is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
- (uses_guc && prev->context == rq->context))
+ if ((same_context && uses_guc) || (!uses_guc && pow2))
i915_sw_fence_await_sw_fence(&rq->submit,
&prev->submit,
&rq->submitq);
@@ -1597,6 +1646,50 @@ __i915_request_add_to_timeline(struct i915_request *rq)
0);
}
+ return prev;
+}
+
+static struct i915_request *
+__i915_request_add_to_timeline(struct i915_request *rq)
+{
+ struct intel_timeline *timeline = i915_request_timeline(rq);
+ struct i915_request *prev;
+
+ /*
+ * Dependency tracking and request ordering along the timeline
+ * is special cased so that we can eliminate redundant ordering
+ * operations while building the request (we know that the timeline
+ * itself is ordered, and here we guarantee it).
+ *
+ * As we know we will need to emit tracking along the timeline,
+ * we embed the hooks into our request struct -- at the cost of
+ * having to have specialised no-allocation interfaces (which will
+ * be beneficial elsewhere).
+ *
+ * A second benefit to open-coding i915_request_await_request is
+ * that we can apply a slight variant of the rules specialised
+ * for timelines that jump between engines (such as virtual engines).
+ * If we consider the case of virtual engine, we must emit a dma-fence
+ * to prevent scheduling of the second request until the first is
+ * complete (to maximise our greedy late load balancing) and this
+ * precludes optimising to use semaphores serialisation of a single
+ * timeline across engines.
+ *
+ * We do not order parallel submission requests on the timeline as each
+ * parallel submission context has its own timeline and the ordering
+ * rules for parallel requests are that they must be submitted in the
+ * order received from the execbuf IOCTL. So rather than using the
+	 * timeline we store a pointer to the last request submitted in the
+	 * relationship in the gem context and insert a submission fence
+	 * between that request and the request passed into this function. If
+	 * the gem context has a single timeline and this is the first
+	 * submission of an execbuf IOCTL, we use the completion fence instead.
+ */
+ if (likely(!is_parallel_rq(rq)))
+ prev = __i915_request_ensure_ordering(rq, timeline);
+ else
+ prev = __i915_request_ensure_parallel_ordering(rq, timeline);
+
/*
* Make sure that no request gazumped us - if it was allocated after
* our i915_request_alloc() and called __i915_request_add() before
@@ -1852,7 +1945,7 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
+ if (CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT &&
__i915_spin_request(rq, state))
goto out;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 1bc1349ba3c2..dc359242d1ae 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -139,6 +139,29 @@ enum {
* the GPU. Here we track such boost requests on a per-request basis.
*/
I915_FENCE_FLAG_BOOST,
+
+ /*
+ * I915_FENCE_FLAG_SUBMIT_PARALLEL - request with a context in a
+ * parent-child relationship (parallel submission, multi-lrc) should
+ * trigger a submission to the GuC rather than just moving the context
+ * tail.
+ */
+ I915_FENCE_FLAG_SUBMIT_PARALLEL,
+
+ /*
+ * I915_FENCE_FLAG_SKIP_PARALLEL - request with a context in a
+ * parent-child relationship (parallel submission, multi-lrc) that
+ * hit an error while generating requests in the execbuf IOCTL.
+	 * Indicates this request should be skipped as another request in
+	 * the submission / relationship encountered an error.
+ */
+ I915_FENCE_FLAG_SKIP_PARALLEL,
+
+ /*
+	 * I915_FENCE_FLAG_COMPOSITE - Indicates the fence is part of a
+	 * composite fence (dma_fence_array) generated by i915 for parallel
+	 * submission.
+ */
+ I915_FENCE_FLAG_COMPOSITE,
};
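A minimal sketch of how such a flag would typically be consumed, assuming the usual pattern of testing bits in rq->fence.flags with the kernel's test_bit() helper (a hypothetical consumer, not code from this patch):

/* Hypothetical: skip a request whose sibling hit an error. */
static bool rq_should_skip(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
}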
/**
@@ -218,6 +241,11 @@ struct i915_request {
};
struct llist_head execute_cb;
struct i915_sw_fence semaphore;
+ /**
+ * @submit_work: complete submit fence from an IRQ if needed for
+ * locking hierarchy reasons.
+ */
+ struct irq_work submit_work;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@@ -285,18 +313,23 @@ struct i915_request {
struct hrtimer timer;
} watchdog;
- /*
- * Requests may need to be stalled when using GuC submission waiting for
- * certain GuC operations to complete. If that is the case, stalled
- * requests are added to a per context list of stalled requests. The
- * below list_head is the link in that list.
+ /**
+ * @guc_fence_link: Requests may need to be stalled when using GuC
+ * submission waiting for certain GuC operations to complete. If that is
+ * the case, stalled requests are added to a per context list of stalled
+ * requests. The below list_head is the link in that list. Protected by
+ * ce->guc_state.lock.
*/
struct list_head guc_fence_link;
/**
- * Priority level while the request is inflight. Differs from i915
- * scheduler priority. See comment above
- * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details.
+ * @guc_prio: Priority level while the request is in flight. Differs
+ * from i915 scheduler priority. See comment above
+ * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
+	 * ce->guc_state.lock. Two special values (GUC_PRIO_INIT and
+ * GUC_PRIO_FINI) outside the GuC priority range are used to indicate
+ * if the priority has not been initialized yet or if no more updates
+ * are possible because the request has completed.
*/
#define GUC_PRIO_INIT 0xff
#define GUC_PRIO_FINI 0xfe
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index cdf0e9c6fd73..1804f4142740 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -37,7 +37,6 @@
#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_pm.h"
-#include "intel_sideband.h"
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 806ad688274b..8104981a6604 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -794,7 +794,6 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u64, ctx)
- __field(u32, guc_id)
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
@@ -805,16 +804,14 @@ DECLARE_EVENT_CLASS(i915_request,
__entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
- __entry->guc_id = rq->context->guc_id;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->tail = rq->tail;
),
- TP_printk("dev=%u, engine=%u:%u, guc_id=%u, ctx=%llu, seqno=%u, tail=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->guc_id, __entry->ctx, __entry->seqno,
- __entry->tail)
+ __entry->ctx, __entry->seqno, __entry->tail)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -903,23 +900,19 @@ DECLARE_EVENT_CLASS(intel_context,
__field(u32, guc_id)
__field(int, pin_count)
__field(u32, sched_state)
- __field(u32, guc_sched_state_no_lock)
__field(u8, guc_prio)
),
TP_fast_assign(
- __entry->guc_id = ce->guc_id;
+ __entry->guc_id = ce->guc_id.id;
__entry->pin_count = atomic_read(&ce->pin_count);
__entry->sched_state = ce->guc_state.sched_state;
- __entry->guc_sched_state_no_lock =
- atomic_read(&ce->guc_sched_state_no_lock);
- __entry->guc_prio = ce->guc_prio;
+ __entry->guc_prio = ce->guc_state.prio;
),
- TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x,0x%x, guc_prio=%u",
+ TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
__entry->guc_id, __entry->pin_count,
__entry->sched_state,
- __entry->guc_sched_state_no_lock,
__entry->guc_prio)
);
@@ -1246,7 +1239,7 @@ DECLARE_EVENT_CLASS(i915_context,
TP_fast_assign(
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
- __entry->vm = rcu_access_pointer(ctx->vm);
+ __entry->vm = ctx->vm;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 6877362f6b85..d59fbb019032 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -126,12 +126,30 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
kfree(bman_res);
}
+static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+ struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+ struct i915_buddy_block *block;
+
+ mutex_lock(&bman->lock);
+ drm_printf(printer, "default_page_size: %lluKiB\n",
+ bman->default_page_size >> 10);
+
+ i915_buddy_print(&bman->mm, printer);
+
+ drm_printf(printer, "reserved:\n");
+ list_for_each_entry(block, &bman->reserved, link)
+ i915_buddy_block_print(&bman->mm, block, printer);
+ mutex_unlock(&bman->lock);
+}
+
static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
.alloc = i915_ttm_buddy_man_alloc,
.free = i915_ttm_buddy_man_free,
+ .debug = i915_ttm_buddy_man_debug,
};
-
/**
* i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
* @bdev: The ttm device
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 5259edacde38..7a5925072466 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <linux/sched/clock.h>
struct drm_i915_private;
struct timer_list;
@@ -458,17 +459,4 @@ static inline bool timer_expired(const struct timer_list *t)
return timer_active(t) && !timer_pending(t);
}
-/*
- * This is a lookalike for IS_ENABLED() that takes a kconfig value,
- * e.g. CONFIG_DRM_I915_SPIN_REQUEST, and evaluates whether it is non-zero
- * i.e. whether the configuration is active. Wrapping up the config inside
- * a boolean context prevents clang and smatch from complaining about potential
- * issues in confusing logical-&& with bitwise-& for constants.
- *
- * Sadly IS_ENABLED() itself does not work with kconfig values.
- *
- * Returns 0 if @config is 0, 1 if set to any value.
- */
-#define IS_ACTIVE(config) ((config) != 0)
-
#endif /* !__I915_UTILS_H */
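The replacement pattern works because these kconfig options are plain integer constants: an unset or zero value disables the feature, and any other value is truthy. A standalone illustration (the option name below is a stand-in, not a real kconfig symbol):

#include <stdio.h>

/* Stand-in for a kconfig integer such as
 * CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT; 0 would disable the branch. */
#define CONFIG_EXAMPLE_BUSYWAIT 500

int main(void)
{
	if (CONFIG_EXAMPLE_BUSYWAIT)
		printf("busywait enabled, budget %d us\n",
		       CONFIG_EXAMPLE_BUSYWAIT);
	return 0;
}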
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4b7fc4647e46..90546fa58fc1 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1234,9 +1234,10 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
return i915_active_add_request(&vma->active, rq);
}
-int i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
+int _i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ struct dma_fence *fence,
+ unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
int err;
@@ -1257,9 +1258,11 @@ int i915_vma_move_to_active(struct i915_vma *vma,
intel_frontbuffer_put(front);
}
- dma_resv_add_excl_fence(vma->resv, &rq->fence);
- obj->write_domain = I915_GEM_DOMAIN_RENDER;
- obj->read_domains = 0;
+ if (fence) {
+ dma_resv_add_excl_fence(vma->resv, fence);
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->read_domains = 0;
+ }
} else {
if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
err = dma_resv_reserve_shared(vma->resv, 1);
@@ -1267,8 +1270,10 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return err;
}
- dma_resv_add_shared_fence(vma->resv, &rq->fence);
- obj->write_domain = 0;
+ if (fence) {
+ dma_resv_add_shared_fence(vma->resv, fence);
+ obj->write_domain = 0;
+ }
}
if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index ed69f66c7ab0..648dbe744c96 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -57,9 +57,16 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq);
-int __must_check i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags);
+int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ struct dma_fence *fence,
+ unsigned int flags);
+static inline int __must_check
+i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,
+ unsigned int flags)
+{
+ return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
+}
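With the fence made an explicit argument, a caller that wants activity tracking without attaching any fence to the reservation can presumably pass NULL (a hedged sketch of such a call site, not code from this patch):

/* Hypothetical: track the vma as busy for rq, but attach no fence. */
err = _i915_vma_move_to_active(vma, rq, NULL, flags);
if (err)
	return err;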
#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 995b502d7e5d..80e93bf00f2e 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -105,8 +105,9 @@ struct intel_remapped_plane_info {
} __packed;
struct intel_remapped_info {
- struct intel_remapped_plane_info plane[2];
- u32 unused_mbz;
+ struct intel_remapped_plane_info plane[4];
+ /* in gtt pages */
+ u32 plane_alignment;
} __packed;
struct intel_rotation_info {
@@ -129,7 +130,7 @@ static inline void assert_i915_gem_gtt_types(void)
{
BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
- BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 3 * sizeof(u32) + 8 * sizeof(u16));
+ BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 5 * sizeof(u32) + 16 * sizeof(u16));
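As a consistency check derived from the assertions themselves (the old one implied each packed plane entry is one u32 plus four u16s, i.e. 12 bytes), the new size works out to 4 * 12 + 4 == 52 bytes. A compilable model of that arithmetic:

#include <assert.h>
#include <stdint.h>

static_assert(4 * (sizeof(uint32_t) + 4 * sizeof(uint16_t)) + sizeof(uint32_t)
	      == 5 * sizeof(uint32_t) + 16 * sizeof(uint16_t),
	      "remapped_info size model");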
/* Check that rotation/remapped shares offsets for simplicity */
BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index d328bb95c49b..8e6f48d1eb7b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -133,6 +133,7 @@ enum intel_ppgtt_type {
func(has_logical_ring_elsq); \
func(has_mslices); \
func(has_pooled_eu); \
+ func(has_pxp); \
func(has_rc6); \
func(has_rc6p); \
func(has_rps); \
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 91866520c173..84bb212bae4b 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -5,7 +5,7 @@
#include "i915_drv.h"
#include "intel_dram.h"
-#include "intel_sideband.h"
+#include "intel_pcode.h"
struct dram_dimm_info {
u16 size;
@@ -244,7 +244,6 @@ static int
skl_get_dram_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
- u32 mem_freq_khz, val;
int ret;
dram_info->type = skl_get_dram_type(i915);
@@ -255,17 +254,6 @@ skl_get_dram_info(struct drm_i915_private *i915)
if (ret)
return ret;
- val = intel_uncore_read(&i915->uncore,
- SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
- mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
- SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- if (dram_info->num_channels * mem_freq_khz == 0) {
- drm_info(&i915->drm,
- "Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -350,24 +338,10 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
static int bxt_get_dram_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
- u32 dram_channels;
- u32 mem_freq_khz, val;
- u8 num_active_channels, valid_ranks = 0;
+ u32 val;
+ u8 valid_ranks = 0;
int i;
- val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
- mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
- BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
- num_active_channels = hweight32(dram_channels);
-
- if (mem_freq_khz * num_active_channels == 0) {
- drm_info(&i915->drm,
- "Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
/*
* Now read each DUNIT8/9/10/11 to check the rank of each dimms.
*/
@@ -444,7 +418,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
break;
default:
MISSING_CASE(val & 0xf);
- return -1;
+ return -EINVAL;
}
} else {
switch (val & 0xf) {
@@ -462,7 +436,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
break;
default:
MISSING_CASE(val & 0xf);
- return -1;
+ return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 779eb2fa90b6..e7f7e6627750 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -78,6 +78,18 @@ int intel_memory_region_reserve(struct intel_memory_region *mem,
return i915_ttm_buddy_man_reserve(man, offset, size);
}
+void intel_memory_region_debug(struct intel_memory_region *mr,
+ struct drm_printer *printer)
+{
+ drm_printf(printer, "%s: ", mr->name);
+
+ if (mr->region_private)
+ ttm_resource_manager_debug(mr->region_private, printer);
+ else
+ drm_printf(printer, "total:%pa, available:%pa bytes\n",
+ &mr->total, &mr->avail);
+}
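A sketch of how a caller might dump a region, assuming the standard drm_printer helpers (hypothetical call site, not part of this patch):

struct drm_printer p = drm_info_printer(i915->drm.dev);

intel_memory_region_debug(mr, &p);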
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 1f2b96efa69d..3feae3353d33 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -15,6 +15,7 @@
struct drm_i915_private;
struct drm_i915_gem_object;
+struct drm_printer;
struct intel_memory_region;
struct sg_table;
struct ttm_resource;
@@ -127,6 +128,9 @@ int intel_memory_region_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size);
+void intel_memory_region_debug(struct intel_memory_region *mr,
+ struct drm_printer *printer);
+
struct intel_memory_region *
i915_gem_ttm_system_setup(struct drm_i915_private *i915,
u16 type, u16 instance);
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
new file mode 100644
index 000000000000..e8c886e4e78d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_pcode.h"
+
+static int gen6_check_mailbox_status(u32 mbox)
+{
+ switch (mbox & GEN6_PCODE_ERROR_MASK) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_UNIMPLEMENTED_CMD:
+ return -ENODEV;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ case GEN6_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ default:
+ MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+ return 0;
+ }
+}
+
+static int gen7_check_mailbox_status(u32 mbox)
+{
+ switch (mbox & GEN6_PCODE_ERROR_MASK) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN7_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ case GEN7_PCODE_ILLEGAL_DATA:
+ return -EINVAL;
+ case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
+ return -ENXIO;
+ case GEN11_PCODE_LOCKED:
+ return -EBUSY;
+ case GEN11_PCODE_REJECTED:
+ return -EACCES;
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ default:
+ MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+ return 0;
+ }
+}
+
+static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
+ u32 mbox, u32 *val, u32 *val1,
+ int fast_timeout_us,
+ int slow_timeout_ms,
+ bool is_read)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+
+ lockdep_assert_held(&i915->sb_lock);
+
+ /*
+ * GEN6_PCODE_* are outside of the forcewake domain, we can use
+ * intel_uncore_read/write_fw variants to reduce the amount of work
+ * required when reading/writing.
+ */
+
+ if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
+ return -EAGAIN;
+
+ intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
+ intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
+ intel_uncore_write_fw(uncore,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (__intel_wait_for_register_fw(uncore,
+ GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY, 0,
+ fast_timeout_us,
+ slow_timeout_ms,
+ &mbox))
+ return -ETIMEDOUT;
+
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
+ if (is_read && val1)
+ *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
+
+ if (GRAPHICS_VER(i915) > 6)
+ return gen7_check_mailbox_status(mbox);
+ else
+ return gen6_check_mailbox_status(mbox);
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
+ u32 *val, u32 *val1)
+{
+ int err;
+
+ mutex_lock(&i915->sb_lock);
+ err = __sandybridge_pcode_rw(i915, mbox, val, val1,
+ 500, 20,
+ true);
+ mutex_unlock(&i915->sb_lock);
+
+ if (err) {
+ drm_dbg(&i915->drm,
+ "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+ mbox, __builtin_return_address(0), err);
+ }
+
+ return err;
+}
+
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
+ u32 mbox, u32 val,
+ int fast_timeout_us,
+ int slow_timeout_ms)
+{
+ int err;
+
+ mutex_lock(&i915->sb_lock);
+ err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
+ fast_timeout_us, slow_timeout_ms,
+ false);
+ mutex_unlock(&i915->sb_lock);
+
+ if (err) {
+ drm_dbg(&i915->drm,
+ "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+ val, mbox, __builtin_return_address(0), err);
+ }
+
+ return err;
+}
+
+static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status)
+{
+ *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
+ 500, 0,
+ true);
+
+ return *status || ((request & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @i915: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and if this times out for another 50 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ u32 status;
+ int ret;
+
+ mutex_lock(&i915->sb_lock);
+
+#define COND \
+ skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
+
+ /*
+ * Prime the PCODE by doing a request first. Normally it guarantees
+ * that a subsequent request, at most @timeout_base_ms later, succeeds.
+ * _wait_for() doesn't guarantee when its passed condition is evaluated
+ * first, so send the first request explicitly.
+ */
+ if (COND) {
+ ret = 0;
+ goto out;
+ }
+ ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
+ if (!ret)
+ goto out;
+
+ /*
+ * The above can time out if the number of requests was low (2 in the
+ * worst case) _and_ PCODE was busy for some reason even after a
+ * (queued) request and @timeout_base_ms delay. As a workaround retry
+ * the poll with preemption disabled to maximize the number of
+ * requests. Increase the timeout from @timeout_base_ms to 50ms to
+ * account for interrupts that could reduce the number of these
+ * requests, and for any quirks of the PCODE firmware that delays
+ * the request completion.
+ */
+ drm_dbg_kms(&i915->drm,
+ "PCODE timeout, retrying with preemption disabled\n");
+ drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
+ preempt_disable();
+ ret = wait_for_atomic(COND, 50);
+ preempt_enable();
+
+out:
+ mutex_unlock(&i915->sb_lock);
+ return ret ? ret : status;
+#undef COND
+}
+
+int intel_pcode_init(struct drm_i915_private *i915)
+{
+ int ret = 0;
+
+ if (!IS_DGFX(i915))
+ return ret;
+
+ ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
+ DG1_UNCORE_GET_INIT_STATUS,
+ DG1_UNCORE_INIT_STATUS_COMPLETE,
+ DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+
+ drm_dbg(&i915->drm, "PCODE init status %d\n", ret);
+
+ if (ret)
+ drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
new file mode 100644
index 000000000000..50806649d4b6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _INTEL_PCODE_H_
+#define _INTEL_PCODE_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
+ u32 *val, u32 *val1);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
+ u32 val, int fast_timeout_us,
+ int slow_timeout_ms);
+#define sandybridge_pcode_write(i915, mbox, val) \
+ sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0)
+
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
+int intel_pcode_init(struct drm_i915_private *i915);
+
+#endif /* _INTEL_PCODE_H */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a725792d5248..ecbb3d141632 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -47,8 +47,9 @@
#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
+#include "intel_pcode.h"
#include "intel_pm.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"
/* Stores plane specific WM parameters */
@@ -893,9 +894,8 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pnv_update_wm(struct intel_crtc *unused_crtc)
+static void pnv_update_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
@@ -1164,17 +1164,13 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
cpp = plane_state->hw.fb->format->cpp[0];
/*
- * Not 100% sure which way ELK should go here as the
- * spec only says CL/CTG should assume 32bpp and BW
- * doesn't need to. But as these things followed the
- * mobile vs. desktop lines on gen3 as well, let's
- * assume ELK doesn't need this.
+ * WaUse32BppForSRWM:ctg,elk
*
- * The spec also fails to list such a restriction for
- * the HPLL watermark, which seems a little strange.
+ * The spec fails to list this restriction for the
+ * HPLL watermark, which seems a little strange.
* Let's use 32bpp for the HPLL watermark as well.
*/
- if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
+ if (plane->id == PLANE_PRIMARY &&
level != G4X_WM_LEVEL_NORMAL)
cpp = max(cpp, 4u);
@@ -1388,8 +1384,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
- int num_active_planes = hweight8(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
const struct g4x_pipe_wm *raw;
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -1429,7 +1424,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
wm_state->sr.fbc = raw->fbc;
- wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);
+ wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
level = G4X_WM_LEVEL_HPLL;
if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
@@ -1720,7 +1715,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
- unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
int num_active_planes = hweight8(active_planes);
const int fifo_size = 511;
int fifo_extra, fifo_left = fifo_size;
@@ -1912,8 +1907,8 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
- int num_active_planes = hweight8(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ int num_active_planes = hweight8(active_planes);
bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -2265,9 +2260,8 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void i965_update_wm(struct intel_crtc *unused_crtc)
+static void i965_update_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
@@ -2341,9 +2335,8 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
#undef FW_WM
-static void i9xx_update_wm(struct intel_crtc *unused_crtc)
+static void i9xx_update_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
const struct intel_watermark_params *wm_info;
u32 fwater_lo;
u32 fwater_hi;
@@ -2359,7 +2352,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
else
wm_info = &i830_a_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
+ if (DISPLAY_VER(dev_priv) == 2)
+ fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ else
+ fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *pipe_mode =
@@ -2386,7 +2382,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (DISPLAY_VER(dev_priv) == 2)
wm_info = &i830_bc_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
+ if (DISPLAY_VER(dev_priv) == 2)
+ fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ else
+ fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *pipe_mode =
@@ -2487,9 +2486,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, true);
}
-static void i845_update_wm(struct intel_crtc *unused_crtc)
+static void i845_update_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
const struct drm_display_mode *pipe_mode;
u32 fwater_lo;
@@ -2502,7 +2500,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
pipe_mode = &crtc->config->hw.pipe_mode;
planea_wm = intel_calculate_wm(pipe_mode->crtc_clock,
&i845_wm_info,
- dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
+ i845_get_fifo_size(dev_priv, PLANE_A),
4, pessimal_latency_ns);
fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -2871,6 +2869,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
u32 val;
int ret, i;
int level, max_level = ilk_wm_max_level(dev_priv);
+ int mult = IS_DG2(dev_priv) ? 2 : 1;
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
@@ -2884,13 +2883,13 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
return;
}
- wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
@@ -2903,13 +2902,13 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
return;
}
- wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
/*
* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
@@ -6844,7 +6843,8 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
for_each_plane_id_on_crtc(crtc, plane_id)
raw->plane[plane_id] = active->wm.plane[plane_id];
- if (++level > max_level)
+ level = G4X_WM_LEVEL_SR;
+ if (level > max_level)
goto out;
raw = &crtc_state->wm.g4x.raw[level];
@@ -6853,7 +6853,8 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
raw->plane[PLANE_SPRITE0] = 0;
raw->fbc = active->sr.fbc;
- if (++level > max_level)
+ level = G4X_WM_LEVEL_HPLL;
+ if (level > max_level)
goto out;
raw = &crtc_state->wm.g4x.raw[level];
@@ -6862,6 +6863,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
raw->plane[PLANE_SPRITE0] = 0;
raw->fbc = active->hpll.fbc;
+ level++;
out:
for_each_plane_id_on_crtc(crtc, plane_id)
g4x_raw_plane_wm_set(crtc_state, level,
@@ -7141,47 +7143,6 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- * @crtc: the #intel_crtc on which to compute the WM
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * surface width = hdisplay for normal plane and 64 for cursor
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-void intel_update_watermarks(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (dev_priv->display.update_wm)
- dev_priv->display.update_wm(crtc);
-}
-
void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -7639,11 +7600,6 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
BDW_DPRS_MASK_VBLANK_SRD);
-
- /* Undocumented but fixes async flip + VT-d corruption */
- if (intel_vtd_active())
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
- HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1);
}
/* WaVSRefCountFullforceMissDisable:bdw */
@@ -7679,20 +7635,11 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
- enum pipe pipe;
-
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
HSW_FBCQ_DIS);
- for_each_pipe(dev_priv, pipe) {
- /* Undocumented but fixes async flip + VT-d corruption */
- if (intel_vtd_active())
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
- HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1);
- }
-
/* This is required by WaCatErrorRejectionIssue:hsw */
intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -7921,7 +7868,7 @@ static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
- dev_priv->display.init_clock_gating(dev_priv);
+ dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
}
void intel_suspend_hw(struct drm_i915_private *dev_priv)
@@ -7936,6 +7883,36 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
"No clock gating settings or workarounds applied.\n");
}
+#define CG_FUNCS(platform) \
+static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \
+ .init_clock_gating = platform##_init_clock_gating, \
+}
+
+CG_FUNCS(adlp);
+CG_FUNCS(dg1);
+CG_FUNCS(gen12lp);
+CG_FUNCS(icl);
+CG_FUNCS(cfl);
+CG_FUNCS(skl);
+CG_FUNCS(kbl);
+CG_FUNCS(bxt);
+CG_FUNCS(glk);
+CG_FUNCS(bdw);
+CG_FUNCS(chv);
+CG_FUNCS(hsw);
+CG_FUNCS(ivb);
+CG_FUNCS(vlv);
+CG_FUNCS(gen6);
+CG_FUNCS(ilk);
+CG_FUNCS(g4x);
+CG_FUNCS(i965gm);
+CG_FUNCS(i965g);
+CG_FUNCS(gen3);
+CG_FUNCS(i85x);
+CG_FUNCS(i830);
+CG_FUNCS(nop);
+#undef CG_FUNCS
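For reference, each invocation expands to a one-member vtable; CG_FUNCS(ilk), for example, produces:

static const struct drm_i915_clock_gating_funcs ilk_clock_gating_funcs = {
	.init_clock_gating = ilk_init_clock_gating,
};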
+
/**
* intel_init_clock_gating_hooks - setup the clock gating hooks
* @dev_priv: device private
@@ -7948,55 +7925,100 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_ALDERLAKE_P(dev_priv))
- dev_priv->display.init_clock_gating = adlp_init_clock_gating;
+ dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
else if (IS_DG1(dev_priv))
- dev_priv->display.init_clock_gating = dg1_init_clock_gating;
+ dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 12)
- dev_priv->display.init_clock_gating = gen12lp_init_clock_gating;
+ dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 11)
- dev_priv->display.init_clock_gating = icl_init_clock_gating;
+ dev_priv->clock_gating_funcs = &icl_clock_gating_funcs;
else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
- dev_priv->display.init_clock_gating = cfl_init_clock_gating;
+ dev_priv->clock_gating_funcs = &cfl_clock_gating_funcs;
else if (IS_SKYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = skl_init_clock_gating;
+ dev_priv->clock_gating_funcs = &skl_clock_gating_funcs;
else if (IS_KABYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = kbl_init_clock_gating;
+ dev_priv->clock_gating_funcs = &kbl_clock_gating_funcs;
else if (IS_BROXTON(dev_priv))
- dev_priv->display.init_clock_gating = bxt_init_clock_gating;
+ dev_priv->clock_gating_funcs = &bxt_clock_gating_funcs;
else if (IS_GEMINILAKE(dev_priv))
- dev_priv->display.init_clock_gating = glk_init_clock_gating;
+ dev_priv->clock_gating_funcs = &glk_clock_gating_funcs;
else if (IS_BROADWELL(dev_priv))
- dev_priv->display.init_clock_gating = bdw_init_clock_gating;
+ dev_priv->clock_gating_funcs = &bdw_clock_gating_funcs;
else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.init_clock_gating = chv_init_clock_gating;
+ dev_priv->clock_gating_funcs = &chv_clock_gating_funcs;
else if (IS_HASWELL(dev_priv))
- dev_priv->display.init_clock_gating = hsw_init_clock_gating;
+ dev_priv->clock_gating_funcs = &hsw_clock_gating_funcs;
else if (IS_IVYBRIDGE(dev_priv))
- dev_priv->display.init_clock_gating = ivb_init_clock_gating;
+ dev_priv->clock_gating_funcs = &ivb_clock_gating_funcs;
else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.init_clock_gating = vlv_init_clock_gating;
+ dev_priv->clock_gating_funcs = &vlv_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 6)
- dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ dev_priv->clock_gating_funcs = &gen6_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 5)
- dev_priv->display.init_clock_gating = ilk_init_clock_gating;
+ dev_priv->clock_gating_funcs = &ilk_clock_gating_funcs;
else if (IS_G4X(dev_priv))
- dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ dev_priv->clock_gating_funcs = &g4x_clock_gating_funcs;
else if (IS_I965GM(dev_priv))
- dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
+ dev_priv->clock_gating_funcs = &i965gm_clock_gating_funcs;
else if (IS_I965G(dev_priv))
- dev_priv->display.init_clock_gating = i965g_init_clock_gating;
+ dev_priv->clock_gating_funcs = &i965g_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 3)
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ dev_priv->clock_gating_funcs = &gen3_clock_gating_funcs;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ dev_priv->clock_gating_funcs = &i85x_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 2)
- dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ dev_priv->clock_gating_funcs = &i830_clock_gating_funcs;
else {
MISSING_CASE(INTEL_DEVID(dev_priv));
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
}
}
+static const struct drm_i915_wm_disp_funcs skl_wm_funcs = {
+ .compute_global_watermarks = skl_compute_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = {
+ .compute_pipe_wm = ilk_compute_pipe_wm,
+ .compute_intermediate_wm = ilk_compute_intermediate_wm,
+ .initial_watermarks = ilk_initial_watermarks,
+ .optimize_watermarks = ilk_optimize_watermarks,
+};
+
+static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
+ .compute_pipe_wm = vlv_compute_pipe_wm,
+ .compute_intermediate_wm = vlv_compute_intermediate_wm,
+ .initial_watermarks = vlv_initial_watermarks,
+ .optimize_watermarks = vlv_optimize_watermarks,
+ .atomic_update_watermarks = vlv_atomic_update_fifo,
+};
+
+static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = {
+ .compute_pipe_wm = g4x_compute_pipe_wm,
+ .compute_intermediate_wm = g4x_compute_intermediate_wm,
+ .initial_watermarks = g4x_initial_watermarks,
+ .optimize_watermarks = g4x_optimize_watermarks,
+};
+
+static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = {
+ .update_wm = pnv_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs i965_wm_funcs = {
+ .update_wm = i965_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = {
+ .update_wm = i9xx_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs i845_wm_funcs = {
+ .update_wm = i845_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs nop_funcs = {
+};
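With the function table in place, the former open-coded hook becomes an indirect call through the vtable; call sites elsewhere in this series would look roughly like this (a sketch, assuming the hook may be absent on some platforms):

/* Hypothetical caller, e.g. from a modeset path: */
if (dev_priv->wm_disp->update_wm)
	dev_priv->wm_disp->update_wm(dev_priv);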
+
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
@@ -8012,7 +8034,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For FIFO watermark updates */
if (DISPLAY_VER(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
- dev_priv->display.compute_global_watermarks = skl_compute_wm;
+ dev_priv->wm_disp = &skl_wm_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
@@ -8020,31 +8042,19 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
(DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
- dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
- dev_priv->display.compute_intermediate_wm =
- ilk_compute_intermediate_wm;
- dev_priv->display.initial_watermarks =
- ilk_initial_watermarks;
- dev_priv->display.optimize_watermarks =
- ilk_optimize_watermarks;
+ dev_priv->wm_disp = &ilk_wm_funcs;
} else {
drm_dbg_kms(&dev_priv->drm,
"Failed to read display plane latency. "
"Disable CxSR\n");
+ dev_priv->wm_disp = &nop_funcs;
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
- dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
- dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
- dev_priv->display.initial_watermarks = vlv_initial_watermarks;
- dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
- dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
+ dev_priv->wm_disp = &vlv_wm_funcs;
} else if (IS_G4X(dev_priv)) {
g4x_setup_wm_latency(dev_priv);
- dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
- dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
- dev_priv->display.initial_watermarks = g4x_initial_watermarks;
- dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
+ dev_priv->wm_disp = &g4x_wm_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
@@ -8058,25 +8068,22 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.update_wm = NULL;
+ dev_priv->wm_disp = &nop_funcs;
} else
- dev_priv->display.update_wm = pnv_update_wm;
+ dev_priv->wm_disp = &pnv_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.update_wm = i965_update_wm;
+ dev_priv->wm_disp = &i965_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ dev_priv->wm_disp = &i9xx_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1) {
- dev_priv->display.update_wm = i845_update_wm;
- dev_priv->display.get_fifo_size = i845_get_fifo_size;
- } else {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
- }
+ if (INTEL_NUM_PIPES(dev_priv) == 1)
+ dev_priv->wm_disp = &i845_wm_funcs;
+ else
+ dev_priv->wm_disp = &i9xx_wm_funcs;
} else {
drm_err(&dev_priv->drm,
"unexpected fall-through in %s\n", __func__);
+ dev_priv->wm_disp = &nop_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 91f23b7f0af2..990cdcaf85ce 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-#include "display/intel_bw.h"
#include "display/intel_display.h"
#include "display/intel_global_state.h"
@@ -19,6 +18,7 @@ struct drm_device;
struct drm_i915_private;
struct i915_request;
struct intel_atomic_state;
+struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
@@ -29,7 +29,6 @@ struct skl_wm_level;
void intel_init_clock_gating(struct drm_i915_private *dev_priv);
void intel_suspend_hw(struct drm_i915_private *dev_priv);
int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
-void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 183ea2b187fe..47a85fab4130 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include "display/intel_display.h"
-
#include "intel_wakeref.h"
#include "i915_utils.h"
diff --git a/drivers/gpu/drm/i915/intel_sbi.c b/drivers/gpu/drm/i915/intel_sbi.c
new file mode 100644
index 000000000000..5ba8490a31e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sbi.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ *
+ * LPT/WPT IOSF sideband.
+ */
+
+#include "i915_drv.h"
+#include "intel_sbi.h"
+
+/* SBI access */
+static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination,
+ u32 *val, bool is_read)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 cmd;
+
+ lockdep_assert_held(&i915->sb_lock);
+
+ if (intel_wait_for_register_fw(uncore,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100)) {
+ drm_err(&i915->drm,
+ "timeout waiting for SBI to become ready\n");
+ return -EBUSY;
+ }
+
+ intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
+ intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
+
+ if (destination == SBI_ICLK)
+ cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
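+	/* Bit 8 turns the read opcode into its write counterpart. */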
+ if (!is_read)
+ cmd |= BIT(8);
+ intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
+
+ if (__intel_wait_for_register_fw(uncore,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100, 100, &cmd)) {
+ drm_err(&i915->drm,
+ "timeout waiting for SBI to complete read\n");
+ return -ETIMEDOUT;
+ }
+
+ if (cmd & SBI_RESPONSE_FAIL) {
+ drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
+ return -ENXIO;
+ }
+
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, SBI_DATA);
+
+ return 0;
+}
+
+u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination)
+{
+ u32 result = 0;
+
+ intel_sbi_rw(i915, reg, destination, &result, true);
+
+ return result;
+}
+
+void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
+{
+ intel_sbi_rw(i915, reg, destination, &value, false);
+}
diff --git a/drivers/gpu/drm/i915/intel_sbi.h b/drivers/gpu/drm/i915/intel_sbi.h
new file mode 100644
index 000000000000..f5a862210454
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sbi.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _INTEL_SBI_H_
+#define _INTEL_SBI_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
+u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination);
+void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
+ enum intel_sbi_destination destination);
+
+#endif /* _INTEL_SBI_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
deleted file mode 100644
index e304bf44e1ff..000000000000
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Copyright © 2013 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include <asm/iosf_mbi.h>
-
-#include "i915_drv.h"
-#include "intel_sideband.h"
-
-/*
- * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
- * VLV_VLV2_PUNIT_HAS_0.8.docx
- */
-
-/* Standard MMIO read, non-posted */
-#define SB_MRD_NP 0x00
-/* Standard MMIO write, non-posted */
-#define SB_MWR_NP 0x01
-/* Private register read, double-word addressing, non-posted */
-#define SB_CRRDDA_NP 0x06
-/* Private register write, double-word addressing, non-posted */
-#define SB_CRWRDA_NP 0x07
-
-static void ping(void *info)
-{
-}
-
-static void __vlv_punit_get(struct drm_i915_private *i915)
-{
- iosf_mbi_punit_acquire();
-
- /*
- * Prevent the cpu from sleeping while we use this sideband, otherwise
- * the punit may cause a machine hang. The issue appears to be isolated
- * with changing the power state of the CPU package while changing
- * the power state via the punit, and we have only observed it
- * reliably on 4-core Baytail systems suggesting the issue is in the
- * power delivery mechanism and likely to be board/function
- * specific. Hence we presume the workaround needs only be applied
- * to the Valleyview P-unit and not all sideband communications.
- */
- if (IS_VALLEYVIEW(i915)) {
- cpu_latency_qos_update_request(&i915->sb_qos, 0);
- on_each_cpu(ping, NULL, 1);
- }
-}
-
-static void __vlv_punit_put(struct drm_i915_private *i915)
-{
- if (IS_VALLEYVIEW(i915))
- cpu_latency_qos_update_request(&i915->sb_qos,
- PM_QOS_DEFAULT_VALUE);
-
- iosf_mbi_punit_release();
-}
-
-void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
-{
- if (ports & BIT(VLV_IOSF_SB_PUNIT))
- __vlv_punit_get(i915);
-
- mutex_lock(&i915->sb_lock);
-}
-
-void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
-{
- mutex_unlock(&i915->sb_lock);
-
- if (ports & BIT(VLV_IOSF_SB_PUNIT))
- __vlv_punit_put(i915);
-}
-
-static int vlv_sideband_rw(struct drm_i915_private *i915,
- u32 devfn, u32 port, u32 opcode,
- u32 addr, u32 *val)
-{
- struct intel_uncore *uncore = &i915->uncore;
- const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
- int err;
-
- lockdep_assert_held(&i915->sb_lock);
- if (port == IOSF_PORT_PUNIT)
- iosf_mbi_assert_punit_acquired();
-
- /* Flush the previous comms, just in case it failed last time. */
- if (intel_wait_for_register(uncore,
- VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
- 5)) {
- drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
- is_read ? "read" : "write");
- return -EAGAIN;
- }
-
- preempt_disable();
-
- intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
- intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
- intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
- (devfn << IOSF_DEVFN_SHIFT) |
- (opcode << IOSF_OPCODE_SHIFT) |
- (port << IOSF_PORT_SHIFT) |
- (0xf << IOSF_BYTE_ENABLES_SHIFT) |
- (0 << IOSF_BAR_SHIFT) |
- IOSF_SB_BUSY);
-
- if (__intel_wait_for_register_fw(uncore,
- VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
- 10000, 0, NULL) == 0) {
- if (is_read)
- *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
- err = 0;
- } else {
- drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
- is_read ? "read" : "write");
- err = -ETIMEDOUT;
- }
-
- preempt_enable();
-
- return err;
-}
-
-u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRRDDA_NP, addr, &val);
-
- return val;
-}
-
-int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
-{
- return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRWRDA_NP, addr, &val);
-}
-
-u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
- SB_CRWRDA_NP, reg, &val);
-}
-
-u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
- SB_CRRDDA_NP, addr, &val);
-
- return val;
-}
-
-u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
- SB_CRWRDA_NP, reg, &val);
-}
-
-u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
- SB_CRWRDA_NP, reg, &val);
-}
-
-u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
- SB_CRWRDA_NP, reg, &val);
-}
-
-static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy)
-{
- /*
- * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
- * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
- */
- if (IS_CHERRYVIEW(i915))
- return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
- else
- return IOSF_PORT_DPIO;
-}
-
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
-{
- u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
- u32 val = 0;
-
- vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
-
- /*
- * FIXME: There might be some registers where all 1's is a valid value,
- * so ideally we should check the register offset instead...
- */
- drm_WARN(&i915->drm, val == 0xffffffff,
- "DPIO read pipe %c reg 0x%x == 0x%x\n",
- pipe_name(pipe), reg, val);
-
- return val;
-}
-
-void vlv_dpio_write(struct drm_i915_private *i915,
- enum pipe pipe, int reg, u32 val)
-{
- u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
-
- vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
-}
-
-u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
- reg, &val);
- return val;
-}
-
-void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
- reg, &val);
-}
-
-/* SBI access */
-static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination,
- u32 *val, bool is_read)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u32 cmd;
-
- lockdep_assert_held(&i915->sb_lock);
-
- if (intel_wait_for_register_fw(uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100)) {
- drm_err(&i915->drm,
- "timeout waiting for SBI to become ready\n");
- return -EBUSY;
- }
-
- intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
- intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
-
- if (destination == SBI_ICLK)
- cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
- else
- cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
- if (!is_read)
- cmd |= BIT(8);
- intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
-
- if (__intel_wait_for_register_fw(uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100, 100, &cmd)) {
- drm_err(&i915->drm,
- "timeout waiting for SBI to complete read\n");
- return -ETIMEDOUT;
- }
-
- if (cmd & SBI_RESPONSE_FAIL) {
- drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
- return -ENXIO;
- }
-
- if (is_read)
- *val = intel_uncore_read_fw(uncore, SBI_DATA);
-
- return 0;
-}
-
-u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination)
-{
- u32 result = 0;
-
- intel_sbi_rw(i915, reg, destination, &result, true);
-
- return result;
-}
-
-void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
- enum intel_sbi_destination destination)
-{
- intel_sbi_rw(i915, reg, destination, &value, false);
-}
-
-static int gen6_check_mailbox_status(u32 mbox)
-{
- switch (mbox & GEN6_PCODE_ERROR_MASK) {
- case GEN6_PCODE_SUCCESS:
- return 0;
- case GEN6_PCODE_UNIMPLEMENTED_CMD:
- return -ENODEV;
- case GEN6_PCODE_ILLEGAL_CMD:
- return -ENXIO;
- case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- return -EOVERFLOW;
- case GEN6_PCODE_TIMEOUT:
- return -ETIMEDOUT;
- default:
- MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
- return 0;
- }
-}
-
-static int gen7_check_mailbox_status(u32 mbox)
-{
- switch (mbox & GEN6_PCODE_ERROR_MASK) {
- case GEN6_PCODE_SUCCESS:
- return 0;
- case GEN6_PCODE_ILLEGAL_CMD:
- return -ENXIO;
- case GEN7_PCODE_TIMEOUT:
- return -ETIMEDOUT;
- case GEN7_PCODE_ILLEGAL_DATA:
- return -EINVAL;
- case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
- return -ENXIO;
- case GEN11_PCODE_LOCKED:
- return -EBUSY;
- case GEN11_PCODE_REJECTED:
- return -EACCES;
- case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- return -EOVERFLOW;
- default:
- MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
- return 0;
- }
-}
-
-static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
- u32 mbox, u32 *val, u32 *val1,
- int fast_timeout_us,
- int slow_timeout_ms,
- bool is_read)
-{
- struct intel_uncore *uncore = &i915->uncore;
-
- lockdep_assert_held(&i915->sb_lock);
-
- /*
- * GEN6_PCODE_* are outside of the forcewake domain, we can use
- * intel_uncore_read/write_fw variants to reduce the amount of work
- * required when reading/writing.
- */
-
- if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
- return -EAGAIN;
-
- intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
- intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
- intel_uncore_write_fw(uncore,
- GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
- if (__intel_wait_for_register_fw(uncore,
- GEN6_PCODE_MAILBOX,
- GEN6_PCODE_READY, 0,
- fast_timeout_us,
- slow_timeout_ms,
- &mbox))
- return -ETIMEDOUT;
-
- if (is_read)
- *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
- if (is_read && val1)
- *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
-
- if (GRAPHICS_VER(i915) > 6)
- return gen7_check_mailbox_status(mbox);
- else
- return gen6_check_mailbox_status(mbox);
-}
-
-int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
- u32 *val, u32 *val1)
-{
- int err;
-
- mutex_lock(&i915->sb_lock);
- err = __sandybridge_pcode_rw(i915, mbox, val, val1,
- 500, 20,
- true);
- mutex_unlock(&i915->sb_lock);
-
- if (err) {
- drm_dbg(&i915->drm,
- "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
- mbox, __builtin_return_address(0), err);
- }
-
- return err;
-}
-
-int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
- u32 mbox, u32 val,
- int fast_timeout_us,
- int slow_timeout_ms)
-{
- int err;
-
- mutex_lock(&i915->sb_lock);
- err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
- fast_timeout_us, slow_timeout_ms,
- false);
- mutex_unlock(&i915->sb_lock);
-
- if (err) {
- drm_dbg(&i915->drm,
- "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
- val, mbox, __builtin_return_address(0), err);
- }
-
- return err;
-}
-
-static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
- u32 request, u32 reply_mask, u32 reply,
- u32 *status)
-{
- *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
- 500, 0,
- true);
-
- return *status || ((request & reply_mask) == reply);
-}
-
-/**
- * skl_pcode_request - send PCODE request until acknowledgment
- * @i915: device private
- * @mbox: PCODE mailbox ID the request is targeted for
- * @request: request ID
- * @reply_mask: mask used to check for request acknowledgment
- * @reply: value used to check for request acknowledgment
- * @timeout_base_ms: timeout for polling with preemption enabled
- *
- * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
- * The request is acknowledged once the PCODE reply dword equals @reply after
- * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 50 ms with
- * preemption disabled.
- *
- * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
- * other error as reported by PCODE.
- */
-int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms)
-{
- u32 status;
- int ret;
-
- mutex_lock(&i915->sb_lock);
-
-#define COND \
- skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
-
- /*
- * Prime the PCODE by doing a request first. Normally it guarantees
- * that a subsequent request, at most @timeout_base_ms later, succeeds.
- * _wait_for() doesn't guarantee when its passed condition is evaluated
- * first, so send the first request explicitly.
- */
- if (COND) {
- ret = 0;
- goto out;
- }
- ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
- if (!ret)
- goto out;
-
- /*
- * The above can time out if the number of requests was low (2 in the
- * worst case) _and_ PCODE was busy for some reason even after a
- * (queued) request and @timeout_base_ms delay. As a workaround retry
- * the poll with preemption disabled to maximize the number of
- * requests. Increase the timeout from @timeout_base_ms to 50ms to
- * account for interrupts that could reduce the number of these
- * requests, and for any quirks of the PCODE firmware that delays
- * the request completion.
- */
- drm_dbg_kms(&i915->drm,
- "PCODE timeout, retrying with preemption disabled\n");
- drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
- preempt_disable();
- ret = wait_for_atomic(COND, 50);
- preempt_enable();
-
-out:
- mutex_unlock(&i915->sb_lock);
- return ret ? ret : status;
-#undef COND
-}
-
-int intel_pcode_init(struct drm_i915_private *i915)
-{
- int ret = 0;
-
- if (!IS_DGFX(i915))
- return ret;
-
- ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
- DG1_UNCORE_GET_INIT_STATUS,
- DG1_UNCORE_INIT_STATUS_COMPLETE,
- DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
-
- drm_dbg(&i915->drm, "PCODE init status %d\n", ret);
-
- if (ret)
- drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
-
- return ret;
-}
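
For orientation, a minimal caller of the pcode helpers removed above (a
hedged sketch reusing GEN6_PCODE_READ_RC6VIDS, a mailbox command defined
elsewhere in i915) would look like:

/* Query the RC6 voltage IDs through the pcode mailbox. Internally this
 * takes sb_lock, writes GEN6_PCODE_DATA, kicks GEN6_PCODE_MAILBOX with
 * GEN6_PCODE_READY set, and polls for READY to clear before decoding
 * the per-generation error bits.
 */
u32 rc6vids = 0;
int err;

err = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
			     &rc6vids, NULL);
if (err)
	drm_dbg(&i915->drm, "pcode read of RC6VIDS failed: %d\n", err);
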
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 6b38bc2811c1..e072054adac5 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -36,6 +36,12 @@
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
+static void
+fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
+{
+ uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
+}
+
void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
@@ -64,7 +70,7 @@ static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
static const char * const forcewake_domain_names[] = {
"render",
- "blitter",
+ "gt",
"media",
"vdbox0",
"vdbox1",
@@ -248,7 +254,7 @@ fw_domain_put(const struct intel_uncore_forcewake_domain *d)
}
static void
-fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
+fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
@@ -340,7 +346,7 @@ static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- fw_domains_get(uncore, fw_domains);
+ fw_domains_get_normal(uncore, fw_domains);
/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
__gen6_gt_wait_for_thread_c0(uncore);
@@ -396,7 +402,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
GEM_BUG_ON(!domain->wake_count);
if (--domain->wake_count == 0)
- uncore->funcs.force_wake_put(uncore, domain->mask);
+ fw_domains_put(uncore, domain->mask);
spin_unlock_irqrestore(&uncore->lock, irqflags);
@@ -454,7 +460,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
fw = uncore->fw_domains_active;
if (fw)
- uncore->funcs.force_wake_put(uncore, fw);
+ fw_domains_put(uncore, fw);
fw_domains_reset(uncore, uncore->fw_domains);
assert_forcewakes_inactive(uncore);
@@ -562,7 +568,7 @@ static void forcewake_early_sanitize(struct intel_uncore *uncore,
intel_uncore_forcewake_reset(uncore);
if (restore_forcewake) {
spin_lock_irq(&uncore->lock);
- uncore->funcs.force_wake_get(uncore, restore_forcewake);
+ fw_domains_get(uncore, restore_forcewake);
if (intel_uncore_has_fifo(uncore))
uncore->fifo_count = fifo_free_entries(uncore);
@@ -623,7 +629,7 @@ static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
}
if (fw_domains)
- uncore->funcs.force_wake_get(uncore, fw_domains);
+ fw_domains_get(uncore, fw_domains);
}
/**
@@ -644,7 +650,7 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
{
unsigned long irqflags;
- if (!uncore->funcs.force_wake_get)
+ if (!uncore->fw_get_funcs)
return;
assert_rpm_wakelock_held(uncore->rpm);
@@ -711,7 +717,7 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
{
lockdep_assert_held(&uncore->lock);
- if (!uncore->funcs.force_wake_get)
+ if (!uncore->fw_get_funcs)
return;
__intel_uncore_forcewake_get(uncore, fw_domains);
@@ -733,7 +739,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
continue;
}
- uncore->funcs.force_wake_put(uncore, domain->mask);
+ fw_domains_put(uncore, domain->mask);
}
}
@@ -750,7 +756,7 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
{
unsigned long irqflags;
- if (!uncore->funcs.force_wake_put)
+ if (!uncore->fw_get_funcs)
return;
spin_lock_irqsave(&uncore->lock, irqflags);
@@ -769,7 +775,7 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- if (!uncore->funcs.force_wake_put)
+ if (!uncore->fw_get_funcs)
return;
fw_domains &= uncore->fw_domains;
@@ -793,7 +799,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
{
lockdep_assert_held(&uncore->lock);
- if (!uncore->funcs.force_wake_put)
+ if (!uncore->fw_get_funcs)
return;
__intel_uncore_forcewake_put(uncore, fw_domains);
@@ -801,7 +807,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
- if (!uncore->funcs.force_wake_get)
+ if (!uncore->fw_get_funcs)
return;
drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
@@ -818,7 +824,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
return;
- if (!uncore->funcs.force_wake_get)
+ if (!uncore->fw_get_funcs)
return;
spin_lock_irq(&uncore->lock);
@@ -851,16 +857,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
}
/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
-
-#define __gen6_reg_read_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (NEEDS_FORCE_WAKE(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else \
- __fwd = 0; \
- __fwd; \
+#define NEEDS_FORCE_WAKE(reg) ({ \
+ u32 __reg = (reg); \
+ __reg < 0x40000 || __reg >= GEN11_BSD_RING_BASE; \
})
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
@@ -942,125 +941,146 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
__fwd; \
})
-#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
- find_fw_domain(uncore, offset)
-
-#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
- find_fw_domain(uncore, offset)
-
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
-static const i915_reg_t gen8_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
- GEN6_RPNSWREQ, /* 0xA008 */
- GEN6_RC_VIDEO_FREQ, /* 0xA00C */
- RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */
- RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */
- RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
+static const struct i915_range gen8_shadowed_regs[] = {
+ { .start = 0x2030, .end = 0x2030 },
+ { .start = 0xA008, .end = 0xA00C },
+ { .start = 0x12030, .end = 0x12030 },
+ { .start = 0x1a030, .end = 0x1a030 },
+ { .start = 0x22030, .end = 0x22030 },
/* TODO: Other registers are not yet used */
};
-static const i915_reg_t gen11_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
- RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */
- GEN6_RPNSWREQ, /* 0xA008 */
- GEN6_RC_VIDEO_FREQ, /* 0xA00C */
- RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
- RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */
- RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */
- RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */
- RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */
- RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */
- RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */
- RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */
- /* TODO: Other registers are not yet used */
+static const struct i915_range gen11_shadowed_regs[] = {
+ { .start = 0x2030, .end = 0x2030 },
+ { .start = 0x2550, .end = 0x2550 },
+ { .start = 0xA008, .end = 0xA00C },
+ { .start = 0x22030, .end = 0x22030 },
+ { .start = 0x22230, .end = 0x22230 },
+ { .start = 0x22510, .end = 0x22550 },
+ { .start = 0x1C0030, .end = 0x1C0030 },
+ { .start = 0x1C0230, .end = 0x1C0230 },
+ { .start = 0x1C0510, .end = 0x1C0550 },
+ { .start = 0x1C4030, .end = 0x1C4030 },
+ { .start = 0x1C4230, .end = 0x1C4230 },
+ { .start = 0x1C4510, .end = 0x1C4550 },
+ { .start = 0x1C8030, .end = 0x1C8030 },
+ { .start = 0x1C8230, .end = 0x1C8230 },
+ { .start = 0x1C8510, .end = 0x1C8550 },
+ { .start = 0x1D0030, .end = 0x1D0030 },
+ { .start = 0x1D0230, .end = 0x1D0230 },
+ { .start = 0x1D0510, .end = 0x1D0550 },
+ { .start = 0x1D4030, .end = 0x1D4030 },
+ { .start = 0x1D4230, .end = 0x1D4230 },
+ { .start = 0x1D4510, .end = 0x1D4550 },
+ { .start = 0x1D8030, .end = 0x1D8030 },
+ { .start = 0x1D8230, .end = 0x1D8230 },
+ { .start = 0x1D8510, .end = 0x1D8550 },
};
-static const i915_reg_t gen12_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
- RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */
- GEN6_RPNSWREQ, /* 0xA008 */
- GEN6_RC_VIDEO_FREQ, /* 0xA00C */
- RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
- RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */
- RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */
- RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */
- RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */
- RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */
- RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */
- RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */
- /* TODO: Other registers are not yet used */
+static const struct i915_range gen12_shadowed_regs[] = {
+ { .start = 0x2030, .end = 0x2030 },
+ { .start = 0x2510, .end = 0x2550 },
+ { .start = 0xA008, .end = 0xA00C },
+ { .start = 0xA188, .end = 0xA188 },
+ { .start = 0xA278, .end = 0xA278 },
+ { .start = 0xA540, .end = 0xA56C },
+ { .start = 0xC4C8, .end = 0xC4C8 },
+ { .start = 0xC4D4, .end = 0xC4D4 },
+ { .start = 0xC600, .end = 0xC600 },
+ { .start = 0x22030, .end = 0x22030 },
+ { .start = 0x22510, .end = 0x22550 },
+ { .start = 0x1C0030, .end = 0x1C0030 },
+ { .start = 0x1C0510, .end = 0x1C0550 },
+ { .start = 0x1C4030, .end = 0x1C4030 },
+ { .start = 0x1C4510, .end = 0x1C4550 },
+ { .start = 0x1C8030, .end = 0x1C8030 },
+ { .start = 0x1C8510, .end = 0x1C8550 },
+ { .start = 0x1D0030, .end = 0x1D0030 },
+ { .start = 0x1D0510, .end = 0x1D0550 },
+ { .start = 0x1D4030, .end = 0x1D4030 },
+ { .start = 0x1D4510, .end = 0x1D4550 },
+ { .start = 0x1D8030, .end = 0x1D8030 },
+ { .start = 0x1D8510, .end = 0x1D8550 },
+
+ /*
+ * The rest of these ranges are specific to Xe_HP and beyond, but
+ * are reserved/unused on earlier gen12 platforms, so they can
+ * safely be added to the gen12 table.
+ */
+ { .start = 0x1E0030, .end = 0x1E0030 },
+ { .start = 0x1E0510, .end = 0x1E0550 },
+ { .start = 0x1E4030, .end = 0x1E4030 },
+ { .start = 0x1E4510, .end = 0x1E4550 },
+ { .start = 0x1E8030, .end = 0x1E8030 },
+ { .start = 0x1E8510, .end = 0x1E8550 },
+ { .start = 0x1F0030, .end = 0x1F0030 },
+ { .start = 0x1F0510, .end = 0x1F0550 },
+ { .start = 0x1F4030, .end = 0x1F4030 },
+ { .start = 0x1F4510, .end = 0x1F4550 },
+ { .start = 0x1F8030, .end = 0x1F8030 },
+ { .start = 0x1F8510, .end = 0x1F8550 },
};
-static const i915_reg_t xehp_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
- RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */
- GEN6_RPNSWREQ, /* 0xA008 */
- GEN6_RC_VIDEO_FREQ, /* 0xA00C */
- RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
- RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */
- RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */
- RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */
- RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */
- RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */
- RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */
- RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */
- RING_TAIL(XEHP_BSD5_RING_BASE), /* 0x1E0000 (base) */
- RING_EXECLIST_CONTROL(XEHP_BSD5_RING_BASE), /* 0x1E0550 */
- RING_TAIL(XEHP_BSD6_RING_BASE), /* 0x1E4000 (base) */
- RING_EXECLIST_CONTROL(XEHP_BSD6_RING_BASE), /* 0x1E4550 */
- RING_TAIL(XEHP_VEBOX3_RING_BASE), /* 0x1E8000 (base) */
- RING_EXECLIST_CONTROL(XEHP_VEBOX3_RING_BASE), /* 0x1E8550 */
- RING_TAIL(XEHP_BSD7_RING_BASE), /* 0x1F0000 (base) */
- RING_EXECLIST_CONTROL(XEHP_BSD7_RING_BASE), /* 0x1F0550 */
- RING_TAIL(XEHP_BSD8_RING_BASE), /* 0x1F4000 (base) */
- RING_EXECLIST_CONTROL(XEHP_BSD8_RING_BASE), /* 0x1F4550 */
- RING_TAIL(XEHP_VEBOX4_RING_BASE), /* 0x1F8000 (base) */
- RING_EXECLIST_CONTROL(XEHP_VEBOX4_RING_BASE), /* 0x1F8550 */
- /* TODO: Other registers are not yet used */
+static const struct i915_range dg2_shadowed_regs[] = {
+ { .start = 0x2030, .end = 0x2030 },
+ { .start = 0x2510, .end = 0x2550 },
+ { .start = 0xA008, .end = 0xA00C },
+ { .start = 0xA188, .end = 0xA188 },
+ { .start = 0xA278, .end = 0xA278 },
+ { .start = 0xA540, .end = 0xA56C },
+ { .start = 0xC4C8, .end = 0xC4C8 },
+ { .start = 0xC4E0, .end = 0xC4E0 },
+ { .start = 0xC600, .end = 0xC600 },
+ { .start = 0xC658, .end = 0xC658 },
+ { .start = 0x22030, .end = 0x22030 },
+ { .start = 0x22510, .end = 0x22550 },
+ { .start = 0x1C0030, .end = 0x1C0030 },
+ { .start = 0x1C0510, .end = 0x1C0550 },
+ { .start = 0x1C4030, .end = 0x1C4030 },
+ { .start = 0x1C4510, .end = 0x1C4550 },
+ { .start = 0x1C8030, .end = 0x1C8030 },
+ { .start = 0x1C8510, .end = 0x1C8550 },
+ { .start = 0x1D0030, .end = 0x1D0030 },
+ { .start = 0x1D0510, .end = 0x1D0550 },
+ { .start = 0x1D4030, .end = 0x1D4030 },
+ { .start = 0x1D4510, .end = 0x1D4550 },
+ { .start = 0x1D8030, .end = 0x1D8030 },
+ { .start = 0x1D8510, .end = 0x1D8550 },
+ { .start = 0x1E0030, .end = 0x1E0030 },
+ { .start = 0x1E0510, .end = 0x1E0550 },
+ { .start = 0x1E4030, .end = 0x1E4030 },
+ { .start = 0x1E4510, .end = 0x1E4550 },
+ { .start = 0x1E8030, .end = 0x1E8030 },
+ { .start = 0x1E8510, .end = 0x1E8550 },
+ { .start = 0x1F0030, .end = 0x1F0030 },
+ { .start = 0x1F0510, .end = 0x1F0550 },
+ { .start = 0x1F4030, .end = 0x1F4030 },
+ { .start = 0x1F4510, .end = 0x1F4550 },
+ { .start = 0x1F8030, .end = 0x1F8030 },
+ { .start = 0x1F8510, .end = 0x1F8550 },
};
-static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
+static int mmio_range_cmp(u32 key, const struct i915_range *range)
{
- u32 offset = i915_mmio_reg_offset(*reg);
-
- if (key < offset)
+ if (key < range->start)
return -1;
- else if (key > offset)
+ else if (key > range->end)
return 1;
else
return 0;
}
-#define __is_X_shadowed(x) \
-static bool is_##x##_shadowed(u32 offset) \
-{ \
- const i915_reg_t *regs = x##_shadowed_regs; \
- return BSEARCH(offset, regs, ARRAY_SIZE(x##_shadowed_regs), \
- mmio_reg_cmp); \
-}
+static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
+{
+ if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
+ return false;
-__is_X_shadowed(gen8)
-__is_X_shadowed(gen11)
-__is_X_shadowed(gen12)
-__is_X_shadowed(xehp)
+ return BSEARCH(offset,
+ uncore->shadowed_reg_table,
+ uncore->shadowed_reg_table_entries,
+ mmio_range_cmp);
+}
static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
@@ -1068,15 +1088,9 @@ gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
return FORCEWAKE_RENDER;
}
-#define __gen8_reg_write_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else \
- __fwd = 0; \
- __fwd; \
-})
+static const struct intel_forcewake_range __gen6_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
+};
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
@@ -1101,34 +1115,8 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
- __fwd = find_fw_domain(uncore, offset); \
- __fwd; \
-})
-
-#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
const u32 __offset = (offset); \
- if (!is_gen11_shadowed(__offset)) \
- __fwd = find_fw_domain(uncore, __offset); \
- __fwd; \
-})
-
-#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- const u32 __offset = (offset); \
- if (!is_gen12_shadowed(__offset)) \
- __fwd = find_fw_domain(uncore, __offset); \
- __fwd; \
-})
-
-#define __xehp_fwtable_reg_write_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- const u32 __offset = (offset); \
- if (!is_xehp_shadowed(__offset)) \
+ if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
__fwd = find_fw_domain(uncore, __offset); \
__fwd; \
})
@@ -1605,7 +1593,7 @@ static noinline void ___force_wake_auto(struct intel_uncore *uncore,
for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
fw_domain_arm_timer(domain);
- uncore->funcs.force_wake_get(uncore, fw_domains);
+ fw_domains_get(uncore, fw_domains);
}
static inline void __force_wake_auto(struct intel_uncore *uncore,
@@ -1621,35 +1609,30 @@ static inline void __force_wake_auto(struct intel_uncore *uncore,
___force_wake_auto(uncore, fw_domains);
}
-#define __gen_read(func, x) \
+#define __gen_fwtable_read(x) \
static u##x \
-func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
+fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
+{ \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
+ fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
if (fw_engine) \
__force_wake_auto(uncore, fw_engine); \
val = __raw_uncore_read##x(uncore, reg); \
GEN6_READ_FOOTER; \
}
-#define __gen_reg_read_funcs(func) \
-static enum forcewake_domains \
-func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
- return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
-} \
-\
-__gen_read(func, 8) \
-__gen_read(func, 16) \
-__gen_read(func, 32) \
-__gen_read(func, 64)
+static enum forcewake_domains
+fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
+ return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
+}
-__gen_reg_read_funcs(gen12_fwtable);
-__gen_reg_read_funcs(gen11_fwtable);
-__gen_reg_read_funcs(fwtable);
-__gen_reg_read_funcs(gen6);
+__gen_fwtable_read(8)
+__gen_fwtable_read(16)
+__gen_fwtable_read(32)
+__gen_fwtable_read(64)
-#undef __gen_reg_read_funcs
+#undef __gen_fwtable_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
@@ -1714,35 +1697,29 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
-#define __gen_write(func, x) \
+#define __gen_fwtable_write(x) \
static void \
-func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
+fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
+ fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
if (fw_engine) \
__force_wake_auto(uncore, fw_engine); \
__raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
-#define __gen_reg_write_funcs(func) \
-static enum forcewake_domains \
-func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
- return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
-} \
-\
-__gen_write(func, 8) \
-__gen_write(func, 16) \
-__gen_write(func, 32)
+static enum forcewake_domains
+fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
+{
+ return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
+}
-__gen_reg_write_funcs(xehp_fwtable);
-__gen_reg_write_funcs(gen12_fwtable);
-__gen_reg_write_funcs(gen11_fwtable);
-__gen_reg_write_funcs(fwtable);
-__gen_reg_write_funcs(gen8);
+__gen_fwtable_write(8)
+__gen_fwtable_write(16)
+__gen_fwtable_write(32)
-#undef __gen_reg_write_funcs
+#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
@@ -1866,6 +1843,18 @@ static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
fw_domain_fini(uncore, d->id);
}
+static const struct intel_uncore_fw_get uncore_get_fallback = {
+ .force_wake_get = fw_domains_get_with_fallback
+};
+
+static const struct intel_uncore_fw_get uncore_get_normal = {
+ .force_wake_get = fw_domains_get_normal,
+};
+
+static const struct intel_uncore_fw_get uncore_get_thread_status = {
+ .force_wake_get = fw_domains_get_with_thread_status
+};
+
static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
@@ -1881,8 +1870,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
int i;
- uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_fallback;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
@@ -1907,8 +1895,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
- uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_fallback;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
@@ -1918,16 +1905,13 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- uncore->funcs.force_wake_get = fw_domains_get;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_normal;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- uncore->funcs.force_wake_get =
- fw_domains_get_with_thread_status;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_thread_status;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(i915)) {
@@ -1942,9 +1926,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
- uncore->funcs.force_wake_get =
- fw_domains_get_with_thread_status;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_thread_status;
/* We need to init first for ECOBUS access and then
* determine later if we want to reinit, in case of MT access is
@@ -1975,9 +1957,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE, FORCEWAKE_ACK);
}
} else if (GRAPHICS_VER(i915) == 6) {
- uncore->funcs.force_wake_get =
- fw_domains_get_with_thread_status;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_thread_status;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
@@ -2001,6 +1981,12 @@ out:
(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}
+#define ASSIGN_SHADOW_TABLE(uncore, d) \
+{ \
+ (uncore)->shadowed_reg_table = d; \
+ (uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
+}
+
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -2111,40 +2097,42 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
return ret;
forcewake_early_sanitize(uncore, 0);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
+
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+ ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+ ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER(i915) >= 12) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
+ ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER(i915) == 11) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+ ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
+ ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else if (IS_CHERRYVIEW(i915)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
+ ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER(i915) == 8) {
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
+ ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (IS_VALLEYVIEW(i915)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
- ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
@@ -2186,8 +2174,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
}
/* make sure fw funcs are set if and only if we have fw*/
- GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
- GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
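
The shadowed-register rework in this hunk replaces per-generation arrays
of individual registers with sorted { start, end } ranges probed via
BSEARCH() and mmio_range_cmp(). A standalone sketch of the same lookup,
using a hypothetical three-entry table rather than the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct range { uint32_t start, end; };

/* Must stay sorted by offset, mirroring the kernel tables above. */
static const struct range table[] = {
	{ 0x2030, 0x2030 },
	{ 0xA008, 0xA00C },
	{ 0x22030, 0x22030 },
};

static int range_cmp(const void *key, const void *elt)
{
	uint32_t offset = *(const uint32_t *)key;
	const struct range *r = elt;

	if (offset < r->start)
		return -1;
	if (offset > r->end)
		return 1;
	return 0;
}

static bool is_shadowed(uint32_t offset)
{
	return bsearch(&offset, table, sizeof(table) / sizeof(table[0]),
		       sizeof(table[0]), range_cmp) != NULL;
}

Compared with listing every register, ranges keep the tables short and
make multi-register runs (such as the 0x510 to 0x550 spans above) a
single entry.
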
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 3c0b0a8b5250..3248e4e2c540 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -84,12 +84,12 @@ enum forcewake_domains {
FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1,
};
-struct intel_uncore_funcs {
+struct intel_uncore_fw_get {
void (*force_wake_get)(struct intel_uncore *uncore,
enum forcewake_domains domains);
- void (*force_wake_put)(struct intel_uncore *uncore,
- enum forcewake_domains domains);
+};
+struct intel_uncore_funcs {
enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
i915_reg_t r);
enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
@@ -119,6 +119,12 @@ struct intel_forcewake_range {
enum forcewake_domains domains;
};
+/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
+struct i915_range {
+ u32 start;
+ u32 end;
+};
+
struct intel_uncore {
void __iomem *regs;
@@ -136,7 +142,15 @@ struct intel_uncore {
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
+ /*
+ * Shadowed registers are special cases where we can safely write
+ * to the register *without* grabbing forcewake.
+ */
+ const struct i915_range *shadowed_reg_table;
+ unsigned int shadowed_reg_table_entries;
+
struct notifier_block pmic_bus_access_nb;
+ const struct intel_uncore_fw_get *fw_get_funcs;
struct intel_uncore_funcs funcs;
unsigned int fifo_count;
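
The struct changes above replace two mutable function pointers with a
single pointer to a const intel_uncore_fw_get table. A self-contained
sketch of that const-ops pattern (all names hypothetical):

#include <stdio.h>

struct uncore;

struct fw_get_ops {
	void (*force_wake_get)(struct uncore *uncore, unsigned int domains);
};

static void get_with_fallback(struct uncore *uncore, unsigned int domains)
{
	(void)uncore;
	printf("wake domains %#x\n", domains);
}

/* One const table per strategy lives in rodata, instead of patching
 * individual function pointers on each device instance.
 */
static const struct fw_get_ops ops_fallback = {
	.force_wake_get = get_with_fallback,
};

struct uncore {
	const struct fw_get_ops *fw_get_funcs;
};

int main(void)
{
	struct uncore u = { .fw_get_funcs = &ops_fallback };

	u.fw_get_funcs->force_wake_get(&u, 0x3);
	return 0;
}

Selecting behavior is then one pointer assignment, and the single NULL
check on fw_get_funcs replaces the paired checks on force_wake_get/put.
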
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 545c8f277c46..4f4c2e15e736 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -123,6 +123,12 @@ enum {
__INTEL_WAKEREF_PUT_LAST_BIT__
};
+static inline void
+intel_wakeref_might_get(struct intel_wakeref *wf)
+{
+ might_lock(&wf->mutex);
+}
+
/**
* intel_wakeref_put_flags: Release the wakeref
* @wf: the wakeref
@@ -170,6 +176,12 @@ intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
+static inline void
+intel_wakeref_might_put(struct intel_wakeref *wf)
+{
+ might_lock(&wf->mutex);
+}
+
/**
* intel_wakeref_lock: Lock the wakeref (mutex)
* @wf: the wakeref
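
intel_wakeref_might_get() and intel_wakeref_might_put() above are
lockdep-only annotations: they record that a path may take wf->mutex
even when the fast path never does, so lock-ordering bugs are caught
without having to hit the slow path. A sketch paraphrasing the
intel_wakeref_get() fast path from this header (example_get() itself is
hypothetical):

static int example_get(struct intel_wakeref *wf)
{
	intel_wakeref_might_get(wf); /* tell lockdep we may lock wf->mutex */

	if (atomic_inc_not_zero(&wf->count))
		return 0; /* fast path: no mutex taken */

	return __intel_wakeref_get_first(wf); /* slow path locks wf->mutex */
}
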
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
new file mode 100644
index 000000000000..e2314ad9546d
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+#include <linux/workqueue.h>
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "gem/i915_gem_context.h"
+#include "gt/intel_context.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: PXP
+ *
+ * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
+ * It allows execution and flip to display of protected (i.e. encrypted)
+ * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
+ *
+ * Objects can opt-in to PXP encryption at creation time via the
+ * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
+ * correctly protected they must be used in conjunction with a context created
+ * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
+ * of those two uapi flags for details and restrictions.
+ *
+ * Protected objects are tied to a pxp session; currently we only support one
+ * session, which i915 manages and whose index is available in the uapi
+ * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
+ * protected objects.
+ * The session is invalidated by the HW when certain events occur (e.g.
+ * suspend/resume). When this happens, all the objects that were used with the
+ * session are marked as invalid and all contexts marked as using protected
+ * content are banned. Any further attempt at using them in an execbuf call is
+ * rejected, while flips are converted to black frames.
+ *
+ * Some of the PXP setup operations are performed by the Management Engine,
+ * which is handled by the mei driver; communication between i915 and mei is
+ * performed via the mei_pxp component module.
+ */
+
+struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp)
+{
+ return container_of(pxp, struct intel_gt, pxp);
+}
+
+bool intel_pxp_is_active(const struct intel_pxp *pxp)
+{
+ return pxp->arb_is_valid;
+}
+
+/* KCR register definitions */
+#define KCR_INIT _MMIO(0x320f0)
+/* Setting KCR Init bit is required after system boot */
+#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14)
+
+static void kcr_pxp_enable(struct intel_gt *gt)
+{
+ intel_uncore_write(gt->uncore, KCR_INIT,
+ _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
+}
+
+static void kcr_pxp_disable(struct intel_gt *gt)
+{
+ intel_uncore_write(gt->uncore, KCR_INIT,
+ _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
+}
+
+static int create_vcs_context(struct intel_pxp *pxp)
+{
+ static struct lock_class_key pxp_lock;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+ int i;
+
+ /*
+ * Find the first VCS engine present. We're guaranteed there is one
+	 * if we're in this function due to the check in has_pxp.
+ */
+ for (i = 0, engine = NULL; !engine; i++)
+ engine = gt->engine_class[VIDEO_DECODE_CLASS][i];
+
+ GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);
+
+ ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
+ I915_GEM_HWS_PXP_ADDR,
+ &pxp_lock, "pxp_context");
+ if (IS_ERR(ce)) {
+ drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
+ return PTR_ERR(ce);
+ }
+
+ pxp->ce = ce;
+
+ return 0;
+}
+
+static void destroy_vcs_context(struct intel_pxp *pxp)
+{
+ intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
+}
+
+void intel_pxp_init(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ int ret;
+
+ if (!HAS_PXP(gt->i915))
+ return;
+
+ mutex_init(&pxp->tee_mutex);
+
+ /*
+ * we'll use the completion to check if there is a termination pending,
+ * so we start it as completed and we reinit it when a termination
+ * is triggered.
+ */
+ init_completion(&pxp->termination);
+ complete_all(&pxp->termination);
+
+ mutex_init(&pxp->arb_mutex);
+ INIT_WORK(&pxp->session_work, intel_pxp_session_work);
+
+ ret = create_vcs_context(pxp);
+ if (ret)
+ return;
+
+ ret = intel_pxp_tee_component_init(pxp);
+ if (ret)
+ goto out_context;
+
+ drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");
+
+ return;
+
+out_context:
+ destroy_vcs_context(pxp);
+}
+
+void intel_pxp_fini(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ intel_pxp_tee_component_fini(pxp);
+
+ destroy_vcs_context(pxp);
+}
+
+void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
+{
+ pxp->arb_is_valid = false;
+ reinit_completion(&pxp->termination);
+}
+
+static void pxp_queue_termination(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /*
+ * We want to get the same effect as if we received a termination
+ * interrupt, so just pretend that we did.
+ */
+ spin_lock_irq(&gt->irq_lock);
+ intel_pxp_mark_termination_in_progress(pxp);
+ pxp->session_events |= PXP_TERMINATION_REQUEST;
+ queue_work(system_unbound_wq, &pxp->session_work);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+/*
+ * the arb session is restarted from the irq work when we receive the
+ * termination completion interrupt
+ */
+int intel_pxp_start(struct intel_pxp *pxp)
+{
+ int ret = 0;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ mutex_lock(&pxp->arb_mutex);
+
+ if (pxp->arb_is_valid)
+ goto unlock;
+
+ pxp_queue_termination(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(250))) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ /* make sure the compiler doesn't optimize the double access */
+ barrier();
+
+ if (!pxp->arb_is_valid)
+ ret = -EIO;
+
+unlock:
+ mutex_unlock(&pxp->arb_mutex);
+ return ret;
+}
+
+void intel_pxp_init_hw(struct intel_pxp *pxp)
+{
+ kcr_pxp_enable(pxp_to_gt(pxp));
+ intel_pxp_irq_enable(pxp);
+}
+
+void intel_pxp_fini_hw(struct intel_pxp *pxp)
+{
+ kcr_pxp_disable(pxp_to_gt(pxp));
+
+ intel_pxp_irq_disable(pxp);
+}
+
+int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign)
+{
+ if (!intel_pxp_is_active(pxp))
+ return -ENODEV;
+
+ if (!i915_gem_object_is_protected(obj))
+ return -EINVAL;
+
+ GEM_BUG_ON(!pxp->key_instance);
+
+ /*
+ * If this is the first time we're using this object, it's not
+ * encrypted yet; it will be encrypted with the current key, so mark it
+ * as such. If the object is already encrypted, check instead if the
+ * used key is still valid.
+ */
+ if (!obj->pxp_key_instance && assign)
+ obj->pxp_key_instance = pxp->key_instance;
+
+ if (obj->pxp_key_instance != pxp->key_instance)
+ return -ENOEXEC;
+
+ return 0;
+}
+
+void intel_pxp_invalidate(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct i915_gem_context *ctx, *cn;
+
+ /* ban all contexts marked as protected */
+ spin_lock_irq(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ if (likely(!i915_gem_context_uses_protected_content(ctx))) {
+ i915_gem_context_put(ctx);
+ continue;
+ }
+
+ spin_unlock_irq(&i915->gem.contexts.lock);
+
+ /*
+		 * By the time we get here we are either about to suspend with
+		 * quiesced execution, or the HW keys are already long gone; in
+		 * either case it is pointless to close the context and wait for
+		 * its execution, which would hang the GPU if it has not already.
+		 * So, as a fast mitigation, ban the context as quickly as we can.
+		 * That might race with the execbuffer, but currently this is the
+		 * best that can be done.
+ */
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ intel_context_ban(ce, NULL);
+ i915_gem_context_unlock_engines(ctx);
+
+ /*
+ * The context has been banned, no need to keep the wakeref.
+ * This is safe from races because the only other place this
+		 * is touched is context_release and we're holding a ctx ref.
+ */
+ if (ctx->pxp_wakeref) {
+ intel_runtime_pm_put(&i915->runtime_pm,
+ ctx->pxp_wakeref);
+ ctx->pxp_wakeref = 0;
+ }
+
+ spin_lock_irq(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
+ }
+ spin_unlock_irq(&i915->gem.contexts.lock);
+}
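
A hedged sketch of how a submission path might consume
intel_pxp_key_check(); the execbuf wiring is not part of this file, so
example_gate_object() below is hypothetical:

/* Gate an object on PXP state before submission. assign=true stamps the
 * current key instance on first use; -ENOEXEC means the object was
 * encrypted with a key instance that a termination has invalidated.
 */
static int example_gate_object(struct intel_pxp *pxp,
			       struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_is_protected(obj))
		return 0;

	return intel_pxp_key_check(pxp, obj, true);
}
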
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
new file mode 100644
index 000000000000..aa262258d4d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_H__
+#define __INTEL_PXP_H__
+
+#include "intel_pxp_types.h"
+
+struct drm_i915_gem_object;
+
+static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
+{
+ return pxp->ce;
+}
+
+#ifdef CONFIG_DRM_I915_PXP
+struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp);
+bool intel_pxp_is_active(const struct intel_pxp *pxp);
+
+void intel_pxp_init(struct intel_pxp *pxp);
+void intel_pxp_fini(struct intel_pxp *pxp);
+
+void intel_pxp_init_hw(struct intel_pxp *pxp);
+void intel_pxp_fini_hw(struct intel_pxp *pxp);
+
+void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp);
+
+int intel_pxp_start(struct intel_pxp *pxp);
+
+int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign);
+
+void intel_pxp_invalidate(struct intel_pxp *pxp);
+#else
+static inline void intel_pxp_init(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_fini(struct intel_pxp *pxp)
+{
+}
+
+static inline int intel_pxp_start(struct intel_pxp *pxp)
+{
+ return -ENODEV;
+}
+
+static inline bool intel_pxp_is_active(const struct intel_pxp *pxp)
+{
+ return false;
+}
+
+static inline int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __INTEL_PXP_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
new file mode 100644
index 000000000000..f41e45763d0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_session.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_ring.h"
+
+#include "i915_trace.h"
+
+/* stall until prior PXP and MFX/HCP/HUC objects are completed */
+#define MFX_WAIT_PXP (MFX_WAIT | \
+ MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
+ MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)
+
+static u32 *pxp_emit_session_selection(u32 *cs, u32 idx)
+{
+ *cs++ = MFX_WAIT_PXP;
+
+ /* pxp off */
+ *cs++ = MI_FLUSH_DW;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* select session */
+ *cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx);
+
+ *cs++ = MFX_WAIT_PXP;
+
+ /* pxp on */
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+
+ *cs++ = MFX_WAIT_PXP;
+
+ return cs;
+}
+
+static u32 *pxp_emit_inline_termination(u32 *cs)
+{
+ /* session inline termination */
+ *cs++ = CRYPTO_KEY_EXCHANGE;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *pxp_emit_session_termination(u32 *cs, u32 idx)
+{
+ cs = pxp_emit_session_selection(cs, idx);
+ cs = pxp_emit_inline_termination(cs);
+
+ return cs;
+}
+
+static u32 *pxp_emit_wait(u32 *cs)
+{
+ /* wait for cmds to go through */
+ *cs++ = MFX_WAIT_PXP;
+ *cs++ = 0;
+
+ return cs;
+}
+
+/*
+ * if we ever need to terminate more than one session, we can submit multiple
+ * selections and terminations back-to-back with a single wait at the end
+ */
+#define SELECTION_LEN 10
+#define TERMINATION_LEN 2
+#define SESSION_TERMINATION_LEN(x) ((SELECTION_LEN + TERMINATION_LEN) * (x))
+#define WAIT_LEN 2
+
+static void pxp_request_commit(struct i915_request *rq)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+ mutex_unlock(&tl->mutex);
+}
+
+int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 id)
+{
+ struct i915_request *rq;
+ struct intel_context *ce = pxp->ce;
+ u32 *cs;
+ int err = 0;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return 0;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_rq;
+ }
+
+ cs = pxp_emit_session_termination(cs, id);
+ cs = pxp_emit_wait(cs);
+
+ intel_ring_advance(rq, cs);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ pxp_request_commit(rq);
+
+ if (!err && i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ return err;
+}
+
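As a cross-check of the dword budget above: pxp_emit_session_selection()
emits exactly SELECTION_LEN (10) dwords (1 for the leading MFX_WAIT_PXP,
3 for the "pxp off" MI_FLUSH_DW, 1 for MI_SET_APPID, 1 for the second
MFX_WAIT_PXP, 3 for the "pxp on" flush, and 1 for the trailing
MFX_WAIT_PXP), pxp_emit_inline_termination() emits TERMINATION_LEN (2),
and pxp_emit_wait() emits WAIT_LEN (2). intel_pxp_terminate_session()
therefore reserves SESSION_TERMINATION_LEN(1) + WAIT_LEN = 10 + 2 + 2 =
14 dwords from the ring.
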
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h
new file mode 100644
index 000000000000..6d6299543578
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_CMD_H__
+#define __INTEL_PXP_CMD_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 idx);
+
+#endif /* __INTEL_PXP_CMD_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
new file mode 100644
index 000000000000..10e1e45471f1
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt_debugfs.h"
+#include "pxp/intel_pxp.h"
+#include "pxp/intel_pxp_irq.h"
+#include "i915_drv.h"
+
+static int pxp_info_show(struct seq_file *m, void *data)
+{
+ struct intel_pxp *pxp = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ bool enabled = intel_pxp_is_enabled(pxp);
+
+ if (!enabled) {
+ drm_printf(&p, "pxp disabled\n");
+ return 0;
+ }
+
+ drm_printf(&p, "active: %s\n", yesno(intel_pxp_is_active(pxp)));
+ drm_printf(&p, "instance counter: %u\n", pxp->key_instance);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(pxp_info);
+
+static int pxp_terminate_get(void *data, u64 *val)
+{
+ /* nothing to read */
+ return -EPERM;
+}
+
+static int pxp_terminate_set(void *data, u64 val)
+{
+ struct intel_pxp *pxp = data;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ if (!intel_pxp_is_active(pxp))
+ return -ENODEV;
+
+ /* simulate a termination interrupt */
+ spin_lock_irq(&gt->irq_lock);
+ intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
+ spin_unlock_irq(&gt->irq_lock);
+
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(100)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set, "%llx\n");
+
+void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *gt_root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "info", &pxp_info_fops, NULL },
+ { "terminate_state", &pxp_terminate_fops, NULL },
+ };
+ struct dentry *root;
+
+ if (!gt_root)
+ return;
+
+ if (!HAS_PXP((pxp_to_gt(pxp)->i915)))
+ return;
+
+ root = debugfs_create_dir("pxp", gt_root);
+ if (IS_ERR(root))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), pxp);
+}
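
In practice these files land under the per-GT debugfs directory
(typically /sys/kernel/debug/dri/<minor>/gt/pxp/, though the exact path
depends on the DRM minor): reading info reports whether PXP is active
plus the key instance counter, and writing any value to terminate_state
simulates a termination interrupt, then waits up to 100 ms for the
termination to complete.
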
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
new file mode 100644
index 000000000000..7e0c3d2f5d7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_PXP_DEBUGFS_H__
+#define __INTEL_PXP_DEBUGFS_H__
+
+struct intel_pxp;
+struct dentry;
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root);
+#else
+static inline void
+intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
new file mode 100644
index 000000000000..8d5553772ded
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+#include <linux/workqueue.h>
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_session.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_types.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "intel_runtime_pm.h"
+
+/**
+ * intel_pxp_irq_handler - Handles PXP interrupts.
+ * @pxp: pointer to pxp struct
+ * @iir: interrupt vector
+ */
+void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
+ return;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ if (unlikely(!iir))
+ return;
+
+ if (iir & (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT |
+ GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) {
+ /* immediately mark PXP as inactive on termination */
+ intel_pxp_mark_termination_in_progress(pxp);
+ pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED;
+ }
+
+ if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
+ pxp->session_events |= PXP_TERMINATION_COMPLETE;
+
+ if (pxp->session_events)
+ queue_work(system_unbound_wq, &pxp->session_work);
+}
+
+static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ const u32 mask = interrupts << 16;
+
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, mask);
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~mask);
+}
+
+static inline void pxp_irq_reset(struct intel_gt *gt)
+{
+ spin_lock_irq(&gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+void intel_pxp_irq_enable(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ spin_lock_irq(&gt->irq_lock);
+
+ if (!pxp->irq_enabled)
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
+
+ __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
+ pxp->irq_enabled = true;
+
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+void intel_pxp_irq_disable(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /*
+ * We always need to submit a global termination when we re-enable the
+ * interrupts, so there is no need to make sure that the session state
+ * makes sense at the end of this function. Just make sure this is not
+	 * called in a path where the driver still considers the session
+	 * valid and doesn't issue a termination on restart.
+ */
+ GEM_WARN_ON(intel_pxp_is_active(pxp));
+
+ spin_lock_irq(&gt->irq_lock);
+
+ pxp->irq_enabled = false;
+ __pxp_set_interrupts(gt, 0);
+
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ pxp_irq_reset(gt);
+
+ flush_work(&pxp->session_work);
+}
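
A note on __pxp_set_interrupts() above: as the interrupts << 16 shift
implies, the KCR bits occupy the upper 16 bits of the shared
GEN11_CRYPTO_RSVD enable/mask pair (gen11 GT interrupt registers pack
two units per register), and the mask register is active-low, hence the
~mask write.
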
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h
new file mode 100644
index 000000000000..8b5793654844
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_IRQ_H__
+#define __INTEL_PXP_IRQ_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+#define GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT BIT(1)
+#define GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT BIT(2)
+#define GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT BIT(3)
+
+#define GEN12_PXP_INTERRUPTS \
+ (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT | \
+ GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT | \
+ GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_irq_enable(struct intel_pxp *pxp);
+void intel_pxp_irq_disable(struct intel_pxp *pxp);
+void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir);
+#else
+static inline void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
new file mode 100644
index 000000000000..23fd86de5a24
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_pm.h"
+#include "intel_pxp_session.h"
+
+void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ /*
+ * Contexts using protected objects keep a runtime PM reference, so we
+ * can only runtime suspend when all of them have been either closed
+ * or banned. Therefore, there is no need to invalidate in that
+ * scenario.
+ */
+ if (!runtime)
+ intel_pxp_invalidate(pxp);
+
+ intel_pxp_fini_hw(pxp);
+
+ pxp->hw_state_invalidated = false;
+}
+
+void intel_pxp_resume(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ /*
+ * The PXP component gets automatically unbound when we go into S3 and
+ * re-bound after we come out, so in that scenario we can defer the
+ * hw init to the bind call.
+ */
+ if (!pxp->pxp_component)
+ return;
+
+ intel_pxp_init_hw(pxp);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
new file mode 100644
index 000000000000..c89e97a0c3d0
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_PM_H__
+#define __INTEL_PXP_PM_H__
+
+#include "intel_pxp_types.h"
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime);
+void intel_pxp_resume(struct intel_pxp *pxp);
+#else
+static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+{
+}
+
+static inline void intel_pxp_resume(struct intel_pxp *pxp)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_PM_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
new file mode 100644
index 000000000000..d02732f04757
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include "drm/i915_drm.h"
+#include "i915_drv.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_types.h"
+
+#define ARB_SESSION I915_PROTECTED_CONTENT_DEFAULT_SESSION /* shorter define */
+
+#define GEN12_KCR_SIP _MMIO(0x32260) /* KCR hwdrm session in play 0-31 */
+
+/* PXP global terminate register for session termination */
+#define PXP_GLOBAL_TERMINATE _MMIO(0x320f8)
+
+static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id)
+{
+ struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
+ intel_wakeref_t wakeref;
+ u32 sip = 0;
+
+ /* if we're suspended the session is considered off */
+ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref)
+ sip = intel_uncore_read(uncore, GEN12_KCR_SIP);
+
+ return sip & BIT(id);
+}
+
+static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_play)
+{
+ struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
+ intel_wakeref_t wakeref;
+ u32 mask = BIT(id);
+ int ret;
+
+ /* if we're suspended the session is considered off */
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
+ if (!wakeref)
+ return in_play ? -ENODEV : 0;
+
+ ret = intel_wait_for_register(uncore,
+ GEN12_KCR_SIP,
+ mask,
+ in_play ? mask : 0,
+ 100);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return ret;
+}
+
+static int pxp_create_arb_session(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ int ret;
+
+ pxp->arb_is_valid = false;
+
+ if (intel_pxp_session_is_in_play(pxp, ARB_SESSION)) {
+ drm_err(&gt->i915->drm, "arb session already in play at creation time\n");
+ return -EEXIST;
+ }
+
+ ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION);
+ if (ret) {
+ drm_err(&gt->i915->drm, "tee cmd for arb session creation failed\n");
+ return ret;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
+ if (ret) {
+ drm_err(&gt->i915->drm, "arb session failed to go in play\n");
+ return ret;
+ }
+
+ if (!++pxp->key_instance)
+ ++pxp->key_instance;
+
+ pxp->arb_is_valid = true;
+
+ return 0;
+}
+
+static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp)
+{
+ int ret;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /* must mark termination in progress before calling this function */
+ GEM_WARN_ON(pxp->arb_is_valid);
+
+ /* terminate the hw sessions */
+ ret = intel_pxp_terminate_session(pxp, ARB_SESSION);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to submit session termination\n");
+ return ret;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Session state did not clear\n");
+ return ret;
+ }
+
+ intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1);
+
+ return ret;
+}
+
+static void pxp_terminate(struct intel_pxp *pxp)
+{
+ int ret;
+
+ pxp->hw_state_invalidated = true;
+
+ /*
+ * if we fail to submit the termination there is no point in waiting for
+ * it to complete. PXP will be marked as non-active until the next
+ * termination is issued.
+ */
+ ret = pxp_terminate_arb_session_and_global(pxp);
+ if (ret)
+ complete_all(&pxp->termination);
+}
+
+static void pxp_terminate_complete(struct intel_pxp *pxp)
+{
+ /* Re-create the arb session after the teardown handling completes */
+ if (fetch_and_zero(&pxp->hw_state_invalidated))
+ pxp_create_arb_session(pxp);
+
+ complete_all(&pxp->termination);
+}
+
+void intel_pxp_session_work(struct work_struct *work)
+{
+ struct intel_pxp *pxp = container_of(work, typeof(*pxp), session_work);
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ intel_wakeref_t wakeref;
+ u32 events = 0;
+
+ spin_lock_irq(&gt->irq_lock);
+ events = fetch_and_zero(&pxp->session_events);
+ spin_unlock_irq(&gt->irq_lock);
+
+ if (!events)
+ return;
+
+ if (events & PXP_INVAL_REQUIRED)
+ intel_pxp_invalidate(pxp);
+
+ /*
+ * If we're processing an event while suspending then don't bother,
+ * we're going to re-init everything on resume anyway.
+ */
+ wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm);
+ if (!wakeref)
+ return;
+
+ if (events & PXP_TERMINATION_REQUEST) {
+ events &= ~PXP_TERMINATION_COMPLETE;
+ pxp_terminate(pxp);
+ }
+
+ if (events & PXP_TERMINATION_COMPLETE)
+ pxp_terminate_complete(pxp);
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
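
intel_pxp_session_work() claims every pending event in a single critical section (fetch_and_zero() under gt->irq_lock), so the IRQ handler can keep latching new bits while the worker runs. A runnable user-space model of that claim-and-clear handoff follows; a pthread mutex stands in for the spinlock, and all names are illustrative.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* ~ gt->irq_lock */
static uint32_t pending;                                 /* ~ session_events */

/* IRQ side: latch an event bit */
static void post_event(uint32_t e)
{
	pthread_mutex_lock(&lock);
	pending |= e;
	pthread_mutex_unlock(&lock);
}

/* worker side: claim and clear in one critical section */
static uint32_t claim_events(void)
{
	uint32_t e;

	pthread_mutex_lock(&lock);
	e = pending;
	pending = 0;
	pthread_mutex_unlock(&lock);

	return e;
}

int main(void)
{
	post_event(1u << 0);
	post_event(1u << 2);
	printf("claimed %#x, then %#x\n", claim_events(), claim_events()); /* 0x5, 0 */
	return 0;
}
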
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
new file mode 100644
index 000000000000..ba4c9d2b94b7
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_SESSION_H__
+#define __INTEL_PXP_SESSION_H__
+
+#include <linux/types.h>
+
+struct work_struct;
+
+void intel_pxp_session_work(struct work_struct *work);
+
+#endif /* __INTEL_PXP_SESSION_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
new file mode 100644
index 000000000000..49508f31dcb7
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include <linux/component.h>
+#include "drm/i915_pxp_tee_interface.h"
+#include "drm/i915_component.h"
+#include "i915_drv.h"
+#include "intel_pxp.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_tee_interface.h"
+
+static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev)
+{
+ return &kdev_to_i915(i915_kdev)->gt.pxp;
+}
+
+static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
+ void *msg_in, u32 msg_in_size,
+ void *msg_out, u32 msg_out_max_size,
+ u32 *msg_out_rcv_size)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct i915_pxp_component *pxp_component = pxp->pxp_component;
+ int ret = 0;
+
+ mutex_lock(&pxp->tee_mutex);
+
+ /*
+ * The binding of the component is asynchronous from i915 probe, so we
+ * can't be sure it has happened.
+ */
+ if (!pxp_component) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size);
+ if (ret) {
+ drm_err(&i915->drm, "Failed to send PXP TEE message\n");
+ goto unlock;
+ }
+
+ ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to receive PXP TEE message\n");
+ goto unlock;
+ }
+
+ if (ret > msg_out_max_size) {
+ drm_err(&i915->drm,
+ "Failed to receive PXP TEE message due to unexpected output size\n");
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (msg_out_rcv_size)
+ *msg_out_rcv_size = ret;
+
+ ret = 0;
+unlock:
+ mutex_unlock(&pxp->tee_mutex);
+ return ret;
+}
+
+/**
+ * i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee
+ * @i915_kdev: pointer to i915 kernel device
+ * @tee_kdev: pointer to tee kernel device
+ * @data: pointer to pxp_tee_master containing the function pointers
+ *
+ * This bind function is called during the system boot or resume from system sleep.
+ *
+ * Return: return 0 if successful.
+ */
+static int i915_pxp_tee_component_bind(struct device *i915_kdev,
+ struct device *tee_kdev, void *data)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+ intel_wakeref_t wakeref;
+
+ mutex_lock(&pxp->tee_mutex);
+ pxp->pxp_component = data;
+ pxp->pxp_component->tee_dev = tee_kdev;
+ mutex_unlock(&pxp->tee_mutex);
+
+ /* if we are suspended, the HW will be re-initialized on resume */
+ wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
+ if (!wakeref)
+ return 0;
+
+ /* the component is required to fully start the PXP HW */
+ intel_pxp_init_hw(pxp);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
+ struct device *tee_kdev, void *data)
+{
+ struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+
+ intel_pxp_fini_hw(pxp);
+
+ mutex_lock(&pxp->tee_mutex);
+ pxp->pxp_component = NULL;
+ mutex_unlock(&pxp->tee_mutex);
+}
+
+static const struct component_ops i915_pxp_tee_component_ops = {
+ .bind = i915_pxp_tee_component_bind,
+ .unbind = i915_pxp_tee_component_unbind,
+};
+
+int intel_pxp_tee_component_init(struct intel_pxp *pxp)
+{
+ int ret;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ struct drm_i915_private *i915 = gt->i915;
+
+ ret = component_add_typed(i915->drm.dev, &i915_pxp_tee_component_ops,
+ I915_COMPONENT_PXP);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to add PXP component (%d)\n", ret);
+ return ret;
+ }
+
+ pxp->pxp_component_added = true;
+
+ return 0;
+}
+
+void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+
+ if (!pxp->pxp_component_added)
+ return;
+
+ component_del(i915->drm.dev, &i915_pxp_tee_component_ops);
+ pxp->pxp_component_added = false;
+}
+
+int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
+ int arb_session_id)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct pxp_tee_create_arb_in msg_in = {0};
+ struct pxp_tee_create_arb_out msg_out = {0};
+ int ret;
+
+ msg_in.header.api_version = PXP_TEE_APIVER;
+ msg_in.header.command_id = PXP_TEE_ARB_CMDID;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+ msg_in.protection_mode = PXP_TEE_ARB_PROTECTION_MODE;
+ msg_in.session_id = arb_session_id;
+
+ ret = intel_pxp_tee_io_message(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out),
+ NULL);
+
+ if (ret)
+ drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret);
+
+ return ret;
+}
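
intel_pxp_tee_io_message() treats a positive return from the component's ->recv() as the reply size and rejects anything larger than the caller's buffer with -ENOSPC. A runnable distillation of that validation is sketched below; fake_recv() is a hypothetical stand-in for the component op, not a real interface.

#include <errno.h>
#include <stdio.h>

/* hypothetical stand-in for pxp_component->ops->recv() */
static int fake_recv(void *buf, unsigned int max)
{
	(void)buf; (void)max;
	return 12; /* pretend a 12-byte reply arrived */
}

static int io_message(void *out, unsigned int out_max, unsigned int *rcv)
{
	int ret = fake_recv(out, out_max);

	if (ret < 0)
		return ret;               /* transport error */
	if ((unsigned int)ret > out_max)
		return -ENOSPC;           /* reply would not fit */
	if (rcv)
		*rcv = ret;               /* report the actual size */

	return 0;
}

int main(void)
{
	char buf[16];
	unsigned int n = 0;

	printf("ret=%d n=%u\n", io_message(buf, sizeof(buf), &n), n); /* ret=0 n=12 */
	return 0;
}
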
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
new file mode 100644
index 000000000000..c136053ce340
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TEE_H__
+#define __INTEL_PXP_TEE_H__
+
+#include "intel_pxp.h"
+
+int intel_pxp_tee_component_init(struct intel_pxp *pxp);
+void intel_pxp_tee_component_fini(struct intel_pxp *pxp);
+
+int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
+ int arb_session_id);
+
+#endif /* __INTEL_PXP_TEE_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h
new file mode 100644
index 000000000000..36e9b0868f5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TEE_INTERFACE_H__
+#define __INTEL_PXP_TEE_INTERFACE_H__
+
+#include <linux/types.h>
+
+#define PXP_TEE_APIVER 0x40002
+#define PXP_TEE_ARB_CMDID 0x1e
+#define PXP_TEE_ARB_PROTECTION_MODE 0x2
+
+/* PXP TEE message header */
+struct pxp_tee_cmd_header {
+ u32 api_version;
+ u32 command_id;
+ u32 status;
+ /* Length of the message (excluding the header) */
+ u32 buffer_len;
+} __packed;
+
+/* PXP TEE message input to create an arbitrary session */
+struct pxp_tee_create_arb_in {
+ struct pxp_tee_cmd_header header;
+ u32 protection_mode;
+ u32 session_id;
+} __packed;
+
+/* PXP TEE message output to create an arbitrary session */
+struct pxp_tee_create_arb_out {
+ struct pxp_tee_cmd_header header;
+} __packed;
+
+#endif /* __INTEL_PXP_TEE_INTERFACE_H__ */
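
In intel_pxp_tee_cmd_create_arb_session() above, the header's buffer_len is filled as sizeof(msg_in) - sizeof(msg_in.header), i.e. payload bytes only, header excluded. A runnable check of that arithmetic with the same packed layout (struct names shortened for the sketch):

#include <stdint.h>
#include <stdio.h>

struct hdr {
	uint32_t api_version;
	uint32_t command_id;
	uint32_t status;
	uint32_t buffer_len;	/* payload bytes, header excluded */
} __attribute__((packed));

struct create_arb_in {
	struct hdr header;
	uint32_t protection_mode;
	uint32_t session_id;
} __attribute__((packed));

int main(void)
{
	struct create_arb_in msg = { 0 };

	msg.header.buffer_len = sizeof(msg) - sizeof(msg.header);
	printf("buffer_len = %u\n", (unsigned int)msg.header.buffer_len); /* 8 */
	return 0;
}
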
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
new file mode 100644
index 000000000000..73ef7d1754e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TYPES_H__
+#define __INTEL_PXP_TYPES_H__
+
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_context;
+struct i915_pxp_component;
+
+/**
+ * struct intel_pxp - pxp state
+ */
+struct intel_pxp {
+ /**
+ * @pxp_component: i915_pxp_component struct of the bound mei_pxp
+ * module. Only set and cleared inside component bind/unbind functions,
+ * which are protected by &tee_mutex.
+ */
+ struct i915_pxp_component *pxp_component;
+ /**
+ * @pxp_component_added: track if the pxp component has been added.
+ * Set and cleared in tee init and fini functions respectively.
+ */
+ bool pxp_component_added;
+
+ /** @ce: kernel-owned context used for PXP operations */
+ struct intel_context *ce;
+
+ /** @arb_mutex: protects arb session start */
+ struct mutex arb_mutex;
+ /**
+ * @arb_is_valid: tracks arb session status.
+ * After a teardown, the arb session can still be in play on the HW
+ * even if the keys are gone, so we can't rely on the HW state of the
+ * session to know if it's valid, so we need to track the status in SW.
+ */
+ bool arb_is_valid;
+
+ /**
+ * @key_instance: tracks which key instance we're on, so we can use it
+ * to determine if an object was created using the current key or a
+ * previous one.
+ */
+ u32 key_instance;
+
+ /** @tee_mutex: protects the tee channel binding and messaging. */
+ struct mutex tee_mutex;
+
+ /**
+ * @hw_state_invalidated: if the HW perceives an attack on the integrity
+ * of the encryption it will invalidate the keys and expect SW to
+ * re-initialize the session. We keep track of this state to make sure
+ * we only re-start the arb session when required.
+ */
+ bool hw_state_invalidated;
+
+ /** @irq_enabled: tracks the status of the kcr irqs */
+ bool irq_enabled;
+ /**
+ * @termination: tracks the status of a pending termination. Only
+ * re-initialized under gt->irq_lock and completed in &session_work.
+ */
+ struct completion termination;
+
+ /** @session_work: worker that manages session events. */
+ struct work_struct session_work;
+ /** @session_events: pending session events, protected with gt->irq_lock. */
+ u32 session_events;
+#define PXP_TERMINATION_REQUEST BIT(0)
+#define PXP_TERMINATION_COMPLETE BIT(1)
+#define PXP_INVAL_REQUIRED BIT(2)
+};
+
+#endif /* __INTEL_PXP_TYPES_H__ */
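
The @key_instance counter documented above is bumped in pxp_create_arb_session() with "if (!++pxp->key_instance) ++pxp->key_instance;" so that it skips 0 on wraparound and 0 can keep meaning "no key yet". A runnable sketch of that never-zero increment:

#include <stdint.h>
#include <stdio.h>

/* never-zero counter: skip 0 on u32 wraparound */
static uint32_t bump_key_instance(uint32_t v)
{
	if (!++v)
		++v;
	return v;
}

int main(void)
{
	/* both the first bump and the wraparound land on 1, never 0 */
	printf("%u %u\n", bump_key_instance(0), bump_key_instance(UINT32_MAX));
	return 0;
}
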
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index f843a5040706..46f4236039a9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -155,7 +155,7 @@ static int igt_ppgtt_alloc(void *arg)
if (!HAS_PPGTT(dev_priv))
return 0;
- ppgtt = i915_ppgtt_create(&dev_priv->gt);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -1053,7 +1053,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
if (IS_ERR(file))
return PTR_ERR(file);
- ppgtt = i915_ppgtt_create(&dev_priv->gt);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_free;
@@ -1300,7 +1300,7 @@ static int exercise_mock(struct drm_i915_private *i915,
if (!ctx)
return -ENOMEM;
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
err = func(vm, 0, min(vm->total, limit), end_time);
i915_vm_put(vm);
@@ -1848,7 +1848,7 @@ static int igt_cs_tlb(void *arg)
goto out_unlock;
}
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
if (i915_is_ggtt(vm))
goto out_vm;
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index cfa5c4165a4f..bdd290f2bf3c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -47,5 +47,7 @@ selftest(execlists, intel_execlists_live_selftests)
selftest(ring_submission, intel_ring_submission_live_selftests)
selftest(perf, i915_perf_live_selftests)
selftest(slpc, intel_slpc_live_selftests)
+selftest(guc, intel_guc_live_selftests)
+selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index dd0607254a95..1f10fe36619b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -39,7 +39,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != rcu_access_pointer(ctx->vm)) {
+ if (vma->vm != ctx->vm) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -118,7 +118,7 @@ static int create_vmas(struct drm_i915_private *i915,
struct i915_vma *vma;
int err;
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
vma = checked_vma_instance(obj, vm, NULL);
i915_vm_put(vm);
if (IS_ERR(vma))
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
index 4b328346b48a..310fb83c527e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
@@ -14,6 +14,18 @@
#define REDUCED_PREEMPT 10
#define WAIT_FOR_RESET_TIME 10000
+struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ return engine;
+
+ pr_err("No valid engine found!\n");
+ return NULL;
+}
+
int intel_selftest_modify_policy(struct intel_engine_cs *engine,
struct intel_selftest_saved_policy *saved,
u32 modify_type)
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h
index 35c098601ac0..ae60bb507f45 100644
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h
@@ -10,6 +10,7 @@
struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
struct intel_selftest_saved_policy {
u32 flags;
@@ -23,6 +24,7 @@ enum selftest_scheduler_modify {
SELFTEST_SCHEDULER_MODIFY_FAST_RESET,
};
+struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt);
int intel_selftest_modify_policy(struct intel_engine_cs *engine,
struct intel_selftest_saved_policy *saved,
enum selftest_scheduler_modify modify_type);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 720b60853f8b..bc8128170a99 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -62,30 +62,40 @@ static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
static int intel_shadow_table_check(void)
{
struct {
- const i915_reg_t *regs;
+ const struct i915_range *regs;
unsigned int size;
- } reg_lists[] = {
+ } range_lists[] = {
{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
- { xehp_shadowed_regs, ARRAY_SIZE(xehp_shadowed_regs) },
+ { dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
};
- const i915_reg_t *reg;
+ const struct i915_range *range;
unsigned int i, j;
s32 prev;
- for (j = 0; j < ARRAY_SIZE(reg_lists); ++j) {
- reg = reg_lists[j].regs;
- for (i = 0, prev = -1; i < reg_lists[j].size; i++, reg++) {
- u32 offset = i915_mmio_reg_offset(*reg);
+ for (j = 0; j < ARRAY_SIZE(range_lists); ++j) {
+ range = range_lists[j].regs;
+ for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) {
+ if (range->end < range->start) {
+ pr_err("%s: range[%d]:(%06x-%06x) has end before start\n",
+ __func__, i, range->start, range->end);
+ return -EINVAL;
+ }
+
+ if (prev >= (s32)range->start) {
+ pr_err("%s: range[%d]:(%06x-%06x) is before end of previous (%06x)\n",
+ __func__, i, range->start, range->end, prev);
+ return -EINVAL;
+ }
- if (prev >= (s32)offset) {
- pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
- __func__, i, offset, prev);
+ if (range->start % 4) {
+ pr_err("%s: range[%d]:(%06x-%06x) has non-dword-aligned start\n",
+ __func__, i, range->start, range->end);
return -EINVAL;
}
- prev = offset;
+ prev = range->end;
}
}
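
The rewritten selftest validates a table of {start, end} ranges rather than single offsets: each range must be well-formed, strictly after the previous one, and dword-aligned. A runnable distillation of those three checks, with hypothetical sample data:

#include <stdio.h>

struct range { unsigned int start, end; };

static int check_ranges(const struct range *r, int n)
{
	int prev = -1;

	for (int i = 0; i < n; i++) {
		if (r[i].end < r[i].start)
			return -1;	/* end before start */
		if (prev >= (int)r[i].start)
			return -1;	/* unsorted or overlapping */
		if (r[i].start % 4)
			return -1;	/* not dword-aligned */
		prev = r[i].end;
	}
	return 0;
}

int main(void)
{
	const struct range ok[]  = { { 0x00, 0x0c }, { 0x10, 0x10 } };
	const struct range bad[] = { { 0x10, 0x10 }, { 0x0c, 0x20 } };

	printf("%d %d\n", check_ranges(ok, 2), check_ranges(bad, 2)); /* 0 -1 */
	return 0;
}
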
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index efa86dffe3c6..75793008c4ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -6,8 +6,6 @@
#include <drm/ttm/ttm_placement.h>
#include <linux/scatterlist.h>
-#include <drm/ttm/ttm_placement.h>
-
#include "gem/i915_gem_region.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
new file mode 100644
index 000000000000..35380738a951
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#include <asm/iosf_mbi.h>
+
+#include "i915_drv.h"
+#include "vlv_sideband.h"
+
+/*
+ * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
+ * VLV_VLV2_PUNIT_HAS_0.8.docx
+ */
+
+/* Standard MMIO read, non-posted */
+#define SB_MRD_NP 0x00
+/* Standard MMIO write, non-posted */
+#define SB_MWR_NP 0x01
+/* Private register read, double-word addressing, non-posted */
+#define SB_CRRDDA_NP 0x06
+/* Private register write, double-word addressing, non-posted */
+#define SB_CRWRDA_NP 0x07
+
+static void ping(void *info)
+{
+}
+
+static void __vlv_punit_get(struct drm_i915_private *i915)
+{
+ iosf_mbi_punit_acquire();
+
+ /*
+ * Prevent the cpu from sleeping while we use this sideband, otherwise
+ * the punit may cause a machine hang. The issue appears to be isolated
+ * to changing the power state of the CPU package while changing
+ * the power state via the punit, and we have only observed it
+ * reliably on 4-core Baytrail systems, suggesting the issue is in the
+ * power delivery mechanism and likely to be board/function
+ * specific. Hence we presume the workaround needs only be applied
+ * to the Valleyview P-unit and not all sideband communications.
+ */
+ if (IS_VALLEYVIEW(i915)) {
+ cpu_latency_qos_update_request(&i915->sb_qos, 0);
+ on_each_cpu(ping, NULL, 1);
+ }
+}
+
+static void __vlv_punit_put(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915))
+ cpu_latency_qos_update_request(&i915->sb_qos,
+ PM_QOS_DEFAULT_VALUE);
+
+ iosf_mbi_punit_release();
+}
+
+void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
+{
+ if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ __vlv_punit_get(i915);
+
+ mutex_lock(&i915->sb_lock);
+}
+
+void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
+{
+ mutex_unlock(&i915->sb_lock);
+
+ if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ __vlv_punit_put(i915);
+}
+
+static int vlv_sideband_rw(struct drm_i915_private *i915,
+ u32 devfn, u32 port, u32 opcode,
+ u32 addr, u32 *val)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
+ int err;
+
+ lockdep_assert_held(&i915->sb_lock);
+ if (port == IOSF_PORT_PUNIT)
+ iosf_mbi_assert_punit_acquired();
+
+ /* Flush the previous comms, just in case it failed last time. */
+ if (intel_wait_for_register(uncore,
+ VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+ 5)) {
+ drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
+ is_read ? "read" : "write");
+ return -EAGAIN;
+ }
+
+ preempt_disable();
+
+ intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
+ intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
+ intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
+ (devfn << IOSF_DEVFN_SHIFT) |
+ (opcode << IOSF_OPCODE_SHIFT) |
+ (port << IOSF_PORT_SHIFT) |
+ (0xf << IOSF_BYTE_ENABLES_SHIFT) |
+ (0 << IOSF_BAR_SHIFT) |
+ IOSF_SB_BUSY);
+
+ if (__intel_wait_for_register_fw(uncore,
+ VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+ 10000, 0, NULL) == 0) {
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
+ err = 0;
+ } else {
+ drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
+ is_read ? "read" : "write");
+ err = -ETIMEDOUT;
+ }
+
+ preempt_enable();
+
+ return err;
+}
+
+u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ SB_CRRDDA_NP, addr, &val);
+
+ return val;
+}
+
+int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
+{
+ return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ SB_CRWRDA_NP, addr, &val);
+}
+
+u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ SB_CRRDDA_NP, reg, &val);
+
+ return val;
+}
+
+void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
+ SB_CRRDDA_NP, addr, &val);
+
+ return val;
+}
+
+u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
+ SB_CRRDDA_NP, reg, &val);
+
+ return val;
+}
+
+void vlv_iosf_sb_write(struct drm_i915_private *i915,
+ u8 port, u32 reg, u32 val)
+{
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+ SB_CRRDDA_NP, reg, &val);
+
+ return val;
+}
+
+void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+ SB_CRRDDA_NP, reg, &val);
+
+ return val;
+}
+
+void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy)
+{
+ /*
+ * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
+ * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
+ */
+ if (IS_CHERRYVIEW(i915))
+ return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
+ else
+ return IOSF_PORT_DPIO;
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
+{
+ u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
+
+ /*
+ * FIXME: There might be some registers where all 1's is a valid value,
+ * so ideally we should check the register offset instead...
+ */
+ drm_WARN(&i915->drm, val == 0xffffffff,
+ "DPIO read pipe %c reg 0x%x == 0x%x\n",
+ pipe_name(pipe), reg, val);
+
+ return val;
+}
+
+void vlv_dpio_write(struct drm_i915_private *i915,
+ enum pipe pipe, int reg, u32 val)
+{
+ u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+
+ vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
+}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
+ reg, &val);
+ return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+ vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
+ reg, &val);
+}
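
The accessors above assume the caller brackets them with the matching get/put so that sb_lock (and, for the punit, the iosf_mbi/QoS dance) is held across the whole transaction. A hedged usage sketch (kernel context, not a standalone program; read_my_punit_reg() is a hypothetical caller):

/* hypothetical caller showing the required lock bracketing */
static u32 read_my_punit_reg(struct drm_i915_private *i915, u32 addr)
{
	u32 val;

	vlv_punit_get(i915);		/* takes sb_lock + punit side */
	val = vlv_punit_read(i915, addr);
	vlv_punit_put(i915);

	return val;
}
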
diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
index d1d14bcb8f56..d7732f612e7f 100644
--- a/drivers/gpu/drm/i915/intel_sideband.h
+++ b/drivers/gpu/drm/i915/vlv_sideband.h
@@ -1,18 +1,16 @@
/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
-#ifndef _INTEL_SIDEBAND_H_
-#define _INTEL_SIDEBAND_H_
+#ifndef _VLV_SIDEBAND_H_
+#define _VLV_SIDEBAND_H_
#include <linux/bitops.h>
#include <linux/types.h>
-struct drm_i915_private;
enum pipe;
-
-enum intel_sbi_destination {
- SBI_ICLK,
- SBI_MPHY,
-};
+struct drm_i915_private;
enum {
VLV_IOSF_SB_BUNIT,
@@ -122,22 +120,4 @@ static inline void vlv_punit_put(struct drm_i915_private *i915)
vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
}
-u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination);
-void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
- enum intel_sbi_destination destination);
-
-int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
- u32 *val, u32 *val1);
-int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
- u32 val, int fast_timeout_us,
- int slow_timeout_ms);
-#define sandybridge_pcode_write(i915, mbox, val) \
- sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0)
-
-int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms);
-
-int intel_pcode_init(struct drm_i915_private *i915);
-
-#endif /* _INTEL_SIDEBAND_H */
+#endif /* _VLV_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/kmb/kmb_crtc.c b/drivers/gpu/drm/kmb/kmb_crtc.c
index 44327bc629ca..06613ffeaaf8 100644
--- a/drivers/gpu/drm/kmb/kmb_crtc.c
+++ b/drivers/gpu/drm/kmb/kmb_crtc.c
@@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = {
.disable_vblank = kmb_crtc_disable_vblank,
};
-static void kmb_crtc_set_mode(struct drm_crtc *crtc)
+static void kmb_crtc_set_mode(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *m = &crtc->state->adjusted_mode;
@@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct drm_crtc *crtc)
unsigned int val = 0;
/* Initialize mipi */
- kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz);
+ kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state);
drm_info(dev,
"vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
m->crtc_vsync_start - m->crtc_vdisplay,
@@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
clk_prepare_enable(kmb->kmb_clk.clk_lcd);
- kmb_crtc_set_mode(crtc);
+ kmb_crtc_set_mode(crtc, state);
drm_crtc_vblank_on(crtc);
}
@@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
}
+static enum drm_mode_status
+ kmb_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ int refresh;
+ struct drm_device *dev = crtc->dev;
+ int vfp = mode->vsync_start - mode->vdisplay;
+
+ if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) {
+ drm_dbg(dev, "height = %d less than %d",
+ mode->vdisplay, KMB_CRTC_MAX_HEIGHT);
+ return MODE_BAD_VVALUE;
+ }
+ if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) {
+ drm_dbg(dev, "width = %d less than %d",
+ mode->hdisplay, KMB_CRTC_MAX_WIDTH);
+ return MODE_BAD_HVALUE;
+ }
+ refresh = drm_mode_vrefresh(mode);
+ if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) {
+ drm_dbg(dev, "refresh = %d less than %d or greater than %d",
+ refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH);
+ return MODE_BAD;
+ }
+
+ if (vfp < KMB_CRTC_MIN_VFP) {
+ drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP);
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
.atomic_begin = kmb_crtc_atomic_begin,
.atomic_enable = kmb_crtc_atomic_enable,
.atomic_disable = kmb_crtc_atomic_disable,
.atomic_flush = kmb_crtc_atomic_flush,
+ .mode_valid = kmb_crtc_mode_valid,
};
int kmb_setup_crtc(struct drm_device *drm)
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 12ce669650cc..961ac6fb5fcf 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -380,7 +380,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
if (val & LAYER3_DMA_FIFO_UNDERFLOW)
drm_dbg(&kmb->drm,
"LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
- if (val & LAYER3_DMA_FIFO_UNDERFLOW)
+ if (val & LAYER3_DMA_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
}
diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
index 69a62e2d03ff..bf085e95b28f 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.h
+++ b/drivers/gpu/drm/kmb/kmb_drv.h
@@ -20,11 +20,18 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
+/* Platform definitions */
+#define KMB_CRTC_MIN_VFP 4
+#define KMB_CRTC_MAX_WIDTH 1920 /* max width in pixels */
+#define KMB_CRTC_MAX_HEIGHT 1080 /* max height in pixels */
+#define KMB_CRTC_MIN_WIDTH 1920
+#define KMB_CRTC_MIN_HEIGHT 1080
#define KMB_FB_MAX_WIDTH 1920
#define KMB_FB_MAX_HEIGHT 1080
#define KMB_FB_MIN_WIDTH 1
#define KMB_FB_MIN_HEIGHT 1
-
+#define KMB_MIN_VREFRESH 59 /* vertical refresh in Hz */
+#define KMB_MAX_VREFRESH 60 /* vertical refresh in Hz */
#define KMB_LCD_DEFAULT_CLK 200000000
#define KMB_SYS_CLK_MHZ 500
@@ -50,6 +57,7 @@ struct kmb_drm_private {
spinlock_t irq_lock;
int irq_lcd;
int sys_clk_mhz;
+ struct disp_cfg init_disp_cfg[KMB_MAX_PLANES];
struct layer_status plane_status[KMB_MAX_PLANES];
int kmb_under_flow;
int kmb_flush_done;
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c
index 1793cd31b117..f6071882054c 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.c
+++ b/drivers/gpu/drm/kmb/kmb_dsi.c
@@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi,
return 0;
}
+#define CLK_DIFF_LOW 50
+#define CLK_DIFF_HI 60
+#define SYSCLK_500 500
+
static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
struct mipi_tx_frame_timing_cfg *fg_cfg)
{
@@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
/* 500 MHz system clock minus 50 to account for the difference in
* MIPI clock speed in RTL tests
*/
- sysclk = kmb_dsi->sys_clk_mhz - 50;
+ if (kmb_dsi->sys_clk_mhz == SYSCLK_500) {
+ sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW;
+ } else {
+ /* 700 MHz clock */
+ sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI;
+ }
/* PPL-Pixel Packing Layer, LLP-Low Level Protocol
* Frame generator timing parameters are clocked on the system clock,
@@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi,
return 0;
}
-static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
+static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi,
+ struct drm_atomic_state *old_state)
{
struct regmap *msscam;
@@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
dev_dbg(kmb_dsi->dev, "failed to get msscam syscon");
return;
}
-
+ drm_atomic_bridge_chain_enable(adv_bridge, old_state);
/* DISABLE MIPI->CIF CONNECTION */
regmap_write(msscam, MSS_MIPI_CIF_CFG, 0);
@@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
}
int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
- int sys_clk_mhz)
+ int sys_clk_mhz, struct drm_atomic_state *old_state)
{
u64 data_rate;
@@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
mipi_tx_init_cfg.lane_rate_mbps = data_rate;
}
- kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0);
-
/* Initialize mipi controller */
mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg);
/* Dphy initialization */
mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg);
- connect_lcd_to_mipi(kmb_dsi);
+ connect_lcd_to_mipi(kmb_dsi, old_state);
dev_info(kmb_dsi->dev, "mipi hw initialized");
return 0;
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.h b/drivers/gpu/drm/kmb/kmb_dsi.h
index 66b7c500d9bc..09dc88743d77 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.h
+++ b/drivers/gpu/drm/kmb/kmb_dsi.h
@@ -380,7 +380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev);
struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev);
void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi);
int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
- int sys_clk_mhz);
+ int sys_clk_mhz, struct drm_atomic_state *old_state);
int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi);
int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi);
int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi);
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 06b0c42c9e91..00404ba4126d 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = {
static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
{
+ struct kmb_drm_private *kmb;
+ struct kmb_plane *kmb_plane = to_kmb_plane(plane);
int i;
+ int plane_id = kmb_plane->id;
+ struct disp_cfg init_disp_cfg;
+ kmb = to_kmb(plane->dev);
+ init_disp_cfg = kmb->init_disp_cfg[plane_id];
+ /* Due to HW limitations, changing pixel format after initial
+ * plane configuration is not supported.
+ */
+ if (init_disp_cfg.format && init_disp_cfg.format != format) {
+ drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration");
+ return -EINVAL;
+ }
for (i = 0; i < plane->format_count; i++) {
if (plane->format_types[i] == format)
return 0;
@@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct kmb_drm_private *kmb;
+ struct kmb_plane *kmb_plane = to_kmb_plane(plane);
+ int plane_id = kmb_plane->id;
+ struct disp_cfg init_disp_cfg;
struct drm_framebuffer *fb;
int ret;
struct drm_crtc_state *crtc_state;
bool can_position;
+ kmb = to_kmb(plane->dev);
+ init_disp_cfg = kmb->init_disp_cfg[plane_id];
fb = new_plane_state->fb;
if (!fb || !new_plane_state->crtc)
return 0;
@@ -99,6 +118,16 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
return -EINVAL;
+
+ /* Due to HW limitations, changing plane height or width after
+ * initial plane configuration is not supported.
+ */
+ if ((init_disp_cfg.width && init_disp_cfg.height) &&
+ (init_disp_cfg.width != fb->width ||
+ init_disp_cfg.height != fb->height)) {
+ drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration");
+ return -EINVAL;
+ }
can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
crtc_state =
drm_atomic_get_existing_crtc_state(state,
@@ -335,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
unsigned char plane_id;
int num_planes;
static dma_addr_t addr[MAX_SUB_PLANES];
+ struct disp_cfg *init_disp_cfg;
if (!plane || !new_plane_state || !old_plane_state)
return;
@@ -357,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
}
spin_unlock_irq(&kmb->irq_lock);
- src_w = (new_plane_state->src_w >> 16);
+ init_disp_cfg = &kmb->init_disp_cfg[plane_id];
+ src_w = new_plane_state->src_w >> 16;
src_h = new_plane_state->src_h >> 16;
crtc_x = new_plane_state->crtc_x;
crtc_y = new_plane_state->crtc_y;
@@ -500,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
/* Enable DMA */
kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
+
+ /* Save initial display config */
+ if (!init_disp_cfg->width ||
+ !init_disp_cfg->height ||
+ !init_disp_cfg->format) {
+ init_disp_cfg->width = width;
+ init_disp_cfg->height = height;
+ init_disp_cfg->format = fb->format->format;
+ }
+
drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));
diff --git a/drivers/gpu/drm/kmb/kmb_plane.h b/drivers/gpu/drm/kmb/kmb_plane.h
index 6e8d22cf8819..b51144044fe8 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.h
+++ b/drivers/gpu/drm/kmb/kmb_plane.h
@@ -63,6 +63,12 @@ struct layer_status {
u32 ctrl;
};
+struct disp_cfg {
+ unsigned int width;
+ unsigned int height;
+ unsigned int format;
+};
+
struct kmb_plane *kmb_plane_init(struct drm_device *drm);
void kmb_plane_destroy(struct drm_plane *plane);
#endif /* __KMB_PLANE_H__ */
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index de62966243cd..640acc060467 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -267,7 +267,9 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
if (explicit)
return 0;
- return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
+ return drm_sched_job_add_implicit_dependencies(&task->base,
+ &bo->base.base,
+ write);
}
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@@ -285,7 +287,7 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
if (err)
return err;
- err = drm_gem_fence_array_add(&submit->task->deps, fence);
+ err = drm_sched_job_add_dependency(&submit->task->base, fence);
if (err) {
dma_fence_put(fence);
return err;
@@ -359,8 +361,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
goto err_out2;
}
- fence = lima_sched_context_queue_task(
- submit->ctx->context + submit->pipe, submit->task);
+ fence = lima_sched_context_queue_task(submit->task);
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index dba8329937a3..99d5f6f1a882 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -129,27 +129,20 @@ int lima_sched_task_init(struct lima_sched_task *task,
return err;
}
+ drm_sched_job_arm(&task->base);
+
task->num_bos = num_bos;
task->vm = lima_vm_get(vm);
- xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
-
return 0;
}
void lima_sched_task_fini(struct lima_sched_task *task)
{
- struct dma_fence *fence;
- unsigned long index;
int i;
drm_sched_job_cleanup(&task->base);
- xa_for_each(&task->deps, index, fence) {
- dma_fence_put(fence);
- }
- xa_destroy(&task->deps);
-
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
drm_gem_object_put(&task->bos[i]->base.base);
@@ -175,27 +168,15 @@ void lima_sched_context_fini(struct lima_sched_pipe *pipe,
drm_sched_entity_fini(&context->base);
}
-struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
- struct lima_sched_task *task)
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
{
struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
trace_lima_task_submit(task);
- drm_sched_entity_push_job(&task->base, &context->base);
+ drm_sched_entity_push_job(&task->base);
return fence;
}
-static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
- struct drm_sched_entity *entity)
-{
- struct lima_sched_task *task = to_lima_task(job);
-
- if (!xa_empty(&task->deps))
- return xa_erase(&task->deps, task->last_dep++);
-
- return NULL;
-}
-
static int lima_pm_busy(struct lima_device *ldev)
{
int ret;
@@ -471,7 +452,6 @@ static void lima_sched_free_job(struct drm_sched_job *job)
}
static const struct drm_sched_backend_ops lima_sched_ops = {
- .dependency = lima_sched_dependency,
.run_job = lima_sched_run_job,
.timedout_job = lima_sched_timedout_job,
.free_job = lima_sched_free_job,
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 90f03c48ef4a..6a11764d87b3 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -23,9 +23,6 @@ struct lima_sched_task {
struct lima_vm *vm;
void *frame;
- struct xarray deps;
- unsigned long last_dep;
-
struct lima_bo **bos;
int num_bos;
@@ -98,8 +95,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
atomic_t *guilty);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
struct lima_sched_context *context);
-struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
- struct lima_sched_task *task);
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
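
With the driver-private xarray and .dependency callback gone, dependencies live in the scheduler job itself. A hedged sketch of the submit flow this series converts lima to (kernel context, error handling elided; names follow the lima code above):

/* job setup: bind to entity, then record dependencies */
drm_sched_job_init(&task->base, &context->base, vm);
drm_sched_job_add_dependency(&task->base, fence);	/* explicit sync fence */
drm_sched_job_add_implicit_dependencies(&task->base,
					&bo->base.base, write);

/* arm: the job's scheduler fences become valid from here on */
drm_sched_job_arm(&task->base);
fence = dma_fence_get(&task->base.s_fence->finished);

/* push: the entity is now implicit in the job */
drm_sched_entity_push_job(&task->base);
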
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index e60566a5739c..5b5afc6aaf8e 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -276,7 +276,6 @@ static int mcde_probe(struct platform_device *pdev)
struct drm_device *drm;
struct mcde *mcde;
struct component_match *match = NULL;
- struct resource *res;
u32 pid;
int irq;
int ret;
@@ -344,8 +343,7 @@ static int mcde_probe(struct platform_device *pdev)
goto clk_disable;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mcde->regs = devm_ioremap_resource(dev, res);
+ mcde->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcde->regs)) {
dev_err(dev, "no MCDE regs\n");
ret = -EINVAL;
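
This and the following conversions collapse the two-step resource lookup plus map into a single devm-managed helper. A hedged equivalence sketch (kernel context, inside a platform driver's probe):

struct resource *res;
void __iomem *base;

/* before: two steps */
res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);

/* after: one call, same error semantics */
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
	return PTR_ERR(base);
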
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 180ebbccbeda..5651734ce977 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1169,7 +1169,6 @@ static int mcde_dsi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mcde_dsi *d;
struct mipi_dsi_host *host;
- struct resource *res;
u32 dsi_id;
int ret;
@@ -1187,8 +1186,7 @@ static int mcde_dsi_probe(struct platform_device *pdev)
return PTR_ERR(d->prcmu);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- d->regs = devm_ioremap_resource(dev, res);
+ d->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(d->regs))
return PTR_ERR(d->regs);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 93b40c245f00..5d90d2eb0019 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -11,6 +11,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <video/mipi_display.h>
#include <video/videomode.h>
@@ -980,8 +981,10 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
struct mtk_dsi *dsi = dev_get_drvdata(dev);
ret = mtk_dsi_encoder_init(drm, dsi);
+ if (ret)
+ return ret;
- return ret;
+ return device_reset_optional(dev);
}
static void mtk_dsi_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index bc0d60df04ae..7f41a33592c8 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -206,8 +206,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
priv->compat = match->compat;
priv->afbcd.ops = match->afbcd_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource_byname(pdev, "vpu");
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
goto free_drm;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 2ed87cfdd735..0afbd1e70bfc 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -978,7 +978,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
struct dw_hdmi_plat_data *dw_plat_data;
struct drm_bridge *next_bridge;
struct drm_encoder *encoder;
- struct resource *res;
int irq;
int ret;
@@ -1042,8 +1041,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(meson_dw_hdmi->hdmitx_phy);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res);
+ meson_dw_hdmi->hdmitx = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson_dw_hdmi->hdmitx))
return PTR_ERR(meson_dw_hdmi->hdmitx);
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 4fd4de16cd32..894472921c30 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -38,16 +38,18 @@
typedef struct drm32_mga_init {
int func;
u32 sarea_priv_offset;
- int chipset;
- int sgram;
- unsigned int maccess;
- unsigned int fb_cpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_cpp;
- unsigned int depth_offset, depth_pitch;
- unsigned int texture_offset[MGA_NR_TEX_HEAPS];
- unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ struct_group(always32bit,
+ int chipset;
+ int sgram;
+ unsigned int maccess;
+ unsigned int fb_cpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ );
u32 fb_offset;
u32 mmio_offset;
u32 status_offset;
@@ -67,9 +69,8 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
init.func = init32.func;
init.sarea_priv_offset = init32.sarea_priv_offset;
- memcpy(&init.chipset, &init32.chipset,
- offsetof(drm_mga_init_t, fb_offset) -
- offsetof(drm_mga_init_t, chipset));
+ memcpy(&init.always32bit, &init32.always32bit,
+ sizeof(init32.always32bit));
init.fb_offset = init32.fb_offset;
init.mmio_offset = init32.mmio_offset;
init.status_offset = init32.status_offset;
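
struct_group() wraps a run of members in a union of an anonymous and a named struct, so the memcpy() above can address the whole run with a compiler-known size instead of offsetof() arithmetic. A runnable, simplified user-space re-creation (the real kernel macro also handles tags and attributes):

#include <stdio.h>
#include <string.h>

/* simplified stand-in for the kernel's struct_group() helper */
#define struct_group(NAME, ...) \
	union { \
		struct { __VA_ARGS__ }; \
		struct { __VA_ARGS__ } NAME; \
	}

struct example {
	int func;
	struct_group(always32bit,
		int chipset;
		unsigned int maccess;
	);
	unsigned int fb_offset;
};

int main(void)
{
	struct example a = { .chipset = 5, .maccess = 7 }, b = { 0 };

	/* bounds-safe group copy: sizeof() covers exactly the members */
	memcpy(&b.always32bit, &a.always32bit, sizeof(b.always32bit));
	printf("%d %u\n", b.chipset, b.maccess); /* 5 7 */
	return 0;
}
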
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 196f74a0834e..4368112023f7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -224,8 +224,6 @@ struct mga_device {
enum mga_type type;
- int fb_mtrr;
-
union {
struct {
long ref_clk;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c
index b667371b69a4..fa996d46feed 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mm.c
@@ -75,26 +75,12 @@ static size_t mgag200_probe_vram(struct mga_device *mdev, void __iomem *mem,
return offset - 65536;
}
-static void mgag200_mm_release(struct drm_device *dev, void *ptr)
-{
- struct mga_device *mdev = to_mga_device(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- mdev->vram_fb_available = 0;
- iounmap(mdev->vram);
- arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- arch_phys_wc_del(mdev->fb_mtrr);
- mdev->fb_mtrr = 0;
-}
-
int mgag200_mm_init(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 misc;
resource_size_t start, len;
- int ret;
WREG_ECRT(0x04, 0x00);
@@ -112,15 +98,13 @@ int mgag200_mm_init(struct mga_device *mdev)
return -ENXIO;
}
- arch_io_reserve_memtype_wc(start, len);
-
- mdev->fb_mtrr = arch_phys_wc_add(start, len);
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_io_reserve_memtype_wc(dev->dev, start, len);
+ devm_arch_phys_wc_add(dev->dev, start, len);
- mdev->vram = ioremap(start, len);
- if (!mdev->vram) {
- ret = -ENOMEM;
- goto err_arch_phys_wc_del;
- }
+ mdev->vram = devm_ioremap(dev->dev, start, len);
+ if (!mdev->vram)
+ return -ENOMEM;
mdev->mc.vram_size = mgag200_probe_vram(mdev, mdev->vram, len);
mdev->mc.vram_base = start;
@@ -128,10 +112,5 @@ int mgag200_mm_init(struct mga_device *mdev)
mdev->vram_fb_available = mdev->mc.vram_size;
- return drmm_add_action_or_reset(dev, mgag200_mm_release, NULL);
-
-err_arch_phys_wc_del:
- arch_phys_wc_del(mdev->fb_mtrr);
- arch_io_free_memtype_wc(start, len);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 3ddf739a6f9b..ae11061727ff 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,9 +3,9 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
+ depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on IOMMU_SUPPORT
- depends on OF && COMMON_CLK
+ depends on (OF && COMMON_CLK) || COMPILE_TEST
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
@@ -14,6 +14,8 @@ config DRM_MSM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
+ select DRM_BRIDGE
+ select DRM_PANEL_BRIDGE
select DRM_SCHED
select SHMEM
select TMPFS
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 904535eda0c4..40577f8856d8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -51,7 +51,6 @@ msm-y := \
disp/mdp5/mdp5_mixer.o \
disp/mdp5/mdp5_plane.o \
disp/mdp5/mdp5_smp.o \
- disp/dpu1/dpu_core_irq.o \
disp/dpu1/dpu_core_perf.o \
disp/dpu1/dpu_crtc.o \
disp/dpu1/dpu_encoder.o \
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index c9d11d57aed6..dd593ec2bc56 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -138,7 +138,7 @@ reset_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
@@ -154,6 +154,6 @@ void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
ARRAY_SIZE(a5xx_debugfs_list),
minor->debugfs_root, minor);
- debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
- &reset_fops);
+ debugfs_create_file_unsafe("reset", S_IWUGO, minor->debugfs_root, dev,
+ &reset_fops);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 8b73f70766a4..71e52b2b2025 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -516,11 +516,11 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
- void __iomem *seqptr;
+ void __iomem *seqptr = NULL;
uint32_t pdc_address_offset;
bool pdc_in_aop = false;
- if (!pdcptr)
+ if (IS_ERR(pdcptr))
goto err;
if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
@@ -532,7 +532,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (!pdc_in_aop) {
seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
- if (!seqptr)
+ if (IS_ERR(seqptr))
goto err;
}
@@ -891,7 +891,7 @@ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
- if (IS_ERR_OR_NULL(gpu_opp))
+ if (IS_ERR(gpu_opp))
return;
gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
@@ -905,7 +905,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
- if (IS_ERR_OR_NULL(gpu_opp))
+ if (IS_ERR(gpu_opp))
return;
dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
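
The IS_ERR() conversions above follow the usual ERR_PTR contract: a6xx_gmu_get_mmio() and dev_pm_opp_find_freq_exact() report failure as an encoded error pointer, never NULL, so a plain !ptr test lets errors slip through to a later dereference. A sketch of the correct check (get_mmio() is a hypothetical stand-in):

	#include <linux/err.h>
	#include <linux/io.h>

	static int use_mmio(struct device *dev)
	{
		void __iomem *base = get_mmio(dev);	/* returns ERR_PTR() on failure */

		if (IS_ERR(base))			/* not: if (!base) */
			return PTR_ERR(base);

		writel(0x0, base);
		return 0;
	}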
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 33da25b81615..267a880811d6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1838,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
adreno_gpu->base.hw_apriv = true;
+ /*
+ * For now only clamp to idle freq for devices where this is known not
+ * to cause power supply issues:
+ */
+ if (info && (info->revn == 618))
+ gpu->clamp_to_idle = true;
+
a6xx_llc_slices_init(pdev, a6xx_gpu);
ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index e8f65cd8eca6..7501849ed15d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -180,7 +180,7 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
msm_readl((ptr) + ((offset) << 2))
/* read a value from the CX debug bus */
-static int cx_debugbus_read(void *__iomem cxdbg, u32 block, u32 offset,
+static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
u32 *data)
{
u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
deleted file mode 100644
index d2457490930b..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/debugfs.h>
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-
-#include "dpu_core_irq.h"
-#include "dpu_trace.h"
-
-/**
- * dpu_core_irq_callback_handler - dispatch core interrupts
- * @arg: private data of callback handler
- * @irq_idx: interrupt index
- */
-static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
-{
- struct dpu_kms *dpu_kms = arg;
- struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
- struct dpu_irq_callback *cb;
-
- VERB("irq_idx=%d\n", irq_idx);
-
- if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
- DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
-
- atomic_inc(&irq_obj->irq_counts[irq_idx]);
-
- /*
- * Perform registered function callback
- */
- list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
- if (cb->func)
- cb->func(cb->arg, irq_idx);
-}
-
-u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
-{
- if (!dpu_kms->hw_intr ||
- !dpu_kms->hw_intr->ops.get_interrupt_status)
- return 0;
-
- if (irq_idx < 0) {
- DPU_ERROR("[%pS] invalid irq_idx=%d\n",
- __builtin_return_address(0), irq_idx);
- return 0;
- }
-
- return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
- irq_idx, clear);
-}
-
-int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
-{
- unsigned long irq_flags;
-
- if (!dpu_kms->irq_obj.irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
- return -EINVAL;
- }
-
- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
- DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
- return -EINVAL;
- }
-
- VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
-
- irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
- trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- list_add_tail(&register_irq_cb->list,
- &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
- if (list_is_first(&register_irq_cb->list,
- &dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
- int ret = dpu_kms->hw_intr->ops.enable_irq_locked(
- dpu_kms->hw_intr,
- irq_idx);
- if (ret)
- DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
- irq_idx);
- }
- dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);
-
- return 0;
-}
-
-int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
-{
- unsigned long irq_flags;
-
- if (!dpu_kms->irq_obj.irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
- return -EINVAL;
- }
-
- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
- DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
- return -EINVAL;
- }
-
- VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
-
- irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
- trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- /* empty callback list but interrupt is still enabled */
- if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
- int ret = dpu_kms->hw_intr->ops.disable_irq_locked(
- dpu_kms->hw_intr,
- irq_idx);
- if (ret)
- DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
- irq_idx);
- VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
- }
- dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);
-
- return 0;
-}
-
-static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
-{
- if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs)
- return;
-
- dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
-}
-
-static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
-{
- if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs)
- return;
-
- dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
-{
- struct dpu_kms *dpu_kms = s->private;
- struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
- struct dpu_irq_callback *cb;
- unsigned long irq_flags;
- int i, irq_count, cb_count;
-
- if (WARN_ON(!irq_obj->irq_cb_tbl))
- return 0;
-
- for (i = 0; i < irq_obj->total_irqs; i++) {
- irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
- cb_count = 0;
- irq_count = atomic_read(&irq_obj->irq_counts[i]);
- list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
- cb_count++;
- dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);
-
- if (irq_count || cb_count)
- seq_printf(s, "idx:%d irq:%d cb:%d\n",
- i, irq_count, cb_count);
- }
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
-
-void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
- struct dentry *parent)
-{
- debugfs_create_file("core_irq", 0600, parent, dpu_kms,
- &dpu_debugfs_core_irq_fops);
-}
-#endif
-
-void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
-{
- int i;
-
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- dpu_clear_all_irqs(dpu_kms);
- dpu_disable_all_irqs(dpu_kms);
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- /* Create irq callbacks for all possible irq_idx */
- dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs;
- dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
- sizeof(struct list_head), GFP_KERNEL);
- dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
- sizeof(atomic_t), GFP_KERNEL);
- for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
- INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
- atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
- }
-}
-
-void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
-{
- int i;
-
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
- if (!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
- DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
-
- dpu_clear_all_irqs(dpu_kms);
- dpu_disable_all_irqs(dpu_kms);
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- kfree(dpu_kms->irq_obj.irq_cb_tbl);
- kfree(dpu_kms->irq_obj.irq_counts);
- dpu_kms->irq_obj.irq_cb_tbl = NULL;
- dpu_kms->irq_obj.irq_counts = NULL;
- dpu_kms->irq_obj.total_irqs = 0;
-}
-
-irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
-{
- /*
- * Dispatch to HW driver to handle interrupt lookup that is being
- * fired. When matching interrupt is located, HW driver will call to
- * dpu_core_irq_callback_handler with the irq_idx from the lookup table.
- * dpu_core_irq_callback_handler will perform the registered function
- * callback, and do the interrupt status clearing once the registered
- * callback is finished.
- * Function will also clear the interrupt status after reading.
- */
- dpu_kms->hw_intr->ops.dispatch_irqs(
- dpu_kms->hw_intr,
- dpu_core_irq_callback_handler,
- dpu_kms);
-
- return IRQ_HANDLED;
-}
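
The deletion above removes no functionality: the callback-table bookkeeping is re-homed into dpu_hw_interrupts.c further down in this series, dropping the dpu_hw_intr_ops indirection along the way. The invariant both versions keep is that the hardware interrupt line is enabled exactly while the per-index callback list is non-empty; a simplified sketch of the register side (types and helpers abbreviated):

	static int register_cb_sketch(struct dpu_hw_intr *intr, int irq_idx,
				      struct dpu_irq_callback *cb)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&intr->irq_lock, flags);
		list_move_tail(&cb->list, &intr->irq_cb_tbl[irq_idx]);
		if (list_is_singular(&intr->irq_cb_tbl[irq_idx]))	/* first user */
			ret = dpu_hw_intr_enable_irq_locked(intr, irq_idx);
		spin_unlock_irqrestore(&intr->irq_lock, flags);

		return ret;
	}

Unregistration mirrors this: the list_del_init() that empties the list disables the line again.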
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 768012243b44..967245b8cc02 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -70,17 +70,147 @@ static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
return NULL;
}
-static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
+static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
{
- struct drm_encoder *encoder;
+ if (!src_name ||
+ !strcmp(src_name, "none"))
+ return DPU_CRTC_CRC_SOURCE_NONE;
+ if (!strcmp(src_name, "auto") ||
+ !strcmp(src_name, "lm"))
+ return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
+
+ return DPU_CRTC_CRC_SOURCE_INVALID;
+}
- encoder = get_encoder_from_crtc(crtc);
+static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name, size_t *values_cnt)
+{
+ enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
+ struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
+ *values_cnt = crtc_state->num_mixers;
+
+ return 0;
+}
+
+static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+{
+ enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
+ enum dpu_crtc_crc_source current_source;
+ struct dpu_crtc_state *crtc_state;
+ struct drm_device *drm_dev = crtc->dev;
+ struct dpu_crtc_mixer *m;
+
+ bool was_enabled;
+ bool enable = false;
+ int i, ret = 0;
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ ret = drm_modeset_lock(&crtc->mutex, NULL);
+
+ if (ret)
+ return ret;
+
+ enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ spin_lock_irq(&drm_dev->event_lock);
+ current_source = crtc_state->crc_source;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);
+
+ if (!was_enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+
+ if (ret)
+ goto cleanup;
+
+ } else if (was_enabled && !enable) {
+ drm_crtc_vblank_put(crtc);
+ }
+
+ spin_lock_irq(&drm_dev->event_lock);
+ crtc_state->crc_source = source;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ crtc_state->crc_frame_skip_count = 0;
+
+ for (i = 0; i < crtc_state->num_mixers; ++i) {
+ m = &crtc_state->mixers[i];
+
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+ continue;
+
+ /* Calculate MISR over 1 frame */
+ m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
+ }
+
+
+cleanup:
+ drm_modeset_unlock(&crtc->mutex);
+
+ return ret;
+}
+
+static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder = get_encoder_from_crtc(crtc);
if (!encoder) {
DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
- return false;
+ return 0;
+ }
+
+ return dpu_encoder_get_vsync_count(encoder);
+}
+
+
+static int dpu_crtc_get_crc(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *crtc_state;
+ struct dpu_crtc_mixer *m;
+ u32 crcs[CRTC_DUAL_MIXERS];
+
+ int i = 0;
+ int rc = 0;
+
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));
+
+ /* Skip first 2 frames in case of "uncooked" CRCs */
+ if (crtc_state->crc_frame_skip_count < 2) {
+ crtc_state->crc_frame_skip_count++;
+ return 0;
}
- return dpu_encoder_get_frame_count(encoder);
+ for (i = 0; i < crtc_state->num_mixers; ++i) {
+
+ m = &crtc_state->mixers[i];
+
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+ continue;
+
+ rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
+
+ if (rc) {
+ DRM_DEBUG_DRIVER("MISR read failed\n");
+ return rc;
+ }
+ }
+
+ return drm_crtc_add_crc_entry(crtc, true,
+ drm_crtc_accurate_vblank_count(crtc), crcs);
}
static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
@@ -389,6 +519,9 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
dpu_crtc->vblank_cb_time = ktime_get();
else
dpu_crtc->vblank_cb_count++;
+
+ dpu_crtc_get_crc(crtc);
+
drm_crtc_handle_vblank(crtc);
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
@@ -1332,6 +1465,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
.atomic_destroy_state = dpu_crtc_destroy_state,
.late_register = dpu_crtc_late_register,
.early_unregister = dpu_crtc_early_unregister,
+ .verify_crc_source = dpu_crtc_verify_crc_source,
+ .set_crc_source = dpu_crtc_set_crc_source,
.enable_vblank = msm_crtc_enable_vblank,
.disable_vblank = msm_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
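
With verify_crc_source/set_crc_source wired into drm_crtc_funcs, the DRM core exposes the standard crtc-N/crc/control debugfs interface, and dpu_crtc_vblank_callback() feeds each frame's MISR signatures back through drm_crtc_add_crc_entry(). The minimal shape of such hooks, independent of the dpu details (hypothetical helper, single CRC value per frame):

	static int my_verify_crc_source(struct drm_crtc *crtc,
					const char *src, size_t *cnt)
	{
		if (src && strcmp(src, "auto") && strcmp(src, "none"))
			return -EINVAL;
		*cnt = 1;			/* one CRC value per frame */
		return 0;
	}

	static int my_set_crc_source(struct drm_crtc *crtc, const char *src)
	{
		bool enable = src && strcmp(src, "none");

		/* program or stop the hardware MISR, cf. dpu_hw_lm_setup_misr() */
		return my_hw_misr_enable(crtc, enable);	/* hypothetical */
	}

Note the DRM core passes src == NULL to disable CRC generation when the debugfs file is released.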
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index cec3474340e8..ae9546ca1359 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -70,6 +70,19 @@ struct dpu_crtc_smmu_state_data {
};
/**
+ * enum dpu_crtc_crc_source: CRC source
+ * @DPU_CRTC_CRC_SOURCE_NONE: no source set
+ * @DPU_CRTC_CRC_SOURCE_LAYER_MIXER: CRC in layer mixer
+ * @DPU_CRTC_CRC_SOURCE_INVALID: Invalid source
+ */
+enum dpu_crtc_crc_source {
+ DPU_CRTC_CRC_SOURCE_NONE = 0,
+ DPU_CRTC_CRC_SOURCE_LAYER_MIXER,
+ DPU_CRTC_CRC_SOURCE_MAX,
+ DPU_CRTC_CRC_SOURCE_INVALID = -1
+};
+
+/**
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @lm_ctl: CTL Path HW driver context
@@ -139,6 +152,7 @@ struct dpu_crtc_frame_event {
* @event_lock : Spinlock around event handling code
* @phandle: Pointer to power handler
* @cur_perf : current performance committed to clock/bandwidth driver
+ * @crc_source : CRC source
*/
struct dpu_crtc {
struct drm_crtc base;
@@ -210,6 +224,9 @@ struct dpu_crtc_state {
u32 num_ctls;
struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
+
+ enum dpu_crtc_crc_source crc_source;
+ int crc_frame_skip_count;
};
#define to_dpu_crtc_state(x) \
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 0e9d3fa1544b..e7ee4cfb8461 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -168,6 +168,7 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
+ * @dp: msm_dp pointer, for DP encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
@@ -206,6 +207,8 @@ struct dpu_encoder_virt {
struct msm_display_topology topology;
u32 idle_timeout;
+
+ struct msm_dp *dp;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@@ -395,19 +398,11 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
return 0;
}
-int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc)
+int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
- struct dpu_encoder_virt *dpu_enc;
- struct dpu_encoder_phys *phys;
- int framecount = 0;
-
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- phys = dpu_enc ? dpu_enc->cur_master : NULL;
-
- if (phys && phys->ops.get_frame_count)
- framecount = phys->ops.get_frame_count(phys);
-
- return framecount;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
+ return phys ? atomic_read(&phys->vsync_cnt) : 0;
}
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
@@ -1000,8 +995,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp)
- msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode);
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS)
+ msm_dp_display_mode_set(dpu_enc->dp, drm_enc, mode, adj_mode);
list_for_each_entry(conn_iter, connector_list, head)
if (conn_iter->encoder == drm_enc)
@@ -1182,9 +1177,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
_dpu_encoder_virt_enable_helper(drm_enc);
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
- ret = msm_dp_display_enable(priv->dp,
- drm_enc);
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ ret = msm_dp_display_enable(dpu_enc->dp, drm_enc);
if (ret) {
DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
ret);
@@ -1224,8 +1218,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
/* wait for idle */
dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
- if (msm_dp_display_pre_disable(priv->dp, drm_enc))
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ if (msm_dp_display_pre_disable(dpu_enc->dp, drm_enc))
DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
}
@@ -1253,8 +1247,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
- if (msm_dp_display_disable(priv->dp, drm_enc))
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ if (msm_dp_display_disable(dpu_enc->dp, drm_enc))
DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
}
@@ -2170,7 +2164,8 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
timer_setup(&dpu_enc->vsync_event_timer,
dpu_encoder_vsync_event_handler,
0);
-
+ else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
+ dpu_enc->dp = priv->dp[disp_info->h_tile_instance[0]];
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 99a5d73c9b88..e241914a9677 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -163,9 +163,9 @@ void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
/**
- * dpu_encoder_get_frame_count - get interface frame count for the encoder.
+ * dpu_encoder_get_vsync_count - get vsync count for the encoder.
* @drm_enc: Pointer to previously created drm encoder structure
*/
-int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc);
+int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index aa01698d6b25..34a6940d12c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -42,7 +42,7 @@
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
- return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
+ return (phys_enc->split_role != ENC_ROLE_SLAVE);
}
static bool dpu_encoder_phys_cmd_mode_fixup(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 700d65e39feb..ce6f32a919e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -844,7 +844,7 @@ static const struct dpu_intf_cfg sdm845_intf[] = {
};
static const struct dpu_intf_cfg sc7180_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
};
@@ -958,12 +958,6 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
- .core_ib_ff = "6.0",
- .core_clk_ff = "1.0",
- .comp_ratio_rt =
- "NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
- .comp_ratio_nrt =
- "NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
.undersized_prefill_lines = 2,
.xtra_prefill_lines = 2,
.dest_scale_prefill_lines = 3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index d2a945a27cfa..4ade44bbd37e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -676,10 +676,6 @@ struct dpu_perf_cdp_cfg {
* @min_core_ib minimum mnoc ib vote in kbps
* @min_llcc_ib minimum llcc ib vote in kbps
* @min_dram_ib minimum dram ib vote in kbps
- * @core_ib_ff core instantaneous bandwidth fudge factor
- * @core_clk_ff core clock fudge factor
- * @comp_ratio_rt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
- * @comp_ratio_nrt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
* @undersized_prefill_lines undersized prefill in lines
* @xtra_prefill_lines extra prefill latency in lines
* @dest_scale_prefill_lines destination scaler latency in lines
@@ -702,10 +698,6 @@ struct dpu_perf_cfg {
u32 min_core_ib;
u32 min_llcc_ib;
u32 min_dram_ib;
- const char *core_ib_ff;
- const char *core_clk_ff;
- const char *comp_ratio_rt;
- const char *comp_ratio_nrt;
u32 undersized_prefill_lines;
u32 xtra_prefill_lines;
u32 dest_scale_prefill_lines;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 2e816f232e85..d2b6dca487e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -3,12 +3,15 @@
*/
#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/slab.h>
+#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
+#include "dpu_trace.h"
/**
* Register offsets in MDSS register file for the interrupt registers
@@ -117,25 +120,33 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
#define DPU_IRQ_REG(irq_idx) (irq_idx / 32)
#define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32))
-static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
- int irq_idx)
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg: private data of callback handler
+ * @irq_idx: interrupt index
+ */
+static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
- int reg_idx;
+ struct dpu_irq_callback *cb;
- if (!intr)
- return;
+ VERB("irq_idx=%d\n", irq_idx);
- reg_idx = DPU_IRQ_REG(irq_idx);
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off, DPU_IRQ_MASK(irq_idx));
+ if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
+ DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
- /* ensure register writes go through */
- wmb();
+ atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);
+
+ /*
+ * Perform registered function callback
+ */
+ list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
+ if (cb->func)
+ cb->func(cb->arg, irq_idx);
}
-static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
- void (*cbfunc)(void *, int),
- void *arg)
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
int irq_idx;
u32 irq_status;
@@ -144,13 +155,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
unsigned long irq_flags;
if (!intr)
- return;
+ return IRQ_NONE;
- /*
- * The dispatcher will save the IRQ status before calling here.
- * Now need to go through each IRQ status and find matching
- * irq lookup index.
- */
spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
if (!test_bit(reg_idx, &intr->irq_mask))
@@ -178,17 +184,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
*/
while ((bit = ffs(irq_status)) != 0) {
irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);
- /*
- * Once a match on irq mask, perform a callback
- * to the given cbfunc. cbfunc will take care
- * the interrupt status clearing. If cbfunc is
- * not provided, then the interrupt clearing
- * is here.
- */
- if (cbfunc)
- cbfunc(arg, irq_idx);
- dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+ dpu_core_irq_callback_handler(dpu_kms, irq_idx);
/*
* When callback finish, clear the irq_status
@@ -203,6 +200,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
wmb();
spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return IRQ_HANDLED;
}
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
@@ -303,12 +302,13 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
return 0;
}
-static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int i;
if (!intr)
- return -EINVAL;
+ return;
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
if (test_bit(i, &intr->irq_mask))
@@ -318,16 +318,15 @@ static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
/* ensure register writes go through */
wmb();
-
- return 0;
}
-static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int i;
if (!intr)
- return -EINVAL;
+ return;
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
if (test_bit(i, &intr->irq_mask))
@@ -337,13 +336,11 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
/* ensure register writes go through */
wmb();
-
- return 0;
}
-static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
- int irq_idx, bool clear)
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
unsigned long irq_flags;
u32 intr_status;
@@ -351,6 +348,12 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
if (!intr)
return 0;
+ if (irq_idx < 0) {
+ DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+ __builtin_return_address(0), irq_idx);
+ return 0;
+ }
+
if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
return 0;
@@ -374,32 +377,6 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
return intr_status;
}
-static unsigned long dpu_hw_intr_lock(struct dpu_hw_intr *intr)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&intr->irq_lock, irq_flags);
-
- return irq_flags;
-}
-
-static void dpu_hw_intr_unlock(struct dpu_hw_intr *intr, unsigned long irq_flags)
-{
- spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-}
-
-static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
-{
- ops->enable_irq_locked = dpu_hw_intr_enable_irq_locked;
- ops->disable_irq_locked = dpu_hw_intr_disable_irq_locked;
- ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
- ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
- ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
- ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
- ops->lock = dpu_hw_intr_lock;
- ops->unlock = dpu_hw_intr_unlock;
-}
-
static void __intr_offset(struct dpu_mdss_cfg *m,
void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
@@ -421,7 +398,6 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
return ERR_PTR(-ENOMEM);
__intr_offset(m, addr, &intr->hw);
- __setup_intr_ops(&intr->ops);
intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;
@@ -443,7 +419,168 @@ void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
if (intr) {
kfree(intr->cache_irq_mask);
+
+ kfree(intr->irq_cb_tbl);
+ kfree(intr->irq_counts);
+
kfree(intr);
}
}
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms->hw_intr->irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ list_add_tail(&register_irq_cb->list,
+ &dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
+ if (list_is_first(&register_irq_cb->list,
+ &dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
+ int ret = dpu_hw_intr_enable_irq_locked(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+ }
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms->hw_intr->irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ /* empty callback list but interrupt is still enabled */
+ if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
+ int ret = dpu_hw_intr_disable_irq_locked(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idx);
+ VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
+ }
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+ struct dpu_kms *dpu_kms = s->private;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+ int i, irq_count, cb_count;
+
+ if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
+ return 0;
+
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ cb_count = 0;
+ irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
+ list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
+ cb_count++;
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ if (irq_count || cb_count)
+ seq_printf(s, "idx:%d irq:%d cb:%d\n",
+ i, irq_count, cb_count);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
+
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ debugfs_create_file("core_irq", 0600, parent, dpu_kms,
+ &dpu_debugfs_core_irq_fops);
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+ int i;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ /* Create irq callbacks for all possible irq_idx */
+ dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
+ sizeof(struct list_head), GFP_KERNEL);
+ dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+ INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
+ atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
+ }
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+ int i;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
+ DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
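
The dispatch loop folded into dpu_core_irq() above visits every asserted bit of a status register with ffs(), handling one interrupt index at a time. The core of that loop as a standalone sketch (handler callback hypothetical; the real code also masks the raw status against the cached enable state):

	#include <linux/bitops.h>

	static void dispatch_status_sketch(u32 irq_status, int reg_idx,
					   void (*handle)(int irq_idx))
	{
		int bit;

		while ((bit = ffs(irq_status)) != 0) {
			handle(DPU_IRQ_IDX(reg_idx, bit - 1));
			irq_status &= ~BIT(bit - 1);	/* let ffs() find the next bit */
		}
	}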
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index ac83c1159815..d50e78c9f148 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -32,92 +32,6 @@ enum dpu_hw_intr_reg {
#define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset)
-struct dpu_hw_intr;
-
-/**
- * Interrupt operations.
- */
-struct dpu_hw_intr_ops {
-
- /**
- * enable_irq - Enable IRQ based on lookup IRQ index
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- * @return: 0 for success, otherwise failure
- */
- int (*enable_irq_locked)(
- struct dpu_hw_intr *intr,
- int irq_idx);
-
- /**
- * disable_irq - Disable IRQ based on lookup IRQ index
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- * @return: 0 for success, otherwise failure
- */
- int (*disable_irq_locked)(
- struct dpu_hw_intr *intr,
- int irq_idx);
-
- /**
- * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
- * any asserted IRQs). Useful during reset.
- * @intr: HW interrupt handle
- * @return: 0 for success, otherwise failure
- */
- int (*clear_all_irqs)(
- struct dpu_hw_intr *intr);
-
- /**
- * disable_all_irqs - Disables all the interrupts. Useful during reset.
- * @intr: HW interrupt handle
- * @return: 0 for success, otherwise failure
- */
- int (*disable_all_irqs)(
- struct dpu_hw_intr *intr);
-
- /**
- * dispatch_irqs - IRQ dispatcher will call the given callback
- * function when a matching interrupt status bit is
- * found in the irq mapping table.
- * @intr: HW interrupt handle
- * @cbfunc: Callback function pointer
- * @arg: Argument to pass back during callback
- */
- void (*dispatch_irqs)(
- struct dpu_hw_intr *intr,
- void (*cbfunc)(void *arg, int irq_idx),
- void *arg);
-
- /**
- * get_interrupt_status - Gets HW interrupt status, and clear if set,
- * based on given lookup IRQ index.
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- * @clear: True to clear irq after read
- */
- u32 (*get_interrupt_status)(
- struct dpu_hw_intr *intr,
- int irq_idx,
- bool clear);
-
- /**
- * lock - take the IRQ lock
- * @intr: HW interrupt handle
- * @return: irq_flags for the taken spinlock
- */
- unsigned long (*lock)(
- struct dpu_hw_intr *intr);
-
- /**
- * unlock - take the IRQ lock
- * @intr: HW interrupt handle
- * @irq_flags: the irq_flags returned from lock
- */
- void (*unlock)(
- struct dpu_hw_intr *intr, unsigned long irq_flags);
-};
-
/**
* struct dpu_hw_intr: hw interrupts handling data structure
* @hw: virtual address mapping
@@ -126,15 +40,19 @@ struct dpu_hw_intr_ops {
* @save_irq_status: array of IRQ status reg storage created during init
* @total_irqs: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources
+ * @irq_cb_tbl: array of IRQ callbacks lists
+ * @irq_counts: array of IRQ counts
*/
struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
- struct dpu_hw_intr_ops ops;
u32 *cache_irq_mask;
u32 *save_irq_status;
u32 total_irqs;
spinlock_t irq_lock;
unsigned long irq_mask;
+
+ struct list_head *irq_cb_tbl;
+ atomic_t *irq_counts;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index cb6bb7a22c15..86363c0ec834 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#include "dpu_kms.h"
@@ -24,6 +25,15 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
+#define LM_MISR_CTRL 0x310
+#define LM_MISR_SIGNATURE 0x314
+#define LM_MISR_FRAME_COUNT_MASK 0xFF
+#define LM_MISR_CTRL_ENABLE BIT(8)
+#define LM_MISR_CTRL_STATUS BIT(9)
+#define LM_MISR_CTRL_STATUS_CLEAR BIT(10)
+#define LM_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+
static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -96,6 +106,48 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
}
}
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, LM_MISR_CTRL_STATUS_CLEAR);
+
+ /* Clear old MISR value (in case it's read before a new value is calculated)*/
+ wmb();
+
+ if (enable) {
+ config = (frame_count & LM_MISR_FRAME_COUNT_MASK) |
+ LM_MISR_CTRL_ENABLE | LM_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+ } else {
+ DPU_REG_WRITE(c, LM_MISR_CTRL, 0);
+ }
+
+}
+
+static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 ctrl = 0;
+
+ if (!misr_value)
+ return -EINVAL;
+
+ ctrl = DPU_REG_READ(c, LM_MISR_CTRL);
+
+ if (!(ctrl & LM_MISR_CTRL_ENABLE))
+ return -EINVAL;
+
+ if (!(ctrl & LM_MISR_CTRL_STATUS))
+ return -EINVAL;
+
+ *misr_value = DPU_REG_READ(c, LM_MISR_SIGNATURE);
+
+ return 0;
+}
+
static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
@@ -158,6 +210,8 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
+ ops->setup_misr = dpu_hw_lm_setup_misr;
+ ops->collect_misr = dpu_hw_lm_collect_misr;
}
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
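
Note the two-step handshake collect_misr() performs above: LM_MISR_CTRL must show both ENABLE and STATUS (a signature has been latched) before LM_MISR_SIGNATURE is read, otherwise -EINVAL comes back. A tolerant caller can treat that as "no CRC for this frame" rather than a hard failure; a caller-side sketch (loosely mirrors dpu_crtc_get_crc() earlier in this series):

	u32 crc;
	int rc = lm->ops.collect_misr(lm, &crc);

	if (rc)
		return 0;	/* MISR disabled, or no signature latched yet */

	return drm_crtc_add_crc_entry(crtc, true,
				      drm_crtc_accurate_vblank_count(crtc),
				      &crc);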
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 4a6b2de19ef6..d8052fb2d5da 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_LM_H
@@ -53,6 +54,16 @@ struct dpu_hw_lm_ops {
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en);
+
+ /**
+ * setup_misr: Enable/disable MISR
+ */
+ void (*setup_misr)(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count);
+
+ /**
+ * collect_misr: Read MISR signature
+ */
+ int (*collect_misr)(struct dpu_hw_mixer *ctx, u32 *misr_value);
};
struct dpu_hw_mixer {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 69eed7932486..f9460672176a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -138,11 +138,13 @@ static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
u32 *idx)
{
int rc = 0;
- const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk;
+ const struct dpu_sspp_sub_blks *sblk;
- if (!ctx)
+ if (!ctx || !ctx->cap || !ctx->cap->sblk)
return -EINVAL;
+ sblk = ctx->cap->sblk;
+
switch (s_id) {
case DPU_SSPP_SRC:
*idx = sblk->src_blk.base;
@@ -419,7 +421,7 @@ static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
(void)pe;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
- || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+ || !scaler3_cfg)
return;
dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index ff3cffde84cd..6d4911957e33 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_UTIL_H
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index ae48f41821cf..a15b26428280 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -188,6 +188,7 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
struct dentry *entry;
struct drm_device *dev;
struct msm_drm_private *priv;
+ int i;
if (!p)
return -EINVAL;
@@ -203,8 +204,10 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
dpu_debugfs_vbif_init(dpu_kms, entry);
dpu_debugfs_core_irq_init(dpu_kms, entry);
- if (priv->dp)
- msm_dp_debugfs_init(priv->dp, minor);
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (priv->dp[i])
+ msm_dp_debugfs_init(priv->dp[i], minor);
+ }
return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
@@ -544,35 +547,42 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
{
struct drm_encoder *encoder = NULL;
struct msm_display_info info;
- int rc = 0;
+ int rc;
+ int i;
- if (!priv->dp)
- return rc;
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (!priv->dp[i])
+ continue;
- encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
- if (IS_ERR(encoder)) {
- DPU_ERROR("encoder init failed for dsi display\n");
- return PTR_ERR(encoder);
- }
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
- memset(&info, 0, sizeof(info));
- rc = msm_dp_modeset_init(priv->dp, dev, encoder);
- if (rc) {
- DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
- drm_encoder_cleanup(encoder);
- return rc;
- }
+ memset(&info, 0, sizeof(info));
+ rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
+ drm_encoder_cleanup(encoder);
+ return rc;
+ }
- priv->encoders[priv->num_encoders++] = encoder;
+ priv->encoders[priv->num_encoders++] = encoder;
- info.num_of_h_tiles = 1;
- info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
- info.intf_type = encoder->encoder_type;
- rc = dpu_encoder_setup(dev, encoder, &info);
- if (rc)
- DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
- encoder->base.id, rc);
- return rc;
+ info.num_of_h_tiles = 1;
+ info.h_tile_instance[0] = i;
+ info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
+ info.intf_type = encoder->encoder_type;
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc) {
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+ return rc;
+ }
+ }
+
+ return 0;
}
/**
@@ -792,6 +802,7 @@ static int dpu_irq_postinstall(struct msm_kms *kms)
{
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int i;
if (!dpu_kms || !dpu_kms->dev)
return -EINVAL;
@@ -800,7 +811,8 @@ static int dpu_irq_postinstall(struct msm_kms *kms)
if (!priv)
return -EINVAL;
- msm_dp_irq_postinstall(priv->dp);
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
+ msm_dp_irq_postinstall(priv->dp[i]);
return 0;
}
@@ -908,6 +920,10 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
return 0;
mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(domain);
+ return PTR_ERR(mmu);
+ }
aspace = msm_gem_address_space_create(mmu, "dpu1",
0x1000, 0x100000000 - 0x1000);
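
Two independent fixes land in dpu_kms.c here: priv->dp becomes an array of DP controllers (each getting its own TMDS encoder, with h_tile_instance[0] recording which controller the encoder serves), and _dpu_kms_mmu_init() stops leaking the iommu domain when msm_iommu_new() fails. The error path follows the usual ownership rule, sketched below on the assumption that msm_iommu_new() takes ownership of the domain only on success:

	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
	struct msm_mmu *mmu;

	if (!domain)
		return 0;			/* no IOMMU: nothing to map */

	mmu = msm_iommu_new(dev, domain);
	if (IS_ERR(mmu)) {
		iommu_domain_free(domain);	/* still ours on failure */
		return PTR_ERR(mmu);
	}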
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 323a6bce9e64..775bcbda860f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -78,18 +78,6 @@ struct dpu_irq_callback {
void *arg;
};
-/**
- * struct dpu_irq: IRQ structure contains callback registration info
- * @total_irq: total number of irq_idx obtained from HW interrupts mapping
- * @irq_cb_tbl: array of IRQ callbacks setting
- * @debugfs_file: debugfs file for irq statistics
- */
-struct dpu_irq {
- u32 total_irqs;
- struct list_head *irq_cb_tbl;
- atomic_t *irq_counts;
-};
-
struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
@@ -104,7 +92,6 @@ struct dpu_kms {
struct regulator *venus;
struct dpu_hw_intr *hw_intr;
- struct dpu_irq irq_obj;
struct dpu_core_perf perf;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index c989621209aa..a3e3b9d1b82e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1193,7 +1193,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
if (DPU_FORMAT_IS_YUV(fmt))
_dpu_plane_setup_csc(pdpu);
else
- pdpu->csc_ptr = 0;
+ pdpu->csc_ptr = NULL;
}
_dpu_plane_set_qos_lut(plane, fb);
@@ -1330,7 +1330,7 @@ static void dpu_plane_reset(struct drm_plane *plane)
/* remove previous state, if present */
if (plane->state) {
dpu_plane_destroy_state(plane, plane->state);
- plane->state = 0;
+ plane->state = NULL;
}
pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index cdcaf470f148..5a33bb148e9e 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -173,12 +173,9 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
DBG("");
clk_disable_unprepare(mdp4_kms->clk);
- if (mdp4_kms->pclk)
- clk_disable_unprepare(mdp4_kms->pclk);
- if (mdp4_kms->lut_clk)
- clk_disable_unprepare(mdp4_kms->lut_clk);
- if (mdp4_kms->axi_clk)
- clk_disable_unprepare(mdp4_kms->axi_clk);
+ clk_disable_unprepare(mdp4_kms->pclk);
+ clk_disable_unprepare(mdp4_kms->lut_clk);
+ clk_disable_unprepare(mdp4_kms->axi_clk);
return 0;
}
@@ -188,12 +185,9 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
DBG("");
clk_prepare_enable(mdp4_kms->clk);
- if (mdp4_kms->pclk)
- clk_prepare_enable(mdp4_kms->pclk);
- if (mdp4_kms->lut_clk)
- clk_prepare_enable(mdp4_kms->lut_clk);
- if (mdp4_kms->axi_clk)
- clk_prepare_enable(mdp4_kms->axi_clk);
+ clk_prepare_enable(mdp4_kms->pclk);
+ clk_prepare_enable(mdp4_kms->lut_clk);
+ clk_prepare_enable(mdp4_kms->axi_clk);
return 0;
}
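
The guard removals above (and the matching mdp5 ones below) rely on the clk API's NULL tolerance: clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) is a no-op, which is exactly what an absent optional clock looks like. A probe-side sketch (hypothetical clock name):

	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_enable(struct device *dev)
	{
		struct clk *lut = devm_clk_get_optional(dev, "lut");

		if (IS_ERR(lut))
			return PTR_ERR(lut);

		return clk_prepare_enable(lut);	/* NULL clk: returns 0 */
	}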
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 9741544ffc35..1bf9ff5dbabc 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -752,6 +752,94 @@ const struct mdp5_cfg_hw msm8x76_config = {
.max_clk = 360000000,
};
+static const struct mdp5_cfg_hw msm8x53_config = {
+ .name = "msm8x53",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 3,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR },
+ { .id = 1, .pp = 1, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY },
+ },
+ .nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+
+ },
+ .pp = {
+ .count = 2,
+ .base = { 0x70000, 0x70800 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .max_clk = 400000000,
+};
+
static const struct mdp5_cfg_hw msm8917_config = {
.name = "msm8917",
.mdp = {
@@ -1151,6 +1239,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 7, .config = { .hw = &msm8x96_config } },
{ .revision = 11, .config = { .hw = &msm8x76_config } },
{ .revision = 15, .config = { .hw = &msm8917_config } },
+ { .revision = 16, .config = { .hw = &msm8x53_config } },
};
static const struct mdp5_cfg_handler cfg_handlers_v3[] = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index b3b42672b2d4..7b242246d4e7 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -295,15 +295,12 @@ static int mdp5_disable(struct mdp5_kms *mdp5_kms)
mdp5_kms->enable_count--;
WARN_ON(mdp5_kms->enable_count < 0);
- if (mdp5_kms->tbu_rt_clk)
- clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
- if (mdp5_kms->tbu_clk)
- clk_disable_unprepare(mdp5_kms->tbu_clk);
+ clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
+ clk_disable_unprepare(mdp5_kms->tbu_clk);
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
- if (mdp5_kms->lut_clk)
- clk_disable_unprepare(mdp5_kms->lut_clk);
+ clk_disable_unprepare(mdp5_kms->lut_clk);
return 0;
}
@@ -317,12 +314,9 @@ static int mdp5_enable(struct mdp5_kms *mdp5_kms)
clk_prepare_enable(mdp5_kms->ahb_clk);
clk_prepare_enable(mdp5_kms->axi_clk);
clk_prepare_enable(mdp5_kms->core_clk);
- if (mdp5_kms->lut_clk)
- clk_prepare_enable(mdp5_kms->lut_clk);
- if (mdp5_kms->tbu_clk)
- clk_prepare_enable(mdp5_kms->tbu_clk);
- if (mdp5_kms->tbu_rt_clk)
- clk_prepare_enable(mdp5_kms->tbu_rt_clk);
+ clk_prepare_enable(mdp5_kms->lut_clk);
+ clk_prepare_enable(mdp5_kms->tbu_clk);
+ clk_prepare_enable(mdp5_kms->tbu_rt_clk);
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 2f4895bcb0b0..0ea53420bc40 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -136,10 +136,8 @@ static int mdp5_mdss_enable(struct msm_mdss *mdss)
DBG("");
clk_prepare_enable(mdp5_mdss->ahb_clk);
- if (mdp5_mdss->axi_clk)
- clk_prepare_enable(mdp5_mdss->axi_clk);
- if (mdp5_mdss->vsync_clk)
- clk_prepare_enable(mdp5_mdss->vsync_clk);
+ clk_prepare_enable(mdp5_mdss->axi_clk);
+ clk_prepare_enable(mdp5_mdss->vsync_clk);
return 0;
}
@@ -149,10 +147,8 @@ static int mdp5_mdss_disable(struct msm_mdss *mdss)
struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- if (mdp5_mdss->vsync_clk)
- clk_disable_unprepare(mdp5_mdss->vsync_clk);
- if (mdp5_mdss->axi_clk)
- clk_disable_unprepare(mdp5_mdss->axi_clk);
+ clk_disable_unprepare(mdp5_mdss->vsync_clk);
+ clk_disable_unprepare(mdp5_mdss->axi_clk);
clk_disable_unprepare(mdp5_mdss->ahb_clk);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index cabe15190ec1..2e1acb1bc390 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -126,8 +126,12 @@ void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
priv = drm_dev->dev_private;
kms = priv->kms;
- if (priv->dp)
- msm_dp_snapshot(disp_state, priv->dp);
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (!priv->dp[i])
+ continue;
+
+ msm_dp_snapshot(disp_state, priv->dp[i]);
+ }
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
if (!priv->dsi[i])
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index cc2bb8295329..6ae9b29044b6 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -24,15 +24,6 @@
#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
-#define MSM_DP_CONTROLLER_AHB_OFFSET 0x0000
-#define MSM_DP_CONTROLLER_AHB_SIZE 0x0200
-#define MSM_DP_CONTROLLER_AUX_OFFSET 0x0200
-#define MSM_DP_CONTROLLER_AUX_SIZE 0x0200
-#define MSM_DP_CONTROLLER_LINK_OFFSET 0x0400
-#define MSM_DP_CONTROLLER_LINK_SIZE 0x0C00
-#define MSM_DP_CONTROLLER_P0_OFFSET 0x1000
-#define MSM_DP_CONTROLLER_P0_SIZE 0x0400
-
#define DP_INTERRUPT_STATUS1 \
(DP_INTR_AUX_I2C_DONE| \
DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
@@ -66,82 +57,77 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
+ struct dss_io_data *dss = &catalog->io->dp_controller;
- msm_disp_snapshot_add_block(disp_state, catalog->io->dp_controller.len,
- catalog->io->dp_controller.base, "dp_ctrl");
+ msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb");
+ msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux");
+ msm_disp_snapshot_add_block(disp_state, dss->link.len, dss->link.base, "dp_link");
+ msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0");
}
static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
{
- offset += MSM_DP_CONTROLLER_AUX_OFFSET;
- return readl_relaxed(catalog->io->dp_controller.base + offset);
+ return readl_relaxed(catalog->io->dp_controller.aux.base + offset);
}
static inline void dp_write_aux(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
- offset += MSM_DP_CONTROLLER_AUX_OFFSET;
/*
* To make sure aux reg writes happens before any other operation,
* this function uses writel() instread of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.base + offset);
+ writel(data, catalog->io->dp_controller.aux.base + offset);
}
static inline u32 dp_read_ahb(struct dp_catalog_private *catalog, u32 offset)
{
- offset += MSM_DP_CONTROLLER_AHB_OFFSET;
- return readl_relaxed(catalog->io->dp_controller.base + offset);
+ return readl_relaxed(catalog->io->dp_controller.ahb.base + offset);
}
static inline void dp_write_ahb(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
- offset += MSM_DP_CONTROLLER_AHB_OFFSET;
/*
* To make sure phy reg writes happens before any other operation,
* this function uses writel() instread of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.base + offset);
+ writel(data, catalog->io->dp_controller.ahb.base + offset);
}
static inline void dp_write_p0(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
- offset += MSM_DP_CONTROLLER_P0_OFFSET;
/*
* To make sure interface reg writes happens before any other operation,
* this function uses writel() instread of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.base + offset);
+ writel(data, catalog->io->dp_controller.p0.base + offset);
}
static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
u32 offset)
{
- offset += MSM_DP_CONTROLLER_P0_OFFSET;
/*
* To make sure interface reg writes happens before any other operation,
* this function uses writel() instread of writel_relaxed()
*/
- return readl_relaxed(catalog->io->dp_controller.base + offset);
+ return readl_relaxed(catalog->io->dp_controller.p0.base + offset);
}
static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset)
{
- offset += MSM_DP_CONTROLLER_LINK_OFFSET;
- return readl_relaxed(catalog->io->dp_controller.base + offset);
+ return readl_relaxed(catalog->io->dp_controller.link.base + offset);
}
static inline void dp_write_link(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
- offset += MSM_DP_CONTROLLER_LINK_OFFSET;
/*
* To make sure link reg writes happen before any other operation,
* this function uses writel() instead of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.base + offset);
+ writel(data, catalog->io->dp_controller.link.base + offset);
}
/* aux related catalog functions */
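A note on the pattern above: every accessor now indexes a dedicated dss_io_region instead of adding a MSM_DP_CONTROLLER_*_OFFSET constant to one shared base, which is why the offset defines could be deleted outright. A minimal sketch of the idiom (names here are illustrative, not the driver's API):

#include <linux/io.h>

struct mmio_region {
	size_t len;
	void __iomem *base;
};

static inline u32 region_read(const struct mmio_region *r, u32 offset)
{
	return readl_relaxed(r->base + offset);
}

static inline void region_write(const struct mmio_region *r, u32 offset, u32 data)
{
	/* writel() orders the store against subsequent accesses */
	writel(data, r->base + offset);
}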
@@ -276,29 +262,21 @@ static void dump_regs(void __iomem *base, int len)
void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
{
- u32 offset, len;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
+ struct dss_io_data *io = &catalog->io->dp_controller;
pr_info("AHB regs\n");
- offset = MSM_DP_CONTROLLER_AHB_OFFSET;
- len = MSM_DP_CONTROLLER_AHB_SIZE;
- dump_regs(catalog->io->dp_controller.base + offset, len);
+ dump_regs(io->ahb.base, io->ahb.len);
pr_info("AUXCLK regs\n");
- offset = MSM_DP_CONTROLLER_AUX_OFFSET;
- len = MSM_DP_CONTROLLER_AUX_SIZE;
- dump_regs(catalog->io->dp_controller.base + offset, len);
+ dump_regs(io->aux.base, io->aux.len);
pr_info("LCLK regs\n");
- offset = MSM_DP_CONTROLLER_LINK_OFFSET;
- len = MSM_DP_CONTROLLER_LINK_SIZE;
- dump_regs(catalog->io->dp_controller.base + offset, len);
+ dump_regs(io->link.base, io->link.len);
pr_info("P0CLK regs\n");
- offset = MSM_DP_CONTROLLER_P0_OFFSET;
- len = MSM_DP_CONTROLLER_P0_SIZE;
- dump_regs(catalog->io->dp_controller.base + offset, len);
+ dump_regs(io->p0.base, io->p0.len);
}
u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
@@ -493,8 +471,7 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
/* Poll for mainlink ready status */
- ret = readx_poll_timeout(readl, catalog->io->dp_controller.base +
- MSM_DP_CONTROLLER_LINK_OFFSET +
+ ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base +
REG_DP_MAINLINK_READY,
data, data & bit,
POLLING_SLEEP_US, POLLING_TIMEOUT_US);
@@ -541,8 +518,7 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
struct dp_catalog_private, dp_catalog);
/* Poll for mainlink ready status */
- ret = readl_poll_timeout(catalog->io->dp_controller.base +
- MSM_DP_CONTROLLER_LINK_OFFSET +
+ ret = readl_poll_timeout(catalog->io->dp_controller.link.base +
REG_DP_MAINLINK_READY,
data, data & DP_MAINLINK_READY_FOR_VIDEO,
POLLING_SLEEP_US, POLLING_TIMEOUT_US);
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 2f6247e80e9d..da4323556ef3 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -24,240 +24,108 @@ struct dp_debug_private {
struct dp_usbpd *usbpd;
struct dp_link *link;
struct dp_panel *panel;
- struct drm_connector **connector;
+ struct drm_connector *connector;
struct device *dev;
struct drm_device *drm_dev;
struct dp_debug dp_debug;
};
-static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len)
-{
- if (rc >= *max_size) {
- DRM_ERROR("buffer overflow\n");
- return -EINVAL;
- }
- *len += rc;
- *max_size = SZ_4K - *len;
-
- return 0;
-}
-
-static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff,
- size_t count, loff_t *ppos)
+static int dp_debug_show(struct seq_file *seq, void *p)
{
- struct dp_debug_private *debug = file->private_data;
- char *buf;
- u32 len = 0, rc = 0;
+ struct dp_debug_private *debug = seq->private;
u64 lclk = 0;
- u32 max_size = SZ_4K;
u32 link_params_rate;
- struct drm_display_mode *drm_mode;
+ const struct drm_display_mode *drm_mode;
if (!debug)
return -ENODEV;
- if (*ppos)
- return 0;
-
- buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
drm_mode = &debug->panel->dp_mode.drm_mode;
- rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\tdp_panel\n\t\tmax_pclk_khz = %d\n",
+ seq_printf(seq, "\tname = %s\n", DEBUG_NAME);
+ seq_printf(seq, "\tdp_panel\n\t\tmax_pclk_khz = %d\n",
debug->panel->max_pclk_khz);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\tdrm_dp_link\n\t\trate = %u\n",
+ seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n",
debug->panel->link_info.rate);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tnum_lanes = %u\n",
+ seq_printf(seq, "\t\tnum_lanes = %u\n",
debug->panel->link_info.num_lanes);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tcapabilities = %lu\n",
+ seq_printf(seq, "\t\tcapabilities = %lu\n",
debug->panel->link_info.capabilities);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\tdp_panel_info:\n\t\tactive = %dx%d\n",
+ seq_printf(seq, "\tdp_panel_info:\n\t\tactive = %dx%d\n",
drm_mode->hdisplay,
drm_mode->vdisplay);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tback_porch = %dx%d\n",
+ seq_printf(seq, "\t\tback_porch = %dx%d\n",
drm_mode->htotal - drm_mode->hsync_end,
drm_mode->vtotal - drm_mode->vsync_end);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tfront_porch = %dx%d\n",
+ seq_printf(seq, "\t\tfront_porch = %dx%d\n",
drm_mode->hsync_start - drm_mode->hdisplay,
drm_mode->vsync_start - drm_mode->vdisplay);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tsync_width = %dx%d\n",
+ seq_printf(seq, "\t\tsync_width = %dx%d\n",
drm_mode->hsync_end - drm_mode->hsync_start,
drm_mode->vsync_end - drm_mode->vsync_start);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tactive_low = %dx%d\n",
+ seq_printf(seq, "\t\tactive_low = %dx%d\n",
debug->panel->dp_mode.h_active_low,
debug->panel->dp_mode.v_active_low);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\th_skew = %d\n",
+ seq_printf(seq, "\t\th_skew = %d\n",
drm_mode->hskew);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\trefresh rate = %d\n",
+ seq_printf(seq, "\t\trefresh rate = %d\n",
drm_mode_vrefresh(drm_mode));
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tpixel clock khz = %d\n",
+ seq_printf(seq, "\t\tpixel clock khz = %d\n",
drm_mode->clock);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tbpp = %d\n",
+ seq_printf(seq, "\t\tbpp = %d\n",
debug->panel->dp_mode.bpp);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
/* Link Information */
- rc = snprintf(buf + len, max_size,
- "\tdp_link:\n\t\ttest_requested = %d\n",
+ seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n",
debug->link->sink_request);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tnum_lanes = %d\n",
+ seq_printf(seq, "\t\tnum_lanes = %d\n",
debug->link->link_params.num_lanes);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
link_params_rate = debug->link->link_params.rate;
- rc = snprintf(buf + len, max_size,
- "\t\tbw_code = %d\n",
+ seq_printf(seq, "\t\tbw_code = %d\n",
drm_dp_link_rate_to_bw_code(link_params_rate));
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
lclk = debug->link->link_params.rate * 1000;
- rc = snprintf(buf + len, max_size,
- "\t\tlclk = %lld\n", lclk);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tv_level = %d\n",
+ seq_printf(seq, "\t\tlclk = %lld\n", lclk);
+ seq_printf(seq, "\t\tv_level = %d\n",
debug->link->phy_params.v_level);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tp_level = %d\n",
+ seq_printf(seq, "\t\tp_level = %d\n",
debug->link->phy_params.p_level);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- if (copy_to_user(user_buff, buf, len))
- goto error;
-
- *ppos += len;
- kfree(buf);
- return len;
- error:
- kfree(buf);
- return -EINVAL;
+ return 0;
}
+DEFINE_SHOW_ATTRIBUTE(dp_debug);
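DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the open handler and file_operations that the deleted dp_debug_read_info()/dp_debug_fops pair hand-rolled, and seq_file handles buffer sizing itself, which is why the overflow bookkeeping disappears. The macro expands roughly to:

static int dp_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dp_debug_show, inode->i_private);
}

static const struct file_operations dp_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = dp_debug_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};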
static int dp_test_data_show(struct seq_file *m, void *data)
{
- struct drm_device *dev;
- struct dp_debug_private *debug;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ const struct dp_debug_private *debug = m->private;
+ const struct drm_connector *connector = debug->connector;
u32 bpc;
- debug = m->private;
- dev = debug->drm_dev;
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- if (connector->status == connector_status_connected) {
- bpc = debug->link->test_video.test_bit_depth;
- seq_printf(m, "hdisplay: %d\n",
- debug->link->test_video.test_h_width);
- seq_printf(m, "vdisplay: %d\n",
- debug->link->test_video.test_v_height);
- seq_printf(m, "bpc: %u\n",
- dp_link_bit_depth_to_bpc(bpc));
- } else
- seq_puts(m, "0");
+ if (connector->status == connector_status_connected) {
+ bpc = debug->link->test_video.test_bit_depth;
+ seq_printf(m, "hdisplay: %d\n",
+ debug->link->test_video.test_h_width);
+ seq_printf(m, "vdisplay: %d\n",
+ debug->link->test_video.test_v_height);
+ seq_printf(m, "bpc: %u\n",
+ dp_link_bit_depth_to_bpc(bpc));
+ } else {
+ seq_puts(m, "0");
}
- drm_connector_list_iter_end(&conn_iter);
-
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dp_test_data);
static int dp_test_type_show(struct seq_file *m, void *data)
{
- struct dp_debug_private *debug = m->private;
- struct drm_device *dev = debug->drm_dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ const struct dp_debug_private *debug = m->private;
+ const struct drm_connector *connector = debug->connector;
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- if (connector->status == connector_status_connected)
- seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN);
- else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
+ if (connector->status == connector_status_connected)
+ seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN);
+ else
+ seq_puts(m, "0");
return 0;
}
@@ -269,14 +137,12 @@ static ssize_t dp_test_active_write(struct file *file,
{
char *input_buffer;
int status = 0;
- struct dp_debug_private *debug;
- struct drm_device *dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ const struct dp_debug_private *debug;
+ const struct drm_connector *connector;
int val = 0;
debug = ((struct seq_file *)file->private_data)->private;
- dev = debug->drm_dev;
+ connector = debug->connector;
if (len == 0)
return 0;
@@ -287,30 +153,22 @@ static ssize_t dp_test_active_write(struct file *file,
DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- if (connector->status == connector_status_connected) {
- status = kstrtoint(input_buffer, 10, &val);
- if (status < 0)
- break;
- DRM_DEBUG_DRIVER("Got %d for test active\n", val);
- /* To prevent erroneous activation of the compliance
- * testing code, only accept an actual value of 1 here
- */
- if (val == 1)
- debug->panel->video_test = true;
- else
- debug->panel->video_test = false;
+ if (connector->status == connector_status_connected) {
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0) {
+ kfree(input_buffer);
+ return status;
}
+ DRM_DEBUG_DRIVER("Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ debug->panel->video_test = true;
+ else
+ debug->panel->video_test = false;
}
- drm_connector_list_iter_end(&conn_iter);
kfree(input_buffer);
- if (status < 0)
- return status;
*offp += len;
return len;
@@ -319,25 +177,16 @@ static ssize_t dp_test_active_write(struct file *file,
static int dp_test_active_show(struct seq_file *m, void *data)
{
struct dp_debug_private *debug = m->private;
- struct drm_device *dev = debug->drm_dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- if (connector->status == connector_status_connected) {
- if (debug->panel->video_test)
- seq_puts(m, "1");
- else
- seq_puts(m, "0");
- } else
+ struct drm_connector *connector = debug->connector;
+
+ if (connector->status == connector_status_connected) {
+ if (debug->panel->video_test)
+ seq_puts(m, "1");
+ else
seq_puts(m, "0");
+ } else {
+ seq_puts(m, "0");
}
- drm_connector_list_iter_end(&conn_iter);
return 0;
}
@@ -349,11 +198,6 @@ static int dp_test_active_open(struct inode *inode,
inode->i_private);
}
-static const struct file_operations dp_debug_fops = {
- .open = simple_open,
- .read = dp_debug_read_info,
-};
-
static const struct file_operations test_active_fops = {
.owner = THIS_MODULE,
.open = dp_test_active_open,
@@ -391,7 +235,7 @@ static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
- struct drm_connector **connector, struct drm_minor *minor)
+ struct drm_connector *connector, struct drm_minor *minor)
{
int rc = 0;
struct dp_debug_private *debug;
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 7eaedfbb149c..8c0d0b5178fd 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -43,7 +43,7 @@ struct dp_debug {
*/
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
- struct drm_connector **connector,
+ struct drm_connector *connector,
struct drm_minor *minor);
/**
@@ -60,7 +60,7 @@ void dp_debug_put(struct dp_debug *dp_debug);
static inline
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
- struct drm_connector **connector, struct drm_minor *minor)
+ struct drm_connector *connector, struct drm_minor *minor)
{
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a0392e4d8134..aba8aa47ed76 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -10,6 +10,7 @@
#include <linux/component.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
+#include <drm/drm_panel.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -27,7 +28,6 @@
#include "dp_audio.h"
#include "dp_debug.h"
-static struct msm_dp *g_dp_display;
#define HPD_STRING_SIZE 30
enum {
@@ -79,6 +79,8 @@ struct dp_display_private {
char *name;
int irq;
+ unsigned int id;
+
/* state variables */
bool core_initialized;
bool hpd_irq_on;
@@ -116,11 +118,35 @@ struct dp_display_private {
struct dp_audio *audio;
};
+struct msm_dp_desc {
+ phys_addr_t io_start;
+ unsigned int connector_type;
+};
+
+struct msm_dp_config {
+ const struct msm_dp_desc *descs;
+ size_t num_descs;
+};
+
+static const struct msm_dp_config sc7180_dp_cfg = {
+ .descs = (const struct msm_dp_desc[]) {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ },
+ .num_descs = 1,
+};
+
static const struct of_device_id dp_dt_match[] = {
- {.compatible = "qcom,sc7180-dp"},
+ { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg },
{}
};
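The desc/config indirection is what makes multiple controllers per SoC possible: probe matches its instance by MMIO start address and records the index in dp->id. A hypothetical two-instance table (the second entry and both addresses are invented for illustration) would look like:

static const struct msm_dp_config hypothetical_dp_cfg = {
	.descs = (const struct msm_dp_desc[]) {
		[MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
		[MSM_DP_CONTROLLER_1] = { .io_start = 0x0ae98000, .connector_type = DRM_MODE_CONNECTOR_eDP },
	},
	.num_descs = 2,
};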
+static struct dp_display_private *dev_get_dp_display_private(struct device *dev)
+{
+ struct msm_dp *dp = dev_get_drvdata(dev);
+
+ return container_of(dp, struct dp_display_private, dp_display);
+}
+
static int dp_add_event(struct dp_display_private *dp_priv, u32 event,
u32 data, u32 delay)
{
@@ -197,25 +223,24 @@ static int dp_display_bind(struct device *dev, struct device *master,
void *data)
{
int rc = 0;
- struct dp_display_private *dp;
- struct drm_device *drm;
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv;
+ struct drm_device *drm;
drm = dev_get_drvdata(master);
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
-
dp->dp_display.drm_dev = drm;
priv = drm->dev_private;
- priv->dp = &(dp->dp_display);
+ priv->dp[dp->id] = &dp->dp_display;
- rc = dp->parser->parse(dp->parser);
+ rc = dp->parser->parse(dp->parser, dp->dp_display.connector_type);
if (rc) {
DRM_ERROR("device tree parsing failed\n");
goto end;
}
+ dp->dp_display.panel_bridge = dp->parser->panel_bridge;
+
dp->aux->drm_dev = drm;
rc = dp_aux_register(dp->aux);
if (rc) {
@@ -240,16 +265,13 @@ end:
static void dp_display_unbind(struct device *dev, struct device *master,
void *data)
{
- struct dp_display_private *dp;
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
-
dp_power_client_deinit(dp->power);
dp_aux_unregister(dp->aux);
- priv->dp = NULL;
+ priv->dp[dp->id] = NULL;
}
static const struct component_ops dp_display_comp_ops = {
@@ -379,38 +401,17 @@ static void dp_display_host_deinit(struct dp_display_private *dp)
static int dp_display_usbpd_configure_cb(struct device *dev)
{
- int rc = 0;
- struct dp_display_private *dp;
-
- if (!dev) {
- DRM_ERROR("invalid dev\n");
- rc = -EINVAL;
- goto end;
- }
-
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
dp_display_host_init(dp, false);
- rc = dp_display_process_hpd_high(dp);
-end:
- return rc;
+ return dp_display_process_hpd_high(dp);
}
static int dp_display_usbpd_disconnect_cb(struct device *dev)
{
int rc = 0;
- struct dp_display_private *dp;
-
- if (!dev) {
- DRM_ERROR("invalid dev\n");
- rc = -EINVAL;
- return rc;
- }
-
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
@@ -472,15 +473,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
{
int rc = 0;
u32 sink_request;
- struct dp_display_private *dp;
-
- if (!dev) {
- DRM_ERROR("invalid dev\n");
- return -EINVAL;
- }
-
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
/* check for any test request issued by sink */
rc = dp_link_process_request(dp->link);
@@ -647,7 +640,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
DRM_DEBUG_DP("hpd_state=%d\n", state);
/* signal the disconnect event early to ensure proper teardown */
- dp_display_handle_plugged_change(g_dp_display, false);
+ dp_display_handle_plugged_change(&dp->dp_display, false);
/* enable HPD plug interrupt to prepare for the next plug-in */
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
@@ -834,7 +827,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display,
return 0;
}
-static int dp_display_prepare(struct msm_dp *dp)
+static int dp_display_prepare(struct msm_dp *dp_display)
{
return 0;
}
@@ -842,9 +835,7 @@ static int dp_display_prepare(struct msm_dp *dp)
static int dp_display_enable(struct dp_display_private *dp, u32 data)
{
int rc = 0;
- struct msm_dp *dp_display;
-
- dp_display = g_dp_display;
+ struct msm_dp *dp_display = &dp->dp_display;
DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count);
if (dp_display->power_on) {
@@ -880,9 +871,7 @@ static int dp_display_post_enable(struct msm_dp *dp_display)
static int dp_display_disable(struct dp_display_private *dp, u32 data)
{
- struct msm_dp *dp_display;
-
- dp_display = g_dp_display;
+ struct msm_dp *dp_display = &dp->dp_display;
if (!dp_display->power_on)
return 0;
@@ -912,7 +901,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
return 0;
}
-static int dp_display_unprepare(struct msm_dp *dp)
+static int dp_display_unprepare(struct msm_dp *dp_display)
{
return 0;
}
@@ -1213,10 +1202,33 @@ int dp_display_request_irq(struct msm_dp *dp_display)
return 0;
}
+static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev,
+ unsigned int *id)
+{
+ const struct msm_dp_config *cfg = of_device_get_match_data(&pdev->dev);
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return NULL;
+
+ for (i = 0; i < cfg->num_descs; i++) {
+ if (cfg->descs[i].io_start == res->start) {
+ *id = i;
+ return &cfg->descs[i];
+ }
+ }
+
+ dev_err(&pdev->dev, "unknown DisplayPort instance\n");
+ return NULL;
+}
+
static int dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
struct dp_display_private *dp;
+ const struct msm_dp_desc *desc;
if (!pdev || !pdev->dev.of_node) {
DRM_ERROR("pdev not found\n");
@@ -1227,8 +1239,13 @@ static int dp_display_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
+ desc = dp_display_get_desc(pdev, &dp->id);
+ if (!desc)
+ return -EINVAL;
+
dp->pdev = pdev;
dp->name = "drm_dp";
+ dp->dp_display.connector_type = desc->connector_type;
rc = dp_init_sub_modules(dp);
if (rc) {
@@ -1237,14 +1254,13 @@ static int dp_display_probe(struct platform_device *pdev)
}
mutex_init(&dp->event_mutex);
- g_dp_display = &dp->dp_display;
/* Store DP audio handle inside DP display */
- g_dp_display->dp_audio = dp->audio;
+ dp->dp_display.dp_audio = dp->audio;
init_completion(&dp->audio_comp);
- platform_set_drvdata(pdev, g_dp_display);
+ platform_set_drvdata(pdev, &dp->dp_display);
rc = component_add(&pdev->dev, &dp_display_comp_ops);
if (rc) {
@@ -1257,10 +1273,7 @@ static int dp_display_probe(struct platform_device *pdev)
static int dp_display_remove(struct platform_device *pdev)
{
- struct dp_display_private *dp;
-
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
+ struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
dp_display_deinit_sub_modules(dp);
@@ -1315,7 +1328,7 @@ static int dp_pm_resume(struct device *dev)
dp->dp_display.is_connected = true;
} else {
dp->dp_display.is_connected = false;
- dp_display_handle_plugged_change(g_dp_display, false);
+ dp_display_handle_plugged_change(dp_display, false);
}
DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
@@ -1429,7 +1442,7 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
dev = &dp->pdev->dev;
dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
- dp->link, &dp->dp_display.connector,
+ dp->link, dp->dp_display.connector,
minor);
if (IS_ERR(dp->debug)) {
rc = PTR_ERR(dp->debug);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 8b47cdabb67e..8e80e3bac394 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -15,9 +15,11 @@ struct msm_dp {
struct device *codec_dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
+ struct drm_bridge *panel_bridge;
bool is_connected;
bool audio_enabled;
bool power_on;
+ unsigned int connector_type;
hdmi_codec_plugged_cb plugged_cb;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 764f4b81017e..76856c4ee1d6 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -5,6 +5,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include "msm_drv.h"
@@ -147,7 +148,7 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
ret = drm_connector_init(dp_display->drm_dev, connector,
&dp_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ dp_display->connector_type);
if (ret)
return ERR_PTR(ret);
@@ -160,5 +161,15 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
drm_connector_attach_encoder(connector, dp_display->encoder);
+ if (dp_display->panel_bridge) {
+ ret = drm_bridge_attach(dp_display->encoder,
+ dp_display->panel_bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0) {
+ DRM_ERROR("failed to attach panel bridge: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ }
+
return connector;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 2181b60e1d1d..71db10c0f262 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -234,7 +234,7 @@ u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
struct dp_panel_private *panel;
- u32 bpp = mode_edid_bpp;
+ u32 bpp;
if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
DRM_ERROR("invalid input\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index 0519dd3ac3c3..a7acc23f742b 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -6,11 +6,22 @@
#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
+#include <drm/drm_bridge.h>
#include "dp_parser.h"
#include "dp_reg.h"
+#define DP_DEFAULT_AHB_OFFSET 0x0000
+#define DP_DEFAULT_AHB_SIZE 0x0200
+#define DP_DEFAULT_AUX_OFFSET 0x0200
+#define DP_DEFAULT_AUX_SIZE 0x0200
+#define DP_DEFAULT_LINK_OFFSET 0x0400
+#define DP_DEFAULT_LINK_SIZE 0x0C00
+#define DP_DEFAULT_P0_OFFSET 0x1000
+#define DP_DEFAULT_P0_SIZE 0x0400
+
static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
.num = 2,
.regs = {
@@ -19,67 +30,73 @@ static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
},
};
-static int msm_dss_ioremap(struct platform_device *pdev,
- struct dss_io_data *io_data)
-{
- struct resource *res = NULL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("%pS->%s: msm_dss_get_res failed\n",
- __builtin_return_address(0), __func__);
- return -ENODEV;
- }
-
- io_data->len = (u32)resource_size(res);
- io_data->base = ioremap(res->start, io_data->len);
- if (!io_data->base) {
- DRM_ERROR("%pS->%s: ioremap failed\n",
- __builtin_return_address(0), __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static void msm_dss_iounmap(struct dss_io_data *io_data)
+static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
{
- if (io_data->base) {
- iounmap(io_data->base);
- io_data->base = NULL;
- }
- io_data->len = 0;
-}
+ struct resource *res;
+ void __iomem *base;
-static void dp_parser_unmap_io_resources(struct dp_parser *parser)
-{
- struct dp_io *io = &parser->io;
+ base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
+ if (!IS_ERR(base))
+ *len = resource_size(res);
- msm_dss_iounmap(&io->dp_controller);
+ return base;
}
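Because devm_platform_get_and_ioremap_resource() is resource-managed and returns an ERR_PTR (-EINVAL when the indexed reg entry does not exist, which the fallback below relies on), the old msm_dss_iounmap()-based unwind path is gone. For contrast, a sketch of what each mapping used to require without devm (hypothetical names) — this is the bookkeeping the conversion deletes:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *manual_map(struct platform_device *pdev, unsigned int idx,
				size_t *len)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, idx);

	if (!res)
		return IOMEM_ERR_PTR(-EINVAL);

	*len = resource_size(res);
	return ioremap(res->start, *len);	/* caller must iounmap() on every error path */
}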
static int dp_parser_ctrl_res(struct dp_parser *parser)
{
- int rc = 0;
struct platform_device *pdev = parser->pdev;
struct dp_io *io = &parser->io;
+ struct dss_io_data *dss = &io->dp_controller;
+
+ dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
+ if (IS_ERR(dss->ahb.base))
+ return PTR_ERR(dss->ahb.base);
+
+ dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
+ if (IS_ERR(dss->aux.base)) {
+ /*
+ * The initial binding had a single reg, but in order to
+ * support variation in the sub-region sizes this was split.
+ * dp_ioremap() will fail with -EINVAL here if only a single
+ * reg is specified, so fill in the sub-region offsets and
+ * lengths based on this single region.
+ */
+ if (PTR_ERR(dss->aux.base) == -EINVAL) {
+ if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
+ DRM_ERROR("legacy memory region not large enough\n");
+ return -EINVAL;
+ }
+
+ dss->ahb.len = DP_DEFAULT_AHB_SIZE;
+ dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
+ dss->aux.len = DP_DEFAULT_AUX_SIZE;
+ dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
+ dss->link.len = DP_DEFAULT_LINK_SIZE;
+ dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
+ dss->p0.len = DP_DEFAULT_P0_SIZE;
+ } else {
+ DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
+ return PTR_ERR(dss->aux.base);
+ }
+ } else {
+ dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
+ if (IS_ERR(dss->link.base)) {
+ DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
+ return PTR_ERR(dss->link.base);
+ }
- rc = msm_dss_ioremap(pdev, &io->dp_controller);
- if (rc) {
- DRM_ERROR("unable to remap dp io resources, rc=%d\n", rc);
- goto err;
+ dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
+ if (IS_ERR(dss->p0.base)) {
+ DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
+ return PTR_ERR(dss->p0.base);
+ }
}
io->phy = devm_phy_get(&pdev->dev, "dp");
- if (IS_ERR(io->phy)) {
- rc = PTR_ERR(io->phy);
- goto err;
- }
+ if (IS_ERR(io->phy))
+ return PTR_ERR(io->phy);
return 0;
-err:
- dp_parser_unmap_io_resources(parser);
- return rc;
}
static int dp_parser_misc(struct dp_parser *parser)
@@ -248,7 +265,28 @@ static int dp_parser_clock(struct dp_parser *parser)
return 0;
}
-static int dp_parser_parse(struct dp_parser *parser)
+static int dp_parser_find_panel(struct dp_parser *parser)
+{
+ struct device *dev = &parser->pdev->dev;
+ struct drm_panel *panel;
+ int rc;
+
+ rc = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
+ if (rc) {
+ DRM_ERROR("failed to acquire DRM panel: %d\n", rc);
+ return rc;
+ }
+
+ parser->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(parser->panel_bridge)) {
+ DRM_ERROR("failed to create panel bridge\n");
+ return PTR_ERR(parser->panel_bridge);
+ }
+
+ return 0;
+}
+
+static int dp_parser_parse(struct dp_parser *parser, int connector_type)
{
int rc = 0;
@@ -269,6 +307,12 @@ static int dp_parser_parse(struct dp_parser *parser)
if (rc)
return rc;
+ if (connector_type == DRM_MODE_CONNECTOR_eDP) {
+ rc = dp_parser_find_panel(parser);
+ if (rc)
+ return rc;
+ }
+
/* Map the corresponding regulator information according to
* version. Currently, since we only have one supported platform,
* we map the regulator directly.
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 34b49628bbaf..3172da089421 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -25,11 +25,18 @@ enum dp_pm_type {
DP_MAX_PM
};
-struct dss_io_data {
- u32 len;
+struct dss_io_region {
+ size_t len;
void __iomem *base;
};
+struct dss_io_data {
+ struct dss_io_region ahb;
+ struct dss_io_region aux;
+ struct dss_io_region link;
+ struct dss_io_region p0;
+};
+
static inline const char *dp_parser_pm_name(enum dp_pm_type module)
{
switch (module) {
@@ -116,8 +123,9 @@ struct dp_parser {
struct dp_display_data disp_data;
const struct dp_regulator_cfg *regulator_cfg;
u32 max_dp_lanes;
+ struct drm_bridge *panel_bridge;
- int (*parse)(struct dp_parser *parser);
+ int (*parse)(struct dp_parser *parser, int connector_type);
};
/**
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index b50db91cb8a7..569c8ff062ba 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -107,6 +107,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
u32 dma_base, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
+void msm_dsi_host_enable_irq(struct mipi_dsi_host *host);
+void msm_dsi_host_disable_irq(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
struct msm_dsi_phy_shared_timings *phy_shared_timings,
bool is_bonded_dsi, struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index c86b5090fae6..f69a125f9559 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -106,7 +106,8 @@ struct msm_dsi_host {
phys_addr_t ctrl_size;
struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
- struct clk *bus_clks[DSI_BUS_CLK_MAX];
+ int num_bus_clks;
+ struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX];
struct clk *byte_clk;
struct clk *esc_clk;
@@ -115,16 +116,16 @@ struct msm_dsi_host {
struct clk *pixel_clk_src;
struct clk *byte_intf_clk;
- u32 byte_clk_rate;
- u32 pixel_clk_rate;
- u32 esc_clk_rate;
+ unsigned long byte_clk_rate;
+ unsigned long pixel_clk_rate;
+ unsigned long esc_clk_rate;
/* DSI v2 specific clocks */
struct clk *src_clk;
struct clk *esc_clk_src;
struct clk *dsi_clk_src;
- u32 src_clk_rate;
+ unsigned long src_clk_rate;
struct gpio_desc *disp_en_gpio;
struct gpio_desc *te_gpio;
@@ -374,15 +375,14 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
int i, ret = 0;
/* get bus clocks */
- for (i = 0; i < cfg->num_bus_clks; i++) {
- msm_host->bus_clks[i] = msm_clk_get(pdev,
- cfg->bus_clk_names[i]);
- if (IS_ERR(msm_host->bus_clks[i])) {
- ret = PTR_ERR(msm_host->bus_clks[i]);
- pr_err("%s: Unable to get %s clock, ret = %d\n",
- __func__, cfg->bus_clk_names[i], ret);
- goto exit;
- }
+ for (i = 0; i < cfg->num_bus_clks; i++)
+ msm_host->bus_clks[i].id = cfg->bus_clk_names[i];
+ msm_host->num_bus_clks = cfg->num_bus_clks;
+
+ ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret);
+ goto exit;
}
/* get link and source clocks */
@@ -433,41 +433,6 @@ exit:
return ret;
}
-static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
-{
- const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
- int i, ret;
-
- DBG("id=%d", msm_host->id);
-
- for (i = 0; i < cfg->num_bus_clks; i++) {
- ret = clk_prepare_enable(msm_host->bus_clks[i]);
- if (ret) {
- pr_err("%s: failed to enable bus clock %d ret %d\n",
- __func__, i, ret);
- goto err;
- }
- }
-
- return 0;
-err:
- while (--i >= 0)
- clk_disable_unprepare(msm_host->bus_clks[i]);
-
- return ret;
-}
-
-static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
-{
- const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
- int i;
-
- DBG("");
-
- for (i = cfg->num_bus_clks - 1; i >= 0; i--)
- clk_disable_unprepare(msm_host->bus_clks[i]);
-}
-
int msm_dsi_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -478,7 +443,7 @@ int msm_dsi_runtime_suspend(struct device *dev)
if (!msm_host->cfg_hnd)
return 0;
- dsi_bus_clk_disable(msm_host);
+ clk_bulk_disable_unprepare(msm_host->num_bus_clks, msm_host->bus_clks);
return 0;
}
@@ -493,15 +458,15 @@ int msm_dsi_runtime_resume(struct device *dev)
if (!msm_host->cfg_hnd)
return 0;
- return dsi_bus_clk_enable(msm_host);
+ return clk_bulk_prepare_enable(msm_host->num_bus_clks, msm_host->bus_clks);
}
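clk_bulk collapses the per-clock get/enable loops and their unwind paths: devm_clk_bulk_get() fills in the consumer handles for an array of clk_bulk_data, and clk_bulk_prepare_enable() rolls back internally if any clock fails. The same pattern in isolation (clock names hypothetical):

#include <linux/clk.h>

static struct clk_bulk_data example_clks[] = {
	{ .id = "iface" },	/* hypothetical clock names */
	{ .id = "bus" },
};

static int example_clks_on(struct device *dev)
{
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
	if (ret < 0)
		return ret;

	return clk_bulk_prepare_enable(ARRAY_SIZE(example_clks), example_clks);
}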
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
- u32 byte_intf_rate;
+ unsigned long byte_intf_rate;
int ret;
- DBG("Set clk rates: pclk=%d, byteclk=%d",
+ DBG("Set clk rates: pclk=%d, byteclk=%lu",
msm_host->mode->clock, msm_host->byte_clk_rate);
ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
@@ -558,13 +523,11 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto pixel_clk_err;
}
- if (msm_host->byte_intf_clk) {
- ret = clk_prepare_enable(msm_host->byte_intf_clk);
- if (ret) {
- pr_err("%s: Failed to enable byte intf clk\n",
- __func__);
- goto byte_intf_clk_err;
- }
+ ret = clk_prepare_enable(msm_host->byte_intf_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable byte intf clk\n",
+ __func__);
+ goto byte_intf_clk_err;
}
return 0;
@@ -583,7 +546,7 @@ int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
{
int ret;
- DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
+ DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
msm_host->mode->clock, msm_host->byte_clk_rate,
msm_host->esc_clk_rate, msm_host->src_clk_rate);
@@ -660,8 +623,7 @@ void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
dev_pm_opp_set_rate(&msm_host->pdev->dev, 0);
clk_disable_unprepare(msm_host->esc_clk);
clk_disable_unprepare(msm_host->pixel_clk);
- if (msm_host->byte_intf_clk)
- clk_disable_unprepare(msm_host->byte_intf_clk);
+ clk_disable_unprepare(msm_host->byte_intf_clk);
clk_disable_unprepare(msm_host->byte_clk);
}
@@ -673,10 +635,10 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
clk_disable_unprepare(msm_host->byte_clk);
}
-static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+static unsigned long dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
- u32 pclk_rate;
+ unsigned long pclk_rate;
pclk_rate = mode->clock * 1000;
@@ -696,7 +658,7 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
- u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi);
+ unsigned long pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi);
u64 pclk_bpp = (u64)pclk_rate * bpp;
if (lanes == 0) {
@@ -713,7 +675,7 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
msm_host->pixel_clk_rate = pclk_rate;
msm_host->byte_clk_rate = pclk_bpp;
- DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
+ DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
msm_host->byte_clk_rate);
}
@@ -772,7 +734,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
- DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
+ DBG("esc=%lu, src=%lu", msm_host->esc_clk_rate,
msm_host->src_clk_rate);
return 0;
@@ -1898,6 +1860,23 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
return ret;
}
+ msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (msm_host->irq < 0) {
+ ret = msm_host->irq;
+ dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+ return ret;
+ }
+
+ /* do not auto-enable; the IRQ will be enabled later */
+ ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ "dsi_isr", msm_host);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+ msm_host->irq, ret);
+ return ret;
+ }
+
init_completion(&msm_host->dma_comp);
init_completion(&msm_host->video_comp);
mutex_init(&msm_host->dev_mutex);
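IRQF_NO_AUTOEN (available since v5.12) makes request_irq() register the handler while leaving the line masked, avoiding the window in which an interrupt could fire before the host is ready; the new msm_dsi_host_enable_irq()/msm_dsi_host_disable_irq() helpers then bracket the active period explicitly. The idiom, reduced to a sketch with illustrative names:

#include <linux/interrupt.h>

static int example_request_irq(struct device *dev, unsigned int irq,
			       irq_handler_t handler, void *data)
{
	/* enable_irq(irq)/disable_irq(irq) later bracket the active window */
	return devm_request_irq(dev, irq, handler,
				IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
				"example_isr", data);
}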
@@ -1925,7 +1904,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
DBG("");
dsi_tx_buf_free(msm_host);
if (msm_host->workqueue) {
- flush_workqueue(msm_host->workqueue);
destroy_workqueue(msm_host->workqueue);
msm_host->workqueue = NULL;
}
@@ -1941,25 +1919,8 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- struct platform_device *pdev = msm_host->pdev;
int ret;
- msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (msm_host->irq < 0) {
- ret = msm_host->irq;
- DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
- return ret;
- }
-
- ret = devm_request_irq(&pdev->dev, msm_host->irq,
- dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "dsi_isr", msm_host);
- if (ret < 0) {
- DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
- msm_host->irq, ret);
- return ret;
- }
-
msm_host->dev = dev;
ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
@@ -2315,6 +2276,20 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
clk_req->escclk_rate = msm_host->esc_clk_rate;
}
+void msm_dsi_host_enable_irq(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ enable_irq(msm_host->irq);
+}
+
+void msm_dsi_host_disable_irq(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ disable_irq(msm_host->irq);
+}
+
int msm_dsi_host_enable(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index c41d39f5b7cf..20c4d650fd80 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -3,6 +3,8 @@
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
+#include "drm/drm_bridge_connector.h"
+
#include "msm_kms.h"
#include "dsi.h"
@@ -377,6 +379,14 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
}
}
+ /*
+ * Enable the IRQ before preparing the panel and disable it after
+ * unpreparing, so that the panel can communicate over the DSI link.
+ */
+ msm_dsi_host_enable_irq(host);
+ if (is_bonded_dsi && msm_dsi1)
+ msm_dsi_host_enable_irq(msm_dsi1->host);
+
/* Always call panel functions once, because even for dual panels,
* there is only one drm_panel instance.
*/
@@ -411,6 +421,10 @@ host_en_fail:
if (panel)
drm_panel_unprepare(panel);
panel_prep_fail:
+ msm_dsi_host_disable_irq(host);
+ if (is_bonded_dsi && msm_dsi1)
+ msm_dsi_host_disable_irq(msm_dsi1->host);
+
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_power_off(msm_dsi1->host);
host1_on_fail:
@@ -523,6 +537,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
id, ret);
}
+ msm_dsi_host_disable_irq(host);
+ if (is_bonded_dsi && msm_dsi1)
+ msm_dsi_host_disable_irq(msm_dsi1->host);
+
/* Save PHY status if it is a clock source */
msm_dsi_phy_pll_save_state(msm_dsi->phy);
@@ -688,10 +706,10 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_device *dev = msm_dsi->dev;
+ struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge *int_bridge, *ext_bridge;
- struct drm_connector *connector;
- struct list_head *connector_list;
+ int ret;
int_bridge = msm_dsi->bridge;
ext_bridge = msm_dsi->external_bridge =
@@ -699,22 +717,44 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
encoder = msm_dsi->encoder;
- /* link the internal dsi bridge to the external bridge */
- drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
-
/*
- * we need the drm_connector created by the external bridge
- * driver (or someone else) to feed it to our driver's
- * priv->connector[] list, mainly for msm_fbdev_init()
+ * Try first to create the bridge without it creating its own
+ * connector. Currently some bridges support this, and others
+ * do not (and some support both modes).
*/
- connector_list = &dev->mode_config.connector_list;
+ ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret == -EINVAL) {
+ struct drm_connector *connector;
+ struct list_head *connector_list;
+
+ /* link the internal dsi bridge to the external bridge */
+ drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
+
+ /*
+ * we need the drm_connector created by the external bridge
+ * driver (or someone else) so we can feed it into our driver's
+ * priv->connector[] list, mainly for msm_fbdev_init()
+ */
+ connector_list = &dev->mode_config.connector_list;
+
+ list_for_each_entry(connector, connector_list, head) {
+ if (drm_connector_has_possible_encoder(connector, encoder))
+ return connector;
+ }
+
+ return ERR_PTR(-ENODEV);
+ }
- list_for_each_entry(connector, connector_list, head) {
- if (drm_connector_has_possible_encoder(connector, encoder))
- return connector;
+ connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ return ERR_CAST(connector);
}
- return ERR_PTR(-ENODEV);
+ drm_connector_attach_encoder(connector, encoder);
+
+ return connector;
}
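The attach sequence above is a common migration idiom: try DRM_BRIDGE_ATTACH_NO_CONNECTOR first, and only fall back to letting the bridge create its own connector when the bridge driver rejects the flag with -EINVAL. Stripped of the msm specifics (a sketch, not the driver's exact flow):

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>

static struct drm_connector *example_attach(struct drm_device *dev,
					    struct drm_encoder *encoder,
					    struct drm_bridge *bridge)
{
	int ret;

	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret == -EINVAL) {
		/* legacy bridge: re-attach and let it create the connector */
		ret = drm_bridge_attach(encoder, bridge, NULL, 0);
		if (ret)
			return ERR_PTR(ret);
		return NULL;	/* caller must find the connector in the mode_config list */
	}
	if (ret)
		return ERR_PTR(ret);

	return drm_bridge_connector_init(dev, encoder);
}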
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 8c65ef6968ca..9842e04b5858 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -627,6 +627,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_14nm_cfgs },
{ .compatible = "qcom,dsi-phy-14nm-660",
.data = &dsi_phy_14nm_660_cfgs },
+ { .compatible = "qcom,dsi-phy-14nm-8953",
+ .data = &dsi_phy_14nm_8953_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index b91303ada74f..4c8257581bfc 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -48,6 +48,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 5b4e991f220d..7414966f198e 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -213,9 +213,7 @@ static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll, struct dsi_pll_conf
DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
- div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
-
- dec_start = div_u64(dec_start_multiple, multiplier);
+ dec_start = div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
pconf->dec_start = (u32)dec_start;
pconf->div_frac_start = div_frac_start;
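div_u64_rem() from <linux/math64.h> returns the quotient and stores the 32-bit remainder through its third argument, so the separate div_u64()/div_u64_rem() pair becomes a single division (the same simplification is applied to the 7nm PLL below). For instance:

#include <linux/math64.h>

static u64 example_div(void)
{
	u32 rem;
	u64 quot = div_u64_rem(1000000000ULL, 7, &rem);	/* quot = 142857142, rem = 6 */

	return quot + rem;
}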
@@ -1065,3 +1063,24 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
.io_start = { 0xc994400, 0xc996000 },
.num_dsi_phy = 2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {
+ .has_phy_lane = true,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vcca", 17000, 32},
+ },
+ },
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x1a94400, 0x1a96400 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index cb297b08458e..079613d2aaa9 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -114,9 +114,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
multiplier = 1 << FRAC_BITS;
dec_multiple = div_u64(pll_freq * multiplier, divider);
- div_u64_rem(dec_multiple, multiplier, &frac);
-
- dec = div_u64(dec_multiple, multiplier);
+ dec = div_u64_rem(dec_multiple, multiplier, &frac);
if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1))
config->pll_clock_inverters = 0x28;
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index fe1366b4c49f..a68a4a1867c1 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -1190,7 +1190,6 @@ void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl)
return;
if (ctrl->workqueue) {
- flush_workqueue(ctrl->workqueue);
destroy_workqueue(ctrl->workqueue);
ctrl->workqueue = NULL;
}
@@ -1243,8 +1242,6 @@ bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl)
int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
struct drm_connector *connector, struct edid **edid)
{
- int ret = 0;
-
mutex_lock(&ctrl->dev_mutex);
if (ctrl->edid) {
@@ -1279,7 +1276,7 @@ disable_ret:
}
unlock_ret:
mutex_unlock(&ctrl->dev_mutex);
- return ret;
+ return 0;
}
int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 737453b6e596..75b64e6ae035 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -61,10 +61,8 @@ static void msm_hdmi_destroy(struct hdmi *hdmi)
* at this point, hpd has been disabled,
* after flushing the workq, it's safe to deinit hdcp
*/
- if (hdmi->workq) {
- flush_workqueue(hdmi->workq);
+ if (hdmi->workq)
destroy_workqueue(hdmi->workq);
- }
msm_hdmi_hdcp_destroy(hdmi);
if (hdmi->phy_dev) {
@@ -154,19 +152,13 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
ret = -ENOMEM;
goto fail;
}
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- struct regulator *reg;
-
- reg = devm_regulator_get(&pdev->dev,
- config->hpd_reg_names[i]);
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
+ for (i = 0; i < config->hpd_reg_cnt; i++)
+ hdmi->hpd_regs[i].supply = config->hpd_reg_names[i];
- hdmi->hpd_regs[i] = reg;
+ ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %d\n", ret);
+ goto fail;
}
hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
@@ -177,19 +169,11 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
ret = -ENOMEM;
goto fail;
}
- for (i = 0; i < config->pwr_reg_cnt; i++) {
- struct regulator *reg;
-
- reg = devm_regulator_get(&pdev->dev,
- config->pwr_reg_names[i]);
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
- config->pwr_reg_names[i], ret);
- goto fail;
- }
- hdmi->pwr_regs[i] = reg;
+ ret = devm_regulator_bulk_get(&pdev->dev, config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %d\n", ret);
+ goto fail;
}
hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
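The regulator_bulk API mirrors the clk_bulk conversion in the DSI host: each regulator_bulk_data names a supply, devm_regulator_bulk_get() fills in the handles, and the enable/disable calls operate on the whole array with internal unwinding on failure. A condensed sketch (supply names hypothetical):

#include <linux/regulator/consumer.h>

static struct regulator_bulk_data example_regs[] = {
	{ .supply = "vdda" },	/* hypothetical supply names */
	{ .supply = "vddio" },
};

static int example_power_on(struct device *dev)
{
	int ret;

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(example_regs), example_regs);
	if (ret)
		return ret;

	return regulator_bulk_enable(ARRAY_SIZE(example_regs), example_regs);
}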
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index d0b84f0abee1..82261078c6b1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -56,8 +56,8 @@ struct hdmi {
void __iomem *qfprom_mmio;
phys_addr_t mmio_phy_addr;
- struct regulator **hpd_regs;
- struct regulator **pwr_regs;
+ struct regulator_bulk_data *hpd_regs;
+ struct regulator_bulk_data *pwr_regs;
struct clk **hpd_clks;
struct clk **pwr_clks;
@@ -163,7 +163,7 @@ struct hdmi_phy {
void __iomem *mmio;
struct hdmi_phy_cfg *cfg;
const struct hdmi_phy_funcs *funcs;
- struct regulator **regs;
+ struct regulator_bulk_data *regs;
struct clk **clks;
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 6e380db9287b..f04eb4a70f0d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -28,13 +28,9 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
pm_runtime_get_sync(&hdmi->pdev->dev);
- for (i = 0; i < config->pwr_reg_cnt; i++) {
- ret = regulator_enable(hdmi->pwr_regs[i]);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
- config->pwr_reg_names[i], ret);
- }
- }
+ ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret);
if (config->pwr_clk_cnt > 0) {
DBG("pixclock: %lu", hdmi->pixclock);
@@ -70,13 +66,9 @@ static void power_off(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_clk_cnt; i++)
clk_disable_unprepare(hdmi->pwr_clks[i]);
- for (i = 0; i < config->pwr_reg_cnt; i++) {
- ret = regulator_disable(hdmi->pwr_regs[i]);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
- config->pwr_reg_names[i], ret);
- }
- }
+ ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret);
pm_runtime_put_autosuspend(&hdmi->pdev->dev);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 58707a1f3878..a7f729cdec7b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -146,16 +146,13 @@ int msm_hdmi_hpd_enable(struct drm_connector *connector)
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
uint32_t hpd_ctrl;
- int i, ret;
+ int ret;
unsigned long flags;
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_enable(hdmi->hpd_regs[i]);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
+ ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret);
+ goto fail;
}
ret = pinctrl_pm_select_default_state(dev);
@@ -207,7 +204,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
- int i, ret = 0;
+ int ret;
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
@@ -225,12 +222,9 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
if (ret)
dev_warn(dev, "pinctrl state chg failed: %d\n", ret);
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_disable(hdmi->hpd_regs[i]);
- if (ret)
- dev_warn(dev, "failed to disable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- }
+ ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs);
+ if (ret)
+ dev_warn(dev, "failed to disable hpd regulator: %d\n", ret);
}
static void
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 8a38d4b95102..16b0e8836d27 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -23,22 +23,15 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
if (!phy->clks)
return -ENOMEM;
- for (i = 0; i < cfg->num_regs; i++) {
- struct regulator *reg;
-
- reg = devm_regulator_get(dev, cfg->reg_names[i]);
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- if (ret != -EPROBE_DEFER) {
- DRM_DEV_ERROR(dev,
- "failed to get phy regulator: %s (%d)\n",
- cfg->reg_names[i], ret);
- }
+ for (i = 0; i < cfg->num_regs; i++)
+ phy->regs[i].supply = cfg->reg_names[i];
- return ret;
- }
+ ret = devm_regulator_bulk_get(dev, cfg->num_regs, phy->regs);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get phy regulators: %d\n", ret);
- phy->regs[i] = reg;
+ return ret;
}
for (i = 0; i < cfg->num_clks; i++) {
@@ -66,11 +59,10 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
pm_runtime_get_sync(dev);
- for (i = 0; i < cfg->num_regs; i++) {
- ret = regulator_enable(phy->regs[i]);
- if (ret)
- DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n",
- cfg->reg_names[i], ret);
+ ret = regulator_bulk_enable(cfg->num_regs, phy->regs);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable regulators: %d\n", ret);
+ return ret;
}
for (i = 0; i < cfg->num_clks; i++) {
@@ -92,8 +84,7 @@ void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy)
for (i = cfg->num_clks - 1; i >= 0; i--)
clk_disable_unprepare(phy->clks[i]);
- for (i = cfg->num_regs - 1; i >= 0; i--)
- regulator_disable(phy->regs[i]);
+ regulator_bulk_disable(cfg->num_regs, phy->regs);
pm_runtime_put_sync(dev);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index a8f3b2cbfdc5..99c7853353fd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -682,7 +682,7 @@ static int hdmi_8996_pll_is_enabled(struct clk_hw *hw)
return pll_locked;
}
-static struct clk_ops hdmi_8996_pll_ops = {
+static const struct clk_ops hdmi_8996_pll_ops = {
.set_rate = hdmi_8996_pll_set_clk_rate,
.round_rate = hdmi_8996_pll_round_rate,
.recalc_rate = hdmi_8996_pll_recalc_rate,
@@ -695,7 +695,7 @@ static const char * const hdmi_pll_parents[] = {
"xo",
};
-static struct clk_init_data pll_init = {
+static const struct clk_init_data pll_init = {
.name = "hdmipll",
.ops = &hdmi_8996_pll_ops,
.parent_names = hdmi_pll_parents,
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index fab09e7c6efc..27c9ae563f2f 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -116,20 +116,10 @@ out:
trace_msm_atomic_async_commit_finish(crtc_mask);
}
-static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
-{
- struct msm_pending_timer *timer = container_of(t,
- struct msm_pending_timer, timer);
-
- kthread_queue_work(timer->worker, &timer->work);
-
- return HRTIMER_NORESTART;
-}
-
static void msm_atomic_pending_work(struct kthread_work *work)
{
struct msm_pending_timer *timer = container_of(work,
- struct msm_pending_timer, work);
+ struct msm_pending_timer, work.work);
msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}
@@ -139,8 +129,6 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
{
timer->kms = kms;
timer->crtc_idx = crtc_idx;
- hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- timer->timer.function = msm_atomic_pending_timer;
timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
if (IS_ERR(timer->worker)) {
@@ -149,7 +137,10 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
return ret;
}
sched_set_fifo(timer->worker->task);
- kthread_init_work(&timer->work, msm_atomic_pending_work);
+
+ msm_hrtimer_work_init(&timer->work, timer->worker,
+ msm_atomic_pending_work,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
return 0;
}
@@ -258,7 +249,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
vsync_time = kms->funcs->vsync_time(kms, async_crtc);
wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));
- hrtimer_start(&timer->timer, wakeup_time,
+ msm_hrtimer_queue_work(&timer->work, wakeup_time,
HRTIMER_MODE_ABS);
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d4e09703a87d..7936e8d498dd 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -58,7 +58,7 @@ static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
};
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
-static bool reglog = false;
+static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
@@ -75,7 +75,7 @@ static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
-bool dumpstate = false;
+bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);
@@ -200,6 +200,35 @@ void msm_rmw(void __iomem *addr, u32 mask, u32 or)
msm_writel(val | or, addr);
}
+static enum hrtimer_restart msm_hrtimer_worktimer(struct hrtimer *t)
+{
+ struct msm_hrtimer_work *work = container_of(t,
+ struct msm_hrtimer_work, timer);
+
+ kthread_queue_work(work->worker, &work->work);
+
+ return HRTIMER_NORESTART;
+}
+
+void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
+ ktime_t wakeup_time,
+ enum hrtimer_mode mode)
+{
+ hrtimer_start(&work->timer, wakeup_time, mode);
+}
+
+void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
+ struct kthread_worker *worker,
+ kthread_work_func_t fn,
+ clockid_t clock_id,
+ enum hrtimer_mode mode)
+{
+ hrtimer_init(&work->timer, clock_id, mode);
+ work->timer.function = msm_hrtimer_worktimer;
+ work->worker = worker;
+ kthread_init_work(&work->work, fn);
+}
+
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
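
The msm_hrtimer_work helper introduced above pairs an hrtimer with a kthread_work: the timer callback only queues the work, so the payload runs in kthread context rather than hardirq context. A hedged sketch of a caller, with illustrative names (cf. the msm_atomic.c conversion above):

	struct demo_ctx {
		struct msm_hrtimer_work work;
	};

	static void demo_work_fn(struct kthread_work *w)
	{
		/* note: container_of() goes through work.work */
		struct demo_ctx *ctx =
			container_of(w, struct demo_ctx, work.work);

		/* ... payload, running on the kthread worker ... */
	}

	static void demo_ctx_init(struct demo_ctx *ctx,
				  struct kthread_worker *worker)
	{
		msm_hrtimer_work_init(&ctx->work, worker, demo_work_fn,
				      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	}

	/* later: arm the timer; expiry queues demo_work_fn() on the worker */
	msm_hrtimer_queue_work(&ctx->work, wakeup_time, HRTIMER_MODE_ABS);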
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c552f0c3890c..69952b239384 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -60,6 +60,13 @@ enum msm_mdp_plane_property {
PLANE_PROP_MAX_NUM
};
+enum msm_dp_controller {
+ MSM_DP_CONTROLLER_0,
+ MSM_DP_CONTROLLER_1,
+ MSM_DP_CONTROLLER_2,
+ MSM_DP_CONTROLLER_COUNT,
+};
+
#define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2
@@ -153,7 +160,7 @@ struct msm_drm_private {
/* DSI is shared by mdp4 and mdp5 */
struct msm_dsi *dsi[2];
- struct msm_dp *dp;
+ struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
@@ -480,6 +487,28 @@ void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
void msm_rmw(void __iomem *addr, u32 mask, u32 or);
+/**
+ * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work
+ *
+ * @timer: hrtimer to control when the kthread work is triggered
+ * @work: the kthread work
+ * @worker: the kthread worker the work will be scheduled on
+ */
+struct msm_hrtimer_work {
+ struct hrtimer timer;
+ struct kthread_work work;
+ struct kthread_worker *worker;
+};
+
+void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
+ ktime_t wakeup_time,
+ enum hrtimer_mode mode);
+void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
+ struct kthread_worker *worker,
+ kthread_work_func_t fn,
+ clockid_t clock_id,
+ enum hrtimer_mode mode);
+
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 22308a1b66fc..104fdfc14027 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -5,6 +5,7 @@
*/
#include <linux/dma-map-ops.h>
+#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
@@ -85,7 +86,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
paddr = physaddr(obj);
for (i = 0; i < npages; i++) {
- p[i] = phys_to_page(paddr);
+ p[i] = pfn_to_page(__phys_to_pfn(paddr));
paddr += PAGE_SIZE;
}
@@ -1132,6 +1133,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
+ INIT_LIST_HEAD(&msm_obj->node);
INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
@@ -1166,7 +1168,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
ret = msm_gem_new_impl(dev, size, flags, &obj);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
msm_obj = to_msm_bo(obj);
@@ -1250,7 +1252,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
drm_gem_private_object_init(dev, obj, size);
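
The two error-path changes above stop jumping to the fail: label when msm_gem_new_impl() fails; at that point no GEM object has been created yet, so the shared cleanup path (which presumably puts the object) would operate on something that never existed. Returning directly is the usual shape when failing before the first resource exists:

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);	/* nothing to unwind yet */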
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index e39a8e7ad843..54ca0817d807 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -309,11 +309,6 @@ struct msm_gem_submit {
struct ww_acquire_ctx ticket;
uint32_t seqno; /* Sequence number of the submit on the ring */
- /* Array of struct dma_fence * to block on before submitting this job.
- */
- struct xarray deps;
- unsigned long last_dep;
-
/* Hw fence, which is created when the scheduler executes the job, and
* is signaled when the hw finishes (via seqno write from cmdstream)
*/
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 0f1b29ee04a9..4a1420b05e97 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -4,6 +4,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/vmalloc.h>
+
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 151d19e4453c..3cb029f10925 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -52,8 +52,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return ERR_PTR(ret);
}
- xa_init_flags(&submit->deps, XA_FLAGS_ALLOC);
-
kref_init(&submit->ref);
submit->dev = dev;
submit->aspace = queue->ctx->aspace;
@@ -72,8 +70,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
{
struct msm_gem_submit *submit =
container_of(kref, struct msm_gem_submit, ref);
- unsigned long index;
- struct dma_fence *fence;
unsigned i;
if (submit->fence_id) {
@@ -82,12 +78,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
mutex_unlock(&submit->queue->lock);
}
- xa_for_each (&submit->deps, index, fence) {
- dma_fence_put(fence);
- }
-
- xa_destroy(&submit->deps);
-
dma_fence_put(submit->user_fence);
dma_fence_put(submit->hw_fence);
@@ -341,11 +331,13 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
return ret;
}
- if (no_implicit)
+ /* exclusive fences must be ordered */
+ if (no_implicit && !write)
continue;
- ret = drm_gem_fence_array_add_implicit(&submit->deps, obj,
- write);
+ ret = drm_sched_job_add_implicit_dependencies(&submit->base,
+ obj,
+ write);
if (ret)
break;
}
@@ -589,7 +581,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
if (ret)
break;
- ret = drm_gem_fence_array_add(&submit->deps, fence);
+ ret = drm_sched_job_add_dependency(&submit->base, fence);
if (ret)
break;
@@ -799,7 +791,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = drm_gem_fence_array_add(&submit->deps, in_fence);
+ ret = drm_sched_job_add_dependency(&submit->base, in_fence);
if (ret)
goto out_unlock;
}
@@ -879,6 +871,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
+ drm_sched_job_arm(&submit->base);
+
submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
/*
@@ -890,17 +884,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (submit->fence_id < 0) {
ret = submit->fence_id;
submit->fence_id = 0;
- goto out;
}
- if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
struct sync_file *sync_file = sync_file_create(submit->user_fence);
if (!sync_file) {
ret = -ENOMEM;
- goto out;
+ } else {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
}
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
}
submit_attach_object_fences(submit);
@@ -908,7 +901,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* The scheduler owns a ref now: */
msm_gem_submit_get(submit);
- drm_sched_entity_push_job(&submit->base, queue->entity);
+ drm_sched_entity_push_job(&submit->base);
args->fence = submit->fence_id;
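
With the driver-private xarray of fences removed, dependency tracking moves into the DRM scheduler core. Reduced to its scheduler calls, the new submit flow looks like this (a sketch of the API sequence, not the full driver logic):

	/* in-fences and syncobj fences become scheduler dependencies: */
	ret = drm_sched_job_add_dependency(&submit->base, in_fence);

	/* implicit fences come from each GEM object's reservation object: */
	ret = drm_sched_job_add_implicit_dependencies(&submit->base,
						      obj, write);

	/* with all dependencies added, arm the job (its fences go live) ... */
	drm_sched_job_arm(&submit->base);

	/* ... and push it; the entity is bound at drm_sched_job_init() time,
	 * which is why drm_sched_entity_push_job() lost its entity argument. */
	drm_sched_entity_push_job(&submit->base);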
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8a3a592da3a4..2c46cd968ac4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -296,7 +296,7 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
state->bos = kcalloc(nr,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
- for (i = 0; i < submit->nr_bos; i++) {
+ for (i = 0; state->bos && i < submit->nr_bos; i++) {
if (should_dump(submit, i)) {
msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
submit->bos[i].iova, submit->bos[i].flags);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 030f82f149c2..59cdd00b69d0 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -112,6 +112,13 @@ struct msm_gpu_devfreq {
* it is inactive.
*/
unsigned long idle_freq;
+
+ /**
+ * idle_work:
+ *
+ * Used to delay clamping to idle freq on active->idle transition.
+ */
+ struct msm_hrtimer_work idle_work;
};
struct msm_gpu {
@@ -203,6 +210,10 @@ struct msm_gpu {
uint32_t suspend_count;
struct msm_gpu_state *crashstate;
+
+ /* Enable clamping to idle freq when inactive: */
+ bool clamp_to_idle;
+
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 84e98c07c900..8b7473f69cb8 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -88,8 +88,12 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
.get_cur_freq = msm_devfreq_get_cur_freq,
};
+static void msm_devfreq_idle_work(struct kthread_work *work);
+
void msm_devfreq_init(struct msm_gpu *gpu)
{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
/* We need target support to do devfreq */
if (!gpu->funcs->gpu_busy)
return;
@@ -105,25 +109,27 @@ void msm_devfreq_init(struct msm_gpu *gpu)
msm_devfreq_profile.freq_table = NULL;
msm_devfreq_profile.max_state = 0;
- gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
+ df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
NULL);
- if (IS_ERR(gpu->devfreq.devfreq)) {
+ if (IS_ERR(df->devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
- gpu->devfreq.devfreq = NULL;
+ df->devfreq = NULL;
return;
}
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ devfreq_suspend_device(df->devfreq);
- gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
- gpu->devfreq.devfreq);
+ gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, df->devfreq);
if (IS_ERR(gpu->cooling)) {
DRM_DEV_ERROR(&gpu->pdev->dev,
"Couldn't register GPU cooling device\n");
gpu->cooling = NULL;
}
+
+ msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
void msm_devfreq_cleanup(struct msm_gpu *gpu)
@@ -155,6 +161,11 @@ void msm_devfreq_active(struct msm_gpu *gpu)
return;
/*
+ * Cancel any pending transition to idle frequency:
+ */
+ hrtimer_cancel(&df->idle_work.timer);
+
+ /*
* Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks
*/
@@ -184,9 +195,12 @@ void msm_devfreq_active(struct msm_gpu *gpu)
mutex_unlock(&df->devfreq->lock);
}
-void msm_devfreq_idle(struct msm_gpu *gpu)
+
+static void msm_devfreq_idle_work(struct kthread_work *work)
{
- struct msm_gpu_devfreq *df = &gpu->devfreq;
+ struct msm_gpu_devfreq *df = container_of(work,
+ struct msm_gpu_devfreq, idle_work.work);
+ struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
unsigned long idle_freq, target_freq = 0;
if (!df->devfreq)
@@ -200,10 +214,19 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
idle_freq = get_freq(gpu);
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+ if (gpu->clamp_to_idle)
+ msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
df->idle_time = ktime_get();
df->idle_freq = idle_freq;
mutex_unlock(&df->devfreq->lock);
}
+
+void msm_devfreq_idle(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
+ msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
+ HRTIMER_MODE_REL);
+}
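
Taken together, the devfreq hunks change msm_devfreq_idle() from clamping the frequency synchronously to arming a 1 ms timer, while msm_devfreq_active() cancels any timer still pending, so a GPU that becomes busy again within 1 ms never drops its clocks at all. In outline (paraphrasing the patch, not new API):

	/* active: abort a clamp that has not fired yet */
	hrtimer_cancel(&df->idle_work.timer);

	/* idle: defer the clamp so short idle gaps keep their clocks */
	msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
			       HRTIMER_MODE_REL);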
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index de2bc3467bb5..6a42b819abc4 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -136,8 +136,7 @@ struct msm_kms;
* shortly before vblank to flush pending async updates.
*/
struct msm_pending_timer {
- struct hrtimer timer;
- struct kthread_work work;
+ struct msm_hrtimer_work work;
struct kthread_worker *worker;
struct msm_kms *kms;
unsigned crtc_idx;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index bd54c1412649..652b1dedd7c1 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -11,17 +11,6 @@ static uint num_hw_submissions = 8;
MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
module_param(num_hw_submissions, uint, 0600);
-static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
- struct drm_sched_entity *s_entity)
-{
- struct msm_gem_submit *submit = to_msm_submit(job);
-
- if (!xa_empty(&submit->deps))
- return xa_erase(&submit->deps, submit->last_dep++);
-
- return NULL;
-}
-
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
struct msm_gem_submit *submit = to_msm_submit(job);
@@ -52,7 +41,6 @@ static void msm_job_free(struct drm_sched_job *job)
}
const struct drm_sched_backend_ops msm_sched_ops = {
- .dependency = msm_job_dependency,
.run_job = msm_job_run,
.free_job = msm_job_free
};
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index b8621c6e0554..7cb158bcbcf6 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -101,6 +101,7 @@ get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
if (ret) {
+ mutex_unlock(&entity_lock);
kfree(entity);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index ec0432fe1bdf..86d78634a979 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -173,7 +173,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
struct mxsfb_drm_private *mxsfb = drm->dev_private;
mxsfb_enable_axi_clk(mxsfb);
- mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
+
+ /* Disable and clear VBLANK IRQ */
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+
mxsfb_disable_axi_clk(mxsfb);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index d7b9f7f8c9e3..8e28403ea9b1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1414,7 +1414,7 @@ nv50_mstm_prepare(struct nv50_mstm *mstm)
int ret;
NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
- ret = drm_dp_update_payload_part1(&mstm->mgr);
+ ret = drm_dp_update_payload_part1(&mstm->mgr, 1);
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 7c15f6448428..6140db756d06 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -364,7 +364,6 @@ void *
nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
{
struct acpi_device *acpidev;
- acpi_handle handle;
int type, ret;
void *edid;
@@ -377,12 +376,8 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return NULL;
}
- handle = ACPI_HANDLE(dev->dev);
- if (!handle)
- return NULL;
-
- ret = acpi_bus_get_device(handle, &acpidev);
- if (ret)
+ acpidev = ACPI_COMPANION(dev->dev);
+ if (!acpidev)
return NULL;
ret = acpi_video_get_edid(acpidev, type, -1, &edid);
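
ACPI_COMPANION() hands back the struct acpi_device already bound to the device, so the old handle lookup plus acpi_bus_get_device() round trip collapses into a single NULL check. The general idiom (illustrative only):

	struct acpi_device *adev = ACPI_COMPANION(dev);	/* may be NULL */

	if (!adev)
		return NULL;	/* no ACPI companion for this device */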
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c58bcdba2c7a..12b107acb6ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1250,7 +1250,7 @@ nouveau_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (ttm_tt_is_populated(ttm))
return 0;
@@ -1273,11 +1273,13 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
{
struct nouveau_drm *drm;
struct device *dev;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (slave)
return;
+ nouveau_ttm_tt_unbind(bdev, ttm);
+
drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
@@ -1291,8 +1293,6 @@ nouveau_ttm_tt_destroy(struct ttm_device *bdev,
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
if (drm->agp.bridge) {
- ttm_agp_unbind(ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_agp_destroy(ttm);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 256ec5b35473..85c03c83259b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -21,8 +21,6 @@ nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (ttm) {
- nouveau_sgdma_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(&nvbe->ttm);
kfree(nvbe);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index b0c3422cb01f..1a896a24288a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -992,7 +992,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
if (ret)
return ret;
- buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
+ buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
if (!buffer->fault)
return -ENOMEM;
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index e7281da5bc6a..455e1a91f0e5 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_OMAP
tristate "OMAP DRM"
- depends on DRM
+ depends on DRM && OF
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
- select OMAP2_DSS
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select HDMI
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 5f1722b040f4..503b5d4bf2c2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -2094,7 +2094,7 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int vc,
u8 b1, b2, b3, b4;
if (dsi->debug_write)
- DSSDBG("dsi_vc_send_long, %d bytes\n", msg->tx_len);
+ DSSDBG("dsi_vc_send_long, %zu bytes\n", msg->tx_len);
/* len + header */
if (dsi->vc[vc].tx_fifo_size * 32 * 4 < msg->tx_len + 4) {
@@ -2390,7 +2390,7 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int vc,
return 0;
err:
- DSSERR("%s(vc %d, reqlen %d) failed\n", __func__, vc, msg->tx_len);
+ DSSERR("%s(vc %d, reqlen %zu) failed\n", __func__, vc, msg->tx_len);
return r;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index f86e20578143..c05d3975cb31 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -572,7 +572,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
priv->dss->mgr_ops_priv = priv;
soc = soc_device_match(omapdrm_soc_devices);
- priv->omaprev = soc ? (unsigned int)soc->data : 0;
+ priv->omaprev = soc ? (uintptr_t)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
mutex_init(&priv->list_lock);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index f4cde3a169d8..809f86cfc540 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -11,6 +11,8 @@
#include "omap_drv.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
/* -----------------------------------------------------------------------------
* DMABUF Export
*/
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 418638e6e3b0..2cb8eba76af8 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -77,14 +77,26 @@ config DRM_PANEL_LVDS
backlight handling if the panel is attached to a backlight controller.
config DRM_PANEL_SIMPLE
- tristate "support for simple panels"
+ tristate "support for simple panels (other than eDP ones)"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on PM
+ select VIDEOMODE_HELPERS
+ help
+ DRM panel driver for dumb non-eDP panels that need at most a regulator
+ and a GPIO to be powered up. Optionally a backlight can be attached so
+ that it can be automatically turned off when the panel goes into a
+ low power state.
+
+config DRM_PANEL_EDP
+ tristate "support for simple Embedded DisplayPort panels"
depends on OF
depends on BACKLIGHT_CLASS_DEVICE
depends on PM
select VIDEOMODE_HELPERS
select DRM_DP_AUX_BUS
help
- DRM panel driver for dumb panels that need at most a regulator and
+ DRM panel driver for dumb eDP panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so
that it can be automatically turned off when the panel goes into a
low power state.
@@ -393,6 +405,17 @@ config DRM_PANEL_SAMSUNG_S6D16D0
depends on DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6D27A1
+ tristate "Samsung S6D27A1 DPI panel driver"
+ depends on OF && SPI && GPIOLIB
+ select DRM_MIPI_DBI
+ help
+ Say Y here if you want to enable support for the Samsung
+ S6D27A1 DPI 480x800 panel.
+
+ This panel can be found in the Samsung Galaxy Ace 2
+ GT-I8160 mobile phone.
+
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index c8132050bcec..6e30640b9099 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_EDP) += panel-edp.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index db9d0b86d542..529561b4fbbc 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -45,6 +45,7 @@ struct boe_panel {
const struct panel_desc *desc;
enum drm_panel_orientation orientation;
+ struct regulator *pp3300;
struct regulator *pp1800;
struct regulator *avee;
struct regulator *avdd;
@@ -74,6 +75,670 @@ struct panel_init_cmd {
.len = sizeof((char[]){__VA_ARGS__}), \
.data = (char[]){__VA_ARGS__} }
+static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x05, 0xD9),
+ _INIT_DCS_CMD(0x07, 0x78),
+ _INIT_DCS_CMD(0x08, 0x5A),
+ _INIT_DCS_CMD(0x0D, 0x63),
+ _INIT_DCS_CMD(0x0E, 0x91),
+ _INIT_DCS_CMD(0x0F, 0x73),
+ _INIT_DCS_CMD(0x95, 0xEB),
+ _INIT_DCS_CMD(0x96, 0xEB),
+ _INIT_DCS_CMD(0x30, 0x11),
+ _INIT_DCS_CMD(0x6D, 0x66),
+ _INIT_DCS_CMD(0x75, 0xA2),
+ _INIT_DCS_CMD(0x77, 0x3B),
+
+ _INIT_DCS_CMD(0xB0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
+ _INIT_DCS_CMD(0xB1, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
+ _INIT_DCS_CMD(0xB2, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
+ _INIT_DCS_CMD(0xB3, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
+
+ _INIT_DCS_CMD(0xB4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
+ _INIT_DCS_CMD(0xB5, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
+ _INIT_DCS_CMD(0xB6, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
+ _INIT_DCS_CMD(0xB7, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
+ _INIT_DCS_CMD(0xB8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
+ _INIT_DCS_CMD(0xB9, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
+ _INIT_DCS_CMD(0xBA, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
+ _INIT_DCS_CMD(0xBB, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
+
+ _INIT_DCS_CMD(0xFF, 0x21),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
+ _INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
+ _INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
+ _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+
+ _INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
+ _INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
+ _INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
+ _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+
+ _INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
+ _INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
+ _INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
+ _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+
+ _INIT_DCS_CMD(0xFF, 0x24),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x00, 0x00),
+ _INIT_DCS_CMD(0x01, 0x00),
+
+ _INIT_DCS_CMD(0x02, 0x1C),
+ _INIT_DCS_CMD(0x03, 0x1C),
+
+ _INIT_DCS_CMD(0x04, 0x1D),
+ _INIT_DCS_CMD(0x05, 0x1D),
+
+ _INIT_DCS_CMD(0x06, 0x04),
+ _INIT_DCS_CMD(0x07, 0x04),
+
+ _INIT_DCS_CMD(0x08, 0x0F),
+ _INIT_DCS_CMD(0x09, 0x0F),
+
+ _INIT_DCS_CMD(0x0A, 0x0E),
+ _INIT_DCS_CMD(0x0B, 0x0E),
+
+ _INIT_DCS_CMD(0x0C, 0x0D),
+ _INIT_DCS_CMD(0x0D, 0x0D),
+
+ _INIT_DCS_CMD(0x0E, 0x0C),
+ _INIT_DCS_CMD(0x0F, 0x0C),
+
+ _INIT_DCS_CMD(0x10, 0x08),
+ _INIT_DCS_CMD(0x11, 0x08),
+
+ _INIT_DCS_CMD(0x12, 0x00),
+ _INIT_DCS_CMD(0x13, 0x00),
+ _INIT_DCS_CMD(0x14, 0x00),
+ _INIT_DCS_CMD(0x15, 0x00),
+
+ _INIT_DCS_CMD(0x16, 0x00),
+ _INIT_DCS_CMD(0x17, 0x00),
+
+ _INIT_DCS_CMD(0x18, 0x1C),
+ _INIT_DCS_CMD(0x19, 0x1C),
+
+ _INIT_DCS_CMD(0x1A, 0x1D),
+ _INIT_DCS_CMD(0x1B, 0x1D),
+
+ _INIT_DCS_CMD(0x1C, 0x04),
+ _INIT_DCS_CMD(0x1D, 0x04),
+
+ _INIT_DCS_CMD(0x1E, 0x0F),
+ _INIT_DCS_CMD(0x1F, 0x0F),
+
+ _INIT_DCS_CMD(0x20, 0x0E),
+ _INIT_DCS_CMD(0x21, 0x0E),
+
+ _INIT_DCS_CMD(0x22, 0x0D),
+ _INIT_DCS_CMD(0x23, 0x0D),
+
+ _INIT_DCS_CMD(0x24, 0x0C),
+ _INIT_DCS_CMD(0x25, 0x0C),
+
+ _INIT_DCS_CMD(0x26, 0x08),
+ _INIT_DCS_CMD(0x27, 0x08),
+
+ _INIT_DCS_CMD(0x28, 0x00),
+ _INIT_DCS_CMD(0x29, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x00),
+ _INIT_DCS_CMD(0x2B, 0x00),
+
+ _INIT_DCS_CMD(0x2D, 0x20),
+ _INIT_DCS_CMD(0x2F, 0x0A),
+ _INIT_DCS_CMD(0x30, 0x44),
+ _INIT_DCS_CMD(0x33, 0x0C),
+ _INIT_DCS_CMD(0x34, 0x32),
+
+ _INIT_DCS_CMD(0x37, 0x44),
+ _INIT_DCS_CMD(0x38, 0x40),
+ _INIT_DCS_CMD(0x39, 0x00),
+ _INIT_DCS_CMD(0x3A, 0x5D),
+ _INIT_DCS_CMD(0x3B, 0x60),
+ _INIT_DCS_CMD(0x3D, 0x42),
+ _INIT_DCS_CMD(0x3F, 0x06),
+ _INIT_DCS_CMD(0x43, 0x06),
+ _INIT_DCS_CMD(0x47, 0x66),
+ _INIT_DCS_CMD(0x4A, 0x5D),
+ _INIT_DCS_CMD(0x4B, 0x60),
+ _INIT_DCS_CMD(0x4C, 0x91),
+ _INIT_DCS_CMD(0x4D, 0x21),
+ _INIT_DCS_CMD(0x4E, 0x43),
+ _INIT_DCS_CMD(0x51, 0x12),
+ _INIT_DCS_CMD(0x52, 0x34),
+ _INIT_DCS_CMD(0x55, 0x82, 0x02),
+ _INIT_DCS_CMD(0x56, 0x04),
+ _INIT_DCS_CMD(0x58, 0x21),
+ _INIT_DCS_CMD(0x59, 0x30),
+ _INIT_DCS_CMD(0x5A, 0x60),
+ _INIT_DCS_CMD(0x5B, 0x50),
+ _INIT_DCS_CMD(0x5E, 0x00, 0x06),
+ _INIT_DCS_CMD(0x5F, 0x00),
+ _INIT_DCS_CMD(0x65, 0x82),
+ _INIT_DCS_CMD(0x7E, 0x20),
+ _INIT_DCS_CMD(0x7F, 0x3C),
+ _INIT_DCS_CMD(0x82, 0x04),
+ _INIT_DCS_CMD(0x97, 0xC0),
+ _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
+ _INIT_DCS_CMD(0x91, 0x44),
+ _INIT_DCS_CMD(0x92, 0xA9),
+ _INIT_DCS_CMD(0x93, 0x1A),
+ _INIT_DCS_CMD(0x94, 0x96),
+ _INIT_DCS_CMD(0xD7, 0x55),
+ _INIT_DCS_CMD(0xDA, 0x0A),
+ _INIT_DCS_CMD(0xDE, 0x08),
+ _INIT_DCS_CMD(0xDB, 0x05),
+ _INIT_DCS_CMD(0xDC, 0xA9),
+ _INIT_DCS_CMD(0xDD, 0x22),
+
+ _INIT_DCS_CMD(0xDF, 0x05),
+ _INIT_DCS_CMD(0xE0, 0xA9),
+ _INIT_DCS_CMD(0xE1, 0x05),
+ _INIT_DCS_CMD(0xE2, 0xA9),
+ _INIT_DCS_CMD(0xE3, 0x05),
+ _INIT_DCS_CMD(0xE4, 0xA9),
+ _INIT_DCS_CMD(0xE5, 0x05),
+ _INIT_DCS_CMD(0xE6, 0xA9),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x8D, 0x00),
+ _INIT_DCS_CMD(0x8E, 0x00),
+ _INIT_DCS_CMD(0xB5, 0x90),
+ _INIT_DCS_CMD(0xFF, 0x25),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x05, 0x00),
+ _INIT_DCS_CMD(0x19, 0x07),
+ _INIT_DCS_CMD(0x1F, 0x60),
+ _INIT_DCS_CMD(0x20, 0x50),
+ _INIT_DCS_CMD(0x26, 0x60),
+ _INIT_DCS_CMD(0x27, 0x50),
+ _INIT_DCS_CMD(0x33, 0x60),
+ _INIT_DCS_CMD(0x34, 0x50),
+ _INIT_DCS_CMD(0x3F, 0xE0),
+ _INIT_DCS_CMD(0x40, 0x00),
+ _INIT_DCS_CMD(0x44, 0x00),
+ _INIT_DCS_CMD(0x45, 0x40),
+ _INIT_DCS_CMD(0x48, 0x60),
+ _INIT_DCS_CMD(0x49, 0x50),
+ _INIT_DCS_CMD(0x5B, 0x00),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0xD0),
+ _INIT_DCS_CMD(0x61, 0x60),
+ _INIT_DCS_CMD(0x62, 0x50),
+ _INIT_DCS_CMD(0xF1, 0x10),
+ _INIT_DCS_CMD(0xFF, 0x2A),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x64, 0x16),
+ _INIT_DCS_CMD(0x67, 0x16),
+ _INIT_DCS_CMD(0x6A, 0x16),
+
+ _INIT_DCS_CMD(0x70, 0x30),
+
+ _INIT_DCS_CMD(0xA2, 0xF3),
+ _INIT_DCS_CMD(0xA3, 0xFF),
+ _INIT_DCS_CMD(0xA4, 0xFF),
+ _INIT_DCS_CMD(0xA5, 0xFF),
+
+ _INIT_DCS_CMD(0xD6, 0x08),
+
+ _INIT_DCS_CMD(0xFF, 0x26),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x00, 0xA1),
+
+ _INIT_DCS_CMD(0x02, 0x31),
+ _INIT_DCS_CMD(0x04, 0x28),
+ _INIT_DCS_CMD(0x06, 0x30),
+ _INIT_DCS_CMD(0x0C, 0x16),
+ _INIT_DCS_CMD(0x0D, 0x0D),
+ _INIT_DCS_CMD(0x0F, 0x00),
+ _INIT_DCS_CMD(0x11, 0x00),
+ _INIT_DCS_CMD(0x12, 0x50),
+ _INIT_DCS_CMD(0x13, 0x56),
+ _INIT_DCS_CMD(0x14, 0x57),
+ _INIT_DCS_CMD(0x15, 0x00),
+ _INIT_DCS_CMD(0x16, 0x10),
+ _INIT_DCS_CMD(0x17, 0xA0),
+ _INIT_DCS_CMD(0x18, 0x86),
+ _INIT_DCS_CMD(0x19, 0x0D),
+ _INIT_DCS_CMD(0x1A, 0x7F),
+ _INIT_DCS_CMD(0x1B, 0x0C),
+ _INIT_DCS_CMD(0x1C, 0xBF),
+ _INIT_DCS_CMD(0x22, 0x00),
+ _INIT_DCS_CMD(0x23, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x0D),
+ _INIT_DCS_CMD(0x2B, 0x7F),
+
+ _INIT_DCS_CMD(0x1D, 0x00),
+ _INIT_DCS_CMD(0x1E, 0x65),
+ _INIT_DCS_CMD(0x1F, 0x65),
+ _INIT_DCS_CMD(0x24, 0x00),
+ _INIT_DCS_CMD(0x25, 0x65),
+ _INIT_DCS_CMD(0x2F, 0x05),
+ _INIT_DCS_CMD(0x30, 0x65),
+ _INIT_DCS_CMD(0x31, 0x05),
+ _INIT_DCS_CMD(0x32, 0x7D),
+ _INIT_DCS_CMD(0x39, 0x00),
+ _INIT_DCS_CMD(0x3A, 0x65),
+ _INIT_DCS_CMD(0x20, 0x01),
+ _INIT_DCS_CMD(0x33, 0x11),
+ _INIT_DCS_CMD(0x34, 0x78),
+ _INIT_DCS_CMD(0x35, 0x16),
+ _INIT_DCS_CMD(0xC8, 0x04),
+ _INIT_DCS_CMD(0xC9, 0x80),
+ _INIT_DCS_CMD(0xCA, 0x4E),
+ _INIT_DCS_CMD(0xCB, 0x00),
+ _INIT_DCS_CMD(0xA9, 0x4C),
+ _INIT_DCS_CMD(0xAA, 0x47),
+
+ _INIT_DCS_CMD(0xFF, 0x27),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x56, 0x06),
+ _INIT_DCS_CMD(0x58, 0x80),
+ _INIT_DCS_CMD(0x59, 0x75),
+ _INIT_DCS_CMD(0x5A, 0x00),
+ _INIT_DCS_CMD(0x5B, 0x02),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0x20),
+ _INIT_DCS_CMD(0x5F, 0x10),
+ _INIT_DCS_CMD(0x60, 0x00),
+ _INIT_DCS_CMD(0x61, 0x2E),
+ _INIT_DCS_CMD(0x62, 0x00),
+ _INIT_DCS_CMD(0x63, 0x01),
+ _INIT_DCS_CMD(0x64, 0x43),
+ _INIT_DCS_CMD(0x65, 0x2D),
+ _INIT_DCS_CMD(0x66, 0x00),
+ _INIT_DCS_CMD(0x67, 0x01),
+ _INIT_DCS_CMD(0x68, 0x44),
+
+ _INIT_DCS_CMD(0x00, 0x00),
+ _INIT_DCS_CMD(0x78, 0x00),
+ _INIT_DCS_CMD(0xC3, 0x00),
+
+ _INIT_DCS_CMD(0xFF, 0x2A),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x22, 0x2F),
+ _INIT_DCS_CMD(0x23, 0x08),
+
+ _INIT_DCS_CMD(0x24, 0x00),
+ _INIT_DCS_CMD(0x25, 0x65),
+ _INIT_DCS_CMD(0x26, 0xF8),
+ _INIT_DCS_CMD(0x27, 0x00),
+ _INIT_DCS_CMD(0x28, 0x1A),
+ _INIT_DCS_CMD(0x29, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x1A),
+ _INIT_DCS_CMD(0x2B, 0x00),
+ _INIT_DCS_CMD(0x2D, 0x1A),
+
+ _INIT_DCS_CMD(0xFF, 0x23),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x00, 0x80),
+ _INIT_DCS_CMD(0x07, 0x00),
+
+ _INIT_DCS_CMD(0xFF, 0xE0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x14, 0x60),
+ _INIT_DCS_CMD(0x16, 0xC0),
+
+ _INIT_DCS_CMD(0xFF, 0xF0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x3A, 0x08),
+
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xB9, 0x01),
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x18, 0x40),
+
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xB9, 0x02),
+ _INIT_DCS_CMD(0x35, 0x00),
+ _INIT_DCS_CMD(0x51, 0x00, 0xFF),
+ _INIT_DCS_CMD(0x53, 0x24),
+ _INIT_DCS_CMD(0x55, 0x00),
+ _INIT_DCS_CMD(0xBB, 0x13),
+ _INIT_DCS_CMD(0x3B, 0x03, 0x96, 0x1A, 0x04, 0x04),
+ _INIT_DELAY_CMD(100),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(200),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(100),
+ {},
+};
+
+static const struct panel_init_cmd inx_init_cmd[] = {
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x05, 0xD1),
+ _INIT_DCS_CMD(0x0D, 0x63),
+ _INIT_DCS_CMD(0x07, 0x8C),
+ _INIT_DCS_CMD(0x08, 0x4B),
+ _INIT_DCS_CMD(0x0E, 0x91),
+ _INIT_DCS_CMD(0x0F, 0x69),
+ _INIT_DCS_CMD(0x95, 0xFF),
+ _INIT_DCS_CMD(0x96, 0xFF),
+ _INIT_DCS_CMD(0x9D, 0x0A),
+ _INIT_DCS_CMD(0x9E, 0x0A),
+ _INIT_DCS_CMD(0x69, 0x98),
+ _INIT_DCS_CMD(0x75, 0xA2),
+ _INIT_DCS_CMD(0x77, 0xB3),
+ _INIT_DCS_CMD(0xFF, 0x24),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x91, 0x44),
+ _INIT_DCS_CMD(0x92, 0x7A),
+ _INIT_DCS_CMD(0x93, 0x1A),
+ _INIT_DCS_CMD(0x94, 0x40),
+ _INIT_DCS_CMD(0x9A, 0x08),
+ _INIT_DCS_CMD(0x60, 0x96),
+ _INIT_DCS_CMD(0x61, 0xD0),
+ _INIT_DCS_CMD(0x63, 0x70),
+ _INIT_DCS_CMD(0xC2, 0xCF),
+ _INIT_DCS_CMD(0x9B, 0x0F),
+ _INIT_DCS_CMD(0x9A, 0x08),
+ _INIT_DCS_CMD(0x00, 0x03),
+ _INIT_DCS_CMD(0x01, 0x03),
+ _INIT_DCS_CMD(0x02, 0x03),
+ _INIT_DCS_CMD(0x03, 0x03),
+ _INIT_DCS_CMD(0x04, 0x03),
+ _INIT_DCS_CMD(0x05, 0x03),
+ _INIT_DCS_CMD(0x06, 0x22),
+ _INIT_DCS_CMD(0x07, 0x06),
+ _INIT_DCS_CMD(0x08, 0x00),
+ _INIT_DCS_CMD(0x09, 0x1D),
+ _INIT_DCS_CMD(0x0A, 0x1C),
+ _INIT_DCS_CMD(0x0B, 0x13),
+ _INIT_DCS_CMD(0x0C, 0x12),
+ _INIT_DCS_CMD(0x0D, 0x11),
+ _INIT_DCS_CMD(0x0E, 0x10),
+ _INIT_DCS_CMD(0x0F, 0x0F),
+ _INIT_DCS_CMD(0x10, 0x0E),
+ _INIT_DCS_CMD(0x11, 0x0D),
+ _INIT_DCS_CMD(0x12, 0x0C),
+ _INIT_DCS_CMD(0x13, 0x04),
+ _INIT_DCS_CMD(0x14, 0x03),
+ _INIT_DCS_CMD(0x15, 0x03),
+ _INIT_DCS_CMD(0x16, 0x03),
+ _INIT_DCS_CMD(0x17, 0x03),
+ _INIT_DCS_CMD(0x18, 0x03),
+ _INIT_DCS_CMD(0x19, 0x03),
+ _INIT_DCS_CMD(0x1A, 0x03),
+ _INIT_DCS_CMD(0x1B, 0x03),
+ _INIT_DCS_CMD(0x1C, 0x22),
+ _INIT_DCS_CMD(0x1D, 0x06),
+ _INIT_DCS_CMD(0x1E, 0x00),
+ _INIT_DCS_CMD(0x1F, 0x1D),
+ _INIT_DCS_CMD(0x20, 0x1C),
+ _INIT_DCS_CMD(0x21, 0x13),
+ _INIT_DCS_CMD(0x22, 0x12),
+ _INIT_DCS_CMD(0x23, 0x11),
+ _INIT_DCS_CMD(0x24, 0x10),
+ _INIT_DCS_CMD(0x25, 0x0F),
+ _INIT_DCS_CMD(0x26, 0x0E),
+ _INIT_DCS_CMD(0x27, 0x0D),
+ _INIT_DCS_CMD(0x28, 0x0C),
+ _INIT_DCS_CMD(0x29, 0x04),
+ _INIT_DCS_CMD(0x2A, 0x03),
+ _INIT_DCS_CMD(0x2B, 0x03),
+
+ _INIT_DCS_CMD(0x2F, 0x06),
+ _INIT_DCS_CMD(0x30, 0x32),
+ _INIT_DCS_CMD(0x31, 0x43),
+ _INIT_DCS_CMD(0x33, 0x06),
+ _INIT_DCS_CMD(0x34, 0x32),
+ _INIT_DCS_CMD(0x35, 0x43),
+ _INIT_DCS_CMD(0x37, 0x44),
+ _INIT_DCS_CMD(0x38, 0x40),
+ _INIT_DCS_CMD(0x39, 0x00),
+ _INIT_DCS_CMD(0x3A, 0x01),
+ _INIT_DCS_CMD(0x3B, 0x48),
+ _INIT_DCS_CMD(0x3D, 0x93),
+ _INIT_DCS_CMD(0xAB, 0x44),
+ _INIT_DCS_CMD(0xAC, 0x40),
+
+ _INIT_DCS_CMD(0x4D, 0x21),
+ _INIT_DCS_CMD(0x4E, 0x43),
+ _INIT_DCS_CMD(0x4F, 0x65),
+ _INIT_DCS_CMD(0x50, 0x87),
+ _INIT_DCS_CMD(0x51, 0x78),
+ _INIT_DCS_CMD(0x52, 0x56),
+ _INIT_DCS_CMD(0x53, 0x34),
+ _INIT_DCS_CMD(0x54, 0x21),
+ _INIT_DCS_CMD(0x55, 0x83),
+ _INIT_DCS_CMD(0x56, 0x08),
+ _INIT_DCS_CMD(0x58, 0x21),
+ _INIT_DCS_CMD(0x59, 0x40),
+ _INIT_DCS_CMD(0x5A, 0x09),
+ _INIT_DCS_CMD(0x5B, 0x48),
+ _INIT_DCS_CMD(0x5E, 0x00, 0x10),
+ _INIT_DCS_CMD(0x5F, 0x00),
+
+ _INIT_DCS_CMD(0x7A, 0x00),
+ _INIT_DCS_CMD(0x7B, 0x00),
+ _INIT_DCS_CMD(0x7C, 0x00),
+ _INIT_DCS_CMD(0x7D, 0x00),
+ _INIT_DCS_CMD(0x7E, 0x20),
+ _INIT_DCS_CMD(0x7F, 0x3C),
+ _INIT_DCS_CMD(0x80, 0x00),
+ _INIT_DCS_CMD(0x81, 0x00),
+ _INIT_DCS_CMD(0x82, 0x08),
+ _INIT_DCS_CMD(0x97, 0x02),
+ _INIT_DCS_CMD(0xC5, 0x10),
+ _INIT_DCS_CMD(0xDA, 0x05),
+ _INIT_DCS_CMD(0xDB, 0x01),
+ _INIT_DCS_CMD(0xDC, 0x7A),
+ _INIT_DCS_CMD(0xDD, 0x55),
+ _INIT_DCS_CMD(0xDE, 0x27),
+ _INIT_DCS_CMD(0xDF, 0x01),
+ _INIT_DCS_CMD(0xE0, 0x7A),
+ _INIT_DCS_CMD(0xE1, 0x01),
+ _INIT_DCS_CMD(0xE2, 0x7A),
+ _INIT_DCS_CMD(0xE3, 0x01),
+ _INIT_DCS_CMD(0xE4, 0x7A),
+ _INIT_DCS_CMD(0xE5, 0x01),
+ _INIT_DCS_CMD(0xE6, 0x7A),
+ _INIT_DCS_CMD(0xE7, 0x00),
+ _INIT_DCS_CMD(0xE8, 0x00),
+ _INIT_DCS_CMD(0xE9, 0x01),
+ _INIT_DCS_CMD(0xEA, 0x7A),
+ _INIT_DCS_CMD(0xEB, 0x01),
+ _INIT_DCS_CMD(0xEE, 0x7A),
+ _INIT_DCS_CMD(0xEF, 0x01),
+ _INIT_DCS_CMD(0xF0, 0x7A),
+
+ _INIT_DCS_CMD(0xFF, 0x25),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x05, 0x00),
+
+ _INIT_DCS_CMD(0xF1, 0x10),
+ _INIT_DCS_CMD(0x1E, 0x00),
+ _INIT_DCS_CMD(0x1F, 0x09),
+ _INIT_DCS_CMD(0x20, 0x46),
+ _INIT_DCS_CMD(0x25, 0x00),
+ _INIT_DCS_CMD(0x26, 0x09),
+ _INIT_DCS_CMD(0x27, 0x46),
+ _INIT_DCS_CMD(0x3F, 0x80),
+ _INIT_DCS_CMD(0x40, 0x00),
+ _INIT_DCS_CMD(0x43, 0x00),
+
+ _INIT_DCS_CMD(0x44, 0x09),
+ _INIT_DCS_CMD(0x45, 0x46),
+
+ _INIT_DCS_CMD(0x48, 0x09),
+ _INIT_DCS_CMD(0x49, 0x46),
+ _INIT_DCS_CMD(0x5B, 0x80),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x01),
+ _INIT_DCS_CMD(0x5E, 0x46),
+ _INIT_DCS_CMD(0x61, 0x01),
+ _INIT_DCS_CMD(0x62, 0x46),
+ _INIT_DCS_CMD(0x68, 0x10),
+ _INIT_DCS_CMD(0xFF, 0x26),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x00, 0xA1),
+ _INIT_DCS_CMD(0x02, 0x31),
+ _INIT_DCS_CMD(0x0A, 0xF2),
+ _INIT_DCS_CMD(0x04, 0x28),
+ _INIT_DCS_CMD(0x06, 0x30),
+ _INIT_DCS_CMD(0x0C, 0x16),
+ _INIT_DCS_CMD(0x0D, 0x0D),
+ _INIT_DCS_CMD(0x0F, 0x00),
+ _INIT_DCS_CMD(0x11, 0x00),
+ _INIT_DCS_CMD(0x12, 0x50),
+ _INIT_DCS_CMD(0x13, 0x56),
+ _INIT_DCS_CMD(0x14, 0x57),
+ _INIT_DCS_CMD(0x15, 0x00),
+ _INIT_DCS_CMD(0x16, 0x10),
+ _INIT_DCS_CMD(0x17, 0xA0),
+ _INIT_DCS_CMD(0x18, 0x86),
+ _INIT_DCS_CMD(0x22, 0x00),
+ _INIT_DCS_CMD(0x23, 0x00),
+ _INIT_DCS_CMD(0x19, 0x0D),
+ _INIT_DCS_CMD(0x1A, 0x7F),
+ _INIT_DCS_CMD(0x1B, 0x0C),
+ _INIT_DCS_CMD(0x1C, 0xBF),
+ _INIT_DCS_CMD(0x2A, 0x0D),
+ _INIT_DCS_CMD(0x2B, 0x7F),
+ _INIT_DCS_CMD(0x20, 0x00),
+
+ _INIT_DCS_CMD(0x1D, 0x00),
+ _INIT_DCS_CMD(0x1E, 0x78),
+ _INIT_DCS_CMD(0x1F, 0x78),
+
+ _INIT_DCS_CMD(0x2F, 0x03),
+ _INIT_DCS_CMD(0x30, 0x78),
+ _INIT_DCS_CMD(0x33, 0x78),
+ _INIT_DCS_CMD(0x34, 0x66),
+ _INIT_DCS_CMD(0x35, 0x11),
+
+ _INIT_DCS_CMD(0x39, 0x10),
+ _INIT_DCS_CMD(0x3A, 0x78),
+ _INIT_DCS_CMD(0x3B, 0x06),
+
+ _INIT_DCS_CMD(0xC8, 0x04),
+ _INIT_DCS_CMD(0xC9, 0x84),
+ _INIT_DCS_CMD(0xCA, 0x4E),
+ _INIT_DCS_CMD(0xCB, 0x00),
+
+ _INIT_DCS_CMD(0xA9, 0x50),
+ _INIT_DCS_CMD(0xAA, 0x4F),
+ _INIT_DCS_CMD(0xAB, 0x4D),
+ _INIT_DCS_CMD(0xAC, 0x4A),
+ _INIT_DCS_CMD(0xAD, 0x48),
+ _INIT_DCS_CMD(0xAE, 0x46),
+ _INIT_DCS_CMD(0xFF, 0x27),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xC0, 0x18),
+ _INIT_DCS_CMD(0xC1, 0x00),
+ _INIT_DCS_CMD(0xC2, 0x00),
+ _INIT_DCS_CMD(0x56, 0x06),
+ _INIT_DCS_CMD(0x58, 0x80),
+ _INIT_DCS_CMD(0x59, 0x75),
+ _INIT_DCS_CMD(0x5A, 0x00),
+ _INIT_DCS_CMD(0x5B, 0x02),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0x20),
+ _INIT_DCS_CMD(0x5F, 0x10),
+ _INIT_DCS_CMD(0x60, 0x00),
+ _INIT_DCS_CMD(0x61, 0x2E),
+ _INIT_DCS_CMD(0x62, 0x00),
+ _INIT_DCS_CMD(0x63, 0x01),
+ _INIT_DCS_CMD(0x64, 0x43),
+ _INIT_DCS_CMD(0x65, 0x2D),
+ _INIT_DCS_CMD(0x66, 0x00),
+ _INIT_DCS_CMD(0x67, 0x01),
+ _INIT_DCS_CMD(0x68, 0x43),
+ _INIT_DCS_CMD(0x98, 0x01),
+ _INIT_DCS_CMD(0xB4, 0x03),
+ _INIT_DCS_CMD(0x9B, 0xBD),
+ _INIT_DCS_CMD(0xA0, 0x90),
+ _INIT_DCS_CMD(0xAB, 0x1B),
+ _INIT_DCS_CMD(0xBC, 0x0C),
+ _INIT_DCS_CMD(0xBD, 0x28),
+
+ _INIT_DCS_CMD(0xFF, 0x2A),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x22, 0x2F),
+ _INIT_DCS_CMD(0x23, 0x08),
+
+ _INIT_DCS_CMD(0x24, 0x00),
+ _INIT_DCS_CMD(0x25, 0x65),
+ _INIT_DCS_CMD(0x26, 0xF8),
+ _INIT_DCS_CMD(0x27, 0x00),
+ _INIT_DCS_CMD(0x28, 0x1A),
+ _INIT_DCS_CMD(0x29, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x1A),
+ _INIT_DCS_CMD(0x2B, 0x00),
+ _INIT_DCS_CMD(0x2D, 0x1A),
+
+ _INIT_DCS_CMD(0x64, 0x96),
+ _INIT_DCS_CMD(0x65, 0x00),
+ _INIT_DCS_CMD(0x66, 0x00),
+ _INIT_DCS_CMD(0x6A, 0x96),
+ _INIT_DCS_CMD(0x6B, 0x00),
+ _INIT_DCS_CMD(0x6C, 0x00),
+ _INIT_DCS_CMD(0x70, 0x92),
+ _INIT_DCS_CMD(0x71, 0x00),
+ _INIT_DCS_CMD(0x72, 0x00),
+ _INIT_DCS_CMD(0xA2, 0x33),
+ _INIT_DCS_CMD(0xA3, 0x30),
+ _INIT_DCS_CMD(0xA4, 0xC0),
+ _INIT_DCS_CMD(0xE8, 0x00),
+ _INIT_DCS_CMD(0xFF, 0xF0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x3A, 0x08),
+ _INIT_DCS_CMD(0xFF, 0xD0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x00, 0x33),
+ _INIT_DCS_CMD(0x02, 0x77),
+ _INIT_DCS_CMD(0x08, 0x01),
+ _INIT_DCS_CMD(0x09, 0xBF),
+ _INIT_DCS_CMD(0x28, 0x30),
+ _INIT_DCS_CMD(0x2F, 0x33),
+ _INIT_DCS_CMD(0xFF, 0x23),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x00, 0x80),
+ _INIT_DCS_CMD(0x07, 0x00),
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x30, 0x00),
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xB9, 0x01),
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0x18, 0x40),
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xB9, 0x02),
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xBB, 0x13),
+ _INIT_DCS_CMD(0x3B, 0x03, 0x96, 0x1A, 0x04, 0x04),
+ _INIT_DCS_CMD(0x35, 0x00),
+ _INIT_DCS_CMD(0x51, 0x0F, 0xFF),
+ _INIT_DCS_CMD(0x53, 0x24),
+ _INIT_DELAY_CMD(100),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(200),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(100),
+ {},
+};
+
static const struct panel_init_cmd boe_init_cmd[] = {
_INIT_DELAY_CMD(24),
_INIT_DCS_CMD(0xB0, 0x05),
@@ -511,13 +1176,15 @@ static int boe_panel_unprepare(struct drm_panel *panel)
gpiod_set_value(boe->enable_gpio, 0);
usleep_range(5000, 7000);
regulator_disable(boe->pp1800);
+ regulator_disable(boe->pp3300);
} else {
gpiod_set_value(boe->enable_gpio, 0);
- usleep_range(500, 1000);
+ usleep_range(1000, 2000);
regulator_disable(boe->avee);
regulator_disable(boe->avdd);
usleep_range(5000, 7000);
regulator_disable(boe->pp1800);
+ regulator_disable(boe->pp3300);
}
boe->prepared = false;
@@ -536,6 +1203,10 @@ static int boe_panel_prepare(struct drm_panel *panel)
gpiod_set_value(boe->enable_gpio, 0);
usleep_range(1000, 1500);
+ ret = regulator_enable(boe->pp3300);
+ if (ret < 0)
+ return ret;
+
ret = regulator_enable(boe->pp1800);
if (ret < 0)
return ret;
@@ -549,7 +1220,7 @@ static int boe_panel_prepare(struct drm_panel *panel)
if (ret < 0)
goto poweroffavdd;
- usleep_range(5000, 10000);
+ usleep_range(10000, 11000);
gpiod_set_value(boe->enable_gpio, 1);
usleep_range(1000, 2000);
@@ -586,6 +1257,64 @@ static int boe_panel_enable(struct drm_panel *panel)
return 0;
}
+static const struct drm_display_mode boe_tv110c9m_default_mode = {
+ .clock = 166594,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 40,
+ .hsync_end = 1200 + 40 + 8,
+ .htotal = 1200 + 40 + 8 + 28,
+ .vdisplay = 2000,
+ .vsync_start = 2000 + 26,
+ .vsync_end = 2000 + 26 + 2,
+ .vtotal = 2000 + 26 + 2 + 148,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv110c9m_desc = {
+ .modes = &boe_tv110c9m_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 143,
+ .height_mm = 238,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
+ | MIPI_DSI_MODE_VIDEO_HSE
+ | MIPI_DSI_CLOCK_NON_CONTINUOUS
+ | MIPI_DSI_MODE_VIDEO_BURST,
+ .init_cmds = boe_tv110c9m_init_cmd,
+};
+
+static const struct drm_display_mode inx_hj110iz_default_mode = {
+ .clock = 166594,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 40,
+ .hsync_end = 1200 + 40 + 8,
+ .htotal = 1200 + 40 + 8 + 28,
+ .vdisplay = 2000,
+ .vsync_start = 2000 + 26,
+ .vsync_end = 2000 + 26 + 1,
+ .vtotal = 2000 + 26 + 1 + 149,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc inx_hj110iz_desc = {
+ .modes = &inx_hj110iz_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 143,
+ .height_mm = 238,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
+ | MIPI_DSI_MODE_VIDEO_HSE
+ | MIPI_DSI_CLOCK_NON_CONTINUOUS
+ | MIPI_DSI_MODE_VIDEO_BURST,
+ .init_cmds = inx_init_cmd,
+};
+
static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
.clock = 159425,
.hdisplay = 1200,
@@ -767,6 +1496,10 @@ static int boe_panel_add(struct boe_panel *boe)
if (IS_ERR(boe->avee))
return PTR_ERR(boe->avee);
+ boe->pp3300 = devm_regulator_get(dev, "pp3300");
+ if (IS_ERR(boe->pp3300))
+ return PTR_ERR(boe->pp3300);
+
boe->pp1800 = devm_regulator_get(dev, "pp1800");
if (IS_ERR(boe->pp1800))
return PTR_ERR(boe->pp1800);
@@ -870,6 +1603,12 @@ static const struct of_device_id boe_of_match[] = {
{ .compatible = "boe,tv105wum-nw0",
.data = &boe_tv105wum_nw0_desc
},
+ { .compatible = "boe,tv110c9m-ll3",
+ .data = &boe_tv110c9m_desc
+ },
+ { .compatible = "innolux,hj110iz-01a",
+ .data = &inx_hj110iz_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_of_match);
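
The pp3300 rail added throughout the driver above follows its existing supply handling: devm_regulator_get() at probe time, enable before the rails that depend on it in prepare(), and disable after them in unprepare(). A reduced sketch of that pairing (error unwinding elided):

	/* probe */
	boe->pp3300 = devm_regulator_get(dev, "pp3300");
	if (IS_ERR(boe->pp3300))
		return PTR_ERR(boe->pp3300);

	/* prepare: parent rails power up before dependent rails */
	ret = regulator_enable(boe->pp3300);
	if (ret < 0)
		return ret;

	/* unprepare: tear down in reverse order */
	regulator_disable(boe->pp1800);
	regulator_disable(boe->pp3300);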
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
new file mode 100644
index 000000000000..fc03046de134
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -0,0 +1,1896 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_dp_aux_bus.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+
+/**
+ * struct panel_delay - Describes delays for a simple panel.
+ */
+struct panel_delay {
+ /**
+ * @hpd_reliable: Time for HPD to be reliable
+ *
+ * The time (in milliseconds) that it takes after powering the panel
+ * before the HPD signal is reliable. Ideally this is 0 but some panels,
+ * board designs, or bad pulldown configs can cause a glitch here.
+ *
+ * NOTE: on some old panel data this number appears to be much too big.
+ * Presumably some old panels simply didn't have HPD hooked up and put
+ * the hpd_absent delay here, because this field predates hpd_absent.
+ * While that works, it's non-ideal.
+ */
+ unsigned int hpd_reliable;
+
+ /**
+ * @hpd_absent: Time to wait if HPD isn't hooked up.
+ *
+ * Add this to the prepare delay if we know Hot Plug Detect isn't used.
+ *
+ * This is T3-max on eDP timing diagrams or the delay from power on
+ * until HPD is guaranteed to be asserted.
+ */
+ unsigned int hpd_absent;
+
+ /**
+ * @prepare_to_enable: Time between prepare and enable.
+ *
+ * The minimum time, in milliseconds, that needs to have passed
+ * between when prepare finished and enable may begin. If at
+ * enable time less time has passed since prepare finished,
+ * the driver waits for the remaining time.
+ *
+ * If a fixed enable delay is also specified, we'll start
+ * counting before delaying for the fixed delay.
+ *
+ * If a fixed prepare delay is also specified, we won't start
+ * counting until after the fixed delay. We can't overlap this
+ * fixed delay with the min time because the fixed delay
+ * doesn't happen at the end of the function if a HPD GPIO was
+ * specified.
+ *
+ * In other words:
+ * prepare()
+ * ...
+ * // do fixed prepare delay
+ * // wait for HPD GPIO if applicable
+ * // start counting for prepare_to_enable
+ *
+ * enable()
+ * // do fixed enable delay
+ * // enforce prepare_to_enable min time
+ *
+ * This is not specified in a standard way on eDP timing diagrams.
+ * It is effectively the time from HPD going high till you can
+ * turn on the backlight.
+ */
+ unsigned int prepare_to_enable;
+
+ /**
+ * @enable: Time for the panel to display a valid frame.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * display the first valid frame after starting to receive
+ * video data.
+ *
+ * This is (T6-min + max(T7-max, T8-min)) on eDP timing diagrams or
+ * the delay after link training finishes until we can turn the
+ * backlight on and see valid data.
+ */
+ unsigned int enable;
+
+ /**
+ * @disable: Time for the panel to turn the display off.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * turn the display off (no content is visible).
+ *
+ * This is T9-min (delay from backlight off to end of valid video
+ * data) on eDP timing diagrams. It is not common to set.
+ */
+ unsigned int disable;
+
+ /**
+ * @unprepare: Time to power down completely.
+ *
+ * The time (in milliseconds) that it takes for the panel
+ * to power itself down completely.
+ *
+ * This time is used to prevent a future "prepare" from
+ * starting until at least this many milliseconds has passed.
+ * If at prepare time less time has passed since unprepare
+ * finished, the driver waits for the remaining time.
+ *
+ * This is T12-min on eDP timing diagrams.
+ */
+ unsigned int unprepare;
+};
+
+/**
+ * struct panel_desc - Describes a simple panel.
+ */
+struct panel_desc {
+ /**
+ * @modes: Pointer to array of fixed modes appropriate for this panel.
+ *
+ * If the panel has only one mode, this can simply be the address of
+ * that mode. NOTE: cannot be used together with "timings"; if this is
+ * specified, the mode cannot be overridden from the device tree.
+ */
+ const struct drm_display_mode *modes;
+
+ /** @num_modes: Number of elements in modes array. */
+ unsigned int num_modes;
+
+ /**
+ * @timings: Pointer to array of display timings
+ *
+ * NOTE: cannot be used with "modes" and also these will be used to
+ * validate a device tree override if one is present.
+ */
+ const struct display_timing *timings;
+
+ /** @num_timings: Number of elements in timings array. */
+ unsigned int num_timings;
+
+ /** @bpc: Bits per color. */
+ unsigned int bpc;
+
+ /** @size: Structure containing the physical size of this panel. */
+ struct {
+ /**
+ * @size.width: Width (in mm) of the active display area.
+ */
+ unsigned int width;
+
+ /**
+ * @size.height: Height (in mm) of the active display area.
+ */
+ unsigned int height;
+ } size;
+
+ /** @delay: Structure containing various delay values for this panel. */
+ struct panel_delay delay;
+};
+
+/**
+ * struct edp_panel_entry - Maps panel ID to delay / panel name.
+ */
+struct edp_panel_entry {
+ /** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */
+ u32 panel_id;
+
+ /** @delay: The power sequencing delays needed for this panel. */
+ const struct panel_delay *delay;
+
+ /** @name: Name of this panel (for printing to logs). */
+ const char *name;
+};
+
+struct panel_edp {
+ struct drm_panel base;
+ bool enabled;
+ bool no_hpd;
+
+ bool prepared;
+
+ ktime_t prepared_time;
+ ktime_t unprepared_time;
+
+ const struct panel_desc *desc;
+
+ struct regulator *supply;
+ struct i2c_adapter *ddc;
+ struct drm_dp_aux *aux;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *hpd_gpio;
+
+ struct edid *edid;
+
+ struct drm_display_mode override_mode;
+
+ enum drm_panel_orientation orientation;
+};
+
+static inline struct panel_edp *to_panel_edp(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_edp, base);
+}
+
+static unsigned int panel_edp_get_timings_modes(struct panel_edp *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+ struct videomode vm;
+
+ videomode_from_timing(dt, &vm);
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ dev_err(panel->base.dev, "failed to add mode %ux%u\n",
+ dt->hactive.typ, dt->vactive.typ);
+ continue;
+ }
+
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_timings == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ return num;
+}
+
+static unsigned int panel_edp_get_display_modes(struct panel_edp *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
+ for (i = 0; i < panel->desc->num_modes; i++) {
+ const struct drm_display_mode *m = &panel->desc->modes[i];
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay,
+ drm_mode_vrefresh(m));
+ continue;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ return num;
+}
+
+static int panel_edp_get_non_edid_modes(struct panel_edp *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ bool has_override = panel->override_mode.type;
+ unsigned int num = 0;
+
+ if (!panel->desc)
+ return 0;
+
+ if (has_override) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel->override_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num = 1;
+ } else {
+ dev_err(panel->base.dev, "failed to add override mode\n");
+ }
+ }
+
+ /* Only add timings if override was not there or failed to validate */
+ if (num == 0 && panel->desc->num_timings)
+ num = panel_edp_get_timings_modes(panel, connector);
+
+ /*
+ * Only add fixed modes if timings/override added no mode.
+ *
+ * We should only ever have either the display timings specified
+ * or a fixed mode. Anything else is rather bogus.
+ */
+ WARN_ON(panel->desc->num_timings && panel->desc->num_modes);
+ if (num == 0)
+ num = panel_edp_get_display_modes(panel, connector);
+
+ connector->display_info.bpc = panel->desc->bpc;
+ connector->display_info.width_mm = panel->desc->size.width;
+ connector->display_info.height_mm = panel->desc->size.height;
+
+ return num;
+}
+
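+/*
+ * Sleep until at least @min_ms milliseconds have passed since
+ * @start_ktime; if they already have, return immediately. The "+ 1"
+ * below compensates for ktime_to_ms() truncating toward zero.
+ */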
+static void panel_edp_wait(ktime_t start_ktime, unsigned int min_ms)
+{
+ ktime_t now_ktime, min_ktime;
+
+ if (!min_ms)
+ return;
+
+ min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
+ now_ktime = ktime_get();
+
+ if (ktime_before(now_ktime, min_ktime))
+ msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
+}
+
+static int panel_edp_disable(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+
+ if (!p->enabled)
+ return 0;
+
+ if (p->desc->delay.disable)
+ msleep(p->desc->delay.disable);
+
+ p->enabled = false;
+
+ return 0;
+}
+
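+/*
+ * Runtime-PM suspend handler (wired up via panel_edp_pm_ops below):
+ * cut power to the panel and record the time so that a later prepare
+ * can enforce the unprepare (T12) delay.
+ */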
+static int panel_edp_suspend(struct device *dev)
+{
+ struct panel_edp *p = dev_get_drvdata(dev);
+
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
+ regulator_disable(p->supply);
+ p->unprepared_time = ktime_get();
+
+ return 0;
+}
+
+static int panel_edp_unprepare(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ int ret;
+
+ /* Unpreparing when already unprepared is a no-op */
+ if (!p->prepared)
+ return 0;
+
+ pm_runtime_mark_last_busy(panel->dev);
+ ret = pm_runtime_put_autosuspend(panel->dev);
+ if (ret < 0)
+ return ret;
+ p->prepared = false;
+
+ return 0;
+}
+
+static int panel_edp_get_hpd_gpio(struct device *dev, struct panel_edp *p)
+{
+ int err;
+
+ p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(p->hpd_gpio)) {
+ err = PTR_ERR(p->hpd_gpio);
+
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static int panel_edp_prepare_once(struct panel_edp *p)
+{
+ struct device *dev = p->base.dev;
+ unsigned int delay;
+ int err;
+ int hpd_asserted;
+ unsigned long hpd_wait_us;
+
+ panel_edp_wait(p->unprepared_time, p->desc->delay.unprepare);
+
+ err = regulator_enable(p->supply);
+ if (err < 0) {
+ dev_err(dev, "failed to enable supply: %d\n", err);
+ return err;
+ }
+
+ gpiod_set_value_cansleep(p->enable_gpio, 1);
+
+ delay = p->desc->delay.hpd_reliable;
+ if (p->no_hpd)
+ delay = max(delay, p->desc->delay.hpd_absent);
+ if (delay)
+ msleep(delay);
+
+ if (p->hpd_gpio) {
+ if (p->desc->delay.hpd_absent)
+ hpd_wait_us = p->desc->delay.hpd_absent * 1000UL;
+ else
+ hpd_wait_us = 2000000;
+
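+		/*
+		 * Poll the HPD GPIO about once per millisecond until it is
+		 * asserted or hpd_wait_us elapses; a negative read is
+		 * treated as an error below.
+		 */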
+ err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
+ hpd_asserted, hpd_asserted,
+ 1000, hpd_wait_us);
+ if (hpd_asserted < 0)
+ err = hpd_asserted;
+
+ if (err) {
+ if (err != -ETIMEDOUT)
+ dev_err(dev,
+ "error waiting for hpd GPIO: %d\n", err);
+ goto error;
+ }
+ }
+
+ p->prepared_time = ktime_get();
+
+ return 0;
+
+error:
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
+ regulator_disable(p->supply);
+ p->unprepared_time = ktime_get();
+
+ return err;
+}
+
+/*
+ * Some panels simply don't always come up and need to be power cycled to
+ * work properly. We'll allow for a handful of retries.
+ */
+#define MAX_PANEL_PREPARE_TRIES 5
+
+static int panel_edp_resume(struct device *dev)
+{
+ struct panel_edp *p = dev_get_drvdata(dev);
+ int ret;
+ int try;
+
+ for (try = 0; try < MAX_PANEL_PREPARE_TRIES; try++) {
+ ret = panel_edp_prepare_once(p);
+ if (ret != -ETIMEDOUT)
+ break;
+ }
+
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "Prepare timeout after %d tries\n", try);
+ else if (try)
+ dev_warn(dev, "Prepare needed %d retries\n", try);
+
+ return ret;
+}
+
+static int panel_edp_prepare(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ int ret;
+
+ /* Preparing when already prepared is a no-op */
+ if (p->prepared)
+ return 0;
+
+ ret = pm_runtime_get_sync(panel->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(panel->dev);
+ return ret;
+ }
+
+ p->prepared = true;
+
+ return 0;
+}
+
+static int panel_edp_enable(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ unsigned int delay;
+
+ if (p->enabled)
+ return 0;
+
+ delay = p->desc->delay.enable;
+
+ /*
+ * If there is a "prepare_to_enable" delay then that's supposed to be
+ * the delay from HPD going high until we can turn the backlight on.
+ * However, we can only count this if HPD is handled by the panel
+ * driver, not if it goes to a dedicated pin on the controller.
+ * If we aren't handling the HPD pin ourselves then the best we
+ * can do is assume that HPD went high immediately before we were
+ * called (and link training took zero time).
+ *
+ * NOTE: if we ever end up in this "if" statement then we're
+ * guaranteed that the panel_edp_wait() call below will do no delay.
+ * It already handles that case, though, so we don't need any special
+ * code for it.
+ */
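+	/*
+	 * Illustration with made-up numbers: if prepare_to_enable is 80
+	 * and enable is 50 and we don't own the HPD GPIO, we sleep for
+	 * max(50, 80) = 80 ms here and the panel_edp_wait() below is a
+	 * no-op.
+	 */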
+ if (p->desc->delay.prepare_to_enable && !p->hpd_gpio && !p->no_hpd)
+ delay = max(delay, p->desc->delay.prepare_to_enable);
+
+ if (delay)
+ msleep(delay);
+
+ panel_edp_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
+
+ p->enabled = true;
+
+ return 0;
+}
+
+static int panel_edp_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ int num = 0;
+
+ /* probe EDID if a DDC bus is available */
+ if (p->ddc) {
+ pm_runtime_get_sync(panel->dev);
+
+ if (!p->edid)
+ p->edid = drm_get_edid(connector, p->ddc);
+
+ if (p->edid)
+ num += drm_add_edid_modes(connector, p->edid);
+
+ pm_runtime_mark_last_busy(panel->dev);
+ pm_runtime_put_autosuspend(panel->dev);
+ }
+
+ /*
+ * Add hard-coded panel modes. Don't call this if there are no timings
+ * and no modes (the generic edp-panel case) because it will clobber
+ * the display_info that was already set by drm_add_edid_modes().
+ */
+ if (p->desc->num_timings || p->desc->num_modes)
+ num += panel_edp_get_non_edid_modes(p, connector);
+ else if (!num)
+ dev_warn(p->base.dev, "No display modes\n");
+
+ /* set up connector's "panel orientation" property */
+ drm_connector_set_panel_orientation(connector, p->orientation);
+
+ return num;
+}
+
+static int panel_edp_get_timings(struct drm_panel *panel,
+ unsigned int num_timings,
+ struct display_timing *timings)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ unsigned int i;
+
+ if (p->desc->num_timings < num_timings)
+ num_timings = p->desc->num_timings;
+
+ if (timings)
+ for (i = 0; i < num_timings; i++)
+ timings[i] = p->desc->timings[i];
+
+ return p->desc->num_timings;
+}
+
+static const struct drm_panel_funcs panel_edp_funcs = {
+ .disable = panel_edp_disable,
+ .unprepare = panel_edp_unprepare,
+ .prepare = panel_edp_prepare,
+ .enable = panel_edp_enable,
+ .get_modes = panel_edp_get_modes,
+ .get_timings = panel_edp_get_timings,
+};
+
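+/*
+ * Check whether the typical value of @field in @to_check falls within
+ * the [min, max] bounds of the same field in @bounds.
+ */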
+#define PANEL_EDP_BOUNDS_CHECK(to_check, bounds, field) \
+ (to_check->field.typ >= bounds->field.min && \
+ to_check->field.typ <= bounds->field.max)
+static void panel_edp_parse_panel_timing_node(struct device *dev,
+ struct panel_edp *panel,
+ const struct display_timing *ot)
+{
+ const struct panel_desc *desc = panel->desc;
+ struct videomode vm;
+ unsigned int i;
+
+ if (WARN_ON(desc->num_modes)) {
+ dev_err(dev, "Reject override mode: panel has a fixed mode\n");
+ return;
+ }
+ if (WARN_ON(!desc->num_timings)) {
+ dev_err(dev, "Reject override mode: no timings specified\n");
+ return;
+ }
+
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+
+ if (!PANEL_EDP_BOUNDS_CHECK(ot, dt, hactive) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, hfront_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, hback_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, hsync_len) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vactive) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vfront_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vback_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vsync_len))
+ continue;
+
+ if (ot->flags != dt->flags)
+ continue;
+
+ videomode_from_timing(ot, &vm);
+ drm_display_mode_from_videomode(&vm, &panel->override_mode);
+ panel->override_mode.type |= DRM_MODE_TYPE_DRIVER |
+ DRM_MODE_TYPE_PREFERRED;
+ break;
+ }
+
+ if (WARN_ON(!panel->override_mode.type))
+ dev_err(dev, "Reject override mode: No display_timing found\n");
+}
+
+static const struct edp_panel_entry *find_edp_panel(u32 panel_id);
+
+static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
+{
+ const struct edp_panel_entry *edp_panel;
+ struct panel_desc *desc;
+ u32 panel_id;
+ char vend[4];
+ u16 product_id;
+ u32 reliable_ms = 0;
+ u32 absent_ms = 0;
+ int ret;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ panel->desc = desc;
+
+ /*
+ * Read the dts properties for the initial probe. These are used by
+ * the runtime resume code which will get called by the
+ * pm_runtime_get_sync() call below.
+ */
+ of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
+ desc->delay.hpd_reliable = reliable_ms;
+ of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
+	desc->delay.hpd_absent = absent_ms;
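+
+	/*
+	 * An illustrative (hypothetical) device tree fragment setting
+	 * these properties:
+	 *
+	 *	panel {
+	 *		compatible = "edp-panel";
+	 *		hpd-reliable-delay-ms = <15>;
+	 *		hpd-absent-delay-ms = <200>;
+	 *	};
+	 */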
+
+ /* Power the panel on so we can read the EDID */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Couldn't power on panel to read EDID: %d\n", ret);
+ goto exit;
+ }
+
+ panel_id = drm_edid_get_panel_id(panel->ddc);
+ if (!panel_id) {
+ dev_err(dev, "Couldn't identify panel via EDID\n");
+ ret = -EIO;
+ goto exit;
+ }
+ drm_edid_decode_panel_id(panel_id, vend, &product_id);
+
+ edp_panel = find_edp_panel(panel_id);
+
+ /*
+	 * We're using non-optimized timings and want it to be really
+	 * obvious that someone needs to add an entry to the table, so
+	 * we'll do a WARN_ON splat.
+ */
+ if (WARN_ON(!edp_panel)) {
+ dev_warn(dev,
+ "Unknown panel %s %#06x, using conservative timings\n",
+ vend, product_id);
+
+ /*
+ * It's highly likely that the panel will work if we use very
+ * conservative timings, so let's do that. We already know that
+ * the HPD-related delays must have worked since we got this
+ * far, so we really just need the "unprepare" / "enable"
+ * delays. We don't need "prepare_to_enable" since that
+ * overlaps the "enable" delay anyway.
+ *
+		 * Nearly all panels have an "unprepare" delay of 500 ms,
+		 * though a few need 1000 ms. Let's use 2000 ms just to be
+		 * super conservative.
+ *
+ * An "enable" delay of 80 ms seems the most common, but we'll
+ * throw in 200 ms to be safe.
+ */
+ desc->delay.unprepare = 2000;
+ desc->delay.enable = 200;
+ } else {
+ dev_info(dev, "Detected %s %s (%#06x)\n",
+ vend, edp_panel->name, product_id);
+
+ /* Update the delay; everything else comes from EDID */
+ desc->delay = *edp_panel->delay;
+ }
+
+ ret = 0;
+exit:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
+ struct drm_dp_aux *aux)
+{
+ struct panel_edp *panel;
+ struct display_timing dt;
+ struct device_node *ddc;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->enabled = false;
+ panel->prepared_time = 0;
+ panel->desc = desc;
+ panel->aux = aux;
+
+ panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+ if (!panel->no_hpd) {
+ err = panel_edp_get_hpd_gpio(dev, panel);
+ if (err)
+ return err;
+ }
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply))
+ return PTR_ERR(panel->supply);
+
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(panel->enable_gpio)) {
+ err = PTR_ERR(panel->enable_gpio);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to request GPIO: %d\n", err);
+ return err;
+ }
+
+ err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
+ if (err) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
+ return err;
+ }
+
+ ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
+ if (ddc) {
+ panel->ddc = of_find_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+
+ if (!panel->ddc)
+ return -EPROBE_DEFER;
+ } else if (aux) {
+ panel->ddc = &aux->ddc;
+ }
+
+ if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_edp_parse_panel_timing_node(dev, panel, &dt);
+
+ dev_set_drvdata(dev, panel);
+
+ drm_panel_init(&panel->base, dev, &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
+
+ err = drm_panel_of_backlight(&panel->base);
+ if (err)
+ goto err_finished_ddc_init;
+
+ /*
+	 * We use runtime PM for prepare / unprepare since those power the
+	 * panel on and off, which can be very slow operations. Together with
+	 * autosuspend this lets us power the panel on briefly to read the
+	 * EDID without a full power cycle before the panel is enabled.
+ */
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
+ if (of_device_is_compatible(dev->of_node, "edp-panel")) {
+ err = generic_edp_panel_probe(dev, panel);
+ if (err) {
+ dev_err_probe(dev, err,
+ "Couldn't detect panel nor find a fallback\n");
+ goto err_finished_pm_runtime;
+ }
+ /* generic_edp_panel_probe() replaces desc in the panel */
+ desc = panel->desc;
+ } else if (desc->bpc != 6 && desc->bpc != 8 && desc->bpc != 10) {
+ dev_warn(dev, "Expected bpc in {6,8,10} but got: %u\n", desc->bpc);
+ }
+
+ if (!panel->base.backlight && panel->aux) {
+ pm_runtime_get_sync(dev);
+ err = drm_panel_dp_aux_backlight(&panel->base, panel->aux);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ if (err)
+ goto err_finished_pm_runtime;
+ }
+
+ drm_panel_add(&panel->base);
+
+ return 0;
+
+err_finished_pm_runtime:
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+err_finished_ddc_init:
+ if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ put_device(&panel->ddc->dev);
+
+ return err;
+}
+
+static int panel_edp_remove(struct device *dev)
+{
+ struct panel_edp *panel = dev_get_drvdata(dev);
+
+ drm_panel_remove(&panel->base);
+ drm_panel_disable(&panel->base);
+ drm_panel_unprepare(&panel->base);
+
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+ if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ put_device(&panel->ddc->dev);
+
+ kfree(panel->edid);
+ panel->edid = NULL;
+
+ return 0;
+}
+
+static void panel_edp_shutdown(struct device *dev)
+{
+ struct panel_edp *panel = dev_get_drvdata(dev);
+
+ drm_panel_disable(&panel->base);
+ drm_panel_unprepare(&panel->base);
+}
+
+static const struct display_timing auo_b101ean01_timing = {
+ .pixelclock = { 65300000, 72500000, 75000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 18, 119, 119 },
+ .hback_porch = { 21, 21, 21 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 4, 4 },
+ .vback_porch = { 8, 8, 8 },
+ .vsync_len = { 18, 20, 20 },
+};
+
+static const struct panel_desc auo_b101ean01 = {
+ .timings = &auo_b101ean01_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+};
+
+static const struct drm_display_mode auo_b116xak01_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 10,
+ .vdisplay = 768,
+ .vsync_start = 768 + 4,
+ .vsync_end = 768 + 4 + 6,
+ .vtotal = 768 + 4 + 6 + 15,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xak01 = {
+ .modes = &auo_b116xak01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ },
+};
+
+static const struct drm_display_mode auo_b116xw03_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+ .modes = &auo_b116xw03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .enable = 400,
+ },
+};
+
+static const struct drm_display_mode auo_b133han05_mode = {
+ .clock = 142600,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 58,
+ .hsync_end = 1920 + 58 + 42,
+ .htotal = 1920 + 58 + 42 + 60,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 54,
+};
+
+static const struct panel_desc auo_b133han05 = {
+ .modes = &auo_b133han05_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_reliable = 100,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
+static const struct drm_display_mode auo_b133htn01_mode = {
+ .clock = 150660,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 172,
+ .hsync_end = 1920 + 172 + 80,
+ .htotal = 1920 + 172 + 80 + 60,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 25,
+ .vsync_end = 1080 + 25 + 10,
+ .vtotal = 1080 + 25 + 10 + 10,
+};
+
+static const struct panel_desc auo_b133htn01 = {
+ .modes = &auo_b133htn01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_reliable = 105,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
+static const struct drm_display_mode auo_b133xtn01_mode = {
+ .clock = 69500,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 20,
+ .vdisplay = 768,
+ .vsync_start = 768 + 3,
+ .vsync_end = 768 + 3 + 6,
+ .vtotal = 768 + 3 + 6 + 13,
+};
+
+static const struct panel_desc auo_b133xtn01 = {
+ .modes = &auo_b133xtn01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+};
+
+static const struct drm_display_mode auo_b140han06_mode = {
+ .clock = 141000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 16,
+ .hsync_end = 1920 + 16 + 16,
+ .htotal = 1920 + 16 + 16 + 152,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 14,
+ .vtotal = 1080 + 3 + 14 + 19,
+};
+
+static const struct panel_desc auo_b140han06 = {
+ .modes = &auo_b140han06_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 309,
+ .height = 174,
+ },
+ .delay = {
+ .hpd_reliable = 100,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
+static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
+ {
+ .clock = 71900,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 5,
+ .vtotal = 800 + 3 + 5 + 24,
+ },
+ {
+ .clock = 57500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 5,
+ .vtotal = 800 + 3 + 5 + 24,
+ },
+};
+
+static const struct panel_desc boe_nv101wxmn51 = {
+ .modes = boe_nv101wxmn51_modes,
+ .num_modes = ARRAY_SIZE(boe_nv101wxmn51_modes),
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ /* TODO: should be hpd-absent and no-hpd should be set? */
+ .hpd_reliable = 210,
+ .enable = 50,
+ .unprepare = 160,
+ },
+};
+
+static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
+ {
+ .clock = 207800,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ {
+ .clock = 138500,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct panel_desc boe_nv110wtm_n61 = {
+ .modes = boe_nv110wtm_n61_modes,
+ .num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
+ .bpc = 8,
+ .size = {
+ .width = 233,
+ .height = 155,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .prepare_to_enable = 80,
+ .enable = 50,
+ .unprepare = 500,
+ },
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct drm_display_mode boe_nv133fhm_n61_modes = {
+ .clock = 147840,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 6,
+ .vtotal = 1080 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct panel_desc boe_nv133fhm_n61 = {
+ .modes = &boe_nv133fhm_n61_modes,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ /*
+ * When power is first given to the panel there's a short
+ * spike on the HPD line. It was explained that this spike
+ * was until the TCON data download was complete. On
+ * one system this was measured at 8 ms. We'll put 15 ms
+ * in the prepare delay just to be safe. That means:
+ * - If HPD isn't hooked up you still have 200 ms delay.
+ * - If HPD is hooked up we won't try to look at it for the
+ * first 15 ms.
+ */
+ .hpd_reliable = 15,
+ .hpd_absent = 200,
+
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
+ {
+ .clock = 148500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 2200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1125,
+ },
+};
+
+static const struct panel_desc boe_nv140fhmn49 = {
+ .modes = boe_nv140fhmn49_modes,
+ .num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
+ .bpc = 6,
+ .size = {
+ .width = 309,
+ .height = 174,
+ },
+ .delay = {
+ /* TODO: should be hpd-absent and no-hpd should be set? */
+ .hpd_reliable = 210,
+ .enable = 50,
+ .unprepare = 160,
+ },
+};
+
+static const struct drm_display_mode innolux_n116bca_ea1_mode = {
+ .clock = 76420,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 136,
+ .hsync_end = 1366 + 136 + 30,
+ .htotal = 1366 + 136 + 30 + 60,
+ .vdisplay = 768,
+ .vsync_start = 768 + 8,
+ .vsync_end = 768 + 8 + 12,
+ .vtotal = 768 + 8 + 12 + 12,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc innolux_n116bca_ea1 = {
+ .modes = &innolux_n116bca_ea1_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .prepare_to_enable = 80,
+ .unprepare = 500,
+ },
+};
+
+/*
+ * Datasheet specifies that at 60 Hz refresh rate:
+ * - total horizontal time: { 1506, 1592, 1716 }
+ * - total vertical time: { 788, 800, 868 }
+ *
+ * ...but doesn't go into exactly how that should be split into a front
+ * porch, back porch, or sync length. For now we'll leave a single setting
+ * here which allows a bit of tweaking of the pixel clock at the expense of
+ * refresh rate.
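+ *
+ * As a quick sanity check with the typical values below:
+ * 76420000 / ((1366 + 136 + 60 + 30) * (768 + 8 + 12 + 12)) =
+ * 76420000 / (1592 * 800), which is almost exactly 60 Hz.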
+ */
+static const struct display_timing innolux_n116bge_timing = {
+ .pixelclock = { 72600000, 76420000, 80240000 },
+ .hactive = { 1366, 1366, 1366 },
+ .hfront_porch = { 136, 136, 136 },
+ .hback_porch = { 60, 60, 60 },
+ .hsync_len = { 30, 30, 30 },
+ .vactive = { 768, 768, 768 },
+ .vfront_porch = { 8, 8, 8 },
+ .vback_porch = { 12, 12, 12 },
+ .vsync_len = { 12, 12, 12 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
+};
+
+static const struct panel_desc innolux_n116bge = {
+ .timings = &innolux_n116bge_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+};
+
+static const struct drm_display_mode innolux_n125hce_gn1_mode = {
+ .clock = 162000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 40,
+ .hsync_end = 1920 + 40 + 40,
+ .htotal = 1920 + 40 + 40 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 4,
+ .vsync_end = 1080 + 4 + 4,
+ .vtotal = 1080 + 4 + 4 + 24,
+};
+
+static const struct panel_desc innolux_n125hce_gn1 = {
+ .modes = &innolux_n125hce_gn1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 276,
+ .height = 155,
+ },
+};
+
+static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
+ .clock = 206016,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 80,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 10,
+ .vtotal = 1440 + 3 + 10 + 27,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc innolux_p120zdg_bf1 = {
+ .modes = &innolux_p120zdg_bf1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 254,
+ .height = 169,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
+ .clock = 138778,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 24,
+ .hsync_end = 1920 + 24 + 48,
+ .htotal = 1920 + 24 + 48 + 88,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 12,
+ .vtotal = 1080 + 3 + 12 + 17,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ivo_m133nwf4_r0 = {
+ .modes = &ivo_m133nwf4_r0_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode kingdisplay_kd116n21_30nv_a010_mode = {
+ .clock = 81000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 32,
+ .htotal = 1366 + 40 + 32 + 62,
+ .vdisplay = 768,
+ .vsync_start = 768 + 5,
+ .vsync_end = 768 + 5 + 5,
+ .vtotal = 768 + 5 + 5 + 122,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc kingdisplay_kd116n21_30nv_a010 = {
+ .modes = &kingdisplay_kd116n21_30nv_a010_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ },
+};
+
+static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
+ .clock = 200000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 12,
+ .hsync_end = 1536 + 12 + 16,
+ .htotal = 1536 + 12 + 16 + 48,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 8,
+ .vsync_end = 2048 + 8 + 4,
+ .vtotal = 2048 + 8 + 4 + 8,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc lg_lp079qx1_sp0v = {
+ .modes = &lg_lp079qx1_sp0v_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 129,
+ .height = 171,
+ },
+};
+
+static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
+ .clock = 205210,
+ .hdisplay = 2048,
+ .hsync_start = 2048 + 150,
+ .hsync_end = 2048 + 150 + 5,
+ .htotal = 2048 + 150 + 5 + 5,
+ .vdisplay = 1536,
+ .vsync_start = 1536 + 3,
+ .vsync_end = 1536 + 3 + 1,
+ .vtotal = 1536 + 3 + 1 + 9,
+};
+
+static const struct panel_desc lg_lp097qx1_spa1 = {
+ .modes = &lg_lp097qx1_spa1_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 208,
+ .height = 147,
+ },
+};
+
+static const struct drm_display_mode lg_lp120up1_mode = {
+ .clock = 162300,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 40,
+ .hsync_end = 1920 + 40 + 40,
+ .htotal = 1920 + 40 + 40 + 80,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 4,
+ .vsync_end = 1280 + 4 + 4,
+ .vtotal = 1280 + 4 + 4 + 12,
+};
+
+static const struct panel_desc lg_lp120up1 = {
+ .modes = &lg_lp120up1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 267,
+ .height = 183,
+ },
+};
+
+static const struct drm_display_mode lg_lp129qe_mode = {
+ .clock = 285250,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 48,
+ .hsync_end = 2560 + 48 + 32,
+ .htotal = 2560 + 48 + 32 + 80,
+ .vdisplay = 1700,
+ .vsync_start = 1700 + 3,
+ .vsync_end = 1700 + 3 + 10,
+ .vtotal = 1700 + 3 + 10 + 36,
+};
+
+static const struct panel_desc lg_lp129qe = {
+ .modes = &lg_lp129qe_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 272,
+ .height = 181,
+ },
+};
+
+static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
+ {
+ .clock = 138500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }, {
+ .clock = 110920,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }
+};
+
+static const struct panel_desc neweast_wjfh116008a = {
+ .modes = neweast_wjfh116008a_modes,
+ .num_modes = 2,
+ .bpc = 6,
+ .size = {
+ .width = 260,
+ .height = 150,
+ },
+ .delay = {
+ .hpd_reliable = 110,
+ .enable = 20,
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
+ .clock = 271560,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 48,
+ .hsync_end = 2560 + 48 + 32,
+ .htotal = 2560 + 48 + 32 + 80,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 2,
+ .vsync_end = 1600 + 2 + 5,
+ .vtotal = 1600 + 2 + 5 + 57,
+};
+
+static const struct panel_desc samsung_lsn122dl01_c01 = {
+ .modes = &samsung_lsn122dl01_c01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+};
+
+static const struct drm_display_mode samsung_ltn140at29_301_mode = {
+ .clock = 76300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 64,
+ .hsync_end = 1366 + 64 + 48,
+ .htotal = 1366 + 64 + 48 + 128,
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+ .vsync_end = 768 + 2 + 5,
+ .vtotal = 768 + 2 + 5 + 17,
+};
+
+static const struct panel_desc samsung_ltn140at29_301 = {
+ .modes = &samsung_ltn140at29_301_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 320,
+ .height = 187,
+ },
+};
+
+static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
+ .clock = 168480,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 3,
+ .vsync_end = 1280 + 3 + 10,
+ .vtotal = 1280 + 3 + 10 + 57,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc sharp_ld_d5116z01b = {
+ .modes = &sharp_ld_d5116z01b_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 260,
+ .height = 120,
+ },
+};
+
+static const struct display_timing sharp_lq123p1jx31_timing = {
+ .pixelclock = { 252750000, 252750000, 266604720 },
+ .hactive = { 2400, 2400, 2400 },
+ .hfront_porch = { 48, 48, 48 },
+ .hback_porch = { 80, 80, 84 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 1600, 1600, 1600 },
+ .vfront_porch = { 3, 3, 3 },
+ .vback_porch = { 33, 33, 120 },
+ .vsync_len = { 10, 10, 10 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
+};
+
+static const struct panel_desc sharp_lq123p1jx31 = {
+ .timings = &sharp_lq123p1jx31_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 259,
+ .height = 173,
+ },
+ .delay = {
+ .hpd_reliable = 110,
+ .enable = 50,
+ .unprepare = 550,
+ },
+};
+
+static const struct drm_display_mode starry_kr122ea0sra_mode = {
+ .clock = 147000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 16,
+ .hsync_end = 1920 + 16 + 16,
+ .htotal = 1920 + 16 + 16 + 32,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 15,
+ .vsync_end = 1200 + 15 + 2,
+ .vtotal = 1200 + 15 + 2 + 18,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc starry_kr122ea0sra = {
+ .modes = &starry_kr122ea0sra_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+ .delay = {
+ /* TODO: should be hpd-absent and no-hpd should be set? */
+ .hpd_reliable = 10 + 200,
+ .enable = 50,
+ .unprepare = 10 + 500,
+ },
+};
+
+static const struct of_device_id platform_of_match[] = {
+ {
+ /* Must be first */
+ .compatible = "edp-panel",
+ }, {
+ .compatible = "auo,b101ean01",
+ .data = &auo_b101ean01,
+ }, {
+ .compatible = "auo,b116xa01",
+ .data = &auo_b116xak01,
+ }, {
+ .compatible = "auo,b116xw03",
+ .data = &auo_b116xw03,
+ }, {
+ .compatible = "auo,b133han05",
+ .data = &auo_b133han05,
+ }, {
+ .compatible = "auo,b133htn01",
+ .data = &auo_b133htn01,
+ }, {
+ .compatible = "auo,b133xtn01",
+ .data = &auo_b133xtn01,
+ }, {
+ .compatible = "auo,b140han06",
+ .data = &auo_b140han06,
+ }, {
+ .compatible = "boe,nv101wxmn51",
+ .data = &boe_nv101wxmn51,
+ }, {
+ .compatible = "boe,nv110wtm-n61",
+ .data = &boe_nv110wtm_n61,
+ }, {
+ .compatible = "boe,nv133fhm-n61",
+ .data = &boe_nv133fhm_n61,
+ }, {
+ .compatible = "boe,nv133fhm-n62",
+ .data = &boe_nv133fhm_n61,
+ }, {
+ .compatible = "boe,nv140fhmn49",
+ .data = &boe_nv140fhmn49,
+ }, {
+ .compatible = "innolux,n116bca-ea1",
+ .data = &innolux_n116bca_ea1,
+ }, {
+ .compatible = "innolux,n116bge",
+ .data = &innolux_n116bge,
+ }, {
+ .compatible = "innolux,n125hce-gn1",
+ .data = &innolux_n125hce_gn1,
+ }, {
+ .compatible = "innolux,p120zdg-bf1",
+ .data = &innolux_p120zdg_bf1,
+ }, {
+ .compatible = "ivo,m133nwf4-r0",
+ .data = &ivo_m133nwf4_r0,
+ }, {
+ .compatible = "kingdisplay,kd116n21-30nv-a010",
+ .data = &kingdisplay_kd116n21_30nv_a010,
+ }, {
+ .compatible = "lg,lp079qx1-sp0v",
+ .data = &lg_lp079qx1_sp0v,
+ }, {
+ .compatible = "lg,lp097qx1-spa1",
+ .data = &lg_lp097qx1_spa1,
+ }, {
+ .compatible = "lg,lp120up1",
+ .data = &lg_lp120up1,
+ }, {
+ .compatible = "lg,lp129qe",
+ .data = &lg_lp129qe,
+ }, {
+ .compatible = "neweast,wjfh116008a",
+ .data = &neweast_wjfh116008a,
+ }, {
+ .compatible = "samsung,lsn122dl01-c01",
+ .data = &samsung_lsn122dl01_c01,
+ }, {
+ .compatible = "samsung,ltn140at29-301",
+ .data = &samsung_ltn140at29_301,
+ }, {
+ .compatible = "sharp,ld-d5116z01b",
+ .data = &sharp_ld_d5116z01b,
+ }, {
+ .compatible = "sharp,lq123p1jx31",
+ .data = &sharp_lq123p1jx31,
+ }, {
+ .compatible = "starry,kr122ea0sra",
+ .data = &starry_kr122ea0sra,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, platform_of_match);
+
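+/*
+ * Shared delay tables, named after their values: "200" is hpd_absent,
+ * "500" is unprepare, "eNN" is enable and "p2eNN" is prepare_to_enable,
+ * all in milliseconds.
+ */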
+static const struct panel_delay delay_200_500_p2e80 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .prepare_to_enable = 80,
+};
+
+static const struct panel_delay delay_200_500_p2e100 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .prepare_to_enable = 100,
+};
+
+static const struct panel_delay delay_200_500_e50 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+};
+
+#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
+{ \
+ .name = _name, \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ .delay = _delay \
+}
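+
+/*
+ * For instance (hypothetical values),
+ * EDP_PANEL_ENTRY('A', 'U', 'O', 0x1234, &delay_200_500_e50, "EXAMPLE")
+ * creates an entry matching an EDID with vendor "AUO" and product ID
+ * 0x1234.
+ */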
+
+/*
+ * This table is used to figure out power sequencing delays for panels that
+ * are detected by EDID. Entries here may point to entries in the
+ * platform_of_match table (if a panel is listed in both places).
+ *
+ * Sort first by vendor, then by product ID.
+ */
+static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
+
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
+
+ EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+
+	{ /* sentinel */ }
+};
+
+static const struct edp_panel_entry *find_edp_panel(u32 panel_id)
+{
+ const struct edp_panel_entry *panel;
+
+ if (!panel_id)
+ return NULL;
+
+ for (panel = edp_panels; panel->panel_id; panel++)
+ if (panel->panel_id == panel_id)
+ return panel;
+
+ return NULL;
+}
+
+static int panel_edp_platform_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+
+ /* Skip one since "edp-panel" is only supported on DP AUX bus */
+ id = of_match_node(platform_of_match + 1, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ return panel_edp_probe(&pdev->dev, id->data, NULL);
+}
+
+static int panel_edp_platform_remove(struct platform_device *pdev)
+{
+ return panel_edp_remove(&pdev->dev);
+}
+
+static void panel_edp_platform_shutdown(struct platform_device *pdev)
+{
+ panel_edp_shutdown(&pdev->dev);
+}
+
+static const struct dev_pm_ops panel_edp_pm_ops = {
+ SET_RUNTIME_PM_OPS(panel_edp_suspend, panel_edp_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver panel_edp_platform_driver = {
+ .driver = {
+ .name = "panel-edp",
+ .of_match_table = platform_of_match,
+ .pm = &panel_edp_pm_ops,
+ },
+ .probe = panel_edp_platform_probe,
+ .remove = panel_edp_platform_remove,
+ .shutdown = panel_edp_platform_shutdown,
+};
+
+static int panel_edp_dp_aux_ep_probe(struct dp_aux_ep_device *aux_ep)
+{
+ const struct of_device_id *id;
+
+ id = of_match_node(platform_of_match, aux_ep->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ return panel_edp_probe(&aux_ep->dev, id->data, aux_ep->aux);
+}
+
+static void panel_edp_dp_aux_ep_remove(struct dp_aux_ep_device *aux_ep)
+{
+ panel_edp_remove(&aux_ep->dev);
+}
+
+static void panel_edp_dp_aux_ep_shutdown(struct dp_aux_ep_device *aux_ep)
+{
+ panel_edp_shutdown(&aux_ep->dev);
+}
+
+static struct dp_aux_ep_driver panel_edp_dp_aux_ep_driver = {
+ .driver = {
+ .name = "panel-simple-dp-aux",
+ .of_match_table = platform_of_match, /* Same as platform one! */
+ .pm = &panel_edp_pm_ops,
+ },
+ .probe = panel_edp_dp_aux_ep_probe,
+ .remove = panel_edp_dp_aux_ep_remove,
+ .shutdown = panel_edp_dp_aux_ep_shutdown,
+};
+
+static int __init panel_edp_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&panel_edp_platform_driver);
+ if (err < 0)
+ return err;
+
+ err = dp_aux_dp_driver_register(&panel_edp_dp_aux_ep_driver);
+ if (err < 0)
+ goto err_did_platform_register;
+
+ return 0;
+
+err_did_platform_register:
+ platform_driver_unregister(&panel_edp_platform_driver);
+
+ return err;
+}
+module_init(panel_edp_init);
+
+static void __exit panel_edp_exit(void)
+{
+ dp_aux_dp_driver_unregister(&panel_edp_dp_aux_ep_driver);
+ platform_driver_unregister(&panel_edp_platform_driver);
+}
+module_exit(panel_edp_exit);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM Driver for Simple eDP Panels");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 0145129d7c66..534dd7414d42 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.clock = 69700,
.hdisplay = 800,
- .hsync_start = 800 + 6,
- .hsync_end = 800 + 6 + 15,
- .htotal = 800 + 6 + 15 + 16,
+ .hsync_start = 800 + 52,
+ .hsync_end = 800 + 52 + 8,
+ .htotal = 800 + 52 + 8 + 48,
.vdisplay = 1280,
- .vsync_start = 1280 + 8,
- .vsync_end = 1280 + 8 + 48,
- .vtotal = 1280 + 8 + 48 + 52,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 6,
+ .vtotal = 1280 + 16 + 6 + 15,
.width_mm = 135,
.height_mm = 217,
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index f80b44a8a700..dfb43b1374e7 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -60,6 +60,9 @@
#define MCS_CMD2_ENA1 0xFF00 /* Enable Access Command2 "CMD2" */
#define MCS_CMD2_ENA2 0xFF80 /* Enable Access Orise Command2 */
+#define OTM8009A_HDISPLAY 480
+#define OTM8009A_VDISPLAY 800
+
struct otm8009a {
struct device *dev;
struct drm_panel panel;
@@ -70,19 +73,35 @@ struct otm8009a {
bool enabled;
};
-static const struct drm_display_mode default_mode = {
- .clock = 29700,
- .hdisplay = 480,
- .hsync_start = 480 + 98,
- .hsync_end = 480 + 98 + 32,
- .htotal = 480 + 98 + 32 + 98,
- .vdisplay = 800,
- .vsync_start = 800 + 15,
- .vsync_end = 800 + 15 + 10,
- .vtotal = 800 + 15 + 10 + 14,
- .flags = 0,
- .width_mm = 52,
- .height_mm = 86,
+static const struct drm_display_mode modes[] = {
+ { /* 50 Hz, preferred */
+ .clock = 29700,
+ .hdisplay = 480,
+ .hsync_start = 480 + 98,
+ .hsync_end = 480 + 98 + 32,
+ .htotal = 480 + 98 + 32 + 98,
+ .vdisplay = 800,
+ .vsync_start = 800 + 15,
+ .vsync_end = 800 + 15 + 10,
+ .vtotal = 800 + 15 + 10 + 14,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 52,
+ .height_mm = 86,
+ },
+ { /* 60 Hz */
+ .clock = 33000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 70,
+ .hsync_end = 480 + 70 + 32,
+ .htotal = 480 + 70 + 32 + 72,
+ .vdisplay = 800,
+ .vsync_start = 800 + 15,
+ .vsync_end = 800 + 15 + 10,
+ .vtotal = 800 + 15 + 10 + 16,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 52,
+ .height_mm = 86,
+ },
};
static inline struct otm8009a *panel_to_otm8009a(struct drm_panel *panel)
@@ -208,12 +227,11 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
/* Default portrait 480x800 rgb24 */
dcs_write_seq(ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- ret = mipi_dsi_dcs_set_column_address(dsi, 0,
- default_mode.hdisplay - 1);
+ ret = mipi_dsi_dcs_set_column_address(dsi, 0, OTM8009A_HDISPLAY - 1);
if (ret)
return ret;
- ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1);
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0, OTM8009A_VDISPLAY - 1);
if (ret)
return ret;
@@ -337,24 +355,33 @@ static int otm8009a_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &default_mode);
- if (!mode) {
- dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
- return -ENOMEM;
+ unsigned int num_modes = ARRAY_SIZE(modes);
+ unsigned int i;
+
+ for (i = 0; i < num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev, &modes[i]);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ modes[i].hdisplay,
+ modes[i].vdisplay,
+ drm_mode_vrefresh(&modes[i]));
+ return -ENOMEM;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+		/* Set the first mode as preferred */
+ if (!i)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
}
- drm_mode_set_name(mode);
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
- return 1;
+ return num_modes;
}
static const struct drm_panel_funcs otm8009a_drm_funcs = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
new file mode 100644
index 000000000000..1696ceb36aa0
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Panel driver for the Samsung S6D27A1 480x800 DPI RGB panel.
+ * Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone.
+ */
+
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+#define S6D27A1_PASSWD_L2 0xF0 /* Password Command for Level 2 Control */
+#define S6D27A1_RESCTL 0xB3 /* Resolution Select Control */
+#define S6D27A1_PANELCTL2 0xB4 /* ASG Signal Control */
+#define S6D27A1_READID1 0xDA /* Read panel ID 1 */
+#define S6D27A1_READID2 0xDB /* Read panel ID 2 */
+#define S6D27A1_READID3 0xDC /* Read panel ID 3 */
+#define S6D27A1_DISPCTL 0xF2 /* Display Control */
+#define S6D27A1_MANPWR 0xF3 /* Manual Control */
+#define S6D27A1_PWRCTL1 0xF4 /* Power Control */
+#define S6D27A1_SRCCTL 0xF6 /* Source Control */
+#define S6D27A1_PANELCTL	0xF7	/* Panel Control */
+
+static const u8 s6d27a1_dbi_read_commands[] = {
+ S6D27A1_READID1,
+ S6D27A1_READID2,
+ S6D27A1_READID3,
+ 0, /* sentinel */
+};
+
+struct s6d27a1 {
+ struct device *dev;
+ struct mipi_dbi dbi;
+ struct drm_panel panel;
+ struct gpio_desc *reset;
+ struct regulator_bulk_data regulators[2];
+};
+
+static const struct drm_display_mode s6d27a1_480_800_mode = {
+ /*
+ * The vendor driver states that the S6D27A1 panel
+ * has a pixel clock frequency of 49920000 Hz / 2 = 24960000 Hz.
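+	 * With the timings below this works out to a refresh rate of
+	 * 24960000 / ((480 + 63 + 2 + 63) * (800 + 11 + 2 + 10)) =
+	 * 24960000 / (608 * 823), i.e. roughly 50 Hz.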
+ */
+ .clock = 24960,
+ .hdisplay = 480,
+ .hsync_start = 480 + 63,
+ .hsync_end = 480 + 63 + 2,
+ .htotal = 480 + 63 + 2 + 63,
+ .vdisplay = 800,
+ .vsync_start = 800 + 11,
+ .vsync_end = 800 + 11 + 2,
+ .vtotal = 800 + 11 + 2 + 10,
+ .width_mm = 50,
+ .height_mm = 84,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static inline struct s6d27a1 *to_s6d27a1(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6d27a1, panel);
+}
+
+static void s6d27a1_read_mtp_id(struct s6d27a1 *ctx)
+{
+ struct mipi_dbi *dbi = &ctx->dbi;
+ u8 id1, id2, id3;
+ int ret;
+
+ ret = mipi_dbi_command_read(dbi, S6D27A1_READID1, &id1);
+ if (ret) {
+ dev_err(ctx->dev, "unable to read MTP ID 1\n");
+ return;
+ }
+ ret = mipi_dbi_command_read(dbi, S6D27A1_READID2, &id2);
+ if (ret) {
+ dev_err(ctx->dev, "unable to read MTP ID 2\n");
+ return;
+ }
+ ret = mipi_dbi_command_read(dbi, S6D27A1_READID3, &id3);
+ if (ret) {
+ dev_err(ctx->dev, "unable to read MTP ID 3\n");
+ return;
+ }
+ dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
+}
+
+static int s6d27a1_power_on(struct s6d27a1 *ctx)
+{
+ struct mipi_dbi *dbi = &ctx->dbi;
+ int ret;
+
+ /* Power up */
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->regulators),
+ ctx->regulators);
+ if (ret) {
+ dev_err(ctx->dev, "failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ msleep(20);
+
+ /* Assert reset >=1 ms */
+ gpiod_set_value_cansleep(ctx->reset, 1);
+ usleep_range(1000, 5000);
+ /* De-assert reset */
+ gpiod_set_value_cansleep(ctx->reset, 0);
+ /* Wait >= 10 ms */
+ msleep(20);
+
+ /*
+ * Exit sleep mode and initialize display - some hammering is
+ * necessary.
+ */
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(120);
+
+ /* Magic to unlock level 2 control of the display */
+ mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0x5A, 0x5A);
+
+ /* Configure resolution to 480RGBx800 */
+ mipi_dbi_command(dbi, S6D27A1_RESCTL, 0x22);
+
+ mipi_dbi_command(dbi, S6D27A1_PANELCTL2, 0x00, 0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x0c);
+
+ mipi_dbi_command(dbi, S6D27A1_MANPWR, 0x01, 0x00, 0x00, 0x08, 0x08, 0x02, 0x00);
+
+ mipi_dbi_command(dbi, S6D27A1_DISPCTL, 0x19, 0x00, 0x08, 0x0D, 0x03, 0x41, 0x3F);
+
+ mipi_dbi_command(dbi, S6D27A1_PWRCTL1, 0x00, 0x00, 0x00, 0x00, 0x55,
+ 0x44, 0x05, 0x88, 0x4B, 0x50);
+
+ mipi_dbi_command(dbi, S6D27A1_SRCCTL, 0x03, 0x09, 0x8A, 0x00, 0x01, 0x16);
+
+ mipi_dbi_command(dbi, S6D27A1_PANELCTL, 0x00, 0x05, 0x06, 0x07, 0x08,
+ 0x01, 0x09, 0x0D, 0x0A, 0x0E,
+ 0x0B, 0x0F, 0x0C, 0x10, 0x01,
+ 0x11, 0x12, 0x13, 0x14, 0x05,
+ 0x06, 0x07, 0x08, 0x01, 0x09,
+ 0x0D, 0x0A, 0x0E, 0x0B, 0x0F,
+ 0x0C, 0x10, 0x01, 0x11, 0x12,
+ 0x13, 0x14);
+
+ /* lock the level 2 control */
+ mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0xA5, 0xA5);
+
+ s6d27a1_read_mtp_id(ctx);
+
+ return 0;
+}
+
+static int s6d27a1_power_off(struct s6d27a1 *ctx)
+{
+ /* Go into RESET and disable regulators */
+ gpiod_set_value_cansleep(ctx->reset, 1);
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->regulators),
+ ctx->regulators);
+}
+
+static int s6d27a1_unprepare(struct drm_panel *panel)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct mipi_dbi *dbi = &ctx->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
+ msleep(120);
+ return s6d27a1_power_off(to_s6d27a1(panel));
+}
+
+static int s6d27a1_disable(struct drm_panel *panel)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct mipi_dbi *dbi = &ctx->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
+ msleep(25);
+
+ return 0;
+}
+
+static int s6d27a1_prepare(struct drm_panel *panel)
+{
+ return s6d27a1_power_on(to_s6d27a1(panel));
+}
+
+static int s6d27a1_enable(struct drm_panel *panel)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct mipi_dbi *dbi = &ctx->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+
+ return 0;
+}
+
+static int s6d27a1_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct drm_display_mode *mode;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ mode = drm_mode_duplicate(connector->dev, &s6d27a1_480_800_mode);
+ if (!mode) {
+ dev_err(ctx->dev, "failed to add mode\n");
+ return -ENOMEM;
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.bus_flags =
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6d27a1_drm_funcs = {
+ .disable = s6d27a1_disable,
+ .unprepare = s6d27a1_unprepare,
+ .prepare = s6d27a1_prepare,
+ .enable = s6d27a1_enable,
+ .get_modes = s6d27a1_get_modes,
+};
+
+static int s6d27a1_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct s6d27a1 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+
+ /*
+ * VCI is the analog voltage supply
+ * VCCIO is the digital I/O voltage supply
+ */
+ ctx->regulators[0].supply = "vci";
+ ctx->regulators[1].supply = "vccio";
+ ret = devm_regulator_bulk_get(dev,
+ ARRAY_SIZE(ctx->regulators),
+ ctx->regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ ctx->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset)) {
+ ret = PTR_ERR(ctx->reset);
+ return dev_err_probe(dev, ret, "no RESET GPIO\n");
+ }
+
+ ret = mipi_dbi_spi_init(spi, &ctx->dbi, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
+
+ ctx->dbi.read_commands = s6d27a1_dbi_read_commands;
+
+ drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add backlight\n");
+
+ spi_set_drvdata(spi, ctx);
+
+ drm_panel_add(&ctx->panel);
+
+ return 0;
+}
+
+static int s6d27a1_remove(struct spi_device *spi)
+{
+ struct s6d27a1 *ctx = spi_get_drvdata(spi);
+
+ drm_panel_remove(&ctx->panel);
+ return 0;
+}
+
+static const struct of_device_id s6d27a1_match[] = {
+ { .compatible = "samsung,s6d27a1", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s6d27a1_match);
+
+static struct spi_driver s6d27a1_driver = {
+ .probe = s6d27a1_probe,
+ .remove = s6d27a1_remove,
+ .driver = {
+ .name = "s6d27a1-panel",
+ .of_match_table = s6d27a1_match,
+ },
+};
+module_spi_driver(s6d27a1_driver);
+
+MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
+MODULE_DESCRIPTION("Samsung S6D27A1 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9b6c4e6c38a1..7f3e1b84b5f5 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -36,8 +35,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_dp_aux_bus.h>
-#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
@@ -95,44 +92,6 @@ struct panel_desc {
unsigned int prepare;
/**
- * @delay.hpd_absent_delay: Time to wait if HPD isn't hooked up.
- *
- * Add this to the prepare delay if we know Hot Plug Detect
- * isn't used.
- */
- unsigned int hpd_absent_delay;
-
- /**
- * @delay.prepare_to_enable: Time between prepare and enable.
- *
- * The minimum time, in milliseconds, that needs to have passed
- * between when prepare finished and enable may begin. If at
- * enable time less time has passed since prepare finished,
- * the driver waits for the remaining time.
- *
- * If a fixed enable delay is also specified, we'll start
- * counting before delaying for the fixed delay.
- *
- * If a fixed prepare delay is also specified, we won't start
- * counting until after the fixed delay. We can't overlap this
- * fixed delay with the min time because the fixed delay
- * doesn't happen at the end of the function if a HPD GPIO was
- * specified.
- *
- * In other words:
- * prepare()
- * ...
- * // do fixed prepare delay
- * // wait for HPD GPIO if applicable
- * // start counting for prepare_to_enable
- *
- * enable()
- * // do fixed enable delay
- * // enforce prepare_to_enable min time
- */
- unsigned int prepare_to_enable;
-
- /**
* @delay.enable: Time for the panel to display a valid frame.
*
* The time (in milliseconds) that it takes for the panel to
@@ -176,7 +135,6 @@ struct panel_desc {
struct panel_simple {
struct drm_panel base;
bool enabled;
- bool no_hpd;
bool prepared;
@@ -187,10 +145,8 @@ struct panel_simple {
struct regulator *supply;
struct i2c_adapter *ddc;
- struct drm_dp_aux *aux;
struct gpio_desc *enable_gpio;
- struct gpio_desc *hpd_gpio;
struct edid *edid;
@@ -374,30 +330,10 @@ static int panel_simple_unprepare(struct drm_panel *panel)
return 0;
}
-static int panel_simple_get_hpd_gpio(struct device *dev, struct panel_simple *p)
-{
- int err;
-
- p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
- if (IS_ERR(p->hpd_gpio)) {
- err = PTR_ERR(p->hpd_gpio);
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
-
- return err;
- }
-
- return 0;
-}
-
-static int panel_simple_prepare_once(struct panel_simple *p)
+static int panel_simple_resume(struct device *dev)
{
- struct device *dev = p->base.dev;
- unsigned int delay;
+ struct panel_simple *p = dev_get_drvdata(dev);
int err;
- int hpd_asserted;
- unsigned long hpd_wait_us;
panel_simple_wait(p->unprepared_time, p->desc->delay.unprepare);
@@ -409,68 +345,12 @@ static int panel_simple_prepare_once(struct panel_simple *p)
gpiod_set_value_cansleep(p->enable_gpio, 1);
- delay = p->desc->delay.prepare;
- if (p->no_hpd)
- delay += p->desc->delay.hpd_absent_delay;
- if (delay)
- msleep(delay);
-
- if (p->hpd_gpio) {
- if (p->desc->delay.hpd_absent_delay)
- hpd_wait_us = p->desc->delay.hpd_absent_delay * 1000UL;
- else
- hpd_wait_us = 2000000;
-
- err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
- hpd_asserted, hpd_asserted,
- 1000, hpd_wait_us);
- if (hpd_asserted < 0)
- err = hpd_asserted;
-
- if (err) {
- if (err != -ETIMEDOUT)
- dev_err(dev,
- "error waiting for hpd GPIO: %d\n", err);
- goto error;
- }
- }
+ if (p->desc->delay.prepare)
+ msleep(p->desc->delay.prepare);
p->prepared_time = ktime_get();
return 0;
-
-error:
- gpiod_set_value_cansleep(p->enable_gpio, 0);
- regulator_disable(p->supply);
- p->unprepared_time = ktime_get();
-
- return err;
-}
-
-/*
- * Some panels simply don't always come up and need to be power cycled to
- * work properly. We'll allow for a handful of retries.
- */
-#define MAX_PANEL_PREPARE_TRIES 5
-
-static int panel_simple_resume(struct device *dev)
-{
- struct panel_simple *p = dev_get_drvdata(dev);
- int ret;
- int try;
-
- for (try = 0; try < MAX_PANEL_PREPARE_TRIES; try++) {
- ret = panel_simple_prepare_once(p);
- if (ret != -ETIMEDOUT)
- break;
- }
-
- if (ret == -ETIMEDOUT)
- dev_err(dev, "Prepare timeout after %d tries\n", try);
- else if (try)
- dev_warn(dev, "Prepare needed %d retries\n", try);
-
- return ret;
}
static int panel_simple_prepare(struct drm_panel *panel)
@@ -503,8 +383,6 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->desc->delay.enable)
msleep(p->desc->delay.enable);
- panel_simple_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
-
p->enabled = true;
return 0;
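
Note on the delay bookkeeping dropped in this hunk: prepare_to_enable was enforced via panel_simple_wait(), which sleeps only for whatever part of the minimum interval has not already elapsed since the recorded timestamp. Roughly (a sketch of the helper as it appears elsewhere in this driver; its body is not shown in these hunks):

	static void panel_simple_wait(ktime_t start_ktime, unsigned int min_ms)
	{
		ktime_t now_ktime, min_ktime;

		if (!min_ms)
			return;

		min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
		now_ktime = ktime_get();

		/* Sleep only for the part of min_ms that has not already passed. */
		if (ktime_before(now_ktime, min_ktime))
			msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
	}
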
@@ -660,8 +538,7 @@ static void panel_simple_parse_panel_timing_node(struct device *dev,
dev_err(dev, "Reject override mode: No display_timing found\n");
}
-static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
- struct drm_dp_aux *aux)
+static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
{
struct panel_simple *panel;
struct display_timing dt;
@@ -677,14 +554,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
panel->enabled = false;
panel->prepared_time = 0;
panel->desc = desc;
- panel->aux = aux;
-
- panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
- if (!panel->no_hpd) {
- err = panel_simple_get_hpd_gpio(dev, panel);
- if (err)
- return err;
- }
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
@@ -712,8 +581,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
if (!panel->ddc)
return -EPROBE_DEFER;
- } else if (aux) {
- panel->ddc = &aux->ddc;
}
if (desc == &panel_dpi) {
@@ -749,9 +616,9 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
desc->bpc != 8);
break;
case DRM_MODE_CONNECTOR_eDP:
- if (desc->bpc != 6 && desc->bpc != 8 && desc->bpc != 10)
- dev_warn(dev, "Expected bpc in {6,8,10} but got: %u\n", desc->bpc);
- break;
+ dev_warn(dev, "eDP panels moved to panel-edp\n");
+ err = -EINVAL;
+ goto free_ddc;
case DRM_MODE_CONNECTOR_DSI:
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
@@ -798,15 +665,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
if (err)
goto disable_pm_runtime;
- if (!panel->base.backlight && panel->aux) {
- pm_runtime_get_sync(dev);
- err = drm_panel_dp_aux_backlight(&panel->base, panel->aux);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- if (err)
- goto disable_pm_runtime;
- }
-
drm_panel_add(&panel->base);
return 0;
@@ -815,7 +673,7 @@ disable_pm_runtime:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
free_ddc:
- if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ if (panel->ddc)
put_device(&panel->ddc->dev);
return err;
@@ -831,7 +689,7 @@ static int panel_simple_remove(struct device *dev)
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
- if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ if (panel->ddc)
put_device(&panel->ddc->dev);
return 0;
@@ -970,28 +828,6 @@ static const struct panel_desc auo_b101aw03 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct display_timing auo_b101ean01_timing = {
- .pixelclock = { 65300000, 72500000, 75000000 },
- .hactive = { 1280, 1280, 1280 },
- .hfront_porch = { 18, 119, 119 },
- .hback_porch = { 21, 21, 21 },
- .hsync_len = { 32, 32, 32 },
- .vactive = { 800, 800, 800 },
- .vfront_porch = { 4, 4, 4 },
- .vback_porch = { 8, 8, 8 },
- .vsync_len = { 18, 20, 20 },
-};
-
-static const struct panel_desc auo_b101ean01 = {
- .timings = &auo_b101ean01_timing,
- .num_timings = 1,
- .bpc = 6,
- .size = {
- .width = 217,
- .height = 136,
- },
-};
-
static const struct drm_display_mode auo_b101xtn01_mode = {
.clock = 72000,
.hdisplay = 1366,
@@ -1015,172 +851,6 @@ static const struct panel_desc auo_b101xtn01 = {
},
};
-static const struct drm_display_mode auo_b116xak01_mode = {
- .clock = 69300,
- .hdisplay = 1366,
- .hsync_start = 1366 + 48,
- .hsync_end = 1366 + 48 + 32,
- .htotal = 1366 + 48 + 32 + 10,
- .vdisplay = 768,
- .vsync_start = 768 + 4,
- .vsync_end = 768 + 4 + 6,
- .vtotal = 768 + 4 + 6 + 15,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xak01 = {
- .modes = &auo_b116xak01_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .hpd_absent_delay = 200,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode auo_b116xw03_mode = {
- .clock = 70589,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 40,
- .htotal = 1366 + 40 + 40 + 32,
- .vdisplay = 768,
- .vsync_start = 768 + 10,
- .vsync_end = 768 + 10 + 12,
- .vtotal = 768 + 10 + 12 + 6,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xw03 = {
- .modes = &auo_b116xw03_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .enable = 400,
- },
- .bus_flags = DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode auo_b133xtn01_mode = {
- .clock = 69500,
- .hdisplay = 1366,
- .hsync_start = 1366 + 48,
- .hsync_end = 1366 + 48 + 32,
- .htotal = 1366 + 48 + 32 + 20,
- .vdisplay = 768,
- .vsync_start = 768 + 3,
- .vsync_end = 768 + 3 + 6,
- .vtotal = 768 + 3 + 6 + 13,
-};
-
-static const struct panel_desc auo_b133xtn01 = {
- .modes = &auo_b133xtn01_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 293,
- .height = 165,
- },
-};
-
-static const struct drm_display_mode auo_b133han05_mode = {
- .clock = 142600,
- .hdisplay = 1920,
- .hsync_start = 1920 + 58,
- .hsync_end = 1920 + 58 + 42,
- .htotal = 1920 + 58 + 42 + 60,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1080 + 3 + 5 + 54,
-};
-
-static const struct panel_desc auo_b133han05 = {
- .modes = &auo_b133han05_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 293,
- .height = 165,
- },
- .delay = {
- .prepare = 100,
- .enable = 20,
- .unprepare = 50,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode auo_b133htn01_mode = {
- .clock = 150660,
- .hdisplay = 1920,
- .hsync_start = 1920 + 172,
- .hsync_end = 1920 + 172 + 80,
- .htotal = 1920 + 172 + 80 + 60,
- .vdisplay = 1080,
- .vsync_start = 1080 + 25,
- .vsync_end = 1080 + 25 + 10,
- .vtotal = 1080 + 25 + 10 + 10,
-};
-
-static const struct panel_desc auo_b133htn01 = {
- .modes = &auo_b133htn01_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 293,
- .height = 165,
- },
- .delay = {
- .prepare = 105,
- .enable = 20,
- .unprepare = 50,
- },
-};
-
-static const struct drm_display_mode auo_b140han06_mode = {
- .clock = 141000,
- .hdisplay = 1920,
- .hsync_start = 1920 + 16,
- .hsync_end = 1920 + 16 + 16,
- .htotal = 1920 + 16 + 16 + 152,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 14,
- .vtotal = 1080 + 3 + 14 + 19,
-};
-
-static const struct panel_desc auo_b140han06 = {
- .modes = &auo_b140han06_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 309,
- .height = 174,
- },
- .delay = {
- .prepare = 100,
- .enable = 20,
- .unprepare = 50,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct display_timing auo_g070vvn01_timings = {
.pixelclock = { 33300000, 34209000, 45000000 },
.hactive = { 800, 800, 800 },
@@ -1524,169 +1194,6 @@ static const struct panel_desc boe_hv070wsa = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
- {
- .clock = 71900,
- .hdisplay = 1280,
- .hsync_start = 1280 + 48,
- .hsync_end = 1280 + 48 + 32,
- .htotal = 1280 + 48 + 32 + 80,
- .vdisplay = 800,
- .vsync_start = 800 + 3,
- .vsync_end = 800 + 3 + 5,
- .vtotal = 800 + 3 + 5 + 24,
- },
- {
- .clock = 57500,
- .hdisplay = 1280,
- .hsync_start = 1280 + 48,
- .hsync_end = 1280 + 48 + 32,
- .htotal = 1280 + 48 + 32 + 80,
- .vdisplay = 800,
- .vsync_start = 800 + 3,
- .vsync_end = 800 + 3 + 5,
- .vtotal = 800 + 3 + 5 + 24,
- },
-};
-
-static const struct panel_desc boe_nv101wxmn51 = {
- .modes = boe_nv101wxmn51_modes,
- .num_modes = ARRAY_SIZE(boe_nv101wxmn51_modes),
- .bpc = 8,
- .size = {
- .width = 217,
- .height = 136,
- },
- .delay = {
- .prepare = 210,
- .enable = 50,
- .unprepare = 160,
- },
-};
-
-static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
- {
- .clock = 207800,
- .hdisplay = 2160,
- .hsync_start = 2160 + 48,
- .hsync_end = 2160 + 48 + 32,
- .htotal = 2160 + 48 + 32 + 100,
- .vdisplay = 1440,
- .vsync_start = 1440 + 3,
- .vsync_end = 1440 + 3 + 6,
- .vtotal = 1440 + 3 + 6 + 31,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
- },
- {
- .clock = 138500,
- .hdisplay = 2160,
- .hsync_start = 2160 + 48,
- .hsync_end = 2160 + 48 + 32,
- .htotal = 2160 + 48 + 32 + 100,
- .vdisplay = 1440,
- .vsync_start = 1440 + 3,
- .vsync_end = 1440 + 3 + 6,
- .vtotal = 1440 + 3 + 6 + 31,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
- },
-};
-
-static const struct panel_desc boe_nv110wtm_n61 = {
- .modes = boe_nv110wtm_n61_modes,
- .num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
- .bpc = 8,
- .size = {
- .width = 233,
- .height = 155,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .prepare_to_enable = 80,
- .enable = 50,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-/* Also used for boe_nv133fhm_n62 */
-static const struct drm_display_mode boe_nv133fhm_n61_modes = {
- .clock = 147840,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 200,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 6,
- .vtotal = 1080 + 3 + 6 + 31,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
-};
-
-/* Also used for boe_nv133fhm_n62 */
-static const struct panel_desc boe_nv133fhm_n61 = {
- .modes = &boe_nv133fhm_n61_modes,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 294,
- .height = 165,
- },
- .delay = {
- /*
- * When power is first given to the panel there's a short
- * spike on the HPD line. It was explained that this spike
- * was until the TCON data download was complete. On
- * one system this was measured at 8 ms. We'll put 15 ms
- * in the prepare delay just to be safe and take it away
- * from the hpd_absent_delay (which would otherwise be 200 ms)
- * to handle this. That means:
- * - If HPD isn't hooked up you still have 200 ms delay.
- * - If HPD is hooked up we won't try to look at it for the
- * first 15 ms.
- */
- .prepare = 15,
- .hpd_absent_delay = 185,
-
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
- {
- .clock = 148500,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 2200,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1125,
- },
-};
-
-static const struct panel_desc boe_nv140fhmn49 = {
- .modes = boe_nv140fhmn49_modes,
- .num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
- .bpc = 6,
- .size = {
- .width = 309,
- .height = 174,
- },
- .delay = {
- .prepare = 210,
- .enable = 50,
- .unprepare = 160,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -2609,96 +2116,6 @@ static const struct panel_desc innolux_g121x1_l03 = {
},
};
-static const struct drm_display_mode innolux_n116bca_ea1_mode = {
- .clock = 76420,
- .hdisplay = 1366,
- .hsync_start = 1366 + 136,
- .hsync_end = 1366 + 136 + 30,
- .htotal = 1366 + 136 + 30 + 60,
- .vdisplay = 768,
- .vsync_start = 768 + 8,
- .vsync_end = 768 + 8 + 12,
- .vtotal = 768 + 8 + 12 + 12,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
-};
-
-static const struct panel_desc innolux_n116bca_ea1 = {
- .modes = &innolux_n116bca_ea1_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .prepare_to_enable = 80,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-/*
- * Datasheet specifies that at 60 Hz refresh rate:
- * - total horizontal time: { 1506, 1592, 1716 }
- * - total vertical time: { 788, 800, 868 }
- *
- * ...but doesn't go into exactly how that should be split into a front
- * porch, back porch, or sync length. For now we'll leave a single setting
- * here which allows a bit of tweaking of the pixel clock at the expense of
- * refresh rate.
- */
-static const struct display_timing innolux_n116bge_timing = {
- .pixelclock = { 72600000, 76420000, 80240000 },
- .hactive = { 1366, 1366, 1366 },
- .hfront_porch = { 136, 136, 136 },
- .hback_porch = { 60, 60, 60 },
- .hsync_len = { 30, 30, 30 },
- .vactive = { 768, 768, 768 },
- .vfront_porch = { 8, 8, 8 },
- .vback_porch = { 12, 12, 12 },
- .vsync_len = { 12, 12, 12 },
- .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
-};
-
-static const struct panel_desc innolux_n116bge = {
- .timings = &innolux_n116bge_timing,
- .num_timings = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode innolux_n125hce_gn1_mode = {
- .clock = 162000,
- .hdisplay = 1920,
- .hsync_start = 1920 + 40,
- .hsync_end = 1920 + 40 + 40,
- .htotal = 1920 + 40 + 40 + 80,
- .vdisplay = 1080,
- .vsync_start = 1080 + 4,
- .vsync_end = 1080 + 4 + 4,
- .vtotal = 1080 + 4 + 4 + 24,
-};
-
-static const struct panel_desc innolux_n125hce_gn1 = {
- .modes = &innolux_n125hce_gn1_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 276,
- .height = 155,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct drm_display_mode innolux_n156bge_l21_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -2724,33 +2141,6 @@ static const struct panel_desc innolux_n156bge_l21 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
- .clock = 206016,
- .hdisplay = 2160,
- .hsync_start = 2160 + 48,
- .hsync_end = 2160 + 48 + 32,
- .htotal = 2160 + 48 + 32 + 80,
- .vdisplay = 1440,
- .vsync_start = 1440 + 3,
- .vsync_end = 1440 + 3 + 10,
- .vtotal = 1440 + 3 + 10 + 27,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
-};
-
-static const struct panel_desc innolux_p120zdg_bf1 = {
- .modes = &innolux_p120zdg_bf1_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 254,
- .height = 169,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .unprepare = 500,
- },
-};
-
static const struct drm_display_mode innolux_zj070na_01p_mode = {
.clock = 51501,
.hdisplay = 1024,
@@ -2773,64 +2163,6 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
-static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
- .clock = 138778,
- .hdisplay = 1920,
- .hsync_start = 1920 + 24,
- .hsync_end = 1920 + 24 + 48,
- .htotal = 1920 + 24 + 48 + 88,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 12,
- .vtotal = 1080 + 3 + 12 + 17,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
-};
-
-static const struct panel_desc ivo_m133nwf4_r0 = {
- .modes = &ivo_m133nwf4_r0_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 294,
- .height = 165,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode kingdisplay_kd116n21_30nv_a010_mode = {
- .clock = 81000,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 32,
- .htotal = 1366 + 40 + 32 + 62,
- .vdisplay = 768,
- .vsync_start = 768 + 5,
- .vsync_end = 768 + 5 + 5,
- .vtotal = 768 + 5 + 5 + 122,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc kingdisplay_kd116n21_30nv_a010 = {
- .modes = &kingdisplay_kd116n21_30nv_a010_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .hpd_absent_delay = 200,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct display_timing koe_tx14d24vm1bpa_timing = {
.pixelclock = { 5580000, 5850000, 6200000 },
.hactive = { 320, 320, 320 },
@@ -2982,94 +2314,6 @@ static const struct panel_desc lg_lb070wv8 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
- .clock = 200000,
- .hdisplay = 1536,
- .hsync_start = 1536 + 12,
- .hsync_end = 1536 + 12 + 16,
- .htotal = 1536 + 12 + 16 + 48,
- .vdisplay = 2048,
- .vsync_start = 2048 + 8,
- .vsync_end = 2048 + 8 + 4,
- .vtotal = 2048 + 8 + 4 + 8,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc lg_lp079qx1_sp0v = {
- .modes = &lg_lp079qx1_sp0v_mode,
- .num_modes = 1,
- .size = {
- .width = 129,
- .height = 171,
- },
-};
-
-static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
- .clock = 205210,
- .hdisplay = 2048,
- .hsync_start = 2048 + 150,
- .hsync_end = 2048 + 150 + 5,
- .htotal = 2048 + 150 + 5 + 5,
- .vdisplay = 1536,
- .vsync_start = 1536 + 3,
- .vsync_end = 1536 + 3 + 1,
- .vtotal = 1536 + 3 + 1 + 9,
-};
-
-static const struct panel_desc lg_lp097qx1_spa1 = {
- .modes = &lg_lp097qx1_spa1_mode,
- .num_modes = 1,
- .size = {
- .width = 208,
- .height = 147,
- },
-};
-
-static const struct drm_display_mode lg_lp120up1_mode = {
- .clock = 162300,
- .hdisplay = 1920,
- .hsync_start = 1920 + 40,
- .hsync_end = 1920 + 40 + 40,
- .htotal = 1920 + 40 + 40+ 80,
- .vdisplay = 1280,
- .vsync_start = 1280 + 4,
- .vsync_end = 1280 + 4 + 4,
- .vtotal = 1280 + 4 + 4 + 12,
-};
-
-static const struct panel_desc lg_lp120up1 = {
- .modes = &lg_lp120up1_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 267,
- .height = 183,
- },
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode lg_lp129qe_mode = {
- .clock = 285250,
- .hdisplay = 2560,
- .hsync_start = 2560 + 48,
- .hsync_end = 2560 + 48 + 32,
- .htotal = 2560 + 48 + 32 + 80,
- .vdisplay = 1700,
- .vsync_start = 1700 + 3,
- .vsync_end = 1700 + 3 + 10,
- .vtotal = 1700 + 3 + 10 + 36,
-};
-
-static const struct panel_desc lg_lp129qe = {
- .modes = &lg_lp129qe_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 272,
- .height = 181,
- },
-};
-
static const struct display_timing logictechno_lt161010_2nh_timing = {
.pixelclock = { 26400000, 33300000, 46800000 },
.hactive = { 800, 800, 800 },
@@ -3158,19 +2402,6 @@ static const struct panel_desc logictechno_lttd800480070_l6wh_rt = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
-static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
- .clock = 30400,
- .hdisplay = 800,
- .hsync_start = 800 + 0,
- .hsync_end = 800 + 1,
- .htotal = 800 + 0 + 1 + 160,
- .vdisplay = 480,
- .vsync_start = 480 + 0,
- .vsync_end = 480 + 48 + 1,
- .vtotal = 480 + 48 + 1 + 0,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
-};
-
static const struct drm_display_mode logicpd_type_28_mode = {
.clock = 9107,
.hdisplay = 480,
@@ -3205,6 +2436,19 @@ static const struct panel_desc logicpd_type_28 = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
+ .clock = 30400,
+ .hdisplay = 800,
+ .hsync_start = 800 + 0,
+ .hsync_end = 800 + 1,
+ .htotal = 800 + 0 + 1 + 160,
+ .vdisplay = 480,
+ .vsync_start = 480 + 0,
+ .vsync_end = 480 + 48 + 1,
+ .vtotal = 480 + 48 + 1 + 0,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
static const struct panel_desc mitsubishi_aa070mc01 = {
.modes = &mitsubishi_aa070mc01_mode,
.num_modes = 1,
@@ -3330,49 +2574,6 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
-static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
- {
- .clock = 138500,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 80,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1080 + 3 + 5 + 23,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
- }, {
- .clock = 110920,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 80,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1080 + 3 + 5 + 23,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
- }
-};
-
-static const struct panel_desc neweast_wjfh116008a = {
- .modes = neweast_wjfh116008a_modes,
- .num_modes = 2,
- .bpc = 6,
- .size = {
- .width = 260,
- .height = 150,
- },
- .delay = {
- .prepare = 110,
- .enable = 20,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -3783,27 +2984,6 @@ static const struct panel_desc rocktech_rk101ii01d_ct = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
- .clock = 271560,
- .hdisplay = 2560,
- .hsync_start = 2560 + 48,
- .hsync_end = 2560 + 48 + 32,
- .htotal = 2560 + 48 + 32 + 80,
- .vdisplay = 1600,
- .vsync_start = 1600 + 2,
- .vsync_end = 1600 + 2 + 5,
- .vtotal = 1600 + 2 + 5 + 57,
-};
-
-static const struct panel_desc samsung_lsn122dl01_c01 = {
- .modes = &samsung_lsn122dl01_c01_mode,
- .num_modes = 1,
- .size = {
- .width = 263,
- .height = 164,
- },
-};
-
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -3829,28 +3009,6 @@ static const struct panel_desc samsung_ltn101nt05 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode samsung_ltn140at29_301_mode = {
- .clock = 76300,
- .hdisplay = 1366,
- .hsync_start = 1366 + 64,
- .hsync_end = 1366 + 64 + 48,
- .htotal = 1366 + 64 + 48 + 128,
- .vdisplay = 768,
- .vsync_start = 768 + 2,
- .vsync_end = 768 + 2 + 5,
- .vtotal = 768 + 2 + 5 + 17,
-};
-
-static const struct panel_desc samsung_ltn140at29_301 = {
- .modes = &samsung_ltn140at29_301_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 320,
- .height = 187,
- },
-};
-
static const struct display_timing satoz_sat050at40h12r2_timing = {
.pixelclock = {33300000, 33300000, 50000000},
.hactive = {800, 800, 800},
@@ -3875,31 +3033,6 @@ static const struct panel_desc satoz_sat050at40h12r2 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
- .clock = 168480,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 80,
- .vdisplay = 1280,
- .vsync_start = 1280 + 3,
- .vsync_end = 1280 + 3 + 10,
- .vtotal = 1280 + 3 + 10 + 57,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
-};
-
-static const struct panel_desc sharp_ld_d5116z01b = {
- .modes = &sharp_ld_d5116z01b_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 260,
- .height = 120,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
-};
-
static const struct drm_display_mode sharp_lq070y3dg3b_mode = {
.clock = 33260,
.hdisplay = 800,
@@ -3974,34 +3107,6 @@ static const struct panel_desc sharp_lq101k1ly04 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct display_timing sharp_lq123p1jx31_timing = {
- .pixelclock = { 252750000, 252750000, 266604720 },
- .hactive = { 2400, 2400, 2400 },
- .hfront_porch = { 48, 48, 48 },
- .hback_porch = { 80, 80, 84 },
- .hsync_len = { 32, 32, 32 },
- .vactive = { 1600, 1600, 1600 },
- .vfront_porch = { 3, 3, 3 },
- .vback_porch = { 33, 33, 120 },
- .vsync_len = { 10, 10, 10 },
- .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
-};
-
-static const struct panel_desc sharp_lq123p1jx31 = {
- .timings = &sharp_lq123p1jx31_timing,
- .num_timings = 1,
- .bpc = 8,
- .size = {
- .width = 259,
- .height = 173,
- },
- .delay = {
- .prepare = 110,
- .enable = 50,
- .unprepare = 550,
- },
-};
-
static const struct drm_display_mode sharp_ls020b1dd01d_modes[] = {
{ /* 50 Hz */
.clock = 3000,
@@ -4090,33 +3195,6 @@ static const struct panel_desc starry_kr070pe2t = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
-static const struct drm_display_mode starry_kr122ea0sra_mode = {
- .clock = 147000,
- .hdisplay = 1920,
- .hsync_start = 1920 + 16,
- .hsync_end = 1920 + 16 + 16,
- .htotal = 1920 + 16 + 16 + 32,
- .vdisplay = 1200,
- .vsync_start = 1200 + 15,
- .vsync_end = 1200 + 15 + 2,
- .vtotal = 1200 + 15 + 2 + 18,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc starry_kr122ea0sra = {
- .modes = &starry_kr122ea0sra_mode,
- .num_modes = 1,
- .size = {
- .width = 263,
- .height = 164,
- },
- .delay = {
- .prepare = 10 + 200,
- .enable = 50,
- .unprepare = 10 + 500,
- },
-};
-
static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = {
.clock = 30000,
.hdisplay = 800,
@@ -4484,30 +3562,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
- .compatible = "auo,b101ean01",
- .data = &auo_b101ean01,
- }, {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
- .compatible = "auo,b116xa01",
- .data = &auo_b116xak01,
- }, {
- .compatible = "auo,b116xw03",
- .data = &auo_b116xw03,
- }, {
- .compatible = "auo,b133han05",
- .data = &auo_b133han05,
- }, {
- .compatible = "auo,b133htn01",
- .data = &auo_b133htn01,
- }, {
- .compatible = "auo,b140han06",
- .data = &auo_b140han06,
- }, {
- .compatible = "auo,b133xtn01",
- .data = &auo_b133xtn01,
- }, {
.compatible = "auo,g070vvn01",
.data = &auo_g070vvn01,
}, {
@@ -4547,21 +3604,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,hv070wsa-100",
.data = &boe_hv070wsa
}, {
- .compatible = "boe,nv101wxmn51",
- .data = &boe_nv101wxmn51,
- }, {
- .compatible = "boe,nv110wtm-n61",
- .data = &boe_nv110wtm_n61,
- }, {
- .compatible = "boe,nv133fhm-n61",
- .data = &boe_nv133fhm_n61,
- }, {
- .compatible = "boe,nv133fhm-n62",
- .data = &boe_nv133fhm_n61,
- }, {
- .compatible = "boe,nv140fhmn49",
- .data = &boe_nv140fhmn49,
- }, {
.compatible = "cdtech,s043wq26h-ct7",
.data = &cdtech_s043wq26h_ct7,
}, {
@@ -4673,30 +3715,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,g121x1-l03",
.data = &innolux_g121x1_l03,
}, {
- .compatible = "innolux,n116bca-ea1",
- .data = &innolux_n116bca_ea1,
- }, {
- .compatible = "innolux,n116bge",
- .data = &innolux_n116bge,
- }, {
- .compatible = "innolux,n125hce-gn1",
- .data = &innolux_n125hce_gn1,
- }, {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
- .compatible = "innolux,p120zdg-bf1",
- .data = &innolux_p120zdg_bf1,
- }, {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
- .compatible = "ivo,m133nwf4-r0",
- .data = &ivo_m133nwf4_r0,
- }, {
- .compatible = "kingdisplay,kd116n21-30nv-a010",
- .data = &kingdisplay_kd116n21_30nv_a010,
- }, {
.compatible = "koe,tx14d24vm1bpa",
.data = &koe_tx14d24vm1bpa,
}, {
@@ -4715,18 +3739,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lb070wv8",
.data = &lg_lb070wv8,
}, {
- .compatible = "lg,lp079qx1-sp0v",
- .data = &lg_lp079qx1_sp0v,
- }, {
- .compatible = "lg,lp097qx1-spa1",
- .data = &lg_lp097qx1_spa1,
- }, {
- .compatible = "lg,lp120up1",
- .data = &lg_lp120up1,
- }, {
- .compatible = "lg,lp129qe",
- .data = &lg_lp129qe,
- }, {
.compatible = "logicpd,type28",
.data = &logicpd_type_28,
}, {
@@ -4757,9 +3769,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
- .compatible = "neweast,wjfh116008a",
- .data = &neweast_wjfh116008a,
- }, {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
@@ -4808,21 +3817,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "rocktech,rk101ii01d-ct",
.data = &rocktech_rk101ii01d_ct,
}, {
- .compatible = "samsung,lsn122dl01-c01",
- .data = &samsung_lsn122dl01_c01,
- }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
- .compatible = "samsung,ltn140at29-301",
- .data = &samsung_ltn140at29_301,
- }, {
.compatible = "satoz,sat050at40h12r2",
.data = &satoz_sat050at40h12r2,
}, {
- .compatible = "sharp,ld-d5116z01b",
- .data = &sharp_ld_d5116z01b,
- }, {
.compatible = "sharp,lq035q7db03",
.data = &sharp_lq035q7db03,
}, {
@@ -4832,9 +3832,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq101k1ly04",
.data = &sharp_lq101k1ly04,
}, {
- .compatible = "sharp,lq123p1jx31",
- .data = &sharp_lq123p1jx31,
- }, {
.compatible = "sharp,ls020b1dd01d",
.data = &sharp_ls020b1dd01d,
}, {
@@ -4844,9 +3841,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "starry,kr070pe2t",
.data = &starry_kr070pe2t,
}, {
- .compatible = "starry,kr122ea0sra",
- .data = &starry_kr122ea0sra,
- }, {
.compatible = "tfc,s9700rtwv43tr-01b",
.data = &tfc_s9700rtwv43tr_01b,
}, {
@@ -4918,7 +3912,7 @@ static int panel_simple_platform_probe(struct platform_device *pdev)
if (!id)
return -ENODEV;
- return panel_simple_probe(&pdev->dev, id->data, NULL);
+ return panel_simple_probe(&pdev->dev, id->data);
}
static int panel_simple_platform_remove(struct platform_device *pdev)
@@ -5198,7 +4192,7 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
desc = id->data;
- err = panel_simple_probe(&dsi->dev, &desc->desc, NULL);
+ err = panel_simple_probe(&dsi->dev, &desc->desc);
if (err < 0)
return err;
@@ -5243,38 +4237,6 @@ static struct mipi_dsi_driver panel_simple_dsi_driver = {
.shutdown = panel_simple_dsi_shutdown,
};
-static int panel_simple_dp_aux_ep_probe(struct dp_aux_ep_device *aux_ep)
-{
- const struct of_device_id *id;
-
- id = of_match_node(platform_of_match, aux_ep->dev.of_node);
- if (!id)
- return -ENODEV;
-
- return panel_simple_probe(&aux_ep->dev, id->data, aux_ep->aux);
-}
-
-static void panel_simple_dp_aux_ep_remove(struct dp_aux_ep_device *aux_ep)
-{
- panel_simple_remove(&aux_ep->dev);
-}
-
-static void panel_simple_dp_aux_ep_shutdown(struct dp_aux_ep_device *aux_ep)
-{
- panel_simple_shutdown(&aux_ep->dev);
-}
-
-static struct dp_aux_ep_driver panel_simple_dp_aux_ep_driver = {
- .driver = {
- .name = "panel-simple-dp-aux",
- .of_match_table = platform_of_match, /* Same as platform one! */
- .pm = &panel_simple_pm_ops,
- },
- .probe = panel_simple_dp_aux_ep_probe,
- .remove = panel_simple_dp_aux_ep_remove,
- .shutdown = panel_simple_dp_aux_ep_shutdown,
-};
-
static int __init panel_simple_init(void)
{
int err;
@@ -5283,21 +4245,14 @@ static int __init panel_simple_init(void)
if (err < 0)
return err;
- err = dp_aux_dp_driver_register(&panel_simple_dp_aux_ep_driver);
- if (err < 0)
- goto err_did_platform_register;
-
if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
err = mipi_dsi_driver_register(&panel_simple_dsi_driver);
if (err < 0)
- goto err_did_aux_ep_register;
+ goto err_did_platform_register;
}
return 0;
-err_did_aux_ep_register:
- dp_aux_dp_driver_unregister(&panel_simple_dp_aux_ep_driver);
-
err_did_platform_register:
platform_driver_unregister(&panel_simple_platform_driver);
@@ -5310,7 +4265,6 @@ static void __exit panel_simple_exit(void)
if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
mipi_dsi_driver_unregister(&panel_simple_dsi_driver);
- dp_aux_dp_driver_unregister(&panel_simple_dp_aux_ep_driver);
platform_driver_unregister(&panel_simple_platform_driver);
}
module_exit(panel_simple_exit);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index bd9b7be63b0f..7f51a4682ccb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -198,7 +198,6 @@ err:
int panfrost_device_init(struct panfrost_device *pfdev)
{
int err;
- struct resource *res;
mutex_init(&pfdev->sched_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
@@ -236,8 +235,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
if (err)
goto out_reset;
- res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
- pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
+ pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
goto out_pm_domain;
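
devm_platform_ioremap_resource() is a direct consolidation of the two calls it replaces here; the helper expands to essentially:

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}
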
@@ -400,8 +398,7 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
#ifdef CONFIG_PM
int panfrost_device_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
panfrost_device_reset(pfdev);
panfrost_devfreq_resume(pfdev);
@@ -411,8 +408,7 @@ int panfrost_device_resume(struct device *dev)
int panfrost_device_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
if (!panfrost_job_is_idle(pfdev))
return -EBUSY;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 1ffaef5ec5ff..82ad9a67f251 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -218,7 +218,7 @@ panfrost_copy_in_sync(struct drm_device *dev,
if (ret)
goto fail;
- ret = drm_gem_fence_array_add(&job->deps, fence);
+ ret = drm_sched_job_add_dependency(&job->base, fence);
if (ret)
goto fail;
@@ -236,7 +236,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
struct panfrost_job *job;
- int ret = 0;
+ int ret = 0, slot;
if (!args->jc)
return -EINVAL;
@@ -253,38 +253,47 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
job = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job) {
ret = -ENOMEM;
- goto fail_out_sync;
+ goto out_put_syncout;
}
kref_init(&job->refcount);
- xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
-
job->pfdev = pfdev;
job->jc = args->jc;
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
job->file_priv = file->driver_priv;
+ slot = panfrost_job_get_slot(job);
+
+ ret = drm_sched_job_init(&job->base,
+ &job->file_priv->sched_entity[slot],
+ NULL);
+ if (ret)
+ goto out_put_job;
+
ret = panfrost_copy_in_sync(dev, file, args, job);
if (ret)
- goto fail_job;
+ goto out_cleanup_job;
ret = panfrost_lookup_bos(dev, file, args, job);
if (ret)
- goto fail_job;
+ goto out_cleanup_job;
ret = panfrost_job_push(job);
if (ret)
- goto fail_job;
+ goto out_cleanup_job;
/* Update the return sync object for the job */
if (sync_out)
drm_syncobj_replace_fence(sync_out, job->render_done_fence);
-fail_job:
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&job->base);
+out_put_job:
panfrost_job_put(job);
-fail_out_sync:
+out_put_syncout:
if (sync_out)
drm_syncobj_put(sync_out);
@@ -629,8 +638,8 @@ static const struct panfrost_compatible amlogic_data = {
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
-const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
-const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
+static const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
+static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
.num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
.supply_names = mediatek_mt8183_supplies,
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 71a72fb50e6b..908d79520853 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -102,7 +102,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in
return &fence->base;
}
-static int panfrost_job_get_slot(struct panfrost_job *job)
+int panfrost_job_get_slot(struct panfrost_job *job)
{
/* JS0: fragment jobs.
* JS1: vertex/tiler jobs
@@ -137,8 +137,8 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
*/
affinity = pfdev->features.shader_present;
- job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
- job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
+ job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
+ job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
}
static u32
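
lower_32_bits()/upper_32_bits() replace the open-coded masks and shifts throughout this series. The kernel helpers are essentially:

	#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
	/* The double shift avoids undefined behaviour when n is a 32-bit type. */
	#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
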
@@ -203,8 +203,8 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
- job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
- job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
+ job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
+ job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
panfrost_job_write_affinity(pfdev, job->requirements, js);
@@ -242,13 +242,14 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
int bo_count,
- struct xarray *deps)
+ struct drm_sched_job *job)
{
int i, ret;
for (i = 0; i < bo_count; i++) {
/* panfrost always uses write mode in its current uapi */
- ret = drm_gem_fence_array_add_implicit(deps, bos[i], true);
+ ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
+ true);
if (ret)
return ret;
}
@@ -269,29 +270,21 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
int panfrost_job_push(struct panfrost_job *job)
{
struct panfrost_device *pfdev = job->pfdev;
- int slot = panfrost_job_get_slot(job);
- struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
-
ret = drm_gem_lock_reservations(job->bos, job->bo_count,
&acquire_ctx);
if (ret)
return ret;
mutex_lock(&pfdev->sched_lock);
-
- ret = drm_sched_job_init(&job->base, entity, NULL);
- if (ret) {
- mutex_unlock(&pfdev->sched_lock);
- goto unlock;
- }
+ drm_sched_job_arm(&job->base);
job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
- &job->deps);
+ &job->base);
if (ret) {
mutex_unlock(&pfdev->sched_lock);
goto unlock;
@@ -299,7 +292,7 @@ int panfrost_job_push(struct panfrost_job *job)
kref_get(&job->refcount); /* put by scheduler job completion */
- drm_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base);
mutex_unlock(&pfdev->sched_lock);
@@ -316,15 +309,8 @@ static void panfrost_job_cleanup(struct kref *ref)
{
struct panfrost_job *job = container_of(ref, struct panfrost_job,
refcount);
- struct dma_fence *fence;
- unsigned long index;
unsigned int i;
- xa_for_each(&job->deps, index, fence) {
- dma_fence_put(fence);
- }
- xa_destroy(&job->deps);
-
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
@@ -363,17 +349,6 @@ static void panfrost_job_free(struct drm_sched_job *sched_job)
panfrost_job_put(job);
}
-static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity)
-{
- struct panfrost_job *job = to_panfrost_job(sched_job);
-
- if (!xa_empty(&job->deps))
- return xa_erase(&job->deps, job->last_dep++);
-
- return NULL;
-}
-
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
@@ -763,7 +738,6 @@ static void panfrost_reset_work(struct work_struct *work)
}
static const struct drm_sched_backend_ops panfrost_sched_ops = {
- .dependency = panfrost_job_dependency,
.run_job = panfrost_job_run,
.timedout_job = panfrost_job_timedout,
.free_job = panfrost_job_free
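
Net effect of the panfrost_drv.c and panfrost_job.c hunks above: dependency tracking moves from the driver's private xarray into the common drm_sched job, so the .dependency callback disappears and submission becomes a fixed init/add-deps/arm/push sequence. A sketch of the resulting ordering (error handling omitted; entity binding now happens at init time):

	ret = drm_sched_job_init(&job->base, entity, NULL);	/* bind job to entity    */
	ret = drm_sched_job_add_dependency(&job->base, in_fence);	/* explicit deps */
	ret = drm_sched_job_add_implicit_dependencies(&job->base, bo, true); /* resv deps */
	drm_sched_job_arm(&job->base);		/* creates job->base.s_fence             */
	drm_sched_entity_push_job(&job->base);	/* entity comes from job_init() now      */
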
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 82306a03b57e..77e6d0e6f612 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -19,10 +19,6 @@ struct panfrost_job {
struct panfrost_device *pfdev;
struct panfrost_file_priv *file_priv;
- /* Contains both explicit and implicit fences */
- struct xarray deps;
- unsigned long last_dep;
-
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
@@ -42,6 +38,7 @@ int panfrost_job_init(struct panfrost_device *pfdev);
void panfrost_job_fini(struct panfrost_device *pfdev);
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
+int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index dfe5f1d29763..f51d3f791a17 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -58,21 +58,37 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
- u64 iova, u64 size)
+ u64 region_start, u64 size)
{
u8 region_width;
- u64 region = iova & PAGE_MASK;
+ u64 region;
+ u64 region_end = region_start + size;
- /* The size is encoded as ceil(log2) minus(1), which may be calculated
- * with fls. The size must be clamped to hardware bounds.
+ if (!size)
+ return;
+
+ /*
+ * The locked region is a naturally aligned power of 2 block encoded as
+ * log2 minus(1).
+ * Calculate the desired start/end and look for the highest bit which
+ * differs. The smallest naturally aligned block must include this bit
+ * change, the desired region starts with this bit (and subsequent bits)
+ * zeroed and ends with the bit (and subsequent bits) set to one.
*/
- size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
- region_width = fls64(size - 1) - 1;
- region |= region_width;
+ region_width = max(fls64(region_start ^ (region_end - 1)),
+ const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
+
+ /*
+ * Mask off the low bits of region_start (which would be ignored by
+ * the hardware anyway)
+ */
+ region_start &= GENMASK_ULL(63, region_width);
+
+ region = region_width | region_start;
/* Lock the region that needs to be updated */
- mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
- mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
+ mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
+ mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
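
Worked example for the new lock_region() encoding (assuming AS_LOCK_REGION_MIN_SIZE == 1 << 15, as defined elsewhere in panfrost):

	/*
	 * region_start = 0x2000, size = 0x3000  =>  region_end - 1 = 0x4fff
	 * region_start ^ (region_end - 1) = 0x6fff  =>  fls64(0x6fff) = 15
	 * region_width = max(15, const_ilog2(1 << 15)) - 1 = 14  (block = 1 << 15)
	 * region_start &= GENMASK_ULL(63, 14)  =>  0x0
	 * Locked region: [0x0, 0x8000), covering the requested [0x2000, 0x5000).
	 */
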
@@ -114,14 +130,14 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
- mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
- mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
+ mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
+ mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
/* Need to revisit mem attrs.
* NC is the default, Mali driver is inner WT.
*/
- mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
- mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
+ mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
+ mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 5ab03d605f57..e116a4d9b8e5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -51,8 +51,8 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
reinit_completion(&pfdev->perfcnt->dump_comp);
gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
- gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
- gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
+ gpu_write(pfdev, GPU_PERFCNT_BASE_LO, lower_32_bits(gpuva));
+ gpu_write(pfdev, GPU_PERFCNT_BASE_HI, upper_32_bits(gpuva));
gpu_write(pfdev, GPU_INT_CLEAR,
GPU_IRQ_CLEAN_CACHES_COMPLETED |
GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b19f2f00b215..469979cd0341 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -36,10 +36,10 @@
/* manage releaseables */
/* stack them 16 high for now -drawable object is 191 */
#define RELEASE_SIZE 256
-#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
+#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
-#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
+#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 37a1b6a6ad6d..b2e33d5ba5d0 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -101,7 +101,6 @@ int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
*/
static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
- ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
kfree(ttm);
}
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index d2a0f5394fef..dde0501aea68 100644
--- a/drivers/gpu/drm/r128/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -99,7 +99,8 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
break;
- pci_unmap_page(pdev, entry->busaddr[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&pdev->dev, entry->busaddr[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
@@ -134,7 +135,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
- if (pci_set_dma_mask(pdev, gart_info->table_mask)) {
+ if (dma_set_mask(&pdev->dev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
(unsigned long long)gart_info->table_mask);
ret = -EFAULT;
@@ -173,9 +174,9 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
gart_idx = 0;
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
- entry->busaddr[i] = pci_map_page(pdev, entry->pagelist[i],
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, entry->busaddr[i])) {
+ entry->busaddr[i] = dma_map_page(&pdev->dev, entry->pagelist[i],
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;
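
The pci_* DMA wrappers removed here were thin aliases for the generic DMA API, so the replacement pattern is one-to-one:

	dma_addr_t busaddr;

	busaddr = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, busaddr))
		return -ENOMEM;	/* mapping failed; nothing to unmap */

	/* ... use the mapping, then tear it down with matching parameters ... */
	dma_unmap_page(&pdev->dev, busaddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
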
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 83e8b8547f9b..bd5dc09e860f 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -5983,7 +5983,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
#else // not __cplusplus
-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (offsetof(ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES, FieldName)/sizeof(USHORT))
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
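
offsetof() computes the same member offset as the old null-pointer-cast idiom without relying on undefined behaviour. A standalone illustration (hypothetical struct, not from atombios.h):

	#include <stddef.h>
	#include <stdio.h>

	struct master_list { unsigned short first, second, third; };

	int main(void)
	{
		/* Same result as ((char *)&((struct master_list *)0)->second - (char *)0),
		 * but well-defined C. */
		printf("%zu\n", offsetof(struct master_list, second) / sizeof(unsigned short));
		return 0;	/* prints 1 */
	}
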
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index f0cfb58da467..ac006bed4743 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -390,8 +390,7 @@ static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
- u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
- u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
+ u16 hi_sidd, lo_sidd;
struct radeon_cac_tdp_table *cac_tdp_table =
rdev->pm.dpm.dyn_state.cac_tdp_table;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 35b77c944701..9d2bcb9551e6 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -820,12 +820,12 @@ union fan_info {
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
- u32 size = atom_table->ucNumEntries *
- sizeof(struct radeon_clock_voltage_dependency_entry);
int i;
ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
- radeon_table->entries = kzalloc(size, GFP_KERNEL);
+ radeon_table->entries = kcalloc(atom_table->ucNumEntries,
+ sizeof(struct radeon_clock_voltage_dependency_entry),
+ GFP_KERNEL);
if (!radeon_table->entries)
return -ENOMEM;
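
kcalloc() adds the multiplication-overflow check that the open-coded kzalloc(n * size) lacked. Conceptually (a sketch; check_mul_overflow() is the real helper from linux/overflow.h):

	static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
	{
		size_t bytes;

		if (check_mul_overflow(n, size, &bytes))
			return NULL;	/* n * size would wrap; fail instead */
		return kzalloc(bytes, flags);
	}
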
@@ -1361,7 +1361,9 @@ u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
u8 r600_encode_pci_lane_width(u32 lanes)
{
- u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
+ static const u8 encoded_lanes[] = {
+ 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6
+ };
if (lanes > 16)
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index ec867fa880a4..751c2c075e09 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -423,7 +423,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr,
radeon_connector->port,
mst_enc->pbn, slots);
- drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
+ drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1);
radeon_dp_mst_set_be_cntl(primary, mst_enc,
radeon_connector->mst_port->hpd.hpd, true);
@@ -452,7 +452,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
return;
drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
- drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
+ drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1);
drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
/* and this can also fail */
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index e9c47ec28ade..73e3117420bf 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -176,18 +176,11 @@ static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode,
*/
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
if (seq >= fence->seq) {
- int ret = dma_fence_signal_locked(&fence->base);
-
- if (!ret)
- DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
- else
- DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
-
+ dma_fence_signal_locked(&fence->base);
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
dma_fence_put(&fence->base);
- } else
- DMA_FENCE_TRACE(&fence->base, "pending\n");
+ }
return 0;
}
@@ -422,8 +415,6 @@ static bool radeon_fence_enable_signaling(struct dma_fence *f)
fence->fence_wake.func = radeon_fence_check_signaled;
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
dma_fence_get(f);
-
- DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
return true;
}
@@ -441,11 +432,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return true;
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
- int ret;
-
- ret = dma_fence_signal(&fence->base);
- if (!ret)
- DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+ dma_fence_signal(&fence->base);
return true;
}
return false;
@@ -550,7 +537,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
{
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
- int r_sig;
/*
* This function should not be called on !radeon fences.
@@ -567,9 +553,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
return r;
}
- r_sig = dma_fence_signal(&fence->base);
- if (!r_sig)
- DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+ dma_fence_signal(&fence->base);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a06d4cc2fb1c..11b21d605584 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -488,9 +488,6 @@ static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *t
{
struct radeon_ttm_tt *gtt = (void *)ttm;
- radeon_ttm_backend_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
-
ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
@@ -548,14 +545,14 @@ static int radeon_ttm_tt_populate(struct ttm_device *bdev,
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (gtt && gtt->userptr) {
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
return -ENOMEM;
- ttm->page_flags |= TTM_PAGE_FLAG_SG;
+ ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
return 0;
}
@@ -572,11 +569,13 @@ static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
+
+ radeon_ttm_tt_unbind(bdev, ttm);
if (gtt && gtt->userptr) {
kfree(ttm->sg);
- ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+ ttm->page_flags &= ~TTM_TT_FLAG_EXTERNAL;
return;
}
@@ -651,8 +650,6 @@ static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
struct radeon_device *rdev = radeon_get_rdev(bdev);
if (rdev->flags & RADEON_IS_AGP) {
- ttm_agp_unbind(ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_agp_destroy(ttm);
return;
}
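
With ttm_tt_destroy_common() gone, TTM core handles the shared teardown and unpopulates before calling the driver's destroy hook, which is why the unbind moves into radeon_ttm_tt_unpopulate() above. The resulting per-tt ordering, as a sketch of the call sequence rather than literal code:

	radeon_ttm_tt_unpopulate(bdev, ttm);	/* now also does radeon_ttm_tt_unbind() */
	radeon_ttm_tt_destroy(bdev, ttm);	/* reduced to ttm_tt_fini() + kfree()    */
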
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index ea7e39d03545..5672830ca184 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -1206,7 +1206,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
int ret;
/* Get the CRTC clock and the optional external clock. */
- if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_CLOCK)) {
sprintf(clk_name, "du.%u", hwindex);
name = clk_name;
} else {
@@ -1243,7 +1243,10 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[hwindex];
rcrtc->index = hwindex;
- rcrtc->dsysr = (rcrtc->index % 2 ? 0 : DSYSR_DRES) | DSYSR_TVM_TVSYNC;
+ rcrtc->dsysr = rcrtc->index % 2 ? 0 : DSYSR_DRES;
+
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_TVM_SYNC))
+ rcrtc->dsysr |= DSYSR_TVM_TVSYNC;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
@@ -1269,7 +1272,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
/* Register the interrupt handler. */
- if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ)) {
/* The IRQ's are associated with the CRTC (sw)index. */
irq = platform_get_irq(pdev, swindex);
irqflags = 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 5f2940c42225..66e8839db708 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -93,17 +93,6 @@ struct rcar_du_crtc_state {
#define to_rcar_crtc_state(s) container_of(s, struct rcar_du_crtc_state, state)
-enum rcar_du_output {
- RCAR_DU_OUTPUT_DPAD0,
- RCAR_DU_OUTPUT_DPAD1,
- RCAR_DU_OUTPUT_LVDS0,
- RCAR_DU_OUTPUT_LVDS1,
- RCAR_DU_OUTPUT_HDMI0,
- RCAR_DU_OUTPUT_HDMI1,
- RCAR_DU_OUTPUT_TCON,
- RCAR_DU_OUTPUT_MAX,
-};
-
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
unsigned int hwindex);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 4ac26d08ebb4..5612a9e7a905 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -36,7 +37,8 @@
static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -58,7 +60,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -79,7 +82,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -105,7 +109,8 @@ static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -134,7 +139,8 @@ static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
static const struct rcar_du_device_info rcar_du_r8a774b1_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -163,7 +169,8 @@ static const struct rcar_du_device_info rcar_du_r8a774b1_info = {
static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE,
.channels_mask = BIT(1) | BIT(0),
.routes = {
@@ -189,7 +196,8 @@ static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
static const struct rcar_du_device_info rcar_du_r8a774e1_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -239,7 +247,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
static const struct rcar_du_device_info rcar_du_r8a7790_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.quirks = RCAR_DU_QUIRK_ALIGN_128B,
@@ -269,7 +278,8 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
/* M2-W (r8a7791) and M2-N (r8a7793) are identical */
static const struct rcar_du_device_info rcar_du_r8a7791_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -292,7 +302,8 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
static const struct rcar_du_device_info rcar_du_r8a7792_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -311,7 +322,8 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = {
static const struct rcar_du_device_info rcar_du_r8a7794_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -333,7 +345,8 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
static const struct rcar_du_device_info rcar_du_r8a7795_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -366,7 +379,8 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
static const struct rcar_du_device_info rcar_du_r8a7796_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -395,7 +409,8 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
static const struct rcar_du_device_info rcar_du_r8a77965_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -424,7 +439,8 @@ static const struct rcar_du_device_info rcar_du_r8a77965_info = {
static const struct rcar_du_device_info rcar_du_r8a77970_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -448,7 +464,8 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = {
static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE,
.channels_mask = BIT(1) | BIT(0),
.routes = {
@@ -473,6 +490,25 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
.lvds_clk_mask = BIT(1) | BIT(0),
};
+static const struct rcar_du_device_info rcar_du_r8a779a0_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_VSP1_SOURCE,
+ .channels_mask = BIT(1) | BIT(0),
+ .routes = {
+ /* R8A779A0 has two MIPI DSI outputs. */
+ [RCAR_DU_OUTPUT_DSI0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DSI1] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ },
+ .dsi_clk_mask = BIT(1) | BIT(0),
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
@@ -497,11 +533,30 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info },
{ .compatible = "renesas,du-r8a77990", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info },
+ { .compatible = "renesas,du-r8a779a0", .data = &rcar_du_r8a779a0_info },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_du_of_table);
+const char *rcar_du_output_name(enum rcar_du_output output)
+{
+ static const char * const names[] = {
+ [RCAR_DU_OUTPUT_DPAD0] = "DPAD0",
+ [RCAR_DU_OUTPUT_DPAD1] = "DPAD1",
+ [RCAR_DU_OUTPUT_DSI0] = "DSI0",
+ [RCAR_DU_OUTPUT_DSI1] = "DSI1",
+ [RCAR_DU_OUTPUT_LVDS0] = "LVDS0",
+ [RCAR_DU_OUTPUT_LVDS1] = "LVDS1",
+ [RCAR_DU_OUTPUT_HDMI0] = "HDMI0",
+ [RCAR_DU_OUTPUT_HDMI1] = "HDMI1",
+ [RCAR_DU_OUTPUT_TCON] = "TCON",
+ };
+
+ if (output >= ARRAY_SIZE(names) || !names[output])
+ return "UNKNOWN";
+
+ return names[output];
+}
+
/* -----------------------------------------------------------------------------
* DRM operations
*/
@@ -510,7 +565,11 @@ DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
static const struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(rcar_du_dumb_create),
+ .dumb_create = rcar_du_dumb_create,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
@@ -570,7 +629,7 @@ static void rcar_du_shutdown(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev)
{
struct rcar_du_device *rcdu;
- struct resource *mem;
+ unsigned int mask;
int ret;
/* Allocate and initialize the R-Car device structure. */
@@ -585,11 +644,20 @@ static int rcar_du_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rcdu);
/* I/O resources */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ rcdu->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcdu->mmio))
return PTR_ERR(rcdu->mmio);
+ /*
+ * Set the DMA coherent mask to reflect the DU 32-bit DMA address space
+ * limitations. When sourcing frames from a VSP, the DU doesn't perform
+ * any memory access, so set the mask to 40 bits to accept all buffers.
+ */
+ mask = rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE) ? 40 : 32;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(mask));
+ if (ret)
+ return ret;
+
/* DRM/KMS objects */
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 02ca2d0e1b55..101f42df86ea 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -26,13 +26,27 @@ struct drm_bridge;
struct drm_property;
struct rcar_du_device;
-#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
-#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(1) /* Has inputs from VSP1 */
-#define RCAR_DU_FEATURE_INTERLACED BIT(2) /* HW supports interlaced */
-#define RCAR_DU_FEATURE_TVM_SYNC BIT(3) /* Has TV switch/sync modes */
+#define RCAR_DU_FEATURE_CRTC_IRQ BIT(0) /* Per-CRTC IRQ */
+#define RCAR_DU_FEATURE_CRTC_CLOCK BIT(1) /* Per-CRTC clock */
+#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */
+#define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */
+#define RCAR_DU_FEATURE_TVM_SYNC BIT(4) /* Has TV switch/sync modes */
#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */
+enum rcar_du_output {
+ RCAR_DU_OUTPUT_DPAD0,
+ RCAR_DU_OUTPUT_DPAD1,
+ RCAR_DU_OUTPUT_DSI0,
+ RCAR_DU_OUTPUT_DSI1,
+ RCAR_DU_OUTPUT_HDMI0,
+ RCAR_DU_OUTPUT_HDMI1,
+ RCAR_DU_OUTPUT_LVDS0,
+ RCAR_DU_OUTPUT_LVDS1,
+ RCAR_DU_OUTPUT_TCON,
+ RCAR_DU_OUTPUT_MAX,
+};
+
/*
* struct rcar_du_output_routing - Output routing specification
* @possible_crtcs: bitmask of possible CRTCs for the output
@@ -56,6 +70,7 @@ struct rcar_du_output_routing {
* @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
* @num_lvds: number of internal LVDS encoders
* @dpll_mask: bit mask of DU channels equipped with a DPLL
+ * @dsi_clk_mask: bitmask of channels that can use the DSI clock as dot clock
* @lvds_clk_mask: bitmask of channels that can use the LVDS clock as dot clock
*/
struct rcar_du_device_info {
@@ -66,6 +81,7 @@ struct rcar_du_device_info {
struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
unsigned int num_lvds;
unsigned int dpll_mask;
+ unsigned int dsi_clk_mask;
unsigned int lvds_clk_mask;
};
@@ -126,4 +142,6 @@ static inline void rcar_du_write(struct rcar_du_device *rcdu, u32 reg, u32 data)
iowrite32(data, rcdu->mmio + reg);
}
+const char *rcar_du_output_name(enum rcar_du_output output);
+
#endif /* __RCAR_DU_DRV_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 4bf4e25d7f01..3977aaa1ab5a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -103,8 +103,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
return -ENOLINK;
}
- dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
- enc_node, output);
+ dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n",
+ enc_node, rcar_du_output_name(output));
renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
&rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
@@ -118,8 +118,9 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
ret = drm_bridge_attach(&renc->base, bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
- dev_err(rcdu->dev, "failed to attach bridge for output %u\n",
- output);
+ dev_err(rcdu->dev,
+ "failed to attach bridge %pOF for output %s (%d)\n",
+ bridge->of_node, rcar_du_output_name(output), ret);
return ret;
}
@@ -127,7 +128,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
connector = drm_bridge_connector_init(&rcdu->ddev, &renc->base);
if (IS_ERR(connector)) {
dev_err(rcdu->dev,
- "failed to created connector for output %u\n", output);
+ "failed to created connector for output %s (%ld)\n",
+ rcar_du_output_name(output), PTR_ERR(connector));
return PTR_ERR(connector);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 88a783ceb3e9..8665a1dd2186 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -122,10 +122,12 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp)
didsr = DIDSR_CODE;
for (i = 0; i < num_crtcs; ++i, ++rcrtc) {
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index))
- didsr |= DIDSR_LCDS_LVDS0(i)
+ didsr |= DIDSR_LDCS_LVDS0(i)
| DIDSR_PDCS_CLK(i, 0);
+ else if (rcdu->info->dsi_clk_mask & BIT(rcrtc->index))
+ didsr |= DIDSR_LDCS_DSI(i);
else
- didsr |= DIDSR_LCDS_DCLKIN(i)
+ didsr |= DIDSR_LDCS_DCLKIN(i)
| DIDSR_PDCS_CLK(i, 0);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index fdb8a0d127ad..eacb1f17f747 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -19,6 +19,7 @@
#include <drm/drm_vblank.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/wait.h>
@@ -325,6 +326,51 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
* Frame buffer
*/
+static const struct drm_gem_object_funcs rcar_du_gem_funcs = {
+ .free = drm_gem_cma_free_object,
+ .print_info = drm_gem_cma_print_info,
+ .get_sg_table = drm_gem_cma_get_sg_table,
+ .vmap = drm_gem_cma_vmap,
+ .mmap = drm_gem_cma_mmap,
+ .vm_ops = &drm_gem_cma_vm_ops,
+};
+
+struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
+ return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+
+ /* Create a CMA GEM buffer. */
+ cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!cma_obj)
+ return ERR_PTR(-ENOMEM);
+
+ gem_obj = &cma_obj->base;
+ gem_obj->funcs = &rcar_du_gem_funcs;
+
+ drm_gem_private_object_init(dev, gem_obj, attach->dmabuf->size);
+ cma_obj->map_noncoherent = false;
+
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret) {
+ drm_gem_object_release(gem_obj);
+ kfree(cma_obj);
+ return ERR_PTR(ret);
+ }
+
+ cma_obj->paddr = 0;
+ cma_obj->sgt = sgt;
+
+ return gem_obj;
+}
+
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -513,8 +559,8 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
ret = rcar_du_encoder_init(rcdu, output, entity);
if (ret && ret != -EPROBE_DEFER && ret != -ENOLINK)
dev_warn(rcdu->dev,
- "failed to initialize encoder %pOF on output %u (%d), skipping\n",
- entity, output, ret);
+ "failed to initialize encoder %pOF on output %s (%d), skipping\n",
+ entity, rcar_du_output_name(output), ret);
of_node_put(entity);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index 8f5fff176754..789154e19535 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -12,10 +12,13 @@
#include <linux/types.h>
+struct dma_buf_attachment;
struct drm_file;
struct drm_device;
+struct drm_gem_object;
struct drm_mode_create_dumb;
struct rcar_du_device;
+struct sg_table;
struct rcar_du_format_info {
u32 fourcc;
@@ -34,4 +37,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu);
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
+struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
#endif /* __RCAR_DU_KMS_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index fb9964949368..1cdaa51eb9ac 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -257,10 +257,11 @@
#define DIDSR 0x20028
#define DIDSR_CODE (0x7790 << 16)
-#define DIDSR_LCDS_DCLKIN(n) (0 << (8 + (n) * 2))
-#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2))
-#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2))
-#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2))
+#define DIDSR_LDCS_DCLKIN(n) (0 << (8 + (n) * 2))
+#define DIDSR_LDCS_DSI(n) (2 << (8 + (n) * 2)) /* V3U only */
+#define DIDSR_LDCS_LVDS0(n) (2 << (8 + (n) * 2))
+#define DIDSR_LDCS_LVDS1(n) (3 << (8 + (n) * 2))
+#define DIDSR_LDCS_MASK(n) (3 << (8 + (n) * 2))
#define DIDSR_PDCS_CLK(n, clk) (clk << ((n) * 2))
#define DIDSR_PDCS_MASK(n) (3 << ((n) * 2))
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 23e41c83c875..b7fc5b069cbc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -187,17 +187,43 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
struct sg_table sg_tables[3])
{
struct rcar_du_device *rcdu = vsp->dev;
- unsigned int i;
+ unsigned int i, j;
int ret;
for (i = 0; i < fb->format->num_planes; ++i) {
struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
struct sg_table *sgt = &sg_tables[i];
- ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr,
- gem->base.size);
- if (ret)
- goto fail;
+ if (gem->sgt) {
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ /*
+ * If the GEM buffer has a scatter gather table, it has
+ * been imported from a dma-buf and has no physical
+ * address as it might not be physically contiguous.
+ * Copy the original scatter gather table to map it to
+ * the VSP.
+ */
+ ret = sg_alloc_table(sgt, gem->sgt->orig_nents,
+ GFP_KERNEL);
+ if (ret)
+ goto fail;
+
+ src = gem->sgt->sgl;
+ dst = sgt->sgl;
+ for (j = 0; j < gem->sgt->orig_nents; ++j) {
+ sg_set_page(dst, sg_page(src), src->length,
+ src->offset);
+ src = sg_next(src);
+ dst = sg_next(dst);
+ }
+ } else {
+ ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr,
+ gem->paddr, gem->base.size);
+ if (ret)
+ goto fail;
+ }
ret = vsp1_du_map_sg(vsp->vsp, sgt);
if (ret) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index b672c5bd72ee..72a272cfc11e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -813,7 +813,6 @@ static int rcar_lvds_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rcar_lvds *lvds;
- struct resource *mem;
int ret;
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
@@ -836,8 +835,7 @@ static int rcar_lvds_probe(struct platform_device *pdev)
lvds->bridge.funcs = &rcar_lvds_bridge_ops;
lvds->bridge.of_node = pdev->dev.of_node;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ lvds->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->mmio))
return PTR_ERR(lvds->mmio);
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 558f1b58bd69..9f1ecefc3933 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -9,7 +9,6 @@ config DRM_ROCKCHIP
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
- select DRM_RGB if ROCKCHIP_RGB
select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index ade2327a10e2..8abb5ac26807 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -467,6 +467,6 @@ struct platform_driver rockchip_dp_driver = {
.driver = {
.name = "rockchip-dp",
.pm = &rockchip_dp_pm_ops,
- .of_match_table = of_match_ptr(rockchip_dp_dt_ids),
+ .of_match_table = rockchip_dp_dt_ids,
},
};
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 13c6b857158f..16497c31d9f9 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -697,7 +697,6 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
struct device *dev = dp->dev;
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dp->grf)) {
@@ -705,8 +704,7 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
return PTR_ERR(dp->grf);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dp->regs = devm_ioremap_resource(dev, res);
+ dp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dp->regs)) {
DRM_DEV_ERROR(dev, "ioremap reg failed\n");
return PTR_ERR(dp->regs);
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index a2262bee5aa4..a9acbcc420d0 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -14,7 +14,6 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
-#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <video/mipi_display.h>
@@ -643,7 +642,7 @@ struct hstt {
}
/* Table A-3 High-Speed Transition Times */
-struct hstt hstt_table[] = {
+static struct hstt hstt_table[] = {
HSTT( 90, 32, 20, 26, 13),
HSTT( 100, 35, 23, 28, 14),
HSTT( 110, 32, 22, 26, 13),
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 7afdc54eb3ec..046e8ec2a71c 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -810,7 +810,6 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct inno_hdmi *hdmi;
- struct resource *iores;
int irq;
int ret;
@@ -821,8 +820,7 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = dev;
hdmi->drm_dev = drm;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->regs = devm_ioremap_resource(dev, iores);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index bfba9793d238..e4ebe60b3cc1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -138,9 +138,6 @@ static int rockchip_drm_bind(struct device *dev)
drm_dev->dev_private = private;
- INIT_LIST_HEAD(&private->psr_list);
- mutex_init(&private->psr_list_lock);
-
ret = rockchip_drm_init_iommu(drm_dev);
if (ret)
goto err_free;
@@ -275,10 +272,17 @@ int rockchip_drm_endpoint_is_subdriver(struct device_node *ep)
return -ENODEV;
/* status disabled will prevent creation of platform-devices */
+ if (!of_device_is_available(node)) {
+ of_node_put(node);
+ return -ENODEV;
+ }
+
pdev = of_find_device_by_node(node);
of_node_put(node);
+
+ /* enabled non-platform-devices can immediately return here */
if (!pdev)
- return -ENODEV;
+ return false;
/*
* All rockchip subdrivers have probed at this point, so
@@ -370,7 +374,7 @@ static int rockchip_drm_platform_of_probe(struct device *dev)
}
iommu = of_parse_phandle(port->parent, "iommus", 0);
- if (!iommu || !of_device_is_available(iommu->parent)) {
+ if (!iommu || !of_device_is_available(iommu)) {
DRM_DEV_DEBUG(dev,
"no iommu attached for %pOF, using non-iommu buffers\n",
port->parent);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index e33c2dcd0d4b..aa0909e8edf9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -48,8 +48,6 @@ struct rockchip_drm_private {
struct iommu_domain *domain;
struct mutex mm_lock;
struct drm_mm mm;
- struct list_head psr_list;
- struct mutex psr_list_lock;
};
int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 551653940e39..be74c87a8be4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -19,6 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -439,11 +440,9 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
static int rk3288_lvds_probe(struct platform_device *pdev,
struct rockchip_lvds *lvds)
{
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lvds->regs = devm_ioremap_resource(lvds->dev, res);
+ lvds->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->regs))
return PTR_ERR(lvds->regs);
@@ -612,9 +611,9 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
+ connector = &lvds->connector;
if (lvds->panel) {
- connector = &lvds->connector;
connector->dpms = DRM_MODE_DPMS_OFF;
ret = drm_connector_init(drm_dev, connector,
&rockchip_lvds_connector_funcs,
@@ -627,17 +626,27 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
drm_connector_helper_add(connector,
&rockchip_lvds_connector_helper_funcs);
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret < 0) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to attach encoder: %d\n", ret);
- goto err_free_connector;
- }
} else {
- ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto err_free_encoder;
+
+ connector = drm_bridge_connector_init(lvds->drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
+ ret = PTR_ERR(connector);
+ goto err_free_encoder;
+ }
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach encoder: %d\n", ret);
+ goto err_free_connector;
}
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index d691d9bef8e7..09be9678f2bd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -27,6 +28,7 @@ struct rockchip_rgb {
struct drm_device *drm_dev;
struct drm_bridge *bridge;
struct drm_encoder encoder;
+ struct drm_connector connector;
int output_mode;
};
@@ -80,6 +82,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
int ret = 0, child_count = 0;
struct drm_panel *panel;
struct drm_bridge *bridge;
+ struct drm_connector *connector;
rgb = devm_kzalloc(dev, sizeof(*rgb), GFP_KERNEL);
if (!rgb)
@@ -142,12 +145,32 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
rgb->bridge = bridge;
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto err_free_encoder;
+ connector = &rgb->connector;
+ connector = drm_bridge_connector_init(rgb->drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
+ ret = PTR_ERR(connector);
+ goto err_free_encoder;
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach encoder: %d\n", ret);
+ goto err_free_connector;
+ }
+
return rgb;
+err_free_connector:
+ drm_connector_cleanup(connector);
err_free_encoder:
drm_encoder_cleanup(encoder);
return ERR_PTR(ret);
@@ -157,6 +180,7 @@ EXPORT_SYMBOL_GPL(rockchip_rgb_init);
void rockchip_rgb_fini(struct rockchip_rgb *rgb)
{
drm_panel_bridge_remove(rgb->bridge);
+ drm_connector_cleanup(&rgb->connector);
drm_encoder_cleanup(&rgb->encoder);
}
EXPORT_SYMBOL_GPL(rockchip_rgb_fini);
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index ca7cc82125cb..1f7353f0684a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -1124,6 +1124,6 @@ struct platform_driver vop_platform_driver = {
.remove = vop_remove,
.driver = {
.name = "rockchip-vop",
- .of_match_table = of_match_ptr(vop_driver_dt_match),
+ .of_match_table = vop_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 79554aa4dbb1..27e1573af96e 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -45,8 +45,14 @@
* @guilty: atomic_t set to 1 when a job on this queue
* is found to be guilty causing a timeout
*
- * Note: the sched_list should have at least one element to schedule
- * the entity
+ * Note that the &sched_list must have at least one element to schedule the entity.
+ *
+ * For changing @priority later on at runtime see
+ * drm_sched_entity_set_priority(). For changing the set of schedulers
+ * @sched_list at runtime see drm_sched_entity_modify_sched().
+ *
+ * An entity is cleaned up by calling drm_sched_entity_fini(). See also
+ * drm_sched_entity_destroy().
*
* Returns 0 on success or a negative error code on failure.
*/
@@ -92,6 +98,11 @@ EXPORT_SYMBOL(drm_sched_entity_init);
* @sched_list: the list of new drm scheds which will replace
* existing entity->sched_list
* @num_sched_list: number of drm sched in sched_list
+ *
+ * Note that this must be called under the same common lock for @entity as
+ * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
+ * guarantee through some other means that this is never called while new jobs
+ * can be pushed to @entity.
*/
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
@@ -104,13 +115,6 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
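To make the locking rule above concrete, here is a minimal sketch of how a driver might serialize drm_sched_entity_modify_sched() against job submission. The surrounding driver context and the lock name (submit_lock) are hypothetical, not part of this patch:

#include <linux/mutex.h>
#include <drm/gpu_scheduler.h>

/*
 * Hypothetical driver context; only the entity and its submit lock matter.
 * The same lock must also be held around drm_sched_job_arm() and
 * drm_sched_entity_push_job() so no new job can race the scheduler change.
 */
struct my_ctx {
	struct drm_sched_entity entity;
	struct mutex submit_lock;
};

static void my_ctx_change_engines(struct my_ctx *ctx,
				  struct drm_gpu_scheduler **scheds,
				  unsigned int num_scheds)
{
	mutex_lock(&ctx->submit_lock);
	drm_sched_entity_modify_sched(&ctx->entity, scheds, num_scheds);
	mutex_unlock(&ctx->submit_lock);
}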
-/**
- * drm_sched_entity_is_idle - Check if entity is idle
- *
- * @entity: scheduler entity
- *
- * Returns true if the entity does not have any unscheduled jobs.
- */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb(); /* for list_empty to work without lock */
@@ -123,13 +127,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
return false;
}
-/**
- * drm_sched_entity_is_ready - Check if entity is ready
- *
- * @entity: scheduler entity
- *
- * Return true if entity could provide a job.
- */
+/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
if (spsc_queue_peek(&entity->job_queue) == NULL)
@@ -192,14 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
}
EXPORT_SYMBOL(drm_sched_entity_flush);
-/**
- * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
- *
- * @f: signaled fence
- * @cb: our callback structure
- *
- * Signal the scheduler finished fence when the entity in question is killed.
- */
+/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@@ -211,14 +202,19 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
job->sched->ops->free_job(job);
}
-/**
- * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
- *
- * @entity: entity which is cleaned up
- *
- * Makes sure that all remaining jobs in an entity are killed before it is
- * destroyed.
- */
+static struct dma_fence *
+drm_sched_job_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *entity)
+{
+ if (!xa_empty(&job->dependencies))
+ return xa_erase(&job->dependencies, job->last_dependency++);
+
+ if (job->sched->ops->dependency)
+ return job->sched->ops->dependency(job, entity);
+
+ return NULL;
+}
+
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
struct drm_sched_job *job;
@@ -229,7 +225,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
/* Wait for all dependencies to avoid data corruptions */
- while ((f = job->sched->ops->dependency(job, entity)))
+ while ((f = drm_sched_job_dependency(job, entity)))
dma_fence_wait(f, false);
drm_sched_fence_scheduled(s_fence);
@@ -260,9 +256,11 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
*
* @entity: scheduler entity
*
- * This should be called after @drm_sched_entity_do_release. It goes over the
- * entity and signals all jobs with an error code if the process was killed.
+ * Cleans up @entity, which has been initialized by drm_sched_entity_init().
*
+ * If there are potentially jobs still in flight or getting newly queued,
+ * drm_sched_entity_flush() must be called first. This function then goes over
+ * the entity and signals all jobs with an error code if the process was killed.
*/
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
@@ -302,10 +300,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
/**
* drm_sched_entity_destroy - Destroy a context entity
- *
* @entity: scheduler entity
*
- * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
+ * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
+ * convenience wrapper.
*/
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
@@ -314,9 +312,7 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
-/*
- * drm_sched_entity_clear_dep - callback to clear the entities dependency
- */
+/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@@ -358,11 +354,7 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
-/**
- * drm_sched_entity_add_dependency_cb - add callback for the entities dependency
- *
- * @entity: entity with dependency
- *
+/*
* Add a callback to the current dependency of the entity to wake up the
* scheduler when the entity becomes available.
*/
@@ -410,16 +402,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
return false;
}
-/**
- * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
- *
- * @entity: entity to get the job from
- *
- * Process all dependencies and try to get one job from the entities queue.
- */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_job *sched_job;
sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
@@ -427,7 +411,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
return NULL;
while ((entity->dependency =
- sched->ops->dependency(sched_job, entity))) {
+ drm_sched_job_dependency(sched_job, entity))) {
trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
if (drm_sched_entity_add_dependency_cb(entity))
@@ -439,30 +423,45 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
dma_fence_put(entity->last_scheduled);
+
entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
+ /*
+ * If the queue is empty we allow drm_sched_entity_select_rq() to
+ * locklessly access ->last_scheduled. This only works if we set the
+ * pointer before we dequeue and if we add a write barrier here.
+ */
+ smp_wmb();
+
spsc_queue_pop(&entity->job_queue);
return sched_job;
}
-/**
- * drm_sched_entity_select_rq - select a new rq for the entity
- *
- * @entity: scheduler entity
- *
- * Check all prerequisites and select a new rq for the entity for load
- * balancing.
- */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
struct dma_fence *fence;
struct drm_gpu_scheduler *sched;
struct drm_sched_rq *rq;
- if (spsc_queue_count(&entity->job_queue) || !entity->sched_list)
+ /* single possible engine and already selected */
+ if (!entity->sched_list)
return;
- fence = READ_ONCE(entity->last_scheduled);
+ /* queue non-empty, stay on the same engine */
+ if (spsc_queue_count(&entity->job_queue))
+ return;
+
+ /*
+ * Only when the queue is empty are we guaranteed that the scheduler
+ * thread cannot change ->last_scheduled. To enforce ordering we need
+ * a read barrier here. See drm_sched_entity_pop_job() for the other
+ * side.
+ */
+ smp_rmb();
+
+ fence = entity->last_scheduled;
+
+ /* stay on the same engine if the previous job hasn't finished */
if (fence && !dma_fence_is_signaled(fence))
return;
@@ -481,19 +480,18 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
/**
* drm_sched_entity_push_job - Submit a job to the entity's job queue
- *
* @sched_job: job to submit
- * @entity: scheduler entity
*
- * Note: To guarantee that the order of insertion to queue matches
- * the job's fence sequence number this function should be
- * called with drm_sched_job_init under common lock.
+ * Note: To guarantee that the order of insertion into the queue matches the job's
+ * fence sequence number, this function should be called with drm_sched_job_arm()
+ * under common lock for the struct drm_sched_entity that was set up for
+ * @sched_job in drm_sched_job_init().
*
* Returns 0 for success, negative error code otherwise.
*/
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity)
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
+ struct drm_sched_entity *entity = sched_job->entity;
bool first;
trace_drm_sched_job(sched_job, entity);
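With the new split between drm_sched_job_init(), drm_sched_job_arm() and the single-argument drm_sched_entity_push_job(), a driver's submit path roughly follows the shape below. This is only a sketch of the intended calling sequence; the my_job wrapper, my_prepare_job() step and submit_lock are hypothetical:

#include <linux/mutex.h>
#include <drm/gpu_scheduler.h>

struct my_job {
	struct drm_sched_job base;	/* hypothetical driver job wrapper */
};

static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
		     struct mutex *submit_lock)
{
	int ret;

	/* Allocates job->base.s_fence; must be undone with cleanup on error. */
	ret = drm_sched_job_init(&job->base, entity, job);
	if (ret)
		return ret;

	ret = my_prepare_job(job);	/* hypothetical driver-specific step */
	if (ret) {
		/* Aborted before arm: only drm_sched_job_cleanup() is allowed. */
		drm_sched_job_cleanup(&job->base);
		return ret;
	}

	mutex_lock(submit_lock);
	drm_sched_job_arm(&job->base);		/* initializes the fences */
	drm_sched_entity_push_job(&job->base);	/* entity taken from the job */
	mutex_unlock(submit_lock);

	return 0;
}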
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 69de2c76731f..7fd869520ef2 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -50,26 +50,12 @@ static void __exit drm_sched_fence_slab_fini(void)
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
- int ret = dma_fence_signal(&fence->scheduled);
-
- if (!ret)
- DMA_FENCE_TRACE(&fence->scheduled,
- "signaled from irq context\n");
- else
- DMA_FENCE_TRACE(&fence->scheduled,
- "was already signaled\n");
+ dma_fence_signal(&fence->scheduled);
}
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
- int ret = dma_fence_signal(&fence->finished);
-
- if (!ret)
- DMA_FENCE_TRACE(&fence->finished,
- "signaled from irq context\n");
- else
- DMA_FENCE_TRACE(&fence->finished,
- "was already signaled\n");
+ dma_fence_signal(&fence->finished);
}
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
@@ -83,19 +69,28 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
return (const char *)fence->sched->name;
}
+static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+ if (!WARN_ON_ONCE(!fence))
+ kmem_cache_free(sched_fence_slab, fence);
+}
+
/**
- * drm_sched_fence_free - free up the fence memory
+ * drm_sched_fence_free - free up an uninitialized fence
*
- * @rcu: RCU callback head
+ * @fence: fence to free
*
- * Free up the fence memory after the RCU grace period.
+ * Free up the fence memory. Should only be used if drm_sched_fence_init()
+ * has not been called yet.
*/
-static void drm_sched_fence_free(struct rcu_head *rcu)
+void drm_sched_fence_free(struct drm_sched_fence *fence)
{
- struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- struct drm_sched_fence *fence = to_drm_sched_fence(f);
-
- kmem_cache_free(sched_fence_slab, fence);
+ /* This function should not be called if the fence has been initialized. */
+ if (!WARN_ON_ONCE(fence->sched))
+ kmem_cache_free(sched_fence_slab, fence);
}
/**
@@ -111,7 +106,7 @@ static void drm_sched_fence_release_scheduled(struct dma_fence *f)
struct drm_sched_fence *fence = to_drm_sched_fence(f);
dma_fence_put(fence->parent);
- call_rcu(&fence->finished.rcu, drm_sched_fence_free);
+ call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}
/**
@@ -152,27 +147,32 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
}
EXPORT_SYMBOL(to_drm_sched_fence);
-struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
- void *owner)
+struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
+ void *owner)
{
struct drm_sched_fence *fence = NULL;
- unsigned seq;
fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
if (fence == NULL)
return NULL;
fence->owner = owner;
- fence->sched = entity->rq->sched;
spin_lock_init(&fence->lock);
+ return fence;
+}
+
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+ struct drm_sched_entity *entity)
+{
+ unsigned seq;
+
+ fence->sched = entity->rq->sched;
seq = atomic_inc_return(&entity->fence_seq);
dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
&fence->lock, entity->fence_context, seq);
dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
&fence->lock, entity->fence_context + 1, seq);
-
- return fence;
}
module_init(drm_sched_fence_slab_init);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 67382621b429..042c16b5d54a 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -48,9 +48,11 @@
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
+#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_print.h>
+#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
@@ -564,7 +566,6 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
/**
* drm_sched_job_init - init a scheduler job
- *
* @job: scheduler job to init
* @entity: scheduler entity to use
* @owner: job owner for debugging
@@ -572,43 +573,193 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
* Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
*
+ * Drivers must make sure drm_sched_job_cleanup() is called if this function returns
+ * successfully, even when @job is aborted before drm_sched_job_arm() is called.
+ *
+ * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
+ * has died, which can mean that there's no valid runqueue for an @entity.
+ * This function returns -ENOENT in this case (which probably should be -EIO as
+ * a more meaningful return value).
+ *
* Returns 0 for success, negative error code otherwise.
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner)
{
- struct drm_gpu_scheduler *sched;
-
drm_sched_entity_select_rq(entity);
if (!entity->rq)
return -ENOENT;
- sched = entity->rq->sched;
-
- job->sched = sched;
job->entity = entity;
- job->s_priority = entity->rq - sched->sched_rq;
- job->s_fence = drm_sched_fence_create(entity, owner);
+ job->s_fence = drm_sched_fence_alloc(entity, owner);
if (!job->s_fence)
return -ENOMEM;
- job->id = atomic64_inc_return(&sched->job_id_count);
INIT_LIST_HEAD(&job->list);
+ xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
+
return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
/**
- * drm_sched_job_cleanup - clean up scheduler job resources
+ * drm_sched_job_arm - arm a scheduler job for execution
+ * @job: scheduler job to arm
+ *
+ * This arms a scheduler job for execution. Specifically it initializes the
+ * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
+ * or other places that need to track the completion of this job.
+ *
+ * Refer to drm_sched_entity_push_job() documentation for locking
+ * considerations.
+ *
+ * This can only be called if drm_sched_job_init() succeeded.
+ */
+void drm_sched_job_arm(struct drm_sched_job *job)
+{
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_entity *entity = job->entity;
+
+ BUG_ON(!entity);
+
+ sched = entity->rq->sched;
+
+ job->sched = sched;
+ job->s_priority = entity->rq - sched->sched_rq;
+ job->id = atomic64_inc_return(&sched->job_id_count);
+
+ drm_sched_fence_init(job->s_fence, job->entity);
+}
+EXPORT_SYMBOL(drm_sched_job_arm);
+
+/**
+ * drm_sched_job_add_dependency - adds the fence as a job dependency
+ * @job: scheduler job to add the dependencies to
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Note that @fence is consumed in both the success and error cases.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence)
+{
+ struct dma_fence *entry;
+ unsigned long index;
+ u32 id = 0;
+ int ret;
+
+ if (!fence)
+ return 0;
+
+ /* Deduplicate if we already depend on a fence from the same context.
+ * This lets the size of the array of deps scale with the number of
+ * engines involved, rather than the number of BOs.
+ */
+ xa_for_each(&job->dependencies, index, entry) {
+ if (entry->context != fence->context)
+ continue;
+
+ if (dma_fence_is_later(fence, entry)) {
+ dma_fence_put(entry);
+ xa_store(&job->dependencies, index, fence, GFP_KERNEL);
+ } else {
+ dma_fence_put(fence);
+ }
+ return 0;
+ }
+
+ ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
+ if (ret != 0)
+ dma_fence_put(fence);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_sched_job_add_dependency);
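Because of the per-context deduplication above, the xarray grows by at most one slot per fence context rather than per buffer object. A minimal sketch of the resulting semantics, assuming @older and @newer come from the same fence context (the my_add_deps helper is hypothetical):

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static int my_add_deps(struct drm_sched_job *job,
		       struct dma_fence *older, struct dma_fence *newer)
{
	int ret;

	/* Each call consumes the reference handed to it, even on error. */
	ret = drm_sched_job_add_dependency(job, dma_fence_get(older));
	if (ret)
		return ret;

	/*
	 * If @newer shares @older's fence context, this replaces the stored
	 * entry (dropping @older's reference) instead of growing the array.
	 */
	return drm_sched_job_add_dependency(job, dma_fence_get(newer));
}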
+
+/**
+ * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
+ * dependencies
+ * @job: scheduler job to add the dependencies to
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
*
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write)
+{
+ int ret;
+ struct dma_fence **fences;
+ unsigned int i, fence_count;
+
+ if (!write) {
+ struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
+
+ return drm_sched_job_add_dependency(job, fence);
+ }
+
+ ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
+ if (ret || !fence_count)
+ return ret;
+
+ for (i = 0; i < fence_count; i++) {
+ ret = drm_sched_job_add_dependency(job, fences[i]);
+ if (ret)
+ break;
+ }
+
+ for (; i < fence_count; i++)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+ return ret;
+}
+EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
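A sketch of the intended call site for the helper above, assuming the job's GEM objects have already been locked with drm_gem_lock_reservations() as the kerneldoc requires; the my_add_implicit_deps wrapper and its bos array are hypothetical:

#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>

static int my_add_implicit_deps(struct drm_sched_job *job,
				struct drm_gem_object **bos,
				unsigned int num_bos, bool write)
{
	unsigned int i;
	int ret;

	/* Caller holds the reservations of all @bos. */
	for (i = 0; i < num_bos; i++) {
		ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
							      write);
		if (ret)
			return ret;
	}

	return 0;
}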
+
+/**
+ * drm_sched_job_cleanup - clean up scheduler job resources
* @job: scheduler job to clean up
+ *
+ * Cleans up the resources allocated with drm_sched_job_init().
+ *
+ * Drivers should call this from their error unwind code if @job is aborted
+ * before drm_sched_job_arm() is called.
+ *
+ * After that point of no return @job is committed to be executed by the
+ * scheduler, and this function should be called from the
+ * &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
- dma_fence_put(&job->s_fence->finished);
+ struct dma_fence *fence;
+ unsigned long index;
+
+ if (kref_read(&job->s_fence->finished.refcount)) {
+ /* drm_sched_job_arm() has been called */
+ dma_fence_put(&job->s_fence->finished);
+ } else {
+ /* aborted job before committing to run it */
+ drm_sched_fence_free(job->s_fence);
+ }
+
job->s_fence = NULL;
+
+ xa_for_each(&job->dependencies, index, fence) {
+ dma_fence_put(fence);
+ }
+ xa_destroy(&job->dependencies);
+
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
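The refcount check above is what lets drm_sched_job_cleanup() serve both unwind paths: before drm_sched_job_arm() the fence has only been allocated (its refcount is still zero), afterwards it is a live dma_fence. On the post-arm path the call typically happens in the driver's free_job callback, sketched here with a hypothetical my_job container and hypothetical run_job/timedout_job handlers:

#include <linux/slab.h>
#include <drm/gpu_scheduler.h>

static void my_free_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/*
	 * Puts the finished fence and releases the dependency xarray; the
	 * pre-arm error path calls the same function, where it frees the
	 * still-uninitialized fence instead.
	 */
	drm_sched_job_cleanup(sched_job);
	kfree(job);
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.run_job	= my_run_job,		/* hypothetical */
	.timedout_job	= my_timedout_job,	/* hypothetical */
	.free_job	= my_free_job,
};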
@@ -676,15 +827,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job, *next;
- /*
- * Don't destroy jobs while the timeout worker is running OR thread
- * is being parked and hence assumed to not touch pending_list
- */
- if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !cancel_delayed_work(&sched->work_tdr)) ||
- kthread_should_park())
- return NULL;
-
spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
@@ -693,17 +835,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
+
+ /* cancel this job's TO timer */
+ cancel_delayed_work(&sched->work_tdr);
/* make the scheduled timestamp more accurate */
next = list_first_entry_or_null(&sched->pending_list,
typeof(*next), list);
- if (next)
+
+ if (next) {
next->s_fence->scheduled.timestamp =
job->s_fence->finished.timestamp;
-
+ /* start TO timer for next job */
+ drm_sched_start_timeout(sched);
+ }
} else {
job = NULL;
- /* queue timeout for next job */
- drm_sched_start_timeout(sched);
}
spin_unlock(&sched->job_list_lock);
@@ -791,11 +937,8 @@ static int drm_sched_main(void *param)
(entity = drm_sched_select_entity(sched))) ||
kthread_should_stop());
- if (cleanup_job) {
+ if (cleanup_job)
sched->ops->free_job(cleanup_job);
- /* queue timeout for next job */
- drm_sched_start_timeout(sched);
- }
if (!entity)
continue;
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
index 1c19a5d3eefb..8d8d8e214c28 100644
--- a/drivers/gpu/drm/selftests/test-drm_damage_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
@@ -30,6 +30,7 @@ static void mock_setup(struct drm_plane_state *state)
mock_device.driver = &mock_driver;
mock_device.mode_config.prop_fb_damage_clips = &mock_prop;
mock_plane.dev = &mock_device;
+ mock_obj_props.count = 0;
mock_plane.base.properties = &mock_obj_props;
mock_prop.base.id = 1; /* 0 is an invalid id */
mock_prop.dev = &mock_device;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 7db01904d18d..80078a9fd7f6 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -192,7 +192,6 @@ static int shmob_drm_probe(struct platform_device *pdev)
struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
struct shmob_drm_device *sdev;
struct drm_device *ddev;
- struct resource *res;
unsigned int i;
int ret;
@@ -215,8 +214,7 @@ static int shmob_drm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sdev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdev->mmio = devm_ioremap_resource(&pdev->dev, res);
+ sdev->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->mmio))
return PTR_ERR(sdev->mmio);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index d09b08995b12..3c61ba8b43e0 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -927,12 +927,12 @@ static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp)
header = (struct fw_header *)firmware->data;
if (firmware->size < sizeof(*header)) {
- DRM_ERROR("Invalid firmware size (%d)\n", firmware->size);
+ DRM_ERROR("Invalid firmware size (%zu)\n", firmware->size);
goto out;
}
if ((sizeof(*header) + header->rd_size + header->wr_size +
header->pmem_size + header->dmem_size) != firmware->size) {
- DRM_ERROR("Invalid fmw structure (%d+%d+%d+%d+%d != %d)\n",
+ DRM_ERROR("Invalid fmw structure (%zu+%d+%d+%d+%d != %zu)\n",
sizeof(*header), header->rd_size, header->wr_size,
header->pmem_size, header->dmem_size,
firmware->size);
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 195de30eb90c..dbdee954692a 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -845,7 +845,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
LXCFBLR_CFBLL | LXCFBLR_CFBP, val);
/* Specifies the constant alpha value */
- val = CONSTA_MAX;
+ val = newstate->alpha >> 8;
reg_update_bits(ldev->regs, LTDC_L1CACR + lofs, LXCACR_CONSTA, val);
/* Specifies the blending factors */
@@ -997,6 +997,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
drm_plane_helper_add(plane, &ltdc_plane_helper_funcs);
+ drm_plane_create_alpha_property(plane);
+
DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id);
return plane;
@@ -1024,6 +1026,8 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
return -EINVAL;
}
+ drm_plane_create_zpos_immutable_property(primary, 0);
+
ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
&ltdc_crtc_funcs, NULL);
if (ret) {
@@ -1046,6 +1050,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
DRM_ERROR("Can not create overlay plane %d\n", i);
goto cleanup;
}
+ drm_plane_create_zpos_immutable_property(overlay, i);
}
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index bf8cfefa0365..f52ff4e6c662 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -782,7 +782,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_backend *backend;
const struct sun4i_backend_quirks *quirks;
- struct resource *res;
void __iomem *regs;
int i, ret;
@@ -815,8 +814,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
if (IS_ERR(backend->frontend))
dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index edb60ae0a9b7..56ae38389db0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -561,7 +561,6 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
struct sun4i_frontend *frontend;
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
- struct resource *res;
void __iomem *regs;
frontend = devm_kzalloc(dev, sizeof(*frontend), GFP_KERNEL);
@@ -576,8 +575,7 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
if (!frontend->data)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 2f2c9f0a1071..3799a745b7dd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -489,7 +489,6 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
struct cec_connector_info conn_info;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_hdmi *hdmi;
- struct resource *res;
u32 reg;
int ret;
@@ -504,8 +503,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
if (!hdmi->variant)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->base = devm_ioremap_resource(dev, res);
+ hdmi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->base)) {
dev_err(dev, "Couldn't map the HDMI encoder registers\n");
return PTR_ERR(hdmi->base);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 9f06dec0fc61..88db2d2a9336 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -841,11 +841,9 @@ static int sun4i_tcon_init_regmap(struct device *dev,
struct sun4i_tcon *tcon)
{
struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
void __iomem *regs;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index cb91bc11a0c7..94883abe0dfd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -538,7 +538,6 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_tv *tv;
- struct resource *res;
void __iomem *regs;
int ret;
@@ -548,8 +547,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
tv->drv = drv;
dev_set_drvdata(dev, tv);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs)) {
dev_err(dev, "Couldn't map the TV encoder registers\n");
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 4f5efcace68e..527c7b2474da 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -1104,7 +1104,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const char *bus_clk_name = NULL;
struct sun6i_dsi *dsi;
- struct resource *res;
void __iomem *base;
int ret;
@@ -1120,18 +1119,16 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
"allwinner,sun6i-a31-mipi-dsi"))
bus_clk_name = "bus";
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(dev, "Couldn't map the DSI encoder registers\n");
return PTR_ERR(base);
}
dsi->regulator = devm_regulator_get(dev, "vcc-dsi");
- if (IS_ERR(dsi->regulator)) {
- dev_err(dev, "Couldn't get VCC-DSI supply\n");
- return PTR_ERR(dsi->regulator);
- }
+ if (IS_ERR(dsi->regulator))
+ return dev_err_probe(dev, PTR_ERR(dsi->regulator),
+ "Couldn't get VCC-DSI supply\n");
dsi->reset = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(dsi->reset)) {
@@ -1146,10 +1143,9 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
}
dsi->bus_clk = devm_clk_get(dev, bus_clk_name);
- if (IS_ERR(dsi->bus_clk)) {
- dev_err(dev, "Couldn't get the DSI bus clock\n");
- return PTR_ERR(dsi->bus_clk);
- }
+ if (IS_ERR(dsi->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(dsi->bus_clk),
+ "Couldn't get the DSI bus clock\n");
ret = regmap_mmio_attach_clk(dsi->regs, dsi->bus_clk);
if (ret)
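The dev_err_probe() conversions in this hunk keep behavior except for -EPROBE_DEFER, which the helper records (visible via the devices_deferred debugfs file) instead of printing at err level; it also returns the error, collapsing get-and-bail to one statement. A hedged sketch of the pattern (example_get_bus_clk is an illustrative name, not from this patch):

static int example_get_bus_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "bus");

	/* Logs the reason on real errors, quietly records it for
	 * -EPROBE_DEFER, and always returns the passed-in error code. */
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Couldn't get the bus clock\n");

	*out = clk;
	return 0;
}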
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
index a55a38ad849c..022cafa6c06c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.h
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
@@ -16,8 +16,8 @@ struct sun8i_mixer;
#define CCSC10_OFFSET 0xA0000
#define CCSC11_OFFSET 0xF0000
-#define SUN8I_CSC_CTRL(base) (base + 0x0)
-#define SUN8I_CSC_COEFF(base, i) (base + 0x10 + 4 * i)
+#define SUN8I_CSC_CTRL(base) ((base) + 0x0)
+#define SUN8I_CSC_COEFF(base, i) ((base) + 0x10 + 4 * (i))
#define SUN8I_CSC_CTRL_EN BIT(0)
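The added parentheses are standard macro hygiene: without them, an expression argument binds into the surrounding arithmetic. A worked example of the failure mode (COEFF_BAD/COEFF_GOOD are illustrative names):

#define COEFF_BAD(base, i)	(base + 0x10 + 4 * i)	  /* pre-patch shape */
#define COEFF_GOOD(base, i)	((base) + 0x10 + 4 * (i)) /* patched shape */

/* COEFF_BAD(0x100, n + 1)  expands to 0x100 + 0x10 + 4 * n + 1, which is
 * 4 * n + 0x111 -- three bytes short of the intended register.
 * COEFF_GOOD(0x100, n + 1) expands to (0x100) + 0x10 + 4 * (n + 1), which
 * is 4 * n + 0x114, the coefficient offset actually wanted. */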
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 016b877051da..a8d75fd7e9f4 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -153,22 +153,19 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
return -EPROBE_DEFER;
hdmi->rst_ctrl = devm_reset_control_get(dev, "ctrl");
- if (IS_ERR(hdmi->rst_ctrl)) {
- dev_err(dev, "Could not get ctrl reset control\n");
- return PTR_ERR(hdmi->rst_ctrl);
- }
+ if (IS_ERR(hdmi->rst_ctrl))
+ return dev_err_probe(dev, PTR_ERR(hdmi->rst_ctrl),
+ "Could not get ctrl reset control\n");
hdmi->clk_tmds = devm_clk_get(dev, "tmds");
- if (IS_ERR(hdmi->clk_tmds)) {
- dev_err(dev, "Couldn't get the tmds clock\n");
- return PTR_ERR(hdmi->clk_tmds);
- }
+ if (IS_ERR(hdmi->clk_tmds))
+ return dev_err_probe(dev, PTR_ERR(hdmi->clk_tmds),
+ "Couldn't get the tmds clock\n");
hdmi->regulator = devm_regulator_get(dev, "hvcc");
- if (IS_ERR(hdmi->regulator)) {
- dev_err(dev, "Couldn't get regulator\n");
- return PTR_ERR(hdmi->regulator);
- }
+ if (IS_ERR(hdmi->regulator))
+ return dev_err_probe(dev, PTR_ERR(hdmi->regulator),
+ "Couldn't get regulator\n");
ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev);
if (!ret) {
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 5b42cf25cc86..f5e8aeaa3cdf 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -337,7 +337,6 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun8i_mixer *mixer;
- struct resource *res;
void __iomem *regs;
unsigned int base;
int plane_cnt;
@@ -390,8 +389,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
if (!mixer->cfg)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 75d8e60c149d..1b9b8b48f4a7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -128,7 +128,6 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
struct clk_hw_onecell_data *clk_data;
struct sun8i_tcon_top *tcon_top;
const struct sun8i_tcon_top_quirks *quirks;
- struct resource *res;
void __iomem *regs;
int ret, i;
@@ -158,8 +157,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
return PTR_ERR(tcon_top->bus);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
tcon_top->regs = regs;
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index cae8b8cbe9dd..c04dda8353fd 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -44,7 +44,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
{
uint64_t modifier = framebuffer->modifier;
- if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+ if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
else
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 6ec598f5d5b3..d38fd7e12b57 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -12,6 +12,7 @@
#include <linux/dma-buf.h>
#include <linux/iommu.h>
+#include <linux/module.h>
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
@@ -20,6 +21,8 @@
#include "drm.h"
#include "gem.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
static void tegra_bo_put(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
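The MODULE_IMPORT_NS(DMA_BUF) line is required because the dma-buf exports were moved into a symbol namespace; a module that calls them without importing the namespace fails modpost or module load. A sketch of the two sides (the exporting line is illustrative and lives in the dma-buf core, not in this patch):

/* Exporting side (dma-buf core): */
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);

/* Importing side, as added above; omitting it yields an error of the
 * form "module uses symbol ... from namespace DMA_BUF, but does not
 * import it". */
MODULE_IMPORT_NS(DMA_BUF);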
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index e00ec3f40ec8..16a1cdc28657 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -113,7 +113,7 @@ static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
return true;
/* check for the sector layout bit */
- if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+ if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
if (!tegra_plane_supports_sector_layout(plane))
return false;
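Both tegra hunks replace the open-coded '>> 56' vendor test with fourcc_mod_is_vendor(). A hedged sketch of the helper's likely shape (the real definitions live in include/uapi/drm/drm_fourcc.h; the token-pasting detail is an assumption here):

#define fourcc_mod_get_vendor(modifier) \
	(((modifier) >> 56) & 0xff)
#define fourcc_mod_is_vendor(modifier, vendor) \
	(fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)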
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 6b03f89a98d4..3ddb7c710a3d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -186,10 +186,8 @@ static void tilcdc_fini(struct drm_device *dev)
if (priv->mmio)
iounmap(priv->mmio);
- if (priv->wq) {
- flush_workqueue(priv->wq);
+ if (priv->wq)
destroy_workqueue(priv->wq);
- }
dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index d31be274a2bd..1ceb93fbdc50 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -44,7 +44,7 @@ config DRM_CIRRUS_QEMU
config DRM_GM12U320
tristate "GM12U320 driver for USB projectors"
- depends on DRM && USB
+ depends on DRM && USB && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
help
@@ -53,7 +53,7 @@ config DRM_GM12U320
config DRM_SIMPLEDRM
tristate "Simple framebuffer driver"
- depends on DRM
+ depends on DRM && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 73415fa9ae0f..2ce3bd903b70 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -63,6 +63,7 @@ MODULE_PARM_DESC(defy, "default y resolution");
enum bochs_types {
BOCHS_QEMU_STDVGA,
+ BOCHS_SIMICS,
BOCHS_UNKNOWN,
};
@@ -695,6 +696,13 @@ static const struct pci_device_id bochs_pci_tbl[] = {
.subdevice = PCI_ANY_ID,
.driver_data = BOCHS_UNKNOWN,
},
+ {
+ .vendor = 0x4321,
+ .device = 0x1111,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = BOCHS_SIMICS,
+ },
{ /* end of list */ }
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bb9e02c31946..d62b2013c367 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -69,7 +69,17 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
}
}
-static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
+{
+ struct ttm_device *bdev = bo->bdev;
+
+ list_move_tail(&bo->lru, &bdev->pinned);
+
+ if (bdev->funcs->del_from_lru_notify)
+ bdev->funcs->del_from_lru_notify(bo);
+}
+
+static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_device *bdev = bo->bdev;
@@ -98,7 +108,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
if (bo->pin_count) {
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
return;
}
@@ -342,7 +352,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
return ret;
}
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
list_del_init(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
@@ -914,57 +924,11 @@ out:
return ret;
}
-static bool ttm_bo_places_compat(const struct ttm_place *places,
- unsigned num_placement,
- struct ttm_resource *mem,
- uint32_t *new_flags)
-{
- unsigned i;
-
- if (mem->placement & TTM_PL_FLAG_TEMPORARY)
- return false;
-
- for (i = 0; i < num_placement; i++) {
- const struct ttm_place *heap = &places[i];
-
- if ((mem->start < heap->fpfn ||
- (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
- continue;
-
- *new_flags = heap->flags;
- if ((mem->mem_type == heap->mem_type) &&
- (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
- (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
- return true;
- }
- return false;
-}
-
-bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_resource *mem,
- uint32_t *new_flags)
-{
- if (ttm_bo_places_compat(placement->placement, placement->num_placement,
- mem, new_flags))
- return true;
-
- if ((placement->busy_placement != placement->placement ||
- placement->num_busy_placement > placement->num_placement) &&
- ttm_bo_places_compat(placement->busy_placement,
- placement->num_busy_placement,
- mem, new_flags))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(ttm_bo_mem_compat);
-
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
int ret;
- uint32_t new_flags;
dma_resv_assert_held(bo->base.resv);
@@ -977,7 +941,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Check whether we need to move buffer.
*/
- if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
+ if (!ttm_resource_compat(bo->resource, placement)) {
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
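ttm_resource_compat(), introduced in the ttm_resource.c hunk further down, takes over the old ttm_bo_mem_compat() check, so validation only moves the buffer when its current resource no longer satisfies the placement. A hedged caller-side sketch (example_pin_to_vram is an illustrative name):

static int example_pin_to_vram(struct ttm_buffer_object *bo)
{
	struct ttm_place place = { .mem_type = TTM_PL_VRAM };
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &place,
		.num_busy_placement = 1,
		.busy_placement = &place,
	};
	struct ttm_operation_ctx ctx = { .interruptible = true };

	/* A no-op when ttm_resource_compat() already accepts the
	 * buffer's current resource for this placement. */
	return ttm_bo_validate(bo, &placement, &ctx);
}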
@@ -1151,8 +1115,8 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
return -EBUSY;
if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+ bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
!ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
@@ -1165,7 +1129,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
return ret == -EBUSY ? -ENOSPC : ret;
}
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
/* TODO: Cleanup the locking */
spin_unlock(&bo->bdev->lru_lock);
@@ -1224,6 +1188,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
if (bo->ttm == NULL)
return;
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1c5ffe2935af..72a94301bc95 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -78,22 +78,21 @@ void ttm_mem_io_free(struct ttm_device *bdev,
/**
* ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
- * @bo: The struct ttm_buffer_object.
- * @new_mem: The struct ttm_resource we're moving to (copy destination).
- * @new_iter: A struct ttm_kmap_iter representing the destination resource.
+ * @clear: Whether to clear rather than copy.
+ * @num_pages: Number of pages of the operation.
+ * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
* @src_iter: A struct ttm_kmap_iter representing the source resource.
*
* This function is intended to be able to run asynchronously under a
* dma-fence if desired.
*/
-void ttm_move_memcpy(struct ttm_buffer_object *bo,
+void ttm_move_memcpy(bool clear,
u32 num_pages,
struct ttm_kmap_iter *dst_iter,
struct ttm_kmap_iter *src_iter)
{
const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
- struct ttm_tt *ttm = bo->ttm;
struct dma_buf_map src_map, dst_map;
pgoff_t i;
@@ -102,10 +101,7 @@ void ttm_move_memcpy(struct ttm_buffer_object *bo,
return;
/* Don't move nonexistent data. Clear destination instead. */
- if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
- if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
- return;
-
+ if (clear) {
for (i = 0; i < num_pages; ++i) {
dst_ops->map_local(dst_iter, &dst_map, i);
if (dst_map.is_iomem)
@@ -148,9 +144,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_kmap_iter_linear_io io;
} _dst_iter, _src_iter;
struct ttm_kmap_iter *dst_iter, *src_iter;
+ bool clear;
int ret = 0;
- if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
+ if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
dst_man->use_tt)) {
ret = ttm_tt_populate(bdev, ttm, ctx);
if (ret)
@@ -171,7 +168,9 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
goto out_src_iter;
}
- ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
+ clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
+ if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
+ ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
if (!src_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
@@ -190,6 +189,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
struct ttm_transfer_obj *fbo;
fbo = container_of(bo, struct ttm_transfer_obj, base);
+ dma_resv_fini(&fbo->base.base._resv);
ttm_bo_put(fbo->bo);
kfree(fbo);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index f56be5bc0861..33680c94127c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -162,9 +162,11 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
* Refuse to fault imported pages. This should be handled
* (if at all) by redirecting mmap to the exporter.
*/
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- dma_resv_unlock(bo->base.resv);
- return VM_FAULT_SIGBUS;
+ if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
}
return 0;
@@ -346,8 +348,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
} else if (unlikely(!page)) {
break;
}
- page->index = drm_vma_node_start(&bo->base.vma_node) +
- page_offset;
pfn = page_to_pfn(page);
}
@@ -519,11 +519,6 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
switch (bo->resource->mem_type) {
case TTM_PL_SYSTEM:
- if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(bo->ttm);
- if (unlikely(ret != 0))
- return ret;
- }
fallthrough;
case TTM_PL_TT:
ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 2df59b3c2ea1..be24bb6cefd0 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -220,6 +220,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
spin_lock_init(&bdev->lru_lock);
INIT_LIST_HEAD(&bdev->ddestroy);
+ INIT_LIST_HEAD(&bdev->pinned);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -257,3 +258,50 @@ void ttm_device_fini(struct ttm_device *bdev)
ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);
+
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
+{
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ unsigned int i, j;
+
+ spin_lock(&bdev->lru_lock);
+ while (!list_empty(&bdev->pinned)) {
+ bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru);
+ /* Take ref against racing releases once lru_lock is unlocked */
+ if (ttm_bo_get_unless_zero(bo)) {
+ list_del_init(&bo->lru);
+ spin_unlock(&bdev->lru_lock);
+
+ if (bo->ttm)
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+ ttm_bo_put(bo);
+ spin_lock(&bdev->lru_lock);
+ }
+ }
+
+ for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
+ man = ttm_manager_type(bdev, i);
+ if (!man || !man->use_tt)
+ continue;
+
+ for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+ while (!list_empty(&man->lru[j])) {
+ bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru);
+ if (ttm_bo_get_unless_zero(bo)) {
+ list_del_init(&bo->lru);
+ spin_unlock(&bdev->lru_lock);
+
+ if (bo->ttm)
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+ ttm_bo_put(bo);
+ spin_lock(&bdev->lru_lock);
+ }
+ }
+ }
+ }
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
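Together with the new bdev->pinned list in the ttm_bo.c hunk above, this export lets a driver drop every outstanding DMA mapping before the device disappears. A hedged usage sketch (the remove path is illustrative):

static void example_pci_remove(struct pci_dev *pdev)
{
	struct ttm_device *bdev = pci_get_drvdata(pdev);

	/* Unpopulate all TTs -- including pinned BOs, now reachable via
	 * bdev->pinned -- so no DMA mapping outlives the device. */
	ttm_device_clear_dma_mappings(bdev);
	ttm_device_fini(bdev);
}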
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 7fcdef278c74..0037eefe3239 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -40,6 +40,18 @@
#include "ttm_module.h"
/**
+ * DOC: TTM
+ *
+ * TTM is a memory manager for accelerator devices with dedicated memory.
+ *
+ * The basic idea is that resources are grouped together in buffer objects of
+ * a certain size and TTM handles lifetime, movement and CPU mappings of those
+ * objects.
+ *
+ * TODO: Add more design background and information here.
+ */
+
+/**
* ttm_prot_from_caching - Modify the page protection according to the
* ttm caching mode
* @caching: The ttm caching mode
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 82cbb29a05aa..1bba0a0ed3f9 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -70,7 +70,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
-static struct mutex shrinker_lock;
+static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@@ -263,9 +263,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
spin_lock_init(&pt->lock);
INIT_LIST_HEAD(&pt->pages);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
list_add_tail(&pt->shrinker_list, &shrinker_list);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
@@ -273,9 +273,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
struct page *p;
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
while ((p = ttm_pool_type_take(pt)))
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -313,24 +313,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
static unsigned int ttm_pool_shrink(void)
{
struct ttm_pool_type *pt;
- unsigned int num_freed;
+ unsigned int num_pages;
struct page *p;
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+ list_move_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
p = ttm_pool_type_take(pt);
if (p) {
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
- num_freed = 1 << pt->order;
+ num_pages = 1 << pt->order;
} else {
- num_freed = 0;
+ num_pages = 0;
}
- list_move_tail(&pt->shrinker_list, &shrinker_list);
- mutex_unlock(&shrinker_lock);
-
- return num_freed;
+ return num_pages;
}
/* Return the allocation order for a page */
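The shrink rework holds the global shrinker_lock only long enough to pick the head pool type and rotate it to the tail (so concurrent shrinkers fan out over different types); the page itself is freed after the lock is dropped, which is what makes the mutex-to-spinlock swap safe. A hedged sketch of how a scan callback would consume the per-call page count ttm_pool_shrink() now returns:

static unsigned long example_shrinker_scan(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	/* Each ttm_pool_shrink() call frees at most one page block and
	 * reports its size in pages; stop once enough pages were freed
	 * or when a pass frees nothing (all pools empty). */
	do {
		unsigned int freed = ttm_pool_shrink();

		if (!freed)
			break;
		num_freed += freed;
	} while (num_freed < sc->nr_to_scan);

	return num_freed;
}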
@@ -372,7 +371,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
WARN_ON(!num_pages || ttm_tt_is_populated(tt));
WARN_ON(dma_addr && !pool->dev);
- if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
if (ctx->gfp_retry_mayfail)
@@ -531,6 +530,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
+
+ /* We removed the pool types from the LRU, but we need to also make sure
+ * that no shrinker is concurrently freeing pages from the pool.
+ */
+ synchronize_shrinkers();
}
/* As long as pages are available make sure to release at least one */
@@ -605,7 +609,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
ttm_pool_debugfs_header(m);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
seq_puts(m, "wc\t:");
ttm_pool_debugfs_orders(global_write_combined, m);
seq_puts(m, "uc\t:");
@@ -614,7 +618,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
ttm_pool_debugfs_orders(global_dma32_write_combined, m);
seq_puts(m, "uc 32\t:");
ttm_pool_debugfs_orders(global_dma32_uncached, m);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
@@ -641,7 +645,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
ttm_pool_debugfs_header(m);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
seq_puts(m, "DMA ");
switch (i) {
@@ -657,7 +661,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
}
ttm_pool_debugfs_orders(pool->caching[i].orders, m);
}
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
return 0;
@@ -694,7 +698,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
if (!page_pool_size)
page_pool_size = num_pages;
- mutex_init(&shrinker_lock);
+ spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i < MAX_ORDER; ++i) {
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index f4b08a8705b3..67d68a4a8640 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -138,7 +138,7 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
* Initialise a generic range manager for the selected memory type.
* The range manager is installed for this device in the type slot.
*/
-int ttm_range_man_init(struct ttm_device *bdev,
+int ttm_range_man_init_nocheck(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size)
{
@@ -163,7 +163,7 @@ int ttm_range_man_init(struct ttm_device *bdev,
ttm_resource_manager_set_used(man, true);
return 0;
}
-EXPORT_SYMBOL(ttm_range_man_init);
+EXPORT_SYMBOL(ttm_range_man_init_nocheck);
/**
* ttm_range_man_fini
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(ttm_range_man_init);
*
* Remove the generic range manager from a slot and tear it down.
*/
-int ttm_range_man_fini(struct ttm_device *bdev,
+int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
unsigned type)
{
struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
@@ -200,4 +200,4 @@ int ttm_range_man_fini(struct ttm_device *bdev,
kfree(rman);
return 0;
}
-EXPORT_SYMBOL(ttm_range_man_fini);
+EXPORT_SYMBOL(ttm_range_man_fini_nocheck);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 2431717376e7..035d71332d18 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -67,6 +67,55 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
}
EXPORT_SYMBOL(ttm_resource_free);
+static bool ttm_resource_places_compat(struct ttm_resource *res,
+ const struct ttm_place *places,
+ unsigned num_placement)
+{
+ unsigned i;
+
+ if (res->placement & TTM_PL_FLAG_TEMPORARY)
+ return false;
+
+ for (i = 0; i < num_placement; i++) {
+ const struct ttm_place *heap = &places[i];
+
+ if (res->start < heap->fpfn || (heap->lpfn &&
+ (res->start + res->num_pages) > heap->lpfn))
+ continue;
+
+ if ((res->mem_type == heap->mem_type) &&
+ (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
+ (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ttm_resource_compat - check if resource is compatible with placement
+ *
+ * @res: the resource to check
+ * @placement: the placement to check against
+ *
+ * Returns true if the placement is compatible.
+ */
+bool ttm_resource_compat(struct ttm_resource *res,
+ struct ttm_placement *placement)
+{
+ if (ttm_resource_places_compat(res, placement->placement,
+ placement->num_placement))
+ return true;
+
+ if ((placement->busy_placement != placement->placement ||
+ placement->num_busy_placement > placement->num_placement) &&
+ ttm_resource_places_compat(res, placement->busy_placement,
+ placement->num_busy_placement))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ttm_resource_compat);
+
/**
* ttm_resource_manager_init
*
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5cd8b5dc0bf..7e83c00a3f48 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -68,12 +68,12 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
switch (bo->type) {
case ttm_bo_type_device:
if (zero_alloc)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
break;
case ttm_bo_type_kernel:
break;
case ttm_bo_type_sg:
- page_flags |= TTM_PAGE_FLAG_SG;
+ page_flags |= TTM_TT_FLAG_EXTERNAL;
break;
default:
pr_err("Illegal buffer object type\n");
@@ -84,6 +84,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
if (unlikely(bo->ttm == NULL))
return -ENOMEM;
+ WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
+ !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));
+
return 0;
}
@@ -122,17 +125,6 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
return 0;
}
-void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
-{
- ttm_tt_unpopulate(bdev, ttm);
-
- if (ttm->swap_storage)
- fput(ttm->swap_storage);
-
- ttm->swap_storage = NULL;
-}
-EXPORT_SYMBOL(ttm_tt_destroy_common);
-
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
bdev->funcs->ttm_tt_destroy(bdev, ttm);
@@ -167,6 +159,12 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
+ WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
+
+ if (ttm->swap_storage)
+ fput(ttm->swap_storage);
+ ttm->swap_storage = NULL;
+
if (ttm->pages)
kvfree(ttm->pages);
else
@@ -183,7 +181,7 @@ int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
ttm_tt_init_fields(ttm, bo, page_flags, caching);
- if (page_flags & TTM_PAGE_FLAG_SG)
+ if (page_flags & TTM_TT_FLAG_EXTERNAL)
ret = ttm_sg_tt_alloc_page_directory(ttm);
else
ret = ttm_dma_tt_alloc_page_directory(ttm);
@@ -229,7 +227,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
fput(swap_storage);
ttm->swap_storage = NULL;
- ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
return 0;
@@ -284,7 +282,7 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
ttm_tt_unpopulate(bdev, ttm);
ttm->swap_storage = swap_storage;
- ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
return ttm->num_pages;
@@ -294,17 +292,6 @@ out_err:
return ret;
}
-static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
-{
- pgoff_t i;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i]->mapping = bdev->dev_mapping;
-}
-
int ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
@@ -316,7 +303,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ttm_tt_is_populated(ttm))
return 0;
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_add(ttm->num_pages,
@@ -341,9 +328,8 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ret)
goto error;
- ttm_tt_add_mapping(bdev, ttm);
- ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
ttm_tt_unpopulate(bdev, ttm);
@@ -354,7 +340,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
return 0;
error:
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_sub(ttm->num_pages,
@@ -364,39 +350,24 @@ error:
}
EXPORT_SYMBOL(ttm_tt_populate);
-static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
-{
- pgoff_t i;
- struct page **page = ttm->pages;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- (*page)->mapping = NULL;
- (*page++)->index = 0;
- }
-}
-
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
if (!ttm_tt_is_populated(ttm))
return;
- ttm_tt_clear_mapping(ttm);
if (bdev->funcs->ttm_tt_unpopulate)
bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
else
ttm_pool_free(&bdev->pool, ttm);
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_sub(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
- ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 1f497d8f1ae5..c744175c6992 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -4,6 +4,7 @@ config DRM_UDL
depends on DRM
depends on USB
depends on USB_ARCH_HAS_HCD
+ depends on MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
index 9a5c44606337..e973ec487484 100644
--- a/drivers/gpu/drm/v3d/Kconfig
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_V3D
tristate "Broadcom V3D 3.x and newer"
- depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST
+ depends on ARCH_BCM || ARCH_BRCMSTB || COMPILE_TEST
depends on DRM
depends on COMMON_CLK
depends on MMU
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 9403c3b36aca..bd46396a1ae0 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -83,7 +83,6 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
return 0;
}
-
switch (args->param) {
case DRM_V3D_PARAM_SUPPORTS_TFU:
args->value = 1;
@@ -97,6 +96,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_PERFMON:
args->value = (v3d->ver >= 40);
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
+ args->value = 1;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -136,9 +138,8 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
struct v3d_file_priv *v3d_priv = file->driver_priv;
enum v3d_queue q;
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
+ for (q = 0; q < V3D_MAX_QUEUES; q++)
drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
- }
v3d_perfmon_close_file(v3d_priv);
kfree(v3d_priv);
@@ -147,7 +148,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
- * protection between clients. Note that render nodes would be be
+ * protection between clients. Note that render nodes would be
* able to submit CLs that could access BOs from clients authenticated
* with the master node. The TFU doesn't use the GMP, so it would
* need to stay DRM_AUTH until we do buffer size/offset validation.
@@ -206,10 +207,7 @@ MODULE_DEVICE_TABLE(of, v3d_of_match);
static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
- struct resource *res =
- platform_get_resource_byname(v3d_to_pdev(v3d), IORESOURCE_MEM, name);
-
- *regs = devm_ioremap_resource(v3d->drm.dev, res);
+ *regs = devm_platform_ioremap_resource_byname(v3d_to_pdev(v3d), name);
return PTR_ERR_OR_ZERO(*regs);
}
@@ -222,7 +220,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
u32 mmu_debug;
u32 ident1;
-
v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
if (IS_ERR(v3d))
return PTR_ERR(v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 270134779073..b74b1351bfc8 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -19,15 +19,6 @@ struct reset_control;
#define GMP_GRANULARITY (128 * 1024)
-/* Enum for each of the V3D queues. */
-enum v3d_queue {
- V3D_BIN,
- V3D_RENDER,
- V3D_TFU,
- V3D_CSD,
- V3D_CACHE_CLEAN,
-};
-
#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)
struct v3d_queue_state {
@@ -234,11 +225,6 @@ struct v3d_job {
struct drm_gem_object **bo;
u32 bo_count;
- /* Array of struct dma_fence * to block on before submitting this job.
- */
- struct xarray deps;
- unsigned long last_dep;
-
/* v3d fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *irq_fence;
@@ -299,6 +285,21 @@ struct v3d_csd_job {
struct drm_v3d_submit_csd args;
};
+struct v3d_submit_outsync {
+ struct drm_syncobj *syncobj;
+};
+
+struct v3d_submit_ext {
+ u32 flags;
+ u32 wait_stage;
+
+ u32 in_sync_count;
+ u64 in_syncs;
+
+ u32 out_sync_count;
+ struct v3d_submit_outsync *out_syncs;
+};
+
/**
* __wait_for - magic wait macro
*
@@ -379,6 +380,7 @@ int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void v3d_job_cleanup(struct v3d_job *job);
void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 5689da118197..6a000d77c568 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -197,8 +197,8 @@ v3d_clean_caches(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
- V3D_L2TCACTL_L2TFLS), 100)) {
- DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
+ V3D_L2TCACTL_TMUWCF), 100)) {
+ DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
}
mutex_lock(&v3d->cache_clean_lock);
@@ -259,8 +259,8 @@ v3d_lock_bo_reservations(struct v3d_job *job,
return ret;
for (i = 0; i < job->bo_count; i++) {
- ret = drm_gem_fence_array_add_implicit(&job->deps,
- job->bo[i], true);
+ ret = drm_sched_job_add_implicit_dependencies(&job->base,
+ job->bo[i], true);
if (ret) {
drm_gem_unlock_reservations(job->bo, job->bo_count,
acquire_ctx);
@@ -356,8 +356,6 @@ static void
v3d_job_free(struct kref *ref)
{
struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
- unsigned long index;
- struct dma_fence *fence;
int i;
for (i = 0; i < job->bo_count; i++) {
@@ -366,11 +364,6 @@ v3d_job_free(struct kref *ref)
}
kvfree(job->bo);
- xa_for_each(&job->deps, index, fence) {
- dma_fence_put(fence);
- }
- xa_destroy(&job->deps);
-
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
@@ -397,6 +390,15 @@ v3d_render_job_free(struct kref *ref)
v3d_job_free(ref);
}
+void v3d_job_cleanup(struct v3d_job *job)
+{
+ if (!job)
+ return;
+
+ drm_sched_job_cleanup(&job->base);
+ v3d_job_put(job);
+}
+
void v3d_job_put(struct v3d_job *job)
{
kref_put(&job->refcount, job->free);
@@ -417,7 +419,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
ret = drm_gem_dma_resv_wait(file_priv, args->handle,
- true, timeout_jiffies);
+ true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
* such that the ioctl will be restarted.
@@ -436,58 +438,97 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
}
static int
-v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
- struct v3d_job *job, void (*free)(struct kref *ref),
- u32 in_sync)
+v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job,
+ u32 in_sync, u32 point)
{
struct dma_fence *in_fence = NULL;
int ret;
+ ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence);
+ if (ret == -EINVAL)
+ return ret;
+
+ return drm_sched_job_add_dependency(&job->base, in_fence);
+}
+
+static int
+v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
+ void **container, size_t size, void (*free)(struct kref *ref),
+ u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct v3d_job *job;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+ int ret, i;
+
+ *container = kcalloc(1, size, GFP_KERNEL);
+ if (!*container) {
+ DRM_ERROR("Cannot allocate memory for v3d job.");
+ return -ENOMEM;
+ }
+
+ job = *container;
job->v3d = v3d;
job->free = free;
ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
- return ret;
-
- xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
-
- ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
- if (ret == -EINVAL)
goto fail;
- ret = drm_gem_fence_array_add(&job->deps, in_fence);
+ ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
+ v3d_priv);
if (ret)
- goto fail;
+ goto fail_job;
+
+ if (has_multisync) {
+ if (se->in_sync_count && se->wait_stage == queue) {
+ struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
+
+ for (i = 0; i < se->in_sync_count; i++) {
+ struct drm_v3d_sem in;
+
+ ret = copy_from_user(&in, handle++, sizeof(in));
+ if (ret) {
+ DRM_DEBUG("Failed to copy wait dep handle.\n");
+ goto fail_deps;
+ }
+ ret = v3d_job_add_deps(file_priv, job, in.handle, 0);
+ if (ret)
+ goto fail_deps;
+ }
+ }
+ } else {
+ ret = v3d_job_add_deps(file_priv, job, in_sync, 0);
+ if (ret)
+ goto fail_deps;
+ }
kref_init(&job->refcount);
return 0;
-fail:
- xa_destroy(&job->deps);
+
+fail_deps:
+ drm_sched_job_cleanup(&job->base);
+fail_job:
pm_runtime_put_autosuspend(v3d->drm.dev);
+fail:
+ kfree(*container);
+ *container = NULL;
+
return ret;
}
-static int
-v3d_push_job(struct v3d_file_priv *v3d_priv,
- struct v3d_job *job, enum v3d_queue queue)
+static void
+v3d_push_job(struct v3d_job *job)
{
- int ret;
-
- ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- v3d_priv);
- if (ret)
- return ret;
+ drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
/* put by scheduler job completion */
kref_get(&job->refcount);
- drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);
-
- return 0;
+ drm_sched_entity_push_job(&job->base);
}
static void
@@ -495,27 +536,173 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
struct v3d_job *job,
struct ww_acquire_ctx *acquire_ctx,
u32 out_sync,
+ struct v3d_submit_ext *se,
struct dma_fence *done_fence)
{
struct drm_syncobj *sync_out;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
int i;
for (i = 0; i < job->bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
dma_resv_add_excl_fence(job->bo[i]->resv,
- job->done_fence);
+ job->done_fence);
}
drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
/* Update the return sync object for the job */
- sync_out = drm_syncobj_find(file_priv, out_sync);
- if (sync_out) {
- drm_syncobj_replace_fence(sync_out, done_fence);
- drm_syncobj_put(sync_out);
+ /* If the submission only signals a single semaphore */
+ if (!has_multisync) {
+ sync_out = drm_syncobj_find(file_priv, out_sync);
+ if (sync_out) {
+ drm_syncobj_replace_fence(sync_out, done_fence);
+ drm_syncobj_put(sync_out);
+ }
+ return;
+ }
+
+ /* If multiple semaphores extension is supported */
+ if (se->out_sync_count) {
+ for (i = 0; i < se->out_sync_count; i++) {
+ drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
+ done_fence);
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ }
+ kvfree(se->out_syncs);
}
}
+static void
+v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
+{
+ unsigned int i;
+
+ if (!(se && se->out_sync_count))
+ return;
+
+ for (i = 0; i < se->out_sync_count; i++)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+}
+
+static int
+v3d_get_multisync_post_deps(struct drm_file *file_priv,
+ struct v3d_submit_ext *se,
+ u32 count, u64 handles)
+{
+ struct drm_v3d_sem __user *post_deps;
+ int i, ret;
+
+ if (!count)
+ return 0;
+
+ se->out_syncs = (struct v3d_submit_outsync *)
+ kvmalloc_array(count,
+ sizeof(struct v3d_submit_outsync),
+ GFP_KERNEL);
+ if (!se->out_syncs)
+ return -ENOMEM;
+
+ post_deps = u64_to_user_ptr(handles);
+
+ for (i = 0; i < count; i++) {
+ struct drm_v3d_sem out;
+
+ ret = copy_from_user(&out, post_deps++, sizeof(out));
+ if (ret) {
+ DRM_DEBUG("Failed to copy post dep handles\n");
+ goto fail;
+ }
+
+ se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
+ out.handle);
+ if (!se->out_syncs[i].syncobj) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+ se->out_sync_count = count;
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+
+ return ret;
+}
+
+/* Get data for multiple-binary-semaphore synchronization: parse the
+ * syncobjs to be signaled when the job completes (out_sync).
+ */
+static int
+v3d_get_multisync_submit_deps(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ void *data)
+{
+ struct drm_v3d_multi_sync multisync;
+ struct v3d_submit_ext *se = data;
+ int ret;
+
+ ret = copy_from_user(&multisync, ext, sizeof(multisync));
+ if (ret)
+ return ret;
+
+ if (multisync.pad)
+ return -EINVAL;
+
+ ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count,
+ multisync.out_syncs);
+ if (ret)
+ return ret;
+
+ se->in_sync_count = multisync.in_sync_count;
+ se->in_syncs = multisync.in_syncs;
+ se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
+ se->wait_stage = multisync.wait_stage;
+
+ return 0;
+}
+
+/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
+ * according to the extension id (name).
+ */
+static int
+v3d_get_extensions(struct drm_file *file_priv,
+ u64 ext_handles,
+ void *data)
+{
+ struct drm_v3d_extension __user *user_ext;
+ int ret;
+
+ user_ext = u64_to_user_ptr(ext_handles);
+ while (user_ext) {
+ struct drm_v3d_extension ext;
+
+ if (copy_from_user(&ext, user_ext, sizeof(ext))) {
+ DRM_DEBUG("Failed to copy submit extension\n");
+ return -EFAULT;
+ }
+
+ switch (ext.id) {
+ case DRM_V3D_EXT_ID_MULTI_SYNC:
+ ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data);
+ if (ret)
+ return ret;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
+ return -EINVAL;
+ }
+
+ user_ext = u64_to_user_ptr(ext.next);
+ }
+
+ return 0;
+}
+
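For orientation, the extension list walked by v3d_get_extensions() is built by userspace as a chain of structs linked through 64-bit next pointers. A hedged userspace-side sketch; the uapi field names follow the kernel-side accesses above and should be treated as assumptions:

static void example_fill_multisync(struct drm_v3d_submit_cl *submit,
				   struct drm_v3d_multi_sync *ms,
				   const struct drm_v3d_sem *in_syncs,
				   uint32_t n_in,
				   const struct drm_v3d_sem *out_syncs,
				   uint32_t n_out)
{
	memset(ms, 0, sizeof(*ms));
	ms->base.id        = DRM_V3D_EXT_ID_MULTI_SYNC;
	ms->base.next      = 0;           /* end of the extension chain */
	ms->in_syncs       = (uintptr_t)in_syncs;
	ms->in_sync_count  = n_in;
	ms->out_syncs      = (uintptr_t)out_syncs;
	ms->out_sync_count = n_out;
	ms->wait_stage     = V3D_BIN;     /* queue that waits on in_syncs */

	submit->flags     |= DRM_V3D_SUBMIT_EXTENSION;
	submit->extensions = (uintptr_t)ms;
}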
/**
* v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
* @dev: DRM device
@@ -535,8 +722,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_cl *args = data;
+ struct v3d_submit_ext se = {0};
struct v3d_bin_job *bin = NULL;
- struct v3d_render_job *render;
+ struct v3d_render_job *render = NULL;
struct v3d_job *clean_job = NULL;
struct v3d_job *last_job;
struct ww_acquire_ctx acquire_ctx;
@@ -544,44 +732,38 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
- if (args->pad != 0)
+ if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ if (args->flags &&
+ args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
+ DRM_V3D_SUBMIT_EXTENSION)) {
DRM_INFO("invalid flags: %d\n", args->flags);
return -EINVAL;
}
- render = kcalloc(1, sizeof(*render), GFP_KERNEL);
- if (!render)
- return -ENOMEM;
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render),
+ v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
+ if (ret)
+ goto fail;
render->start = args->rcl_start;
render->end = args->rcl_end;
INIT_LIST_HEAD(&render->unref_list);
- ret = v3d_job_init(v3d, file_priv, &render->base,
- v3d_render_job_free, args->in_sync_rcl);
- if (ret) {
- kfree(render);
- return ret;
- }
-
if (args->bcl_start != args->bcl_end) {
- bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
- if (!bin) {
- v3d_job_put(&render->base);
- return -ENOMEM;
- }
-
- ret = v3d_job_init(v3d, file_priv, &bin->base,
- v3d_job_free, args->in_sync_bcl);
- if (ret) {
- v3d_job_put(&render->base);
- kfree(bin);
- return ret;
- }
+ ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin),
+ v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
+ if (ret)
+ goto fail;
bin->start = args->bcl_start;
bin->end = args->bcl_end;
@@ -592,18 +774,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
- clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
- if (!clean_job) {
- ret = -ENOMEM;
- goto fail;
- }
-
- ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
- if (ret) {
- kfree(clean_job);
- clean_job = NULL;
+ ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
+ v3d_job_free, 0, 0, V3D_CACHE_CLEAN);
+ if (ret)
goto fail;
- }
last_job = clean_job;
} else {
@@ -633,31 +807,26 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (bin) {
bin->base.perfmon = render->base.perfmon;
v3d_perfmon_get(bin->base.perfmon);
- ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&bin->base);
- ret = drm_gem_fence_array_add(&render->base.deps,
- dma_fence_get(bin->base.done_fence));
+ ret = drm_sched_job_add_dependency(&render->base.base,
+ dma_fence_get(bin->base.done_fence));
if (ret)
goto fail_unreserve;
}
- ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&render->base);
if (clean_job) {
struct dma_fence *render_fence =
dma_fence_get(render->base.done_fence);
- ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ render_fence);
if (ret)
goto fail_unreserve;
clean_job->perfmon = render->base.perfmon;
v3d_perfmon_get(clean_job->perfmon);
- ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(clean_job);
}
mutex_unlock(&v3d->sched_lock);
@@ -666,6 +835,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
last_job,
&acquire_ctx,
args->out_sync,
+ &se,
last_job->done_fence);
if (bin)
@@ -681,11 +851,10 @@ fail_unreserve:
drm_gem_unlock_reservations(last_job->bo,
last_job->bo_count, &acquire_ctx);
fail:
- if (bin)
- v3d_job_put(&bin->base);
- v3d_job_put(&render->base);
- if (clean_job)
- v3d_job_put(clean_job);
+ v3d_job_cleanup((void *)bin);
+ v3d_job_cleanup((void *)render);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
return ret;
}
@@ -704,30 +873,37 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_tfu *args = data;
- struct v3d_tfu_job *job;
+ struct v3d_submit_ext se = {0};
+ struct v3d_tfu_job *job = NULL;
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
- job = kcalloc(1, sizeof(*job), GFP_KERNEL);
- if (!job)
- return -ENOMEM;
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_DEBUG("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
- ret = v3d_job_init(v3d, file_priv, &job->base,
- v3d_job_free, args->in_sync);
- if (ret) {
- kfree(job);
- return ret;
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
}
+ ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
+ v3d_job_free, args->in_sync, &se, V3D_TFU);
+ if (ret)
+ goto fail;
+
job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
sizeof(*job->base.bo), GFP_KERNEL);
if (!job->base.bo) {
- v3d_job_put(&job->base);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
job->args = *args;
@@ -761,26 +937,22 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
goto fail;
mutex_lock(&v3d->sched_lock);
- ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&job->base);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
&job->base, &acquire_ctx,
args->out_sync,
+ &se,
job->base.done_fence);
v3d_job_put(&job->base);
return 0;
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
- drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
- &acquire_ctx);
fail:
- v3d_job_put(&job->base);
+ v3d_job_cleanup((void *)job);
+ v3d_put_multisync_post_deps(&se);
return ret;
}
@@ -801,42 +973,44 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_csd *args = data;
- struct v3d_csd_job *job;
- struct v3d_job *clean_job;
+ struct v3d_submit_ext se = {0};
+ struct v3d_csd_job *job = NULL;
+ struct v3d_job *clean_job = NULL;
struct ww_acquire_ctx acquire_ctx;
int ret;
trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
+ if (args->pad)
+ return -EINVAL;
+
if (!v3d_has_csd(v3d)) {
DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
return -EINVAL;
}
- job = kcalloc(1, sizeof(*job), GFP_KERNEL);
- if (!job)
- return -ENOMEM;
-
- ret = v3d_job_init(v3d, file_priv, &job->base,
- v3d_job_free, args->in_sync);
- if (ret) {
- kfree(job);
- return ret;
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
+ return -EINVAL;
}
- clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
- if (!clean_job) {
- v3d_job_put(&job->base);
- kfree(job);
- return -ENOMEM;
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
}
- ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
- if (ret) {
- v3d_job_put(&job->base);
- kfree(clean_job);
- return ret;
- }
+ ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
+ v3d_job_free, args->in_sync, &se, V3D_CSD);
+ if (ret)
+ goto fail;
+
+ ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
+ v3d_job_free, 0, 0, V3D_CACHE_CLEAN);
+ if (ret)
+ goto fail;
job->args = *args;
@@ -859,24 +1033,21 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&v3d->sched_lock);
- ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&job->base);
- ret = drm_gem_fence_array_add(&clean_job->deps,
- dma_fence_get(job->base.done_fence));
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ dma_fence_get(job->base.done_fence));
if (ret)
goto fail_unreserve;
- ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(clean_job);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
clean_job,
&acquire_ctx,
args->out_sync,
+ &se,
clean_job->done_fence);
v3d_job_put(&job->base);
@@ -889,8 +1060,9 @@ fail_unreserve:
drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
&acquire_ctx);
fail:
- v3d_job_put(&job->base);
- v3d_job_put(clean_job);
+ v3d_job_cleanup((void *)job);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
return ret;
}
@@ -924,8 +1096,7 @@ v3d_gem_init(struct drm_device *dev)
if (!v3d->pt) {
drm_mm_takedown(&v3d->mm);
dev_err(v3d->drm.dev,
- "Failed to allocate page tables. "
- "Please ensure you have CMA enabled.\n");
+ "Failed to allocate page tables. Please ensure you have CMA enabled.\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index dd7fcc36d726..e0cb7d0697a7 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -13,7 +13,7 @@
* jobs when bulk background jobs are queued up, we submit a new job
* to the HW only when it has completed the last one, instead of
* filling up the CT[01]Q FIFOs with jobs. Similarly, we use
- * v3d_job_dependency() to manage the dependency between bin and
+ * drm_sched_job_add_dependency() to manage the dependency between bin and
* render, instead of having the clients submit jobs using the HW's
* semaphores to interlock between them.
*/
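The dependency bookkeeping this comment refers to now lives in the shared scheduler: drivers record fences on the job at submit time with drm_sched_job_add_dependency() instead of handing them out through a .dependency hook. A minimal sketch of the bin-to-render case the comment mentions, assuming both jobs are already initialised (error handling elided):

	/* The helper consumes one fence reference, hence the get. */
	ret = drm_sched_job_add_dependency(&render->base.base,
					   dma_fence_get(bin->base.done_fence));
	if (ret)
		goto fail;

The same pattern appears in v3d_submit_csd_ioctl() above, where the cache-clean job is chained behind the CSD job's done_fence.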
@@ -55,12 +55,11 @@ to_csd_job(struct drm_sched_job *sched_job)
}
static void
-v3d_job_free(struct drm_sched_job *sched_job)
+v3d_sched_job_free(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
- drm_sched_job_cleanup(sched_job);
- v3d_job_put(job);
+ v3d_job_cleanup(job);
}
static void
@@ -73,28 +72,6 @@ v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
v3d_perfmon_start(v3d, job->perfmon);
}
-/*
- * Returns the fences that the job depends on, one by one.
- *
- * If placed in the scheduler's .dependency method, the corresponding
- * .run_job won't be called until all of them have been signaled.
- */
-static struct dma_fence *
-v3d_job_dependency(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity)
-{
- struct v3d_job *job = to_v3d_job(sched_job);
-
- /* XXX: Wait on a fence for switching the GMP if necessary,
- * and then do so.
- */
-
- if (!xa_empty(&job->deps))
- return xa_erase(&job->deps, job->last_dep++);
-
- return NULL;
-}
-
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
@@ -373,38 +350,33 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
}
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_bin_job_run,
.timedout_job = v3d_bin_job_timedout,
- .free_job = v3d_job_free,
+ .free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_render_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_render_job_run,
.timedout_job = v3d_render_job_timedout,
- .free_job = v3d_job_free,
+ .free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_tfu_job_run,
.timedout_job = v3d_generic_job_timedout,
- .free_job = v3d_job_free,
+ .free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_csd_job_run,
.timedout_job = v3d_csd_job_timedout,
- .free_job = v3d_job_free
+ .free_job = v3d_sched_job_free
};
static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_cache_clean_job_run,
.timedout_job = v3d_generic_job_timedout,
- .free_job = v3d_job_free
+ .free_job = v3d_sched_job_free
};
int
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 2b81cb259d23..a6c81af37345 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -69,7 +69,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = vbox_mode_init(vbox);
if (ret)
- goto err_mm_fini;
+ goto err_hw_fini;
ret = vbox_irq_init(vbox);
if (ret)
@@ -87,8 +87,6 @@ err_irq_fini:
vbox_irq_fini(vbox);
err_mode_fini:
vbox_mode_fini(vbox);
-err_mm_fini:
- vbox_mm_fini(vbox);
err_hw_fini:
vbox_hw_fini(vbox);
return ret;
@@ -101,7 +99,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(&vbox->ddev);
vbox_irq_fini(vbox);
vbox_mode_fini(vbox);
- vbox_mm_fini(vbox);
vbox_hw_fini(vbox);
}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index 4903b91d7fe4..e77bd6512eb1 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -139,7 +139,6 @@ void vbox_mode_fini(struct vbox_private *vbox);
void vbox_report_caps(struct vbox_private *vbox);
int vbox_mm_init(struct vbox_private *vbox);
-void vbox_mm_fini(struct vbox_private *vbox);
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index fd8a53a4d8d6..dc24c2172fd4 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -13,22 +13,21 @@
int vbox_mm_init(struct vbox_private *vbox)
{
int ret;
+ resource_size_t base, size;
struct drm_device *dev = &vbox->ddev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
- vbox->available_vram_size);
+ base = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_phys_wc_add(&pdev->dev, base, size);
+
+ ret = drmm_vram_helper_init(dev, base, vbox->available_vram_size);
if (ret) {
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
return ret;
}
- vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
return 0;
}
-
-void vbox_mm_fini(struct vbox_private *vbox)
-{
- arch_phys_wc_del(vbox->fb_mtrr);
-}
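The teardown path can disappear because devm_arch_phys_wc_add() ties the write-combine MTRR to the device's lifetime, so there is no fb_mtrr cookie left to track. Roughly what the devres helper does internally, as a sketch rather than the actual implementation:

	static void wc_release(void *cookie)	/* hypothetical helper */
	{
		arch_phys_wc_del((long)cookie);
	}

	int mtrr = arch_phys_wc_add(base, size);

	if (mtrr >= 0)
		devm_add_action_or_reset(dev, wc_release, (void *)(long)mtrr);

The comment in vbox_mm_init() carries the design note: MTRR setup is best-effort, so a failure only costs performance, never probe success.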
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index a90f2545baee..c180eb60bee8 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -229,26 +229,19 @@ static const struct of_device_id vc4_dpi_dt_match[] = {
static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
struct device *dev = &dpi->pdev->dev;
- struct drm_panel *panel;
struct drm_bridge *bridge;
- int ret;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &bridge);
- if (ret) {
+ bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(bridge)) {
/* If nothing was connected in the DT, that's not an
* error.
*/
- if (ret == -ENODEV)
+ if (PTR_ERR(bridge) == -ENODEV)
return 0;
else
- return ret;
+ return PTR_ERR(bridge);
}
- if (panel)
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DPI);
-
return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
}
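devm_drm_of_get_bridge() folds the panel-or-bridge lookup and the panel-bridge wrapper into one device-managed call; both vc4_dpi here and vc4_dsi below drop the open-coded version. The helper amounts to roughly this sketch:

	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret;

	ret = drm_of_find_panel_or_bridge(np, port, endpoint, &panel, &bridge);
	if (ret)
		return ERR_PTR(ret);
	if (panel)
		bridge = devm_drm_panel_bridge_add(dev, panel);

	return bridge;

Callers therefore only ever deal with a bridge pointer, or an ERR_PTR such as -ENODEV when nothing is wired up in the DT.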
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index f6c16c5aee68..16abc3a3d601 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -50,13 +50,11 @@
#define DRIVER_PATCHLEVEL 0
/* Helper function for mapping the regs on a platform device. */
-void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
+void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index)
{
- struct resource *res;
void __iomem *map;
- res = platform_get_resource(dev, IORESOURCE_MEM, index);
- map = devm_ioremap_resource(&dev->dev, res);
+ map = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(map))
return map;
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index a185027911ce..a229da58962a 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1497,7 +1497,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
- struct drm_panel *panel;
const struct of_device_id *match;
dma_cap_mask_t dma_mask;
int ret;
@@ -1609,27 +1608,9 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &dsi->bridge);
- if (ret) {
- /* If the bridge or panel pointed by dev->of_node is not
- * enabled, just return 0 here so that we don't prevent the DRM
- * dev from being registered. Of course that means the DSI
- * encoder won't be exposed, but that's not a problem since
- * nothing is connected to it.
- */
- if (ret == -ENODEV)
- return 0;
-
- return ret;
- }
-
- if (panel) {
- dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel,
- DRM_MODE_CONNECTOR_DSI);
- if (IS_ERR(dsi->bridge))
- return PTR_ERR(dsi->bridge);
- }
+ dsi->bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(dsi->bridge))
+ return PTR_ERR(dsi->bridge);
/* The esc clock rate is supposed to always be 100Mhz. */
ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
@@ -1667,8 +1648,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
{
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- if (dsi->bridge)
- pm_runtime_disable(dev);
+ pm_runtime_disable(dev);
/*
* Restore the bridge_chain so the bridge detach procedure can happen
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ed8a4b7f8b6e..b284623e2863 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1556,10 +1556,11 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
- struct drm_device *dev = vc4_hdmi->connector.dev;
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_device *dev = connector->dev;
if (dev && dev->registered)
- drm_kms_helper_hotplug_event(dev);
+ drm_connector_helper_hpd_irq_event(connector);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index bf38a7e319d1..a87eafa89e9f 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -38,6 +38,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
@@ -50,87 +51,11 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static const struct drm_gem_object_funcs vgem_gem_object_funcs;
-
static struct vgem_device {
struct drm_device drm;
struct platform_device *platform;
} *vgem_device;
-static void vgem_gem_free_object(struct drm_gem_object *obj)
-{
- struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
-
- kvfree(vgem_obj->pages);
- mutex_destroy(&vgem_obj->pages_lock);
-
- if (obj->import_attach)
- drm_prime_gem_destroy(obj, vgem_obj->table);
-
- drm_gem_object_release(obj);
- kfree(vgem_obj);
-}
-
-static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_vgem_gem_object *obj = vma->vm_private_data;
- /* We don't use vmf->pgoff since that has the fake offset */
- unsigned long vaddr = vmf->address;
- vm_fault_t ret = VM_FAULT_SIGBUS;
- loff_t num_pages;
- pgoff_t page_offset;
- page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
-
- num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
- if (page_offset >= num_pages)
- return VM_FAULT_SIGBUS;
-
- mutex_lock(&obj->pages_lock);
- if (obj->pages) {
- get_page(obj->pages[page_offset]);
- vmf->page = obj->pages[page_offset];
- ret = 0;
- }
- mutex_unlock(&obj->pages_lock);
- if (ret) {
- struct page *page;
-
- page = shmem_read_mapping_page(
- file_inode(obj->base.filp)->i_mapping,
- page_offset);
- if (!IS_ERR(page)) {
- vmf->page = page;
- ret = 0;
- } else switch (PTR_ERR(page)) {
- case -ENOSPC:
- case -ENOMEM:
- ret = VM_FAULT_OOM;
- break;
- case -EBUSY:
- ret = VM_FAULT_RETRY;
- break;
- case -EFAULT:
- case -EINVAL:
- ret = VM_FAULT_SIGBUS;
- break;
- default:
- WARN_ON(PTR_ERR(page));
- ret = VM_FAULT_SIGBUS;
- break;
- }
-
- }
- return ret;
-}
-
-static const struct vm_operations_struct vgem_gem_vm_ops = {
- .fault = vgem_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
struct vgem_file *vfile;
@@ -159,266 +84,30 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
kfree(vfile);
}
-static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
- unsigned long size)
-{
- struct drm_vgem_gem_object *obj;
- int ret;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
- obj->base.funcs = &vgem_gem_object_funcs;
-
- ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
- if (ret) {
- kfree(obj);
- return ERR_PTR(ret);
- }
-
- mutex_init(&obj->pages_lock);
-
- return obj;
-}
-
-static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
-{
- drm_gem_object_release(&obj->base);
- kfree(obj);
-}
-
-static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
- struct drm_file *file,
- unsigned int *handle,
- unsigned long size)
-{
- struct drm_vgem_gem_object *obj;
- int ret;
-
- obj = __vgem_gem_create(dev, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- ret = drm_gem_handle_create(file, &obj->base, handle);
- if (ret) {
- drm_gem_object_put(&obj->base);
- return ERR_PTR(ret);
- }
-
- return &obj->base;
-}
-
-static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct drm_gem_object *gem_object;
- u64 pitch, size;
-
- pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
- size = args->height * pitch;
- if (size == 0)
- return -EINVAL;
-
- gem_object = vgem_gem_create(dev, file, &args->handle, size);
- if (IS_ERR(gem_object))
- return PTR_ERR(gem_object);
-
- args->size = gem_object->size;
- args->pitch = pitch;
-
- drm_gem_object_put(gem_object);
-
- DRM_DEBUG("Created object of size %llu\n", args->size);
-
- return 0;
-}
-
static struct drm_ioctl_desc vgem_ioctls[] = {
DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
-static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- unsigned long flags = vma->vm_flags;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- /* Keep the WC mmaping set by drm_gem_mmap() but our pages
- * are ordinary and not special.
- */
- vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
- return 0;
-}
+DEFINE_DRM_GEM_FOPS(vgem_driver_fops);
-static const struct file_operations vgem_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = vgem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .release = drm_release,
-};
-
-static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
-{
- mutex_lock(&bo->pages_lock);
- if (bo->pages_pin_count++ == 0) {
- struct page **pages;
-
- pages = drm_gem_get_pages(&bo->base);
- if (IS_ERR(pages)) {
- bo->pages_pin_count--;
- mutex_unlock(&bo->pages_lock);
- return pages;
- }
-
- bo->pages = pages;
- }
- mutex_unlock(&bo->pages_lock);
-
- return bo->pages;
-}
-
-static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
-{
- mutex_lock(&bo->pages_lock);
- if (--bo->pages_pin_count == 0) {
- drm_gem_put_pages(&bo->base, bo->pages, true, true);
- bo->pages = NULL;
- }
- mutex_unlock(&bo->pages_lock);
-}
-
-static int vgem_prime_pin(struct drm_gem_object *obj)
+static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, size_t size)
{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- long n_pages = obj->size >> PAGE_SHIFT;
- struct page **pages;
+ struct drm_gem_shmem_object *obj;
- pages = vgem_pin_pages(bo);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
- /* Flush the object from the CPU cache so that importers can rely
- * on coherent indirect access via the exported dma-address.
+ /*
+ * vgem doesn't have any begin/end cpu access ioctls, therefore it must
+ * use coherent memory, or dma-buf sharing just won't work.
*/
- drm_clflush_pages(pages, n_pages);
-
- return 0;
-}
-
-static void vgem_prime_unpin(struct drm_gem_object *obj)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
- vgem_unpin_pages(bo);
-}
-
-static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
- return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
-}
-
-static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
-
- return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
-}
-
-static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach, struct sg_table *sg)
-{
- struct drm_vgem_gem_object *obj;
- int npages;
-
- obj = __vgem_gem_create(dev, attach->dmabuf->size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
+ obj->map_wc = true;
- npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
-
- obj->table = sg;
- obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!obj->pages) {
- __vgem_gem_destroy(obj);
- return ERR_PTR(-ENOMEM);
- }
-
- obj->pages_pin_count++; /* perma-pinned */
- drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
return &obj->base;
}
-static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- long n_pages = obj->size >> PAGE_SHIFT;
- struct page **pages;
- void *vaddr;
-
- pages = vgem_pin_pages(bo);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
- vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
- if (!vaddr)
- return -ENOMEM;
- dma_buf_map_set_vaddr(map, vaddr);
-
- return 0;
-}
-
-static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
- vunmap(map->vaddr);
- vgem_unpin_pages(bo);
-}
-
-static int vgem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- if (obj->size < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (!obj->filp)
- return -ENODEV;
-
- ret = call_mmap(obj->filp, vma);
- if (ret)
- return ret;
-
- vma_set_file(vma, obj->filp);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
- return 0;
-}
-
-static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
- .free = vgem_gem_free_object,
- .pin = vgem_prime_pin,
- .unpin = vgem_prime_unpin,
- .get_sg_table = vgem_prime_get_sg_table,
- .vmap = vgem_prime_vmap,
- .vunmap = vgem_prime_vunmap,
- .vm_ops = &vgem_gem_vm_ops,
-};
-
static const struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = vgem_open,
@@ -427,13 +116,8 @@ static const struct drm_driver vgem_driver = {
.num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
- .dumb_create = vgem_gem_dumb_create,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = vgem_prime_import,
- .gem_prime_import_sg_table = vgem_prime_import_sg_table,
- .gem_prime_mmap = vgem_prime_mmap,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ .gem_create_object = vgem_gem_create_object,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
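With the custom fault handler, pin/unpin paths, and PRIME plumbing deleted, vgem is now a thin shim over the shmem helpers; the only driver-specific piece left is gem_create_object(), which forces write-combine mappings so exported buffers stay coherent. For reference, DRM_GEM_SHMEM_DRIVER_OPS expands to roughly the following defaults (assumed from drm_gem_shmem_helper.h of this series):

	.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.gem_prime_mmap		   = drm_gem_prime_mmap,
	.dumb_create		   = drm_gem_shmem_dumb_create,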
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index c2b20e0ee030..b6954e2f75e6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -52,6 +52,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
vgdev->has_resource_assign_uuid);
virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
+ virtio_gpu_add_bool(m, "context init", vgdev->has_context_init);
virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);
if (vgdev->host_visible_region.len) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ed85a7863256..749db18dcfa2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -29,6 +29,8 @@
#include <linux/module.h>
#include <linux/console.h>
#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
#include <drm/drm.h>
#include <drm/drm_aperture.h>
@@ -155,6 +157,35 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
schedule_work(&vgdev->config_changed_work);
}
+static __poll_t virtio_gpu_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct drm_file *drm_file = filp->private_data;
+ struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+ struct drm_device *dev = drm_file->minor->dev;
+ struct drm_pending_event *e = NULL;
+ __poll_t mask = 0;
+
+ if (!vfpriv->ring_idx_mask)
+ return drm_poll(filp, wait);
+
+ poll_wait(filp, &drm_file->event_wait, wait);
+
+ if (!list_empty(&drm_file->event_list)) {
+ spin_lock_irq(&dev->event_lock);
+ e = list_first_entry(&drm_file->event_list,
+ struct drm_pending_event, link);
+ drm_file->event_space += e->event->length;
+ list_del(&e->link);
+ spin_unlock_irq(&dev->event_lock);
+
+ kfree(e);
+ mask |= EPOLLIN | EPOLLRDNORM;
+ }
+
+ return mask;
+}
+
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -172,6 +203,7 @@ static unsigned int features[] = {
VIRTIO_GPU_F_EDID,
VIRTIO_GPU_F_RESOURCE_UUID,
VIRTIO_GPU_F_RESOURCE_BLOB,
+ VIRTIO_GPU_F_CONTEXT_INIT,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
@@ -193,7 +225,17 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
-DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
+static const struct file_operations virtio_gpu_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = virtio_gpu_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap
+};
static const struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
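The custom poll hook only diverges from drm_poll() for contexts that opted into per-ring fence events (ring_idx_mask != 0); in that mode the driver consumes the pending event itself, so userspace merely sees the fd become readable. A hypothetical userspace sketch (error handling elided):

	#include <poll.h>

	struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };

	/* Blocks until a fence on one of the polled rings signals. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		handle_ring_fence();	/* hypothetical application hook */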
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d4e610a44e12..e0265fe74aa5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -26,6 +26,7 @@
#ifndef VIRTIO_DRV_H
#define VIRTIO_DRV_H
+#include <linux/dma-direction.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -54,6 +55,9 @@
#define STATE_OK 1
#define STATE_ERR 2
+#define MAX_CAPSET_ID 63
+#define MAX_RINGS 64
+
struct virtio_gpu_object_params {
unsigned long size;
bool dumb;
@@ -134,9 +138,18 @@ struct virtio_gpu_fence_driver {
spinlock_t lock;
};
+#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
+struct virtio_gpu_fence_event {
+ struct drm_pending_event base;
+ struct drm_event event;
+};
+
struct virtio_gpu_fence {
struct dma_fence f;
+ uint32_t ring_idx;
uint64_t fence_id;
+ bool emit_fence_info;
+ struct virtio_gpu_fence_event *e;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
};
@@ -233,6 +246,7 @@ struct virtio_gpu_device {
bool has_resource_assign_uuid;
bool has_resource_blob;
bool has_host_visible;
+ bool has_context_init;
struct virtio_shm_region host_visible_region;
struct drm_mm host_visible_mm;
@@ -244,6 +258,7 @@ struct virtio_gpu_device {
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
+ uint64_t capset_id_mask;
struct list_head cap_cache;
/* protects uuid state when exporting */
@@ -254,12 +269,16 @@ struct virtio_gpu_device {
struct virtio_gpu_fpriv {
uint32_t ctx_id;
+ uint32_t context_init;
bool context_created;
+ uint32_t num_rings;
+ uint64_t base_fence_ctx;
+ uint64_t ring_idx_mask;
struct mutex context_lock;
};
/* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 11
+#define DRM_VIRTIO_NUM_IOCTLS 12
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
@@ -337,7 +356,8 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
struct virtio_gpu_drv_cap_cache **cache_p);
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
- uint32_t nlen, const char *name);
+ uint32_t context_init, uint32_t nlen,
+ const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
@@ -417,8 +437,9 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
int index);
/* virtgpu_fence.c */
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(
- struct virtio_gpu_device *vgdev);
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+ uint64_t base_fence_ctx,
+ uint32_t ring_idx);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence);
@@ -459,4 +480,11 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr);
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+ struct device *dev,
+ enum dma_data_direction dir);
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index d28e25e8409b..f28357dbde35 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -71,22 +71,29 @@ static const struct dma_fence_ops virtio_gpu_fence_ops = {
.timeline_value_str = virtio_gpu_timeline_value_str,
};
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+ uint64_t base_fence_ctx,
+ uint32_t ring_idx)
{
+ uint64_t fence_context = base_fence_ctx + ring_idx;
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
GFP_KERNEL);
+
if (!fence)
return fence;
fence->drv = drv;
+ fence->ring_idx = ring_idx;
+ fence->emit_fence_info = (base_fence_ctx != drv->context);
/* This only partially initializes the fence because the seqno is
* unknown yet. The fence must not be used outside of the driver
* until virtio_gpu_fence_emit is called.
*/
- dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
- 0);
+
+ dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
+ fence_context, 0);
return fence;
}
@@ -108,6 +115,13 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
+
+ /* Currently the only defined fence parameter. */
+ if (fence->emit_fence_info) {
+ cmd_hdr->flags |=
+ cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
+ cmd_hdr->ring_idx = (u8)fence->ring_idx;
+ }
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
@@ -138,11 +152,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
continue;
dma_fence_signal_locked(&curr->f);
+ if (curr->e) {
+ drm_send_event(vgdev->ddev, &curr->e->base);
+ curr->e = NULL;
+ }
+
list_del(&curr->node);
dma_fence_put(&curr->f);
}
dma_fence_signal_locked(&signaled->f);
+ if (signaled->e) {
+ drm_send_event(vgdev->ddev, &signaled->e->base);
+ signaled->e = NULL;
+ }
+
list_del(&signaled->node);
dma_fence_put(&signaled->f);
break;
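The fence-context arithmetic above (fence_context = base_fence_ctx + ring_idx) gives every ring its own dma_fence timeline, so fences on different rings are never implicitly ordered against each other; emit_fence_info marks fences that must carry a ring index back to the host. The per-context block of ring contexts is reserved in the context-init ioctl below, essentially:

	/* Sketch: one dma_fence context per ring, reserved in one block. */
	vfpriv->base_fence_ctx = dma_fence_context_alloc(num_rings);
	vfpriv->num_rings = num_rings;
	/* Ring i then stamps its fences with base_fence_ctx + i. */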
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5c1ad1596889..5618a1d5879c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -38,20 +38,60 @@
VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
+static int virtio_gpu_fence_event_create(struct drm_device *dev,
+ struct drm_file *file,
+ struct virtio_gpu_fence *fence,
+ uint32_t ring_idx)
+{
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct virtio_gpu_fence_event *e = NULL;
+ int ret;
+
+ if (!(vfpriv->ring_idx_mask & (1ULL << ring_idx)))
+ return 0;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+ e->event.length = sizeof(e->event);
+
+ ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
+ if (ret)
+ goto free;
+
+ fence->e = e;
+ return 0;
+free:
+ kfree(e);
+ return ret;
+}
+
+/* Must be called with &virtio_gpu_fpriv.context_lock held. */
+static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_fpriv *vfpriv)
+{
+ char dbgname[TASK_COMM_LEN];
+
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init, strlen(dbgname),
+ dbgname);
+
+ vfpriv->context_created = true;
+}
+
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- char dbgname[TASK_COMM_LEN];
mutex_lock(&vfpriv->context_lock);
if (vfpriv->context_created)
goto out_unlock;
- get_task_comm(dbgname, current);
- virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
- strlen(dbgname), dbgname);
- vfpriv->context_created = true;
+ virtio_gpu_create_context_locked(vgdev, vfpriv);
out_unlock:
mutex_unlock(&vfpriv->context_lock);
@@ -89,6 +129,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
void *buf;
+ uint64_t fence_ctx;
+ uint32_t ring_idx;
+
+ fence_ctx = vgdev->fence_drv.context;
+ ring_idx = 0;
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
@@ -96,6 +141,17 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
return -EINVAL;
+ if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) {
+ if (exbuf->ring_idx >= vfpriv->num_rings)
+ return -EINVAL;
+
+ if (!vfpriv->base_fence_ctx)
+ return -EINVAL;
+
+ fence_ctx = vfpriv->base_fence_ctx;
+ ring_idx = exbuf->ring_idx;
+ }
+
exbuf->fence_fd = -1;
virtio_gpu_create_context(dev, file);
@@ -163,12 +219,16 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_memdup;
}
- out_fence = virtio_gpu_fence_alloc(vgdev);
+ out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
if (!out_fence) {
ret = -ENOMEM;
goto out_unresv;
}
+ ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+ if (ret)
+ goto out_unresv;
+
if (out_fence_fd >= 0) {
sync_file = sync_file_create(&out_fence->f);
if (!sync_file) {
@@ -226,6 +286,12 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_CROSS_DEVICE:
value = vgdev->has_resource_assign_uuid ? 1 : 0;
break;
+ case VIRTGPU_PARAM_CONTEXT_INIT:
+ value = vgdev->has_context_init ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
+ value = vgdev->capset_id_mask;
+ break;
default:
return -EINVAL;
}
@@ -278,7 +344,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
if (params.size == 0)
params.size = PAGE_SIZE;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
if (!fence)
return -ENOMEM;
ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
@@ -357,7 +423,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (ret != 0)
goto err_put_free;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
if (!fence) {
ret = -ENOMEM;
goto err_unlock;
@@ -417,7 +483,8 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
goto err_put_free;
ret = -ENOMEM;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ 0);
if (!fence)
goto err_unlock;
@@ -662,6 +729,113 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
return 0;
}
+static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ int ret = 0;
+ uint32_t num_params, i, param, value;
+ uint64_t valid_ring_mask;
+ size_t len;
+ struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct drm_virtgpu_context_init *args = data;
+
+ num_params = args->num_params;
+ len = num_params * sizeof(struct drm_virtgpu_context_set_param);
+
+ if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
+ return -EINVAL;
+
+ /* Number of unique parameters supported at this time. */
+ if (num_params > 3)
+ return -EINVAL;
+
+ ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
+ len);
+
+ if (IS_ERR(ctx_set_params))
+ return PTR_ERR(ctx_set_params);
+
+ mutex_lock(&vfpriv->context_lock);
+ if (vfpriv->context_created) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < num_params; i++) {
+ param = ctx_set_params[i].param;
+ value = ctx_set_params[i].value;
+
+ switch (param) {
+ case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
+ if (value > MAX_CAPSET_ID) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Context capset ID already set */
+ if (vfpriv->context_init &
+ VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->context_init |= value;
+ break;
+ case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
+ if (vfpriv->base_fence_ctx) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (value > MAX_RINGS) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
+ vfpriv->num_rings = value;
+ break;
+ case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
+ if (vfpriv->ring_idx_mask) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->ring_idx_mask = value;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ if (vfpriv->ring_idx_mask) {
+ valid_ring_mask = 0;
+ for (i = 0; i < vfpriv->num_rings; i++)
+ valid_ring_mask |= 1ULL << i;
+
+ if (~valid_ring_mask & vfpriv->ring_idx_mask) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ virtio_gpu_create_context_locked(vgdev, vfpriv);
+ virtio_gpu_notify(vgdev);
+
+out_unlock:
+ mutex_unlock(&vfpriv->context_lock);
+ kfree(ctx_set_params);
+ return ret;
+}
+
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_RENDER_ALLOW),
@@ -698,4 +872,7 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
virtio_gpu_resource_create_blob_ioctl,
DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
+ DRM_RENDER_ALLOW),
};
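From userspace the new ioctl is a one-shot configuration step: pick a capset, size the ring set, and choose which rings feed poll(). A hypothetical sketch (uapi names assumed from this series' virtgpu_drm.h; error handling elided):

	#include <stdint.h>
	#include <xf86drm.h>		/* drmIoctl() from libdrm */
	#include <drm/virtgpu_drm.h>

	struct drm_virtgpu_context_set_param params[] = {
		{ .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS,       .value = 2 },
		{ .param = VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK, .value = 0x3 },
	};
	struct drm_virtgpu_context_init init = {
		.num_params     = 2,
		.ctx_set_params = (uintptr_t)params,
	};

	/* Must run before the first submission; afterwards it fails with -EEXIST. */
	drmIoctl(drm_fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);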
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index f3379059f324..21f410901694 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -65,6 +65,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
int num_capsets)
{
int i, ret;
+ bool invalid_capset_id = false;
vgdev->capsets = kcalloc(num_capsets,
sizeof(struct virtio_gpu_drv_capset),
@@ -78,19 +79,34 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
virtio_gpu_notify(vgdev);
ret = wait_event_timeout(vgdev->resp_wq,
vgdev->capsets[i].id > 0, 5 * HZ);
- if (ret == 0) {
+ /*
+ * Capability ids are defined in the virtio-gpu spec and are
+ * between 1 and 63, inclusive.
+ */
+ if (!vgdev->capsets[i].id ||
+ vgdev->capsets[i].id > MAX_CAPSET_ID)
+ invalid_capset_id = true;
+
+ if (ret == 0)
DRM_ERROR("timed out waiting for cap set %d\n", i);
+ else if (invalid_capset_id)
+ DRM_ERROR("invalid capset id %u", vgdev->capsets[i].id);
+
+ if (ret == 0 || invalid_capset_id) {
spin_lock(&vgdev->display_info_lock);
kfree(vgdev->capsets);
vgdev->capsets = NULL;
spin_unlock(&vgdev->display_info_lock);
return;
}
+
+ vgdev->capset_id_mask |= 1ULL << vgdev->capsets[i].id;
DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
i, vgdev->capsets[i].id,
vgdev->capsets[i].max_version,
vgdev->capsets[i].max_size);
}
+
vgdev->num_capsets = num_capsets;
}
@@ -175,13 +191,19 @@ int virtio_gpu_init(struct drm_device *dev)
(unsigned long)vgdev->host_visible_region.addr,
(unsigned long)vgdev->host_visible_region.len);
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
+ vgdev->has_context_init = true;
- DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
+ DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
vgdev->has_virgl_3d ? '+' : '-',
vgdev->has_edid ? '+' : '-',
vgdev->has_resource_blob ? '+' : '-',
vgdev->has_host_visible ? '+' : '-');
+ DRM_INFO("features: %ccontext_init\n",
+ vgdev->has_context_init ? '+' : '-');
+
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a49fd9480381..6d3cc9e238a4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -256,7 +256,8 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return 0;
if (bo->dumb && (plane->state->fb != new_state->fb)) {
- vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+ vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ 0);
if (!vgfb->fence)
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index e45dbf14b307..55d80b77d9b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -43,13 +43,41 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
return 0;
}
+static struct sg_table *
+virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (virtio_gpu_is_vram(bo))
+ return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
+
+ return drm_gem_map_dma_buf(attach, dir);
+}
+
+static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (virtio_gpu_is_vram(bo)) {
+ virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
+ return;
+ }
+
+ drm_gem_unmap_dma_buf(attach, sgt, dir);
+}
+
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
.cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .map_dma_buf = virtgpu_gem_map_dma_buf,
+ .unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 2e71e91278b4..7c052efe8836 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -91,9 +91,7 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_vbuffer *vbuf;
- vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
- if (!vbuf)
- return ERR_PTR(-ENOMEM);
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
BUG_ON(size > MAX_INLINE_CMD_SIZE ||
size < sizeof(struct virtio_gpu_ctrl_hdr));
@@ -147,10 +145,6 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
resp_size, resp_buf, cb);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
*vbuffer_p = vbuf;
return (struct virtio_gpu_command *)vbuf->buf;
}
@@ -205,7 +199,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
struct list_head reclaim_list;
struct virtio_gpu_vbuffer *entry, *tmp;
struct virtio_gpu_ctrl_hdr *resp;
- u64 fence_id = 0;
+ u64 fence_id;
INIT_LIST_HEAD(&reclaim_list);
spin_lock(&vgdev->ctrlq.qlock);
@@ -232,23 +226,14 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
}
if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
- u64 f = le64_to_cpu(resp->fence_id);
-
- if (fence_id > f) {
- DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
- __func__, fence_id, f);
- } else {
- fence_id = f;
- }
+ fence_id = le64_to_cpu(resp->fence_id);
+ virtio_gpu_fence_event_process(vgdev, fence_id);
}
if (entry->resp_cb)
entry->resp_cb(vgdev, entry);
}
wake_up(&vgdev->ctrlq.ack_queue);
- if (fence_id)
- virtio_gpu_fence_event_process(vgdev, fence_id);
-
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
if (entry->objs)
virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
@@ -917,7 +902,8 @@ int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
}
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
- uint32_t nlen, const char *name)
+ uint32_t context_init, uint32_t nlen,
+ const char *name)
{
struct virtio_gpu_ctx_create *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -928,6 +914,7 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
+ cmd_p->context_init = cpu_to_le32(context_init);
strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 5cc34e7330fa..6b45b0429fef 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"
+#include <linux/dma-mapping.h>
+
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -64,6 +66,65 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
return ret;
}
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+ struct device *dev,
+ enum dma_data_direction dir)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ struct sg_table *sgt;
+ dma_addr_t addr;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
+ // Virtio devices can access the dma-buf via its UUID. Return a stub
+ // sg_table so the dma-buf API still works.
+ if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
+ ret = -EIO;
+ goto out;
+ }
+ return sgt;
+ }
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ addr = dma_map_resource(dev, vram->vram_node.start,
+ vram->vram_node.size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ ret = dma_mapping_error(dev, addr);
+ if (ret)
+ goto out;
+
+ sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
+ sg_dma_address(sgt->sgl) = addr;
+ sg_dma_len(sgt->sgl) = vram->vram_node.size;
+
+ return sgt;
+out:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(ret);
+}
+
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ if (sgt->nents) {
+ dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
+ sg_dma_len(sgt->sgl), dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,
diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c
index edd17c30d5a5..7f7fe35fc21d 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_memory.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c
@@ -468,7 +468,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
struct ttm_mem_zone *zone;
unsigned int i;
- flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
for (i = 0; i < glob->num_zones; ++i) {
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 04789b2bb2a2..899945f54dc7 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -48,8 +48,11 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
+#include <linux/module.h>
#include "ttm_object.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
/**
* struct ttm_object_file
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 9e3e1429db94..fd007f1c1776 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -94,7 +94,6 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
- uint32_t new_flags;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -103,8 +102,8 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(placement, bo->resource,
- &new_flags) == true ? 0 : -EINVAL;
+ ret = ttm_resource_compat(bo->resource, placement)
+ ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, placement, &ctx);
@@ -136,7 +135,6 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
- uint32_t new_flags;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -145,8 +143,8 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0) {
- ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
- &new_flags) == true ? 0 : -EINVAL;
+ ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
+ ? 0 : -EINVAL;
goto out_unreserve;
}
@@ -208,7 +206,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
- uint32_t new_flags;
place = vmw_vram_placement.placement[0];
place.lpfn = bo->resource->num_pages;
@@ -236,8 +233,8 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
}
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(&placement, bo->resource,
- &new_flags) == true ? 0 : -EINVAL;
+ ret = ttm_resource_compat(bo->resource, &placement)
+ ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, &placement, &ctx);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ab9a1750e1df..bfd71c86faa5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -29,7 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
@@ -666,7 +666,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
/* TTM currently doesn't fully support SEV encryption. */
- if (mem_encrypt_active())
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return -EINVAL;
if (vmw_force_coherent)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e50fb82a3030..2aceac7856e2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -28,7 +28,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <asm/hypervisor.h>
#include <drm/drm_ioctl.h>
@@ -160,7 +160,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
unsigned long msg_len = strlen(msg);
/* HB port can't access encrypted memory. */
- if (hb && !mem_encrypt_active()) {
+ if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
unsigned long bp = channel->cookie_high;
u32 channel_id = (channel->channel_id << 16);
@@ -216,7 +216,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
unsigned long si, di, eax, ebx, ecx, edx;
/* HB port can't access encrypted memory */
- if (hb && !mem_encrypt_active()) {
+ if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
unsigned long bp = channel->cookie_low;
u32 channel_id = (channel->channel_id << 16);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 8b8991e3ed2d..e899a936a42a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -522,14 +522,8 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm);
- vmw_ttm_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
vmw_ttm_unmap_dma(vmw_be);
- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ttm_tt_fini(&vmw_be->dma_ttm);
- else
- ttm_tt_fini(ttm);
-
+ ttm_tt_fini(ttm);
if (vmw_be->mob)
vmw_mob_destroy(vmw_be->mob);
@@ -574,6 +568,8 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
dma_ttm);
unsigned int i;
+ vmw_ttm_unbind(bdev, ttm);
+
if (vmw_tt->mob) {
vmw_mob_destroy(vmw_tt->mob);
vmw_tt->mob = NULL;
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
deleted file mode 100644
index aa8594190b50..000000000000
--- a/drivers/gpu/drm/zte/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config DRM_ZTE
- tristate "DRM Support for ZTE SoCs"
- depends on DRM && ARCH_ZX
- select DRM_KMS_CMA_HELPER
- select DRM_KMS_HELPER
- select SND_SOC_HDMI_CODEC if SND_SOC
- select VIDEOMODE_HELPERS
- help
- Choose this option to enable DRM on ZTE ZX SoCs.
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile
deleted file mode 100644
index b6d966d849dd..000000000000
--- a/drivers/gpu/drm/zte/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-zxdrm-y := \
- zx_drm_drv.o \
- zx_hdmi.o \
- zx_plane.o \
- zx_tvenc.o \
- zx_vga.o \
- zx_vou.o
-
-obj-$(CONFIG_DRM_ZTE) += zxdrm.o
diff --git a/drivers/gpu/drm/zte/zx_common_regs.h b/drivers/gpu/drm/zte/zx_common_regs.h
deleted file mode 100644
index b7b996db129d..000000000000
--- a/drivers/gpu/drm/zte/zx_common_regs.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#ifndef __ZX_COMMON_REGS_H__
-#define __ZX_COMMON_REGS_H__
-
-/* CSC registers */
-#define CSC_CTRL0 0x30
-#define CSC_COV_MODE_SHIFT 16
-#define CSC_COV_MODE_MASK (0xffff << CSC_COV_MODE_SHIFT)
-#define CSC_BT601_IMAGE_RGB2YCBCR 0
-#define CSC_BT601_IMAGE_YCBCR2RGB 1
-#define CSC_BT601_VIDEO_RGB2YCBCR 2
-#define CSC_BT601_VIDEO_YCBCR2RGB 3
-#define CSC_BT709_IMAGE_RGB2YCBCR 4
-#define CSC_BT709_IMAGE_YCBCR2RGB 5
-#define CSC_BT709_VIDEO_RGB2YCBCR 6
-#define CSC_BT709_VIDEO_YCBCR2RGB 7
-#define CSC_BT2020_IMAGE_RGB2YCBCR 8
-#define CSC_BT2020_IMAGE_YCBCR2RGB 9
-#define CSC_BT2020_VIDEO_RGB2YCBCR 10
-#define CSC_BT2020_VIDEO_YCBCR2RGB 11
-#define CSC_WORK_ENABLE BIT(0)
-
-#endif /* __ZX_COMMON_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
deleted file mode 100644
index 064056503ebb..000000000000
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
-#include <linux/spinlock.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
-
-#include "zx_drm_drv.h"
-#include "zx_vou.h"
-
-static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
-
-DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops);
-
-static const struct drm_driver zx_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
- .fops = &zx_drm_fops,
- .name = "zx-vou",
- .desc = "ZTE VOU Controller DRM",
- .date = "20160811",
- .major = 1,
- .minor = 0,
-};
-
-static int zx_drm_bind(struct device *dev)
-{
- struct drm_device *drm;
- int ret;
-
- drm = drm_dev_alloc(&zx_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- dev_set_drvdata(dev, drm);
-
- drm_mode_config_init(drm);
- drm->mode_config.min_width = 16;
- drm->mode_config.min_height = 16;
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
- drm->mode_config.funcs = &zx_drm_mode_config_funcs;
-
- ret = component_bind_all(dev, drm);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to bind all components: %d\n", ret);
- goto out_unregister;
- }
-
- ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to init vblank: %d\n", ret);
- goto out_unbind;
- }
-
- drm_mode_config_reset(drm);
- drm_kms_helper_poll_init(drm);
-
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto out_poll_fini;
-
- drm_fbdev_generic_setup(drm, 32);
-
- return 0;
-
-out_poll_fini:
- drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
-out_unbind:
- component_unbind_all(dev, drm);
-out_unregister:
- dev_set_drvdata(dev, NULL);
- drm_dev_put(drm);
- return ret;
-}
-
-static void zx_drm_unbind(struct device *dev)
-{
- struct drm_device *drm = dev_get_drvdata(dev);
-
- drm_dev_unregister(drm);
- drm_kms_helper_poll_fini(drm);
- drm_atomic_helper_shutdown(drm);
- drm_mode_config_cleanup(drm);
- component_unbind_all(dev, drm);
- dev_set_drvdata(dev, NULL);
- drm_dev_put(drm);
-}
-
-static const struct component_master_ops zx_drm_master_ops = {
- .bind = zx_drm_bind,
- .unbind = zx_drm_unbind,
-};
-
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-static int zx_drm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *parent = dev->of_node;
- struct device_node *child;
- struct component_match *match = NULL;
- int ret;
-
- ret = devm_of_platform_populate(dev);
- if (ret)
- return ret;
-
- for_each_available_child_of_node(parent, child)
- component_match_add(dev, &match, compare_of, child);
-
- return component_master_add_with_match(dev, &zx_drm_master_ops, match);
-}
-
-static int zx_drm_remove(struct platform_device *pdev)
-{
- component_master_del(&pdev->dev, &zx_drm_master_ops);
- return 0;
-}
-
-static const struct of_device_id zx_drm_of_match[] = {
- { .compatible = "zte,zx296718-vou", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_drm_of_match);
-
-static struct platform_driver zx_drm_platform_driver = {
- .probe = zx_drm_probe,
- .remove = zx_drm_remove,
- .driver = {
- .name = "zx-drm",
- .of_match_table = zx_drm_of_match,
- },
-};
-
-static struct platform_driver *drivers[] = {
- &zx_crtc_driver,
- &zx_hdmi_driver,
- &zx_tvenc_driver,
- &zx_vga_driver,
- &zx_drm_platform_driver,
-};
-
-static int zx_drm_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-module_init(zx_drm_init);
-
-static void zx_drm_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-module_exit(zx_drm_exit);
-
-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
-MODULE_DESCRIPTION("ZTE ZX VOU DRM driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.h b/drivers/gpu/drm/zte/zx_drm_drv.h
deleted file mode 100644
index 80cdaf479c74..000000000000
--- a/drivers/gpu/drm/zte/zx_drm_drv.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_DRM_DRV_H__
-#define __ZX_DRM_DRV_H__
-
-extern struct platform_driver zx_crtc_driver;
-extern struct platform_driver zx_hdmi_driver;
-extern struct platform_driver zx_tvenc_driver;
-extern struct platform_driver zx_vga_driver;
-
-static inline u32 zx_readl(void __iomem *reg)
-{
- return readl_relaxed(reg);
-}
-
-static inline void zx_writel(void __iomem *reg, u32 val)
-{
- writel_relaxed(val, reg);
-}
-
-static inline void zx_writel_mask(void __iomem *reg, u32 mask, u32 val)
-{
- u32 tmp;
-
- tmp = zx_readl(reg);
- tmp = (tmp & ~mask) | (val & mask);
- zx_writel(reg, tmp);
-}
-
-#endif /* __ZX_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
deleted file mode 100644
index cd79ca0a92a9..000000000000
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ /dev/null
@@ -1,760 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/hdmi.h>
-#include <linux/irq.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#include <sound/hdmi-codec.h>
-
-#include "zx_hdmi_regs.h"
-#include "zx_vou.h"
-
-#define ZX_HDMI_INFOFRAME_SIZE 31
-#define DDC_SEGMENT_ADDR 0x30
-
-struct zx_hdmi_i2c {
- struct i2c_adapter adap;
- struct mutex lock;
-};
-
-struct zx_hdmi {
- struct drm_connector connector;
- struct drm_encoder encoder;
- struct zx_hdmi_i2c *ddc;
- struct device *dev;
- struct drm_device *drm;
- void __iomem *mmio;
- struct clk *cec_clk;
- struct clk *osc_clk;
- struct clk *xclk;
- bool sink_is_hdmi;
- bool sink_has_audio;
- struct platform_device *audio_pdev;
-};
-
-#define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x)
-
-static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset)
-{
- return readl_relaxed(hdmi->mmio + offset * 4);
-}
-
-static inline void hdmi_writeb(struct zx_hdmi *hdmi, u16 offset, u8 val)
-{
- writel_relaxed(val, hdmi->mmio + offset * 4);
-}
-
-static inline void hdmi_writeb_mask(struct zx_hdmi *hdmi, u16 offset,
- u8 mask, u8 val)
-{
- u8 tmp;
-
- tmp = hdmi_readb(hdmi, offset);
- tmp = (tmp & ~mask) | (val & mask);
- hdmi_writeb(hdmi, offset, tmp);
-}
-
-static int zx_hdmi_infoframe_trans(struct zx_hdmi *hdmi,
- union hdmi_infoframe *frame, u8 fsel)
-{
- u8 buffer[ZX_HDMI_INFOFRAME_SIZE];
- int num;
- int i;
-
- hdmi_writeb(hdmi, TPI_INFO_FSEL, fsel);
-
- num = hdmi_infoframe_pack(frame, buffer, ZX_HDMI_INFOFRAME_SIZE);
- if (num < 0) {
- DRM_DEV_ERROR(hdmi->dev, "failed to pack infoframe: %d\n", num);
- return num;
- }
-
- for (i = 0; i < num; i++)
- hdmi_writeb(hdmi, TPI_INFO_B0 + i, buffer[i]);
-
- hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_RPT,
- TPI_INFO_TRANS_RPT);
- hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_EN,
- TPI_INFO_TRANS_EN);
-
- return num;
-}
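
hdmi_infoframe_pack() validates the frame and serializes it, filling in the CEA-861 checksum, and returns the packed length, so the register loop above only writes bytes that are actually valid; ZX_HDMI_INFOFRAME_SIZE of 31 covers the worst case of a 4-byte header (checksum included) plus a 27-byte payload. The checksum rule itself is easy to verify standalone (a toy sketch with an assumed 3-byte payload, not driver code):

#include <assert.h>
#include <stdint.h>

/* CEA-861 rule: the checksum byte makes the frame sum to 0 (mod 256). */
int main(void)
{
	uint8_t frame[] = { 0x82, 0x02, 0x03, 0x00, 0x10, 0x20, 0x30 };
	unsigned int sum = 0, i;

	for (i = 0; i < sizeof(frame); i++)
		sum += frame[i];
	frame[3] = (uint8_t)(0x100 - (sum & 0xff));	/* checksum slot */

	for (sum = 0, i = 0; i < sizeof(frame); i++)
		sum += frame[i];
	assert((sum & 0xff) == 0);
	return 0;
}
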
-
-static int zx_hdmi_config_video_vsi(struct zx_hdmi *hdmi,
- struct drm_display_mode *mode)
-{
- union hdmi_infoframe frame;
- int ret;
-
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
- &hdmi->connector,
- mode);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get vendor infoframe: %d\n",
- ret);
- return ret;
- }
-
- return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_VSIF);
-}
-
-static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi,
- struct drm_display_mode *mode)
-{
- union hdmi_infoframe frame;
- int ret;
-
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- &hdmi->connector,
- mode);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
- ret);
- return ret;
- }
-
- /* We always use YUV444 for HDMI output. */
- frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
-
- return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AVI);
-}
-
-static void zx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
-
- if (hdmi->sink_is_hdmi) {
- zx_hdmi_config_video_avi(hdmi, mode);
- zx_hdmi_config_video_vsi(hdmi, mode);
- }
-}
-
-static void zx_hdmi_phy_start(struct zx_hdmi *hdmi)
-{
-	/* Copied from the ZTE BSP code */
- hdmi_writeb(hdmi, 0x222, 0x0);
- hdmi_writeb(hdmi, 0x224, 0x4);
- hdmi_writeb(hdmi, 0x909, 0x0);
- hdmi_writeb(hdmi, 0x7b0, 0x90);
- hdmi_writeb(hdmi, 0x7b1, 0x00);
- hdmi_writeb(hdmi, 0x7b2, 0xa7);
- hdmi_writeb(hdmi, 0x7b8, 0xaa);
- hdmi_writeb(hdmi, 0x7b2, 0xa7);
- hdmi_writeb(hdmi, 0x7b3, 0x0f);
- hdmi_writeb(hdmi, 0x7b4, 0x0f);
- hdmi_writeb(hdmi, 0x7b5, 0x55);
- hdmi_writeb(hdmi, 0x7b7, 0x03);
- hdmi_writeb(hdmi, 0x7b9, 0x12);
- hdmi_writeb(hdmi, 0x7ba, 0x32);
- hdmi_writeb(hdmi, 0x7bc, 0x68);
- hdmi_writeb(hdmi, 0x7be, 0x40);
- hdmi_writeb(hdmi, 0x7bf, 0x84);
- hdmi_writeb(hdmi, 0x7c1, 0x0f);
- hdmi_writeb(hdmi, 0x7c8, 0x02);
- hdmi_writeb(hdmi, 0x7c9, 0x03);
- hdmi_writeb(hdmi, 0x7ca, 0x40);
- hdmi_writeb(hdmi, 0x7dc, 0x31);
- hdmi_writeb(hdmi, 0x7e2, 0x04);
- hdmi_writeb(hdmi, 0x7e0, 0x06);
- hdmi_writeb(hdmi, 0x7cb, 0x68);
- hdmi_writeb(hdmi, 0x7f9, 0x02);
- hdmi_writeb(hdmi, 0x7b6, 0x02);
- hdmi_writeb(hdmi, 0x7f3, 0x0);
-}
-
-static void zx_hdmi_hw_enable(struct zx_hdmi *hdmi)
-{
- /* Enable pclk */
- hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, CLKPWD_PDIDCK);
-
- /* Enable HDMI for TX */
- hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, FUNC_HDMI_EN);
-
- /* Enable deep color packet */
- hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
-
- /* Enable HDMI/MHL mode for output */
- hdmi_writeb_mask(hdmi, TEST_TXCTRL, TEST_TXCTRL_HDMI_MODE,
- TEST_TXCTRL_HDMI_MODE);
-
- /* Configure reg_qc_sel */
- hdmi_writeb(hdmi, HDMICTL4, 0x3);
-
- /* Enable interrupt */
- hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT,
- INTR1_MONITOR_DETECT);
-
- /* Start up phy */
- zx_hdmi_phy_start(hdmi);
-}
-
-static void zx_hdmi_hw_disable(struct zx_hdmi *hdmi)
-{
- /* Disable interrupt */
- hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT, 0);
-
- /* Disable deep color packet */
-	hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, 0);
-
- /* Disable HDMI for TX */
- hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, 0);
-
- /* Disable pclk */
- hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, 0);
-}
-
-static void zx_hdmi_encoder_enable(struct drm_encoder *encoder)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
-
- clk_prepare_enable(hdmi->cec_clk);
- clk_prepare_enable(hdmi->osc_clk);
- clk_prepare_enable(hdmi->xclk);
-
- zx_hdmi_hw_enable(hdmi);
-
- vou_inf_enable(VOU_HDMI, encoder->crtc);
-}
-
-static void zx_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
-
- vou_inf_disable(VOU_HDMI, encoder->crtc);
-
- zx_hdmi_hw_disable(hdmi);
-
- clk_disable_unprepare(hdmi->xclk);
- clk_disable_unprepare(hdmi->osc_clk);
- clk_disable_unprepare(hdmi->cec_clk);
-}
-
-static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
- .enable = zx_hdmi_encoder_enable,
- .disable = zx_hdmi_encoder_disable,
- .mode_set = zx_hdmi_encoder_mode_set,
-};
-
-static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(connector);
- struct edid *edid;
- int ret;
-
- edid = drm_get_edid(connector, &hdmi->ddc->adap);
- if (!edid)
- return 0;
-
- hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
- hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return ret;
-}
-
-static enum drm_mode_status
-zx_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static struct drm_connector_helper_funcs zx_hdmi_connector_helper_funcs = {
- .get_modes = zx_hdmi_connector_get_modes,
- .mode_valid = zx_hdmi_connector_mode_valid,
-};
-
-static enum drm_connector_status
-zx_hdmi_connector_detect(struct drm_connector *connector, bool force)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(connector);
-
- return (hdmi_readb(hdmi, TPI_HPD_RSEN) & TPI_HPD_CONNECTION) ?
- connector_status_connected : connector_status_disconnected;
-}
-
-static const struct drm_connector_funcs zx_hdmi_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = zx_hdmi_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
-{
- struct drm_encoder *encoder = &hdmi->encoder;
-
- encoder->possible_crtcs = VOU_CRTC_MASK;
-
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
-
- hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_init_with_ddc(drm, &hdmi->connector,
- &zx_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- &hdmi->ddc->adap);
- drm_connector_helper_add(&hdmi->connector,
- &zx_hdmi_connector_helper_funcs);
-
- drm_connector_attach_encoder(&hdmi->connector, encoder);
-
- return 0;
-}
-
-static irqreturn_t zx_hdmi_irq_thread(int irq, void *dev_id)
-{
- struct zx_hdmi *hdmi = dev_id;
-
- drm_helper_hpd_irq_event(hdmi->connector.dev);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id)
-{
- struct zx_hdmi *hdmi = dev_id;
- u8 lstat;
-
- lstat = hdmi_readb(hdmi, L1_INTR_STAT);
-
- /* Monitor detect/HPD interrupt */
- if (lstat & L1_INTR_STAT_INTR1) {
- u8 stat;
-
- stat = hdmi_readb(hdmi, INTR1_STAT);
- hdmi_writeb(hdmi, INTR1_STAT, stat);
-
- if (stat & INTR1_MONITOR_DETECT)
- return IRQ_WAKE_THREAD;
- }
-
- return IRQ_NONE;
-}
-
-static int zx_hdmi_audio_startup(struct device *dev, void *data)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
- struct drm_encoder *encoder = &hdmi->encoder;
-
- vou_inf_hdmi_audio_sel(encoder->crtc, VOU_HDMI_AUD_SPDIF);
-
- return 0;
-}
-
-static void zx_hdmi_audio_shutdown(struct device *dev, void *data)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
-
- /* Disable audio input */
- hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, 0);
-}
-
-static inline int zx_hdmi_audio_get_n(unsigned int fs)
-{
- unsigned int n;
-
- if (fs && (fs % 44100) == 0)
- n = 6272 * (fs / 44100);
- else
- n = fs * 128 / 1000;
-
- return n;
-}
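
This matches the HDMI audio clock regeneration rule: rates in the 44.1 kHz family scale the spec-recommended N of 6272, while everything else uses N = 128 * fs / 1000. The arithmetic lines up with the spec's recommended values (a standalone sanity check, not driver code):

#include <assert.h>

/* Mirror of zx_hdmi_audio_get_n() for a quick check. */
static int audio_n(unsigned int fs)
{
	if (fs && (fs % 44100) == 0)
		return 6272 * (fs / 44100);
	return fs * 128 / 1000;
}

int main(void)
{
	assert(audio_n(44100) == 6272);		/* 44.1 kHz family */
	assert(audio_n(88200) == 12544);
	assert(audio_n(48000) == 6144);		/* 128 * 48000 / 1000 */
	assert(audio_n(32000) == 4096);
	return 0;
}
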
-
-static int zx_hdmi_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
- struct hdmi_audio_infoframe *cea = &params->cea;
- union hdmi_infoframe frame;
- int n;
-
- /* We only support spdif for now */
- if (daifmt->fmt != HDMI_SPDIF) {
- DRM_DEV_ERROR(hdmi->dev, "invalid daifmt %d\n", daifmt->fmt);
- return -EINVAL;
- }
-
- switch (params->sample_width) {
- case 16:
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
- SPDIF_SAMPLE_SIZE_16BIT);
- break;
- case 20:
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
- SPDIF_SAMPLE_SIZE_20BIT);
- break;
- case 24:
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
- SPDIF_SAMPLE_SIZE_24BIT);
- break;
- default:
- DRM_DEV_ERROR(hdmi->dev, "invalid sample width %d\n",
- params->sample_width);
- return -EINVAL;
- }
-
- /* CTS is calculated by hardware, and we only need to take care of N */
- n = zx_hdmi_audio_get_n(params->sample_rate);
- hdmi_writeb(hdmi, N_SVAL1, n & 0xff);
- hdmi_writeb(hdmi, N_SVAL2, (n >> 8) & 0xff);
- hdmi_writeb(hdmi, N_SVAL3, (n >> 16) & 0xf);
-
- /* Enable spdif mode */
- hdmi_writeb_mask(hdmi, AUD_MODE, SPDIF_EN, SPDIF_EN);
-
- /* Enable audio input */
- hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, AUD_IN_EN);
-
- memcpy(&frame.audio, cea, sizeof(*cea));
-
- return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AUDIO);
-}
-
-static int zx_hdmi_audio_mute(struct device *dev, void *data,
- bool enable, int direction)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
-
- if (enable)
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE,
- TPI_AUD_MUTE);
- else
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE, 0);
-
- return 0;
-}
-
-static int zx_hdmi_audio_get_eld(struct device *dev, void *data,
- uint8_t *buf, size_t len)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
- struct drm_connector *connector = &hdmi->connector;
-
- memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
-
- return 0;
-}
-
-static const struct hdmi_codec_ops zx_hdmi_codec_ops = {
- .audio_startup = zx_hdmi_audio_startup,
- .hw_params = zx_hdmi_audio_hw_params,
- .audio_shutdown = zx_hdmi_audio_shutdown,
- .mute_stream = zx_hdmi_audio_mute,
- .get_eld = zx_hdmi_audio_get_eld,
- .no_capture_mute = 1,
-};
-
-static struct hdmi_codec_pdata zx_hdmi_codec_pdata = {
- .ops = &zx_hdmi_codec_ops,
- .spdif = 1,
-};
-
-static int zx_hdmi_audio_register(struct zx_hdmi *hdmi)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_data(hdmi->dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &zx_hdmi_codec_pdata,
- sizeof(zx_hdmi_codec_pdata));
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- hdmi->audio_pdev = pdev;
-
- return 0;
-}
-
-static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg)
-{
- int len = msg->len;
- u8 *buf = msg->buf;
- int retry = 0;
- int ret = 0;
-
-	/* Bits [9:8] of the byte count */
-	hdmi_writeb(hdmi, ZX_DDC_DIN_CNT2, (len >> 8) & 0xff);
-	/* Bits [7:0] of the byte count */
-	hdmi_writeb(hdmi, ZX_DDC_DIN_CNT1, len & 0xff);
-
- /* Clear FIFO */
- hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK, DDC_CMD_CLEAR_FIFO);
-
- /* Kick off the read */
- hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK,
- DDC_CMD_SEQUENTIAL_READ);
-
- while (len > 0) {
- int cnt, i;
-
- /* FIFO needs some time to get ready */
- usleep_range(500, 1000);
-
- cnt = hdmi_readb(hdmi, ZX_DDC_DOUT_CNT) & DDC_DOUT_CNT_MASK;
- if (cnt == 0) {
- if (++retry > 5) {
- DRM_DEV_ERROR(hdmi->dev,
- "DDC FIFO read timed out!");
- return -ETIMEDOUT;
- }
- continue;
- }
-
- for (i = 0; i < cnt; i++)
- *buf++ = hdmi_readb(hdmi, ZX_DDC_DATA);
- len -= cnt;
- }
-
- return ret;
-}
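
The loop above is a bounded-poll pattern: sleep briefly, sample the FIFO count, give up after more than five empty polls. Note that the retry budget is cumulative across the whole transfer rather than per chunk. Distilled (a minimal sketch where read_fifo_count() is a hypothetical stand-in for the ZX_DDC_DOUT_CNT register read):

#include <errno.h>

/* Hypothetical helper mirroring the retry logic above; the retry
 * budget is cumulative across the transfer, as in the driver. */
static int drain_fifo(int (*read_fifo_count)(void), int len)
{
	int retry = 0;

	while (len > 0) {
		int cnt = read_fifo_count();

		if (cnt == 0) {
			if (++retry > 5)
				return -ETIMEDOUT;
			continue;
		}
		len -= cnt;	/* the driver copies cnt bytes out here */
	}
	return 0;
}

static int eight_bytes_per_poll(void) { return 8; }

int main(void)
{
	return drain_fifo(eight_bytes_per_poll, 128) ? 1 : 0;
}
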
-
-static int zx_hdmi_i2c_write(struct zx_hdmi *hdmi, struct i2c_msg *msg)
-{
- /*
- * The DDC I2C adapter is only for reading EDID data, so we assume
- * that the write to this adapter must be the EDID data offset.
- */
- if ((msg->len != 1) ||
- ((msg->addr != DDC_ADDR) && (msg->addr != DDC_SEGMENT_ADDR)))
- return -EINVAL;
-
- if (msg->addr == DDC_SEGMENT_ADDR)
- hdmi_writeb(hdmi, ZX_DDC_SEGM, msg->addr << 1);
- else if (msg->addr == DDC_ADDR)
- hdmi_writeb(hdmi, ZX_DDC_ADDR, msg->addr << 1);
-
- hdmi_writeb(hdmi, ZX_DDC_OFFSET, msg->buf[0]);
-
- return 0;
-}
-
-static int zx_hdmi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct zx_hdmi *hdmi = i2c_get_adapdata(adap);
- struct zx_hdmi_i2c *ddc = hdmi->ddc;
- int i, ret = 0;
-
- mutex_lock(&ddc->lock);
-
- /* Enable DDC master access */
- hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, HW_DDC_MASTER);
-
- for (i = 0; i < num; i++) {
- DRM_DEV_DEBUG(hdmi->dev,
- "xfer: num: %d/%d, len: %d, flags: %#x\n",
- i + 1, num, msgs[i].len, msgs[i].flags);
-
- if (msgs[i].flags & I2C_M_RD)
- ret = zx_hdmi_i2c_read(hdmi, &msgs[i]);
- else
- ret = zx_hdmi_i2c_write(hdmi, &msgs[i]);
-
- if (ret < 0)
- break;
- }
-
- if (!ret)
- ret = num;
-
- /* Disable DDC master access */
- hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, 0);
-
- mutex_unlock(&ddc->lock);
-
- return ret;
-}
-
-static u32 zx_hdmi_i2c_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm zx_hdmi_algorithm = {
- .master_xfer = zx_hdmi_i2c_xfer,
- .functionality = zx_hdmi_i2c_func,
-};
-
-static int zx_hdmi_ddc_register(struct zx_hdmi *hdmi)
-{
- struct i2c_adapter *adap;
- struct zx_hdmi_i2c *ddc;
- int ret;
-
- ddc = devm_kzalloc(hdmi->dev, sizeof(*ddc), GFP_KERNEL);
- if (!ddc)
- return -ENOMEM;
-
- hdmi->ddc = ddc;
- mutex_init(&ddc->lock);
-
- adap = &ddc->adap;
- adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DDC;
- adap->dev.parent = hdmi->dev;
- adap->algo = &zx_hdmi_algorithm;
- snprintf(adap->name, sizeof(adap->name), "zx hdmi i2c");
-
- ret = i2c_add_adapter(adap);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to add I2C adapter: %d\n",
- ret);
- return ret;
- }
-
- i2c_set_adapdata(adap, hdmi);
-
- return 0;
-}
-
-static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct resource *res;
- struct zx_hdmi *hdmi;
- int irq;
- int ret;
-
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
-
- hdmi->dev = dev;
- hdmi->drm = drm;
-
- dev_set_drvdata(dev, hdmi);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(hdmi->mmio)) {
- ret = PTR_ERR(hdmi->mmio);
- DRM_DEV_ERROR(dev, "failed to remap hdmi region: %d\n", ret);
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- hdmi->cec_clk = devm_clk_get(hdmi->dev, "osc_cec");
- if (IS_ERR(hdmi->cec_clk)) {
- ret = PTR_ERR(hdmi->cec_clk);
- DRM_DEV_ERROR(dev, "failed to get cec_clk: %d\n", ret);
- return ret;
- }
-
- hdmi->osc_clk = devm_clk_get(hdmi->dev, "osc_clk");
- if (IS_ERR(hdmi->osc_clk)) {
- ret = PTR_ERR(hdmi->osc_clk);
- DRM_DEV_ERROR(dev, "failed to get osc_clk: %d\n", ret);
- return ret;
- }
-
- hdmi->xclk = devm_clk_get(hdmi->dev, "xclk");
- if (IS_ERR(hdmi->xclk)) {
- ret = PTR_ERR(hdmi->xclk);
- DRM_DEV_ERROR(dev, "failed to get xclk: %d\n", ret);
- return ret;
- }
-
- ret = zx_hdmi_ddc_register(hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
- return ret;
- }
-
- ret = zx_hdmi_audio_register(hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register audio: %d\n", ret);
- return ret;
- }
-
- ret = zx_hdmi_register(drm, hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret);
- return ret;
- }
-
- ret = devm_request_threaded_irq(dev, irq, zx_hdmi_irq_handler,
- zx_hdmi_irq_thread, IRQF_SHARED,
- dev_name(dev), hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void zx_hdmi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
-
- hdmi->connector.funcs->destroy(&hdmi->connector);
- hdmi->encoder.funcs->destroy(&hdmi->encoder);
-
- if (hdmi->audio_pdev)
- platform_device_unregister(hdmi->audio_pdev);
-}
-
-static const struct component_ops zx_hdmi_component_ops = {
- .bind = zx_hdmi_bind,
- .unbind = zx_hdmi_unbind,
-};
-
-static int zx_hdmi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_hdmi_component_ops);
-}
-
-static int zx_hdmi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_hdmi_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_hdmi_of_match[] = {
- { .compatible = "zte,zx296718-hdmi", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_hdmi_of_match);
-
-struct platform_driver zx_hdmi_driver = {
- .probe = zx_hdmi_probe,
- .remove = zx_hdmi_remove,
- .driver = {
- .name = "zx-hdmi",
- .of_match_table = zx_hdmi_of_match,
- },
-};
diff --git a/drivers/gpu/drm/zte/zx_hdmi_regs.h b/drivers/gpu/drm/zte/zx_hdmi_regs.h
deleted file mode 100644
index 397949e64eff..000000000000
--- a/drivers/gpu/drm/zte/zx_hdmi_regs.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_HDMI_REGS_H__
-#define __ZX_HDMI_REGS_H__
-
-#define FUNC_SEL 0x000b
-#define FUNC_HDMI_EN BIT(0)
-#define CLKPWD 0x000d
-#define CLKPWD_PDIDCK BIT(2)
-#define P2T_CTRL 0x0066
-#define P2T_DC_PKT_EN BIT(7)
-#define L1_INTR_STAT 0x007e
-#define L1_INTR_STAT_INTR1 BIT(0)
-#define INTR1_STAT 0x008f
-#define INTR1_MASK 0x0095
-#define INTR1_MONITOR_DETECT (BIT(5) | BIT(6))
-#define ZX_DDC_ADDR 0x00ed
-#define ZX_DDC_SEGM 0x00ee
-#define ZX_DDC_OFFSET 0x00ef
-#define ZX_DDC_DIN_CNT1 0x00f0
-#define ZX_DDC_DIN_CNT2 0x00f1
-#define ZX_DDC_CMD 0x00f3
-#define DDC_CMD_MASK 0xf
-#define DDC_CMD_CLEAR_FIFO 0x9
-#define DDC_CMD_SEQUENTIAL_READ 0x2
-#define ZX_DDC_DATA 0x00f4
-#define ZX_DDC_DOUT_CNT 0x00f5
-#define DDC_DOUT_CNT_MASK 0x1f
-#define TEST_TXCTRL 0x00f7
-#define TEST_TXCTRL_HDMI_MODE BIT(1)
-#define HDMICTL4 0x0235
-#define TPI_HPD_RSEN 0x063b
-#define TPI_HPD_CONNECTION (BIT(1) | BIT(2))
-#define TPI_INFO_FSEL 0x06bf
-#define FSEL_AVI 0
-#define FSEL_GBD 1
-#define FSEL_AUDIO 2
-#define FSEL_SPD 3
-#define FSEL_MPEG 4
-#define FSEL_VSIF 5
-#define TPI_INFO_B0 0x06c0
-#define TPI_INFO_EN 0x06df
-#define TPI_INFO_TRANS_EN BIT(7)
-#define TPI_INFO_TRANS_RPT BIT(6)
-#define TPI_DDC_MASTER_EN 0x06f8
-#define HW_DDC_MASTER BIT(7)
-#define N_SVAL1 0xa03
-#define N_SVAL2 0xa04
-#define N_SVAL3 0xa05
-#define AUD_EN 0xa13
-#define AUD_IN_EN BIT(0)
-#define AUD_MODE 0xa14
-#define SPDIF_EN BIT(1)
-#define TPI_AUD_CONFIG 0xa62
-#define SPDIF_SAMPLE_SIZE_SHIFT 6
-#define SPDIF_SAMPLE_SIZE_MASK (0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define SPDIF_SAMPLE_SIZE_16BIT (0x1 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define SPDIF_SAMPLE_SIZE_20BIT (0x2 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define SPDIF_SAMPLE_SIZE_24BIT (0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define TPI_AUD_MUTE BIT(4)
-
-#endif /* __ZX_HDMI_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
deleted file mode 100644
index 93bcca428e35..000000000000
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ /dev/null
@@ -1,537 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
-
-#include "zx_common_regs.h"
-#include "zx_drm_drv.h"
-#include "zx_plane.h"
-#include "zx_plane_regs.h"
-#include "zx_vou.h"
-
-static const uint32_t gl_formats[] = {
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ARGB4444,
-};
-
-static const uint32_t vl_formats[] = {
- DRM_FORMAT_NV12, /* Semi-planar YUV420 */
- DRM_FORMAT_YUV420, /* Planar YUV420 */
- DRM_FORMAT_YUYV, /* Packed YUV422 */
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_YUV444, /* YUV444 8bit */
- /*
- * TODO: add formats below that HW supports:
- * - YUV420 P010
- * - YUV420 Hantro
- * - YUV444 10bit
- */
-};
-
-#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
-
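FRAC_16_16() yields a 16.16 fixed-point ratio, the unit drm_atomic_helper_check_plane_state() expects for its scaling limits, so the bounds used below permit anything from 1/8x up to 8x scaling (a compile-time check, reproducing the macro):

#define FRAC_16_16(mult, div) (((mult) << 16) / (div))

_Static_assert(FRAC_16_16(1, 8) == 0x02000, "1/8 in 16.16 fixed point");
_Static_assert(FRAC_16_16(8, 1) == 0x80000, "8.0 in 16.16 fixed point");
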
-static int zx_vl_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_crtc *crtc = plane_state->crtc;
- struct drm_crtc_state *crtc_state;
- int min_scale = FRAC_16_16(1, 8);
- int max_scale = FRAC_16_16(8, 1);
-
- if (!crtc || WARN_ON(!fb))
- return 0;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
- if (WARN_ON(!crtc_state))
- return -EINVAL;
-
- /* nothing to check when disabling or disabled */
- if (!crtc_state->enable)
- return 0;
-
- /* plane must be enabled */
- if (!plane_state->crtc)
- return -EINVAL;
-
- return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- min_scale, max_scale,
- true, true);
-}
-
-static int zx_vl_get_fmt(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_NV12:
- return VL_FMT_YUV420;
- case DRM_FORMAT_YUV420:
- return VL_YUV420_PLANAR | VL_FMT_YUV420;
- case DRM_FORMAT_YUYV:
- return VL_YUV422_YUYV | VL_FMT_YUV422;
- case DRM_FORMAT_YVYU:
- return VL_YUV422_YVYU | VL_FMT_YUV422;
- case DRM_FORMAT_UYVY:
- return VL_YUV422_UYVY | VL_FMT_YUV422;
- case DRM_FORMAT_VYUY:
- return VL_YUV422_VYUY | VL_FMT_YUV422;
- case DRM_FORMAT_YUV444:
- return VL_FMT_YUV444_8BIT;
- default:
- WARN_ONCE(1, "invalid pixel format %d\n", format);
- return -EINVAL;
- }
-}
-
-static inline void zx_vl_set_update(struct zx_plane *zplane)
-{
- void __iomem *layer = zplane->layer;
-
- zx_writel_mask(layer + VL_CTRL0, VL_UPDATE, VL_UPDATE);
-}
-
-static inline void zx_vl_rsz_set_update(struct zx_plane *zplane)
-{
- zx_writel(zplane->rsz + RSZ_VL_ENABLE_CFG, 1);
-}
-
-static int zx_vl_rsz_get_fmt(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_YUV420:
- return RSZ_VL_FMT_YCBCR420;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return RSZ_VL_FMT_YCBCR422;
- case DRM_FORMAT_YUV444:
- return RSZ_VL_FMT_YCBCR444;
- default:
- WARN_ONCE(1, "invalid pixel format %d\n", format);
- return -EINVAL;
- }
-}
-
-static inline u32 rsz_step_value(u32 src, u32 dst)
-{
- u32 val = 0;
-
- if (src == dst)
- val = 0;
- else if (src < dst)
- val = RSZ_PARA_STEP((src << 16) / dst);
- else if (src > dst)
- val = RSZ_DATA_STEP(src / dst) |
- RSZ_PARA_STEP(((src << 16) / dst) & 0xffff);
-
- return val;
-}
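
rsz_step_value() encodes the source/destination ratio as an integer data step plus a 16-bit fractional parameter step, i.e. the full 16.16 ratio split across two register fields. Two worked cases (plain arithmetic, not driver code):

#include <assert.h>

/* Re-derivation of rsz_step_value() with the register shifts inlined. */
int main(void)
{
	/* Downscale 1920 -> 720: data step 2, parameter step 0xaaaa */
	assert(1920 / 720 == 2);
	assert(((1920u << 16) / 720) == 0x2aaaa);

	/* Upscale 720 -> 1080: parameter step only, 0xaaaa ~= 0.666 */
	assert(((720u << 16) / 1080) == 0xaaaa);
	return 0;
}
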
-
-static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format,
- u32 src_w, u32 src_h, u32 dst_w, u32 dst_h)
-{
- void __iomem *rsz = zplane->rsz;
- u32 src_chroma_w = src_w;
- u32 src_chroma_h = src_h;
- int fmt;
-
- /* Set up source and destination resolution */
- zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
- zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
-
- /* Configure data format for VL RSZ */
- fmt = zx_vl_rsz_get_fmt(format);
- if (fmt >= 0)
- zx_writel_mask(rsz + RSZ_VL_CTRL_CFG, RSZ_VL_FMT_MASK, fmt);
-
- /* Calculate Chroma height and width */
- if (fmt == RSZ_VL_FMT_YCBCR420) {
- src_chroma_w = src_w >> 1;
- src_chroma_h = src_h >> 1;
- } else if (fmt == RSZ_VL_FMT_YCBCR422) {
- src_chroma_w = src_w >> 1;
- }
-
- /* Set up Luma and Chroma step registers */
- zx_writel(rsz + RSZ_VL_LUMA_HOR, rsz_step_value(src_w, dst_w));
- zx_writel(rsz + RSZ_VL_LUMA_VER, rsz_step_value(src_h, dst_h));
- zx_writel(rsz + RSZ_VL_CHROMA_HOR, rsz_step_value(src_chroma_w, dst_w));
- zx_writel(rsz + RSZ_VL_CHROMA_VER, rsz_step_value(src_chroma_h, dst_h));
-
- zx_vl_rsz_set_update(zplane);
-}
-
-static void zx_vl_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct zx_plane *zplane = to_zx_plane(plane);
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_framebuffer *fb = new_state->fb;
- struct drm_rect *src = &new_state->src;
- struct drm_rect *dst = &new_state->dst;
- struct drm_gem_cma_object *cma_obj;
- void __iomem *layer = zplane->layer;
- void __iomem *hbsc = zplane->hbsc;
- void __iomem *paddr_reg;
- dma_addr_t paddr;
- u32 src_x, src_y, src_w, src_h;
- u32 dst_x, dst_y, dst_w, dst_h;
- uint32_t format;
- int fmt;
- int i;
-
- if (!fb)
- return;
-
- format = fb->format->format;
-
- src_x = src->x1 >> 16;
- src_y = src->y1 >> 16;
- src_w = drm_rect_width(src) >> 16;
- src_h = drm_rect_height(src) >> 16;
-
- dst_x = dst->x1;
- dst_y = dst->y1;
- dst_w = drm_rect_width(dst);
- dst_h = drm_rect_height(dst);
-
- /* Set up data address registers for Y, Cb and Cr planes */
- paddr_reg = layer + VL_Y;
- for (i = 0; i < fb->format->num_planes; i++) {
- cma_obj = drm_fb_cma_get_gem_obj(fb, i);
- paddr = cma_obj->paddr + fb->offsets[i];
- paddr += src_y * fb->pitches[i];
- paddr += src_x * fb->format->cpp[i];
- zx_writel(paddr_reg, paddr);
- paddr_reg += 4;
- }
-
- /* Set up source height/width register */
- zx_writel(layer + VL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
-
- /* Set up start position register */
- zx_writel(layer + VL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
-
- /* Set up end position register */
- zx_writel(layer + VL_POS_END,
- GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
-
- /* Strides of Cb and Cr planes should be identical */
- zx_writel(layer + VL_STRIDE, LUMA_STRIDE(fb->pitches[0]) |
- CHROMA_STRIDE(fb->pitches[1]));
-
- /* Set up video layer data format */
- fmt = zx_vl_get_fmt(format);
- if (fmt >= 0)
- zx_writel(layer + VL_CTRL1, fmt);
-
-	/* Always use the scaler; setting this bit selects non-bypass mode */
- zx_writel_mask(layer + VL_CTRL2, VL_SCALER_BYPASS_MODE,
- VL_SCALER_BYPASS_MODE);
-
- zx_vl_rsz_setup(zplane, format, src_w, src_h, dst_w, dst_h);
-
- /* Enable HBSC block */
- zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
-
- zx_vou_layer_enable(plane);
-
- zx_vl_set_update(zplane);
-}
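
The per-plane address loop above reduces to base + offsets[i] + src_y * pitches[i] + src_x * cpp[i] for each of the Y/Cb/Cr address registers. A standalone restatement (a sketch with hypothetical names; note that generic helpers such as drm_fb_cma_get_gem_addr() additionally divide the source coordinates by the format's hsub/vsub for subsampled chroma planes):

#include <stdint.h>

/* Hypothetical restatement of the scanout address math above. */
static uint64_t plane_paddr(uint64_t base, uint32_t offset, uint32_t pitch,
			    uint32_t cpp, uint32_t src_x, uint32_t src_y)
{
	return base + offset + (uint64_t)src_y * pitch + (uint64_t)src_x * cpp;
}

/* e.g. NV12 1920x1080: Y at plane_paddr(base, 0, 1920, 1, 0, 0),
 * CbCr at plane_paddr(base, 1920 * 1080, 1920, 2, 0, 0). */
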
-
-static void zx_plane_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct zx_plane *zplane = to_zx_plane(plane);
- void __iomem *hbsc = zplane->hbsc;
-
- zx_vou_layer_disable(plane, old_state);
-
- /* Disable HBSC block */
- zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0);
-}
-
-static const struct drm_plane_helper_funcs zx_vl_plane_helper_funcs = {
- .atomic_check = zx_vl_plane_atomic_check,
- .atomic_update = zx_vl_plane_atomic_update,
- .atomic_disable = zx_plane_atomic_disable,
-};
-
-static int zx_gl_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_crtc *crtc = plane_state->crtc;
- struct drm_crtc_state *crtc_state;
-
- if (!crtc || WARN_ON(!fb))
- return 0;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
- if (WARN_ON(!crtc_state))
- return -EINVAL;
-
- /* nothing to check when disabling or disabled */
- if (!crtc_state->enable)
- return 0;
-
- /* plane must be enabled */
- if (!plane_state->crtc)
- return -EINVAL;
-
- return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
-}
-
-static int zx_gl_get_fmt(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB8888:
- return GL_FMT_ARGB8888;
- case DRM_FORMAT_RGB888:
- return GL_FMT_RGB888;
- case DRM_FORMAT_RGB565:
- return GL_FMT_RGB565;
- case DRM_FORMAT_ARGB1555:
- return GL_FMT_ARGB1555;
- case DRM_FORMAT_ARGB4444:
- return GL_FMT_ARGB4444;
- default:
- WARN_ONCE(1, "invalid pixel format %d\n", format);
- return -EINVAL;
- }
-}
-
-static inline void zx_gl_set_update(struct zx_plane *zplane)
-{
- void __iomem *layer = zplane->layer;
-
- zx_writel_mask(layer + GL_CTRL0, GL_UPDATE, GL_UPDATE);
-}
-
-static inline void zx_gl_rsz_set_update(struct zx_plane *zplane)
-{
- zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1);
-}
-
-static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h,
- u32 dst_w, u32 dst_h)
-{
- void __iomem *rsz = zplane->rsz;
-
- zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
- zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
-
- zx_gl_rsz_set_update(zplane);
-}
-
-static void zx_gl_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct zx_plane *zplane = to_zx_plane(plane);
- struct drm_framebuffer *fb = new_state->fb;
- struct drm_gem_cma_object *cma_obj;
- void __iomem *layer = zplane->layer;
- void __iomem *csc = zplane->csc;
- void __iomem *hbsc = zplane->hbsc;
- u32 src_x, src_y, src_w, src_h;
- u32 dst_x, dst_y, dst_w, dst_h;
- unsigned int bpp;
- uint32_t format;
- dma_addr_t paddr;
- u32 stride;
- int fmt;
-
- if (!fb)
- return;
-
- format = fb->format->format;
- stride = fb->pitches[0];
-
- src_x = new_state->src_x >> 16;
- src_y = new_state->src_y >> 16;
- src_w = new_state->src_w >> 16;
- src_h = new_state->src_h >> 16;
-
- dst_x = new_state->crtc_x;
- dst_y = new_state->crtc_y;
- dst_w = new_state->crtc_w;
- dst_h = new_state->crtc_h;
-
- bpp = fb->format->cpp[0];
-
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- paddr = cma_obj->paddr + fb->offsets[0];
- paddr += src_y * stride + src_x * bpp / 8;
- zx_writel(layer + GL_ADDR, paddr);
-
- /* Set up source height/width register */
- zx_writel(layer + GL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
-
- /* Set up start position register */
- zx_writel(layer + GL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
-
- /* Set up end position register */
- zx_writel(layer + GL_POS_END,
- GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
-
- /* Set up stride register */
- zx_writel(layer + GL_STRIDE, stride & 0xffff);
-
- /* Set up graphic layer data format */
- fmt = zx_gl_get_fmt(format);
- if (fmt >= 0)
- zx_writel_mask(layer + GL_CTRL1, GL_DATA_FMT_MASK,
- fmt << GL_DATA_FMT_SHIFT);
-
- /* Initialize global alpha with a sane value */
- zx_writel_mask(layer + GL_CTRL2, GL_GLOBAL_ALPHA_MASK,
- 0xff << GL_GLOBAL_ALPHA_SHIFT);
-
- /* Setup CSC for the GL */
- if (dst_h > 720)
- zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
- CSC_BT709_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
- else
- zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
- CSC_BT601_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
- zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, CSC_WORK_ENABLE);
-
-	/* Always use the scaler; setting this bit selects non-bypass mode */
- zx_writel_mask(layer + GL_CTRL3, GL_SCALER_BYPASS_MODE,
- GL_SCALER_BYPASS_MODE);
-
- zx_gl_rsz_setup(zplane, src_w, src_h, dst_w, dst_h);
-
- /* Enable HBSC block */
- zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
-
- zx_vou_layer_enable(plane);
-
- zx_gl_set_update(zplane);
-}
-
-static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
- .atomic_check = zx_gl_plane_atomic_check,
- .atomic_update = zx_gl_plane_atomic_update,
- .atomic_disable = zx_plane_atomic_disable,
-};
-
-static const struct drm_plane_funcs zx_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-void zx_plane_set_update(struct drm_plane *plane)
-{
- struct zx_plane *zplane = to_zx_plane(plane);
-
- /* Do nothing if the plane is not enabled */
- if (!plane->state->crtc)
- return;
-
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
- zx_gl_rsz_set_update(zplane);
- zx_gl_set_update(zplane);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- zx_vl_rsz_set_update(zplane);
- zx_vl_set_update(zplane);
- break;
- default:
- WARN_ONCE(1, "unsupported plane type %d\n", plane->type);
- }
-}
-
-static void zx_plane_hbsc_init(struct zx_plane *zplane)
-{
- void __iomem *hbsc = zplane->hbsc;
-
- /*
-	 * Initialize HBSC block with a sane configuration per recommendation
- * from ZTE BSP code.
- */
- zx_writel(hbsc + HBSC_SATURATION, 0x200);
- zx_writel(hbsc + HBSC_HUE, 0x0);
- zx_writel(hbsc + HBSC_BRIGHT, 0x0);
- zx_writel(hbsc + HBSC_CONTRAST, 0x200);
-
- zx_writel(hbsc + HBSC_THRESHOLD_COL1, (0x3ac << 16) | 0x40);
- zx_writel(hbsc + HBSC_THRESHOLD_COL2, (0x3c0 << 16) | 0x40);
- zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40);
-}
-
-int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
- enum drm_plane_type type)
-{
- const struct drm_plane_helper_funcs *helper;
- struct drm_plane *plane = &zplane->plane;
- struct device *dev = zplane->dev;
- const uint32_t *formats;
- unsigned int format_count;
- int ret;
-
- zx_plane_hbsc_init(zplane);
-
- switch (type) {
- case DRM_PLANE_TYPE_PRIMARY:
- helper = &zx_gl_plane_helper_funcs;
- formats = gl_formats;
- format_count = ARRAY_SIZE(gl_formats);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- helper = &zx_vl_plane_helper_funcs;
- formats = vl_formats;
- format_count = ARRAY_SIZE(vl_formats);
- break;
- default:
- return -ENODEV;
- }
-
- ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK,
- &zx_plane_funcs, formats, format_count,
- NULL, type, NULL);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret);
- return ret;
- }
-
- drm_plane_helper_add(plane, helper);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/zte/zx_plane.h b/drivers/gpu/drm/zte/zx_plane.h
deleted file mode 100644
index 5a7cc8b3b985..000000000000
--- a/drivers/gpu/drm/zte/zx_plane.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_PLANE_H__
-#define __ZX_PLANE_H__
-
-struct zx_plane {
- struct drm_plane plane;
- struct device *dev;
- void __iomem *layer;
- void __iomem *csc;
- void __iomem *hbsc;
- void __iomem *rsz;
- const struct vou_layer_bits *bits;
-};
-
-#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane)
-
-int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
- enum drm_plane_type type);
-void zx_plane_set_update(struct drm_plane *plane);
-
-#endif /* __ZX_PLANE_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane_regs.h b/drivers/gpu/drm/zte/zx_plane_regs.h
deleted file mode 100644
index ce830637a92d..000000000000
--- a/drivers/gpu/drm/zte/zx_plane_regs.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_PLANE_REGS_H__
-#define __ZX_PLANE_REGS_H__
-
-/* GL registers */
-#define GL_CTRL0 0x00
-#define GL_UPDATE BIT(5)
-#define GL_CTRL1 0x04
-#define GL_DATA_FMT_SHIFT 0
-#define GL_DATA_FMT_MASK (0xf << GL_DATA_FMT_SHIFT)
-#define GL_FMT_ARGB8888 0
-#define GL_FMT_RGB888 1
-#define GL_FMT_RGB565 2
-#define GL_FMT_ARGB1555 3
-#define GL_FMT_ARGB4444 4
-#define GL_CTRL2 0x08
-#define GL_GLOBAL_ALPHA_SHIFT 8
-#define GL_GLOBAL_ALPHA_MASK (0xff << GL_GLOBAL_ALPHA_SHIFT)
-#define GL_CTRL3 0x0c
-#define GL_SCALER_BYPASS_MODE BIT(0)
-#define GL_STRIDE 0x18
-#define GL_ADDR 0x1c
-#define GL_SRC_SIZE 0x38
-#define GL_SRC_W_SHIFT 16
-#define GL_SRC_W_MASK (0x3fff << GL_SRC_W_SHIFT)
-#define GL_SRC_H_SHIFT 0
-#define GL_SRC_H_MASK (0x3fff << GL_SRC_H_SHIFT)
-#define GL_POS_START 0x9c
-#define GL_POS_END 0xa0
-#define GL_POS_X_SHIFT 16
-#define GL_POS_X_MASK (0x1fff << GL_POS_X_SHIFT)
-#define GL_POS_Y_SHIFT 0
-#define GL_POS_Y_MASK (0x1fff << GL_POS_Y_SHIFT)
-
-#define GL_SRC_W(x) (((x) << GL_SRC_W_SHIFT) & GL_SRC_W_MASK)
-#define GL_SRC_H(x) (((x) << GL_SRC_H_SHIFT) & GL_SRC_H_MASK)
-#define GL_POS_X(x) (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK)
-#define GL_POS_Y(x) (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK)
-
-/* VL registers */
-#define VL_CTRL0 0x00
-#define VL_UPDATE BIT(3)
-#define VL_CTRL1 0x04
-#define VL_YUV420_PLANAR BIT(5)
-#define VL_YUV422_SHIFT 3
-#define VL_YUV422_YUYV (0 << VL_YUV422_SHIFT)
-#define VL_YUV422_YVYU (1 << VL_YUV422_SHIFT)
-#define VL_YUV422_UYVY (2 << VL_YUV422_SHIFT)
-#define VL_YUV422_VYUY (3 << VL_YUV422_SHIFT)
-#define VL_FMT_YUV420 0
-#define VL_FMT_YUV422 1
-#define VL_FMT_YUV420_P010 2
-#define VL_FMT_YUV420_HANTRO 3
-#define VL_FMT_YUV444_8BIT 4
-#define VL_FMT_YUV444_10BIT 5
-#define VL_CTRL2 0x08
-#define VL_SCALER_BYPASS_MODE BIT(0)
-#define VL_STRIDE 0x0c
-#define LUMA_STRIDE_SHIFT 16
-#define LUMA_STRIDE_MASK (0xffff << LUMA_STRIDE_SHIFT)
-#define CHROMA_STRIDE_SHIFT 0
-#define CHROMA_STRIDE_MASK (0xffff << CHROMA_STRIDE_SHIFT)
-#define VL_SRC_SIZE 0x10
-#define VL_Y 0x14
-#define VL_POS_START 0x30
-#define VL_POS_END 0x34
-
-#define LUMA_STRIDE(x) (((x) << LUMA_STRIDE_SHIFT) & LUMA_STRIDE_MASK)
-#define CHROMA_STRIDE(x) (((x) << CHROMA_STRIDE_SHIFT) & CHROMA_STRIDE_MASK)
-
-/* RSZ registers */
-#define RSZ_SRC_CFG 0x00
-#define RSZ_DEST_CFG 0x04
-#define RSZ_ENABLE_CFG 0x14
-
-#define RSZ_VL_LUMA_HOR 0x08
-#define RSZ_VL_LUMA_VER 0x0c
-#define RSZ_VL_CHROMA_HOR 0x10
-#define RSZ_VL_CHROMA_VER 0x14
-#define RSZ_VL_CTRL_CFG 0x18
-#define RSZ_VL_FMT_SHIFT 3
-#define RSZ_VL_FMT_MASK (0x3 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_FMT_YCBCR420 (0x0 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_FMT_YCBCR422 (0x1 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_FMT_YCBCR444 (0x2 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_ENABLE_CFG 0x1c
-
-#define RSZ_VER_SHIFT 16
-#define RSZ_VER_MASK (0xffff << RSZ_VER_SHIFT)
-#define RSZ_HOR_SHIFT 0
-#define RSZ_HOR_MASK (0xffff << RSZ_HOR_SHIFT)
-
-#define RSZ_VER(x) (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK)
-#define RSZ_HOR(x) (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK)
-
-#define RSZ_DATA_STEP_SHIFT 16
-#define RSZ_DATA_STEP_MASK (0xffff << RSZ_DATA_STEP_SHIFT)
-#define RSZ_PARA_STEP_SHIFT 0
-#define RSZ_PARA_STEP_MASK (0xffff << RSZ_PARA_STEP_SHIFT)
-
-#define RSZ_DATA_STEP(x) (((x) << RSZ_DATA_STEP_SHIFT) & RSZ_DATA_STEP_MASK)
-#define RSZ_PARA_STEP(x) (((x) << RSZ_PARA_STEP_SHIFT) & RSZ_PARA_STEP_MASK)
-
-/* HBSC registers */
-#define HBSC_SATURATION 0x00
-#define HBSC_HUE 0x04
-#define HBSC_BRIGHT 0x08
-#define HBSC_CONTRAST 0x0c
-#define HBSC_THRESHOLD_COL1 0x10
-#define HBSC_THRESHOLD_COL2 0x14
-#define HBSC_THRESHOLD_COL3 0x18
-#define HBSC_CTRL0 0x28
-#define HBSC_CTRL_EN BIT(2)
-
-#endif /* __ZX_PLANE_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
deleted file mode 100644
index d8a89ba383bc..000000000000
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ /dev/null
@@ -1,400 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2017 Linaro Ltd.
- * Copyright 2017 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#include "zx_drm_drv.h"
-#include "zx_tvenc_regs.h"
-#include "zx_vou.h"
-
-struct zx_tvenc_pwrctrl {
- struct regmap *regmap;
- u32 reg;
- u32 mask;
-};
-
-struct zx_tvenc {
- struct drm_connector connector;
- struct drm_encoder encoder;
- struct device *dev;
- void __iomem *mmio;
- const struct vou_inf *inf;
- struct zx_tvenc_pwrctrl pwrctrl;
-};
-
-#define to_zx_tvenc(x) container_of(x, struct zx_tvenc, x)
-
-struct zx_tvenc_mode {
- struct drm_display_mode mode;
- u32 video_info;
- u32 video_res;
- u32 field1_param;
- u32 field2_param;
- u32 burst_line_odd1;
- u32 burst_line_even1;
- u32 burst_line_odd2;
- u32 burst_line_even2;
- u32 line_timing_param;
- u32 weight_value;
- u32 blank_black_level;
- u32 burst_level;
- u32 control_param;
- u32 sub_carrier_phase1;
- u32 phase_line_incr_cvbs;
-};
-
-/*
- * The CRM cannot directly provide a suitable frequency, so we have to
- * request a multiplied rate from the CRM and use the divider in the VOU
- * to get the desired one.
- */
-#define TVENC_CLOCK_MULTIPLIER 4
-
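Concretely: both timings below declare .clock = 13500 * 4 = 54000 kHz, and zx_tvenc_encoder_mode_set() programs VOU_DIV_INF to divide by 4, landing back on the 13.5 MHz pixel clock that PAL and NTSC CVBS require (a standalone arithmetic check):

#include <assert.h>

int main(void)
{
	const unsigned int multiplier = 4;		/* TVENC_CLOCK_MULTIPLIER */
	const unsigned int mode_clock = 13500 * multiplier;	/* kHz, as in .clock */

	/* VOU_DIV_INF is set to divide by 4 in zx_tvenc_encoder_mode_set() */
	assert(mode_clock / 4 == 13500);	/* 13.5 MHz CVBS pixel clock */
	return 0;
}
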
-static const struct zx_tvenc_mode tvenc_mode_pal = {
- .mode = {
- .clock = 13500 * TVENC_CLOCK_MULTIPLIER,
- .hdisplay = 720,
- .hsync_start = 720 + 12,
- .hsync_end = 720 + 12 + 2,
- .htotal = 720 + 12 + 2 + 130,
- .vdisplay = 576,
- .vsync_start = 576 + 2,
- .vsync_end = 576 + 2 + 2,
- .vtotal = 576 + 2 + 2 + 20,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE,
- },
- .video_info = 0x00040040,
- .video_res = 0x05a9c760,
- .field1_param = 0x0004d416,
- .field2_param = 0x0009b94f,
- .burst_line_odd1 = 0x0004d406,
- .burst_line_even1 = 0x0009b53e,
- .burst_line_odd2 = 0x0004d805,
- .burst_line_even2 = 0x0009b93f,
- .line_timing_param = 0x06a96fdf,
- .weight_value = 0x00c188a0,
- .blank_black_level = 0x0000fcfc,
- .burst_level = 0x00001595,
- .control_param = 0x00000001,
- .sub_carrier_phase1 = 0x1504c566,
- .phase_line_incr_cvbs = 0xc068db8c,
-};
-
-static const struct zx_tvenc_mode tvenc_mode_ntsc = {
- .mode = {
- .clock = 13500 * TVENC_CLOCK_MULTIPLIER,
- .hdisplay = 720,
- .hsync_start = 720 + 16,
- .hsync_end = 720 + 16 + 2,
- .htotal = 720 + 16 + 2 + 120,
- .vdisplay = 480,
- .vsync_start = 480 + 3,
- .vsync_end = 480 + 3 + 2,
- .vtotal = 480 + 3 + 2 + 17,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE,
- },
- .video_info = 0x00040080,
- .video_res = 0x05a8375a,
- .field1_param = 0x00041817,
- .field2_param = 0x0008351e,
- .burst_line_odd1 = 0x00041006,
- .burst_line_even1 = 0x0008290d,
- .burst_line_odd2 = 0x00000000,
- .burst_line_even2 = 0x00000000,
- .line_timing_param = 0x06a8ef9e,
- .weight_value = 0x00b68197,
- .blank_black_level = 0x0000f0f0,
- .burst_level = 0x0000009c,
- .control_param = 0x00000001,
- .sub_carrier_phase1 = 0x10f83e10,
- .phase_line_incr_cvbs = 0x80000000,
-};
-
-static const struct zx_tvenc_mode *tvenc_modes[] = {
- &tvenc_mode_pal,
- &tvenc_mode_ntsc,
-};
-
-static const struct zx_tvenc_mode *
-zx_tvenc_find_zmode(struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) {
- const struct zx_tvenc_mode *zmode = tvenc_modes[i];
-
- if (drm_mode_equal(mode, &zmode->mode))
- return zmode;
- }
-
- return NULL;
-}
-
-static void zx_tvenc_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
- const struct zx_tvenc_mode *zmode;
- struct vou_div_config configs[] = {
- { VOU_DIV_INF, VOU_DIV_4 },
- { VOU_DIV_TVENC, VOU_DIV_1 },
- { VOU_DIV_LAYER, VOU_DIV_2 },
- };
-
- zx_vou_config_dividers(encoder->crtc, configs, ARRAY_SIZE(configs));
-
- zmode = zx_tvenc_find_zmode(mode);
- if (!zmode) {
- DRM_DEV_ERROR(tvenc->dev, "failed to find zmode\n");
- return;
- }
-
- zx_writel(tvenc->mmio + VENC_VIDEO_INFO, zmode->video_info);
- zx_writel(tvenc->mmio + VENC_VIDEO_RES, zmode->video_res);
- zx_writel(tvenc->mmio + VENC_FIELD1_PARAM, zmode->field1_param);
- zx_writel(tvenc->mmio + VENC_FIELD2_PARAM, zmode->field2_param);
- zx_writel(tvenc->mmio + VENC_LINE_O_1, zmode->burst_line_odd1);
- zx_writel(tvenc->mmio + VENC_LINE_E_1, zmode->burst_line_even1);
- zx_writel(tvenc->mmio + VENC_LINE_O_2, zmode->burst_line_odd2);
- zx_writel(tvenc->mmio + VENC_LINE_E_2, zmode->burst_line_even2);
- zx_writel(tvenc->mmio + VENC_LINE_TIMING_PARAM,
- zmode->line_timing_param);
- zx_writel(tvenc->mmio + VENC_WEIGHT_VALUE, zmode->weight_value);
- zx_writel(tvenc->mmio + VENC_BLANK_BLACK_LEVEL,
- zmode->blank_black_level);
- zx_writel(tvenc->mmio + VENC_BURST_LEVEL, zmode->burst_level);
- zx_writel(tvenc->mmio + VENC_CONTROL_PARAM, zmode->control_param);
- zx_writel(tvenc->mmio + VENC_SUB_CARRIER_PHASE1,
- zmode->sub_carrier_phase1);
- zx_writel(tvenc->mmio + VENC_PHASE_LINE_INCR_CVBS,
- zmode->phase_line_incr_cvbs);
-}
-
-static void zx_tvenc_encoder_enable(struct drm_encoder *encoder)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
- struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
-
- /* Set bit to power up TVENC DAC */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask,
- pwrctrl->mask);
-
- vou_inf_enable(VOU_TV_ENC, encoder->crtc);
-
- zx_writel(tvenc->mmio + VENC_ENABLE, 1);
-}
-
-static void zx_tvenc_encoder_disable(struct drm_encoder *encoder)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
- struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
-
- zx_writel(tvenc->mmio + VENC_ENABLE, 0);
-
- vou_inf_disable(VOU_TV_ENC, encoder->crtc);
-
- /* Clear bit to power down TVENC DAC */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0);
-}
-
-static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = {
- .enable = zx_tvenc_encoder_enable,
- .disable = zx_tvenc_encoder_disable,
- .mode_set = zx_tvenc_encoder_mode_set,
-};
-
-static int zx_tvenc_connector_get_modes(struct drm_connector *connector)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(connector);
- struct device *dev = tvenc->dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) {
- const struct zx_tvenc_mode *zmode = tvenc_modes[i];
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &zmode->mode);
- if (!mode) {
- DRM_DEV_ERROR(dev, "failed to duplicate drm mode\n");
- continue;
- }
-
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
- }
-
- return i;
-}
-
-static enum drm_mode_status
-zx_tvenc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(connector);
- const struct zx_tvenc_mode *zmode;
-
- zmode = zx_tvenc_find_zmode(mode);
- if (!zmode) {
- DRM_DEV_ERROR(tvenc->dev, "unsupported mode: %s\n", mode->name);
- return MODE_NOMODE;
- }
-
- return MODE_OK;
-}
-
-static struct drm_connector_helper_funcs zx_tvenc_connector_helper_funcs = {
- .get_modes = zx_tvenc_connector_get_modes,
- .mode_valid = zx_tvenc_connector_mode_valid,
-};
-
-static const struct drm_connector_funcs zx_tvenc_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
-{
- struct drm_encoder *encoder = &tvenc->encoder;
- struct drm_connector *connector = &tvenc->connector;
-
- /*
- * The tvenc is designed to use the aux channel, as there is a deflicker
- * block on that channel.
- */
- encoder->possible_crtcs = BIT(1);
-
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC);
- drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs);
-
- connector->interlace_allowed = true;
-
- drm_connector_init(drm, connector, &zx_tvenc_connector_funcs,
- DRM_MODE_CONNECTOR_Composite);
- drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs);
-
- drm_connector_attach_encoder(connector, encoder);
-
- return 0;
-}
-
-static int zx_tvenc_pwrctrl_init(struct zx_tvenc *tvenc)
-{
- struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
- struct device *dev = tvenc->dev;
- struct of_phandle_args out_args;
- struct regmap *regmap;
- int ret;
-
- ret = of_parse_phandle_with_fixed_args(dev->of_node,
- "zte,tvenc-power-control", 2, 0, &out_args);
- if (ret)
- return ret;
-
- regmap = syscon_node_to_regmap(out_args.np);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto out;
- }
-
- pwrctrl->regmap = regmap;
- pwrctrl->reg = out_args.args[0];
- pwrctrl->mask = out_args.args[1];
-
-out:
- of_node_put(out_args.np);
- return ret;
-}
-
-static int zx_tvenc_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct resource *res;
- struct zx_tvenc *tvenc;
- int ret;
-
- tvenc = devm_kzalloc(dev, sizeof(*tvenc), GFP_KERNEL);
- if (!tvenc)
- return -ENOMEM;
-
- tvenc->dev = dev;
- dev_set_drvdata(dev, tvenc);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tvenc->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(tvenc->mmio)) {
- ret = PTR_ERR(tvenc->mmio);
- DRM_DEV_ERROR(dev, "failed to remap tvenc region: %d\n", ret);
- return ret;
- }
-
- ret = zx_tvenc_pwrctrl_init(tvenc);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret);
- return ret;
- }
-
- ret = zx_tvenc_register(drm, tvenc);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register tvenc: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void zx_tvenc_unbind(struct device *dev, struct device *master,
- void *data)
-{
- /* Nothing to do */
-}
-
-static const struct component_ops zx_tvenc_component_ops = {
- .bind = zx_tvenc_bind,
- .unbind = zx_tvenc_unbind,
-};
-
-static int zx_tvenc_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_tvenc_component_ops);
-}
-
-static int zx_tvenc_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_tvenc_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_tvenc_of_match[] = {
- { .compatible = "zte,zx296718-tvenc", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_tvenc_of_match);
-
-struct platform_driver zx_tvenc_driver = {
- .probe = zx_tvenc_probe,
- .remove = zx_tvenc_remove,
- .driver = {
- .name = "zx-tvenc",
- .of_match_table = zx_tvenc_of_match,
- },
-};
diff --git a/drivers/gpu/drm/zte/zx_tvenc_regs.h b/drivers/gpu/drm/zte/zx_tvenc_regs.h
deleted file mode 100644
index 40f033109374..000000000000
--- a/drivers/gpu/drm/zte/zx_tvenc_regs.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2017 Linaro Ltd.
- * Copyright 2017 ZTE Corporation.
- */
-
-#ifndef __ZX_TVENC_REGS_H__
-#define __ZX_TVENC_REGS_H__
-
-#define VENC_VIDEO_INFO 0x04
-#define VENC_VIDEO_RES 0x08
-#define VENC_FIELD1_PARAM 0x10
-#define VENC_FIELD2_PARAM 0x14
-#define VENC_LINE_O_1 0x18
-#define VENC_LINE_E_1 0x1c
-#define VENC_LINE_O_2 0x20
-#define VENC_LINE_E_2 0x24
-#define VENC_LINE_TIMING_PARAM 0x28
-#define VENC_WEIGHT_VALUE 0x2c
-#define VENC_BLANK_BLACK_LEVEL 0x30
-#define VENC_BURST_LEVEL 0x34
-#define VENC_CONTROL_PARAM 0x3c
-#define VENC_SUB_CARRIER_PHASE1 0x40
-#define VENC_PHASE_LINE_INCR_CVBS 0x48
-#define VENC_ENABLE 0xa8
-
-#endif /* __ZX_TVENC_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
deleted file mode 100644
index 0f9bbb7e3b8d..000000000000
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ /dev/null
@@ -1,527 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#include "zx_drm_drv.h"
-#include "zx_vga_regs.h"
-#include "zx_vou.h"
-
-struct zx_vga_pwrctrl {
- struct regmap *regmap;
- u32 reg;
- u32 mask;
-};
-
-struct zx_vga_i2c {
- struct i2c_adapter adap;
- struct mutex lock;
-};
-
-struct zx_vga {
- struct drm_connector connector;
- struct drm_encoder encoder;
- struct zx_vga_i2c *ddc;
- struct device *dev;
- void __iomem *mmio;
- struct clk *i2c_wclk;
- struct zx_vga_pwrctrl pwrctrl;
- struct completion complete;
- bool connected;
-};
-
-#define to_zx_vga(x) container_of(x, struct zx_vga, x)
-
-static void zx_vga_encoder_enable(struct drm_encoder *encoder)
-{
- struct zx_vga *vga = to_zx_vga(encoder);
- struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl;
-
- /* Set bit to power up VGA DACs */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask,
- pwrctrl->mask);
-
- vou_inf_enable(VOU_VGA, encoder->crtc);
-}
-
-static void zx_vga_encoder_disable(struct drm_encoder *encoder)
-{
- struct zx_vga *vga = to_zx_vga(encoder);
- struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl;
-
- vou_inf_disable(VOU_VGA, encoder->crtc);
-
- /* Clear bit to power down VGA DACs */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0);
-}
-
-static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = {
- .enable = zx_vga_encoder_enable,
- .disable = zx_vga_encoder_disable,
-};
-
-static int zx_vga_connector_get_modes(struct drm_connector *connector)
-{
- struct zx_vga *vga = to_zx_vga(connector);
- struct edid *edid;
- int ret;
-
- /*
-	 * Clear both detection bits to switch the I2C bus from device
-	 * detection to EDID reading.
- */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, 0);
-
- edid = drm_get_edid(connector, &vga->ddc->adap);
- if (!edid) {
- /*
-		 * If EDID reading fails, we set the device state to
-		 * disconnected. Locking is not required here, since the
- * VGA_AUTO_DETECT_SEL register write in irq handler cannot
- * be triggered when both detection bits are cleared as above.
- */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL,
- VGA_DETECT_SEL_NO_DEVICE);
- vga->connected = false;
- return 0;
- }
-
- /*
-	 * As EDID reading succeeded, the device must be connected, so we
-	 * set up the detection bit for the unplug interrupt here.
- */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_HAS_DEVICE);
-
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return ret;
-}
-
-static enum drm_mode_status
-zx_vga_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static struct drm_connector_helper_funcs zx_vga_connector_helper_funcs = {
- .get_modes = zx_vga_connector_get_modes,
- .mode_valid = zx_vga_connector_mode_valid,
-};
-
-static enum drm_connector_status
-zx_vga_connector_detect(struct drm_connector *connector, bool force)
-{
- struct zx_vga *vga = to_zx_vga(connector);
-
- return vga->connected ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static const struct drm_connector_funcs zx_vga_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = zx_vga_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
-{
- struct drm_encoder *encoder = &vga->encoder;
- struct drm_connector *connector = &vga->connector;
- struct device *dev = vga->dev;
- int ret;
-
- encoder->possible_crtcs = VOU_CRTC_MASK;
-
- ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret);
- return ret;
- }
-
- drm_encoder_helper_add(encoder, &zx_vga_encoder_helper_funcs);
-
- vga->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- ret = drm_connector_init_with_ddc(drm, connector,
- &zx_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &vga->ddc->adap);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init connector: %d\n", ret);
- goto clean_encoder;
- }
-
- drm_connector_helper_add(connector, &zx_vga_connector_helper_funcs);
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to attach encoder: %d\n", ret);
- goto clean_connector;
- }
-
- return 0;
-
-clean_connector:
- drm_connector_cleanup(connector);
-clean_encoder:
- drm_encoder_cleanup(encoder);
- return ret;
-}
-
-static int zx_vga_pwrctrl_init(struct zx_vga *vga)
-{
- struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl;
- struct device *dev = vga->dev;
- struct of_phandle_args out_args;
- struct regmap *regmap;
- int ret;
-
- ret = of_parse_phandle_with_fixed_args(dev->of_node,
- "zte,vga-power-control", 2, 0, &out_args);
- if (ret)
- return ret;
-
- regmap = syscon_node_to_regmap(out_args.np);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto out;
- }
-
- pwrctrl->regmap = regmap;
- pwrctrl->reg = out_args.args[0];
- pwrctrl->mask = out_args.args[1];
-
-out:
- of_node_put(out_args.np);
- return ret;
-}
-
-static int zx_vga_i2c_read(struct zx_vga *vga, struct i2c_msg *msg)
-{
- int len = msg->len;
- u8 *buf = msg->buf;
- u32 offset = 0;
- int i;
-
- reinit_completion(&vga->complete);
-
- /* Select combo write */
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_COMBO, VGA_CMD_COMBO);
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_RW, 0);
-
- while (len > 0) {
- u32 cnt;
-
- /* Clear RX FIFO */
- zx_writel_mask(vga->mmio + VGA_RXF_CTRL, VGA_RX_FIFO_CLEAR,
- VGA_RX_FIFO_CLEAR);
-
- /* Data offset to read from */
- zx_writel(vga->mmio + VGA_SUB_ADDR, offset);
-
- /* Kick off the transfer */
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_TRANS,
- VGA_CMD_TRANS);
-
- if (!wait_for_completion_timeout(&vga->complete,
- msecs_to_jiffies(1000))) {
- DRM_DEV_ERROR(vga->dev, "transfer timeout\n");
- return -ETIMEDOUT;
- }
-
- cnt = zx_readl(vga->mmio + VGA_RXF_STATUS);
- cnt = (cnt & VGA_RXF_COUNT_MASK) >> VGA_RXF_COUNT_SHIFT;
- /* FIFO status may report more data than we need to read */
- cnt = min_t(u32, len, cnt);
-
- for (i = 0; i < cnt; i++)
- *buf++ = zx_readl(vga->mmio + VGA_DATA);
-
- len -= cnt;
- offset += cnt;
- }
-
- return 0;
-}
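(For reference, drm_get_edid() drives this adapter with the standard DDC pattern, a one-byte offset write followed by a block read, which land in zx_vga_i2c_write() and zx_vga_i2c_read() respectively. A minimal sketch of such a transfer, buffer size illustrative:

	u8 start = 0;
	u8 buf[EDID_LENGTH];	/* 128 bytes, from <drm/drm_edid.h> */
	struct i2c_msg msgs[] = {
		{ .addr = DDC_ADDR, .flags = 0,        .len = 1,           .buf = &start },
		{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = sizeof(buf), .buf = buf },
	};

	/* returns the number of messages (2) on success */
	ret = i2c_transfer(&vga->ddc->adap, msgs, ARRAY_SIZE(msgs));
)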
-
-static int zx_vga_i2c_write(struct zx_vga *vga, struct i2c_msg *msg)
-{
- /*
- * The DDC I2C adapter is only for reading EDID data, so we assume
- * that the write to this adapter must be the EDID data offset.
- */
-	if (msg->len != 1 || msg->addr != DDC_ADDR)
- return -EINVAL;
-
- /* Hardware will take care of the slave address shifting */
- zx_writel(vga->mmio + VGA_DEVICE_ADDR, msg->addr);
-
- return 0;
-}
-
-static int zx_vga_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct zx_vga *vga = i2c_get_adapdata(adap);
- struct zx_vga_i2c *ddc = vga->ddc;
- int ret = 0;
- int i;
-
- mutex_lock(&ddc->lock);
-
- for (i = 0; i < num; i++) {
- if (msgs[i].flags & I2C_M_RD)
- ret = zx_vga_i2c_read(vga, &msgs[i]);
- else
- ret = zx_vga_i2c_write(vga, &msgs[i]);
-
- if (ret < 0)
- break;
- }
-
- if (!ret)
- ret = num;
-
- mutex_unlock(&ddc->lock);
-
- return ret;
-}
-
-static u32 zx_vga_i2c_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm zx_vga_algorithm = {
- .master_xfer = zx_vga_i2c_xfer,
- .functionality = zx_vga_i2c_func,
-};
-
-static int zx_vga_ddc_register(struct zx_vga *vga)
-{
- struct device *dev = vga->dev;
- struct i2c_adapter *adap;
- struct zx_vga_i2c *ddc;
- int ret;
-
- ddc = devm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL);
- if (!ddc)
- return -ENOMEM;
-
- vga->ddc = ddc;
- mutex_init(&ddc->lock);
-
- adap = &ddc->adap;
- adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DDC;
- adap->dev.parent = dev;
- adap->algo = &zx_vga_algorithm;
- snprintf(adap->name, sizeof(adap->name), "zx vga i2c");
-
- ret = i2c_add_adapter(adap);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to add I2C adapter: %d\n", ret);
- return ret;
- }
-
- i2c_set_adapdata(adap, vga);
-
- return 0;
-}
-
-static irqreturn_t zx_vga_irq_thread(int irq, void *dev_id)
-{
- struct zx_vga *vga = dev_id;
-
- drm_helper_hpd_irq_event(vga->connector.dev);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t zx_vga_irq_handler(int irq, void *dev_id)
-{
- struct zx_vga *vga = dev_id;
- u32 status;
-
- status = zx_readl(vga->mmio + VGA_I2C_STATUS);
-
- /* Clear interrupt status */
- zx_writel_mask(vga->mmio + VGA_I2C_STATUS, VGA_CLEAR_IRQ,
- VGA_CLEAR_IRQ);
-
- if (status & VGA_DEVICE_CONNECTED) {
- /*
-		 * The VGA_DETECT_SEL bits need to be cleared to switch the DDC
-		 * bus from device detection to EDID reading, so rather than
-		 * setting the HAS_DEVICE bit here, we do that in the .get_modes
-		 * hook to detect unplug once the EDID read succeeds.
- */
- vga->connected = true;
- return IRQ_WAKE_THREAD;
- }
-
- if (status & VGA_DEVICE_DISCONNECTED) {
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL,
- VGA_DETECT_SEL_NO_DEVICE);
- vga->connected = false;
- return IRQ_WAKE_THREAD;
- }
-
- if (status & VGA_TRANS_DONE) {
- complete(&vga->complete);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
-static void zx_vga_hw_init(struct zx_vga *vga)
-{
- unsigned long ref = clk_get_rate(vga->i2c_wclk);
- int div;
-
- /*
- * Set up I2C fast speed divider per formula below to get 400kHz.
- * scl = ref / ((div + 1) * 4)
- */
- div = DIV_ROUND_UP(ref / 1000, 400 * 4) - 1;
- zx_writel(vga->mmio + VGA_CLK_DIV_FS, div);
-
- /* Set up device detection */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_PARA, 0x80);
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_NO_DEVICE);
-
- /*
-	 * We need to poke the monitor via the DDC bus to get the
-	 * connection irq working.
- */
- zx_writel(vga->mmio + VGA_DEVICE_ADDR, DDC_ADDR);
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_TRANS, VGA_CMD_TRANS);
-}
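(To sanity-check the divider arithmetic with a hypothetical 100 MHz i2c_wclk — the real rate comes from the clock tree, not this file:

	/* ref = 100 MHz, so ref / 1000 = 100000 kHz */
	div = DIV_ROUND_UP(100000, 400 * 4) - 1;	/* 63 - 1 = 62 */
	/* scl = 100000000 / ((62 + 1) * 4) = ~396.8 kHz, just under 400 kHz */
)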
-
-static int zx_vga_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct resource *res;
- struct zx_vga *vga;
- int irq;
- int ret;
-
- vga = devm_kzalloc(dev, sizeof(*vga), GFP_KERNEL);
- if (!vga)
- return -ENOMEM;
-
- vga->dev = dev;
- dev_set_drvdata(dev, vga);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vga->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(vga->mmio))
- return PTR_ERR(vga->mmio);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- vga->i2c_wclk = devm_clk_get(dev, "i2c_wclk");
- if (IS_ERR(vga->i2c_wclk)) {
- ret = PTR_ERR(vga->i2c_wclk);
- DRM_DEV_ERROR(dev, "failed to get i2c_wclk: %d\n", ret);
- return ret;
- }
-
- ret = zx_vga_pwrctrl_init(vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret);
- return ret;
- }
-
- ret = zx_vga_ddc_register(vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
- return ret;
- }
-
- ret = zx_vga_register(drm, vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register vga: %d\n", ret);
- return ret;
- }
-
- init_completion(&vga->complete);
-
- ret = devm_request_threaded_irq(dev, irq, zx_vga_irq_handler,
- zx_vga_irq_thread, IRQF_SHARED,
- dev_name(dev), vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(vga->i2c_wclk);
- if (ret)
- return ret;
-
- zx_vga_hw_init(vga);
-
- return 0;
-}
-
-static void zx_vga_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct zx_vga *vga = dev_get_drvdata(dev);
-
- clk_disable_unprepare(vga->i2c_wclk);
-}
-
-static const struct component_ops zx_vga_component_ops = {
- .bind = zx_vga_bind,
- .unbind = zx_vga_unbind,
-};
-
-static int zx_vga_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_vga_component_ops);
-}
-
-static int zx_vga_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_vga_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_vga_of_match[] = {
- { .compatible = "zte,zx296718-vga", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_vga_of_match);
-
-struct platform_driver zx_vga_driver = {
- .probe = zx_vga_probe,
- .remove = zx_vga_remove,
- .driver = {
- .name = "zx-vga",
- .of_match_table = zx_vga_of_match,
- },
-};
diff --git a/drivers/gpu/drm/zte/zx_vga_regs.h b/drivers/gpu/drm/zte/zx_vga_regs.h
deleted file mode 100644
index 1e8825ae70a5..000000000000
--- a/drivers/gpu/drm/zte/zx_vga_regs.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#ifndef __ZX_VGA_REGS_H__
-#define __ZX_VGA_REGS_H__
-
-#define VGA_CMD_CFG 0x04
-#define VGA_CMD_TRANS BIT(6)
-#define VGA_CMD_COMBO BIT(5)
-#define VGA_CMD_RW BIT(4)
-#define VGA_SUB_ADDR 0x0c
-#define VGA_DEVICE_ADDR 0x10
-#define VGA_CLK_DIV_FS 0x14
-#define VGA_RXF_CTRL 0x20
-#define VGA_RX_FIFO_CLEAR BIT(7)
-#define VGA_DATA 0x24
-#define VGA_I2C_STATUS 0x28
-#define VGA_DEVICE_DISCONNECTED BIT(7)
-#define VGA_DEVICE_CONNECTED BIT(6)
-#define VGA_CLEAR_IRQ BIT(4)
-#define VGA_TRANS_DONE BIT(0)
-#define VGA_RXF_STATUS 0x30
-#define VGA_RXF_COUNT_SHIFT 2
-#define VGA_RXF_COUNT_MASK GENMASK(7, 2)
-#define VGA_AUTO_DETECT_PARA 0x34
-#define VGA_AUTO_DETECT_SEL 0x38
-#define VGA_DETECT_SEL_HAS_DEVICE BIT(1)
-#define VGA_DETECT_SEL_NO_DEVICE BIT(0)
-
-#endif /* __ZX_VGA_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
deleted file mode 100644
index 904f62f3bfc1..000000000000
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ /dev/null
@@ -1,921 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-
-#include <video/videomode.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
-
-#include "zx_common_regs.h"
-#include "zx_drm_drv.h"
-#include "zx_plane.h"
-#include "zx_vou.h"
-#include "zx_vou_regs.h"
-
-#define GL_NUM 2
-#define VL_NUM 3
-
-enum vou_chn_type {
- VOU_CHN_MAIN,
- VOU_CHN_AUX,
-};
-
-struct zx_crtc_regs {
- u32 fir_active;
- u32 fir_htiming;
- u32 fir_vtiming;
- u32 sec_vtiming;
- u32 timing_shift;
- u32 timing_pi_shift;
-};
-
-static const struct zx_crtc_regs main_crtc_regs = {
- .fir_active = FIR_MAIN_ACTIVE,
- .fir_htiming = FIR_MAIN_H_TIMING,
- .fir_vtiming = FIR_MAIN_V_TIMING,
- .sec_vtiming = SEC_MAIN_V_TIMING,
- .timing_shift = TIMING_MAIN_SHIFT,
- .timing_pi_shift = TIMING_MAIN_PI_SHIFT,
-};
-
-static const struct zx_crtc_regs aux_crtc_regs = {
- .fir_active = FIR_AUX_ACTIVE,
- .fir_htiming = FIR_AUX_H_TIMING,
- .fir_vtiming = FIR_AUX_V_TIMING,
- .sec_vtiming = SEC_AUX_V_TIMING,
- .timing_shift = TIMING_AUX_SHIFT,
- .timing_pi_shift = TIMING_AUX_PI_SHIFT,
-};
-
-struct zx_crtc_bits {
- u32 polarity_mask;
- u32 polarity_shift;
- u32 int_frame_mask;
- u32 tc_enable;
- u32 sec_vactive_shift;
- u32 sec_vactive_mask;
- u32 interlace_select;
- u32 pi_enable;
- u32 div_vga_shift;
- u32 div_pic_shift;
- u32 div_tvenc_shift;
- u32 div_hdmi_pnx_shift;
- u32 div_hdmi_shift;
- u32 div_inf_shift;
- u32 div_layer_shift;
-};
-
-static const struct zx_crtc_bits main_crtc_bits = {
- .polarity_mask = MAIN_POL_MASK,
- .polarity_shift = MAIN_POL_SHIFT,
- .int_frame_mask = TIMING_INT_MAIN_FRAME,
- .tc_enable = MAIN_TC_EN,
- .sec_vactive_shift = SEC_VACT_MAIN_SHIFT,
- .sec_vactive_mask = SEC_VACT_MAIN_MASK,
- .interlace_select = MAIN_INTERLACE_SEL,
- .pi_enable = MAIN_PI_EN,
- .div_vga_shift = VGA_MAIN_DIV_SHIFT,
- .div_pic_shift = PIC_MAIN_DIV_SHIFT,
- .div_tvenc_shift = TVENC_MAIN_DIV_SHIFT,
- .div_hdmi_pnx_shift = HDMI_MAIN_PNX_DIV_SHIFT,
- .div_hdmi_shift = HDMI_MAIN_DIV_SHIFT,
- .div_inf_shift = INF_MAIN_DIV_SHIFT,
- .div_layer_shift = LAYER_MAIN_DIV_SHIFT,
-};
-
-static const struct zx_crtc_bits aux_crtc_bits = {
- .polarity_mask = AUX_POL_MASK,
- .polarity_shift = AUX_POL_SHIFT,
- .int_frame_mask = TIMING_INT_AUX_FRAME,
- .tc_enable = AUX_TC_EN,
- .sec_vactive_shift = SEC_VACT_AUX_SHIFT,
- .sec_vactive_mask = SEC_VACT_AUX_MASK,
- .interlace_select = AUX_INTERLACE_SEL,
- .pi_enable = AUX_PI_EN,
- .div_vga_shift = VGA_AUX_DIV_SHIFT,
- .div_pic_shift = PIC_AUX_DIV_SHIFT,
- .div_tvenc_shift = TVENC_AUX_DIV_SHIFT,
- .div_hdmi_pnx_shift = HDMI_AUX_PNX_DIV_SHIFT,
- .div_hdmi_shift = HDMI_AUX_DIV_SHIFT,
- .div_inf_shift = INF_AUX_DIV_SHIFT,
- .div_layer_shift = LAYER_AUX_DIV_SHIFT,
-};
-
-struct zx_crtc {
- struct drm_crtc crtc;
- struct drm_plane *primary;
- struct zx_vou_hw *vou;
- void __iomem *chnreg;
- void __iomem *chncsc;
- void __iomem *dither;
- const struct zx_crtc_regs *regs;
- const struct zx_crtc_bits *bits;
- enum vou_chn_type chn_type;
- struct clk *pixclk;
-};
-
-#define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc)
-
-struct vou_layer_bits {
- u32 enable;
- u32 chnsel;
- u32 clksel;
-};
-
-static const struct vou_layer_bits zx_gl_bits[GL_NUM] = {
- {
- .enable = OSD_CTRL0_GL0_EN,
- .chnsel = OSD_CTRL0_GL0_SEL,
- .clksel = VOU_CLK_GL0_SEL,
- }, {
- .enable = OSD_CTRL0_GL1_EN,
- .chnsel = OSD_CTRL0_GL1_SEL,
- .clksel = VOU_CLK_GL1_SEL,
- },
-};
-
-static const struct vou_layer_bits zx_vl_bits[VL_NUM] = {
- {
- .enable = OSD_CTRL0_VL0_EN,
- .chnsel = OSD_CTRL0_VL0_SEL,
- .clksel = VOU_CLK_VL0_SEL,
- }, {
- .enable = OSD_CTRL0_VL1_EN,
- .chnsel = OSD_CTRL0_VL1_SEL,
- .clksel = VOU_CLK_VL1_SEL,
- }, {
- .enable = OSD_CTRL0_VL2_EN,
- .chnsel = OSD_CTRL0_VL2_SEL,
- .clksel = VOU_CLK_VL2_SEL,
- },
-};
-
-struct zx_vou_hw {
- struct device *dev;
- void __iomem *osd;
- void __iomem *timing;
- void __iomem *vouctl;
- void __iomem *otfppu;
- void __iomem *dtrc;
- struct clk *axi_clk;
- struct clk *ppu_clk;
- struct clk *main_clk;
- struct clk *aux_clk;
- struct zx_crtc *main_crtc;
- struct zx_crtc *aux_crtc;
-};
-
-enum vou_inf_data_sel {
- VOU_YUV444 = 0,
- VOU_RGB_101010 = 1,
- VOU_RGB_888 = 2,
- VOU_RGB_666 = 3,
-};
-
-struct vou_inf {
- enum vou_inf_id id;
- enum vou_inf_data_sel data_sel;
- u32 clocks_en_bits;
- u32 clocks_sel_bits;
-};
-
-static struct vou_inf vou_infs[] = {
- [VOU_HDMI] = {
- .data_sel = VOU_YUV444,
- .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
- .clocks_sel_bits = BIT(13) | BIT(2),
- },
- [VOU_TV_ENC] = {
- .data_sel = VOU_YUV444,
- .clocks_en_bits = BIT(15),
- .clocks_sel_bits = BIT(11) | BIT(0),
- },
- [VOU_VGA] = {
- .data_sel = VOU_RGB_888,
- .clocks_en_bits = BIT(1),
- .clocks_sel_bits = BIT(10),
- },
-};
-
-static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
-
- return zcrtc->vou;
-}
-
-void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc,
- enum vou_inf_hdmi_audio aud)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
-
- zx_writel_mask(vou->vouctl + VOU_INF_HDMI_CTRL, VOU_HDMI_AUD_MASK, aud);
-}
-
-void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- struct vou_inf *inf = &vou_infs[id];
- void __iomem *dither = zcrtc->dither;
- void __iomem *csc = zcrtc->chncsc;
- bool is_main = zcrtc->chn_type == VOU_CHN_MAIN;
- u32 data_sel_shift = id << 1;
-
- if (inf->data_sel != VOU_YUV444) {
- /* Enable channel CSC for RGB output */
- zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
- CSC_BT709_IMAGE_YCBCR2RGB << CSC_COV_MODE_SHIFT);
- zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE,
- CSC_WORK_ENABLE);
-
- /* Bypass Dither block for RGB output */
- zx_writel_mask(dither + OSD_DITHER_CTRL0, DITHER_BYSPASS,
- DITHER_BYSPASS);
- } else {
- zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, 0);
- zx_writel_mask(dither + OSD_DITHER_CTRL0, DITHER_BYSPASS, 0);
- }
-
- /* Select data format */
- zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift,
- inf->data_sel << data_sel_shift);
-
- /* Select channel */
- zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << id,
- zcrtc->chn_type << id);
-
- /* Select interface clocks */
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits,
- is_main ? 0 : inf->clocks_sel_bits);
-
- /* Enable interface clocks */
- zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits,
- inf->clocks_en_bits);
-
- /* Enable the device */
- zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 1 << id);
-}
-
-void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc)
-{
- struct zx_vou_hw *vou = crtc_to_vou(crtc);
- struct vou_inf *inf = &vou_infs[id];
-
- /* Disable the device */
- zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 0);
-
- /* Disable interface clocks */
- zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0);
-}
-
-void zx_vou_config_dividers(struct drm_crtc *crtc,
- struct vou_div_config *configs, int num)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- const struct zx_crtc_bits *bits = zcrtc->bits;
- int i;
-
- /* Clear update flag bit */
- zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE, 0);
-
- for (i = 0; i < num; i++) {
- struct vou_div_config *cfg = configs + i;
- u32 reg, shift;
-
- switch (cfg->id) {
- case VOU_DIV_VGA:
- reg = VOU_CLK_SEL;
- shift = bits->div_vga_shift;
- break;
- case VOU_DIV_PIC:
- reg = VOU_CLK_SEL;
- shift = bits->div_pic_shift;
- break;
- case VOU_DIV_TVENC:
- reg = VOU_DIV_PARA;
- shift = bits->div_tvenc_shift;
- break;
- case VOU_DIV_HDMI_PNX:
- reg = VOU_DIV_PARA;
- shift = bits->div_hdmi_pnx_shift;
- break;
- case VOU_DIV_HDMI:
- reg = VOU_DIV_PARA;
- shift = bits->div_hdmi_shift;
- break;
- case VOU_DIV_INF:
- reg = VOU_DIV_PARA;
- shift = bits->div_inf_shift;
- break;
- case VOU_DIV_LAYER:
- reg = VOU_DIV_PARA;
- shift = bits->div_layer_shift;
- break;
- default:
- continue;
- }
-
- /* Each divider occupies 3 bits */
- zx_writel_mask(vou->vouctl + reg, 0x7 << shift,
- cfg->val << shift);
- }
-
- /* Set update flag bit to get dividers effected */
- zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE,
- DIV_PARA_UPDATE);
-}
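(Callers pass zx_vou_config_dividers() an array of {id, val} pairs built from the enums in zx_vou.h; a hypothetical encoder setup might look like:

	struct vou_div_config configs[] = {
		{ VOU_DIV_VGA,   VOU_DIV_1 },
		{ VOU_DIV_PIC,   VOU_DIV_1 },
		{ VOU_DIV_LAYER, VOU_DIV_2 },
	};

	zx_vou_config_dividers(crtc, configs, ARRAY_SIZE(configs));
)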
-
-static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
-{
- zx_writel(zcrtc->chnreg + CHN_UPDATE, 1);
-}
-
-static void zx_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- const struct zx_crtc_regs *regs = zcrtc->regs;
- const struct zx_crtc_bits *bits = zcrtc->bits;
- struct videomode vm;
- u32 scan_mask;
- u32 pol = 0;
- u32 val;
- int ret;
-
- drm_display_mode_to_videomode(mode, &vm);
-
- /* Set up timing parameters */
- val = V_ACTIVE((interlaced ? vm.vactive / 2 : vm.vactive) - 1);
- val |= H_ACTIVE(vm.hactive - 1);
- zx_writel(vou->timing + regs->fir_active, val);
-
- val = SYNC_WIDE(vm.hsync_len - 1);
- val |= BACK_PORCH(vm.hback_porch - 1);
- val |= FRONT_PORCH(vm.hfront_porch - 1);
- zx_writel(vou->timing + regs->fir_htiming, val);
-
- val = SYNC_WIDE(vm.vsync_len - 1);
- val |= BACK_PORCH(vm.vback_porch - 1);
- val |= FRONT_PORCH(vm.vfront_porch - 1);
- zx_writel(vou->timing + regs->fir_vtiming, val);
-
- if (interlaced) {
- u32 shift = bits->sec_vactive_shift;
- u32 mask = bits->sec_vactive_mask;
-
- val = zx_readl(vou->timing + SEC_V_ACTIVE);
- val &= ~mask;
- val |= ((vm.vactive / 2 - 1) << shift) & mask;
- zx_writel(vou->timing + SEC_V_ACTIVE, val);
-
- val = SYNC_WIDE(vm.vsync_len - 1);
- /*
-		 * The vback_porch of the second field needs to be one line
-		 * longer than that of the first field.
- */
- val |= BACK_PORCH(vm.vback_porch);
- val |= FRONT_PORCH(vm.vfront_porch - 1);
- zx_writel(vou->timing + regs->sec_vtiming, val);
- }
-
- /* Set up polarities */
- if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
- pol |= 1 << POL_VSYNC_SHIFT;
- if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
- pol |= 1 << POL_HSYNC_SHIFT;
-
- zx_writel_mask(vou->timing + TIMING_CTRL, bits->polarity_mask,
- pol << bits->polarity_shift);
-
-	/* Set up the SHIFT register, following what the ZTE BSP does */
- val = H_SHIFT_VAL;
- if (interlaced)
- val |= V_SHIFT_VAL << 16;
- zx_writel(vou->timing + regs->timing_shift, val);
- zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL);
-
- /* Progressive or interlace scan select */
- scan_mask = bits->interlace_select | bits->pi_enable;
- zx_writel_mask(vou->timing + SCAN_CTRL, scan_mask,
- interlaced ? scan_mask : 0);
-
- /* Enable TIMING_CTRL */
- zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable,
- bits->tc_enable);
-
- /* Configure channel screen size */
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_W_MASK,
- vm.hactive << CHN_SCREEN_W_SHIFT);
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK,
- vm.vactive << CHN_SCREEN_H_SHIFT);
-
- /* Configure channel interlace buffer control */
- zx_writel_mask(zcrtc->chnreg + CHN_INTERLACE_BUF_CTRL, CHN_INTERLACE_EN,
- interlaced ? CHN_INTERLACE_EN : 0);
-
- /* Update channel */
- vou_chn_set_update(zcrtc);
-
- /* Enable channel */
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE);
-
- drm_crtc_vblank_on(crtc);
-
- ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000);
- if (ret) {
- DRM_DEV_ERROR(vou->dev, "failed to set pixclk rate: %d\n", ret);
- return;
- }
-
- ret = clk_prepare_enable(zcrtc->pixclk);
- if (ret)
- DRM_DEV_ERROR(vou->dev, "failed to enable pixclk: %d\n", ret);
-}
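(As a worked example of the packing above, take a progressive 1280x720@60 mode with standard CEA timings — illustrative values, not taken from this driver: hsync/hback/hfront = 40/220/110 pixels, vsync/vback/vfront = 5/20/5 lines:

	u32 active  = V_ACTIVE(720 - 1) | H_ACTIVE(1280 - 1);
	u32 htiming = SYNC_WIDE(40 - 1) | BACK_PORCH(220 - 1) | FRONT_PORCH(110 - 1);
	u32 vtiming = SYNC_WIDE(5 - 1)  | BACK_PORCH(20 - 1)  | FRONT_PORCH(5 - 1);
)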
-
-static void zx_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- const struct zx_crtc_bits *bits = zcrtc->bits;
- struct zx_vou_hw *vou = zcrtc->vou;
-
- clk_disable_unprepare(zcrtc->pixclk);
-
- drm_crtc_vblank_off(crtc);
-
- /* Disable channel */
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0);
-
- /* Disable TIMING_CTRL */
- zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable, 0);
-}
-
-static void zx_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct drm_pending_vblank_event *event = crtc->state->event;
-
- if (!event)
- return;
-
- crtc->state->event = NULL;
-
- spin_lock_irq(&crtc->dev->event_lock);
- if (drm_crtc_vblank_get(crtc) == 0)
- drm_crtc_arm_vblank_event(crtc, event);
- else
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&crtc->dev->event_lock);
-}
-
-static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = {
- .atomic_flush = zx_crtc_atomic_flush,
- .atomic_enable = zx_crtc_atomic_enable,
- .atomic_disable = zx_crtc_atomic_disable,
-};
-
-static int zx_vou_enable_vblank(struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = crtc_to_vou(crtc);
- u32 int_frame_mask = zcrtc->bits->int_frame_mask;
-
- zx_writel_mask(vou->timing + TIMING_INT_CTRL, int_frame_mask,
- int_frame_mask);
-
- return 0;
-}
-
-static void zx_vou_disable_vblank(struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = crtc_to_vou(crtc);
-
- zx_writel_mask(vou->timing + TIMING_INT_CTRL,
- zcrtc->bits->int_frame_mask, 0);
-}
-
-static const struct drm_crtc_funcs zx_crtc_funcs = {
- .destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
- .enable_vblank = zx_vou_enable_vblank,
- .disable_vblank = zx_vou_disable_vblank,
-};
-
-static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
- enum vou_chn_type chn_type)
-{
- struct device *dev = vou->dev;
- struct zx_plane *zplane;
- struct zx_crtc *zcrtc;
- int ret;
-
- zcrtc = devm_kzalloc(dev, sizeof(*zcrtc), GFP_KERNEL);
- if (!zcrtc)
- return -ENOMEM;
-
- zcrtc->vou = vou;
- zcrtc->chn_type = chn_type;
-
- zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
- if (!zplane)
- return -ENOMEM;
-
- zplane->dev = dev;
-
- if (chn_type == VOU_CHN_MAIN) {
- zplane->layer = vou->osd + MAIN_GL_OFFSET;
- zplane->csc = vou->osd + MAIN_GL_CSC_OFFSET;
- zplane->hbsc = vou->osd + MAIN_HBSC_OFFSET;
- zplane->rsz = vou->otfppu + MAIN_RSZ_OFFSET;
- zplane->bits = &zx_gl_bits[0];
- zcrtc->chnreg = vou->osd + OSD_MAIN_CHN;
- zcrtc->chncsc = vou->osd + MAIN_CHN_CSC_OFFSET;
- zcrtc->dither = vou->osd + MAIN_DITHER_OFFSET;
- zcrtc->regs = &main_crtc_regs;
- zcrtc->bits = &main_crtc_bits;
- } else {
- zplane->layer = vou->osd + AUX_GL_OFFSET;
- zplane->csc = vou->osd + AUX_GL_CSC_OFFSET;
- zplane->hbsc = vou->osd + AUX_HBSC_OFFSET;
- zplane->rsz = vou->otfppu + AUX_RSZ_OFFSET;
- zplane->bits = &zx_gl_bits[1];
- zcrtc->chnreg = vou->osd + OSD_AUX_CHN;
- zcrtc->chncsc = vou->osd + AUX_CHN_CSC_OFFSET;
- zcrtc->dither = vou->osd + AUX_DITHER_OFFSET;
- zcrtc->regs = &aux_crtc_regs;
- zcrtc->bits = &aux_crtc_bits;
- }
-
- zcrtc->pixclk = devm_clk_get(dev, (chn_type == VOU_CHN_MAIN) ?
- "main_wclk" : "aux_wclk");
- if (IS_ERR(zcrtc->pixclk)) {
- ret = PTR_ERR(zcrtc->pixclk);
- DRM_DEV_ERROR(dev, "failed to get pix clk: %d\n", ret);
- return ret;
- }
-
- ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_PRIMARY);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret);
- return ret;
- }
-
- zcrtc->primary = &zplane->plane;
-
- ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL,
- &zx_crtc_funcs, NULL);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init drm crtc: %d\n", ret);
- return ret;
- }
-
- drm_crtc_helper_add(&zcrtc->crtc, &zx_crtc_helper_funcs);
-
- if (chn_type == VOU_CHN_MAIN)
- vou->main_crtc = zcrtc;
- else
- vou->aux_crtc = zcrtc;
-
- return 0;
-}
-
-void zx_vou_layer_enable(struct drm_plane *plane)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(plane->state->crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- struct zx_plane *zplane = to_zx_plane(plane);
- const struct vou_layer_bits *bits = zplane->bits;
-
- if (zcrtc->chn_type == VOU_CHN_MAIN) {
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel, 0);
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel, 0);
- } else {
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel,
- bits->chnsel);
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel,
- bits->clksel);
- }
-
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable);
-}
-
-void zx_vou_layer_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(old_state->crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- struct zx_plane *zplane = to_zx_plane(plane);
- const struct vou_layer_bits *bits = zplane->bits;
-
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, 0);
-}
-
-static void zx_overlay_init(struct drm_device *drm, struct zx_vou_hw *vou)
-{
- struct device *dev = vou->dev;
- struct zx_plane *zplane;
- int i;
- int ret;
-
- /*
-	 * VL0 has some scaling quirks that need special handling.
-	 * Leave it out for now.
- */
- for (i = 1; i < VL_NUM; i++) {
- zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
- if (!zplane) {
- DRM_DEV_ERROR(dev, "failed to allocate zplane %d\n", i);
- return;
- }
-
- zplane->layer = vou->osd + OSD_VL_OFFSET(i);
- zplane->hbsc = vou->osd + HBSC_VL_OFFSET(i);
- zplane->rsz = vou->otfppu + RSZ_VL_OFFSET(i);
- zplane->bits = &zx_vl_bits[i];
-
- ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_OVERLAY);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init overlay %d\n", i);
- continue;
- }
- }
-}
-
-static inline void zx_osd_int_update(struct zx_crtc *zcrtc)
-{
- struct drm_crtc *crtc = &zcrtc->crtc;
- struct drm_plane *plane;
-
- vou_chn_set_update(zcrtc);
-
- drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask)
- zx_plane_set_update(plane);
-}
-
-static irqreturn_t vou_irq_handler(int irq, void *dev_id)
-{
- struct zx_vou_hw *vou = dev_id;
- u32 state;
-
- /* Handle TIMING_CTRL frame interrupts */
- state = zx_readl(vou->timing + TIMING_INT_STATE);
- zx_writel(vou->timing + TIMING_INT_STATE, state);
-
- if (state & TIMING_INT_MAIN_FRAME)
- drm_crtc_handle_vblank(&vou->main_crtc->crtc);
-
- if (state & TIMING_INT_AUX_FRAME)
- drm_crtc_handle_vblank(&vou->aux_crtc->crtc);
-
- /* Handle OSD interrupts */
- state = zx_readl(vou->osd + OSD_INT_STA);
- zx_writel(vou->osd + OSD_INT_CLRSTA, state);
-
- if (state & OSD_INT_MAIN_UPT)
- zx_osd_int_update(vou->main_crtc);
-
- if (state & OSD_INT_AUX_UPT)
- zx_osd_int_update(vou->aux_crtc);
-
- if (state & OSD_INT_ERROR)
- DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state);
-
- return IRQ_HANDLED;
-}
-
-static void vou_dtrc_init(struct zx_vou_hw *vou)
-{
- /* Clear bit for bypass by ID */
- zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL,
- TILE2RASTESCAN_BYPASS_MODE, 0);
-
- /* Select ARIDR mode */
- zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL, DETILE_ARIDR_MODE_MASK,
- DETILE_ARID_IN_ARIDR);
-
- /* Bypass decompression for both frames */
- zx_writel_mask(vou->dtrc + DTRC_F0_CTRL, DTRC_DECOMPRESS_BYPASS,
- DTRC_DECOMPRESS_BYPASS);
- zx_writel_mask(vou->dtrc + DTRC_F1_CTRL, DTRC_DECOMPRESS_BYPASS,
- DTRC_DECOMPRESS_BYPASS);
-
- /* Set up ARID register */
- zx_writel(vou->dtrc + DTRC_ARID, DTRC_ARID3(0xf) | DTRC_ARID2(0xe) |
- DTRC_ARID1(0xf) | DTRC_ARID0(0xe));
-}
-
-static void vou_hw_init(struct zx_vou_hw *vou)
-{
- /* Release reset for all VOU modules */
- zx_writel(vou->vouctl + VOU_SOFT_RST, ~0);
-
- /* Enable all VOU module clocks */
- zx_writel(vou->vouctl + VOU_CLK_EN, ~0);
-
- /* Clear both OSD and TIMING_CTRL interrupt state */
- zx_writel(vou->osd + OSD_INT_CLRSTA, ~0);
- zx_writel(vou->timing + TIMING_INT_STATE, ~0);
-
-	/* Enable OSD and TIMING_CTRL interrupts */
- zx_writel(vou->osd + OSD_INT_MSK, OSD_INT_ENABLE);
- zx_writel(vou->timing + TIMING_INT_CTRL, TIMING_INT_ENABLE);
-
- /* Select GPC as input to gl/vl scaler as a sane default setting */
- zx_writel(vou->otfppu + OTFPPU_RSZ_DATA_SOURCE, 0x2a);
-
- /*
-	 * The channel and layer logic needs to be reset at the start
-	 * of each frame for the VOU to work properly.
- */
- zx_writel_mask(vou->osd + OSD_RST_CLR, RST_PER_FRAME, RST_PER_FRAME);
-
- vou_dtrc_init(vou);
-}
-
-static int zx_crtc_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct zx_vou_hw *vou;
- struct resource *res;
- int irq;
- int ret;
-
- vou = devm_kzalloc(dev, sizeof(*vou), GFP_KERNEL);
- if (!vou)
- return -ENOMEM;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "osd");
- vou->osd = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->osd)) {
- ret = PTR_ERR(vou->osd);
- DRM_DEV_ERROR(dev, "failed to remap osd region: %d\n", ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "timing_ctrl");
- vou->timing = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->timing)) {
- ret = PTR_ERR(vou->timing);
- DRM_DEV_ERROR(dev, "failed to remap timing_ctrl region: %d\n",
- ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dtrc");
- vou->dtrc = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->dtrc)) {
- ret = PTR_ERR(vou->dtrc);
- DRM_DEV_ERROR(dev, "failed to remap dtrc region: %d\n", ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vou_ctrl");
- vou->vouctl = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->vouctl)) {
- ret = PTR_ERR(vou->vouctl);
- DRM_DEV_ERROR(dev, "failed to remap vou_ctrl region: %d\n",
- ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otfppu");
- vou->otfppu = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->otfppu)) {
- ret = PTR_ERR(vou->otfppu);
- DRM_DEV_ERROR(dev, "failed to remap otfppu region: %d\n", ret);
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- vou->axi_clk = devm_clk_get(dev, "aclk");
- if (IS_ERR(vou->axi_clk)) {
- ret = PTR_ERR(vou->axi_clk);
- DRM_DEV_ERROR(dev, "failed to get axi_clk: %d\n", ret);
- return ret;
- }
-
- vou->ppu_clk = devm_clk_get(dev, "ppu_wclk");
- if (IS_ERR(vou->ppu_clk)) {
- ret = PTR_ERR(vou->ppu_clk);
- DRM_DEV_ERROR(dev, "failed to get ppu_clk: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(vou->axi_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable axi_clk: %d\n", ret);
- return ret;
- }
-
-	ret = clk_prepare_enable(vou->ppu_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable ppu_clk: %d\n", ret);
- goto disable_axi_clk;
- }
-
- vou->dev = dev;
- dev_set_drvdata(dev, vou);
-
- vou_hw_init(vou);
-
- ret = devm_request_irq(dev, irq, vou_irq_handler, 0, "zx_vou", vou);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to request vou irq: %d\n", ret);
- goto disable_ppu_clk;
- }
-
- ret = zx_crtc_init(drm, vou, VOU_CHN_MAIN);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init main channel crtc: %d\n",
- ret);
- goto disable_ppu_clk;
- }
-
- ret = zx_crtc_init(drm, vou, VOU_CHN_AUX);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init aux channel crtc: %d\n",
- ret);
- goto disable_ppu_clk;
- }
-
- zx_overlay_init(drm, vou);
-
- return 0;
-
-disable_ppu_clk:
- clk_disable_unprepare(vou->ppu_clk);
-disable_axi_clk:
- clk_disable_unprepare(vou->axi_clk);
- return ret;
-}
-
-static void zx_crtc_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct zx_vou_hw *vou = dev_get_drvdata(dev);
-
- clk_disable_unprepare(vou->axi_clk);
- clk_disable_unprepare(vou->ppu_clk);
-}
-
-static const struct component_ops zx_crtc_component_ops = {
- .bind = zx_crtc_bind,
- .unbind = zx_crtc_unbind,
-};
-
-static int zx_crtc_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_crtc_component_ops);
-}
-
-static int zx_crtc_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_crtc_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_crtc_of_match[] = {
- { .compatible = "zte,zx296718-dpc", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_crtc_of_match);
-
-struct platform_driver zx_crtc_driver = {
- .probe = zx_crtc_probe,
- .remove = zx_crtc_remove,
- .driver = {
- .name = "zx-crtc",
- .of_match_table = zx_crtc_of_match,
- },
-};
diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h
deleted file mode 100644
index b25f34f865ae..000000000000
--- a/drivers/gpu/drm/zte/zx_vou.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_VOU_H__
-#define __ZX_VOU_H__
-
-#define VOU_CRTC_MASK 0x3
-
-/* VOU output interfaces */
-enum vou_inf_id {
- VOU_HDMI = 0,
- VOU_RGB_LCD = 1,
- VOU_TV_ENC = 2,
- VOU_MIPI_DSI = 3,
- VOU_LVDS = 4,
- VOU_VGA = 5,
-};
-
-enum vou_inf_hdmi_audio {
- VOU_HDMI_AUD_SPDIF = BIT(0),
- VOU_HDMI_AUD_I2S = BIT(1),
- VOU_HDMI_AUD_DSD = BIT(2),
- VOU_HDMI_AUD_HBR = BIT(3),
- VOU_HDMI_AUD_PARALLEL = BIT(4),
-};
-
-void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc,
- enum vou_inf_hdmi_audio aud);
-void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc);
-void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc);
-
-enum vou_div_id {
- VOU_DIV_VGA,
- VOU_DIV_PIC,
- VOU_DIV_TVENC,
- VOU_DIV_HDMI_PNX,
- VOU_DIV_HDMI,
- VOU_DIV_INF,
- VOU_DIV_LAYER,
-};
-
-enum vou_div_val {
- VOU_DIV_1 = 0,
- VOU_DIV_2 = 1,
- VOU_DIV_4 = 3,
- VOU_DIV_8 = 7,
-};
-
-struct vou_div_config {
- enum vou_div_id id;
- enum vou_div_val val;
-};
-
-void zx_vou_config_dividers(struct drm_crtc *crtc,
- struct vou_div_config *configs, int num);
-
-void zx_vou_layer_enable(struct drm_plane *plane);
-void zx_vou_layer_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state);
-
-#endif /* __ZX_VOU_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou_regs.h b/drivers/gpu/drm/zte/zx_vou_regs.h
deleted file mode 100644
index 2ddb199cb912..000000000000
--- a/drivers/gpu/drm/zte/zx_vou_regs.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_VOU_REGS_H__
-#define __ZX_VOU_REGS_H__
-
-/* Sub-module offset */
-#define MAIN_GL_OFFSET 0x130
-#define MAIN_GL_CSC_OFFSET 0x580
-#define MAIN_CHN_CSC_OFFSET 0x6c0
-#define MAIN_HBSC_OFFSET 0x820
-#define MAIN_DITHER_OFFSET 0x960
-#define MAIN_RSZ_OFFSET 0x600 /* OTFPPU sub-module */
-
-#define AUX_GL_OFFSET 0x200
-#define AUX_GL_CSC_OFFSET 0x5d0
-#define AUX_CHN_CSC_OFFSET 0x710
-#define AUX_HBSC_OFFSET 0x860
-#define AUX_DITHER_OFFSET 0x970
-#define AUX_RSZ_OFFSET 0x800
-
-#define OSD_VL0_OFFSET 0x040
-#define OSD_VL_OFFSET(i) (OSD_VL0_OFFSET + 0x050 * (i))
-
-#define HBSC_VL0_OFFSET 0x760
-#define HBSC_VL_OFFSET(i) (HBSC_VL0_OFFSET + 0x040 * (i))
-
-#define RSZ_VL1_U0 0xa00
-#define RSZ_VL_OFFSET(i) (RSZ_VL1_U0 + 0x200 * (i))
-
-/* OSD (GPC_GLOBAL) registers */
-#define OSD_INT_STA 0x04
-#define OSD_INT_CLRSTA 0x08
-#define OSD_INT_MSK 0x0c
-#define OSD_INT_AUX_UPT BIT(14)
-#define OSD_INT_MAIN_UPT BIT(13)
-#define OSD_INT_GL1_LBW BIT(10)
-#define OSD_INT_GL0_LBW BIT(9)
-#define OSD_INT_VL2_LBW BIT(8)
-#define OSD_INT_VL1_LBW BIT(7)
-#define OSD_INT_VL0_LBW BIT(6)
-#define OSD_INT_BUS_ERR BIT(3)
-#define OSD_INT_CFG_ERR BIT(2)
-#define OSD_INT_ERROR (\
- OSD_INT_GL1_LBW | OSD_INT_GL0_LBW | \
- OSD_INT_VL2_LBW | OSD_INT_VL1_LBW | OSD_INT_VL0_LBW | \
- OSD_INT_BUS_ERR | OSD_INT_CFG_ERR \
-)
-#define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT)
-#define OSD_CTRL0 0x10
-#define OSD_CTRL0_VL0_EN BIT(13)
-#define OSD_CTRL0_VL0_SEL BIT(12)
-#define OSD_CTRL0_VL1_EN BIT(11)
-#define OSD_CTRL0_VL1_SEL BIT(10)
-#define OSD_CTRL0_VL2_EN BIT(9)
-#define OSD_CTRL0_VL2_SEL BIT(8)
-#define OSD_CTRL0_GL0_EN BIT(7)
-#define OSD_CTRL0_GL0_SEL BIT(6)
-#define OSD_CTRL0_GL1_EN BIT(5)
-#define OSD_CTRL0_GL1_SEL BIT(4)
-#define OSD_RST_CLR 0x1c
-#define RST_PER_FRAME BIT(19)
-
-/* Main/Aux channel registers */
-#define OSD_MAIN_CHN 0x470
-#define OSD_AUX_CHN 0x4d0
-#define CHN_CTRL0 0x00
-#define CHN_ENABLE BIT(0)
-#define CHN_CTRL1 0x04
-#define CHN_SCREEN_W_SHIFT 18
-#define CHN_SCREEN_W_MASK (0x1fff << CHN_SCREEN_W_SHIFT)
-#define CHN_SCREEN_H_SHIFT 5
-#define CHN_SCREEN_H_MASK (0x1fff << CHN_SCREEN_H_SHIFT)
-#define CHN_UPDATE 0x08
-#define CHN_INTERLACE_BUF_CTRL 0x24
-#define CHN_INTERLACE_EN BIT(2)
-
-/* Dither registers */
-#define OSD_DITHER_CTRL0 0x00
-#define DITHER_BYSPASS BIT(31)
-
-/* TIMING_CTRL registers */
-#define TIMING_TC_ENABLE 0x04
-#define AUX_TC_EN BIT(1)
-#define MAIN_TC_EN BIT(0)
-#define FIR_MAIN_ACTIVE 0x08
-#define FIR_AUX_ACTIVE 0x0c
-#define V_ACTIVE_SHIFT 16
-#define V_ACTIVE_MASK (0xffff << V_ACTIVE_SHIFT)
-#define H_ACTIVE_SHIFT 0
-#define H_ACTIVE_MASK (0xffff << H_ACTIVE_SHIFT)
-#define FIR_MAIN_H_TIMING 0x10
-#define FIR_MAIN_V_TIMING 0x14
-#define FIR_AUX_H_TIMING 0x18
-#define FIR_AUX_V_TIMING 0x1c
-#define SYNC_WIDE_SHIFT 22
-#define SYNC_WIDE_MASK (0x3ff << SYNC_WIDE_SHIFT)
-#define BACK_PORCH_SHIFT 11
-#define BACK_PORCH_MASK (0x7ff << BACK_PORCH_SHIFT)
-#define FRONT_PORCH_SHIFT 0
-#define FRONT_PORCH_MASK (0x7ff << FRONT_PORCH_SHIFT)
-#define TIMING_CTRL 0x20
-#define AUX_POL_SHIFT 3
-#define AUX_POL_MASK (0x7 << AUX_POL_SHIFT)
-#define MAIN_POL_SHIFT 0
-#define MAIN_POL_MASK (0x7 << MAIN_POL_SHIFT)
-#define POL_DE_SHIFT 2
-#define POL_VSYNC_SHIFT 1
-#define POL_HSYNC_SHIFT 0
-#define TIMING_INT_CTRL 0x24
-#define TIMING_INT_STATE 0x28
-#define TIMING_INT_AUX_FRAME BIT(3)
-#define TIMING_INT_MAIN_FRAME BIT(1)
-#define TIMING_INT_AUX_FRAME_SEL_VSW (0x2 << 10)
-#define TIMING_INT_MAIN_FRAME_SEL_VSW (0x2 << 6)
-#define TIMING_INT_ENABLE (\
- TIMING_INT_MAIN_FRAME_SEL_VSW | TIMING_INT_AUX_FRAME_SEL_VSW | \
- TIMING_INT_MAIN_FRAME | TIMING_INT_AUX_FRAME \
-)
-#define TIMING_MAIN_SHIFT 0x2c
-#define TIMING_AUX_SHIFT 0x30
-#define H_SHIFT_VAL 0x0048
-#define V_SHIFT_VAL 0x0001
-#define SCAN_CTRL 0x34
-#define AUX_PI_EN BIT(19)
-#define MAIN_PI_EN BIT(18)
-#define AUX_INTERLACE_SEL BIT(1)
-#define MAIN_INTERLACE_SEL BIT(0)
-#define SEC_V_ACTIVE 0x38
-#define SEC_VACT_MAIN_SHIFT 0
-#define SEC_VACT_MAIN_MASK (0xffff << SEC_VACT_MAIN_SHIFT)
-#define SEC_VACT_AUX_SHIFT 16
-#define SEC_VACT_AUX_MASK (0xffff << SEC_VACT_AUX_SHIFT)
-#define SEC_MAIN_V_TIMING 0x3c
-#define SEC_AUX_V_TIMING 0x40
-#define TIMING_MAIN_PI_SHIFT 0x68
-#define TIMING_AUX_PI_SHIFT 0x6c
-#define H_PI_SHIFT_VAL 0x000f
-
-#define V_ACTIVE(x) (((x) << V_ACTIVE_SHIFT) & V_ACTIVE_MASK)
-#define H_ACTIVE(x) (((x) << H_ACTIVE_SHIFT) & H_ACTIVE_MASK)
-
-#define SYNC_WIDE(x) (((x) << SYNC_WIDE_SHIFT) & SYNC_WIDE_MASK)
-#define BACK_PORCH(x) (((x) << BACK_PORCH_SHIFT) & BACK_PORCH_MASK)
-#define FRONT_PORCH(x) (((x) << FRONT_PORCH_SHIFT) & FRONT_PORCH_MASK)
-
-/* DTRC registers */
-#define DTRC_F0_CTRL 0x2c
-#define DTRC_F1_CTRL 0x5c
-#define DTRC_DECOMPRESS_BYPASS BIT(17)
-#define DTRC_DETILE_CTRL 0x68
-#define TILE2RASTESCAN_BYPASS_MODE BIT(30)
-#define DETILE_ARIDR_MODE_MASK (0x3 << 0)
-#define DETILE_ARID_ALL 0
-#define DETILE_ARID_IN_ARIDR 1
-#define DETILE_ARID_BYP_BUT_ARIDR 2
-#define DETILE_ARID_IN_ARIDR2 3
-#define DTRC_ARID 0x6c
-#define DTRC_ARID3_SHIFT 24
-#define DTRC_ARID3_MASK (0xff << DTRC_ARID3_SHIFT)
-#define DTRC_ARID2_SHIFT 16
-#define DTRC_ARID2_MASK (0xff << DTRC_ARID2_SHIFT)
-#define DTRC_ARID1_SHIFT 8
-#define DTRC_ARID1_MASK (0xff << DTRC_ARID1_SHIFT)
-#define DTRC_ARID0_SHIFT 0
-#define DTRC_ARID0_MASK (0xff << DTRC_ARID0_SHIFT)
-#define DTRC_DEC2DDR_ARID 0x70
-
-#define DTRC_ARID3(x) (((x) << DTRC_ARID3_SHIFT) & DTRC_ARID3_MASK)
-#define DTRC_ARID2(x) (((x) << DTRC_ARID2_SHIFT) & DTRC_ARID2_MASK)
-#define DTRC_ARID1(x) (((x) << DTRC_ARID1_SHIFT) & DTRC_ARID1_MASK)
-#define DTRC_ARID0(x) (((x) << DTRC_ARID0_SHIFT) & DTRC_ARID0_MASK)
-
-/* VOU_CTRL registers */
-#define VOU_INF_EN 0x00
-#define VOU_INF_CH_SEL 0x04
-#define VOU_INF_DATA_SEL 0x08
-#define VOU_SOFT_RST 0x14
-#define VOU_CLK_SEL 0x18
-#define VGA_AUX_DIV_SHIFT 29
-#define VGA_MAIN_DIV_SHIFT 26
-#define PIC_MAIN_DIV_SHIFT 23
-#define PIC_AUX_DIV_SHIFT 20
-#define VOU_CLK_VL2_SEL BIT(8)
-#define VOU_CLK_VL1_SEL BIT(7)
-#define VOU_CLK_VL0_SEL BIT(6)
-#define VOU_CLK_GL1_SEL BIT(5)
-#define VOU_CLK_GL0_SEL BIT(4)
-#define VOU_DIV_PARA 0x1c
-#define DIV_PARA_UPDATE BIT(31)
-#define TVENC_AUX_DIV_SHIFT 28
-#define HDMI_AUX_PNX_DIV_SHIFT 25
-#define HDMI_MAIN_PNX_DIV_SHIFT 22
-#define HDMI_AUX_DIV_SHIFT 19
-#define HDMI_MAIN_DIV_SHIFT 16
-#define TVENC_MAIN_DIV_SHIFT 13
-#define INF_AUX_DIV_SHIFT 9
-#define INF_MAIN_DIV_SHIFT 6
-#define LAYER_AUX_DIV_SHIFT 3
-#define LAYER_MAIN_DIV_SHIFT 0
-#define VOU_CLK_REQEN 0x20
-#define VOU_CLK_EN 0x24
-#define VOU_INF_HDMI_CTRL 0x30
-#define VOU_HDMI_AUD_MASK 0x1f
-
-/* OTFPPU_CTRL registers */
-#define OTFPPU_RSZ_DATA_SOURCE 0x04
-
-#endif /* __ZX_VOU_REGS_H__ */
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 8ae301eef643..a9639d098893 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -259,10 +259,24 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_UYVY8_1X16:
+ if (mbus_type == V4L2_MBUS_BT656) {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ } else {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+ }
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ break;
case MEDIA_BUS_FMT_YUYV8_1X16:
- cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ if (mbus_type == V4L2_MBUS_BT656) {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ } else {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+ }
cfg->mipi_dt = MIPI_DT_YUV422;
- cfg->data_width = IPU_CSI_DATA_WIDTH_16;
break;
case MEDIA_BUS_FMT_SBGGR8_1X8:
case MEDIA_BUS_FMT_SGBRG8_1X8:
@@ -332,7 +346,7 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
const struct v4l2_mbus_config *mbus_cfg,
const struct v4l2_mbus_framefmt *mbus_fmt)
{
- int ret;
+ int ret, is_bt1120;
memset(csicfg, 0, sizeof(*csicfg));
@@ -353,11 +367,18 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
break;
case V4L2_MBUS_BT656:
csicfg->ext_vsync = 0;
+ /* UYVY10_1X20 etc. should be supported as well */
+ is_bt1120 = mbus_fmt->code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+ mbus_fmt->code == MEDIA_BUS_FMT_YUYV8_1X16;
if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
mbus_fmt->field == V4L2_FIELD_ALTERNATE)
- csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
+ csicfg->clk_mode = is_bt1120 ?
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR :
+ IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
else
- csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
+ csicfg->clk_mode = is_bt1120 ?
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR :
+ IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
break;
case V4L2_MBUS_CSI2_DPHY:
/*
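(With this change, a 16-bit YUV media bus code on the BT.656 bus is treated as BT.1120 and selects the SDR clock modes. Condensed into a hypothetical helper — name and signature are not from the source:

	static int bt656_clk_mode(u32 code, u32 field)
	{
		bool bt1120 = code == MEDIA_BUS_FMT_UYVY8_1X16 ||
			      code == MEDIA_BUS_FMT_YUYV8_1X16;
		bool ilaced = V4L2_FIELD_HAS_BOTH(field) ||
			      field == V4L2_FIELD_ALTERNATE;

		if (ilaced)
			return bt1120 ? IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR
				      : IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
		return bt1120 ? IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR
			      : IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
	}
)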
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 477baa30889c..ece147d1a278 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -129,10 +129,12 @@ struct cp2112_xfer_status_report {
struct cp2112_string_report {
u8 dummy; /* force .string to be aligned */
- u8 report; /* CP2112_*_STRING */
- u8 length; /* length in bytes of everyting after .report */
- u8 type; /* USB_DT_STRING */
- wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */
+ struct_group_attr(contents, __packed,
+ u8 report; /* CP2112_*_STRING */
+ u8 length; /* length in bytes of everything after .report */
+ u8 type; /* USB_DT_STRING */
+ wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */
+ );
} __packed;
/* Number of times to request transfer status before giving up waiting for a
@@ -986,8 +988,8 @@ static ssize_t pstr_show(struct device *kdev,
u8 length;
int ret;
- ret = cp2112_hid_get(hdev, attr->report, &report.report,
- sizeof(report) - 1, HID_FEATURE_REPORT);
+ ret = cp2112_hid_get(hdev, attr->report, (u8 *)&report.contents,
+ sizeof(report.contents), HID_FEATURE_REPORT);
if (ret < 3) {
hid_err(hdev, "error reading %s string: %d\n", kattr->attr.name,
ret);
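(struct_group_attr() — from <linux/stddef.h> — mirrors a run of members in an anonymous union so the run can be addressed as one object, which keeps FORTIFY_SOURCE happy about the multi-member read above. A minimal illustration with a made-up struct:

	struct demo {
		u8 dummy;
		struct_group(payload,
			u8 report;
			u8 length;
		);
	} __packed;

	struct demo d;
	memset(&d.payload, 0, sizeof(d.payload));	/* spans report and length only */
)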
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 1ca64481145e..ea17abc7ad52 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -857,7 +857,7 @@ static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
memcpy(&kone->last_mouse_event, event,
sizeof(struct kone_mouse_event));
else
- memset(&event->tilt, 0, 5);
+ memset(&event->wipe, 0, sizeof(event->wipe));
kone_keep_values_up_to_date(kone, event);
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index 4a1a9cb76b08..65c800e3addc 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -152,11 +152,13 @@ struct kone_mouse_event {
uint16_t x;
uint16_t y;
uint8_t wheel; /* up = 1, down = -1 */
- uint8_t tilt; /* right = 1, left = -1 */
- uint8_t unknown;
- uint8_t event;
- uint8_t value; /* press = 0, release = 1 */
- uint8_t macro_key; /* 0 to 8 */
+ struct_group(wipe,
+ uint8_t tilt; /* right = 1, left = -1 */
+ uint8_t unknown;
+ uint8_t event;
+ uint8_t value; /* press = 0, release = 1 */
+ uint8_t macro_key; /* 0 to 8 */
+ );
} __attribute__ ((__packed__));
enum kone_mouse_events {
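(The grouped run replaces the old hard-coded length of 5 in kone_raw_event(); an illustrative compile-time check that the group still spans exactly those five bytes:

	BUILD_BUG_ON(sizeof_field(struct kone_mouse_event, wipe) != 5);
)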
diff --git a/drivers/hid/surface-hid/surface_hid.c b/drivers/hid/surface-hid/surface_hid.c
index a3a70e4f3f6c..d4aa8c81903a 100644
--- a/drivers/hid/surface-hid/surface_hid.c
+++ b/drivers/hid/surface-hid/surface_hid.c
@@ -209,7 +209,7 @@ static int surface_hid_probe(struct ssam_device *sdev)
shid->notif.base.priority = 1;
shid->notif.base.fn = ssam_hid_event_fn;
- shid->notif.event.reg = SSAM_EVENT_REGISTRY_REG;
+ shid->notif.event.reg = SSAM_EVENT_REGISTRY_REG(sdev->uid.target);
shid->notif.event.id.target_category = sdev->uid.category;
shid->notif.event.id.instance = sdev->uid.instance;
shid->notif.event.mask = SSAM_EVENT_MASK_STRICT;
@@ -230,7 +230,7 @@ static void surface_hid_remove(struct ssam_device *sdev)
}
static const struct ssam_device_id surface_hid_match[] = {
- { SSAM_SDEV(HID, 0x02, SSAM_ANY_IID, 0x00) },
+ { SSAM_SDEV(HID, SSAM_ANY_TID, SSAM_ANY_IID, 0x00) },
{ },
};
MODULE_DEVICE_TABLE(ssam, surface_hid_match);
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 96d0eccca3aa..21f11a5b965b 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -1055,14 +1055,16 @@ static const struct net_device_ops ssip_pn_ops = {
static void ssip_pn_setup(struct net_device *dev)
{
+ static const u8 addr = PN_MEDIA_SOS;
+
dev->features = 0;
dev->netdev_ops = &ssip_pn_ops;
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = SSIP_DEFAULT_MTU;
dev->hard_header_len = 1;
- dev->dev_addr[0] = PN_MEDIA_SOS;
dev->addr_len = 1;
+ dev_addr_set(dev, &addr);
dev->tx_queue_len = SSIP_TXQUEUE_LEN;
dev->needs_free_netdev = true;
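(netdev->dev_addr is transitioning to const, so drivers must write it through dev_addr_set(), which copies dev->addr_len bytes — a single byte here. The equivalent for a full Ethernet address would be, roughly:

	static const u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	/* addr_len is ETH_ALEN after ether_setup() */
	dev_addr_set(dev, mac);	/* replaces memcpy(dev->dev_addr, mac, ETH_ALEN) */
)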
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index d1123ceb38f3..dd12af20e467 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -8,6 +8,7 @@ config HYPERV
|| (ARM64 && !CPU_BIG_ENDIAN))
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
+ select VMAP_PFN
help
Select this option to run Linux as a Hyper-V client operating
system.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index f3761c73b074..dc5c35210c16 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -17,6 +17,7 @@
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
+#include <linux/set_memory.h>
#include <asm/page.h>
#include <asm/mshyperv.h>
@@ -456,7 +457,7 @@ nomem:
static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
enum hv_gpadl_type type, void *kbuffer,
u32 size, u32 send_offset,
- u32 *gpadl_handle)
+ struct vmbus_gpadl *gpadl)
{
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
@@ -474,6 +475,15 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
if (ret)
return ret;
+ ret = set_memory_decrypted((unsigned long)kbuffer,
+ PFN_UP(size));
+ if (ret) {
+ dev_warn(&channel->device_obj->device,
+ "Failed to set host visibility for new GPADL %d.\n",
+ ret);
+ return ret;
+ }
+
init_completion(&msginfo->waitevent);
msginfo->waiting_channel = channel;
@@ -537,7 +547,10 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
}
/* At this point, we received the gpadl created msg */
- *gpadl_handle = gpadlmsg->gpadl;
+ gpadl->gpadl_handle = gpadlmsg->gpadl;
+ gpadl->buffer = kbuffer;
+ gpadl->size = size;
+
cleanup:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
@@ -549,6 +562,11 @@ cleanup:
}
kfree(msginfo);
+
+ if (ret)
+ set_memory_encrypted((unsigned long)kbuffer,
+ PFN_UP(size));
+
return ret;
}
@@ -561,10 +579,10 @@ cleanup:
 * @gpadl: GPADL descriptor to be filled in on success
*/
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
- u32 size, u32 *gpadl_handle)
+ u32 size, struct vmbus_gpadl *gpadl)
{
return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
- 0U, gpadl_handle);
+ 0U, gpadl);
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
@@ -665,17 +683,8 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (!newchannel->max_pkt_size)
newchannel->max_pkt_size = VMBUS_DEFAULT_MAX_PKT_SIZE;
- err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages, 0);
- if (err)
- goto error_clean_ring;
-
- err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
- recv_pages, newchannel->max_pkt_size);
- if (err)
- goto error_clean_ring;
-
/* Establish the gpadl for the ring buffer */
- newchannel->ringbuffer_gpadlhandle = 0;
+ newchannel->ringbuffer_gpadlhandle.gpadl_handle = 0;
err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
page_address(newchannel->ringbuffer_page),
@@ -685,6 +694,16 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (err)
goto error_clean_ring;
+ err = hv_ringbuffer_init(&newchannel->outbound,
+ page, send_pages, 0);
+ if (err)
+ goto error_free_gpadl;
+
+ err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
+ recv_pages, newchannel->max_pkt_size);
+ if (err)
+ goto error_free_gpadl;
+
/* Create and init the channel open message */
open_info = kzalloc(sizeof(*open_info) +
sizeof(struct vmbus_channel_open_channel),
@@ -701,7 +720,8 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
open_msg->openid = newchannel->offermsg.child_relid;
open_msg->child_relid = newchannel->offermsg.child_relid;
- open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
+ open_msg->ringbuffer_gpadlhandle
+ = newchannel->ringbuffer_gpadlhandle.gpadl_handle;
/*
* The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
* the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
@@ -759,8 +779,7 @@ error_clean_msglist:
error_free_info:
kfree(open_info);
error_free_gpadl:
- vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
- newchannel->ringbuffer_gpadlhandle = 0;
+ vmbus_teardown_gpadl(newchannel, &newchannel->ringbuffer_gpadlhandle);
error_clean_ring:
hv_ringbuffer_cleanup(&newchannel->outbound);
hv_ringbuffer_cleanup(&newchannel->inbound);
@@ -806,7 +825,7 @@ EXPORT_SYMBOL_GPL(vmbus_open);
/*
* vmbus_teardown_gpadl -Teardown the specified GPADL handle
*/
-int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+int vmbus_teardown_gpadl(struct vmbus_channel *channel, struct vmbus_gpadl *gpadl)
{
struct vmbus_channel_gpadl_teardown *msg;
struct vmbus_channel_msginfo *info;
@@ -825,7 +844,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
msg->child_relid = channel->offermsg.child_relid;
- msg->gpadl = gpadl_handle;
+ msg->gpadl = gpadl->gpadl_handle;
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&info->msglistentry,
@@ -845,6 +864,8 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
wait_for_completion(&info->waitevent);
+ gpadl->gpadl_handle = 0;
+
post_msg_err:
/*
* If the channel has been rescinded;
@@ -859,6 +880,12 @@ post_msg_err:
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
kfree(info);
+
+ ret = set_memory_encrypted((unsigned long)gpadl->buffer,
+ PFN_UP(gpadl->size));
+ if (ret)
+ pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
@@ -933,9 +960,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
}
/* Tear down the gpadl for the channel's ring buffer */
- else if (channel->ringbuffer_gpadlhandle) {
- ret = vmbus_teardown_gpadl(channel,
- channel->ringbuffer_gpadlhandle);
+ else if (channel->ringbuffer_gpadlhandle.gpadl_handle) {
+ ret = vmbus_teardown_gpadl(channel, &channel->ringbuffer_gpadlhandle);
if (ret) {
pr_err("Close failed: teardown gpadl return %d\n", ret);
/*
@@ -943,8 +969,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* it is perhaps better to leak memory.
*/
}
-
- channel->ringbuffer_gpadlhandle = 0;
}
if (!ret)
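(With a GPADL now described by struct vmbus_gpadl — handle, buffer, size — rather than a bare u32 handle, teardown has enough context to re-encrypt the pages. A hedged sketch of the driver-side lifecycle, buf/size illustrative:

	struct vmbus_gpadl gpadl = {};
	int ret;

	ret = vmbus_establish_gpadl(channel, buf, size, &gpadl);
	if (ret)
		return ret;

	/* ... hand gpadl.gpadl_handle to the host and run I/O ... */

	vmbus_teardown_gpadl(channel, &gpadl);	/* also re-encrypts buf */
)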
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 142308526ec6..2829575fd9b7 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -1581,21 +1581,6 @@ cleanup:
return ret;
}
-static void invoke_sc_cb(struct vmbus_channel *primary_channel)
-{
- struct list_head *cur, *tmp;
- struct vmbus_channel *cur_channel;
-
- if (primary_channel->sc_creation_callback == NULL)
- return;
-
- list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
- cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
-
- primary_channel->sc_creation_callback(cur_channel);
- }
-}
-
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
@@ -1603,25 +1588,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
-bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
-{
- bool ret;
-
- ret = !list_empty(&primary->sc_list);
-
- if (ret) {
- /*
- * Invoke the callback on sub-channel creation.
- * This will present a uniform interface to the
- * clients.
- */
- invoke_sc_cb(primary);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
-
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *))
{
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 5e479d54918c..a3d8be8d6cfb 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -19,6 +19,8 @@
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/set_memory.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -102,8 +104,9 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID;
}
- msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
- msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+ msg->monitor_page1 = vmbus_connection.monitor_pages_pa[0];
+ msg->monitor_page2 = vmbus_connection.monitor_pages_pa[1];
+
msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
/*
@@ -216,6 +219,65 @@ int vmbus_connect(void)
goto cleanup;
}
+ vmbus_connection.monitor_pages_original[0]
+ = vmbus_connection.monitor_pages[0];
+ vmbus_connection.monitor_pages_original[1]
+ = vmbus_connection.monitor_pages[1];
+ vmbus_connection.monitor_pages_pa[0]
+ = virt_to_phys(vmbus_connection.monitor_pages[0]);
+ vmbus_connection.monitor_pages_pa[1]
+ = virt_to_phys(vmbus_connection.monitor_pages[1]);
+
+ if (hv_is_isolation_supported()) {
+ ret = set_memory_decrypted((unsigned long)
+ vmbus_connection.monitor_pages[0],
+ 1);
+ ret |= set_memory_decrypted((unsigned long)
+ vmbus_connection.monitor_pages[1],
+ 1);
+ if (ret)
+ goto cleanup;
+
+ /*
+ * An Isolation VM with AMD SNP must access the monitor pages via
+ * the address space above the shared GPA boundary.
+ */
+ if (hv_isolation_type_snp()) {
+ vmbus_connection.monitor_pages_pa[0] +=
+ ms_hyperv.shared_gpa_boundary;
+ vmbus_connection.monitor_pages_pa[1] +=
+ ms_hyperv.shared_gpa_boundary;
+
+ vmbus_connection.monitor_pages[0]
+ = memremap(vmbus_connection.monitor_pages_pa[0],
+ HV_HYP_PAGE_SIZE,
+ MEMREMAP_WB);
+ if (!vmbus_connection.monitor_pages[0]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ vmbus_connection.monitor_pages[1]
+ = memremap(vmbus_connection.monitor_pages_pa[1],
+ HV_HYP_PAGE_SIZE,
+ MEMREMAP_WB);
+ if (!vmbus_connection.monitor_pages[1]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
+
+ /*
+ * The hvcall that sets memory host visibility smears the page
+ * contents, so zero the monitor pages here.
+ */
+ memset(vmbus_connection.monitor_pages[0], 0x00,
+ HV_HYP_PAGE_SIZE);
+ memset(vmbus_connection.monitor_pages[1], 0x00,
+ HV_HYP_PAGE_SIZE);
+
+ }
+
msginfo = kzalloc(sizeof(*msginfo) +
sizeof(struct vmbus_channel_initiate_contact),
GFP_KERNEL);
@@ -303,10 +365,31 @@ void vmbus_disconnect(void)
vmbus_connection.int_page = NULL;
}
- hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
- hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
- vmbus_connection.monitor_pages[0] = NULL;
- vmbus_connection.monitor_pages[1] = NULL;
+ if (hv_is_isolation_supported()) {
+ /*
+ * memunmap() internally checks whether the address came from
+ * memremap(). In a non-SNP CVM the monitor pages were never
+ * remapped, so it is a no-op there and no CVM type check is
+ * needed here.
+ */
+ memunmap(vmbus_connection.monitor_pages[0]);
+ memunmap(vmbus_connection.monitor_pages[1]);
+
+ set_memory_encrypted((unsigned long)
+ vmbus_connection.monitor_pages_original[0],
+ 1);
+ set_memory_encrypted((unsigned long)
+ vmbus_connection.monitor_pages_original[1],
+ 1);
+ }
+
+ hv_free_hyperv_page((unsigned long)
+ vmbus_connection.monitor_pages_original[0]);
+ hv_free_hyperv_page((unsigned long)
+ vmbus_connection.monitor_pages_original[1]);
+ vmbus_connection.monitor_pages_original[0] =
+ vmbus_connection.monitor_pages[0] = NULL;
+ vmbus_connection.monitor_pages_original[1] =
+ vmbus_connection.monitor_pages[1] = NULL;
}
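The SNP branch of vmbus_connect() above reduces to one pattern: decrypt the monitor pages in place, then re-map them through the address range above ms_hyperv.shared_gpa_boundary and zero the now-smeared contents. A condensed sketch of that flow for one page, using only calls from these hunks, with error cleanup elided:

static void *snp_monitor_page_example(void)
{
	void *pg = vmbus_connection.monitor_pages[0];
	phys_addr_t pa = virt_to_phys(pg);

	if (set_memory_decrypted((unsigned long)pg, 1))
		return NULL;			/* cleanup elided */

	if (hv_isolation_type_snp()) {
		/* SNP guests see the shared page above the GPA boundary. */
		pa += ms_hyperv.shared_gpa_boundary;
		pg = memremap(pa, HV_HYP_PAGE_SIZE, MEMREMAP_WB);
		if (!pg)
			return NULL;
	}

	/* The host-visibility hvcall smeared the page, so re-zero it. */
	memset(pg, 0x00, HV_HYP_PAGE_SIZE);
	return pg;
}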
/*
@@ -447,6 +530,10 @@ void vmbus_set_event(struct vmbus_channel *channel)
++channel->sig_events;
- hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
+ if (hv_isolation_type_snp())
+ hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
+ NULL, sizeof(channel->sig_event));
+ else
+ hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
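The same dispatch, a GHCB-routed hypercall for SNP guests and a plain fast hypercall otherwise, appears again in hv_post_message() below. Its general shape, as a sketch built only from calls these hunks already use:

static u64 signal_event_example(u64 sig_event)
{
	/* SNP isolation VMs must issue hypercalls through the GHCB page;
	 * everyone else can use the fast register-based form. */
	if (hv_isolation_type_snp())
		return hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &sig_event,
					 NULL, sizeof(sig_event));
	return hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, sig_event);
}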
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index e83507f49676..4d6480d57546 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -8,6 +8,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -97,7 +98,13 @@ int hv_post_message(union hv_connection_id connection_id,
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
- status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
+ if (hv_isolation_type_snp())
+ status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
+ (void *)aligned_msg, NULL,
+ sizeof(*aligned_msg));
+ else
+ status = hv_do_hypercall(HVCALL_POST_MESSAGE,
+ aligned_msg, NULL);
/* Preemption must remain disabled until after the hypercall
* so some other thread can't get scheduled onto this cpu and
@@ -136,17 +143,24 @@ int hv_synic_alloc(void)
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
- hv_cpu->synic_message_page =
- (void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_message_page == NULL) {
- pr_err("Unable to allocate SYNIC message page\n");
- goto err;
- }
+ /*
+ * SynIC message and event pages are allocated by the paravisor;
+ * skip allocating them here.
+ */
+ if (!hv_isolation_type_snp()) {
+ hv_cpu->synic_message_page =
+ (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->synic_message_page == NULL) {
+ pr_err("Unable to allocate SYNIC message page\n");
+ goto err;
+ }
- hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_event_page == NULL) {
- pr_err("Unable to allocate SYNIC event page\n");
- goto err;
+ hv_cpu->synic_event_page =
+ (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->synic_event_page == NULL) {
+ pr_err("Unable to allocate SYNIC event page\n");
+ goto err;
+ }
}
hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
@@ -201,16 +215,35 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Setup the Synic's message page */
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 1;
- simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
- >> HV_HYP_PAGE_SHIFT;
+
+ if (hv_isolation_type_snp()) {
+ hv_cpu->synic_message_page
+ = memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
+ HV_HYP_PAGE_SIZE, MEMREMAP_WB);
+ if (!hv_cpu->synic_message_page)
+ pr_err("Fail to map syinc message page.\n");
+ } else {
+ simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
+ >> HV_HYP_PAGE_SHIFT;
+ }
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
/* Setup the Synic's event page */
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 1;
- siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
- >> HV_HYP_PAGE_SHIFT;
+
+ if (hv_isolation_type_snp()) {
+ hv_cpu->synic_event_page =
+ memremap(siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT,
+ HV_HYP_PAGE_SIZE, MEMREMAP_WB);
+
+ if (!hv_cpu->synic_event_page)
+ pr_err("Fail to map syinc event page.\n");
+ } else {
+ siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
+ >> HV_HYP_PAGE_SHIFT;
+ }
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
@@ -257,6 +290,8 @@ int hv_synic_init(unsigned int cpu)
*/
void hv_synic_disable_regs(unsigned int cpu)
{
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
@@ -273,14 +308,27 @@ void hv_synic_disable_regs(unsigned int cpu)
shared_sint.as_uint64);
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+ /*
+ * In an Isolation VM, the SIMP and SIEFP pages are allocated
+ * by the paravisor and will also be used by the kdump kernel,
+ * so only clear the enable bit here and keep the page
+ * addresses.
+ */
simp.simp_enabled = 0;
- simp.base_simp_gpa = 0;
+ if (hv_isolation_type_snp())
+ memunmap(hv_cpu->synic_message_page);
+ else
+ simp.base_simp_gpa = 0;
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 0;
- siefp.base_siefp_gpa = 0;
+
+ if (hv_isolation_type_snp())
+ memunmap(hv_cpu->synic_event_page);
+ else
+ siefp.base_siefp_gpa = 0;
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index c0d9048a4112..7be173a99f27 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -249,6 +249,12 @@ bool __weak hv_is_isolation_supported(void)
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
+bool __weak hv_isolation_type_snp(void)
+{
+ return false;
+}
+EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
+
void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
@@ -283,3 +289,9 @@ void __weak hyperv_cleanup(void)
{
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);
+
+u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
+{
+ return HV_STATUS_INVALID_PARAMETER;
+}
+EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);
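Both additions to hv_common.c are __weak stubs: they let the architecture-independent code link on platforms without isolation support, while an architecture can supply a strong definition that overrides the stub at link time. A sketch of the pattern; the arch-side body is an assumption for illustration, not part of this patch:

/* Common code: weak default, used when no arch definition exists. */
bool __weak hv_isolation_type_snp(void)
{
	return false;
}

/* Arch code (hypothetical file): the strong symbol wins at link time. */
bool hv_isolation_type_snp(void)
{
	return hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP;
}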
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 42f3d9d123a1..3a1f007b678a 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -13,6 +13,7 @@
#define _HYPERV_VMBUS_H
#include <linux/list.h>
+#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
@@ -240,6 +241,8 @@ struct vmbus_connection {
* is child->parent notification
*/
struct hv_monitor_page *monitor_pages[2];
+ void *monitor_pages_original[2];
+ phys_addr_t monitor_pages_pa[2];
struct list_head chn_msg_list;
spinlock_t channelmsg_lock;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 314015d9e912..71efacb90965 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -17,6 +17,8 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
+#include <linux/io.h>
+#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -183,8 +185,10 @@ void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
- int i;
struct page **pages_wraparound;
+ unsigned long *pfns_wraparound;
+ u64 pfn;
+ int i;
BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
@@ -192,23 +196,50 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
* First page holds struct hv_ring_buffer, do wraparound mapping for
* the rest.
*/
- pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
- GFP_KERNEL);
- if (!pages_wraparound)
- return -ENOMEM;
+ if (hv_isolation_type_snp()) {
+ pfn = page_to_pfn(pages) +
+ PFN_DOWN(ms_hyperv.shared_gpa_boundary);
+
+ pfns_wraparound = kcalloc(page_cnt * 2 - 1,
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!pfns_wraparound)
+ return -ENOMEM;
- pages_wraparound[0] = pages;
- for (i = 0; i < 2 * (page_cnt - 1); i++)
- pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
+ pfns_wraparound[0] = pfn;
+ for (i = 0; i < 2 * (page_cnt - 1); i++)
+ pfns_wraparound[i + 1] = pfn + i % (page_cnt - 1) + 1;
- ring_info->ring_buffer = (struct hv_ring_buffer *)
- vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
+ ring_info->ring_buffer = (struct hv_ring_buffer *)
+ vmap_pfn(pfns_wraparound, page_cnt * 2 - 1,
+ PAGE_KERNEL);
+ kfree(pfns_wraparound);
- kfree(pages_wraparound);
+ if (!ring_info->ring_buffer)
+ return -ENOMEM;
+ /* Zero ring buffer after setting memory host visibility. */
+ memset(ring_info->ring_buffer, 0x00, PAGE_SIZE * page_cnt);
+ } else {
+ pages_wraparound = kcalloc(page_cnt * 2 - 1,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages_wraparound)
+ return -ENOMEM;
+
+ pages_wraparound[0] = pages;
+ for (i = 0; i < 2 * (page_cnt - 1); i++)
+ pages_wraparound[i + 1] =
+ &pages[i % (page_cnt - 1) + 1];
+
+ ring_info->ring_buffer = (struct hv_ring_buffer *)
+ vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
+ PAGE_KERNEL);
+
+ kfree(pages_wraparound);
+ if (!ring_info->ring_buffer)
+ return -ENOMEM;
+ }
- if (!ring_info->ring_buffer)
- return -ENOMEM;
ring_info->ring_buffer->read_index =
ring_info->ring_buffer->write_index = 0;
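Both branches above build the same double mapping: the data pages (everything after the header page) are mapped twice in a row, so a packet that wraps past the ring end is still virtually contiguous. For page_cnt = 4, the loop produces this slot-to-page layout:

/*
 * Wraparound layout for page_cnt = 4 (header + 3 data pages):
 *
 *   vmap slot:  0   1   2   3   4   5   6
 *   phys page:  0   1   2   3   1   2   3
 *
 * The SNP branch builds the identical pattern, but with PFNs offset
 * above ms_hyperv.shared_gpa_boundary instead of struct page pointers.
 */
pfns_wraparound[0] = pfn;			/* header page */
for (i = 0; i < 2 * (page_cnt - 1); i++)
	pfns_wraparound[i + 1] = pfn + i % (page_cnt - 1) + 1;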
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index c4578e8f34bb..64bd3dfba2c4 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1032,6 +1032,16 @@ config SENSORS_MAX31730
This driver can also be built as a module. If so, the module
will be called max31730.
+config SENSORS_MAX6620
+ tristate "Maxim MAX6620 fan controller"
+ depends on I2C
+ help
+ If you say yes here you get support for the MAX6620
+ fan controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6620.
+
config SENSORS_MAX6621
tristate "Maxim MAX6621 sensor chip"
depends on I2C
@@ -1317,7 +1327,7 @@ config SENSORS_LM90
Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6654, MAX6657, MAX6658,
MAX6659, MAX6680, MAX6681, MAX6692, MAX6695, MAX6696,
ON Semiconductor NCT1008, Winbond/Nuvoton W83L771W/G/AWG/ASG,
- Philips SA56004, GMT G781, and Texas Instruments TMP451
+ Philips SA56004, GMT G781, Texas Instruments TMP451 and TMP461
sensor chips.
This driver can also be built as a module. If so, the module
@@ -1433,6 +1443,7 @@ config SENSORS_NCT6683
config SENSORS_NCT6775
tristate "Nuvoton NCT6775F and compatibles"
depends on !PPC
+ depends on ACPI_WMI || ACPI_WMI=n
select HWMON_VID
help
If you say yes here you get support for the hardware monitoring
@@ -1930,7 +1941,7 @@ config SENSORS_TMP401
depends on I2C
help
If you say yes here you get support for Texas Instruments TMP401,
- TMP411, TMP431, TMP432, TMP435, and TMP461 temperature sensor chips.
+ TMP411, TMP431, TMP432, and TMP435 temperature sensor chips.
This driver can also be built as a module. If so, the module
will be called tmp401.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 162940270661..baee6a8d4dd1 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -135,6 +135,7 @@ obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
obj-$(CONFIG_SENSORS_MAX31730) += max31730.o
+obj-$(CONFIG_SENSORS_MAX6620) += max6620.o
obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 112dd0d9377c..8229ad30c909 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -145,7 +145,7 @@ struct abituguru3_data {
struct device *hwmon_dev; /* hwmon registered device */
struct mutex update_lock; /* protect access to data and uGuru */
unsigned short addr; /* uguru base address */
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/*
@@ -1083,7 +1083,7 @@ static struct abituguru3_data *abituguru3_update_device(struct device *dev)
mutex_lock(&data->update_lock);
if (!data->valid || time_after(jiffies, data->last_updated + HZ)) {
/* Clear data->valid while updating */
- data->valid = 0;
+ data->valid = false;
/* Read alarms */
if (abituguru3_read_increment_offset(data,
ABIT_UGURU3_SETTINGS_BANK,
@@ -1117,7 +1117,7 @@ static struct abituguru3_data *abituguru3_update_device(struct device *dev)
goto LEAVE_UPDATE;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
LEAVE_UPDATE:
mutex_unlock(&data->update_lock);
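This is the first of many hwmon conversions below from char valid to bool valid. They all touch the same caching idiom: sensor reads are served from a cache refreshed at most once per interval, and valid gates the first use. A minimal sketch of the idiom with hypothetical names (foo_data, foo_read_temp):

struct foo_data {
	struct mutex update_lock;
	bool valid;			/* true once the cache is populated */
	unsigned long last_updated;	/* in jiffies */
	int temp;			/* cached reading */
};

static struct foo_data *foo_update_device(struct foo_data *data)
{
	mutex_lock(&data->update_lock);
	if (!data->valid || time_after(jiffies, data->last_updated + HZ)) {
		data->temp = foo_read_temp();	/* hypothetical register read */
		data->last_updated = jiffies;
		data->valid = true;
	}
	mutex_unlock(&data->update_lock);
	return data;
}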
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 014505b1faf7..c405a5869581 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -535,7 +535,7 @@ static void remove_domain_devices(struct acpi_power_meter_resource *resource)
sysfs_remove_link(resource->holders_dir,
kobject_name(&obj->dev.kobj));
- put_device(&obj->dev);
+ acpi_dev_put(obj);
}
kfree(resource->domain_devices);
@@ -597,18 +597,15 @@ static int read_domain_devices(struct acpi_power_meter_resource *resource)
continue;
/* Create a symlink to domain objects */
- resource->domain_devices[i] = NULL;
- if (acpi_bus_get_device(element->reference.handle,
- &resource->domain_devices[i]))
+ obj = acpi_bus_get_acpi_device(element->reference.handle);
+ resource->domain_devices[i] = obj;
+ if (!obj)
continue;
- obj = resource->domain_devices[i];
- get_device(&obj->dev);
-
res = sysfs_create_link(resource->holders_dir, &obj->dev.kobj,
kobject_name(&obj->dev.kobj));
if (res) {
- put_device(&obj->dev);
+ acpi_dev_put(obj);
resource->domain_devices[i] = NULL;
}
}
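The acpi_power_meter change replaces an acpi_bus_get_device() lookup plus manual get_device()/put_device() with acpi_bus_get_acpi_device(), which returns the device with a reference already held; acpi_dev_put() is its counterpart. The symmetric pattern:

/* acpi_bus_get_acpi_device() takes the reference itself. */
struct acpi_device *obj = acpi_bus_get_acpi_device(handle);

if (obj) {
	/* ... use obj ... */
	acpi_dev_put(obj);	/* drop the reference taken above */
}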
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 6a765755d061..0afb89c4629d 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -37,7 +37,7 @@ static u8 AD7414_REG_LIMIT[] = { AD7414_REG_T_HIGH, AD7414_REG_T_LOW };
struct ad7414_data {
struct i2c_client *client;
struct mutex lock; /* atomic read data updates */
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long next_update; /* In jiffies */
s16 temp_input; /* Register values */
s8 temps[ARRAY_SIZE(AD7414_REG_LIMIT)];
@@ -95,7 +95,7 @@ static struct ad7414_data *ad7414_update_device(struct device *dev)
}
data->next_update = jiffies + HZ + HZ / 2;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->lock);
diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c
index d618f6b2f382..22bdb7e5b9e0 100644
--- a/drivers/hwmon/ad7418.c
+++ b/drivers/hwmon/ad7418.c
@@ -46,7 +46,7 @@ struct ad7418_data {
enum chips type;
struct mutex lock;
int adc_max; /* number of ADC channels */
- char valid;
+ bool valid;
unsigned long last_updated; /* In jiffies */
s16 temp[3]; /* Register values */
u16 in[4];
@@ -111,14 +111,14 @@ static int ad7418_update_device(struct device *dev)
goto abort;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->lock);
return 0;
abort:
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->lock);
return val;
}
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 71deb2cd20f5..38b447c6e8cd 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -72,7 +72,7 @@ struct adm1021_data {
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
char low_power; /* !=0 if device in low power mode */
unsigned long last_updated; /* In jiffies */
@@ -135,7 +135,7 @@ static struct adm1021_data *adm1021_update_device(struct device *dev)
ADM1023_REG_REM_OFFSET_PREC);
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index de51e01c061b..4352f6a884e8 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -97,7 +97,7 @@ struct adm1025_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
u8 in[6]; /* register value */
@@ -148,7 +148,7 @@ static struct adm1025_data *adm1025_update_device(struct device *dev)
ADM1025_REG_VID4) & 0x01) << 4);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 49cefbadb156..69b3ec752944 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -259,7 +259,7 @@ struct adm1026_data {
const struct attribute_group *groups[3];
struct mutex update_lock;
- int valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_reading; /* In jiffies */
unsigned long last_config; /* In jiffies */
@@ -459,7 +459,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
data->last_config = jiffies;
} /* last_config */
- data->valid = 1;
+ data->valid = true;
mutex_unlock(&data->update_lock);
return data;
}
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 50b1df7b008c..3e1999413f32 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -99,7 +99,7 @@ static const u8 ADM1029_REG_FAN_DIV[] = {
struct adm1029_data {
struct i2c_client *client;
struct mutex update_lock; /* protect register access */
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values, signed for temperature, unsigned for other stuff */
@@ -143,7 +143,7 @@ static struct adm1029_data *adm1029_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index b538ace2d292..257ec53ae723 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -65,7 +65,7 @@ struct adm1031_data {
const struct attribute_group *groups[3];
struct mutex update_lock;
int chip_type;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned int update_interval; /* In milliseconds */
/*
@@ -187,7 +187,7 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
ADM1031_REG_PWM) >> (4 * chan)) & 0x0f;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -650,7 +650,7 @@ static ssize_t fan_div_store(struct device *dev,
data->fan_min[nr]);
/* Invalidate the cache: fan speed is no longer valid */
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c
index 9fad01191620..c40cac16af68 100644
--- a/drivers/hwmon/adt7310.c
+++ b/drivers/hwmon/adt7310.c
@@ -90,7 +90,8 @@ static int adt7310_spi_probe(struct spi_device *spi)
static int adt7310_spi_remove(struct spi_device *spi)
{
- return adt7x10_remove(&spi->dev, spi->irq);
+ adt7x10_remove(&spi->dev, spi->irq);
+ return 0;
}
static const struct spi_device_id adt7310_id[] = {
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 9d80895d0266..973db057427b 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -50,7 +50,8 @@ static int adt7410_i2c_probe(struct i2c_client *client)
static int adt7410_i2c_remove(struct i2c_client *client)
{
- return adt7x10_remove(&client->dev, client->irq);
+ adt7x10_remove(&client->dev, client->irq);
+ return 0;
}
static const struct i2c_device_id adt7410_ids[] = {
diff --git a/drivers/hwmon/adt7x10.c b/drivers/hwmon/adt7x10.c
index 3f03b4cf5858..e9d33aa78a19 100644
--- a/drivers/hwmon/adt7x10.c
+++ b/drivers/hwmon/adt7x10.c
@@ -444,7 +444,7 @@ exit_restore:
}
EXPORT_SYMBOL_GPL(adt7x10_probe);
-int adt7x10_remove(struct device *dev, int irq)
+void adt7x10_remove(struct device *dev, int irq)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
@@ -457,7 +457,6 @@ int adt7x10_remove(struct device *dev, int irq)
sysfs_remove_group(&dev->kobj, &adt7x10_group);
if (data->oldconfig != data->config)
adt7x10_write_byte(dev, ADT7X10_CONFIG, data->oldconfig);
- return 0;
}
EXPORT_SYMBOL_GPL(adt7x10_remove);
diff --git a/drivers/hwmon/adt7x10.h b/drivers/hwmon/adt7x10.h
index 21ad15ce3163..a1ae682eb32e 100644
--- a/drivers/hwmon/adt7x10.h
+++ b/drivers/hwmon/adt7x10.h
@@ -26,7 +26,7 @@ struct adt7x10_ops {
int adt7x10_probe(struct device *dev, const char *name, int irq,
const struct adt7x10_ops *ops);
-int adt7x10_remove(struct device *dev, int irq);
+void adt7x10_remove(struct device *dev, int irq);
#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops adt7x10_dev_pm_ops;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 6b1ce2242c61..0c16face3fd3 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -141,7 +141,7 @@ static const u8 fan_reg_hi[] = {AMC6821_REG_TDATA_HI,
struct amc6821_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* register values */
@@ -258,7 +258,7 @@ static struct amc6821_data *amc6821_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
return data;
@@ -511,7 +511,7 @@ static ssize_t temp_auto_point_temp_store(struct device *dev,
}
mutex_lock(&data->update_lock);
- data->valid = 0;
+ data->valid = false;
switch (ix) {
case 0:
@@ -584,7 +584,7 @@ static ssize_t pwm1_auto_point_pwm_store(struct device *dev,
}
EXIT:
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index c31759794a29..fc6d6a9053ce 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -391,7 +391,7 @@ static const struct applesmc_entry *applesmc_get_entry_by_index(int index)
cache->len = info[0];
memcpy(cache->type, &info[1], 4);
cache->flags = info[5];
- cache->valid = 1;
+ cache->valid = true;
out:
mutex_unlock(&smcreg.mutex);
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index ba9fcf6f9264..8cf0bcb85eb4 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -186,7 +186,7 @@ struct asb100_data {
/* array of 2 pointers to subclients */
struct i2c_client *lm75[2];
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
u8 in[7]; /* Register value */
u8 in_max[7]; /* Register value */
u8 in_min[7]; /* Register value */
@@ -993,7 +993,7 @@ static struct asb100_data *asb100_update_device(struct device *dev)
(asb100_read_value(client, ASB100_REG_ALARM2) << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
dev_dbg(&client->dev, "... device update complete\n");
}
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 600ffc7e1900..e835605a7456 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -77,7 +77,7 @@ struct asc7621_data {
struct i2c_client client;
struct device *class_dev;
struct mutex update_lock;
- int valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_high_reading; /* In jiffies */
unsigned long last_low_reading; /* In jiffies */
/*
@@ -1032,7 +1032,7 @@ static struct asc7621_data *asc7621_update_device(struct device *dev)
data->last_low_reading = jiffies;
} /* last_reading */
- data->valid = 1;
+ data->valid = true;
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index 1e08a5431f12..4fd8de8022bc 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -37,7 +37,7 @@ struct atxp1_data {
struct i2c_client *client;
struct mutex update_lock;
unsigned long last_updated;
- u8 valid;
+ bool valid;
struct {
u8 vid; /* VID output register */
u8 cpu_vid; /* VID input from CPU */
@@ -63,7 +63,7 @@ static struct atxp1_data *atxp1_update_device(struct device *dev)
data->reg.gpio1 = i2c_smbus_read_byte_data(client, ATXP1_GPIO1);
data->reg.gpio2 = i2c_smbus_read_byte_data(client, ATXP1_GPIO2);
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -136,7 +136,7 @@ static ssize_t cpu0_vid_store(struct device *dev,
ATXP1_VID, cvid | ATXP1_VIDENA);
}
- data->valid = 0;
+ data->valid = false;
return count;
}
@@ -180,7 +180,7 @@ static ssize_t gpio1_store(struct device *dev, struct device_attribute *attr,
i2c_smbus_write_byte_data(client, ATXP1_GPIO1, value);
- data->valid = 0;
+ data->valid = false;
}
return count;
@@ -224,7 +224,7 @@ static ssize_t gpio2_store(struct device *dev, struct device_attribute *attr,
i2c_smbus_write_byte_data(client, ATXP1_GPIO2, value);
- data->valid = 0;
+ data->valid = false;
}
return count;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index bb9211215a68..ccf0af5b988a 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -167,7 +167,7 @@ static ssize_t show_temp(struct device *dev,
* really help at all.
*/
tdata->temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
- tdata->valid = 1;
+ tdata->valid = true;
tdata->last_updated = jiffies;
}
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 774c1b0715d9..eaace478f508 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -12,24 +12,24 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/capability.h>
#include <linux/cpu.h>
+#include <linux/ctype.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/dmi.h>
-#include <linux/capability.h>
-#include <linux/mutex.h>
-#include <linux/hwmon.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/sched.h>
-#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/i8k.h>
@@ -76,6 +76,7 @@ struct dell_smm_data {
int temp_type[DELL_SMM_NO_TEMP];
bool fan[DELL_SMM_NO_FANS];
int fan_type[DELL_SMM_NO_FANS];
+ int *fan_nominal_speed[DELL_SMM_NO_FANS];
};
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
@@ -326,7 +327,7 @@ static int i8k_enable_fan_auto_mode(const struct dell_smm_data *data, bool enabl
}
/*
- * Set the fan speed (off, low, high). Returns the new fan status.
+ * Set the fan speed (off, low, high, ...).
*/
static int i8k_set_fan(const struct dell_smm_data *data, int fan, int speed)
{
@@ -338,7 +339,7 @@ static int i8k_set_fan(const struct dell_smm_data *data, int fan, int speed)
speed = (speed < 0) ? 0 : ((speed > data->i8k_fan_max) ? data->i8k_fan_max : speed);
regs.ebx = (fan & 0xff) | (speed << 8);
- return i8k_smm(&regs) ? : i8k_get_fan_status(data, fan);
+ return i8k_smm(&regs);
}
static int __init i8k_get_temp_type(int sensor)
@@ -452,7 +453,7 @@ static int
i8k_ioctl_unlocked(struct file *fp, struct dell_smm_data *data, unsigned int cmd, unsigned long arg)
{
int val = 0;
- int speed;
+ int speed, err;
unsigned char buff[16];
int __user *argp = (int __user *)arg;
@@ -473,8 +474,7 @@ i8k_ioctl_unlocked(struct file *fp, struct dell_smm_data *data, unsigned int cmd
if (restricted && !capable(CAP_SYS_ADMIN))
return -EPERM;
- memset(buff, 0, sizeof(buff));
- strscpy(buff, data->bios_machineid, sizeof(buff));
+ strscpy_pad(buff, data->bios_machineid, sizeof(buff));
break;
case I8K_FN_STATUS:
@@ -513,11 +513,15 @@ i8k_ioctl_unlocked(struct file *fp, struct dell_smm_data *data, unsigned int cmd
if (copy_from_user(&speed, argp + 1, sizeof(int)))
return -EFAULT;
- val = i8k_set_fan(data, val, speed);
+ err = i8k_set_fan(data, val, speed);
+ if (err < 0)
+ return err;
+
+ val = i8k_get_fan_status(data, val);
break;
default:
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
if (val < 0)
@@ -674,6 +678,13 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
return 0444;
break;
+ case hwmon_fan_min:
+ case hwmon_fan_max:
+ case hwmon_fan_target:
+ if (data->fan_nominal_speed[channel])
+ return 0444;
+
+ break;
default:
break;
}
@@ -741,6 +752,25 @@ static int dell_smm_read(struct device *dev, enum hwmon_sensor_types type, u32 a
*val = ret;
return 0;
+ case hwmon_fan_min:
+ *val = data->fan_nominal_speed[channel][0];
+
+ return 0;
+ case hwmon_fan_max:
+ *val = data->fan_nominal_speed[channel][data->i8k_fan_max];
+
+ return 0;
+ case hwmon_fan_target:
+ ret = i8k_get_fan_status(data, channel);
+ if (ret < 0)
+ return ret;
+
+ if (ret > data->i8k_fan_max)
+ ret = data->i8k_fan_max;
+
+ *val = data->fan_nominal_speed[channel][ret];
+
+ return 0;
default:
break;
}
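The new fan_nominal_speed tables map each SMM fan state (0 through i8k_fan_max) to a nominal RPM, so fanN_min, fanN_max and fanN_target all become table lookups. For instance, with i8k_fan_max = 2 and a hypothetical table of {2500, 3800, 5100}:

/* Hypothetical values illustrating the lookups above. */
int speeds[] = { 2500, 3800, 5100 };	/* states 0..i8k_fan_max (= 2) */

/* hwmon_fan_min    -> speeds[0]           = 2500 RPM		*/
/* hwmon_fan_max    -> speeds[i8k_fan_max] = 5100 RPM		*/
/* hwmon_fan_target -> speeds[current state], clamped to i8k_fan_max */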
@@ -889,9 +919,12 @@ static const struct hwmon_channel_info *dell_smm_info[] = {
HWMON_T_INPUT | HWMON_T_LABEL
),
HWMON_CHANNEL_INFO(fan,
- HWMON_F_INPUT | HWMON_F_LABEL,
- HWMON_F_INPUT | HWMON_F_LABEL,
- HWMON_F_INPUT | HWMON_F_LABEL
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ HWMON_F_TARGET,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ HWMON_F_TARGET,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ HWMON_F_TARGET
),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
@@ -910,7 +943,7 @@ static int __init dell_smm_init_hwmon(struct device *dev)
{
struct dell_smm_data *data = dev_get_drvdata(dev);
struct device *dell_smm_hwmon_dev;
- int i, err;
+ int i, state, err;
for (i = 0; i < DELL_SMM_NO_TEMP; i++) {
data->temp_type[i] = i8k_get_temp_type(i);
@@ -926,8 +959,27 @@ static int __init dell_smm_init_hwmon(struct device *dev)
err = i8k_get_fan_status(data, i);
if (err < 0)
err = i8k_get_fan_type(data, i);
- if (err >= 0)
- data->fan[i] = true;
+
+ if (err < 0)
+ continue;
+
+ data->fan[i] = true;
+ data->fan_nominal_speed[i] = devm_kmalloc_array(dev, data->i8k_fan_max + 1,
+ sizeof(*data->fan_nominal_speed[i]),
+ GFP_KERNEL);
+ if (!data->fan_nominal_speed[i])
+ continue;
+
+ for (state = 0; state <= data->i8k_fan_max; state++) {
+ err = i8k_get_fan_nominal_speed(data, i, state);
+ if (err < 0) {
+ /* Mark nominal speed table as invalid in case of error */
+ devm_kfree(dev, data->fan_nominal_speed[i]);
+ data->fan_nominal_speed[i] = NULL;
+ break;
+ }
+ data->fan_nominal_speed[i][state] = err;
+ }
}
dell_smm_hwmon_dev = devm_hwmon_device_register_with_info(dev, "dell_smm", data,
@@ -948,6 +1000,11 @@ enum i8k_configs {
DELL_XPS,
};
+/*
+ * Only used for machines which need some special configuration
+ * in order to work correctly (e.g. if autoconfig fails on these machines).
+ */
+
static const struct i8k_config_data i8k_config_data[] __initconst = {
[DELL_LATITUDE_D520] = {
.fan_mult = 1,
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index c1e4cfb40c3d..e3ad4c2d0038 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -203,7 +203,7 @@ struct dme1737_data {
unsigned int addr; /* for ISA devices only */
struct mutex update_lock;
- int valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_update; /* in jiffies */
unsigned long last_vbat; /* in jiffies */
enum chips type;
@@ -778,7 +778,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
}
data->last_update = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index bf1c4b7ecb40..0886abf6ebab 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -109,7 +109,7 @@ static const u8 DS1621_REG_TEMP[3] = {
struct ds1621_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
enum chips kind; /* device type */
@@ -213,7 +213,7 @@ static struct ds1621_data *ds1621_update_client(struct device *dev)
new_conf);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 9ec722798c4a..82d7c3d58f49 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -56,7 +56,7 @@ static const u8 DS620_REG_TEMP[3] = {
struct ds620_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
s16 temp[3]; /* Register values, word */
@@ -118,7 +118,7 @@ static struct ds620_data *ds620_update_client(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index ec5c98702bf5..29082c8463f4 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -45,7 +45,7 @@ enum subfeature { input, min, max };
struct emc6w201_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
@@ -162,7 +162,7 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 67b47de8263a..7f20edb0677c 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -165,7 +165,7 @@ struct f71805f_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
@@ -404,7 +404,7 @@ static struct f71805f_data *f71805f_update_device(struct device *dev)
+ (f71805f_read8(data, F71805F_REG_STATUS(2)) << 16);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 4dec793fd07d..4673d403759a 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -253,7 +253,7 @@ struct f71882fg_data {
struct mutex update_lock;
int temp_start; /* temp numbering start (0 or 1) */
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
char auto_point_temp_signed;
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
@@ -1359,7 +1359,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
F71882FG_REG_IN(nr));
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 3e567be60fb1..57c8a473698d 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -85,7 +85,7 @@ struct f75375_data {
const char *name;
int kind;
struct mutex update_lock; /* protect register access */
- char valid;
+ bool valid;
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
@@ -228,7 +228,7 @@ static struct f75375_data *f75375_update_device(struct device *dev)
f75375_read8(client, F75375_REG_VOLT(nr));
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 5191cd85a8d1..c26195e3aad7 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -264,7 +264,7 @@ struct fschmd_data {
unsigned long watchdog_is_open;
char watchdog_expect_close;
char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* register values */
@@ -1356,7 +1356,7 @@ static struct fschmd_data *fschmd_update_device(struct device *dev)
FSCHMD_REG_VOLT[data->kind][i]);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index a692f7b2f6f7..36717b524dbd 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -95,7 +95,7 @@ static struct g760a_data *g760a_update_client(struct device *dev)
data->fan_sta = g760a_read_value(client, G760A_REG_FAN_STA);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 7aaee5a48243..dd683b0a648f 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -107,7 +107,7 @@ struct gl518_data {
enum chips type;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 voltage_in[4]; /* Register values; [0] = VDD */
@@ -211,7 +211,7 @@ static struct gl518_data *gl518_update_device(struct device *dev)
gl518_read_value(client, GL518_REG_VIN3);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 4ae1295cc3ea..096ba9797211 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -64,7 +64,7 @@ struct gl520_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* zero until the following fields are valid */
+ bool valid; /* false until the following fields are valid */
unsigned long last_updated; /* in jiffies */
u8 vid;
@@ -174,7 +174,7 @@ static struct gl520_data *gl520_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 8d3b1dae31df..3501a3ead4ba 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -796,8 +796,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
dev_set_drvdata(hdev, drvdata);
dev_set_name(hdev, HWMON_ID_FORMAT, id);
err = device_register(hdev);
- if (err)
- goto free_hwmon;
+ if (err) {
+ put_device(hdev);
+ goto ida_remove;
+ }
INIT_LIST_HEAD(&hwdev->tzdata);
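The hwmon core fix applies the driver-core rule that once device_register() has been called, the embedded kobject owns the object's lifetime: on failure the reference must be dropped with put_device(), which runs the release callback, rather than freeing the containing structure directly. The general form, with hypothetical names (foo, foo_release):

struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

if (!foo)
	return -ENOMEM;
device_initialize(&foo->dev);
foo->dev.release = foo_release;		/* release callback frees foo */
err = device_add(&foo->dev);		/* device_register() = initialize + add */
if (err) {
	put_device(&foo->dev);		/* drops the ref, runs foo_release() */
	return err;			/* no kfree(foo) after this point */
}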
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
index 360f5aee1394..05f68e9c9477 100644
--- a/drivers/hwmon/i5500_temp.c
+++ b/drivers/hwmon/i5500_temp.c
@@ -5,6 +5,7 @@
* Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de>
*/
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -12,7 +13,6 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
@@ -29,69 +29,78 @@
#define REG_CTCTRL 0xF7
#define REG_TSTIMER 0xF8
-/*
- * Sysfs stuff
- */
-
-/* Sensor resolution : 0.5 degree C */
-static ssize_t temp1_input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static umode_t i5500_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
+ int channel)
{
- struct pci_dev *pdev = to_pci_dev(dev->parent);
- long temp;
- u16 tsthrhi;
- s8 tsfsc;
-
- pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi);
- pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
- temp = ((long)tsthrhi - tsfsc) * 500;
-
- return sprintf(buf, "%ld\n", temp);
+ return 0444;
}
-static ssize_t thresh_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int i5500_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long *val)
{
struct pci_dev *pdev = to_pci_dev(dev->parent);
- int reg = to_sensor_dev_attr(devattr)->index;
- long temp;
u16 tsthr;
+ s8 tsfsc;
+ u8 ctsts;
- pci_read_config_word(pdev, reg, &tsthr);
- temp = tsthr * 500;
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ /* Sensor resolution : 0.5 degree C */
+ case hwmon_temp_input:
+ pci_read_config_word(pdev, REG_TSTHRHI, &tsthr);
+ pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+ *val = (tsthr - tsfsc) * 500;
+ return 0;
+ case hwmon_temp_max:
+ pci_read_config_word(pdev, REG_TSTHRHI, &tsthr);
+ *val = tsthr * 500;
+ return 0;
+ case hwmon_temp_max_hyst:
+ pci_read_config_word(pdev, REG_TSTHRLO, &tsthr);
+ *val = tsthr * 500;
+ return 0;
+ case hwmon_temp_crit:
+ pci_read_config_word(pdev, REG_TSTHRCATA, &tsthr);
+ *val = tsthr * 500;
+ return 0;
+ case hwmon_temp_max_alarm:
+ pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
+ *val = !!(ctsts & BIT(1));
+ return 0;
+ case hwmon_temp_crit_alarm:
+ pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
+ *val = !!(ctsts & BIT(0));
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
- return sprintf(buf, "%ld\n", temp);
+ return -EOPNOTSUPP;
}
-static ssize_t alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct pci_dev *pdev = to_pci_dev(dev->parent);
- int nr = to_sensor_dev_attr(devattr)->index;
- u8 ctsts;
-
- pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
- return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr));
-}
+static const struct hwmon_ops i5500_ops = {
+ .is_visible = i5500_is_visible,
+ .read = i5500_read,
+};
-static DEVICE_ATTR_RO(temp1_input);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, thresh, 0xE2);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_hyst, thresh, 0xEC);
-static SENSOR_DEVICE_ATTR_RO(temp1_max, thresh, 0xEE);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 1);
-
-static struct attribute *i5500_temp_attrs[] = {
- &dev_attr_temp1_input.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+static const struct hwmon_channel_info *i5500_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM
+ ),
NULL
};
-ATTRIBUTE_GROUPS(i5500_temp);
+static const struct hwmon_chip_info i5500_chip_info = {
+ .ops = &i5500_ops,
+ .info = i5500_info,
+};
static const struct pci_device_id i5500_temp_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) },
@@ -121,9 +130,8 @@ static int i5500_temp_probe(struct pci_dev *pdev,
return -ENODEV;
}
- hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
- "intel5500", NULL,
- i5500_temp_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "intel5500", NULL,
+ &i5500_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
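After the conversion every reading funnels through i5500_read(), and the 0.5 degC-per-LSB register scaling lives in one place. A worked example: a high-threshold register value of 0x50 (decimal 80) reports 80 * 500 = 40000 millidegrees, i.e. 40 degC.

u16 tsthr = 0x50;	/* hypothetical REG_TSTHRHI contents: 80 */
long mC = tsthr * 500;	/* 0.5 degC per LSB -> 40000 mC = 40 degC */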
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index a4ec85207782..de6baf6ca3d1 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -127,7 +127,7 @@ struct aem_data {
struct device *hwmon_dev;
struct platform_device *pdev;
struct mutex lock;
- char valid;
+ bool valid;
unsigned long last_updated; /* In jiffies */
u8 ver_major;
u8 ver_minor;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index b2ab83c9fd9a..f6ec165c0fa8 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -66,7 +66,7 @@ struct ibmpex_bmc_data {
struct device *hwmon_dev;
struct device *bmc_device;
struct mutex lock;
- char valid;
+ bool valid;
unsigned long last_updated; /* In jiffies */
struct ipmi_addr address;
@@ -239,7 +239,7 @@ static void ibmpex_update_device(struct ibmpex_bmc_data *data)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
out:
mutex_unlock(&data->lock);
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 1f93134afcb9..0e543dbe0a6b 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -519,7 +519,7 @@ struct it87_data {
unsigned short addr;
const char *name;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u16 in_scaled; /* Internal voltage sensors are scaled */
@@ -844,7 +844,7 @@ static struct it87_data *it87_update_device(struct device *dev)
data->vid &= 0x3f;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -980,7 +980,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
regval |= 0x80;
it87_write_value(data, IT87_REG_BEEP_ENABLE, regval);
}
- data->valid = 0;
+ data->valid = false;
reg = IT87_REG_TEMP_OFFSET[nr];
break;
}
@@ -1079,7 +1079,7 @@ static ssize_t set_temp_type(struct device *dev, struct device_attribute *attr,
it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
if (has_temp_old_peci(data, nr))
it87_write_value(data, IT87_REG_TEMP_EXTRA, data->extra);
- data->valid = 0; /* Force cache refresh */
+ data->valid = false; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
@@ -1834,7 +1834,7 @@ static ssize_t clear_intrusion(struct device *dev,
config |= BIT(5);
it87_write_value(data, IT87_REG_CONFIG, config);
/* Invalidate cache to force re-read */
- data->valid = 0;
+ data->valid = false;
}
mutex_unlock(&data->update_lock);
@@ -3229,7 +3229,7 @@ static int __maybe_unused it87_resume(struct device *dev)
it87_start_monitoring(data);
/* force update */
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index 1109fffa76fb..ef5a49cd9149 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -191,7 +191,7 @@ static struct pem_data *pem_update_device(struct device *dev)
i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index c8f93c5d1ccc..339a145afc09 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -139,7 +139,7 @@ struct lm63_data {
struct i2c_client *client;
struct mutex update_lock;
const struct attribute_group *groups[5];
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
char lut_valid; /* zero until lut fields are valid */
unsigned long last_updated; /* in jiffies */
unsigned long lut_last_updated; /* in jiffies */
@@ -289,7 +289,7 @@ static struct lm63_data *lm63_update_device(struct device *dev)
LM63_REG_ALERT_STATUS) & 0x7F;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
lm63_update_lut(data);
@@ -714,7 +714,7 @@ static ssize_t temp2_type_store(struct device *dev,
reg = i2c_smbus_read_byte_data(client, LM96163_REG_TRUTHERM) & ~0x02;
i2c_smbus_write_byte_data(client, LM96163_REG_TRUTHERM,
reg | (data->trutherm ? 0x02 : 0x00));
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 7570c9d50ddc..df6af85e170a 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -55,7 +55,7 @@ static const u8 temp_regs[t_num_temp] = {
struct lm77_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid;
+ bool valid;
unsigned long last_updated; /* In jiffies */
int temp[t_num_temp]; /* index using temp_index */
u8 alarms;
@@ -118,7 +118,7 @@ static struct lm77_data *lm77_update_device(struct device *dev)
data->alarms =
lm77_read_value(client, LM77_REG_TEMP) & 0x0007;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 1aa35ca0c6fe..5e129cbec1cb 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -117,7 +117,7 @@ struct lm78_data {
int isa_addr;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[7]; /* Register value */
@@ -772,7 +772,7 @@ static struct lm78_data *lm78_update_device(struct device *dev)
data->alarms = lm78_read_value(data, LM78_REG_ALARM1) +
(lm78_read_value(data, LM78_REG_ALARM2) << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
data->fan_div[2] = 1;
}
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 97ab491d2922..e85e062bbf32 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -117,7 +117,7 @@ struct lm80_data {
struct i2c_client *client;
struct mutex update_lock;
char error; /* !=0 if error occurred during last update */
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[i_num_in][7]; /* Register value, 1st index is enum in_index */
@@ -236,14 +236,14 @@ static struct lm80_data *lm80_update_device(struct device *dev)
data->alarms = prev_rv + (rv << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
data->error = 0;
}
goto done;
abort:
ret = ERR_PTR(rv);
- data->valid = 0;
+ data->valid = false;
data->error = 1;
done:
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 2ff5ecce608e..74fd7aa373a3 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -105,7 +105,7 @@ struct lm83_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
@@ -137,7 +137,7 @@ static struct lm83_data *lm83_update_device(struct device *dev)
<< 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index c7bf5de7b70f..88cf2012d34b 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -294,7 +294,7 @@ struct lm85_data {
bool has_vid5; /* true if VID5 is configured for ADT7463 or ADT7468 */
struct mutex update_lock;
- int valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_reading; /* In jiffies */
unsigned long last_config; /* In jiffies */
@@ -541,7 +541,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
data->last_config = jiffies;
} /* last_config */
- data->valid = 1;
+ data->valid = true;
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index b2d820125bb6..1750bc588856 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -141,7 +141,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
struct lm87_data {
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 channel; /* register value */
@@ -251,7 +251,7 @@ static struct lm87_data *lm87_update_device(struct device *dev)
data->aout = lm87_read_value(client, LM87_REG_AOUT);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 567b7c521f38..618052c6cdb6 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -69,10 +69,10 @@
* This driver also supports the G781 from GMT. This device is compatible
* with the ADM1032.
*
- * This driver also supports TMP451 from Texas Instruments. This device is
- * supported in both compatibility and extended mode. It's mostly compatible
- * with ADT7461 except for local temperature low byte register and max
- * conversion rate.
+ * This driver also supports TMP451 and TMP461 from Texas Instruments.
+ * Those devices are supported in both compatibility and extended mode.
+ * They are mostly compatible with ADT7461 except for local temperature
+ * low byte register and max conversion rate.
*
* Since the LM90 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
@@ -112,7 +112,7 @@ static const unsigned short normal_i2c[] = {
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
- max6646, w83l771, max6696, sa56004, g781, tmp451, max6654 };
+ max6646, w83l771, max6696, sa56004, g781, tmp451, tmp461, max6654 };
/*
* The LM90 registers
@@ -168,8 +168,12 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
-/* TMP451 registers */
+/* TMP451/TMP461 registers */
#define TMP451_REG_R_LOCAL_TEMPL 0x15
+#define TMP451_REG_CONALERT 0x22
+
+#define TMP461_REG_CHEN 0x16
+#define TMP461_REG_DFC 0x24
/*
* Device flags
@@ -182,7 +186,8 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */
#define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */
#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
-#define LM90_PAUSE_FOR_CONFIG (1 << 8) /* Pause conversion for config */
+#define LM90_HAVE_EXTENDED_TEMP (1 << 8) /* extended temperature support */
+#define LM90_PAUSE_FOR_CONFIG (1 << 9) /* Pause conversion for config */
/* LM90 status */
#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */
@@ -229,6 +234,7 @@ static const struct i2c_device_id lm90_id[] = {
{ "w83l771", w83l771 },
{ "sa56004", sa56004 },
{ "tmp451", tmp451 },
+ { "tmp461", tmp461 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
@@ -326,6 +332,10 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = {
.compatible = "ti,tmp451",
.data = (void *)tmp451
},
+ {
+ .compatible = "ti,tmp461",
+ .data = (void *)tmp461
+ },
{ },
};
MODULE_DEVICE_TABLE(of, lm90_of_match);
@@ -350,7 +360,7 @@ static const struct lm90_params lm90_params[] = {
},
[adt7461] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
.alert_alarms = 0x7c,
.max_convrate = 10,
},
@@ -422,7 +432,14 @@ static const struct lm90_params lm90_params[] = {
},
[tmp451] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+ .alert_alarms = 0x7c,
+ .max_convrate = 9,
+ .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
+ },
+ [tmp461] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
.alert_alarms = 0x7c,
.max_convrate = 9,
.reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
@@ -998,7 +1015,7 @@ static int lm90_get_temp11(struct lm90_data *data, int index)
s16 temp11 = data->temp11[index];
int temp;
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
temp = temp_from_u16_adt7461(data, temp11);
else if (data->kind == max6646)
temp = temp_from_u16(temp11);
@@ -1035,7 +1052,7 @@ static int lm90_set_temp11(struct lm90_data *data, int index, long val)
val -= 16000;
}
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
data->temp11[index] = temp_to_u16_adt7461(data, val);
else if (data->kind == max6646)
data->temp11[index] = temp_to_u8(val) << 8;
@@ -1062,7 +1079,7 @@ static int lm90_get_temp8(struct lm90_data *data, int index)
s8 temp8 = data->temp8[index];
int temp;
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
temp = temp_from_u8_adt7461(data, temp8);
else if (data->kind == max6646)
temp = temp_from_u8(temp8);
@@ -1098,7 +1115,7 @@ static int lm90_set_temp8(struct lm90_data *data, int index, long val)
val -= 16000;
}
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
data->temp8[index] = temp_to_u8_adt7461(data, val);
else if (data->kind == max6646)
data->temp8[index] = temp_to_u8(val);
@@ -1116,7 +1133,7 @@ static int lm90_get_temphyst(struct lm90_data *data, int index)
{
int temp;
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
temp = temp_from_u8_adt7461(data, data->temp8[index]);
else if (data->kind == max6646)
temp = temp_from_u8(data->temp8[index]);
@@ -1136,7 +1153,7 @@ static int lm90_set_temphyst(struct lm90_data *data, long val)
int temp;
int err;
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
temp = temp_from_u8_adt7461(data, data->temp8[LOCAL_CRIT]);
else if (data->kind == max6646)
temp = temp_from_u8(data->temp8[LOCAL_CRIT]);
@@ -1627,18 +1644,26 @@ static int lm90_detect(struct i2c_client *client,
&& convrate <= 0x08)
name = "g781";
} else
- if (address == 0x4C
- && man_id == 0x55) { /* Texas Instruments */
- int local_ext;
+ if (man_id == 0x55 && chip_id == 0x00 &&
+ (config1 & 0x1B) == 0x00 && convrate <= 0x09) {
+ int local_ext, conalert, chen, dfc;
local_ext = i2c_smbus_read_byte_data(client,
TMP451_REG_R_LOCAL_TEMPL);
-
- if (chip_id == 0x00 /* TMP451 */
- && (config1 & 0x1B) == 0x00
- && convrate <= 0x09
- && (local_ext & 0x0F) == 0x00)
- name = "tmp451";
+ conalert = i2c_smbus_read_byte_data(client,
+ TMP451_REG_CONALERT);
+ chen = i2c_smbus_read_byte_data(client, TMP461_REG_CHEN);
+ dfc = i2c_smbus_read_byte_data(client, TMP461_REG_DFC);
+
+ if ((local_ext & 0x0F) == 0x00 &&
+ (conalert & 0xf1) == 0x01 &&
+ (chen & 0xfc) == 0x00 &&
+ (dfc & 0xfc) == 0x00) {
+ if (address == 0x4c && !(chen & 0x03))
+ name = "tmp451";
+ else if (address >= 0x48 && address <= 0x4f)
+ name = "tmp461";
+ }
}
if (!name) { /* identification failed */
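The detection branch above tells the two TI parts apart by probing registers the TMP461 documents (channel enable at 0x16, digital filter control at 0x24) alongside the shared consecutive-alert register. Distilled into a standalone decision helper (a hypothetical sketch over the raw register values lm90_detect() has already read, not driver code):

static const char *tmp45x_name(int address, int local_ext, int conalert,
			       int chen, int dfc)
{
	if ((local_ext & 0x0f) != 0x00 || (conalert & 0xf1) != 0x01 ||
	    (chen & 0xfc) != 0x00 || (dfc & 0xfc) != 0x00)
		return NULL;			/* neither chip */
	if (address == 0x4c && !(chen & 0x03))
		return "tmp451";	/* fixed 0x4c address, one channel */
	if (address >= 0x48 && address <= 0x4f)
		return "tmp461";
	return NULL;
}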
@@ -1685,7 +1710,7 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
/* Check Temperature Range Select */
- if (data->kind == adt7461 || data->kind == tmp451) {
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP) {
if (config & 0x04)
data->flags |= LM90_FLAG_ADT7461_EXT;
}
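With the conversion helpers now keyed off LM90_HAVE_EXTENDED_TEMP instead of per-chip kind checks, adding the TMP461 only required setting the flag in lm90_params. The flag selects the ADT7461-style extended encoding; a minimal sketch of the difference, assuming the conventional 64-degree offset (temp_from_u8_adt7461() in the driver remains the authoritative conversion):

/* Sketch: extended range re-bases the 8-bit reading by 64 degC,
 * moving the usable span to -64..+191 degC. */
static int temp_from_u8_sketch(bool extended, u8 reg)
{
	if (extended)
		return ((int)reg - 64) * 1000;	/* millidegrees C */
	return (s8)reg * 1000;			/* standard signed range */
}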
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 9bf278cf0bd0..5bae6eedcaf1 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -99,7 +99,7 @@ static const u8 regs[t_num_regs] = {
struct lm92_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
@@ -126,7 +126,7 @@ static struct lm92_data *lm92_update_device(struct device *dev)
i2c_smbus_read_word_swapped(client, regs[i]);
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 78d6dfaf145b..dc67bf954b21 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -202,7 +202,7 @@ struct lm93_data {
/* client update function */
void (*update)(struct lm93_data *, struct i2c_client *);
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
/* register values, arranged by block read groups */
struct block1_t block1;
@@ -917,7 +917,7 @@ static struct lm93_data *lm93_update_device(struct device *dev)
data->update(data, client);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 00dbc170c8c6..8ea46ff20be5 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -78,7 +78,7 @@ struct lm95241_data {
struct mutex update_lock;
unsigned long last_updated; /* in jiffies */
unsigned long interval; /* in milli-seconds */
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
/* registers values */
u8 temp[ARRAY_SIZE(lm95241_reg_address)];
u8 status, config, model, trutherm;
@@ -118,7 +118,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
data->status = i2c_smbus_read_byte_data(client,
LM95241_REG_R_STATUS);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -257,7 +257,7 @@ static int lm95241_write_temp(struct device *dev, u32 attr, int channel,
else
data->config &= ~R2DF_MASK;
}
- data->valid = 0;
+ data->valid = false;
ret = i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG,
data->config);
break;
@@ -273,7 +273,7 @@ static int lm95241_write_temp(struct device *dev, u32 attr, int channel,
else
data->config &= ~R2DF_MASK;
}
- data->valid = 0;
+ data->valid = false;
ret = i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG,
data->config);
break;
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index 13b85367a21f..e3ac004c1ed1 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -77,7 +77,7 @@ static struct ltc4151_data *ltc4151_update_device(struct device *dev)
data->regs[i] = val;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index 1d18c212054f..fa43d26ddd4f 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -64,7 +64,7 @@ static struct ltc4215_data *ltc4215_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index b81e9c3d297b..b91cc4fe84e5 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -73,13 +73,13 @@ static struct ltc4261_data *ltc4261_update_device(struct device *dev)
"Failed to read ADC value: error %d\n",
val);
ret = ERR_PTR(val);
- data->valid = 0;
+ data->valid = false;
goto abort;
}
data->regs[i] = val;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index ae3a6a7bdaa2..daa5d8af1e69 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -166,7 +166,7 @@ static struct max16065_data *max16065_update_device(struct device *dev)
= i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
return data;
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 8bd941cae4d1..eae9e68027bc 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -79,7 +79,7 @@ enum temp_index {
struct max1619_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
@@ -124,7 +124,7 @@ static struct max1619_data *max1619_update_device(struct device *dev)
data->alarms ^= 0x02;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 5c41c78f0458..78688e6cb87d 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -58,7 +58,7 @@ struct max1668_data {
enum chips type;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* 1x local and 4x remote */
@@ -120,7 +120,7 @@ static struct max1668_data *max1668_update_device(struct device *dev)
data->alarms |= val;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
index 613338cbcb17..4cf4fe6809a3 100644
--- a/drivers/hwmon/max31722.c
+++ b/drivers/hwmon/max31722.c
@@ -103,10 +103,16 @@ static int max31722_probe(struct spi_device *spi)
static int max31722_remove(struct spi_device *spi)
{
struct max31722_data *data = spi_get_drvdata(spi);
+ int ret;
hwmon_device_unregister(data->hwmon_dev);
- return max31722_set_mode(data, MAX31722_MODE_STANDBY);
+ ret = max31722_set_mode(data, MAX31722_MODE_STANDBY);
+ if (ret)
+ /* There is nothing we can do about this ... */
+ dev_warn(&spi->dev, "Failed to put device in stand-by mode\n");
+
+ return 0;
}
static int __maybe_unused max31722_suspend(struct device *dev)
diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c
new file mode 100644
index 000000000000..202b6438179d
--- /dev/null
+++ b/drivers/hwmon/max6620.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for Maxim MAX6620
+ *
+ * Originally from L. Grunenberg.
+ * (C) 2012 by L. Grunenberg <contact@lgrunenberg.de>
+ *
+ * Copyright (c) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+ *
+ * based on code written by:
+ * 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * John Morris <john.morris@spirentcom.com>
+ * Copyright (c) 2003 Spirent Communications
+ * and Claus Gindhart <claus.gindhart@kontron.com>
+ *
+ * This module has only been tested with the MAX6620 chip.
+ *
+ * The datasheet was last seen at:
+ *
+ * http://pdfserv.maxim-ic.com/en/ds/MAX6620.pdf
+ *
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/*
+ * MAX 6620 registers
+ */
+
+#define MAX6620_REG_CONFIG 0x00
+#define MAX6620_REG_FAULT 0x01
+#define MAX6620_REG_CONF_FAN0 0x02
+#define MAX6620_REG_CONF_FAN1 0x03
+#define MAX6620_REG_CONF_FAN2 0x04
+#define MAX6620_REG_CONF_FAN3 0x05
+#define MAX6620_REG_DYN_FAN0 0x06
+#define MAX6620_REG_DYN_FAN1 0x07
+#define MAX6620_REG_DYN_FAN2 0x08
+#define MAX6620_REG_DYN_FAN3 0x09
+#define MAX6620_REG_TACH0 0x10
+#define MAX6620_REG_TACH1 0x12
+#define MAX6620_REG_TACH2 0x14
+#define MAX6620_REG_TACH3 0x16
+#define MAX6620_REG_VOLT0 0x18
+#define MAX6620_REG_VOLT1 0x1A
+#define MAX6620_REG_VOLT2 0x1C
+#define MAX6620_REG_VOLT3 0x1E
+#define MAX6620_REG_TAR0 0x20
+#define MAX6620_REG_TAR1 0x22
+#define MAX6620_REG_TAR2 0x24
+#define MAX6620_REG_TAR3 0x26
+#define MAX6620_REG_DAC0 0x28
+#define MAX6620_REG_DAC1 0x2A
+#define MAX6620_REG_DAC2 0x2C
+#define MAX6620_REG_DAC3 0x2E
+
+/*
+ * Config register bits
+ */
+
+#define MAX6620_CFG_RUN BIT(7)
+#define MAX6620_CFG_POR BIT(6)
+#define MAX6620_CFG_TIMEOUT BIT(5)
+#define MAX6620_CFG_FULLFAN BIT(4)
+#define MAX6620_CFG_OSC BIT(3)
+#define MAX6620_CFG_WD_MASK (BIT(2) | BIT(1))
+#define MAX6620_CFG_WD_2 BIT(1)
+#define MAX6620_CFG_WD_6 BIT(2)
+#define MAX6620_CFG_WD10 (BIT(2) | BIT(1))
+#define MAX6620_CFG_WD BIT(0)
+
+/*
+ * Failure status register bits
+ */
+
+#define MAX6620_FAIL_TACH0 BIT(4)
+#define MAX6620_FAIL_TACH1 BIT(5)
+#define MAX6620_FAIL_TACH2 BIT(6)
+#define MAX6620_FAIL_TACH3 BIT(7)
+#define MAX6620_FAIL_MASK0 BIT(0)
+#define MAX6620_FAIL_MASK1 BIT(1)
+#define MAX6620_FAIL_MASK2 BIT(2)
+#define MAX6620_FAIL_MASK3 BIT(3)
+
+#define MAX6620_CLOCK_FREQ 8192 /* Clock frequency in Hz */
+#define MAX6620_PULSE_PER_REV 2 /* Tachometer pulses per revolution */
+
+/* Minimum and maximum values of the FAN-RPM */
+#define FAN_RPM_MIN 240
+#define FAN_RPM_MAX 30000
+
+static const u8 config_reg[] = {
+ MAX6620_REG_CONF_FAN0,
+ MAX6620_REG_CONF_FAN1,
+ MAX6620_REG_CONF_FAN2,
+ MAX6620_REG_CONF_FAN3,
+};
+
+static const u8 dyn_reg[] = {
+ MAX6620_REG_DYN_FAN0,
+ MAX6620_REG_DYN_FAN1,
+ MAX6620_REG_DYN_FAN2,
+ MAX6620_REG_DYN_FAN3,
+};
+
+static const u8 tach_reg[] = {
+ MAX6620_REG_TACH0,
+ MAX6620_REG_TACH1,
+ MAX6620_REG_TACH2,
+ MAX6620_REG_TACH3,
+};
+
+static const u8 target_reg[] = {
+ MAX6620_REG_TAR0,
+ MAX6620_REG_TAR1,
+ MAX6620_REG_TAR2,
+ MAX6620_REG_TAR3,
+};
+
+/*
+ * Client data (each client gets its own)
+ */
+
+struct max6620_data {
+ struct i2c_client *client;
+ struct mutex update_lock;
+ bool valid; /* false until following fields are valid */
+ unsigned long last_updated; /* in jiffies */
+
+ /* register values */
+ u8 fancfg[4];
+ u8 fandyn[4];
+ u8 fault;
+ u16 tach[4];
+ u16 target[4];
+};
+
+static u8 max6620_fan_div_from_reg(u8 val)
+{
+ return BIT((val & 0xE0) >> 5);
+}
+
+static u16 max6620_fan_rpm_to_tach(u8 div, int rpm)
+{
+ return (60 * div * MAX6620_CLOCK_FREQ) / (rpm * MAX6620_PULSE_PER_REV);
+}
+
+static int max6620_fan_tach_to_rpm(u8 div, u16 tach)
+{
+ return (60 * div * MAX6620_CLOCK_FREQ) / (tach * MAX6620_PULSE_PER_REV);
+}
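max6620_fan_rpm_to_tach() and max6620_fan_tach_to_rpm() are one relation solved both ways: RPM = 60 * div * 8192 / (2 * count), with the 8192 Hz tach clock and two pulses per revolution taken from the defines above. A worked round trip (integer division throughout):

/* div = 2, requested target 5000 RPM:
 *   tach = 60 * 2 * 8192 / (5000 * 2) = 98
 *   rpm  = 60 * 2 * 8192 / (98 * 2)   = 5015
 * so the stored count reproduces the request to within ~0.3%. */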
+
+static int max6620_update_device(struct device *dev)
+{
+ struct max6620_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int i;
+ int ret = 0;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ for (i = 0; i < 4; i++) {
+ ret = i2c_smbus_read_byte_data(client, config_reg[i]);
+ if (ret < 0)
+ goto error;
+ data->fancfg[i] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, dyn_reg[i]);
+ if (ret < 0)
+ goto error;
+ data->fandyn[i] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, tach_reg[i]);
+ if (ret < 0)
+ goto error;
+ data->tach[i] = (ret << 3) & 0x7f8;
+ ret = i2c_smbus_read_byte_data(client, tach_reg[i] + 1);
+ if (ret < 0)
+ goto error;
+ data->tach[i] |= (ret >> 5) & 0x7;
+
+ ret = i2c_smbus_read_byte_data(client, target_reg[i]);
+ if (ret < 0)
+ goto error;
+ data->target[i] = (ret << 3) & 0x7f8;
+ ret = i2c_smbus_read_byte_data(client, target_reg[i] + 1);
+ if (ret < 0)
+ goto error;
+ data->target[i] |= (ret >> 5) & 0x7;
+ }
+
+ /*
+ * Alarms are cleared on read if the condition that caused
+ * the alarm has gone away. Keep the value latched here so the
+ * register can be reported through the different alarm files.
+ */
+ ret = i2c_smbus_read_byte_data(client, MAX6620_REG_FAULT);
+ if (ret < 0)
+ goto error;
+ data->fault |= (ret >> 4) & (ret & 0x0F);
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+error:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
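Tach and target readings are 11-bit quantities split across a register pair: the first register carries bits 10:3, and the top three bits of the second carry bits 2:0, which is what the shift-and-mask pairs in the update loop reassemble. The same combination as a hypothetical helper:

static inline u16 max6620_tach_word(u8 msb, u8 lsb)
{
	/* msb = bits 10:3, lsb[7:5] = bits 2:0 of the 11-bit count */
	return ((msb << 3) & 0x7f8) | ((lsb >> 5) & 0x7);
}
/* e.g. msb = 0x98, lsb = 0x40 -> 0x4c2 = 1218 counts */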
+
+static umode_t
+max6620_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_alarm:
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_div:
+ case hwmon_fan_target:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+max6620_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct max6620_data *data;
+ struct i2c_client *client;
+ int ret;
+ u8 div;
+ u8 val1;
+ u8 val2;
+
+ ret = max6620_update_device(dev);
+ if (ret < 0)
+ return ret;
+ data = dev_get_drvdata(dev);
+ client = data->client;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_alarm:
+ mutex_lock(&data->update_lock);
+ *val = !!(data->fault & BIT(channel));
+
+ /* Setting TACH count to re-enable fan fault detection */
+ if (*val == 1) {
+ val1 = (data->target[channel] >> 3) & 0xff;
+ val2 = (data->target[channel] << 5) & 0xe0;
+ ret = i2c_smbus_write_byte_data(client,
+ target_reg[channel], val1);
+ if (ret < 0) {
+ mutex_unlock(&data->update_lock);
+ return ret;
+ }
+ ret = i2c_smbus_write_byte_data(client,
+ target_reg[channel] + 1, val2);
+ if (ret < 0) {
+ mutex_unlock(&data->update_lock);
+ return ret;
+ }
+
+ data->fault &= ~BIT(channel);
+ }
+ mutex_unlock(&data->update_lock);
+
+ break;
+ case hwmon_fan_div:
+ *val = max6620_fan_div_from_reg(data->fandyn[channel]);
+ break;
+ case hwmon_fan_input:
+ if (data->tach[channel] == 0) {
+ *val = 0;
+ } else {
+ div = max6620_fan_div_from_reg(data->fandyn[channel]);
+ *val = max6620_fan_tach_to_rpm(div, data->tach[channel]);
+ }
+ break;
+ case hwmon_fan_target:
+ if (data->target[channel] == 0) {
+ *val = 0;
+ } else {
+ div = max6620_fan_div_from_reg(data->fandyn[channel]);
+ *val = max6620_fan_tach_to_rpm(div, data->target[channel]);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+max6620_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct max6620_data *data;
+ struct i2c_client *client;
+ int ret;
+ u8 div;
+ u16 tach;
+ u8 val1;
+ u8 val2;
+
+ ret = max6620_update_device(dev);
+ if (ret < 0)
+ return ret;
+ data = dev_get_drvdata(dev);
+ client = data->client;
+ mutex_lock(&data->update_lock);
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_div:
+ switch (val) {
+ case 1:
+ div = 0;
+ break;
+ case 2:
+ div = 1;
+ break;
+ case 4:
+ div = 2;
+ break;
+ case 8:
+ div = 3;
+ break;
+ case 16:
+ div = 4;
+ break;
+ case 32:
+ div = 5;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+ data->fandyn[channel] &= 0x1F;
+ data->fandyn[channel] |= div << 5;
+ ret = i2c_smbus_write_byte_data(client, dyn_reg[channel],
+ data->fandyn[channel]);
+ break;
+ case hwmon_fan_target:
+ val = clamp_val(val, FAN_RPM_MIN, FAN_RPM_MAX);
+ div = max6620_fan_div_from_reg(data->fandyn[channel]);
+ tach = max6620_fan_rpm_to_tach(div, val);
+ val1 = (tach >> 3) & 0xff;
+ val2 = (tach << 5) & 0xe0;
+ ret = i2c_smbus_write_byte_data(client, target_reg[channel], val1);
+ if (ret < 0)
+ break;
+ ret = i2c_smbus_write_byte_data(client, target_reg[channel] + 1, val2);
+ if (ret < 0)
+ break;
+
+ /* Setting TACH count re-enables fan fault detection */
+ data->fault &= ~BIT(channel);
+
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+error:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
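The divider switch in the hwmon_fan_div case is log2 of a power-of-two divisor spelled out by hand; an equivalent formulation with the kernel's log2 helpers would be (sketch, assuming <linux/log2.h>):

/* Accept only the dividers 1, 2, 4, 8, 16, 32. */
if (val < 1 || val > 32 || !is_power_of_2(val)) {
	ret = -EINVAL;
	goto error;
}
div = ilog2(val);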
+
+static const struct hwmon_channel_info *max6620_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_DIV | HWMON_F_TARGET | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_DIV | HWMON_F_TARGET | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_DIV | HWMON_F_TARGET | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_DIV | HWMON_F_TARGET | HWMON_F_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops max6620_hwmon_ops = {
+ .read = max6620_read,
+ .write = max6620_write,
+ .is_visible = max6620_is_visible,
+};
+
+static const struct hwmon_chip_info max6620_chip_info = {
+ .ops = &max6620_hwmon_ops,
+ .info = max6620_info,
+};
+
+static int max6620_init_client(struct max6620_data *data)
+{
+ struct i2c_client *client = data->client;
+ int config;
+ int err;
+ int i;
+ int reg;
+
+ config = i2c_smbus_read_byte_data(client, MAX6620_REG_CONFIG);
+ if (config < 0) {
+ dev_err(&client->dev, "Error reading config, aborting.\n");
+ return config;
+ }
+
+ /*
+ * Set bit 4, disable other fans from going full speed on a fan
+ * failure.
+ */
+ err = i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config | 0x10);
+ if (err < 0) {
+ dev_err(&client->dev, "Config write error, aborting.\n");
+ return err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ reg = i2c_smbus_read_byte_data(client, config_reg[i]);
+ if (reg < 0)
+ return reg;
+ data->fancfg[i] = reg;
+
+ /* Enable RPM mode */
+ data->fancfg[i] |= 0xa8;
+ err = i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]);
+ if (err < 0)
+ return err;
+
+ /* 2 counts (001) and Rate change 100 (0.125 secs) */
+ data->fandyn[i] = 0x30;
+ err = i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+static int max6620_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max6620_data *data;
+ struct device *hwmon_dev;
+ int err;
+
+ data = devm_kzalloc(dev, sizeof(struct max6620_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ mutex_init(&data->update_lock);
+
+ err = max6620_init_client(data);
+ if (err)
+ return err;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max6620_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max6620_id[] = {
+ { "max6620", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6620_id);
+
+static struct i2c_driver max6620_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max6620",
+ },
+ .probe_new = max6620_probe,
+ .id_table = max6620_id,
+};
+
+module_i2c_driver(max6620_driver);
+
+MODULE_AUTHOR("Lucas Grunenberg");
+MODULE_DESCRIPTION("MAX6620 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index b71899c641fa..ccc0f047bd44 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -69,7 +69,7 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
struct max6639_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* Register values sampled regularly */
@@ -141,7 +141,7 @@ static struct max6639_data *max6639_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 23d93142b0b3..699d265aae2e 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -181,7 +181,7 @@ static struct max6642_data *max6642_update_device(struct device *dev)
MAX6642_REG_R_STATUS);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index 89fe7b9fe26b..4a8becdb0d58 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -12,7 +12,9 @@
#include <linux/regmap.h>
#include <linux/thermal.h>
-#define MLXREG_FAN_MAX_TACHO 12
+#define MLXREG_FAN_MAX_TACHO 14
+#define MLXREG_FAN_MAX_PWM 4
+#define MLXREG_FAN_PWM_NOT_CONNECTED 0xff
#define MLXREG_FAN_MAX_STATE 10
#define MLXREG_FAN_MIN_DUTY 51 /* 20% */
#define MLXREG_FAN_MAX_DUTY 255 /* 100% */
@@ -61,6 +63,8 @@
MLXREG_FAN_MAX_DUTY, \
MLXREG_FAN_MAX_STATE))
+struct mlxreg_fan;
+
/*
* struct mlxreg_fan_tacho - tachometer data (internal use):
*
@@ -79,12 +83,18 @@ struct mlxreg_fan_tacho {
/*
* struct mlxreg_fan_pwm - PWM data (internal use):
*
+ * @fan: private data;
* @connected: indicates if PWM is connected;
* @reg: register offset;
+ * @cooling: cooling device levels;
+ * @cdev: cooling device;
*/
struct mlxreg_fan_pwm {
+ struct mlxreg_fan *fan;
bool connected;
u32 reg;
+ u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
+ struct thermal_cooling_device *cdev;
};
/*
@@ -97,20 +107,16 @@ struct mlxreg_fan_pwm {
* @tachos_per_drwr - number of tachometers per drawer;
* @samples: minimum allowed samples per pulse;
* @divider: divider value for tachometer RPM calculation;
- * @cooling: cooling device levels;
- * @cdev: cooling device;
*/
struct mlxreg_fan {
struct device *dev;
void *regmap;
struct mlxreg_core_platform_data *pdata;
struct mlxreg_fan_tacho tacho[MLXREG_FAN_MAX_TACHO];
- struct mlxreg_fan_pwm pwm;
+ struct mlxreg_fan_pwm pwm[MLXREG_FAN_MAX_PWM];
int tachos_per_drwr;
int samples;
int divider;
- u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
- struct thermal_cooling_device *cdev;
};
static int
@@ -119,6 +125,7 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
{
struct mlxreg_fan *fan = dev_get_drvdata(dev);
struct mlxreg_fan_tacho *tacho;
+ struct mlxreg_fan_pwm *pwm;
u32 regval;
int err;
@@ -169,9 +176,10 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
case hwmon_pwm:
+ pwm = &fan->pwm[channel];
switch (attr) {
case hwmon_pwm_input:
- err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ err = regmap_read(fan->regmap, pwm->reg, &regval);
if (err)
return err;
@@ -195,6 +203,7 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long val)
{
struct mlxreg_fan *fan = dev_get_drvdata(dev);
+ struct mlxreg_fan_pwm *pwm;
switch (type) {
case hwmon_pwm:
@@ -203,7 +212,8 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
if (val < MLXREG_FAN_MIN_DUTY ||
val > MLXREG_FAN_MAX_DUTY)
return -EINVAL;
- return regmap_write(fan->regmap, fan->pwm.reg, val);
+ pwm = &fan->pwm[channel];
+ return regmap_write(fan->regmap, pwm->reg, val);
default:
return -EOPNOTSUPP;
}
@@ -235,7 +245,7 @@ mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
break;
case hwmon_pwm:
- if (!(((struct mlxreg_fan *)data)->pwm.connected))
+ if (!(((struct mlxreg_fan *)data)->pwm[channel].connected))
return 0;
switch (attr) {
@@ -253,6 +263,13 @@ mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
+static char *mlxreg_fan_name[] = {
+ "mlxreg_fan",
+ "mlxreg_fan1",
+ "mlxreg_fan2",
+ "mlxreg_fan3",
+};
+
static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_FAULT,
@@ -266,8 +283,13 @@ static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT),
HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
HWMON_PWM_INPUT),
NULL
};
@@ -294,11 +316,12 @@ static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- struct mlxreg_fan *fan = cdev->devdata;
+ struct mlxreg_fan_pwm *pwm = cdev->devdata;
+ struct mlxreg_fan *fan = pwm->fan;
u32 regval;
int err;
- err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ err = regmap_read(fan->regmap, pwm->reg, &regval);
if (err) {
dev_err(fan->dev, "Failed to query PWM duty\n");
return err;
@@ -313,7 +336,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
- struct mlxreg_fan *fan = cdev->devdata;
+ struct mlxreg_fan_pwm *pwm = cdev->devdata;
+ struct mlxreg_fan *fan = pwm->fan;
unsigned long cur_state;
int i, config = 0;
u32 regval;
@@ -337,11 +361,11 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
config = 1;
state -= MLXREG_FAN_MAX_STATE;
for (i = 0; i < state; i++)
- fan->cooling_levels[i] = state;
+ pwm->cooling_levels[i] = state;
for (i = state; i <= MLXREG_FAN_MAX_STATE; i++)
- fan->cooling_levels[i] = i;
+ pwm->cooling_levels[i] = i;
- err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ err = regmap_read(fan->regmap, pwm->reg, &regval);
if (err) {
dev_err(fan->dev, "Failed to query PWM duty\n");
return err;
@@ -358,8 +382,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
return -EINVAL;
/* Normalize the state to the valid speed range. */
- state = fan->cooling_levels[state];
- err = regmap_write(fan->regmap, fan->pwm.reg,
+ state = pwm->cooling_levels[state];
+ err = regmap_write(fan->regmap, pwm->reg,
MLXREG_FAN_PWM_STATE2DUTY(state));
if (err) {
dev_err(fan->dev, "Failed to write PWM duty\n");
@@ -390,6 +414,22 @@ static int mlxreg_fan_connect_verify(struct mlxreg_fan *fan,
return !!(regval & data->bit);
}
+static int mlxreg_pwm_connect_verify(struct mlxreg_fan *fan,
+ struct mlxreg_core_data *data)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, data->reg, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query pwm register 0x%08x\n",
+ data->reg);
+ return err;
+ }
+
+ return regval != MLXREG_FAN_PWM_NOT_CONNECTED;
+}
+
static int mlxreg_fan_speed_divider_get(struct mlxreg_fan *fan,
struct mlxreg_core_data *data)
{
@@ -418,8 +458,8 @@ static int mlxreg_fan_speed_divider_get(struct mlxreg_fan *fan,
static int mlxreg_fan_config(struct mlxreg_fan *fan,
struct mlxreg_core_platform_data *pdata)
{
+ int tacho_num = 0, tacho_avail = 0, pwm_num = 0, i;
struct mlxreg_core_data *data = pdata->data;
- int tacho_num = 0, tacho_avail = 0, i;
bool configured = false;
int err;
@@ -449,13 +489,24 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
fan->tacho[tacho_num++].connected = true;
tacho_avail++;
} else if (strnstr(data->label, "pwm", sizeof(data->label))) {
- if (fan->pwm.connected) {
- dev_err(fan->dev, "duplicate pwm entry: %s\n",
+ if (pwm_num == MLXREG_FAN_MAX_PWM) {
+ dev_err(fan->dev, "too many pwm entries: %s\n",
data->label);
return -EINVAL;
}
- fan->pwm.reg = data->reg;
- fan->pwm.connected = true;
+
+ /* Validate whether more than one PWM is connected. */
+ if (pwm_num) {
+ err = mlxreg_pwm_connect_verify(fan, data);
+ if (err < 0)
+ return err;
+ else if (!err)
+ continue;
+ }
+
+ fan->pwm[pwm_num].reg = data->reg;
+ fan->pwm[pwm_num].connected = true;
+ pwm_num++;
} else if (strnstr(data->label, "conf", sizeof(data->label))) {
if (configured) {
dev_err(fan->dev, "duplicate conf entry: %s\n",
@@ -508,11 +559,32 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
fan->tachos_per_drwr = tacho_avail / drwr_avail;
}
- /* Init cooling levels per PWM state. */
- for (i = 0; i < MLXREG_FAN_SPEED_MIN_LEVEL; i++)
- fan->cooling_levels[i] = MLXREG_FAN_SPEED_MIN_LEVEL;
- for (i = MLXREG_FAN_SPEED_MIN_LEVEL; i <= MLXREG_FAN_MAX_STATE; i++)
- fan->cooling_levels[i] = i;
+ return 0;
+}
+
+static int mlxreg_fan_cooling_config(struct device *dev, struct mlxreg_fan *fan)
+{
+ int i, j;
+
+ for (i = 0; i < MLXREG_FAN_MAX_PWM; i++) {
+ struct mlxreg_fan_pwm *pwm = &fan->pwm[i];
+
+ if (!pwm->connected)
+ continue;
+ pwm->fan = fan;
+ pwm->cdev = devm_thermal_of_cooling_device_register(dev, NULL, mlxreg_fan_name[i],
+ pwm, &mlxreg_fan_cooling_ops);
+ if (IS_ERR(pwm->cdev)) {
+ dev_err(dev, "Failed to register cooling device\n");
+ return PTR_ERR(pwm->cdev);
+ }
+
+ /* Init cooling levels per PWM state. */
+ for (j = 0; j < MLXREG_FAN_SPEED_MIN_LEVEL; j++)
+ pwm->cooling_levels[j] = MLXREG_FAN_SPEED_MIN_LEVEL;
+ for (j = MLXREG_FAN_SPEED_MIN_LEVEL; j <= MLXREG_FAN_MAX_STATE; j++)
+ pwm->cooling_levels[j] = j;
+ }
return 0;
}
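Each connected PWM now carries its own cooling_levels table, the same normalization the driver previously kept once globally: states below MLXREG_FAN_SPEED_MIN_LEVEL are clamped up so the cooling device can never select a duty below the driver's 20% floor. Assuming MLXREG_FAN_SPEED_MIN_LEVEL is 2 (its define sits outside this hunk), the table works out to:

/* state:           0  1  2  3  4  5  6  7  8  9  10
 * cooling_levels:  2  2  2  3  4  5  6  7  8  9  10
 * duty written = MLXREG_FAN_PWM_STATE2DUTY(level),
 * i.e. roughly level * 255 / 10 per the defines above. */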
@@ -551,16 +623,10 @@ static int mlxreg_fan_probe(struct platform_device *pdev)
return PTR_ERR(hwm);
}
- if (IS_REACHABLE(CONFIG_THERMAL)) {
- fan->cdev = devm_thermal_of_cooling_device_register(dev,
- NULL, "mlxreg_fan", fan, &mlxreg_fan_cooling_ops);
- if (IS_ERR(fan->cdev)) {
- dev_err(dev, "Failed to register cooling device\n");
- return PTR_ERR(fan->cdev);
- }
- }
+ if (IS_REACHABLE(CONFIG_THERMAL))
+ err = mlxreg_fan_cooling_config(dev, fan);
- return 0;
+ return err;
}
static struct platform_driver mlxreg_fan_driver = {
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 35f8635dc7f3..6a9f420e7d32 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -174,6 +174,7 @@ superio_exit(int ioreg)
#define NCT6683_CUSTOMER_ID_MITAC 0xa0e
#define NCT6683_CUSTOMER_ID_MSI 0x201
#define NCT6683_CUSTOMER_ID_ASROCK 0xe2c
+#define NCT6683_CUSTOMER_ID_ASROCK2 0xe1b
#define NCT6683_REG_BUILD_YEAR 0x604
#define NCT6683_REG_BUILD_MONTH 0x605
@@ -1221,6 +1222,8 @@ static int nct6683_probe(struct platform_device *pdev)
break;
case NCT6683_CUSTOMER_ID_ASROCK:
break;
+ case NCT6683_CUSTOMER_ID_ASROCK2:
+ break;
default:
if (!force)
return -ENODEV;
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 5bd15622a85f..93dca471972e 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -55,6 +55,7 @@
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/nospec.h>
+#include <linux/wmi.h>
#include "lm75.h"
#define USE_ALTERNATE
@@ -132,31 +133,135 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_ID_MASK 0xFFF8
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
+enum sensor_access { access_direct, access_asuswmi };
-static inline void
-superio_outb(int ioreg, int reg, int val)
+struct nct6775_sio_data {
+ int sioreg;
+ int ld;
+ enum kinds kind;
+ enum sensor_access access;
+
+ /* superio_() callbacks */
+ void (*sio_outb)(struct nct6775_sio_data *sio_data, int reg, int val);
+ int (*sio_inb)(struct nct6775_sio_data *sio_data, int reg);
+ void (*sio_select)(struct nct6775_sio_data *sio_data, int ld);
+ int (*sio_enter)(struct nct6775_sio_data *sio_data);
+ void (*sio_exit)(struct nct6775_sio_data *sio_data);
+};
+
+#define ASUSWMI_MONITORING_GUID "466747A0-70EC-11DE-8A39-0800200C9A66"
+#define ASUSWMI_METHODID_RSIO 0x5253494F
+#define ASUSWMI_METHODID_WSIO 0x5753494F
+#define ASUSWMI_METHODID_RHWM 0x5248574D
+#define ASUSWMI_METHODID_WHWM 0x5748574D
+#define ASUSWMI_UNSUPPORTED_METHOD 0xFFFFFFFE
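The four method IDs are ASCII four-character codes, so 0x5253494F spells 'R' 'S' 'I' 'O' from the top byte down. A hypothetical encoding macro (the driver hard-codes the constants):

/* 'R' = 0x52, 'S' = 0x53, 'I' = 0x49, 'O' = 0x4F -> 0x5253494F */
#define ASUSWMI_METHODID(a, b, c, d) \
	(((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | (u32)(d))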
+
+static int nct6775_asuswmi_evaluate_method(u32 method_id, u8 bank, u8 reg, u8 val, u32 *retval)
+{
+#if IS_ENABLED(CONFIG_ACPI_WMI)
+ u32 args = bank | (reg << 8) | (val << 16);
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ union acpi_object *obj;
+ u32 tmp = ASUSWMI_UNSUPPORTED_METHOD;
+
+ status = wmi_evaluate_method(ASUSWMI_MONITORING_GUID, 0,
+ method_id, &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ tmp = obj->integer.value;
+
+ if (retval)
+ *retval = tmp;
+
+ kfree(obj);
+
+ if (tmp == ASUSWMI_UNSUPPORTED_METHOD)
+ return -ENODEV;
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
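All three byte arguments ride to the firmware packed into a single 32-bit word, bank in the low byte, register next, value above that. The packing as a hypothetical helper:

static inline u32 asuswmi_pack_args(u8 bank, u8 reg, u8 val)
{
	/* e.g. bank 0x01, reg 0x58, val 0x00 -> 0x00005801 */
	return bank | (reg << 8) | (val << 16);
}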
+
+static inline int nct6775_asuswmi_write(u8 bank, u8 reg, u8 val)
+{
+ return nct6775_asuswmi_evaluate_method(ASUSWMI_METHODID_WHWM, bank,
+ reg, val, NULL);
+}
+
+static inline int nct6775_asuswmi_read(u8 bank, u8 reg, u8 *val)
+{
+ u32 ret, tmp = 0;
+
+ ret = nct6775_asuswmi_evaluate_method(ASUSWMI_METHODID_RHWM, bank,
+ reg, 0, &tmp);
+ *val = tmp;
+ return ret;
+}
+
+static int superio_wmi_inb(struct nct6775_sio_data *sio_data, int reg)
+{
+ u32 tmp = 0;
+
+ nct6775_asuswmi_evaluate_method(ASUSWMI_METHODID_RSIO, sio_data->ld,
+ reg, 0, &tmp);
+ return tmp;
+}
+
+static void superio_wmi_outb(struct nct6775_sio_data *sio_data, int reg, int val)
{
+ nct6775_asuswmi_evaluate_method(ASUSWMI_METHODID_WSIO, sio_data->ld,
+ reg, val, NULL);
+}
+
+static void superio_wmi_select(struct nct6775_sio_data *sio_data, int ld)
+{
+ sio_data->ld = ld;
+}
+
+static int superio_wmi_enter(struct nct6775_sio_data *sio_data)
+{
+ return 0;
+}
+
+static void superio_wmi_exit(struct nct6775_sio_data *sio_data)
+{
+}
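With the WMI shims in place, both access methods present the same five-entry callback set; presumably the probe path wires them according to sio_data->access, along these lines (sketch; the actual assignment happens later in the driver, outside this hunk):

if (sio_data->access == access_asuswmi) {
	sio_data->sio_outb   = superio_wmi_outb;
	sio_data->sio_inb    = superio_wmi_inb;
	sio_data->sio_select = superio_wmi_select;
	sio_data->sio_enter  = superio_wmi_enter;
	sio_data->sio_exit   = superio_wmi_exit;
} else {
	sio_data->sio_outb   = superio_outb;	/* raw port I/O below */
	sio_data->sio_inb    = superio_inb;
	sio_data->sio_select = superio_select;
	sio_data->sio_enter  = superio_enter;
	sio_data->sio_exit   = superio_exit;
}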
+
+static void superio_outb(struct nct6775_sio_data *sio_data, int reg, int val)
+{
+ int ioreg = sio_data->sioreg;
+
outb(reg, ioreg);
outb(val, ioreg + 1);
}
-static inline int
-superio_inb(int ioreg, int reg)
+static int superio_inb(struct nct6775_sio_data *sio_data, int reg)
{
+ int ioreg = sio_data->sioreg;
+
outb(reg, ioreg);
return inb(ioreg + 1);
}
-static inline void
-superio_select(int ioreg, int ld)
+static void superio_select(struct nct6775_sio_data *sio_data, int ld)
{
+ int ioreg = sio_data->sioreg;
+
outb(SIO_REG_LDSEL, ioreg);
outb(ld, ioreg + 1);
}
-static inline int
-superio_enter(int ioreg)
+static int superio_enter(struct nct6775_sio_data *sio_data)
{
+ int ioreg = sio_data->sioreg;
+
/*
* Try to reserve <ioreg> and <ioreg + 1> for exclusive access.
*/
@@ -169,9 +274,10 @@ superio_enter(int ioreg)
return 0;
}
-static inline void
-superio_exit(int ioreg)
+static void superio_exit(struct nct6775_sio_data *sio_data)
{
+ int ioreg = sio_data->sioreg;
+
outb(0xaa, ioreg);
outb(0x02, ioreg);
outb(0x02, ioreg + 1);
@@ -190,6 +296,7 @@ superio_exit(int ioreg)
#define NCT6775_REG_BANK 0x4E
#define NCT6775_REG_CONFIG 0x40
+#define NCT6775_PORT_CHIPID 0x58
/*
* Not currently used:
@@ -1215,11 +1322,10 @@ struct nct6775_data {
u8 fandiv1;
u8 fandiv2;
u8 sio_reg_enable;
-};
-struct nct6775_sio_data {
- int sioreg;
- enum kinds kind;
+ /* nct6775_*() callbacks */
+ u16 (*read_value)(struct nct6775_data *data, u16 reg);
+ int (*write_value)(struct nct6775_data *data, u16 reg, u16 value);
};
struct sensor_device_template {
@@ -1407,6 +1513,54 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
return false;
}
+static inline void nct6775_wmi_set_bank(struct nct6775_data *data, u16 reg)
+{
+ u8 bank = reg >> 8;
+
+ data->bank = bank;
+}
+
+static u16 nct6775_wmi_read_value(struct nct6775_data *data, u16 reg)
+{
+ int res, err, word_sized = is_word_sized(data, reg);
+ u8 tmp = 0;
+
+ nct6775_wmi_set_bank(data, reg);
+
+ err = nct6775_asuswmi_read(data->bank, reg, &tmp);
+ if (err)
+ return 0;
+
+ res = tmp;
+ if (word_sized) {
+ err = nct6775_asuswmi_read(data->bank, (reg & 0xff) + 1, &tmp);
+ if (err)
+ return 0;
+
+ res = (res << 8) + tmp;
+ }
+ return res;
+}
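For word-sized registers the WMI path issues two byte reads, high byte at the register offset and low byte one above, then splices them together; schematically (hypothetical helper, the function above open-codes it):

static inline u16 nct6775_wmi_word(u8 hi, u8 lo)
{
	return ((u16)hi << 8) | lo;	/* e.g. 0x03, 0xe8 -> 0x03e8 = 1000 */
}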
+
+static int nct6775_wmi_write_value(struct nct6775_data *data, u16 reg, u16 value)
+{
+ int res, word_sized = is_word_sized(data, reg);
+
+ nct6775_wmi_set_bank(data, reg);
+
+ if (word_sized) {
+ res = nct6775_asuswmi_write(data->bank, reg & 0xff, value >> 8);
+ if (res)
+ return res;
+
+ res = nct6775_asuswmi_write(data->bank, (reg & 0xff) + 1, value);
+ } else {
+ res = nct6775_asuswmi_write(data->bank, reg & 0xff, value);
+ }
+
+ return res;
+}
+
/*
* On older chips, only registers 0x50-0x5f are banked.
* On more recent chips, all registers are banked.
@@ -1459,7 +1613,7 @@ static u16 nct6775_read_temp(struct nct6775_data *data, u16 reg)
{
u16 res;
- res = nct6775_read_value(data, reg);
+ res = data->read_value(data, reg);
if (!is_word_sized(data, reg))
res <<= 8;
@@ -1470,7 +1624,7 @@ static int nct6775_write_temp(struct nct6775_data *data, u16 reg, u16 value)
{
if (!is_word_sized(data, reg))
value >>= 8;
- return nct6775_write_value(data, reg, value);
+ return data->write_value(data, reg, value);
}
/* This function assumes that the caller holds data->update_lock */
@@ -1480,24 +1634,24 @@ static void nct6775_write_fan_div(struct nct6775_data *data, int nr)
switch (nr) {
case 0:
- reg = (nct6775_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
+ reg = (data->read_value(data, NCT6775_REG_FANDIV1) & 0x70)
| (data->fan_div[0] & 0x7);
- nct6775_write_value(data, NCT6775_REG_FANDIV1, reg);
+ data->write_value(data, NCT6775_REG_FANDIV1, reg);
break;
case 1:
- reg = (nct6775_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
+ reg = (data->read_value(data, NCT6775_REG_FANDIV1) & 0x7)
| ((data->fan_div[1] << 4) & 0x70);
- nct6775_write_value(data, NCT6775_REG_FANDIV1, reg);
+ data->write_value(data, NCT6775_REG_FANDIV1, reg);
break;
case 2:
- reg = (nct6775_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
+ reg = (data->read_value(data, NCT6775_REG_FANDIV2) & 0x70)
| (data->fan_div[2] & 0x7);
- nct6775_write_value(data, NCT6775_REG_FANDIV2, reg);
+ data->write_value(data, NCT6775_REG_FANDIV2, reg);
break;
case 3:
- reg = (nct6775_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
+ reg = (data->read_value(data, NCT6775_REG_FANDIV2) & 0x7)
| ((data->fan_div[3] << 4) & 0x70);
- nct6775_write_value(data, NCT6775_REG_FANDIV2, reg);
+ data->write_value(data, NCT6775_REG_FANDIV2, reg);
break;
}
}
@@ -1512,10 +1666,10 @@ static void nct6775_update_fan_div(struct nct6775_data *data)
{
u8 i;
- i = nct6775_read_value(data, NCT6775_REG_FANDIV1);
+ i = data->read_value(data, NCT6775_REG_FANDIV1);
data->fan_div[0] = i & 0x7;
data->fan_div[1] = (i & 0x70) >> 4;
- i = nct6775_read_value(data, NCT6775_REG_FANDIV2);
+ i = data->read_value(data, NCT6775_REG_FANDIV2);
data->fan_div[2] = i & 0x7;
if (data->has_fan & BIT(3))
data->fan_div[3] = (i & 0x70) >> 4;
@@ -1563,11 +1717,11 @@ static void nct6775_init_fan_common(struct device *dev,
*/
for (i = 0; i < ARRAY_SIZE(data->fan_min); i++) {
if (data->has_fan_min & BIT(i)) {
- reg = nct6775_read_value(data, data->REG_FAN_MIN[i]);
+ reg = data->read_value(data, data->REG_FAN_MIN[i]);
if (!reg)
- nct6775_write_value(data, data->REG_FAN_MIN[i],
- data->has_fan_div ? 0xff
- : 0xff1f);
+ data->write_value(data, data->REG_FAN_MIN[i],
+ data->has_fan_div ? 0xff
+ : 0xff1f);
}
}
}
@@ -1611,8 +1765,8 @@ static void nct6775_select_fan_div(struct device *dev,
}
if (fan_min != data->fan_min[nr]) {
data->fan_min[nr] = fan_min;
- nct6775_write_value(data, data->REG_FAN_MIN[nr],
- fan_min);
+ data->write_value(data, data->REG_FAN_MIN[nr],
+ fan_min);
}
}
data->fan_div[nr] = fan_div;
@@ -1632,16 +1786,15 @@ static void nct6775_update_pwm(struct device *dev)
continue;
duty_is_dc = data->REG_PWM_MODE[i] &&
- (nct6775_read_value(data, data->REG_PWM_MODE[i])
+ (data->read_value(data, data->REG_PWM_MODE[i])
& data->PWM_MODE_MASK[i]);
data->pwm_mode[i] = !duty_is_dc;
- fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
+ fanmodecfg = data->read_value(data, data->REG_FAN_MODE[i]);
for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
if (data->REG_PWM[j] && data->REG_PWM[j][i]) {
- data->pwm[j][i]
- = nct6775_read_value(data,
- data->REG_PWM[j][i]);
+ data->pwm[j][i] = data->read_value(data,
+ data->REG_PWM[j][i]);
}
}
@@ -1656,17 +1809,17 @@ static void nct6775_update_pwm(struct device *dev)
u8 t = fanmodecfg & 0x0f;
if (data->REG_TOLERANCE_H) {
- t |= (nct6775_read_value(data,
+ t |= (data->read_value(data,
data->REG_TOLERANCE_H[i]) & 0x70) >> 1;
}
data->target_speed_tolerance[i] = t;
}
data->temp_tolerance[1][i] =
- nct6775_read_value(data,
- data->REG_CRITICAL_TEMP_TOLERANCE[i]);
+ data->read_value(data,
+ data->REG_CRITICAL_TEMP_TOLERANCE[i]);
- reg = nct6775_read_value(data, data->REG_TEMP_SEL[i]);
+ reg = data->read_value(data, data->REG_TEMP_SEL[i]);
data->pwm_temp_sel[i] = reg & 0x1f;
/* If fan can stop, report floor as 0 */
if (reg & 0x80)
@@ -1675,7 +1828,7 @@ static void nct6775_update_pwm(struct device *dev)
if (!data->REG_WEIGHT_TEMP_SEL[i])
continue;
- reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
+ reg = data->read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
data->pwm_weight_temp_sel[i] = reg & 0x1f;
/* If weight is disabled, report weight source as 0 */
if (!(reg & 0x80))
@@ -1683,9 +1836,8 @@ static void nct6775_update_pwm(struct device *dev)
/* Weight temp data */
for (j = 0; j < ARRAY_SIZE(data->weight_temp); j++) {
- data->weight_temp[j][i]
- = nct6775_read_value(data,
- data->REG_WEIGHT_TEMP[j][i]);
+ data->weight_temp[j][i] = data->read_value(data,
+ data->REG_WEIGHT_TEMP[j][i]);
}
}
}
@@ -1703,10 +1855,10 @@ static void nct6775_update_pwm_limits(struct device *dev)
for (j = 0; j < ARRAY_SIZE(data->fan_time); j++) {
data->fan_time[j][i] =
- nct6775_read_value(data, data->REG_FAN_TIME[j][i]);
+ data->read_value(data, data->REG_FAN_TIME[j][i]);
}
- reg_t = nct6775_read_value(data, data->REG_TARGET[i]);
+ reg_t = data->read_value(data, data->REG_TARGET[i]);
/* Update only in matching mode or if never updated */
if (!data->target_temp[i] ||
data->pwm_enable[i] == thermal_cruise)
@@ -1714,7 +1866,7 @@ static void nct6775_update_pwm_limits(struct device *dev)
if (!data->target_speed[i] ||
data->pwm_enable[i] == speed_cruise) {
if (data->REG_TOLERANCE_H) {
- reg_t |= (nct6775_read_value(data,
+ reg_t |= (data->read_value(data,
data->REG_TOLERANCE_H[i]) & 0x0f) << 8;
}
data->target_speed[i] = reg_t;
@@ -1722,21 +1874,21 @@ static void nct6775_update_pwm_limits(struct device *dev)
for (j = 0; j < data->auto_pwm_num; j++) {
data->auto_pwm[i][j] =
- nct6775_read_value(data,
- NCT6775_AUTO_PWM(data, i, j));
+ data->read_value(data,
+ NCT6775_AUTO_PWM(data, i, j));
data->auto_temp[i][j] =
- nct6775_read_value(data,
- NCT6775_AUTO_TEMP(data, i, j));
+ data->read_value(data,
+ NCT6775_AUTO_TEMP(data, i, j));
}
/* critical auto_pwm temperature data */
data->auto_temp[i][data->auto_pwm_num] =
- nct6775_read_value(data, data->REG_CRITICAL_TEMP[i]);
+ data->read_value(data, data->REG_CRITICAL_TEMP[i]);
switch (data->kind) {
case nct6775:
- reg = nct6775_read_value(data,
- NCT6775_REG_CRITICAL_ENAB[i]);
+ reg = data->read_value(data,
+ NCT6775_REG_CRITICAL_ENAB[i]);
data->auto_pwm[i][data->auto_pwm_num] =
(reg & 0x02) ? 0xff : 0x00;
break;
@@ -1753,10 +1905,10 @@ static void nct6775_update_pwm_limits(struct device *dev)
case nct6796:
case nct6797:
case nct6798:
- reg = nct6775_read_value(data,
+ reg = data->read_value(data,
data->REG_CRITICAL_PWM_ENABLE[i]);
if (reg & data->CRITICAL_PWM_ENABLE_MASK)
- reg = nct6775_read_value(data,
+ reg = data->read_value(data,
data->REG_CRITICAL_PWM[i]);
else
reg = 0xff;
@@ -1783,11 +1935,11 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (!(data->have_in & BIT(i)))
continue;
- data->in[i][0] = nct6775_read_value(data,
- data->REG_VIN[i]);
- data->in[i][1] = nct6775_read_value(data,
+ data->in[i][0] = data->read_value(data,
+ data->REG_VIN[i]);
+ data->in[i][1] = data->read_value(data,
data->REG_IN_MINMAX[0][i]);
- data->in[i][2] = nct6775_read_value(data,
+ data->in[i][2] = data->read_value(data,
data->REG_IN_MINMAX[1][i]);
}
@@ -1798,18 +1950,18 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (!(data->has_fan & BIT(i)))
continue;
- reg = nct6775_read_value(data, data->REG_FAN[i]);
+ reg = data->read_value(data, data->REG_FAN[i]);
data->rpm[i] = data->fan_from_reg(reg,
data->fan_div[i]);
if (data->has_fan_min & BIT(i))
- data->fan_min[i] = nct6775_read_value(data,
+ data->fan_min[i] = data->read_value(data,
data->REG_FAN_MIN[i]);
if (data->REG_FAN_PULSES[i]) {
data->fan_pulses[i] =
- (nct6775_read_value(data,
- data->REG_FAN_PULSES[i])
+ (data->read_value(data,
+ data->REG_FAN_PULSES[i])
>> data->FAN_PULSE_SHIFT[i]) & 0x03;
}
@@ -1825,15 +1977,14 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
continue;
for (j = 0; j < ARRAY_SIZE(data->reg_temp); j++) {
if (data->reg_temp[j][i])
- data->temp[j][i]
- = nct6775_read_temp(data,
- data->reg_temp[j][i]);
+ data->temp[j][i] = nct6775_read_temp(data,
+ data->reg_temp[j][i]);
}
if (i >= NUM_TEMP_FIXED ||
!(data->have_temp_fixed & BIT(i)))
continue;
- data->temp_offset[i]
- = nct6775_read_value(data, data->REG_TEMP_OFFSET[i]);
+ data->temp_offset[i] = data->read_value(data,
+ data->REG_TEMP_OFFSET[i]);
}
data->alarms = 0;
@@ -1842,7 +1993,7 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (!data->REG_ALARM[i])
continue;
- alarm = nct6775_read_value(data, data->REG_ALARM[i]);
+ alarm = data->read_value(data, data->REG_ALARM[i]);
data->alarms |= ((u64)alarm) << (i << 3);
}
@@ -1852,7 +2003,7 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (!data->REG_BEEP[i])
continue;
- beep = nct6775_read_value(data, data->REG_BEEP[i]);
+ beep = data->read_value(data, data->REG_BEEP[i]);
data->beeps |= ((u64)beep) << (i << 3);
}
@@ -1894,8 +2045,8 @@ store_in_reg(struct device *dev, struct device_attribute *attr, const char *buf,
return err;
mutex_lock(&data->update_lock);
data->in[nr][index] = in_to_reg(val, nr);
- nct6775_write_value(data, data->REG_IN_MINMAX[index - 1][nr],
- data->in[nr][index]);
+ data->write_value(data, data->REG_IN_MINMAX[index - 1][nr],
+ data->in[nr][index]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1919,8 +2070,8 @@ static int find_temp_source(struct nct6775_data *data, int index, int count)
for (nr = 0; nr < count; nr++) {
int src;
- src = nct6775_read_value(data,
- data->REG_TEMP_SOURCE[nr]) & 0x1f;
+ src = data->read_value(data,
+ data->REG_TEMP_SOURCE[nr]) & 0x1f;
if (src == source)
return nr;
}
@@ -1981,8 +2132,8 @@ store_beep(struct device *dev, struct device_attribute *attr, const char *buf,
data->beeps |= (1ULL << nr);
else
data->beeps &= ~(1ULL << nr);
- nct6775_write_value(data, data->REG_BEEP[regindex],
- (data->beeps >> (regindex << 3)) & 0xff);
+ data->write_value(data, data->REG_BEEP[regindex],
+ (data->beeps >> (regindex << 3)) & 0xff);
mutex_unlock(&data->update_lock);
return count;
}
@@ -2037,8 +2188,8 @@ store_temp_beep(struct device *dev, struct device_attribute *attr,
data->beeps |= (1ULL << bit);
else
data->beeps &= ~(1ULL << bit);
- nct6775_write_value(data, data->REG_BEEP[regindex],
- (data->beeps >> (regindex << 3)) & 0xff);
+ data->write_value(data, data->REG_BEEP[regindex],
+ (data->beeps >> (regindex << 3)) & 0xff);
mutex_unlock(&data->update_lock);
return count;
@@ -2205,7 +2356,7 @@ write_div:
}
write_min:
- nct6775_write_value(data, data->REG_FAN_MIN[nr], data->fan_min[nr]);
+ data->write_value(data, data->REG_FAN_MIN[nr], data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -2241,10 +2392,10 @@ store_fan_pulses(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->fan_pulses[nr] = val & 3;
- reg = nct6775_read_value(data, data->REG_FAN_PULSES[nr]);
+ reg = data->read_value(data, data->REG_FAN_PULSES[nr]);
reg &= ~(0x03 << data->FAN_PULSE_SHIFT[nr]);
reg |= (val & 3) << data->FAN_PULSE_SHIFT[nr];
- nct6775_write_value(data, data->REG_FAN_PULSES[nr], reg);
+ data->write_value(data, data->REG_FAN_PULSES[nr], reg);
mutex_unlock(&data->update_lock);
return count;
@@ -2378,7 +2529,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->temp_offset[nr] = val;
- nct6775_write_value(data, data->REG_TEMP_OFFSET[nr], val);
+ data->write_value(data, data->REG_TEMP_OFFSET[nr], val);
mutex_unlock(&data->update_lock);
return count;
@@ -2417,8 +2568,8 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
data->temp_type[nr] = val;
vbit = 0x02 << nr;
dbit = data->DIODE_MASK << nr;
- vbat = nct6775_read_value(data, data->REG_VBAT) & ~vbit;
- diode = nct6775_read_value(data, data->REG_DIODE) & ~dbit;
+ vbat = data->read_value(data, data->REG_VBAT) & ~vbit;
+ diode = data->read_value(data, data->REG_DIODE) & ~dbit;
switch (val) {
case 1: /* CPU diode (diode, current mode) */
vbat |= vbit;
@@ -2430,8 +2581,8 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
case 4: /* thermistor */
break;
}
- nct6775_write_value(data, data->REG_VBAT, vbat);
- nct6775_write_value(data, data->REG_DIODE, diode);
+ data->write_value(data, data->REG_VBAT, vbat);
+ data->write_value(data, data->REG_DIODE, diode);
mutex_unlock(&data->update_lock);
return count;
@@ -2555,11 +2706,11 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->pwm_mode[nr] = val;
- reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
+ reg = data->read_value(data, data->REG_PWM_MODE[nr]);
reg &= ~data->PWM_MODE_MASK[nr];
if (!val)
reg |= data->PWM_MODE_MASK[nr];
- nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
+ data->write_value(data, data->REG_PWM_MODE[nr], reg);
mutex_unlock(&data->update_lock);
return count;
}
@@ -2578,7 +2729,7 @@ show_pwm(struct device *dev, struct device_attribute *attr, char *buf)
* Otherwise, show the configured value.
*/
if (index == 0 && data->pwm_enable[nr] > manual)
- pwm = nct6775_read_value(data, data->REG_PWM_READ[nr]);
+ pwm = data->read_value(data, data->REG_PWM_READ[nr]);
else
pwm = data->pwm[index][nr];
@@ -2607,13 +2758,13 @@ store_pwm(struct device *dev, struct device_attribute *attr, const char *buf,
mutex_lock(&data->update_lock);
data->pwm[index][nr] = val;
- nct6775_write_value(data, data->REG_PWM[index][nr], val);
+ data->write_value(data, data->REG_PWM[index][nr], val);
if (index == 2) { /* floor: disable if val == 0 */
- reg = nct6775_read_value(data, data->REG_TEMP_SEL[nr]);
+ reg = data->read_value(data, data->REG_TEMP_SEL[nr]);
reg &= 0x7f;
if (val)
reg |= 0x80;
- nct6775_write_value(data, data->REG_TEMP_SEL[nr], reg);
+ data->write_value(data, data->REG_TEMP_SEL[nr], reg);
}
mutex_unlock(&data->update_lock);
return count;
@@ -2652,29 +2803,29 @@ static void pwm_update_registers(struct nct6775_data *data, int nr)
case manual:
break;
case speed_cruise:
- reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
+ reg = data->read_value(data, data->REG_FAN_MODE[nr]);
reg = (reg & ~data->tolerance_mask) |
(data->target_speed_tolerance[nr] & data->tolerance_mask);
- nct6775_write_value(data, data->REG_FAN_MODE[nr], reg);
- nct6775_write_value(data, data->REG_TARGET[nr],
+ data->write_value(data, data->REG_FAN_MODE[nr], reg);
+ data->write_value(data, data->REG_TARGET[nr],
data->target_speed[nr] & 0xff);
if (data->REG_TOLERANCE_H) {
reg = (data->target_speed[nr] >> 8) & 0x0f;
reg |= (data->target_speed_tolerance[nr] & 0x38) << 1;
- nct6775_write_value(data,
- data->REG_TOLERANCE_H[nr],
- reg);
+ data->write_value(data,
+ data->REG_TOLERANCE_H[nr],
+ reg);
}
break;
case thermal_cruise:
- nct6775_write_value(data, data->REG_TARGET[nr],
- data->target_temp[nr]);
+ data->write_value(data, data->REG_TARGET[nr],
+ data->target_temp[nr]);
fallthrough;
default:
- reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
+ reg = data->read_value(data, data->REG_FAN_MODE[nr]);
reg = (reg & ~data->tolerance_mask) |
data->temp_tolerance[0][nr];
- nct6775_write_value(data, data->REG_FAN_MODE[nr], reg);
+ data->write_value(data, data->REG_FAN_MODE[nr], reg);
break;
}
}
@@ -2722,13 +2873,13 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
* turn off pwm control: select manual mode, set pwm to maximum
*/
data->pwm[0][nr] = 255;
- nct6775_write_value(data, data->REG_PWM[0][nr], 255);
+ data->write_value(data, data->REG_PWM[0][nr], 255);
}
pwm_update_registers(data, nr);
- reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
+ reg = data->read_value(data, data->REG_FAN_MODE[nr]);
reg &= 0x0f;
reg |= pwm_enable_to_reg(val) << 4;
- nct6775_write_value(data, data->REG_FAN_MODE[nr], reg);
+ data->write_value(data, data->REG_FAN_MODE[nr], reg);
mutex_unlock(&data->update_lock);
return count;
}
@@ -2781,10 +2932,10 @@ store_pwm_temp_sel(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
src = data->temp_src[val - 1];
data->pwm_temp_sel[nr] = src;
- reg = nct6775_read_value(data, data->REG_TEMP_SEL[nr]);
+ reg = data->read_value(data, data->REG_TEMP_SEL[nr]);
reg &= 0xe0;
reg |= src;
- nct6775_write_value(data, data->REG_TEMP_SEL[nr], reg);
+ data->write_value(data, data->REG_TEMP_SEL[nr], reg);
mutex_unlock(&data->update_lock);
return count;
@@ -2826,15 +2977,15 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
if (val) {
src = data->temp_src[val - 1];
data->pwm_weight_temp_sel[nr] = src;
- reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[nr]);
+ reg = data->read_value(data, data->REG_WEIGHT_TEMP_SEL[nr]);
reg &= 0xe0;
reg |= (src | 0x80);
- nct6775_write_value(data, data->REG_WEIGHT_TEMP_SEL[nr], reg);
+ data->write_value(data, data->REG_WEIGHT_TEMP_SEL[nr], reg);
} else {
data->pwm_weight_temp_sel[nr] = 0;
- reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[nr]);
+ reg = data->read_value(data, data->REG_WEIGHT_TEMP_SEL[nr]);
reg &= 0x7f;
- nct6775_write_value(data, data->REG_WEIGHT_TEMP_SEL[nr], reg);
+ data->write_value(data, data->REG_WEIGHT_TEMP_SEL[nr], reg);
}
mutex_unlock(&data->update_lock);
@@ -2946,9 +3097,9 @@ store_temp_tolerance(struct device *dev, struct device_attribute *attr,
if (index)
pwm_update_registers(data, nr);
else
- nct6775_write_value(data,
- data->REG_CRITICAL_TEMP_TOLERANCE[nr],
- val);
+ data->write_value(data,
+ data->REG_CRITICAL_TEMP_TOLERANCE[nr],
+ val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -3071,7 +3222,7 @@ store_weight_temp(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->weight_temp[index][nr] = val;
- nct6775_write_value(data, data->REG_WEIGHT_TEMP[index][nr], val);
+ data->write_value(data, data->REG_WEIGHT_TEMP[index][nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -3120,7 +3271,7 @@ store_fan_time(struct device *dev, struct device_attribute *attr,
val = step_time_to_reg(val, data->pwm_mode[nr]);
mutex_lock(&data->update_lock);
data->fan_time[index][nr] = val;
- nct6775_write_value(data, data->REG_FAN_TIME[index][nr], val);
+ data->write_value(data, data->REG_FAN_TIME[index][nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -3162,21 +3313,21 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->auto_pwm[nr][point] = val;
if (point < data->auto_pwm_num) {
- nct6775_write_value(data,
+ data->write_value(data,
NCT6775_AUTO_PWM(data, nr, point),
data->auto_pwm[nr][point]);
} else {
switch (data->kind) {
case nct6775:
/* disable if needed (pwm == 0) */
- reg = nct6775_read_value(data,
- NCT6775_REG_CRITICAL_ENAB[nr]);
+ reg = data->read_value(data,
+ NCT6775_REG_CRITICAL_ENAB[nr]);
if (val)
reg |= 0x02;
else
reg &= ~0x02;
- nct6775_write_value(data, NCT6775_REG_CRITICAL_ENAB[nr],
- reg);
+ data->write_value(data, NCT6775_REG_CRITICAL_ENAB[nr],
+ reg);
break;
case nct6776:
break; /* always enabled, nothing to do */
@@ -3190,17 +3341,17 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
case nct6796:
case nct6797:
case nct6798:
- nct6775_write_value(data, data->REG_CRITICAL_PWM[nr],
+ data->write_value(data, data->REG_CRITICAL_PWM[nr],
val);
- reg = nct6775_read_value(data,
+ reg = data->read_value(data,
data->REG_CRITICAL_PWM_ENABLE[nr]);
if (val == 255)
reg &= ~data->CRITICAL_PWM_ENABLE_MASK;
else
reg |= data->CRITICAL_PWM_ENABLE_MASK;
- nct6775_write_value(data,
- data->REG_CRITICAL_PWM_ENABLE[nr],
- reg);
+ data->write_value(data,
+ data->REG_CRITICAL_PWM_ENABLE[nr],
+ reg);
break;
}
}
@@ -3243,11 +3394,11 @@ store_auto_temp(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->auto_temp[nr][point] = DIV_ROUND_CLOSEST(val, 1000);
if (point < data->auto_pwm_num) {
- nct6775_write_value(data,
+ data->write_value(data,
NCT6775_AUTO_TEMP(data, nr, point),
data->auto_temp[nr][point]);
} else {
- nct6775_write_value(data, data->REG_CRITICAL_TEMP[nr],
+ data->write_value(data, data->REG_CRITICAL_TEMP[nr],
data->auto_temp[nr][point]);
}
mutex_unlock(&data->update_lock);
@@ -3410,6 +3561,7 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nct6775_data *data = dev_get_drvdata(dev);
+ struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
int nr = to_sensor_dev_attr(attr)->index - INTRUSION_ALARM_BASE;
unsigned long val;
u8 reg;
@@ -3425,19 +3577,19 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
* The CR registers are the same for all chips, and not all chips
* support clearing the caseopen status through "regular" registers.
*/
- ret = superio_enter(data->sioreg);
+ ret = sio_data->sio_enter(sio_data);
if (ret) {
count = ret;
goto error;
}
- superio_select(data->sioreg, NCT6775_LD_ACPI);
- reg = superio_inb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr]);
+ sio_data->sio_select(sio_data, NCT6775_LD_ACPI);
+ reg = sio_data->sio_inb(sio_data, NCT6775_REG_CR_CASEOPEN_CLR[nr]);
reg |= NCT6775_CR_CASEOPEN_CLR_MASK[nr];
- superio_outb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
+ sio_data->sio_outb(sio_data, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
reg &= ~NCT6775_CR_CASEOPEN_CLR_MASK[nr];
- superio_outb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
- superio_exit(data->sioreg);
+ sio_data->sio_outb(sio_data, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
+ sio_data->sio_exit(sio_data);
data->valid = false; /* Force cache refresh */
error:
@@ -3506,9 +3658,9 @@ static inline void nct6775_init_device(struct nct6775_data *data)
/* Start monitoring if needed */
if (data->REG_CONFIG) {
- tmp = nct6775_read_value(data, data->REG_CONFIG);
+ tmp = data->read_value(data, data->REG_CONFIG);
if (!(tmp & 0x01))
- nct6775_write_value(data, data->REG_CONFIG, tmp | 0x01);
+ data->write_value(data, data->REG_CONFIG, tmp | 0x01);
}
/* Enable temperature sensors if needed */
@@ -3517,18 +3669,18 @@ static inline void nct6775_init_device(struct nct6775_data *data)
continue;
if (!data->reg_temp_config[i])
continue;
- tmp = nct6775_read_value(data, data->reg_temp_config[i]);
+ tmp = data->read_value(data, data->reg_temp_config[i]);
if (tmp & 0x01)
- nct6775_write_value(data, data->reg_temp_config[i],
+ data->write_value(data, data->reg_temp_config[i],
tmp & 0xfe);
}
/* Enable VBAT monitoring if needed */
- tmp = nct6775_read_value(data, data->REG_VBAT);
+ tmp = data->read_value(data, data->REG_VBAT);
if (!(tmp & 0x01))
- nct6775_write_value(data, data->REG_VBAT, tmp | 0x01);
+ data->write_value(data, data->REG_VBAT, tmp | 0x01);
- diode = nct6775_read_value(data, data->REG_DIODE);
+ diode = data->read_value(data, data->REG_DIODE);
for (i = 0; i < data->temp_fixed_num; i++) {
if (!(data->have_temp_fixed & BIT(i)))
@@ -3542,29 +3694,28 @@ static inline void nct6775_init_device(struct nct6775_data *data)
}
static void
-nct6775_check_fan_inputs(struct nct6775_data *data)
+nct6775_check_fan_inputs(struct nct6775_data *data, struct nct6775_sio_data *sio_data)
{
bool fan3pin = false, fan4pin = false, fan4min = false;
bool fan5pin = false, fan6pin = false, fan7pin = false;
bool pwm3pin = false, pwm4pin = false, pwm5pin = false;
bool pwm6pin = false, pwm7pin = false;
- int sioreg = data->sioreg;
/* Store SIO_REG_ENABLE for use during resume */
- superio_select(sioreg, NCT6775_LD_HWM);
- data->sio_reg_enable = superio_inb(sioreg, SIO_REG_ENABLE);
+ sio_data->sio_select(sio_data, NCT6775_LD_HWM);
+ data->sio_reg_enable = sio_data->sio_inb(sio_data, SIO_REG_ENABLE);
/* fan4 and fan5 share some pins with the GPIO and serial flash */
if (data->kind == nct6775) {
- int cr2c = superio_inb(sioreg, 0x2c);
+ int cr2c = sio_data->sio_inb(sio_data, 0x2c);
fan3pin = cr2c & BIT(6);
pwm3pin = cr2c & BIT(7);
/* On NCT6775, fan4 shares pins with the fdc interface */
- fan4pin = !(superio_inb(sioreg, 0x2A) & 0x80);
+ fan4pin = !(sio_data->sio_inb(sio_data, 0x2A) & 0x80);
} else if (data->kind == nct6776) {
- bool gpok = superio_inb(sioreg, 0x27) & 0x80;
+ bool gpok = sio_data->sio_inb(sio_data, 0x27) & 0x80;
const char *board_vendor, *board_name;
board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
@@ -3580,7 +3731,7 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
if (!strcmp(board_name, "Z77 Pro4-M")) {
if ((data->sio_reg_enable & 0xe0) != 0xe0) {
data->sio_reg_enable |= 0xe0;
- superio_outb(sioreg, SIO_REG_ENABLE,
+ sio_data->sio_outb(sio_data, SIO_REG_ENABLE,
data->sio_reg_enable);
}
}
@@ -3589,32 +3740,32 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
if (data->sio_reg_enable & 0x80)
fan3pin = gpok;
else
- fan3pin = !(superio_inb(sioreg, 0x24) & 0x40);
+ fan3pin = !(sio_data->sio_inb(sio_data, 0x24) & 0x40);
if (data->sio_reg_enable & 0x40)
fan4pin = gpok;
else
- fan4pin = superio_inb(sioreg, 0x1C) & 0x01;
+ fan4pin = sio_data->sio_inb(sio_data, 0x1C) & 0x01;
if (data->sio_reg_enable & 0x20)
fan5pin = gpok;
else
- fan5pin = superio_inb(sioreg, 0x1C) & 0x02;
+ fan5pin = sio_data->sio_inb(sio_data, 0x1C) & 0x02;
fan4min = fan4pin;
pwm3pin = fan3pin;
} else if (data->kind == nct6106) {
- int cr24 = superio_inb(sioreg, 0x24);
+ int cr24 = sio_data->sio_inb(sio_data, 0x24);
fan3pin = !(cr24 & 0x80);
pwm3pin = cr24 & 0x08;
} else if (data->kind == nct6116) {
- int cr1a = superio_inb(sioreg, 0x1a);
- int cr1b = superio_inb(sioreg, 0x1b);
- int cr24 = superio_inb(sioreg, 0x24);
- int cr2a = superio_inb(sioreg, 0x2a);
- int cr2b = superio_inb(sioreg, 0x2b);
- int cr2f = superio_inb(sioreg, 0x2f);
+ int cr1a = sio_data->sio_inb(sio_data, 0x1a);
+ int cr1b = sio_data->sio_inb(sio_data, 0x1b);
+ int cr24 = sio_data->sio_inb(sio_data, 0x24);
+ int cr2a = sio_data->sio_inb(sio_data, 0x2a);
+ int cr2b = sio_data->sio_inb(sio_data, 0x2b);
+ int cr2f = sio_data->sio_inb(sio_data, 0x2f);
fan3pin = !(cr2b & 0x10);
fan4pin = (cr2b & 0x80) || // pin 1(2)
@@ -3630,24 +3781,24 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
* NCT6779D, NCT6791D, NCT6792D, NCT6793D, NCT6795D, NCT6796D,
* NCT6797D, NCT6798D
*/
- int cr1a = superio_inb(sioreg, 0x1a);
- int cr1b = superio_inb(sioreg, 0x1b);
- int cr1c = superio_inb(sioreg, 0x1c);
- int cr1d = superio_inb(sioreg, 0x1d);
- int cr2a = superio_inb(sioreg, 0x2a);
- int cr2b = superio_inb(sioreg, 0x2b);
- int cr2d = superio_inb(sioreg, 0x2d);
- int cr2f = superio_inb(sioreg, 0x2f);
+ int cr1a = sio_data->sio_inb(sio_data, 0x1a);
+ int cr1b = sio_data->sio_inb(sio_data, 0x1b);
+ int cr1c = sio_data->sio_inb(sio_data, 0x1c);
+ int cr1d = sio_data->sio_inb(sio_data, 0x1d);
+ int cr2a = sio_data->sio_inb(sio_data, 0x2a);
+ int cr2b = sio_data->sio_inb(sio_data, 0x2b);
+ int cr2d = sio_data->sio_inb(sio_data, 0x2d);
+ int cr2f = sio_data->sio_inb(sio_data, 0x2f);
bool dsw_en = cr2f & BIT(3);
bool ddr4_en = cr2f & BIT(4);
int cre0;
int creb;
int cred;
- superio_select(sioreg, NCT6775_LD_12);
- cre0 = superio_inb(sioreg, 0xe0);
- creb = superio_inb(sioreg, 0xeb);
- cred = superio_inb(sioreg, 0xed);
+ sio_data->sio_select(sio_data, NCT6775_LD_12);
+ cre0 = sio_data->sio_inb(sio_data, 0xe0);
+ creb = sio_data->sio_inb(sio_data, 0xeb);
+ cred = sio_data->sio_inb(sio_data, 0xed);
fan3pin = !(cr1c & BIT(5));
fan4pin = !(cr1c & BIT(6));
@@ -3774,7 +3925,7 @@ static void add_temp_sensors(struct nct6775_data *data, const u16 *regp,
if (!regp[i])
continue;
- src = nct6775_read_value(data, regp[i]);
+ src = data->read_value(data, regp[i]);
src &= 0x1f;
if (!src || (*mask & BIT(src)))
continue;
@@ -3782,7 +3933,7 @@ static void add_temp_sensors(struct nct6775_data *data, const u16 *regp,
continue;
index = __ffs(*available);
- nct6775_write_value(data, data->REG_TEMP_SOURCE[index], src);
+ data->write_value(data, data->REG_TEMP_SOURCE[index], src);
*available &= ~BIT(index);
*mask |= BIT(src);
}
@@ -3805,10 +3956,12 @@ static int nct6775_probe(struct platform_device *pdev)
struct device *hwmon_dev;
int num_attr_groups = 0;
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
- DRVNAME))
- return -EBUSY;
+ if (sio_data->access == access_direct) {
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
+ DRVNAME))
+ return -EBUSY;
+ }
data = devm_kzalloc(&pdev->dev, sizeof(struct nct6775_data),
GFP_KERNEL);
@@ -3817,7 +3970,16 @@ static int nct6775_probe(struct platform_device *pdev)
data->kind = sio_data->kind;
data->sioreg = sio_data->sioreg;
- data->addr = res->start;
+
+ if (sio_data->access == access_direct) {
+ data->addr = res->start;
+ data->read_value = nct6775_read_value;
+ data->write_value = nct6775_write_value;
+ } else {
+ data->read_value = nct6775_wmi_read_value;
+ data->write_value = nct6775_wmi_write_value;
+ }
+
mutex_init(&data->update_lock);
data->name = nct6775_device_names[data->kind];
data->bank = 0xff; /* Force initial bank selection */
@@ -4337,7 +4499,7 @@ static int nct6775_probe(struct platform_device *pdev)
if (reg_temp[i] == 0)
continue;
- src = nct6775_read_value(data, data->REG_TEMP_SOURCE[i]) & 0x1f;
+ src = data->read_value(data, data->REG_TEMP_SOURCE[i]) & 0x1f;
if (!src || (mask & BIT(src)))
available |= BIT(i);
@@ -4357,7 +4519,7 @@ static int nct6775_probe(struct platform_device *pdev)
if (reg_temp[i] == 0)
continue;
- src = nct6775_read_value(data, data->REG_TEMP_SOURCE[i]) & 0x1f;
+ src = data->read_value(data, data->REG_TEMP_SOURCE[i]) & 0x1f;
if (!src || (mask & BIT(src)))
continue;
@@ -4417,7 +4579,7 @@ static int nct6775_probe(struct platform_device *pdev)
if (reg_temp_mon[i] == 0)
continue;
- src = nct6775_read_value(data, data->REG_TEMP_SEL[i]) & 0x1f;
+ src = data->read_value(data, data->REG_TEMP_SEL[i]) & 0x1f;
if (!src)
continue;
@@ -4502,11 +4664,11 @@ static int nct6775_probe(struct platform_device *pdev)
/* Initialize the chip */
nct6775_init_device(data);
- err = superio_enter(sio_data->sioreg);
+ err = sio_data->sio_enter(sio_data);
if (err)
return err;
- cr2a = superio_inb(sio_data->sioreg, 0x2a);
+ cr2a = sio_data->sio_inb(sio_data, 0x2a);
switch (data->kind) {
case nct6775:
data->have_vid = (cr2a & 0x40);
@@ -4532,17 +4694,17 @@ static int nct6775_probe(struct platform_device *pdev)
* We can get the VID input values directly from logical device D, register 0xe3.
*/
if (data->have_vid) {
- superio_select(sio_data->sioreg, NCT6775_LD_VID);
- data->vid = superio_inb(sio_data->sioreg, 0xe3);
+ sio_data->sio_select(sio_data, NCT6775_LD_VID);
+ data->vid = sio_data->sio_inb(sio_data, 0xe3);
data->vrm = vid_which_vrm();
}
if (fan_debounce) {
u8 tmp;
- superio_select(sio_data->sioreg, NCT6775_LD_HWM);
- tmp = superio_inb(sio_data->sioreg,
- NCT6775_REG_CR_FAN_DEBOUNCE);
+ sio_data->sio_select(sio_data, NCT6775_LD_HWM);
+ tmp = sio_data->sio_inb(sio_data,
+ NCT6775_REG_CR_FAN_DEBOUNCE);
switch (data->kind) {
case nct6106:
case nct6116:
@@ -4565,15 +4727,15 @@ static int nct6775_probe(struct platform_device *pdev)
tmp |= 0x7e;
break;
}
- superio_outb(sio_data->sioreg, NCT6775_REG_CR_FAN_DEBOUNCE,
+ sio_data->sio_outb(sio_data, NCT6775_REG_CR_FAN_DEBOUNCE,
tmp);
dev_info(&pdev->dev, "Enabled fan debounce for chip %s\n",
data->name);
}
- nct6775_check_fan_inputs(data);
+ nct6775_check_fan_inputs(data, sio_data);
- superio_exit(sio_data->sioreg);
+ sio_data->sio_exit(sio_data);
/* Read fan clock dividers immediately */
nct6775_init_fan_common(dev, data);
@@ -4613,15 +4775,15 @@ static int nct6775_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static void nct6791_enable_io_mapping(int sioaddr)
+static void nct6791_enable_io_mapping(struct nct6775_sio_data *sio_data)
{
int val;
- val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
+ val = sio_data->sio_inb(sio_data, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
if (val & 0x10) {
pr_info("Enabling hardware monitor logical device mappings.\n");
- superio_outb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
- val & ~0x10);
+ sio_data->sio_outb(sio_data, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
+ val & ~0x10);
}
}
@@ -4630,10 +4792,10 @@ static int __maybe_unused nct6775_suspend(struct device *dev)
struct nct6775_data *data = nct6775_update_device(dev);
mutex_lock(&data->update_lock);
- data->vbat = nct6775_read_value(data, data->REG_VBAT);
+ data->vbat = data->read_value(data, data->REG_VBAT);
if (data->kind == nct6775) {
- data->fandiv1 = nct6775_read_value(data, NCT6775_REG_FANDIV1);
- data->fandiv2 = nct6775_read_value(data, NCT6775_REG_FANDIV2);
+ data->fandiv1 = data->read_value(data, NCT6775_REG_FANDIV1);
+ data->fandiv2 = data->read_value(data, NCT6775_REG_FANDIV2);
}
mutex_unlock(&data->update_lock);
@@ -4643,47 +4805,47 @@ static int __maybe_unused nct6775_suspend(struct device *dev)
static int __maybe_unused nct6775_resume(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
- int sioreg = data->sioreg;
+ struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
int i, j, err = 0;
u8 reg;
mutex_lock(&data->update_lock);
data->bank = 0xff; /* Force initial bank selection */
- err = superio_enter(sioreg);
+ err = sio_data->sio_enter(sio_data);
if (err)
goto abort;
- superio_select(sioreg, NCT6775_LD_HWM);
- reg = superio_inb(sioreg, SIO_REG_ENABLE);
+ sio_data->sio_select(sio_data, NCT6775_LD_HWM);
+ reg = sio_data->sio_inb(sio_data, SIO_REG_ENABLE);
if (reg != data->sio_reg_enable)
- superio_outb(sioreg, SIO_REG_ENABLE, data->sio_reg_enable);
+ sio_data->sio_outb(sio_data, SIO_REG_ENABLE, data->sio_reg_enable);
if (data->kind == nct6791 || data->kind == nct6792 ||
data->kind == nct6793 || data->kind == nct6795 ||
data->kind == nct6796 || data->kind == nct6797 ||
data->kind == nct6798)
- nct6791_enable_io_mapping(sioreg);
+ nct6791_enable_io_mapping(sio_data);
- superio_exit(sioreg);
+ sio_data->sio_exit(sio_data);
/* Restore limits */
for (i = 0; i < data->in_num; i++) {
if (!(data->have_in & BIT(i)))
continue;
- nct6775_write_value(data, data->REG_IN_MINMAX[0][i],
- data->in[i][1]);
- nct6775_write_value(data, data->REG_IN_MINMAX[1][i],
- data->in[i][2]);
+ data->write_value(data, data->REG_IN_MINMAX[0][i],
+ data->in[i][1]);
+ data->write_value(data, data->REG_IN_MINMAX[1][i],
+ data->in[i][2]);
}
for (i = 0; i < ARRAY_SIZE(data->fan_min); i++) {
if (!(data->has_fan_min & BIT(i)))
continue;
- nct6775_write_value(data, data->REG_FAN_MIN[i],
- data->fan_min[i]);
+ data->write_value(data, data->REG_FAN_MIN[i],
+ data->fan_min[i]);
}
for (i = 0; i < NUM_TEMP; i++) {
@@ -4697,10 +4859,10 @@ static int __maybe_unused nct6775_resume(struct device *dev)
}
/* Restore other settings */
- nct6775_write_value(data, data->REG_VBAT, data->vbat);
+ data->write_value(data, data->REG_VBAT, data->vbat);
if (data->kind == nct6775) {
- nct6775_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
- nct6775_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
+ data->write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
+ data->write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
}
abort:
@@ -4728,12 +4890,15 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
int err;
int addr;
- err = superio_enter(sioaddr);
+ sio_data->access = access_direct;
+ sio_data->sioreg = sioaddr;
+
+ err = sio_data->sio_enter(sio_data);
if (err)
return err;
- val = (superio_inb(sioaddr, SIO_REG_DEVID) << 8) |
- superio_inb(sioaddr, SIO_REG_DEVID + 1);
+ val = (sio_data->sio_inb(sio_data, SIO_REG_DEVID) << 8) |
+ sio_data->sio_inb(sio_data, SIO_REG_DEVID + 1);
if (force_id && val != 0xffff)
val = force_id;
@@ -4777,38 +4942,37 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
- superio_exit(sioaddr);
+ sio_data->sio_exit(sio_data);
return -ENODEV;
}
/* We have a known chip, find the HWM I/O address */
- superio_select(sioaddr, NCT6775_LD_HWM);
- val = (superio_inb(sioaddr, SIO_REG_ADDR) << 8)
- | superio_inb(sioaddr, SIO_REG_ADDR + 1);
+ sio_data->sio_select(sio_data, NCT6775_LD_HWM);
+ val = (sio_data->sio_inb(sio_data, SIO_REG_ADDR) << 8)
+ | sio_data->sio_inb(sio_data, SIO_REG_ADDR + 1);
addr = val & IOREGION_ALIGNMENT;
if (addr == 0) {
pr_err("Refusing to enable a Super-I/O device with a base I/O port 0\n");
- superio_exit(sioaddr);
+ sio_data->sio_exit(sio_data);
return -ENODEV;
}
/* Activate logical device if needed */
- val = superio_inb(sioaddr, SIO_REG_ENABLE);
+ val = sio_data->sio_inb(sio_data, SIO_REG_ENABLE);
if (!(val & 0x01)) {
pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
- superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
+ sio_data->sio_outb(sio_data, SIO_REG_ENABLE, val | 0x01);
}
if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
sio_data->kind == nct6798)
- nct6791_enable_io_mapping(sioaddr);
+ nct6791_enable_io_mapping(sio_data);
- superio_exit(sioaddr);
+ sio_data->sio_exit(sio_data);
pr_info("Found %s or compatible chip at %#x:%#x\n",
nct6775_sio_names[sio_data->kind], sioaddr, addr);
- sio_data->sioreg = sioaddr;
return addr;
}
@@ -4821,6 +4985,34 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
*/
static struct platform_device *pdev[2];
+static const char * const asus_wmi_boards[] = {
+ "ProArt X570-CREATOR WIFI",
+ "Pro WS X570-ACE",
+ "PRIME B360-PLUS",
+ "PRIME B460-PLUS",
+ "PRIME X570-PRO",
+ "ROG CROSSHAIR VIII DARK HERO",
+ "ROG CROSSHAIR VIII FORMULA",
+ "ROG CROSSHAIR VIII HERO",
+ "ROG CROSSHAIR VIII IMPACT",
+ "ROG STRIX B550-E GAMING",
+ "ROG STRIX B550-F GAMING",
+ "ROG STRIX B550-F GAMING (WI-FI)",
+ "ROG STRIX B550-I GAMING",
+ "ROG STRIX X570-F GAMING",
+ "ROG STRIX Z390-E GAMING",
+ "ROG STRIX Z490-I GAMING",
+ "TUF GAMING B550M-PLUS",
+ "TUF GAMING B550M-PLUS (WI-FI)",
+ "TUF GAMING B550-PLUS",
+ "TUF GAMING B550-PRO",
+ "TUF GAMING X570-PLUS",
+ "TUF GAMING X570-PLUS (WI-FI)",
+ "TUF GAMING X570-PRO (WI-FI)",
+ "TUF GAMING Z490-PLUS",
+ "TUF GAMING Z490-PLUS (WI-FI)",
+};
+
static int __init sensors_nct6775_init(void)
{
int i, err;
@@ -4829,11 +5021,32 @@ static int __init sensors_nct6775_init(void)
struct resource res;
struct nct6775_sio_data sio_data;
int sioaddr[2] = { 0x2e, 0x4e };
+ enum sensor_access access = access_direct;
+ const char *board_vendor, *board_name;
+ u8 tmp;
err = platform_driver_register(&nct6775_driver);
if (err)
return err;
+ board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+ if (board_name && board_vendor &&
+ !strcmp(board_vendor, "ASUSTeK COMPUTER INC.")) {
+ err = match_string(asus_wmi_boards, ARRAY_SIZE(asus_wmi_boards),
+ board_name);
+ if (err >= 0) {
+ /* if reading chip id via WMI succeeds, use WMI */
+ if (!nct6775_asuswmi_read(0, NCT6775_PORT_CHIPID, &tmp)) {
+ pr_info("Using Asus WMI to access %#x chip.\n", tmp);
+ access = access_asuswmi;
+ } else {
+ pr_err("Can't read ChipID by Asus WMI.\n");
+ }
+ }
+ }
+
/*
* initialize sio_data->kind and sio_data->sioreg.
*
@@ -4842,12 +5055,28 @@ static int __init sensors_nct6775_init(void)
* nct6775 hardware monitor, and call probe()
*/
for (i = 0; i < ARRAY_SIZE(pdev); i++) {
+ sio_data.sio_outb = superio_outb;
+ sio_data.sio_inb = superio_inb;
+ sio_data.sio_select = superio_select;
+ sio_data.sio_enter = superio_enter;
+ sio_data.sio_exit = superio_exit;
+
address = nct6775_find(sioaddr[i], &sio_data);
if (address <= 0)
continue;
found = true;
+ sio_data.access = access;
+
+ if (access == access_asuswmi) {
+ sio_data.sio_outb = superio_wmi_outb;
+ sio_data.sio_inb = superio_wmi_inb;
+ sio_data.sio_select = superio_wmi_select;
+ sio_data.sio_enter = superio_wmi_enter;
+ sio_data.sio_exit = superio_wmi_exit;
+ }
+
pdev[i] = platform_device_alloc(DRVNAME, address);
if (!pdev[i]) {
err = -ENOMEM;
@@ -4859,23 +5088,25 @@ static int __init sensors_nct6775_init(void)
if (err)
goto exit_device_put;
- memset(&res, 0, sizeof(res));
- res.name = DRVNAME;
- res.start = address + IOREGION_OFFSET;
- res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
- res.flags = IORESOURCE_IO;
+ if (sio_data.access == access_direct) {
+ memset(&res, 0, sizeof(res));
+ res.name = DRVNAME;
+ res.start = address + IOREGION_OFFSET;
+ res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
+ res.flags = IORESOURCE_IO;
+
+ err = acpi_check_resource_conflict(&res);
+ if (err) {
+ platform_device_put(pdev[i]);
+ pdev[i] = NULL;
+ continue;
+ }
- err = acpi_check_resource_conflict(&res);
- if (err) {
- platform_device_put(pdev[i]);
- pdev[i] = NULL;
- continue;
+ err = platform_device_add_resources(pdev[i], &res, 1);
+ if (err)
+ goto exit_device_put;
}
- err = platform_device_add_resources(pdev[i], &res, 1);
- if (err)
- goto exit_device_put;
-
/* platform_device_add calls probe() */
err = platform_device_add(pdev[i]);
if (err)
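
The nct6775 hunks above route every register and Super-I/O access through function pointers (data->read_value/write_value, sio_data->sio_inb/outb/select/enter/exit), so one driver core can reach the chip either by direct port I/O or, on the listed ASUS boards, through the WMI firmware interface that owns the I/O region. A minimal standalone sketch of that dispatch pattern, with illustrative names rather than the driver's own:

#include <stdio.h>

/* Hypothetical stand-in for the patch's read_value/write_value hooks:
 * one core, two register-access back ends bound once at probe time. */
struct chip {
    int (*read_reg)(struct chip *c, unsigned int reg);
    void (*write_reg)(struct chip *c, unsigned int reg, int val);
    int shadow[256];    /* stands in for real port or WMI I/O */
};

static int direct_read(struct chip *c, unsigned int reg)
{
    return c->shadow[reg & 0xff];    /* would be inb()/outb() pairs */
}

static void direct_write(struct chip *c, unsigned int reg, int val)
{
    c->shadow[reg & 0xff] = val;
}

int main(void)
{
    struct chip c = { .read_reg = direct_read, .write_reg = direct_write };

    /* callers never know or care which back end is bound */
    c.write_reg(&c, 0x04, 0x55);
    printf("reg 0x04 = 0x%02x\n", c.read_reg(&c, 0x04));
    return 0;
}

Binding the back end once at probe keeps every hot-path caller branch-free.
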
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 604af2f6103a..d1eeef02b6dc 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -52,6 +52,23 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
#define REG_VERSION_ID 0xff
/*
+ * Resistance temperature detector (RTD) modes according to 7.2.32 Mode
+ * Selection Register
+ */
+#define RTD_MODE_CURRENT 0x1
+#define RTD_MODE_THERMISTOR 0x2
+#define RTD_MODE_VOLTAGE 0x3
+
+#define MODE_RTD_MASK 0x3
+#define MODE_LTD_EN 0x40
+
+/*
+ * Bit offset for sensor modes in REG_MODE.
+ * Valid for index 0..2, indicating RTD1..3.
+ */
+#define MODE_BIT_OFFSET_RTD(index) ((index) * 2)
+
+/*
* Data structures and manipulation thereof
*/
@@ -1038,7 +1055,114 @@ static const struct regmap_config nct7802_regmap_config = {
.volatile_reg = nct7802_regmap_is_volatile,
};
-static int nct7802_init_chip(struct nct7802_data *data)
+static int nct7802_get_channel_config(struct device *dev,
+ struct device_node *node, u8 *mode_mask,
+ u8 *mode_val)
+{
+ u32 reg;
+ const char *type_str, *md_str;
+ u8 md;
+
+ if (!node->name || of_node_cmp(node->name, "channel"))
+ return 0;
+
+ if (of_property_read_u32(node, "reg", &reg)) {
+ dev_err(dev, "Could not read reg value for '%s'\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ if (reg > 3) {
+ dev_err(dev, "Invalid reg (%u) in '%s'\n", reg,
+ node->full_name);
+ return -EINVAL;
+ }
+
+ if (reg == 0) {
+ if (!of_device_is_available(node))
+ *mode_val &= ~MODE_LTD_EN;
+ else
+ *mode_val |= MODE_LTD_EN;
+ *mode_mask |= MODE_LTD_EN;
+ return 0;
+ }
+
+ /* At this point we have reg >= 1 && reg <= 3 */
+
+ if (!of_device_is_available(node)) {
+ *mode_val &= ~(MODE_RTD_MASK << MODE_BIT_OFFSET_RTD(reg - 1));
+ *mode_mask |= MODE_RTD_MASK << MODE_BIT_OFFSET_RTD(reg - 1);
+ return 0;
+ }
+
+ if (of_property_read_string(node, "sensor-type", &type_str)) {
+ dev_err(dev, "No type for '%s'\n", node->full_name);
+ return -EINVAL;
+ }
+
+ if (!strcmp(type_str, "voltage")) {
+ *mode_val |= (RTD_MODE_VOLTAGE & MODE_RTD_MASK)
+ << MODE_BIT_OFFSET_RTD(reg - 1);
+ *mode_mask |= MODE_RTD_MASK << MODE_BIT_OFFSET_RTD(reg - 1);
+ return 0;
+ }
+
+ if (strcmp(type_str, "temperature")) {
+ dev_err(dev, "Invalid type '%s' for '%s'\n", type_str,
+ node->full_name);
+ return -EINVAL;
+ }
+
+ if (reg == 3) {
+ /* RTD3 only supports thermistor mode */
+ md = RTD_MODE_THERMISTOR;
+ } else {
+ if (of_property_read_string(node, "temperature-mode",
+ &md_str)) {
+ dev_err(dev, "No mode for '%s'\n", node->full_name);
+ return -EINVAL;
+ }
+
+ if (!strcmp(md_str, "thermal-diode"))
+ md = RTD_MODE_CURRENT;
+ else if (!strcmp(md_str, "thermistor"))
+ md = RTD_MODE_THERMISTOR;
+ else {
+ dev_err(dev, "Invalid mode '%s' for '%s'\n", md_str,
+ node->full_name);
+ return -EINVAL;
+ }
+ }
+
+ *mode_val |= (md & MODE_RTD_MASK) << MODE_BIT_OFFSET_RTD(reg - 1);
+ *mode_mask |= MODE_RTD_MASK << MODE_BIT_OFFSET_RTD(reg - 1);
+
+ return 0;
+}
+
+static int nct7802_configure_channels(struct device *dev,
+ struct nct7802_data *data)
+{
+ /* Enable local temperature sensor by default */
+ u8 mode_mask = MODE_LTD_EN, mode_val = MODE_LTD_EN;
+ struct device_node *node;
+ int err;
+
+ if (dev->of_node) {
+ for_each_child_of_node(dev->of_node, node) {
+ err = nct7802_get_channel_config(dev, node, &mode_mask,
+ &mode_val);
+ if (err) {
+ of_node_put(node);
+ return err;
+ }
+ }
+ }
+
+ return regmap_update_bits(data->regmap, REG_MODE, mode_mask, mode_val);
+}
+
+static int nct7802_init_chip(struct device *dev, struct nct7802_data *data)
{
int err;
@@ -1047,8 +1171,7 @@ static int nct7802_init_chip(struct nct7802_data *data)
if (err)
return err;
- /* Enable local temperature sensor */
- err = regmap_update_bits(data->regmap, REG_MODE, 0x40, 0x40);
+ err = nct7802_configure_channels(dev, data);
if (err)
return err;
@@ -1074,7 +1197,7 @@ static int nct7802_probe(struct i2c_client *client)
mutex_init(&data->access_lock);
mutex_init(&data->in_alarm_lock);
- ret = nct7802_init_chip(data);
+ ret = nct7802_init_chip(dev, data);
if (ret < 0)
return ret;
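
The nct7802 channel parser above builds a single (mode_mask, mode_val) pair for REG_MODE from device-tree "channel" nodes: each of RTD1..RTD3 occupies a two-bit mode field at bit offset 2 * (reg - 1), the local sensor enable is bit 6, and the result is applied in one regmap_update_bits() call. A worked example of the packing, reusing the patch's constants with a hypothetical channel layout:

#include <stdio.h>

/* Worked example of the REG_MODE packing used above: 2-bit RTD fields
 * at offset 2*(index), LTD enable at bit 6 (values from the patch). */
#define RTD_MODE_CURRENT        0x1
#define RTD_MODE_THERMISTOR     0x2
#define RTD_MODE_VOLTAGE        0x3
#define MODE_RTD_MASK           0x3
#define MODE_LTD_EN             0x40
#define MODE_BIT_OFFSET_RTD(i)  ((i) * 2)

int main(void)
{
    /* hypothetical board: RTD1 thermal diode, RTD2 disabled,
     * RTD3 thermistor, local sensor enabled */
    unsigned char val = MODE_LTD_EN;

    val |= RTD_MODE_CURRENT    << MODE_BIT_OFFSET_RTD(0);
    val |= RTD_MODE_THERMISTOR << MODE_BIT_OFFSET_RTD(2);

    printf("REG_MODE = 0x%02x\n", val);    /* 0x61 */
    return 0;
}
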
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index ae664613289c..0cb4a0a6cbc1 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -132,22 +132,20 @@ struct extended_sensor {
static int occ_poll(struct occ *occ)
{
int rc;
- u16 checksum = occ->poll_cmd_data + occ->seq_no + 1;
- u8 cmd[8];
+ u8 cmd[7];
struct occ_poll_response_header *header;
/* big endian */
- cmd[0] = occ->seq_no++; /* sequence number */
+ cmd[0] = 0; /* sequence number */
cmd[1] = 0; /* cmd type */
cmd[2] = 0; /* data length msb */
cmd[3] = 1; /* data length lsb */
cmd[4] = occ->poll_cmd_data; /* data */
- cmd[5] = checksum >> 8; /* checksum msb */
- cmd[6] = checksum & 0xFF; /* checksum lsb */
- cmd[7] = 0;
+ cmd[5] = 0; /* checksum msb */
+ cmd[6] = 0; /* checksum lsb */
/* mutex should already be locked if necessary */
- rc = occ->send_cmd(occ, cmd);
+ rc = occ->send_cmd(occ, cmd, sizeof(cmd));
if (rc) {
occ->last_error = rc;
if (occ->error_count++ > OCC_ERROR_COUNT_THRESHOLD)
@@ -184,25 +182,23 @@ static int occ_set_user_power_cap(struct occ *occ, u16 user_power_cap)
{
int rc;
u8 cmd[8];
- u16 checksum = 0x24;
__be16 user_power_cap_be = cpu_to_be16(user_power_cap);
- cmd[0] = 0;
- cmd[1] = 0x22;
- cmd[2] = 0;
- cmd[3] = 2;
+ cmd[0] = 0; /* sequence number */
+ cmd[1] = 0x22; /* cmd type */
+ cmd[2] = 0; /* data length msb */
+ cmd[3] = 2; /* data length lsb */
memcpy(&cmd[4], &user_power_cap_be, 2);
- checksum += cmd[4] + cmd[5];
- cmd[6] = checksum >> 8;
- cmd[7] = checksum & 0xFF;
+ cmd[6] = 0; /* checksum msb */
+ cmd[7] = 0; /* checksum lsb */
rc = mutex_lock_interruptible(&occ->lock);
if (rc)
return rc;
- rc = occ->send_cmd(occ, cmd);
+ rc = occ->send_cmd(occ, cmd, sizeof(cmd));
mutex_unlock(&occ->lock);
@@ -1144,8 +1140,6 @@ int occ_setup(struct occ *occ, const char *name)
{
int rc;
- /* start with 1 to avoid false match with zero-initialized SRAM buffer */
- occ->seq_no = 1;
mutex_init(&occ->lock);
occ->groups[0] = &occ->group;
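
The occ changes zero the sequence-number and checksum bytes and pass an explicit command length to send_cmd(); the rest of the commit is not shown here, but the implication is that the transport layer below now fills in those fields. For reference, the deleted poll code computed the checksum as the 16-bit sum of the preceding command bytes (poll_cmd_data + seq_no + 1, i.e. the sum of cmd[0..4]); a standalone reproduction with a hypothetical sequence number and poll byte:

#include <stdio.h>

/* Reproduction of the checksum rule the deleted lines implemented:
 * a 16-bit byte-sum over the command header and data. */
static unsigned short occ_cmd_checksum(const unsigned char *cmd, int n)
{
    unsigned short sum = 0;
    int i;

    for (i = 0; i < n; i++)
        sum += cmd[i];
    return sum;
}

int main(void)
{
    /* poll command as built above; seq_no 1 and poll byte 0x20 are
     * illustrative values, not taken from the driver */
    unsigned char cmd[7] = { 0x01, 0x00, 0x00, 0x01, 0x20 };
    unsigned short csum = occ_cmd_checksum(cmd, 5);

    cmd[5] = csum >> 8;
    cmd[6] = csum & 0xff;
    printf("checksum = 0x%04x\n", csum);    /* 0x0022 */
    return 0;
}
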
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index e6df719770e8..5020117be740 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -95,9 +95,8 @@ struct occ {
struct occ_sensors sensors;
int powr_sample_time_us; /* average power sample time */
- u8 seq_no;
u8 poll_cmd_data; /* to perform OCC poll command */
- int (*send_cmd)(struct occ *occ, u8 *cmd);
+ int (*send_cmd)(struct occ *occ, u8 *cmd, size_t len);
unsigned long next_update;
struct mutex lock; /* lock OCC access */
diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c
index 0cf8588be35a..9e61e1fb5142 100644
--- a/drivers/hwmon/occ/p8_i2c.c
+++ b/drivers/hwmon/occ/p8_i2c.c
@@ -97,18 +97,21 @@ static int p8_i2c_occ_putscom_u32(struct i2c_client *client, u32 address,
}
static int p8_i2c_occ_putscom_be(struct i2c_client *client, u32 address,
- u8 *data)
+ u8 *data, size_t len)
{
- __be32 data0, data1;
+ __be32 data0 = 0, data1 = 0;
- memcpy(&data0, data, 4);
- memcpy(&data1, data + 4, 4);
+ memcpy(&data0, data, min_t(size_t, len, 4));
+ if (len > 4) {
+ len -= 4;
+ memcpy(&data1, data + 4, min_t(size_t, len, 4));
+ }
return p8_i2c_occ_putscom_u32(client, address, be32_to_cpu(data0),
be32_to_cpu(data1));
}
-static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd)
+static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
{
int i, rc;
unsigned long start;
@@ -127,7 +130,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd)
return rc;
/* write command (expected to already be BE), we need bus-endian... */
- rc = p8_i2c_occ_putscom_be(client, OCB_DATA3, cmd);
+ rc = p8_i2c_occ_putscom_be(client, OCB_DATA3, cmd, len);
if (rc)
return rc;
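
p8_i2c_occ_putscom_be() used to copy a fixed eight bytes into its two 32-bit words no matter how short the command was; with the new length argument it zero-pads instead of reading past the caller's buffer. A standalone reproduction of the split-and-pad for the 7-byte poll command:

#include <stdio.h>
#include <string.h>

/* A command of up to 8 bytes is packed into two 32-bit words with the
 * tail zero-filled, mirroring the min_t() copies in the patch. */
int main(void)
{
    unsigned char cmd[7] = { 1, 2, 3, 4, 5, 6, 7 };
    unsigned char words[8] = { 0 };
    size_t len = sizeof(cmd);

    memcpy(words, cmd, len < 4 ? len : 4);
    if (len > 4)
        memcpy(words + 4, cmd + 4, (len - 4) < 4 ? (len - 4) : 4);

    for (int i = 0; i < 8; i++)
        printf("%02x ", words[i]);    /* 01 02 03 04 05 06 07 00 */
    printf("\n");
    return 0;
}
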
diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c
index f6387cc0b754..e50243580269 100644
--- a/drivers/hwmon/occ/p9_sbe.c
+++ b/drivers/hwmon/occ/p9_sbe.c
@@ -4,28 +4,96 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fsi-occ.h>
+#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
#include "common.h"
struct p9_sbe_occ {
struct occ occ;
+ bool sbe_error;
+ void *ffdc;
+ size_t ffdc_len;
+ size_t ffdc_size;
+ struct mutex sbe_error_lock; /* lock access to ffdc data */
struct device *sbe;
};
#define to_p9_sbe_occ(x) container_of((x), struct p9_sbe_occ, occ)
-static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd)
+static ssize_t ffdc_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *battr, char *buf, loff_t pos,
+ size_t count)
+{
+ ssize_t rc = 0;
+ struct occ *occ = dev_get_drvdata(kobj_to_dev(kobj));
+ struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ);
+
+ mutex_lock(&ctx->sbe_error_lock);
+ if (ctx->sbe_error) {
+ rc = memory_read_from_buffer(buf, count, &pos, ctx->ffdc,
+ ctx->ffdc_len);
+ if (pos >= ctx->ffdc_len)
+ ctx->sbe_error = false;
+ }
+ mutex_unlock(&ctx->sbe_error_lock);
+
+ return rc;
+}
+static BIN_ATTR_RO(ffdc, OCC_MAX_RESP_WORDS * 4);
+
+static bool p9_sbe_occ_save_ffdc(struct p9_sbe_occ *ctx, const void *resp,
+ size_t resp_len)
+{
+ bool notify = false;
+
+ mutex_lock(&ctx->sbe_error_lock);
+ if (!ctx->sbe_error) {
+ if (resp_len > ctx->ffdc_size) {
+ if (ctx->ffdc)
+ kvfree(ctx->ffdc);
+ ctx->ffdc = kvmalloc(resp_len, GFP_KERNEL);
+ if (!ctx->ffdc) {
+ ctx->ffdc_len = 0;
+ ctx->ffdc_size = 0;
+ goto done;
+ }
+
+ ctx->ffdc_size = resp_len;
+ }
+
+ notify = true;
+ ctx->sbe_error = true;
+ ctx->ffdc_len = resp_len;
+ memcpy(ctx->ffdc, resp, resp_len);
+ }
+
+done:
+ mutex_unlock(&ctx->sbe_error_lock);
+ return notify;
+}
+
+static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
{
struct occ_response *resp = &occ->resp;
struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ);
size_t resp_len = sizeof(*resp);
int rc;
- rc = fsi_occ_submit(ctx->sbe, cmd, 8, resp, &resp_len);
- if (rc < 0)
+ rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len);
+ if (rc < 0) {
+ if (resp_len) {
+ if (p9_sbe_occ_save_ffdc(ctx, resp, resp_len))
+ sysfs_notify(&occ->bus_dev->kobj, NULL,
+ bin_attr_ffdc.attr.name);
+ }
+
return rc;
+ }
switch (resp->return_status) {
case OCC_RESP_CMD_IN_PRG:
@@ -65,6 +133,8 @@ static int p9_sbe_occ_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
+ mutex_init(&ctx->sbe_error_lock);
+
ctx->sbe = pdev->dev.parent;
occ = &ctx->occ;
occ->bus_dev = &pdev->dev;
@@ -78,6 +148,15 @@ static int p9_sbe_occ_probe(struct platform_device *pdev)
if (rc == -ESHUTDOWN)
rc = -ENODEV; /* Host is shut down, don't spew errors */
+ if (!rc) {
+ rc = device_create_bin_file(occ->bus_dev, &bin_attr_ffdc);
+ if (rc) {
+ dev_warn(occ->bus_dev,
+ "failed to create SBE error ffdc file\n");
+ rc = 0;
+ }
+ }
+
return rc;
}
@@ -86,9 +165,14 @@ static int p9_sbe_occ_remove(struct platform_device *pdev)
struct occ *occ = platform_get_drvdata(pdev);
struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ);
+ device_remove_bin_file(occ->bus_dev, &bin_attr_ffdc);
+
ctx->sbe = NULL;
occ_shutdown(occ);
+ if (ctx->ffdc)
+ kvfree(ctx->ffdc);
+
return 0;
}
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 6a9ba23cd302..0828436a1f6c 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -178,7 +178,7 @@ struct pc87360_data {
struct device *hwmon_dev;
struct mutex lock;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
int address[3];
@@ -1673,7 +1673,7 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index 53f7d1418bc9..e3294a1a54bb 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -18,6 +18,7 @@
#include "pmbus.h"
+#define CFFPS_MFG_ID_CMD 0x99
#define CFFPS_FRU_CMD 0x9A
#define CFFPS_PN_CMD 0x9B
#define CFFPS_HEADER_CMD 0x9C
@@ -34,7 +35,7 @@
#define CFFPS_INPUT_HISTORY_SIZE 100
#define CFFPS_CCIN_REVISION GENMASK(7, 0)
-#define CFFPS_CCIN_REVISION_LEGACY 0xde
+#define CFFPS_CCIN_REVISION_LEGACY 0xde
#define CFFPS_CCIN_VERSION GENMASK(15, 8)
#define CFFPS_CCIN_VERSION_1 0x2b
#define CFFPS_CCIN_VERSION_2 0x2e
@@ -57,6 +58,7 @@
enum {
CFFPS_DEBUGFS_INPUT_HISTORY = 0,
+ CFFPS_DEBUGFS_MFG_ID,
CFFPS_DEBUGFS_FRU,
CFFPS_DEBUGFS_PN,
CFFPS_DEBUGFS_HEADER,
@@ -158,6 +160,9 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
switch (idx) {
case CFFPS_DEBUGFS_INPUT_HISTORY:
return ibm_cffps_read_input_history(psu, buf, count, ppos);
+ case CFFPS_DEBUGFS_MFG_ID:
+ cmd = CFFPS_MFG_ID_CMD;
+ break;
case CFFPS_DEBUGFS_FRU:
cmd = CFFPS_FRU_CMD;
break;
@@ -503,16 +508,27 @@ static int ibm_cffps_probe(struct i2c_client *client)
u16 ccin_revision = 0;
u16 ccin_version = CFFPS_CCIN_VERSION_1;
int ccin = i2c_smbus_read_word_swapped(client, CFFPS_CCIN_CMD);
+ char mfg_id[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
if (ccin > 0) {
ccin_revision = FIELD_GET(CFFPS_CCIN_REVISION, ccin);
ccin_version = FIELD_GET(CFFPS_CCIN_VERSION, ccin);
}
+ rc = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, mfg_id);
+ if (rc < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return rc;
+ }
+
switch (ccin_version) {
default:
case CFFPS_CCIN_VERSION_1:
- vs = cffps1;
+ if ((strncmp(mfg_id, "ACBE", 4) == 0) ||
+ (strncmp(mfg_id, "ARTE", 4) == 0))
+ vs = cffps1;
+ else
+ vs = cffps2;
break;
case CFFPS_CCIN_VERSION_2:
vs = cffps2;
@@ -564,6 +580,9 @@ static int ibm_cffps_probe(struct i2c_client *client)
debugfs_create_file("input_history", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_INPUT_HISTORY],
&ibm_cffps_fops);
+ debugfs_create_file("mfg_id", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_MFG_ID],
+ &ibm_cffps_fops);
debugfs_create_file("fru", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_FRU],
&ibm_cffps_fops);
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index d209e0afc2ca..8402b41520eb 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/log2.h>
+#include <linux/of_device.h>
#include "pmbus.h"
enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
@@ -51,26 +52,31 @@ struct __coeff {
#define PSC_CURRENT_IN_L (PSC_NUM_CLASSES)
#define PSC_POWER_L (PSC_NUM_CLASSES + 1)
-static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
+static const struct __coeff lm25066_coeff[][PSC_NUM_CLASSES + 2] = {
[lm25056] = {
[PSC_VOLTAGE_IN] = {
.m = 16296,
+ .b = 1343,
.R = -2,
},
[PSC_CURRENT_IN] = {
.m = 13797,
+ .b = -1833,
.R = -2,
},
[PSC_CURRENT_IN_L] = {
.m = 6726,
+ .b = -537,
.R = -2,
},
[PSC_POWER] = {
.m = 5501,
+ .b = -2908,
.R = -3,
},
[PSC_POWER_L] = {
.m = 26882,
+ .b = -5646,
.R = -4,
},
[PSC_TEMPERATURE] = {
@@ -82,26 +88,32 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
[lm25066] = {
[PSC_VOLTAGE_IN] = {
.m = 22070,
+ .b = -1800,
.R = -2,
},
[PSC_VOLTAGE_OUT] = {
.m = 22070,
+ .b = -1800,
.R = -2,
},
[PSC_CURRENT_IN] = {
.m = 13661,
+ .b = -5200,
.R = -2,
},
[PSC_CURRENT_IN_L] = {
- .m = 6852,
+ .m = 6854,
+ .b = -3100,
.R = -2,
},
[PSC_POWER] = {
.m = 736,
+ .b = -3300,
.R = -2,
},
[PSC_POWER_L] = {
.m = 369,
+ .b = -1900,
.R = -2,
},
[PSC_TEMPERATURE] = {
@@ -111,26 +123,32 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
[lm5064] = {
[PSC_VOLTAGE_IN] = {
.m = 4611,
+ .b = -642,
.R = -2,
},
[PSC_VOLTAGE_OUT] = {
.m = 4621,
+ .b = 423,
.R = -2,
},
[PSC_CURRENT_IN] = {
.m = 10742,
+ .b = 1552,
.R = -2,
},
[PSC_CURRENT_IN_L] = {
.m = 5456,
+ .b = 2118,
.R = -2,
},
[PSC_POWER] = {
.m = 1204,
+ .b = 8524,
.R = -3,
},
[PSC_POWER_L] = {
.m = 612,
+ .b = 11202,
.R = -3,
},
[PSC_TEMPERATURE] = {
@@ -140,26 +158,32 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
[lm5066] = {
[PSC_VOLTAGE_IN] = {
.m = 4587,
+ .b = -1200,
.R = -2,
},
[PSC_VOLTAGE_OUT] = {
.m = 4587,
+ .b = -2400,
.R = -2,
},
[PSC_CURRENT_IN] = {
.m = 10753,
+ .b = -1200,
.R = -2,
},
[PSC_CURRENT_IN_L] = {
.m = 5405,
+ .b = -600,
.R = -2,
},
[PSC_POWER] = {
.m = 1204,
+ .b = -6000,
.R = -3,
},
[PSC_POWER_L] = {
.m = 605,
+ .b = -8000,
.R = -3,
},
[PSC_TEMPERATURE] = {
@@ -211,8 +235,6 @@ struct lm25066_data {
#define to_lm25066_data(x) container_of(x, struct lm25066_data, info)
-static const struct i2c_device_id lm25066_id[];
-
static int lm25066_read_word_data(struct i2c_client *client, int page,
int phase, int reg)
{
@@ -413,12 +435,35 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
return ret;
}
+static const struct i2c_device_id lm25066_id[] = {
+ {"lm25056", lm25056},
+ {"lm25066", lm25066},
+ {"lm5064", lm5064},
+ {"lm5066", lm5066},
+ {"lm5066i", lm5066i},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm25066_id);
+
+static const struct of_device_id __maybe_unused lm25066_of_match[] = {
+ { .compatible = "ti,lm25056", .data = (void *)lm25056, },
+ { .compatible = "ti,lm25066", .data = (void *)lm25066, },
+ { .compatible = "ti,lm5064", .data = (void *)lm5064, },
+ { .compatible = "ti,lm5066", .data = (void *)lm5066, },
+ { .compatible = "ti,lm5066i", .data = (void *)lm5066i, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lm25066_of_match);
+
static int lm25066_probe(struct i2c_client *client)
{
int config;
+ u32 shunt;
struct lm25066_data *data;
struct pmbus_driver_info *info;
- struct __coeff *coeff;
+ const struct __coeff *coeff;
+ const struct of_device_id *of_id;
+ const struct i2c_device_id *i2c_id;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA))
@@ -433,7 +478,14 @@ static int lm25066_probe(struct i2c_client *client)
if (config < 0)
return config;
- data->id = i2c_match_id(lm25066_id, client)->driver_data;
+ i2c_id = i2c_match_id(lm25066_id, client);
+
+ of_id = of_match_device(lm25066_of_match, &client->dev);
+ if (of_id && (unsigned long)of_id->data != i2c_id->driver_data)
+ dev_notice(&client->dev, "Device mismatch: %s in device tree, %s detected\n",
+ of_id->name, i2c_id->name);
+
+ data->id = i2c_id->driver_data;
info = &data->info;
info->pages = 1;
@@ -483,25 +535,25 @@ static int lm25066_probe(struct i2c_client *client)
info->b[PSC_POWER] = coeff[PSC_POWER].b;
}
- return pmbus_do_probe(client, info);
-}
+ /*
+ * Values in the TI datasheets are normalized for a 1mOhm sense
+ * resistor; assume that unless DT specifies a value explicitly.
+ */
+ if (of_property_read_u32(client->dev.of_node, "shunt-resistor-micro-ohms", &shunt))
+ shunt = 1000;
-static const struct i2c_device_id lm25066_id[] = {
- {"lm25056", lm25056},
- {"lm25066", lm25066},
- {"lm5064", lm5064},
- {"lm5066", lm5066},
- {"lm5066i", lm5066i},
- { }
-};
+ info->m[PSC_CURRENT_IN] = info->m[PSC_CURRENT_IN] * shunt / 1000;
+ info->m[PSC_POWER] = info->m[PSC_POWER] * shunt / 1000;
-MODULE_DEVICE_TABLE(i2c, lm25066_id);
+ return pmbus_do_probe(client, info);
+}
/* This is the driver that will be inserted */
static struct i2c_driver lm25066_driver = {
.driver = {
.name = "lm25066",
- },
+ .of_match_table = of_match_ptr(lm25066_of_match),
+ },
.probe_new = lm25066_probe,
.id_table = lm25066_id,
};
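
lm25066_probe() now reads shunt-resistor-micro-ohms from the device tree and scales the current and power slope coefficients, which the TI datasheets normalize to a 1 mOhm sense resistor. A worked example using the lm25066 PSC_CURRENT_IN slope from the table above and a hypothetical 0.5 mOhm shunt:

#include <stdio.h>

/* Mirrors info->m = m * shunt / 1000 from lm25066_probe(); the slope
 * 13661 is the lm25066 PSC_CURRENT_IN value from the coefficient table. */
int main(void)
{
    long m_current_in = 13661;
    long shunt_uohm = 500;    /* hypothetical 0.5 mOhm sense resistor */

    printf("scaled m = %ld\n", m_current_in * shunt_uohm / 1000);    /* 6830 */
    return 0;
}
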
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index 805d396aa81b..573f53d52912 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -53,7 +53,7 @@ static void rpi_firmware_get_throttled(struct rpi_hwmon_data *data)
else
dev_info(data->hwmon_dev, "Voltage normalised\n");
- sysfs_notify(&data->hwmon_dev->kobj, NULL, "in0_lcrit_alarm");
+ hwmon_notify_event(data->hwmon_dev, hwmon_in, hwmon_in_lcrit_alarm, 0);
}
static void get_values_poll(struct work_struct *work)
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index a5cd4de36575..39ff1c9b1df5 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -56,7 +56,7 @@ struct sch5636_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[SCH5636_NO_INS];
u8 temp_val[SCH5636_NO_TEMPS];
@@ -140,7 +140,7 @@ static struct sch5636_data *sch5636_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
abort:
mutex_unlock(&data->update_lock);
return ret;
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 7d18ce5d3839..e23dbf287233 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -41,7 +41,7 @@ struct sht21 {
unsigned long last_update;
int temperature;
int humidity;
- char valid;
+ bool valid;
char eic[18];
};
@@ -105,7 +105,7 @@ static int sht21_update_measurements(struct device *dev)
goto out;
sht21->humidity = sht21_rh_ticks_to_per_cent_mille(ret);
sht21->last_update = jiffies;
- sht21->valid = 1;
+ sht21->valid = true;
}
out:
mutex_unlock(&sht21->lock);
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 0c6741f949f5..018cb5a7651f 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -172,7 +172,7 @@ struct sis5595_data {
struct mutex lock;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
char maxins; /* == 3 if temp enabled, otherwise == 4 */
u8 revision; /* Reg. value */
@@ -728,7 +728,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev)
sis5595_read_value(data, SIS5595_REG_ALARM1) |
(sis5595_read_value(data, SIS5595_REG_ALARM2) << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index 62906d9c4b86..8c4ed72e5d68 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -265,7 +265,7 @@ static struct smm665_data *smm665_update_device(struct device *dev)
data->adc[i] = val;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index f928b8d4ff48..c26d6eae0e4e 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -96,7 +96,7 @@ struct smsc47b397_data {
struct mutex update_lock;
unsigned long last_updated; /* in jiffies */
- int valid;
+ bool valid;
/* register values */
u16 fan[4];
@@ -137,7 +137,7 @@ static struct smsc47b397_data *smsc47b397_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
dev_dbg(dev, "... device update complete\n");
}
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 03a87aa2017a..a5db15c087ae 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -86,7 +86,7 @@ struct smsc47m192_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[8]; /* Register value */
@@ -157,7 +157,7 @@ static struct smsc47m192_data *smsc47m192_update_device(struct device *dev)
SMSC47M192_REG_ALARM2) << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index fde5e2d0825a..6a804f5036f4 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -62,7 +62,7 @@ struct thmc50_data {
enum chips type;
unsigned long last_updated; /* In jiffies */
char has_temp3; /* !=0 if it is ADM1022 in temp3 mode */
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
/* Register values */
s8 temp_input[3];
@@ -107,7 +107,7 @@ static struct thmc50_data *thmc50_update_device(struct device *dev)
data->alarms =
i2c_smbus_read_byte_data(client, THMC50_REG_INTR);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index a7e202cc8323..5cab4436aa77 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -51,51 +51,92 @@ static inline u8 tmp103_mc_to_reg(int val)
return DIV_ROUND_CLOSEST(val, 1000);
}
-static ssize_t tmp103_temp_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int tmp103_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
{
- struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct regmap *regmap = dev_get_drvdata(dev);
unsigned int regval;
- int ret;
+ int err, reg;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = TMP103_TEMP_REG;
+ break;
+ case hwmon_temp_min:
+ reg = TMP103_TLOW_REG;
+ break;
+ case hwmon_temp_max:
+ reg = TMP103_THIGH_REG;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- ret = regmap_read(regmap, sda->index, &regval);
- if (ret < 0)
- return ret;
+ err = regmap_read(regmap, reg, &regval);
+ if (err < 0)
+ return err;
+
+ *temp = tmp103_reg_to_mc(regval);
- return sprintf(buf, "%d\n", tmp103_reg_to_mc(regval));
+ return 0;
}
-static ssize_t tmp103_temp_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int tmp103_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long temp)
{
- struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct regmap *regmap = dev_get_drvdata(dev);
- long val;
- int ret;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
+ int reg;
+
+ switch (attr) {
+ case hwmon_temp_min:
+ reg = TMP103_TLOW_REG;
+ break;
+ case hwmon_temp_max:
+ reg = TMP103_THIGH_REG;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- val = clamp_val(val, -55000, 127000);
- ret = regmap_write(regmap, sda->index, tmp103_mc_to_reg(val));
- return ret ? ret : count;
+ temp = clamp_val(temp, -55000, 127000);
+ return regmap_write(regmap, reg, tmp103_mc_to_reg(temp));
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, tmp103_temp, TMP103_TEMP_REG);
+static umode_t tmp103_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ return 0644;
+ default:
+ return 0;
+ }
+}
-static SENSOR_DEVICE_ATTR_RW(temp1_min, tmp103_temp, TMP103_TLOW_REG);
+static const struct hwmon_channel_info *tmp103_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN),
+ NULL
+};
-static SENSOR_DEVICE_ATTR_RW(temp1_max, tmp103_temp, TMP103_THIGH_REG);
+static const struct hwmon_ops tmp103_hwmon_ops = {
+ .is_visible = tmp103_is_visible,
+ .read = tmp103_read,
+ .write = tmp103_write,
+};
-static struct attribute *tmp103_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- NULL
+static const struct hwmon_chip_info tmp103_chip_info = {
+ .ops = &tmp103_hwmon_ops,
+ .info = tmp103_info,
};
-ATTRIBUTE_GROUPS(tmp103);
static bool tmp103_regmap_is_volatile(struct device *dev, unsigned int reg)
{
@@ -130,8 +171,10 @@ static int tmp103_probe(struct i2c_client *client)
}
i2c_set_clientdata(client, regmap);
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- regmap, tmp103_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ regmap,
+ &tmp103_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
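
The tmp103 conversion swaps per-attribute sysfs handlers for the hwmon_device_register_with_info() callbacks: tmp103_read()/tmp103_write() map the hwmon attribute to a register and tmp103_is_visible() supplies the sysfs modes. The limit registers hold whole degrees Celsius, rounded from millidegrees by tmp103_mc_to_reg() as visible in the hunk context above; a standalone mirror of that rounding:

#include <stdio.h>

/* Mirror of tmp103_mc_to_reg(): limits arrive in millidegrees and are
 * stored as whole degrees, rounded the way DIV_ROUND_CLOSEST() rounds. */
static int mc_to_reg(int mc)
{
    return (mc + (mc >= 0 ? 500 : -500)) / 1000;
}

int main(void)
{
    int limit_mc = 26499;    /* hypothetical temp1_max write, millidegrees */

    printf("reg = %d\n", mc_to_reg(limit_mc));    /* 26 */
    return 0;
}
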
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index 9dc210b55e69..b31f4964f852 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -34,7 +34,7 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
0x4e, 0x4f, I2C_CLIENT_END };
-enum chips { tmp401, tmp411, tmp431, tmp432, tmp435, tmp461 };
+enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
/*
* The TMP401 registers, note some registers have different addresses for
@@ -56,7 +56,6 @@ static const u8 TMP401_TEMP_MSB_READ[7][2] = {
{ 0x20, 0x19 }, /* therm (crit) limit */
{ 0x30, 0x34 }, /* lowest */
{ 0x32, 0x36 }, /* highest */
- { 0, 0x11 }, /* offset */
};
static const u8 TMP401_TEMP_MSB_WRITE[7][2] = {
@@ -66,7 +65,6 @@ static const u8 TMP401_TEMP_MSB_WRITE[7][2] = {
{ 0x20, 0x19 }, /* therm (crit) limit */
{ 0x30, 0x34 }, /* lowest */
{ 0x32, 0x36 }, /* highest */
- { 0, 0x11 }, /* offset */
};
static const u8 TMP432_TEMP_MSB_READ[4][3] = {
@@ -123,7 +121,6 @@ static const struct i2c_device_id tmp401_id[] = {
{ "tmp431", tmp431 },
{ "tmp432", tmp432 },
{ "tmp435", tmp435 },
- { "tmp461", tmp461 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp401_id);
@@ -136,7 +133,7 @@ struct tmp401_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
enum chips kind;
@@ -267,7 +264,7 @@ static struct tmp401_data *tmp401_update_device(struct device *dev)
data->temp_crit_hyst = val;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
@@ -413,7 +410,7 @@ static ssize_t reset_temp_history_store(struct device *dev,
}
mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(client, TMP401_TEMP_MSB_WRITE[5][0], val);
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return count;
@@ -571,21 +568,6 @@ static const struct attribute_group tmp432_group = {
};
/*
- * Additional features of the TMP461 chip.
- * The TMP461 temperature offset for the remote channel.
- */
-static SENSOR_DEVICE_ATTR_2_RW(temp2_offset, temp, 6, 1);
-
-static struct attribute *tmp461_attributes[] = {
- &sensor_dev_attr_temp2_offset.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group tmp461_group = {
- .attrs = tmp461_attributes,
-};
-
-/*
* Begin non sysfs callback code (aka Real code)
*/
@@ -686,7 +668,7 @@ static int tmp401_detect(struct i2c_client *client,
static int tmp401_probe(struct i2c_client *client)
{
static const char * const names[] = {
- "TMP401", "TMP411", "TMP431", "TMP432", "TMP435", "TMP461"
+ "TMP401", "TMP411", "TMP431", "TMP432", "TMP435"
};
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -717,9 +699,6 @@ static int tmp401_probe(struct i2c_client *client)
if (data->kind == tmp432)
data->groups[groups++] = &tmp432_group;
- if (data->kind == tmp461)
- data->groups[groups++] = &tmp461_group;
-
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, data->groups);
if (IS_ERR(hwmon_dev))
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index b963a369c5ab..1fd8d41d90c8 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -29,15 +29,20 @@ static const unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f,
enum chips { tmp421, tmp422, tmp423, tmp441, tmp442 };
+#define MAX_CHANNELS 4
/* The TMP421 registers */
#define TMP421_STATUS_REG 0x08
#define TMP421_CONFIG_REG_1 0x09
+#define TMP421_CONFIG_REG_2 0x0A
+#define TMP421_CONFIG_REG_REN(x) (BIT(3 + (x)))
+#define TMP421_CONFIG_REG_REN_MASK GENMASK(6, 3)
#define TMP421_CONVERSION_RATE_REG 0x0B
+#define TMP421_N_FACTOR_REG_1 0x21
#define TMP421_MANUFACTURER_ID_REG 0xFE
#define TMP421_DEVICE_ID_REG 0xFF
-static const u8 TMP421_TEMP_MSB[4] = { 0x00, 0x01, 0x02, 0x03 };
-static const u8 TMP421_TEMP_LSB[4] = { 0x10, 0x11, 0x12, 0x13 };
+static const u8 TMP421_TEMP_MSB[MAX_CHANNELS] = { 0x00, 0x01, 0x02, 0x03 };
+static const u8 TMP421_TEMP_LSB[MAX_CHANNELS] = { 0x10, 0x11, 0x12, 0x13 };
/* Flags */
#define TMP421_CONFIG_SHUTDOWN 0x40
@@ -86,18 +91,24 @@ static const struct of_device_id __maybe_unused tmp421_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tmp421_of_match);
+struct tmp421_channel {
+ const char *label;
+ bool enabled;
+ s16 temp;
+};
+
struct tmp421_data {
struct i2c_client *client;
struct mutex update_lock;
- u32 temp_config[5];
+ u32 temp_config[MAX_CHANNELS + 1];
struct hwmon_channel_info temp_info;
const struct hwmon_channel_info *info[2];
struct hwmon_chip_info chip;
- char valid;
+ bool valid;
unsigned long last_updated;
unsigned long channels;
u8 config;
- s16 temp[4];
+ struct tmp421_channel channel[MAX_CHANNELS];
};
static int temp_from_raw(u16 reg, bool extended)
@@ -132,28 +143,56 @@ static int tmp421_update_device(struct tmp421_data *data)
ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]);
if (ret < 0)
goto exit;
- data->temp[i] = ret << 8;
+ data->channel[i].temp = ret << 8;
ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]);
if (ret < 0)
goto exit;
- data->temp[i] |= ret;
+ data->channel[i].temp |= ret;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
exit:
mutex_unlock(&data->update_lock);
if (ret < 0) {
- data->valid = 0;
+ data->valid = false;
return ret;
}
return 0;
}
+static int tmp421_enable_channels(struct tmp421_data *data)
+{
+ int err;
+ struct i2c_client *client = data->client;
+ struct device *dev = &client->dev;
+ int old = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_2);
+ int new, i;
+
+ if (old < 0) {
+ dev_err(dev, "error reading register, can't disable channels\n");
+ return old;
+ }
+
+ new = old & ~TMP421_CONFIG_REG_REN_MASK;
+ for (i = 0; i < data->channels; i++)
+ if (data->channel[i].enabled)
+ new |= TMP421_CONFIG_REG_REN(i);
+
+ if (new == old)
+ return 0;
+
+ err = i2c_smbus_write_byte_data(client, TMP421_CONFIG_REG_2, new);
+ if (err < 0)
+ dev_err(dev, "error writing register, can't disable channels\n");
+
+ return err;
+}
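A quick worked example of the read-modify-write that tmp421_enable_channels() performs; the register values are illustrative only. For a four-channel device with only channels 0 and 2 enabled:

	u8 old = 0x78;				/* all four REN bits set: GENMASK(6, 3) */
	u8 new = old & ~TMP421_CONFIG_REG_REN_MASK;	/* 0x00 */

	new |= TMP421_CONFIG_REG_REN(0);	/* BIT(3): local sensor */
	new |= TMP421_CONFIG_REG_REN(2);	/* BIT(5): second remote sensor */
	/* new == 0x28; only written back when it differs from old */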
+
static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
@@ -166,15 +205,22 @@ static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_temp_input:
- *val = temp_from_raw(tmp421->temp[channel],
+ if (!tmp421->channel[channel].enabled)
+ return -ENODATA;
+ *val = temp_from_raw(tmp421->channel[channel].temp,
tmp421->config & TMP421_CONFIG_RANGE);
return 0;
case hwmon_temp_fault:
+ if (!tmp421->channel[channel].enabled)
+ return -ENODATA;
/*
* Any of the OPEN or /PVLD bits indicates a hardware malfunction
* and the conversion result may be incorrect
*/
- *val = !!(tmp421->temp[channel] & 0x03);
+ *val = !!(tmp421->channel[channel].temp & 0x03);
+ return 0;
+ case hwmon_temp_enable:
+ *val = tmp421->channel[channel].enabled;
return 0;
default:
return -EOPNOTSUPP;
@@ -182,6 +228,34 @@ static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
}
+static int tmp421_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct tmp421_data *data = dev_get_drvdata(dev);
+
+ *str = data->channel[channel].label;
+
+ return 0;
+}
+
+static int tmp421_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct tmp421_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ data->channel[channel].enabled = val;
+ ret = tmp421_enable_channels(data);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -189,14 +263,19 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_temp_fault:
case hwmon_temp_input:
return 0444;
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_enable:
+ return 0644;
default:
return 0;
}
}
-static int tmp421_init_client(struct i2c_client *client)
+static int tmp421_init_client(struct tmp421_data *data)
{
int config, config_orig;
+ struct i2c_client *client = data->client;
/* Set the conversion rate to 2 Hz */
i2c_smbus_write_byte_data(client, TMP421_CONVERSION_RATE_REG, 0x05);
@@ -217,7 +296,7 @@ static int tmp421_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, TMP421_CONFIG_REG_1, config);
}
- return 0;
+ return tmp421_enable_channels(data);
}
static int tmp421_detect(struct i2c_client *client,
@@ -281,9 +360,78 @@ static int tmp421_detect(struct i2c_client *client,
return 0;
}
+static int tmp421_probe_child_from_dt(struct i2c_client *client,
+ struct device_node *child,
+ struct tmp421_data *data)
+
+{
+ struct device *dev = &client->dev;
+ u32 i;
+ s32 val;
+ int err;
+
+ err = of_property_read_u32(child, "reg", &i);
+ if (err) {
+ dev_err(dev, "missing reg property of %pOFn\n", child);
+ return err;
+ }
+
+ if (i >= data->channels) {
+ dev_err(dev, "invalid reg %d of %pOFn\n", i, child);
+ return -EINVAL;
+ }
+
+ of_property_read_string(child, "label", &data->channel[i].label);
+ if (data->channel[i].label)
+ data->temp_config[i] |= HWMON_T_LABEL;
+
+ data->channel[i].enabled = of_device_is_available(child);
+
+ err = of_property_read_s32(child, "ti,n-factor", &val);
+ if (!err) {
+ if (i == 0) {
+ dev_err(dev, "n-factor can't be set for internal channel\n");
+ return -EINVAL;
+ }
+
+ if (val > 127 || val < -128) {
+ dev_err(dev, "n-factor for channel %d invalid (%d)\n",
+ i, val);
+ return -EINVAL;
+ }
+ i2c_smbus_write_byte_data(client, TMP421_N_FACTOR_REG_1 + i - 1,
+ val);
+ }
+
+ return 0;
+}
+
+static int tmp421_probe_from_dt(struct i2c_client *client, struct tmp421_data *data)
+{
+ struct device *dev = &client->dev;
+ const struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int err;
+
+ for_each_child_of_node(np, child) {
+ if (strcmp(child->name, "channel"))
+ continue;
+
+ err = tmp421_probe_child_from_dt(client, child, data);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
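Assuming a binding that matches what this parser accepts, a sketch of one sensor child node (the node name must be "channel"; reg 0 is the local sensor, so "ti,n-factor" is only valid on remote channels):

	channel@1 {
		reg = <1>;
		label = "cpu";			/* optional; exposed via the hwmon label attribute */
		ti,n-factor = <(-10)>;		/* optional; must be within -128..127 */
		/* status = "disabled"; would start the channel disabled */
	};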
+
static const struct hwmon_ops tmp421_ops = {
.is_visible = tmp421_is_visible,
.read = tmp421_read,
+ .read_string = tmp421_read_string,
+ .write = tmp421_write,
};
static int tmp421_probe(struct i2c_client *client)
@@ -305,12 +453,18 @@ static int tmp421_probe(struct i2c_client *client)
data->channels = i2c_match_id(tmp421_id, client)->driver_data;
data->client = client;
- err = tmp421_init_client(client);
+ for (i = 0; i < data->channels; i++) {
+ data->temp_config[i] = HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_ENABLE;
+ data->channel[i].enabled = true;
+ }
+
+ err = tmp421_probe_from_dt(client, data);
if (err)
return err;
- for (i = 0; i < data->channels; i++)
- data->temp_config[i] = HWMON_T_INPUT | HWMON_T_FAULT;
+ err = tmp421_init_client(data);
+ if (err)
+ return err;
data->chip.ops = &tmp421_ops;
data->chip.info = data->info;
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index a2eddd2c2538..55634110c2f9 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -304,7 +304,7 @@ struct via686a_data {
const char *name;
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[5]; /* Register value */
@@ -800,7 +800,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
VIA686A_REG_ALARM1) |
(via686a_read_value(data, VIA686A_REG_ALARM2) << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 2fbdc532aed4..4a5e911d26eb 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -105,7 +105,7 @@ struct vt1211_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* Register values */
@@ -319,7 +319,7 @@ static struct vt1211_data *vt1211_update_device(struct device *dev)
vt1211_read8(data, VT1211_REG_ALARM1);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 6603727e15a0..03275ac8ba72 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -145,7 +145,7 @@ struct vt8231_data {
struct mutex update_lock;
struct device *hwmon_dev;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[6]; /* Register value */
@@ -929,7 +929,7 @@ static struct vt8231_data *vt8231_update_device(struct device *dev)
data->alarms &= ~0x80;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 705a59663d42..af89b32a93a5 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -320,7 +320,7 @@ struct w83627ehf_data {
const u16 *scale_in;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* Register values */
@@ -688,7 +688,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
W83627EHF_REG_CASEOPEN_DET);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -1099,7 +1099,7 @@ clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel,
reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR);
w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask);
w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg & ~mask);
- data->valid = 0; /* Force cache refresh */
+ data->valid = false; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return 0;
@@ -2004,7 +2004,7 @@ static int __maybe_unused w83627ehf_resume(struct device *dev)
w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
/* Force re-reading all values */
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return 0;
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index a07b97400cba..9be277156ed2 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -355,7 +355,7 @@ struct w83627hf_data {
enum chips type;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[9]; /* Register value */
@@ -448,7 +448,7 @@ static int w83627hf_resume(struct device *dev)
w83627hf_write_value(data, W83781D_REG_SCFG2, data->scfg2);
/* Force re-reading all values */
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return 0;
@@ -1905,7 +1905,7 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev)
w83627hf_read_value(data, W83781D_REG_BEEP_INTS1) |
w83627hf_read_value(data, W83781D_REG_BEEP_INTS3) << 16;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index ce8e2c10e854..b3579721265f 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -203,7 +203,7 @@ struct w83781d_data {
int isa_addr;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
struct i2c_client *lm75[2]; /* for secondary I2C addresses */
@@ -1554,7 +1554,7 @@ static struct w83781d_data *w83781d_update_device(struct device *dev)
W83781D_REG_BEEP_INTS3) << 16;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 3c1be2c11fdf..80a9a78d7ce9 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -270,7 +270,7 @@ struct w83791d_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* volts */
@@ -1596,7 +1596,7 @@ static struct w83791d_data *w83791d_update_device(struct device *dev)
<< 4;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 1f175f381350..31a1cdc30877 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -261,7 +261,7 @@ struct w83792d_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[9]; /* Register value */
@@ -740,7 +740,7 @@ intrusion0_alarm_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
reg = w83792d_read_value(client, W83792D_REG_CHASSIS_CLR);
w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, reg | 0x80);
- data->valid = 0; /* Force cache refresh */
+ data->valid = false; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
@@ -1589,7 +1589,7 @@ static struct w83792d_data *w83792d_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 1d2854de1cfc..0a65d164c8f0 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -204,7 +204,7 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
struct w83793_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned long last_nonvolatile; /* In jiffies, last time we update the
* nonvolatile registers
@@ -452,7 +452,7 @@ store_chassis_clear(struct device *dev,
mutex_lock(&data->update_lock);
reg = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
w83793_write_value(client, W83793_REG_CLR_CHASSIS, reg | 0x80);
- data->valid = 0; /* Force cache refresh */
+ data->valid = false; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
@@ -2077,7 +2077,7 @@ static struct w83793_data *w83793_update_device(struct device *dev)
data->vid[1] = w83793_read_value(client, W83793_REG_VID_INB);
w83793_update_nonvolatile(dev);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
END:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 621b05afa837..45b12c4287df 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -379,7 +379,7 @@ struct w83795_data {
u8 enable_beep;
u8 beeps[6]; /* Register value */
- char valid;
+ bool valid;
char valid_limits;
char valid_pwm_config;
};
@@ -684,7 +684,7 @@ static struct w83795_data *w83795_update_device(struct device *dev)
tmp & ~ALARM_CTRL_RTSACS);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
END:
mutex_unlock(&data->update_lock);
@@ -764,7 +764,7 @@ store_chassis_clear(struct device *dev,
/* Clear status and force cache refresh */
w83795_read(client, W83795_REG_ALARM(5));
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 656a77102ca6..a41f989d66e2 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -98,7 +98,7 @@ static struct i2c_driver w83l785ts_driver = {
struct w83l785ts_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
@@ -270,7 +270,7 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev)
W83L785TS_REG_TEMP_OVER, data->temp[1]);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 542afff1423b..11ba23c1af85 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -113,7 +113,7 @@ DIV_TO_REG(long val)
struct w83l786ng_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned long last_nonvolatile; /* In jiffies, last time we update the
* nonvolatile registers */
@@ -209,7 +209,7 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
data->tolerance[1] = (reg_tmp >> 4) & 0x0f;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 382ef0395d8e..30aae8642069 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -93,6 +93,7 @@ struct slimpro_resp_msg {
struct xgene_hwmon_dev {
struct device *dev;
struct mbox_chan *mbox_chan;
+ struct pcc_mbox_chan *pcc_chan;
struct mbox_client mbox_client;
int mbox_idx;
@@ -652,7 +653,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out_mbox_free;
}
} else {
- struct acpi_pcct_hw_reduced *cppc_ss;
+ struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
int version;
@@ -671,26 +672,16 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
cl->rx_callback = xgene_hwmon_pcc_rx_cb;
- ctx->mbox_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
- if (IS_ERR(ctx->mbox_chan)) {
+ pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
+ if (IS_ERR(pcc_chan)) {
dev_err(&pdev->dev,
"PPC channel request failed\n");
rc = -ENODEV;
goto out_mbox_free;
}
- /*
- * The PCC mailbox controller driver should
- * have parsed the PCCT (global table of all
- * PCC channels) and stored pointers to the
- * subspace communication region in con_priv.
- */
- cppc_ss = ctx->mbox_chan->con_priv;
- if (!cppc_ss) {
- dev_err(&pdev->dev, "PPC subspace not found\n");
- rc = -ENODEV;
- goto out;
- }
+ ctx->pcc_chan = pcc_chan;
+ ctx->mbox_chan = pcc_chan->mchan;
if (!ctx->mbox_chan->mbox->txdone_irq) {
dev_err(&pdev->dev, "PCC IRQ not supported\n");
@@ -702,16 +693,16 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
* This is the shared communication region
* for the OS and Platform to communicate over.
*/
- ctx->comm_base_addr = cppc_ss->base_address;
+ ctx->comm_base_addr = pcc_chan->shmem_base_addr;
if (ctx->comm_base_addr) {
if (version == XGENE_HWMON_V2)
ctx->pcc_comm_addr = (void __force *)ioremap(
ctx->comm_base_addr,
- cppc_ss->length);
+ pcc_chan->shmem_size);
else
ctx->pcc_comm_addr = memremap(
ctx->comm_base_addr,
- cppc_ss->length,
+ pcc_chan->shmem_size,
MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
@@ -727,11 +718,11 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
/*
- * cppc_ss->latency is just a Nominal value. In reality
+ * pcc_chan->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
*/
- ctx->usecs_lat = PCC_NUM_RETRIES * cppc_ss->latency;
+ ctx->usecs_lat = PCC_NUM_RETRIES * pcc_chan->latency;
}
ctx->hwmon_dev = hwmon_device_register_with_groups(ctx->dev,
@@ -757,7 +748,7 @@ out:
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
out_mbox_free:
kfifo_free(&ctx->async_msg_fifo);
@@ -773,7 +764,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
return 0;
}
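The conversion above follows the reworked pcc_mbox_request_channel() contract: it now hands back a struct pcc_mbox_chan rather than a bare mbox_chan, and the shared-memory parameters that were previously dug out of con_priv come from that structure. A condensed sketch of the consumer pattern, as used here (error handling trimmed to the essentials):

	static void *shmem;

	static int example_pcc_init(struct mbox_client *cl, int subspace_id)
	{
		struct pcc_mbox_chan *pchan;

		pchan = pcc_mbox_request_channel(cl, subspace_id);
		if (IS_ERR(pchan))
			return PTR_ERR(pchan);

		/* shared-memory parameters now live on the PCC channel */
		shmem = memremap(pchan->shmem_base_addr, pchan->shmem_size,
				 MEMREMAP_WB);
		if (!shmem) {
			pcc_mbox_free_channel(pchan);	/* not mbox_free_channel() */
			return -ENOMEM;
		}
		return 0;
	}

Note that pchan->latency is only a nominal figure, which is why the driver multiplies it by PCC_NUM_RETRIES before using it as a timeout.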
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index f026e5c0e777..514a9b8086e3 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -150,6 +150,19 @@ config CORESIGHT_CPU_DEBUG
To compile this driver as a module, choose M here: the
module will be called coresight-cpu-debug.
+config CORESIGHT_CPU_DEBUG_DEFAULT_ON
+ bool "Enable CoreSight CPU Debug by default"
+ depends on CORESIGHT_CPU_DEBUG
+ help
+ Say Y here to enable the CoreSight Debug panic-debug by default. This
+ can also be enabled via debugfs, but this ensures the debug feature
+ is enabled as early as possible.
+
+ Has the same effect as setting coresight_cpu_debug.enable=1 on the
+ kernel command line.
+
+ Say N if unsure.
+
config CORESIGHT_CTI
tristate "CoreSight Cross Trigger Interface (CTI) driver"
depends on ARM || ARM64
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 00de46565bc4..8845ec4b4402 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -105,7 +105,7 @@ static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
static int debug_count;
static struct dentry *debug_debugfs_dir;
-static bool debug_enable;
+static bool debug_enable = IS_ENABLED(CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON);
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index e2a3620cbf48..8988b2ed2ea6 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -175,7 +175,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
spin_unlock(&drvdata->spinlock);
- pm_runtime_put(dev);
+ pm_runtime_put(dev->parent);
return 0;
/* not disabled this call */
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index f775cbee12b8..efa39820acec 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -557,9 +557,8 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
/*
* In snapshot mode we simply increment the head by the number of bytes
- * that were written. User space function cs_etm_find_snapshot() will
- * figure out how many bytes to get from the AUX buffer based on the
- * position of the head.
+ * that were written. User space will figure out how many bytes to get
+ * from the AUX buffer based on the position of the head.
*/
if (buf->snapshot)
handle->head += to_read;
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 8ebd728d3a80..c039b6ae206f 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -452,9 +452,14 @@ static void etm_event_start(struct perf_event *event, int flags)
* sink from this ETM. We can't do much in this case if
* the sink was specified or hinted to the driver. For
* now, simply don't record anything on this ETM.
+ *
+ * As such we pretend that everything is fine, and let
+ * it continue without actually tracing. The event could
+ * continue tracing when it moves to a CPU from which a
+ * sink is reachable.
*/
if (!cpumask_test_cpu(cpu, &event_data->mask))
- goto fail_end_stop;
+ goto out;
path = etm_event_cpu_path(event_data, cpu);
/* We need a sink, no need to continue without one */
@@ -466,26 +471,32 @@ static void etm_event_start(struct perf_event *event, int flags)
if (coresight_enable_path(path, CS_MODE_PERF, handle))
goto fail_end_stop;
- /* Tell the perf core the event is alive */
- event->hw.state = 0;
-
/* Finally enable the tracer */
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
goto fail_disable_path;
+out:
+ /* Tell the perf core the event is alive */
+ event->hw.state = 0;
/* Save the event_data for this ETM */
ctxt->event_data = event_data;
-out:
return;
fail_disable_path:
coresight_disable_path(path);
fail_end_stop:
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
- perf_aux_output_end(handle, 0);
+ /*
+ * Check if the handle is still associated with the event,
+ * to handle the case where the sink failed to start the
+ * trace and has already TRUNCATED the handle.
+ */
+ if (READ_ONCE(handle->event)) {
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ perf_aux_output_end(handle, 0);
+ }
fail:
event->hw.state = PERF_HES_STOPPED;
- goto out;
+ return;
}
static void etm_event_stop(struct perf_event *event, int mode)
@@ -517,6 +528,19 @@ static void etm_event_stop(struct perf_event *event, int mode)
if (WARN_ON(!event_data))
return;
+ /*
+ * Check if this ETM was allowed to trace, as decided at
+ * etm_setup_aux(). If it wasn't allowed to trace, then
+ * nothing needs to be torn down other than outputting a
+ * zero sized record.
+ */
+ if (handle->event && (mode & PERF_EF_UPDATE) &&
+ !cpumask_test_cpu(cpu, &event_data->mask)) {
+ event->hw.state = PERF_HES_STOPPED;
+ perf_aux_output_end(handle, 0);
+ return;
+ }
+
if (!csdev)
return;
@@ -550,7 +574,21 @@ static void etm_event_stop(struct perf_event *event, int mode)
size = sink_ops(sink)->update_buffer(sink, handle,
event_data->snk_config);
- perf_aux_output_end(handle, size);
+ /*
+ * Make sure the handle is still valid as the
+ * sink could have closed it from an IRQ.
+ * The sink driver must handle the race between
+ * update_buffer() and the IRQ. Thus we should
+ * either get a valid handle and a valid size
+ * (which may be 0), or an invalid handle.
+ *
+ * But we should never get a non-zero size with
+ * an invalid handle.
+ */
+ if (READ_ONCE(handle->event))
+ perf_aux_output_end(handle, size);
+ else
+ WARN_ON(size);
}
/* Disabling the path make its elements available to other sessions */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index e24252eaf8e4..86a313857b58 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -40,6 +40,7 @@
#include "coresight-etm4x.h"
#include "coresight-etm-perf.h"
#include "coresight-etm4x-cfg.h"
+#include "coresight-self-hosted-trace.h"
#include "coresight-syscfg.h"
static int boot_enable;
@@ -238,6 +239,45 @@ struct etm4_enable_arg {
int rc;
};
+/*
+ * etm4x_prohibit_trace - Prohibit the CPU from tracing at all ELs.
+ * When the CPU supports FEAT_TRF, we can move the ETM to a trace-
+ * prohibited state by filtering the Exception levels via TRFCR_EL1.
+ */
+static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
+{
+ /* If the CPU doesn't support FEAT_TRF, nothing to do */
+ if (!drvdata->trfcr)
+ return;
+ cpu_prohibit_trace();
+}
+
+/*
+ * etm4x_allow_trace - Allow CPU tracing in the respective ELs,
+ * as configured by the drvdata->config.mode for the current
+ * session. Even though we have TRCVICTLR bits to filter the
+ * trace in the ELs, it doesn't prevent the ETM from generating
+ * a packet (e.g., TraceInfo) that might contain the addresses from
+ * the excluded levels. Thus we use the additional controls provided
+ * via the Trace Filtering controls (FEAT_TRF) to make sure no trace
+ * is generated for the excluded ELs.
+ */
+static void etm4x_allow_trace(struct etmv4_drvdata *drvdata)
+{
+ u64 trfcr = drvdata->trfcr;
+
+ /* If the CPU doesn't support FEAT_TRF, nothing to do */
+ if (!trfcr)
+ return;
+
+ if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
+ trfcr &= ~TRFCR_ELx_ExTRE;
+ if (drvdata->config.mode & ETM_MODE_EXCL_USER)
+ trfcr &= ~TRFCR_ELx_E0TRE;
+
+ write_trfcr(trfcr);
+}
+
#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
#define HISI_HIP08_AMBA_ID 0x000b6d01
@@ -442,6 +482,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
if (etm4x_is_ete(drvdata))
etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);
+ etm4x_allow_trace(drvdata);
/* Enable the trace unit */
etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
@@ -737,7 +778,6 @@ static int etm4_enable(struct coresight_device *csdev,
static void etm4_disable_hw(void *info)
{
u32 control;
- u64 trfcr;
struct etmv4_drvdata *drvdata = info;
struct etmv4_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
@@ -764,12 +804,7 @@ static void etm4_disable_hw(void *info)
* If the CPU supports v8.4 Trace filter Control,
* set the ETM to trace prohibited region.
*/
- if (drvdata->trfc) {
- trfcr = read_sysreg_s(SYS_TRFCR_EL1);
- write_sysreg_s(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE),
- SYS_TRFCR_EL1);
- isb();
- }
+ etm4x_prohibit_trace(drvdata);
/*
* Make sure everything completes before disabling, as recommended
* by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
@@ -785,9 +820,6 @@ static void etm4_disable_hw(void *info)
if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for PM stable Trace Status\n");
- if (drvdata->trfc)
- write_sysreg_s(trfcr, SYS_TRFCR_EL1);
-
/* read the status of the single shot comparators */
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_status[i] =
@@ -989,15 +1021,15 @@ static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata,
return false;
}
-static void cpu_enable_tracing(struct etmv4_drvdata *drvdata)
+static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
{
u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
u64 trfcr;
+ drvdata->trfcr = 0;
if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
return;
- drvdata->trfc = true;
/*
* If the CPU supports v8.4 SelfHosted Tracing, enable
* tracing at the kernel EL and EL0, forcing to use the
@@ -1011,7 +1043,7 @@ static void cpu_enable_tracing(struct etmv4_drvdata *drvdata)
if (is_kernel_in_hyp_mode())
trfcr |= TRFCR_EL2_CX;
- write_sysreg_s(trfcr, SYS_TRFCR_EL1);
+ drvdata->trfcr = trfcr;
}
static void etm4_init_arch_data(void *info)
@@ -1202,7 +1234,7 @@ static void etm4_init_arch_data(void *info)
/* NUMCNTR, bits[30:28] number of counters available for tracing */
drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
etm4_cs_lock(drvdata, csa);
- cpu_enable_tracing(drvdata);
+ cpu_detect_trace_filtering(drvdata);
}
static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
@@ -1554,7 +1586,7 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}
-static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int i, ret = 0;
struct etmv4_save_state *state;
@@ -1693,7 +1725,23 @@ out:
return ret;
}
-static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+{
+ int ret = 0;
+
+ /* Save the TRFCR irrespective of whether the ETM is ON */
+ if (drvdata->trfcr)
+ drvdata->save_trfcr = read_trfcr();
+ /*
+ * Save and restore the ETM Trace registers only if
+ * the ETM is active.
+ */
+ if (local_read(&drvdata->mode) && drvdata->save_state)
+ ret = __etm4_cpu_save(drvdata);
+ return ret;
+}
+
+static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
int i;
struct etmv4_save_state *state = drvdata->save_state;
@@ -1789,6 +1837,14 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
etm4_cs_lock(drvdata, csa);
}
+static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+{
+ if (drvdata->trfcr)
+ write_trfcr(drvdata->save_trfcr);
+ if (drvdata->state_needs_restore)
+ __etm4_cpu_restore(drvdata);
+}
+
static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
void *v)
{
@@ -1800,23 +1856,17 @@ static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
drvdata = etmdrvdata[cpu];
- if (!drvdata->save_state)
- return NOTIFY_OK;
-
if (WARN_ON_ONCE(drvdata->cpu != cpu))
return NOTIFY_BAD;
switch (cmd) {
case CPU_PM_ENTER:
- /* save the state if self-hosted coresight is in use */
- if (local_read(&drvdata->mode))
- if (etm4_cpu_save(drvdata))
- return NOTIFY_BAD;
+ if (etm4_cpu_save(drvdata))
+ return NOTIFY_BAD;
break;
case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED:
- if (drvdata->state_needs_restore)
- etm4_cpu_restore(drvdata);
+ etm4_cpu_restore(drvdata);
break;
default:
return NOTIFY_DONE;
@@ -2099,6 +2149,7 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_UCI_ID(0x000bb803, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A75 */
CS_AMBA_UCI_ID(0x000bb805, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A55 */
CS_AMBA_UCI_ID(0x000bb804, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A76 */
+ CS_AMBA_UCI_ID(0x000bbd0d, uci_id_etm4),/* Qualcomm Kryo 5XX Cortex-A77 */
CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
CS_AMBA_UCI_ID(0x000b6d01, uci_id_etm4),/* HiSilicon-Hip08 */
CS_AMBA_UCI_ID(0x000b6d02, uci_id_etm4),/* HiSilicon-Hip09 */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index e5b79bdb9851..3c4d69b096ca 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -919,8 +919,12 @@ struct etmv4_save_state {
* @nooverflow: Indicate if overflow prevention is supported.
* @atbtrig: If the implementation can support ATB triggers
* @lpoverride: If the implementation can support low-power state override.
- * @trfc: If the implementation supports Arm v8.4 trace filter controls.
+ * @trfcr: If the CPU supports FEAT_TRF, value of the TRFCR_ELx that
+ * allows tracing at all ELs. We don't want to compute this
+ * at runtime, due to the additional setting of TRFCR_CX when
+ * in EL2. Otherwise, 0.
* @config: structure holding configuration parameters.
+ * @save_trfcr: Saved TRFCR_EL1 register during a CPU PM event.
* @save_state: State to be preserved across power loss
* @state_needs_restore: True when there is context to restore after PM exit
* @skip_power_up: Indicates if an implementation can skip powering up
@@ -971,8 +975,9 @@ struct etmv4_drvdata {
bool nooverflow;
bool atbtrig;
bool lpoverride;
- bool trfc;
+ u64 trfcr;
struct etmv4_config config;
+ u64 save_trfcr;
struct etmv4_save_state *save_state;
bool state_needs_restore;
bool skip_power_up;
diff --git a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
new file mode 100644
index 000000000000..53840a2c41f2
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Arm v8 Self-Hosted trace support.
+ *
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef __CORESIGHT_SELF_HOSTED_TRACE_H
+#define __CORESIGHT_SELF_HOSTED_TRACE_H
+
+#include <asm/sysreg.h>
+
+static inline u64 read_trfcr(void)
+{
+ return read_sysreg_s(SYS_TRFCR_EL1);
+}
+
+static inline void write_trfcr(u64 val)
+{
+ write_sysreg_s(val, SYS_TRFCR_EL1);
+ isb();
+}
+
+static inline u64 cpu_prohibit_trace(void)
+{
+ u64 trfcr = read_trfcr();
+
+ /* Prohibit tracing at EL0 & the kernel EL */
+ write_trfcr(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE));
+ /* Return the original value of the TRFCR */
+ return trfcr;
+}
+#endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */
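These wrappers are paired across CPU power transitions by the ETM changes that follow; a minimal sketch of the intended usage (hypothetical client_* names):

	static u64 saved_trfcr;

	static void client_cpu_pm_enter(void)
	{
		saved_trfcr = read_trfcr();	/* stash the filter controls */
	}

	static void client_cpu_pm_exit(void)
	{
		/* write_trfcr() already issues the required isb() */
		write_trfcr(saved_trfcr);
	}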
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 74c6323d4d6a..d0276af82494 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -432,6 +432,21 @@ static u32 tmc_etr_get_default_buffer_size(struct device *dev)
return size;
}
+static u32 tmc_etr_get_max_burst_size(struct device *dev)
+{
+ u32 burst_size;
+
+ if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
+ &burst_size))
+ return TMC_AXICTL_WR_BURST_16;
+
+ /* Only permissible values are 0 to 15 */
+ if (burst_size > 0xF)
+ burst_size = TMC_AXICTL_WR_BURST_16;
+
+ return burst_size;
+}
+
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
@@ -469,10 +484,12 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
/* This device is not associated with a session */
drvdata->pid = -1;
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
drvdata->size = tmc_etr_get_default_buffer_size(dev);
- else
+ drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
+ } else {
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
+ }
desc.dev = dev;
desc.groups = coresight_tmc_groups;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index cd0fb7bfba68..4c4cbd1f7258 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -546,13 +546,17 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
/*
* In snapshot mode we simply increment the head by the number of byte
- * that were written. User space function cs_etm_find_snapshot() will
- * figure out how many bytes to get from the AUX buffer based on the
- * position of the head.
+ * that were written. User space will figure out how many bytes to get
+ * from the AUX buffer based on the position of the head.
*/
if (buf->snapshot)
handle->head += to_read;
+ /*
+ * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
+ * data before the aux_head is updated via perf_aux_output_end(), which
+ * is expected by the perf ring buffer.
+ */
CS_LOCK(drvdata->base);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index acdb59e0e661..867ad8bb9b0c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -32,7 +32,6 @@ struct etr_flat_buf {
* @etr_buf - Actual buffer used by the ETR
* @pid - The PID this etr_perf_buffer belongs to.
* @snapshot - Perf session mode
- * @head - handle->head at the beginning of the session.
* @nr_pages - Number of pages in the ring buffer.
* @pages - Array of Pages in the ring buffer.
*/
@@ -41,7 +40,6 @@ struct etr_perf_buffer {
struct etr_buf *etr_buf;
pid_t pid;
bool snapshot;
- unsigned long head;
int nr_pages;
void **pages;
};
@@ -609,8 +607,9 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
if (!flat_buf)
return -ENOMEM;
- flat_buf->vaddr = dma_alloc_coherent(real_dev, etr_buf->size,
- &flat_buf->daddr, GFP_KERNEL);
+ flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
+ &flat_buf->daddr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
if (!flat_buf->vaddr) {
kfree(flat_buf);
return -ENOMEM;
@@ -631,14 +630,18 @@ static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
if (flat_buf && flat_buf->daddr) {
struct device *real_dev = flat_buf->dev->parent;
- dma_free_coherent(real_dev, flat_buf->size,
- flat_buf->vaddr, flat_buf->daddr);
+ dma_free_noncoherent(real_dev, etr_buf->size,
+ flat_buf->vaddr, flat_buf->daddr,
+ DMA_FROM_DEVICE);
}
kfree(flat_buf);
}
static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
+ struct etr_flat_buf *flat_buf = etr_buf->private;
+ struct device *real_dev = flat_buf->dev->parent;
+
/*
* Adjust the buffer to point to the beginning of the trace data
* and update the available trace data.
@@ -648,6 +651,19 @@ static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
etr_buf->len = etr_buf->size;
else
etr_buf->len = rwp - rrp;
+
+ /*
+ * The driver always starts tracing at the beginning of the buffer;
+ * the only reason we would get a wrap-around is when the buffer
+ * is full. Sync the entire buffer in one go for this case.
+ */
+ if (etr_buf->offset + etr_buf->len > etr_buf->size)
+ dma_sync_single_for_cpu(real_dev, flat_buf->daddr,
+ etr_buf->size, DMA_FROM_DEVICE);
+ else
+ dma_sync_single_for_cpu(real_dev,
+ flat_buf->daddr + etr_buf->offset,
+ etr_buf->len, DMA_FROM_DEVICE);
}
static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
@@ -982,7 +998,8 @@ static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
axictl &= ~TMC_AXICTL_CLEAR_MASK;
- axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
+ axictl |= TMC_AXICTL_PROT_CTL_B1;
+ axictl |= TMC_AXICTL_WR_BURST(drvdata->max_burst_size);
axictl |= TMC_AXICTL_AXCACHE_OS;
if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
@@ -1437,16 +1454,16 @@ free_etr_perf_buffer:
* buffer to the perf ring buffer.
*/
static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
+ unsigned long head,
unsigned long src_offset,
unsigned long to_copy)
{
long bytes;
long pg_idx, pg_offset;
- unsigned long head = etr_perf->head;
char **dst_pages, *src_buf;
struct etr_buf *etr_buf = etr_perf->etr_buf;
- head = etr_perf->head;
+ head = PERF_IDX2OFF(head, etr_perf);
pg_idx = head >> PAGE_SHIFT;
pg_offset = head & (PAGE_SIZE - 1);
dst_pages = (char **)etr_perf->pages;
@@ -1553,16 +1570,23 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
/* Insert barrier packets at the beginning, if there was an overflow */
if (lost)
tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
- tmc_etr_sync_perf_buffer(etr_perf, offset, size);
+ tmc_etr_sync_perf_buffer(etr_perf, handle->head, offset, size);
/*
* In snapshot mode we simply increment the head by the number of bytes
- * that were written. User space function cs_etm_find_snapshot() will
- * figure out how many bytes to get from the AUX buffer based on the
- * position of the head.
+ * that were written. User space will figure out how many bytes to get
+ * from the AUX buffer based on the position of the head.
*/
if (etr_perf->snapshot)
handle->head += size;
+
+ /*
+ * Ensure that the AUX trace data is visible before the aux_head
+ * is updated via perf_aux_output_end(), as expected by the
+ * perf ring buffer.
+ */
+ smp_wmb();
+
out:
/*
* Don't set the TRUNCATED flag in snapshot mode because 1) the
@@ -1605,8 +1629,6 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
goto unlock_out;
}
- etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
-
/*
* No HW configuration is needed if the sink is already in
* use for this session.
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index b91ec7dde7bc..6bec20a392b3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -70,7 +70,8 @@
#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
-#define TMC_AXICTL_WR_BURST_16 0xF00
+#define TMC_AXICTL_WR_BURST(v) (((v) & 0xf) << 8)
+#define TMC_AXICTL_WR_BURST_16 0xf
/* Write-back Read and Write-allocate */
#define TMC_AXICTL_AXCACHE_OS (0xf << 2)
#define TMC_AXICTL_ARCACHE_OS (0xf << 16)
@@ -174,6 +175,8 @@ struct etr_buf {
* @etr_buf: details of buffer used in TMC-ETR
* @len: size of the available trace for ETF/ETB.
* @size: trace buffer size for this TMC (common for all modes).
+ * @max_burst_size: The maximum burst size that can be initiated by
+ * the TMC-ETR on the AXI bus.
* @mode: how this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
@@ -198,6 +201,7 @@ struct tmc_drvdata {
};
u32 len;
u32 size;
+ u32 max_burst_size;
u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
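Taken together with the tmc-core change above, the burst-size flow is: an optional "arm,max-burst-size" firmware property (0-15, defaulting to TMC_AXICTL_WR_BURST_16) is cached in max_burst_size and later shifted into AXICTL bits [11:8] by TMC_AXICTL_WR_BURST(). A sketch of the encoding, values illustrative:

	u32 burst = 0x7;				/* from "arm,max-burst-size" */
	u32 field = TMC_AXICTL_WR_BURST(burst);		/* (0x7 & 0xf) << 8 == 0x700 */
	u32 dflt = TMC_AXICTL_WR_BURST(TMC_AXICTL_WR_BURST_16);	/* 0xf00, the old fixed value */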
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 176868496879..276862c07e32 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -16,6 +16,9 @@
#define pr_fmt(fmt) DRVNAME ": " fmt
#include <asm/barrier.h>
+#include <asm/cpufeature.h>
+
+#include "coresight-self-hosted-trace.h"
#include "coresight-trbe.h"
#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
@@ -56,6 +59,8 @@ struct trbe_buf {
* trbe_limit sibling pointers.
*/
unsigned long trbe_base;
+ /* The base programmed into the TRBE */
+ unsigned long trbe_hw_base;
unsigned long trbe_limit;
unsigned long trbe_write;
int nr_pages;
@@ -64,13 +69,63 @@ struct trbe_buf {
struct trbe_cpudata *cpudata;
};
+/*
+ * TRBE erratum list
+ *
+ * The errata are defined in the arm64 generic cpu_errata framework.
+ * Since the erratum workarounds can be applied individually
+ * to the affected CPUs inside the TRBE driver, we need to know if
+ * a given CPU is affected by a given erratum. Unlike the other
+ * erratum workarounds, the TRBE driver needs to check multiple
+ * times during a trace session. Thus we need quick access to the
+ * per-CPU errata rather than issuing a costly this_cpu_has_cap() every time.
+ * We keep a set of the affected errata in trbe_cpudata, per TRBE.
+ *
+ * We rely on the corresponding cpucaps to be defined for a given
+ * TRBE erratum. We map the given cpucap into a TRBE internal number
+ * to make the tracking of the errata lean.
+ *
+ * This helps in:
+ * - Not duplicating the detection logic
+ * - Streamlined detection of erratum across the system
+ */
+#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE 0
+#define TRBE_WORKAROUND_WRITE_OUT_OF_RANGE 1
+
+static int trbe_errata_cpucaps[] = {
+ [TRBE_WORKAROUND_OVERWRITE_FILL_MODE] = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
+ [TRBE_WORKAROUND_WRITE_OUT_OF_RANGE] = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
+ -1, /* Sentinel, must be the last entry */
+};
+
+/* The total number of listed errata in trbe_errata_cpucaps */
+#define TRBE_ERRATA_MAX (ARRAY_SIZE(trbe_errata_cpucaps) - 1)
+
+/*
+ * Safe limit for the number of bytes that may be overwritten
+ * when ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE is triggered.
+ */
+#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES 256
+
+/*
+ * struct trbe_cpudata: TRBE instance specific data
+ * @trbe_flag - TRBE dirty/access flag support
+ * @trbe_hw_align - Actual TRBE alignment required for TRBPTR_EL1.
+ * @trbe_align - Software alignment used for the TRBPTR_EL1.
+ * @cpu - CPU this TRBE belongs to.
+ * @mode - Mode of current operation. (perf/disabled)
+ * @drvdata - TRBE specific drvdata
+ * @errata - Bit map for the errata on this TRBE.
+ */
struct trbe_cpudata {
bool trbe_flag;
+ u64 trbe_hw_align;
u64 trbe_align;
int cpu;
enum cs_mode mode;
struct trbe_buf *buf;
struct trbe_drvdata *drvdata;
+ DECLARE_BITMAP(errata, TRBE_ERRATA_MAX);
};
struct trbe_drvdata {
@@ -83,6 +138,35 @@ struct trbe_drvdata {
struct platform_device *pdev;
};
+static void trbe_check_errata(struct trbe_cpudata *cpudata)
+{
+ int i;
+
+ for (i = 0; i < TRBE_ERRATA_MAX; i++) {
+ int cap = trbe_errata_cpucaps[i];
+
+ if (WARN_ON_ONCE(cap < 0))
+ return;
+ if (this_cpu_has_cap(cap))
+ set_bit(i, cpudata->errata);
+ }
+}
+
+static inline bool trbe_has_erratum(struct trbe_cpudata *cpudata, int i)
+{
+ return (i < TRBE_ERRATA_MAX) && test_bit(i, cpudata->errata);
+}
+
+static inline bool trbe_may_overwrite_in_fill_mode(struct trbe_cpudata *cpudata)
+{
+ return trbe_has_erratum(cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE);
+}
+
+static inline bool trbe_may_write_out_of_range(struct trbe_cpudata *cpudata)
+{
+ return trbe_has_erratum(cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE);
+}
+
static int trbe_alloc_node(struct perf_event *event)
{
if (event->cpu == -1)
@@ -120,6 +204,25 @@ static void trbe_reset_local(void)
write_sysreg_s(0, SYS_TRBSR_EL1);
}
+static void trbe_report_wrap_event(struct perf_output_handle *handle)
+{
+ /*
+ * Mark the buffer to indicate that there was a WRAP event by
+ * setting the COLLISION flag. This indicates to the user that
+ * the TRBE trace collection was stopped without stopping the
+ * ETE and thus there might be some amount of trace that was
+ * lost between the time the WRAP was detected and the IRQ
+ * was consumed by the CPU.
+ *
+ * Setting the TRUNCATED flag would move the event to STOPPED
+ * state unnecessarily, even when there is space left in the
+ * ring buffer. Using the COLLISION flag doesn't have this side
+ * effect. We only set the TRUNCATED flag when there is no space
+ * left in the ring buffer.
+ */
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
+}
+
static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
{
struct trbe_buf *buf = etm_perf_sink_config(handle);
@@ -133,6 +236,7 @@ static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
*/
trbe_drain_and_disable_local();
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ perf_aux_output_end(handle, 0);
*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
}
@@ -178,12 +282,18 @@ static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
* consumed from the user space. The enabled TRBE buffer area is a moving subset of
* the allocated perf auxiliary buffer.
*/
+
+static void __trbe_pad_buf(struct trbe_buf *buf, u64 offset, int len)
+{
+ memset((void *)buf->trbe_base + offset, ETE_IGNORE_PACKET, len);
+}
+
static void trbe_pad_buf(struct perf_output_handle *handle, int len)
{
struct trbe_buf *buf = etm_perf_sink_config(handle);
u64 head = PERF_IDX2OFF(handle->head, buf);
- memset((void *)buf->trbe_base + head, ETE_IGNORE_PACKET, len);
+ __trbe_pad_buf(buf, head, len);
if (!buf->snapshot)
perf_aux_output_skip(handle, len);
}
@@ -200,6 +310,25 @@ static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
return buf->nr_pages * PAGE_SIZE;
}
+static u64 trbe_min_trace_buf_size(struct perf_output_handle *handle)
+{
+ u64 size = TRBE_TRACE_MIN_BUF_SIZE;
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ struct trbe_cpudata *cpudata = buf->cpudata;
+
+ /*
+ * When the TRBE is affected by an erratum that could make it
+ * write to the next "virtually addressed" page beyond the LIMIT,
+ * we need to make sure there is always a PAGE after the LIMIT
+ * within the buffer. Thus we reserve one extra page over the
+ * normal requirement, which lets us adjust the LIMIT pointer
+ * down by a PAGE later.
+ */
+ if (trbe_may_write_out_of_range(cpudata))
+ size += PAGE_SIZE;
+ return size;
+}
+
/*
* TRBE Limit Calculation
*
@@ -252,13 +381,9 @@ static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
* trbe_base trbe_base + nr_pages
*
* Perf aux buffer does not have any space for the driver to write into.
- * Just communicate trace truncation event to the user space by marking
- * it with PERF_AUX_FLAG_TRUNCATED.
*/
- if (!handle->size) {
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ if (!handle->size)
return 0;
- }
/* Compute the tail and wakeup indices now that we've aligned head */
tail = PERF_IDX2OFF(handle->head + handle->size, buf);
@@ -360,13 +485,12 @@ static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
return limit;
trbe_pad_buf(handle, handle->size);
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
return 0;
}
static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
{
- struct trbe_buf *buf = perf_get_aux(handle);
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
u64 limit = __trbe_normal_offset(handle);
u64 head = PERF_IDX2OFF(handle->head, buf);
@@ -374,10 +498,14 @@ static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
* If the head is too close to the limit and we don't
* have space for a meaningful run, we rather pad it
* and start fresh.
+ *
+ * We might have to do this more than once to make sure
+ * we have enough space.
*/
- if (limit && (limit - head < TRBE_TRACE_MIN_BUF_SIZE)) {
+ while (limit && ((limit - head) < trbe_min_trace_buf_size(handle))) {
trbe_pad_buf(handle, limit - head);
limit = __trbe_normal_offset(handle);
+ head = PERF_IDX2OFF(handle->head, buf);
}
return limit;
}
@@ -448,12 +576,13 @@ static void set_trbe_limit_pointer_enabled(unsigned long addr)
static void trbe_enable_hw(struct trbe_buf *buf)
{
- WARN_ON(buf->trbe_write < buf->trbe_base);
+ WARN_ON(buf->trbe_hw_base < buf->trbe_base);
+ WARN_ON(buf->trbe_write < buf->trbe_hw_base);
WARN_ON(buf->trbe_write >= buf->trbe_limit);
set_trbe_disabled();
isb();
clr_trbe_status();
- set_trbe_base_pointer(buf->trbe_base);
+ set_trbe_base_pointer(buf->trbe_hw_base);
set_trbe_write_pointer(buf->trbe_write);
/*
@@ -464,10 +593,13 @@ static void trbe_enable_hw(struct trbe_buf *buf)
set_trbe_limit_pointer_enabled(buf->trbe_limit);
}
-static enum trbe_fault_action trbe_get_fault_act(u64 trbsr)
+static enum trbe_fault_action trbe_get_fault_act(struct perf_output_handle *handle,
+ u64 trbsr)
{
int ec = get_trbe_ec(trbsr);
int bsc = get_trbe_bsc(trbsr);
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ struct trbe_cpudata *cpudata = buf->cpudata;
WARN_ON(is_trbe_running(trbsr));
if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
@@ -476,13 +608,71 @@ static enum trbe_fault_action trbe_get_fault_act(u64 trbsr)
if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
return TRBE_FAULT_ACT_FATAL;
- if (is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) {
- if (get_trbe_write_pointer() == get_trbe_base_pointer())
- return TRBE_FAULT_ACT_WRAP;
- }
+ /*
+ * If the trbe is affected by TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
+ * it might write data after a WRAP event in the fill mode.
+ * Thus the check TRBPTR == TRBBASER will not be honored.
+ */
+ if ((is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) &&
+ (trbe_may_overwrite_in_fill_mode(cpudata) ||
+ get_trbe_write_pointer() == get_trbe_base_pointer()))
+ return TRBE_FAULT_ACT_WRAP;
+
return TRBE_FAULT_ACT_SPURIOUS;
}
+static unsigned long trbe_get_trace_size(struct perf_output_handle *handle,
+ struct trbe_buf *buf, bool wrap)
+{
+ u64 write;
+ u64 start_off, end_off;
+ u64 size;
+ u64 overwrite_skip = TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;
+
+ /*
+ * If the TRBE has wrapped around, the write pointer has
+ * wrapped and should be treated as the limit.
+ *
+ * When the TRBE is affected by TRBE_WORKAROUND_WRITE_OUT_OF_RANGE,
+ * it may write up to 64 bytes beyond the "LIMIT". The driver already
+ * keeps a valid page next to the LIMIT and we could potentially
+ * consume the trace data that may have been collected there. But we
+ * cannot really be sure it is available, and the TRBPTR may not
+ * indicate as much. Also, the affected cores are subject to another
+ * erratum which forces PAGE_SIZE alignment on the TRBPTR, and thus
+ * we could end up padding almost an entire page (PAGE_SIZE - 64
+ * bytes) just to recover those 64 bytes. Thus we ignore the potential
+ * triggering of the erratum on WRAP and limit the data to LIMIT.
+ */
+ if (wrap)
+ write = get_trbe_limit_pointer();
+ else
+ write = get_trbe_write_pointer();
+
+ /*
+ * TRBE may use a different base address than the base
+ * of the ring buffer. Thus use the beginning of the ring
+ * buffer to compute the offsets.
+ */
+ end_off = write - buf->trbe_base;
+ start_off = PERF_IDX2OFF(handle->head, buf);
+
+ if (WARN_ON_ONCE(end_off < start_off))
+ return 0;
+
+ size = end_off - start_off;
+ /*
+ * If the TRBE is affected by TRBE_WORKAROUND_OVERWRITE_FILL_MODE, we
+ * must fill the space we skipped with IGNORE packets. And we are
+ * always guaranteed at least PAGE_SIZE of space in the buffer.
+ */
+ if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE) &&
+ !WARN_ON(size < overwrite_skip))
+ __trbe_pad_buf(buf, start_off, overwrite_skip);
+
+ return size;
+}
+
static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
struct perf_event *event, void **pages,
int nr_pages, bool snapshot)
@@ -544,9 +734,9 @@ static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
struct trbe_buf *buf = config;
enum trbe_fault_action act;
- unsigned long size, offset;
- unsigned long write, base, status;
+ unsigned long size, status;
unsigned long flags;
+ bool wrap = false;
WARN_ON(buf->cpudata != cpudata);
WARN_ON(cpudata->cpu != smp_processor_id());
@@ -554,8 +744,6 @@ static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
if (cpudata->mode != CS_MODE_PERF)
return 0;
- perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
-
/*
* We are about to disable the TRBE. And this could in turn
* fill up the buffer triggering, an IRQ. This could be consumed
@@ -588,8 +776,6 @@ static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
* handle gets freed in etm_event_stop().
*/
trbe_drain_and_disable_local();
- write = get_trbe_write_pointer();
- base = get_trbe_base_pointer();
/* Check if there is a pending interrupt and handle it here */
status = read_sysreg_s(SYS_TRBSR_EL1);
@@ -603,7 +789,7 @@ static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
clr_trbe_irq();
isb();
- act = trbe_get_fault_act(status);
+ act = trbe_get_fault_act(handle, status);
/*
* If this was not due to a WRAP event, we have some
* errors and as such buffer is empty.
@@ -613,20 +799,11 @@ static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
goto done;
}
- /*
- * Otherwise, the buffer is full and the write pointer
- * has reached base. Adjust this back to the Limit pointer
- * for correct size. Also, mark the buffer truncated.
- */
- write = get_trbe_limit_pointer();
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ trbe_report_wrap_event(handle);
+ wrap = true;
}
- offset = write - base;
- if (WARN_ON_ONCE(offset < PERF_IDX2OFF(handle->head, buf)))
- size = 0;
- else
- size = offset - PERF_IDX2OFF(handle->head, buf);
+ size = trbe_get_trace_size(handle, buf, wrap);
done:
local_irq_restore(flags);
@@ -636,6 +813,148 @@ done:
return size;
}
+
+static int trbe_apply_work_around_before_enable(struct trbe_buf *buf)
+{
+ /*
+ * TRBE_WORKAROUND_OVERWRITE_FILL_MODE causes the TRBE to overwrite a few
+ * cache lines of data from the "TRBBASER_EL1" in the event of a "FILL".
+ * Thus, we could lose some amount of the trace at the base.
+ *
+ * Before Fix:
+ *
+ * normal-BASE head (normal-TRBPTR) tail (normal-LIMIT)
+ * | \/ /
+ * -------------------------------------------------------------
+ * | Pg0 | Pg1 | | | PgN |
+ * -------------------------------------------------------------
+ *
+ * In the normal course of action, we would set the TRBBASER to the
+ * beginning of the ring-buffer (normal-BASE). But with the erratum,
+ * the TRBE could overwrite the contents at the "normal-BASE", after
+ * hitting the "normal-LIMIT", since it doesn't stop as expected.
+ * This could overwrite trace collected in one of the previous runs
+ * that is still being consumed by the user. So we must
+ * always make sure that the TRBBASER is within the region
+ * [head, head+size]. Note that TRBBASER must be PAGE aligned.
+ *
+ * After moving the BASE:
+ *
+ * normal-BASE head (normal-TRBPTR) tail (normal-LIMIT)
+ * | \/ /
+ * -------------------------------------------------------------
+ * | | |xyzdef. |.. tuvw| |
+ * -------------------------------------------------------------
+ * /
+ * New-BASER
+ *
+ * Also, we would set the TRBPTR to head (after adjusting it for
+ * alignment), i.e., at normal-PTR above. This would mean that the
+ * last few bytes of the trace (say, "xyz") might overwrite the first
+ * few bytes of trace written ("abc"). More importantly, they will
+ * appear in what userspace sees as the beginning of the trace, which
+ * is wrong. We may not always have space to move the latest trace
+ * "xyz" to the correct position, as it must appear beyond the LIMIT
+ * (i.e., beyond [head..head+size]). Thus it is easier to ignore
+ * those bytes than to complicate the driver to move them, assuming
+ * the erratum was triggered, and to do additional checks to see if
+ * there is indeed allowed space at TRBLIMITR.LIMIT.
+ *
+ * Thus the full workaround will move the BASE and the PTR and would
+ * look like (after padding at the skipped bytes at the end of
+ * session) :
+ *
+ * normal-BASE head (normal-TRBPTR) tail (normal-LIMIT)
+ * | \/ /
+ * -------------------------------------------------------------
+ * | | |///abc.. |.. rst| |
+ * -------------------------------------------------------------
+ * / |
+ * New-BASER New-TRBPTR
+ *
+ * To summarize, with the work around:
+ *
+ * - We always align the offset for the next session to PAGE_SIZE
+ * (this ensures that we can program the TRBBASER to this offset
+ * within the region [head...head+size]).
+ *
+ * - At TRBE enable:
+ * - Set the TRBBASER to the page aligned offset of the current
+ * proposed write offset. (which is guaranteed to be aligned
+ * as above)
+ * - Move the TRBPTR to skip the first 256 bytes (that might be
+ * overwritten with the erratum). This ensures that the trace
+ * generated in the session is not re-written.
+ *
+ * - At trace collection:
+ * - Pad the 256 bytes skipped above with IGNORE packets.
+ */
+ if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE)) {
+ if (WARN_ON(!IS_ALIGNED(buf->trbe_write, PAGE_SIZE)))
+ return -EINVAL;
+ buf->trbe_hw_base = buf->trbe_write;
+ buf->trbe_write += TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;
+ }
+
+ /*
+ * TRBE_WORKAROUND_WRITE_OUT_OF_RANGE could cause the TRBE to write to
+ * the next page after the TRBLIMITR.LIMIT. For perf, the "next page"
+ * may be:
+ * - The page beyond the ring buffer, meaning the TRBE could
+ * corrupt memory owned by another entity (kernel / user).
+ * - A portion of the "ring buffer" consumed by the userspace.
+ * i.e., a page outside [head, head + size].
+ *
+ * We work around this by:
+ * - Making sure that we leave at least one extra PAGE_SIZE of space
+ * in the ring buffer [head, head + size], beyond what we normally
+ * require without the erratum. See trbe_min_trace_buf_size().
+ *
+ * - Adjusting the TRBLIMITR.LIMIT to leave the extra PAGE outside
+ * the TRBE's range (i.e., [TRBBASER, TRBLIMITR.LIMIT]).
+ */
+ if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE)) {
+ s64 space = buf->trbe_limit - buf->trbe_write;
+ /*
+ * We must have more than a PAGE_SIZE worth of space in the proposed
+ * range for the TRBE.
+ */
+ if (WARN_ON(space <= PAGE_SIZE ||
+ !IS_ALIGNED(buf->trbe_limit, PAGE_SIZE)))
+ return -EINVAL;
+ buf->trbe_limit -= PAGE_SIZE;
+ }
+
+ return 0;
+}
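A condensed sketch of the two FILL-mode adjustments, assuming 4 KiB pages and a 256-byte skip (matching what the _SKIP_BYTES name suggests); the helper itself is hypothetical, standing in for the struct trbe_buf manipulation above:

#define FILL_SKIP       256UL           /* assumed skip size */

static int fill_mode_fixup(unsigned long *hw_base, unsigned long *write)
{
        if (*write & (PAGE_SIZE - 1))   /* write offset must be page aligned  */
                return -EINVAL;
        *hw_base = *write;              /* TRBBASER := aligned write offset   */
        *write += FILL_SKIP;            /* leave room the FILL may clobber    */
        return 0;                       /* skipped bytes padded at collection */
}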
+
+static int __arm_trbe_enable(struct trbe_buf *buf,
+ struct perf_output_handle *handle)
+{
+ int ret = 0;
+
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
+ buf->trbe_limit = compute_trbe_buffer_limit(handle);
+ buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
+ if (buf->trbe_limit == buf->trbe_base) {
+ ret = -ENOSPC;
+ goto err;
+ }
+ /* Set the base of the TRBE to the buffer base */
+ buf->trbe_hw_base = buf->trbe_base;
+
+ ret = trbe_apply_work_around_before_enable(buf);
+ if (ret)
+ goto err;
+
+ *this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
+ trbe_enable_hw(buf);
+ return 0;
+err:
+ trbe_stop_and_truncate_event(handle);
+ return ret;
+}
+
static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data)
{
struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -648,18 +967,11 @@ static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data)
if (mode != CS_MODE_PERF)
return -EINVAL;
- *this_cpu_ptr(drvdata->handle) = handle;
cpudata->buf = buf;
cpudata->mode = mode;
buf->cpudata = cpudata;
- buf->trbe_limit = compute_trbe_buffer_limit(handle);
- buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
- if (buf->trbe_limit == buf->trbe_base) {
- trbe_stop_and_truncate_event(handle);
- return 0;
- }
- trbe_enable_hw(buf);
- return 0;
+
+ return __arm_trbe_enable(buf, handle);
}
static int arm_trbe_disable(struct coresight_device *csdev)
@@ -683,35 +995,30 @@ static int arm_trbe_disable(struct coresight_device *csdev)
static void trbe_handle_spurious(struct perf_output_handle *handle)
{
- struct trbe_buf *buf = etm_perf_sink_config(handle);
+ u64 limitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
- buf->trbe_limit = compute_trbe_buffer_limit(handle);
- buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
- if (buf->trbe_limit == buf->trbe_base) {
- trbe_drain_and_disable_local();
- return;
- }
- trbe_enable_hw(buf);
+ /*
+ * If the IRQ was spurious, simply re-enable the TRBE
+ * without modifying the buffer parameters, so as to
+ * retain the trace collected so far.
+ */
+ limitr |= TRBLIMITR_ENABLE;
+ write_sysreg_s(limitr, SYS_TRBLIMITR_EL1);
+ isb();
}
-static void trbe_handle_overflow(struct perf_output_handle *handle)
+static int trbe_handle_overflow(struct perf_output_handle *handle)
{
struct perf_event *event = handle->event;
struct trbe_buf *buf = etm_perf_sink_config(handle);
- unsigned long offset, size;
+ unsigned long size;
struct etm_event_data *event_data;
- offset = get_trbe_limit_pointer() - get_trbe_base_pointer();
- size = offset - PERF_IDX2OFF(handle->head, buf);
+ size = trbe_get_trace_size(handle, buf, true);
if (buf->snapshot)
handle->head += size;
- /*
- * Mark the buffer as truncated, as we have stopped the trace
- * collection upon the WRAP event, without stopping the source.
- */
- perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW |
- PERF_AUX_FLAG_TRUNCATED);
+ trbe_report_wrap_event(handle);
perf_aux_output_end(handle, size);
event_data = perf_aux_output_begin(handle, event);
if (!event_data) {
@@ -723,16 +1030,10 @@ static void trbe_handle_overflow(struct perf_output_handle *handle)
*/
trbe_drain_and_disable_local();
*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
- return;
- }
- buf->trbe_limit = compute_trbe_buffer_limit(handle);
- buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
- if (buf->trbe_limit == buf->trbe_base) {
- trbe_stop_and_truncate_event(handle);
- return;
+ return -EINVAL;
}
- *this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
- trbe_enable_hw(buf);
+
+ return __arm_trbe_enable(buf, handle);
}
static bool is_perf_trbe(struct perf_output_handle *handle)
@@ -742,7 +1043,7 @@ static bool is_perf_trbe(struct perf_output_handle *handle)
struct trbe_drvdata *drvdata = cpudata->drvdata;
int cpu = smp_processor_id();
- WARN_ON(buf->trbe_base != get_trbe_base_pointer());
+ WARN_ON(buf->trbe_hw_base != get_trbe_base_pointer());
WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());
if (cpudata->mode != CS_MODE_PERF)
@@ -763,13 +1064,10 @@ static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
struct perf_output_handle *handle = *handle_ptr;
enum trbe_fault_action act;
u64 status;
+ bool truncated = false;
+ u64 trfcr;
- /*
- * Ensure the trace is visible to the CPUs and
- * any external aborts have been resolved.
- */
- trbe_drain_and_disable_local();
-
+ /* Reads of TRBSR_EL1 are fine while the TRBE is active */
status = read_sysreg_s(SYS_TRBSR_EL1);
/*
* If the pending IRQ was handled by update_buffer callback
@@ -778,6 +1076,13 @@ static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
if (!is_trbe_irq(status))
return IRQ_NONE;
+ /* Prohibit the CPU from tracing before we disable the TRBE */
+ trfcr = cpu_prohibit_trace();
+ /*
+ * Ensure the trace is visible to the CPUs and
+ * any external aborts have been resolved.
+ */
+ trbe_drain_and_disable_local();
clr_trbe_irq();
isb();
@@ -787,24 +1092,32 @@ static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
if (!is_perf_trbe(handle))
return IRQ_NONE;
- /*
- * Ensure perf callbacks have completed, which may disable
- * the trace buffer in response to a TRUNCATION flag.
- */
- irq_work_run();
-
- act = trbe_get_fault_act(status);
+ act = trbe_get_fault_act(handle, status);
switch (act) {
case TRBE_FAULT_ACT_WRAP:
- trbe_handle_overflow(handle);
+ truncated = !!trbe_handle_overflow(handle);
break;
case TRBE_FAULT_ACT_SPURIOUS:
trbe_handle_spurious(handle);
break;
case TRBE_FAULT_ACT_FATAL:
trbe_stop_and_truncate_event(handle);
+ truncated = true;
break;
}
+
+ /*
+ * If the buffer was truncated, ensure perf callbacks
+ * have completed, which will disable the event.
+ *
+ * Otherwise, restore the trace filter controls to
+ * allow tracing to resume.
+ */
+ if (truncated)
+ irq_work_run();
+ else
+ write_trfcr(trfcr);
+
return IRQ_HANDLED;
}
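The invariant the reworked handler maintains: tracing stays prohibited for the whole window the buffer is being manipulated, and the saved filter value is written back only if the event survives. Condensed (illustrative, not a drop-in):

u64 trfcr = cpu_prohibit_trace();       /* save TRFCR and stop tracing  */
/* ... drain the TRBE, handle WRAP / SPURIOUS / FATAL ... */
if (truncated)
        irq_work_run();                 /* let perf disable the event   */
else
        write_trfcr(trfcr);             /* resume tracing where we left */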
@@ -824,7 +1137,7 @@ static ssize_t align_show(struct device *dev, struct device_attribute *attr, cha
{
struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
- return sprintf(buf, "%llx\n", cpudata->trbe_align);
+ return sprintf(buf, "%llx\n", cpudata->trbe_hw_align);
}
static DEVICE_ATTR_RO(align);
@@ -869,6 +1182,10 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp
if (WARN_ON(trbe_csdev))
return;
+ /* If the TRBE was not probed on the CPU, we shouldn't be here */
+ if (WARN_ON(!cpudata->drvdata))
+ return;
+
dev = &cpudata->drvdata->pdev->dev;
desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
if (!desc.name)
@@ -891,6 +1208,9 @@ cpu_clear:
cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
+/*
+ * Must be called with preemption disabled, for trbe_check_errata().
+ */
static void arm_trbe_probe_cpu(void *info)
{
struct trbe_drvdata *drvdata = info;
@@ -912,11 +1232,34 @@ static void arm_trbe_probe_cpu(void *info)
goto cpu_clear;
}
- cpudata->trbe_align = 1ULL << get_trbe_address_align(trbidr);
- if (cpudata->trbe_align > SZ_2K) {
+ cpudata->trbe_hw_align = 1ULL << get_trbe_address_align(trbidr);
+ if (cpudata->trbe_hw_align > SZ_2K) {
pr_err("Unsupported alignment on cpu %d\n", cpu);
goto cpu_clear;
}
+
+ /*
+ * Run the TRBE erratum checks, now that we know
+ * this instance is about to be registered.
+ */
+ trbe_check_errata(cpudata);
+
+ /*
+ * If the TRBE is affected by erratum TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
+ * we must always program the TRBPTR_EL1 256 bytes past a page
+ * boundary, with TRBBASER_EL1 set to that page, to prevent the
+ * TRBE from overwriting 256 bytes at TRBBASER_EL1 on a FILL event.
+ *
+ * Thus make sure we always align our write pointer to PAGE_SIZE,
+ * which also guarantees that we have at least PAGE_SIZE of space in
+ * the buffer (TRBLIMITR is PAGE aligned) and thus that we can skip
+ * the required bytes at the base.
+ */
+ if (trbe_may_overwrite_in_fill_mode(cpudata))
+ cpudata->trbe_align = PAGE_SIZE;
+ else
+ cpudata->trbe_align = cpudata->trbe_hw_align;
+
cpudata->trbe_flag = get_trbe_flag_update(trbidr);
cpudata->cpu = cpu;
cpudata->drvdata = drvdata;
@@ -950,7 +1293,9 @@ static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
return -ENOMEM;
for_each_cpu(cpu, &drvdata->supported_cpus) {
- smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1);
+ /* If we fail to probe the CPU, let us defer it to hotplug callbacks */
+ if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1))
+ continue;
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
arm_trbe_register_coresight_cpu(drvdata, cpu);
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
@@ -969,6 +1314,13 @@ static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
return 0;
}
+static void arm_trbe_probe_hotplugged_cpu(struct trbe_drvdata *drvdata)
+{
+ preempt_disable();
+ arm_trbe_probe_cpu(drvdata);
+ preempt_enable();
+}
+
static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
@@ -980,7 +1332,7 @@ static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
* initialize it now.
*/
if (!coresight_get_percpu_sink(cpu)) {
- arm_trbe_probe_cpu(drvdata);
+ arm_trbe_probe_hotplugged_cpu(drvdata);
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
arm_trbe_register_coresight_cpu(drvdata, cpu);
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index f10a603b13fb..1ed4daa918a0 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -63,34 +63,32 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
int outcnt = 0, incnt = 0;
/*
- * We don't support 0 length messages and so filter out
- * 0 length transfers by using i2c_adapter_quirks.
- */
- if (!msgs[i].len)
- break;
-
- /*
* Only 7-bit mode is supported at the moment. For the address
* format, please check the Virtio I2C Specification.
*/
reqs[i].out_hdr.addr = cpu_to_le16(msgs[i].addr << 1);
+ if (msgs[i].flags & I2C_M_RD)
+ reqs[i].out_hdr.flags |= cpu_to_le32(VIRTIO_I2C_FLAGS_M_RD);
+
if (i != num - 1)
- reqs[i].out_hdr.flags = cpu_to_le32(VIRTIO_I2C_FLAGS_FAIL_NEXT);
+ reqs[i].out_hdr.flags |= cpu_to_le32(VIRTIO_I2C_FLAGS_FAIL_NEXT);
sg_init_one(&out_hdr, &reqs[i].out_hdr, sizeof(reqs[i].out_hdr));
sgs[outcnt++] = &out_hdr;
- reqs[i].buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
- if (!reqs[i].buf)
- break;
+ if (msgs[i].len) {
+ reqs[i].buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
+ if (!reqs[i].buf)
+ break;
- sg_init_one(&msg_buf, reqs[i].buf, msgs[i].len);
+ sg_init_one(&msg_buf, reqs[i].buf, msgs[i].len);
- if (msgs[i].flags & I2C_M_RD)
- sgs[outcnt + incnt++] = &msg_buf;
- else
- sgs[outcnt++] = &msg_buf;
+ if (msgs[i].flags & I2C_M_RD)
+ sgs[outcnt + incnt++] = &msg_buf;
+ else
+ sgs[outcnt++] = &msg_buf;
+ }
sg_init_one(&in_hdr, &reqs[i].in_hdr, sizeof(reqs[i].in_hdr));
sgs[outcnt + incnt++] = &in_hdr;
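After this change the per-message scatterlist layout is as follows (a sketch; the zero-length case is exactly what VIRTIO_I2C_F_ZERO_LENGTH_REQUEST permits):

/*
 * write, len > 0 : out_hdr | msg buf (out) | in_hdr
 * read,  len > 0 : out_hdr | msg buf (in)  | in_hdr
 * len == 0       : out_hdr | in_hdr
 */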
@@ -191,7 +189,7 @@ static int virtio_i2c_setup_vqs(struct virtio_i2c *vi)
static u32 virtio_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static struct i2c_algorithm virtio_algorithm = {
@@ -199,15 +197,16 @@ static struct i2c_algorithm virtio_algorithm = {
.functionality = virtio_i2c_func,
};
-static const struct i2c_adapter_quirks virtio_i2c_quirks = {
- .flags = I2C_AQ_NO_ZERO_LEN,
-};
-
static int virtio_i2c_probe(struct virtio_device *vdev)
{
struct virtio_i2c *vi;
int ret;
+ if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST)) {
+ dev_err(&vdev->dev, "Zero-length request feature is mandatory\n");
+ return -EINVAL;
+ }
+
vi = devm_kzalloc(&vdev->dev, sizeof(*vi), GFP_KERNEL);
if (!vi)
return -ENOMEM;
@@ -225,7 +224,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
snprintf(vi->adap.name, sizeof(vi->adap.name),
"i2c_virtio at virtio bus %d", vdev->index);
vi->adap.algo = &virtio_algorithm;
- vi->adap.quirks = &virtio_i2c_quirks;
vi->adap.dev.parent = &vdev->dev;
vi->adap.dev.of_node = vdev->dev.of_node;
i2c_set_adapdata(&vi->adap, vi);
@@ -270,11 +268,17 @@ static int virtio_i2c_restore(struct virtio_device *vdev)
}
#endif
+static const unsigned int features[] = {
+ VIRTIO_I2C_F_ZERO_LENGTH_REQUEST,
+};
+
static struct virtio_driver virtio_i2c_driver = {
- .id_table = id_table,
- .probe = virtio_i2c_probe,
- .remove = virtio_i2c_remove,
- .driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = virtio_i2c_probe,
+ .remove = virtio_i2c_remove,
+ .driver = {
.name = "i2c_virtio",
},
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index bba08cbce6e1..1a19ebad60ad 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -103,6 +103,7 @@ struct slimpro_i2c_dev {
struct i2c_adapter adapter;
struct device *dev;
struct mbox_chan *mbox_chan;
+ struct pcc_mbox_chan *pcc_chan;
struct mbox_client mbox_client;
int mbox_idx;
struct completion rd_complete;
@@ -466,7 +467,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
return PTR_ERR(ctx->mbox_chan);
}
} else {
- struct acpi_pcct_hw_reduced *cppc_ss;
+ struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
int version = XGENE_SLIMPRO_I2C_V1;
@@ -483,24 +484,14 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = false;
cl->rx_callback = slimpro_i2c_pcc_rx_cb;
- ctx->mbox_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
- if (IS_ERR(ctx->mbox_chan)) {
+ pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
+ if (IS_ERR(pcc_chan)) {
dev_err(&pdev->dev, "PCC mailbox channel request failed\n");
- return PTR_ERR(ctx->mbox_chan);
+ return PTR_ERR(pcc_chan);
}
- /*
- * The PCC mailbox controller driver should
- * have parsed the PCCT (global table of all
- * PCC channels) and stored pointers to the
- * subspace communication region in con_priv.
- */
- cppc_ss = ctx->mbox_chan->con_priv;
- if (!cppc_ss) {
- dev_err(&pdev->dev, "PPC subspace not found\n");
- rc = -ENOENT;
- goto mbox_err;
- }
+ ctx->pcc_chan = pcc_chan;
+ ctx->mbox_chan = pcc_chan->mchan;
if (!ctx->mbox_chan->mbox->txdone_irq) {
dev_err(&pdev->dev, "PCC IRQ not supported\n");
@@ -512,17 +503,17 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
* This is the shared communication region
* for the OS and Platform to communicate over.
*/
- ctx->comm_base_addr = cppc_ss->base_address;
+ ctx->comm_base_addr = pcc_chan->shmem_base_addr;
if (ctx->comm_base_addr) {
if (version == XGENE_SLIMPRO_I2C_V2)
ctx->pcc_comm_addr = memremap(
ctx->comm_base_addr,
- cppc_ss->length,
+ pcc_chan->shmem_size,
MEMREMAP_WT);
else
ctx->pcc_comm_addr = memremap(
ctx->comm_base_addr,
- cppc_ss->length,
+ pcc_chan->shmem_size,
MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
@@ -561,7 +552,7 @@ mbox_err:
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
return rc;
}
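The shape of the converted PCC setup, pulled out of the driver specifics (a sketch using the new struct pcc_mbox_chan API; error handling elided):

struct pcc_mbox_chan *pchan = pcc_mbox_request_channel(cl, idx);
void *shmem;

if (IS_ERR(pchan))
        return PTR_ERR(pchan);
/* The shared-memory region now comes from the channel itself. */
shmem = memremap(pchan->shmem_base_addr, pchan->shmem_size, MEMREMAP_WB);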
@@ -575,7 +566,7 @@ static int xgene_slimpro_i2c_remove(struct platform_device *pdev)
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
return 0;
}
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 546cc935e035..80631f93ad2f 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -398,24 +398,20 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
}
EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
-static int i2c_acpi_find_match_adapter(struct device *dev, const void *data)
-{
- struct i2c_adapter *adapter = i2c_verify_adapter(dev);
-
- if (!adapter)
- return 0;
-
- return ACPI_HANDLE(dev) == (acpi_handle)data;
-}
-
struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
{
+ struct i2c_adapter *adapter;
struct device *dev;
- dev = bus_find_device(&i2c_bus_type, NULL, handle,
- i2c_acpi_find_match_adapter);
+ dev = bus_find_device(&i2c_bus_type, NULL, handle, device_match_acpi_handle);
+ if (!dev)
+ return NULL;
+
+ adapter = i2c_verify_adapter(dev);
+ if (!adapter)
+ put_device(dev);
- return dev ? i2c_verify_adapter(dev) : NULL;
+ return adapter;
}
EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
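Note the reference semantics: the returned adapter carries the device reference taken by bus_find_device(), so a caller is expected to drop it when done (hypothetical usage sketch):

struct i2c_adapter *adap = i2c_acpi_find_adapter_by_handle(handle);

if (adap) {
        /* ... use the adapter ... */
        put_device(&adap->dev); /* balances bus_find_device() */
}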
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index e6c543b5ee1d..0b66e25c0e2d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -89,6 +89,12 @@ static struct cpuidle_state *cpuidle_state_table __initdata;
static unsigned int mwait_substates __initdata;
/*
+ * Enable interrupts before entering the C-state. On some platforms and for
+ * some C-states, this may measurably decrease interrupt latency.
+ */
+#define CPUIDLE_FLAG_IRQ_ENABLE BIT(14)
+
+/*
* Enable this state by default even if the ACPI _CST does not list it.
*/
#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
@@ -127,6 +133,9 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
unsigned long eax = flg2MWAIT(state->flags);
unsigned long ecx = 1; /* break on interrupt flag */
+ if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE)
+ local_irq_enable();
+
mwait_idle_with_hints(eax, ecx);
return index;
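This is safe because MWAIT is entered with ECX bit 0 set ("break on interrupt"), so the CPU wakes on an interrupt regardless of RFLAGS.IF; enabling IRQs first only changes where the pending interrupt gets serviced (illustrative timeline):

/*
 * default:                      with CPUIDLE_FLAG_IRQ_ENABLE:
 *   mwait(...)  <- IRQ wakes      local_irq_enable()
 *   return index                  mwait(...)  <- IRQ serviced here
 *   <IRQ serviced later>          return index
 */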
@@ -698,7 +707,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
- .flags = MWAIT2flg(0x00),
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
@@ -727,7 +736,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
- .flags = MWAIT2flg(0x00),
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index a0e9061f6d6b..49587c992a6d 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -30,6 +30,35 @@ config ADIS16209
To compile this driver as a module, say M here: the module will be
called adis16209.
+config ADXL313
+ tristate
+
+config ADXL313_I2C
+ tristate "Analog Devices ADXL313 3-Axis Digital Accelerometer I2C Driver"
+ depends on I2C
+ select ADXL313
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build i2c support for the Analog Devices
+ ADXL313 3-axis digital accelerometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called adxl313_i2c and you will also get adxl313_core
+ for the core module.
+
+config ADXL313_SPI
+ tristate "Analog Devices ADXL313 3-Axis Digital Accelerometer SPI Driver"
+ depends on SPI
+ select ADXL313
+ select REGMAP_SPI
+ help
+ Say Y here if you want to build spi support for the Analog Devices
+ ADXL313 3-axis digital accelerometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called adxl313_spi and you will also get adxl313_core
+ for the core module.
+
config ADXL345
tristate
@@ -61,6 +90,39 @@ config ADXL345_SPI
will be called adxl345_spi and you will also get adxl345_core
for the core module.
+config ADXL355
+ tristate
+
+config ADXL355_I2C
+ tristate "Analog Devices ADXL355 3-Axis Digital Accelerometer I2C Driver"
+ depends on I2C
+ select ADXL355
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here if you want to build i2c support for the Analog Devices
+ ADXL355 3-axis digital accelerometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called adxl355_i2c and you will also get adxl355_core
+ for the core module.
+
+config ADXL355_SPI
+ tristate "Analog Devices ADXL355 3-Axis Digital Accelerometer SPI Driver"
+ depends on SPI
+ select ADXL355
+ select REGMAP_SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here if you want to build spi support for the Analog Devices
+ ADXL355 3-axis digital accelerometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called adxl355_spi and you will also get adxl355_core
+ for the core module.
+
config ADXL372
tristate
select IIO_BUFFER
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 89280e823bcd..d03e2f6bba08 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -6,9 +6,15 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ADIS16201) += adis16201.o
obj-$(CONFIG_ADIS16209) += adis16209.o
+obj-$(CONFIG_ADXL313) += adxl313_core.o
+obj-$(CONFIG_ADXL313_I2C) += adxl313_i2c.o
+obj-$(CONFIG_ADXL313_SPI) += adxl313_spi.o
obj-$(CONFIG_ADXL345) += adxl345_core.o
obj-$(CONFIG_ADXL345_I2C) += adxl345_i2c.o
obj-$(CONFIG_ADXL345_SPI) += adxl345_spi.o
+obj-$(CONFIG_ADXL355) += adxl355_core.o
+obj-$(CONFIG_ADXL355_I2C) += adxl355_i2c.o
+obj-$(CONFIG_ADXL355_SPI) += adxl355_spi.o
obj-$(CONFIG_ADXL372) += adxl372.o
obj-$(CONFIG_ADXL372_I2C) += adxl372_i2c.o
obj-$(CONFIG_ADXL372_SPI) += adxl372_spi.o
diff --git a/drivers/iio/accel/adxl313.h b/drivers/iio/accel/adxl313.h
new file mode 100644
index 000000000000..4415f2fc07e1
--- /dev/null
+++ b/drivers/iio/accel/adxl313.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ADXL313 3-Axis Digital Accelerometer
+ *
+ * Copyright (c) 2021 Lucas Stankus <lucas.p.stankus@gmail.com>
+ */
+
+#ifndef _ADXL313_H_
+#define _ADXL313_H_
+
+/* ADXL313 register definitions */
+#define ADXL313_REG_DEVID0 0x00
+#define ADXL313_REG_DEVID1 0x01
+#define ADXL313_REG_PARTID 0x02
+#define ADXL313_REG_XID 0x04
+#define ADXL313_REG_SOFT_RESET 0x18
+#define ADXL313_REG_OFS_AXIS(index) (0x1E + (index))
+#define ADXL313_REG_THRESH_ACT 0x24
+#define ADXL313_REG_ACT_INACT_CTL 0x27
+#define ADXL313_REG_BW_RATE 0x2C
+#define ADXL313_REG_POWER_CTL 0x2D
+#define ADXL313_REG_INT_MAP 0x2F
+#define ADXL313_REG_DATA_FORMAT 0x31
+#define ADXL313_REG_DATA_AXIS(index) (0x32 + ((index) * 2))
+#define ADXL313_REG_FIFO_CTL 0x38
+#define ADXL313_REG_FIFO_STATUS 0x39
+
+#define ADXL313_DEVID0 0xAD
+#define ADXL313_DEVID1 0x1D
+#define ADXL313_PARTID 0xCB
+#define ADXL313_SOFT_RESET 0x52
+
+#define ADXL313_RATE_MSK GENMASK(3, 0)
+#define ADXL313_RATE_BASE 6
+
+#define ADXL313_POWER_CTL_MSK GENMASK(3, 2)
+#define ADXL313_MEASUREMENT_MODE BIT(3)
+
+#define ADXL313_RANGE_MSK GENMASK(1, 0)
+#define ADXL313_RANGE_4G 3
+
+#define ADXL313_FULL_RES BIT(3)
+#define ADXL313_SPI_3WIRE BIT(6)
+#define ADXL313_I2C_DISABLE BIT(6)
+
+extern const struct regmap_access_table adxl313_readable_regs_table;
+
+extern const struct regmap_access_table adxl313_writable_regs_table;
+
+int adxl313_core_probe(struct device *dev,
+ struct regmap *regmap,
+ const char *name,
+ int (*setup)(struct device *, struct regmap *));
+#endif /* _ADXL313_H_ */
diff --git a/drivers/iio/accel/adxl313_core.c b/drivers/iio/accel/adxl313_core.c
new file mode 100644
index 000000000000..0d243341f1a7
--- /dev/null
+++ b/drivers/iio/accel/adxl313_core.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADXL313 3-Axis Digital Accelerometer
+ *
+ * Copyright (c) 2021 Lucas Stankus <lucas.p.stankus@gmail.com>
+ *
+ * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADXL313.pdf
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "adxl313.h"
+
+static const struct regmap_range adxl313_readable_reg_range[] = {
+ regmap_reg_range(ADXL313_REG_DEVID0, ADXL313_REG_XID),
+ regmap_reg_range(ADXL313_REG_SOFT_RESET, ADXL313_REG_SOFT_RESET),
+ regmap_reg_range(ADXL313_REG_OFS_AXIS(0), ADXL313_REG_OFS_AXIS(2)),
+ regmap_reg_range(ADXL313_REG_THRESH_ACT, ADXL313_REG_ACT_INACT_CTL),
+ regmap_reg_range(ADXL313_REG_BW_RATE, ADXL313_REG_FIFO_STATUS),
+};
+
+const struct regmap_access_table adxl313_readable_regs_table = {
+ .yes_ranges = adxl313_readable_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl313_readable_reg_range),
+};
+EXPORT_SYMBOL_GPL(adxl313_readable_regs_table);
+
+static const struct regmap_range adxl313_writable_reg_range[] = {
+ regmap_reg_range(ADXL313_REG_SOFT_RESET, ADXL313_REG_SOFT_RESET),
+ regmap_reg_range(ADXL313_REG_OFS_AXIS(0), ADXL313_REG_OFS_AXIS(2)),
+ regmap_reg_range(ADXL313_REG_THRESH_ACT, ADXL313_REG_ACT_INACT_CTL),
+ regmap_reg_range(ADXL313_REG_BW_RATE, ADXL313_REG_INT_MAP),
+ regmap_reg_range(ADXL313_REG_DATA_FORMAT, ADXL313_REG_DATA_FORMAT),
+ regmap_reg_range(ADXL313_REG_FIFO_CTL, ADXL313_REG_FIFO_CTL),
+};
+
+const struct regmap_access_table adxl313_writable_regs_table = {
+ .yes_ranges = adxl313_writable_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl313_writable_reg_range),
+};
+EXPORT_SYMBOL_GPL(adxl313_writable_regs_table);
+
+struct adxl313_data {
+ struct regmap *regmap;
+ struct mutex lock; /* lock to protect transf_buf */
+ __le16 transf_buf ____cacheline_aligned;
+};
+
+static const int adxl313_odr_freqs[][2] = {
+ [0] = { 6, 250000 },
+ [1] = { 12, 500000 },
+ [2] = { 25, 0 },
+ [3] = { 50, 0 },
+ [4] = { 100, 0 },
+ [5] = { 200, 0 },
+ [6] = { 400, 0 },
+ [7] = { 800, 0 },
+ [8] = { 1600, 0 },
+ [9] = { 3200, 0 },
+};
+
+#define ADXL313_ACCEL_CHANNEL(index, axis) { \
+ .type = IIO_ACCEL, \
+ .address = index, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_type = { \
+ .realbits = 13, \
+ }, \
+}
+
+static const struct iio_chan_spec adxl313_channels[] = {
+ ADXL313_ACCEL_CHANNEL(0, X),
+ ADXL313_ACCEL_CHANNEL(1, Y),
+ ADXL313_ACCEL_CHANNEL(2, Z),
+};
+
+static int adxl313_set_odr(struct adxl313_data *data,
+ unsigned int freq1, unsigned int freq2)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(adxl313_odr_freqs); i++) {
+ if (adxl313_odr_freqs[i][0] == freq1 &&
+ adxl313_odr_freqs[i][1] == freq2)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(adxl313_odr_freqs))
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap, ADXL313_REG_BW_RATE,
+ ADXL313_RATE_MSK,
+ FIELD_PREP(ADXL313_RATE_MSK, ADXL313_RATE_BASE + i));
+}
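For example, a requested rate of 12.5 Hz matches index 1 of the table above, so the BW_RATE field is programmed to ADXL313_RATE_BASE + 1 = 7 (hypothetical caller sketch):

/* Select a 12.5 Hz output data rate. */
ret = adxl313_set_odr(data, 12, 500000);        /* writes rate code 7  */
if (ret)
        return ret;                             /* -EINVAL if no match */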
+
+static int adxl313_read_axis(struct adxl313_data *data,
+ struct iio_chan_spec const *chan)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = regmap_bulk_read(data->regmap,
+ ADXL313_REG_DATA_AXIS(chan->address),
+ &data->transf_buf, sizeof(data->transf_buf));
+ if (ret)
+ goto unlock_ret;
+
+ ret = le16_to_cpu(data->transf_buf);
+
+unlock_ret:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int adxl313_read_freq_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (const int *)adxl313_odr_freqs;
+ *length = ARRAY_SIZE(adxl313_odr_freqs) * 2;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl313_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adxl313_data *data = iio_priv(indio_dev);
+ unsigned int regval;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = adxl313_read_axis(data, chan);
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(ret, chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * Scale for any g range is given in the datasheet as
+ * 1024 LSB/g = 0.0009765625 * 9.80665 = 0.009576806640625 m/s^2
+ */
+ *val = 0;
+ *val2 = 9576806;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = regmap_read(data->regmap,
+ ADXL313_REG_OFS_AXIS(chan->address), &regval);
+ if (ret)
+ return ret;
+
+ /*
+ * 8-bit resolution at +/- 0.5 g, i.e., 4x the accel data
+ * scale factor at full resolution
+ */
+ *val = sign_extend32(regval, 7) * 4;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = regmap_read(data->regmap, ADXL313_REG_BW_RATE, &regval);
+ if (ret)
+ return ret;
+
+ ret = FIELD_GET(ADXL313_RATE_MSK, regval) - ADXL313_RATE_BASE;
+ *val = adxl313_odr_freqs[ret][0];
+ *val2 = adxl313_odr_freqs[ret][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
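Sanity check on the scale above: in full-resolution mode 1024 LSB corresponds to 1 g, so a raw reading of 1024 converts back to 9.80665 m/s^2 (user-space sketch, not driver code):

/* raw * (9.80665 / 1024) m/s^2 per LSB */
static double adxl313_lsb_to_ms2(int raw)
{
        return raw * 0.009576806640625; /* 1024 -> 9.80665 */
}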
+
+static int adxl313_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adxl313_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ /*
+ * 8-bit resolution at +/- 0.5 g, i.e., 4x the accel data
+ * scale factor at full resolution
+ */
+ if (clamp_val(val, -128 * 4, 127 * 4) != val)
+ return -EINVAL;
+
+ return regmap_write(data->regmap,
+ ADXL313_REG_OFS_AXIS(chan->address),
+ val / 4);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return adxl313_set_odr(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info adxl313_info = {
+ .read_raw = adxl313_read_raw,
+ .write_raw = adxl313_write_raw,
+ .read_avail = adxl313_read_freq_avail,
+};
+
+static int adxl313_setup(struct device *dev, struct adxl313_data *data,
+ int (*setup)(struct device *, struct regmap *))
+{
+ unsigned int regval;
+ int ret;
+
+ /* Ensure the device is in a consistent state after start-up */
+ ret = regmap_write(data->regmap, ADXL313_REG_SOFT_RESET,
+ ADXL313_SOFT_RESET);
+ if (ret)
+ return ret;
+
+ if (setup) {
+ ret = setup(dev, data->regmap);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(data->regmap, ADXL313_REG_DEVID0, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_DEVID0) {
+ dev_err(dev, "Invalid manufacturer ID: 0x%02x\n", regval);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(data->regmap, ADXL313_REG_DEVID1, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_DEVID1) {
+ dev_err(dev, "Invalid mems ID: 0x%02x\n", regval);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(data->regmap, ADXL313_REG_PARTID, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_PARTID) {
+ dev_err(dev, "Invalid device ID: 0x%02x\n", regval);
+ return -ENODEV;
+ }
+
+ /* Sets the range to +/- 4g */
+ ret = regmap_update_bits(data->regmap, ADXL313_REG_DATA_FORMAT,
+ ADXL313_RANGE_MSK,
+ FIELD_PREP(ADXL313_RANGE_MSK, ADXL313_RANGE_4G));
+ if (ret)
+ return ret;
+
+ /* Enables full resolution */
+ ret = regmap_update_bits(data->regmap, ADXL313_REG_DATA_FORMAT,
+ ADXL313_FULL_RES, ADXL313_FULL_RES);
+ if (ret)
+ return ret;
+
+ /* Enables measurement mode */
+ return regmap_update_bits(data->regmap, ADXL313_REG_POWER_CTL,
+ ADXL313_POWER_CTL_MSK,
+ ADXL313_MEASUREMENT_MODE);
+}
+
+/**
+ * adxl313_core_probe() - probe and setup for adxl313 accelerometer
+ * @dev: Driver model representation of the device
+ * @regmap: Register map of the device
+ * @name: Device name buffer reference
+ * @setup: Setup routine to be executed right before the standard device
+ * setup, can also be set to NULL if not required
+ *
+ * Return: 0 on success, negative errno on error cases
+ */
+int adxl313_core_probe(struct device *dev,
+ struct regmap *regmap,
+ const char *name,
+ int (*setup)(struct device *, struct regmap *))
+{
+ struct adxl313_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->regmap = regmap;
+ mutex_init(&data->lock);
+
+ indio_dev->name = name;
+ indio_dev->info = &adxl313_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = adxl313_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adxl313_channels);
+
+ ret = adxl313_setup(dev, data, setup);
+ if (ret) {
+ dev_err(dev, "ADXL313 setup failed\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_GPL(adxl313_core_probe);
+
+MODULE_AUTHOR("Lucas Stankus <lucas.p.stankus@gmail.com>");
+MODULE_DESCRIPTION("ADXL313 3-Axis Digital Accelerometer core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/adxl313_i2c.c b/drivers/iio/accel/adxl313_i2c.c
new file mode 100644
index 000000000000..82e9fb2db1e6
--- /dev/null
+++ b/drivers/iio/accel/adxl313_i2c.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADXL313 3-Axis Digital Accelerometer
+ *
+ * Copyright (c) 2021 Lucas Stankus <lucas.p.stankus@gmail.com>
+ *
+ * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADXL313.pdf
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "adxl313.h"
+
+static const struct regmap_config adxl313_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl313_readable_regs_table,
+ .wr_table = &adxl313_writable_regs_table,
+ .max_register = 0x39,
+};
+
+static int adxl313_i2c_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &adxl313_i2c_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Error initializing i2c regmap: %ld\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return adxl313_core_probe(&client->dev, regmap, client->name, NULL);
+}
+
+static const struct i2c_device_id adxl313_i2c_id[] = {
+ { "adxl313" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, adxl313_i2c_id);
+
+static const struct of_device_id adxl313_of_match[] = {
+ { .compatible = "adi,adxl313" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, adxl313_of_match);
+
+static struct i2c_driver adxl313_i2c_driver = {
+ .driver = {
+ .name = "adxl313_i2c",
+ .of_match_table = adxl313_of_match,
+ },
+ .probe_new = adxl313_i2c_probe,
+ .id_table = adxl313_i2c_id,
+};
+
+module_i2c_driver(adxl313_i2c_driver);
+
+MODULE_AUTHOR("Lucas Stankus <lucas.p.stankus@gmail.com>");
+MODULE_DESCRIPTION("ADXL313 3-Axis Digital Accelerometer I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/adxl313_spi.c b/drivers/iio/accel/adxl313_spi.c
new file mode 100644
index 000000000000..a6162f36ef52
--- /dev/null
+++ b/drivers/iio/accel/adxl313_spi.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADXL313 3-Axis Digital Accelerometer
+ *
+ * Copyright (c) 2021 Lucas Stankus <lucas.p.stankus@gmail.com>
+ *
+ * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADXL313.pdf
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "adxl313.h"
+
+static const struct regmap_config adxl313_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl313_readable_regs_table,
+ .wr_table = &adxl313_writable_regs_table,
+ .max_register = 0x39,
+ /* Setting bits 7 and 6 enables multiple-byte read */
+ .read_flag_mask = BIT(7) | BIT(6),
+};
+
+static int adxl313_spi_setup(struct device *dev, struct regmap *regmap)
+{
+ struct spi_device *spi = container_of(dev, struct spi_device, dev);
+ int ret;
+
+ if (spi->mode & SPI_3WIRE) {
+ ret = regmap_write(regmap, ADXL313_REG_DATA_FORMAT,
+ ADXL313_SPI_3WIRE);
+ if (ret)
+ return ret;
+ }
+
+ return regmap_update_bits(regmap, ADXL313_REG_POWER_CTL,
+ ADXL313_I2C_DISABLE, ADXL313_I2C_DISABLE);
+}
+
+static int adxl313_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+ int ret;
+
+ spi->mode |= SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ regmap = devm_regmap_init_spi(spi, &adxl313_spi_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Error initializing spi regmap: %ld\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return adxl313_core_probe(&spi->dev, regmap, id->name,
+ &adxl313_spi_setup);
+}
+
+static const struct spi_device_id adxl313_spi_id[] = {
+ { "adxl313" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(spi, adxl313_spi_id);
+
+static const struct of_device_id adxl313_of_match[] = {
+ { .compatible = "adi,adxl313" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, adxl313_of_match);
+
+static struct spi_driver adxl313_spi_driver = {
+ .driver = {
+ .name = "adxl313_spi",
+ .of_match_table = adxl313_of_match,
+ },
+ .probe = adxl313_spi_probe,
+ .id_table = adxl313_spi_id,
+};
+
+module_spi_driver(adxl313_spi_driver);
+
+MODULE_AUTHOR("Lucas Stankus <lucas.p.stankus@gmail.com>");
+MODULE_DESCRIPTION("ADXL313 3-Axis Digital Accelerometer SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/adxl355.h b/drivers/iio/accel/adxl355.h
new file mode 100644
index 000000000000..6dd49b13e4fd
--- /dev/null
+++ b/drivers/iio/accel/adxl355.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ADXL355 3-Axis Digital Accelerometer
+ *
+ * Copyright (c) 2021 Puranjay Mohan <puranjay12@gmail.com>
+ */
+
+#ifndef _ADXL355_H_
+#define _ADXL355_H_
+
+#include <linux/regmap.h>
+
+struct device;
+
+extern const struct regmap_access_table adxl355_readable_regs_tbl;
+extern const struct regmap_access_table adxl355_writeable_regs_tbl;
+
+int adxl355_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name);
+
+#endif /* _ADXL355_H_ */
diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
new file mode 100644
index 000000000000..4f485909f459
--- /dev/null
+++ b/drivers/iio/accel/adxl355_core.c
@@ -0,0 +1,765 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADXL355 3-Axis Digital Accelerometer IIO core driver
+ *
+ * Copyright (c) 2021 Puranjay Mohan <puranjay12@gmail.com>
+ *
+ * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/adxl354_adxl355.pdf
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/limits.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <asm/unaligned.h>
+
+#include "adxl355.h"
+
+/* ADXL355 Register Definitions */
+#define ADXL355_DEVID_AD_REG 0x00
+#define ADXL355_DEVID_MST_REG 0x01
+#define ADXL355_PARTID_REG 0x02
+#define ADXL355_STATUS_REG 0x04
+#define ADXL355_FIFO_ENTRIES_REG 0x05
+#define ADXL355_TEMP2_REG 0x06
+#define ADXL355_XDATA3_REG 0x08
+#define ADXL355_YDATA3_REG 0x0B
+#define ADXL355_ZDATA3_REG 0x0E
+#define ADXL355_FIFO_DATA_REG 0x11
+#define ADXL355_OFFSET_X_H_REG 0x1E
+#define ADXL355_OFFSET_Y_H_REG 0x20
+#define ADXL355_OFFSET_Z_H_REG 0x22
+#define ADXL355_ACT_EN_REG 0x24
+#define ADXL355_ACT_THRESH_H_REG 0x25
+#define ADXL355_ACT_THRESH_L_REG 0x26
+#define ADXL355_ACT_COUNT_REG 0x27
+#define ADXL355_FILTER_REG 0x28
+#define ADXL355_FILTER_ODR_MSK GENMASK(3, 0)
+#define ADXL355_FILTER_HPF_MSK GENMASK(6, 4)
+#define ADXL355_FIFO_SAMPLES_REG 0x29
+#define ADXL355_INT_MAP_REG 0x2A
+#define ADXL355_SYNC_REG 0x2B
+#define ADXL355_RANGE_REG 0x2C
+#define ADXL355_POWER_CTL_REG 0x2D
+#define ADXL355_POWER_CTL_MODE_MSK GENMASK(1, 0)
+#define ADXL355_POWER_CTL_DRDY_MSK BIT(2)
+#define ADXL355_SELF_TEST_REG 0x2E
+#define ADXL355_RESET_REG 0x2F
+
+#define ADXL355_DEVID_AD_VAL 0xAD
+#define ADXL355_DEVID_MST_VAL 0x1D
+#define ADXL355_PARTID_VAL 0xED
+#define ADXL355_RESET_CODE 0x52
+
+#define MEGA 1000000UL
+#define TERA 1000000000000ULL
+
+static const struct regmap_range adxl355_read_reg_range[] = {
+ regmap_reg_range(ADXL355_DEVID_AD_REG, ADXL355_FIFO_DATA_REG),
+ regmap_reg_range(ADXL355_OFFSET_X_H_REG, ADXL355_SELF_TEST_REG),
+};
+
+const struct regmap_access_table adxl355_readable_regs_tbl = {
+ .yes_ranges = adxl355_read_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl355_read_reg_range),
+};
+EXPORT_SYMBOL_GPL(adxl355_readable_regs_tbl);
+
+static const struct regmap_range adxl355_write_reg_range[] = {
+ regmap_reg_range(ADXL355_OFFSET_X_H_REG, ADXL355_RESET_REG),
+};
+
+const struct regmap_access_table adxl355_writeable_regs_tbl = {
+ .yes_ranges = adxl355_write_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl355_write_reg_range),
+};
+EXPORT_SYMBOL_GPL(adxl355_writeable_regs_tbl);
+
+enum adxl355_op_mode {
+ ADXL355_MEASUREMENT,
+ ADXL355_STANDBY,
+ ADXL355_TEMP_OFF,
+};
+
+enum adxl355_odr {
+ ADXL355_ODR_4000HZ,
+ ADXL355_ODR_2000HZ,
+ ADXL355_ODR_1000HZ,
+ ADXL355_ODR_500HZ,
+ ADXL355_ODR_250HZ,
+ ADXL355_ODR_125HZ,
+ ADXL355_ODR_62_5HZ,
+ ADXL355_ODR_31_25HZ,
+ ADXL355_ODR_15_625HZ,
+ ADXL355_ODR_7_813HZ,
+ ADXL355_ODR_3_906HZ,
+};
+
+enum adxl355_hpf_3db {
+ ADXL355_HPF_OFF,
+ ADXL355_HPF_24_7,
+ ADXL355_HPF_6_2084,
+ ADXL355_HPF_1_5545,
+ ADXL355_HPF_0_3862,
+ ADXL355_HPF_0_0954,
+ ADXL355_HPF_0_0238,
+};
+
+static const int adxl355_odr_table[][2] = {
+ [0] = {4000, 0},
+ [1] = {2000, 0},
+ [2] = {1000, 0},
+ [3] = {500, 0},
+ [4] = {250, 0},
+ [5] = {125, 0},
+ [6] = {62, 500000},
+ [7] = {31, 250000},
+ [8] = {15, 625000},
+ [9] = {7, 813000},
+ [10] = {3, 906000},
+};
+
+static const int adxl355_hpf_3db_multipliers[] = {
+ 0,
+ 247000,
+ 62084,
+ 15545,
+ 3862,
+ 954,
+ 238,
+};
+
+enum adxl355_chans {
+ chan_x, chan_y, chan_z,
+};
+
+struct adxl355_chan_info {
+ u8 data_reg;
+ u8 offset_reg;
+};
+
+static const struct adxl355_chan_info adxl355_chans[] = {
+ [chan_x] = {
+ .data_reg = ADXL355_XDATA3_REG,
+ .offset_reg = ADXL355_OFFSET_X_H_REG
+ },
+ [chan_y] = {
+ .data_reg = ADXL355_YDATA3_REG,
+ .offset_reg = ADXL355_OFFSET_Y_H_REG
+ },
+ [chan_z] = {
+ .data_reg = ADXL355_ZDATA3_REG,
+ .offset_reg = ADXL355_OFFSET_Z_H_REG
+ },
+};
+
+struct adxl355_data {
+ struct regmap *regmap;
+ struct device *dev;
+ struct mutex lock; /* lock to protect op_mode */
+ enum adxl355_op_mode op_mode;
+ enum adxl355_odr odr;
+ enum adxl355_hpf_3db hpf_3db;
+ int calibbias[3];
+ int adxl355_hpf_3db_table[7][2];
+ struct iio_trigger *dready_trig;
+ union {
+ u8 transf_buf[3];
+ struct {
+ u8 buf[14];
+ s64 ts;
+ } buffer;
+ } ____cacheline_aligned;
+};
+
+static int adxl355_set_op_mode(struct adxl355_data *data,
+ enum adxl355_op_mode op_mode)
+{
+ int ret;
+
+ if (data->op_mode == op_mode)
+ return 0;
+
+ ret = regmap_update_bits(data->regmap, ADXL355_POWER_CTL_REG,
+ ADXL355_POWER_CTL_MODE_MSK, op_mode);
+ if (ret)
+ return ret;
+
+ data->op_mode = op_mode;
+
+ return ret;
+}
+
+static int adxl355_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct adxl355_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_update_bits(data->regmap, ADXL355_POWER_CTL_REG,
+ ADXL355_POWER_CTL_DRDY_MSK,
+ FIELD_PREP(ADXL355_POWER_CTL_DRDY_MSK,
+ state ? 0 : 1));
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static void adxl355_fill_3db_frequency_table(struct adxl355_data *data)
+{
+ u32 multiplier;
+ u64 div, rem;
+ u64 odr;
+ int i;
+
+ odr = mul_u64_u32_shr(adxl355_odr_table[data->odr][0], MEGA, 0) +
+ adxl355_odr_table[data->odr][1];
+
+ for (i = 0; i < ARRAY_SIZE(adxl355_hpf_3db_multipliers); i++) {
+ multiplier = adxl355_hpf_3db_multipliers[i];
+ div = div64_u64_rem(mul_u64_u32_shr(odr, multiplier, 0),
+ TERA * 100, &rem);
+
+ data->adxl355_hpf_3db_table[i][0] = div;
+ data->adxl355_hpf_3db_table[i][1] = div_u64(rem, MEGA * 100);
+ }
+}
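Worked example of the computation above: at ODR = 4000 Hz, odr = 4000 * MEGA; for the 24.7 x 10^-4 corner, multiplier = 247000, so odr * multiplier = 9.88e14. Dividing by TERA * 100 gives div = 9 and rem / (MEGA * 100) = 880000, i.e. a 9.88 Hz -3 dB corner (standalone check):

uint64_t odr  = 4000ULL * 1000000;                       /* Hz scaled by MEGA */
uint64_t prod = odr * 247000;                            /* 9.88e14           */
uint64_t hz   = prod / (100 * 1000000000000ULL);         /* 9                 */
uint64_t uhz  = (prod % (100 * 1000000000000ULL)) / 100000000; /* 880000      */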
+
+static int adxl355_setup(struct adxl355_data *data)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(data->regmap, ADXL355_DEVID_AD_REG, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL355_DEVID_AD_VAL) {
+ dev_err(data->dev, "Invalid ADI ID 0x%02x\n", regval);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(data->regmap, ADXL355_DEVID_MST_REG, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL355_DEVID_MST_VAL) {
+ dev_err(data->dev, "Invalid MEMS ID 0x%02x\n", regval);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(data->regmap, ADXL355_PARTID_REG, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL355_PARTID_VAL) {
+ dev_err(data->dev, "Invalid DEV ID 0x%02x\n", regval);
+ return -ENODEV;
+ }
+
+ /*
+ * Perform a software reset to make sure the device is in a consistent
+ * state after start-up.
+ */
+ ret = regmap_write(data->regmap, ADXL355_RESET_REG, ADXL355_RESET_CODE);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, ADXL355_POWER_CTL_REG,
+ ADXL355_POWER_CTL_DRDY_MSK,
+ FIELD_PREP(ADXL355_POWER_CTL_DRDY_MSK, 1));
+ if (ret)
+ return ret;
+
+ adxl355_fill_3db_frequency_table(data);
+
+ return adxl355_set_op_mode(data, ADXL355_MEASUREMENT);
+}
+
+static int adxl355_get_temp_data(struct adxl355_data *data, u8 addr)
+{
+ return regmap_bulk_read(data->regmap, addr, data->transf_buf, 2);
+}
+
+static int adxl355_read_axis(struct adxl355_data *data, u8 addr)
+{
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, addr, data->transf_buf,
+ ARRAY_SIZE(data->transf_buf));
+ if (ret)
+ return ret;
+
+ return get_unaligned_be24(data->transf_buf);
+}
+
+static int adxl355_find_match(const int (*freq_tbl)[2], const int n,
+ const int val, const int val2)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (freq_tbl[i][0] == val && freq_tbl[i][1] == val2)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int adxl355_set_odr(struct adxl355_data *data,
+ enum adxl355_odr odr)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ if (data->odr == odr) {
+ mutex_unlock(&data->lock);
+ return 0;
+ }
+
+ ret = adxl355_set_op_mode(data, ADXL355_STANDBY);
+ if (ret)
+ goto err_unlock;
+
+ ret = regmap_update_bits(data->regmap, ADXL355_FILTER_REG,
+ ADXL355_FILTER_ODR_MSK,
+ FIELD_PREP(ADXL355_FILTER_ODR_MSK, odr));
+ if (ret)
+ goto err_set_opmode;
+
+ data->odr = odr;
+ adxl355_fill_3db_frequency_table(data);
+
+ ret = adxl355_set_op_mode(data, ADXL355_MEASUREMENT);
+ if (ret)
+ goto err_set_opmode;
+
+ mutex_unlock(&data->lock);
+ return 0;
+
+err_set_opmode:
+ adxl355_set_op_mode(data, ADXL355_MEASUREMENT);
+err_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int adxl355_set_hpf_3db(struct adxl355_data *data,
+ enum adxl355_hpf_3db hpf)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ if (data->hpf_3db == hpf) {
+ mutex_unlock(&data->lock);
+ return 0;
+ }
+
+ ret = adxl355_set_op_mode(data, ADXL355_STANDBY);
+ if (ret)
+ goto err_unlock;
+
+ ret = regmap_update_bits(data->regmap, ADXL355_FILTER_REG,
+ ADXL355_FILTER_HPF_MSK,
+ FIELD_PREP(ADXL355_FILTER_HPF_MSK, hpf));
+ if (ret)
+ goto err_set_opmode;
+
+ data->hpf_3db = hpf;
+
+ ret = adxl355_set_op_mode(data, ADXL355_MEASUREMENT);
+ if (ret)
+ goto err_set_opmode;
+
+ mutex_unlock(&data->lock);
+ return 0;
+
+err_set_opmode:
+ adxl355_set_op_mode(data, ADXL355_MEASUREMENT);
+err_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int adxl355_set_calibbias(struct adxl355_data *data,
+ enum adxl355_chans chan, int calibbias)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = adxl355_set_op_mode(data, ADXL355_STANDBY);
+ if (ret)
+ goto err_unlock;
+
+ put_unaligned_be16(calibbias, data->transf_buf);
+ ret = regmap_bulk_write(data->regmap,
+ adxl355_chans[chan].offset_reg,
+ data->transf_buf, 2);
+ if (ret)
+ goto err_set_opmode;
+
+ data->calibbias[chan] = calibbias;
+
+ ret = adxl355_set_op_mode(data, ADXL355_MEASUREMENT);
+ if (ret)
+ goto err_set_opmode;
+
+ mutex_unlock(&data->lock);
+ return 0;
+
+err_set_opmode:
+ adxl355_set_op_mode(data, ADXL355_MEASUREMENT);
+err_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int adxl355_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adxl355_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_TEMP:
+ ret = adxl355_get_temp_data(data, chan->address);
+ if (ret < 0)
+ return ret;
+ *val = get_unaligned_be16(data->transf_buf);
+
+ return IIO_VAL_INT;
+ case IIO_ACCEL:
+ ret = adxl355_read_axis(data, adxl355_chans[
+ chan->address].data_reg);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ /*
+ * The datasheet defines an intercept of 1885 LSB at 25 degC
+ * and a slope of -9.05 LSB/degC. The temperature follows from
+ * Temp = ((RAW - 1885) / (-9.05)) + 25, but this doesn't match
+ * the IIO convention of Temp = (RAW + OFFSET) * SCALE. Hence,
+ * with some rearranging, we get the scale as -110.497238
+ * and the offset as -2111.25.
+ */
+ case IIO_TEMP:
+ *val = -110;
+ *val2 = 497238;
+ return IIO_VAL_INT_PLUS_MICRO;
+ /*
+ * At +/- 2 g with 20-bit resolution, the scale is given in the
+ * datasheet as 3.9 ug/LSB = 0.0000039 * 9.80665 = 0.00003824593 m/s^2.
+ */
+ case IIO_ACCEL:
+ *val = 0;
+ *val2 = 38245;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -2111;
+ *val2 = 250000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ *val = sign_extend32(data->calibbias[chan->address], 15);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adxl355_odr_table[data->odr][0];
+ *val2 = adxl355_odr_table[data->odr][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ *val = data->adxl355_hpf_3db_table[data->hpf_3db][0];
+ *val2 = data->adxl355_hpf_3db_table[data->hpf_3db][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
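The temperature constants check out: SCALE = 1000 / (-9.05) = -110.497238 (milli-degC per LSB) and OFFSET = -1885 - 25 * 9.05 = -2111.25, so a raw reading of 1885 converts back to exactly 25 degC (user-space verification sketch):

/* Verify the datasheet rearrangement numerically. */
double raw  = 1885.0;
double mdeg = (raw + (-2111.25)) * (-110.497238);       /* 25000.0 */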
+
+static int adxl355_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adxl355_data *data = iio_priv(indio_dev);
+ int odr_idx, hpf_idx, calibbias;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ odr_idx = adxl355_find_match(adxl355_odr_table,
+ ARRAY_SIZE(adxl355_odr_table),
+ val, val2);
+ if (odr_idx < 0)
+ return odr_idx;
+
+ return adxl355_set_odr(data, odr_idx);
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ hpf_idx = adxl355_find_match(data->adxl355_hpf_3db_table,
+ ARRAY_SIZE(data->adxl355_hpf_3db_table),
+ val, val2);
+ if (hpf_idx < 0)
+ return hpf_idx;
+
+ return adxl355_set_hpf_3db(data, hpf_idx);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ calibbias = clamp_t(int, val, S16_MIN, S16_MAX);
+
+ return adxl355_set_calibbias(data, chan->address, calibbias);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl355_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct adxl355_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (const int *)adxl355_odr_table;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(adxl355_odr_table) * 2;
+
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (const int *)data->adxl355_hpf_3db_table;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(data->adxl355_hpf_3db_table) * 2;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const unsigned long adxl355_avail_scan_masks[] = {
+ GENMASK(3, 0),
+ 0
+};
+
+static const struct iio_info adxl355_info = {
+ .read_raw = adxl355_read_raw,
+ .write_raw = adxl355_write_raw,
+ .read_avail = &adxl355_read_avail,
+};
+
+static const struct iio_trigger_ops adxl355_trigger_ops = {
+ .set_trigger_state = &adxl355_data_rdy_trigger_set_state,
+ .validate_device = &iio_trigger_validate_own_device,
+};
+
+static irqreturn_t adxl355_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adxl355_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ /*
+ * data->buffer is used both for triggered buffer support
+ * and read/write_raw(), hence, it has to be zeroed here before usage.
+ */
+ data->buffer.buf[0] = 0;
+
+ /*
+ * The acceleration data is 24 bits and big endian. It has to be saved
+ * in 32 bits, hence it is saved in the 2nd byte of each 4-byte slot.
+ * The buf array is 14 bytes as it includes 3x4=12 bytes of
+ * acceleration data for the x, y, and z axes, plus 2 bytes of
+ * temperature data.
+ */
+ ret = regmap_bulk_read(data->regmap, ADXL355_XDATA3_REG,
+ &data->buffer.buf[1], 3);
+ if (ret)
+ goto out_unlock_notify;
+
+ ret = regmap_bulk_read(data->regmap, ADXL355_YDATA3_REG,
+ &data->buffer.buf[5], 3);
+ if (ret)
+ goto out_unlock_notify;
+
+ ret = regmap_bulk_read(data->regmap, ADXL355_ZDATA3_REG,
+ &data->buffer.buf[9], 3);
+ if (ret)
+ goto out_unlock_notify;
+
+ ret = regmap_bulk_read(data->regmap, ADXL355_TEMP2_REG,
+ &data->buffer.buf[12], 2);
+ if (ret)
+ goto out_unlock_notify;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
+ pf->timestamp);
+
+out_unlock_notify:
+ mutex_unlock(&data->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
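+
+/*
+ * Buffer layout implied by the offsets used in the handler above (the
+ * struct itself is declared in adxl355.h, outside this hunk):
+ *
+ *   buf[0]      zeroed above; pads X to a full 4-byte slot
+ *   buf[1..3]   XDATA3, XDATA2, XDATA1 (big endian, 20 valid bits)
+ *   buf[5..7]   YDATA3, YDATA2, YDATA1
+ *   buf[9..11]  ZDATA3, ZDATA2, ZDATA1
+ *   buf[12..13] TEMP2, TEMP1
+ *
+ * iio_push_to_buffers_with_timestamp() appends the 8-byte aligned
+ * timestamp after these 14 bytes.
+ */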
+
+#define ADXL355_ACCEL_CHANNEL(index, reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 20, \
+ .storagebits = 32, \
+ .shift = 4, \
+ .endianness = IIO_BE, \
+ } \
+}
+
+static const struct iio_chan_spec adxl355_channels[] = {
+ ADXL355_ACCEL_CHANNEL(0, chan_x, X),
+ ADXL355_ACCEL_CHANNEL(1, chan_y, Y),
+ ADXL355_ACCEL_CHANNEL(2, chan_z, Z),
+ {
+ .type = IIO_TEMP,
+ .address = ADXL355_TEMP2_REG,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 3,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 12,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static int adxl355_probe_trigger(struct iio_dev *indio_dev, int irq)
+{
+ struct adxl355_data *data = iio_priv(indio_dev);
+ int ret;
+
+ data->dready_trig = devm_iio_trigger_alloc(data->dev, "%s-dev%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!data->dready_trig)
+ return -ENOMEM;
+
+ data->dready_trig->ops = &adxl355_trigger_ops;
+ iio_trigger_set_drvdata(data->dready_trig, indio_dev);
+
+ ret = devm_request_irq(data->dev, irq,
+ &iio_trigger_generic_data_rdy_poll,
+ IRQF_ONESHOT, "adxl355_irq", data->dready_trig);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "request irq %d failed\n",
+ irq);
+
+ ret = devm_iio_trigger_register(data->dev, data->dready_trig);
+ if (ret) {
+ dev_err(data->dev, "iio trigger register failed\n");
+ return ret;
+ }
+
+ indio_dev->trig = iio_trigger_get(data->dready_trig);
+
+ return 0;
+}
+
+int adxl355_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name)
+{
+ struct adxl355_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+ int irq;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->regmap = regmap;
+ data->dev = dev;
+ data->op_mode = ADXL355_STANDBY;
+ mutex_init(&data->lock);
+
+ indio_dev->name = name;
+ indio_dev->info = &adxl355_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = adxl355_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adxl355_channels);
+ indio_dev->available_scan_masks = adxl355_avail_scan_masks;
+
+ ret = adxl355_setup(data);
+ if (ret) {
+ dev_err(dev, "ADXL355 setup failed\n");
+ return ret;
+ }
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &adxl355_trigger_handler, NULL);
+ if (ret) {
+ dev_err(dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+
+ /*
+ * TODO: Would be good to move it to the generic version.
+ */
+ irq = of_irq_get_byname(dev->of_node, "DRDY");
+ if (irq > 0) {
+ ret = adxl355_probe_trigger(indio_dev, irq);
+ if (ret)
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_GPL(adxl355_core_probe);
+
+MODULE_AUTHOR("Puranjay Mohan <puranjay12@gmail.com>");
+MODULE_DESCRIPTION("ADXL355 3-Axis Digital Accelerometer core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/adxl355_i2c.c b/drivers/iio/accel/adxl355_i2c.c
new file mode 100644
index 000000000000..5a987bda9060
--- /dev/null
+++ b/drivers/iio/accel/adxl355_i2c.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADXL355 3-Axis Digital Accelerometer I2C driver
+ *
+ * Copyright (c) 2021 Puranjay Mohan <puranjay12@gmail.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+
+#include "adxl355.h"
+
+static const struct regmap_config adxl355_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x2F,
+ .rd_table = &adxl355_readable_regs_tbl,
+ .wr_table = &adxl355_writeable_regs_tbl,
+};
+
+static int adxl355_i2c_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &adxl355_i2c_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Error initializing i2c regmap: %ld\n",
+ PTR_ERR(regmap));
+
+ return PTR_ERR(regmap);
+ }
+
+ return adxl355_core_probe(&client->dev, regmap, client->name);
+}
+
+static const struct i2c_device_id adxl355_i2c_id[] = {
+ { "adxl355", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adxl355_i2c_id);
+
+static const struct of_device_id adxl355_of_match[] = {
+ { .compatible = "adi,adxl355" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adxl355_of_match);
+
+static struct i2c_driver adxl355_i2c_driver = {
+ .driver = {
+ .name = "adxl355_i2c",
+ .of_match_table = adxl355_of_match,
+ },
+ .probe_new = adxl355_i2c_probe,
+ .id_table = adxl355_i2c_id,
+};
+module_i2c_driver(adxl355_i2c_driver);
+
+MODULE_AUTHOR("Puranjay Mohan <puranjay12@gmail.com>");
+MODULE_DESCRIPTION("ADXL355 3-Axis Digital Accelerometer I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/adxl355_spi.c b/drivers/iio/accel/adxl355_spi.c
new file mode 100644
index 000000000000..fb225aeb56e3
--- /dev/null
+++ b/drivers/iio/accel/adxl355_spi.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADXL355 3-Axis Digital Accelerometer SPI driver
+ *
+ * Copyright (c) 2021 Puranjay Mohan <puranjay12@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "adxl355.h"
+
+static const struct regmap_config adxl355_spi_regmap_config = {
+ .reg_bits = 7,
+ .pad_bits = 1,
+ .val_bits = 8,
+ .read_flag_mask = BIT(0),
+ .max_register = 0x2F,
+ .rd_table = &adxl355_readable_regs_tbl,
+ .wr_table = &adxl355_writeable_regs_tbl,
+};
+
+static int adxl355_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &adxl355_spi_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Error initializing spi regmap: %ld\n",
+ PTR_ERR(regmap));
+
+ return PTR_ERR(regmap);
+ }
+
+ return adxl355_core_probe(&spi->dev, regmap, id->name);
+}
+
+static const struct spi_device_id adxl355_spi_id[] = {
+ { "adxl355", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adxl355_spi_id);
+
+static const struct of_device_id adxl355_of_match[] = {
+ { .compatible = "adi,adxl355" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adxl355_of_match);
+
+static struct spi_driver adxl355_spi_driver = {
+ .driver = {
+ .name = "adxl355_spi",
+ .of_match_table = adxl355_of_match,
+ },
+ .probe = adxl355_spi_probe,
+ .id_table = adxl355_spi_id,
+};
+module_spi_driver(adxl355_spi_driver);
+
+MODULE_AUTHOR("Puranjay Mohan <puranjay12@gmail.com>");
+MODULE_DESCRIPTION("ADXL355 3-Axis Digital Accelerometer SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c
index fc9592407717..758952584f8c 100644
--- a/drivers/iio/accel/adxl372.c
+++ b/drivers/iio/accel/adxl372.c
@@ -1214,6 +1214,7 @@ int adxl372_probe(struct device *dev, struct regmap *regmap,
ret = devm_iio_triggered_buffer_setup_ext(dev,
indio_dev, NULL,
adxl372_trigger_handler,
+ IIO_BUFFER_DIRECTION_IN,
&adxl372_buffer_ops,
adxl372_fifo_attributes);
if (ret < 0)
diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h
index 5ad10db9819f..c4c8d74155c2 100644
--- a/drivers/iio/accel/bma400.h
+++ b/drivers/iio/accel/bma400.h
@@ -94,6 +94,6 @@ extern const struct regmap_config bma400_regmap_config;
int bma400_probe(struct device *dev, struct regmap *regmap, const char *name);
-int bma400_remove(struct device *dev);
+void bma400_remove(struct device *dev);
#endif
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index 21520e022a21..fd2647b728d3 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -828,7 +828,7 @@ int bma400_probe(struct device *dev, struct regmap *regmap, const char *name)
}
EXPORT_SYMBOL(bma400_probe);
-int bma400_remove(struct device *dev)
+void bma400_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bma400_data *data = iio_priv(indio_dev);
@@ -838,12 +838,13 @@ int bma400_remove(struct device *dev)
ret = bma400_set_power_mode(data, POWER_MODE_SLEEP);
mutex_unlock(&data->mutex);
+ if (ret)
+ dev_warn(dev, "Failed to put device into sleep mode (%pe)\n", ERR_PTR(ret));
+
regulator_bulk_disable(ARRAY_SIZE(data->regulators),
data->regulators);
iio_device_unregister(indio_dev);
-
- return ret;
}
EXPORT_SYMBOL(bma400_remove);
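This int-to-void conversion of the core remove helper repeats across
the drivers below. Reduced to a sketch with hypothetical names: the
core helper warns instead of returning an error, and the bus glue
keeps returning 0 until the bus remove callbacks themselves become
void:

	void foo_core_remove(struct device *dev)	/* was: int */
	{
		int ret = foo_power_down(dev);

		if (ret)
			dev_warn(dev, "power down failed (%pe)\n",
				 ERR_PTR(ret));
		/* no return value: removal cannot be aborted anyway */
	}

	static int foo_i2c_remove(struct i2c_client *client)
	{
		foo_core_remove(&client->dev);

		return 0;
	}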
diff --git a/drivers/iio/accel/bma400_i2c.c b/drivers/iio/accel/bma400_i2c.c
index 9dcb7cc9996e..f50df5310beb 100644
--- a/drivers/iio/accel/bma400_i2c.c
+++ b/drivers/iio/accel/bma400_i2c.c
@@ -29,7 +29,9 @@ static int bma400_i2c_probe(struct i2c_client *client,
static int bma400_i2c_remove(struct i2c_client *client)
{
- return bma400_remove(&client->dev);
+ bma400_remove(&client->dev);
+
+ return 0;
}
static const struct i2c_device_id bma400_i2c_ids[] = {
diff --git a/drivers/iio/accel/bma400_spi.c b/drivers/iio/accel/bma400_spi.c
index 7c2825904e08..9f622e37477b 100644
--- a/drivers/iio/accel/bma400_spi.c
+++ b/drivers/iio/accel/bma400_spi.c
@@ -89,7 +89,9 @@ static int bma400_spi_probe(struct spi_device *spi)
static int bma400_spi_remove(struct spi_device *spi)
{
- return bma400_remove(&spi->dev);
+ bma400_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id bma400_spi_ids[] = {
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index e8693a42ad46..b0678c351e82 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -1734,6 +1734,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
ret = iio_triggered_buffer_setup_ext(indio_dev,
&iio_pollfunc_store_time,
bmc150_accel_trigger_handler,
+ IIO_BUFFER_DIRECTION_IN,
&bmc150_accel_buffer_ops,
fifo_attrs);
if (ret < 0) {
@@ -1799,7 +1800,7 @@ err_disable_regulators:
}
EXPORT_SYMBOL_GPL(bmc150_accel_core_probe);
-int bmc150_accel_core_remove(struct device *dev)
+void bmc150_accel_core_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_accel_data *data = iio_priv(indio_dev);
@@ -1819,8 +1820,6 @@ int bmc150_accel_core_remove(struct device *dev)
regulator_bulk_disable(ARRAY_SIZE(data->regulators),
data->regulators);
-
- return 0;
}
EXPORT_SYMBOL_GPL(bmc150_accel_core_remove);
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index 88bd8a25f142..9e52df9a8f07 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -213,7 +213,9 @@ static int bmc150_accel_remove(struct i2c_client *client)
{
bmc150_acpi_dual_accel_remove(client);
- return bmc150_accel_core_remove(&client->dev);
+ bmc150_accel_core_remove(&client->dev);
+
+ return 0;
}
static const struct acpi_device_id bmc150_accel_acpi_match[] = {
diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c
index 191e312dc91a..11559567cb39 100644
--- a/drivers/iio/accel/bmc150-accel-spi.c
+++ b/drivers/iio/accel/bmc150-accel-spi.c
@@ -37,7 +37,9 @@ static int bmc150_accel_probe(struct spi_device *spi)
static int bmc150_accel_remove(struct spi_device *spi)
{
- return bmc150_accel_core_remove(&spi->dev);
+ bmc150_accel_core_remove(&spi->dev);
+
+ return 0;
}
static const struct acpi_device_id bmc150_accel_acpi_match[] = {
diff --git a/drivers/iio/accel/bmc150-accel.h b/drivers/iio/accel/bmc150-accel.h
index 1bb5023e8ed9..7775c5edaeef 100644
--- a/drivers/iio/accel/bmc150-accel.h
+++ b/drivers/iio/accel/bmc150-accel.h
@@ -88,7 +88,7 @@ struct bmc150_accel_data {
int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
enum bmc150_type type, const char *name,
bool block_supported);
-int bmc150_accel_core_remove(struct device *dev);
+void bmc150_accel_core_remove(struct device *dev);
extern const struct dev_pm_ops bmc150_accel_pm_ops;
extern const struct regmap_config bmc150_regmap_conf;
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index a06dae5c971d..d74465214feb 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -536,7 +536,7 @@ int bmi088_accel_core_probe(struct device *dev, struct regmap *regmap,
EXPORT_SYMBOL_GPL(bmi088_accel_core_probe);
-int bmi088_accel_core_remove(struct device *dev)
+void bmi088_accel_core_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmi088_accel_data *data = iio_priv(indio_dev);
@@ -546,8 +546,6 @@ int bmi088_accel_core_remove(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
bmi088_accel_power_down(data);
-
- return 0;
}
EXPORT_SYMBOL_GPL(bmi088_accel_core_remove);
diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c
index dd1e3f6cf211..758ad2f12896 100644
--- a/drivers/iio/accel/bmi088-accel-spi.c
+++ b/drivers/iio/accel/bmi088-accel-spi.c
@@ -58,7 +58,9 @@ static int bmi088_accel_probe(struct spi_device *spi)
static int bmi088_accel_remove(struct spi_device *spi)
{
- return bmi088_accel_core_remove(&spi->dev);
+ bmi088_accel_core_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id bmi088_accel_id[] = {
diff --git a/drivers/iio/accel/bmi088-accel.h b/drivers/iio/accel/bmi088-accel.h
index 5c25f16b672c..5d40c7cf1cbc 100644
--- a/drivers/iio/accel/bmi088-accel.h
+++ b/drivers/iio/accel/bmi088-accel.h
@@ -13,6 +13,6 @@ extern const struct dev_pm_ops bmi088_accel_pm_ops;
int bmi088_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
const char *name, bool block_supported);
-int bmi088_accel_core_remove(struct device *dev);
+void bmi088_accel_core_remove(struct device *dev);
#endif /* BMI088_ACCEL_H */
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index f41db9e0249a..32989d91b982 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -22,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/sysfs.h>
@@ -30,6 +31,7 @@
#define FXLS8962AF_INT_STATUS 0x00
#define FXLS8962AF_INT_STATUS_SRC_BOOT BIT(0)
+#define FXLS8962AF_INT_STATUS_SRC_SDCD_OT BIT(4)
#define FXLS8962AF_INT_STATUS_SRC_BUF BIT(5)
#define FXLS8962AF_INT_STATUS_SRC_DRDY BIT(7)
#define FXLS8962AF_TEMP_OUT 0x01
@@ -73,6 +75,7 @@
#define FXLS8962AF_ASLP_COUNT_LSB 0x1e
#define FXLS8962AF_INT_EN 0x20
+#define FXLS8962AF_INT_EN_SDCD_OT_EN BIT(5)
#define FXLS8962AF_INT_EN_BUF_EN BIT(6)
#define FXLS8962AF_INT_PIN_SEL 0x21
#define FXLS8962AF_INT_PIN_SEL_MASK GENMASK(7, 0)
@@ -96,9 +99,21 @@
#define FXLS8962AF_ORIENT_THS_REG 0x2c
#define FXLS8962AF_SDCD_INT_SRC1 0x2d
+#define FXLS8962AF_SDCD_INT_SRC1_X_OT BIT(5)
+#define FXLS8962AF_SDCD_INT_SRC1_X_POL BIT(4)
+#define FXLS8962AF_SDCD_INT_SRC1_Y_OT BIT(3)
+#define FXLS8962AF_SDCD_INT_SRC1_Y_POL BIT(2)
+#define FXLS8962AF_SDCD_INT_SRC1_Z_OT BIT(1)
+#define FXLS8962AF_SDCD_INT_SRC1_Z_POL BIT(0)
#define FXLS8962AF_SDCD_INT_SRC2 0x2e
#define FXLS8962AF_SDCD_CONFIG1 0x2f
+#define FXLS8962AF_SDCD_CONFIG1_Z_OT_EN BIT(3)
+#define FXLS8962AF_SDCD_CONFIG1_Y_OT_EN BIT(4)
+#define FXLS8962AF_SDCD_CONFIG1_X_OT_EN BIT(5)
+#define FXLS8962AF_SDCD_CONFIG1_OT_ELE BIT(7)
#define FXLS8962AF_SDCD_CONFIG2 0x30
+#define FXLS8962AF_SDCD_CONFIG2_SDCD_EN BIT(7)
+#define FXLS8962AF_SC2_REF_UPDM_AC GENMASK(6, 5)
#define FXLS8962AF_SDCD_OT_DBCNT 0x31
#define FXLS8962AF_SDCD_WT_DBCNT 0x32
#define FXLS8962AF_SDCD_LTHS_LSB 0x33
@@ -151,7 +166,11 @@ struct fxls8962af_data {
} scan;
int64_t timestamp, old_timestamp; /* Only used in hw fifo mode. */
struct iio_mount_matrix orientation;
+ int irq;
u8 watermark;
+ u8 enable_event;
+ u16 lower_thres;
+ u16 upper_thres;
};
const struct regmap_config fxls8962af_regmap_conf = {
@@ -238,7 +257,7 @@ static int fxls8962af_get_out(struct fxls8962af_data *data,
}
ret = regmap_bulk_read(data->regmap, chan->address,
- &raw_val, (chan->scan_type.storagebits / 8));
+ &raw_val, sizeof(data->lower_thres));
if (!is_active)
fxls8962af_power_off(data);
@@ -451,6 +470,15 @@ static int fxls8962af_write_raw(struct iio_dev *indio_dev,
}
}
+static int fxls8962af_event_setup(struct fxls8962af_data *data, int state)
+{
+ /* Enable wakeup interrupt */
+ int mask = FXLS8962AF_INT_EN_SDCD_OT_EN;
+ int value = state ? mask : 0;
+
+ return regmap_update_bits(data->regmap, FXLS8962AF_INT_EN, mask, value);
+}
+
static int fxls8962af_set_watermark(struct iio_dev *indio_dev, unsigned val)
{
struct fxls8962af_data *data = iio_priv(indio_dev);
@@ -463,6 +491,217 @@ static int fxls8962af_set_watermark(struct iio_dev *indio_dev, unsigned val)
return 0;
}
+static int __fxls8962af_set_thresholds(struct fxls8962af_data *data,
+ const struct iio_chan_spec *chan,
+ enum iio_event_direction dir,
+ int val)
+{
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ data->lower_thres = val;
+ return regmap_bulk_write(data->regmap, FXLS8962AF_SDCD_LTHS_LSB,
+ &data->lower_thres, sizeof(data->lower_thres));
+ case IIO_EV_DIR_RISING:
+ data->upper_thres = val;
+ return regmap_bulk_write(data->regmap, FXLS8962AF_SDCD_UTHS_LSB,
+ &data->upper_thres, sizeof(data->upper_thres));
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxls8962af_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ ret = regmap_bulk_read(data->regmap, FXLS8962AF_SDCD_LTHS_LSB,
+ &data->lower_thres, sizeof(data->lower_thres));
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(data->lower_thres, chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_RISING:
+ ret = regmap_bulk_read(data->regmap, FXLS8962AF_SDCD_UTHS_LSB,
+ &data->upper_thres, sizeof(data->upper_thres));
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(data->upper_thres, chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxls8962af_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+ int ret, val_masked;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ if (val < -2048 || val > 2047)
+ return -EINVAL;
+
+ if (data->enable_event)
+ return -EBUSY;
+
+ val_masked = val & GENMASK(11, 0);
+ if (fxls8962af_is_active(data)) {
+ ret = fxls8962af_standby(data);
+ if (ret)
+ return ret;
+
+ ret = __fxls8962af_set_thresholds(data, chan, dir, val_masked);
+ if (ret)
+ return ret;
+
+ return fxls8962af_active(data);
+ } else {
+ return __fxls8962af_set_thresholds(data, chan, dir, val_masked);
+ }
+}
+
+static int
+fxls8962af_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ return !!(FXLS8962AF_SDCD_CONFIG1_X_OT_EN & data->enable_event);
+ case IIO_MOD_Y:
+ return !!(FXLS8962AF_SDCD_CONFIG1_Y_OT_EN & data->enable_event);
+ case IIO_MOD_Z:
+ return !!(FXLS8962AF_SDCD_CONFIG1_Z_OT_EN & data->enable_event);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+fxls8962af_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+ u8 enable_event, enable_bits;
+ int ret, value;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ enable_bits = FXLS8962AF_SDCD_CONFIG1_X_OT_EN;
+ break;
+ case IIO_MOD_Y:
+ enable_bits = FXLS8962AF_SDCD_CONFIG1_Y_OT_EN;
+ break;
+ case IIO_MOD_Z:
+ enable_bits = FXLS8962AF_SDCD_CONFIG1_Z_OT_EN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (state)
+ enable_event = data->enable_event | enable_bits;
+ else
+ enable_event = data->enable_event & ~enable_bits;
+
+ if (data->enable_event == enable_event)
+ return 0;
+
+ ret = fxls8962af_standby(data);
+ if (ret)
+ return ret;
+
+ /* Enable events */
+ value = enable_event | FXLS8962AF_SDCD_CONFIG1_OT_ELE;
+ ret = regmap_write(data->regmap, FXLS8962AF_SDCD_CONFIG1, value);
+ if (ret)
+ return ret;
+
+ /*
+ * Enable update of SDCD_REF_X/Y/Z values with the current decimated and
+ * trimmed X/Y/Z acceleration input data. This allows for acceleration
+	 * slope detection with Data(n) to Data(n-1) always used as the input
+ * to the window comparator.
+ */
+ value = enable_event ?
+ FXLS8962AF_SDCD_CONFIG2_SDCD_EN | FXLS8962AF_SC2_REF_UPDM_AC :
+ 0x00;
+ ret = regmap_write(data->regmap, FXLS8962AF_SDCD_CONFIG2, value);
+ if (ret)
+ return ret;
+
+ ret = fxls8962af_event_setup(data, state);
+ if (ret)
+ return ret;
+
+ data->enable_event = enable_event;
+
+ if (data->enable_event) {
+ fxls8962af_active(data);
+ ret = fxls8962af_power_on(data);
+ } else {
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Not in buffered mode so disable power */
+ ret = fxls8962af_power_off(data);
+
+ iio_device_release_direct_mode(indio_dev);
+ }
+
+ return ret;
+}
+
+static const struct iio_event_spec fxls8962af_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
#define FXLS8962AF_CHANNEL(axis, reg, idx) { \
.type = IIO_ACCEL, \
.address = reg, \
@@ -481,6 +720,8 @@ static int fxls8962af_set_watermark(struct iio_dev *indio_dev, unsigned val)
.shift = 4, \
.endianness = IIO_BE, \
}, \
+ .event_spec = fxls8962af_event, \
+ .num_event_specs = ARRAY_SIZE(fxls8962af_event), \
}
#define FXLS8962AF_TEMP_CHANNEL { \
@@ -522,6 +763,10 @@ static const struct iio_info fxls8962af_info = {
.read_raw = &fxls8962af_read_raw,
.write_raw = &fxls8962af_write_raw,
.write_raw_get_fmt = fxls8962af_write_raw_get_fmt,
+ .read_event_value = fxls8962af_read_event,
+ .write_event_value = fxls8962af_write_event,
+ .read_event_config = fxls8962af_read_event_config,
+ .write_event_config = fxls8962af_write_event_config,
.read_avail = fxls8962af_read_avail,
.hwfifo_set_watermark = fxls8962af_set_watermark,
};
@@ -605,7 +850,8 @@ static int fxls8962af_buffer_predisable(struct iio_dev *indio_dev)
ret = __fxls8962af_fifo_set_mode(data, false);
- fxls8962af_active(data);
+ if (data->enable_event)
+ fxls8962af_active(data);
return ret;
}
@@ -614,7 +860,10 @@ static int fxls8962af_buffer_postdisable(struct iio_dev *indio_dev)
{
struct fxls8962af_data *data = iio_priv(indio_dev);
- return fxls8962af_power_off(data);
+ if (!data->enable_event)
+ fxls8962af_power_off(data);
+
+ return 0;
}
static const struct iio_buffer_setup_ops fxls8962af_buffer_ops = {
@@ -725,6 +974,45 @@ static int fxls8962af_fifo_flush(struct iio_dev *indio_dev)
return count;
}
+static int fxls8962af_event_interrupt(struct iio_dev *indio_dev)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+ s64 ts = iio_get_time_ns(indio_dev);
+ unsigned int reg;
+ u64 ev_code;
+ int ret;
+
+ ret = regmap_read(data->regmap, FXLS8962AF_SDCD_INT_SRC1, &reg);
+ if (ret)
+ return ret;
+
+ if (reg & FXLS8962AF_SDCD_INT_SRC1_X_OT) {
+ ev_code = reg & FXLS8962AF_SDCD_INT_SRC1_X_POL ?
+ IIO_EV_DIR_RISING : IIO_EV_DIR_FALLING;
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
+ IIO_EV_TYPE_THRESH, ev_code), ts);
+ }
+
+ if (reg & FXLS8962AF_SDCD_INT_SRC1_Y_OT) {
+ ev_code = reg & FXLS8962AF_SDCD_INT_SRC1_Y_POL ?
+ IIO_EV_DIR_RISING : IIO_EV_DIR_FALLING;
+ iio_push_event(indio_dev,
+			       IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
+ IIO_EV_TYPE_THRESH, ev_code), ts);
+ }
+
+ if (reg & FXLS8962AF_SDCD_INT_SRC1_Z_OT) {
+ ev_code = reg & FXLS8962AF_SDCD_INT_SRC1_Z_POL ?
+ IIO_EV_DIR_RISING : IIO_EV_DIR_FALLING;
+ iio_push_event(indio_dev,
+			       IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
+ IIO_EV_TYPE_THRESH, ev_code), ts);
+ }
+
+ return 0;
+}
+
static irqreturn_t fxls8962af_interrupt(int irq, void *p)
{
struct iio_dev *indio_dev = p;
@@ -744,6 +1032,14 @@ static irqreturn_t fxls8962af_interrupt(int irq, void *p)
return IRQ_HANDLED;
}
+ if (reg & FXLS8962AF_INT_STATUS_SRC_SDCD_OT) {
+ ret = fxls8962af_event_interrupt(indio_dev);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+ }
+
return IRQ_NONE;
}
@@ -861,6 +1157,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq)
data = iio_priv(indio_dev);
dev_set_drvdata(dev, indio_dev);
data->regmap = regmap;
+ data->irq = irq;
ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
@@ -930,6 +1227,9 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq)
if (ret)
return ret;
+ if (device_property_read_bool(dev, "wakeup-source"))
+ device_init_wakeup(dev, true);
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_GPL(fxls8962af_core_probe);
@@ -955,9 +1255,46 @@ static int __maybe_unused fxls8962af_runtime_resume(struct device *dev)
return fxls8962af_active(data);
}
+static int __maybe_unused fxls8962af_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+
+ if (device_may_wakeup(dev) && data->enable_event) {
+ enable_irq_wake(data->irq);
+
+ /*
+		 * Disable the buffer: the hardware FIFO is so small that it
+		 * would wake the system almost immediately.
+ */
+ if (iio_buffer_enabled(indio_dev))
+ fxls8962af_buffer_predisable(indio_dev);
+ } else {
+ fxls8962af_runtime_suspend(dev);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused fxls8962af_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+
+ if (device_may_wakeup(dev) && data->enable_event) {
+ disable_irq_wake(data->irq);
+
+ if (iio_buffer_enabled(indio_dev))
+ fxls8962af_buffer_postenable(indio_dev);
+ } else {
+ fxls8962af_runtime_resume(dev);
+ }
+
+ return 0;
+}
+
const struct dev_pm_ops fxls8962af_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(fxls8962af_suspend, fxls8962af_resume)
SET_RUNTIME_PM_OPS(fxls8962af_runtime_suspend,
fxls8962af_runtime_resume, NULL)
};
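The SDCD thresholds handled above are signed 12-bit values; the range
check and GENMASK(11, 0) masking in fxls8962af_write_event() and the
sign_extend32() on the read side are two halves of the same encoding.
A worked example (not driver code):

	int val = -100;				/* requested threshold */
	u16 ths = val & GENMASK(11, 0);		/* stored as 0xF9C */
	int back = sign_extend32(ths, 11);	/* reads back as -100 */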
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index b580d605f848..274b41a6e603 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -34,7 +34,9 @@ static int kxsd9_i2c_probe(struct i2c_client *i2c,
static int kxsd9_i2c_remove(struct i2c_client *client)
{
- return kxsd9_common_remove(&client->dev);
+ kxsd9_common_remove(&client->dev);
+
+ return 0;
}
static const struct of_device_id kxsd9_of_match[] = {
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
index 7971ec1eeb7e..441e6b764281 100644
--- a/drivers/iio/accel/kxsd9-spi.c
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -34,7 +34,9 @@ static int kxsd9_spi_probe(struct spi_device *spi)
static int kxsd9_spi_remove(struct spi_device *spi)
{
- return kxsd9_common_remove(&spi->dev);
+ kxsd9_common_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id kxsd9_spi_id[] = {
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index bf7ed9e7d00f..2faf85ca996e 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -478,7 +478,7 @@ err_power_down:
}
EXPORT_SYMBOL(kxsd9_common_probe);
-int kxsd9_common_remove(struct device *dev)
+void kxsd9_common_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct kxsd9_state *st = iio_priv(indio_dev);
@@ -489,8 +489,6 @@ int kxsd9_common_remove(struct device *dev)
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
kxsd9_power_down(st);
-
- return 0;
}
EXPORT_SYMBOL(kxsd9_common_remove);
diff --git a/drivers/iio/accel/kxsd9.h b/drivers/iio/accel/kxsd9.h
index 5e3ca212f5be..c04dbfa4e0d0 100644
--- a/drivers/iio/accel/kxsd9.h
+++ b/drivers/iio/accel/kxsd9.h
@@ -8,6 +8,6 @@
int kxsd9_common_probe(struct device *dev,
struct regmap *map,
const char *name);
-int kxsd9_common_remove(struct device *dev);
+void kxsd9_common_remove(struct device *dev);
extern const struct dev_pm_ops kxsd9_dev_pm_ops;
diff --git a/drivers/iio/accel/mma7455.h b/drivers/iio/accel/mma7455.h
index 4e3fa988f690..1fcc4b64b3af 100644
--- a/drivers/iio/accel/mma7455.h
+++ b/drivers/iio/accel/mma7455.h
@@ -11,6 +11,6 @@ extern const struct regmap_config mma7455_core_regmap;
int mma7455_core_probe(struct device *dev, struct regmap *regmap,
const char *name);
-int mma7455_core_remove(struct device *dev);
+void mma7455_core_remove(struct device *dev);
#endif
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index 922bd38ff6ea..777c6c384b09 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -294,7 +294,7 @@ int mma7455_core_probe(struct device *dev, struct regmap *regmap,
}
EXPORT_SYMBOL_GPL(mma7455_core_probe);
-int mma7455_core_remove(struct device *dev)
+void mma7455_core_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct mma7455_data *mma7455 = iio_priv(indio_dev);
@@ -304,8 +304,6 @@ int mma7455_core_remove(struct device *dev)
regmap_write(mma7455->regmap, MMA7455_REG_MCTL,
MMA7455_MCTL_MODE_STANDBY);
-
- return 0;
}
EXPORT_SYMBOL_GPL(mma7455_core_remove);
diff --git a/drivers/iio/accel/mma7455_i2c.c b/drivers/iio/accel/mma7455_i2c.c
index cddeaa9e230a..8a5256516f9f 100644
--- a/drivers/iio/accel/mma7455_i2c.c
+++ b/drivers/iio/accel/mma7455_i2c.c
@@ -28,7 +28,9 @@ static int mma7455_i2c_probe(struct i2c_client *i2c,
static int mma7455_i2c_remove(struct i2c_client *i2c)
{
- return mma7455_core_remove(&i2c->dev);
+ mma7455_core_remove(&i2c->dev);
+
+ return 0;
}
static const struct i2c_device_id mma7455_i2c_ids[] = {
diff --git a/drivers/iio/accel/mma7455_spi.c b/drivers/iio/accel/mma7455_spi.c
index eb82cdfa8abc..ecf690692dcc 100644
--- a/drivers/iio/accel/mma7455_spi.c
+++ b/drivers/iio/accel/mma7455_spi.c
@@ -24,7 +24,9 @@ static int mma7455_spi_probe(struct spi_device *spi)
static int mma7455_spi_remove(struct spi_device *spi)
{
- return mma7455_core_remove(&spi->dev);
+ mma7455_core_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id mma7455_spi_ids[] = {
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 47f5cd66e996..cd6cdf2c51b0 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -254,7 +254,7 @@ static const struct of_device_id mma7660_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mma7660_of_match);
-static const struct acpi_device_id mma7660_acpi_id[] = {
+static const struct acpi_device_id __maybe_unused mma7660_acpi_id[] = {
{"MMA7660", 0},
{}
};
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index cb753a43533c..c6b75308148a 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -731,8 +731,7 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
return ret;
}
*val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
- *val = ((*val) << (sizeof(*val) * 8 - 13)) >>
- (sizeof(*val) * 8 - 13);
+ *val = sign_extend32(*val, 12);
} else {
/* get the temperature when available */
ret = sca3000_read_data_short(st,
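The sca3000 hunk swaps a shift-based sign extension for the helper
that names the intent; both forms agree for a 13-bit two's complement
value held in an int. A worked example:

	int val = 0x1F00;	/* 13-bit value with the sign bit set */

	/* old: left-align into 32 bits, arithmetic shift back */
	int a = (val << (32 - 13)) >> (32 - 13);	/* -256 */

	/* new: */
	int b = sign_extend32(val, 12);			/* -256 */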
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index f1e6ec380667..31ea19d0ba71 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -1210,7 +1210,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
};
- adev = ACPI_COMPANION(adata->dev);
+ adev = ACPI_COMPANION(indio_dev->dev.parent);
if (!adev)
return 0;
@@ -1334,7 +1334,8 @@ EXPORT_SYMBOL(st_accel_get_settings);
int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
- struct st_sensors_platform_data *pdata = dev_get_platdata(adata->dev);
+ struct device *parent = indio_dev->dev.parent;
+ struct st_sensors_platform_data *pdata = dev_get_platdata(parent);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -1354,7 +1355,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
*/
err = apply_acpi_orientation(indio_dev);
if (err) {
- err = iio_read_mount_matrix(adata->dev, &adata->mount_matrix);
+ err = iio_read_mount_matrix(parent, &adata->mount_matrix);
if (err)
return err;
}
@@ -1380,32 +1381,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
return err;
}
- err = iio_device_register(indio_dev);
- if (err)
- goto st_accel_device_register_error;
-
- dev_info(&indio_dev->dev, "registered accelerometer %s\n",
- indio_dev->name);
-
- return 0;
-
-st_accel_device_register_error:
- if (adata->irq > 0)
- st_sensors_deallocate_trigger(indio_dev);
- return err;
+ return devm_iio_device_register(parent, indio_dev);
}
EXPORT_SYMBOL(st_accel_common_probe);
-void st_accel_common_remove(struct iio_dev *indio_dev)
-{
- struct st_sensor_data *adata = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (adata->irq > 0)
- st_sensors_deallocate_trigger(indio_dev);
-}
-EXPORT_SYMBOL(st_accel_common_remove);
-
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics accelerometers driver");
MODULE_LICENSE("GPL v2");
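Dropping st_accel_common_remove() is safe because devres unwinds in
reverse registration order. A sketch of the resulting teardown
(assuming the trigger and power handling were registered as devm
resources earlier in probe, which this hunk does not show):

	probe:  power enable -> trigger setup -> devm_iio_device_register()
	unbind: iio_device_unregister() first, then trigger teardown,
	        then power disable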
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index f711756e41e3..c0ce78eebad9 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -177,27 +177,7 @@ static int st_accel_i2c_probe(struct i2c_client *client)
if (ret)
return ret;
- ret = st_accel_common_probe(indio_dev);
- if (ret < 0)
- goto st_accel_power_off;
-
- return 0;
-
-st_accel_power_off:
- st_sensors_power_disable(indio_dev);
-
- return ret;
-}
-
-static int st_accel_i2c_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- st_sensors_power_disable(indio_dev);
-
- st_accel_common_remove(indio_dev);
-
- return 0;
+ return st_accel_common_probe(indio_dev);
}
static struct i2c_driver st_accel_driver = {
@@ -207,7 +187,6 @@ static struct i2c_driver st_accel_driver = {
.acpi_match_table = ACPI_PTR(st_accel_acpi_match),
},
.probe_new = st_accel_i2c_probe,
- .remove = st_accel_i2c_remove,
.id_table = st_accel_id_table,
};
module_i2c_driver(st_accel_driver);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index bb45d9ff95b8..b74a1c6d03de 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -127,27 +127,7 @@ static int st_accel_spi_probe(struct spi_device *spi)
if (err)
return err;
- err = st_accel_common_probe(indio_dev);
- if (err < 0)
- goto st_accel_power_off;
-
- return 0;
-
-st_accel_power_off:
- st_sensors_power_disable(indio_dev);
-
- return err;
-}
-
-static int st_accel_spi_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- st_sensors_power_disable(indio_dev);
-
- st_accel_common_remove(indio_dev);
-
- return 0;
+ return st_accel_common_probe(indio_dev);
}
static const struct spi_device_id st_accel_id_table[] = {
@@ -177,7 +157,6 @@ static struct spi_driver st_accel_driver = {
.of_match_table = st_accel_of_match,
},
.probe = st_accel_spi_probe,
- .remove = st_accel_spi_remove,
.id_table = st_accel_id_table,
};
module_spi_driver(st_accel_driver);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index af168e1c9fdb..8bf5b62a73f4 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -354,7 +354,7 @@ config BCM_IPROC_ADC
config BERLIN2_ADC
tristate "Marvell Berlin2 ADC driver"
- depends on ARCH_BERLIN
+ depends on ARCH_BERLIN || COMPILE_TEST
help
Marvell Berlin2 ADC driver. This ADC has 8 channels, with one used for
temperature measurement.
@@ -430,9 +430,9 @@ config EXYNOS_ADC
depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 || (OF && COMPILE_TEST)
depends on HAS_IOMEM
help
- Core support for the ADC block found in the Samsung EXYNOS series
- of SoCs for drivers such as the touchscreen and hwmon to use to share
- this resource.
+ Driver for the ADC block found in the Samsung S3C (S3C2410, S3C2416,
+ S3C2440, S3C2443, S3C6410), S5Pv210 and Exynos SoCs.
+	  Choose Y here only if you build for such a Samsung SoC.
To compile this driver as a module, choose M here: the module will be
called exynos_adc.
@@ -530,6 +530,16 @@ config IMX7D_ADC
This driver can also be built as a module. If so, the module will be
called imx7d_adc.
+config IMX8QXP_ADC
+ tristate "NXP IMX8QXP ADC driver"
+ depends on ARCH_MXC_ARM64 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to build support for IMX8QXP ADC.
+
+ This driver can also be built as a module. If so, the module will be
+ called imx8qxp-adc.
+
config LP8788_ADC
tristate "LP8788 ADC driver"
depends on MFD_LP8788
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index d68550f493e3..d3f53549720c 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o
obj-$(CONFIG_HI8435) += hi8435.o
obj-$(CONFIG_HX711) += hx711.o
obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
+obj-$(CONFIG_IMX8QXP_ADC) += imx8qxp-adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_INGENIC_ADC) += ingenic-adc.o
obj-$(CONFIG_INTEL_MRFLD_ADC) += intel_mrfld_adc.o
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
index 7b5212ba5501..4c46a201d4ef 100644
--- a/drivers/iio/adc/ab8500-gpadc.c
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -1103,17 +1103,15 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
return ret;
gpadc->irq_sw = platform_get_irq_byname(pdev, "SW_CONV_END");
- if (gpadc->irq_sw < 0) {
- dev_err(dev, "failed to get platform sw_conv_end irq\n");
- return gpadc->irq_sw;
- }
+ if (gpadc->irq_sw < 0)
+ return dev_err_probe(dev, gpadc->irq_sw,
+ "failed to get platform sw_conv_end irq\n");
if (is_ab8500(gpadc->ab8500)) {
gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
- if (gpadc->irq_hw < 0) {
- dev_err(dev, "failed to get platform hw_conv_end irq\n");
- return gpadc->irq_hw;
- }
+ if (gpadc->irq_hw < 0)
+ return dev_err_probe(dev, gpadc->irq_hw,
+ "failed to get platform hw_conv_end irq\n");
} else {
gpadc->irq_hw = 0;
}
@@ -1146,11 +1144,9 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
/* The VTVout LDO used to power the AB8500 GPADC */
gpadc->vddadc = devm_regulator_get(dev, "vddadc");
- if (IS_ERR(gpadc->vddadc)) {
- ret = PTR_ERR(gpadc->vddadc);
- dev_err(dev, "failed to get vddadc\n");
- return ret;
- }
+ if (IS_ERR(gpadc->vddadc))
+ return dev_err_probe(dev, PTR_ERR(gpadc->vddadc),
+ "failed to get vddadc\n");
ret = regulator_enable(gpadc->vddadc);
if (ret) {
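dev_err_probe(), as used in this hunk, folds the message, the return
value and the -EPROBE_DEFER bookkeeping into a single statement. The
generic shape (a sketch, nothing ab8500-specific):

	irq = platform_get_irq_byname(pdev, "SW_CONV_END");
	if (irq < 0)
		return dev_err_probe(dev, irq,
				     "failed to get sw_conv_end irq\n");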
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index 2301a0e27f23..e9129dac762f 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -460,6 +460,11 @@ static const struct iio_info ad7291_info = {
.write_event_value = &ad7291_write_event_value,
};
+static void ad7291_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int ad7291_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -473,8 +478,6 @@ static int ad7291_probe(struct i2c_client *client,
chip = iio_priv(indio_dev);
mutex_init(&chip->state_lock);
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
chip->client = client;
@@ -495,6 +498,11 @@ static int ad7291_probe(struct i2c_client *client,
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&client->dev, ad7291_reg_disable,
+ chip->reg);
+ if (ret)
+ return ret;
+
chip->command |= AD7291_EXT_REF;
}
@@ -506,58 +514,25 @@ static int ad7291_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
ret = ad7291_i2c_write(chip, AD7291_COMMAND, AD7291_RESET);
- if (ret) {
- ret = -EIO;
- goto error_disable_reg;
- }
+ if (ret)
+ return -EIO;
ret = ad7291_i2c_write(chip, AD7291_COMMAND, chip->command);
- if (ret) {
- ret = -EIO;
- goto error_disable_reg;
- }
+ if (ret)
+ return -EIO;
if (client->irq > 0) {
- ret = request_threaded_irq(client->irq,
- NULL,
- &ad7291_event_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- id->name,
- indio_dev);
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL,
+ &ad7291_event_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ id->name,
+ indio_dev);
if (ret)
- goto error_disable_reg;
+ return ret;
}
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_irq;
-
- return 0;
-
-error_unreg_irq:
- if (client->irq)
- free_irq(client->irq, indio_dev);
-error_disable_reg:
- if (chip->reg)
- regulator_disable(chip->reg);
-
- return ret;
-}
-
-static int ad7291_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct ad7291_chip_info *chip = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- if (client->irq)
- free_irq(client->irq, indio_dev);
-
- if (chip->reg)
- regulator_disable(chip->reg);
-
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id ad7291_id[] = {
@@ -579,7 +554,6 @@ static struct i2c_driver ad7291_driver = {
.of_match_table = ad7291_of_match,
},
.probe = ad7291_probe,
- .remove = ad7291_remove,
.id_table = ad7291_id,
};
module_i2c_driver(ad7291_driver);
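The ad7291 conversion leans on devm_add_action_or_reset(): registering
the disable right after a successful enable lets every later error
path and the eventual unbind release the regulator without a goto
ladder. The pattern in isolation (a generic sketch, names
hypothetical):

	static void example_reg_disable(void *reg)
	{
		regulator_disable(reg);
	}

	ret = regulator_enable(chip->reg);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, example_reg_disable, chip->reg);
	if (ret)
		return ret;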
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index 1b4b3203e428..44bb5fde83de 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -11,12 +11,41 @@
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/bitfield.h>
-#define AD7949_MASK_CHANNEL_SEL GENMASK(9, 7)
-#define AD7949_MASK_TOTAL GENMASK(13, 0)
-#define AD7949_OFFSET_CHANNEL_SEL 7
-#define AD7949_CFG_READ_BACK 0x1
-#define AD7949_CFG_REG_SIZE_BITS 14
+#define AD7949_CFG_MASK_TOTAL GENMASK(13, 0)
+
+/* CFG: Configuration Update */
+#define AD7949_CFG_MASK_OVERWRITE BIT(13)
+
+/* INCC: Input Channel Configuration */
+#define AD7949_CFG_MASK_INCC GENMASK(12, 10)
+#define AD7949_CFG_VAL_INCC_UNIPOLAR_GND 7
+#define AD7949_CFG_VAL_INCC_UNIPOLAR_COMM 6
+#define AD7949_CFG_VAL_INCC_UNIPOLAR_DIFF 4
+#define AD7949_CFG_VAL_INCC_TEMP 3
+#define AD7949_CFG_VAL_INCC_BIPOLAR 2
+#define AD7949_CFG_VAL_INCC_BIPOLAR_DIFF 0
+
+/* INX: Input channel Selection in a binary fashion */
+#define AD7949_CFG_MASK_INX GENMASK(9, 7)
+
+/* BW: select bandwidth for low-pass filter. Full or Quarter */
+#define AD7949_CFG_MASK_BW_FULL BIT(6)
+
+/* REF: reference/buffer selection */
+#define AD7949_CFG_MASK_REF GENMASK(5, 3)
+#define AD7949_CFG_VAL_REF_EXT_TEMP_BUF 3
+#define AD7949_CFG_VAL_REF_EXT_TEMP 2
+#define AD7949_CFG_VAL_REF_INT_4096 1
+#define AD7949_CFG_VAL_REF_INT_2500 0
+#define AD7949_CFG_VAL_REF_EXTERNAL BIT(1)
+
+/* SEQ: channel sequencer. Allows for scanning channels */
+#define AD7949_CFG_MASK_SEQ GENMASK(2, 1)
+
+/* RB: Read back the CFG register */
+#define AD7949_CFG_MASK_RBN BIT(0)
enum {
ID_AD7949 = 0,
@@ -41,41 +70,51 @@ static const struct ad7949_adc_spec ad7949_adc_spec[] = {
* @vref: regulator generating Vref
* @indio_dev: reference to iio structure
* @spi: reference to spi structure
+ * @refsel: reference selection
* @resolution: resolution of the chip
* @cfg: copy of the configuration register
* @current_channel: current channel in use
* @buffer: buffer to send / receive data to / from device
+ * @buf8b: be16 buffer to exchange data with the device in 8-bit transfers
*/
struct ad7949_adc_chip {
struct mutex lock;
struct regulator *vref;
struct iio_dev *indio_dev;
struct spi_device *spi;
+ u32 refsel;
u8 resolution;
u16 cfg;
unsigned int current_channel;
u16 buffer ____cacheline_aligned;
+ __be16 buf8b;
};
static int ad7949_spi_write_cfg(struct ad7949_adc_chip *ad7949_adc, u16 val,
u16 mask)
{
int ret;
- int bits_per_word = ad7949_adc->resolution;
- int shift = bits_per_word - AD7949_CFG_REG_SIZE_BITS;
- struct spi_message msg;
- struct spi_transfer tx[] = {
- {
- .tx_buf = &ad7949_adc->buffer,
- .len = 2,
- .bits_per_word = bits_per_word,
- },
- };
ad7949_adc->cfg = (val & mask) | (ad7949_adc->cfg & ~mask);
- ad7949_adc->buffer = ad7949_adc->cfg << shift;
- spi_message_init_with_transfers(&msg, tx, 1);
- ret = spi_sync(ad7949_adc->spi, &msg);
+
+ switch (ad7949_adc->spi->bits_per_word) {
+ case 16:
+ ad7949_adc->buffer = ad7949_adc->cfg << 2;
+ ret = spi_write(ad7949_adc->spi, &ad7949_adc->buffer, 2);
+ break;
+ case 14:
+ ad7949_adc->buffer = ad7949_adc->cfg;
+ ret = spi_write(ad7949_adc->spi, &ad7949_adc->buffer, 2);
+ break;
+ case 8:
+ /* Here, type is big endian as it must be sent in two transfers */
+ ad7949_adc->buf8b = cpu_to_be16(ad7949_adc->cfg << 2);
+ ret = spi_write(ad7949_adc->spi, &ad7949_adc->buf8b, 2);
+ break;
+ default:
+ dev_err(&ad7949_adc->indio_dev->dev, "unsupported BPW\n");
+ return -EINVAL;
+ }
/*
* This delay is to avoid a new request before the required time to
@@ -90,16 +129,6 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
{
int ret;
int i;
- int bits_per_word = ad7949_adc->resolution;
- int mask = GENMASK(ad7949_adc->resolution - 1, 0);
- struct spi_message msg;
- struct spi_transfer tx[] = {
- {
- .rx_buf = &ad7949_adc->buffer,
- .len = 2,
- .bits_per_word = bits_per_word,
- },
- };
/*
* 1: write CFG for sample N and read old data (sample N-2)
@@ -109,8 +138,8 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
*/
for (i = 0; i < 2; i++) {
ret = ad7949_spi_write_cfg(ad7949_adc,
- channel << AD7949_OFFSET_CHANNEL_SEL,
- AD7949_MASK_CHANNEL_SEL);
+ FIELD_PREP(AD7949_CFG_MASK_INX, channel),
+ AD7949_CFG_MASK_INX);
if (ret)
return ret;
if (channel == ad7949_adc->current_channel)
@@ -118,9 +147,11 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
}
/* 3: write something and read actual data */
- ad7949_adc->buffer = 0;
- spi_message_init_with_transfers(&msg, tx, 1);
- ret = spi_sync(ad7949_adc->spi, &msg);
+ if (ad7949_adc->spi->bits_per_word == 8)
+ ret = spi_read(ad7949_adc->spi, &ad7949_adc->buf8b, 2);
+ else
+ ret = spi_read(ad7949_adc->spi, &ad7949_adc->buffer, 2);
+
if (ret)
return ret;
@@ -132,7 +163,25 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
ad7949_adc->current_channel = channel;
- *val = ad7949_adc->buffer & mask;
+ switch (ad7949_adc->spi->bits_per_word) {
+ case 16:
+ *val = ad7949_adc->buffer;
+ /* Shift-out padding bits */
+ *val >>= 16 - ad7949_adc->resolution;
+ break;
+ case 14:
+ *val = ad7949_adc->buffer & GENMASK(13, 0);
+ break;
+ case 8:
+ /* Here, type is big endian as data was sent in two transfers */
+ *val = be16_to_cpu(ad7949_adc->buf8b);
+ /* Shift-out padding bits */
+ *val >>= 16 - ad7949_adc->resolution;
+ break;
+ default:
+ dev_err(&ad7949_adc->indio_dev->dev, "unsupported BPW\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -178,12 +227,26 @@ static int ad7949_spi_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- ret = regulator_get_voltage(ad7949_adc->vref);
- if (ret < 0)
- return ret;
+ switch (ad7949_adc->refsel) {
+ case AD7949_CFG_VAL_REF_INT_2500:
+ *val = 2500;
+ break;
+ case AD7949_CFG_VAL_REF_INT_4096:
+ *val = 4096;
+ break;
+ case AD7949_CFG_VAL_REF_EXT_TEMP:
+ case AD7949_CFG_VAL_REF_EXT_TEMP_BUF:
+ ret = regulator_get_voltage(ad7949_adc->vref);
+ if (ret < 0)
+ return ret;
+
+ /* convert value back to mV */
+ *val = ret / 1000;
+ break;
+ }
- *val = ret / 5000;
- return IIO_VAL_INT;
+ *val2 = (1 << ad7949_adc->resolution) - 1;
+ return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
@@ -199,8 +262,8 @@ static int ad7949_spi_reg_access(struct iio_dev *indio_dev,
if (readval)
*readval = ad7949_adc->cfg;
else
- ret = ad7949_spi_write_cfg(ad7949_adc,
- writeval & AD7949_MASK_TOTAL, AD7949_MASK_TOTAL);
+ ret = ad7949_spi_write_cfg(ad7949_adc, writeval,
+ AD7949_CFG_MASK_TOTAL);
return ret;
}
@@ -214,10 +277,19 @@ static int ad7949_spi_init(struct ad7949_adc_chip *ad7949_adc)
{
int ret;
int val;
+ u16 cfg;
- /* Sequencer disabled, CFG readback disabled, IN0 as default channel */
ad7949_adc->current_channel = 0;
- ret = ad7949_spi_write_cfg(ad7949_adc, 0x3C79, AD7949_MASK_TOTAL);
+
+ cfg = FIELD_PREP(AD7949_CFG_MASK_OVERWRITE, 1) |
+ FIELD_PREP(AD7949_CFG_MASK_INCC, AD7949_CFG_VAL_INCC_UNIPOLAR_GND) |
+ FIELD_PREP(AD7949_CFG_MASK_INX, ad7949_adc->current_channel) |
+ FIELD_PREP(AD7949_CFG_MASK_BW_FULL, 1) |
+ FIELD_PREP(AD7949_CFG_MASK_REF, ad7949_adc->refsel) |
+ FIELD_PREP(AD7949_CFG_MASK_SEQ, 0x0) |
+ FIELD_PREP(AD7949_CFG_MASK_RBN, 1);
+
+ ret = ad7949_spi_write_cfg(ad7949_adc, cfg, AD7949_CFG_MASK_TOTAL);
/*
* Do two dummy conversions to apply the first configuration setting.
@@ -229,12 +301,19 @@ static int ad7949_spi_init(struct ad7949_adc_chip *ad7949_adc)
return ret;
}
+static void ad7949_disable_reg(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int ad7949_spi_probe(struct spi_device *spi)
{
+ u32 spi_ctrl_mask = spi->controller->bits_per_word_mask;
struct device *dev = &spi->dev;
const struct ad7949_adc_spec *spec;
struct ad7949_adc_chip *ad7949_adc;
struct iio_dev *indio_dev;
+ u32 tmp;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*ad7949_adc));
@@ -257,16 +336,64 @@ static int ad7949_spi_probe(struct spi_device *spi)
indio_dev->num_channels = spec->num_channels;
ad7949_adc->resolution = spec->resolution;
- ad7949_adc->vref = devm_regulator_get(dev, "vref");
+ /* Set SPI bits per word */
+ if (spi_ctrl_mask & SPI_BPW_MASK(ad7949_adc->resolution)) {
+ spi->bits_per_word = ad7949_adc->resolution;
+ } else if (spi_ctrl_mask == SPI_BPW_MASK(16)) {
+ spi->bits_per_word = 16;
+ } else if (spi_ctrl_mask == SPI_BPW_MASK(8)) {
+ spi->bits_per_word = 8;
+ } else {
+ dev_err(dev, "unable to find common BPW with spi controller\n");
+ return -EINVAL;
+ }
+
+ /* Setup internal voltage reference */
+ tmp = 4096000;
+ device_property_read_u32(dev, "adi,internal-ref-microvolt", &tmp);
+
+ switch (tmp) {
+ case 2500000:
+ ad7949_adc->refsel = AD7949_CFG_VAL_REF_INT_2500;
+ break;
+ case 4096000:
+ ad7949_adc->refsel = AD7949_CFG_VAL_REF_INT_4096;
+ break;
+ default:
+ dev_err(dev, "unsupported internal voltage reference\n");
+ return -EINVAL;
+ }
+
+ /* Setup external voltage reference, buffered? */
+ ad7949_adc->vref = devm_regulator_get_optional(dev, "vrefin");
if (IS_ERR(ad7949_adc->vref)) {
- dev_err(dev, "fail to request regulator\n");
- return PTR_ERR(ad7949_adc->vref);
+ ret = PTR_ERR(ad7949_adc->vref);
+ if (ret != -ENODEV)
+ return ret;
+ /* unbuffered? */
+ ad7949_adc->vref = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(ad7949_adc->vref)) {
+ ret = PTR_ERR(ad7949_adc->vref);
+ if (ret != -ENODEV)
+ return ret;
+ } else {
+ ad7949_adc->refsel = AD7949_CFG_VAL_REF_EXT_TEMP;
+ }
+ } else {
+ ad7949_adc->refsel = AD7949_CFG_VAL_REF_EXT_TEMP_BUF;
}
- ret = regulator_enable(ad7949_adc->vref);
- if (ret < 0) {
- dev_err(dev, "fail to enable regulator\n");
- return ret;
+ if (ad7949_adc->refsel & AD7949_CFG_VAL_REF_EXTERNAL) {
+ ret = regulator_enable(ad7949_adc->vref);
+ if (ret < 0) {
+ dev_err(dev, "fail to enable regulator\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, ad7949_disable_reg,
+ ad7949_adc->vref);
+ if (ret)
+ return ret;
}
mutex_init(&ad7949_adc->lock);
@@ -274,36 +401,16 @@ static int ad7949_spi_probe(struct spi_device *spi)
ret = ad7949_spi_init(ad7949_adc);
if (ret) {
dev_err(dev, "enable to init this device: %d\n", ret);
- goto err;
+ return ret;
}
- ret = iio_device_register(indio_dev);
- if (ret) {
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
dev_err(dev, "fail to register iio device: %d\n", ret);
- goto err;
- }
-
- return 0;
-
-err:
- mutex_destroy(&ad7949_adc->lock);
- regulator_disable(ad7949_adc->vref);
return ret;
}
-static int ad7949_spi_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7949_adc_chip *ad7949_adc = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- mutex_destroy(&ad7949_adc->lock);
- regulator_disable(ad7949_adc->vref);
-
- return 0;
-}
-
static const struct of_device_id ad7949_spi_of_id[] = {
{ .compatible = "adi,ad7949" },
{ .compatible = "adi,ad7682" },
@@ -326,7 +433,6 @@ static struct spi_driver ad7949_spi_driver = {
.of_match_table = ad7949_spi_of_id,
},
.probe = ad7949_spi_probe,
- .remove = ad7949_spi_remove,
.id_table = ad7949_spi_id,
};
module_spi_driver(ad7949_spi_driver);
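For review, the CFG word ad7949_spi_init() composes with FIELD_PREP(),
evaluated for the default internal 4.096 V reference and channel 0
(refsel and the channel vary at runtime):

	OVERWRITE = 1    -> 0x2000
	INCC      = 7    -> 0x1C00   (unipolar, referenced to GND)
	INX       = 0    -> 0x0000   (channel 0)
	BW_FULL   = 1    -> 0x0040
	REF       = 1    -> 0x0008   (internal 4.096 V)
	SEQ       = 0    -> 0x0000
	RBN       = 1    -> 0x0001   (readback disabled)
	                    ------
	cfg              =  0x3C49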
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 18bf8386d50a..220228c375d3 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -299,7 +299,11 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
GENMASK(chan->scan_type.realbits - 1, 0);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- ret = regulator_get_voltage(st->vref);
+ if (st->vref)
+ ret = regulator_get_voltage(st->vref);
+ else
+ ret = regulator_get_voltage(st->reg);
+
if (ret < 0)
return ret;
*val = ret / 1000;
@@ -770,6 +774,7 @@ static int ad799x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret;
+ int extra_config = 0;
struct ad799x_state *st;
struct iio_dev *indio_dev;
const struct ad799x_chip_info *chip_info =
@@ -797,14 +802,36 @@ static int ad799x_probe(struct i2c_client *client,
ret = regulator_enable(st->reg);
if (ret)
return ret;
- st->vref = devm_regulator_get(&client->dev, "vref");
+
+ /* check if an external reference is supplied */
+ st->vref = devm_regulator_get_optional(&client->dev, "vref");
+
if (IS_ERR(st->vref)) {
- ret = PTR_ERR(st->vref);
- goto error_disable_reg;
+ if (PTR_ERR(st->vref) == -ENODEV) {
+ st->vref = NULL;
+ dev_info(&client->dev, "Using VCC reference voltage\n");
+ } else {
+ ret = PTR_ERR(st->vref);
+ goto error_disable_reg;
+ }
+ }
+
+ if (st->vref) {
+ /*
+		 * Use the external reference voltage if the hardware supports
+		 * it. The external regulator is optional; without it, VCC is
+		 * used as the reference.
+ */
+ if ((st->id == ad7991) || (st->id == ad7995) || (st->id == ad7999)) {
+ dev_info(&client->dev, "Using external reference voltage\n");
+ extra_config |= AD7991_REF_SEL;
+ ret = regulator_enable(st->vref);
+ if (ret)
+ goto error_disable_reg;
+ } else {
+ st->vref = NULL;
+ dev_warn(&client->dev, "Supplied reference not supported\n");
+ }
}
- ret = regulator_enable(st->vref);
- if (ret)
- goto error_disable_reg;
st->client = client;
@@ -815,7 +842,7 @@ static int ad799x_probe(struct i2c_client *client,
indio_dev->channels = st->chip_config->channel;
indio_dev->num_channels = chip_info->num_channels;
- ret = ad799x_update_config(st, st->chip_config->default_config);
+ ret = ad799x_update_config(st, st->chip_config->default_config | extra_config);
if (ret)
goto error_disable_vref;
@@ -845,7 +872,8 @@ static int ad799x_probe(struct i2c_client *client,
error_cleanup_ring:
iio_triggered_buffer_cleanup(indio_dev);
error_disable_vref:
- regulator_disable(st->vref);
+ if (st->vref)
+ regulator_disable(st->vref);
error_disable_reg:
regulator_disable(st->reg);
@@ -860,7 +888,8 @@ static int ad799x_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
- regulator_disable(st->vref);
+ if (st->vref)
+ regulator_disable(st->vref);
regulator_disable(st->reg);
kfree(st->rx_buf);
@@ -872,7 +901,8 @@ static int __maybe_unused ad799x_suspend(struct device *dev)
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct ad799x_state *st = iio_priv(indio_dev);
- regulator_disable(st->vref);
+ if (st->vref)
+ regulator_disable(st->vref);
regulator_disable(st->reg);
return 0;
@@ -889,17 +919,21 @@ static int __maybe_unused ad799x_resume(struct device *dev)
dev_err(dev, "Unable to enable vcc regulator\n");
return ret;
}
- ret = regulator_enable(st->vref);
- if (ret) {
- regulator_disable(st->reg);
- dev_err(dev, "Unable to enable vref regulator\n");
- return ret;
+
+ if (st->vref) {
+ ret = regulator_enable(st->vref);
+ if (ret) {
+ regulator_disable(st->reg);
+ dev_err(dev, "Unable to enable vref regulator\n");
+ return ret;
+ }
}
/* resync config */
ret = ad799x_update_config(st, st->config);
if (ret) {
- regulator_disable(st->vref);
+ if (st->vref)
+ regulator_disable(st->vref);
regulator_disable(st->reg);
return ret;
}
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 34ec0c28b2df..e939b84cbb56 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -1,8 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Aspeed AST2400/2500 ADC
+ * Aspeed AST2400/2500/2600 ADC
*
* Copyright (C) 2017 Google, Inc.
+ * Copyright (C) 2021 Aspeed Technology Inc.
+ *
+ * ADC clock formula:
+ * Ast2400/Ast2500:
+ * clock period = period of PCLK * 2 * (ADC0C[31:17] + 1) * (ADC0C[9:0] + 1)
+ * Ast2600:
+ * clock period = period of PCLK * 2 * (ADC0C[15:0] + 1)
*/
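
To make the formula concrete (numbers assumed for illustration): with a 25 MHz PCLK the PCLK period is 40 ns, so an AST2600 divisor field of ADC0C[15:0] = 4 yields 40 ns * 2 * (4 + 1) = 400 ns per ADC clock, i.e. a 2.5 MHz ADC clock. A sketch of the same arithmetic:

	/* Illustration only: ADC clock period from the AST2600 formula above. */
	static unsigned long ast2600_adc_clk_period_ns(unsigned long pclk_hz,
						       u16 divisor)
	{
		unsigned long pclk_period_ns = NSEC_PER_SEC / pclk_hz;

		return pclk_period_ns * 2 * (divisor + 1);	/* 400 ns in the example */
	}
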
#include <linux/clk.h>
@@ -13,9 +20,13 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
@@ -28,34 +39,87 @@
#define ASPEED_REG_INTERRUPT_CONTROL 0x04
#define ASPEED_REG_VGA_DETECT_CONTROL 0x08
#define ASPEED_REG_CLOCK_CONTROL 0x0C
-#define ASPEED_REG_MAX 0xC0
-
-#define ASPEED_OPERATION_MODE_POWER_DOWN (0x0 << 1)
-#define ASPEED_OPERATION_MODE_STANDBY (0x1 << 1)
-#define ASPEED_OPERATION_MODE_NORMAL (0x7 << 1)
-
-#define ASPEED_ENGINE_ENABLE BIT(0)
-
-#define ASPEED_ADC_CTRL_INIT_RDY BIT(8)
+#define ASPEED_REG_COMPENSATION_TRIM 0xC4
+/*
+ * The registers at offsets 0xC8~0xCC can be read without affecting the
+ * hardware logic in any version of the ADC.
+ */
+#define ASPEED_REG_MAX 0xD0
+
+#define ASPEED_ADC_ENGINE_ENABLE BIT(0)
+#define ASPEED_ADC_OP_MODE GENMASK(3, 1)
+#define ASPEED_ADC_OP_MODE_PWR_DOWN 0
+#define ASPEED_ADC_OP_MODE_STANDBY 1
+#define ASPEED_ADC_OP_MODE_NORMAL 7
+#define ASPEED_ADC_CTRL_COMPENSATION BIT(4)
+#define ASPEED_ADC_AUTO_COMPENSATION BIT(5)
+/*
+ * Bit 6 determines not only the reference voltage range but also the dividing
+ * circuit for battery sensing.
+ */
+#define ASPEED_ADC_REF_VOLTAGE GENMASK(7, 6)
+#define ASPEED_ADC_REF_VOLTAGE_2500mV 0
+#define ASPEED_ADC_REF_VOLTAGE_1200mV 1
+#define ASPEED_ADC_REF_VOLTAGE_EXT_HIGH 2
+#define ASPEED_ADC_REF_VOLTAGE_EXT_LOW 3
+#define ASPEED_ADC_BAT_SENSING_DIV BIT(6)
+#define ASPEED_ADC_BAT_SENSING_DIV_2_3 0
+#define ASPEED_ADC_BAT_SENSING_DIV_1_3 1
+#define ASPEED_ADC_CTRL_INIT_RDY BIT(8)
+#define ASPEED_ADC_CH7_MODE BIT(12)
+#define ASPEED_ADC_CH7_NORMAL 0
+#define ASPEED_ADC_CH7_BAT 1
+#define ASPEED_ADC_BAT_SENSING_ENABLE BIT(13)
+#define ASPEED_ADC_CTRL_CHANNEL GENMASK(31, 16)
+#define ASPEED_ADC_CTRL_CHANNEL_ENABLE(ch) FIELD_PREP(ASPEED_ADC_CTRL_CHANNEL, BIT(ch))
#define ASPEED_ADC_INIT_POLLING_TIME 500
#define ASPEED_ADC_INIT_TIMEOUT 500000
+/*
+ * When the sampling rate is too high, the ADC may not have enough charging
+ * time, resulting in a low voltage value. Thus, the default uses a slow
+ * sampling rate for most use cases.
+ */
+#define ASPEED_ADC_DEF_SAMPLING_RATE 65000
+
+struct aspeed_adc_trim_locate {
+ const unsigned int offset;
+ const unsigned int field;
+};
struct aspeed_adc_model_data {
const char *model_name;
unsigned int min_sampling_rate; // Hz
unsigned int max_sampling_rate; // Hz
- unsigned int vref_voltage; // mV
+ unsigned int vref_fixed_mv;
bool wait_init_sequence;
+ bool need_prescaler;
+ bool bat_sense_sup;
+ u8 scaler_bit_width;
+ unsigned int num_channels;
+ const struct aspeed_adc_trim_locate *trim_locate;
+};
+
+struct adc_gain {
+ u8 mult;
+ u8 div;
};
struct aspeed_adc_data {
struct device *dev;
+ const struct aspeed_adc_model_data *model_data;
+ struct regulator *regulator;
void __iomem *base;
spinlock_t clk_lock;
+ struct clk_hw *fixed_div_clk;
struct clk_hw *clk_prescaler;
struct clk_hw *clk_scaler;
struct reset_control *rst;
+ int vref_mv;
+ u32 sample_period_ns;
+ int cv;
+ bool battery_sensing;
+ struct adc_gain battery_mode_gain;
};
#define ASPEED_CHAN(_idx, _data_reg_addr) { \
@@ -65,7 +129,8 @@ struct aspeed_adc_data {
.address = (_data_reg_addr), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
}
static const struct iio_chan_spec aspeed_adc_iio_channels[] = {
@@ -87,21 +152,170 @@ static const struct iio_chan_spec aspeed_adc_iio_channels[] = {
ASPEED_CHAN(15, 0x2E),
};
+#define ASPEED_BAT_CHAN(_idx, _data_reg_addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_idx), \
+ .address = (_data_reg_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+static const struct iio_chan_spec aspeed_adc_iio_bat_channels[] = {
+ ASPEED_CHAN(0, 0x10),
+ ASPEED_CHAN(1, 0x12),
+ ASPEED_CHAN(2, 0x14),
+ ASPEED_CHAN(3, 0x16),
+ ASPEED_CHAN(4, 0x18),
+ ASPEED_CHAN(5, 0x1A),
+ ASPEED_CHAN(6, 0x1C),
+ ASPEED_BAT_CHAN(7, 0x1E),
+};
+
+static int aspeed_adc_set_trim_data(struct iio_dev *indio_dev)
+{
+ struct device_node *syscon;
+ struct regmap *scu;
+ u32 scu_otp, trimming_val;
+ struct aspeed_adc_data *data = iio_priv(indio_dev);
+
+ syscon = of_find_node_by_name(NULL, "syscon");
+	if (!syscon) {
+ dev_warn(data->dev, "Couldn't find syscon node\n");
+ return -EOPNOTSUPP;
+ }
+ scu = syscon_node_to_regmap(syscon);
+ if (IS_ERR(scu)) {
+ dev_warn(data->dev, "Failed to get syscon regmap\n");
+ return -EOPNOTSUPP;
+ }
+ if (data->model_data->trim_locate) {
+ if (regmap_read(scu, data->model_data->trim_locate->offset,
+ &scu_otp)) {
+ dev_warn(data->dev,
+ "Failed to get adc trimming data\n");
+ trimming_val = 0x8;
+ } else {
+ trimming_val =
+ ((scu_otp) &
+ (data->model_data->trim_locate->field)) >>
+ __ffs(data->model_data->trim_locate->field);
+ }
+ dev_dbg(data->dev,
+ "trimming val = %d, offset = %08x, fields = %08x\n",
+ trimming_val, data->model_data->trim_locate->offset,
+ data->model_data->trim_locate->field);
+ writel(trimming_val, data->base + ASPEED_REG_COMPENSATION_TRIM);
+ }
+ return 0;
+}
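
The mask-and-shift in aspeed_adc_set_trim_data() is the runtime form of FIELD_GET(): FIELD_GET() requires a compile-time-constant mask, and trim_locate->field is a per-model runtime value, so the extraction is open-coded with __ffs(). An equivalent helper, for illustration:

	/* Runtime equivalent of FIELD_GET() for a non-constant mask. */
	static u32 extract_field(u32 reg_val, u32 mask)
	{
		return (reg_val & mask) >> __ffs(mask);
	}
	/* e.g. extract_field(0xA5000000, GENMASK(31, 28)) == 0xA */
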
+
+static int aspeed_adc_compensation(struct iio_dev *indio_dev)
+{
+ struct aspeed_adc_data *data = iio_priv(indio_dev);
+ u32 index, adc_raw = 0;
+ u32 adc_engine_control_reg_val;
+
+ adc_engine_control_reg_val =
+ readl(data->base + ASPEED_REG_ENGINE_CONTROL);
+ adc_engine_control_reg_val &= ~ASPEED_ADC_OP_MODE;
+ adc_engine_control_reg_val |=
+ (FIELD_PREP(ASPEED_ADC_OP_MODE, ASPEED_ADC_OP_MODE_NORMAL) |
+ ASPEED_ADC_ENGINE_ENABLE);
+ /*
+	 * Enable compensation sensing:
+	 * the ADC input is then forced to half of the reference voltage, so
+	 * the expected raw reading is half of the maximum value, and the
+	 * compensation value is 0x200 - (raw ADC reading).
+	 * Averaging at least 10 samples is recommended for the final CV.
+ */
+ writel(adc_engine_control_reg_val | ASPEED_ADC_CTRL_COMPENSATION |
+ ASPEED_ADC_CTRL_CHANNEL_ENABLE(0),
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+ /*
+	 * After enabling compensation sensing mode, wait for the ADC to
+	 * stabilize; experiments show 1 ms is sufficient.
+ */
+ mdelay(1);
+
+ for (index = 0; index < 16; index++) {
+ /*
+ * Waiting for the sampling period ensures that the value acquired
+ * is fresh each time.
+ */
+ ndelay(data->sample_period_ns);
+ adc_raw += readw(data->base + aspeed_adc_iio_channels[0].address);
+ }
+ adc_raw >>= 4;
+ data->cv = BIT(ASPEED_RESOLUTION_BITS - 1) - adc_raw;
+ writel(adc_engine_control_reg_val,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+ dev_dbg(data->dev, "Compensating value = %d\n", data->cv);
+
+ return 0;
+}
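
A worked example of the compensation arithmetic (raw value assumed): with 10-bit resolution, the forced half-reference input should ideally read 0x200 (512). If the averaged raw value comes back as 504, the compensation value is +8 LSB; the driver later reports it through IIO_CHAN_INFO_OFFSET so userspace computes (raw + offset) * scale.

	/* Worked example (averaged raw value assumed): */
	u32 adc_raw = 504;			/* device reads slightly low */
	int cv = BIT(10 - 1) - adc_raw;		/* 512 - 504 = +8 LSB correction */
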
+
+static int aspeed_adc_set_sampling_rate(struct iio_dev *indio_dev, u32 rate)
+{
+ struct aspeed_adc_data *data = iio_priv(indio_dev);
+
+ if (rate < data->model_data->min_sampling_rate ||
+ rate > data->model_data->max_sampling_rate)
+ return -EINVAL;
+	/* Each sample takes 12 clocks to convert. */
+ clk_set_rate(data->clk_scaler->clk, rate * ASPEED_CLOCKS_PER_SAMPLE);
+ rate = clk_get_rate(data->clk_scaler->clk);
+ data->sample_period_ns = DIV_ROUND_UP_ULL(
+ (u64)NSEC_PER_SEC * ASPEED_CLOCKS_PER_SAMPLE, rate);
+ dev_dbg(data->dev, "Adc clock = %d sample period = %d ns", rate,
+ data->sample_period_ns);
+
+ return 0;
+}
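
Plugging in the default rate (values assumed): at 65 kS/s with 12 clocks per sample, the scaler targets a 780 kHz ADC clock, so the sample period becomes DIV_ROUND_UP(10^9 * 12, 780000) = 15385 ns, roughly 15.4 us between fresh readings:

	/* Worked example for the default 65 kS/s rate (values assumed). */
	u32 clk_hz = 65000 * ASPEED_CLOCKS_PER_SAMPLE;		/* 780 kHz */
	u32 period_ns = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC *
					 ASPEED_CLOCKS_PER_SAMPLE, clk_hz);
	/* period_ns == 15385, ~15.4 us per sample */
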
+
static int aspeed_adc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct aspeed_adc_data *data = iio_priv(indio_dev);
- const struct aspeed_adc_model_data *model_data =
- of_device_get_match_data(data->dev);
+ u32 adc_engine_control_reg_val;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- *val = readw(data->base + chan->address);
+ if (data->battery_sensing && chan->channel == 7) {
+ adc_engine_control_reg_val =
+ readl(data->base + ASPEED_REG_ENGINE_CONTROL);
+ writel(adc_engine_control_reg_val |
+ FIELD_PREP(ASPEED_ADC_CH7_MODE,
+ ASPEED_ADC_CH7_BAT) |
+ ASPEED_ADC_BAT_SENSING_ENABLE,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+ /*
+			 * After enabling battery sensing mode, wait for the
+			 * ADC to stabilize; experiments show 1 ms is sufficient.
+ */
+ mdelay(1);
+ *val = readw(data->base + chan->address);
+ *val = (*val * data->battery_mode_gain.mult) /
+ data->battery_mode_gain.div;
+ /* Restore control register value */
+ writel(adc_engine_control_reg_val,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+		} else {
+			*val = readw(data->base + chan->address);
+		}
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OFFSET:
+ if (data->battery_sensing && chan->channel == 7)
+ *val = (data->cv * data->battery_mode_gain.mult) /
+ data->battery_mode_gain.div;
+ else
+ *val = data->cv;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = model_data->vref_voltage;
+ *val = data->vref_mv;
*val2 = ASPEED_RESOLUTION_BITS;
return IIO_VAL_FRACTIONAL_LOG2;
@@ -119,19 +333,9 @@ static int aspeed_adc_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- struct aspeed_adc_data *data = iio_priv(indio_dev);
- const struct aspeed_adc_model_data *model_data =
- of_device_get_match_data(data->dev);
-
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- if (val < model_data->min_sampling_rate ||
- val > model_data->max_sampling_rate)
- return -EINVAL;
-
- clk_set_rate(data->clk_scaler->clk,
- val * ASPEED_CLOCKS_PER_SAMPLE);
- return 0;
+ return aspeed_adc_set_sampling_rate(indio_dev, val);
case IIO_CHAN_INFO_SCALE:
case IIO_CHAN_INFO_RAW:
@@ -168,14 +372,119 @@ static const struct iio_info aspeed_adc_iio_info = {
.debugfs_reg_access = aspeed_adc_reg_access,
};
+static void aspeed_adc_unregister_fixed_divider(void *data)
+{
+ struct clk_hw *clk = data;
+
+ clk_hw_unregister_fixed_factor(clk);
+}
+
+static void aspeed_adc_reset_assert(void *data)
+{
+ struct reset_control *rst = data;
+
+ reset_control_assert(rst);
+}
+
+static void aspeed_adc_clk_disable_unprepare(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static void aspeed_adc_power_down(void *data)
+{
+ struct aspeed_adc_data *priv_data = data;
+
+ writel(FIELD_PREP(ASPEED_ADC_OP_MODE, ASPEED_ADC_OP_MODE_PWR_DOWN),
+ priv_data->base + ASPEED_REG_ENGINE_CONTROL);
+}
+
+static void aspeed_adc_reg_disable(void *data)
+{
+ struct regulator *reg = data;
+
+ regulator_disable(reg);
+}
+
+static int aspeed_adc_vref_config(struct iio_dev *indio_dev)
+{
+ struct aspeed_adc_data *data = iio_priv(indio_dev);
+ int ret;
+ u32 adc_engine_control_reg_val;
+
+ if (data->model_data->vref_fixed_mv) {
+ data->vref_mv = data->model_data->vref_fixed_mv;
+ return 0;
+ }
+ adc_engine_control_reg_val =
+ readl(data->base + ASPEED_REG_ENGINE_CONTROL);
+ data->regulator = devm_regulator_get_optional(data->dev, "vref");
+ if (!IS_ERR(data->regulator)) {
+ ret = regulator_enable(data->regulator);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(
+ data->dev, aspeed_adc_reg_disable, data->regulator);
+ if (ret)
+ return ret;
+ data->vref_mv = regulator_get_voltage(data->regulator);
+ /* Conversion from uV to mV */
+ data->vref_mv /= 1000;
+ if ((data->vref_mv >= 1550) && (data->vref_mv <= 2700))
+ writel(adc_engine_control_reg_val |
+ FIELD_PREP(
+ ASPEED_ADC_REF_VOLTAGE,
+ ASPEED_ADC_REF_VOLTAGE_EXT_HIGH),
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+ else if ((data->vref_mv >= 900) && (data->vref_mv <= 1650))
+ writel(adc_engine_control_reg_val |
+ FIELD_PREP(
+ ASPEED_ADC_REF_VOLTAGE,
+ ASPEED_ADC_REF_VOLTAGE_EXT_LOW),
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+ else {
+ dev_err(data->dev, "Regulator voltage %d not support",
+ data->vref_mv);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (PTR_ERR(data->regulator) != -ENODEV)
+ return PTR_ERR(data->regulator);
+ data->vref_mv = 2500000;
+ of_property_read_u32(data->dev->of_node,
+ "aspeed,int-vref-microvolt",
+ &data->vref_mv);
+ /* Conversion from uV to mV */
+ data->vref_mv /= 1000;
+ if (data->vref_mv == 2500)
+ writel(adc_engine_control_reg_val |
+ FIELD_PREP(ASPEED_ADC_REF_VOLTAGE,
+ ASPEED_ADC_REF_VOLTAGE_2500mV),
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+ else if (data->vref_mv == 1200)
+ writel(adc_engine_control_reg_val |
+ FIELD_PREP(ASPEED_ADC_REF_VOLTAGE,
+ ASPEED_ADC_REF_VOLTAGE_1200mV),
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+ else {
+ dev_err(data->dev, "Voltage %d not support", data->vref_mv);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
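
One subtlety in the selection above: the two external ranges overlap between 1550 mV and 1650 mV, and because the high range is tested first, a regulator in the overlap is programmed as EXT_HIGH. Reduced to a sketch:

	/* Sketch of the external-reference range selection (bounds from above). */
	static int vref_mv_to_field(int mv)
	{
		if (mv >= 1550 && mv <= 2700)
			return ASPEED_ADC_REF_VOLTAGE_EXT_HIGH;	/* wins the 1550-1650 overlap */
		if (mv >= 900 && mv <= 1650)
			return ASPEED_ADC_REF_VOLTAGE_EXT_LOW;
		return -EOPNOTSUPP;
	}
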
+
static int aspeed_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct aspeed_adc_data *data;
- const struct aspeed_adc_model_data *model_data;
- const char *clk_parent_name;
int ret;
u32 adc_engine_control_reg_val;
+ unsigned long scaler_flags = 0;
+ char clk_name[32], clk_parent_name[32];
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*data));
if (!indio_dev)
@@ -183,6 +492,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
data = iio_priv(indio_dev);
data->dev = &pdev->dev;
+ data->model_data = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, indio_dev);
data->base = devm_platform_ioremap_resource(pdev, 0);
@@ -191,45 +501,117 @@ static int aspeed_adc_probe(struct platform_device *pdev)
/* Register ADC clock prescaler with source specified by device tree. */
spin_lock_init(&data->clk_lock);
- clk_parent_name = of_clk_get_parent_name(pdev->dev.of_node, 0);
-
- data->clk_prescaler = clk_hw_register_divider(
- &pdev->dev, "prescaler", clk_parent_name, 0,
- data->base + ASPEED_REG_CLOCK_CONTROL,
- 17, 15, 0, &data->clk_lock);
- if (IS_ERR(data->clk_prescaler))
- return PTR_ERR(data->clk_prescaler);
-
+ snprintf(clk_parent_name, ARRAY_SIZE(clk_parent_name), "%s",
+ of_clk_get_parent_name(pdev->dev.of_node, 0));
+ snprintf(clk_name, ARRAY_SIZE(clk_name), "%s-fixed-div",
+ data->model_data->model_name);
+ data->fixed_div_clk = clk_hw_register_fixed_factor(
+ &pdev->dev, clk_name, clk_parent_name, 0, 1, 2);
+ if (IS_ERR(data->fixed_div_clk))
+ return PTR_ERR(data->fixed_div_clk);
+
+ ret = devm_add_action_or_reset(data->dev,
+ aspeed_adc_unregister_fixed_divider,
+ data->fixed_div_clk);
+ if (ret)
+ return ret;
+	snprintf(clk_parent_name, ARRAY_SIZE(clk_parent_name), "%s", clk_name);
+
+ if (data->model_data->need_prescaler) {
+ snprintf(clk_name, ARRAY_SIZE(clk_name), "%s-prescaler",
+ data->model_data->model_name);
+ data->clk_prescaler = devm_clk_hw_register_divider(
+ &pdev->dev, clk_name, clk_parent_name, 0,
+ data->base + ASPEED_REG_CLOCK_CONTROL, 17, 15, 0,
+ &data->clk_lock);
+ if (IS_ERR(data->clk_prescaler))
+ return PTR_ERR(data->clk_prescaler);
+		snprintf(clk_parent_name, ARRAY_SIZE(clk_parent_name),
+			 "%s", clk_name);
+ scaler_flags = CLK_SET_RATE_PARENT;
+ }
/*
* Register ADC clock scaler downstream from the prescaler. Allow rate
* setting to adjust the prescaler as well.
*/
- data->clk_scaler = clk_hw_register_divider(
- &pdev->dev, "scaler", "prescaler",
- CLK_SET_RATE_PARENT,
- data->base + ASPEED_REG_CLOCK_CONTROL,
- 0, 10, 0, &data->clk_lock);
- if (IS_ERR(data->clk_scaler)) {
- ret = PTR_ERR(data->clk_scaler);
- goto scaler_error;
- }
-
- data->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ snprintf(clk_name, ARRAY_SIZE(clk_name), "%s-scaler",
+ data->model_data->model_name);
+ data->clk_scaler = devm_clk_hw_register_divider(
+ &pdev->dev, clk_name, clk_parent_name, scaler_flags,
+ data->base + ASPEED_REG_CLOCK_CONTROL, 0,
+ data->model_data->scaler_bit_width, 0, &data->clk_lock);
+ if (IS_ERR(data->clk_scaler))
+ return PTR_ERR(data->clk_scaler);
+
+ data->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
if (IS_ERR(data->rst)) {
dev_err(&pdev->dev,
"invalid or missing reset controller device tree entry");
- ret = PTR_ERR(data->rst);
- goto reset_error;
+ return PTR_ERR(data->rst);
}
reset_control_deassert(data->rst);
- model_data = of_device_get_match_data(&pdev->dev);
+ ret = devm_add_action_or_reset(data->dev, aspeed_adc_reset_assert,
+ data->rst);
+ if (ret)
+ return ret;
- if (model_data->wait_init_sequence) {
- /* Enable engine in normal mode. */
- writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
- data->base + ASPEED_REG_ENGINE_CONTROL);
+ ret = aspeed_adc_vref_config(indio_dev);
+ if (ret)
+ return ret;
+ if (of_find_property(data->dev->of_node, "aspeed,trim-data-valid",
+ NULL)) {
+ ret = aspeed_adc_set_trim_data(indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ if (of_find_property(data->dev->of_node, "aspeed,battery-sensing",
+ NULL)) {
+ if (data->model_data->bat_sense_sup) {
+ data->battery_sensing = 1;
+ if (readl(data->base + ASPEED_REG_ENGINE_CONTROL) &
+ ASPEED_ADC_BAT_SENSING_DIV) {
+ data->battery_mode_gain.mult = 3;
+ data->battery_mode_gain.div = 1;
+ } else {
+ data->battery_mode_gain.mult = 3;
+ data->battery_mode_gain.div = 2;
+ }
+		} else {
+			dev_warn(&pdev->dev,
+				 "Battery sensing requested but not supported by this model\n");
+		}
+ }
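
The gain pair undoes the input divider selected by ASPEED_ADC_BAT_SENSING_DIV: with the 1/3 divider the raw value is multiplied by 3/1, with the 2/3 divider by 3/2. For example (raw value assumed), a reading of 341 through the 2/3 divider is reported as 341 * 3 / 2 = 511:

	/* Worked example (raw value assumed): undo the 2/3 battery divider. */
	u32 raw = 341;			/* value seen after the divider */
	u32 reported = raw * 3 / 2;	/* 511: value as if undivided */
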
+
+ ret = clk_prepare_enable(data->clk_scaler->clk);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(data->dev,
+ aspeed_adc_clk_disable_unprepare,
+ data->clk_scaler->clk);
+ if (ret)
+ return ret;
+ ret = aspeed_adc_set_sampling_rate(indio_dev,
+ ASPEED_ADC_DEF_SAMPLING_RATE);
+ if (ret)
+ return ret;
+
+ adc_engine_control_reg_val =
+ readl(data->base + ASPEED_REG_ENGINE_CONTROL);
+ adc_engine_control_reg_val |=
+ FIELD_PREP(ASPEED_ADC_OP_MODE, ASPEED_ADC_OP_MODE_NORMAL) |
+ ASPEED_ADC_ENGINE_ENABLE;
+ /* Enable engine in normal mode. */
+ writel(adc_engine_control_reg_val,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+
+ ret = devm_add_action_or_reset(data->dev, aspeed_adc_power_down,
+ data);
+ if (ret)
+ return ret;
+
+ if (data->model_data->wait_init_sequence) {
/* Wait for initial sequence complete. */
ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
adc_engine_control_reg_val,
@@ -238,87 +620,99 @@ static int aspeed_adc_probe(struct platform_device *pdev)
ASPEED_ADC_INIT_POLLING_TIME,
ASPEED_ADC_INIT_TIMEOUT);
if (ret)
- goto poll_timeout_error;
+ return ret;
}
+ aspeed_adc_compensation(indio_dev);
/* Start all channels in normal mode. */
- ret = clk_prepare_enable(data->clk_scaler->clk);
- if (ret)
- goto clk_enable_error;
-
- adc_engine_control_reg_val = GENMASK(31, 16) |
- ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE;
+ adc_engine_control_reg_val =
+ readl(data->base + ASPEED_REG_ENGINE_CONTROL);
+ adc_engine_control_reg_val |= ASPEED_ADC_CTRL_CHANNEL;
writel(adc_engine_control_reg_val,
- data->base + ASPEED_REG_ENGINE_CONTROL);
+ data->base + ASPEED_REG_ENGINE_CONTROL);
- model_data = of_device_get_match_data(&pdev->dev);
- indio_dev->name = model_data->model_name;
+ indio_dev->name = data->model_data->model_name;
indio_dev->info = &aspeed_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = aspeed_adc_iio_channels;
- indio_dev->num_channels = ARRAY_SIZE(aspeed_adc_iio_channels);
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto iio_register_error;
-
- return 0;
+ indio_dev->channels = data->battery_sensing ?
+ aspeed_adc_iio_bat_channels :
+ aspeed_adc_iio_channels;
+ indio_dev->num_channels = data->model_data->num_channels;
-iio_register_error:
- writel(ASPEED_OPERATION_MODE_POWER_DOWN,
- data->base + ASPEED_REG_ENGINE_CONTROL);
- clk_disable_unprepare(data->clk_scaler->clk);
-clk_enable_error:
-poll_timeout_error:
- reset_control_assert(data->rst);
-reset_error:
- clk_hw_unregister_divider(data->clk_scaler);
-scaler_error:
- clk_hw_unregister_divider(data->clk_prescaler);
+ ret = devm_iio_device_register(data->dev, indio_dev);
return ret;
}
-static int aspeed_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct aspeed_adc_data *data = iio_priv(indio_dev);
+static const struct aspeed_adc_trim_locate ast2500_adc_trim = {
+ .offset = 0x154,
+ .field = GENMASK(31, 28),
+};
- iio_device_unregister(indio_dev);
- writel(ASPEED_OPERATION_MODE_POWER_DOWN,
- data->base + ASPEED_REG_ENGINE_CONTROL);
- clk_disable_unprepare(data->clk_scaler->clk);
- reset_control_assert(data->rst);
- clk_hw_unregister_divider(data->clk_scaler);
- clk_hw_unregister_divider(data->clk_prescaler);
+static const struct aspeed_adc_trim_locate ast2600_adc0_trim = {
+ .offset = 0x5d0,
+ .field = GENMASK(3, 0),
+};
- return 0;
-}
+static const struct aspeed_adc_trim_locate ast2600_adc1_trim = {
+ .offset = 0x5d0,
+ .field = GENMASK(7, 4),
+};
static const struct aspeed_adc_model_data ast2400_model_data = {
.model_name = "ast2400-adc",
- .vref_voltage = 2500, // mV
+ .vref_fixed_mv = 2500,
.min_sampling_rate = 10000,
.max_sampling_rate = 500000,
+ .need_prescaler = true,
+ .scaler_bit_width = 10,
+ .num_channels = 16,
};
static const struct aspeed_adc_model_data ast2500_model_data = {
.model_name = "ast2500-adc",
- .vref_voltage = 1800, // mV
+ .vref_fixed_mv = 1800,
.min_sampling_rate = 1,
.max_sampling_rate = 1000000,
.wait_init_sequence = true,
+ .need_prescaler = true,
+ .scaler_bit_width = 10,
+ .num_channels = 16,
+ .trim_locate = &ast2500_adc_trim,
+};
+
+static const struct aspeed_adc_model_data ast2600_adc0_model_data = {
+ .model_name = "ast2600-adc0",
+ .min_sampling_rate = 10000,
+ .max_sampling_rate = 500000,
+ .wait_init_sequence = true,
+ .bat_sense_sup = true,
+ .scaler_bit_width = 16,
+ .num_channels = 8,
+ .trim_locate = &ast2600_adc0_trim,
+};
+
+static const struct aspeed_adc_model_data ast2600_adc1_model_data = {
+ .model_name = "ast2600-adc1",
+ .min_sampling_rate = 10000,
+ .max_sampling_rate = 500000,
+ .wait_init_sequence = true,
+ .bat_sense_sup = true,
+ .scaler_bit_width = 16,
+ .num_channels = 8,
+ .trim_locate = &ast2600_adc1_trim,
};
static const struct of_device_id aspeed_adc_matches[] = {
{ .compatible = "aspeed,ast2400-adc", .data = &ast2400_model_data },
{ .compatible = "aspeed,ast2500-adc", .data = &ast2500_model_data },
+ { .compatible = "aspeed,ast2600-adc0", .data = &ast2600_adc0_model_data },
+ { .compatible = "aspeed,ast2600-adc1", .data = &ast2600_adc1_model_data },
{},
};
MODULE_DEVICE_TABLE(of, aspeed_adc_matches);
static struct platform_driver aspeed_adc_driver = {
.probe = aspeed_adc_probe,
- .remove = aspeed_adc_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_adc_matches,
@@ -328,5 +722,5 @@ static struct platform_driver aspeed_adc_driver = {
module_platform_driver(aspeed_adc_driver);
MODULE_AUTHOR("Rick Altherr <raltherr@google.com>");
-MODULE_DESCRIPTION("Aspeed AST2400/2500 ADC Driver");
+MODULE_DESCRIPTION("Aspeed AST2400/2500/2600 ADC Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index ea5ca163d879..4c922ef634f8 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -4,6 +4,8 @@
*
* Copyright (C) 2015 Atmel,
* 2015 Ludovic Desroches <ludovic.desroches@atmel.com>
+ * 2021 Microchip Technology, Inc. and its subsidiaries
+ * 2021 Eugen Hristev <eugen.hristev@microchip.com>
*/
#include <linux/bitops.h>
@@ -27,8 +29,9 @@
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
+struct at91_adc_reg_layout {
/* Control Register */
-#define AT91_SAMA5D2_CR 0x00
+ u16 CR;
/* Software Reset */
#define AT91_SAMA5D2_CR_SWRST BIT(0)
/* Start Conversion */
@@ -39,7 +42,7 @@
#define AT91_SAMA5D2_CR_CMPRST BIT(4)
/* Mode Register */
-#define AT91_SAMA5D2_MR 0x04
+ u16 MR;
/* Trigger Selection */
#define AT91_SAMA5D2_MR_TRGSEL(v) ((v) << 1)
/* ADTRG */
@@ -82,19 +85,19 @@
#define AT91_SAMA5D2_MR_USEQ BIT(31)
/* Channel Sequence Register 1 */
-#define AT91_SAMA5D2_SEQR1 0x08
+ u16 SEQR1;
/* Channel Sequence Register 2 */
-#define AT91_SAMA5D2_SEQR2 0x0c
+ u16 SEQR2;
/* Channel Enable Register */
-#define AT91_SAMA5D2_CHER 0x10
+ u16 CHER;
/* Channel Disable Register */
-#define AT91_SAMA5D2_CHDR 0x14
+ u16 CHDR;
/* Channel Status Register */
-#define AT91_SAMA5D2_CHSR 0x18
+ u16 CHSR;
/* Last Converted Data Register */
-#define AT91_SAMA5D2_LCDR 0x20
+ u16 LCDR;
/* Interrupt Enable Register */
-#define AT91_SAMA5D2_IER 0x24
+ u16 IER;
/* Interrupt Enable Register - TS X measurement ready */
#define AT91_SAMA5D2_IER_XRDY BIT(20)
/* Interrupt Enable Register - TS Y measurement ready */
@@ -109,22 +112,31 @@
#define AT91_SAMA5D2_IER_PEN BIT(29)
/* Interrupt Enable Register - No pen detect */
#define AT91_SAMA5D2_IER_NOPEN BIT(30)
+
/* Interrupt Disable Register */
-#define AT91_SAMA5D2_IDR 0x28
+ u16 IDR;
/* Interrupt Mask Register */
-#define AT91_SAMA5D2_IMR 0x2c
+ u16 IMR;
/* Interrupt Status Register */
-#define AT91_SAMA5D2_ISR 0x30
+ u16 ISR;
+/* End of Conversion Interrupt Enable Register */
+ u16 EOC_IER;
+/* End of Conversion Interrupt Disable Register */
+ u16 EOC_IDR;
+/* End of Conversion Interrupt Mask Register */
+ u16 EOC_IMR;
+/* End of Conversion Interrupt Status Register */
+ u16 EOC_ISR;
/* Interrupt Status Register - Pen touching sense status */
#define AT91_SAMA5D2_ISR_PENS BIT(31)
/* Last Channel Trigger Mode Register */
-#define AT91_SAMA5D2_LCTMR 0x34
+ u16 LCTMR;
/* Last Channel Compare Window Register */
-#define AT91_SAMA5D2_LCCWR 0x38
+ u16 LCCWR;
/* Overrun Status Register */
-#define AT91_SAMA5D2_OVER 0x3c
+ u16 OVER;
/* Extended Mode Register */
-#define AT91_SAMA5D2_EMR 0x40
+ u16 EMR;
/* Extended Mode Register - Oversampling rate */
#define AT91_SAMA5D2_EMR_OSR(V) ((V) << 16)
#define AT91_SAMA5D2_EMR_OSR_MASK GENMASK(17, 16)
@@ -134,24 +146,22 @@
/* Extended Mode Register - Averaging on single trigger event */
#define AT91_SAMA5D2_EMR_ASTE(V) ((V) << 20)
+
/* Compare Window Register */
-#define AT91_SAMA5D2_CWR 0x44
+ u16 CWR;
/* Channel Gain Register */
-#define AT91_SAMA5D2_CGR 0x48
-
+ u16 CGR;
/* Channel Offset Register */
-#define AT91_SAMA5D2_COR 0x4c
-#define AT91_SAMA5D2_COR_DIFF_OFFSET 16
-
-/* Channel Data Register 0 */
-#define AT91_SAMA5D2_CDR0 0x50
+ u16 COR;
+/* Channel Offset Register differential offset - constant, not a register */
+ u16 COR_diff_offset;
/* Analog Control Register */
-#define AT91_SAMA5D2_ACR 0x94
+ u16 ACR;
/* Analog Control Register - Pen detect sensitivity mask */
#define AT91_SAMA5D2_ACR_PENDETSENS_MASK GENMASK(1, 0)
/* Touchscreen Mode Register */
-#define AT91_SAMA5D2_TSMR 0xb0
+ u16 TSMR;
/* Touchscreen Mode Register - No touch mode */
#define AT91_SAMA5D2_TSMR_TSMODE_NONE 0
/* Touchscreen Mode Register - 4 wire screen, no pressure measurement */
@@ -180,13 +190,13 @@
#define AT91_SAMA5D2_TSMR_PENDET_ENA BIT(24)
/* Touchscreen X Position Register */
-#define AT91_SAMA5D2_XPOSR 0xb4
+ u16 XPOSR;
/* Touchscreen Y Position Register */
-#define AT91_SAMA5D2_YPOSR 0xb8
+ u16 YPOSR;
/* Touchscreen Pressure Register */
-#define AT91_SAMA5D2_PRESSR 0xbc
+ u16 PRESSR;
/* Trigger Register */
-#define AT91_SAMA5D2_TRGR 0xc0
+ u16 TRGR;
/* Mask for TRGMOD field of TRGR register */
#define AT91_SAMA5D2_TRGR_TRGMOD_MASK GENMASK(2, 0)
/* No trigger, only software trigger can start conversions */
@@ -205,30 +215,85 @@
#define AT91_SAMA5D2_TRGR_TRGPER(x) ((x) << 16)
/* Correction Select Register */
-#define AT91_SAMA5D2_COSR 0xd0
+ u16 COSR;
/* Correction Value Register */
-#define AT91_SAMA5D2_CVR 0xd4
+ u16 CVR;
/* Channel Error Correction Register */
-#define AT91_SAMA5D2_CECR 0xd8
+ u16 CECR;
/* Write Protection Mode Register */
-#define AT91_SAMA5D2_WPMR 0xe4
+ u16 WPMR;
/* Write Protection Status Register */
-#define AT91_SAMA5D2_WPSR 0xe8
+ u16 WPSR;
/* Version Register */
-#define AT91_SAMA5D2_VERSION 0xfc
-
-#define AT91_SAMA5D2_HW_TRIG_CNT 3
-#define AT91_SAMA5D2_SINGLE_CHAN_CNT 12
-#define AT91_SAMA5D2_DIFF_CHAN_CNT 6
+ u16 VERSION;
+};
-#define AT91_SAMA5D2_TIMESTAMP_CHAN_IDX (AT91_SAMA5D2_SINGLE_CHAN_CNT + \
- AT91_SAMA5D2_DIFF_CHAN_CNT + 1)
+static const struct at91_adc_reg_layout sama5d2_layout = {
+ .CR = 0x00,
+ .MR = 0x04,
+ .SEQR1 = 0x08,
+ .SEQR2 = 0x0c,
+ .CHER = 0x10,
+ .CHDR = 0x14,
+ .CHSR = 0x18,
+ .LCDR = 0x20,
+ .IER = 0x24,
+ .IDR = 0x28,
+ .IMR = 0x2c,
+ .ISR = 0x30,
+ .LCTMR = 0x34,
+ .LCCWR = 0x38,
+ .OVER = 0x3c,
+ .EMR = 0x40,
+ .CWR = 0x44,
+ .CGR = 0x48,
+ .COR = 0x4c,
+ .COR_diff_offset = 16,
+ .ACR = 0x94,
+ .TSMR = 0xb0,
+ .XPOSR = 0xb4,
+ .YPOSR = 0xb8,
+ .PRESSR = 0xbc,
+ .TRGR = 0xc0,
+ .COSR = 0xd0,
+ .CVR = 0xd4,
+ .CECR = 0xd8,
+ .WPMR = 0xe4,
+ .WPSR = 0xe8,
+ .VERSION = 0xfc,
+};
-#define AT91_SAMA5D2_TOUCH_X_CHAN_IDX (AT91_SAMA5D2_SINGLE_CHAN_CNT + \
- AT91_SAMA5D2_DIFF_CHAN_CNT * 2)
-#define AT91_SAMA5D2_TOUCH_Y_CHAN_IDX (AT91_SAMA5D2_TOUCH_X_CHAN_IDX + 1)
-#define AT91_SAMA5D2_TOUCH_P_CHAN_IDX (AT91_SAMA5D2_TOUCH_Y_CHAN_IDX + 1)
-#define AT91_SAMA5D2_MAX_CHAN_IDX AT91_SAMA5D2_TOUCH_P_CHAN_IDX
+static const struct at91_adc_reg_layout sama7g5_layout = {
+ .CR = 0x00,
+ .MR = 0x04,
+ .SEQR1 = 0x08,
+ .SEQR2 = 0x0c,
+ .CHER = 0x10,
+ .CHDR = 0x14,
+ .CHSR = 0x18,
+ .LCDR = 0x20,
+ .IER = 0x24,
+ .IDR = 0x28,
+ .IMR = 0x2c,
+ .ISR = 0x30,
+ .EOC_IER = 0x34,
+ .EOC_IDR = 0x38,
+ .EOC_IMR = 0x3c,
+ .EOC_ISR = 0x40,
+ .OVER = 0x4c,
+ .EMR = 0x50,
+ .CWR = 0x54,
+ .COR = 0x5c,
+ .COR_diff_offset = 0,
+ .ACR = 0xe0,
+ .TRGR = 0x100,
+ .COSR = 0x104,
+ .CVR = 0x108,
+ .CECR = 0x10c,
+ .WPMR = 0x118,
+ .WPSR = 0x11c,
+ .VERSION = 0x130,
+};
#define AT91_SAMA5D2_TOUCH_SAMPLE_PERIOD_US 2000 /* 2ms */
#define AT91_SAMA5D2_TOUCH_PEN_DETECT_DEBOUNCE_US 200
@@ -237,18 +302,6 @@
#define AT91_SAMA5D2_MAX_POS_BITS 12
-/*
- * Maximum number of bytes to hold conversion from all channels
- * without the timestamp.
- */
-#define AT91_BUFFER_MAX_CONVERSION_BYTES ((AT91_SAMA5D2_SINGLE_CHAN_CNT + \
- AT91_SAMA5D2_DIFF_CHAN_CNT) * 2)
-
-/* This total must also include the timestamp */
-#define AT91_BUFFER_MAX_BYTES (AT91_BUFFER_MAX_CONVERSION_BYTES + 8)
-
-#define AT91_BUFFER_MAX_HWORDS (AT91_BUFFER_MAX_BYTES / 2)
-
#define AT91_HWFIFO_MAX_SIZE_STR "128"
#define AT91_HWFIFO_MAX_SIZE 128
@@ -257,12 +310,12 @@
#define AT91_OSR_4SAMPLES 4
#define AT91_OSR_16SAMPLES 16
-#define AT91_SAMA5D2_CHAN_SINGLE(num, addr) \
+#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
{ \
.type = IIO_VOLTAGE, \
.channel = num, \
.address = addr, \
- .scan_index = num, \
+ .scan_index = index, \
.scan_type = { \
.sign = 'u', \
.realbits = 14, \
@@ -276,14 +329,14 @@
.indexed = 1, \
}
-#define AT91_SAMA5D2_CHAN_DIFF(num, num2, addr) \
+#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
{ \
.type = IIO_VOLTAGE, \
.differential = 1, \
.channel = num, \
.channel2 = num2, \
.address = addr, \
- .scan_index = num + AT91_SAMA5D2_SINGLE_CHAN_CNT, \
+ .scan_index = index, \
.scan_type = { \
.sign = 's', \
.realbits = 14, \
@@ -330,13 +383,51 @@
.datasheet_name = name, \
}
-#define at91_adc_readl(st, reg) readl_relaxed(st->base + reg)
-#define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg)
+#define at91_adc_readl(st, reg) \
+ readl_relaxed((st)->base + (st)->soc_info.platform->layout->reg)
+#define at91_adc_read_chan(st, reg) \
+ readl_relaxed((st)->base + reg)
+#define at91_adc_writel(st, reg, val) \
+ writel_relaxed(val, (st)->base + (st)->soc_info.platform->layout->reg)
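
With the register names turned into layout members, the accessor macros resolve a per-SoC offset at runtime; as a comment-only illustration:

	/*
	 * Illustrative expansion of the accessors above:
	 *
	 *   at91_adc_writel(st, MR, mr);
	 * expands to
	 *   writel_relaxed(mr, (st)->base + (st)->soc_info.platform->layout->MR);
	 *
	 * so the same call writes offset 0x04 on both SoCs, while e.g.
	 * at91_adc_readl(st, TRGR) resolves to 0xc0 on sama5d2 but 0x100 on
	 * sama7g5.
	 */
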
+
+/**
+ * struct at91_adc_platform - at91-sama5d2 platform information struct
+ * @layout: pointer to the reg layout struct
+ * @adc_channels: pointer to an array of channels for registering in
+ * the iio subsystem
+ * @nr_channels: number of physical channels available
+ * @touch_chan_x: index of the touchscreen X channel
+ * @touch_chan_y: index of the touchscreen Y channel
+ * @touch_chan_p: index of the touchscreen P channel
+ * @max_channels: number of total channels
+ * @max_index: highest channel index (highest index may be higher
+ * than the total channel number)
+ * @hw_trig_cnt: number of possible hardware triggers
+ */
+struct at91_adc_platform {
+ const struct at91_adc_reg_layout *layout;
+ const struct iio_chan_spec (*adc_channels)[];
+ unsigned int nr_channels;
+ unsigned int touch_chan_x;
+ unsigned int touch_chan_y;
+ unsigned int touch_chan_p;
+ unsigned int max_channels;
+ unsigned int max_index;
+ unsigned int hw_trig_cnt;
+};
+/**
+ * struct at91_adc_soc_info - at91-sama5d2 soc information struct
+ * @startup_time: device startup time
+ * @min_sample_rate: minimum sample rate in Hz
+ * @max_sample_rate: maximum sample rate in Hz
+ * @platform: pointer to the platform structure
+ */
struct at91_adc_soc_info {
unsigned startup_time;
unsigned min_sample_rate;
unsigned max_sample_rate;
+ const struct at91_adc_platform *platform;
};
struct at91_adc_trigger {
@@ -384,6 +475,15 @@ struct at91_adc_touch {
struct work_struct workq;
};
+/*
+ * Buffer size requirements:
+ * number of channels * bytes per channel (2) + timestamp bytes (8),
+ * divided by 2 because the buffer holds half-words.
+ * We assume 32 channels for now; this has to be increased if needed.
+ * An oversized buffer does no harm.
+ */
+#define AT91_BUFFER_MAX_HWORDS ((32 * 2 + 8) / 2)
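
Spelling out the arithmetic: 32 channels * 2 bytes + 8 timestamp bytes = 72 bytes = 36 half-words. A compile-time check could pin that down (sketch only):

	/* Sketch: pin down the buffer arithmetic at compile time. */
	static_assert(AT91_BUFFER_MAX_HWORDS == (32 * 2 + 8) / 2);
	static_assert(AT91_BUFFER_MAX_HWORDS == 36);
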
+
struct at91_adc_state {
void __iomem *base;
int irq;
@@ -439,29 +539,94 @@ static const struct at91_adc_trigger at91_adc_trigger_list[] = {
},
};
-static const struct iio_chan_spec at91_adc_channels[] = {
- AT91_SAMA5D2_CHAN_SINGLE(0, 0x50),
- AT91_SAMA5D2_CHAN_SINGLE(1, 0x54),
- AT91_SAMA5D2_CHAN_SINGLE(2, 0x58),
- AT91_SAMA5D2_CHAN_SINGLE(3, 0x5c),
- AT91_SAMA5D2_CHAN_SINGLE(4, 0x60),
- AT91_SAMA5D2_CHAN_SINGLE(5, 0x64),
- AT91_SAMA5D2_CHAN_SINGLE(6, 0x68),
- AT91_SAMA5D2_CHAN_SINGLE(7, 0x6c),
- AT91_SAMA5D2_CHAN_SINGLE(8, 0x70),
- AT91_SAMA5D2_CHAN_SINGLE(9, 0x74),
- AT91_SAMA5D2_CHAN_SINGLE(10, 0x78),
- AT91_SAMA5D2_CHAN_SINGLE(11, 0x7c),
- AT91_SAMA5D2_CHAN_DIFF(0, 1, 0x50),
- AT91_SAMA5D2_CHAN_DIFF(2, 3, 0x58),
- AT91_SAMA5D2_CHAN_DIFF(4, 5, 0x60),
- AT91_SAMA5D2_CHAN_DIFF(6, 7, 0x68),
- AT91_SAMA5D2_CHAN_DIFF(8, 9, 0x70),
- AT91_SAMA5D2_CHAN_DIFF(10, 11, 0x78),
- IIO_CHAN_SOFT_TIMESTAMP(AT91_SAMA5D2_TIMESTAMP_CHAN_IDX),
- AT91_SAMA5D2_CHAN_TOUCH(AT91_SAMA5D2_TOUCH_X_CHAN_IDX, "x", IIO_MOD_X),
- AT91_SAMA5D2_CHAN_TOUCH(AT91_SAMA5D2_TOUCH_Y_CHAN_IDX, "y", IIO_MOD_Y),
- AT91_SAMA5D2_CHAN_PRESSURE(AT91_SAMA5D2_TOUCH_P_CHAN_IDX, "pressure"),
+static const struct iio_chan_spec at91_sama5d2_adc_channels[] = {
+ AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x50),
+ AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x54),
+ AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x58),
+ AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x5c),
+ AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x60),
+ AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x64),
+ AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x68),
+ AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x6c),
+ AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x70),
+ AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x74),
+ AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x78),
+ AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x7c),
+ /* original ABI has the differential channels with a gap in between */
+ AT91_SAMA5D2_CHAN_DIFF(12, 0, 1, 0x50),
+ AT91_SAMA5D2_CHAN_DIFF(14, 2, 3, 0x58),
+ AT91_SAMA5D2_CHAN_DIFF(16, 4, 5, 0x60),
+ AT91_SAMA5D2_CHAN_DIFF(18, 6, 7, 0x68),
+ AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x70),
+ AT91_SAMA5D2_CHAN_DIFF(22, 10, 11, 0x78),
+ IIO_CHAN_SOFT_TIMESTAMP(23),
+ AT91_SAMA5D2_CHAN_TOUCH(24, "x", IIO_MOD_X),
+ AT91_SAMA5D2_CHAN_TOUCH(25, "y", IIO_MOD_Y),
+ AT91_SAMA5D2_CHAN_PRESSURE(26, "pressure"),
+};
+
+static const struct iio_chan_spec at91_sama7g5_adc_channels[] = {
+ AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x60),
+ AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x64),
+ AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x68),
+ AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x6c),
+ AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x70),
+ AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x74),
+ AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x78),
+ AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x7c),
+ AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x80),
+ AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x84),
+ AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x88),
+ AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x8c),
+ AT91_SAMA5D2_CHAN_SINGLE(12, 12, 0x90),
+ AT91_SAMA5D2_CHAN_SINGLE(13, 13, 0x94),
+ AT91_SAMA5D2_CHAN_SINGLE(14, 14, 0x98),
+ AT91_SAMA5D2_CHAN_SINGLE(15, 15, 0x9c),
+ AT91_SAMA5D2_CHAN_DIFF(16, 0, 1, 0x60),
+ AT91_SAMA5D2_CHAN_DIFF(17, 2, 3, 0x68),
+ AT91_SAMA5D2_CHAN_DIFF(18, 4, 5, 0x70),
+ AT91_SAMA5D2_CHAN_DIFF(19, 6, 7, 0x78),
+ AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x80),
+ AT91_SAMA5D2_CHAN_DIFF(21, 10, 11, 0x88),
+ AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90),
+ AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98),
+ IIO_CHAN_SOFT_TIMESTAMP(24),
+};
+
+static const struct at91_adc_platform sama5d2_platform = {
+ .layout = &sama5d2_layout,
+ .adc_channels = &at91_sama5d2_adc_channels,
+#define AT91_SAMA5D2_SINGLE_CHAN_CNT 12
+#define AT91_SAMA5D2_DIFF_CHAN_CNT 6
+ .nr_channels = AT91_SAMA5D2_SINGLE_CHAN_CNT +
+ AT91_SAMA5D2_DIFF_CHAN_CNT,
+#define AT91_SAMA5D2_TOUCH_X_CHAN_IDX (AT91_SAMA5D2_SINGLE_CHAN_CNT + \
+ AT91_SAMA5D2_DIFF_CHAN_CNT * 2)
+ .touch_chan_x = AT91_SAMA5D2_TOUCH_X_CHAN_IDX,
+#define AT91_SAMA5D2_TOUCH_Y_CHAN_IDX (AT91_SAMA5D2_TOUCH_X_CHAN_IDX + 1)
+ .touch_chan_y = AT91_SAMA5D2_TOUCH_Y_CHAN_IDX,
+#define AT91_SAMA5D2_TOUCH_P_CHAN_IDX (AT91_SAMA5D2_TOUCH_Y_CHAN_IDX + 1)
+ .touch_chan_p = AT91_SAMA5D2_TOUCH_P_CHAN_IDX,
+#define AT91_SAMA5D2_MAX_CHAN_IDX AT91_SAMA5D2_TOUCH_P_CHAN_IDX
+ .max_channels = ARRAY_SIZE(at91_sama5d2_adc_channels),
+ .max_index = AT91_SAMA5D2_MAX_CHAN_IDX,
+#define AT91_SAMA5D2_HW_TRIG_CNT 3
+ .hw_trig_cnt = AT91_SAMA5D2_HW_TRIG_CNT,
+};
+
+static const struct at91_adc_platform sama7g5_platform = {
+ .layout = &sama7g5_layout,
+ .adc_channels = &at91_sama7g5_adc_channels,
+#define AT91_SAMA7G5_SINGLE_CHAN_CNT 16
+#define AT91_SAMA7G5_DIFF_CHAN_CNT 8
+ .nr_channels = AT91_SAMA7G5_SINGLE_CHAN_CNT +
+ AT91_SAMA7G5_DIFF_CHAN_CNT,
+#define AT91_SAMA7G5_MAX_CHAN_IDX (AT91_SAMA7G5_SINGLE_CHAN_CNT + \
+ AT91_SAMA7G5_DIFF_CHAN_CNT)
+ .max_channels = ARRAY_SIZE(at91_sama7g5_adc_channels),
+ .max_index = AT91_SAMA7G5_MAX_CHAN_IDX,
+#define AT91_SAMA7G5_HW_TRIG_CNT 3
+ .hw_trig_cnt = AT91_SAMA7G5_HW_TRIG_CNT,
};
static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
@@ -495,6 +660,7 @@ static unsigned int at91_adc_active_scan_mask_to_reg(struct iio_dev *indio_dev)
{
u32 mask = 0;
u8 bit;
+ struct at91_adc_state *st = iio_priv(indio_dev);
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->num_channels) {
@@ -503,13 +669,66 @@ static unsigned int at91_adc_active_scan_mask_to_reg(struct iio_dev *indio_dev)
mask |= BIT(chan->channel);
}
- return mask & GENMASK(11, 0);
+ return mask & GENMASK(st->soc_info.platform->nr_channels, 0);
+}
+
+static void at91_adc_cor(struct at91_adc_state *st,
+ struct iio_chan_spec const *chan)
+{
+ u32 cor, cur_cor;
+
+ cor = BIT(chan->channel) | BIT(chan->channel2);
+
+ cur_cor = at91_adc_readl(st, COR);
+ cor <<= st->soc_info.platform->layout->COR_diff_offset;
+ if (chan->differential)
+ at91_adc_writel(st, COR, cur_cor | cor);
+ else
+ at91_adc_writel(st, COR, cur_cor & ~cor);
+}
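
A worked example of at91_adc_cor() (channel numbers assumed): for the differential pair 2/3 on sama5d2, cor = BIT(2) | BIT(3) = 0xc, shifted by COR_diff_offset (16) into bits 18..19 of COR; on sama7g5 the offset is 0, so the same bits stay in the low half:

	/* Worked example (sama5d2, differential pair 2/3): */
	u32 cor = (BIT(2) | BIT(3)) << 16;	/* 0x000c0000 ORed into COR */
	/* sama7g5: COR_diff_offset == 0, the same pair sets 0x0000000c */
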
+
+static void at91_adc_irq_status(struct at91_adc_state *st, u32 *status,
+ u32 *eoc)
+{
+ *status = at91_adc_readl(st, ISR);
+ if (st->soc_info.platform->layout->EOC_ISR)
+ *eoc = at91_adc_readl(st, EOC_ISR);
+ else
+ *eoc = *status;
+}
+
+static void at91_adc_irq_mask(struct at91_adc_state *st, u32 *status, u32 *eoc)
+{
+ *status = at91_adc_readl(st, IMR);
+ if (st->soc_info.platform->layout->EOC_IMR)
+ *eoc = at91_adc_readl(st, EOC_IMR);
+ else
+ *eoc = *status;
+}
+
+static void at91_adc_eoc_dis(struct at91_adc_state *st, unsigned int channel)
+{
+ /*
+	 * On products that have the EOC bits in a separate register, the
+	 * errata recommend not writing that register (EOC_IDR).
+	 * On products that have the EOC bits in the IDR register, writing
+	 * it is fine.
+ */
+ if (!st->soc_info.platform->layout->EOC_IDR)
+ at91_adc_writel(st, IDR, BIT(channel));
+}
+
+static void at91_adc_eoc_ena(struct at91_adc_state *st, unsigned int channel)
+{
+ if (!st->soc_info.platform->layout->EOC_IDR)
+ at91_adc_writel(st, IER, BIT(channel));
+ else
+ at91_adc_writel(st, EOC_IER, BIT(channel));
}
static void at91_adc_config_emr(struct at91_adc_state *st)
{
/* configure the extended mode register */
- unsigned int emr = at91_adc_readl(st, AT91_SAMA5D2_EMR);
+ unsigned int emr = at91_adc_readl(st, EMR);
/* select oversampling per single trigger event */
emr |= AT91_SAMA5D2_EMR_ASTE(1);
@@ -533,7 +752,7 @@ static void at91_adc_config_emr(struct at91_adc_state *st)
break;
}
- at91_adc_writel(st, AT91_SAMA5D2_EMR, emr);
+ at91_adc_writel(st, EMR, emr);
}
static int at91_adc_adjust_val_osr(struct at91_adc_state *st, int *val)
@@ -586,9 +805,9 @@ static int at91_adc_configure_touch(struct at91_adc_state *st, bool state)
if (!state) {
/* disabling touch IRQs and setting mode to no touch enabled */
- at91_adc_writel(st, AT91_SAMA5D2_IDR,
+ at91_adc_writel(st, IDR,
AT91_SAMA5D2_IER_PEN | AT91_SAMA5D2_IER_NOPEN);
- at91_adc_writel(st, AT91_SAMA5D2_TSMR, 0);
+ at91_adc_writel(st, TSMR, 0);
return 0;
}
/*
@@ -614,26 +833,26 @@ static int at91_adc_configure_touch(struct at91_adc_state *st, bool state)
tsmr |= AT91_SAMA5D2_TSMR_PENDET_ENA;
tsmr |= AT91_SAMA5D2_TSMR_TSFREQ(2) & AT91_SAMA5D2_TSMR_TSFREQ_MASK;
- at91_adc_writel(st, AT91_SAMA5D2_TSMR, tsmr);
+ at91_adc_writel(st, TSMR, tsmr);
- acr = at91_adc_readl(st, AT91_SAMA5D2_ACR);
+ acr = at91_adc_readl(st, ACR);
acr &= ~AT91_SAMA5D2_ACR_PENDETSENS_MASK;
acr |= 0x02 & AT91_SAMA5D2_ACR_PENDETSENS_MASK;
- at91_adc_writel(st, AT91_SAMA5D2_ACR, acr);
+ at91_adc_writel(st, ACR, acr);
/* Sample Period Time = (TRGPER + 1) / ADCClock */
st->touch_st.sample_period_val =
round_up((AT91_SAMA5D2_TOUCH_SAMPLE_PERIOD_US *
clk_khz / 1000) - 1, 1);
/* enable pen detect IRQ */
- at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_PEN);
+ at91_adc_writel(st, IER, AT91_SAMA5D2_IER_PEN);
return 0;
}
static u16 at91_adc_touch_pos(struct at91_adc_state *st, int reg)
{
- u32 val;
+ u32 val = 0;
u32 scale, result, pos;
/*
@@ -642,7 +861,11 @@ static u16 at91_adc_touch_pos(struct at91_adc_state *st, int reg)
* max = 2^AT91_SAMA5D2_MAX_POS_BITS - 1
*/
/* first half of register is the x or y, second half is the scale */
- val = at91_adc_readl(st, reg);
+ if (reg == st->soc_info.platform->layout->XPOSR)
+ val = at91_adc_readl(st, XPOSR);
+ else if (reg == st->soc_info.platform->layout->YPOSR)
+ val = at91_adc_readl(st, YPOSR);
+
if (!val)
dev_dbg(&st->indio_dev->dev, "pos is 0\n");
@@ -660,13 +883,13 @@ static u16 at91_adc_touch_pos(struct at91_adc_state *st, int reg)
static u16 at91_adc_touch_x_pos(struct at91_adc_state *st)
{
- st->touch_st.x_pos = at91_adc_touch_pos(st, AT91_SAMA5D2_XPOSR);
+ st->touch_st.x_pos = at91_adc_touch_pos(st, st->soc_info.platform->layout->XPOSR);
return st->touch_st.x_pos;
}
static u16 at91_adc_touch_y_pos(struct at91_adc_state *st)
{
- return at91_adc_touch_pos(st, AT91_SAMA5D2_YPOSR);
+ return at91_adc_touch_pos(st, st->soc_info.platform->layout->YPOSR);
}
static u16 at91_adc_touch_pressure(struct at91_adc_state *st)
@@ -678,7 +901,7 @@ static u16 at91_adc_touch_pressure(struct at91_adc_state *st)
u32 factor = 1000;
/* calculate the pressure */
- val = at91_adc_readl(st, AT91_SAMA5D2_PRESSR);
+ val = at91_adc_readl(st, PRESSR);
z1 = val & AT91_SAMA5D2_XYZ_MASK;
z2 = (val >> 16) & AT91_SAMA5D2_XYZ_MASK;
@@ -702,9 +925,9 @@ static int at91_adc_read_position(struct at91_adc_state *st, int chan, u16 *val)
*val = 0;
if (!st->touch_st.touching)
return -ENODATA;
- if (chan == AT91_SAMA5D2_TOUCH_X_CHAN_IDX)
+ if (chan == st->soc_info.platform->touch_chan_x)
*val = at91_adc_touch_x_pos(st);
- else if (chan == AT91_SAMA5D2_TOUCH_Y_CHAN_IDX)
+ else if (chan == st->soc_info.platform->touch_chan_y)
*val = at91_adc_touch_y_pos(st);
else
return -ENODATA;
@@ -717,7 +940,7 @@ static int at91_adc_read_pressure(struct at91_adc_state *st, int chan, u16 *val)
*val = 0;
if (!st->touch_st.touching)
return -ENODATA;
- if (chan == AT91_SAMA5D2_TOUCH_P_CHAN_IDX)
+ if (chan == st->soc_info.platform->touch_chan_p)
*val = at91_adc_touch_pressure(st);
else
return -ENODATA;
@@ -729,7 +952,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
{
struct iio_dev *indio = iio_trigger_get_drvdata(trig);
struct at91_adc_state *st = iio_priv(indio);
- u32 status = at91_adc_readl(st, AT91_SAMA5D2_TRGR);
+ u32 status = at91_adc_readl(st, TRGR);
/* clear TRGMOD */
status &= ~AT91_SAMA5D2_TRGR_TRGMOD_MASK;
@@ -738,7 +961,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
status |= st->selected_trig->trgmod_value;
/* set/unset hw trigger */
- at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
+ at91_adc_writel(st, TRGR, status);
return 0;
}
@@ -755,7 +978,7 @@ static void at91_adc_reenable_trigger(struct iio_trigger *trig)
enable_irq(st->irq);
/* Needed to ACK the DRDY interruption */
- at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+ at91_adc_readl(st, LCDR);
}
static const struct iio_trigger_ops at91_adc_trigger_ops = {
@@ -850,7 +1073,7 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
}
/* enable general overrun error signaling */
- at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_GOVRE);
+ at91_adc_writel(st, IER, AT91_SAMA5D2_IER_GOVRE);
/* Issue pending DMA requests */
dma_async_issue_pending(st->dma_st.dma_chan);
@@ -880,7 +1103,7 @@ static bool at91_adc_current_chan_is_touch(struct iio_dev *indio_dev)
return !!bitmap_subset(indio_dev->active_scan_mask,
&st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1);
+ st->soc_info.platform->max_index + 1);
}
static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
@@ -908,8 +1131,6 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
indio_dev->num_channels) {
struct iio_chan_spec const *chan =
at91_adc_chan_get(indio_dev, bit);
- u32 cor;
-
if (!chan)
continue;
/* these channel types cannot be handled by this trigger */
@@ -917,22 +1138,13 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
chan->type == IIO_PRESSURE)
continue;
- cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
-
- if (chan->differential)
- cor |= (BIT(chan->channel) | BIT(chan->channel2)) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET;
- else
- cor &= ~(BIT(chan->channel) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET);
-
- at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
+ at91_adc_cor(st, chan);
- at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
+ at91_adc_writel(st, CHER, BIT(chan->channel));
}
if (at91_adc_buffer_check_use_irq(indio_dev, st))
- at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_DRDY);
+ at91_adc_writel(st, IER, AT91_SAMA5D2_IER_DRDY);
return 0;
}
@@ -968,17 +1180,17 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
chan->type == IIO_PRESSURE)
continue;
- at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+ at91_adc_writel(st, CHDR, BIT(chan->channel));
if (st->dma_st.dma_chan)
- at91_adc_readl(st, chan->address);
+ at91_adc_read_chan(st, chan->address);
}
if (at91_adc_buffer_check_use_irq(indio_dev, st))
- at91_adc_writel(st, AT91_SAMA5D2_IDR, AT91_SAMA5D2_IER_DRDY);
+ at91_adc_writel(st, IDR, AT91_SAMA5D2_IER_DRDY);
/* read overflow register to clear possible overflow status */
- at91_adc_readl(st, AT91_SAMA5D2_OVER);
+ at91_adc_readl(st, OVER);
/* if we are using DMA we must clear registers and end DMA */
if (st->dma_st.dma_chan)
@@ -1021,13 +1233,15 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
u8 bit;
u32 mask = at91_adc_active_scan_mask_to_reg(indio_dev);
unsigned int timeout = 50;
+ u32 status, imr, eoc = 0, eoc_imr;
/*
* Check if the conversion is ready. If not, wait a little bit, and
* in case of timeout exit with an error.
*/
- while ((at91_adc_readl(st, AT91_SAMA5D2_ISR) & mask) != mask &&
- timeout) {
+ while (((eoc & mask) != mask) && timeout) {
+ at91_adc_irq_status(st, &status, &eoc);
+ at91_adc_irq_mask(st, &imr, &eoc_imr);
usleep_range(50, 100);
timeout--;
}
@@ -1054,7 +1268,7 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
* Thus, emit a warning.
*/
if (chan->type == IIO_VOLTAGE) {
- val = at91_adc_readl(st, chan->address);
+ val = at91_adc_read_chan(st, chan->address);
at91_adc_adjust_val_osr(st, &val);
st->buffer[i] = val;
} else {
@@ -1075,7 +1289,7 @@ static void at91_adc_trigger_handler_dma(struct iio_dev *indio_dev)
s64 interval;
int sample_index = 0, sample_count, sample_size;
- u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
+ u32 status = at91_adc_readl(st, ISR);
/* if we reached this point, we cannot sample faster */
if (status & AT91_SAMA5D2_IER_GOVRE)
pr_info_ratelimited("%s: conversion overrun detected\n",
@@ -1127,7 +1341,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
* actually polling the trigger now.
*/
if (iio_trigger_validate_own_device(indio_dev->trig, indio_dev))
- at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
+ at91_adc_writel(st, CR, AT91_SAMA5D2_CR_START);
if (st->dma_st.dma_chan)
at91_adc_trigger_handler_dma(indio_dev);
@@ -1174,11 +1388,11 @@ static void at91_adc_setup_samp_freq(struct iio_dev *indio_dev, unsigned freq)
startup = at91_adc_startup_time(st->soc_info.startup_time,
freq / 1000);
- mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
+ mr = at91_adc_readl(st, MR);
mr &= ~(AT91_SAMA5D2_MR_STARTUP_MASK | AT91_SAMA5D2_MR_PRESCAL_MASK);
mr |= AT91_SAMA5D2_MR_STARTUP(startup);
mr |= AT91_SAMA5D2_MR_PRESCAL(prescal);
- at91_adc_writel(st, AT91_SAMA5D2_MR, mr);
+ at91_adc_writel(st, MR, mr);
dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
freq, startup, prescal);
@@ -1198,7 +1412,7 @@ static void at91_adc_touch_data_handler(struct iio_dev *indio_dev)
int i = 0;
for_each_set_bit(bit, indio_dev->active_scan_mask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1) {
+ st->soc_info.platform->max_index + 1) {
struct iio_chan_spec const *chan =
at91_adc_chan_get(indio_dev, bit);
@@ -1224,12 +1438,11 @@ static void at91_adc_touch_data_handler(struct iio_dev *indio_dev)
static void at91_adc_pen_detect_interrupt(struct at91_adc_state *st)
{
- at91_adc_writel(st, AT91_SAMA5D2_IDR, AT91_SAMA5D2_IER_PEN);
- at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_NOPEN |
+ at91_adc_writel(st, IDR, AT91_SAMA5D2_IER_PEN);
+ at91_adc_writel(st, IER, AT91_SAMA5D2_IER_NOPEN |
AT91_SAMA5D2_IER_XRDY | AT91_SAMA5D2_IER_YRDY |
AT91_SAMA5D2_IER_PRDY);
- at91_adc_writel(st, AT91_SAMA5D2_TRGR,
- AT91_SAMA5D2_TRGR_TRGMOD_PERIODIC |
+ at91_adc_writel(st, TRGR, AT91_SAMA5D2_TRGR_TRGMOD_PERIODIC |
AT91_SAMA5D2_TRGR_TRGPER(st->touch_st.sample_period_val));
st->touch_st.touching = true;
}
@@ -1238,16 +1451,15 @@ static void at91_adc_no_pen_detect_interrupt(struct iio_dev *indio_dev)
{
struct at91_adc_state *st = iio_priv(indio_dev);
- at91_adc_writel(st, AT91_SAMA5D2_TRGR,
- AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER);
- at91_adc_writel(st, AT91_SAMA5D2_IDR, AT91_SAMA5D2_IER_NOPEN |
+ at91_adc_writel(st, TRGR, AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER);
+ at91_adc_writel(st, IDR, AT91_SAMA5D2_IER_NOPEN |
AT91_SAMA5D2_IER_XRDY | AT91_SAMA5D2_IER_YRDY |
AT91_SAMA5D2_IER_PRDY);
st->touch_st.touching = false;
at91_adc_touch_data_handler(indio_dev);
- at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_PEN);
+ at91_adc_writel(st, IER, AT91_SAMA5D2_IER_PEN);
}
static void at91_adc_workq_handler(struct work_struct *workq)
@@ -1265,12 +1477,14 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
{
struct iio_dev *indio = private;
struct at91_adc_state *st = iio_priv(indio);
- u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
- u32 imr = at91_adc_readl(st, AT91_SAMA5D2_IMR);
+ u32 status, eoc, imr, eoc_imr;
u32 rdy_mask = AT91_SAMA5D2_IER_XRDY | AT91_SAMA5D2_IER_YRDY |
AT91_SAMA5D2_IER_PRDY;
- if (!(status & imr))
+ at91_adc_irq_status(st, &status, &eoc);
+ at91_adc_irq_mask(st, &imr, &eoc_imr);
+
+ if (!(status & imr) && !(eoc & eoc_imr))
return IRQ_NONE;
if (status & AT91_SAMA5D2_IER_PEN) {
/* pen detected IRQ */
@@ -1287,9 +1501,9 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
* touching, but the measurements are not ready yet.
* read and ignore.
*/
- status = at91_adc_readl(st, AT91_SAMA5D2_XPOSR);
- status = at91_adc_readl(st, AT91_SAMA5D2_YPOSR);
- status = at91_adc_readl(st, AT91_SAMA5D2_PRESSR);
+ status = at91_adc_readl(st, XPOSR);
+ status = at91_adc_readl(st, YPOSR);
+ status = at91_adc_readl(st, PRESSR);
} else if (iio_buffer_enabled(indio) &&
(status & AT91_SAMA5D2_IER_DRDY)) {
/* triggered buffer without DMA */
@@ -1301,7 +1515,7 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
WARN(true, "Unexpected irq occurred\n");
} else if (!iio_buffer_enabled(indio)) {
/* software requested conversion */
- st->conversion_value = at91_adc_readl(st, st->chan->address);
+ st->conversion_value = at91_adc_read_chan(st, st->chan->address);
st->conversion_done = true;
wake_up_interruptible(&st->wq_data_available);
}
@@ -1312,7 +1526,6 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
struct at91_adc_state *st = iio_priv(indio_dev);
- u32 cor = 0;
u16 tmp_val;
int ret;
@@ -1358,14 +1571,10 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
st->chan = chan;
- if (chan->differential)
- cor = (BIT(chan->channel) | BIT(chan->channel2)) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET;
-
- at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
- at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
- at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
- at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
+ at91_adc_cor(st, chan);
+ at91_adc_writel(st, CHER, BIT(chan->channel));
+ at91_adc_eoc_ena(st, chan->channel);
+ at91_adc_writel(st, CR, AT91_SAMA5D2_CR_START);
ret = wait_event_interruptible_timeout(st->wq_data_available,
st->conversion_done,
@@ -1381,11 +1590,11 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
st->conversion_done = false;
}
- at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
- at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+ at91_adc_eoc_dis(st, st->chan->channel);
+ at91_adc_writel(st, CHDR, BIT(chan->channel));
/* Needed to ACK the DRDY interruption */
- at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+ at91_adc_readl(st, LCDR);
mutex_unlock(&st->lock);
@@ -1457,14 +1666,15 @@ static void at91_adc_dma_init(struct platform_device *pdev)
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct at91_adc_state *st = iio_priv(indio_dev);
struct dma_slave_config config = {0};
+ /* we have 2 bytes for each channel */
+ unsigned int sample_size = st->soc_info.platform->nr_channels * 2;
/*
* We make the buffer double the size of the fifo,
* such that DMA uses one half of the buffer (full fifo size)
* and the software uses the other half to read/write.
*/
unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE *
- AT91_BUFFER_MAX_CONVERSION_BYTES * 2,
- PAGE_SIZE);
+ sample_size * 2, PAGE_SIZE);
if (st->dma_st.dma_chan)
return;
@@ -1488,7 +1698,7 @@ static void at91_adc_dma_init(struct platform_device *pdev)
/* Configure DMA channel to read data register */
config.direction = DMA_DEV_TO_MEM;
config.src_addr = (phys_addr_t)(st->dma_st.phys_addr
- + AT91_SAMA5D2_LCDR);
+ + st->soc_info.platform->layout->LCDR);
config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
config.src_maxburst = 1;
config.dst_maxburst = 1;
@@ -1517,9 +1727,10 @@ static void at91_adc_dma_disable(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct at91_adc_state *st = iio_priv(indio_dev);
+ /* we have 2 bytes for each channel */
+ unsigned int sample_size = st->soc_info.platform->nr_channels * 2;
unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE *
- AT91_BUFFER_MAX_CONVERSION_BYTES * 2,
- PAGE_SIZE);
+ sample_size * 2, PAGE_SIZE);
/* if we are not using DMA, just return */
if (!st->dma_st.dma_chan)
@@ -1580,14 +1791,14 @@ static int at91_adc_update_scan_mode(struct iio_dev *indio_dev,
struct at91_adc_state *st = iio_priv(indio_dev);
if (bitmap_subset(scan_mask, &st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1))
+ st->soc_info.platform->max_index + 1))
return 0;
/*
* if the new bitmap is a combination of touchscreen and regular
* channels, then we are not fine
*/
if (bitmap_intersects(&st->touch_st.channels_bitmask, scan_mask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1))
+ st->soc_info.platform->max_index + 1))
return -EINVAL;
return 0;
}
@@ -1596,13 +1807,15 @@ static void at91_adc_hw_init(struct iio_dev *indio_dev)
{
struct at91_adc_state *st = iio_priv(indio_dev);
- at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
- at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff);
+ at91_adc_writel(st, CR, AT91_SAMA5D2_CR_SWRST);
+ if (st->soc_info.platform->layout->EOC_IDR)
+ at91_adc_writel(st, EOC_IDR, 0xffffffff);
+ at91_adc_writel(st, IDR, 0xffffffff);
/*
* Transfer field must be set to 2 according to the datasheet and
* allows different analog settings for each channel.
*/
- at91_adc_writel(st, AT91_SAMA5D2_MR,
+ at91_adc_writel(st, MR,
AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);
at91_adc_setup_samp_freq(indio_dev, st->soc_info.min_sample_rate);
@@ -1681,8 +1894,8 @@ static int at91_adc_buffer_and_trigger_init(struct device *dev,
fifo_attrs = NULL;
ret = devm_iio_triggered_buffer_setup_ext(&indio->dev, indio,
- &iio_pollfunc_store_time,
- &at91_adc_trigger_handler, &at91_buffer_setup_ops, fifo_attrs);
+ &iio_pollfunc_store_time, &at91_adc_trigger_handler,
+ IIO_BUFFER_DIRECTION_IN, &at91_buffer_setup_ops, fifo_attrs);
if (ret < 0) {
dev_err(dev, "couldn't initialize the buffer.\n");
return ret;
@@ -1718,21 +1931,23 @@ static int at91_adc_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
+ st = iio_priv(indio_dev);
+ st->indio_dev = indio_dev;
+
+ st->soc_info.platform = of_device_get_match_data(&pdev->dev);
+
indio_dev->name = dev_name(&pdev->dev);
indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
indio_dev->info = &at91_adc_info;
- indio_dev->channels = at91_adc_channels;
- indio_dev->num_channels = ARRAY_SIZE(at91_adc_channels);
-
- st = iio_priv(indio_dev);
- st->indio_dev = indio_dev;
+ indio_dev->channels = *st->soc_info.platform->adc_channels;
+ indio_dev->num_channels = st->soc_info.platform->max_channels;
bitmap_set(&st->touch_st.channels_bitmask,
- AT91_SAMA5D2_TOUCH_X_CHAN_IDX, 1);
+ st->soc_info.platform->touch_chan_x, 1);
bitmap_set(&st->touch_st.channels_bitmask,
- AT91_SAMA5D2_TOUCH_Y_CHAN_IDX, 1);
+ st->soc_info.platform->touch_chan_y, 1);
bitmap_set(&st->touch_st.channels_bitmask,
- AT91_SAMA5D2_TOUCH_P_CHAN_IDX, 1);
+ st->soc_info.platform->touch_chan_p, 1);
st->oversampling_ratio = AT91_OSR_1SAMPLES;
@@ -1772,7 +1987,7 @@ static int at91_adc_probe(struct platform_device *pdev)
st->selected_trig = NULL;
/* find the right trigger, or no trigger at all */
- for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT + 1; i++)
+ for (i = 0; i < st->soc_info.platform->hw_trig_cnt + 1; i++)
if (at91_adc_trigger_list[i].edge_type == edge_type) {
st->selected_trig = &at91_adc_trigger_list[i];
break;
@@ -1833,12 +2048,12 @@ static int at91_adc_probe(struct platform_device *pdev)
goto vref_disable;
}
- at91_adc_hw_init(indio_dev);
-
ret = clk_prepare_enable(st->per_clk);
if (ret)
goto vref_disable;
+ at91_adc_hw_init(indio_dev);
+
platform_set_drvdata(pdev, indio_dev);
ret = at91_adc_buffer_and_trigger_init(&pdev->dev, indio_dev);
@@ -1857,7 +2072,7 @@ static int at91_adc_probe(struct platform_device *pdev)
st->selected_trig->name);
dev_info(&pdev->dev, "version: %x\n",
- readl_relaxed(st->base + AT91_SAMA5D2_VERSION));
+ readl_relaxed(st->base + st->soc_info.platform->layout->VERSION));
return 0;
@@ -1900,7 +2115,7 @@ static __maybe_unused int at91_adc_suspend(struct device *dev)
 * and can be used by other devices.
* Otherwise, ADC will hog them and we can't go to suspend mode.
*/
- at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
+ at91_adc_writel(st, CR, AT91_SAMA5D2_CR_SWRST);
clk_disable_unprepare(st->per_clk);
regulator_disable(st->vref);
@@ -1960,6 +2175,10 @@ static SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend, at91_adc_resume);
static const struct of_device_id at91_adc_dt_match[] = {
{
.compatible = "atmel,sama5d2-adc",
+ .data = (const void *)&sama5d2_platform,
+ }, {
+ .compatible = "microchip,sama7g5-adc",
+ .data = (const void *)&sama7g5_platform,
}, {
/* sentinel */
}
@@ -1977,6 +2196,7 @@ static struct platform_driver at91_adc_driver = {
};
module_platform_driver(at91_adc_driver)
-MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@microchip.com>");
+MODULE_AUTHOR("Eugen Hristev <eugen.hristev@microchip.com");
MODULE_DESCRIPTION("Atmel AT91 SAMA5D2 ADC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 5f5e8b39e4d2..a4b8be5b8f88 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -259,7 +259,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
info->irq = platform_get_irq(pdev, 0);
if (info->irq < 0)
return info->irq;
- platform_set_drvdata(pdev, indio_dev);
+
info->regmap = axp20x->regmap;
/*
* Set ADC to enabled state at all time, including system suspend.
@@ -276,31 +276,12 @@ static int axp288_adc_probe(struct platform_device *pdev)
indio_dev->num_channels = ARRAY_SIZE(axp288_adc_channels);
indio_dev->info = &axp288_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_map_array_register(indio_dev, axp288_adc_default_maps);
+
+ ret = devm_iio_map_array_register(&pdev->dev, indio_dev, axp288_adc_default_maps);
if (ret < 0)
return ret;
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "unable to register iio device\n");
- goto err_array_unregister;
- }
- return 0;
-
-err_array_unregister:
- iio_map_array_unregister(indio_dev);
-
- return ret;
-}
-
-static int axp288_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
- iio_device_unregister(indio_dev);
- iio_map_array_unregister(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&pdev->dev, indio_dev);
}
static const struct platform_device_id axp288_adc_id_table[] = {
@@ -310,7 +291,6 @@ static const struct platform_device_id axp288_adc_id_table[] = {
static struct platform_driver axp288_adc_driver = {
.probe = axp288_adc_probe,
- .remove = axp288_adc_remove,
.id_table = axp288_adc_id_table,
.driver = {
.name = "axp288_adc",
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index 8b04b95b7b7a..03987d7e6b3d 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -280,6 +280,13 @@ static const struct iio_info berlin2_adc_info = {
.read_raw = berlin2_adc_read_raw,
};
+static void berlin2_adc_powerdown(void *regmap)
+{
+ regmap_update_bits(regmap, BERLIN2_SM_CTRL,
+ BERLIN2_SM_CTRL_ADC_POWER, 0);
+}
+
static int berlin2_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
@@ -293,7 +300,6 @@ static int berlin2_adc_probe(struct platform_device *pdev)
return -ENOMEM;
priv = iio_priv(indio_dev);
- platform_set_drvdata(pdev, indio_dev);
priv->regmap = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
@@ -333,29 +339,12 @@ static int berlin2_adc_probe(struct platform_device *pdev)
BERLIN2_SM_CTRL_ADC_POWER,
BERLIN2_SM_CTRL_ADC_POWER);
- ret = iio_device_register(indio_dev);
- if (ret) {
- /* Power down the ADC */
- regmap_update_bits(priv->regmap, BERLIN2_SM_CTRL,
- BERLIN2_SM_CTRL_ADC_POWER, 0);
+ ret = devm_add_action_or_reset(&pdev->dev, berlin2_adc_powerdown,
+ priv->regmap);
+ if (ret)
return ret;
- }
-
- return 0;
-}
-
-static int berlin2_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct berlin2_adc_priv *priv = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- /* Power down the ADC */
- regmap_update_bits(priv->regmap, BERLIN2_SM_CTRL,
- BERLIN2_SM_CTRL_ADC_POWER, 0);
- return 0;
+ return devm_iio_device_register(&pdev->dev, indio_dev);
}
static const struct of_device_id berlin2_adc_match[] = {
@@ -370,7 +359,6 @@ static struct platform_driver berlin2_adc_driver = {
.of_match_table = berlin2_adc_match,
},
.probe = berlin2_adc_probe,
- .remove = berlin2_adc_remove,
};
module_platform_driver(berlin2_adc_driver);
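
berlin2 needs a hardware power-down on teardown, which plain devm registrations cannot express, so the patch wraps it in devm_add_action_or_reset(). A sketch of the idiom (callback and names hypothetical); if registering the action fails, the helper invokes the callback immediately before returning, so the caller never has to undo by hand:

#include <linux/device.h>

static void my_hw_powerdown(void *data)
{
	/* undo whatever my_enable_hw() switched on */
}

static int my_enable_hw(struct device *dev, void *hw)
{
	int ret;

	/* ... power the block up here ... */

	ret = devm_add_action_or_reset(dev, my_hw_powerdown, hw);
	if (ret)
		return ret;	/* my_hw_powerdown(hw) already ran */

	return 0;
}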
diff --git a/drivers/iio/adc/da9150-gpadc.c b/drivers/iio/adc/da9150-gpadc.c
index 7a7a54a7ed76..8f0d3fb63b67 100644
--- a/drivers/iio/adc/da9150-gpadc.c
+++ b/drivers/iio/adc/da9150-gpadc.c
@@ -330,7 +330,6 @@ static int da9150_gpadc_probe(struct platform_device *pdev)
}
gpadc = iio_priv(indio_dev);
- platform_set_drvdata(pdev, indio_dev);
gpadc->da9150 = da9150;
gpadc->dev = dev;
mutex_init(&gpadc->lock);
@@ -347,7 +346,7 @@ static int da9150_gpadc_probe(struct platform_device *pdev)
return ret;
}
- ret = iio_map_array_register(indio_dev, da9150_gpadc_default_maps);
+ ret = devm_iio_map_array_register(&pdev->dev, indio_dev, da9150_gpadc_default_maps);
if (ret) {
dev_err(dev, "Failed to register IIO maps: %d\n", ret);
return ret;
@@ -359,28 +358,7 @@ static int da9150_gpadc_probe(struct platform_device *pdev)
indio_dev->channels = da9150_gpadc_channels;
indio_dev->num_channels = ARRAY_SIZE(da9150_gpadc_channels);
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(dev, "Failed to register IIO device: %d\n", ret);
- goto iio_map_unreg;
- }
-
- return 0;
-
-iio_map_unreg:
- iio_map_array_unregister(indio_dev);
-
- return ret;
-}
-
-static int da9150_gpadc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
- iio_device_unregister(indio_dev);
- iio_map_array_unregister(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&pdev->dev, indio_dev);
}
static struct platform_driver da9150_gpadc_driver = {
@@ -388,7 +366,6 @@ static struct platform_driver da9150_gpadc_driver = {
.name = "da9150-gpadc",
},
.probe = da9150_gpadc_probe,
- .remove = da9150_gpadc_remove,
};
module_platform_driver(da9150_gpadc_driver);
diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c
index 8edd6407b7c3..fd5a9404c8dc 100644
--- a/drivers/iio/adc/ep93xx_adc.c
+++ b/drivers/iio/adc/ep93xx_adc.c
@@ -156,15 +156,13 @@ static int ep93xx_adc_probe(struct platform_device *pdev)
struct iio_dev *iiodev;
struct ep93xx_adc_priv *priv;
struct clk *pclk;
- struct resource *res;
iiodev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
if (!iiodev)
return -ENOMEM;
priv = iio_priv(iiodev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
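
The ep93xx hunk is a pure shorthand substitution: devm_platform_ioremap_resource(pdev, 0) wraps platform_get_resource(pdev, IORESOURCE_MEM, 0) plus devm_ioremap_resource() in one call, removing the struct resource local. A sketch with hypothetical names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct my_priv {
	void __iomem *base;
};

static int my_map_regs(struct platform_device *pdev, struct my_priv *priv)
{
	/* fetch and map the first memory resource in one call */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	return 0;
}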
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
index 329c555b55cc..551e83ae573c 100644
--- a/drivers/iio/adc/fsl-imx25-gcq.c
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -172,13 +172,35 @@ static const struct regmap_config mx25_gcq_regconfig = {
.reg_stride = 4,
};
+static int mx25_gcq_ext_regulator_setup(struct device *dev,
+ struct mx25_gcq_priv *priv, u32 refp)
+{
+ char reg_name[12];
+ int ret;
+
+ if (priv->vref[refp])
+ return 0;
+
+ ret = snprintf(reg_name, sizeof(reg_name), "vref-%s",
+ mx25_gcq_refp_names[refp]);
+ if (ret < 0)
+ return ret;
+
+ priv->vref[refp] = devm_regulator_get_optional(dev, reg_name);
+ if (IS_ERR(priv->vref[refp]))
+ return dev_err_probe(dev, PTR_ERR(priv->vref[refp]),
+			"Error, trying to use external voltage reference without a %s regulator.\n",
+ reg_name);
+
+ return 0;
+}
+
static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
struct mx25_gcq_priv *priv)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
struct device *dev = &pdev->dev;
- unsigned int refp_used[4] = {};
int ret, i;
/*
@@ -194,19 +216,6 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
MX25_ADCQ_CFG_IN(i) |
MX25_ADCQ_CFG_REFN_NGND2);
- /*
- * First get all regulators to store them in channel_vref_mv if
- * necessary. Later we use that information for proper IIO scale
- * information.
- */
- priv->vref[MX25_ADC_REFP_INT] = NULL;
- priv->vref[MX25_ADC_REFP_EXT] =
- devm_regulator_get_optional(dev, "vref-ext");
- priv->vref[MX25_ADC_REFP_XP] =
- devm_regulator_get_optional(dev, "vref-xp");
- priv->vref[MX25_ADC_REFP_YP] =
- devm_regulator_get_optional(dev, "vref-yp");
-
for_each_child_of_node(np, child) {
u32 reg;
u32 refp = MX25_ADCQ_CFG_REFP_INT;
@@ -233,11 +242,10 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
case MX25_ADC_REFP_EXT:
case MX25_ADC_REFP_XP:
case MX25_ADC_REFP_YP:
- if (IS_ERR(priv->vref[refp])) {
- dev_err(dev, "Error, trying to use external voltage reference without a vref-%s regulator.",
- mx25_gcq_refp_names[refp]);
+ ret = mx25_gcq_ext_regulator_setup(&pdev->dev, priv, refp);
+ if (ret) {
of_node_put(child);
- return PTR_ERR(priv->vref[refp]);
+ return ret;
}
priv->channel_vref_mv[reg] =
regulator_get_voltage(priv->vref[refp]);
@@ -253,8 +261,6 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
return -EINVAL;
}
- ++refp_used[refp];
-
/*
* Shift the read values to the correct positions within the
* register.
@@ -285,15 +291,6 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
regmap_write(priv->regs, MX25_ADCQ_CR,
MX25_ADCQ_CR_PDMSK | MX25_ADCQ_CR_QSM_FQS);
- /* Remove unused regulators */
- for (i = 0; i != 4; ++i) {
- if (!refp_used[i]) {
- if (!IS_ERR_OR_NULL(priv->vref[i]))
- devm_regulator_put(priv->vref[i]);
- priv->vref[i] = NULL;
- }
- }
-
return 0;
}
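
The fsl-imx25-gcq rework replaces the fetch-everything-then-put-back-the-unused scheme with lazy lookup: a reference regulator is requested only the first time a child node actually selects it, and the cached handle short-circuits later calls, which is why the refp_used[] bookkeeping could go. A sketch of the pattern (names hypothetical):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int my_get_vref_once(struct device *dev, struct regulator **slot,
			    const char *name)
{
	if (*slot)	/* already requested for an earlier channel */
		return 0;

	*slot = devm_regulator_get_optional(dev, name);
	if (IS_ERR(*slot))
		return dev_err_probe(dev, PTR_ERR(*slot),
				     "missing %s regulator\n", name);

	return 0;
}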
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 4969a5f941e3..092f8d296527 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -493,22 +493,16 @@ static int imx7d_adc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return irq;
+ return dev_err_probe(dev, irq, "Failed getting irq\n");
info->clk = devm_clk_get(dev, "adc");
- if (IS_ERR(info->clk)) {
- ret = PTR_ERR(info->clk);
- dev_err(dev, "Failed getting clock, err = %d\n", ret);
- return ret;
- }
+ if (IS_ERR(info->clk))
+ return dev_err_probe(dev, PTR_ERR(info->clk), "Failed getting clock\n");
info->vref = devm_regulator_get(dev, "vref");
- if (IS_ERR(info->vref)) {
- ret = PTR_ERR(info->vref);
- dev_err(dev,
- "Failed getting reference voltage, err = %d\n", ret);
- return ret;
- }
+ if (IS_ERR(info->vref))
+ return dev_err_probe(dev, PTR_ERR(info->vref),
+ "Failed getting reference voltage\n");
platform_set_drvdata(pdev, indio_dev);
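
The imx7d hunk shows the dev_err_probe() pattern applied throughout this series: the helper returns the error it is given, logs the message for real errors, and for -EPROBE_DEFER stays silent and records the reason in /sys/kernel/debug/devices_deferred instead, collapsing the old "ret = PTR_ERR(); dev_err(); return ret;" triplet. A minimal sketch, names illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "adc");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Failed getting clock\n");

	return 0;
}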
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
new file mode 100644
index 000000000000..901dd8e1b32f
--- /dev/null
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NXP i.MX8QXP ADC driver
+ *
+ * Based on the work of Haibo Chen <haibo.chen@nxp.com>
+ * The initial developer of the original code is Haibo Chen.
+ * Portions created by Haibo Chen are Copyright (C) 2018 NXP.
+ * All Rights Reserved.
+ *
+ * Copyright (C) 2018 NXP
+ * Copyright (C) 2021 Cai Huoqing
+ */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+
+#define ADC_DRIVER_NAME "imx8qxp-adc"
+
+/* Register map definition */
+#define IMX8QXP_ADR_ADC_CTRL 0x10
+#define IMX8QXP_ADR_ADC_STAT 0x14
+#define IMX8QXP_ADR_ADC_IE 0x18
+#define IMX8QXP_ADR_ADC_DE 0x1c
+#define IMX8QXP_ADR_ADC_CFG 0x20
+#define IMX8QXP_ADR_ADC_FCTRL 0x30
+#define IMX8QXP_ADR_ADC_SWTRIG 0x34
+#define IMX8QXP_ADR_ADC_TCTRL(tid) (0xc0 + (tid) * 4)
+#define IMX8QXP_ADR_ADC_CMDH(cid) (0x100 + (cid) * 8)
+#define IMX8QXP_ADR_ADC_CMDL(cid) (0x104 + (cid) * 8)
+#define IMX8QXP_ADR_ADC_RESFIFO 0x300
+#define IMX8QXP_ADR_ADC_TST 0xffc
+
+/* ADC bit shift */
+#define IMX8QXP_ADC_IE_FWMIE_MASK GENMASK(1, 0)
+#define IMX8QXP_ADC_CTRL_FIFO_RESET_MASK BIT(8)
+#define IMX8QXP_ADC_CTRL_SOFTWARE_RESET_MASK BIT(1)
+#define IMX8QXP_ADC_CTRL_ADC_EN_MASK BIT(0)
+#define IMX8QXP_ADC_TCTRL_TCMD_MASK GENMASK(31, 24)
+#define IMX8QXP_ADC_TCTRL_TDLY_MASK GENMASK(23, 16)
+#define IMX8QXP_ADC_TCTRL_TPRI_MASK GENMASK(15, 8)
+#define IMX8QXP_ADC_TCTRL_HTEN_MASK GENMASK(7, 0)
+#define IMX8QXP_ADC_CMDL_CSCALE_MASK GENMASK(13, 8)
+#define IMX8QXP_ADC_CMDL_MODE_MASK BIT(7)
+#define IMX8QXP_ADC_CMDL_DIFF_MASK BIT(6)
+#define IMX8QXP_ADC_CMDL_ABSEL_MASK BIT(5)
+#define IMX8QXP_ADC_CMDL_ADCH_MASK GENMASK(2, 0)
+#define IMX8QXP_ADC_CMDH_NEXT_MASK GENMASK(31, 24)
+#define IMX8QXP_ADC_CMDH_LOOP_MASK GENMASK(23, 16)
+#define IMX8QXP_ADC_CMDH_AVGS_MASK GENMASK(15, 12)
+#define IMX8QXP_ADC_CMDH_STS_MASK BIT(8)
+#define IMX8QXP_ADC_CMDH_LWI_MASK GENMASK(7, 7)
+#define IMX8QXP_ADC_CMDH_CMPEN_MASK GENMASK(0, 0)
+#define IMX8QXP_ADC_CFG_PWREN_MASK BIT(28)
+#define IMX8QXP_ADC_CFG_PUDLY_MASK GENMASK(23, 16)
+#define IMX8QXP_ADC_CFG_REFSEL_MASK GENMASK(7, 6)
+#define IMX8QXP_ADC_CFG_PWRSEL_MASK GENMASK(5, 4)
+#define IMX8QXP_ADC_CFG_TPRICTRL_MASK GENMASK(3, 0)
+#define IMX8QXP_ADC_FCTRL_FWMARK_MASK GENMASK(20, 16)
+#define IMX8QXP_ADC_FCTRL_FCOUNT_MASK GENMASK(4, 0)
+#define IMX8QXP_ADC_RESFIFO_VAL_MASK GENMASK(18, 3)
+
+/* ADC PARAMETER */
+#define IMX8QXP_ADC_CMDL_CHANNEL_SCALE_FULL GENMASK(5, 0)
+#define IMX8QXP_ADC_CMDL_SEL_A_A_B_CHANNEL 0
+#define IMX8QXP_ADC_CMDL_STANDARD_RESOLUTION 0
+#define IMX8QXP_ADC_CMDL_MODE_SINGLE 0
+#define IMX8QXP_ADC_CMDH_LWI_INCREMENT_DIS 0
+#define IMX8QXP_ADC_CMDH_CMPEN_DIS 0
+#define IMX8QXP_ADC_PAUSE_EN BIT(31)
+#define IMX8QXP_ADC_TCTRL_TPRI_PRIORITY_HIGH 0
+
+#define IMX8QXP_ADC_TCTRL_HTEN_HW_TRIG_DIS		0
+
+#define IMX8QXP_ADC_TIMEOUT msecs_to_jiffies(100)
+
+struct imx8qxp_adc {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ struct clk *ipg_clk;
+ struct regulator *vref;
+ /* Serialise ADC channel reads */
+ struct mutex lock;
+ struct completion completion;
+};
+
+#define IMX8QXP_ADC_CHAN(_idx) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+
+static const struct iio_chan_spec imx8qxp_adc_iio_channels[] = {
+ IMX8QXP_ADC_CHAN(0),
+ IMX8QXP_ADC_CHAN(1),
+ IMX8QXP_ADC_CHAN(2),
+ IMX8QXP_ADC_CHAN(3),
+ IMX8QXP_ADC_CHAN(4),
+ IMX8QXP_ADC_CHAN(5),
+ IMX8QXP_ADC_CHAN(6),
+ IMX8QXP_ADC_CHAN(7),
+};
+
+static void imx8qxp_adc_reset(struct imx8qxp_adc *adc)
+{
+ u32 ctrl;
+
+	/* software reset, need to clear the set bit */
+ ctrl = readl(adc->regs + IMX8QXP_ADR_ADC_CTRL);
+ ctrl |= FIELD_PREP(IMX8QXP_ADC_CTRL_SOFTWARE_RESET_MASK, 1);
+ writel(ctrl, adc->regs + IMX8QXP_ADR_ADC_CTRL);
+ udelay(10);
+ ctrl &= ~FIELD_PREP(IMX8QXP_ADC_CTRL_SOFTWARE_RESET_MASK, 1);
+ writel(ctrl, adc->regs + IMX8QXP_ADR_ADC_CTRL);
+
+ /* reset the fifo */
+ ctrl |= FIELD_PREP(IMX8QXP_ADC_CTRL_FIFO_RESET_MASK, 1);
+ writel(ctrl, adc->regs + IMX8QXP_ADR_ADC_CTRL);
+}
+
+static void imx8qxp_adc_reg_config(struct imx8qxp_adc *adc, int channel)
+{
+ u32 adc_cfg, adc_tctrl, adc_cmdl, adc_cmdh;
+
+ /* ADC configuration */
+ adc_cfg = FIELD_PREP(IMX8QXP_ADC_CFG_PWREN_MASK, 1) |
+ FIELD_PREP(IMX8QXP_ADC_CFG_PUDLY_MASK, 0x80)|
+ FIELD_PREP(IMX8QXP_ADC_CFG_REFSEL_MASK, 0) |
+ FIELD_PREP(IMX8QXP_ADC_CFG_PWRSEL_MASK, 3) |
+ FIELD_PREP(IMX8QXP_ADC_CFG_TPRICTRL_MASK, 0);
+ writel(adc_cfg, adc->regs + IMX8QXP_ADR_ADC_CFG);
+
+ /* config the trigger control */
+ adc_tctrl = FIELD_PREP(IMX8QXP_ADC_TCTRL_TCMD_MASK, 1) |
+ FIELD_PREP(IMX8QXP_ADC_TCTRL_TDLY_MASK, 0) |
+ FIELD_PREP(IMX8QXP_ADC_TCTRL_TPRI_MASK, IMX8QXP_ADC_TCTRL_TPRI_PRIORITY_HIGH) |
+		    FIELD_PREP(IMX8QXP_ADC_TCTRL_HTEN_MASK, IMX8QXP_ADC_TCTRL_HTEN_HW_TRIG_DIS);
+ writel(adc_tctrl, adc->regs + IMX8QXP_ADR_ADC_TCTRL(0));
+
+ /* config the cmd */
+ adc_cmdl = FIELD_PREP(IMX8QXP_ADC_CMDL_CSCALE_MASK, IMX8QXP_ADC_CMDL_CHANNEL_SCALE_FULL) |
+ FIELD_PREP(IMX8QXP_ADC_CMDL_MODE_MASK, IMX8QXP_ADC_CMDL_STANDARD_RESOLUTION) |
+ FIELD_PREP(IMX8QXP_ADC_CMDL_DIFF_MASK, IMX8QXP_ADC_CMDL_MODE_SINGLE) |
+ FIELD_PREP(IMX8QXP_ADC_CMDL_ABSEL_MASK, IMX8QXP_ADC_CMDL_SEL_A_A_B_CHANNEL) |
+ FIELD_PREP(IMX8QXP_ADC_CMDL_ADCH_MASK, channel);
+ writel(adc_cmdl, adc->regs + IMX8QXP_ADR_ADC_CMDL(0));
+
+ adc_cmdh = FIELD_PREP(IMX8QXP_ADC_CMDH_NEXT_MASK, 0) |
+ FIELD_PREP(IMX8QXP_ADC_CMDH_LOOP_MASK, 0) |
+ FIELD_PREP(IMX8QXP_ADC_CMDH_AVGS_MASK, 7) |
+ FIELD_PREP(IMX8QXP_ADC_CMDH_STS_MASK, 0) |
+ FIELD_PREP(IMX8QXP_ADC_CMDH_LWI_MASK, IMX8QXP_ADC_CMDH_LWI_INCREMENT_DIS) |
+ FIELD_PREP(IMX8QXP_ADC_CMDH_CMPEN_MASK, IMX8QXP_ADC_CMDH_CMPEN_DIS);
+ writel(adc_cmdh, adc->regs + IMX8QXP_ADR_ADC_CMDH(0));
+}
+
+static void imx8qxp_adc_fifo_config(struct imx8qxp_adc *adc)
+{
+ u32 fifo_ctrl, interrupt_en;
+
+ fifo_ctrl = readl(adc->regs + IMX8QXP_ADR_ADC_FCTRL);
+ fifo_ctrl &= ~IMX8QXP_ADC_FCTRL_FWMARK_MASK;
+	/* watermark of 0: interrupt as soon as one sample is in the FIFO */
+ fifo_ctrl |= FIELD_PREP(IMX8QXP_ADC_FCTRL_FWMARK_MASK, 0);
+ writel(fifo_ctrl, adc->regs + IMX8QXP_ADR_ADC_FCTRL);
+
+ /* FIFO Watermark Interrupt Enable */
+ interrupt_en = readl(adc->regs + IMX8QXP_ADR_ADC_IE);
+ interrupt_en |= FIELD_PREP(IMX8QXP_ADC_IE_FWMIE_MASK, 1);
+ writel(interrupt_en, adc->regs + IMX8QXP_ADR_ADC_IE);
+}
+
+static void imx8qxp_adc_disable(struct imx8qxp_adc *adc)
+{
+ u32 ctrl;
+
+ ctrl = readl(adc->regs + IMX8QXP_ADR_ADC_CTRL);
+ ctrl &= ~FIELD_PREP(IMX8QXP_ADC_CTRL_ADC_EN_MASK, 1);
+ writel(ctrl, adc->regs + IMX8QXP_ADR_ADC_CTRL);
+}
+
+static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct imx8qxp_adc *adc = iio_priv(indio_dev);
+ struct device *dev = adc->dev;
+
+ u32 ctrl, vref_uv;
+ long ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ pm_runtime_get_sync(dev);
+
+ mutex_lock(&adc->lock);
+ reinit_completion(&adc->completion);
+
+ imx8qxp_adc_reg_config(adc, chan->channel);
+
+ imx8qxp_adc_fifo_config(adc);
+
+ /* adc enable */
+ ctrl = readl(adc->regs + IMX8QXP_ADR_ADC_CTRL);
+ ctrl |= FIELD_PREP(IMX8QXP_ADC_CTRL_ADC_EN_MASK, 1);
+ writel(ctrl, adc->regs + IMX8QXP_ADR_ADC_CTRL);
+ /* adc start */
+ writel(1, adc->regs + IMX8QXP_ADR_ADC_SWTRIG);
+
+ ret = wait_for_completion_interruptible_timeout(&adc->completion,
+ IMX8QXP_ADC_TIMEOUT);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_sync_autosuspend(dev);
+
+ if (ret == 0) {
+ mutex_unlock(&adc->lock);
+ return -ETIMEDOUT;
+ }
+ if (ret < 0) {
+ mutex_unlock(&adc->lock);
+ return ret;
+ }
+
+ *val = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
+ readl(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
+
+ mutex_unlock(&adc->lock);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ vref_uv = regulator_get_voltage(adc->vref);
+ *val = vref_uv / 1000;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = clk_get_rate(adc->clk) / 3;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t imx8qxp_adc_isr(int irq, void *dev_id)
+{
+ struct imx8qxp_adc *adc = dev_id;
+ u32 fifo_count;
+
+ fifo_count = FIELD_GET(IMX8QXP_ADC_FCTRL_FCOUNT_MASK,
+ readl(adc->regs + IMX8QXP_ADR_ADC_FCTRL));
+
+ if (fifo_count)
+ complete(&adc->completion);
+
+ return IRQ_HANDLED;
+}
+
+static int imx8qxp_adc_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct imx8qxp_adc *adc = iio_priv(indio_dev);
+ struct device *dev = adc->dev;
+
+ if (!readval || reg % 4 || reg > IMX8QXP_ADR_ADC_TST)
+ return -EINVAL;
+
+ pm_runtime_get_sync(dev);
+
+ *readval = readl(adc->regs + reg);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_sync_autosuspend(dev);
+
+ return 0;
+}
+
+static const struct iio_info imx8qxp_adc_iio_info = {
+ .read_raw = &imx8qxp_adc_read_raw,
+ .debugfs_reg_access = &imx8qxp_adc_reg_access,
+};
+
+static int imx8qxp_adc_probe(struct platform_device *pdev)
+{
+ struct imx8qxp_adc *adc;
+ struct iio_dev *indio_dev;
+ struct device *dev = &pdev->dev;
+ int irq;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!indio_dev) {
+ dev_err(dev, "Failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(indio_dev);
+ adc->dev = dev;
+
+ mutex_init(&adc->lock);
+ adc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adc->regs))
+ return PTR_ERR(adc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ adc->clk = devm_clk_get(dev, "per");
+ if (IS_ERR(adc->clk))
+ return dev_err_probe(dev, PTR_ERR(adc->clk), "Failed getting clock\n");
+
+ adc->ipg_clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(adc->ipg_clk))
+ return dev_err_probe(dev, PTR_ERR(adc->ipg_clk), "Failed getting clock\n");
+
+ adc->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(adc->vref))
+ return dev_err_probe(dev, PTR_ERR(adc->vref), "Failed getting reference voltage\n");
+
+ ret = regulator_enable(adc->vref);
+ if (ret) {
+ dev_err(dev, "Can't enable adc reference top voltage\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ init_completion(&adc->completion);
+
+ indio_dev->name = ADC_DRIVER_NAME;
+ indio_dev->info = &imx8qxp_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = imx8qxp_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(imx8qxp_adc_iio_channels);
+
+ ret = clk_prepare_enable(adc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not prepare or enable the clock.\n");
+ goto error_regulator_disable;
+ }
+
+ ret = clk_prepare_enable(adc->ipg_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not prepare or enable the clock.\n");
+ goto error_adc_clk_disable;
+ }
+
+ ret = devm_request_irq(dev, irq, imx8qxp_adc_isr, 0, ADC_DRIVER_NAME, adc);
+ if (ret < 0) {
+ dev_err(dev, "Failed requesting irq, irq = %d\n", irq);
+ goto error_ipg_clk_disable;
+ }
+
+ imx8qxp_adc_reset(adc);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ imx8qxp_adc_disable(adc);
+ dev_err(dev, "Couldn't register the device.\n");
+ goto error_ipg_clk_disable;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+error_ipg_clk_disable:
+ clk_disable_unprepare(adc->ipg_clk);
+error_adc_clk_disable:
+ clk_disable_unprepare(adc->clk);
+error_regulator_disable:
+ regulator_disable(adc->vref);
+
+ return ret;
+}
+
+static int imx8qxp_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct imx8qxp_adc *adc = iio_priv(indio_dev);
+ struct device *dev = adc->dev;
+
+ pm_runtime_get_sync(dev);
+
+ iio_device_unregister(indio_dev);
+
+ imx8qxp_adc_disable(adc);
+
+ clk_disable_unprepare(adc->clk);
+ clk_disable_unprepare(adc->ipg_clk);
+ regulator_disable(adc->vref);
+
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
+ return 0;
+}
+
+static __maybe_unused int imx8qxp_adc_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct imx8qxp_adc *adc = iio_priv(indio_dev);
+
+ imx8qxp_adc_disable(adc);
+
+ clk_disable_unprepare(adc->clk);
+ clk_disable_unprepare(adc->ipg_clk);
+ regulator_disable(adc->vref);
+
+ return 0;
+}
+
+static __maybe_unused int imx8qxp_adc_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct imx8qxp_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(adc->vref);
+ if (ret) {
+ dev_err(dev, "Can't enable adc reference top voltage, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(adc->clk);
+ if (ret) {
+ dev_err(dev, "Could not prepare or enable clock.\n");
+ goto err_disable_reg;
+ }
+
+ ret = clk_prepare_enable(adc->ipg_clk);
+ if (ret) {
+ dev_err(dev, "Could not prepare or enable clock.\n");
+ goto err_unprepare_clk;
+ }
+
+ imx8qxp_adc_reset(adc);
+
+ return 0;
+
+err_unprepare_clk:
+ clk_disable_unprepare(adc->clk);
+
+err_disable_reg:
+ regulator_disable(adc->vref);
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx8qxp_adc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(imx8qxp_adc_runtime_suspend, imx8qxp_adc_runtime_resume, NULL)
+};
+
+static const struct of_device_id imx8qxp_adc_match[] = {
+ { .compatible = "nxp,imx8qxp-adc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8qxp_adc_match);
+
+static struct platform_driver imx8qxp_adc_driver = {
+ .probe = imx8qxp_adc_probe,
+ .remove = imx8qxp_adc_remove,
+ .driver = {
+ .name = ADC_DRIVER_NAME,
+ .of_match_table = imx8qxp_adc_match,
+ .pm = &imx8qxp_adc_pm_ops,
+ },
+};
+
+module_platform_driver(imx8qxp_adc_driver);
+
+MODULE_DESCRIPTION("i.MX8QuadXPlus ADC driver");
+MODULE_LICENSE("GPL v2");
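
The new imx8qxp driver composes every register from GENMASK()-defined fields via FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, which derive the shift from the mask at compile time. A standalone sketch of the idiom with made-up field names:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_CMDL_ADCH_MASK	GENMASK(2, 0)	/* channel select */
#define MY_CMDL_DIFF_MASK	BIT(6)		/* differential mode */

static u32 my_build_cmdl(unsigned int channel, bool differential)
{
	/* each value is shifted into place by its mask, no manual shifts */
	return FIELD_PREP(MY_CMDL_ADCH_MASK, channel) |
	       FIELD_PREP(MY_CMDL_DIFF_MASK, differential);
}

static unsigned int my_read_channel(u32 cmdl)
{
	/* extract and right-align the field described by the mask */
	return FIELD_GET(MY_CMDL_ADCH_MASK, cmdl);
}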
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
index 75394350eb4c..616de0c3a049 100644
--- a/drivers/iio/adc/intel_mrfld_adc.c
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -205,8 +205,6 @@ static int mrfld_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- platform_set_drvdata(pdev, indio_dev);
-
indio_dev->name = pdev->name;
indio_dev->channels = mrfld_adc_channels;
@@ -214,28 +212,11 @@ static int mrfld_adc_probe(struct platform_device *pdev)
indio_dev->info = &mrfld_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_map_array_register(indio_dev, iio_maps);
+ ret = devm_iio_map_array_register(dev, indio_dev, iio_maps);
if (ret)
return ret;
- ret = devm_iio_device_register(dev, indio_dev);
- if (ret < 0)
- goto err_array_unregister;
-
- return 0;
-
-err_array_unregister:
- iio_map_array_unregister(indio_dev);
- return ret;
-}
-
-static int mrfld_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
- iio_map_array_unregister(indio_dev);
-
- return 0;
+ return devm_iio_device_register(dev, indio_dev);
}
static const struct platform_device_id mrfld_adc_id_table[] = {
@@ -249,7 +230,6 @@ static struct platform_driver mrfld_adc_driver = {
.name = "mrfld_bcove_adc",
},
.probe = mrfld_adc_probe,
- .remove = mrfld_adc_remove,
.id_table = mrfld_adc_id_table,
};
module_platform_driver(mrfld_adc_driver);
diff --git a/drivers/iio/adc/lp8788_adc.c b/drivers/iio/adc/lp8788_adc.c
index 8fb57e375529..6d9b354bc705 100644
--- a/drivers/iio/adc/lp8788_adc.c
+++ b/drivers/iio/adc/lp8788_adc.c
@@ -163,7 +163,8 @@ static struct iio_map lp8788_default_iio_maps[] = {
{ }
};
-static int lp8788_iio_map_register(struct iio_dev *indio_dev,
+static int lp8788_iio_map_register(struct device *dev,
+ struct iio_dev *indio_dev,
struct lp8788_platform_data *pdata,
struct lp8788_adc *adc)
{
@@ -173,7 +174,7 @@ static int lp8788_iio_map_register(struct iio_dev *indio_dev,
map = (!pdata || !pdata->adc_pdata) ?
lp8788_default_iio_maps : pdata->adc_pdata;
- ret = iio_map_array_register(indio_dev, map);
+ ret = devm_iio_map_array_register(dev, indio_dev, map);
if (ret) {
dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
return ret;
@@ -196,9 +197,8 @@ static int lp8788_adc_probe(struct platform_device *pdev)
adc = iio_priv(indio_dev);
adc->lp = lp;
- platform_set_drvdata(pdev, indio_dev);
- ret = lp8788_iio_map_register(indio_dev, lp->pdata, adc);
+ ret = lp8788_iio_map_register(&pdev->dev, indio_dev, lp->pdata, adc);
if (ret)
return ret;
@@ -210,32 +210,11 @@ static int lp8788_adc_probe(struct platform_device *pdev)
indio_dev->channels = lp8788_adc_channels;
indio_dev->num_channels = ARRAY_SIZE(lp8788_adc_channels);
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&pdev->dev, "iio dev register err: %d\n", ret);
- goto err_iio_device;
- }
-
- return 0;
-
-err_iio_device:
- iio_map_array_unregister(indio_dev);
- return ret;
-}
-
-static int lp8788_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
- iio_device_unregister(indio_dev);
- iio_map_array_unregister(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&pdev->dev, indio_dev);
}
static struct platform_driver lp8788_adc_driver = {
.probe = lp8788_adc_probe,
- .remove = lp8788_adc_remove,
.driver = {
.name = LP8788_DEV_ADC,
},
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index 3566990ae87d..ceefa4d793cf 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -115,6 +115,23 @@ static const struct iio_info lpc18xx_adc_info = {
.read_raw = lpc18xx_adc_read_raw,
};
+static void lpc18xx_clear_cr_reg(void *data)
+{
+ struct lpc18xx_adc *adc = data;
+
+ writel(0, adc->base + LPC18XX_ADC_CR);
+}
+
+static void lpc18xx_clk_disable(void *clk)
+{
+ clk_disable_unprepare(clk);
+}
+
+static void lpc18xx_regulator_disable(void *vref)
+{
+ regulator_disable(vref);
+}
+
static int lpc18xx_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
@@ -127,7 +144,6 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- platform_set_drvdata(pdev, indio_dev);
adc = iio_priv(indio_dev);
adc->dev = &pdev->dev;
mutex_init(&adc->lock);
@@ -137,19 +153,17 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
return PTR_ERR(adc->base);
adc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(adc->clk)) {
- dev_err(&pdev->dev, "error getting clock\n");
- return PTR_ERR(adc->clk);
- }
+ if (IS_ERR(adc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(adc->clk),
+ "error getting clock\n");
rate = clk_get_rate(adc->clk);
clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET);
adc->vref = devm_regulator_get(&pdev->dev, "vref");
- if (IS_ERR(adc->vref)) {
- dev_err(&pdev->dev, "error getting regulator\n");
- return PTR_ERR(adc->vref);
- }
+ if (IS_ERR(adc->vref))
+ return dev_err_probe(&pdev->dev, PTR_ERR(adc->vref),
+ "error getting regulator\n");
indio_dev->name = dev_name(&pdev->dev);
indio_dev->info = &lpc18xx_adc_info;
@@ -163,44 +177,30 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
return ret;
}
+ ret = devm_add_action_or_reset(&pdev->dev, lpc18xx_regulator_disable, adc->vref);
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(adc->clk);
if (ret) {
dev_err(&pdev->dev, "unable to enable clock\n");
- goto dis_reg;
+ return ret;
}
+ ret = devm_add_action_or_reset(&pdev->dev, lpc18xx_clk_disable,
+ adc->clk);
+ if (ret)
+ return ret;
+
adc->cr_reg = (clkdiv << LPC18XX_ADC_CR_CLKDIV_SHIFT) |
LPC18XX_ADC_CR_PDN;
writel(adc->cr_reg, adc->base + LPC18XX_ADC_CR);
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&pdev->dev, "unable to register device\n");
- goto dis_clk;
- }
-
- return 0;
-
-dis_clk:
- writel(0, adc->base + LPC18XX_ADC_CR);
- clk_disable_unprepare(adc->clk);
-dis_reg:
- regulator_disable(adc->vref);
- return ret;
-}
-
-static int lpc18xx_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct lpc18xx_adc *adc = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- writel(0, adc->base + LPC18XX_ADC_CR);
- clk_disable_unprepare(adc->clk);
- regulator_disable(adc->vref);
+ ret = devm_add_action_or_reset(&pdev->dev, lpc18xx_clear_cr_reg, adc);
+ if (ret)
+ return ret;
- return 0;
+ return devm_iio_device_register(&pdev->dev, indio_dev);
}
static const struct of_device_id lpc18xx_adc_match[] = {
@@ -211,7 +211,6 @@ MODULE_DEVICE_TABLE(of, lpc18xx_adc_match);
static struct platform_driver lpc18xx_adc_driver = {
.probe = lpc18xx_adc_probe,
- .remove = lpc18xx_adc_remove,
.driver = {
.name = "lpc18xx-adc",
.of_match_table = lpc18xx_adc_match,
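
lpc18xx registers three managed actions (regulator off, clock off, CR cleared), and the order matters: devres callbacks run in reverse registration order on unbind, so clearing CR happens first and disabling the regulator last, mirroring the deleted remove() path exactly. A toy demonstration of that ordering guarantee (names hypothetical):

#include <linux/device.h>

static void act_first_registered(void *d)  { /* runs last on unbind */ }
static void act_second_registered(void *d) { /* runs second on unbind */ }
static void act_last_registered(void *d)   { /* runs first on unbind */ }

static int my_probe_order_demo(struct device *dev)
{
	int ret;

	ret = devm_add_action_or_reset(dev, act_first_registered, NULL);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(dev, act_second_registered, NULL);
	if (ret)
		return ret;
	return devm_add_action_or_reset(dev, act_last_registered, NULL);
}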
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index b753658bb41e..4daf1d576c4e 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -60,6 +60,9 @@
#define MAX1027_NAVG_32 (0x03 << 2)
#define MAX1027_AVG_EN BIT(4)
+/* The device achieves 300 ksps, i.e. ~3.33us per conversion, rounded up to 4us */
+#define MAX1027_CONVERSION_UDELAY 4
+
enum max1027_id {
max1027,
max1029,
@@ -172,18 +175,53 @@ static const struct iio_chan_spec max1231_channels[] = {
MAX1X31_CHANNELS(12),
};
+/*
+ * These devices are able to scan from 0 to N, N being the highest voltage
+ * channel requested by the user. The temperature can be included or not,
+ * but cannot be retrieved alone. Based on the below
+ * ->available_scan_masks, the core will select the most appropriate
+ * ->active_scan_mask and the "minimum" number of channels will be
+ * scanned and pushed to the buffers.
+ *
+ * For example, if the user wants channels 1, 4 and 5, all channels from
+ * 0 to 5 will be scanned and pushed to the IIO buffers. The core will then
+ * filter out the unneeded samples based on the ->active_scan_mask that has
+ * been selected and only channels 1, 4 and 5 will be available to the user
+ * in the shared buffer.
+ */
+#define MAX1X27_SCAN_MASK_TEMP BIT(0)
+
+#define MAX1X27_SCAN_MASKS(temp) \
+ GENMASK(1, 1 - (temp)), GENMASK(2, 1 - (temp)), \
+ GENMASK(3, 1 - (temp)), GENMASK(4, 1 - (temp)), \
+ GENMASK(5, 1 - (temp)), GENMASK(6, 1 - (temp)), \
+ GENMASK(7, 1 - (temp)), GENMASK(8, 1 - (temp))
+
+#define MAX1X29_SCAN_MASKS(temp) \
+ MAX1X27_SCAN_MASKS(temp), \
+ GENMASK(9, 1 - (temp)), GENMASK(10, 1 - (temp)), \
+ GENMASK(11, 1 - (temp)), GENMASK(12, 1 - (temp))
+
+#define MAX1X31_SCAN_MASKS(temp) \
+ MAX1X29_SCAN_MASKS(temp), \
+ GENMASK(13, 1 - (temp)), GENMASK(14, 1 - (temp)), \
+ GENMASK(15, 1 - (temp)), GENMASK(16, 1 - (temp))
+
static const unsigned long max1027_available_scan_masks[] = {
- 0x000001ff,
+ MAX1X27_SCAN_MASKS(0),
+ MAX1X27_SCAN_MASKS(1),
0x00000000,
};
static const unsigned long max1029_available_scan_masks[] = {
- 0x00001fff,
+ MAX1X29_SCAN_MASKS(0),
+ MAX1X29_SCAN_MASKS(1),
0x00000000,
};
static const unsigned long max1031_available_scan_masks[] = {
- 0x0001ffff,
+ MAX1X31_SCAN_MASKS(0),
+ MAX1X31_SCAN_MASKS(1),
0x00000000,
};
@@ -232,10 +270,65 @@ struct max1027_state {
struct iio_trigger *trig;
__be16 *buffer;
struct mutex lock;
+ struct completion complete;
u8 reg ____cacheline_aligned;
};
+static int max1027_wait_eoc(struct iio_dev *indio_dev)
+{
+ struct max1027_state *st = iio_priv(indio_dev);
+ unsigned int conversion_time = MAX1027_CONVERSION_UDELAY;
+ int ret;
+
+ if (st->spi->irq) {
+ ret = wait_for_completion_timeout(&st->complete,
+ msecs_to_jiffies(1000));
+ reinit_completion(&st->complete);
+ if (!ret)
+ return -ETIMEDOUT;
+ } else {
+ if (indio_dev->active_scan_mask)
+ conversion_time *= hweight32(*indio_dev->active_scan_mask);
+
+ usleep_range(conversion_time, conversion_time * 2);
+ }
+
+ return 0;
+}
+
+/* Scan from chan 0 to the highest requested channel. Include temperature on demand. */
+static int max1027_configure_chans_and_start(struct iio_dev *indio_dev)
+{
+ struct max1027_state *st = iio_priv(indio_dev);
+
+ st->reg = MAX1027_CONV_REG | MAX1027_SCAN_0_N;
+ st->reg |= MAX1027_CHAN(fls(*indio_dev->active_scan_mask) - 2);
+ if (*indio_dev->active_scan_mask & MAX1X27_SCAN_MASK_TEMP)
+ st->reg |= MAX1027_TEMP;
+
+ return spi_write(st->spi, &st->reg, 1);
+}
+
+static int max1027_enable_trigger(struct iio_dev *indio_dev, bool enable)
+{
+ struct max1027_state *st = iio_priv(indio_dev);
+
+ st->reg = MAX1027_SETUP_REG | MAX1027_REF_MODE2;
+
+ /*
+ * Start acquisition on:
+ * MODE0: external hardware trigger wired to the cnvst input pin
+ * MODE2: conversion register write
+ */
+ if (enable)
+ st->reg |= MAX1027_CKS_MODE0;
+ else
+ st->reg |= MAX1027_CKS_MODE2;
+
+ return spi_write(st->spi, &st->reg, 1);
+}
+
static int max1027_read_single_value(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val)
@@ -243,19 +336,9 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
int ret;
struct max1027_state *st = iio_priv(indio_dev);
- if (iio_buffer_enabled(indio_dev)) {
- dev_warn(&indio_dev->dev, "trigger mode already enabled");
- return -EBUSY;
- }
-
- /* Start acquisition on conversion register write */
- st->reg = MAX1027_SETUP_REG | MAX1027_REF_MODE2 | MAX1027_CKS_MODE2;
- ret = spi_write(st->spi, &st->reg, 1);
- if (ret < 0) {
- dev_err(&indio_dev->dev,
- "Failed to configure setup register\n");
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
return ret;
- }
/* Configure conversion register with the requested chan */
st->reg = MAX1027_CONV_REG | MAX1027_CHAN(chan->channel) |
@@ -266,18 +349,24 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
if (ret < 0) {
dev_err(&indio_dev->dev,
"Failed to configure conversion register\n");
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
/*
* For an unknown reason, when we use the mode "10" (write
* conversion register), the interrupt doesn't occur every time.
- * So we just wait 1 ms.
+ * So we just wait the maximum conversion time and deliver the value.
*/
- mdelay(1);
+ ret = max1027_wait_eoc(indio_dev);
+ if (ret)
+ return ret;
/* Read result */
ret = spi_read(st->spi, st->buffer, (chan->type == IIO_TEMP) ? 4 : 2);
+
+ iio_device_release_direct_mode(indio_dev);
+
if (ret < 0)
return ret;
@@ -327,8 +416,8 @@ static int max1027_read_raw(struct iio_dev *indio_dev,
}
static int max1027_debugfs_reg_access(struct iio_dev *indio_dev,
- unsigned reg, unsigned writeval,
- unsigned *readval)
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
{
struct max1027_state *st = iio_priv(indio_dev);
u8 *val = (u8 *)st->buffer;
@@ -343,61 +432,96 @@ static int max1027_debugfs_reg_access(struct iio_dev *indio_dev,
return spi_write(st->spi, val, 1);
}
-static int max1027_validate_trigger(struct iio_dev *indio_dev,
- struct iio_trigger *trig)
+static int max1027_set_cnvst_trigger_state(struct iio_trigger *trig, bool state)
{
- struct max1027_state *st = iio_priv(indio_dev);
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ int ret;
+
+ /*
+ * In order to disable the convst trigger, start acquisition on
+ * conversion register write, which basically disables triggering
+ * conversions upon cnvst changes and thus has the effect of disabling
+ * the external hardware trigger.
+ */
+ ret = max1027_enable_trigger(indio_dev, state);
+ if (ret)
+ return ret;
- if (st->trig != trig)
- return -EINVAL;
+ if (state) {
+ ret = max1027_configure_chans_and_start(indio_dev);
+ if (ret)
+ return ret;
+ }
return 0;
}
-static int max1027_set_trigger_state(struct iio_trigger *trig, bool state)
+static int max1027_read_scan(struct iio_dev *indio_dev)
{
- struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct max1027_state *st = iio_priv(indio_dev);
+ unsigned int scanned_chans;
int ret;
- if (state) {
- /* Start acquisition on cnvst */
- st->reg = MAX1027_SETUP_REG | MAX1027_CKS_MODE0 |
- MAX1027_REF_MODE2;
- ret = spi_write(st->spi, &st->reg, 1);
- if (ret < 0)
- return ret;
+ scanned_chans = fls(*indio_dev->active_scan_mask) - 1;
+ if (*indio_dev->active_scan_mask & MAX1X27_SCAN_MASK_TEMP)
+ scanned_chans++;
- /* Scan from 0 to max */
- st->reg = MAX1027_CONV_REG | MAX1027_CHAN(0) |
- MAX1027_SCAN_N_M | MAX1027_TEMP;
- ret = spi_write(st->spi, &st->reg, 1);
- if (ret < 0)
- return ret;
- } else {
- /* Start acquisition on conversion register write */
- st->reg = MAX1027_SETUP_REG | MAX1027_CKS_MODE2 |
- MAX1027_REF_MODE2;
- ret = spi_write(st->spi, &st->reg, 1);
- if (ret < 0)
- return ret;
- }
+	/* fill the buffer with all scanned channels */
+ ret = spi_read(st->spi, st->buffer, scanned_chans * 2);
+ if (ret < 0)
+ return ret;
+
+ iio_push_to_buffers(indio_dev, st->buffer);
return 0;
}
+static irqreturn_t max1027_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct max1027_state *st = iio_priv(indio_dev);
+
+ /*
+	 * If buffers are disabled (raw read) or an external trigger is in use,
+	 * we just need to unlock the waiters, which will then handle the data.
+ *
+	 * When using the internal trigger, we must hand off the choice of the
+	 * handler to the core, which will then search the interrupt tree for
+	 * the right handler registered with iio_triggered_buffer_setup()
+ * to execute, as this trigger might very well be used in conjunction
+ * with another device. The core will then call the relevant handler to
+ * perform the data processing step.
+ */
+ if (!iio_buffer_enabled(indio_dev))
+ complete(&st->complete);
+ else
+ iio_trigger_poll(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t max1027_trigger_handler(int irq, void *private)
{
struct iio_poll_func *pf = private;
struct iio_dev *indio_dev = pf->indio_dev;
- struct max1027_state *st = iio_priv(indio_dev);
+ int ret;
- pr_debug("%s(irq=%d, private=0x%p)\n", __func__, irq, private);
+ if (!iio_trigger_using_own(indio_dev)) {
+ ret = max1027_configure_chans_and_start(indio_dev);
+ if (ret)
+ goto out;
- /* fill buffer with all channel */
- spi_read(st->spi, st->buffer, indio_dev->masklength * 2);
+ /* This is a threaded handler, it is fine to wait for an IRQ */
+ ret = max1027_wait_eoc(indio_dev);
+ if (ret)
+ goto out;
+ }
- iio_push_to_buffers(indio_dev, st->buffer);
+ ret = max1027_read_scan(indio_dev);
+out:
+ if (ret)
+ dev_err(&indio_dev->dev,
+ "Cannot read scanned values (%d)\n", ret);
iio_trigger_notify_done(indio_dev->trig);
@@ -406,12 +530,11 @@ static irqreturn_t max1027_trigger_handler(int irq, void *private)
static const struct iio_trigger_ops max1027_trigger_ops = {
.validate_device = &iio_trigger_validate_own_device,
- .set_trigger_state = &max1027_set_trigger_state,
+ .set_trigger_state = &max1027_set_cnvst_trigger_state,
};
static const struct iio_info max1027_info = {
.read_raw = &max1027_read_raw,
- .validate_trigger = &max1027_validate_trigger,
.debugfs_reg_access = &max1027_debugfs_reg_access,
};
@@ -421,10 +544,8 @@ static int max1027_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
struct max1027_state *st;
- pr_debug("%s: probe(spi = 0x%p)\n", __func__, spi);
-
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (indio_dev == NULL) {
+ if (!indio_dev) {
pr_err("Can't allocate iio device\n");
return -ENOMEM;
}
@@ -434,6 +555,7 @@ static int max1027_probe(struct spi_device *spi)
st->info = &max1027_chip_info_tbl[spi_get_device_id(spi)->driver_data];
mutex_init(&st->lock);
+ init_completion(&st->complete);
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &max1027_info;
@@ -443,26 +565,26 @@ static int max1027_probe(struct spi_device *spi)
indio_dev->available_scan_masks = st->info->available_scan_masks;
st->buffer = devm_kmalloc_array(&indio_dev->dev,
- indio_dev->num_channels, 2,
- GFP_KERNEL);
- if (st->buffer == NULL) {
- dev_err(&indio_dev->dev, "Can't allocate buffer\n");
+ indio_dev->num_channels, 2,
+ GFP_KERNEL);
+ if (!st->buffer)
return -ENOMEM;
+
+ /* Enable triggered buffers */
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &max1027_trigger_handler,
+ NULL);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to setup buffer\n");
+ return ret;
}
+ /* If there is an EOC interrupt, register the cnvst hardware trigger */
if (spi->irq) {
- ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
- &iio_pollfunc_store_time,
- &max1027_trigger_handler,
- NULL);
- if (ret < 0) {
- dev_err(&indio_dev->dev, "Failed to setup buffer\n");
- return ret;
- }
-
st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-trigger",
indio_dev->name);
- if (st->trig == NULL) {
+ if (!st->trig) {
ret = -ENOMEM;
dev_err(&indio_dev->dev,
"Failed to allocate iio trigger\n");
@@ -479,12 +601,9 @@ static int max1027_probe(struct spi_device *spi)
return ret;
}
- ret = devm_request_threaded_irq(&spi->dev, spi->irq,
- iio_trigger_generic_data_rdy_poll,
- NULL,
- IRQF_TRIGGER_FALLING,
- spi->dev.driver->name,
- st->trig);
+ ret = devm_request_irq(&spi->dev, spi->irq, max1027_handler,
+ IRQF_TRIGGER_FALLING,
+ spi->dev.driver->name, indio_dev);
if (ret < 0) {
dev_err(&indio_dev->dev, "Failed to allocate IRQ.\n");
return ret;
@@ -507,6 +626,11 @@ static int max1027_probe(struct spi_device *spi)
return ret;
}
+ /* Assume conversion on register write for now */
+ ret = max1027_enable_trigger(indio_dev, false);
+ if (ret)
+ return ret;
+
return devm_iio_device_register(&spi->dev, indio_dev);
}
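
The max1027 scan-mask rework is easiest to see numerically: bit 0 is the temperature channel and voltage channel i sits at bit i+1, so each GENMASK(n, 1) covers voltage channels 0..n-1 and GENMASK(n, 0) adds temperature. A throwaway userspace program (GENMASK re-implemented locally, assuming a 64-bit unsigned long) that prints the max1027 masks:

#include <stdio.h>

/* userspace stand-in for the kernel's GENMASK(); 64-bit long assumed */
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	int n;

	/* MAX1X27_SCAN_MASKS(0): voltage channels only, bits 1..n */
	for (n = 1; n <= 8; n++)
		printf("no temp, chan 0..%d -> 0x%04lx\n", n - 1, GENMASK(n, 1));

	/* MAX1X27_SCAN_MASKS(1): same spans extended down to bit 0 (temp) */
	for (n = 1; n <= 8; n++)
		printf("with temp, chan 0..%d -> 0x%04lx\n", n - 1, GENMASK(n, 0));

	return 0;
}

The IIO core then picks the smallest listed mask that is a superset of the channels the user enabled, which is what makes the scan-from-0-to-N hardware constraint invisible to userspace.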
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 8cec9d949083..a41bc570be21 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -221,10 +221,9 @@ static int max1118_probe(struct spi_device *spi)
if (id->driver_data == max1118) {
adc->reg = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(adc->reg)) {
- dev_err(&spi->dev, "failed to get vref regulator\n");
- return PTR_ERR(adc->reg);
- }
+ if (IS_ERR(adc->reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(adc->reg),
+ "failed to get vref regulator\n");
ret = regulator_enable(adc->reg);
if (ret)
return ret;
diff --git a/drivers/iio/adc/max1241.c b/drivers/iio/adc/max1241.c
index b60f8448f21a..a5afd84af58b 100644
--- a/drivers/iio/adc/max1241.c
+++ b/drivers/iio/adc/max1241.c
@@ -148,10 +148,9 @@ static int max1241_probe(struct spi_device *spi)
mutex_init(&adc->lock);
adc->vdd = devm_regulator_get(dev, "vdd");
- if (IS_ERR(adc->vdd)) {
- dev_err(dev, "failed to get vdd regulator\n");
- return PTR_ERR(adc->vdd);
- }
+ if (IS_ERR(adc->vdd))
+ return dev_err_probe(dev, PTR_ERR(adc->vdd),
+ "failed to get vdd regulator\n");
ret = regulator_enable(adc->vdd);
if (ret)
@@ -164,10 +163,9 @@ static int max1241_probe(struct spi_device *spi)
}
adc->vref = devm_regulator_get(dev, "vref");
- if (IS_ERR(adc->vref)) {
- dev_err(dev, "failed to get vref regulator\n");
- return PTR_ERR(adc->vref);
- }
+ if (IS_ERR(adc->vref))
+ return dev_err_probe(dev, PTR_ERR(adc->vref),
+ "failed to get vref regulator\n");
ret = regulator_enable(adc->vref);
if (ret)
@@ -182,7 +180,8 @@ static int max1241_probe(struct spi_device *spi)
adc->shutdown = devm_gpiod_get_optional(dev, "shutdown",
GPIOD_OUT_HIGH);
if (IS_ERR(adc->shutdown))
- return PTR_ERR(adc->shutdown);
+ return dev_err_probe(dev, PTR_ERR(adc->shutdown),
+ "cannot get shutdown gpio\n");
if (adc->shutdown)
dev_dbg(dev, "shutdown pin passed, low-power mode enabled");
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index f2b576c69949..eef55ed4814a 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1577,6 +1577,11 @@ static const struct of_device_id max1363_of_match[] = {
};
MODULE_DEVICE_TABLE(of, max1363_of_match);
+static void max1363_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int max1363_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1590,7 +1595,8 @@ static int max1363_probe(struct i2c_client *client,
if (!indio_dev)
return -ENOMEM;
- ret = iio_map_array_register(indio_dev, client->dev.platform_data);
+ ret = devm_iio_map_array_register(&client->dev, indio_dev,
+ client->dev.platform_data);
if (ret < 0)
return ret;
@@ -1598,17 +1604,16 @@ static int max1363_probe(struct i2c_client *client,
mutex_init(&st->lock);
st->reg = devm_regulator_get(&client->dev, "vcc");
- if (IS_ERR(st->reg)) {
- ret = PTR_ERR(st->reg);
- goto error_unregister_map;
- }
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
ret = regulator_enable(st->reg);
if (ret)
- goto error_unregister_map;
+ return ret;
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
+ ret = devm_add_action_or_reset(&client->dev, max1363_reg_disable, st->reg);
+ if (ret)
+ return ret;
st->chip_info = device_get_match_data(&client->dev);
if (!st->chip_info)
@@ -1622,13 +1627,17 @@ static int max1363_probe(struct i2c_client *client,
ret = regulator_enable(vref);
if (ret)
- goto error_disable_reg;
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev, max1363_reg_disable, vref);
+ if (ret)
+ return ret;
+
st->vref = vref;
vref_uv = regulator_get_voltage(vref);
- if (vref_uv <= 0) {
- ret = -EINVAL;
- goto error_disable_reg;
- }
+ if (vref_uv <= 0)
+ return -EINVAL;
+
st->vref_uv = vref_uv;
}
@@ -1640,13 +1649,12 @@ static int max1363_probe(struct i2c_client *client,
st->send = max1363_smbus_send;
st->recv = max1363_smbus_recv;
} else {
- ret = -EOPNOTSUPP;
- goto error_disable_reg;
+ return -EOPNOTSUPP;
}
ret = max1363_alloc_scan_masks(indio_dev);
if (ret)
- goto error_disable_reg;
+ return ret;
indio_dev->name = id->name;
indio_dev->channels = st->chip_info->channels;
@@ -1655,12 +1663,12 @@ static int max1363_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
ret = max1363_initial_setup(st);
if (ret < 0)
- goto error_disable_reg;
+ return ret;
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
- &max1363_trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev, NULL,
+ &max1363_trigger_handler, NULL);
if (ret)
- goto error_disable_reg;
+ return ret;
if (client->irq) {
ret = devm_request_threaded_irq(&client->dev, st->client->irq,
@@ -1671,39 +1679,10 @@ static int max1363_probe(struct i2c_client *client,
indio_dev);
if (ret)
- goto error_uninit_buffer;
+ return ret;
}
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto error_uninit_buffer;
-
- return 0;
-
-error_uninit_buffer:
- iio_triggered_buffer_cleanup(indio_dev);
-error_disable_reg:
- if (st->vref)
- regulator_disable(st->vref);
- regulator_disable(st->reg);
-error_unregister_map:
- iio_map_array_unregister(indio_dev);
- return ret;
-}
-
-static int max1363_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct max1363_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- if (st->vref)
- regulator_disable(st->vref);
- regulator_disable(st->reg);
- iio_map_array_unregister(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id max1363_id[] = {
@@ -1756,7 +1735,6 @@ static struct i2c_driver max1363_driver = {
.of_match_table = max1363_of_match,
},
.probe = max1363_probe,
- .remove = max1363_remove,
.id_table = max1363_id,
};
module_i2c_driver(max1363_driver);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 705d5e11a54b..62cc6fb0ef85 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1230,35 +1230,31 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
return ret;
priv->clkin = devm_clk_get(&pdev->dev, "clkin");
- if (IS_ERR(priv->clkin)) {
- dev_err(&pdev->dev, "failed to get clkin\n");
- return PTR_ERR(priv->clkin);
- }
+ if (IS_ERR(priv->clkin))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clkin),
+ "failed to get clkin\n");
priv->core_clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(priv->core_clk)) {
- dev_err(&pdev->dev, "failed to get core clk\n");
- return PTR_ERR(priv->core_clk);
- }
+ if (IS_ERR(priv->core_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->core_clk),
+ "failed to get core clk\n");
priv->adc_clk = devm_clk_get(&pdev->dev, "adc_clk");
if (IS_ERR(priv->adc_clk)) {
- if (PTR_ERR(priv->adc_clk) == -ENOENT) {
+ if (PTR_ERR(priv->adc_clk) == -ENOENT)
priv->adc_clk = NULL;
- } else {
- dev_err(&pdev->dev, "failed to get adc clk\n");
- return PTR_ERR(priv->adc_clk);
- }
+ else
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->adc_clk),
+ "failed to get adc clk\n");
}
priv->adc_sel_clk = devm_clk_get(&pdev->dev, "adc_sel");
if (IS_ERR(priv->adc_sel_clk)) {
- if (PTR_ERR(priv->adc_sel_clk) == -ENOENT) {
+ if (PTR_ERR(priv->adc_sel_clk) == -ENOENT)
priv->adc_sel_clk = NULL;
- } else {
- dev_err(&pdev->dev, "failed to get adc_sel clk\n");
- return PTR_ERR(priv->adc_sel_clk);
- }
+ else
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->adc_sel_clk),
+ "failed to get adc_sel clk\n");
}
/* on pre-GXBB SoCs the SAR ADC itself provides the ADC clock: */
@@ -1269,10 +1265,9 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
}
priv->vref = devm_regulator_get(&pdev->dev, "vref");
- if (IS_ERR(priv->vref)) {
- dev_err(&pdev->dev, "failed to get vref regulator\n");
- return PTR_ERR(priv->vref);
- }
+ if (IS_ERR(priv->vref))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->vref),
+ "failed to get vref regulator\n");
priv->calibscale = MILLION;
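
The meson hunks keep the driver's optional-clock convention: -ENOENT from devm_clk_get() means the clock simply is not described in DT and is replaced with NULL (a NULL clk is a valid no-op for the clk API); any other error is now reported through dev_err_probe(). A sketch with hypothetical names — note the kernel also offers devm_clk_get_optional(), which folds this exact dance into one call:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_get_optional_clk(struct device *dev, struct clk **clk,
			       const char *name)
{
	*clk = devm_clk_get(dev, name);
	if (IS_ERR(*clk)) {
		if (PTR_ERR(*clk) == -ENOENT)
			*clk = NULL;	/* absent in DT: treat as unused */
		else
			return dev_err_probe(dev, PTR_ERR(*clk),
					     "failed to get %s clk\n", name);
	}

	return 0;
}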
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index bb70b51d25b1..976c235f3079 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -428,8 +428,6 @@ static int nau7802_probe(struct i2c_client *client,
st = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
-
indio_dev->name = dev_name(&client->dev);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &nau7802_info;
@@ -495,13 +493,13 @@ static int nau7802_probe(struct i2c_client *client,
* will re-enable them when we need them.
*/
if (client->irq) {
- ret = request_threaded_irq(client->irq,
- NULL,
- nau7802_eoc_trigger,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
- IRQF_NO_AUTOEN,
- client->dev.driver->name,
- indio_dev);
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL,
+ nau7802_eoc_trigger,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
+ client->dev.driver->name,
+ indio_dev);
if (ret) {
/*
* What may happen here is that our IRQ controller is
@@ -526,7 +524,7 @@ static int nau7802_probe(struct i2c_client *client,
ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_CTRL2,
NAU7802_CTRL2_CRS(st->sample_rate));
if (ret)
- goto error_free_irq;
+ return ret;
}
/* Setup the ADC channels available on the board */
@@ -536,36 +534,7 @@ static int nau7802_probe(struct i2c_client *client,
mutex_init(&st->lock);
mutex_init(&st->data_lock);
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(&client->dev, "Couldn't register the device.\n");
- goto error_device_register;
- }
-
- return 0;
-
-error_device_register:
- mutex_destroy(&st->lock);
- mutex_destroy(&st->data_lock);
-error_free_irq:
- if (client->irq)
- free_irq(client->irq, indio_dev);
-
- return ret;
-}
-
-static int nau7802_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct nau7802_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- mutex_destroy(&st->lock);
- mutex_destroy(&st->data_lock);
- if (client->irq)
- free_irq(client->irq, indio_dev);
-
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id nau7802_i2c_id[] = {
@@ -582,7 +551,6 @@ MODULE_DEVICE_TABLE(of, nau7802_dt_ids);
static struct i2c_driver nau7802_driver = {
.probe = nau7802_probe,
- .remove = nau7802_remove,
.id_table = nau7802_i2c_id,
.driver = {
.name = "nau7802",
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index 0610bf254771..21d7eff645c3 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -910,16 +910,15 @@ static int pm8xxx_xoadc_probe(struct platform_device *pdev)
map = dev_get_regmap(dev->parent, NULL);
if (!map) {
dev_err(dev, "parent regmap unavailable.\n");
- return -ENXIO;
+ return -ENODEV;
}
adc->map = map;
/* Bring up regulator */
adc->vref = devm_regulator_get(dev, "xoadc-ref");
- if (IS_ERR(adc->vref)) {
- dev_err(dev, "failed to get XOADC VREF regulator\n");
- return PTR_ERR(adc->vref);
- }
+ if (IS_ERR(adc->vref))
+ return dev_err_probe(dev, PTR_ERR(adc->vref),
+ "failed to get XOADC VREF regulator\n");
ret = regulator_enable(adc->vref);
if (ret) {
dev_err(dev, "failed to enable XOADC VREF regulator\n");
diff --git a/drivers/iio/adc/rn5t618-adc.c b/drivers/iio/adc/rn5t618-adc.c
index c56fccb2c8e1..7d891b4ea461 100644
--- a/drivers/iio/adc/rn5t618-adc.c
+++ b/drivers/iio/adc/rn5t618-adc.c
@@ -197,13 +197,6 @@ static struct iio_map rn5t618_maps[] = {
{ /* sentinel */ }
};
-static void unregister_map(void *data)
-{
- struct iio_dev *iio_dev = (struct iio_dev *) data;
-
- iio_map_array_unregister(iio_dev);
-}
-
static int rn5t618_adc_probe(struct platform_device *pdev)
{
int ret;
@@ -254,11 +247,7 @@ static int rn5t618_adc_probe(struct platform_device *pdev)
return ret;
}
- ret = iio_map_array_register(iio_dev, rn5t618_maps);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(adc->dev, unregister_map, iio_dev);
+ ret = devm_iio_map_array_register(adc->dev, iio_dev, rn5t618_maps);
if (ret < 0)
return ret;
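devm_iio_map_array_register() bundles exactly the register-plus-cleanup pair the old rn5t618 code open-coded with devm_add_action_or_reset(). Roughly (hypothetical names):

#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
#include <linux/iio/machine.h>

static struct iio_map mydrv_maps[] = {
        { .consumer_dev_name = "mydrv-consumer", .consumer_channel = "vin" },
        { /* sentinel */ }
};

static int mydrv_register_maps(struct device *dev, struct iio_dev *iio_dev)
{
        /* Unregisters the map array automatically on unbind, replacing a
         * manual iio_map_array_register() + devm_add_action_or_reset(). */
        return devm_iio_map_array_register(dev, iio_dev, mydrv_maps);
}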
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index a237fe469a30..14b8df4ca9c8 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -319,7 +319,6 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
struct rockchip_saradc *info = NULL;
struct device_node *np = pdev->dev.of_node;
struct iio_dev *indio_dev = NULL;
- struct resource *mem;
const struct of_device_id *match;
int ret;
int irq;
@@ -348,8 +347,7 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
return -EINVAL;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
@@ -362,7 +360,8 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
if (IS_ERR(info->reset)) {
ret = PTR_ERR(info->reset);
if (ret != -ENOENT)
- return ret;
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get saradc-apb\n");
dev_dbg(&pdev->dev, "no reset control found\n");
info->reset = NULL;
@@ -372,7 +371,7 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return irq;
+ return dev_err_probe(&pdev->dev, irq, "failed to get irq\n");
ret = devm_request_irq(&pdev->dev, irq, rockchip_saradc_isr,
0, dev_name(&pdev->dev), info);
@@ -382,23 +381,19 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
}
info->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(info->pclk)) {
- dev_err(&pdev->dev, "failed to get pclk\n");
- return PTR_ERR(info->pclk);
- }
+ if (IS_ERR(info->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->pclk),
+ "failed to get pclk\n");
info->clk = devm_clk_get(&pdev->dev, "saradc");
- if (IS_ERR(info->clk)) {
- dev_err(&pdev->dev, "failed to get adc clock\n");
- return PTR_ERR(info->clk);
- }
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
+ "failed to get adc clock\n");
info->vref = devm_regulator_get(&pdev->dev, "vref");
- if (IS_ERR(info->vref)) {
- dev_err(&pdev->dev, "failed to get regulator, %ld\n",
- PTR_ERR(info->vref));
- return PTR_ERR(info->vref);
- }
+ if (IS_ERR(info->vref))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->vref),
+ "failed to get regulator\n");
if (info->reset)
rockchip_saradc_reset_controller(info->reset);
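devm_platform_ioremap_resource() is a one-call replacement for the platform_get_resource()/devm_ioremap_resource() pair removed above; a sketch:

#include <linux/platform_device.h>

static void __iomem *mydrv_map_regs(struct platform_device *pdev)
{
        /* Looks up IORESOURCE_MEM index 0 and ioremaps it, devm-managed;
         * returns an ERR_PTR() on failure. */
        return devm_platform_ioremap_resource(pdev, 0);
}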
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index c088cb990193..b6e18eb101f7 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -659,6 +659,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->cfg = (const struct stm32_adc_priv_cfg *)
of_match_device(dev->driver->of_match_table, dev)->data;
+ spin_lock_init(&priv->common.lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->common.base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 2322809bfd2f..faedf7a49555 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -102,6 +102,9 @@
#define STM32H7_ADC_CALFACT 0xC4
#define STM32H7_ADC_CALFACT2 0xC8
+/* STM32MP1 - ADC2 instance option register */
+#define STM32MP1_ADC2_OR 0xD0
+
/* STM32H7 - common registers for all ADC instances */
#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08)
@@ -168,23 +171,30 @@ enum stm32h7_adc_dmngt {
#define STM32H7_EOC_MST BIT(2)
/* STM32H7_ADC_CCR - bit fields */
+#define STM32H7_VBATEN BIT(24)
+#define STM32H7_VREFEN BIT(22)
#define STM32H7_PRESC_SHIFT 18
#define STM32H7_PRESC_MASK GENMASK(21, 18)
#define STM32H7_CKMODE_SHIFT 16
#define STM32H7_CKMODE_MASK GENMASK(17, 16)
+/* STM32MP1_ADC2_OR - bit fields */
+#define STM32MP1_VDDCOREEN BIT(0)
+
/**
* struct stm32_adc_common - stm32 ADC driver common data (for all instances)
* @base: control registers base cpu addr
* @phys_base: control registers base physical addr
* @rate: clock rate used for analog circuitry
* @vref_mv: vref voltage (mv)
+ * @lock: spinlock
*/
struct stm32_adc_common {
void __iomem *base;
phys_addr_t phys_base;
unsigned long rate;
int vref_mv;
+ spinlock_t lock; /* lock for common register */
};
#endif
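The new common lock exists because CCR (and the MP1-only option register added above) are shared between ADC instances, so their read-modify-write sequences must be serialized. A plain spin_lock() is sufficient here because the callers run in process context only (the prepare/unprepare paths). A generic sketch of the guarded access:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Sketch: serialized read-modify-write on a register shared between
 * several instances of the same IP block. */
static void mydrv_set_bits_shared(void __iomem *base, u32 reg, u32 bits,
                                  spinlock_t *lock)
{
        spin_lock(lock);
        writel_relaxed(readl_relaxed(base + reg) | bits, base + reg);
        spin_unlock(lock);
}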
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 5088de835bb1..6245434f8377 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
@@ -35,12 +36,13 @@
#define STM32H7_BOOST_CLKRATE 20000000UL
#define STM32_ADC_CH_MAX 20 /* max number of channels */
-#define STM32_ADC_CH_SZ 10 /* max channel name size */
+#define STM32_ADC_CH_SZ 16 /* max channel name size */
#define STM32_ADC_MAX_SQ 16 /* SQ1..SQ16 */
#define STM32_ADC_MAX_SMP 7 /* SMPx range is [0..7] */
#define STM32_ADC_TIMEOUT_US 100000
#define STM32_ADC_TIMEOUT (msecs_to_jiffies(STM32_ADC_TIMEOUT_US / 1000))
#define STM32_ADC_HW_STOP_DELAY_MS 100
+#define STM32_ADC_VREFINT_VOLTAGE 3300
#define STM32_DMA_BUFFER_SIZE PAGE_SIZE
@@ -77,6 +79,30 @@ enum stm32_adc_extsel {
STM32_EXT20,
};
+enum stm32_adc_int_ch {
+ STM32_ADC_INT_CH_NONE = -1,
+ STM32_ADC_INT_CH_VDDCORE,
+ STM32_ADC_INT_CH_VREFINT,
+ STM32_ADC_INT_CH_VBAT,
+ STM32_ADC_INT_CH_NB,
+};
+
+/**
+ * struct stm32_adc_ic - ADC internal channels
+ * @name: name of the internal channel
+ * @idx: internal channel enum index
+ */
+struct stm32_adc_ic {
+ const char *name;
+ u32 idx;
+};
+
+static const struct stm32_adc_ic stm32_adc_ic[STM32_ADC_INT_CH_NB] = {
+ { "vddcore", STM32_ADC_INT_CH_VDDCORE },
+ { "vrefint", STM32_ADC_INT_CH_VREFINT },
+ { "vbat", STM32_ADC_INT_CH_VBAT },
+};
+
/**
* struct stm32_adc_trig_info - ADC trigger info
* @name: name of the trigger, corresponding to its source
@@ -114,6 +140,16 @@ struct stm32_adc_regs {
};
/**
+ * struct stm32_adc_vrefint - stm32 ADC internal reference voltage data
+ * @vrefint_cal: vrefint calibration value from nvmem
+ * @vrefint_data: vrefint actual value
+ */
+struct stm32_adc_vrefint {
+ u32 vrefint_cal;
+ u32 vrefint_data;
+};
+
+/**
* struct stm32_adc_regspec - stm32 registers definition
* @dr: data register offset
* @ier_eoc: interrupt enable register & eocie bitfield
@@ -126,6 +162,9 @@ struct stm32_adc_regs {
* @res: resolution selection register & bitfield
* @smpr: smpr1 & smpr2 registers offset array
* @smp_bits: smpr1 & smpr2 index and bitfields
+ * @or_vdd: option register & vddcore bitfield
+ * @ccr_vbat: common register & vbat bitfield
+ * @ccr_vref: common register & vrefint bitfield
*/
struct stm32_adc_regspec {
const u32 dr;
@@ -139,6 +178,9 @@ struct stm32_adc_regspec {
const struct stm32_adc_regs res;
const u32 smpr[2];
const struct stm32_adc_regs *smp_bits;
+ const struct stm32_adc_regs or_vdd;
+ const struct stm32_adc_regs ccr_vbat;
+ const struct stm32_adc_regs ccr_vref;
};
struct stm32_adc;
@@ -156,6 +198,7 @@ struct stm32_adc;
* @unprepare: optional unprepare routine (disable, power-down)
* @irq_clear: routine to clear irqs
* @smp_cycles: programmable sampling time (ADC clock cycles)
+ * @ts_vrefint_ns: vrefint minimum sampling time in ns
*/
struct stm32_adc_cfg {
const struct stm32_adc_regspec *regs;
@@ -169,6 +212,7 @@ struct stm32_adc_cfg {
void (*unprepare)(struct iio_dev *);
void (*irq_clear)(struct iio_dev *indio_dev, u32 msk);
const unsigned int *smp_cycles;
+ const unsigned int ts_vrefint_ns;
};
/**
@@ -193,7 +237,10 @@ struct stm32_adc_cfg {
* @pcsel: bitmask to preselect channels on some devices
* @smpr_val: sampling time settings (e.g. smpr1 / smpr2)
* @cal: optional calibration data on some devices
+ * @vrefint: internal reference voltage data
* @chan_name: channel name array
+ * @num_diff: number of differential channels
+ * @int_ch: internal channel indexes array
*/
struct stm32_adc {
struct stm32_adc_common *common;
@@ -216,7 +263,10 @@ struct stm32_adc {
u32 pcsel;
u32 smpr_val[2];
struct stm32_adc_calib cal;
+ struct stm32_adc_vrefint vrefint;
char chan_name[STM32_ADC_CH_MAX][STM32_ADC_CH_SZ];
+ u32 num_diff;
+ int int_ch[STM32_ADC_INT_CH_NB];
};
struct stm32_adc_diff_channel {
@@ -449,6 +499,24 @@ static const struct stm32_adc_regspec stm32h7_adc_regspec = {
.smp_bits = stm32h7_smp_bits,
};
+static const struct stm32_adc_regspec stm32mp1_adc_regspec = {
+ .dr = STM32H7_ADC_DR,
+ .ier_eoc = { STM32H7_ADC_IER, STM32H7_EOCIE },
+ .ier_ovr = { STM32H7_ADC_IER, STM32H7_OVRIE },
+ .isr_eoc = { STM32H7_ADC_ISR, STM32H7_EOC },
+ .isr_ovr = { STM32H7_ADC_ISR, STM32H7_OVR },
+ .sqr = stm32h7_sq,
+ .exten = { STM32H7_ADC_CFGR, STM32H7_EXTEN_MASK, STM32H7_EXTEN_SHIFT },
+ .extsel = { STM32H7_ADC_CFGR, STM32H7_EXTSEL_MASK,
+ STM32H7_EXTSEL_SHIFT },
+ .res = { STM32H7_ADC_CFGR, STM32H7_RES_MASK, STM32H7_RES_SHIFT },
+ .smpr = { STM32H7_ADC_SMPR1, STM32H7_ADC_SMPR2 },
+ .smp_bits = stm32h7_smp_bits,
+ .or_vdd = { STM32MP1_ADC2_OR, STM32MP1_VDDCOREEN },
+ .ccr_vbat = { STM32H7_ADC_CCR, STM32H7_VBATEN },
+ .ccr_vref = { STM32H7_ADC_CCR, STM32H7_VREFEN },
+};
+
/*
* STM32 ADC registers access routines
* @adc: stm32 adc instance
@@ -487,6 +555,14 @@ static void stm32_adc_set_bits(struct stm32_adc *adc, u32 reg, u32 bits)
spin_unlock_irqrestore(&adc->lock, flags);
}
+static void stm32_adc_set_bits_common(struct stm32_adc *adc, u32 reg, u32 bits)
+{
+ spin_lock(&adc->common->lock);
+ writel_relaxed(readl_relaxed(adc->common->base + reg) | bits,
+ adc->common->base + reg);
+ spin_unlock(&adc->common->lock);
+}
+
static void stm32_adc_clr_bits(struct stm32_adc *adc, u32 reg, u32 bits)
{
unsigned long flags;
@@ -496,6 +572,14 @@ static void stm32_adc_clr_bits(struct stm32_adc *adc, u32 reg, u32 bits)
spin_unlock_irqrestore(&adc->lock, flags);
}
+static void stm32_adc_clr_bits_common(struct stm32_adc *adc, u32 reg, u32 bits)
+{
+ spin_lock(&adc->common->lock);
+ writel_relaxed(readl_relaxed(adc->common->base + reg) & ~bits,
+ adc->common->base + reg);
+ spin_unlock(&adc->common->lock);
+}
+
/**
* stm32_adc_conv_irq_enable() - Enable end of conversion interrupt
* @adc: stm32 adc instance
@@ -577,6 +661,60 @@ err_clk_dis:
return ret;
}
+static void stm32_adc_int_ch_enable(struct iio_dev *indio_dev)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ u32 i;
+
+ for (i = 0; i < STM32_ADC_INT_CH_NB; i++) {
+ if (adc->int_ch[i] == STM32_ADC_INT_CH_NONE)
+ continue;
+
+ switch (i) {
+ case STM32_ADC_INT_CH_VDDCORE:
+ dev_dbg(&indio_dev->dev, "Enable VDDCore\n");
+ stm32_adc_set_bits(adc, adc->cfg->regs->or_vdd.reg,
+ adc->cfg->regs->or_vdd.mask);
+ break;
+ case STM32_ADC_INT_CH_VREFINT:
+ dev_dbg(&indio_dev->dev, "Enable VREFInt\n");
+ stm32_adc_set_bits_common(adc, adc->cfg->regs->ccr_vref.reg,
+ adc->cfg->regs->ccr_vref.mask);
+ break;
+ case STM32_ADC_INT_CH_VBAT:
+ dev_dbg(&indio_dev->dev, "Enable VBAT\n");
+ stm32_adc_set_bits_common(adc, adc->cfg->regs->ccr_vbat.reg,
+ adc->cfg->regs->ccr_vbat.mask);
+ break;
+ }
+ }
+}
+
+static void stm32_adc_int_ch_disable(struct stm32_adc *adc)
+{
+ u32 i;
+
+ for (i = 0; i < STM32_ADC_INT_CH_NB; i++) {
+ if (adc->int_ch[i] == STM32_ADC_INT_CH_NONE)
+ continue;
+
+ switch (i) {
+ case STM32_ADC_INT_CH_VDDCORE:
+ stm32_adc_clr_bits(adc, adc->cfg->regs->or_vdd.reg,
+ adc->cfg->regs->or_vdd.mask);
+ break;
+ case STM32_ADC_INT_CH_VREFINT:
+ stm32_adc_clr_bits_common(adc, adc->cfg->regs->ccr_vref.reg,
+ adc->cfg->regs->ccr_vref.mask);
+ break;
+ case STM32_ADC_INT_CH_VBAT:
+ stm32_adc_clr_bits_common(adc, adc->cfg->regs->ccr_vbat.reg,
+ adc->cfg->regs->ccr_vbat.mask);
+ break;
+ }
+ }
+}
+
/**
* stm32f4_adc_start_conv() - Start conversions for regular channels.
* @indio_dev: IIO device instance
@@ -945,11 +1083,13 @@ static int stm32h7_adc_prepare(struct iio_dev *indio_dev)
goto pwr_dwn;
calib = ret;
+ stm32_adc_int_ch_enable(indio_dev);
+
stm32_adc_writel(adc, STM32H7_ADC_DIFSEL, adc->difsel);
ret = stm32h7_adc_enable(indio_dev);
if (ret)
- goto pwr_dwn;
+ goto ch_disable;
/* Either restore or read calibration result for future reference */
if (calib)
@@ -965,6 +1105,8 @@ static int stm32h7_adc_prepare(struct iio_dev *indio_dev)
disable:
stm32h7_adc_disable(indio_dev);
+ch_disable:
+ stm32_adc_int_ch_disable(adc);
pwr_dwn:
stm32h7_adc_enter_pwr_down(adc);
@@ -976,6 +1118,7 @@ static void stm32h7_adc_unprepare(struct iio_dev *indio_dev)
struct stm32_adc *adc = iio_priv(indio_dev);
stm32h7_adc_disable(indio_dev);
+ stm32_adc_int_ch_disable(adc);
stm32h7_adc_enter_pwr_down(adc);
}
@@ -1212,6 +1355,7 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
@@ -1219,6 +1363,10 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
ret = stm32_adc_single_conv(indio_dev, chan, val);
else
ret = -EINVAL;
+
+ if (mask == IIO_CHAN_INFO_PROCESSED && adc->vrefint.vrefint_cal)
+ *val = STM32_ADC_VREFINT_VOLTAGE * adc->vrefint.vrefint_cal / *val;
+
iio_device_release_direct_mode(indio_dev);
return ret;
@@ -1657,6 +1805,13 @@ static void stm32_adc_smpr_init(struct stm32_adc *adc, int channel, u32 smp_ns)
u32 period_ns, shift = smpr->shift, mask = smpr->mask;
unsigned int smp, r = smpr->reg;
+ /*
+ * For vrefint channel, ensure that the sampling time cannot
+ * be lower than the one specified in the datasheet
+ */
+ if (channel == adc->int_ch[STM32_ADC_INT_CH_VREFINT])
+ smp_ns = max(smp_ns, adc->cfg->ts_vrefint_ns);
+
/* Determine sampling time (ADC clock cycles) */
period_ns = NSEC_PER_SEC / adc->common->rate;
for (smp = 0; smp <= STM32_ADC_MAX_SMP; smp++)
@@ -1688,7 +1843,10 @@ static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
chan->datasheet_name = name;
chan->scan_index = scan_index;
chan->indexed = 1;
- chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ if (chan->channel == adc->int_ch[STM32_ADC_INT_CH_VREFINT])
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED);
+ else
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET);
chan->scan_type.sign = 'u';
@@ -1706,17 +1864,11 @@ static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
}
}
-static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
+static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm32_adc *adc)
{
struct device_node *node = indio_dev->dev.of_node;
- struct stm32_adc *adc = iio_priv(indio_dev);
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
- struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
- struct property *prop;
- const __be32 *cur;
- struct iio_chan_spec *channels;
- int scan_index = 0, num_channels = 0, num_diff = 0, ret, i;
- u32 val, smp = 0;
+ int num_channels = 0, ret;
ret = of_property_count_u32_elems(node, "st,adc-channels");
if (ret > adc_info->max_channels) {
@@ -1727,24 +1879,13 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
}
ret = of_property_count_elems_of_size(node, "st,adc-diff-channels",
- sizeof(*diff));
+ sizeof(struct stm32_adc_diff_channel));
if (ret > adc_info->max_channels) {
dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
return -EINVAL;
} else if (ret > 0) {
- int size = ret * sizeof(*diff) / sizeof(u32);
-
- num_diff = ret;
+ adc->num_diff = ret;
num_channels += ret;
- ret = of_property_read_u32_array(node, "st,adc-diff-channels",
- (u32 *)diff, size);
- if (ret)
- return ret;
- }
-
- if (!num_channels) {
- dev_err(&indio_dev->dev, "No channels configured\n");
- return -ENODATA;
}
/* Optional sample time is provided either for each, or all channels */
@@ -1754,13 +1895,45 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
return -EINVAL;
}
- if (timestamping)
- num_channels++;
+ return num_channels;
+}
- channels = devm_kcalloc(&indio_dev->dev, num_channels,
- sizeof(struct iio_chan_spec), GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
+static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
+ struct stm32_adc *adc,
+ struct iio_chan_spec *channels)
+{
+ struct device_node *node = indio_dev->dev.of_node;
+ const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
+ struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
+ u32 num_diff = adc->num_diff;
+ int size = num_diff * sizeof(*diff) / sizeof(u32);
+ int scan_index = 0, val, ret, i;
+ struct property *prop;
+ const __be32 *cur;
+ u32 smp = 0;
+
+ if (num_diff) {
+ ret = of_property_read_u32_array(node, "st,adc-diff-channels",
+ (u32 *)diff, size);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Failed to get diff channels %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < num_diff; i++) {
+ if (diff[i].vinp >= adc_info->max_channels ||
+ diff[i].vinn >= adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n",
+ diff[i].vinp, diff[i].vinn);
+ return -EINVAL;
+ }
+
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+ diff[i].vinp, diff[i].vinn,
+ scan_index, true);
+ scan_index++;
+ }
+ }
of_property_for_each_u32(node, "st,adc-channels", prop, cur, val) {
if (val >= adc_info->max_channels) {
@@ -1771,8 +1944,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
/* Channel can't be configured both as single-ended & diff */
for (i = 0; i < num_diff; i++) {
if (val == diff[i].vinp) {
- dev_err(&indio_dev->dev,
- "channel %d miss-configured\n", val);
+ dev_err(&indio_dev->dev, "channel %d misconfigured\n", val);
return -EINVAL;
}
}
@@ -1781,19 +1953,6 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
scan_index++;
}
- for (i = 0; i < num_diff; i++) {
- if (diff[i].vinp >= adc_info->max_channels ||
- diff[i].vinn >= adc_info->max_channels) {
- dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n",
- diff[i].vinp, diff[i].vinn);
- return -EINVAL;
- }
- stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
- diff[i].vinp, diff[i].vinn, scan_index,
- true);
- scan_index++;
- }
-
for (i = 0; i < scan_index; i++) {
/*
* Using of_property_read_u32_index(), smp value will only be
@@ -1801,12 +1960,178 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
* get either no value, 1 shared value for all indexes, or one
* value per channel.
*/
- of_property_read_u32_index(node, "st,min-sample-time-nsecs",
- i, &smp);
+ of_property_read_u32_index(node, "st,min-sample-time-nsecs", i, &smp);
+
/* Prepare sampling time settings */
stm32_adc_smpr_init(adc, channels[i].channel, smp);
}
+ return scan_index;
+}
+
+static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_name,
+ int chan)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ u16 vrefint;
+ int i, ret;
+
+ for (i = 0; i < STM32_ADC_INT_CH_NB; i++) {
+ if (!strncmp(stm32_adc_ic[i].name, ch_name, STM32_ADC_CH_SZ)) {
+ adc->int_ch[i] = chan;
+
+ if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT)
+ continue;
+
+ /* Get calibration data for vrefint channel */
+ ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint);
+ if (ret && ret != -ENOENT) {
+ return dev_err_probe(&indio_dev->dev, ret,
+ "nvmem access error\n");
+ }
+ if (ret == -ENOENT)
+ dev_dbg(&indio_dev->dev, "vrefint calibration not found\n");
+ else
+ adc->vrefint.vrefint_cal = vrefint;
+ }
+ }
+
+ return 0;
+}
+
+static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
+ struct stm32_adc *adc,
+ struct iio_chan_spec *channels)
+{
+ struct device_node *node = indio_dev->dev.of_node;
+ const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
+ struct device_node *child;
+ const char *name;
+ int val, scan_index = 0, ret;
+ bool differential;
+ u32 vin[2];
+
+ for_each_available_child_of_node(node, child) {
+ ret = of_property_read_u32(child, "reg", &val);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Missing channel index %d\n", ret);
+ goto err;
+ }
+
+ ret = of_property_read_string(child, "label", &name);
+ /* label is optional */
+ if (!ret) {
+ if (strlen(name) >= STM32_ADC_CH_SZ) {
+ dev_err(&indio_dev->dev, "Label %s exceeds %d characters\n",
+ name, STM32_ADC_CH_SZ);
+ return -EINVAL;
+ }
+ strncpy(adc->chan_name[val], name, STM32_ADC_CH_SZ);
+ ret = stm32_adc_populate_int_ch(indio_dev, name, val);
+ if (ret)
+ goto err;
+ } else if (ret != -EINVAL) {
+ dev_err(&indio_dev->dev, "Invalid label %d\n", ret);
+ goto err;
+ }
+
+ if (val >= adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ differential = false;
+ ret = of_property_read_u32_array(child, "diff-channels", vin, 2);
+ /* diff-channels is optional */
+ if (!ret) {
+ differential = true;
+ if (vin[0] != val || vin[1] >= adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n",
+ vin[0], vin[1]);
+ ret = -EINVAL;
+ goto err;
+ }
+ } else if (ret != -EINVAL) {
+ dev_err(&indio_dev->dev, "Invalid diff-channels property %d\n", ret);
+ goto err;
+ }
+
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index], val,
+ vin[1], scan_index, differential);
+
+ ret = of_property_read_u32(child, "st,min-sample-time-ns", &val);
+ /* st,min-sample-time-ns is optional */
+ if (!ret) {
+ stm32_adc_smpr_init(adc, channels[scan_index].channel, val);
+ if (differential)
+ stm32_adc_smpr_init(adc, vin[1], val);
+ } else if (ret != -EINVAL) {
+ dev_err(&indio_dev->dev, "Invalid st,min-sample-time-ns property %d\n",
+ ret);
+ goto err;
+ }
+
+ scan_index++;
+ }
+
+ return scan_index;
+
+err:
+ of_node_put(child);
+
+ return ret;
+}
+
+static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
+{
+ struct device_node *node = indio_dev->dev.of_node;
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
+ struct iio_chan_spec *channels;
+ int scan_index = 0, num_channels = 0, ret, i;
+ bool legacy = false;
+
+ for (i = 0; i < STM32_ADC_INT_CH_NB; i++)
+ adc->int_ch[i] = STM32_ADC_INT_CH_NONE;
+
+ num_channels = of_get_available_child_count(node);
+ /* If no channels have been found, fall back to the legacy channel properties. */
+ if (!num_channels) {
+ legacy = true;
+
+ ret = stm32_adc_get_legacy_chan_count(indio_dev, adc);
+ if (!ret) {
+ dev_err(indio_dev->dev.parent, "No channel found\n");
+ return -ENODATA;
+ } else if (ret < 0) {
+ return ret;
+ }
+
+ num_channels = ret;
+ }
+
+ if (num_channels > adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Channel number [%d] exceeds %d\n",
+ num_channels, adc_info->max_channels);
+ return -EINVAL;
+ }
+
+ if (timestamping)
+ num_channels++;
+
+ channels = devm_kcalloc(&indio_dev->dev, num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ if (legacy)
+ ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels);
+ else
+ ret = stm32_adc_generic_chan_init(indio_dev, adc, channels);
+ if (ret < 0)
+ return ret;
+ scan_index = ret;
+
if (timestamping) {
struct iio_chan_spec *timestamp = &channels[scan_index];
@@ -2099,7 +2424,7 @@ static const struct stm32_adc_cfg stm32h7_adc_cfg = {
};
static const struct stm32_adc_cfg stm32mp1_adc_cfg = {
- .regs = &stm32h7_adc_regspec,
+ .regs = &stm32mp1_adc_regspec,
.adc_info = &stm32h7_adc_info,
.trigs = stm32h7_adc_trigs,
.has_vregready = true,
@@ -2109,6 +2434,7 @@ static const struct stm32_adc_cfg stm32mp1_adc_cfg = {
.unprepare = stm32h7_adc_unprepare,
.smp_cycles = stm32h7_adc_smp_cycles,
.irq_clear = stm32h7_adc_irq_clear,
+ .ts_vrefint_ns = 4300,
};
static const struct of_device_id stm32_adc_of_match[] = {
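The IIO_CHAN_INFO_PROCESSED handling above relies on the vrefint factory calibration: the raw calibration word is acquired at 3.3 V, so the reference voltage actually applied can be recovered from any later raw sample. In short (a sketch, assuming a non-zero raw reading):

#include <linux/types.h>

/* vref_mv = 3300 mV * vrefint_cal / raw; raw must be non-zero. */
static u32 mydrv_vrefint_to_mv(u32 raw, u32 vrefint_cal)
{
        return 3300 * vrefint_cal / raw;
}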
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index db902aef2abe..c8e48881c37f 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -75,9 +75,9 @@ struct adc108s102_state {
* rx_buf: |XX|R0|R1|R2|R3|R4|R5|R6|R7|
*
* tx_buf: 8 channel read commands, plus 1 dummy command
- * rx_buf: 1 dummy response, 8 channel responses, plus 64-bit timestamp
+ * rx_buf: 1 dummy response, 8 channel responses
*/
- __be16 rx_buf[13] ____cacheline_aligned;
+ __be16 rx_buf[9] ____cacheline_aligned;
__be16 tx_buf[9] ____cacheline_aligned;
};
@@ -149,9 +149,10 @@ static irqreturn_t adc108s102_trigger_handler(int irq, void *p)
goto out_notify;
/* Skip the dummy response in the first slot */
- iio_push_to_buffers_with_timestamp(indio_dev,
- (u8 *)&st->rx_buf[1],
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts_unaligned(indio_dev,
+ &st->rx_buf[1],
+ st->ring_xfer.len - sizeof(st->rx_buf[1]),
+ iio_get_time_ns(indio_dev));
out_notify:
iio_trigger_notify_done(indio_dev->trig);
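Shrinking rx_buf to 9 words means the scan payload no longer carries the padding and alignment needed for the in-band s64 timestamp, hence the switch to the _unaligned push helper, which bounce-buffers the payload before appending the timestamp. Sketch:

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

static void mydrv_push_scan(struct iio_dev *indio_dev, const void *data,
                            size_t data_sz)
{
        /* Copies data_sz bytes into an internal, correctly aligned scan
         * buffer and appends the timestamp, so the source buffer needs
         * neither 8-byte alignment nor room for the timestamp. */
        iio_push_to_buffers_with_ts_unaligned(indio_dev, data, data_sz,
                                              iio_get_time_ns(indio_dev));
}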
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index 83c1ae07b3e9..8e7adec87755 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -132,6 +132,11 @@ static const struct iio_info adc128_info = {
.read_raw = adc128_read_raw,
};
+static void adc128_disable_regulator(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int adc128_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -151,8 +156,6 @@ static int adc128_probe(struct spi_device *spi)
adc = iio_priv(indio_dev);
adc->spi = spi;
- spi_set_drvdata(spi, indio_dev);
-
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &adc128_info;
@@ -167,29 +170,14 @@ static int adc128_probe(struct spi_device *spi)
ret = regulator_enable(adc->reg);
if (ret < 0)
return ret;
-
- mutex_init(&adc->lock);
-
- ret = iio_device_register(indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, adc128_disable_regulator,
+ adc->reg);
if (ret)
- goto err_disable_regulator;
-
- return 0;
-
-err_disable_regulator:
- regulator_disable(adc->reg);
- return ret;
-}
-
-static int adc128_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adc128 *adc = iio_priv(indio_dev);
+ return ret;
- iio_device_unregister(indio_dev);
- regulator_disable(adc->reg);
+ mutex_init(&adc->lock);
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id adc128_of_match[] = {
@@ -231,7 +219,6 @@ static struct spi_driver adc128_driver = {
.acpi_match_table = ACPI_PTR(adc128_acpi_match),
},
.probe = adc128_probe,
- .remove = adc128_remove,
.id_table = adc128_id,
};
module_spi_driver(adc128_driver);
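The devm_add_action_or_reset() idiom used here (and in ads8344 below) ties a teardown callback to the device's lifetime: it runs on unbind, or immediately if the registration itself fails. A sketch with hypothetical names:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void mydrv_reg_disable(void *reg)
{
        regulator_disable(reg);
}

static int mydrv_enable_vref(struct device *dev, struct regulator *reg)
{
        int ret = regulator_enable(reg);

        if (ret)
                return ret;

        /* Disables the regulator on unbind; if this registration fails,
         * the callback runs immediately, so no manual unwinding. */
        return devm_add_action_or_reset(dev, mydrv_reg_disable, reg);
}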
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index a2b83f0bd526..a7efa3eada2c 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -600,8 +600,8 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->reg = devm_regulator_get(&spi->dev, "vref");
if (IS_ERR(st->reg)) {
- dev_err(&spi->dev, "Failed to get regulator \"vref\"\n");
- ret = PTR_ERR(st->reg);
+ ret = dev_err_probe(&spi->dev, PTR_ERR(st->reg),
+ "Failed to get regulator \"vref\"\n");
goto error_destroy_mutex;
}
diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c
index a345a30d74fa..c96d2a9ba924 100644
--- a/drivers/iio/adc/ti-ads8344.c
+++ b/drivers/iio/adc/ti-ads8344.c
@@ -133,6 +133,11 @@ static const struct iio_info ads8344_info = {
.read_raw = ads8344_read_raw,
};
+static void ads8344_reg_disable(void *data)
+{
+ regulator_disable(data);
+}
+
static int ads8344_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -161,26 +166,11 @@ static int ads8344_probe(struct spi_device *spi)
if (ret)
return ret;
- spi_set_drvdata(spi, indio_dev);
-
- ret = iio_device_register(indio_dev);
- if (ret) {
- regulator_disable(adc->reg);
+ ret = devm_add_action_or_reset(&spi->dev, ads8344_reg_disable, adc->reg);
+ if (ret)
return ret;
- }
-
- return 0;
-}
-
-static int ads8344_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ads8344 *adc = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- regulator_disable(adc->reg);
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id ads8344_of_match[] = {
@@ -195,7 +185,6 @@ static struct spi_driver ads8344_driver = {
.of_match_table = ads8344_of_match,
},
.probe = ads8344_probe,
- .remove = ads8344_remove,
};
module_spi_driver(ads8344_driver);
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 170950d5dd49..d84ae6b008c1 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -398,7 +398,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev,
priv->xfer.len = size;
priv->time_per_scan_us = size * 8 * priv->time_per_bit_ns / NSEC_PER_USEC;
- if (priv->scan_interval_us > priv->time_per_scan_us)
+ if (priv->scan_interval_us < priv->time_per_scan_us)
dev_warn(&priv->spi->dev, "The scan interval (%d) is less then calculated scan time (%d)\n",
priv->scan_interval_us, priv->time_per_scan_us);
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index c6416ad795ca..afdb59e0b526 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -900,7 +900,7 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
ret = pdata->calibrate(gpadc);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to read calibration registers\n");
+ dev_err(dev, "failed to read calibration registers\n");
return ret;
}
@@ -914,14 +914,14 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
ret = twl6030_gpadc_enable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable GPADC interrupt\n");
+ dev_err(dev, "failed to enable GPADC interrupt\n");
return ret;
}
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, TWL6030_GPADCS,
TWL6030_REG_TOGGLE1);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable GPADC module\n");
+ dev_err(dev, "failed to enable GPADC module\n");
return ret;
}
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 198d2916266d..83bea5ef765d 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1332,7 +1332,6 @@ static int xadc_probe(struct platform_device *pdev)
xadc = iio_priv(indio_dev);
xadc->ops = id->data;
- xadc->irq = irq;
init_completion(&xadc->completion);
mutex_init(&xadc->mutex);
spin_lock_init(&xadc->lock);
@@ -1397,7 +1396,7 @@ static int xadc_probe(struct platform_device *pdev)
}
}
- ret = devm_request_irq(dev, xadc->irq, xadc->ops->interrupt_handler, 0,
+ ret = devm_request_irq(dev, irq, xadc->ops->interrupt_handler, 0,
dev_name(dev), indio_dev);
if (ret)
return ret;
@@ -1407,7 +1406,7 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = xadc->ops->setup(pdev, indio_dev, xadc->irq);
+ ret = xadc->ops->setup(pdev, indio_dev, irq);
if (ret)
return ret;
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 8b80195725e9..7d78ce698967 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -67,7 +67,6 @@ struct xadc {
spinlock_t lock;
struct completion completion;
- int irq;
};
enum xadc_type {
diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
index f77c4538141e..8d4fc97d1005 100644
--- a/drivers/iio/buffer/industrialio-triggered-buffer.c
+++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
@@ -19,6 +19,7 @@
* @indio_dev: IIO device structure
* @h: Function which will be used as pollfunc top half
* @thread: Function which will be used as pollfunc bottom half
+ * @direction: Direction of the data stream (in/out).
* @setup_ops: Buffer setup functions to use for this device.
* If NULL the default setup functions for triggered
* buffers will be used.
@@ -38,6 +39,7 @@
int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
+ enum iio_buffer_direction direction,
const struct iio_buffer_setup_ops *setup_ops,
const struct attribute **buffer_attrs)
{
@@ -68,6 +70,7 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
+ buffer->direction = direction;
buffer->attrs = buffer_attrs;
ret = iio_device_attach_buffer(indio_dev, buffer);
@@ -105,13 +108,14 @@ int devm_iio_triggered_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
+ enum iio_buffer_direction direction,
const struct iio_buffer_setup_ops *ops,
const struct attribute **buffer_attrs)
{
int ret;
- ret = iio_triggered_buffer_setup_ext(indio_dev, h, thread, ops,
- buffer_attrs);
+ ret = iio_triggered_buffer_setup_ext(indio_dev, h, thread, direction,
+ ops, buffer_attrs);
if (ret)
return ret;
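The new direction argument threads through to buffer->direction so a triggered buffer can now be declared as an output (device-consuming) buffer; existing capture drivers simply pass IIO_BUFFER_DIRECTION_IN. A sketch of a caller:

#include <linux/interrupt.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>

static int mydrv_setup_buffer(struct device *dev, struct iio_dev *indio_dev,
                              irqreturn_t (*thread)(int irq, void *p))
{
        /* Capture path: data flows from device to userspace. Output
         * (e.g. DAC) drivers would pass IIO_BUFFER_DIRECTION_OUT. */
        return devm_iio_triggered_buffer_setup_ext(dev, indio_dev, NULL,
                                                   thread,
                                                   IIO_BUFFER_DIRECTION_IN,
                                                   NULL, NULL);
}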
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 516eb3465de1..416d35a61ae2 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -138,10 +138,60 @@ static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
kfree(kf);
}
+static size_t iio_kfifo_buf_space_available(struct iio_buffer *r)
+{
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ size_t avail;
+
+ mutex_lock(&kf->user_lock);
+ avail = kfifo_avail(&kf->kf);
+ mutex_unlock(&kf->user_lock);
+
+ return avail;
+}
+
+static int iio_kfifo_remove_from(struct iio_buffer *r, void *data)
+{
+ int ret;
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+
+ if (kfifo_size(&kf->kf) < 1)
+ return -EBUSY;
+
+ ret = kfifo_out(&kf->kf, data, 1);
+ if (ret != 1)
+ return -EBUSY;
+
+ wake_up_interruptible_poll(&r->pollq, EPOLLOUT | EPOLLWRNORM);
+
+ return 0;
+}
+
+static int iio_kfifo_write(struct iio_buffer *r, size_t n,
+ const char __user *buf)
+{
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ int ret, copied;
+
+ mutex_lock(&kf->user_lock);
+ if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
+ ret = -EINVAL;
+ else
+ ret = kfifo_from_user(&kf->kf, buf, n, &copied);
+ mutex_unlock(&kf->user_lock);
+ if (ret)
+ return ret;
+
+ return copied;
+}
+
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.store_to = &iio_store_to_kfifo,
.read = &iio_read_kfifo,
.data_available = iio_kfifo_buf_data_available,
+ .remove_from = &iio_kfifo_remove_from,
+ .write = &iio_kfifo_write,
+ .space_available = &iio_kfifo_buf_space_available,
.request_update = &iio_request_update_kfifo,
.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
.set_length = &iio_set_length_kfifo,
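With these ops in place, userspace write()s scans into the kfifo through ->write(), and the driver drains them one scan at a time via ->remove_from(), typically from a trigger handler; iio_pop_from_buffer(), added in the same output-buffer series, is the wrapper drivers are expected to call. Sketch:

#include <linux/iio/buffer.h>
#include <linux/types.h>

/* Sketch: pull one scan from an output buffer on each trigger. */
static int mydrv_pop_scan(struct iio_buffer *buffer, u16 *scan)
{
        /* For kfifo buffers this lands in iio_kfifo_remove_from() and
         * wakes writers waiting for space (EPOLLOUT). */
        return iio_pop_from_buffer(buffer, scan);
}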
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index c03667e62732..c30657e10ee1 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -118,6 +118,19 @@ config SCD30_SERIAL
To compile this driver as a module, choose M here: the module will
be called scd30_serial.
+config SCD4X
+ tristate "SCD4X carbon dioxide sensor driver"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ depends on I2C
+ select CRC8
+ help
+ Say Y here to build support for the Sensirion SCD4X sensor with carbon
+ dioxide, relative humidity and temperature sensing capabilities.
+
+ To compile this driver as a module, choose M here: the module will
+ be called scd4x.
+
config SENSIRION_SGP30
tristate "Sensirion SGPxx gas sensors"
depends on I2C
@@ -170,6 +183,17 @@ config SPS30_SERIAL
To compile this driver as a module, choose M here: the module will
be called sps30_serial.
+config SENSEAIR_SUNRISE_CO2
+ tristate "Senseair Sunrise 006-0-0007 CO2 sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here to build support for Senseair Sunrise 006-0-0007 CO2
+ sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sunrise_co2.
+
config VZ89X
tristate "SGX Sensortech MiCS VZ89X VOC sensor"
depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index d07af581f234..a11e777a7a00 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_PMS7003) += pms7003.o
obj-$(CONFIG_SCD30_CORE) += scd30_core.o
obj-$(CONFIG_SCD30_I2C) += scd30_i2c.o
obj-$(CONFIG_SCD30_SERIAL) += scd30_serial.o
+obj-$(CONFIG_SCD4X) += scd4x.o
+obj-$(CONFIG_SENSEAIR_SUNRISE_CO2) += sunrise_co2.o
obj-$(CONFIG_SENSIRION_SGP30) += sgp30.o
obj-$(CONFIG_SENSIRION_SGP40) += sgp40.o
obj-$(CONFIG_SPS30) += sps30.o
diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c
new file mode 100644
index 000000000000..267bc3c05338
--- /dev/null
+++ b/drivers/iio/chemical/scd4x.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sensirion SCD4X carbon dioxide sensor i2c driver
+ *
+ * Copyright (C) 2021 Protonic Holland
+ * Author: Roan van Dijk <roan@protonic.nl>
+ *
+ * I2C slave address: 0x62
+ *
+ * Datasheets:
+ * https://www.sensirion.com/file/datasheet_scd4x
+ */
+
+#include <asm/unaligned.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/types.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define SCD4X_CRC8_POLYNOMIAL 0x31
+#define SCD4X_TIMEOUT_ERR 1000
+#define SCD4X_READ_BUF_SIZE 9
+#define SCD4X_COMMAND_BUF_SIZE 2
+#define SCD4X_WRITE_BUF_SIZE 5
+#define SCD4X_FRC_MIN_PPM 0
+#define SCD4X_FRC_MAX_PPM 2000
+#define SCD4X_READY_MASK 0x01
+
+/* SCD4X commands */
+enum scd4x_cmd {
+ CMD_START_MEAS = 0x21b1,
+ CMD_READ_MEAS = 0xec05,
+ CMD_STOP_MEAS = 0x3f86,
+ CMD_SET_TEMP_OFFSET = 0x241d,
+ CMD_GET_TEMP_OFFSET = 0x2318,
+ CMD_FRC = 0x362f,
+ CMD_SET_ASC = 0x2416,
+ CMD_GET_ASC = 0x2313,
+ CMD_GET_DATA_READY = 0xe4b8,
+};
+
+enum scd4x_channel_idx {
+ SCD4X_CO2,
+ SCD4X_TEMP,
+ SCD4X_HR,
+};
+
+struct scd4x_state {
+ struct i2c_client *client;
+ /* maintain access to device, to prevent concurrent reads/writes */
+ struct mutex lock;
+ struct regulator *vdd;
+};
+
+DECLARE_CRC8_TABLE(scd4x_crc8_table);
+
+static int scd4x_i2c_xfer(struct scd4x_state *state, char *txbuf, int txsize,
+ char *rxbuf, int rxsize)
+{
+ struct i2c_client *client = state->client;
+ int ret;
+
+ ret = i2c_master_send(client, txbuf, txsize);
+
+ if (ret < 0)
+ return ret;
+ if (ret != txsize)
+ return -EIO;
+
+ if (rxsize == 0)
+ return 0;
+
+ ret = i2c_master_recv(client, rxbuf, rxsize);
+ if (ret < 0)
+ return ret;
+ if (ret != rxsize)
+ return -EIO;
+
+ return 0;
+}
+
+static int scd4x_send_command(struct scd4x_state *state, enum scd4x_cmd cmd)
+{
+ char buf[SCD4X_COMMAND_BUF_SIZE];
+ int ret;
+
+ /*
+ * Measurement needs to be stopped before sending commands.
+ * Except for the stop and start commands themselves.
+ */
+ if ((cmd != CMD_STOP_MEAS) && (cmd != CMD_START_MEAS)) {
+ ret = scd4x_send_command(state, CMD_STOP_MEAS);
+ if (ret)
+ return ret;
+
+ /* execution time for stopping measurement */
+ msleep_interruptible(500);
+ }
+
+ put_unaligned_be16(cmd, buf);
+ ret = scd4x_i2c_xfer(state, buf, 2, buf, 0);
+ if (ret)
+ return ret;
+
+ if ((cmd != CMD_STOP_MEAS) && (cmd != CMD_START_MEAS)) {
+ ret = scd4x_send_command(state, CMD_START_MEAS);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scd4x_read(struct scd4x_state *state, enum scd4x_cmd cmd,
+ void *response, int response_sz)
+{
+ struct i2c_client *client = state->client;
+ char buf[SCD4X_READ_BUF_SIZE];
+ char *rsp = response;
+ int i, ret;
+ char crc;
+
+ /*
+ * Measurement needs to be stopped before sending commands.
+ * Except for the read-measurement and data-ready commands.
+ */
+ if ((cmd != CMD_GET_DATA_READY) && (cmd != CMD_READ_MEAS)) {
+ ret = scd4x_send_command(state, CMD_STOP_MEAS);
+ if (ret)
+ return ret;
+
+ /* execution time for stopping measurement */
+ msleep_interruptible(500);
+ }
+
+ /* CRC byte for every 2 bytes of data */
+ response_sz += response_sz / 2;
+
+ put_unaligned_be16(cmd, buf);
+ ret = scd4x_i2c_xfer(state, buf, 2, buf, response_sz);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < response_sz; i += 3) {
+ crc = crc8(scd4x_crc8_table, buf + i, 2, CRC8_INIT_VALUE);
+ if (crc != buf[i + 2]) {
+ dev_err(&client->dev, "CRC error\n");
+ return -EIO;
+ }
+
+ *rsp++ = buf[i];
+ *rsp++ = buf[i + 1];
+ }
+
+ /* start measurement */
+ if ((cmd != CMD_GET_DATA_READY) && (cmd != CMD_READ_MEAS)) {
+ ret = scd4x_send_command(state, CMD_START_MEAS);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scd4x_write(struct scd4x_state *state, enum scd4x_cmd cmd, uint16_t arg)
+{
+ char buf[SCD4X_WRITE_BUF_SIZE];
+ int ret;
+ char crc;
+
+ put_unaligned_be16(cmd, buf);
+ put_unaligned_be16(arg, buf + 2);
+
+ crc = crc8(scd4x_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
+ buf[4] = crc;
+
+ /* measurement needs to be stopped before sending commands */
+ ret = scd4x_send_command(state, CMD_STOP_MEAS);
+ if (ret)
+ return ret;
+
+ /* execution time */
+ msleep_interruptible(500);
+
+ ret = scd4x_i2c_xfer(state, buf, SCD4X_WRITE_BUF_SIZE, buf, 0);
+ if (ret)
+ return ret;
+
+ /* start measurement, except for forced calibration command */
+ if (cmd != CMD_FRC) {
+ ret = scd4x_send_command(state, CMD_START_MEAS);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scd4x_write_and_fetch(struct scd4x_state *state, enum scd4x_cmd cmd,
+ uint16_t arg, void *response, int response_sz)
+{
+ struct i2c_client *client = state->client;
+ char buf[SCD4X_READ_BUF_SIZE];
+ char *rsp = response;
+ int i, ret;
+ char crc;
+
+ ret = scd4x_write(state, cmd, arg);
+ if (ret)
+ goto err;
+
+ /* execution time */
+ msleep_interruptible(400);
+
+ /* CRC byte for every 2 bytes of data */
+ response_sz += response_sz / 2;
+
+ ret = i2c_master_recv(client, buf, response_sz);
+ if (ret < 0)
+ goto err;
+ if (ret != response_sz) {
+ ret = -EIO;
+ goto err;
+ }
+
+ for (i = 0; i < response_sz; i += 3) {
+ crc = crc8(scd4x_crc8_table, buf + i, 2, CRC8_INIT_VALUE);
+ if (crc != buf[i + 2]) {
+ dev_err(&client->dev, "CRC error\n");
+ ret = -EIO;
+ goto err;
+ }
+
+ *rsp++ = buf[i];
+ *rsp++ = buf[i + 1];
+ }
+
+ return scd4x_send_command(state, CMD_START_MEAS);
+
+err:
+ /*
+ * On error, try to restart the measurement to put the sensor
+ * back into continuous measurement mode.
+ */
+ scd4x_send_command(state, CMD_START_MEAS);
+
+ return ret;
+}
+
+static int scd4x_read_meas(struct scd4x_state *state, uint16_t *meas)
+{
+ int i, ret;
+ __be16 buf[3];
+
+ ret = scd4x_read(state, CMD_READ_MEAS, buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ meas[i] = be16_to_cpu(buf[i]);
+
+ return 0;
+}
+
+static int scd4x_wait_meas_poll(struct scd4x_state *state)
+{
+ struct i2c_client *client = state->client;
+ int tries = 6;
+ int ret;
+
+ do {
+ __be16 bval;
+ uint16_t val;
+
+ ret = scd4x_read(state, CMD_GET_DATA_READY, &bval, sizeof(bval));
+ if (ret)
+ return -EIO;
+ val = be16_to_cpu(bval);
+
+ /* new measurement available */
+ if (val & 0x7FF)
+ return 0;
+
+ msleep_interruptible(1000);
+ } while (--tries);
+
+ /* try to start sensor on timeout */
+ ret = scd4x_send_command(state, CMD_START_MEAS);
+ if (ret)
+ dev_err(&client->dev, "failed to start measurement: %d\n", ret);
+
+ return -ETIMEDOUT;
+}
+
+static int scd4x_read_poll(struct scd4x_state *state, uint16_t *buf)
+{
+ int ret;
+
+ ret = scd4x_wait_meas_poll(state);
+ if (ret)
+ return ret;
+
+ return scd4x_read_meas(state, buf);
+}
+
+static int scd4x_read_channel(struct scd4x_state *state, int chan)
+{
+ int ret;
+ uint16_t buf[3];
+
+ ret = scd4x_read_poll(state, buf);
+ if (ret)
+ return ret;
+
+ return buf[chan];
+}
+
+static int scd4x_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct scd4x_state *state = iio_priv(indio_dev);
+ int ret;
+ __be16 tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&state->lock);
+ ret = scd4x_read_channel(state, chan->address);
+ mutex_unlock(&state->lock);
+
+ iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_CONCENTRATION) {
+ *val = 0;
+ *val2 = 100;
+ return IIO_VAL_INT_PLUS_MICRO;
+ } else if (chan->type == IIO_TEMP) {
+ *val = 175000;
+ *val2 = 65536;
+ return IIO_VAL_FRACTIONAL;
+ } else if (chan->type == IIO_HUMIDITYRELATIVE) {
+ *val = 100000;
+ *val2 = 65536;
+ return IIO_VAL_FRACTIONAL;
+ }
+ return -EINVAL;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -16852;
+ *val2 = 114286;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ mutex_lock(&state->lock);
+ ret = scd4x_read(state, CMD_GET_TEMP_OFFSET, &tmp, sizeof(tmp));
+ mutex_unlock(&state->lock);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(tmp);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int scd4x_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct scd4x_state *state = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ mutex_lock(&state->lock);
+ ret = scd4x_write(state, CMD_SET_TEMP_OFFSET, val);
+ mutex_unlock(&state->lock);
+
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t calibration_auto_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct scd4x_state *state = iio_priv(indio_dev);
+ int ret;
+ __be16 bval;
+ u16 val;
+
+ mutex_lock(&state->lock);
+ ret = scd4x_read(state, CMD_GET_ASC, &bval, sizeof(bval));
+ mutex_unlock(&state->lock);
+ if (ret) {
+ dev_err(dev, "failed to read automatic calibration");
+ return ret;
+ }
+
+ val = (be16_to_cpu(bval) & SCD4X_READY_MASK) ? 1 : 0;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t calibration_auto_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct scd4x_state *state = iio_priv(indio_dev);
+ bool val;
+ int ret;
+ uint16_t value;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ value = val;
+
+ mutex_lock(&state->lock);
+ ret = scd4x_write(state, CMD_SET_ASC, value);
+ mutex_unlock(&state->lock);
+ if (ret)
+ dev_err(dev, "failed to set automatic calibration");
+
+ return ret ?: len;
+}
+
+static ssize_t calibration_forced_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct scd4x_state *state = iio_priv(indio_dev);
+ uint16_t val, arg;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &arg);
+ if (ret)
+ return ret;
+
+ if (arg < SCD4X_FRC_MIN_PPM || arg > SCD4X_FRC_MAX_PPM)
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ ret = scd4x_write_and_fetch(state, CMD_FRC, arg, &val, sizeof(val));
+ mutex_unlock(&state->lock);
+
+ if (ret)
+ return ret;
+
+ if (val == 0xff) {
+ dev_err(dev, "forced calibration has failed\n");
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR_RW(calibration_auto_enable, 0);
+static IIO_DEVICE_ATTR_WO(calibration_forced_value, 0);
+
+static IIO_CONST_ATTR(calibration_forced_value_available,
+ __stringify([SCD4X_FRC_MIN_PPM 1 SCD4X_FRC_MAX_PPM]));
+
+static struct attribute *scd4x_attrs[] = {
+ &iio_dev_attr_calibration_auto_enable.dev_attr.attr,
+ &iio_dev_attr_calibration_forced_value.dev_attr.attr,
+ &iio_const_attr_calibration_forced_value_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group scd4x_attr_group = {
+ .attrs = scd4x_attrs,
+};
+
+static const struct iio_info scd4x_info = {
+ .attrs = &scd4x_attr_group,
+ .read_raw = scd4x_read_raw,
+ .write_raw = scd4x_write_raw,
+};
+
+static const struct iio_chan_spec scd4x_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_CO2,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = SCD4X_CO2,
+ .scan_index = SCD4X_CO2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .address = SCD4X_TEMP,
+ .scan_index = SCD4X_TEMP,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = SCD4X_HR,
+ .scan_index = SCD4X_HR,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+};
+
+static int __maybe_unused scd4x_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct scd4x_state *state = iio_priv(indio_dev);
+ int ret;
+
+ ret = scd4x_send_command(state, CMD_STOP_MEAS);
+ if (ret)
+ return ret;
+
+ return regulator_disable(state->vdd);
+}
+
+static int __maybe_unused scd4x_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct scd4x_state *state = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(state->vdd);
+ if (ret)
+ return ret;
+
+ return scd4x_send_command(state, CMD_START_MEAS);
+}
+
+static __maybe_unused SIMPLE_DEV_PM_OPS(scd4x_pm_ops, scd4x_suspend, scd4x_resume);
+
+static void scd4x_stop_meas(void *state)
+{
+ scd4x_send_command(state, CMD_STOP_MEAS);
+}
+
+static void scd4x_disable_regulator(void *data)
+{
+ struct scd4x_state *state = data;
+
+ regulator_disable(state->vdd);
+}
+
+static irqreturn_t scd4x_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct scd4x_state *state = iio_priv(indio_dev);
+ struct {
+ uint16_t data[3];
+ int64_t ts __aligned(8);
+ } scan;
+ int ret;
+
+ memset(&scan, 0, sizeof(scan));
+ mutex_lock(&state->lock);
+ ret = scd4x_read_poll(state, scan.data);
+ mutex_unlock(&state->lock);
+ if (ret)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev));
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int scd4x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ static const unsigned long scd4x_scan_masks[] = { 0x07, 0x00 };
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct scd4x_state *state;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ state = iio_priv(indio_dev);
+ mutex_init(&state->lock);
+ state->client = client;
+ crc8_populate_msb(scd4x_crc8_table, SCD4X_CRC8_POLYNOMIAL);
+
+ indio_dev->info = &scd4x_info;
+ indio_dev->name = client->name;
+ indio_dev->channels = scd4x_channels;
+ indio_dev->num_channels = ARRAY_SIZE(scd4x_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->available_scan_masks = scd4x_scan_masks;
+
+ state->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(state->vdd))
+ return dev_err_probe(dev, PTR_ERR(state->vdd), "failed to get regulator\n");
+
+ ret = regulator_enable(state->vdd);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, scd4x_disable_regulator, state);
+ if (ret)
+ return ret;
+
+ ret = scd4x_send_command(state, CMD_STOP_MEAS);
+ if (ret) {
+ dev_err(dev, "failed to stop measurement: %d\n", ret);
+ return ret;
+ }
+
+ /* execution time */
+ msleep_interruptible(500);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, scd4x_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ ret = scd4x_send_command(state, CMD_START_MEAS);
+ if (ret) {
+ dev_err(dev, "failed to start measurement: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, scd4x_stop_meas, state);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id scd4x_dt_ids[] = {
+ { .compatible = "sensirion,scd40" },
+ { .compatible = "sensirion,scd41" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, scd4x_dt_ids);
+
+static struct i2c_driver scd4x_i2c_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = scd4x_dt_ids,
+ .pm = &scd4x_pm_ops
+ },
+ .probe = scd4x_probe,
+};
+module_i2c_driver(scd4x_i2c_driver);
+
+MODULE_AUTHOR("Roan van Dijk <roan@protonic.nl>");
+MODULE_DESCRIPTION("Sensirion SCD4X carbon dioxide sensor core driver");
+MODULE_LICENSE("GPL v2");
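The SCD4X wire format pairs every 16-bit word with a CRC-8 (polynomial 0x31, init 0xFF), which is why the read paths above step through the buffer in triplets. A sketch of the per-word check using the kernel's crc8 helper (hypothetical names):

#include <linux/crc8.h>
#include <linux/types.h>

DECLARE_CRC8_TABLE(mydrv_crc8_table);

/* Call once before use: crc8_populate_msb(mydrv_crc8_table, 0x31); */
static bool mydrv_word_ok(u8 *triplet)
{
        /* triplet = two data bytes followed by their CRC byte. */
        return crc8(mydrv_crc8_table, triplet, 2, CRC8_INIT_VALUE) ==
               triplet[2];
}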
diff --git a/drivers/iio/chemical/sunrise_co2.c b/drivers/iio/chemical/sunrise_co2.c
new file mode 100644
index 000000000000..233bd0f379c9
--- /dev/null
+++ b/drivers/iio/chemical/sunrise_co2.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Senseair Sunrise 006-0-0007 CO2 sensor driver.
+ *
+ * Copyright (C) 2021 Jacopo Mondi
+ *
+ * List of features not yet supported by the driver:
+ * - controllable EN pin
+ * - single-shot operations using the nDRY pin
+ * - ABC/target calibration
+ */
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/time64.h>
+
+#include <linux/iio/iio.h>
+
+#define DRIVER_NAME "sunrise_co2"
+
+#define SUNRISE_ERROR_STATUS_REG 0x00
+#define SUNRISE_CO2_FILTERED_COMP_REG 0x06
+#define SUNRISE_CHIP_TEMPERATURE_REG 0x08
+#define SUNRISE_CALIBRATION_STATUS_REG 0x81
+#define SUNRISE_CALIBRATION_COMMAND_REG 0x82
+#define SUNRISE_CALIBRATION_FACTORY_CMD 0x7c02
+#define SUNRISE_CALIBRATION_BACKGROUND_CMD 0x7c06
+/*
+ * The calibration timeout is not characterized in the datasheet.
+ * Use 30 seconds as a reasonable upper limit.
+ */
+#define SUNRISE_CALIBRATION_TIMEOUT_US (30 * USEC_PER_SEC)
+
+struct sunrise_dev {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ /* Protects access to IIO attributes. */
+ struct mutex lock;
+ bool ignore_nak;
+};
+
+/* Custom regmap read/write operations: perform unlocked access to the i2c bus. */
+
+static int sunrise_regmap_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf, size_t val_size)
+{
+ struct i2c_client *client = context;
+ struct sunrise_dev *sunrise = i2c_get_clientdata(client);
+ union i2c_smbus_data data;
+ int ret;
+
+ if (reg_size != 1 || !val_size)
+ return -EINVAL;
+
+ memset(&data, 0, sizeof(data));
+ data.block[0] = val_size;
+
+ /*
+ * Wake up the sensor by sending its address: START, sensor address,
+ * STOP. The sensor will not ACK this byte.
+ *
+ * The chip enters a low-power state after 15 ms without
+ * communication or after a complete read/write sequence.
+ */
+ __i2c_smbus_xfer(client->adapter, client->addr,
+ sunrise->ignore_nak ? I2C_M_IGNORE_NAK : 0,
+ I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE_DATA, &data);
+
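+ /* Give the sensor time to wake up before the real transfer. */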
+ usleep_range(500, 1500);
+
+ ret = __i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_READ, ((u8 *)reg_buf)[0],
+ I2C_SMBUS_I2C_BLOCK_DATA, &data);
+ if (ret < 0)
+ return ret;
+
+ memcpy(val_buf, &data.block[1], data.block[0]);
+
+ return 0;
+}
+
+static int sunrise_regmap_write(void *context, const void *val_buf, size_t count)
+{
+ struct i2c_client *client = context;
+ struct sunrise_dev *sunrise = i2c_get_clientdata(client);
+ union i2c_smbus_data data;
+
+ /* Discard the register address from the value count. */
+ if (!count)
+ return -EINVAL;
+ count--;
+
+ memset(&data, 0, sizeof(data));
+ data.block[0] = count;
+ memcpy(&data.block[1], (u8 *)val_buf + 1, count);
+
+ __i2c_smbus_xfer(client->adapter, client->addr,
+ sunrise->ignore_nak ? I2C_M_IGNORE_NAK : 0,
+ I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE_DATA, &data);
+
+ usleep_range(500, 1500);
+
+ return __i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, ((u8 *)val_buf)[0],
+ I2C_SMBUS_I2C_BLOCK_DATA, &data);
+}
+
+/*
+ * Sunrise i2c read/write operations: lock the i2c segment so the wake-up
+ * session is not lost to another bus user. The regmap operations above use
+ * the unlocked __i2c_smbus_xfer() because the caller already holds the bus
+ * lock; the locked variants would deadlock here.
+ */
+static int sunrise_read_byte(struct sunrise_dev *sunrise, u8 reg)
+{
+ const struct i2c_client *client = sunrise->client;
+ const struct device *dev = &client->dev;
+ unsigned int val;
+ int ret;
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ ret = regmap_read(sunrise->regmap, reg, &val);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ if (ret) {
+ dev_err(dev, "Read byte failed: reg 0x%02x (%d)\n", reg, ret);
+ return ret;
+ }
+
+ return val;
+}
+
+static int sunrise_read_word(struct sunrise_dev *sunrise, u8 reg, u16 *val)
+{
+ const struct i2c_client *client = sunrise->client;
+ const struct device *dev = &client->dev;
+ __be16 be_val;
+ int ret;
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ ret = regmap_bulk_read(sunrise->regmap, reg, &be_val, sizeof(be_val));
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ if (ret) {
+ dev_err(dev, "Read word failed: reg 0x%02x (%d)\n", reg, ret);
+ return ret;
+ }
+
+ *val = be16_to_cpu(be_val);
+
+ return 0;
+}
+
+static int sunrise_write_byte(struct sunrise_dev *sunrise, u8 reg, u8 val)
+{
+ const struct i2c_client *client = sunrise->client;
+ const struct device *dev = &client->dev;
+ int ret;
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ ret = regmap_write(sunrise->regmap, reg, val);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ if (ret)
+ dev_err(dev, "Write byte failed: reg 0x%02x (%d)\n", reg, ret);
+
+ return ret;
+}
+
+static int sunrise_write_word(struct sunrise_dev *sunrise, u8 reg, u16 data)
+{
+ const struct i2c_client *client = sunrise->client;
+ const struct device *dev = &client->dev;
+ __be16 be_data = cpu_to_be16(data);
+ int ret;
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ ret = regmap_bulk_write(sunrise->regmap, reg, &be_data, sizeof(be_data));
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ if (ret)
+ dev_err(dev, "Write word failed: reg 0x%02x (%d)\n", reg, ret);
+
+ return ret;
+}
+
+/* Trigger a calibration cycle. */
+
+enum {
+ SUNRISE_CALIBRATION_FACTORY,
+ SUNRISE_CALIBRATION_BACKGROUND,
+};
+
+static const struct sunrise_calib_data {
+ u16 cmd;
+ u8 bit;
+ const char * const name;
+} calib_data[] = {
+ [SUNRISE_CALIBRATION_FACTORY] = {
+ SUNRISE_CALIBRATION_FACTORY_CMD,
+ BIT(2),
+ "factory_calibration",
+ },
+ [SUNRISE_CALIBRATION_BACKGROUND] = {
+ SUNRISE_CALIBRATION_BACKGROUND_CMD,
+ BIT(5),
+ "background_calibration",
+ },
+};
+
+static int sunrise_calibrate(struct sunrise_dev *sunrise,
+ const struct sunrise_calib_data *data)
+{
+ unsigned int status;
+ int ret;
+
+ /* Reset the calibration status reg. */
+ ret = sunrise_write_byte(sunrise, SUNRISE_CALIBRATION_STATUS_REG, 0x00);
+ if (ret)
+ return ret;
+
+ /* Write a calibration command and poll the calibration status bit. */
+ ret = sunrise_write_word(sunrise, SUNRISE_CALIBRATION_COMMAND_REG, data->cmd);
+ if (ret)
+ return ret;
+
+ dev_dbg(&sunrise->client->dev, "%s in progress\n", data->name);
+
+ /*
+ * Calibration takes several seconds, so the sleep time between reads
+ * can be pretty relaxed.
+ */
+ return read_poll_timeout(sunrise_read_byte, status, status & data->bit,
+ 200000, SUNRISE_CALIBRATION_TIMEOUT_US, false,
+ sunrise, SUNRISE_CALIBRATION_STATUS_REG);
+}
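Editor's note: read_poll_timeout() re-invokes sunrise_read_byte() every 200 ms, stores the result in status, and succeeds once status & data->bit is set, giving up with -ETIMEDOUT after the 30 second limit. A roughly equivalent open-coded loop, shown for illustration only:

    unsigned long end = jiffies + usecs_to_jiffies(SUNRISE_CALIBRATION_TIMEOUT_US);
    int status;

    for (;;) {
            status = sunrise_read_byte(sunrise, SUNRISE_CALIBRATION_STATUS_REG);
            if (status & data->bit)
                    return 0;
            if (time_after(jiffies, end))
                    return -ETIMEDOUT;
            usleep_range(150000, 200000);   /* sleep_us = 200000 */
    }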
+
+static ssize_t sunrise_cal_factory_write(struct iio_dev *iiodev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct sunrise_dev *sunrise = iio_priv(iiodev);
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret)
+ return ret;
+
+ if (!enable)
+ return len;
+
+ mutex_lock(&sunrise->lock);
+ ret = sunrise_calibrate(sunrise, &calib_data[SUNRISE_CALIBRATION_FACTORY]);
+ mutex_unlock(&sunrise->lock);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t sunrise_cal_background_write(struct iio_dev *iiodev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct sunrise_dev *sunrise = iio_priv(iiodev);
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret)
+ return ret;
+
+ if (!enable)
+ return len;
+
+ mutex_lock(&sunrise->lock);
+ ret = sunrise_calibrate(sunrise, &calib_data[SUNRISE_CALIBRATION_BACKGROUND]);
+ mutex_unlock(&sunrise->lock);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+/* Enumerate and retrieve the chip error status. */
+enum {
+ SUNRISE_ERROR_FATAL,
+ SUNRISE_ERROR_I2C,
+ SUNRISE_ERROR_ALGORITHM,
+ SUNRISE_ERROR_CALIBRATION,
+ SUNRISE_ERROR_SELF_DIAGNOSTIC,
+ SUNRISE_ERROR_OUT_OF_RANGE,
+ SUNRISE_ERROR_MEMORY,
+ SUNRISE_ERROR_NO_MEASUREMENT,
+ SUNRISE_ERROR_LOW_VOLTAGE,
+ SUNRISE_ERROR_MEASUREMENT_TIMEOUT,
+};
+
+static const char * const sunrise_error_statuses[] = {
+ [SUNRISE_ERROR_FATAL] = "error_fatal",
+ [SUNRISE_ERROR_I2C] = "error_i2c",
+ [SUNRISE_ERROR_ALGORITHM] = "error_algorithm",
+ [SUNRISE_ERROR_CALIBRATION] = "error_calibration",
+ [SUNRISE_ERROR_SELF_DIAGNOSTIC] = "error_self_diagnostic",
+ [SUNRISE_ERROR_OUT_OF_RANGE] = "error_out_of_range",
+ [SUNRISE_ERROR_MEMORY] = "error_memory",
+ [SUNRISE_ERROR_NO_MEASUREMENT] = "error_no_measurement",
+ [SUNRISE_ERROR_LOW_VOLTAGE] = "error_low_voltage",
+ [SUNRISE_ERROR_MEASUREMENT_TIMEOUT] = "error_measurement_timeout",
+};
+
+static const struct iio_enum sunrise_error_statuses_enum = {
+ .items = sunrise_error_statuses,
+ .num_items = ARRAY_SIZE(sunrise_error_statuses),
+};
+
+static ssize_t sunrise_error_status_read(struct iio_dev *iiodev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct sunrise_dev *sunrise = iio_priv(iiodev);
+ unsigned long errors;
+ ssize_t len = 0;
+ u16 value;
+ int ret;
+ u8 i;
+
+ mutex_lock(&sunrise->lock);
+ ret = sunrise_read_word(sunrise, SUNRISE_ERROR_STATUS_REG, &value);
+ if (ret) {
+ mutex_unlock(&sunrise->lock);
+ return ret;
+ }
+
+ errors = value;
+ for_each_set_bit(i, &errors, ARRAY_SIZE(sunrise_error_statuses))
+ len += sysfs_emit_at(buf, len, "%s ", sunrise_error_statuses[i]);
+
+ if (len)
+ buf[len - 1] = '\n';
+
+ mutex_unlock(&sunrise->lock);
+
+ return len;
+}
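Editor's note: the result is a space-separated, newline-terminated list of the names above; for example, with the fatal and low-voltage bits set, reading error_status yields "error_fatal error_low_voltage".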
+
+static const struct iio_chan_spec_ext_info sunrise_concentration_ext_info[] = {
+ /* Calibration triggers. */
+ {
+ .name = "calibration_factory",
+ .write = sunrise_cal_factory_write,
+ .shared = IIO_SEPARATE,
+ },
+ {
+ .name = "calibration_background",
+ .write = sunrise_cal_background_write,
+ .shared = IIO_SEPARATE,
+ },
+
+ /* Error statuses. */
+ {
+ .name = "error_status",
+ .read = sunrise_error_status_read,
+ .shared = IIO_SHARED_BY_ALL,
+ },
+ {
+ .name = "error_status_available",
+ .shared = IIO_SHARED_BY_ALL,
+ .read = iio_enum_available_read,
+ .private = (uintptr_t)&sunrise_error_statuses_enum,
+ },
+ {}
+};
+
+static const struct iio_chan_spec sunrise_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .modified = 1,
+ .channel2 = IIO_MOD_CO2,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = sunrise_concentration_ext_info,
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int sunrise_read_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct sunrise_dev *sunrise = iio_priv(iio_dev);
+ u16 value;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_CONCENTRATION:
+ mutex_lock(&sunrise->lock);
+ ret = sunrise_read_word(sunrise, SUNRISE_CO2_FILTERED_COMP_REG,
+ &value);
+ *val = value;
+ mutex_unlock(&sunrise->lock);
+
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_TEMP:
+ mutex_lock(&sunrise->lock);
+ ret = sunrise_read_word(sunrise, SUNRISE_CHIP_TEMPERATURE_REG,
+ &value);
+ *val = value;
+ mutex_unlock(&sunrise->lock);
+
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_CONCENTRATION:
+ /*
+ * 1 / 10^4 to comply with the IIO scale for CO2
+ * (percentage). The chip CO2 reading range is
+ * [400 - 5000] ppm, which corresponds to [0.04 - 0.5] %.
+ */
+ *val = 1;
+ *val2 = 10000;
+ return IIO_VAL_FRACTIONAL;
+
+ case IIO_TEMP:
+ /* x10 to comply with the IIO scale (millidegrees Celsius). */
+ *val = 10;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
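Editor's note on the scales: userspace computes value = raw * scale, so a raw CO2 reading of 400 becomes 400 * 1/10000 = 0.04 % (i.e. 400 ppm), and a raw temperature of 2250 becomes 2250 * 10 = 22500 millidegrees, i.e. 22.5 °C (assuming the register reports hundredths of a degree, which the x10 scale implies).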
+
+static const struct iio_info sunrise_info = {
+ .read_raw = sunrise_read_raw,
+};
+
+static const struct regmap_bus sunrise_regmap_bus = {
+ .read = sunrise_regmap_read,
+ .write = sunrise_regmap_write,
+};
+
+static const struct regmap_config sunrise_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int sunrise_probe(struct i2c_client *client)
+{
+ struct sunrise_dev *sunrise;
+ struct iio_dev *iio_dev;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA)) {
+ dev_err(&client->dev,
+ "Adapter does not support required functionalities\n");
+ return -EOPNOTSUPP;
+ }
+
+ iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*sunrise));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ sunrise = iio_priv(iio_dev);
+ sunrise->client = client;
+ mutex_init(&sunrise->lock);
+
+ i2c_set_clientdata(client, sunrise);
+
+ sunrise->regmap = devm_regmap_init(&client->dev, &sunrise_regmap_bus,
+ client, &sunrise_regmap_config);
+ if (IS_ERR(sunrise->regmap)) {
+ dev_err(&client->dev, "Failed to initialize regmap\n");
+ return PTR_ERR(sunrise->regmap);
+ }
+
+ /*
+ * The chip NAKs the wake-up message. If the adapter supports protocol
+ * mangling, suppress the expected NAK with I2C_M_IGNORE_NAK; otherwise
+ * live with the resulting cruft in the logs.
+ */
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ sunrise->ignore_nak = true;
+
+ iio_dev->info = &sunrise_info;
+ iio_dev->name = DRIVER_NAME;
+ iio_dev->channels = sunrise_channels;
+ iio_dev->num_channels = ARRAY_SIZE(sunrise_channels);
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(&client->dev, iio_dev);
+}
+
+static const struct of_device_id sunrise_of_match[] = {
+ { .compatible = "senseair,sunrise-006-0-0007" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sunrise_of_match);
+
+static struct i2c_driver sunrise_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = sunrise_of_match,
+ },
+ .probe_new = sunrise_probe,
+};
+module_i2c_driver(sunrise_driver);
+
+MODULE_AUTHOR("Jacopo Mondi <jacopo@jmondi.org>");
+MODULE_DESCRIPTION("Senseair Sunrise 006-0-0007 CO2 sensor IIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index a4ec11a3b68a..1151434038d4 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -241,8 +241,9 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
fifo_attrs = NULL;
ret = iio_triggered_buffer_setup_ext(indio_dev,
- &iio_pollfunc_store_time,
- NULL, NULL, fifo_attrs);
+ &iio_pollfunc_store_time, NULL,
+ IIO_BUFFER_DIRECTION_IN,
+ NULL, fifo_attrs);
if (ret) {
dev_err(&indio_dev->dev, "Triggered Buffer Setup Failed\n");
return ret;
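Editor's note: iio_triggered_buffer_setup_ext() grows a buffer-direction argument in this series; input-side users such as this one pass IIO_BUFFER_DIRECTION_IN, while output devices (see the ad5766 change below) pass IIO_BUFFER_DIRECTION_OUT.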
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 0bbb090b108c..1de395bda03e 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -215,17 +215,23 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
}
EXPORT_SYMBOL(st_sensors_set_axis_enable);
+static void st_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
int st_sensors_power_enable(struct iio_dev *indio_dev)
{
struct st_sensor_data *pdata = iio_priv(indio_dev);
+ struct device *parent = indio_dev->dev.parent;
int err;
/* Regulators not mandatory, but if requested we should enable them. */
- pdata->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
- if (IS_ERR(pdata->vdd)) {
- dev_err(&indio_dev->dev, "unable to get Vdd supply\n");
- return PTR_ERR(pdata->vdd);
- }
+ pdata->vdd = devm_regulator_get(parent, "vdd");
+ if (IS_ERR(pdata->vdd))
+ return dev_err_probe(&indio_dev->dev, PTR_ERR(pdata->vdd),
+ "unable to get Vdd supply\n");
+
err = regulator_enable(pdata->vdd);
if (err != 0) {
dev_warn(&indio_dev->dev,
@@ -233,36 +239,26 @@ int st_sensors_power_enable(struct iio_dev *indio_dev)
return err;
}
- pdata->vdd_io = devm_regulator_get(indio_dev->dev.parent, "vddio");
- if (IS_ERR(pdata->vdd_io)) {
- dev_err(&indio_dev->dev, "unable to get Vdd_IO supply\n");
- err = PTR_ERR(pdata->vdd_io);
- goto st_sensors_disable_vdd;
- }
+ err = devm_add_action_or_reset(parent, st_reg_disable, pdata->vdd);
+ if (err)
+ return err;
+
+ pdata->vdd_io = devm_regulator_get(parent, "vddio");
+ if (IS_ERR(pdata->vdd_io))
+ return dev_err_probe(&indio_dev->dev, PTR_ERR(pdata->vdd_io),
+ "unable to get Vdd_IO supply\n");
+
err = regulator_enable(pdata->vdd_io);
if (err != 0) {
dev_warn(&indio_dev->dev,
"Failed to enable specified Vdd_IO supply\n");
- goto st_sensors_disable_vdd;
+ return err;
}
- return 0;
-
-st_sensors_disable_vdd:
- regulator_disable(pdata->vdd);
- return err;
+ return devm_add_action_or_reset(parent, st_reg_disable, pdata->vdd_io);
}
EXPORT_SYMBOL(st_sensors_power_enable);
-void st_sensors_power_disable(struct iio_dev *indio_dev)
-{
- struct st_sensor_data *pdata = iio_priv(indio_dev);
-
- regulator_disable(pdata->vdd);
- regulator_disable(pdata->vdd_io);
-}
-EXPORT_SYMBOL(st_sensors_power_disable);
-
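Editor's note: both supplies are now released by the devm actions registered in st_sensors_power_enable(), so st_sensors_power_disable() no longer has a role and is removed; callers no longer need an explicit power-off step on their remove paths.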
static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
struct st_sensors_platform_data *pdata)
{
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index b3ff88700866..18bd3c3d99bc 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -57,7 +57,6 @@ int st_sensors_i2c_configure(struct iio_dev *indio_dev,
indio_dev->name = client->name;
- sdata->dev = &client->dev;
sdata->irq = client->irq;
return 0;
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 0d1d66c77cd8..7c60050e90dc 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -109,7 +109,6 @@ int st_sensors_spi_configure(struct iio_dev *indio_dev,
indio_dev->name = spi->modalias;
- sdata->dev = &spi->dev;
sdata->irq = spi->irq;
return 0;
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 64e0a748a855..392d74449886 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -42,7 +42,8 @@ static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
- dev_err(sdata->dev, "error checking samples available\n");
+ dev_err(indio_dev->dev.parent,
+ "error checking samples available\n");
return false;
}
@@ -87,7 +88,7 @@ static irqreturn_t st_sensors_irq_thread(int irq, void *p)
st_sensors_new_samples_available(indio_dev, sdata)) {
iio_trigger_poll_chained(p);
} else {
- dev_dbg(sdata->dev, "spurious IRQ\n");
+ dev_dbg(indio_dev->dev.parent, "spurious IRQ\n");
return IRQ_NONE;
}
@@ -107,7 +108,8 @@ static irqreturn_t st_sensors_irq_thread(int irq, void *p)
*/
while (sdata->hw_irq_trigger &&
st_sensors_new_samples_available(indio_dev, sdata)) {
- dev_dbg(sdata->dev, "more samples came in during polling\n");
+ dev_dbg(indio_dev->dev.parent,
+ "more samples came in during polling\n");
sdata->hw_timestamp = iio_get_time_ns(indio_dev);
iio_trigger_poll_chained(p);
}
@@ -119,11 +121,12 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops)
{
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ struct device *parent = indio_dev->dev.parent;
unsigned long irq_trig;
int err;
- sdata->trig = iio_trigger_alloc(sdata->dev, "%s-trigger",
- indio_dev->name);
+ sdata->trig = devm_iio_trigger_alloc(parent, "%s-trigger",
+ indio_dev->name);
if (sdata->trig == NULL) {
dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
return -ENOMEM;
@@ -153,7 +156,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.addr_ihl,
sdata->sensor_settings->drdy_irq.mask_ihl, 1);
if (err < 0)
- goto iio_trigger_free;
+ return err;
dev_info(&indio_dev->dev,
"interrupts on the falling edge or active low level\n");
}
@@ -179,8 +182,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
dev_err(&indio_dev->dev,
"edge IRQ not supported w/o stat register.\n");
- err = -EOPNOTSUPP;
- goto iio_trigger_free;
+ return -EOPNOTSUPP;
}
sdata->edge_irq = true;
} else {
@@ -205,44 +207,29 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.stat_drdy.addr)
irq_trig |= IRQF_SHARED;
- err = request_threaded_irq(sdata->irq,
- st_sensors_irq_handler,
- st_sensors_irq_thread,
- irq_trig,
- sdata->trig->name,
- sdata->trig);
+ err = devm_request_threaded_irq(parent,
+ sdata->irq,
+ st_sensors_irq_handler,
+ st_sensors_irq_thread,
+ irq_trig,
+ sdata->trig->name,
+ sdata->trig);
if (err) {
dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n");
- goto iio_trigger_free;
+ return err;
}
- err = iio_trigger_register(sdata->trig);
+ err = devm_iio_trigger_register(parent, sdata->trig);
if (err < 0) {
dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
- goto iio_trigger_register_error;
+ return err;
}
indio_dev->trig = iio_trigger_get(sdata->trig);
return 0;
-
-iio_trigger_register_error:
- free_irq(sdata->irq, sdata->trig);
-iio_trigger_free:
- iio_trigger_free(sdata->trig);
- return err;
}
EXPORT_SYMBOL(st_sensors_allocate_trigger);
-void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
-{
- struct st_sensor_data *sdata = iio_priv(indio_dev);
-
- iio_trigger_unregister(sdata->trig);
- free_irq(sdata->irq, sdata->trig);
- iio_trigger_free(sdata->trig);
-}
-EXPORT_SYMBOL(st_sensors_deallocate_trigger);
-
int st_sensors_validate_device(struct iio_trigger *trig,
struct iio_dev *indio_dev)
{
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index dff623b65e4f..fd9cac4f6321 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -843,6 +843,13 @@ static int ad5064_request_vref(struct ad5064_state *st, struct device *dev)
return ret;
}
+static void ad5064_bulk_reg_disable(void *data)
+{
+ struct ad5064_state *st = data;
+
+ regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
+}
+
static int ad5064_probe(struct device *dev, enum ad5064_type type,
const char *name, ad5064_write_func write)
{
@@ -858,7 +865,6 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
st = iio_priv(indio_dev);
mutex_init(&st->lock);
- dev_set_drvdata(dev, indio_dev);
st->chip_info = &ad5064_chip_info_tbl[type];
st->dev = dev;
@@ -872,6 +878,10 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
if (ret)
return ret;
+
+ ret = devm_add_action_or_reset(dev, ad5064_bulk_reg_disable, st);
+ if (ret)
+ return ret;
}
indio_dev->name = name;
@@ -887,30 +897,7 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
st->dac_cache[i] = midscale;
}
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_disable_reg;
-
- return 0;
-
-error_disable_reg:
- if (!st->use_internal_vref)
- regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
-
- return ret;
-}
-
-static int ad5064_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5064_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- if (!st->use_internal_vref)
- regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
-
- return 0;
+ return devm_iio_device_register(dev, indio_dev);
}
#if IS_ENABLED(CONFIG_SPI_MASTER)
@@ -932,11 +919,6 @@ static int ad5064_spi_probe(struct spi_device *spi)
ad5064_spi_write);
}
-static int ad5064_spi_remove(struct spi_device *spi)
-{
- return ad5064_remove(&spi->dev);
-}
-
static const struct spi_device_id ad5064_spi_ids[] = {
{"ad5024", ID_AD5024},
{"ad5025", ID_AD5025},
@@ -963,7 +945,6 @@ static struct spi_driver ad5064_spi_driver = {
.name = "ad5064",
},
.probe = ad5064_spi_probe,
- .remove = ad5064_spi_remove,
.id_table = ad5064_spi_ids,
};
@@ -1019,11 +1000,6 @@ static int ad5064_i2c_probe(struct i2c_client *i2c,
ad5064_i2c_write);
}
-static int ad5064_i2c_remove(struct i2c_client *i2c)
-{
- return ad5064_remove(&i2c->dev);
-}
-
static const struct i2c_device_id ad5064_i2c_ids[] = {
{"ad5625", ID_AD5625 },
{"ad5625r-1v25", ID_AD5625R_1V25 },
@@ -1081,7 +1057,6 @@ static struct i2c_driver ad5064_i2c_driver = {
.name = "ad5064",
},
.probe = ad5064_i2c_probe,
- .remove = ad5064_i2c_remove,
.id_table = ad5064_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 53db5b4e4c53..8ca26bb4b62f 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -444,7 +444,7 @@ error_free_reg:
return ret;
}
-static int ad5380_remove(struct device *dev)
+static void ad5380_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5380_state *st = iio_priv(indio_dev);
@@ -453,11 +453,8 @@ static int ad5380_remove(struct device *dev)
kfree(indio_dev->channels);
- if (!IS_ERR(st->vref_reg)) {
+ if (!IS_ERR(st->vref_reg))
regulator_disable(st->vref_reg);
- }
-
- return 0;
}
static bool ad5380_reg_false(struct device *dev, unsigned int reg)
@@ -493,7 +490,9 @@ static int ad5380_spi_probe(struct spi_device *spi)
static int ad5380_spi_remove(struct spi_device *spi)
{
- return ad5380_remove(&spi->dev);
+ ad5380_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id ad5380_spi_ids[] = {
@@ -566,7 +565,9 @@ static int ad5380_i2c_probe(struct i2c_client *i2c,
static int ad5380_i2c_remove(struct i2c_client *i2c)
{
- return ad5380_remove(&i2c->dev);
+ ad5380_remove(&i2c->dev);
+
+ return 0;
}
static const struct i2c_device_id ad5380_i2c_ids[] = {
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 488ec69967d6..3cc5513a6cbf 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -283,7 +283,7 @@ error_disable_reg:
return ret;
}
-static int ad5446_remove(struct device *dev)
+static void ad5446_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5446_state *st = iio_priv(indio_dev);
@@ -291,8 +291,6 @@ static int ad5446_remove(struct device *dev)
iio_device_unregister(indio_dev);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-
- return 0;
}
#if IS_ENABLED(CONFIG_SPI_MASTER)
@@ -495,7 +493,9 @@ static int ad5446_spi_probe(struct spi_device *spi)
static int ad5446_spi_remove(struct spi_device *spi)
{
- return ad5446_remove(&spi->dev);
+ ad5446_remove(&spi->dev);
+
+ return 0;
}
static struct spi_driver ad5446_spi_driver = {
@@ -531,8 +531,15 @@ static int ad5622_write(struct ad5446_state *st, unsigned val)
{
struct i2c_client *client = to_i2c_client(st->dev);
__be16 data = cpu_to_be16(val);
+ int ret;
- return i2c_master_send(client, (char *)&data, sizeof(data));
+ ret = i2c_master_send(client, (char *)&data, sizeof(data));
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(data))
+ return -EIO;
+
+ return 0;
}
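Editor's note: i2c_master_send() returns the number of bytes transferred on success or a negative errno, so callers that must report plain success/failure have to normalize short writes themselves, as the fix above does. A generic helper sketch (illustrative, not part of the patch):

    /* Normalize an i2c_master_send() result to 0 / -errno. */
    static int i2c_send_exact(struct i2c_client *client, const void *buf, int len)
    {
            int ret = i2c_master_send(client, buf, len);

            if (ret < 0)
                    return ret;             /* bus or adapter error */
            return ret == len ? 0 : -EIO;   /* short write */
    }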
/*
@@ -572,7 +579,9 @@ static int ad5446_i2c_probe(struct i2c_client *i2c,
static int ad5446_i2c_remove(struct i2c_client *i2c)
{
- return ad5446_remove(&i2c->dev);
+ ad5446_remove(&i2c->dev);
+
+ return 0;
}
static const struct i2c_device_id ad5446_i2c_ids[] = {
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 0405e92b9e8c..2fcc59728fd6 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -663,7 +663,7 @@ error_disable_reg:
}
EXPORT_SYMBOL_GPL(ad5592r_probe);
-int ad5592r_remove(struct device *dev)
+void ad5592r_remove(struct device *dev)
{
struct iio_dev *iio_dev = dev_get_drvdata(dev);
struct ad5592r_state *st = iio_priv(iio_dev);
@@ -674,8 +674,6 @@ int ad5592r_remove(struct device *dev)
if (st->reg)
regulator_disable(st->reg);
-
- return 0;
}
EXPORT_SYMBOL_GPL(ad5592r_remove);
diff --git a/drivers/iio/dac/ad5592r-base.h b/drivers/iio/dac/ad5592r-base.h
index 23dac2f1ff8a..2a22ef691996 100644
--- a/drivers/iio/dac/ad5592r-base.h
+++ b/drivers/iio/dac/ad5592r-base.h
@@ -71,6 +71,6 @@ struct ad5592r_state {
int ad5592r_probe(struct device *dev, const char *name,
const struct ad5592r_rw_ops *ops);
-int ad5592r_remove(struct device *dev);
+void ad5592r_remove(struct device *dev);
#endif /* __DRIVERS_IIO_DAC_AD5592R_BASE_H__ */
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 41f651500668..6bfd7951e18c 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -132,7 +132,9 @@ static int ad5592r_spi_probe(struct spi_device *spi)
static int ad5592r_spi_remove(struct spi_device *spi)
{
- return ad5592r_remove(&spi->dev);
+ ad5592r_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id ad5592r_spi_ids[] = {
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index 5b4df36fdc2a..64dd7a0bddf7 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -99,7 +99,9 @@ static int ad5593r_i2c_probe(struct i2c_client *i2c,
static int ad5593r_i2c_remove(struct i2c_client *i2c)
{
- return ad5592r_remove(&i2c->dev);
+ ad5592r_remove(&i2c->dev);
+
+ return 0;
}
static const struct i2c_device_id ad5593r_i2c_ids[] = {
diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c
index 0188ded5137c..2628810fdbb1 100644
--- a/drivers/iio/dac/ad5686-spi.c
+++ b/drivers/iio/dac/ad5686-spi.c
@@ -97,7 +97,9 @@ static int ad5686_spi_probe(struct spi_device *spi)
static int ad5686_spi_remove(struct spi_device *spi)
{
- return ad5686_remove(&spi->dev);
+ ad5686_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id ad5686_spi_id[] = {
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index fcb64f20ff64..8f001db775f4 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -538,7 +538,7 @@ error_disable_reg:
}
EXPORT_SYMBOL_GPL(ad5686_probe);
-int ad5686_remove(struct device *dev)
+void ad5686_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5686_state *st = iio_priv(indio_dev);
@@ -546,8 +546,6 @@ int ad5686_remove(struct device *dev)
iio_device_unregister(indio_dev);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-
- return 0;
}
EXPORT_SYMBOL_GPL(ad5686_remove);
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index f89a6f92b427..cd5fff9e9d53 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -154,7 +154,7 @@ int ad5686_probe(struct device *dev,
const char *name, ad5686_write_func write,
ad5686_read_func read);
-int ad5686_remove(struct device *dev);
+void ad5686_remove(struct device *dev);
#endif /* __DRIVERS_IIO_DAC_AD5686_H__ */
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index 24a6a4a5a2e0..93f0e0e66c22 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -67,7 +67,9 @@ static int ad5686_i2c_probe(struct i2c_client *i2c,
static int ad5686_i2c_remove(struct i2c_client *i2c)
{
- return ad5686_remove(&i2c->dev);
+ ad5686_remove(&i2c->dev);
+
+ return 0;
}
static const struct i2c_device_id ad5686_i2c_id[] = {
diff --git a/drivers/iio/dac/ad5766.c b/drivers/iio/dac/ad5766.c
index 3104ec32dfac..b0d220c3a126 100644
--- a/drivers/iio/dac/ad5766.c
+++ b/drivers/iio/dac/ad5766.c
@@ -5,10 +5,13 @@
* Copyright 2019-2020 Analog Devices Inc.
*/
#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>
@@ -455,6 +458,7 @@ static const struct iio_chan_spec_ext_info ad5766_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | \
BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = (_chan), \
.scan_type = { \
.sign = 'u', \
.realbits = (_bits), \
@@ -503,13 +507,13 @@ static int ad5766_get_output_range(struct ad5766_state *st)
int i, ret, min, max, tmp[2];
ret = device_property_read_u32_array(&st->spi->dev,
- "output-range-voltage",
+ "output-range-microvolts",
tmp, 2);
if (ret)
return ret;
- min = tmp[0] / 1000;
- max = tmp[1] / 1000;
+ min = tmp[0] / 1000000;
+ max = tmp[1] / 1000000;
for (i = 0; i < ARRAY_SIZE(ad5766_span_tbl); i++) {
if (ad5766_span_tbl[i].min != min ||
ad5766_span_tbl[i].max != max)
@@ -576,6 +580,35 @@ static int ad5766_default_setup(struct ad5766_state *st)
return __ad5766_spi_write(st, AD5766_CMD_SPAN_REG, st->crt_range);
}
+static irqreturn_t ad5766_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct iio_buffer *buffer = indio_dev->buffer;
+ struct ad5766_state *st = iio_priv(indio_dev);
+ int ret, ch, i;
+ u16 data[ARRAY_SIZE(ad5766_channels)];
+
+ ret = iio_pop_from_buffer(buffer, data);
+ if (ret)
+ goto done;
+
+ i = 0;
+ mutex_lock(&st->lock);
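+ /* One popped sample per active channel; a soft LDAC then latches them together. */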
+ for_each_set_bit(ch, indio_dev->active_scan_mask,
+ st->chip_info->num_channels - 1)
+ __ad5766_spi_write(st, AD5766_CMD_WR_IN_REG(ch), data[i++]);
+
+ __ad5766_spi_write(st, AD5766_CMD_SW_LDAC,
+ *indio_dev->active_scan_mask);
+ mutex_unlock(&st->lock);
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
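Editor's note: with an output-direction buffer the data flow is reversed: userspace writes sample frames into the buffer, and on each trigger event the handler pops one frame with iio_pop_from_buffer(), writes one word per active channel into the DAC input registers, and issues a software LDAC (AD5766_CMD_SW_LDAC) so every enabled output updates at once.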
+
static int ad5766_probe(struct spi_device *spi)
{
enum ad5766_type type;
@@ -609,6 +642,15 @@ static int ad5766_probe(struct spi_device *spi)
if (ret)
return ret;
+ /* Configure trigger buffer */
+ ret = devm_iio_triggered_buffer_setup_ext(&spi->dev, indio_dev, NULL,
+ ad5766_trigger_handler,
+ IIO_BUFFER_DIRECTION_OUT,
+ NULL,
+ NULL);
+ if (ret)
+ return ret;
+
return devm_iio_device_register(&spi->dev, indio_dev);
}
diff --git a/drivers/iio/dac/ad5770r.c b/drivers/iio/dac/ad5770r.c
index 8107f7bbbe3c..7e2fd32e993a 100644
--- a/drivers/iio/dac/ad5770r.c
+++ b/drivers/iio/dac/ad5770r.c
@@ -522,7 +522,7 @@ static int ad5770r_channel_config(struct ad5770r_state *st)
return -EINVAL;
device_for_each_child_node(&st->spi->dev, child) {
- ret = fwnode_property_read_u32(child, "num", &num);
+ ret = fwnode_property_read_u32(child, "reg", &num);
if (ret)
goto err_child_out;
if (num >= AD5770R_MAX_CHANNELS) {
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index e1b6a92df12f..91eaaf793b3e 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -198,6 +198,11 @@ static const struct iio_chan_spec ad7303_channels[] = {
AD7303_CHANNEL(1),
};
+static void ad7303_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int ad7303_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -210,7 +215,6 @@ static int ad7303_probe(struct spi_device *spi)
return -ENOMEM;
st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
st->spi = spi;
@@ -224,18 +228,27 @@ static int ad7303_probe(struct spi_device *spi)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&spi->dev, ad7303_reg_disable, st->vdd_reg);
+ if (ret)
+ return ret;
+
st->vref_reg = devm_regulator_get_optional(&spi->dev, "REF");
if (IS_ERR(st->vref_reg)) {
ret = PTR_ERR(st->vref_reg);
if (ret != -ENODEV)
- goto err_disable_vdd_reg;
+ return ret;
st->vref_reg = NULL;
}
if (st->vref_reg) {
ret = regulator_enable(st->vref_reg);
if (ret)
- goto err_disable_vdd_reg;
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7303_reg_disable,
+ st->vref_reg);
+ if (ret)
+ return ret;
st->config |= AD7303_CFG_EXTERNAL_VREF;
}
@@ -246,32 +259,7 @@ static int ad7303_probe(struct spi_device *spi)
indio_dev->channels = ad7303_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7303_channels);
- ret = iio_device_register(indio_dev);
- if (ret)
- goto err_disable_vref_reg;
-
- return 0;
-
-err_disable_vref_reg:
- if (st->vref_reg)
- regulator_disable(st->vref_reg);
-err_disable_vdd_reg:
- regulator_disable(st->vdd_reg);
- return ret;
-}
-
-static int ad7303_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7303_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- if (st->vref_reg)
- regulator_disable(st->vref_reg);
- regulator_disable(st->vdd_reg);
-
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id ad7303_spi_of_match[] = {
@@ -292,7 +280,6 @@ static struct spi_driver ad7303_driver = {
.of_match_table = ad7303_spi_of_match,
},
.probe = ad7303_probe,
- .remove = ad7303_remove,
.id_table = ad7303_spi_ids,
};
module_spi_driver(ad7303_driver);
diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c
index 6354b7c8f052..5ecfdad54dec 100644
--- a/drivers/iio/dac/ad8801.c
+++ b/drivers/iio/dac/ad8801.c
@@ -123,10 +123,9 @@ static int ad8801_probe(struct spi_device *spi)
id = spi_get_device_id(spi);
state->vrefh_reg = devm_regulator_get(&spi->dev, "vrefh");
- if (IS_ERR(state->vrefh_reg)) {
- dev_err(&spi->dev, "Vrefh regulator not specified\n");
- return PTR_ERR(state->vrefh_reg);
- }
+ if (IS_ERR(state->vrefh_reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(state->vrefh_reg),
+ "Vrefh regulator not specified\n");
ret = regulator_enable(state->vrefh_reg);
if (ret) {
@@ -146,8 +145,8 @@ static int ad8801_probe(struct spi_device *spi)
if (id->driver_data == ID_AD8803) {
state->vrefl_reg = devm_regulator_get(&spi->dev, "vrefl");
if (IS_ERR(state->vrefl_reg)) {
- dev_err(&spi->dev, "Vrefl regulator not specified\n");
- ret = PTR_ERR(state->vrefl_reg);
+ ret = dev_err_probe(&spi->dev, PTR_ERR(state->vrefl_reg),
+ "Vrefl regulator not specified\n");
goto error_disable_vrefh_reg;
}
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
index 79527fbc250a..5a5e967b0be4 100644
--- a/drivers/iio/dac/ds4424.c
+++ b/drivers/iio/dac/ds4424.c
@@ -232,12 +232,9 @@ static int ds4424_probe(struct i2c_client *client,
indio_dev->name = id->name;
data->vcc_reg = devm_regulator_get(&client->dev, "vcc");
- if (IS_ERR(data->vcc_reg)) {
- dev_err(&client->dev,
- "Failed to get vcc-supply regulator. err: %ld\n",
- PTR_ERR(data->vcc_reg));
- return PTR_ERR(data->vcc_reg);
- }
+ if (IS_ERR(data->vcc_reg))
+ return dev_err_probe(&client->dev, PTR_ERR(data->vcc_reg),
+ "Failed to get vcc-supply regulator.\n");
mutex_init(&data->lock);
ret = regulator_enable(data->vcc_reg);
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
index 9e38607a189e..5502e4f62f0d 100644
--- a/drivers/iio/dac/lpc18xx_dac.c
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -121,16 +121,14 @@ static int lpc18xx_dac_probe(struct platform_device *pdev)
return PTR_ERR(dac->base);
dac->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dac->clk)) {
- dev_err(&pdev->dev, "error getting clock\n");
- return PTR_ERR(dac->clk);
- }
+ if (IS_ERR(dac->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dac->clk),
+ "error getting clock\n");
dac->vref = devm_regulator_get(&pdev->dev, "vref");
- if (IS_ERR(dac->vref)) {
- dev_err(&pdev->dev, "error getting regulator\n");
- return PTR_ERR(dac->vref);
- }
+ if (IS_ERR(dac->vref))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dac->vref),
+ "error getting regulator\n");
indio_dev->name = dev_name(&pdev->dev);
indio_dev->info = &lpc18xx_dac_info;
diff --git a/drivers/iio/dac/ltc1660.c b/drivers/iio/dac/ltc1660.c
index dc10188540ca..f6ec9bf5815e 100644
--- a/drivers/iio/dac/ltc1660.c
+++ b/drivers/iio/dac/ltc1660.c
@@ -172,10 +172,9 @@ static int ltc1660_probe(struct spi_device *spi)
}
priv->vref_reg = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(priv->vref_reg)) {
- dev_err(&spi->dev, "vref regulator not specified\n");
- return PTR_ERR(priv->vref_reg);
- }
+ if (IS_ERR(priv->vref_reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(priv->vref_reg),
+ "vref regulator not specified\n");
ret = regulator_enable(priv->vref_reg);
if (ret) {
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index bd0b7f361154..7da4710a6408 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -321,12 +321,9 @@ static int max5821_probe(struct i2c_client *client,
}
data->vref_reg = devm_regulator_get(&client->dev, "vref");
- if (IS_ERR(data->vref_reg)) {
- ret = PTR_ERR(data->vref_reg);
- dev_err(&client->dev,
- "Failed to get vref regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(data->vref_reg))
+ return dev_err_probe(&client->dev, PTR_ERR(data->vref_reg),
+ "Failed to get vref regulator\n");
ret = regulator_enable(data->vref_reg);
if (ret) {
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index c4e430b4050e..0ae414ee1716 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -130,10 +130,9 @@ static int mcp4922_probe(struct spi_device *spi)
state = iio_priv(indio_dev);
state->spi = spi;
state->vref_reg = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(state->vref_reg)) {
- dev_err(&spi->dev, "Vref regulator not specified\n");
- return PTR_ERR(state->vref_reg);
- }
+ if (IS_ERR(state->vref_reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(state->vref_reg),
+ "Vref regulator not specified\n");
ret = regulator_enable(state->vref_reg);
if (ret) {
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index 906436780347..bd7a3b20e645 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -90,7 +90,6 @@ static int stm32_dac_probe(struct platform_device *pdev)
const struct stm32_dac_cfg *cfg;
struct stm32_dac_priv *priv;
struct regmap *regmap;
- struct resource *res;
void __iomem *mmio;
struct reset_control *rst;
int ret;
@@ -106,8 +105,7 @@ static int stm32_dac_probe(struct platform_device *pdev)
cfg = (const struct stm32_dac_cfg *)
of_match_device(dev->driver->of_match_table, dev)->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio = devm_ioremap_resource(dev, res);
+ mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio))
return PTR_ERR(mmio);
@@ -118,18 +116,12 @@ static int stm32_dac_probe(struct platform_device *pdev)
priv->common.regmap = regmap;
priv->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(priv->pclk)) {
- ret = PTR_ERR(priv->pclk);
- dev_err(dev, "pclk get failed\n");
- return ret;
- }
+ if (IS_ERR(priv->pclk))
+ return dev_err_probe(dev, PTR_ERR(priv->pclk), "pclk get failed\n");
priv->vref = devm_regulator_get(dev, "vref");
- if (IS_ERR(priv->vref)) {
- ret = PTR_ERR(priv->vref);
- dev_err(dev, "vref get failed, %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->vref))
+ return dev_err_probe(dev, PTR_ERR(priv->vref), "vref get failed\n");
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 9d0b253be841..09218c3029f0 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -266,10 +266,9 @@ static int ti_dac_probe(struct spi_device *spi)
ti_dac->resolution = spec->resolution;
ti_dac->vref = devm_regulator_get(dev, "vref");
- if (IS_ERR(ti_dac->vref)) {
- dev_err(dev, "error to get regulator\n");
- return PTR_ERR(ti_dac->vref);
- }
+ if (IS_ERR(ti_dac->vref))
+ return dev_err_probe(dev, PTR_ERR(ti_dac->vref),
+ "error to get regulator\n");
ret = regulator_enable(ti_dac->vref);
if (ret < 0) {
diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
index 240b81502512..2c9e0559e8a4 100644
--- a/drivers/iio/frequency/Kconfig
+++ b/drivers/iio/frequency/Kconfig
@@ -49,5 +49,17 @@ config ADF4371
To compile this driver as a module, choose M here: the
module will be called adf4371.
+
+config ADRF6780
+ tristate "Analog Devices ADRF6780 Microwave Upconverter"
+ depends on SPI
+ depends on COMMON_CLK
+ help
+ Say yes here to build support for Analog Devices ADRF6780
+ 5.9 GHz to 23.6 GHz wideband microwave upconverter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adrf6780.
+
endmenu
endmenu
diff --git a/drivers/iio/frequency/Makefile b/drivers/iio/frequency/Makefile
index 518b1e50caef..ae3136c79202 100644
--- a/drivers/iio/frequency/Makefile
+++ b/drivers/iio/frequency/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_AD9523) += ad9523.o
obj-$(CONFIG_ADF4350) += adf4350.o
obj-$(CONFIG_ADF4371) += adf4371.o
+obj-$(CONFIG_ADRF6780) += adrf6780.o
diff --git a/drivers/iio/frequency/adrf6780.c b/drivers/iio/frequency/adrf6780.c
new file mode 100644
index 000000000000..8255ffd174f6
--- /dev/null
+++ b/drivers/iio/frequency/adrf6780.c
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADRF6780 driver
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spi/spi.h>
+
+#include <asm/unaligned.h>
+
+/* ADRF6780 Register Map */
+#define ADRF6780_REG_CONTROL 0x00
+#define ADRF6780_REG_ALARM_READBACK 0x01
+#define ADRF6780_REG_ALARM_MASKS 0x02
+#define ADRF6780_REG_ENABLE 0x03
+#define ADRF6780_REG_LINEARIZE 0x04
+#define ADRF6780_REG_LO_PATH 0x05
+#define ADRF6780_REG_ADC_CONTROL 0x06
+#define ADRF6780_REG_ADC_OUTPUT 0x0C
+
+/* ADRF6780_REG_CONTROL Map */
+#define ADRF6780_PARITY_EN_MSK BIT(15)
+#define ADRF6780_SOFT_RESET_MSK BIT(14)
+#define ADRF6780_CHIP_ID_MSK GENMASK(11, 4)
+#define ADRF6780_CHIP_ID 0xA
+#define ADRF6780_CHIP_REVISION_MSK GENMASK(3, 0)
+
+/* ADRF6780_REG_ALARM_READBACK Map */
+#define ADRF6780_PARITY_ERROR_MSK BIT(15)
+#define ADRF6780_TOO_FEW_ERRORS_MSK BIT(14)
+#define ADRF6780_TOO_MANY_ERRORS_MSK BIT(13)
+#define ADRF6780_ADDRESS_RANGE_ERROR_MSK BIT(12)
+
+/* ADRF6780_REG_ENABLE Map */
+#define ADRF6780_VGA_BUFFER_EN_MSK BIT(8)
+#define ADRF6780_DETECTOR_EN_MSK BIT(7)
+#define ADRF6780_LO_BUFFER_EN_MSK BIT(6)
+#define ADRF6780_IF_MODE_EN_MSK BIT(5)
+#define ADRF6780_IQ_MODE_EN_MSK BIT(4)
+#define ADRF6780_LO_X2_EN_MSK BIT(3)
+#define ADRF6780_LO_PPF_EN_MSK BIT(2)
+#define ADRF6780_LO_EN_MSK BIT(1)
+#define ADRF6780_UC_BIAS_EN_MSK BIT(0)
+
+/* ADRF6780_REG_LINEARIZE Map */
+#define ADRF6780_RDAC_LINEARIZE_MSK GENMASK(7, 0)
+
+/* ADRF6780_REG_LO_PATH Map */
+#define ADRF6780_LO_SIDEBAND_MSK BIT(10)
+#define ADRF6780_Q_PATH_PHASE_ACCURACY_MSK GENMASK(7, 4)
+#define ADRF6780_I_PATH_PHASE_ACCURACY_MSK GENMASK(3, 0)
+
+/* ADRF6780_REG_ADC_CONTROL Map */
+#define ADRF6780_VDET_OUTPUT_SELECT_MSK BIT(3)
+#define ADRF6780_ADC_START_MSK BIT(2)
+#define ADRF6780_ADC_EN_MSK BIT(1)
+#define ADRF6780_ADC_CLOCK_EN_MSK BIT(0)
+
+/* ADRF6780_REG_ADC_OUTPUT Map */
+#define ADRF6780_ADC_STATUS_MSK BIT(8)
+#define ADRF6780_ADC_VALUE_MSK GENMASK(7, 0)
+
+struct adrf6780_state {
+ struct spi_device *spi;
+ struct clk *clkin;
+ /* Protect against concurrent accesses to the device */
+ struct mutex lock;
+ bool vga_buff_en;
+ bool lo_buff_en;
+ bool if_mode_en;
+ bool iq_mode_en;
+ bool lo_x2_en;
+ bool lo_ppf_en;
+ bool lo_en;
+ bool uc_bias_en;
+ bool lo_sideband;
+ bool vdet_out_en;
+ u8 data[3] ____cacheline_aligned;
+};
+
+static int __adrf6780_spi_read(struct adrf6780_state *st, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+ struct spi_transfer t = {0};
+
+ st->data[0] = 0x80 | (reg << 1);
+ st->data[1] = 0x0;
+ st->data[2] = 0x0;
+
+ t.rx_buf = &st->data[0];
+ t.tx_buf = &st->data[0];
+ t.len = 3;
+
+ ret = spi_sync_transfer(st->spi, &t, 1);
+ if (ret)
+ return ret;
+
+ *val = (get_unaligned_be24(&st->data[0]) >> 1) & GENMASK(15, 0);
+
+ return 0;
+}
+
+static int adrf6780_spi_read(struct adrf6780_state *st, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __adrf6780_spi_read(st, reg, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __adrf6780_spi_write(struct adrf6780_state *st,
+ unsigned int reg,
+ unsigned int val)
+{
+ put_unaligned_be24((val << 1) | (reg << 17), &st->data[0]);
+
+ return spi_write(st->spi, &st->data[0], 3);
+}
+
+static int adrf6780_spi_write(struct adrf6780_state *st, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __adrf6780_spi_write(st, reg, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
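Editor's note: as the helpers above show, each SPI frame is 24 bits: bit 23 carries the read flag, bits 22:17 the 6-bit register address, bits 16:1 the 16-bit value, and bit 0 is left clear here. A worked write example under that layout:

    /* Writing 0x0423 to register 0x05 (ADRF6780_REG_LO_PATH), sketch only: */
    u32 frame = (0x05 << 17) | (0x0423 << 1);   /* = 0x0A0846 */
    /*
     * put_unaligned_be24(frame, buf) emits the bytes 0x0A 0x08 0x46 on the
     * wire; on read, (get_unaligned_be24(buf) >> 1) & GENMASK(15, 0)
     * recovers the 16-bit value.
     */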
+
+static int __adrf6780_spi_update_bits(struct adrf6780_state *st,
+ unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ int ret;
+ unsigned int data, temp;
+
+ ret = __adrf6780_spi_read(st, reg, &data);
+ if (ret)
+ return ret;
+
+ temp = (data & ~mask) | (val & mask);
+
+ return __adrf6780_spi_write(st, reg, temp);
+}
+
+static int adrf6780_spi_update_bits(struct adrf6780_state *st, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __adrf6780_spi_update_bits(st, reg, mask, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int adrf6780_read_adc_raw(struct adrf6780_state *st, unsigned int *read_val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+
+ ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_ADC_CONTROL,
+ ADRF6780_ADC_EN_MSK |
+ ADRF6780_ADC_CLOCK_EN_MSK |
+ ADRF6780_ADC_START_MSK,
+ FIELD_PREP(ADRF6780_ADC_EN_MSK, 1) |
+ FIELD_PREP(ADRF6780_ADC_CLOCK_EN_MSK, 1) |
+ FIELD_PREP(ADRF6780_ADC_START_MSK, 1));
+ if (ret)
+ goto exit;
+
+ /* Recommended delay for the ADC to be ready. */
+ usleep_range(200, 250);
+
+ ret = __adrf6780_spi_read(st, ADRF6780_REG_ADC_OUTPUT, read_val);
+ if (ret)
+ goto exit;
+
+ if (!(*read_val & ADRF6780_ADC_STATUS_MSK)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_ADC_CONTROL,
+ ADRF6780_ADC_START_MSK,
+ FIELD_PREP(ADRF6780_ADC_START_MSK, 0));
+ if (ret)
+ goto exit;
+
+ ret = __adrf6780_spi_read(st, ADRF6780_REG_ADC_OUTPUT, read_val);
+
+exit:
+ mutex_unlock(&st->lock);
+ return ret;
+}
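Editor's note: the double read of ADRF6780_REG_ADC_OUTPUT is deliberate: the first read only checks the conversion-ready bit (ADRF6780_ADC_STATUS_MSK); ADC_START is then cleared and a second read returns the value actually consumed by the caller.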
+
+static int adrf6780_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct adrf6780_state *st = iio_priv(indio_dev);
+ unsigned int data;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ret = adrf6780_read_adc_raw(st, &data);
+ if (ret)
+ return ret;
+
+ *val = data & ADRF6780_ADC_VALUE_MSK;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = adrf6780_spi_read(st, ADRF6780_REG_LINEARIZE, &data);
+ if (ret)
+ return ret;
+
+ *val = data & ADRF6780_RDAC_LINEARIZE_MSK;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PHASE:
+ ret = adrf6780_spi_read(st, ADRF6780_REG_LO_PATH, &data);
+ if (ret)
+ return ret;
+
+ switch (chan->channel2) {
+ case IIO_MOD_I:
+ *val = data & ADRF6780_I_PATH_PHASE_ACCURACY_MSK;
+
+ return IIO_VAL_INT;
+ case IIO_MOD_Q:
+ *val = FIELD_GET(ADRF6780_Q_PATH_PHASE_ACCURACY_MSK,
+ data);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adrf6780_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct adrf6780_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ return adrf6780_spi_write(st, ADRF6780_REG_LINEARIZE, val);
+ case IIO_CHAN_INFO_PHASE:
+ switch (chan->channel2) {
+ case IIO_MOD_I:
+ return adrf6780_spi_update_bits(st,
+ ADRF6780_REG_LO_PATH,
+ ADRF6780_I_PATH_PHASE_ACCURACY_MSK,
+ FIELD_PREP(ADRF6780_I_PATH_PHASE_ACCURACY_MSK, val));
+ case IIO_MOD_Q:
+ return adrf6780_spi_update_bits(st,
+ ADRF6780_REG_LO_PATH,
+ ADRF6780_Q_PATH_PHASE_ACCURACY_MSK,
+ FIELD_PREP(ADRF6780_Q_PATH_PHASE_ACCURACY_MSK, val));
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adrf6780_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int write_val,
+ unsigned int *read_val)
+{
+ struct adrf6780_state *st = iio_priv(indio_dev);
+
+ if (read_val)
+ return adrf6780_spi_read(st, reg, read_val);
+ else
+ return adrf6780_spi_write(st, reg, write_val);
+}
+
+static const struct iio_info adrf6780_info = {
+ .read_raw = adrf6780_read_raw,
+ .write_raw = adrf6780_write_raw,
+ .debugfs_reg_access = &adrf6780_reg_access,
+};
+
+#define ADRF6780_CHAN_ADC(_channel) { \
+ .type = IIO_ALTVOLTAGE, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+}
+
+#define ADRF6780_CHAN_RDAC(_channel) { \
+ .type = IIO_ALTVOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE) \
+}
+
+#define ADRF6780_CHAN_IQ_PHASE(_channel, rf_comp) { \
+ .type = IIO_ALTVOLTAGE, \
+ .modified = 1, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel2 = IIO_MOD_##rf_comp, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PHASE) \
+}
+
+static const struct iio_chan_spec adrf6780_channels[] = {
+ ADRF6780_CHAN_ADC(0),
+ ADRF6780_CHAN_RDAC(0),
+ ADRF6780_CHAN_IQ_PHASE(0, I),
+ ADRF6780_CHAN_IQ_PHASE(0, Q),
+};
+
+static int adrf6780_reset(struct adrf6780_state *st)
+{
+ int ret;
+ struct spi_device *spi = st->spi;
+
+ ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_CONTROL,
+ ADRF6780_SOFT_RESET_MSK,
+ FIELD_PREP(ADRF6780_SOFT_RESET_MSK, 1));
+ if (ret) {
+ dev_err(&spi->dev, "ADRF6780 SPI software reset failed.\n");
+ return ret;
+ }
+
+ ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_CONTROL,
+ ADRF6780_SOFT_RESET_MSK,
+ FIELD_PREP(ADRF6780_SOFT_RESET_MSK, 0));
+ if (ret) {
+ dev_err(&spi->dev, "ADRF6780 SPI software reset disable failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adrf6780_init(struct adrf6780_state *st)
+{
+ int ret;
+ unsigned int chip_id, enable_reg, enable_reg_msk;
+ struct spi_device *spi = st->spi;
+
+ /* Perform a software reset */
+ ret = adrf6780_reset(st);
+ if (ret)
+ return ret;
+
+ ret = __adrf6780_spi_read(st, ADRF6780_REG_CONTROL, &chip_id);
+ if (ret)
+ return ret;
+
+ chip_id = FIELD_GET(ADRF6780_CHIP_ID_MSK, chip_id);
+ if (chip_id != ADRF6780_CHIP_ID) {
+ dev_err(&spi->dev, "ADRF6780 Invalid Chip ID.\n");
+ return -EINVAL;
+ }
+
+ enable_reg_msk = ADRF6780_VGA_BUFFER_EN_MSK |
+ ADRF6780_DETECTOR_EN_MSK |
+ ADRF6780_LO_BUFFER_EN_MSK |
+ ADRF6780_IF_MODE_EN_MSK |
+ ADRF6780_IQ_MODE_EN_MSK |
+ ADRF6780_LO_X2_EN_MSK |
+ ADRF6780_LO_PPF_EN_MSK |
+ ADRF6780_LO_EN_MSK |
+ ADRF6780_UC_BIAS_EN_MSK;
+
+ enable_reg = FIELD_PREP(ADRF6780_VGA_BUFFER_EN_MSK, st->vga_buff_en) |
+ FIELD_PREP(ADRF6780_DETECTOR_EN_MSK, 1) |
+ FIELD_PREP(ADRF6780_LO_BUFFER_EN_MSK, st->lo_buff_en) |
+ FIELD_PREP(ADRF6780_IF_MODE_EN_MSK, st->if_mode_en) |
+ FIELD_PREP(ADRF6780_IQ_MODE_EN_MSK, st->iq_mode_en) |
+ FIELD_PREP(ADRF6780_LO_X2_EN_MSK, st->lo_x2_en) |
+ FIELD_PREP(ADRF6780_LO_PPF_EN_MSK, st->lo_ppf_en) |
+ FIELD_PREP(ADRF6780_LO_EN_MSK, st->lo_en) |
+ FIELD_PREP(ADRF6780_UC_BIAS_EN_MSK, st->uc_bias_en);
+
+ ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_ENABLE,
+ enable_reg_msk, enable_reg);
+ if (ret)
+ return ret;
+
+ ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_LO_PATH,
+ ADRF6780_LO_SIDEBAND_MSK,
+ FIELD_PREP(ADRF6780_LO_SIDEBAND_MSK, st->lo_sideband));
+ if (ret)
+ return ret;
+
+ return __adrf6780_spi_update_bits(st, ADRF6780_REG_ADC_CONTROL,
+ ADRF6780_VDET_OUTPUT_SELECT_MSK,
+ FIELD_PREP(ADRF6780_VDET_OUTPUT_SELECT_MSK, st->vdet_out_en));
+}
+
+static void adrf6780_properties_parse(struct adrf6780_state *st)
+{
+ struct spi_device *spi = st->spi;
+
+ st->vga_buff_en = device_property_read_bool(&spi->dev, "adi,vga-buff-en");
+ st->lo_buff_en = device_property_read_bool(&spi->dev, "adi,lo-buff-en");
+ st->if_mode_en = device_property_read_bool(&spi->dev, "adi,if-mode-en");
+ st->iq_mode_en = device_property_read_bool(&spi->dev, "adi,iq-mode-en");
+ st->lo_x2_en = device_property_read_bool(&spi->dev, "adi,lo-x2-en");
+ st->lo_ppf_en = device_property_read_bool(&spi->dev, "adi,lo-ppf-en");
+ st->lo_en = device_property_read_bool(&spi->dev, "adi,lo-en");
+ st->uc_bias_en = device_property_read_bool(&spi->dev, "adi,uc-bias-en");
+ st->lo_sideband = device_property_read_bool(&spi->dev, "adi,lo-sideband");
+ st->vdet_out_en = device_property_read_bool(&spi->dev, "adi,vdet-out-en");
+}
+
+static void adrf6780_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void adrf6780_powerdown(void *data)
+{
+ /* Disable all components in the Enable Register */
+ adrf6780_spi_write(data, ADRF6780_REG_ENABLE, 0x0);
+}
+
+static int adrf6780_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adrf6780_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ indio_dev->info = &adrf6780_info;
+ indio_dev->name = "adrf6780";
+ indio_dev->channels = adrf6780_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adrf6780_channels);
+
+ st->spi = spi;
+
+ adrf6780_properties_parse(st);
+
+ st->clkin = devm_clk_get(&spi->dev, "lo_in");
+ if (IS_ERR(st->clkin))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
+ "failed to get the LO input clock\n");
+
+ ret = clk_prepare_enable(st->clkin);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, adrf6780_clk_disable,
+ st->clkin);
+ if (ret)
+ return ret;
+
+ mutex_init(&st->lock);
+
+ ret = adrf6780_init(st);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, adrf6780_powerdown, st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id adrf6780_id[] = {
+ { "adrf6780", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adrf6780_id);
+
+static const struct of_device_id adrf6780_of_match[] = {
+ { .compatible = "adi,adrf6780" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adrf6780_of_match);
+
+static struct spi_driver adrf6780_driver = {
+ .driver = {
+ .name = "adrf6780",
+ .of_match_table = adrf6780_of_match,
+ },
+ .probe = adrf6780_probe,
+ .id_table = adrf6780_id,
+};
+module_spi_driver(adrf6780_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com");
+MODULE_DESCRIPTION("Analog Devices ADRF6780");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 20b5ac7ab66a..a672f7d12bbb 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -126,7 +126,6 @@ config MPU3050
config MPU3050_I2C
tristate "Invensense MPU3050 devices on I2C"
- depends on !(INPUT_MPU3050=y || INPUT_MPU3050=m)
depends on I2C
select MPU3050
select REGMAP_I2C
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index e2f4d943e220..acef59d822b1 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -195,8 +195,6 @@ static int adis16080_probe(struct spi_device *spi)
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
mutex_init(&st->lock);
@@ -210,13 +208,7 @@ static int adis16080_probe(struct spi_device *spi)
indio_dev->info = &adis16080_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- return iio_device_register(indio_dev);
-}
-
-static int adis16080_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id adis16080_ids[] = {
@@ -231,7 +223,6 @@ static struct spi_driver adis16080_driver = {
.name = "adis16080",
},
.probe = adis16080_probe,
- .remove = adis16080_remove,
.id_table = adis16080_ids,
};
module_spi_driver(adis16080_driver);
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 3225de1f023b..ea387efab62d 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -471,13 +471,10 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
int ret;
- /*
- * Temperature 1*16 bits
- * Three axes 3*16 bits
- * Timestamp 64 bits (4*16 bits)
- * Sum total 8*16 bits
- */
- __be16 hw_values[8];
+ struct {
+ __be16 chans[4];
+ s64 timestamp __aligned(8);
+ } scan;
s64 timestamp;
unsigned int datums_from_fifo = 0;
@@ -572,9 +569,10 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
fifo_values[4]);
/* Index past the footer (fifo_values[0]) and push */
- iio_push_to_buffers_with_timestamp(indio_dev,
- &fifo_values[1],
- timestamp);
+ iio_push_to_buffers_with_ts_unaligned(indio_dev,
+ &fifo_values[1],
+ sizeof(__be16) * 4,
+ timestamp);
fifocnt -= toread;
datums_from_fifo++;
@@ -632,15 +630,15 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
goto out_trigger_unlock;
}
- ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H, &hw_values,
- sizeof(hw_values));
+ ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H, scan.chans,
+ sizeof(scan.chans));
if (ret) {
dev_err(mpu3050->dev,
"error reading axis data\n");
goto out_trigger_unlock;
}
- iio_push_to_buffers_with_timestamp(indio_dev, hw_values, timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, timestamp);
out_trigger_unlock:
mutex_unlock(&mpu3050->lock);
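This fix matters because iio_push_to_buffers_with_timestamp() assumes the scan it is handed is scan_bytes long and ends in a naturally aligned s64 slot for the timestamp; a bare __be16 array guarantees neither the size nor the alignment. A sketch of the expected layout, with an illustrative field count (temperature plus three axes, as above):

#include <linux/types.h>

struct mydev_scan {
	__be16 chans[4];		/* temperature + x/y/z */
	s64 timestamp __aligned(8);	/* filled in by the IIO core */
};

For the FIFO path, where the data sits at an odd offset inside a larger hardware buffer, the new iio_push_to_buffers_with_ts_unaligned() helper bounces the data instead of requiring the driver to restructure it.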
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index e8fc8af65143..201050b76fe5 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -478,6 +478,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *gdata = iio_priv(indio_dev);
struct st_sensors_platform_data *pdata;
+ struct device *parent = indio_dev->dev.parent;
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -491,7 +492,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = gdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
- err = iio_read_mount_matrix(gdata->dev, &gdata->mount_matrix);
+ err = iio_read_mount_matrix(parent, &gdata->mount_matrix);
if (err)
return err;
@@ -515,32 +516,10 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
return err;
}
- err = iio_device_register(indio_dev);
- if (err)
- goto st_gyro_device_register_error;
-
- dev_info(&indio_dev->dev, "registered gyroscope %s\n",
- indio_dev->name);
-
- return 0;
-
-st_gyro_device_register_error:
- if (gdata->irq > 0)
- st_sensors_deallocate_trigger(indio_dev);
- return err;
+ return devm_iio_device_register(parent, indio_dev);
}
EXPORT_SYMBOL(st_gyro_common_probe);
-void st_gyro_common_remove(struct iio_dev *indio_dev)
-{
- struct st_sensor_data *gdata = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (gdata->irq > 0)
- st_sensors_deallocate_trigger(indio_dev);
-}
-EXPORT_SYMBOL(st_gyro_common_remove);
-
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics gyroscopes driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 3ef86e16ee65..163c7ba300c1 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -90,27 +90,7 @@ static int st_gyro_i2c_probe(struct i2c_client *client,
if (err)
return err;
- err = st_gyro_common_probe(indio_dev);
- if (err < 0)
- goto st_gyro_power_off;
-
- return 0;
-
-st_gyro_power_off:
- st_sensors_power_disable(indio_dev);
-
- return err;
-}
-
-static int st_gyro_i2c_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- st_sensors_power_disable(indio_dev);
-
- st_gyro_common_remove(indio_dev);
-
- return 0;
+ return st_gyro_common_probe(indio_dev);
}
static const struct i2c_device_id st_gyro_id_table[] = {
@@ -133,7 +113,6 @@ static struct i2c_driver st_gyro_driver = {
.of_match_table = st_gyro_of_match,
},
.probe = st_gyro_i2c_probe,
- .remove = st_gyro_i2c_remove,
.id_table = st_gyro_id_table,
};
module_i2c_driver(st_gyro_driver);
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index 41d835493347..b0023f9b9771 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -94,27 +94,7 @@ static int st_gyro_spi_probe(struct spi_device *spi)
if (err)
return err;
- err = st_gyro_common_probe(indio_dev);
- if (err < 0)
- goto st_gyro_power_off;
-
- return 0;
-
-st_gyro_power_off:
- st_sensors_power_disable(indio_dev);
-
- return err;
-}
-
-static int st_gyro_spi_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- st_sensors_power_disable(indio_dev);
-
- st_gyro_common_remove(indio_dev);
-
- return 0;
+ return st_gyro_common_probe(indio_dev);
}
static const struct spi_device_id st_gyro_id_table[] = {
@@ -137,7 +117,6 @@ static struct spi_driver st_gyro_driver = {
.of_match_table = st_gyro_of_match,
},
.probe = st_gyro_spi_probe,
- .remove = st_gyro_spi_remove,
.id_table = st_gyro_id_table,
};
module_spi_driver(st_gyro_driver);
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index d4921385aaf7..97b82f9a8e45 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -487,10 +487,10 @@ static int afe4403_probe(struct spi_device *spi)
}
afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
- if (IS_ERR(afe->regulator)) {
- dev_err(afe->dev, "Unable to get regulator\n");
- return PTR_ERR(afe->regulator);
- }
+ if (IS_ERR(afe->regulator))
+ return dev_err_probe(afe->dev, PTR_ERR(afe->regulator),
+ "Unable to get regulator\n");
+
ret = regulator_enable(afe->regulator);
if (ret) {
dev_err(afe->dev, "Unable to enable regulator\n");
@@ -589,10 +589,8 @@ static int afe4403_remove(struct spi_device *spi)
iio_trigger_unregister(afe->trig);
ret = regulator_disable(afe->regulator);
- if (ret) {
- dev_err(afe->dev, "Unable to disable regulator\n");
- return ret;
- }
+ if (ret)
+ dev_warn(afe->dev, "Unable to disable regulator\n");
return 0;
}
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index d8a27dfe074a..7ef3f5e34de5 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -494,10 +494,10 @@ static int afe4404_probe(struct i2c_client *client,
}
afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
- if (IS_ERR(afe->regulator)) {
- dev_err(afe->dev, "Unable to get regulator\n");
- return PTR_ERR(afe->regulator);
- }
+ if (IS_ERR(afe->regulator))
+ return dev_err_probe(afe->dev, PTR_ERR(afe->regulator),
+ "Unable to get regulator\n");
+
ret = regulator_enable(afe->regulator);
if (ret) {
dev_err(afe->dev, "Unable to enable regulator\n");
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 8f4a9b264962..61e318431de9 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -68,12 +68,15 @@ __poll_t iio_buffer_poll_wrapper(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps);
+ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
+ size_t n, loff_t *f_ps);
int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev);
void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev);
#define iio_buffer_poll_addr (&iio_buffer_poll_wrapper)
#define iio_buffer_read_outer_addr (&iio_buffer_read_wrapper)
+#define iio_buffer_write_outer_addr (&iio_buffer_write_wrapper)
void iio_disable_all_buffers(struct iio_dev *indio_dev);
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
@@ -83,6 +86,7 @@ void iio_device_detach_buffers(struct iio_dev *indio_dev);
#define iio_buffer_poll_addr NULL
#define iio_buffer_read_outer_addr NULL
+#define iio_buffer_write_outer_addr NULL
static inline int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index b9a06ca29bee..cb0d66bf6561 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -286,6 +286,13 @@ int adis_enable_irq(struct adis *adis, bool enable)
if (adis->data->enable_irq) {
ret = adis->data->enable_irq(adis, enable);
goto out_unlock;
+ } else if (adis->data->unmasked_drdy) {
+ if (enable)
+ enable_irq(adis->spi->irq);
+ else
+ disable_irq(adis->spi->irq);
+
+ goto out_unlock;
}
ret = __adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
@@ -430,6 +437,14 @@ int __adis_initial_startup(struct adis *adis)
if (ret)
return ret;
+ /*
+ * Don't bother calling this if we can't unmask the IRQ; in that case
+ * the IRQ is most likely not yet requested, and we will request it
+ * with 'IRQF_NO_AUTOEN' anyway.
+ */
+ if (!adis->data->unmasked_drdy)
+ adis_enable_irq(adis, false);
+
if (!adis->data->prod_id_reg)
return 0;
@@ -526,7 +541,7 @@ int adis_init(struct adis *adis, struct iio_dev *indio_dev,
adis->current_page = 0;
}
- return adis_enable_irq(adis, false);
+ return 0;
}
EXPORT_SYMBOL_GPL(adis_init);
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index b12917a7cb60..9fd30e62d6e8 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -641,13 +641,23 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
if (ret)
dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
- if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
+ if (st->variant->flags & ADIS16400_BURST_DIAG_STAT) {
buffer = adis->buffer + sizeof(u16);
- else
- buffer = adis->buffer;
+ /*
+ * The size here is always larger than, or equal to, the true
+ * size of the channel data. This may result in a larger copy
+ * than necessary, but as the target buffer is
+ * indio_dev->scan_bytes in size this is safe.
+ */
+ iio_push_to_buffers_with_ts_unaligned(indio_dev, buffer,
+ indio_dev->scan_bytes - sizeof(pf->timestamp),
+ pf->timestamp);
+ } else {
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ adis->buffer,
+ pf->timestamp);
+ }
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index a6f9fba3e03f..b01988170118 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -319,20 +319,6 @@ static const struct iio_info adis16460_info = {
.debugfs_reg_access = adis_debugfs_reg_access,
};
-static int adis16460_enable_irq(struct adis *adis, bool enable)
-{
- /*
- * There is no way to gate the data-ready signal internally inside the
- * ADIS16460 :(
- */
- if (enable)
- enable_irq(adis->spi->irq);
- else
- disable_irq(adis->spi->irq);
-
- return 0;
-}
-
#define ADIS16460_DIAG_STAT_IN_CLK_OOS 7
#define ADIS16460_DIAG_STAT_FLASH_MEM 6
#define ADIS16460_DIAG_STAT_SELF_TEST 5
@@ -373,7 +359,7 @@ static const struct adis_data adis16460_data = {
BIT(ADIS16460_DIAG_STAT_OVERRANGE) |
BIT(ADIS16460_DIAG_STAT_SPI_COMM) |
BIT(ADIS16460_DIAG_STAT_FLASH_UPT),
- .enable_irq = adis16460_enable_irq,
+ .unmasked_drdy = true,
.timeouts = &adis16460_timeouts,
};
@@ -400,8 +386,6 @@ static int adis16460_probe(struct spi_device *spi)
if (ret)
return ret;
- /* We cannot mask the interrupt, so ensure it isn't auto enabled */
- st->adis.irq_flag |= IRQF_NO_AUTOEN;
ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
if (ret)
return ret;
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 287fff39a927..ea91d127077d 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -607,20 +607,6 @@ static const char * const adis16475_status_error_msgs[] = {
[ADIS16475_DIAG_STAT_CLK] = "Clock error",
};
-static int adis16475_enable_irq(struct adis *adis, bool enable)
-{
- /*
- * There is no way to gate the data-ready signal internally inside the
- * ADIS16475. We can only control it's polarity...
- */
- if (enable)
- enable_irq(adis->spi->irq);
- else
- disable_irq(adis->spi->irq);
-
- return 0;
-}
-
#define ADIS16475_DATA(_prod_id, _timeouts) \
{ \
.msc_ctrl_reg = ADIS16475_REG_MSG_CTRL, \
@@ -641,7 +627,7 @@ static int adis16475_enable_irq(struct adis *adis, bool enable)
BIT(ADIS16475_DIAG_STAT_SENSOR) | \
BIT(ADIS16475_DIAG_STAT_MEMORY) | \
BIT(ADIS16475_DIAG_STAT_CLK), \
- .enable_irq = adis16475_enable_irq, \
+ .unmasked_drdy = true, \
.timeouts = (_timeouts), \
.burst_reg_cmd = ADIS16475_REG_GLOB_CMD, \
.burst_len = ADIS16475_BURST_MAX_DATA, \
@@ -1255,9 +1241,6 @@ static int adis16475_config_irq_pin(struct adis16475 *st)
return -EINVAL;
}
- /* We cannot mask the interrupt so ensure it's not enabled at request */
- st->adis.irq_flag |= IRQF_NO_AUTOEN;
-
val = ADIS16475_MSG_CTRL_DR_POL(polarity);
ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
ADIS16475_MSG_CTRL_DR_POL_MASK, val);
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index 48eedc29b28a..c461bd1e8e69 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -30,6 +30,10 @@ static const struct iio_trigger_ops adis_trigger_ops = {
static int adis_validate_irq_flag(struct adis *adis)
{
unsigned long direction = adis->irq_flag & IRQF_TRIGGER_MASK;
+
+ /* We cannot mask the interrupt so ensure it's not enabled at request */
+ if (adis->data->unmasked_drdy)
+ adis->irq_flag |= IRQF_NO_AUTOEN;
/*
* Typically these devices have data ready either on the rising edge or
* on the falling edge of the data ready pin. This check enforces that
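With unmasked_drdy set, the adis core requests the data-ready line with IRQF_NO_AUTOEN and gates it purely through enable_irq()/disable_irq(), replacing the per-driver enable_irq callbacks deleted above. A minimal sketch of the idiom; the handler name and trigger polarity are illustrative:

ret = devm_request_irq(&spi->dev, spi->irq, mydev_drdy_handler,
		       IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN,
		       "mydev-drdy", st);
if (ret)
	return ret;

enable_irq(spi->irq);		/* start streaming */
/* ... */
disable_irq(spi->irq);		/* stop streaming */

IRQF_NO_AUTOEN leaves the line disabled after the request, so the first enable_irq() is balanced and nothing fires before the device is configured.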
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 95f16951c8f4..3ef17e3f50e2 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -249,7 +249,7 @@ static const struct of_device_id inv_of_match[] = {
};
MODULE_DEVICE_TABLE(of, inv_of_match);
-static const struct acpi_device_id inv_acpi_match[] = {
+static const struct acpi_device_id __maybe_unused inv_acpi_match[] = {
{"INVN6500", INV_MPU6500},
{ },
};
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
index f282e9cc34c5..6aee6c989485 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
@@ -261,6 +261,7 @@ int inv_mpu_magn_set_rate(const struct inv_mpu6050_state *st, int fifo_rate)
*/
int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st)
{
+ struct device *dev = regmap_get_device(st->map);
const char *orient;
char *str;
int i;
@@ -279,22 +280,27 @@ int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st)
st->magn_orient.rotation[4] = st->orientation.rotation[1];
st->magn_orient.rotation[5] = st->orientation.rotation[2];
/* z <- -z */
- for (i = 0; i < 3; ++i) {
- orient = st->orientation.rotation[6 + i];
- /* use length + 2 for adding minus sign if needed */
- str = devm_kzalloc(regmap_get_device(st->map),
- strlen(orient) + 2, GFP_KERNEL);
- if (str == NULL)
+ for (i = 6; i < 9; ++i) {
+ orient = st->orientation.rotation[i];
+
+ /*
+ * The value is negated according to one of the following
+ * rules:
+ *
+ * 1) Drop leading minus.
+ * 2) Leave 0 as is.
+ * 3) Add leading minus.
+ */
+ if (orient[0] == '-')
+ str = devm_kstrdup(dev, orient + 1, GFP_KERNEL);
+ else if (!strcmp(orient, "0"))
+ str = devm_kstrdup(dev, orient, GFP_KERNEL);
+ else
+ str = devm_kasprintf(dev, GFP_KERNEL, "-%s", orient);
+ if (!str)
return -ENOMEM;
- if (strcmp(orient, "0") == 0) {
- strcpy(str, orient);
- } else if (orient[0] == '-') {
- strcpy(str, &orient[1]);
- } else {
- str[0] = '-';
- strcpy(&str[1], orient);
- }
- st->magn_orient.rotation[6 + i] = str;
+
+ st->magn_orient.rotation[i] = str;
}
break;
default:
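The three rules amount to string negation of each element in the matrix's z row. A few worked examples, with illustrative values:

/*
 *   "-0.984807753012208" -> "0.984807753012208"   (rule 1: drop '-')
 *   "0"                  -> "0"                   (rule 2: keep as is)
 *   "0.173648177666930"  -> "-0.173648177666930"  (rule 3: prepend '-')
 */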
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 5ef55763a6cc..6ac4eac36458 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -143,6 +143,7 @@ struct st_lsm6dsx_fs_table_entry {
* @read_fifo: Read FIFO callback.
* @fifo_th: FIFO threshold register info (addr + mask).
* @fifo_diff: FIFO diff status register info (addr + mask).
+ * @max_size: Sensor max fifo length in FIFO words.
* @th_wl: FIFO threshold word length.
*/
struct st_lsm6dsx_fifo_ops {
@@ -156,6 +157,7 @@ struct st_lsm6dsx_fifo_ops {
u8 addr;
u16 mask;
} fifo_diff;
+ u16 max_size;
u8 th_wl;
};
@@ -271,7 +273,6 @@ struct st_lsm6dsx_ext_dev_settings {
* @reset: register address for reset.
* @boot: register address for boot.
* @bdu: register address for Block Data Update.
- * @max_fifo_size: Sensor max fifo length in FIFO words.
* @id: List of hw id/device name supported by the driver configuration.
* @channels: IIO channels supported by the device.
* @irq_config: interrupts related registers.
@@ -288,7 +289,6 @@ struct st_lsm6dsx_settings {
struct st_lsm6dsx_reg reset;
struct st_lsm6dsx_reg boot;
struct st_lsm6dsx_reg bdu;
- u16 max_fifo_size;
struct {
enum st_lsm6dsx_hw_id hw_id;
const char *name;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index db45f1fc0b81..f2cbbc756459 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -102,7 +102,6 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x22,
.mask = BIT(6),
},
- .max_fifo_size = 32,
.id = {
{
.hw_id = ST_LSM9DS1_ID,
@@ -194,6 +193,9 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = BIT(4),
},
},
+ .fifo_ops = {
+ .max_size = 32,
+ },
},
{
.reset = {
@@ -208,7 +210,6 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x12,
.mask = BIT(6),
},
- .max_fifo_size = 1365,
.id = {
{
.hw_id = ST_LSM6DS3_ID,
@@ -329,6 +330,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x3a,
.mask = GENMASK(11, 0),
},
+ .max_size = 1365,
.th_wl = 3, /* 1LSB = 2B */
},
.ts_settings = {
@@ -374,7 +376,6 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x12,
.mask = BIT(6),
},
- .max_fifo_size = 682,
.id = {
{
.hw_id = ST_LSM6DS3H_ID,
@@ -495,6 +496,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x3a,
.mask = GENMASK(11, 0),
},
+ .max_size = 682,
.th_wl = 3, /* 1LSB = 2B */
},
.ts_settings = {
@@ -540,7 +542,6 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x12,
.mask = BIT(6),
},
- .max_fifo_size = 682,
.id = {
{
.hw_id = ST_LSM6DSL_ID,
@@ -677,6 +678,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x3a,
.mask = GENMASK(10, 0),
},
+ .max_size = 682,
.th_wl = 3, /* 1LSB = 2B */
},
.ts_settings = {
@@ -759,7 +761,6 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x12,
.mask = BIT(6),
},
- .max_fifo_size = 512,
.id = {
{
.hw_id = ST_LSM6DSR_ID,
@@ -910,6 +911,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x3a,
.mask = GENMASK(9, 0),
},
+ .max_size = 512,
.th_wl = 1,
},
.ts_settings = {
@@ -984,7 +986,6 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x12,
.mask = BIT(6),
},
- .max_fifo_size = 512,
.id = {
{
.hw_id = ST_ASM330LHH_ID,
@@ -1119,6 +1120,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x3a,
.mask = GENMASK(9, 0),
},
+ .max_size = 512,
.th_wl = 1,
},
.ts_settings = {
@@ -1279,6 +1281,8 @@ st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u32 req_odr)
int err;
switch (sensor->id) {
+ case ST_LSM6DSX_ID_GYRO:
+ break;
case ST_LSM6DSX_ID_EXT0:
case ST_LSM6DSX_ID_EXT1:
case ST_LSM6DSX_ID_EXT2:
@@ -1304,8 +1308,8 @@ st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u32 req_odr)
}
break;
}
- default:
- break;
+ default: /* should never occur */
+ return -EINVAL;
}
if (req_odr > 0) {
@@ -1603,7 +1607,7 @@ int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val)
struct st_lsm6dsx_hw *hw = sensor->hw;
int err;
- if (val < 1 || val > hw->settings->max_fifo_size)
+ if (val < 1 || val > hw->settings->fifo_ops.max_size)
return -EINVAL;
mutex_lock(&hw->conf_lock);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
index 146393afd9a7..76678cdefb07 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
@@ -18,6 +18,5 @@ struct st_lsm9ds0 {
};
int st_lsm9ds0_probe(struct st_lsm9ds0 *lsm9ds0, struct regmap *regmap);
-int st_lsm9ds0_remove(struct st_lsm9ds0 *lsm9ds0);
#endif /* ST_LSM9DS0_H */
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
index 5e6625140db7..9fb06b7cde3c 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
@@ -24,10 +24,10 @@ static int st_lsm9ds0_power_enable(struct device *dev, struct st_lsm9ds0 *lsm9ds
/* Regulators not mandatory, but if requested we should enable them. */
lsm9ds0->vdd = devm_regulator_get(dev, "vdd");
- if (IS_ERR(lsm9ds0->vdd)) {
- dev_err(dev, "unable to get Vdd supply\n");
- return PTR_ERR(lsm9ds0->vdd);
- }
+ if (IS_ERR(lsm9ds0->vdd))
+ return dev_err_probe(dev, PTR_ERR(lsm9ds0->vdd),
+ "unable to get Vdd supply\n");
+
ret = regulator_enable(lsm9ds0->vdd);
if (ret) {
dev_warn(dev, "Failed to enable specified Vdd supply\n");
@@ -36,9 +36,9 @@ static int st_lsm9ds0_power_enable(struct device *dev, struct st_lsm9ds0 *lsm9ds
lsm9ds0->vdd_io = devm_regulator_get(dev, "vddio");
if (IS_ERR(lsm9ds0->vdd_io)) {
- dev_err(dev, "unable to get Vdd_IO supply\n");
regulator_disable(lsm9ds0->vdd);
- return PTR_ERR(lsm9ds0->vdd_io);
+ return dev_err_probe(dev, PTR_ERR(lsm9ds0->vdd_io),
+ "unable to get Vdd_IO supply\n");
}
ret = regulator_enable(lsm9ds0->vdd_io);
if (ret) {
@@ -90,7 +90,6 @@ static int st_lsm9ds0_probe_accel(struct st_lsm9ds0 *lsm9ds0, struct regmap *reg
data = iio_priv(lsm9ds0->accel);
data->sensor_settings = (struct st_sensor_settings *)settings;
- data->dev = dev;
data->irq = lsm9ds0->irq;
data->regmap = regmap;
data->vdd = lsm9ds0->vdd;
@@ -119,7 +118,6 @@ static int st_lsm9ds0_probe_magn(struct st_lsm9ds0 *lsm9ds0, struct regmap *regm
data = iio_priv(lsm9ds0->magn);
data->sensor_settings = (struct st_sensor_settings *)settings;
- data->dev = dev;
data->irq = lsm9ds0->irq;
data->regmap = regmap;
data->vdd = lsm9ds0->vdd;
@@ -142,23 +140,10 @@ int st_lsm9ds0_probe(struct st_lsm9ds0 *lsm9ds0, struct regmap *regmap)
return ret;
/* Setup magnetometer device */
- ret = st_lsm9ds0_probe_magn(lsm9ds0, regmap);
- if (ret)
- st_accel_common_remove(lsm9ds0->accel);
-
- return ret;
+ return st_lsm9ds0_probe_magn(lsm9ds0, regmap);
}
EXPORT_SYMBOL_GPL(st_lsm9ds0_probe);
-int st_lsm9ds0_remove(struct st_lsm9ds0 *lsm9ds0)
-{
- st_magn_common_remove(lsm9ds0->magn);
- st_accel_common_remove(lsm9ds0->accel);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(st_lsm9ds0_remove);
-
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_DESCRIPTION("STMicroelectronics LSM9DS0 IMU core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
index 78bede358747..8f205c477e6f 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
@@ -64,18 +64,12 @@ static int st_lsm9ds0_i2c_probe(struct i2c_client *client)
return st_lsm9ds0_probe(lsm9ds0, regmap);
}
-static int st_lsm9ds0_i2c_remove(struct i2c_client *client)
-{
- return st_lsm9ds0_remove(i2c_get_clientdata(client));
-}
-
static struct i2c_driver st_lsm9ds0_driver = {
.driver = {
.name = "st-lsm9ds0-i2c",
.of_match_table = st_lsm9ds0_of_match,
},
.probe_new = st_lsm9ds0_i2c_probe,
- .remove = st_lsm9ds0_i2c_remove,
.id_table = st_lsm9ds0_id_table,
};
module_i2c_driver(st_lsm9ds0_driver);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
index 180b54e66438..0ddfa53166af 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
@@ -63,18 +63,12 @@ static int st_lsm9ds0_spi_probe(struct spi_device *spi)
return st_lsm9ds0_probe(lsm9ds0, regmap);
}
-static int st_lsm9ds0_spi_remove(struct spi_device *spi)
-{
- return st_lsm9ds0_remove(spi_get_drvdata(spi));
-}
-
static struct spi_driver st_lsm9ds0_driver = {
.driver = {
.name = "st-lsm9ds0-spi",
.of_match_table = st_lsm9ds0_of_match,
},
.probe = st_lsm9ds0_spi_probe,
- .remove = st_lsm9ds0_spi_remove,
.id_table = st_lsm9ds0_id_table,
};
module_spi_driver(st_lsm9ds0_driver);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index a95cc2da56be..e180728914c0 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -120,6 +120,9 @@ static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
if (!rb || !rb->access->read)
return -EINVAL;
+ if (rb->direction != IIO_BUFFER_DIRECTION_IN)
+ return -EPERM;
+
datum_size = rb->bytes_per_datum;
/*
@@ -161,6 +164,65 @@ static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
return ret;
}
+static size_t iio_buffer_space_available(struct iio_buffer *buf)
+{
+ if (buf->access->space_available)
+ return buf->access->space_available(buf);
+
+ return SIZE_MAX;
+}
+
+static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
+ size_t n, loff_t *f_ps)
+{
+ struct iio_dev_buffer_pair *ib = filp->private_data;
+ struct iio_buffer *rb = ib->buffer;
+ struct iio_dev *indio_dev = ib->indio_dev;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret = 0;
+ size_t written;
+
+ if (!indio_dev->info)
+ return -ENODEV;
+
+ if (!rb || !rb->access->write)
+ return -EINVAL;
+
+ if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
+ return -EPERM;
+
+ written = 0;
+ add_wait_queue(&rb->pollq, &wait);
+ do {
+ if (!indio_dev->info) {
+ /* break, don't return: the wait queue entry must be removed */
+ ret = -ENODEV;
+ break;
+ }
+
+ if (!iio_buffer_space_available(rb)) {
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ continue;
+ }
+
+ ret = rb->access->write(rb, n - written, buf + written);
+ if (ret == 0 && (filp->f_flags & O_NONBLOCK))
+ ret = -EAGAIN;
+
+ if (ret > 0) {
+ written += ret;
+ if (written != n && !(filp->f_flags & O_NONBLOCK))
+ continue;
+ }
+ } while (ret == 0);
+ remove_wait_queue(&rb->pollq, &wait);
+
+ return ret < 0 ? ret : written;
+}
+
/**
* iio_buffer_poll() - poll the buffer to find out if it has data
* @filp: File structure pointer for device access
@@ -181,8 +243,18 @@ static __poll_t iio_buffer_poll(struct file *filp,
return 0;
poll_wait(filp, &rb->pollq, wait);
- if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
- return EPOLLIN | EPOLLRDNORM;
+
+ switch (rb->direction) {
+ case IIO_BUFFER_DIRECTION_IN:
+ if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
+ return EPOLLIN | EPOLLRDNORM;
+ break;
+ case IIO_BUFFER_DIRECTION_OUT:
+ if (iio_buffer_space_available(rb))
+ return EPOLLOUT | EPOLLWRNORM;
+ break;
+ }
+
return 0;
}
@@ -199,6 +271,19 @@ ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
return iio_buffer_read(filp, buf, n, f_ps);
}
+ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
+ size_t n, loff_t *f_ps)
+{
+ struct iio_dev_buffer_pair *ib = filp->private_data;
+ struct iio_buffer *rb = ib->buffer;
+
+ /* check if buffer was opened through new API */
+ if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
+ return -EBUSY;
+
+ return iio_buffer_write(filp, buf, n, f_ps);
+}
+
__poll_t iio_buffer_poll_wrapper(struct file *filp,
struct poll_table_struct *wait)
{
@@ -231,6 +316,15 @@ void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
}
}
+int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
+{
+ if (!buffer || !buffer->access || !buffer->access->remove_from)
+ return -EINVAL;
+
+ return buffer->access->remove_from(buffer, data);
+}
+EXPORT_SYMBOL_GPL(iio_pop_from_buffer);
+
void iio_buffer_init(struct iio_buffer *buffer)
{
INIT_LIST_HEAD(&buffer->demux_list);
@@ -1156,6 +1250,10 @@ int iio_update_buffers(struct iio_dev *indio_dev,
if (insert_buffer == remove_buffer)
return 0;
+ if (insert_buffer &&
+ (insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT))
+ return -EINVAL;
+
mutex_lock(&iio_dev_opaque->info_exist_lock);
mutex_lock(&indio_dev->mlock);
@@ -1277,6 +1375,22 @@ static ssize_t iio_dma_show_data_available(struct device *dev,
return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
}
+static ssize_t direction_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
+
+ switch (buffer->direction) {
+ case IIO_BUFFER_DIRECTION_IN:
+ return sprintf(buf, "in\n");
+ case IIO_BUFFER_DIRECTION_OUT:
+ return sprintf(buf, "out\n");
+ default:
+ return -EINVAL;
+ }
+}
+
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
@@ -1289,12 +1403,20 @@ static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
S_IRUGO, iio_buffer_show_watermark, NULL);
static DEVICE_ATTR(data_available, S_IRUGO,
iio_dma_show_data_available, NULL);
+static DEVICE_ATTR_RO(direction);
+/*
+ * When adding new attributes here, put them at the end, at least until
+ * the code that handles the length/length_ro & watermark/watermark_ro
+ * assignments gets cleaned up. Otherwise these can create some weird
+ * duplicate attribute errors under some setups.
+ */
static struct attribute *iio_buffer_attrs[] = {
&dev_attr_length.attr,
&dev_attr_enable.attr,
&dev_attr_watermark.attr,
&dev_attr_data_available.attr,
+ &dev_attr_direction.attr,
};
#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
@@ -1312,6 +1434,11 @@ static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
iio_attr->buffer = buffer;
memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
+ if (!iio_attr->dev_attr.attr.name) {
+ kfree(iio_attr);
+ return NULL;
+ }
+
sysfs_attr_init(&iio_attr->dev_attr.attr);
list_add(&iio_attr->l, &buffer->buffer_attr_list);
@@ -1362,10 +1489,10 @@ static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
return 0;
-error_free_buffer_attrs:
- kfree(iio_dev_opaque->legacy_buffer_group.attrs);
error_free_scan_el_attrs:
kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
+error_free_buffer_attrs:
+ kfree(iio_dev_opaque->legacy_buffer_group.attrs);
return ret;
}
@@ -1397,6 +1524,7 @@ static const struct file_operations iio_buffer_chrdev_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.read = iio_buffer_read,
+ .write = iio_buffer_write,
.poll = iio_buffer_poll,
.release = iio_buffer_chrdev_release,
};
@@ -1531,6 +1659,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
sizeof(struct attribute *) * buffer_attrcount);
buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
+ buffer->buffer_group.attrs = attr;
for (i = 0; i < buffer_attrcount; i++) {
struct attribute *wrapped;
@@ -1538,7 +1667,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
if (!wrapped) {
ret = -ENOMEM;
- goto error_free_scan_mask;
+ goto error_free_buffer_attrs;
}
attr[i] = wrapped;
}
@@ -1553,8 +1682,6 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
goto error_free_buffer_attrs;
}
- buffer->buffer_group.attrs = attr;
-
ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
if (ret)
goto error_free_buffer_attr_group_name;
@@ -1583,8 +1710,12 @@ error_cleanup_dynamic:
return ret;
}
-static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
+static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
+ struct iio_dev *indio_dev,
+ int index)
{
+ if (index == 0)
+ iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
bitmap_free(buffer->scan_mask);
kfree(buffer->buffer_group.name);
kfree(buffer->buffer_group.attrs);
@@ -1616,7 +1747,7 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
buffer = iio_dev_opaque->attached_buffers[i];
ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, i);
if (ret) {
- unwind_idx = i;
+ unwind_idx = i - 1;
goto error_unwind_sysfs_and_mask;
}
}
@@ -1638,7 +1769,7 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
error_unwind_sysfs_and_mask:
for (; unwind_idx >= 0; unwind_idx--) {
buffer = iio_dev_opaque->attached_buffers[unwind_idx];
- __iio_buffer_free_sysfs_and_mask(buffer);
+ __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, unwind_idx);
}
return ret;
}
@@ -1655,11 +1786,9 @@ void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
kfree(iio_dev_opaque->buffer_ioctl_handler);
- iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
-
for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
buffer = iio_dev_opaque->attached_buffers[i];
- __iio_buffer_free_sysfs_and_mask(buffer);
+ __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
}
}
@@ -1732,6 +1861,52 @@ int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
/**
+ * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
+ * no alignment or space requirements.
+ * @indio_dev: iio_dev structure for device.
+ * @data: channel data excluding the timestamp.
+ * @data_sz: size of data.
+ * @timestamp: timestamp for the sample data.
+ *
+ * This special variant of iio_push_to_buffers_with_timestamp() does
+ * not require space for the timestamp, or 8 byte alignment of data.
+ * It does, however, require an allocation on the first call and
+ * additional copies on every call, so it should be avoided if possible.
+ */
+int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
+ const void *data,
+ size_t data_sz,
+ int64_t timestamp)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ /*
+ * Conservative estimate - we can always safely copy the minimum
+ * of the size of the data provided and the length of the
+ * destination buffer.
+ * This relaxed limit allows the calling drivers to be lax about
+ * tracking the size of the data they are pushing, at the cost of
+ * unnecessary copying of padding.
+ */
+ data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
+ if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) {
+ void *bb;
+
+ bb = devm_krealloc(&indio_dev->dev,
+ iio_dev_opaque->bounce_buffer,
+ indio_dev->scan_bytes, GFP_KERNEL);
+ if (!bb)
+ return -ENOMEM;
+ iio_dev_opaque->bounce_buffer = bb;
+ iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
+ }
+ memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
+ return iio_push_to_buffers_with_timestamp(indio_dev,
+ iio_dev_opaque->bounce_buffer,
+ timestamp);
+}
+EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
+
+/**
* iio_buffer_release() - Free a buffer's resources
* @ref: Pointer to the kref embedded in the iio_buffer struct
*
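On the consumer side, an output driver drains its buffer from a trigger handler through the iio_pop_from_buffer() helper added above. A minimal sketch assuming a hypothetical DAC driver; mydac_write_raw_sample() is illustrative:

static irqreturn_t mydac_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	u16 sample;
	int ret;

	/* Pops one scan from the front of the output buffer. */
	ret = iio_pop_from_buffer(indio_dev->buffer, &sample);
	if (ret == 0)
		mydac_write_raw_sample(indio_dev, sample);

	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

From userspace the flow mirrors the capture path: enable the buffer and write() scan-formatted samples to its character device; poll() reports EPOLLOUT | EPOLLWRNORM while space is available, and the read-only direction attribute distinguishes the two kinds of buffer.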
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 2dbb37e09b8c..463a63d5bf56 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1600,6 +1600,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
kfree(iio_dev_opaque->chan_attr_group.attrs);
iio_dev_opaque->chan_attr_group.attrs = NULL;
kfree(iio_dev_opaque->groups);
+ iio_dev_opaque->groups = NULL;
}
static void iio_dev_release(struct device *device)
@@ -1664,7 +1665,13 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
kfree(iio_dev_opaque);
return NULL;
}
- dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id);
+
+ if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) {
+ ida_simple_remove(&iio_ida, iio_dev_opaque->id);
+ kfree(iio_dev_opaque);
+ return NULL;
+ }
+
INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
@@ -1822,6 +1829,7 @@ static const struct file_operations iio_buffer_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.read = iio_buffer_read_outer_addr,
+ .write = iio_buffer_write_outer_addr,
.poll = iio_buffer_poll_addr,
.unlocked_ioctl = iio_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 391a3380a1d1..0222885b334c 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -85,6 +85,23 @@ int iio_map_array_unregister(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
+static void iio_map_array_unregister_cb(void *indio_dev)
+{
+ iio_map_array_unregister(indio_dev);
+}
+
+int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps)
+{
+ int ret;
+
+ ret = iio_map_array_register(indio_dev, maps);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
+}
+EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
+
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index 4c83953672be..3e7fb16ab1f6 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -159,6 +159,7 @@ static int cm3605_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
enum iio_chan_type ch_type;
u32 rset;
+ int irq;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*cm3605));
@@ -195,12 +196,9 @@ static int cm3605_probe(struct platform_device *pdev)
cm3605->aout = devm_iio_channel_get(dev, "aout");
if (IS_ERR(cm3605->aout)) {
- if (PTR_ERR(cm3605->aout) == -ENODEV) {
- dev_err(dev, "no ADC, deferring...\n");
- return -EPROBE_DEFER;
- }
- dev_err(dev, "failed to get AOUT ADC channel\n");
- return PTR_ERR(cm3605->aout);
+ ret = PTR_ERR(cm3605->aout);
+ ret = (ret == -ENODEV) ? -EPROBE_DEFER : ret;
+ return dev_err_probe(dev, ret, "failed to get AOUT ADC channel\n");
}
ret = iio_get_channel_type(cm3605->aout, &ch_type);
if (ret < 0)
@@ -211,10 +209,10 @@ static int cm3605_probe(struct platform_device *pdev)
}
cm3605->vdd = devm_regulator_get(dev, "vdd");
- if (IS_ERR(cm3605->vdd)) {
- dev_err(dev, "failed to get VDD regulator\n");
- return PTR_ERR(cm3605->vdd);
- }
+ if (IS_ERR(cm3605->vdd))
+ return dev_err_probe(dev, PTR_ERR(cm3605->vdd),
+ "failed to get VDD regulator\n");
+
ret = regulator_enable(cm3605->vdd);
if (ret) {
dev_err(dev, "failed to enable VDD regulator\n");
@@ -223,13 +221,16 @@ static int cm3605_probe(struct platform_device *pdev)
cm3605->aset = devm_gpiod_get(dev, "aset", GPIOD_OUT_HIGH);
if (IS_ERR(cm3605->aset)) {
- dev_err(dev, "no ASET GPIO\n");
- ret = PTR_ERR(cm3605->aset);
+ ret = dev_err_probe(dev, PTR_ERR(cm3605->aset), "no ASET GPIO\n");
goto out_disable_vdd;
}
- ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
- cm3605_prox_irq, NULL, 0, "cm3605", indio_dev);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = dev_err_probe(dev, irq, "failed to get irq\n");
+ goto out_disable_aset;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, cm3605_prox_irq,
+ NULL, 0, "cm3605", indio_dev);
if (ret) {
dev_err(dev, "unable to request IRQ\n");
goto out_disable_aset;
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index fd83a19929bc..89f5e48a6642 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -632,10 +632,9 @@ static int cm36651_probe(struct i2c_client *client,
cm36651 = iio_priv(indio_dev);
cm36651->vled_reg = devm_regulator_get(&client->dev, "vled");
- if (IS_ERR(cm36651->vled_reg)) {
- dev_err(&client->dev, "get regulator vled failed\n");
- return PTR_ERR(cm36651->vled_reg);
- }
+ if (IS_ERR(cm36651->vled_reg))
+ return dev_err_probe(&client->dev, PTR_ERR(cm36651->vled_reg),
+ "get regulator vled failed\n");
ret = regulator_enable(cm36651->vled_reg);
if (ret) {
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index f960be7d4001..c6d1d88d3775 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -503,12 +503,9 @@ static int gp2ap002_probe(struct i2c_client *client,
if (!gp2ap002->is_gp2ap002s00f) {
gp2ap002->alsout = devm_iio_channel_get(dev, "alsout");
if (IS_ERR(gp2ap002->alsout)) {
- if (PTR_ERR(gp2ap002->alsout) == -ENODEV) {
- dev_err(dev, "no ADC, deferring...\n");
- return -EPROBE_DEFER;
- }
- dev_err(dev, "failed to get ALSOUT ADC channel\n");
- return PTR_ERR(gp2ap002->alsout);
+ ret = PTR_ERR(gp2ap002->alsout);
+ ret = (ret == -ENODEV) ? -EPROBE_DEFER : ret;
+ return dev_err_probe(dev, ret, "failed to get ALSOUT ADC channel\n");
}
ret = iio_get_channel_type(gp2ap002->alsout, &ch_type);
if (ret < 0)
@@ -521,15 +518,14 @@ static int gp2ap002_probe(struct i2c_client *client,
}
gp2ap002->vdd = devm_regulator_get(dev, "vdd");
- if (IS_ERR(gp2ap002->vdd)) {
- dev_err(dev, "failed to get VDD regulator\n");
- return PTR_ERR(gp2ap002->vdd);
- }
+ if (IS_ERR(gp2ap002->vdd))
+ return dev_err_probe(dev, PTR_ERR(gp2ap002->vdd),
+ "failed to get VDD regulator\n");
+
gp2ap002->vio = devm_regulator_get(dev, "vio");
- if (IS_ERR(gp2ap002->vio)) {
- dev_err(dev, "failed to get VIO regulator\n");
- return PTR_ERR(gp2ap002->vio);
- }
+ if (IS_ERR(gp2ap002->vio))
+ return dev_err_probe(dev, PTR_ERR(gp2ap002->vio),
+ "failed to get VIO regulator\n");
/* Operating voltage 2.4V .. 3.6V according to datasheet */
ret = regulator_set_voltage(gp2ap002->vdd, 2400000, 3600000);
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 1830221da48d..7e51aaac0bf8 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/acpi.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/events.h>
@@ -151,6 +152,7 @@ struct ltr501_chip_info {
struct ltr501_data {
struct i2c_client *client;
+ struct regulator_bulk_data regulators[2];
struct mutex lock_als, lock_ps;
const struct ltr501_chip_info *chip_info;
u8 als_contr, ps_contr;
@@ -1379,6 +1381,13 @@ static const struct regmap_config ltr501_regmap_config = {
.volatile_reg = ltr501_is_volatile_reg,
};
+static void ltr501_disable_regulators(void *d)
+{
+ struct ltr501_data *data = d;
+
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
+}
+
static int ltr501_powerdown(struct ltr501_data *data)
{
return ltr501_write_contr(data, data->als_contr &
@@ -1423,6 +1432,25 @@ static int ltr501_probe(struct i2c_client *client,
mutex_init(&data->lock_als);
mutex_init(&data->lock_ps);
+ data->regulators[0].supply = "vdd";
+ data->regulators[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to get regulators\n");
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev,
+ ltr501_disable_regulators, data);
+ if (ret)
+ return ret;
+
data->reg_it = devm_regmap_field_alloc(&client->dev, regmap,
reg_field_it);
if (IS_ERR(data->reg_it)) {
@@ -1581,9 +1609,18 @@ static const struct i2c_device_id ltr501_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ltr501_id);
+static const struct of_device_id ltr501_of_match[] = {
+ { .compatible = "liteon,ltr501", },
+ { .compatible = "liteon,ltr559", },
+ { .compatible = "liteon,ltr301", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ltr501_of_match);
+
static struct i2c_driver ltr501_driver = {
.driver = {
.name = LTR501_DRV_NAME,
+ .of_match_table = ltr501_of_match,
.pm = &ltr501_pm_ops,
.acpi_match_table = ACPI_PTR(ltr_acpi_match),
},
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index b8e721bced5b..85689dffbcbf 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -540,7 +540,6 @@ static int max44000_probe(struct i2c_client *client,
return PTR_ERR(data->regmap);
}
- i2c_set_clientdata(client, indio_dev);
mutex_init(&data->lock);
indio_dev->info = &max44000_info;
indio_dev->name = MAX44000_DRV_NAME;
@@ -589,23 +588,14 @@ static int max44000_probe(struct i2c_client *client,
return ret;
}
- ret = iio_triggered_buffer_setup(indio_dev, NULL, max44000_trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev, NULL,
+ max44000_trigger_handler, NULL);
if (ret < 0) {
dev_err(&client->dev, "iio triggered buffer setup failed\n");
return ret;
}
- return iio_device_register(indio_dev);
-}
-
-static int max44000_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id max44000_id[] = {
@@ -628,7 +618,6 @@ static struct i2c_driver max44000_driver = {
.acpi_match_table = ACPI_PTR(max44000_acpi_match),
},
.probe = max44000_probe,
- .remove = max44000_remove,
.id_table = max44000_id,
};
diff --git a/drivers/iio/light/noa1305.c b/drivers/iio/light/noa1305.c
index a308fbc2fc7b..ee81fe083e4c 100644
--- a/drivers/iio/light/noa1305.c
+++ b/drivers/iio/light/noa1305.c
@@ -217,10 +217,9 @@ static int noa1305_probe(struct i2c_client *client,
priv = iio_priv(indio_dev);
priv->vin_reg = devm_regulator_get(&client->dev, "vin");
- if (IS_ERR(priv->vin_reg)) {
- dev_err(&client->dev, "get regulator vin failed\n");
- return PTR_ERR(priv->vin_reg);
- }
+ if (IS_ERR(priv->vin_reg))
+ return dev_err_probe(&client->dev, PTR_ERR(priv->vin_reg),
+ "get regulator vin failed\n");
ret = regulator_enable(priv->vin_reg);
if (ret) {
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 74ad5701c6c2..565ee41ccb3a 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -28,7 +28,7 @@ config AK8975
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Asahi Kasei AK8975, AK8963,
- AK09911 or AK09912 3-Axis Magnetometer.
+ AK09911, AK09912 or AK09916 3-Axis Magnetometer.
To compile this driver as a module, choose M here: the module
will be called ak8975.
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 42b8a2680e3a..6e82dc54a417 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -78,6 +78,7 @@
*/
#define AK09912_REG_WIA1 0x00
#define AK09912_REG_WIA2 0x01
+#define AK09916_DEVICE_ID 0x09
#define AK09912_DEVICE_ID 0x04
#define AK09911_DEVICE_ID 0x05
@@ -208,6 +209,7 @@ enum asahi_compass_chipset {
AK8963,
AK09911,
AK09912,
+ AK09916,
};
enum ak_ctrl_reg_addr {
@@ -345,6 +347,31 @@ static const struct ak_def ak_def_array[] = {
AK09912_REG_HXL,
AK09912_REG_HYL,
AK09912_REG_HZL},
+ },
+ {
+ .type = AK09916,
+ .raw_to_gauss = ak09912_raw_to_gauss,
+ .range = 32752,
+ .ctrl_regs = {
+ AK09912_REG_ST1,
+ AK09912_REG_ST2,
+ AK09912_REG_CNTL2,
+ AK09912_REG_ASAX,
+ AK09912_MAX_REGS},
+ .ctrl_masks = {
+ AK09912_REG_ST1_DRDY_MASK,
+ AK09912_REG_ST2_HOFL_MASK,
+ 0,
+ AK09912_REG_CNTL2_MODE_MASK},
+ .ctrl_modes = {
+ AK09912_REG_CNTL_MODE_POWER_DOWN,
+ AK09912_REG_CNTL_MODE_ONCE,
+ AK09912_REG_CNTL_MODE_SELF_TEST,
+ AK09912_REG_CNTL_MODE_FUSE_ROM},
+ .data_regs = {
+ AK09912_REG_HXL,
+ AK09912_REG_HYL,
+ AK09912_REG_HZL},
}
};
@@ -425,6 +452,7 @@ static int ak8975_who_i_am(struct i2c_client *client,
/*
* Signature for each device:
* Device | WIA1 | WIA2
+ * AK09916 | DEVICE_ID_| AK09916_DEVICE_ID
* AK09912 | DEVICE_ID | AK09912_DEVICE_ID
* AK09911 | DEVICE_ID | AK09911_DEVICE_ID
* AK8975 | DEVICE_ID | NA
@@ -452,6 +480,10 @@ static int ak8975_who_i_am(struct i2c_client *client,
if (wia_val[1] == AK09912_DEVICE_ID)
return 0;
break;
+ case AK09916:
+ if (wia_val[1] == AK09916_DEVICE_ID)
+ return 0;
+ break;
default:
dev_err(&client->dev, "Type %d unknown\n", type);
}
@@ -1057,6 +1089,7 @@ static const struct i2c_device_id ak8975_id[] = {
{"AK8963", AK8963},
{"ak09911", AK09911},
{"ak09912", AK09912},
+ {"ak09916", AK09916},
{}
};
@@ -1071,6 +1104,8 @@ static const struct of_device_id ak8975_of_match[] = {
{ .compatible = "ak09911", },
{ .compatible = "asahi-kasei,ak09912", },
{ .compatible = "ak09912", },
+ { .compatible = "asahi-kasei,ak09916", },
+ { .compatible = "ak09916", },
{}
};
MODULE_DEVICE_TABLE(of, ak8975_of_match);
diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
index 242f742f2643..9120c8bbf3dd 100644
--- a/drivers/iio/magnetometer/hmc5843.h
+++ b/drivers/iio/magnetometer/hmc5843.h
@@ -50,7 +50,7 @@ struct hmc5843_data {
int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
enum hmc5843_ids id, const char *name);
-int hmc5843_common_remove(struct device *dev);
+void hmc5843_common_remove(struct device *dev);
int hmc5843_common_suspend(struct device *dev);
int hmc5843_common_resume(struct device *dev);
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index cf62057480cf..f08726bf5ec3 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -671,7 +671,7 @@ buffer_setup_err:
}
EXPORT_SYMBOL(hmc5843_common_probe);
-int hmc5843_common_remove(struct device *dev)
+void hmc5843_common_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
@@ -680,8 +680,6 @@ int hmc5843_common_remove(struct device *dev)
/* sleep mode to save power */
hmc5843_set_mode(iio_priv(indio_dev), HMC5843_MODE_SLEEP);
-
- return 0;
}
EXPORT_SYMBOL(hmc5843_common_remove);
diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index 67fe657fdb3e..bc6e12f1d521 100644
--- a/drivers/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -67,7 +67,9 @@ static int hmc5843_i2c_probe(struct i2c_client *cli,
static int hmc5843_i2c_remove(struct i2c_client *client)
{
- return hmc5843_common_remove(&client->dev);
+ hmc5843_common_remove(&client->dev);
+
+ return 0;
}
static const struct i2c_device_id hmc5843_id[] = {
diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index d827554c346e..89cf59a62c28 100644
--- a/drivers/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -76,7 +76,9 @@ static int hmc5843_spi_probe(struct spi_device *spi)
static int hmc5843_spi_remove(struct spi_device *spi)
{
- return hmc5843_common_remove(&spi->dev);
+ hmc5843_common_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id hmc5843_id[] = {
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 9ffd50d796bf..0806a1e65ce4 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -611,7 +611,8 @@ EXPORT_SYMBOL(st_magn_get_settings);
int st_magn_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *mdata = iio_priv(indio_dev);
- struct st_sensors_platform_data *pdata = dev_get_platdata(mdata->dev);
+ struct device *parent = indio_dev->dev.parent;
+ struct st_sensors_platform_data *pdata = dev_get_platdata(parent);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -625,7 +626,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = mdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
- err = iio_read_mount_matrix(mdata->dev, &mdata->mount_matrix);
+ err = iio_read_mount_matrix(parent, &mdata->mount_matrix);
if (err)
return err;
@@ -650,32 +651,10 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
return err;
}
- err = iio_device_register(indio_dev);
- if (err)
- goto st_magn_device_register_error;
-
- dev_info(&indio_dev->dev, "registered magnetometer %s\n",
- indio_dev->name);
-
- return 0;
-
-st_magn_device_register_error:
- if (mdata->irq > 0)
- st_sensors_deallocate_trigger(indio_dev);
- return err;
+ return devm_iio_device_register(parent, indio_dev);
}
EXPORT_SYMBOL(st_magn_common_probe);
-void st_magn_common_remove(struct iio_dev *indio_dev)
-{
- struct st_sensor_data *mdata = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (mdata->irq > 0)
- st_sensors_deallocate_trigger(indio_dev);
-}
-EXPORT_SYMBOL(st_magn_common_remove);
-
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics magnetometers driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index 2dfe4ee99591..7237711fc09b 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -86,27 +86,7 @@ static int st_magn_i2c_probe(struct i2c_client *client,
if (err)
return err;
- err = st_magn_common_probe(indio_dev);
- if (err < 0)
- goto st_magn_power_off;
-
- return 0;
-
-st_magn_power_off:
- st_sensors_power_disable(indio_dev);
-
- return err;
-}
-
-static int st_magn_i2c_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- st_sensors_power_disable(indio_dev);
-
- st_magn_common_remove(indio_dev);
-
- return 0;
+ return st_magn_common_probe(indio_dev);
}
static const struct i2c_device_id st_magn_id_table[] = {
@@ -128,7 +108,6 @@ static struct i2c_driver st_magn_driver = {
.of_match_table = st_magn_of_match,
},
.probe = st_magn_i2c_probe,
- .remove = st_magn_i2c_remove,
.id_table = st_magn_id_table,
};
module_i2c_driver(st_magn_driver);
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index fba978796395..489d4462862f 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -80,27 +80,7 @@ static int st_magn_spi_probe(struct spi_device *spi)
if (err)
return err;
- err = st_magn_common_probe(indio_dev);
- if (err < 0)
- goto st_magn_power_off;
-
- return 0;
-
-st_magn_power_off:
- st_sensors_power_disable(indio_dev);
-
- return err;
-}
-
-static int st_magn_spi_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- st_sensors_power_disable(indio_dev);
-
- st_magn_common_remove(indio_dev);
-
- return 0;
+ return st_magn_common_probe(indio_dev);
}
static const struct spi_device_id st_magn_id_table[] = {
@@ -119,7 +99,6 @@ static struct spi_driver st_magn_driver = {
.of_match_table = st_magn_of_match,
},
.probe = st_magn_spi_probe,
- .remove = st_magn_spi_remove,
.id_table = st_magn_id_table,
};
module_spi_driver(st_magn_driver);
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
index d54ae5cbe51b..f422d44377df 100644
--- a/drivers/iio/multiplexer/iio-mux.c
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -33,6 +33,7 @@ struct mux {
struct iio_chan_spec *chan;
struct iio_chan_spec_ext_info *ext_info;
struct mux_child *child;
+ u32 delay_us;
};
static int iio_mux_select(struct mux *mux, int idx)
@@ -42,7 +43,8 @@ static int iio_mux_select(struct mux *mux, int idx)
int ret;
int i;
- ret = mux_control_select(mux->control, chan->channel);
+ ret = mux_control_select_delay(mux->control, chan->channel,
+ mux->delay_us);
if (ret < 0) {
mux->cached_state = -1;
return ret;
@@ -392,6 +394,9 @@ static int mux_probe(struct platform_device *pdev)
mux->parent = parent;
mux->cached_state = -1;
+ mux->delay_us = 0;
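+	/* optional DT property; e.g. settle-time-us = <10000> requests 10 ms */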
+ of_property_read_u32(np, "settle-time-us", &mux->delay_us);
+
indio_dev->name = dev_name(dev);
indio_dev->info = &mux_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
index 1c0d46a96200..007c2bd324cb 100644
--- a/drivers/iio/potentiometer/max5487.c
+++ b/drivers/iio/potentiometer/max5487.c
@@ -115,11 +115,16 @@ static int max5487_spi_probe(struct spi_device *spi)
static int max5487_spi_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ int ret;
iio_device_unregister(indio_dev);
/* save both wiper regs to NV regs */
- return max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV);
+ ret = max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV);
+ if (ret)
+ dev_warn(&spi->dev, "Failed to save wiper regs to NV regs\n");
+
+ return 0;
}
static const struct spi_device_id max5487_id[] = {
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index bc06271fa38b..86b1c4b1820d 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -61,6 +61,6 @@ struct ms5611_state {
int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
const char *name, int type);
-int ms5611_remove(struct iio_dev *indio_dev);
+void ms5611_remove(struct iio_dev *indio_dev);
#endif /* _MS5611_H */
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 214b0d25f598..ee75f08655c9 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -474,13 +474,11 @@ err_fini:
}
EXPORT_SYMBOL(ms5611_probe);
-int ms5611_remove(struct iio_dev *indio_dev)
+void ms5611_remove(struct iio_dev *indio_dev)
{
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
ms5611_fini(indio_dev);
-
- return 0;
}
EXPORT_SYMBOL(ms5611_remove);
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 7c04f730430c..5c82d80f85b6 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -110,7 +110,9 @@ static int ms5611_i2c_probe(struct i2c_client *client,
static int ms5611_i2c_remove(struct i2c_client *client)
{
- return ms5611_remove(i2c_get_clientdata(client));
+ ms5611_remove(i2c_get_clientdata(client));
+
+ return 0;
}
static const struct of_device_id ms5611_i2c_matches[] = {
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 45d3a7d5be8e..79bed64c9b68 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -112,7 +112,9 @@ static int ms5611_spi_probe(struct spi_device *spi)
static int ms5611_spi_remove(struct spi_device *spi)
{
- return ms5611_remove(spi_get_drvdata(spi));
+ ms5611_remove(spi_get_drvdata(spi));
+
+ return 0;
}
static const struct of_device_id ms5611_spi_matches[] = {
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index ab1c17fac807..26a1ee43d56e 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -677,7 +677,8 @@ EXPORT_SYMBOL(st_press_get_settings);
int st_press_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *press_data = iio_priv(indio_dev);
- struct st_sensors_platform_data *pdata = dev_get_platdata(press_data->dev);
+ struct device *parent = indio_dev->dev.parent;
+ struct st_sensors_platform_data *pdata = dev_get_platdata(parent);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -721,32 +722,10 @@ int st_press_common_probe(struct iio_dev *indio_dev)
return err;
}
- err = iio_device_register(indio_dev);
- if (err)
- goto st_press_device_register_error;
-
- dev_info(&indio_dev->dev, "registered pressure sensor %s\n",
- indio_dev->name);
-
- return err;
-
-st_press_device_register_error:
- if (press_data->irq > 0)
- st_sensors_deallocate_trigger(indio_dev);
- return err;
+ return devm_iio_device_register(parent, indio_dev);
}
EXPORT_SYMBOL(st_press_common_probe);
-void st_press_common_remove(struct iio_dev *indio_dev)
-{
- struct st_sensor_data *press_data = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (press_data->irq > 0)
- st_sensors_deallocate_trigger(indio_dev);
-}
-EXPORT_SYMBOL(st_press_common_remove);
-
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics pressures driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 52fa98f24478..1939e999a427 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -103,27 +103,7 @@ static int st_press_i2c_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = st_press_common_probe(indio_dev);
- if (ret < 0)
- goto st_press_power_off;
-
- return 0;
-
-st_press_power_off:
- st_sensors_power_disable(indio_dev);
-
- return ret;
-}
-
-static int st_press_i2c_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- st_sensors_power_disable(indio_dev);
-
- st_press_common_remove(indio_dev);
-
- return 0;
+ return st_press_common_probe(indio_dev);
}
static struct i2c_driver st_press_driver = {
@@ -133,7 +113,6 @@ static struct i2c_driver st_press_driver = {
.acpi_match_table = ACPI_PTR(st_press_acpi_match),
},
.probe = st_press_i2c_probe,
- .remove = st_press_i2c_remove,
.id_table = st_press_id_table,
};
module_i2c_driver(st_press_driver);
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index ee393df54cee..d6fc954e28f8 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -86,27 +86,7 @@ static int st_press_spi_probe(struct spi_device *spi)
if (err)
return err;
- err = st_press_common_probe(indio_dev);
- if (err < 0)
- goto st_press_power_off;
-
- return 0;
-
-st_press_power_off:
- st_sensors_power_disable(indio_dev);
-
- return err;
-}
-
-static int st_press_spi_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- st_sensors_power_disable(indio_dev);
-
- st_press_common_remove(indio_dev);
-
- return 0;
+ return st_press_common_probe(indio_dev);
}
static const struct spi_device_id st_press_id_table[] = {
@@ -117,6 +97,10 @@ static const struct spi_device_id st_press_id_table[] = {
{ LPS33HW_PRESS_DEV_NAME },
{ LPS35HW_PRESS_DEV_NAME },
{ LPS22HH_PRESS_DEV_NAME },
+ { "lps001wp-press" },
+ { "lps25h-press", },
+ { "lps331ap-press" },
+ { "lps22hb-press" },
{},
};
MODULE_DEVICE_TABLE(spi, st_press_id_table);
@@ -127,7 +111,6 @@ static struct spi_driver st_press_driver = {
.of_match_table = st_press_of_match,
},
.probe = st_press_spi_probe,
- .remove = st_press_spi_remove,
.id_table = st_press_id_table,
};
module_spi_driver(st_press_driver);
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index f20ae3c963cb..e8ed849e3b76 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -138,4 +138,14 @@ config MAX31856
This driver can also be built as a module. If so, the module
will be called max31856.
+config MAX31865
+ tristate "MAX31865 RTD to Digital converter"
+ depends on SPI
+	help
+	  If you say yes here you get support for the MAX31865
+	  resistance-temperature-detector (RTD) to digital converter
+	  chip connected via SPI.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called max31865.
+
endmenu
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index e3392c4b29b4..dd08e562ffe0 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_LTC2983) += ltc2983.o
obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o
obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
obj-$(CONFIG_MAX31856) += max31856.o
+obj-$(CONFIG_MAX31865) += max31865.o
obj-$(CONFIG_MLX90614) += mlx90614.o
obj-$(CONFIG_MLX90632) += mlx90632.o
obj-$(CONFIG_TMP006) += tmp006.o
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index 3b4a0e60e605..301c3f13fb26 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -1275,6 +1275,11 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
&st->filter_notch_freq);
st->num_channels = of_get_available_child_count(dev->of_node);
+ if (!st->num_channels) {
+		dev_err(&st->spi->dev, "At least one channel must be given!\n");
+ return -EINVAL;
+ }
+
st->sensors = devm_kcalloc(dev, st->num_channels, sizeof(*st->sensors),
GFP_KERNEL);
if (!st->sensors)
@@ -1470,6 +1475,7 @@ static int ltc2983_probe(struct spi_device *spi)
{
struct ltc2983_data *st;
struct iio_dev *indio_dev;
+ struct gpio_desc *gpio;
const char *name = spi_get_device_id(spi)->name;
int ret;
@@ -1494,6 +1500,16 @@ static int ltc2983_probe(struct spi_device *spi)
if (ret)
return ret;
+ gpio = devm_gpiod_get_optional(&st->spi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (gpio) {
+ /* bring the device out of reset */
+ usleep_range(1000, 1200);
+ gpiod_set_value_cansleep(gpio, 0);
+ }
+
ret = ltc2983_setup(st, true);
if (ret)
return ret;
diff --git a/drivers/iio/temperature/max31865.c b/drivers/iio/temperature/max31865.c
new file mode 100644
index 000000000000..4c8d6e6cf677
--- /dev/null
+++ b/drivers/iio/temperature/max31865.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (c) Linumiz 2021
+ *
+ * max31865.c - Maxim MAX31865 RTD-to-Digital Converter sensor driver
+ *
+ * Author: Navin Sankar Velliangiri <navin@linumiz.com>
+ */
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/spi/spi.h>
+#include <asm/unaligned.h>
+
+/*
+ * The MSB of the register address determines whether the following byte
+ * will be written or read: if it is 0, a read follows; if it is 1, a
+ * write follows.
+ */
+#define MAX31865_RD_WR_BIT BIT(7)
+
+#define MAX31865_CFG_VBIAS BIT(7)
+#define MAX31865_CFG_1SHOT BIT(5)
+#define MAX31865_3WIRE_RTD BIT(4)
+#define MAX31865_FAULT_STATUS_CLEAR BIT(1)
+#define MAX31865_FILTER_50HZ BIT(0)
+
+/* The MAX31865 registers */
+#define MAX31865_CFG_REG 0x00
+#define MAX31865_RTD_MSB 0x01
+#define MAX31865_FAULT_STATUS 0x07
+
+#define MAX31865_FAULT_OVUV BIT(2)
+
+static const char max31865_show_samp_freq[] = "50 60";
+
+static const struct iio_chan_spec max31865_channels[] = {
+ { /* RTD Temperature */
+ .type = IIO_TEMP,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)
+ },
+};
+
+struct max31865_data {
+ struct spi_device *spi;
+ struct mutex lock;
+ bool filter_50hz;
+ bool three_wire;
+ u8 buf[2] ____cacheline_aligned;
+};
+
+static int max31865_read(struct max31865_data *data, u8 reg,
+ unsigned int read_size)
+{
+ return spi_write_then_read(data->spi, &reg, 1, data->buf, read_size);
+}
+
+static int max31865_write(struct max31865_data *data, size_t len)
+{
+ return spi_write(data->spi, data->buf, len);
+}
+
+static int enable_bias(struct max31865_data *data)
+{
+ u8 cfg;
+ int ret;
+
+ ret = max31865_read(data, MAX31865_CFG_REG, 1);
+ if (ret)
+ return ret;
+
+ cfg = data->buf[0];
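+	/* set the R/W bit: config address 0x00 becomes write address 0x80 */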
+
+ data->buf[0] = MAX31865_CFG_REG | MAX31865_RD_WR_BIT;
+ data->buf[1] = cfg | MAX31865_CFG_VBIAS;
+
+ return max31865_write(data, 2);
+}
+
+static int disable_bias(struct max31865_data *data)
+{
+ u8 cfg;
+ int ret;
+
+ ret = max31865_read(data, MAX31865_CFG_REG, 1);
+ if (ret)
+ return ret;
+
+ cfg = data->buf[0];
+ cfg &= ~MAX31865_CFG_VBIAS;
+
+ data->buf[0] = MAX31865_CFG_REG | MAX31865_RD_WR_BIT;
+ data->buf[1] = cfg;
+
+ return max31865_write(data, 2);
+}
+
+static int max31865_rtd_read(struct max31865_data *data, int *val)
+{
+ u8 reg;
+ int ret;
+
+ /* Enable BIAS to start the conversion */
+ ret = enable_bias(data);
+ if (ret)
+ return ret;
+
+ /* wait 10.5ms before initiating the conversion */
+ msleep(11);
+
+ ret = max31865_read(data, MAX31865_CFG_REG, 1);
+ if (ret)
+ return ret;
+
+ reg = data->buf[0];
+ reg |= MAX31865_CFG_1SHOT | MAX31865_FAULT_STATUS_CLEAR;
+ data->buf[0] = MAX31865_CFG_REG | MAX31865_RD_WR_BIT;
+ data->buf[1] = reg;
+
+ ret = max31865_write(data, 2);
+ if (ret)
+ return ret;
+
+ if (data->filter_50hz) {
+ /* 50Hz filter mode requires 62.5ms to complete */
+ msleep(63);
+ } else {
+ /* 60Hz filter mode requires 52ms to complete */
+ msleep(52);
+ }
+
+ ret = max31865_read(data, MAX31865_RTD_MSB, 2);
+ if (ret)
+ return ret;
+
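+	/* bits D15:D1 hold the RTD resistance; D0 is the fault flag, drop it */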
+ *val = get_unaligned_be16(&data->buf) >> 1;
+
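+	/* turn VBIAS back off between conversions to limit RTD self-heating */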
+ return disable_bias(data);
+}
+
+static int max31865_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max31865_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+ ret = max31865_rtd_read(data, val);
+ mutex_unlock(&data->lock);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+		/* temperature data resolution is 0.03125 degrees Celsius per LSB */
+		*val = 31;
+		*val2 = 250000; /* 31.25 m°C = 1000 * 0.03125 °C */
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int max31865_init(struct max31865_data *data)
+{
+ u8 cfg;
+ int ret;
+
+ ret = max31865_read(data, MAX31865_CFG_REG, 1);
+ if (ret)
+ return ret;
+
+ cfg = data->buf[0];
+
+ if (data->three_wire)
+ /* 3-wire RTD connection */
+ cfg |= MAX31865_3WIRE_RTD;
+
+ if (data->filter_50hz)
+ /* 50Hz noise rejection filter */
+ cfg |= MAX31865_FILTER_50HZ;
+
+ data->buf[0] = MAX31865_CFG_REG | MAX31865_RD_WR_BIT;
+ data->buf[1] = cfg;
+
+ return max31865_write(data, 2);
+}
+
+static ssize_t show_fault(struct device *dev, u8 faultbit, char *buf)
+{
+ int ret;
+ bool fault;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max31865_data *data = iio_priv(indio_dev);
+
+ ret = max31865_read(data, MAX31865_FAULT_STATUS, 1);
+ if (ret)
+ return ret;
+
+ fault = data->buf[0] & faultbit;
+
+ return sprintf(buf, "%d\n", fault);
+}
+
+static ssize_t show_fault_ovuv(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return show_fault(dev, MAX31865_FAULT_OVUV, buf);
+}
+
+static ssize_t show_filter(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max31865_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", data->filter_50hz ? 50 : 60);
+}
+
+static ssize_t set_filter(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max31865_data *data = iio_priv(indio_dev);
+ unsigned int freq;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &freq);
+ if (ret)
+ return ret;
+
+ switch (freq) {
+ case 50:
+ data->filter_50hz = true;
+ break;
+ case 60:
+ data->filter_50hz = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->lock);
+ ret = max31865_init(data);
+ mutex_unlock(&data->lock);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(max31865_show_samp_freq);
+static IIO_DEVICE_ATTR(fault_ovuv, 0444, show_fault_ovuv, NULL, 0);
+static IIO_DEVICE_ATTR(in_filter_notch_center_frequency, 0644,
+ show_filter, set_filter, 0);
+
+static struct attribute *max31865_attributes[] = {
+ &iio_dev_attr_fault_ovuv.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_filter_notch_center_frequency.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group max31865_group = {
+ .attrs = max31865_attributes,
+};
+
+static const struct iio_info max31865_info = {
+ .read_raw = max31865_read_raw,
+ .attrs = &max31865_group,
+};
+
+static int max31865_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct max31865_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->spi = spi;
+ data->filter_50hz = false;
+ mutex_init(&data->lock);
+
+ indio_dev->info = &max31865_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max31865_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max31865_channels);
+
+	if (of_property_read_bool(spi->dev.of_node, "maxim,3-wire")) {
+		/* 3-wire RTD connection */
+		data->three_wire = true;
+	} else {
+		/* 2- or 4-wire RTD connection */
+		data->three_wire = false;
+	}
+
+ ret = max31865_init(data);
+ if (ret) {
+ dev_err(&spi->dev, "error: Failed to configure max31865\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id max31865_id[] = {
+ { "max31865", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, max31865_id);
+
+static const struct of_device_id max31865_of_match[] = {
+ { .compatible = "maxim,max31865" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max31865_of_match);
+
+static struct spi_driver max31865_driver = {
+ .driver = {
+ .name = "max31865",
+ .of_match_table = max31865_of_match,
+ },
+ .probe = max31865_probe,
+ .id_table = max31865_id,
+};
+module_spi_driver(max31865_driver);
+
+MODULE_AUTHOR("Navin Sankar Velliangiri <navin@linumiz.com>");
+MODULE_DESCRIPTION("Maxim MAX31865 RTD-to-Digital Converter sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 704ce595542c..835ac54d4a24 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -453,7 +453,7 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
id_priv->id.device = cma_dev->device;
id_priv->id.route.addr.dev_addr.transport =
rdma_node_get_transport(cma_dev->device->node_type);
- list_add_tail(&id_priv->list, &cma_dev->id_list);
+ list_add_tail(&id_priv->device_item, &cma_dev->id_list);
trace_cm_id_attach(id_priv, cma_dev->device);
}
@@ -470,7 +470,7 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
static void cma_release_dev(struct rdma_id_private *id_priv)
{
mutex_lock(&lock);
- list_del(&id_priv->list);
+ list_del_init(&id_priv->device_item);
cma_dev_put(id_priv->cma_dev);
id_priv->cma_dev = NULL;
id_priv->id.device = NULL;
@@ -854,6 +854,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
init_completion(&id_priv->comp);
refcount_set(&id_priv->refcount, 1);
mutex_init(&id_priv->handler_mutex);
+ INIT_LIST_HEAD(&id_priv->device_item);
INIT_LIST_HEAD(&id_priv->listen_list);
INIT_LIST_HEAD(&id_priv->mc_list);
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
@@ -1647,7 +1648,7 @@ static struct rdma_id_private *cma_find_listener(
return id_priv;
list_for_each_entry(id_priv_dev,
&id_priv->listen_list,
- listen_list) {
+ listen_item) {
if (id_priv_dev->id.device == cm_id->device &&
cma_match_net_dev(&id_priv_dev->id,
net_dev, req))
@@ -1756,14 +1757,15 @@ static void _cma_cancel_listens(struct rdma_id_private *id_priv)
* Remove from listen_any_list to prevent added devices from spawning
* additional listen requests.
*/
- list_del(&id_priv->list);
+ list_del_init(&id_priv->listen_any_item);
while (!list_empty(&id_priv->listen_list)) {
- dev_id_priv = list_entry(id_priv->listen_list.next,
- struct rdma_id_private, listen_list);
+ dev_id_priv =
+ list_first_entry(&id_priv->listen_list,
+ struct rdma_id_private, listen_item);
/* sync with device removal to avoid duplicate destruction */
- list_del_init(&dev_id_priv->list);
- list_del(&dev_id_priv->listen_list);
+ list_del_init(&dev_id_priv->device_item);
+ list_del_init(&dev_id_priv->listen_item);
mutex_unlock(&lock);
rdma_destroy_id(&dev_id_priv->id);
@@ -2564,7 +2566,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
if (ret)
goto err_listen;
- list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
+ list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
return 0;
err_listen:
/* Caller must destroy this after releasing lock */
@@ -2580,13 +2582,13 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
int ret;
mutex_lock(&lock);
- list_add_tail(&id_priv->list, &listen_any_list);
+ list_add_tail(&id_priv->listen_any_item, &listen_any_list);
list_for_each_entry(cma_dev, &dev_list, list) {
ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
if (ret) {
/* Prevent racing with cma_process_remove() */
if (to_destroy)
- list_del_init(&to_destroy->list);
+ list_del_init(&to_destroy->device_item);
goto err_listen;
}
}
@@ -4895,7 +4897,7 @@ static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
mutex_lock(&lock);
list_for_each_entry(cma_dev, &dev_list, list)
- list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+ list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
ret = cma_netdev_change(ndev, id_priv);
if (ret)
goto out;
@@ -4955,10 +4957,10 @@ static void cma_process_remove(struct cma_device *cma_dev)
mutex_lock(&lock);
while (!list_empty(&cma_dev->id_list)) {
struct rdma_id_private *id_priv = list_first_entry(
- &cma_dev->id_list, struct rdma_id_private, list);
+ &cma_dev->id_list, struct rdma_id_private, device_item);
- list_del(&id_priv->listen_list);
- list_del_init(&id_priv->list);
+ list_del_init(&id_priv->listen_item);
+ list_del_init(&id_priv->device_item);
cma_id_get(id_priv);
mutex_unlock(&lock);
@@ -5035,7 +5037,7 @@ static int cma_add_one(struct ib_device *device)
mutex_lock(&lock);
list_add_tail(&cma_dev->list, &dev_list);
- list_for_each_entry(id_priv, &listen_any_list, list) {
+ list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
if (ret)
goto free_listen;
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index f92f101ea981..757a0ef79872 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -55,8 +55,15 @@ struct rdma_id_private {
struct rdma_bind_list *bind_list;
struct hlist_node node;
- struct list_head list; /* listen_any_list or cma_device.list */
- struct list_head listen_list; /* per device listens */
+ union {
+ struct list_head device_item; /* On cma_device->id_list */
+ struct list_head listen_any_item; /* On listen_any_list */
+ };
+ union {
+ /* On rdma_id_private->listen_list */
+ struct list_head listen_item;
+ struct list_head listen_list;
+ };
struct cma_device *cma_dev;
struct list_head mc_list;
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index df9e6c5e4ddf..af59486fe418 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -106,6 +106,38 @@ static int __rdma_counter_bind_qp(struct rdma_counter *counter,
return ret;
}
+int rdma_counter_modify(struct ib_device *dev, u32 port,
+ unsigned int index, bool enable)
+{
+ struct rdma_hw_stats *stats;
+ int ret = 0;
+
+ if (!dev->ops.modify_hw_stat)
+ return -EOPNOTSUPP;
+
+ stats = ib_get_hw_stats_port(dev, port);
+ if (!stats || index >= stats->num_counters ||
+ !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL))
+ return -EINVAL;
+
+ mutex_lock(&stats->lock);
+
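+	/* no change needed if the counter is already in the requested state */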
+ if (enable != test_bit(index, stats->is_disabled))
+ goto out;
+
+ ret = dev->ops.modify_hw_stat(dev, port, index, enable);
+ if (ret)
+ goto out;
+
+ if (enable)
+ clear_bit(index, stats->is_disabled);
+ else
+ set_bit(index, stats->is_disabled);
+out:
+ mutex_unlock(&stats->lock);
+ return ret;
+}
+
static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
struct ib_qp *qp,
enum rdma_nl_counter_mode mode)
@@ -165,7 +197,7 @@ static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
return counter;
err_mode:
- kfree(counter->stats);
+ rdma_free_hw_stats_struct(counter->stats);
err_stats:
rdma_restrack_put(&counter->res);
kfree(counter);
@@ -186,7 +218,7 @@ static void rdma_counter_free(struct rdma_counter *counter)
mutex_unlock(&port_counter->lock);
rdma_restrack_del(&counter->res);
- kfree(counter->stats);
+ rdma_free_hw_stats_struct(counter->stats);
kfree(counter);
}
@@ -618,7 +650,7 @@ void rdma_counter_init(struct ib_device *dev)
fail:
for (i = port; i >= rdma_start_port(dev); i--) {
port_counter = &dev->port_data[port].port_counter;
- kfree(port_counter->hstats);
+ rdma_free_hw_stats_struct(port_counter->hstats);
port_counter->hstats = NULL;
mutex_destroy(&port_counter->lock);
}
@@ -631,7 +663,7 @@ void rdma_counter_release(struct ib_device *dev)
rdma_for_each_port(dev, port) {
port_counter = &dev->port_data[port].port_counter;
- kfree(port_counter->hstats);
+ rdma_free_hw_stats_struct(port_counter->hstats);
mutex_destroy(&port_counter->lock);
}
}
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index f4814bb7f082..22a4adda7981 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2676,6 +2676,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
+ SET_DEVICE_OP(dev_ops, modify_hw_stat);
SET_DEVICE_OP(dev_ops, modify_port);
SET_DEVICE_OP(dev_ops, modify_qp);
SET_DEVICE_OP(dev_ops, modify_srq);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 54f4feb604d8..358a2db38d23 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -762,7 +762,7 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
- const char *err_str = "";
+ const char *err_str;
int ret = -EINVAL;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_HELLO, &nlh, nl_client);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index e9b4b2cccaa0..fedc0fa6ebf9 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -154,6 +154,8 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 },
[RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 },
[RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -968,14 +970,21 @@ static int fill_stat_counter_hwcounters(struct sk_buff *msg,
if (!table_attr)
return -EMSGSIZE;
- for (i = 0; i < st->num_counters; i++)
- if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
+ mutex_lock(&st->lock);
+ for (i = 0; i < st->num_counters; i++) {
+ if (test_bit(i, st->is_disabled))
+ continue;
+ if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name,
+ st->value[i]))
goto err;
+ }
+ mutex_unlock(&st->lock);
nla_nest_end(msg, table_attr);
return 0;
err:
+ mutex_unlock(&st->lock);
nla_nest_cancel(msg, table_attr);
return -EMSGSIZE;
}
@@ -1888,24 +1897,111 @@ static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
+static int nldev_stat_set_mode_doit(struct sk_buff *msg,
+ struct netlink_ext_ack *extack,
+ struct nlattr *tb[],
+ struct ib_device *device, u32 port)
+{
+ u32 mode, mask = 0, qpn, cntn = 0;
+ int ret;
+
+	/* Currently only counters for QPs are supported */
+ if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
+ return -EINVAL;
+
+ mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
+ if (mode == RDMA_COUNTER_MODE_AUTO) {
+ if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
+ mask = nla_get_u32(
+ tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
+ return rdma_counter_set_auto_mode(device, port, mask, extack);
+ }
+
+ if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
+ return -EINVAL;
+
+ qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
+ if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
+ cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
+ ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
+ if (ret)
+ return ret;
+ } else {
+ ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn);
+ if (ret)
+ return ret;
+ }
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
+ ret = -EMSGSIZE;
+ goto err_fill;
+ }
+
+ return 0;
+
+err_fill:
+ rdma_counter_unbind_qpn(device, port, qpn, cntn);
+ return ret;
+}
+
+static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[],
+ struct ib_device *device,
+ u32 port)
+{
+ struct rdma_hw_stats *stats;
+ int rem, i, index, ret = 0;
+ struct nlattr *entry_attr;
+ unsigned long *target;
+
+ stats = ib_get_hw_stats_port(device, port);
+ if (!stats)
+ return -EINVAL;
+
+ target = kcalloc(BITS_TO_LONGS(stats->num_counters),
+ sizeof(*stats->is_disabled), GFP_KERNEL);
+ if (!target)
+ return -ENOMEM;
+
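+	/* collect the full requested enable set before applying any change */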
+ nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS],
+ rem) {
+ index = nla_get_u32(entry_attr);
+ if ((index >= stats->num_counters) ||
+ !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ set_bit(index, target);
+ }
+
+ for (i = 0; i < stats->num_counters; i++) {
+ if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL))
+ continue;
+
+ ret = rdma_counter_modify(device, port, i, test_bit(i, target));
+ if (ret)
+ goto out;
+ }
+
+out:
+ kfree(target);
+ return ret;
+}
+
static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
- u32 index, port, mode, mask = 0, qpn, cntn = 0;
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
struct ib_device *device;
struct sk_buff *msg;
+ u32 index, port;
int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
- /* Currently only counter for QP is supported */
- if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
- !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
- !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
- return -EINVAL;
-
- if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
@@ -1916,59 +2012,49 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
if (!rdma_is_port_valid(device, port)) {
ret = -EINVAL;
- goto err;
+ goto err_put_device;
+ }
+
+ if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] &&
+ !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) {
+ ret = -EINVAL;
+ goto err_put_device;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto err;
+ goto err_put_device;
}
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
RDMA_NLDEV_CMD_STAT_SET),
0, 0);
+ if (fill_nldev_handle(msg, device) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
+ ret = -EMSGSIZE;
+ goto err_free_msg;
+ }
- mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
- if (mode == RDMA_COUNTER_MODE_AUTO) {
- if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
- mask = nla_get_u32(
- tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
- ret = rdma_counter_set_auto_mode(device, port, mask, extack);
- if (ret)
- goto err_msg;
- } else {
- if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
- goto err_msg;
- qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
- if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
- cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
- ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
- } else {
- ret = rdma_counter_bind_qpn_alloc(device, port,
- qpn, &cntn);
- }
+ if (tb[RDMA_NLDEV_ATTR_STAT_MODE]) {
+ ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port);
if (ret)
- goto err_msg;
+ goto err_free_msg;
+ }
- if (fill_nldev_handle(msg, device) ||
- nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
- nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
- nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
- ret = -EMSGSIZE;
- goto err_fill;
- }
+ if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) {
+ ret = nldev_stat_set_counter_dynamic_doit(tb, device, port);
+ if (ret)
+ goto err_free_msg;
}
nlmsg_end(msg, nlh);
ib_device_put(device);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
-err_fill:
- rdma_counter_unbind_qpn(device, port, qpn, cntn);
-err_msg:
+err_free_msg:
nlmsg_free(msg);
-err:
+err_put_device:
ib_device_put(device);
return ret;
}
@@ -2103,9 +2189,13 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
goto err_stats;
}
for (i = 0; i < num_cnts; i++) {
+ if (test_bit(i, stats->is_disabled))
+ continue;
+
v = stats->value[i] +
rdma_counter_get_hwstat_value(device, port, i);
- if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) {
+ if (rdma_nl_stat_hwcounter_entry(msg,
+ stats->descs[i].name, v)) {
ret = -EMSGSIZE;
goto err_table;
}
@@ -2253,6 +2343,99 @@ static int nldev_stat_get_dumpit(struct sk_buff *skb,
return ret;
}
+static int nldev_stat_get_counter_status_doit(struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry;
+ struct rdma_hw_stats *stats;
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 devid, port;
+ int ret, i;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+ return -EINVAL;
+
+ devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), devid);
+ if (!device)
+ return -EINVAL;
+
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ stats = ib_get_hw_stats_port(device, port);
+ if (!stats) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(
+ msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS),
+ 0, 0);
+
+ ret = -EMSGSIZE;
+ if (fill_nldev_handle(msg, device) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
+ goto err_msg;
+
+ table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+ if (!table)
+ goto err_msg;
+
+ mutex_lock(&stats->lock);
+ for (i = 0; i < stats->num_counters; i++) {
+ entry = nla_nest_start(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
+ if (!entry)
+ goto err_msg_table;
+
+ if (nla_put_string(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
+ stats->descs[i].name) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i))
+ goto err_msg_entry;
+
+ if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) &&
+ (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC,
+ !test_bit(i, stats->is_disabled))))
+ goto err_msg_entry;
+
+ nla_nest_end(msg, entry);
+ }
+ mutex_unlock(&stats->lock);
+
+ nla_nest_end(msg, table);
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_msg_entry:
+ nla_nest_cancel(msg, entry);
+err_msg_table:
+ mutex_unlock(&stats->lock);
+ nla_nest_cancel(msg, table);
+err_msg:
+ nlmsg_free(msg);
+err:
+ ib_device_put(device);
+ return ret;
+}
+
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
@@ -2342,6 +2525,9 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.dump = nldev_res_get_mr_raw_dumpit,
.flags = RDMA_NL_ADMIN_PERM,
},
+ [RDMA_NLDEV_CMD_STAT_GET_STATUS] = {
+ .doit = nldev_stat_get_counter_status_doit,
+ },
};
void __init nldev_init(void)
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 5221cce65675..5a3bd41b331c 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -282,15 +282,22 @@ static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
}
-static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
- u32 sg_cnt, enum dma_data_direction dir)
+static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir)
{
- if (is_pci_p2pdma_page(sg_page(sg))) {
+ int nents;
+
+ if (is_pci_p2pdma_page(sg_page(sgt->sgl))) {
if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
return 0;
- return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+ nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl,
+ sgt->orig_nents, dir);
+ if (!nents)
+ return -EIO;
+ sgt->nents = nents;
+ return 0;
}
- return ib_dma_map_sg(dev, sg, sg_cnt, dir);
+ return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
}
/**
@@ -313,12 +320,16 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
struct ib_device *dev = qp->pd->device;
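+	/* wrap the caller's scatterlist so the map helper can record sgt.nents */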
+ struct sg_table sgt = {
+ .sgl = sg,
+ .orig_nents = sg_cnt,
+ };
int ret;
- ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
- if (!ret)
- return -ENOMEM;
- sg_cnt = ret;
+ ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ if (ret)
+ return ret;
+ sg_cnt = sgt.nents;
/*
* Skip to the S/G entry that sg_offset falls into:
@@ -354,7 +365,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
return ret;
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
+ rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -385,6 +396,14 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
struct ib_device *dev = qp->pd->device;
u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
qp->integrity_en);
+ struct sg_table sgt = {
+ .sgl = sg,
+ .orig_nents = sg_cnt,
+ };
+ struct sg_table prot_sgt = {
+ .sgl = prot_sg,
+ .orig_nents = prot_sg_cnt,
+ };
struct ib_rdma_wr *rdma_wr;
int count = 0, ret;
@@ -394,18 +413,14 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return -EINVAL;
}
- ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
- if (!ret)
- return -ENOMEM;
- sg_cnt = ret;
+ ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ if (ret)
+ return ret;
if (prot_sg_cnt) {
- ret = rdma_rw_map_sg(dev, prot_sg, prot_sg_cnt, dir);
- if (!ret) {
- ret = -ENOMEM;
+ ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir);
+ if (ret)
goto out_unmap_sg;
- }
- prot_sg_cnt = ret;
}
ctx->type = RDMA_RW_SIG_MR;
@@ -426,10 +441,11 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));
- ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sg_cnt, NULL, prot_sg,
- prot_sg_cnt, NULL, SZ_4K);
+ ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sgt.nents, NULL, prot_sg,
+ prot_sgt.nents, NULL, SZ_4K);
if (unlikely(ret)) {
- pr_err("failed to map PI sg (%u)\n", sg_cnt + prot_sg_cnt);
+ pr_err("failed to map PI sg (%u)\n",
+ sgt.nents + prot_sgt.nents);
goto out_destroy_sig_mr;
}
@@ -468,10 +484,10 @@ out_destroy_sig_mr:
out_free_ctx:
kfree(ctx->reg);
out_unmap_prot_sg:
- if (prot_sg_cnt)
- rdma_rw_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
+ if (prot_sgt.nents)
+ rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir);
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
+ rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a20b8108e160..74ecd7456a11 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -706,8 +706,9 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
/* Construct the family header first */
header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
- memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
- LS_DEVICE_NAME_MAX);
+ strscpy_pad(header->device_name,
+ dev_name(&query->port->agent->device->dev),
+ LS_DEVICE_NAME_MAX);
header->port_num = query->port->port_num;
if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
@@ -2261,7 +2262,6 @@ err1:
void ib_sa_cleanup(void)
{
cancel_delayed_work(&ib_nl_timed_work);
- flush_workqueue(ib_nl_wq);
destroy_workqueue(ib_nl_wq);
mcast_cleanup();
ib_unregister_client(&sa_client);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 6146c3c1cbe5..a3f84b50c46a 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -755,9 +755,9 @@ static void ib_port_release(struct kobject *kobj)
for (i = 0; i != ARRAY_SIZE(port->groups); i++)
kfree(port->groups[i].attrs);
if (port->hw_stats_data)
- kfree(port->hw_stats_data->stats);
+ rdma_free_hw_stats_struct(port->hw_stats_data->stats);
kfree(port->hw_stats_data);
- kfree(port);
+ kvfree(port);
}
static void ib_port_gid_attr_release(struct kobject *kobj)
@@ -895,7 +895,7 @@ alloc_hw_stats_device(struct ib_device *ibdev)
stats = ibdev->ops.alloc_hw_device_stats(ibdev);
if (!stats)
return ERR_PTR(-ENOMEM);
- if (!stats->names || stats->num_counters <= 0)
+ if (!stats->descs || stats->num_counters <= 0)
goto err_free_stats;
/*
@@ -911,7 +911,6 @@ alloc_hw_stats_device(struct ib_device *ibdev)
if (!data->group.attrs)
goto err_free_data;
- mutex_init(&stats->lock);
data->group.name = "hw_counters";
data->stats = stats;
return data;
@@ -919,14 +918,14 @@ alloc_hw_stats_device(struct ib_device *ibdev)
err_free_data:
kfree(data);
err_free_stats:
- kfree(stats);
+ rdma_free_hw_stats_struct(stats);
return ERR_PTR(-ENOMEM);
}
void ib_device_release_hw_stats(struct hw_stats_device_data *data)
{
kfree(data->group.attrs);
- kfree(data->stats);
+ rdma_free_hw_stats_struct(data->stats);
kfree(data);
}
@@ -934,7 +933,8 @@ int ib_setup_device_attrs(struct ib_device *ibdev)
{
struct hw_stats_device_attribute *attr;
struct hw_stats_device_data *data;
- int i, ret;
+ bool opstat_skipped = false;
+ int i, ret, pos = 0;
data = alloc_hw_stats_device(ibdev);
if (IS_ERR(data)) {
@@ -955,16 +955,23 @@ int ib_setup_device_attrs(struct ib_device *ibdev)
data->stats->timestamp = jiffies;
for (i = 0; i < data->stats->num_counters; i++) {
- attr = &data->attrs[i];
+ if (data->stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) {
+ opstat_skipped = true;
+ continue;
+ }
+
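+	/* drivers must declare all mandatory counters before optional ones */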
+ WARN_ON(opstat_skipped);
+ attr = &data->attrs[pos];
sysfs_attr_init(&attr->attr.attr);
- attr->attr.attr.name = data->stats->names[i];
+ attr->attr.attr.name = data->stats->descs[i].name;
attr->attr.attr.mode = 0444;
attr->attr.show = hw_stat_device_show;
attr->show = show_hw_stats;
- data->group.attrs[i] = &attr->attr.attr;
+ data->group.attrs[pos] = &attr->attr.attr;
+ pos++;
}
- attr = &data->attrs[i];
+ attr = &data->attrs[pos];
sysfs_attr_init(&attr->attr.attr);
attr->attr.attr.name = "lifespan";
attr->attr.attr.mode = 0644;
@@ -972,7 +979,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev)
attr->show = show_stats_lifespan;
attr->attr.store = hw_stat_device_store;
attr->store = set_stats_lifespan;
- data->group.attrs[i] = &attr->attr.attr;
+ data->group.attrs[pos] = &attr->attr.attr;
for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++)
if (!ibdev->groups[i]) {
ibdev->groups[i] = &data->group;
@@ -994,7 +1001,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
stats = ibdev->ops.alloc_hw_port_stats(port->ibdev, port->port_num);
if (!stats)
return ERR_PTR(-ENOMEM);
- if (!stats->names || stats->num_counters <= 0)
+ if (!stats->descs || stats->num_counters <= 0)
goto err_free_stats;
/*
@@ -1010,7 +1017,6 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
if (!group->attrs)
goto err_free_data;
- mutex_init(&stats->lock);
group->name = "hw_counters";
data->stats = stats;
return data;
@@ -1018,7 +1024,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
err_free_data:
kfree(data);
err_free_stats:
- kfree(stats);
+ rdma_free_hw_stats_struct(stats);
return ERR_PTR(-ENOMEM);
}
@@ -1027,7 +1033,8 @@ static int setup_hw_port_stats(struct ib_port *port,
{
struct hw_stats_port_attribute *attr;
struct hw_stats_port_data *data;
- int i, ret;
+ bool opstat_skipped = false;
+ int i, ret, pos = 0;
data = alloc_hw_stats_port(port, group);
if (IS_ERR(data))
@@ -1045,16 +1052,23 @@ static int setup_hw_port_stats(struct ib_port *port,
data->stats->timestamp = jiffies;
for (i = 0; i < data->stats->num_counters; i++) {
- attr = &data->attrs[i];
+ if (data->stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) {
+ opstat_skipped = true;
+ continue;
+ }
+
+ WARN_ON(opstat_skipped);
+ attr = &data->attrs[pos];
sysfs_attr_init(&attr->attr.attr);
- attr->attr.attr.name = data->stats->names[i];
+ attr->attr.attr.name = data->stats->descs[i].name;
attr->attr.attr.mode = 0444;
attr->attr.show = hw_stat_port_show;
attr->show = show_hw_stats;
- group->attrs[i] = &attr->attr.attr;
+ group->attrs[pos] = &attr->attr.attr;
+ pos++;
}
- attr = &data->attrs[i];
+ attr = &data->attrs[pos];
sysfs_attr_init(&attr->attr.attr);
attr->attr.attr.name = "lifespan";
attr->attr.attr.mode = 0644;
@@ -1062,7 +1076,7 @@ static int setup_hw_port_stats(struct ib_port *port,
attr->show = show_stats_lifespan;
attr->attr.store = hw_stat_port_store;
attr->store = set_stats_lifespan;
- group->attrs[i] = &attr->attr.attr;
+ group->attrs[pos] = &attr->attr.attr;
port->hw_stats_data = data;
return 0;
@@ -1189,7 +1203,7 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
struct ib_port *p;
int ret;
- p = kzalloc(struct_size(p, attrs_list,
+ p = kvzalloc(struct_size(p, attrs_list,
attr->gid_tbl_len + attr->pkey_tbl_len),
GFP_KERNEL);
if (!p)
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index e824baf4640d..f0760741f281 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -6,9 +6,12 @@
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include "uverbs.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
struct sg_table *sgt;
@@ -163,12 +166,63 @@ out_release_dmabuf:
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);
+static void
+ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
+
+ ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
+ "Invalidate callback should not be called when memory is pinned\n");
+}
+
+static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
+ .allow_peer2peer = true,
+ .move_notify = ib_umem_dmabuf_unsupported_move_notify,
+};
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
+ unsigned long offset,
+ size_t size, int fd,
+ int access)
+{
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int err;
+
+ umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
+ &ib_umem_dmabuf_attach_pinned_ops);
+ if (IS_ERR(umem_dmabuf))
+ return umem_dmabuf;
+
+ dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
+ err = dma_buf_pin(umem_dmabuf->attach);
+ if (err)
+ goto err_release;
+ umem_dmabuf->pinned = 1;
+
+ err = ib_umem_dmabuf_map_pages(umem_dmabuf);
+ if (err)
+ goto err_unpin;
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+
+ return umem_dmabuf;
+
+err_unpin:
+ dma_buf_unpin(umem_dmabuf->attach);
+err_release:
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+ ib_umem_release(&umem_dmabuf->umem);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);
+
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
dma_resv_lock(dmabuf->resv, NULL);
ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ if (umem_dmabuf->pinned)
+ dma_buf_unpin(umem_dmabuf->attach);
dma_resv_unlock(dmabuf->resv);
dma_buf_detach(dmabuf, umem_dmabuf->attach);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 740e6b2efe0e..d1345d76d9b1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -837,11 +837,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
new_mr->device = new_pd->device;
new_mr->pd = new_pd;
new_mr->type = IB_MR_TYPE_USER;
- new_mr->dm = NULL;
- new_mr->sig_attrs = NULL;
new_mr->uobject = uobj;
atomic_inc(&new_pd->usecnt);
- new_mr->iova = cmd.hca_va;
new_uobj->object = new_mr;
rdma_restrack_new(&new_mr->res, RDMA_RESTRACK_MR);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 89a2b21976d6..692d5ff657df 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2976,3 +2976,52 @@ bool __rdma_block_iter_next(struct ib_block_iter *biter)
return true;
}
EXPORT_SYMBOL(__rdma_block_iter_next);
+
+/**
+ * rdma_alloc_hw_stats_struct - Helper function to allocate a dynamically
+ * sized rdma_hw_stats structure for drivers.
+ * @descs: array of static descriptors
+ * @num_counters: number of elements in array
+ * @lifespan: milliseconds between updates
+ */
+struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
+ const struct rdma_stat_desc *descs, int num_counters,
+ unsigned long lifespan)
+{
+ struct rdma_hw_stats *stats;
+
+ stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL);
+ if (!stats)
+ return NULL;
+
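+	/* one bit per counter; a set bit means the counter is disabled */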
+ stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters),
+ sizeof(*stats->is_disabled), GFP_KERNEL);
+ if (!stats->is_disabled)
+ goto err;
+
+ stats->descs = descs;
+ stats->num_counters = num_counters;
+ stats->lifespan = msecs_to_jiffies(lifespan);
+ mutex_init(&stats->lock);
+
+ return stats;
+
+err:
+ kfree(stats);
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_alloc_hw_stats_struct);
+
+/**
+ * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
+ * @stats: statistics to release
+ */
+void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats)
+{
+ if (!stats)
+ return;
+
+ kfree(stats->is_disabled);
+ kfree(stats);
+}
+EXPORT_SYMBOL(rdma_free_hw_stats_struct);
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ba26d8e6a9c2..79401e6c6aa9 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -39,22 +39,13 @@
#ifndef __BNXT_RE_H__
#define __BNXT_RE_H__
+#include "hw_counters.h"
#define ROCE_DRV_MODULE_NAME "bnxt_re"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
-#define BNXT_RE_PAGE_SHIFT_4K (12)
-#define BNXT_RE_PAGE_SHIFT_8K (13)
-#define BNXT_RE_PAGE_SHIFT_64K (16)
-#define BNXT_RE_PAGE_SHIFT_2M (21)
-#define BNXT_RE_PAGE_SHIFT_8M (23)
-#define BNXT_RE_PAGE_SHIFT_1G (30)
-#define BNXT_RE_PAGE_SIZE_4K BIT(BNXT_RE_PAGE_SHIFT_4K)
-#define BNXT_RE_PAGE_SIZE_8K BIT(BNXT_RE_PAGE_SHIFT_8K)
-#define BNXT_RE_PAGE_SIZE_64K BIT(BNXT_RE_PAGE_SHIFT_64K)
-#define BNXT_RE_PAGE_SIZE_2M BIT(BNXT_RE_PAGE_SHIFT_2M)
-#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
-#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_PAGE_SHIFT_1G (30)
+#define BNXT_RE_PAGE_SIZE_SUPPORTED 0x7FFFF000 /* 4KB - 1GB */
#define BNXT_RE_MAX_MR_SIZE_LOW BIT_ULL(BNXT_RE_PAGE_SHIFT_1G)
#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
@@ -177,15 +168,17 @@ struct bnxt_re_dev {
atomic_t srq_count;
atomic_t mr_count;
atomic_t mw_count;
+ atomic_t ah_count;
+ atomic_t pd_count;
	/* Max of 2 lossless traffic classes supported per port */
u16 cosq[2];
	/* QP for handling QP1 packets */
struct bnxt_re_gsi_context gsi_ctx;
+ struct bnxt_re_stats stats;
atomic_t nq_alloc_cnt;
u32 is_virtfn;
u32 num_vfs;
- struct bnxt_qplib_roce_stats stats;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 7ba07797845c..825d512799d9 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -57,69 +57,208 @@
#include "bnxt_re.h"
#include "hw_counters.h"
-static const char * const bnxt_re_stat_name[] = {
- [BNXT_RE_ACTIVE_QP] = "active_qps",
- [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
- [BNXT_RE_ACTIVE_CQ] = "active_cqs",
- [BNXT_RE_ACTIVE_MR] = "active_mrs",
- [BNXT_RE_ACTIVE_MW] = "active_mws",
- [BNXT_RE_RX_PKTS] = "rx_pkts",
- [BNXT_RE_RX_BYTES] = "rx_bytes",
- [BNXT_RE_TX_PKTS] = "tx_pkts",
- [BNXT_RE_TX_BYTES] = "tx_bytes",
- [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors",
- [BNXT_RE_RX_DROPS] = "rx_roce_drops",
- [BNXT_RE_RX_DISCARDS] = "rx_roce_discards",
- [BNXT_RE_TO_RETRANSMITS] = "to_retransmits",
- [BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
- [BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
- [BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd",
- [BNXT_RE_MISSING_RESP] = "missing_resp",
- [BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err",
- [BNXT_RE_BAD_RESP_ERR] = "bad_resp_err",
- [BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err",
- [BNXT_RE_LOCAL_PROTECTION_ERR] = "local_protection_err",
- [BNXT_RE_MEM_MGMT_OP_ERR] = "mem_mgmt_op_err",
- [BNXT_RE_REMOTE_INVALID_REQ_ERR] = "remote_invalid_req_err",
- [BNXT_RE_REMOTE_ACCESS_ERR] = "remote_access_err",
- [BNXT_RE_REMOTE_OP_ERR] = "remote_op_err",
- [BNXT_RE_DUP_REQ] = "dup_req",
- [BNXT_RE_RES_EXCEED_MAX] = "res_exceed_max",
- [BNXT_RE_RES_LENGTH_MISMATCH] = "res_length_mismatch",
- [BNXT_RE_RES_EXCEEDS_WQE] = "res_exceeds_wqe",
- [BNXT_RE_RES_OPCODE_ERR] = "res_opcode_err",
- [BNXT_RE_RES_RX_INVALID_RKEY] = "res_rx_invalid_rkey",
- [BNXT_RE_RES_RX_DOMAIN_ERR] = "res_rx_domain_err",
- [BNXT_RE_RES_RX_NO_PERM] = "res_rx_no_perm",
- [BNXT_RE_RES_RX_RANGE_ERR] = "res_rx_range_err",
- [BNXT_RE_RES_TX_INVALID_RKEY] = "res_tx_invalid_rkey",
- [BNXT_RE_RES_TX_DOMAIN_ERR] = "res_tx_domain_err",
- [BNXT_RE_RES_TX_NO_PERM] = "res_tx_no_perm",
- [BNXT_RE_RES_TX_RANGE_ERR] = "res_tx_range_err",
- [BNXT_RE_RES_IRRQ_OFLOW] = "res_irrq_oflow",
- [BNXT_RE_RES_UNSUP_OPCODE] = "res_unsup_opcode",
- [BNXT_RE_RES_UNALIGNED_ATOMIC] = "res_unaligned_atomic",
- [BNXT_RE_RES_REM_INV_ERR] = "res_rem_inv_err",
- [BNXT_RE_RES_MEM_ERROR] = "res_mem_err",
- [BNXT_RE_RES_SRQ_ERR] = "res_srq_err",
- [BNXT_RE_RES_CMP_ERR] = "res_cmp_err",
- [BNXT_RE_RES_INVALID_DUP_RKEY] = "res_invalid_dup_rkey",
- [BNXT_RE_RES_WQE_FORMAT_ERR] = "res_wqe_format_err",
- [BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err",
- [BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err",
- [BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err",
- [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err",
- [BNXT_RE_OUT_OF_SEQ_ERR] = "oos_drop_count"
+static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
+ [BNXT_RE_ACTIVE_PD].name = "active_pds",
+ [BNXT_RE_ACTIVE_AH].name = "active_ahs",
+ [BNXT_RE_ACTIVE_QP].name = "active_qps",
+ [BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
+ [BNXT_RE_ACTIVE_CQ].name = "active_cqs",
+ [BNXT_RE_ACTIVE_MR].name = "active_mrs",
+ [BNXT_RE_ACTIVE_MW].name = "active_mws",
+ [BNXT_RE_RX_PKTS].name = "rx_pkts",
+ [BNXT_RE_RX_BYTES].name = "rx_bytes",
+ [BNXT_RE_TX_PKTS].name = "tx_pkts",
+ [BNXT_RE_TX_BYTES].name = "tx_bytes",
+ [BNXT_RE_RECOVERABLE_ERRORS].name = "recoverable_errors",
+ [BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
+ [BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
+ [BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
+ [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd",
+ [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
+ [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd",
+ [BNXT_RE_MISSING_RESP].name = "missing_resp",
+ [BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
+ [BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
+ [BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
+ [BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
+ [BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
+ [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err",
+ [BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err",
+ [BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
+ [BNXT_RE_DUP_REQ].name = "dup_req",
+ [BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
+ [BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch",
+ [BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
+ [BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
+ [BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
+ [BNXT_RE_RES_RX_DOMAIN_ERR].name = "res_rx_domain_err",
+ [BNXT_RE_RES_RX_NO_PERM].name = "res_rx_no_perm",
+ [BNXT_RE_RES_RX_RANGE_ERR].name = "res_rx_range_err",
+ [BNXT_RE_RES_TX_INVALID_RKEY].name = "res_tx_invalid_rkey",
+ [BNXT_RE_RES_TX_DOMAIN_ERR].name = "res_tx_domain_err",
+ [BNXT_RE_RES_TX_NO_PERM].name = "res_tx_no_perm",
+ [BNXT_RE_RES_TX_RANGE_ERR].name = "res_tx_range_err",
+ [BNXT_RE_RES_IRRQ_OFLOW].name = "res_irrq_oflow",
+ [BNXT_RE_RES_UNSUP_OPCODE].name = "res_unsup_opcode",
+ [BNXT_RE_RES_UNALIGNED_ATOMIC].name = "res_unaligned_atomic",
+ [BNXT_RE_RES_REM_INV_ERR].name = "res_rem_inv_err",
+ [BNXT_RE_RES_MEM_ERROR].name = "res_mem_err",
+ [BNXT_RE_RES_SRQ_ERR].name = "res_srq_err",
+ [BNXT_RE_RES_CMP_ERR].name = "res_cmp_err",
+ [BNXT_RE_RES_INVALID_DUP_RKEY].name = "res_invalid_dup_rkey",
+ [BNXT_RE_RES_WQE_FORMAT_ERR].name = "res_wqe_format_err",
+ [BNXT_RE_RES_CQ_LOAD_ERR].name = "res_cq_load_err",
+ [BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
+ [BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
+ [BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
+ [BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count",
+ [BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
+ [BNXT_RE_TX_READ_REQ].name = "tx_read_req",
+ [BNXT_RE_TX_READ_RES].name = "tx_read_resp",
+ [BNXT_RE_TX_WRITE_REQ].name = "tx_write_req",
+ [BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
+ [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
+ [BNXT_RE_RX_READ_REQ].name = "rx_read_req",
+ [BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
+ [BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
+ [BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
+ [BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
+ [BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
+ [BNXT_RE_OOB].name = "rx_out_of_buffer"
};
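The name table above now uses struct rdma_stat_desc with designated initializers, so every string stays pinned to its enum slot even if counters are added in the middle. A stand-alone sketch of the pattern, with made-up DEMO_* names and only the .name member of the real descriptor modeled:

#include <stdio.h>

struct stat_desc { const char *name; };

enum { DEMO_RX_PKTS, DEMO_TX_PKTS, DEMO_NUM_COUNTERS };

/* Designated initializers tie each slot to its enum index, so reordering
 * or inserting enum values cannot silently shift the names. */
static const struct stat_desc demo_descs[] = {
    [DEMO_RX_PKTS].name = "rx_pkts",
    [DEMO_TX_PKTS].name = "tx_pkts",
};

int main(void)
{
    for (int i = 0; i < DEMO_NUM_COUNTERS; i++)
        printf("%d: %s\n", i, demo_descs[i].name);
    return 0;
}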
+static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats,
+ struct bnxt_qplib_ext_stat *s)
+{
+ stats->value[BNXT_RE_TX_ATOMIC_REQ] = s->tx_atomic_req;
+ stats->value[BNXT_RE_TX_READ_REQ] = s->tx_read_req;
+ stats->value[BNXT_RE_TX_READ_RES] = s->tx_read_res;
+ stats->value[BNXT_RE_TX_WRITE_REQ] = s->tx_write_req;
+ stats->value[BNXT_RE_TX_SEND_REQ] = s->tx_send_req;
+ stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req;
+ stats->value[BNXT_RE_RX_READ_REQ] = s->rx_read_req;
+ stats->value[BNXT_RE_RX_READ_RESP] = s->rx_read_res;
+ stats->value[BNXT_RE_RX_WRITE_REQ] = s->rx_write_req;
+ stats->value[BNXT_RE_RX_SEND_REQ] = s->rx_send_req;
+ stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts;
+ stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes;
+ stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer;
+}
+
+static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats)
+{
+ struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+ u32 fid;
+ int rc;
+
+ fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+ rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+ if (rc)
+ goto done;
+ bnxt_re_copy_ext_stats(rdev, stats, estat);
+
+done:
+ return rc;
+}
+
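bnxt_re_get_ext_stat() keys the firmware query by the PCI function number decoded from devfn. For reference, a user-space sketch of the devfn bit layout that the kernel's PCI_SLOT()/PCI_FUNC() helpers decode; the DEMO_ macros are local stand-ins:

#include <stdio.h>

/* Same bit layout as the kernel helpers: device in bits 7:3, function
 * in bits 2:0. */
#define DEMO_PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define DEMO_PCI_FUNC(devfn) ((devfn) & 0x07)

int main(void)
{
    unsigned int devfn = 0x4a;  /* device 9, function 2 */

    printf("slot %u func %u\n", DEMO_PCI_SLOT(devfn), DEMO_PCI_FUNC(devfn));
    return 0;
}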
+static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats,
+ struct bnxt_qplib_roce_stats *err_s)
+{
+ stats->value[BNXT_RE_TO_RETRANSMITS] =
+ err_s->to_retransmits;
+ stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
+ err_s->seq_err_naks_rcvd;
+ stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
+ err_s->max_retry_exceeded;
+ stats->value[BNXT_RE_RNR_NAKS_RCVD] =
+ err_s->rnr_naks_rcvd;
+ stats->value[BNXT_RE_MISSING_RESP] =
+ err_s->missing_resp;
+ stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
+ err_s->unrecoverable_err;
+ stats->value[BNXT_RE_BAD_RESP_ERR] =
+ err_s->bad_resp_err;
+ stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
+ err_s->local_qp_op_err;
+ stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
+ err_s->local_protection_err;
+ stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
+ err_s->mem_mgmt_op_err;
+ stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
+ err_s->remote_invalid_req_err;
+ stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
+ err_s->remote_access_err;
+ stats->value[BNXT_RE_REMOTE_OP_ERR] =
+ err_s->remote_op_err;
+ stats->value[BNXT_RE_DUP_REQ] =
+ err_s->dup_req;
+ stats->value[BNXT_RE_RES_EXCEED_MAX] =
+ err_s->res_exceed_max;
+ stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
+ err_s->res_length_mismatch;
+ stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
+ err_s->res_exceeds_wqe;
+ stats->value[BNXT_RE_RES_OPCODE_ERR] =
+ err_s->res_opcode_err;
+ stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
+ err_s->res_rx_invalid_rkey;
+ stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
+ err_s->res_rx_domain_err;
+ stats->value[BNXT_RE_RES_RX_NO_PERM] =
+ err_s->res_rx_no_perm;
+ stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
+ err_s->res_rx_range_err;
+ stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
+ err_s->res_tx_invalid_rkey;
+ stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
+ err_s->res_tx_domain_err;
+ stats->value[BNXT_RE_RES_TX_NO_PERM] =
+ err_s->res_tx_no_perm;
+ stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
+ err_s->res_tx_range_err;
+ stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
+ err_s->res_irrq_oflow;
+ stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
+ err_s->res_unsup_opcode;
+ stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
+ err_s->res_unaligned_atomic;
+ stats->value[BNXT_RE_RES_REM_INV_ERR] =
+ err_s->res_rem_inv_err;
+ stats->value[BNXT_RE_RES_MEM_ERROR] =
+ err_s->res_mem_error;
+ stats->value[BNXT_RE_RES_SRQ_ERR] =
+ err_s->res_srq_err;
+ stats->value[BNXT_RE_RES_CMP_ERR] =
+ err_s->res_cmp_err;
+ stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
+ err_s->res_invalid_dup_rkey;
+ stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
+ err_s->res_wqe_format_err;
+ stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
+ err_s->res_cq_load_err;
+ stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
+ err_s->res_srq_load_err;
+ stats->value[BNXT_RE_RES_TX_PCI_ERR] =
+ err_s->res_tx_pci_err;
+ stats->value[BNXT_RE_RES_RX_PCI_ERR] =
+ err_s->res_rx_pci_err;
+ stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
+ err_s->res_oos_drop_count;
+}
+
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u32 port, int index)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
+ struct ctx_hw_stats *hw_stats = NULL;
+ struct bnxt_qplib_roce_stats *err_s = NULL;
int rc = 0;
+ hw_stats = rdev->qplib_ctx.stats.dma;
if (!port || !stats)
return -EINVAL;
@@ -128,118 +267,61 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->cq_count);
stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->mr_count);
stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->mw_count);
- if (bnxt_re_stats) {
+ stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&rdev->pd_count);
+ stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&rdev->ah_count);
+
+ if (hw_stats) {
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
- le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
- stats->value[BNXT_RE_RX_DROPS] =
- le64_to_cpu(bnxt_re_stats->rx_error_pkts);
+ le64_to_cpu(hw_stats->tx_bcast_pkts);
+ stats->value[BNXT_RE_RX_ERRORS] =
+ le64_to_cpu(hw_stats->rx_error_pkts);
stats->value[BNXT_RE_RX_DISCARDS] =
- le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
+ le64_to_cpu(hw_stats->rx_discard_pkts);
stats->value[BNXT_RE_RX_PKTS] =
- le64_to_cpu(bnxt_re_stats->rx_ucast_pkts);
+ le64_to_cpu(hw_stats->rx_ucast_pkts);
stats->value[BNXT_RE_RX_BYTES] =
- le64_to_cpu(bnxt_re_stats->rx_ucast_bytes);
+ le64_to_cpu(hw_stats->rx_ucast_bytes);
stats->value[BNXT_RE_TX_PKTS] =
- le64_to_cpu(bnxt_re_stats->tx_ucast_pkts);
+ le64_to_cpu(hw_stats->tx_ucast_pkts);
stats->value[BNXT_RE_TX_BYTES] =
- le64_to_cpu(bnxt_re_stats->tx_ucast_bytes);
+ le64_to_cpu(hw_stats->tx_ucast_bytes);
}
+ err_s = &rdev->stats.rstat.errs;
if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
- rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, &rdev->stats);
- if (rc)
+ rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, err_s);
+ if (rc) {
clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
&rdev->flags);
- stats->value[BNXT_RE_TO_RETRANSMITS] =
- rdev->stats.to_retransmits;
- stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
- rdev->stats.seq_err_naks_rcvd;
- stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
- rdev->stats.max_retry_exceeded;
- stats->value[BNXT_RE_RNR_NAKS_RCVD] =
- rdev->stats.rnr_naks_rcvd;
- stats->value[BNXT_RE_MISSING_RESP] =
- rdev->stats.missing_resp;
- stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
- rdev->stats.unrecoverable_err;
- stats->value[BNXT_RE_BAD_RESP_ERR] =
- rdev->stats.bad_resp_err;
- stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
- rdev->stats.local_qp_op_err;
- stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
- rdev->stats.local_protection_err;
- stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
- rdev->stats.mem_mgmt_op_err;
- stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
- rdev->stats.remote_invalid_req_err;
- stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
- rdev->stats.remote_access_err;
- stats->value[BNXT_RE_REMOTE_OP_ERR] =
- rdev->stats.remote_op_err;
- stats->value[BNXT_RE_DUP_REQ] =
- rdev->stats.dup_req;
- stats->value[BNXT_RE_RES_EXCEED_MAX] =
- rdev->stats.res_exceed_max;
- stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
- rdev->stats.res_length_mismatch;
- stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
- rdev->stats.res_exceeds_wqe;
- stats->value[BNXT_RE_RES_OPCODE_ERR] =
- rdev->stats.res_opcode_err;
- stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
- rdev->stats.res_rx_invalid_rkey;
- stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
- rdev->stats.res_rx_domain_err;
- stats->value[BNXT_RE_RES_RX_NO_PERM] =
- rdev->stats.res_rx_no_perm;
- stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
- rdev->stats.res_rx_range_err;
- stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
- rdev->stats.res_tx_invalid_rkey;
- stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
- rdev->stats.res_tx_domain_err;
- stats->value[BNXT_RE_RES_TX_NO_PERM] =
- rdev->stats.res_tx_no_perm;
- stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
- rdev->stats.res_tx_range_err;
- stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
- rdev->stats.res_irrq_oflow;
- stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
- rdev->stats.res_unsup_opcode;
- stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
- rdev->stats.res_unaligned_atomic;
- stats->value[BNXT_RE_RES_REM_INV_ERR] =
- rdev->stats.res_rem_inv_err;
- stats->value[BNXT_RE_RES_MEM_ERROR] =
- rdev->stats.res_mem_error;
- stats->value[BNXT_RE_RES_SRQ_ERR] =
- rdev->stats.res_srq_err;
- stats->value[BNXT_RE_RES_CMP_ERR] =
- rdev->stats.res_cmp_err;
- stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
- rdev->stats.res_invalid_dup_rkey;
- stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
- rdev->stats.res_wqe_format_err;
- stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
- rdev->stats.res_cq_load_err;
- stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
- rdev->stats.res_srq_load_err;
- stats->value[BNXT_RE_RES_TX_PCI_ERR] =
- rdev->stats.res_tx_pci_err;
- stats->value[BNXT_RE_RES_RX_PCI_ERR] =
- rdev->stats.res_rx_pci_err;
- stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
- rdev->stats.res_oos_drop_count;
+ goto done;
+ }
+ if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
+ !rdev->is_virtfn) {
+ rc = bnxt_re_get_ext_stat(rdev, stats);
+ if (rc) {
+ clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+ &rdev->flags);
+ goto done;
+ }
+ }
+ bnxt_re_copy_err_stats(rdev, stats, err_s);
}
- return ARRAY_SIZE(bnxt_re_stat_name);
+done:
+ return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+ BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
}
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
- BUILD_BUG_ON(ARRAY_SIZE(bnxt_re_stat_name) != BNXT_RE_NUM_COUNTERS);
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ int num_counters = 0;
+
+ if (bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+ num_counters = BNXT_RE_NUM_EXT_COUNTERS;
+ else
+ num_counters = BNXT_RE_NUM_STD_COUNTERS;
- return rdma_alloc_hw_stats_struct(bnxt_re_stat_name,
- ARRAY_SIZE(bnxt_re_stat_name),
+ return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs, num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index 6f2d2f91d9ff..7943b2c393e4 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -41,6 +41,8 @@
#define __BNXT_RE_HW_STATS_H__
enum bnxt_re_hw_stats {
+ BNXT_RE_ACTIVE_PD,
+ BNXT_RE_ACTIVE_AH,
BNXT_RE_ACTIVE_QP,
BNXT_RE_ACTIVE_SRQ,
BNXT_RE_ACTIVE_CQ,
@@ -51,7 +53,7 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
- BNXT_RE_RX_DROPS,
+ BNXT_RE_RX_ERRORS,
BNXT_RE_RX_DISCARDS,
BNXT_RE_TO_RETRANSMITS,
BNXT_RE_SEQ_ERR_NAKS_RCVD,
@@ -93,7 +95,31 @@ enum bnxt_re_hw_stats {
BNXT_RE_RES_TX_PCI_ERR,
BNXT_RE_RES_RX_PCI_ERR,
BNXT_RE_OUT_OF_SEQ_ERR,
- BNXT_RE_NUM_COUNTERS
+ BNXT_RE_TX_ATOMIC_REQ,
+ BNXT_RE_TX_READ_REQ,
+ BNXT_RE_TX_READ_RES,
+ BNXT_RE_TX_WRITE_REQ,
+ BNXT_RE_TX_SEND_REQ,
+ BNXT_RE_RX_ATOMIC_REQ,
+ BNXT_RE_RX_READ_REQ,
+ BNXT_RE_RX_READ_RESP,
+ BNXT_RE_RX_WRITE_REQ,
+ BNXT_RE_RX_SEND_REQ,
+ BNXT_RE_RX_ROCE_GOOD_PKTS,
+ BNXT_RE_RX_ROCE_GOOD_BYTES,
+ BNXT_RE_OOB,
+ BNXT_RE_NUM_EXT_COUNTERS
+};
+
+#define BNXT_RE_NUM_STD_COUNTERS (BNXT_RE_OUT_OF_SEQ_ERR + 1)
+
+struct bnxt_re_rstat {
+ struct bnxt_qplib_roce_stats errs;
+ struct bnxt_qplib_ext_stat ext_stat;
+};
+
+struct bnxt_re_stats {
+ struct bnxt_re_rstat rstat;
};
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
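BNXT_RE_NUM_STD_COUNTERS is derived from the last legacy enum entry instead of being hard-coded, so the standard set stays correct as the extended block grows. A compile-time sketch of the idiom, using made-up DEMO_ names:

#include <assert.h>

enum demo_stats { DEMO_A, DEMO_B, DEMO_OUT_OF_SEQ, DEMO_EXT0, DEMO_NUM_EXT };
#define DEMO_NUM_STD (DEMO_OUT_OF_SEQ + 1)

/* Fails to compile if someone reorders the legacy entries. */
static_assert(DEMO_NUM_STD == 3, "standard set ends at the last legacy counter");

int main(void) { return 0; }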
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 408dfbcc47b5..29cc0d14399a 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -41,6 +41,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
+#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
@@ -130,10 +131,10 @@ int bnxt_re_query_device(struct ib_device *ibdev,
memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
min(sizeof(dev_attr->fw_ver),
sizeof(ib_attr->fw_ver)));
- bnxt_qplib_get_guid(rdev->netdev->dev_addr,
- (u8 *)&ib_attr->sys_image_guid);
+ addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
+ rdev->netdev->dev_addr);
ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
- ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
+ ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -541,9 +542,12 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
bnxt_re_destroy_fence_mr(pd);
- if (pd->qplib_pd.id)
- bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
- &pd->qplib_pd);
+ if (pd->qplib_pd.id) {
+ if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+ &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd))
+ atomic_dec(&rdev->pd_count);
+ }
return 0;
}
@@ -595,6 +599,8 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
if (bnxt_re_create_fence_mr(pd))
ibdev_warn(&rdev->ibdev,
"Failed to create Fence-MR\n");
+ atomic_inc(&rdev->pd_count);
+
return 0;
dbfail:
bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@@ -611,6 +617,8 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
!(flags & RDMA_DESTROY_AH_SLEEPABLE));
+ atomic_dec(&rdev->ah_count);
+
return 0;
}
@@ -695,15 +703,11 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
wmb(); /* make sure cache is updated. */
spin_unlock_irqrestore(&uctx->sh_lock, flag);
}
+ atomic_inc(&rdev->ah_count);
return 0;
}
-int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
-{
- return 0;
-}
-
int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
@@ -760,6 +764,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
bnxt_qplib_destroy_ah(&rdev->qplib_res,
&gsi_sah->qplib_ah,
true);
+ atomic_dec(&rdev->ah_count);
bnxt_qplib_clean_qp(&qp->qplib_qp);
ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
@@ -1006,6 +1011,7 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
"Failed to allocate HW AH for Shadow QP");
goto fail;
}
+ atomic_inc(&rdev->ah_count);
return ah;
@@ -2478,7 +2484,8 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
wqe->frmr.l_key = wr->key;
wqe->frmr.length = wr->mr->length;
- wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
+ wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
+ wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
wqe->frmr.va = wr->mr->iova;
return 0;
}
@@ -3354,8 +3361,11 @@ static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
+ struct bnxt_re_dev *rdev;
+ u16 vlan_id = 0;
u8 nw_type;
+ rdev = qp->rdev;
wc->opcode = IB_WC_RECV;
wc->status = __rc_to_ib_wc_status(cqe->status);
@@ -3367,9 +3377,12 @@ static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
memcpy(wc->smac, cqe->smac, ETH_ALEN);
wc->wc_flags |= IB_WC_WITH_SMAC;
if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
- wc->vlan_id = (cqe->cfa_meta & 0xFFF);
- if (wc->vlan_id < 0x1000)
- wc->wc_flags |= IB_WC_WITH_VLAN;
+ vlan_id = (cqe->cfa_meta & 0xFFF);
+ }
+ /* Mark only if vlan_id is non-zero */
+ if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
+ wc->vlan_id = vlan_id;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
}
nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
@@ -3798,7 +3811,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
mr->qplib_mr.va = virt_addr;
page_size = ib_umem_find_best_pgsz(
- umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr);
+ umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
if (!page_size) {
ibdev_err(&rdev->ibdev, "umem page size unsupported!");
rc = -EFAULT;
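The frmr fix above splits the PBL page size (host PAGE_SIZE) from the MR page size and encodes both as log2 relative to 4K pages. A small sketch of the arithmetic; ilog2_u32() is a local stand-in for the kernel's ilog2() and assumes a non-zero power-of-two input:

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
    unsigned int r = 0;

    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    unsigned int mr_page = 2 * 1024 * 1024; /* 2M MR pages */
    unsigned int host_page = 4096;          /* typical PAGE_SIZE */

    printf("pg_sz_log = %u\n", ilog2_u32(mr_page >> 12));       /* 9 */
    printf("pbl_pg_sz_log = %u\n", ilog2_u32(host_page >> 12)); /* 0 */
    return 0;
}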
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index b5c6e0f4f877..94326267f9bb 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -166,7 +166,6 @@ int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
-int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
int bnxt_re_create_srq(struct ib_srq *srq,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 66268e41b470..b44944fb9b24 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -127,6 +127,8 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
+ rdev->qplib_res.dattr = &rdev->dev_attr;
+ rdev->qplib_res.is_vf = BNXT_VF(bp);
bnxt_re_set_drv_mode(rdev, wqe_mode);
if (bnxt_qplib_determine_atomics(en_dev->pdev))
@@ -523,7 +525,8 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
u32 fw_stats_ctx_id)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
- struct hwrm_stat_ctx_free_input req = {0};
+ struct hwrm_stat_ctx_free_input req = {};
+ struct hwrm_stat_ctx_free_output resp = {};
struct bnxt_fw_msg fw_msg;
int rc = -EINVAL;
@@ -537,8 +540,8 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
- bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
- sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
@@ -693,7 +696,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.get_port_immutable = bnxt_re_get_port_immutable,
.map_mr_sg = bnxt_re_map_mr_sg,
.mmap = bnxt_re_mmap,
- .modify_ah = bnxt_re_modify_ah,
.modify_qp = bnxt_re_modify_qp,
.modify_srq = bnxt_re_modify_srq,
.poll_cq = bnxt_re_poll_cq,
@@ -727,7 +729,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
strlen(BNXT_RE_DESC) + 5);
ibdev->phys_port_cnt = 1;
- bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);
+ addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
ibdev->num_comp_vectors = rdev->num_msix - 1;
ibdev->dev.parent = &rdev->en_dev->pdev->dev;
@@ -777,6 +779,8 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
atomic_set(&rdev->srq_count, 0);
atomic_set(&rdev->mr_count, 0);
atomic_set(&rdev->mw_count, 0);
+ atomic_set(&rdev->ah_count, 0);
+ atomic_set(&rdev->pd_count, 0);
rdev->cosq[0] = 0xFFFF;
rdev->cosq[1] = 0xFFFF;
@@ -1725,7 +1729,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
}
if (sch_work) {
/* Allocate for the deferred task */
- re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
+ re_work = kzalloc(sizeof(*re_work), GFP_KERNEL);
if (re_work) {
get_device(&rdev->ibdev.dev);
re_work->rdev = rdev;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index d4d4959c2434..ca88849559bf 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -707,12 +707,13 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
int rc = 0;
RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
- req.srq_cid = cpu_to_le32(srq->id);
/* Configure the request */
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
if (!sbuf)
return -ENOMEM;
+ req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ req.srq_cid = cpu_to_le32(srq->id);
sb = sbuf->sb;
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
(void *)sbuf, 0);
@@ -1049,6 +1050,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
+ if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
+
req.qp_flags = cpu_to_le32(qp_flags);
/* ORRQ and IRRQ */
@@ -2851,6 +2855,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
struct cq_base *hw_cqe;
u32 sw_cons, raw_cons;
int budget, rc = 0;
+ u8 type;
raw_cons = cq->hwq.cons;
budget = num_cqes;
@@ -2869,7 +2874,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
*/
dma_rmb();
/* From the device's respective CQE format to qplib_wc */
- switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
+ type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
+ switch (type) {
case CQ_BASE_CQE_TYPE_REQ:
rc = bnxt_qplib_cq_process_req(cq,
(struct cq_req *)hw_cqe,
@@ -2916,8 +2922,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
/* Error while processing the CQE, just skip to the
* next one
*/
- dev_err(&cq->hwq.pdev->dev,
- "process_cqe error rc = 0x%x\n", rc);
+ if (type != CQ_BASE_CQE_TYPE_TERMINAL)
+ dev_err(&cq->hwq.pdev->dev,
+ "process_cqe error rc = 0x%x\n", rc);
}
raw_cons++;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 5d384def5e5f..3de854727460 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -78,7 +78,7 @@ static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
if (!test_bit(cbit, cmdq->cmdq_bitmap))
goto done;
do {
- mdelay(1); /* 1m sec */
+ udelay(1);
bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
} while (test_bit(cbit, cmdq->cmdq_bitmap) && --count);
done:
@@ -848,13 +848,13 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
{
struct bnxt_qplib_rcfw_sbuf *sbuf;
- sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
+ sbuf = kzalloc(sizeof(*sbuf), GFP_KERNEL);
if (!sbuf)
return NULL;
sbuf->size = size;
sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
- &sbuf->dma_addr, GFP_ATOMIC);
+ &sbuf->dma_addr, GFP_KERNEL);
if (!sbuf->sb)
goto bail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 9474c0046582..82faa4e4cda8 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -96,7 +96,7 @@ static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
#define RCFW_CMD_IS_BLOCKING 0x8000
-#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20
+#define RCFW_BLOCKED_CMD_WAIT_COUNT 20000000UL /* 20 sec */
#define HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK 0x1000900020011ULL
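This pairs with the mdelay(1) to udelay(1) switch in qplib_rcfw.c: 0x4E20 iterations of 1 ms and 20000000 iterations of 1 us are both a 20 s budget, but the finer step notices completion roughly a thousand times sooner. A minimal model of the loop shape, with the delay and CREQ servicing elided:

#include <stdbool.h>
#include <stdio.h>

#define WAIT_COUNT 20000000UL /* 1 us per step -> same 20 s as 0x4E20 * 1 ms */

static unsigned long steps_needed = 5;

static bool cmd_pending(void)
{
    if (!steps_needed)
        return false;
    steps_needed--;
    return true;
}

int main(void)
{
    unsigned long count = WAIT_COUNT;

    /* Shape of __block_for_resp(): spin in small steps until the command
     * completes or the budget runs out; udelay(1) and servicing the
     * completion queue are elided here. */
    while (cmd_pending() && --count)
        ;

    printf(count ? "completed\n" : "timed out\n");
    return 0;
}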
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 44282a8cdd4f..bc1ba4b51ba4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -228,15 +228,16 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
npages++;
}
- if (npages == MAX_PBL_LVL_0_PGS) {
+ if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
/* This request is Level 0, map PTE */
rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
if (rc)
goto fail;
hwq->level = PBL_LVL_0;
+ goto done;
}
- if (npages > MAX_PBL_LVL_0_PGS) {
+ if (npages >= MAX_PBL_LVL_0_PGS) {
if (npages > MAX_PBL_LVL_1_PGS) {
u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
0 : PTU_PTE_VALID;
@@ -571,23 +572,6 @@ fail:
return rc;
}
-/* GUID */
-void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
-{
- u8 mac[ETH_ALEN];
-
- /* MAC-48 to EUI-64 mapping */
- memcpy(mac, dev_addr, ETH_ALEN);
- guid[0] = mac[0] ^ 2;
- guid[1] = mac[1];
- guid[2] = mac[2];
- guid[3] = 0xff;
- guid[4] = 0xfe;
- guid[5] = mac[3];
- guid[6] = mac[4];
- guid[7] = mac[5];
-}
-
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
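The removed helper open-coded the MAC-48 to modified EUI-64 mapping that the generic addrconf_addr_eui48() now provides for the node and system image GUIDs: flip the universal/local bit of the first octet and splice 0xff, 0xfe into the middle. A stand-alone sketch of that mapping; mac_to_eui64() is illustrative, not the kernel helper:

#include <stdio.h>

static void mac_to_eui64(const unsigned char mac[6], unsigned char eui[8])
{
    eui[0] = mac[0] ^ 2; /* flip the universal/local bit */
    eui[1] = mac[1];
    eui[2] = mac[2];
    eui[3] = 0xff;       /* splice ff:fe into the middle */
    eui[4] = 0xfe;
    eui[5] = mac[3];
    eui[6] = mac[4];
    eui[7] = mac[5];
}

int main(void)
{
    unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
    unsigned char eui[8];

    mac_to_eui64(mac, eui);
    for (int i = 0; i < 8; i++)
        printf("%02x%s", eui[i], i < 7 ? ":" : "\n");
    return 0;
}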
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 91031502e8f5..e1411a2352a7 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -253,14 +253,15 @@ struct bnxt_qplib_ctx {
struct bnxt_qplib_res {
struct pci_dev *pdev;
struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_qplib_dev_attr *dattr;
struct net_device *netdev;
-
struct bnxt_qplib_rcfw *rcfw;
struct bnxt_qplib_pd_tbl pd_tbl;
struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_pkey_tbl pkey_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
bool prio;
+ bool is_vf;
};
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
@@ -345,7 +346,6 @@ void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
struct bnxt_qplib_hwq_attr *hwq_attr);
-void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid);
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
struct bnxt_qplib_pd *pd);
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
@@ -450,4 +450,10 @@ static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
else
bnxt_qplib_ring_db32(info, arm);
}
+
+static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
+{
+ return dev_cap_flags &
+ CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
+}
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 3d9259632eb3..379e715ebd30 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -161,6 +161,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
+ attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
bnxt_qplib_query_version(rcfw, attr->fw_ver);
@@ -286,8 +287,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, u8 *smac, u16 vlan_id,
- bool update, u32 *index)
+ struct bnxt_qplib_gid *gid, const u8 *smac,
+ u16 vlan_id, bool update, u32 *index)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -378,7 +379,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
- u8 *smac)
+ const u8 *smac)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -869,3 +870,53 @@ bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
}
+
+int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
+ struct bnxt_qplib_ext_stat *estat)
+{
+ struct creq_query_roce_stats_ext_resp resp = {};
+ struct creq_query_roce_stats_ext_resp_sb *sb;
+ struct cmdq_query_roce_stats_ext req = {};
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+ u16 cmd_flags = 0;
+ int rc;
+
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf) {
+ dev_err(&rcfw->pdev->dev,
+ "SP: QUERY_ROCE_STATS_EXT alloc sb failed");
+ return -ENOMEM;
+ }
+
+ RCFW_CMD_PREP(req, QUERY_ROCE_STATS_EXT, cmd_flags);
+
+ req.resp_size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ req.resp_addr = cpu_to_le64(sbuf->dma_addr);
+ req.function_id = cpu_to_le32(fid);
+ req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, (void *)sbuf, 0);
+ if (rc)
+ goto bail;
+
+ sb = sbuf->sb;
+ estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
+ estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
+ estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
+ estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
+ estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
+ estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
+ estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
+ estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
+ estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
+ estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
+ estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
+ estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
+ estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
+ estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);
+
+bail:
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ return rc;
+}
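Each counter in the response side-buffer is little-endian on the wire, hence the le64_to_cpu() calls above. A portable sketch of what that conversion amounts to; le64_decode() is illustrative, and the kernel macro is a no-op on little-endian hosts:

#include <stdio.h>
#include <stdint.h>

/* Reassemble a little-endian 64-bit wire value byte by byte, regardless
 * of host endianness. */
static uint64_t le64_decode(const uint8_t *p)
{
    uint64_t v = 0;

    for (int i = 7; i >= 0; i--)
        v = (v << 8) | p[i];
    return v;
}

int main(void)
{
    uint8_t wire[8] = { 0x2a, 0, 0, 0, 0, 0, 0, 0 }; /* 42 in LE */

    printf("%llu\n", (unsigned long long)le64_decode(wire));
    return 0;
}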
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 260104783691..a18f568cb23e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -71,6 +71,7 @@ struct bnxt_qplib_dev_attr {
u32 l2_db_size;
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
bool is_atomic;
+ u16 dev_cap_flags;
};
struct bnxt_qplib_pd {
@@ -219,16 +220,41 @@ struct bnxt_qplib_roce_stats {
/* port 3 active qps */
};
+struct bnxt_qplib_ext_stat {
+ u64 tx_atomic_req;
+ u64 tx_read_req;
+ u64 tx_read_res;
+ u64 tx_write_req;
+ u64 tx_send_req;
+ u64 tx_roce_pkts;
+ u64 tx_roce_bytes;
+ u64 rx_atomic_req;
+ u64 rx_read_req;
+ u64 rx_read_res;
+ u64 rx_write_req;
+ u64 rx_send_req;
+ u64 rx_roce_pkts;
+ u64 rx_roce_bytes;
+ u64 rx_roce_good_pkts;
+ u64 rx_roce_good_bytes;
+ u64 rx_out_of_buffer;
+ u64 rx_out_of_sequence;
+ u64 tx_cnp;
+ u64 rx_cnp;
+ u64 rx_ecn_marked;
+};
+
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
+ struct bnxt_qplib_gid *gid, const u8 *mac, u16 vlan_id,
bool update, u32 *index);
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, u16 gid_idx, u8 *smac);
+ struct bnxt_qplib_gid *gid, u16 gid_idx,
+ const u8 *smac);
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
u16 *pkey);
@@ -263,4 +289,7 @@ int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_roce_stats *stats);
+int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
+ struct bnxt_qplib_ext_stat *estat);
+
#endif /* __BNXT_QPLIB_SP_H__*/
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 3e40e0d76efd..ecb719098b75 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -1102,6 +1102,7 @@ struct cmdq_base {
#define CMDQ_BASE_OPCODE_MODIFY_CC 0x8cUL
#define CMDQ_BASE_OPCODE_QUERY_CC 0x8dUL
#define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
u8 cmd_size;
__le16 flags;
__le16 cookie;
@@ -1127,6 +1128,10 @@ struct cmdq_create_qp {
#define CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL
#define CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED 0x8UL
#define CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED 0x10UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_LAST \
+ CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED
+
u8 type;
#define CMDQ_CREATE_QP_TYPE_RC 0x2UL
#define CMDQ_CREATE_QP_TYPE_UD 0x4UL
@@ -2848,6 +2853,7 @@ struct creq_query_func_resp_sb {
__le16 max_qp_wr;
__le16 dev_cap_flags;
#define CREQ_QUERY_FUNC_RESP_SB_DEV_CAP_FLAGS_RESIZE_QP 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS 0x10UL
__le32 max_cq;
__le32 max_cqe;
__le32 max_pd;
@@ -3087,6 +3093,85 @@ struct creq_query_roce_stats_resp_sb {
__le64 active_qp_count_p3;
};
+/* cmdq_query_roce_stats_ext (size:192b/24B) */
+struct cmdq_query_roce_stats_ext {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS 0x92UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_LAST \
+ CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_COLLECTION_ID 0x1UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID 0x2UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 collection_id;
+ __le64 resp_addr;
+ __le32 function_id;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_MASK 0xffUL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_SFT 0
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_MASK 0xffff00UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT 8
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID 0x1000000UL
+ __le32 reserved32;
+};
+
+/* creq_query_roce_stats_ext_resp (size:128b/16B) */
+struct creq_query_roce_stats_ext_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_LAST \
+ CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT 0x92UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_LAST \
+ CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_ext_resp_sb (size:1536b/192B) */
+struct creq_query_roce_stats_ext_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_LAST \
+ CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 rsvd;
+ __le64 tx_atomic_req_pkts;
+ __le64 tx_read_req_pkts;
+ __le64 tx_read_res_pkts;
+ __le64 tx_write_req_pkts;
+ __le64 tx_send_req_pkts;
+ __le64 tx_roce_pkts;
+ __le64 tx_roce_bytes;
+ __le64 rx_atomic_req_pkts;
+ __le64 rx_read_req_pkts;
+ __le64 rx_read_res_pkts;
+ __le64 rx_write_req_pkts;
+ __le64 rx_send_req_pkts;
+ __le64 rx_roce_pkts;
+ __le64 rx_roce_bytes;
+ __le64 rx_roce_good_pkts;
+ __le64 rx_roce_good_bytes;
+ __le64 rx_out_of_buffer_pkts;
+ __le64 rx_out_of_sequence_pkts;
+ __le64 tx_cnp_pkts;
+ __le64 rx_cnp_pkts;
+ __le64 rx_ecn_marked_pkts;
+ __le64 tx_cnp_bytes;
+ __le64 rx_cnp_bytes;
+};
+
/* QP error notification event (16 bytes) */
struct creq_qp_error_notification {
u8 type;
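The "size:1536b/192B" annotation on the extended-stats side-buffer is consistent with its layout: an 8-byte header (opcode, status, cookie, flags, resp_size, rsvd) followed by 23 __le64 counters. A compile-time spot check under that reading:

#include <assert.h>
#include <stdint.h>

/* 8-byte header + 23 * 8-byte counters = 192 bytes = 1536 bits. */
static_assert(8 + 23 * sizeof(uint64_t) == 192, "resp_sb layout");

int main(void) { return 0; }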
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 291471d12197..913f39ee4416 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -4464,6 +4464,5 @@ int __init c4iw_cm_init(void)
void c4iw_cm_term(void)
{
WARN_ON(!list_empty(&timeout_list));
- flush_workqueue(workq);
destroy_workqueue(workq);
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 541dbcf22d0e..80970a1738f8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1562,7 +1562,6 @@ static void __exit c4iw_exit_module(void)
kfree(ctx);
}
mutex_unlock(&dev_mutex);
- flush_workqueue(reg_workq);
destroy_workqueue(reg_workq);
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
c4iw_cm_term();
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index e7337662aff8..0c8fd5a85fcb 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -366,23 +366,23 @@ enum counters {
NR_COUNTERS
};
-static const char * const names[] = {
- [IP4INSEGS] = "ip4InSegs",
- [IP4OUTSEGS] = "ip4OutSegs",
- [IP4RETRANSSEGS] = "ip4RetransSegs",
- [IP4OUTRSTS] = "ip4OutRsts",
- [IP6INSEGS] = "ip6InSegs",
- [IP6OUTSEGS] = "ip6OutSegs",
- [IP6RETRANSSEGS] = "ip6RetransSegs",
- [IP6OUTRSTS] = "ip6OutRsts"
+static const struct rdma_stat_desc cxgb4_descs[] = {
+ [IP4INSEGS].name = "ip4InSegs",
+ [IP4OUTSEGS].name = "ip4OutSegs",
+ [IP4RETRANSSEGS].name = "ip4RetransSegs",
+ [IP4OUTRSTS].name = "ip4OutRsts",
+ [IP6INSEGS].name = "ip6InSegs",
+ [IP6OUTSEGS].name = "ip6OutSegs",
+ [IP6RETRANSSEGS].name = "ip6RetransSegs",
+ [IP6OUTRSTS].name = "ip6OutRsts"
};
static struct rdma_hw_stats *c4iw_alloc_device_stats(struct ib_device *ibdev)
{
- BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
+ BUILD_BUG_ON(ARRAY_SIZE(cxgb4_descs) != NR_COUNTERS);
/* FIXME: these look like port stats */
- return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
+ return rdma_alloc_hw_stats_struct(cxgb4_descs, NR_COUNTERS,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 87b1dadeb7fe..7352a1f5d811 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -20,14 +20,14 @@
#define EFA_IRQNAME_SIZE 40
-/* 1 for AENQ + ADMIN */
-#define EFA_NUM_MSIX_VEC 1
#define EFA_MGMNT_MSIX_VEC_IDX 0
+#define EFA_COMP_EQS_VEC_BASE 1
struct efa_irq {
irq_handler_t handler;
void *data;
u32 irqn;
+ u32 vector;
cpumask_t affinity_hint_mask;
char name[EFA_IRQNAME_SIZE];
};
@@ -61,6 +61,13 @@ struct efa_dev {
struct efa_irq admin_irq;
struct efa_stats stats;
+
+ /* Array of completion EQs */
+ struct efa_eq *eqs;
+ unsigned int neqs;
+
+ /* Only stores CQs with interrupts enabled */
+ struct xarray cqs_xa;
};
struct efa_ucontext {
@@ -84,8 +91,11 @@ struct efa_cq {
dma_addr_t dma_addr;
void *cpu_addr;
struct rdma_user_mmap_entry *mmap_entry;
+ struct rdma_user_mmap_entry *db_mmap_entry;
size_t size;
u16 cq_idx;
+ /* NULL when no interrupts requested */
+ struct efa_eq *eq;
};
struct efa_qp {
@@ -116,6 +126,11 @@ struct efa_ah {
u8 id[EFA_GID_SIZE];
};
+struct efa_eq {
+ struct efa_com_eq eeq;
+ struct efa_irq irq;
+};
+
int efa_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata);
@@ -139,6 +154,10 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_udata *udata);
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index fa38b34eddb8..0b0b93b529f3 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -28,7 +28,9 @@ enum efa_admin_aq_opcode {
EFA_ADMIN_DEALLOC_PD = 15,
EFA_ADMIN_ALLOC_UAR = 16,
EFA_ADMIN_DEALLOC_UAR = 17,
- EFA_ADMIN_MAX_OPCODE = 17,
+ EFA_ADMIN_CREATE_EQ = 18,
+ EFA_ADMIN_DESTROY_EQ = 19,
+ EFA_ADMIN_MAX_OPCODE = 19,
};
enum efa_admin_aq_feature_id {
@@ -38,6 +40,7 @@ enum efa_admin_aq_feature_id {
EFA_ADMIN_QUEUE_ATTR = 4,
EFA_ADMIN_HW_HINTS = 5,
EFA_ADMIN_HOST_INFO = 6,
+ EFA_ADMIN_EVENT_QUEUE_ATTR = 7,
};
/* QP transport type */
@@ -430,8 +433,8 @@ struct efa_admin_create_cq_cmd {
/*
* 4:0 : reserved5 - MBZ
* 5 : interrupt_mode_enabled - if set, cq operates
- * in interrupt mode (i.e. CQ events and MSI-X are
- * generated), otherwise - polling
+ * in interrupt mode (i.e. CQ events and EQ elements
+ * are generated), otherwise - polling
* 6 : virt - If set, ring base address is virtual
* (IOVA returned by MR registration)
* 7 : reserved6 - MBZ
@@ -448,8 +451,11 @@ struct efa_admin_create_cq_cmd {
/* completion queue depth in # of entries. must be power of 2 */
u16 cq_depth;
- /* msix vector assigned to this cq */
- u32 msix_vector_idx;
+ /* EQ number assigned to this cq */
+ u16 eqn;
+
+ /* MBZ */
+ u16 reserved;
/*
* CQ ring base address, virtual or physical depending on 'virt'
@@ -480,6 +486,15 @@ struct efa_admin_create_cq_resp {
/* actual cq depth in number of entries */
u16 cq_actual_depth;
+
+ /* CQ doorbell address, as offset to PCIe DB BAR */
+ u32 db_offset;
+
+ /*
+ * 0 : db_valid - If set, doorbell offset is valid.
+ * Always set when interrupts are requested.
+ */
+ u32 flags;
};
struct efa_admin_destroy_cq_cmd {
@@ -669,6 +684,17 @@ struct efa_admin_feature_queue_attr_desc {
u16 max_tx_batch;
};
+struct efa_admin_event_queue_attr_desc {
+ /* The maximum number of event queues supported */
+ u32 max_eq;
+
+ /* Maximum number of EQEs per Event Queue */
+ u32 max_eq_depth;
+
+ /* Supported events bitmask */
+ u32 event_bitmask;
+};
+
struct efa_admin_feature_aenq_desc {
/* bitmask for AENQ groups the device can report */
u32 supported_groups;
@@ -727,6 +753,8 @@ struct efa_admin_get_feature_resp {
struct efa_admin_feature_queue_attr_desc queue_attr;
+ struct efa_admin_event_queue_attr_desc event_queue_attr;
+
struct efa_admin_hw_hints hw_hints;
} u;
};
@@ -810,6 +838,60 @@ struct efa_admin_dealloc_uar_resp {
struct efa_admin_acq_common_desc acq_common_desc;
};
+struct efa_admin_create_eq_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ /* Size of the EQ in entries, must be power of 2 */
+ u16 depth;
+
+ /* MSI-X table entry index */
+ u8 msix_vec;
+
+ /*
+ * 4:0 : entry_size_words - size of EQ entry in
+ * 32-bit words
+ * 7:5 : reserved - MBZ
+ */
+ u8 caps;
+
+ /* EQ ring base address */
+ struct efa_common_mem_addr ba;
+
+ /*
+ * Enabled events on this EQ
+ * 0 : completion_events - Enable completion events
+ * 31:1 : reserved - MBZ
+ */
+ u32 event_bitmask;
+
+ /* MBZ */
+ u32 reserved;
+};
+
+struct efa_admin_create_eq_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* EQ number */
+ u16 eqn;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_destroy_eq_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ /* EQ number */
+ u16 eqn;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_destroy_eq_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
/* asynchronous event notification groups */
enum efa_admin_aenq_group {
EFA_ADMIN_FATAL_ERROR = 1,
@@ -899,10 +981,18 @@ struct efa_admin_host_info {
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+/* create_cq_resp */
+#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK BIT(0)
+
/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1)
+/* create_eq_cmd */
+#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+#define EFA_ADMIN_CREATE_EQ_CMD_VIRT_MASK BIT(6)
+#define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK BIT(0)
+
/* host_info */
#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK GENMASK(15, 8)
diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h
index 78ff9389ae25..83f20c38a840 100644
--- a/drivers/infiniband/hw/efa/efa_admin_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_defs.h
@@ -118,6 +118,43 @@ struct efa_admin_aenq_entry {
u32 inline_data_w4[12];
};
+enum efa_admin_eqe_event_type {
+ EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION = 0,
+};
+
+/* Completion event */
+struct efa_admin_comp_event {
+ /* CQ number */
+ u16 cqn;
+
+ /* MBZ */
+ u16 reserved;
+
+ /* MBZ */
+ u32 reserved2;
+};
+
+/* Event Queue Element */
+struct efa_admin_eqe {
+ /*
+ * 0 : phase
+ * 8:1 : event_type - Event type
+ * 31:9 : reserved - MBZ
+ */
+ u32 common;
+
+ /* MBZ */
+ u32 reserved;
+
+ union {
+ /* Event data */
+ u32 event_data[2];
+
+ /* Completion Event */
+ struct efa_admin_comp_event comp_event;
+ } u;
+};
+
/* aq_common_desc */
#define EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
#define EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
@@ -131,4 +168,8 @@ struct efa_admin_aenq_entry {
/* aenq_common_desc */
#define EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+/* eqe */
+#define EFA_ADMIN_EQE_PHASE_MASK BIT(0)
+#define EFA_ADMIN_EQE_EVENT_TYPE_MASK GENMASK(8, 1)
+
#endif /* _EFA_ADMIN_H_ */
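The new EQE packs the phase bit and event type into a single 32-bit word behind the masks above, and the driver reads them with EFA_GET(). A sketch of mask-based field extraction; get_field() is a stand-in for the real FIELD_GET-style macro:

#include <stdio.h>
#include <stdint.h>

#define PHASE_MASK      0x00000001u /* BIT(0) */
#define EVENT_TYPE_MASK 0x000001feu /* GENMASK(8, 1) */

/* Mask the word, then shift down by the mask's lowest set bit. */
static uint32_t get_field(uint32_t word, uint32_t mask)
{
    return (word & mask) / (mask & -mask);
}

int main(void)
{
    uint32_t common = (0u << 1) | 1u; /* completion event, phase = 1 */

    printf("phase=%u type=%u\n",
           get_field(common, PHASE_MASK),
           get_field(common, EVENT_TYPE_MASK));
    return 0;
}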
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 0d523ad736c7..16a24a05fc2a 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -56,11 +56,19 @@ static const char *efa_com_cmd_str(u8 cmd)
EFA_CMD_STR_CASE(DEALLOC_PD);
EFA_CMD_STR_CASE(ALLOC_UAR);
EFA_CMD_STR_CASE(DEALLOC_UAR);
+ EFA_CMD_STR_CASE(CREATE_EQ);
+ EFA_CMD_STR_CASE(DESTROY_EQ);
default: return "unknown command opcode";
}
#undef EFA_CMD_STR_CASE
}
+void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low)
+{
+ *addr_low = lower_32_bits(addr);
+ *addr_high = upper_32_bits(addr);
+}
+
static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
{
struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
@@ -1081,3 +1089,159 @@ int efa_com_dev_reset(struct efa_com_dev *edev,
return 0;
}
+
+static int efa_com_create_eq(struct efa_com_dev *edev,
+ struct efa_com_create_eq_params *params,
+ struct efa_com_create_eq_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_create_eq_resp resp = {};
+ struct efa_admin_create_eq_cmd cmd = {};
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_CREATE_EQ;
+ EFA_SET(&cmd.caps, EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS,
+ params->entry_size_in_bytes / 4);
+ cmd.depth = params->depth;
+ cmd.event_bitmask = params->event_bitmask;
+ cmd.msix_vec = params->msix_vec;
+
+ efa_com_set_dma_addr(params->dma_addr, &cmd.ba.mem_addr_high,
+ &cmd.ba.mem_addr_low);
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to create eq[%d]\n", err);
+ return err;
+ }
+
+ result->eqn = resp.eqn;
+
+ return 0;
+}
+
+static void efa_com_destroy_eq(struct efa_com_dev *edev,
+ struct efa_com_destroy_eq_params *params)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_destroy_eq_resp resp = {};
+ struct efa_admin_destroy_eq_cmd cmd = {};
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_DESTROY_EQ;
+ cmd.eqn = params->eqn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err)
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to destroy EQ-%u [%d]\n", cmd.eqn,
+ err);
+}
+
+static void efa_com_arm_eq(struct efa_com_dev *edev, struct efa_com_eq *eeq)
+{
+ u32 val = 0;
+
+ EFA_SET(&val, EFA_REGS_EQ_DB_EQN, eeq->eqn);
+ EFA_SET(&val, EFA_REGS_EQ_DB_ARM, 1);
+
+ writel(val, edev->reg_bar + EFA_REGS_EQ_DB_OFF);
+}
+
+void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
+ struct efa_com_eq *eeq)
+{
+ struct efa_admin_eqe *eqe;
+ u32 processed = 0;
+ u8 phase;
+ u32 ci;
+
+ ci = eeq->cc & (eeq->depth - 1);
+ phase = eeq->phase;
+ eqe = &eeq->eqes[ci];
+
+ /* Go over all the events */
+ while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) {
+ /*
+ * Do not read the rest of the completion entry before the
+ * phase bit was validated
+ */
+ dma_rmb();
+
+ eeq->cb(eeq, eqe);
+
+ /* Get next event entry */
+ ci++;
+ processed++;
+
+ if (ci == eeq->depth) {
+ ci = 0;
+ phase = !phase;
+ }
+
+ eqe = &eeq->eqes[ci];
+ }
+
+ eeq->cc += processed;
+ eeq->phase = phase;
+ efa_com_arm_eq(eeq->edev, eeq);
+}
+
+void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq)
+{
+ struct efa_com_destroy_eq_params params = {
+ .eqn = eeq->eqn,
+ };
+
+ efa_com_destroy_eq(edev, &params);
+ dma_free_coherent(edev->dmadev, eeq->depth * sizeof(*eeq->eqes),
+ eeq->eqes, eeq->dma_addr);
+}
+
+int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
+ efa_eqe_handler cb, u16 depth, u8 msix_vec)
+{
+ struct efa_com_create_eq_params params = {};
+ struct efa_com_create_eq_result result = {};
+ int err;
+
+ params.depth = depth;
+ params.entry_size_in_bytes = sizeof(*eeq->eqes);
+ EFA_SET(&params.event_bitmask,
+ EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS, 1);
+ params.msix_vec = msix_vec;
+
+ eeq->eqes = dma_alloc_coherent(edev->dmadev,
+ params.depth * sizeof(*eeq->eqes),
+ &params.dma_addr, GFP_KERNEL);
+ if (!eeq->eqes)
+ return -ENOMEM;
+
+ err = efa_com_create_eq(edev, &params, &result);
+ if (err)
+ goto err_free_coherent;
+
+ eeq->eqn = result.eqn;
+ eeq->edev = edev;
+ eeq->dma_addr = params.dma_addr;
+ eeq->phase = 1;
+ eeq->depth = params.depth;
+ eeq->cb = cb;
+ efa_com_arm_eq(edev, eeq);
+
+ return 0;
+
+err_free_coherent:
+ dma_free_coherent(edev->dmadev, params.depth * sizeof(*eeq->eqes),
+ eeq->eqes, params.dma_addr);
+ return err;
+}
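efa_com_eq_comp_intr_handler() consumes the EQ with a phase bit rather than a shared head pointer: an entry belongs to the consumer once its phase bit matches the expected value, and the expectation flips on every wrap. A small user-space model of that consumer; struct eqe and drain() are simplified stand-ins:

#include <stdio.h>
#include <stdint.h>

#define DEPTH 4 /* power of 2, like the EFA EQ depth */

struct eqe { uint32_t phase; uint32_t data; };

static void drain(struct eqe *ring, uint32_t *cc, uint8_t *phase)
{
    uint32_t ci = *cc & (DEPTH - 1);

    /* An entry is valid when its phase bit matches the expected phase;
     * the kernel inserts a dma_rmb() here before reading the payload.
     * The expected phase flips on every wrap of the ring. */
    while ((ring[ci].phase & 1) == *phase) {
        printf("event %u\n", (unsigned)ring[ci].data);
        (*cc)++;
        ci = *cc & (DEPTH - 1);
        if (ci == 0)
            *phase = !*phase;
    }
}

int main(void)
{
    struct eqe ring[DEPTH] = {
        { .phase = 1, .data = 10 },
        { .phase = 1, .data = 11 },
        { .phase = 0 }, /* producer has not written these yet */
        { .phase = 0 },
    };
    uint32_t cc = 0;
    uint8_t phase = 1;

    drain(ring, &cc, &phase); /* prints events 10 and 11, then stops */
    return 0;
}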
diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h
index 5e4c88877ddb..77282234ce68 100644
--- a/drivers/infiniband/hw/efa/efa_com.h
+++ b/drivers/infiniband/hw/efa/efa_com.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_H_
@@ -80,6 +80,9 @@ struct efa_com_admin_queue {
};
struct efa_aenq_handlers;
+struct efa_com_eq;
+typedef void (*efa_eqe_handler)(struct efa_com_eq *eeq,
+ struct efa_admin_eqe *eqe);
struct efa_com_aenq {
struct efa_admin_aenq_entry *entries;
@@ -112,6 +115,33 @@ struct efa_com_dev {
struct efa_com_mmio_read mmio_read;
};
+struct efa_com_eq {
+ struct efa_com_dev *edev;
+ struct efa_admin_eqe *eqes;
+ dma_addr_t dma_addr;
+ u32 cc; /* Consumer counter */
+ u16 eqn;
+ u16 depth;
+ u8 phase;
+ efa_eqe_handler cb;
+};
+
+struct efa_com_create_eq_params {
+ dma_addr_t dma_addr;
+ u32 event_bitmask;
+ u16 depth;
+ u8 entry_size_in_bytes;
+ u8 msix_vec;
+};
+
+struct efa_com_create_eq_result {
+ u16 eqn;
+};
+
+struct efa_com_destroy_eq_params {
+ u16 eqn;
+};
+
typedef void (*efa_aenq_handler)(void *data,
struct efa_admin_aenq_entry *aenq_e);
@@ -121,9 +151,13 @@ struct efa_aenq_handlers {
efa_aenq_handler unimplemented_handler;
};
+void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
int efa_com_admin_init(struct efa_com_dev *edev,
struct efa_aenq_handlers *aenq_handlers);
void efa_com_admin_destroy(struct efa_com_dev *edev);
+int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
+ efa_eqe_handler cb, u16 depth, u8 msix_vec);
+void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq);
int efa_com_dev_reset(struct efa_com_dev *edev,
enum efa_regs_reset_reason_types reset_reason);
void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling);
@@ -140,5 +174,7 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
struct efa_admin_acq_entry *comp,
size_t comp_size);
void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data);
+void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
+ struct efa_com_eq *eeq);
#endif /* _EFA_COM_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index f752ef64159c..fb405da4e1db 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -1,17 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
#include "efa_com_cmd.h"
-void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low)
-{
- *addr_low = lower_32_bits(addr);
- *addr_high = upper_32_bits(addr);
-}
-
int efa_com_create_qp(struct efa_com_dev *edev,
struct efa_com_create_qp_params *params,
struct efa_com_create_qp_result *res)
@@ -157,7 +151,7 @@ int efa_com_create_cq(struct efa_com_dev *edev,
struct efa_com_create_cq_params *params,
struct efa_com_create_cq_result *result)
{
- struct efa_admin_create_cq_resp cmd_completion;
+ struct efa_admin_create_cq_resp cmd_completion = {};
struct efa_admin_create_cq_cmd create_cmd = {};
struct efa_com_admin_queue *aq = &edev->aq;
int err;
@@ -169,6 +163,11 @@ int efa_com_create_cq(struct efa_com_dev *edev,
create_cmd.cq_depth = params->cq_depth;
create_cmd.num_sub_cqs = params->num_sub_cqs;
create_cmd.uar = params->uarn;
+ if (params->interrupt_mode_enabled) {
+ EFA_SET(&create_cmd.cq_caps_1,
+ EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED, 1);
+ create_cmd.eqn = params->eqn;
+ }
efa_com_set_dma_addr(params->dma_addr,
&create_cmd.cq_ba.mem_addr_high,
@@ -187,6 +186,9 @@ int efa_com_create_cq(struct efa_com_dev *edev,
result->cq_idx = cmd_completion.cq_idx;
result->actual_depth = params->cq_depth;
+ result->db_off = cmd_completion.db_offset;
+ result->db_valid = EFA_GET(&cmd_completion.flags,
+ EFA_ADMIN_CREATE_CQ_RESP_DB_VALID);
return 0;
}
@@ -497,6 +499,23 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
sizeof(resp.u.network_attr.addr));
result->mtu = resp.u.network_attr.mtu;
+ if (efa_com_check_supported_feature_id(edev,
+ EFA_ADMIN_EVENT_QUEUE_ATTR)) {
+ err = efa_com_get_feature(edev, &resp,
+ EFA_ADMIN_EVENT_QUEUE_ATTR);
+ if (err) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to get event queue attributes %d\n",
+ err);
+ return err;
+ }
+
+ result->max_eq = resp.u.event_queue_attr.max_eq;
+ result->max_eq_depth = resp.u.event_queue_attr.max_eq_depth;
+ result->event_bitmask = resp.u.event_queue_attr.event_bitmask;
+ }
+
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index eea4ebfbe6ec..c33010bbf9e8 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
@@ -73,7 +73,9 @@ struct efa_com_create_cq_params {
u16 cq_depth;
u16 num_sub_cqs;
u16 uarn;
+ u16 eqn;
u8 entry_size_in_bytes;
+ bool interrupt_mode_enabled;
};
struct efa_com_create_cq_result {
@@ -81,6 +83,8 @@ struct efa_com_create_cq_result {
u16 cq_idx;
/* actual cq depth in # of entries */
u16 actual_depth;
+ u32 db_off;
+ bool db_valid;
};
struct efa_com_destroy_cq_params {
@@ -125,6 +129,9 @@ struct efa_com_get_device_attr_result {
u32 max_llq_size;
u32 max_rdma_size;
u32 device_caps;
+ u32 max_eq;
+ u32 max_eq_depth;
+ u32 event_bitmask; /* EQ events bitmask */
u16 sub_cqs_per_cq;
u16 max_sq_sge;
u16 max_rq_sge;
@@ -260,7 +267,6 @@ union efa_com_get_stats_result {
struct efa_com_rdma_read_stats rdma_read_stats;
};
-void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
int efa_com_create_qp(struct efa_com_dev *edev,
struct efa_com_create_qp_params *params,
struct efa_com_create_qp_result *res);
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 417dea5f90cf..94b94cca4870 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -67,6 +67,47 @@ static void efa_release_bars(struct efa_dev *dev, int bars_mask)
pci_release_selected_regions(pdev, release_bars);
}
+static void efa_process_comp_eqe(struct efa_dev *dev, struct efa_admin_eqe *eqe)
+{
+ u16 cqn = eqe->u.comp_event.cqn;
+ struct efa_cq *cq;
+
+ /* Safe to load as we're in irq and removal calls synchronize_irq() */
+ cq = xa_load(&dev->cqs_xa, cqn);
+ if (unlikely(!cq)) {
+ ibdev_err_ratelimited(&dev->ibdev,
+ "Completion event on non-existent CQ[%u]",
+ cqn);
+ return;
+ }
+
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+static void efa_process_eqe(struct efa_com_eq *eeq, struct efa_admin_eqe *eqe)
+{
+ struct efa_dev *dev = container_of(eeq->edev, struct efa_dev, edev);
+
+ if (likely(EFA_GET(&eqe->common, EFA_ADMIN_EQE_EVENT_TYPE) ==
+ EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION))
+ efa_process_comp_eqe(dev, eqe);
+ else
+ ibdev_err_ratelimited(&dev->ibdev,
+ "Unknown event type received %lu",
+ EFA_GET(&eqe->common,
+ EFA_ADMIN_EQE_EVENT_TYPE));
+}
+
+static irqreturn_t efa_intr_msix_comp(int irq, void *data)
+{
+ struct efa_eq *eq = data;
+ struct efa_com_dev *edev = eq->eeq.edev;
+
+ efa_com_eq_comp_intr_handler(edev, &eq->eeq);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
{
struct efa_dev *dev = data;
@@ -77,26 +118,43 @@ static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
return IRQ_HANDLED;
}
-static int efa_request_mgmnt_irq(struct efa_dev *dev)
+static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
{
- struct efa_irq *irq;
int err;
- irq = &dev->admin_irq;
err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
if (err) {
- dev_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n",
- err);
+ dev_err(&dev->pdev->dev, "Failed to request irq %s (%d)\n",
+ irq->name, err);
return err;
}
- dev_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n",
- nr_cpumask_bits, &irq->affinity_hint_mask, irq->irqn);
irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);
return 0;
}
+static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
+ int vector)
+{
+ u32 cpu;
+
+ cpu = vector - EFA_COMP_EQS_VEC_BASE;
+ snprintf(eq->irq.name, EFA_IRQNAME_SIZE, "efa-comp%d@pci:%s", cpu,
+ pci_name(dev->pdev));
+ eq->irq.handler = efa_intr_msix_comp;
+ eq->irq.data = eq;
+ eq->irq.vector = vector;
+ eq->irq.irqn = pci_irq_vector(dev->pdev, vector);
+ cpumask_set_cpu(cpu, &eq->irq.affinity_hint_mask);
+}
+
+static void efa_free_irq(struct efa_dev *dev, struct efa_irq *irq)
+{
+ irq_set_affinity_hint(irq->irqn, NULL);
+ free_irq(irq->irqn, irq->data);
+}
+
static void efa_setup_mgmnt_irq(struct efa_dev *dev)
{
u32 cpu;
@@ -105,8 +163,9 @@ static void efa_setup_mgmnt_irq(struct efa_dev *dev)
"efa-mgmnt@pci:%s", pci_name(dev->pdev));
dev->admin_irq.handler = efa_intr_msix_mgmnt;
dev->admin_irq.data = dev;
- dev->admin_irq.irqn =
- pci_irq_vector(dev->pdev, dev->admin_msix_vector_idx);
+ dev->admin_irq.vector = dev->admin_msix_vector_idx;
+ dev->admin_irq.irqn = pci_irq_vector(dev->pdev,
+ dev->admin_msix_vector_idx);
cpu = cpumask_first(cpu_online_mask);
cpumask_set_cpu(cpu,
&dev->admin_irq.affinity_hint_mask);
@@ -115,20 +174,11 @@ static void efa_setup_mgmnt_irq(struct efa_dev *dev)
dev->admin_irq.name);
}
-static void efa_free_mgmnt_irq(struct efa_dev *dev)
-{
- struct efa_irq *irq;
-
- irq = &dev->admin_irq;
- irq_set_affinity_hint(irq->irqn, NULL);
- free_irq(irq->irqn, irq->data);
-}
-
static int efa_set_mgmnt_irq(struct efa_dev *dev)
{
efa_setup_mgmnt_irq(dev);
- return efa_request_mgmnt_irq(dev);
+ return efa_request_irq(dev, &dev->admin_irq);
}
static int efa_request_doorbell_bar(struct efa_dev *dev)
@@ -234,6 +284,72 @@ static void efa_set_host_info(struct efa_dev *dev)
dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
}
+static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
+{
+ efa_com_eq_destroy(&dev->edev, &eq->eeq);
+ efa_free_irq(dev, &eq->irq);
+}
+
+static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
+{
+ int err;
+
+ efa_setup_comp_irq(dev, eq, msix_vec);
+ err = efa_request_irq(dev, &eq->irq);
+ if (err)
+ return err;
+
+ err = efa_com_eq_init(&dev->edev, &eq->eeq, efa_process_eqe,
+ dev->dev_attr.max_eq_depth, msix_vec);
+ if (err)
+ goto err_free_comp_irq;
+
+ return 0;
+
+err_free_comp_irq:
+ efa_free_irq(dev, &eq->irq);
+ return err;
+}
+
+static int efa_create_eqs(struct efa_dev *dev)
+{
+ unsigned int neqs = dev->dev_attr.max_eq;
+ int err;
+ int i;
+
+ neqs = min_t(unsigned int, neqs, num_online_cpus());
+ dev->neqs = neqs;
+ dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
+ if (!dev->eqs)
+ return -ENOMEM;
+
+ for (i = 0; i < neqs; i++) {
+ err = efa_create_eq(dev, &dev->eqs[i],
+ i + EFA_COMP_EQS_VEC_BASE);
+ if (err)
+ goto err_destroy_eqs;
+ }
+
+ return 0;
+
+err_destroy_eqs:
+ for (i--; i >= 0; i--)
+ efa_destroy_eq(dev, &dev->eqs[i]);
+ kfree(dev->eqs);
+
+ return err;
+}
+
+static void efa_destroy_eqs(struct efa_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->neqs; i++)
+ efa_destroy_eq(dev, &dev->eqs[i]);
+
+ kfree(dev->eqs);
+}
+
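
[Editor's note] efa_create_eqs() follows the usual create-N-with-unwind idiom: allocate the array, create in ascending order, and on failure destroy only the instances that were actually created, in reverse. A stripped-down, self-contained sketch of the shape (demo_* names are placeholders):

#include <linux/slab.h>

struct demo_obj { int id; };
struct demo_dev { struct demo_obj *objs; };

static int demo_create_one(struct demo_dev *dev, struct demo_obj *o)
{ o->id = 1; return 0; }
static void demo_destroy_one(struct demo_dev *dev, struct demo_obj *o)
{ o->id = 0; }

static int demo_create_all(struct demo_dev *dev, unsigned int n)
{
	int err, i;

	dev->objs = kcalloc(n, sizeof(*dev->objs), GFP_KERNEL);
	if (!dev->objs)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		err = demo_create_one(dev, &dev->objs[i]);
		if (err)
			goto err_unwind;
	}
	return 0;

err_unwind:
	for (i--; i >= 0; i--)	/* only the ones that succeeded */
		demo_destroy_one(dev, &dev->objs[i]);
	kfree(dev->objs);
	return err;
}
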
static const struct ib_device_ops efa_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_EFA,
@@ -264,6 +380,7 @@ static const struct ib_device_ops efa_dev_ops = {
.query_port = efa_query_port,
.query_qp = efa_query_qp,
.reg_user_mr = efa_reg_mr,
+ .reg_user_mr_dmabuf = efa_reg_user_mr_dmabuf,
INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
@@ -300,23 +417,29 @@ static int efa_ib_device_add(struct efa_dev *dev)
if (err)
goto err_release_doorbell_bar;
+ err = efa_create_eqs(dev);
+ if (err)
+ goto err_release_doorbell_bar;
+
efa_set_host_info(dev);
dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
dev->ibdev.phys_port_cnt = 1;
- dev->ibdev.num_comp_vectors = 1;
+ dev->ibdev.num_comp_vectors = dev->neqs ?: 1;
dev->ibdev.dev.parent = &pdev->dev;
ib_set_device_ops(&dev->ibdev, &efa_dev_ops);
err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
if (err)
- goto err_release_doorbell_bar;
+ goto err_destroy_eqs;
ibdev_info(&dev->ibdev, "IB device registered\n");
return 0;
+err_destroy_eqs:
+ efa_destroy_eqs(dev);
err_release_doorbell_bar:
efa_release_doorbell_bar(dev);
return err;
@@ -324,9 +447,10 @@ err_release_doorbell_bar:
static void efa_ib_device_remove(struct efa_dev *dev)
{
- efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
ibdev_info(&dev->ibdev, "Unregister ib device\n");
ib_unregister_device(&dev->ibdev);
+ efa_destroy_eqs(dev);
+ efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
efa_release_doorbell_bar(dev);
}
@@ -339,8 +463,12 @@ static int efa_enable_msix(struct efa_dev *dev)
{
int msix_vecs, irq_num;
- /* Reserve the max msix vectors we might need */
- msix_vecs = EFA_NUM_MSIX_VEC;
+ /*
+	 * Reserve the max MSI-X vectors we might need; one vector is
+	 * reserved for admin.
+ */
+ msix_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
+ num_online_cpus() + 1);
dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
msix_vecs);
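
[Editor's note] The vector budget is one completion vector per online CPU plus the dedicated admin vector, clamped to what the function exposes. A hedged sketch of how such a budget is typically requested — the driver's actual allocation call is outside this hunk, so pci_alloc_irq_vectors() here is an assumption:

#include <linux/pci.h>
#include <linux/cpumask.h>

static int demo_enable_msix(struct pci_dev *pdev)
{
	/* pci_msix_vec_count() may return -errno; error handling elided. */
	int want = min_t(int, pci_msix_vec_count(pdev),
			 num_online_cpus() + 1);	/* +1 for admin */

	/* Ask for up to 'want'; the core may grant fewer (>= 1). */
	return pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);
}
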
@@ -421,6 +549,7 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
edev->efa_dev = dev;
edev->dmadev = &pdev->dev;
dev->pdev = pdev;
+ xa_init(&dev->cqs_xa);
bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
@@ -476,7 +605,7 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
return dev;
err_free_mgmnt_irq:
- efa_free_mgmnt_irq(dev);
+ efa_free_irq(dev, &dev->admin_irq);
err_disable_msix:
efa_disable_msix(dev);
err_reg_read_destroy:
@@ -499,11 +628,12 @@ static void efa_remove_device(struct pci_dev *pdev)
edev = &dev->edev;
efa_com_admin_destroy(edev);
- efa_free_mgmnt_irq(dev);
+ efa_free_irq(dev, &dev->admin_irq);
efa_disable_msix(dev);
efa_com_mmio_reg_read_destroy(edev);
devm_iounmap(&pdev->dev, edev->reg_bar);
efa_release_bars(dev, EFA_BASE_BAR_MASK);
+ xa_destroy(&dev->cqs_xa);
ib_dealloc_device(&dev->ibdev);
pci_disable_device(pdev);
}
diff --git a/drivers/infiniband/hw/efa/efa_regs_defs.h b/drivers/infiniband/hw/efa/efa_regs_defs.h
index 4017982fe13b..714ae6258800 100644
--- a/drivers/infiniband/hw/efa/efa_regs_defs.h
+++ b/drivers/infiniband/hw/efa/efa_regs_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_REGS_H_
@@ -42,6 +42,7 @@ enum efa_regs_reset_reason_types {
#define EFA_REGS_MMIO_REG_READ_OFF 0x5c
#define EFA_REGS_MMIO_RESP_LO_OFF 0x60
#define EFA_REGS_MMIO_RESP_HI_OFF 0x64
+#define EFA_REGS_EQ_DB_OFF 0x68
/* version register */
#define EFA_REGS_VERSION_MINOR_VERSION_MASK 0xff
@@ -93,4 +94,8 @@ enum efa_regs_reset_reason_types {
#define EFA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
#define EFA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+/* eq_db register */
+#define EFA_REGS_EQ_DB_EQN_MASK 0xffff
+#define EFA_REGS_EQ_DB_ARM_MASK 0x80000000
+
#endif /* _EFA_REGS_H_ */
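
[Editor's note] With the new eq_db register layout above, arming an event queue is a single 32-bit write combining the EQN with the ARM bit. A sketch using the masks from this hunk; the driver's actual doorbell write lives in the efa_com EQ code, which is not part of this diff:

#include <linux/bitfield.h>
#include <linux/io.h>

static void demo_arm_eq(void __iomem *reg_bar, u16 eqn)
{
	u32 val;

	val = FIELD_PREP(EFA_REGS_EQ_DB_EQN_MASK, eqn);
	val |= EFA_REGS_EQ_DB_ARM_MASK;		/* re-enable interrupts */

	writel(val, reg_bar + EFA_REGS_EQ_DB_OFF);
}
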
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index e5f9d90aad5e..ecfe70eb5efb 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -3,6 +3,8 @@
* Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
@@ -60,13 +62,14 @@ struct efa_user_mmap_entry {
op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
#define EFA_STATS_ENUM(ename, name) ename,
-#define EFA_STATS_STR(ename, name) [ename] = name,
+#define EFA_STATS_STR(ename, nam) \
+ [ename].name = nam,
enum efa_hw_device_stats {
EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
};
-static const char *const efa_device_stats_names[] = {
+static const struct rdma_stat_desc efa_device_stats_descs[] = {
EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
};
@@ -74,7 +77,7 @@ enum efa_hw_port_stats {
EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
};
-static const char *const efa_port_stats_names[] = {
+static const struct rdma_stat_desc efa_port_stats_descs[] = {
EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
};
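
[Editor's note] The stats rework swaps bare name arrays for struct rdma_stat_desc, reusing the same X-macro list so the enum and the descriptor table cannot drift apart. A self-contained miniature of the trick (DEMO_* names are invented):

#include <rdma/ib_verbs.h>	/* struct rdma_stat_desc */

#define DEMO_DEFINE_STATS(op)		\
	op(DEMO_TX_PKTS, "tx_pkts")	\
	op(DEMO_RX_PKTS, "rx_pkts")

#define DEMO_STATS_ENUM(ename, name)	ename,
#define DEMO_STATS_STR(ename, nam)	[ename].name = nam,

enum demo_hw_stats {
	DEMO_DEFINE_STATS(DEMO_STATS_ENUM)
};

static const struct rdma_stat_desc demo_stats_descs[] = {
	DEMO_DEFINE_STATS(DEMO_STATS_STR)
};
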
@@ -245,6 +248,9 @@ int efa_query_device(struct ib_device *ibdev,
if (EFA_DEV_CAP(dev, RNR_RETRY))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
+ if (dev->neqs)
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
+
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
if (err) {
@@ -984,6 +990,12 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
return efa_com_destroy_cq(&dev->edev, &params);
}
+static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)
+{
+ rdma_user_mmap_entry_remove(cq->db_mmap_entry);
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
+}
+
int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibcq->device);
@@ -993,15 +1005,25 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
"Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
- rdma_user_mmap_entry_remove(cq->mmap_entry);
+ efa_cq_user_mmap_entries_remove(cq);
efa_destroy_cq_idx(dev, cq->cq_idx);
+ if (cq->eq) {
+ xa_erase(&dev->cqs_xa, cq->cq_idx);
+ synchronize_irq(cq->eq->irq.irqn);
+ }
efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
return 0;
}
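
[Editor's note] The CQ teardown above is the other half of the lock-free xa_load() in efa_process_comp_eqe(): erase first so no new lookup can find the CQ, then synchronize_irq() so any handler already running on the EQ's vector drains before the CQ memory is freed. Condensed (cqs_xa/irqn are placeholders):

#include <linux/interrupt.h>
#include <linux/xarray.h>

/* IRQ side: plain lookup, no lock — safe because of the teardown below. */
static void demo_comp_event(struct xarray *cqs_xa, u32 cqn)
{
	void *cq = xa_load(cqs_xa, cqn);

	if (!cq)
		return;
	/* ...invoke the CQ's completion handler... */
}

/* Teardown side: unpublish, then wait out in-flight handlers. */
static void demo_remove_cq(struct xarray *cqs_xa, u32 cqn, unsigned int irqn)
{
	xa_erase(cqs_xa, cqn);
	synchronize_irq(irqn);	/* no handler can still see the CQ */
}
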
+static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec)
+{
+ return &dev->eqs[vec];
+}
+
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
- struct efa_ibv_create_cq_resp *resp)
+ struct efa_ibv_create_cq_resp *resp,
+ bool db_valid)
{
resp->q_mmap_size = cq->size;
cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
@@ -1011,6 +1033,21 @@ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
if (!cq->mmap_entry)
return -ENOMEM;
+ if (db_valid) {
+ cq->db_mmap_entry =
+ efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
+ dev->db_bar_addr + resp->db_off,
+ PAGE_SIZE, EFA_MMAP_IO_NC,
+ &resp->db_mmap_key);
+ if (!cq->db_mmap_entry) {
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
+ return -ENOMEM;
+ }
+
+ resp->db_off &= ~PAGE_MASK;
+ resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF;
+ }
+
return 0;
}
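
[Editor's note] Userspace mmaps whole pages, so the doorbell is exposed as two pieces: the containing page goes into the mmap entry and only the byte offset within that page is returned in resp->db_off (the `&= ~PAGE_MASK` above). A small sketch of the split, with demo_* names invented:

#include <asm/page.h>
#include <linux/types.h>

/* Split BAR base + register offset into (mmap'able page, in-page offset). */
static u32 demo_split_doorbell(phys_addr_t bar_base, u32 db_off,
			       phys_addr_t *page)
{
	phys_addr_t addr = bar_base + db_off;

	*page = addr & PAGE_MASK;	/* what user space maps */
	return addr & ~PAGE_MASK;	/* what user space adds to it */
}
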
@@ -1019,8 +1056,8 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
{
struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct efa_ucontext, ibucontext);
+ struct efa_com_create_cq_params params = {};
struct efa_ibv_create_cq_resp resp = {};
- struct efa_com_create_cq_params params;
struct efa_com_create_cq_result result;
struct ib_device *ibdev = ibcq->device;
struct efa_dev *dev = to_edev(ibdev);
@@ -1065,7 +1102,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_out;
}
- if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
+ if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) {
ibdev_dbg(ibdev,
"Incompatible ABI params, unknown fields in udata\n");
err = -EINVAL;
@@ -1101,29 +1138,45 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
params.dma_addr = cq->dma_addr;
params.entry_size_in_bytes = cmd.cq_entry_size;
params.num_sub_cqs = cmd.num_sub_cqs;
+ if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
+ cq->eq = efa_vec2eq(dev, attr->comp_vector);
+ params.eqn = cq->eq->eeq.eqn;
+ params.interrupt_mode_enabled = true;
+ }
+
err = efa_com_create_cq(&dev->edev, &params, &result);
if (err)
goto err_free_mapped;
+ resp.db_off = result.db_off;
resp.cq_idx = result.cq_idx;
cq->cq_idx = result.cq_idx;
cq->ibcq.cqe = result.actual_depth;
WARN_ON_ONCE(entries != result.actual_depth);
- err = cq_mmap_entries_setup(dev, cq, &resp);
+ err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
if (err) {
ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
cq->cq_idx);
goto err_destroy_cq;
}
+ if (cq->eq) {
+ err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n",
+ cq->cq_idx);
+ goto err_remove_mmap;
+ }
+ }
+
if (udata->outlen) {
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
if (err) {
ibdev_dbg(ibdev,
"Failed to copy udata for create_cq\n");
- goto err_remove_mmap;
+ goto err_xa_erase;
}
}
@@ -1132,8 +1185,11 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
+err_xa_erase:
+ if (cq->eq)
+ xa_erase(&dev->cqs_xa, cq->cq_idx);
err_remove_mmap:
- rdma_user_mmap_entry_remove(cq->mmap_entry);
+ efa_cq_user_mmap_entries_remove(cq);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
@@ -1490,26 +1546,18 @@ static int efa_create_pbl(struct efa_dev *dev,
return 0;
}
-struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
- u64 virt_addr, int access_flags,
- struct ib_udata *udata)
+static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
+ struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibpd->device);
- struct efa_com_reg_mr_params params = {};
- struct efa_com_reg_mr_result result = {};
- struct pbl_context pbl;
int supp_access_flags;
- unsigned int pg_sz;
struct efa_mr *mr;
- int inline_size;
- int err;
if (udata && udata->inlen &&
!ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
ibdev_dbg(&dev->ibdev,
"Incompatible ABI params, udata not cleared\n");
- err = -EINVAL;
- goto err_out;
+ return ERR_PTR(-EINVAL);
}
supp_access_flags =
@@ -1521,23 +1569,26 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
ibdev_dbg(&dev->ibdev,
"Unsupported access flags[%#x], supported[%#x]\n",
access_flags, supp_access_flags);
- err = -EOPNOTSUPP;
- goto err_out;
+ return ERR_PTR(-EOPNOTSUPP);
}
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- err = -ENOMEM;
- goto err_out;
- }
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
- if (IS_ERR(mr->umem)) {
- err = PTR_ERR(mr->umem);
- ibdev_dbg(&dev->ibdev,
- "Failed to pin and map user space memory[%d]\n", err);
- goto err_free;
- }
+ return mr;
+}
+
+static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
+ u64 length, u64 virt_addr, int access_flags)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_com_reg_mr_params params = {};
+ struct efa_com_reg_mr_result result = {};
+ struct pbl_context pbl;
+ unsigned int pg_sz;
+ int inline_size;
+ int err;
params.pd = to_epd(ibpd)->pdn;
params.iova = virt_addr;
@@ -1548,10 +1599,9 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
dev->dev_attr.page_size_cap,
virt_addr);
if (!pg_sz) {
- err = -EOPNOTSUPP;
ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
dev->dev_attr.page_size_cap);
- goto err_unmap;
+ return -EOPNOTSUPP;
}
params.page_shift = order_base_2(pg_sz);
@@ -1565,21 +1615,21 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
if (params.page_num <= inline_size) {
err = efa_create_inline_pbl(dev, mr, &params);
if (err)
- goto err_unmap;
+ return err;
err = efa_com_register_mr(&dev->edev, &params, &result);
if (err)
- goto err_unmap;
+ return err;
} else {
err = efa_create_pbl(dev, &pbl, mr, &params);
if (err)
- goto err_unmap;
+ return err;
err = efa_com_register_mr(&dev->edev, &params, &result);
pbl_destroy(dev, &pbl);
if (err)
- goto err_unmap;
+ return err;
}
mr->ibmr.lkey = result.l_key;
@@ -1587,9 +1637,78 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
mr->ibmr.length = length;
ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
+ return 0;
+}
+
+struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct efa_mr *mr;
+ int err;
+
+ mr = efa_alloc_mr(ibpd, access_flags, udata);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto err_out;
+ }
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
+ access_flags);
+ if (IS_ERR(umem_dmabuf)) {
+ err = PTR_ERR(umem_dmabuf);
+ ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
+ goto err_free;
+ }
+
+ mr->umem = &umem_dmabuf->umem;
+ err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
+ if (err)
+ goto err_release;
+
+ return &mr->ibmr;
+
+err_release:
+ ib_umem_release(mr->umem);
+err_free:
+ kfree(mr);
+err_out:
+ atomic64_inc(&dev->stats.reg_mr_err);
+ return ERR_PTR(err);
+}
+
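
[Editor's note] Both registration entry points now share efa_alloc_mr()/efa_register_mr(); they differ only in how the umem is obtained. The dma-buf path uses the pinned variant, presumably because the device has no way to fence DMA when the exporter wants to move the buffer. A hedged sketch of just the pinning step:

#include <linux/err.h>
#include <rdma/ib_umem.h>

/* Pin a dma-buf backed range up front (no move_notify support in HW). */
static struct ib_umem *demo_pin_dmabuf(struct ib_device *ibdev, u64 start,
				       u64 length, int fd, int access_flags)
{
	struct ib_umem_dmabuf *umem_dmabuf;

	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd,
						access_flags);
	if (IS_ERR(umem_dmabuf))
		return ERR_CAST(umem_dmabuf);

	return &umem_dmabuf->umem;
}
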
+struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_mr *mr;
+ int err;
+
+ mr = efa_alloc_mr(ibpd, access_flags, udata);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto err_out;
+ }
+
+ mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
+ if (IS_ERR(mr->umem)) {
+ err = PTR_ERR(mr->umem);
+ ibdev_dbg(&dev->ibdev,
+ "Failed to pin and map user space memory[%d]\n", err);
+ goto err_free;
+ }
+
+ err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
+ if (err)
+ goto err_release;
+
return &mr->ibmr;
-err_unmap:
+err_release:
ib_umem_release(mr->umem);
err_free:
kfree(mr);
@@ -1906,15 +2025,15 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
- return rdma_alloc_hw_stats_struct(efa_port_stats_names,
- ARRAY_SIZE(efa_port_stats_names),
+ return rdma_alloc_hw_stats_struct(efa_port_stats_descs,
+ ARRAY_SIZE(efa_port_stats_descs),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
{
- return rdma_alloc_hw_stats_struct(efa_device_stats_names,
- ARRAY_SIZE(efa_device_stats_names),
+ return rdma_alloc_hw_stats_struct(efa_device_stats_descs,
+ ARRAY_SIZE(efa_device_stats_descs),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -1939,7 +2058,7 @@ static int efa_fill_device_stats(struct efa_dev *dev,
stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
- return ARRAY_SIZE(efa_device_stats_names);
+ return ARRAY_SIZE(efa_device_stats_descs);
}
static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
@@ -1988,7 +2107,7 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
- return ARRAY_SIZE(efa_port_stats_names);
+ return ARRAY_SIZE(efa_port_stats_descs);
}
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index 519866b30a13..6eb739052121 100644
--- a/drivers/infiniband/hw/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HFI1
- tristate "Intel OPA Gen1 support"
+ tristate "Cornelis OPX Gen1 support"
depends on X86_64 && INFINIBAND_RDMAVT && I2C
select MMU_NOTIFIER
select CRC32
select I2C_ALGOBIT
help
- This is a low-level driver for Intel OPA Gen1 adapter.
+ This is a low-level driver for Cornelis OPX Gen1 adapter.
config HFI1_DEBUG_SDMA_ORDER
bool "HFI1 SDMA Order debug"
depends on INFINIBAND_HFI1
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 37273dc0c03c..ec37f4fd8e96 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
*/
/*
@@ -14918,7 +14919,7 @@ static int obtain_boardname(struct hfi1_devdata *dd)
{
/* generic board description */
const char generic[] =
- "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
+ "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series";
unsigned long size;
int ret;
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index de411884386b..61f341c3005c 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015-2020 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
*/
#include <linux/spinlock.h>
@@ -56,7 +57,7 @@ module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
+MODULE_DESCRIPTION("Cornelis Omni-Path Express driver");
/*
* MAX_PKT_RCV is the max # of packets processed per receive interrupt.
diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
index f275dd1abed8..e8ed05516bf2 100644
--- a/drivers/infiniband/hw/hfi1/efivar.c
+++ b/drivers/infiniband/hw/hfi1/efivar.c
@@ -3,7 +3,9 @@
* Copyright(c) 2015, 2016 Intel Corporation.
*/
-#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+
#include "efivar.h"
/* GUID for HFI1 variables in EFI */
@@ -112,7 +114,6 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
char prefix_name[64];
char name[64];
int result;
- int i;
/* create a common prefix */
snprintf(prefix_name, sizeof(prefix_name), "%04x:%02x:%02x.%x",
@@ -128,10 +129,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
* variable.
*/
if (result) {
- /* Converting to uppercase */
- for (i = 0; prefix_name[i]; i++)
- if (isalpha(prefix_name[i]))
- prefix_name[i] = toupper(prefix_name[i]);
+ string_upper(prefix_name, prefix_name);
snprintf(name, sizeof(name), "%s-%s", prefix_name, kind);
result = read_efi_var(name, size, return_data);
}
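
[Editor's note] string_upper() from <linux/string_helpers.h> copies src to dst while uppercasing, so passing the same buffer twice uppercases in place and the hand-rolled isalpha()/toupper() loop goes away. For example (the PCI name is hypothetical):

#include <linux/string_helpers.h>

static void demo_upper_in_place(void)
{
	char prefix_name[] = "0000:3b:00.0";

	string_upper(prefix_name, prefix_name);	/* -> "0000:3B:00.0" */
}
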
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index e3679d076eaa..dbd1c31830b9 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
*/
#include <linux/pci.h>
@@ -1342,7 +1343,7 @@ static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);
-#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
+#define DRIVER_LOAD_MSG "Cornelis " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "
const struct pci_device_id hfi1_pci_tbl[] = {
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index 2cff38b105ac..909122934246 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -44,22 +44,52 @@ union hfi1_ipoib_flow {
};
/**
+ * struct ipoib_txreq - IPOIB transmit descriptor
+ * @txreq: sdma transmit request
+ * @sdma_hdr: 9b ib headers
+ * @sdma_status: status returned by sdma engine
+ * @complete: non-zero implies complete
+ * @priv: ipoib netdev private data
+ * @txq: txq on which skb was output
+ * @skb: skb to send
+ */
+struct ipoib_txreq {
+ struct sdma_txreq txreq;
+ struct hfi1_sdma_header sdma_hdr;
+ int sdma_status;
+ int complete;
+ struct hfi1_ipoib_dev_priv *priv;
+ struct hfi1_ipoib_txq *txq;
+ struct sk_buff *skb;
+};
+
+/**
* struct hfi1_ipoib_circ_buf - List of items to be processed
- * @items: ring of items
- * @head: ring head
- * @tail: ring tail
+ * @items: ring of items, each a power-of-two size
* @max_items: number of ring slots (usable capacity is one less)
- * @producer_lock: producer sync lock
- * @consumer_lock: consumer sync lock
+ * @shift: log2 of the txreq item size, used to index @items
+ * @sent_txreqs: count of txreqs posted to sdma
+ * @tail: ring tail
+ * @stops: count of stops of queue
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
+ * @complete_txreqs: count of txreqs completed by sdma
+ * @head: ring head
*/
-struct ipoib_txreq;
struct hfi1_ipoib_circ_buf {
- struct ipoib_txreq **items;
- unsigned long head;
- unsigned long tail;
- unsigned long max_items;
- spinlock_t producer_lock; /* head sync lock */
- spinlock_t consumer_lock; /* tail sync lock */
+ void *items;
+ u32 max_items;
+ u32 shift;
+ /* consumer cache line */
+ u64 ____cacheline_aligned_in_smp sent_txreqs;
+ u32 avail;
+ u32 tail;
+ atomic_t stops;
+ atomic_t ring_full;
+ atomic_t no_desc;
+ /* producer cache line */
+ u64 ____cacheline_aligned_in_smp complete_txreqs;
+ u32 head;
};
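
[Editor's note] Storing txreqs in the ring by value (rather than keeping a ring of pointers into a kmem_cache) removes an allocation from the transmit hot path; rounding the item size to a power of two turns indexing into a shift, see hfi1_txreq_from_idx() further down. The sizing, in short (demo_* names invented):

#include <linux/log2.h>
#include <linux/slab.h>

/* Sketch: size the ring so indexing is (idx << shift). */
static void *demo_alloc_ring(u32 tx_queue_len, size_t item_sz,
			     u32 *max_items, u32 *shift, int node)
{
	u32 ring_size = roundup_pow_of_two(tx_queue_len + 1);
	u32 item_size = roundup_pow_of_two(item_sz);

	*max_items = ring_size;
	*shift = ilog2(item_size);
	return kcalloc_node(ring_size, item_size, GFP_KERNEL, node);
}
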
/**
@@ -68,33 +98,24 @@ struct hfi1_ipoib_circ_buf {
* @sde: sdma engine
* @tx_list: tx request list
* @sent_txreqs: count of txreqs posted to sdma
- * @stops: count of stops of queue
- * @ring_full: ring has been filled
- * @no_desc: descriptor shortage seen
* @flow: tracks when list needs to be flushed for a flow change
* @q_idx: ipoib Tx queue index
* @pkts_sent: indicator packets have been sent from this queue
* @wait: iowait structure
- * @complete_txreqs: count of txreqs completed by sdma
* @napi: pointer to tx napi interface
* @tx_ring: ring of ipoib txreqs to be reaped by napi callback
*/
struct hfi1_ipoib_txq {
+ struct napi_struct napi;
struct hfi1_ipoib_dev_priv *priv;
struct sdma_engine *sde;
struct list_head tx_list;
- u64 sent_txreqs;
- atomic_t stops;
- atomic_t ring_full;
- atomic_t no_desc;
union hfi1_ipoib_flow flow;
u8 q_idx;
bool pkts_sent;
struct iowait wait;
- atomic64_t ____cacheline_aligned_in_smp complete_txreqs;
- struct napi_struct *napi;
- struct hfi1_ipoib_circ_buf tx_ring;
+ struct hfi1_ipoib_circ_buf ____cacheline_aligned_in_smp tx_ring;
};
struct hfi1_ipoib_dev_priv {
@@ -102,15 +123,12 @@ struct hfi1_ipoib_dev_priv {
struct net_device *netdev;
struct ib_device *device;
struct hfi1_ipoib_txq *txqs;
- struct kmem_cache *txreq_cache;
- struct napi_struct *tx_napis;
+ const struct net_device_ops *netdev_ops;
+ struct rvt_qp *qp;
+ u32 qkey;
u16 pkey;
u16 pkey_index;
- u32 qkey;
u8 port_num;
-
- const struct net_device_ops *netdev_ops;
- struct rvt_qp *qp;
};
/* hfi1 ipoib rdma netdev's private data structure */
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index e594a961f513..e1a2b02bbd91 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -11,7 +11,7 @@
#include "ipoib.h"
#include "hfi.h"
-static u32 qpn_from_mac(u8 *mac_arr)
+static u32 qpn_from_mac(const u8 *mac_arr)
{
return (u32)mac_arr[1] << 16 | mac_arr[2] << 8 | mac_arr[3];
}
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index 15b0cb0f363f..f4010890309f 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -22,24 +22,6 @@
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
-/**
- * struct ipoib_txreq - IPOIB transmit descriptor
- * @txreq: sdma transmit request
- * @sdma_hdr: 9b ib headers
- * @sdma_status: status returned by sdma engine
- * @priv: ipoib netdev private data
- * @txq: txq on which skb was output
- * @skb: skb to send
- */
-struct ipoib_txreq {
- struct sdma_txreq txreq;
- struct hfi1_sdma_header sdma_hdr;
- int sdma_status;
- struct hfi1_ipoib_dev_priv *priv;
- struct hfi1_ipoib_txq *txq;
- struct sk_buff *skb;
-};
-
struct ipoib_txparms {
struct hfi1_devdata *dd;
struct rdma_ah_attr *ah_attr;
@@ -51,28 +33,34 @@ struct ipoib_txparms {
u8 entropy;
};
-static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
+static struct ipoib_txreq *
+hfi1_txreq_from_idx(struct hfi1_ipoib_circ_buf *r, u32 idx)
+{
+ return (struct ipoib_txreq *)(r->items + (idx << r->shift));
+}
+
+static u32 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
{
return sent - completed;
}
static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
{
- return hfi1_ipoib_txreqs(txq->sent_txreqs,
- atomic64_read(&txq->complete_txreqs));
+ return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
+ txq->tx_ring.complete_txreqs);
}
static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
{
trace_hfi1_txq_stop(txq);
- if (atomic_inc_return(&txq->stops) == 1)
+ if (atomic_inc_return(&txq->tx_ring.stops) == 1)
netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}
static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
{
trace_hfi1_txq_wake(txq);
- if (atomic_dec_and_test(&txq->stops))
+ if (atomic_dec_and_test(&txq->tx_ring.stops))
netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
}
@@ -90,9 +78,9 @@ static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
{
- ++txq->sent_txreqs;
+ ++txq->tx_ring.sent_txreqs;
if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
- !atomic_xchg(&txq->ring_full, 1)) {
+ !atomic_xchg(&txq->tx_ring.ring_full, 1)) {
trace_hfi1_txq_full(txq);
hfi1_ipoib_stop_txq(txq);
}
@@ -117,7 +105,7 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
* to protect against ring overflow.
*/
if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
- atomic_xchg(&txq->ring_full, 0)) {
+ atomic_xchg(&txq->tx_ring.ring_full, 0)) {
trace_hfi1_txq_xmit_unstopped(txq);
hfi1_ipoib_wake_txq(txq);
}
@@ -125,7 +113,7 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
{
- struct hfi1_ipoib_dev_priv *priv = tx->priv;
+ struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
if (likely(!tx->sdma_status)) {
dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len);
@@ -139,97 +127,73 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
}
napi_consume_skb(tx->skb, budget);
+ tx->skb = NULL;
sdma_txclean(priv->dd, &tx->txreq);
- kmem_cache_free(priv->txreq_cache, tx);
}
-static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget)
+static void hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq)
{
struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
- unsigned long head;
- unsigned long tail;
- unsigned int max_tx;
- int work_done;
- int tx_count;
-
- spin_lock_bh(&tx_ring->consumer_lock);
-
- /* Read index before reading contents at that index. */
- head = smp_load_acquire(&tx_ring->head);
- tail = tx_ring->tail;
- max_tx = tx_ring->max_items;
-
- work_done = min_t(int, CIRC_CNT(head, tail, max_tx), budget);
+ int i;
+ struct ipoib_txreq *tx;
- for (tx_count = work_done; tx_count; tx_count--) {
- hfi1_ipoib_free_tx(tx_ring->items[tail], budget);
- tail = CIRC_NEXT(tail, max_tx);
+ for (i = 0; i < tx_ring->max_items; i++) {
+ tx = hfi1_txreq_from_idx(tx_ring, i);
+ tx->complete = 0;
+ dev_kfree_skb_any(tx->skb);
+ tx->skb = NULL;
+ sdma_txclean(txq->priv->dd, &tx->txreq);
}
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+ tx_ring->complete_txreqs = 0;
+ tx_ring->sent_txreqs = 0;
+ tx_ring->avail = hfi1_ipoib_ring_hwat(txq);
+}
- atomic64_add(work_done, &txq->complete_txreqs);
+static int hfi1_ipoib_poll_tx_ring(struct napi_struct *napi, int budget)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(napi, struct hfi1_ipoib_txq, napi);
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+ u32 head = tx_ring->head;
+ u32 max_tx = tx_ring->max_items;
+ int work_done;
+ struct ipoib_txreq *tx = hfi1_txreq_from_idx(tx_ring, head);
- /* Finished freeing tx items so store the tail value. */
- smp_store_release(&tx_ring->tail, tail);
+ trace_hfi1_txq_poll(txq);
+ for (work_done = 0; work_done < budget; work_done++) {
+ /* See hfi1_ipoib_sdma_complete() */
+ if (!smp_load_acquire(&tx->complete))
+ break;
+ tx->complete = 0;
+ trace_hfi1_tx_produce(tx, head);
+ hfi1_ipoib_free_tx(tx, budget);
+ head = CIRC_NEXT(head, max_tx);
+ tx = hfi1_txreq_from_idx(tx_ring, head);
+ }
+ tx_ring->complete_txreqs += work_done;
- spin_unlock_bh(&tx_ring->consumer_lock);
+ /* Finished freeing tx items so store the head value. */
+ smp_store_release(&tx_ring->head, head);
hfi1_ipoib_check_queue_stopped(txq);
- return work_done;
-}
-
-static int hfi1_ipoib_process_tx_ring(struct napi_struct *napi, int budget)
-{
- struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev);
- struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];
-
- int work_done = hfi1_ipoib_drain_tx_ring(txq, budget);
-
if (work_done < budget)
napi_complete_done(napi, work_done);
return work_done;
}
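
[Editor's note] tx->complete is the handoff between the sdma completion (producer of the finished state) and the NAPI poller (consumer): smp_store_release() orders the payload writes (sdma_status, skb) before the flag, and smp_load_acquire() orders the flag read before the payload reads. The bare pattern, on an invented demo_req:

#include <linux/compiler.h>
#include <asm/barrier.h>

struct demo_req { int status; int complete; };

/* Completion side: payload first, then publish. */
static void demo_publish(struct demo_req *req, int status)
{
	req->status = status;
	smp_store_release(&req->complete, 1);
}

/* Poll side: flag first, then payload is safe to read. */
static bool demo_reap(struct demo_req *req, int *status)
{
	if (!smp_load_acquire(&req->complete))
		return false;
	req->complete = 0;
	*status = req->status;
	return true;
}
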
-static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
-{
- struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring;
- unsigned long head;
- unsigned long tail;
- size_t max_tx;
-
- spin_lock(&tx_ring->producer_lock);
-
- head = tx_ring->head;
- tail = READ_ONCE(tx_ring->tail);
- max_tx = tx_ring->max_items;
-
- if (likely(CIRC_SPACE(head, tail, max_tx))) {
- tx_ring->items[head] = tx;
-
- /* Finish storing txreq before incrementing head. */
- smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
- napi_schedule_irqoff(tx->txq->napi);
- } else {
- struct hfi1_ipoib_txq *txq = tx->txq;
- struct hfi1_ipoib_dev_priv *priv = tx->priv;
-
- /* Ring was full */
- hfi1_ipoib_free_tx(tx, 0);
- atomic64_inc(&txq->complete_txreqs);
- dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);
- }
-
- spin_unlock(&tx_ring->producer_lock);
-}
-
static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)
{
struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);
+ trace_hfi1_txq_complete(tx->txq);
tx->sdma_status = status;
-
- hfi1_ipoib_add_tx(tx);
+ /* see hfi1_ipoib_poll_tx_ring */
+ smp_store_release(&tx->complete, 1);
+ napi_schedule_irqoff(&tx->txq->napi);
}
static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
@@ -291,7 +255,7 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
struct ipoib_txparms *txp)
{
- struct hfi1_ipoib_dev_priv *priv = tx->priv;
+ struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
struct sk_buff *skb = tx->skb;
struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
@@ -362,7 +326,7 @@ static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(txp->dqpn);
- ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs));
+ ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->tx_ring.sent_txreqs));
/* Build the deth */
ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey);
@@ -385,19 +349,32 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
struct ipoib_txparms *txp)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct hfi1_ipoib_txq *txq = txp->txq;
struct ipoib_txreq *tx;
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+ u32 tail = tx_ring->tail;
int ret;
- tx = kmem_cache_alloc_node(priv->txreq_cache,
- GFP_ATOMIC,
- priv->dd->node);
- if (unlikely(!tx))
- return ERR_PTR(-ENOMEM);
+ if (unlikely(!tx_ring->avail)) {
+ u32 head;
+
+ if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq))
+ /* This shouldn't happen with a stopped queue */
+ return ERR_PTR(-ENOMEM);
+ /* See hfi1_ipoib_poll_tx_ring() */
+ head = smp_load_acquire(&tx_ring->head);
+ tx_ring->avail =
+ min_t(u32, hfi1_ipoib_ring_hwat(txq),
+ CIRC_CNT(head, tail, tx_ring->max_items));
+ } else {
+ tx_ring->avail--;
+ }
+ tx = hfi1_txreq_from_idx(tx_ring, tail);
+ trace_hfi1_txq_alloc_tx(txq);
/* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
- tx->priv = priv;
- tx->txq = txp->txq;
+ tx->txq = txq;
tx->skb = skb;
INIT_LIST_HEAD(&tx->txreq.list);
@@ -405,21 +382,20 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
ret = hfi1_ipoib_build_tx_desc(tx, txp);
if (likely(!ret)) {
- if (txp->txq->flow.as_int != txp->flow.as_int) {
- txp->txq->flow.tx_queue = txp->flow.tx_queue;
- txp->txq->flow.sc5 = txp->flow.sc5;
- txp->txq->sde =
+ if (txq->flow.as_int != txp->flow.as_int) {
+ txq->flow.tx_queue = txp->flow.tx_queue;
+ txq->flow.sc5 = txp->flow.sc5;
+ txq->sde =
sdma_select_engine_sc(priv->dd,
txp->flow.tx_queue,
txp->flow.sc5);
- trace_hfi1_flow_switch(txp->txq);
+ trace_hfi1_flow_switch(txq);
}
return tx;
}
sdma_txclean(priv->dd, &tx->txreq);
- kmem_cache_free(priv->txreq_cache, tx);
return ERR_PTR(ret);
}
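
[Editor's note] tx_ring.avail is a locally cached credit count: the transmitter usually just decrements it, and only when it hits zero does it touch the poller's cache line via an acquire load of head, recomputing the credits with CIRC_CNT() capped at the high watermark. The refill step alone (demo_* sketch):

#include <linux/circ_buf.h>
#include <linux/minmax.h>
#include <asm/barrier.h>

/* Recompute credits from the consumer's published head. */
static u32 demo_refill_avail(u32 *head, u32 tail, u32 max_items, u32 hwat)
{
	u32 h = smp_load_acquire(head);	/* pairs with the poller's release */

	return min_t(u32, hwat, CIRC_CNT(h, tail, max_items));
}
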
@@ -480,8 +456,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
struct sk_buff *skb,
struct ipoib_txparms *txp)
{
- struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
struct hfi1_ipoib_txq *txq = txp->txq;
+ struct hfi1_ipoib_circ_buf *tx_ring;
struct ipoib_txreq *tx;
int ret;
@@ -499,10 +475,14 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
return NETDEV_TX_OK;
}
+ tx_ring = &txq->tx_ring;
+ trace_hfi1_tx_consume(tx, tx_ring->tail);
+ /* consume tx */
+ smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items));
ret = hfi1_ipoib_submit_tx(txq, tx);
if (likely(!ret)) {
tx_ok:
- trace_sdma_output_ibhdr(tx->priv->dd,
+ trace_sdma_output_ibhdr(txq->priv->dd,
&tx->sdma_hdr.hdr,
ib_is_sc5(txp->flow.sc5));
hfi1_ipoib_check_queue_depth(txq);
@@ -514,9 +494,10 @@ tx_ok:
if (ret == -EBUSY || ret == -ECOMM)
goto tx_ok;
- sdma_txclean(priv->dd, &tx->txreq);
- dev_kfree_skb_any(skb);
- kmem_cache_free(priv->txreq_cache, tx);
+ /* mark complete and kick napi tx */
+ smp_store_release(&tx->complete, 1);
+ napi_schedule(&tx->txq->napi);
+
++dev->stats.tx_carrier_errors;
return NETDEV_TX_OK;
@@ -527,6 +508,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
struct ipoib_txparms *txp)
{
struct hfi1_ipoib_txq *txq = txp->txq;
+ struct hfi1_ipoib_circ_buf *tx_ring;
struct ipoib_txreq *tx;
/* Has the flow changed? */
@@ -556,11 +538,15 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
return NETDEV_TX_OK;
}
+ tx_ring = &txq->tx_ring;
+ trace_hfi1_tx_consume(tx, tx_ring->tail);
+ /* consume tx */
+ smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items));
list_add_tail(&tx->txreq.list, &txq->tx_list);
hfi1_ipoib_check_queue_depth(txq);
- trace_sdma_output_ibhdr(tx->priv->dd,
+ trace_sdma_output_ibhdr(txq->priv->dd,
&tx->sdma_hdr.hdr,
ib_is_sc5(txp->flow.sc5));
@@ -646,7 +632,7 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
if (list_empty(&txq->wait.list)) {
struct hfi1_ibport *ibp = &sde->ppd->ibport_data;
- if (!atomic_xchg(&txq->no_desc, 1)) {
+ if (!atomic_xchg(&txq->tx_ring.no_desc, 1)) {
trace_hfi1_txq_queued(txq);
hfi1_ipoib_stop_txq(txq);
}
@@ -689,45 +675,29 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work)
if (likely(dev->reg_state == NETREG_REGISTERED) &&
likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
- if (atomic_xchg(&txq->no_desc, 0))
+ if (atomic_xchg(&txq->tx_ring.no_desc, 0))
hfi1_ipoib_wake_txq(txq);
}
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
struct net_device *dev = priv->netdev;
- char buf[HFI1_IPOIB_TXREQ_NAME_LEN];
- unsigned long tx_ring_size;
+ u32 tx_ring_size, tx_item_size;
int i;
/*
* Ring holds 1 less than tx_ring_size
* Round up to next power of 2 in order to hold at least tx_queue_len
*/
- tx_ring_size = roundup_pow_of_two((unsigned long)dev->tx_queue_len + 1);
-
- snprintf(buf, sizeof(buf), "hfi1_%u_ipoib_txreq_cache", priv->dd->unit);
- priv->txreq_cache = kmem_cache_create(buf,
- sizeof(struct ipoib_txreq),
- 0,
- 0,
- NULL);
- if (!priv->txreq_cache)
- return -ENOMEM;
-
- priv->tx_napis = kcalloc_node(dev->num_tx_queues,
- sizeof(struct napi_struct),
- GFP_KERNEL,
- priv->dd->node);
- if (!priv->tx_napis)
- goto free_txreq_cache;
+ tx_ring_size = roundup_pow_of_two(dev->tx_queue_len + 1);
+ tx_item_size = roundup_pow_of_two(sizeof(struct ipoib_txreq));
priv->txqs = kcalloc_node(dev->num_tx_queues,
sizeof(struct hfi1_ipoib_txq),
GFP_KERNEL,
priv->dd->node);
if (!priv->txqs)
- goto free_tx_napis;
+ return -ENOMEM;
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
@@ -743,10 +713,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
txq->priv = priv;
txq->sde = NULL;
INIT_LIST_HEAD(&txq->tx_list);
- atomic64_set(&txq->complete_txreqs, 0);
- atomic_set(&txq->stops, 0);
- atomic_set(&txq->ring_full, 0);
- atomic_set(&txq->no_desc, 0);
+ atomic_set(&txq->tx_ring.stops, 0);
+ atomic_set(&txq->tx_ring.ring_full, 0);
+ atomic_set(&txq->tx_ring.no_desc, 0);
txq->q_idx = i;
txq->flow.tx_queue = 0xff;
txq->flow.sc5 = 0xff;
@@ -756,19 +725,17 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
priv->dd->node);
txq->tx_ring.items =
- kcalloc_node(tx_ring_size,
- sizeof(struct ipoib_txreq *),
+ kcalloc_node(tx_ring_size, tx_item_size,
GFP_KERNEL, priv->dd->node);
if (!txq->tx_ring.items)
goto free_txqs;
- spin_lock_init(&txq->tx_ring.producer_lock);
- spin_lock_init(&txq->tx_ring.consumer_lock);
txq->tx_ring.max_items = tx_ring_size;
+ txq->tx_ring.shift = ilog2(tx_item_size);
+ txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
- txq->napi = &priv->tx_napis[i];
- netif_tx_napi_add(dev, txq->napi,
- hfi1_ipoib_process_tx_ring,
+ netif_tx_napi_add(dev, &txq->napi,
+ hfi1_ipoib_poll_tx_ring,
NAPI_POLL_WEIGHT);
}
@@ -778,20 +745,12 @@ free_txqs:
for (i--; i >= 0; i--) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
- netif_napi_del(txq->napi);
+ netif_napi_del(&txq->napi);
kfree(txq->tx_ring.items);
}
kfree(priv->txqs);
priv->txqs = NULL;
-
-free_tx_napis:
- kfree(priv->tx_napis);
- priv->tx_napis = NULL;
-
-free_txreq_cache:
- kmem_cache_destroy(priv->txreq_cache);
- priv->txreq_cache = NULL;
return -ENOMEM;
}
@@ -799,7 +758,6 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
{
struct sdma_txreq *txreq;
struct sdma_txreq *txreq_tmp;
- atomic64_t *complete_txreqs = &txq->complete_txreqs;
list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
struct ipoib_txreq *tx =
@@ -808,16 +766,16 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
list_del(&txreq->list);
sdma_txclean(txq->priv->dd, &tx->txreq);
dev_kfree_skb_any(tx->skb);
- kmem_cache_free(txq->priv->txreq_cache, tx);
- atomic64_inc(complete_txreqs);
+ tx->skb = NULL;
+ txq->tx_ring.complete_txreqs++;
}
if (hfi1_ipoib_used(txq))
dd_dev_warn(txq->priv->dd,
- "txq %d not empty found %llu requests\n",
+ "txq %d not empty found %u requests\n",
txq->q_idx,
- hfi1_ipoib_txreqs(txq->sent_txreqs,
- atomic64_read(complete_txreqs)));
+ hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
+ txq->tx_ring.complete_txreqs));
}
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
@@ -830,19 +788,13 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
iowait_cancel_work(&txq->wait);
iowait_sdma_drain(&txq->wait);
hfi1_ipoib_drain_tx_list(txq);
- netif_napi_del(txq->napi);
- (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+ netif_napi_del(&txq->napi);
+ hfi1_ipoib_drain_tx_ring(txq);
kfree(txq->tx_ring.items);
}
kfree(priv->txqs);
priv->txqs = NULL;
-
- kfree(priv->tx_napis);
- priv->tx_napis = NULL;
-
- kmem_cache_destroy(priv->txreq_cache);
- priv->txreq_cache = NULL;
}
void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
@@ -853,7 +805,7 @@ void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
- napi_enable(txq->napi);
+ napi_enable(&txq->napi);
}
}
@@ -865,8 +817,8 @@ void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
- napi_disable(txq->napi);
- (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+ napi_disable(&txq->napi);
+ hfi1_ipoib_drain_tx_ring(txq);
}
}
@@ -874,23 +826,23 @@ void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
struct hfi1_ipoib_txq *txq = &priv->txqs[q];
- u64 completed = atomic64_read(&txq->complete_txreqs);
dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
txq, q,
__netif_subqueue_stopped(dev, txq->q_idx),
- atomic_read(&txq->stops),
- atomic_read(&txq->no_desc),
- atomic_read(&txq->ring_full));
+ atomic_read(&txq->tx_ring.stops),
+ atomic_read(&txq->tx_ring.no_desc),
+ atomic_read(&txq->tx_ring.ring_full));
dd_dev_info(priv->dd, "sde %p engine %u\n",
txq->sde,
txq->sde ? txq->sde->this_idx : 0);
dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
- txq->sent_txreqs, completed, hfi1_ipoib_used(txq));
- dd_dev_info(priv->dd, "tx_queue_len %u max_items %lu\n",
+ txq->tx_ring.sent_txreqs, txq->tx_ring.complete_txreqs,
+ hfi1_ipoib_used(txq));
+ dd_dev_info(priv->dd, "tx_queue_len %u max_items %u\n",
dev->tx_queue_len, txq->tx_ring.max_items);
- dd_dev_info(priv->dd, "head %lu tail %lu\n",
+ dd_dev_info(priv->dd, "head %u tail %u\n",
txq->tx_ring.head, txq->tx_ring.tail);
dd_dev_info(priv->dd, "wait queued %u\n",
!list_empty(&txq->wait.list));
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 489b436f19bb..3d42bd2b36bd 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -878,6 +878,7 @@ void sc_disable(struct send_context *sc)
{
u64 reg;
struct pio_buf *pbuf;
+ LIST_HEAD(wake_list);
if (!sc)
return;
@@ -912,19 +913,21 @@ void sc_disable(struct send_context *sc)
spin_unlock(&sc->release_lock);
write_seqlock(&sc->waitlock);
- while (!list_empty(&sc->piowait)) {
+ if (!list_empty(&sc->piowait))
+ list_move(&sc->piowait, &wake_list);
+ write_sequnlock(&sc->waitlock);
+ while (!list_empty(&wake_list)) {
struct iowait *wait;
struct rvt_qp *qp;
struct hfi1_qp_priv *priv;
- wait = list_first_entry(&sc->piowait, struct iowait, list);
+ wait = list_first_entry(&wake_list, struct iowait, list);
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
}
- write_sequnlock(&sc->waitlock);
spin_unlock_irq(&sc->alloc_lock);
}
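
[Editor's note] Waking QPs while holding sc->waitlock risks lock-ordering problems against the locks the wakeup path takes, so the fix detaches the waiters onto a private list under the lock and runs the wakeups afterwards. The generic shape of the idiom — sc_disable() uses the seqlock variants; a plain spinlock is shown for brevity:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Move waiters off the shared list under the lock; wake them lock-free. */
static void demo_drain_waiters(spinlock_t *lock, struct list_head *shared)
{
	LIST_HEAD(wake_list);
	struct list_head *pos, *tmp;

	spin_lock(lock);
	list_splice_init(shared, &wake_list);
	spin_unlock(lock);

	list_for_each_safe(pos, tmp, &wake_list) {
		list_del_init(pos);
		/* ...wake the entry's owner here, with no locks held... */
	}
}
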
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index 7318aa6150b5..ed1b9e1e4b17 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -917,20 +917,22 @@ DECLARE_EVENT_CLASS(/* AIP */
__entry->tail = txq->tx_ring.tail;
__entry->idx = txq->q_idx;
__entry->used =
- txq->sent_txreqs -
- atomic64_read(&txq->complete_txreqs);
+ txq->tx_ring.sent_txreqs -
+ txq->tx_ring.complete_txreqs;
__entry->flow = txq->flow.as_int;
- __entry->stops = atomic_read(&txq->stops);
- __entry->no_desc = atomic_read(&txq->no_desc);
+ __entry->stops = atomic_read(&txq->tx_ring.stops);
+ __entry->no_desc = atomic_read(&txq->tx_ring.no_desc);
__entry->stopped =
__netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
),
TP_printk(/* print */
- "[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
+ "[%s] txq %llx idx %u sde %llx:%u cpu %d head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
__get_str(dev),
(unsigned long long)__entry->txq,
__entry->idx,
(unsigned long long)__entry->sde,
+ __entry->sde ? __entry->sde->this_idx : 0,
+ __entry->sde ? __entry->sde->cpu : 0,
__entry->head,
__entry->tail,
__entry->flow,
@@ -995,6 +997,65 @@ DEFINE_EVENT(/* xmit_unstopped */
TP_ARGS(txq)
);
+DECLARE_EVENT_CLASS(/* AIP */
+ hfi1_ipoib_tx_template,
+ TP_PROTO(struct ipoib_txreq *tx, u32 idx),
+ TP_ARGS(tx, idx),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(tx->txq->priv->dd)
+ __field(struct ipoib_txreq *, tx)
+ __field(struct hfi1_ipoib_txq *, txq)
+ __field(struct sk_buff *, skb)
+ __field(ulong, idx)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(tx->txq->priv->dd);
+ __entry->tx = tx;
+ __entry->skb = tx->skb;
+ __entry->txq = tx->txq;
+ __entry->idx = idx;
+ ),
+ TP_printk(/* print */
+ "[%s] tx %llx txq %llx,%u skb %llx idx %lu",
+ __get_str(dev),
+ (unsigned long long)__entry->tx,
+ (unsigned long long)__entry->txq,
+ __entry->txq ? __entry->txq->q_idx : 0,
+ (unsigned long long)__entry->skb,
+ __entry->idx
+ )
+);
+
+DEFINE_EVENT(/* produce */
+ hfi1_ipoib_tx_template, hfi1_tx_produce,
+ TP_PROTO(struct ipoib_txreq *tx, u32 idx),
+ TP_ARGS(tx, idx)
+);
+
+DEFINE_EVENT(/* consume */
+ hfi1_ipoib_tx_template, hfi1_tx_consume,
+ TP_PROTO(struct ipoib_txreq *tx, u32 idx),
+ TP_ARGS(tx, idx)
+);
+
+DEFINE_EVENT(/* alloc_tx */
+ hfi1_ipoib_txq_template, hfi1_txq_alloc_tx,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* poll */
+ hfi1_ipoib_txq_template, hfi1_txq_poll,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* complete */
+ hfi1_ipoib_txq_template, hfi1_txq_complete,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
#endif /* __HFI1_TRACE_TX_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 0c86e9d354f8..186d30291260 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -692,8 +692,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
* Allocate the node first so we can handle a potential
* failure before we've programmed anything.
*/
- node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
- GFP_KERNEL);
+ node = kzalloc(struct_size(node, pages, npages), GFP_KERNEL);
if (!node)
return -ENOMEM;
@@ -713,7 +712,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
node->dma_addr = phys;
node->grp = grp;
node->freed = false;
- memcpy(node->pages, pages, sizeof(struct page *) * npages);
+ memcpy(node->pages, pages, flex_array_size(node, pages, npages));
if (fd->use_mn) {
ret = mmu_interval_notifier_insert(
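
[Editor's note] struct_size() and flex_array_size() from <linux/overflow.h> compute these sizes with saturation on overflow, replacing the open-coded sizeof arithmetic above. A self-contained example with an invented demo_node:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_node {
	unsigned int npages;
	struct page *pages[];	/* flexible array member */
};

static struct demo_node *demo_alloc(struct page **pages, unsigned int npages)
{
	struct demo_node *node;

	/* struct_size() saturates instead of wrapping on huge npages. */
	node = kzalloc(struct_size(node, pages, npages), GFP_KERNEL);
	if (!node)
		return NULL;

	node->npages = npages;
	memcpy(node->pages, pages, flex_array_size(node, pages, npages));
	return node;
}
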
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 26bea51869bf..ed9fa0d84e9e 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1602,8 +1602,8 @@ static const char * const driver_cntr_names[] = {
};
static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names buffers */
-static const char **dev_cntr_names;
-static const char **port_cntr_names;
+static struct rdma_stat_desc *dev_cntr_descs;
+static struct rdma_stat_desc *port_cntr_descs;
int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
static int num_dev_cntrs;
static int num_port_cntrs;
@@ -1614,13 +1614,12 @@ static int cntr_names_initialized;
* strings. Optionally some entries can be reserved in the array to hold extra
* external strings.
*/
-static int init_cntr_names(const char *names_in,
- const size_t names_len,
- int num_extra_names,
- int *num_cntrs,
- const char ***cntr_names)
+static int init_cntr_names(const char *names_in, const size_t names_len,
+ int num_extra_names, int *num_cntrs,
+ struct rdma_stat_desc **cntr_descs)
{
- char *names_out, *p, **q;
+ struct rdma_stat_desc *q;
+ char *names_out, *p;
int i, n;
n = 0;
@@ -1628,26 +1627,28 @@ static int init_cntr_names(const char *names_in,
if (names_in[i] == '\n')
n++;
- names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
- GFP_KERNEL);
+ names_out =
+ kmalloc((n + num_extra_names) * sizeof(struct rdma_stat_desc) +
+ names_len,
+ GFP_KERNEL);
if (!names_out) {
*num_cntrs = 0;
- *cntr_names = NULL;
+ *cntr_descs = NULL;
return -ENOMEM;
}
- p = names_out + (n + num_extra_names) * sizeof(char *);
+ p = names_out + (n + num_extra_names) * sizeof(struct rdma_stat_desc);
memcpy(p, names_in, names_len);
- q = (char **)names_out;
+ q = (struct rdma_stat_desc *)names_out;
for (i = 0; i < n; i++) {
- q[i] = p;
+ q[i].name = p;
p = strchr(p, '\n');
*p++ = '\0';
}
*num_cntrs = n;
- *cntr_names = (const char **)names_out;
+ *cntr_descs = (struct rdma_stat_desc *)names_out;
return 0;
}
@@ -1661,18 +1662,18 @@ static int init_counters(struct ib_device *ibdev)
goto out_unlock;
err = init_cntr_names(dd->cntrnames, dd->cntrnameslen, num_driver_cntrs,
- &num_dev_cntrs, &dev_cntr_names);
+ &num_dev_cntrs, &dev_cntr_descs);
if (err)
goto out_unlock;
for (i = 0; i < num_driver_cntrs; i++)
- dev_cntr_names[num_dev_cntrs + i] = driver_cntr_names[i];
+ dev_cntr_descs[num_dev_cntrs + i].name = driver_cntr_names[i];
err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen, 0,
- &num_port_cntrs, &port_cntr_names);
+ &num_port_cntrs, &port_cntr_descs);
if (err) {
- kfree(dev_cntr_names);
- dev_cntr_names = NULL;
+ kfree(dev_cntr_descs);
+ dev_cntr_descs = NULL;
goto out_unlock;
}
cntr_names_initialized = 1;
@@ -1686,7 +1687,7 @@ static struct rdma_hw_stats *hfi1_alloc_hw_device_stats(struct ib_device *ibdev)
{
if (init_counters(ibdev))
return NULL;
- return rdma_alloc_hw_stats_struct(dev_cntr_names,
+ return rdma_alloc_hw_stats_struct(dev_cntr_descs,
num_dev_cntrs + num_driver_cntrs,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -1696,7 +1697,7 @@ static struct rdma_hw_stats *hfi_alloc_hw_port_stats(struct ib_device *ibdev,
{
if (init_counters(ibdev))
return NULL;
- return rdma_alloc_hw_stats_struct(port_cntr_names, num_port_cntrs,
+ return rdma_alloc_hw_stats_struct(port_cntr_descs, num_port_cntrs,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -1921,10 +1922,10 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
verbs_txreq_exit(dev);
mutex_lock(&cntr_names_lock);
- kfree(dev_cntr_names);
- kfree(port_cntr_names);
- dev_cntr_names = NULL;
- port_cntr_names = NULL;
+ kfree(dev_cntr_descs);
+ kfree(port_cntr_descs);
+ dev_cntr_descs = NULL;
+ port_cntr_descs = NULL;
cntr_names_initialized = 0;
mutex_unlock(&cntr_names_lock);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 9467c39e3d28..43e17d61cb63 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -225,11 +225,24 @@ struct hns_roce_uar {
unsigned long logic_idx;
};
+enum hns_roce_mmap_type {
+ HNS_ROCE_MMAP_TYPE_DB = 1,
+ HNS_ROCE_MMAP_TYPE_TPTR,
+};
+
+struct hns_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ enum hns_roce_mmap_type mmap_type;
+ u64 address;
+};
+
struct hns_roce_ucontext {
struct ib_ucontext ibucontext;
struct hns_roce_uar uar;
struct list_head page_list;
struct mutex page_mutex;
+ struct hns_user_mmap_entry *db_mmap_entry;
+ struct hns_user_mmap_entry *tptr_mmap_entry;
};
struct hns_roce_pd {
@@ -898,7 +911,8 @@ struct hns_roce_hw {
bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
- int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
+ int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
+ const u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
@@ -1049,6 +1063,12 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}
+static inline struct hns_user_mmap_entry *
+to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
+}
+
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
writeq(*(u64 *)val, dest);
@@ -1259,4 +1279,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
struct ib_cq *ib_cq);
+struct hns_user_mmap_entry *
+hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
+ size_t length,
+ enum hns_roce_mmap_type mmap_type);
#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index e0f59b8d7d5d..f4af3992ba95 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -90,11 +90,11 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
unsigned long flags = 0;
void *wqe = NULL;
__le32 doorbell[2];
+ const u8 *smac;
int ret = 0;
int loopback;
u32 wqe_idx;
int nreq;
- u8 *smac;
if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_RC)) {
@@ -154,7 +154,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
UD_SEND_WQE_U32_8_DMAC_5_S,
ah->av.mac[5]);
- smac = (u8 *)hr_dev->dev_addr[qp->port];
+ smac = (const u8 *)hr_dev->dev_addr[qp->port];
loopback = ether_addr_equal_unaligned(ah->av.mac,
smac) ? 1 : 0;
roce_set_bit(ud_sq_wqe->u32_8,
@@ -1782,7 +1782,7 @@ static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port,
}
static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
- u8 *addr)
+ const u8 *addr)
{
u32 reg_smac_l;
u16 reg_smac_h;
@@ -2743,12 +2743,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
__le32 doorbell[2] = {0};
u64 *mtts_2 = NULL;
int ret = -EINVAL;
+ const u8 *smac;
u64 sq_ba = 0;
u64 rq_ba = 0;
u32 port;
u32 port_num;
u8 *dmac;
- u8 *smac;
if (!check_qp_state(cur_state, new_state)) {
ibdev_err(ibqp->device,
@@ -2947,7 +2947,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
hr_qp->port;
- smac = (u8 *)hr_dev->dev_addr[port];
+ smac = (const u8 *)hr_dev->dev_addr[port];
/* when dmac equals smac or loop_idc is 1, it should loopback */
if (ether_addr_equal_unaligned(dmac, smac) ||
hr_dev->loop_idc == 0x1)
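The u8 * to const u8 * churn through hns (and irdma below) tracks the netdev core making dev_addr read-only; every consumer here only reads the MAC, so const can propagate from netdev->dev_addr all the way down to the hardware writers. A one-function sketch of the read-only use, with a hypothetical demo_ name but the real ether_addr_equal_unaligned() helper:

	#include <linux/etherdevice.h>

	static bool demo_is_loopback(const u8 *dmac, const u8 *smac)
	{
		/* pure reader: comparing two 6-byte MACs never writes them */
		return ether_addr_equal_unaligned(dmac, smac);
	}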
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index d5f3faa1627a..9bfbaddd1763 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1165,32 +1165,22 @@ static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
{
int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(hr_dev->dev, size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
-
- return -ENOMEM;
- }
-
return 0;
}
static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_cmq_ring *ring)
{
- dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
- ring->desc_num * sizeof(struct hns_roce_cmq_desc),
- DMA_BIDIRECTIONAL);
+ dma_free_coherent(hr_dev->dev,
+ ring->desc_num * sizeof(struct hns_roce_cmq_desc),
+ ring->desc, ring->desc_dma_addr);
ring->desc_dma_addr = 0;
- kfree(ring->desc);
}
static int init_csq(struct hns_roce_dev *hr_dev,
@@ -2992,7 +2982,7 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
}
static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
- u8 *addr)
+ const u8 *addr)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cfg_smac_tb *smac_tb =
@@ -3328,7 +3318,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
memset(cq_context, 0, sizeof(*cq_context));
hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
- hr_reg_write(cq_context, CQC_ARM_ST, REG_NXT_CEQE);
+ hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED);
hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);
@@ -4318,10 +4308,10 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
enum ib_mtu ib_mtu;
+ const u8 *smac;
u8 lp_pktn_ini;
u64 *mtts;
u8 *dmac;
- u8 *smac;
u32 port;
int mtu;
int ret;
@@ -4374,7 +4364,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
- smac = (u8 *)hr_dev->dev_addr[port];
+ smac = (const u8 *)hr_dev->dev_addr[port];
dmac = (u8 *)attr->ah_attr.roce.dmac;
/* when dmac equals smac or loop_idc is 1, it should loopback */
if (ether_addr_equal_unaligned(dmac, smac) ||
@@ -4399,8 +4389,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtu = ib_mtu_enum_to_int(ib_mtu);
if (WARN_ON(mtu <= 0))
return -EINVAL;
-#define MAX_LP_MSG_LEN 65536
- /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
+#define MAX_LP_MSG_LEN 16384
+ /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */
lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
if (WARN_ON(lp_pktn_ini >= 0xF))
return -EINVAL;
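Two independent fixes ride in the hw_v2 diff above. The CMQ ring moves from kzalloc() plus dma_map_single() to dma_alloc_coherent(), which hands back the CPU and DMA addresses from a single call: one failure point instead of two, no dma_mapping_error() unwind, and no unmap on teardown. A minimal sketch under those assumptions (the ring_buf_* names are illustrative):

	#include <linux/dma-mapping.h>

	static void *ring_buf_alloc(struct device *dev, size_t size,
				    dma_addr_t *dma)
	{
		/* memory comes back already DMA-visible to the device */
		return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	}

	static void ring_buf_free(struct device *dev, size_t size,
				  void *cpu, dma_addr_t dma)
	{
		dma_free_coherent(dev, size, cpu, dma);
	}

The MAX_LP_MSG_LEN change at the end of the same diff shrinks the loopback message cap from 64KB to 16KB, with the comment updated to match.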
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 5d39bd08582a..4194b626f3c6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -42,7 +42,8 @@
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
-static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, u8 *addr)
+static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
+ const u8 *addr)
{
u8 phy_port;
u32 i;
@@ -291,6 +292,79 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
return 0;
}
+struct hns_user_mmap_entry *
+hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
+ size_t length,
+ enum hns_roce_mmap_type mmap_type)
+{
+ struct hns_user_mmap_entry *entry;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+ entry->mmap_type = mmap_type;
+
+ ret = rdma_user_mmap_entry_insert_exact(
+ ucontext, &entry->rdma_entry, length,
+ mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1);
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+
+ return entry;
+}
+
+static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
+{
+ if (context->db_mmap_entry)
+ rdma_user_mmap_entry_remove(
+ &context->db_mmap_entry->rdma_entry);
+
+ if (context->tptr_mmap_entry)
+ rdma_user_mmap_entry_remove(
+ &context->tptr_mmap_entry->rdma_entry);
+}
+
+static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
+{
+ struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
+ u64 address;
+ int ret;
+
+ address = context->uar.pfn << PAGE_SHIFT;
+ context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
+ uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
+ if (!context->db_mmap_entry)
+ return -ENOMEM;
+
+ if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
+ return 0;
+
+ /*
+ * FIXME: using io_remap_pfn_range on the dma address returned
+ * by dma_alloc_coherent is totally wrong.
+ */
+ context->tptr_mmap_entry =
+ hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
+ hr_dev->tptr_size,
+ HNS_ROCE_MMAP_TYPE_TPTR);
+ if (!context->tptr_mmap_entry) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ hns_roce_dealloc_uar_entry(context);
+ return ret;
+}
+
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
@@ -309,6 +383,10 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
if (ret)
goto error_fail_uar_alloc;
+ ret = hns_roce_alloc_uar_entry(uctx);
+ if (ret)
+ goto error_fail_uar_entry;
+
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
@@ -325,6 +403,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
return 0;
error_fail_copy_to_udata:
+ hns_roce_dealloc_uar_entry(context);
+
+error_fail_uar_entry:
ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
error_fail_uar_alloc:
@@ -336,39 +417,43 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
+ hns_roce_dealloc_uar_entry(context);
+
ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}
-static int hns_roce_mmap(struct ib_ucontext *context,
- struct vm_area_struct *vma)
+static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
-
- switch (vma->vm_pgoff) {
- case 0:
- return rdma_user_mmap_io(context, vma,
- to_hr_ucontext(context)->uar.pfn,
- PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot),
- NULL);
-
- /* vm_pgoff: 1 -- TPTR */
- case 1:
- if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
- return -EINVAL;
- /*
- * FIXME: using io_remap_pfn_range on the dma address returned
- * by dma_alloc_coherent is totally wrong.
- */
- return rdma_user_mmap_io(context, vma,
- hr_dev->tptr_dma_addr >> PAGE_SHIFT,
- hr_dev->tptr_size,
- vma->vm_page_prot,
- NULL);
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct hns_user_mmap_entry *entry;
+ phys_addr_t pfn;
+ pgprot_t prot;
+ int ret;
- default:
+ rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
+ if (!rdma_entry)
return -EINVAL;
- }
+
+ entry = to_hns_mmap(rdma_entry);
+ pfn = entry->address >> PAGE_SHIFT;
+ prot = vma->vm_page_prot;
+
+ if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR)
+ prot = pgprot_noncached(prot);
+
+ ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
+ prot, rdma_entry);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+
+ return ret;
+}
+
+static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);
+
+ kfree(entry);
}
static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
@@ -444,6 +529,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.get_link_layer = hns_roce_get_link_layer,
.get_port_immutable = hns_roce_port_immutable,
.mmap = hns_roce_mmap,
+ .mmap_free = hns_roce_free_mmap,
.modify_device = hns_roce_modify_device,
.modify_qp = hns_roce_modify_qp,
.query_ah = hns_roce_query_ah,
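The main.c rework above is the standard rdma_user_mmap_entry lifecycle: allocate and insert an entry at a fixed pgoff when the ucontext is created, look it up by pgoff in .mmap, remap and drop the lookup reference, and free the wrapper from .mmap_free once the core releases the last reference. A condensed sketch; demo_entry is illustrative, the rdma_user_mmap_* calls are the core API used above:

	#include <rdma/ib_verbs.h>
	#include <linux/slab.h>

	struct demo_entry {
		struct rdma_user_mmap_entry rdma_entry;
		u64 address;
	};

	static int demo_mmap(struct ib_ucontext *uctx,
			     struct vm_area_struct *vma)
	{
		struct rdma_user_mmap_entry *re;
		struct demo_entry *e;
		int ret;

		re = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
		if (!re)
			return -EINVAL;

		e = container_of(re, struct demo_entry, rdma_entry);
		ret = rdma_user_mmap_io(uctx, vma, e->address >> PAGE_SHIFT,
					re->npages * PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					re);
		rdma_user_mmap_entry_put(re);	/* drop lookup reference */
		return ret;
	}

	static void demo_mmap_free(struct rdma_user_mmap_entry *re)
	{
		/* called by the core once the last reference is gone */
		kfree(container_of(re, struct demo_entry, rdma_entry));
	}

Passing the entry into rdma_user_mmap_io() is what lets the core zap the mapping safely if the entry is removed while userspace still holds it.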
diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
index d03cd29333ea..3bf42728e9b7 100644
--- a/drivers/infiniband/hw/irdma/cm.h
+++ b/drivers/infiniband/hw/irdma/cm.h
@@ -159,14 +159,6 @@ enum irdma_cm_event_type {
IRDMA_CM_EVENT_ABORTED,
};
-struct irdma_bth { /* Base Transport Header */
- u8 opcode;
- u8 flags;
- __be16 pkey;
- __be32 qpn;
- __be32 apsn;
-};
-
struct ietf_mpa_v1 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
@@ -397,7 +389,7 @@ int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
int irdma_destroy_listen(struct iw_cm_id *cm_id);
-int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac);
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac);
void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all);
@@ -406,7 +398,7 @@ int irdma_cm_stop(struct irdma_device *dev);
bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
- u8 *mac_addr, u32 action);
+ const u8 *mac_addr, u32 action);
void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev,
u32 *ipaddr, bool ipv4, bool ifup);
bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index f1e5515256e0..7264f8c2f7d5 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -1420,44 +1420,6 @@ void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
}
/**
- * irdma_sc_send_lsmm_nostag - for privilege qp
- * @qp: sc qp struct
- * @lsmm_buf: buffer with lsmm message
- * @size: size of lsmm buffer
- */
-void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
-{
- __le64 *wqe;
- u64 hdr;
- struct irdma_qp_uk *qp_uk;
-
- qp_uk = &qp->qp_uk;
- wqe = qp_uk->sq_base->elem;
-
- set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
-
- if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
- set_64bit_val(wqe, 8,
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size));
- else
- set_64bit_val(wqe, 8,
- FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
- set_64bit_val(wqe, 16, 0);
-
- hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
- FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
- FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
- dma_wmb(); /* make sure WQE is written before valid bit is set */
-
- set_64bit_val(wqe, 24, hdr);
-
- print_hex_dump_debug("WQE: SEND_LSMM_NOSTAG WQE", DUMP_PREFIX_OFFSET,
- 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
-}
-
-/**
* irdma_sc_send_rtt - send last read0 or write0
* @qp: sc qp struct
* @read: Do read0 or write0
@@ -2501,7 +2463,6 @@ static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
struct irdma_cq_init_info *info)
{
- enum irdma_status_code ret_code;
u32 pble_obj_cnt;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
@@ -2513,9 +2474,7 @@ enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
cq->ceq_id = info->ceq_id;
info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
- ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
- if (ret_code)
- return ret_code;
+ irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
cq->virtual_map = info->virtual_map;
cq->pbl_chunk_size = info->pbl_chunk_size;
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 7de525a5ccf8..4108dcabece2 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -1057,7 +1057,7 @@ static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev)
&iwdev->mac_ip_table_idx);
if (!status) {
status = irdma_add_local_mac_entry(iwdev->rf,
- (u8 *)iwdev->netdev->dev_addr,
+ (const u8 *)iwdev->netdev->dev_addr,
(u8)iwdev->mac_ip_table_idx);
if (status)
irdma_del_local_mac_entry(iwdev->rf,
@@ -2191,7 +2191,7 @@ void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
* @mac_addr: pointer to mac address
* @idx: the index of the mac ip address to add
*/
-int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx)
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
{
struct irdma_local_mac_entry_info *info;
struct irdma_cqp *iwcqp = &rf->cqp;
@@ -2362,7 +2362,8 @@ void irdma_del_apbvt(struct irdma_device *iwdev,
* @ipv4: flag indicating IPv4
* @action: add, delete or modify
*/
-void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
+void irdma_manage_arp_cache(struct irdma_pci_f *rf,
+ const unsigned char *mac_addr,
u32 *ip_addr, bool ipv4, u32 action)
{
struct irdma_add_arp_cache_entry_info *info;
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index b678fe712447..91a497139ba3 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -467,7 +467,8 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp);
void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
-void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
+void irdma_manage_arp_cache(struct irdma_pci_f *rf,
+ const unsigned char *mac_addr,
u32 *ip_addr, bool ipv4, u32 action);
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
void irdma_del_apbvt(struct irdma_device *iwdev,
@@ -479,7 +480,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
void irdma_put_cqp_request(struct irdma_cqp *cqp,
struct irdma_cqp_request *cqp_request);
int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
-int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx);
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx);
void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index b2ab52335ca6..63d8bb3a6903 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -37,7 +37,6 @@ struct irdma_hw;
struct irdma_pci_f;
struct ib_device *to_ibdev(struct irdma_sc_dev *dev);
-u8 __iomem *irdma_get_hw_addr(void *dev);
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev);
bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev);
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index 78f598fdbccf..a17c0ffb0cc8 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -37,8 +37,6 @@ void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats,
enum irdma_status_code
irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
struct irdma_ws_node_info *node_info);
-enum irdma_status_code irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
- struct irdma_up_info *map_info);
enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev,
struct irdma_sc_ceq *sc_ceq, u8 op);
enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev,
diff --git a/drivers/infiniband/hw/irdma/trace_cm.h b/drivers/infiniband/hw/irdma/trace_cm.h
index bcf10ec427d6..f633fb343328 100644
--- a/drivers/infiniband/hw/irdma/trace_cm.h
+++ b/drivers/infiniband/hw/irdma/trace_cm.h
@@ -144,7 +144,7 @@ DEFINE_EVENT(tos_template, irdma_dcb_tos,
DECLARE_EVENT_CLASS(qhash_template,
TP_PROTO(struct irdma_device *iwdev,
struct irdma_cm_listener *listener,
- char *dev_addr),
+ const char *dev_addr),
TP_ARGS(iwdev, listener, dev_addr),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__field(u16, lport)
@@ -173,12 +173,14 @@ DECLARE_EVENT_CLASS(qhash_template,
DEFINE_EVENT(qhash_template, irdma_add_mqh_6,
TP_PROTO(struct irdma_device *iwdev,
- struct irdma_cm_listener *listener, char *dev_addr),
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
TP_ARGS(iwdev, listener, dev_addr));
DEFINE_EVENT(qhash_template, irdma_add_mqh_4,
TP_PROTO(struct irdma_device *iwdev,
- struct irdma_cm_listener *listener, char *dev_addr),
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
TP_ARGS(iwdev, listener, dev_addr));
TRACE_EVENT(irdma_addr_resolve,
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 874bc25a938b..9483bb3e10ea 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -852,7 +852,6 @@ struct irdma_roce_offload_info {
u16 err_rq_idx;
u32 qkey;
u32 dest_qp;
- u32 local_qp;
u8 roce_tver;
u8 ack_credits;
u8 err_rq_idx_valid;
@@ -1256,7 +1255,7 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
u64 scratch, bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
irdma_stag stag);
-void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
+
void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info);
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 5fb92de1f015..57a9444e9ea7 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -13,16 +13,16 @@
* @sge: sge length and stag
* @valid: The wqe valid
*/
-static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + 8,
FIELD_PREP(IRDMAQPSQ_VALID, valid) |
- FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + 8,
@@ -38,14 +38,14 @@ static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
* @valid: wqe valid flag
*/
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
- struct irdma_sge *sge, u8 valid)
+ struct ib_sge *sge, u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + 8,
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + 8, 0);
@@ -289,7 +289,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
return IRDMA_ERR_INVALID_FRAG_COUNT;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
read_fence |= info->read_fence;
@@ -310,7 +310,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
irdma_clr_wqes(qp, wqe_idx);
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
if (info->imm_data_valid) {
set_64bit_val(wqe, 0,
@@ -339,7 +339,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
++addl_frag_cnt;
}
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
@@ -391,7 +391,7 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
return IRDMA_ERR_INVALID_FRAG_COUNT;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
if (ret_code)
@@ -426,8 +426,8 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
++addl_frag_cnt;
}
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
FIELD_PREP(IRDMAQPSQ_OPCODE,
@@ -477,7 +477,7 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
return IRDMA_ERR_INVALID_FRAG_COUNT;
for (i = 0; i < op_info->num_sges; i++)
- total_size += op_info->sg_list[i].len;
+ total_size += op_info->sg_list[i].length;
if (info->imm_data_valid)
frag_cnt = op_info->num_sges + 1;
@@ -705,9 +705,9 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in
read_fence |= info->read_fence;
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
@@ -826,7 +826,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
u64 hdr;
u32 wqe_idx;
bool local_fence = false;
- struct irdma_sge sge = {};
+ struct ib_sge sge = {};
info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.inv_local_stag;
@@ -839,7 +839,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
irdma_clr_wqes(qp, wqe_idx);
- sge.stag = op_info->target_stag;
+ sge.lkey = op_info->target_stag;
qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
set_64bit_val(wqe, 16, 0);
@@ -867,63 +867,6 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
}
/**
- * irdma_uk_mw_bind - bind Memory Window
- * @qp: hw qp ptr
- * @info: post sq information
- * @post_sq: flag to post sq
- */
-enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
-{
- __le64 *wqe;
- struct irdma_bind_window *op_info;
- u64 hdr;
- u32 wqe_idx;
- bool local_fence = false;
-
- info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.bind_window;
- local_fence |= info->local_fence;
-
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
- 0, info);
- if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
-
- irdma_clr_wqes(qp, wqe_idx);
-
- qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);
-
- hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
- FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
- ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
- FIELD_PREP(IRDMAQPSQ_VABASEDTO,
- (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
- FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
- (op_info->mem_window_type_1 ? 1 : 0)) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
- FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
- FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
- FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
-
- dma_wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, hdr);
-
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
-
- return 0;
-}
-
-/**
* irdma_uk_post_receive - post receive wqe
* @qp: hw qp ptr
* @info: post rq information
@@ -1092,12 +1035,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
if (cq->avoid_mem_cflct) {
ext_cqe = (__le64 *)((u8 *)cqe + 32);
get_64bit_val(ext_cqe, 24, &qword7);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
} else {
peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
ext_cqe = cq->cq_base[peek_head].buf;
get_64bit_val(ext_cqe, 24, &qword7);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
if (!peek_head)
polarity ^= 1;
}
@@ -1503,8 +1446,8 @@ enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
* @cq: hw cq
* @info: hw cq initialization info
*/
-enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
- struct irdma_cq_uk_init_info *info)
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info)
{
cq->cq_base = info->cq_base;
cq->cq_id = info->cq_id;
@@ -1515,8 +1458,6 @@ enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
cq->avoid_mem_cflct = info->avoid_mem_cflct;
IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
cq->polarity = 1;
-
- return 0;
}
/**
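The uk.c conversion above (together with the user.h hunk that follows) retires the driver-private struct irdma_sge in favor of the uapi-visible struct ib_sge; the fields map one to one, which is also why verbs.c can later drop irdma_copy_sg_list() and hand ib_wr->sg_list to the queue code directly. The mapping, as a sketch with a hypothetical helper name:

	#include <rdma/ib_verbs.h>

	static inline struct ib_sge demo_from_irdma_sge(u64 tag_off, u32 len,
							u32 stag)
	{
		struct ib_sge sge = {
			.addr   = tag_off,	/* was irdma_sge.tag_off */
			.length = len,		/* was irdma_sge.len     */
			.lkey   = stag,		/* was irdma_sge.stag    */
		};
		return sge;
	}

The same diff also fixes the extended-CQE polarity check in irdma_uk_cq_poll_cmpl(): the valid bit must come from qword7 of the extended CQE that was just read, not from qword3 of the base CQE.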
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index 3dcbb1fbf2c6..3c811fb88404 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -16,7 +16,6 @@
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
-#define irdma_sgl struct irdma_sge *
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
@@ -151,12 +150,6 @@ struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;
-struct irdma_sge {
- irdma_tagged_offset tag_off;
- u32 len;
- irdma_stag stag;
-};
-
struct irdma_ring {
u32 head;
u32 tail;
@@ -172,7 +165,7 @@ struct irdma_extended_cqe {
};
struct irdma_post_send {
- irdma_sgl sg_list;
+ struct ib_sge *sg_list;
u32 num_sges;
u32 qkey;
u32 dest_qp;
@@ -189,26 +182,26 @@ struct irdma_post_inline_send {
struct irdma_post_rq_info {
u64 wr_id;
- irdma_sgl sg_list;
+ struct ib_sge *sg_list;
u32 num_sges;
};
struct irdma_rdma_write {
- irdma_sgl lo_sg_list;
+ struct ib_sge *lo_sg_list;
u32 num_lo_sges;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
};
struct irdma_inline_rdma_write {
void *data;
u32 len;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
};
struct irdma_rdma_read {
- irdma_sgl lo_sg_list;
+ struct ib_sge *lo_sg_list;
u32 num_lo_sges;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
};
struct irdma_bind_window {
@@ -283,9 +276,7 @@ enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info,
bool post_sq);
-enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq);
+
enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id,
bool signaled, bool post_sq);
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
@@ -306,7 +297,7 @@ enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
struct irdma_wqe_uk_ops {
void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
- void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+ void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
u8 valid);
void (*iw_set_mw_bind_wqe)(__le64 *wqe,
struct irdma_bind_window *op_info);
@@ -318,8 +309,8 @@ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
-enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
- struct irdma_cq_uk_init_info *info);
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info);
enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
struct irdma_sq_uk_wr_trk_info {
@@ -369,7 +360,6 @@ struct irdma_qp_uk {
bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
- spinlock_t *lock;
u8 dbg_rq_flushed;
u8 sq_flush_seen;
u8 rq_flush_seen;
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index ac91ea5296db..8b42c43fc14f 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -11,7 +11,7 @@
* @action: modify, delete or add
*/
int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
- u8 *mac_addr, u32 action)
+ const u8 *mac_addr, u32 action)
{
unsigned long flags;
int arp_index;
@@ -77,7 +77,7 @@ int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
* @ipv4: IPv4 flag
* @mac: MAC address
*/
-int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac)
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac)
{
int arpidx;
@@ -768,17 +768,6 @@ struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
}
/**
- * irdma_get_hw_addr - return hw addr
- * @par: points to shared dev
- */
-u8 __iomem *irdma_get_hw_addr(void *par)
-{
- struct irdma_sc_dev *dev = par;
-
- return dev->hw->hw_addr;
-}
-
-/**
* irdma_remove_cqp_head - return head entry and remove
* @dev: device
*/
@@ -2060,40 +2049,6 @@ exit:
}
/**
- * irdma_cqp_up_map_cmd - Set the up-up mapping
- * @dev: pointer to device structure
- * @cmd: map command
- * @map_info: pointer to up map info
- */
-enum irdma_status_code irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
- struct irdma_up_info *map_info)
-{
- struct irdma_pci_f *rf = dev_to_rf(dev);
- struct irdma_cqp *iwcqp = &rf->cqp;
- struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, false);
- if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
-
- cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
- cqp_info->cqp_cmd = cmd;
- cqp_info->post_sq = 1;
- cqp_info->in.u.up_map.info = *map_info;
- cqp_info->in.u.up_map.cqp = cqp;
- cqp_info->in.u.up_map.scratch = (uintptr_t)cqp_request;
-
- status = irdma_handle_cqp_op(rf, cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
* irdma_ah_cqp_op - perform an AH cqp operation
* @rf: RDMA PCI function
* @sc_ah: address handle
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 7110ebf834f9..0f66e809d418 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -833,7 +833,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
qp = &iwqp->sc_qp;
qp->qp_uk.back_qp = iwqp;
- qp->qp_uk.lock = &iwqp->lock;
qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
iwqp->iwdev = iwdev;
@@ -1198,7 +1197,6 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
av->attrs = attr->ah_attr;
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
- roce_info->local_qp = ibqp->qp_num;
if (av->sgid_addr.saddr.sa_family == AF_INET6) {
__be32 *daddr =
av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
@@ -3041,24 +3039,6 @@ done:
}
/**
- * irdma_copy_sg_list - copy sg list for qp
- * @sg_list: copied into sg_list
- * @sgl: copy from sgl
- * @num_sges: count of sg entries
- */
-static void irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
- int num_sges)
-{
- unsigned int i;
-
- for (i = 0; (i < num_sges) && (i < IRDMA_MAX_WQ_FRAGMENT_COUNT); i++) {
- sg_list[i].tag_off = sgl[i].addr;
- sg_list[i].len = sgl[i].length;
- sg_list[i].stag = sgl[i].lkey;
- }
-}
-
-/**
* irdma_post_send - kernel application wr
* @ibqp: qp ptr for wr
* @ib_wr: work request ptr
@@ -3134,8 +3114,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
ret = irdma_uk_inline_send(ukqp, &info, false);
} else {
info.op.send.num_sges = ib_wr->num_sge;
- info.op.send.sg_list = (struct irdma_sge *)
- ib_wr->sg_list;
+ info.op.send.sg_list = ib_wr->sg_list;
if (iwqp->ibqp.qp_type == IB_QPT_UD ||
iwqp->ibqp.qp_type == IB_QPT_GSI) {
ah = to_iwah(ud_wr(ib_wr)->ah);
@@ -3170,15 +3149,18 @@ static int irdma_post_send(struct ib_qp *ibqp,
if (ib_wr->send_flags & IB_SEND_INLINE) {
info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
- info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
- info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.inline_rdma_write.len =
+ ib_wr->sg_list[0].length;
+ info.op.inline_rdma_write.rem_addr.addr =
+ rdma_wr(ib_wr)->remote_addr;
+ info.op.inline_rdma_write.rem_addr.lkey =
+ rdma_wr(ib_wr)->rkey;
ret = irdma_uk_inline_rdma_write(ukqp, &info, false);
} else {
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
- info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
ret = irdma_uk_rdma_write(ukqp, &info, false);
}
@@ -3199,8 +3181,8 @@ static int irdma_post_send(struct ib_qp *ibqp,
break;
}
info.op_type = IRDMA_OP_TYPE_RDMA_READ;
- info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
@@ -3287,7 +3269,6 @@ static int irdma_post_recv(struct ib_qp *ibqp,
struct irdma_qp *iwqp;
struct irdma_qp_uk *ukqp;
struct irdma_post_rq_info post_recv = {};
- struct irdma_sge sg_list[IRDMA_MAX_WQ_FRAGMENT_COUNT];
enum irdma_status_code ret = 0;
unsigned long flags;
int err = 0;
@@ -3302,8 +3283,7 @@ static int irdma_post_recv(struct ib_qp *ibqp,
while (ib_wr) {
post_recv.num_sges = ib_wr->num_sge;
post_recv.wr_id = ib_wr->wr_id;
- irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
- post_recv.sg_list = sg_list;
+ post_recv.sg_list = ib_wr->sg_list;
ret = irdma_uk_post_receive(ukqp, &post_recv);
if (ret) {
ibdev_dbg(&iwqp->iwdev->ibdev,
@@ -3399,9 +3379,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
}
if (cq_poll_info->ud_vlan_valid) {
- entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
- entry->wc_flags |= IB_WC_WITH_VLAN;
+ u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+ if (vlan) {
+ entry->vlan_id = vlan;
+ entry->wc_flags |= IB_WC_WITH_VLAN;
+ }
} else {
entry->sl = 0;
}
@@ -3647,89 +3631,89 @@ static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
}
-static const char *const irdma_hw_stat_names[] = {
+static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
/* 32bit names */
- [IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
- [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
- [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
- [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
- [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
- [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
- [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
- [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
- [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
+ [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
/* 64bit names */
- [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InOctets",
- [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InPkts",
- [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InReasmRqd",
- [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutOctets",
- [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutSegRqd",
- [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InOctets",
- [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InPkts",
- [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InReasmRqd",
- [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutOctets",
- [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutSegRqd",
- [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"tcpInSegs",
- [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32].name =
"tcpOutSegs",
- [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwInRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwInRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwInRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwOutRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwOutRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwOutRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwRdmaBnd",
- [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwRdmaInv",
- [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"RxUDP",
- [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"TxUDP",
- [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
- "RxECNMrkd",
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32]
+ .name = "RxECNMrkd",
};
static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
@@ -3753,10 +3737,10 @@ static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
IRDMA_HW_STAT_INDEX_MAX_64;
unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
- BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_names) !=
+ BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_descs) !=
(IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64));
- return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
+ return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
lifespan);
}
@@ -4326,7 +4310,7 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
static __be64 irdma_mac_to_guid(struct net_device *ndev)
{
- unsigned char *mac = ndev->dev_addr;
+ const unsigned char *mac = ndev->dev_addr;
__be64 guid;
unsigned char *dst = (unsigned char *)&guid;
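One behavioural fix rides along in the verbs.c diff: a UD completion whose tag carries VLAN ID 0 (a priority-tagged frame) no longer advertises IB_WC_WITH_VLAN, since VID 0 is not a real VLAN; the priority bits are still reported through entry->sl. The shape of the check, as applied above:

	u16 vid = cq_poll_info->ud_vlan & VLAN_VID_MASK;

	entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
	if (vid) {			/* VID 0 == priority tag only */
		entry->vlan_id = vid;
		entry->wc_flags |= IB_WC_WITH_VLAN;
	}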
diff --git a/drivers/infiniband/hw/irdma/ws.c b/drivers/infiniband/hw/irdma/ws.c
index b68c575eb78e..b0d6ee0739f5 100644
--- a/drivers/infiniband/hw/irdma/ws.c
+++ b/drivers/infiniband/hw/irdma/ws.c
@@ -330,8 +330,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
tc_node->enable = true;
ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
- if (ret)
+ if (ret) {
+ vsi->unregister_qset(vsi, tc_node);
goto reg_err;
+ }
}
ibdev_dbg(to_ibdev(vsi->dev),
"WS: Using node %d which represents VSI %d TC %d\n",
@@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
}
goto exit;
+reg_err:
+ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+ list_del(&tc_node->siblings);
+ irdma_free_node(vsi, tc_node);
leaf_add_err:
if (list_empty(&vsi_node->child_list_head)) {
if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
@@ -369,11 +375,6 @@ vsi_add_err:
exit:
mutex_unlock(&vsi->dev->ws_mutex);
return ret;
-
-reg_err:
- mutex_unlock(&vsi->dev->ws_mutex);
- irdma_ws_remove(vsi, user_pri);
- return ret;
}
/**
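The ws.c fix converts the old reg_err path, which unlocked and then called the heavyweight irdma_ws_remove(), into an in-place unwind that mirrors setup order: unregister the qset, delete the WS node via CQP, unlink and free it, then fall through to the existing leaf cleanup under the same mutex. The general shape being restored, sketched with hypothetical setup_*/teardown_* steps:

	static int demo_setup(void)
	{
		int ret;

		ret = setup_a();
		if (ret)
			return ret;

		ret = setup_b();
		if (ret)
			goto undo_a;	/* unwind in reverse setup order */

		return 0;

	undo_a:
		teardown_a();
		return ret;
	}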
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 571d9c542024..e2e1f5daddc4 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -822,10 +822,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
}
spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
}
- for (i = 0 ; i < dev->num_ports; i++) {
- flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
+ for (i = 0 ; i < dev->num_ports; i++)
destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
- }
ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
kfree(dev->sriov.alias_guid.sa_client);
}
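The alias_GUID change leans on the fact that destroy_workqueue() drains all pending work itself, so the explicit flush_workqueue() immediately before it was redundant rather than protective:

	/* flush_workqueue(wq);  -- implied by the next call */
	destroy_workqueue(wq);	/* drains, then frees the workqueue */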
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f367f4a4abff..ceca05982f61 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2105,10 +2105,10 @@ mlx4_ib_alloc_hw_device_stats(struct ib_device *ibdev)
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[0].name)
+ if (!diag[0].descs)
return NULL;
- return rdma_alloc_hw_stats_struct(diag[0].name, diag[0].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -2118,10 +2118,10 @@ mlx4_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[1].name)
+ if (!diag[1].descs)
return NULL;
- return rdma_alloc_hw_stats_struct(diag[1].name, diag[1].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -2151,10 +2151,8 @@ static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
}
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
- const char ***name,
- u32 **offset,
- u32 *num,
- bool port)
+ struct rdma_stat_desc **pdescs,
+ u32 **offset, u32 *num, bool port)
{
u32 num_counters;
@@ -2166,46 +2164,46 @@ static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
if (!port)
num_counters += ARRAY_SIZE(diag_device_only);
- *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
- if (!*name)
+ *pdescs = kcalloc(num_counters, sizeof(struct rdma_stat_desc),
+ GFP_KERNEL);
+ if (!*pdescs)
return -ENOMEM;
*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
if (!*offset)
- goto err_name;
+ goto err;
*num = num_counters;
return 0;
-err_name:
- kfree(*name);
+err:
+ kfree(*pdescs);
return -ENOMEM;
}
static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
- const char **name,
- u32 *offset,
- bool port)
+ struct rdma_stat_desc *descs,
+ u32 *offset, bool port)
{
int i;
int j;
for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
- name[i] = diag_basic[i].name;
+ descs[i].name = diag_basic[i].name;
offset[i] = diag_basic[i].offset;
}
if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
- name[j] = diag_ext[i].name;
+ descs[j].name = diag_ext[i].name;
offset[j] = diag_ext[i].offset;
}
}
if (!port) {
for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
- name[j] = diag_device_only[i].name;
+ descs[j].name = diag_device_only[i].name;
offset[j] = diag_device_only[i].offset;
}
}
@@ -2233,13 +2231,13 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
if (i && !per_port)
continue;
- ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
+ ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
&diag[i].offset,
&diag[i].num_counters, i);
if (ret)
goto err_alloc;
- mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
+ mlx4_ib_fill_diag_counters(ibdev, diag[i].descs,
diag[i].offset, i);
}
@@ -2249,7 +2247,7 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
err_alloc:
if (i) {
- kfree(diag[i - 1].name);
+ kfree(diag[i - 1].descs);
kfree(diag[i - 1].offset);
}
@@ -2262,7 +2260,7 @@ static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
kfree(ibdev->diag_counters[i].offset);
- kfree(ibdev->diag_counters[i].name);
+ kfree(ibdev->diag_counters[i].descs);
}
}
@@ -2275,7 +2273,7 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
u64 release_mac = MLX4_IB_INVALID_MAC;
struct mlx4_ib_qp *qp;
- new_smac = mlx4_mac_to_u64(dev->dev_addr);
+ new_smac = ether_addr_to_u64(dev->dev_addr);
atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
/* no need for update QP1 and mac registration in non-SRIOV */
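mlx4's private MAC-to-u64 helper gives way to the generic ether_addr_to_u64() from <linux/etherdevice.h>, which packs the six address bytes big-endian into the low 48 bits of a u64. An illustrative caller, with a hypothetical demo_ name:

	#include <linux/etherdevice.h>

	static u64 demo_mac_key(const struct net_device *ndev)
	{
		/* dev_addr is const now; the helper accepts that */
		return ether_addr_to_u64(ndev->dev_addr);
	}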
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c60f6e9ac640..d84023b4b1b8 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -601,7 +601,7 @@ struct mlx4_ib_counters {
#define MLX4_DIAG_COUNTERS_TYPES 2
struct mlx4_ib_diag_counters {
- const char **name;
+ struct rdma_stat_desc *descs;
u32 *offset;
u32 num_counters;
};
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8662f462e2a5..b17d6ebc5b70 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1099,8 +1099,10 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
if (dev->steering_support ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
qp->flags |= MLX4_IB_QP_NETIF;
- else
+ else {
+ err = -EINVAL;
goto err;
+ }
}
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
@@ -1853,7 +1855,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
u16 vlan_id, u8 *smac)
{
return _mlx4_set_path(dev, &qp->ah_attr,
- mlx4_mac_to_u64(smac),
+ ether_addr_to_u64(smac),
vlan_id,
path, &mqp->pri, port);
}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index a8db8a051170..ff3742b0460a 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -206,3 +206,29 @@ out:
kfree(in);
return err;
}
+
+int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
+ MLX5_SET(alloc_uar_in, in, uid, uid);
+ err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
+ if (err)
+ return err;
+
+ *uarn = MLX5_GET(alloc_uar_out, out, uar);
+ return 0;
+}
+
+int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};
+
+ MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
+ MLX5_SET(dealloc_uar_in, in, uar, uarn);
+ MLX5_SET(dealloc_uar_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev, dealloc_uar, in);
+}
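The two new wrappers encapsulate the usual mlx5 mailbox pattern: zeroed in/out buffers sized with MLX5_ST_SZ_DW, opcode and uid set on the way in, the UAR index pulled out on the way out. Caller-side usage, sketched; uid 0 means the kernel's own context:

	u32 uarn;
	int err;

	err = mlx5_cmd_uar_alloc(mdev, &uarn, 0);
	if (err)
		return err;
	/* ... map and use the UAR page ... */
	mlx5_cmd_uar_dealloc(mdev, uarn, 0);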
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 66c96292ed43..ee46638db5de 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -57,4 +57,6 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
+int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid);
+int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 224ba36f2946..945758f39523 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -12,6 +12,7 @@
struct mlx5_ib_counter {
const char *name;
size_t offset;
+ u32 type;
};
#define INIT_Q_COUNTER(_name) \
@@ -75,6 +76,21 @@ static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
};
+#define INIT_OP_COUNTER(_name, _type) \
+ { .name = #_name, .type = MLX5_IB_OPCOUNTER_##_type}
+
+static const struct mlx5_ib_counter basic_op_cnts[] = {
+ INIT_OP_COUNTER(cc_rx_ce_pkts, CC_RX_CE_PKTS),
+};
+
+static const struct mlx5_ib_counter rdmarx_cnp_op_cnts[] = {
+ INIT_OP_COUNTER(cc_rx_cnp_pkts, CC_RX_CNP_PKTS),
+};
+
+static const struct mlx5_ib_counter rdmatx_cnp_op_cnts[] = {
+ INIT_OP_COUNTER(cc_tx_cnp_pkts, CC_TX_CNP_PKTS),
+};
+
static int mlx5_ib_read_counters(struct ib_counters *counters,
struct ib_counters_read_attr *read_attr,
struct uverbs_attr_bundle *attrs)
@@ -161,17 +177,34 @@ u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num)
return cnts->set_id;
}
+static struct rdma_hw_stats *do_alloc_stats(const struct mlx5_ib_counters *cnts)
+{
+ struct rdma_hw_stats *stats;
+ u32 num_hw_counters;
+ int i;
+
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ stats = rdma_alloc_hw_stats_struct(cnts->descs,
+ num_hw_counters +
+ cnts->num_op_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ if (!stats)
+ return NULL;
+
+ for (i = 0; i < cnts->num_op_counters; i++)
+ set_bit(num_hw_counters + i, stats->is_disabled);
+
+ return stats;
+}
+
static struct rdma_hw_stats *
mlx5_ib_alloc_hw_device_stats(struct ib_device *ibdev)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts = &dev->port[0].cnts;
- return rdma_alloc_hw_stats_struct(cnts->names,
- cnts->num_q_counters +
- cnts->num_cong_counters +
- cnts->num_ext_ppcnt_counters,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ return do_alloc_stats(cnts);
}
static struct rdma_hw_stats *
@@ -180,11 +213,7 @@ mlx5_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts;
- return rdma_alloc_hw_stats_struct(cnts->names,
- cnts->num_q_counters +
- cnts->num_cong_counters +
- cnts->num_ext_ppcnt_counters,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ return do_alloc_stats(cnts);
}
static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
@@ -241,9 +270,9 @@ free:
return ret;
}
-static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
- struct rdma_hw_stats *stats,
- u32 port_num, int index)
+static int do_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
@@ -295,6 +324,88 @@ done:
return num_counters;
}
+static int do_get_op_stat(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+ const struct mlx5_ib_op_fc *opfcs;
+ u64 packets = 0, bytes;
+ u32 type;
+ int ret;
+
+ cnts = get_counters(dev, port_num - 1);
+ opfcs = cnts->opfcs;
+ type = *(u32 *)cnts->descs[index].priv;
+ if (type >= MLX5_IB_OPCOUNTER_MAX)
+ return -EINVAL;
+
+ if (!opfcs[type].fc)
+ goto out;
+
+ ret = mlx5_fc_query(dev->mdev, opfcs[type].fc,
+ &packets, &bytes);
+ if (ret)
+ return ret;
+
+out:
+ stats->value[index] = packets;
+ return index;
+}
+
+static int do_get_op_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+ int index, ret, num_hw_counters;
+
+ cnts = get_counters(dev, port_num - 1);
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ for (index = num_hw_counters;
+ index < (num_hw_counters + cnts->num_op_counters); index++) {
+ ret = do_get_op_stat(ibdev, stats, port_num, index);
+ if (ret != index)
+ return ret;
+ }
+
+ return cnts->num_op_counters;
+}
+
+static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ int num_counters, num_hw_counters, num_op_counters;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+
+ cnts = get_counters(dev, port_num - 1);
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ num_counters = num_hw_counters + cnts->num_op_counters;
+
+ if (index < 0 || index > num_counters)
+ return -EINVAL;
+ else if (index > 0 && index < num_hw_counters)
+ return do_get_hw_stats(ibdev, stats, port_num, index);
+ else if (index >= num_hw_counters && index < num_counters)
+ return do_get_op_stat(ibdev, stats, port_num, index);
+
+ num_hw_counters = do_get_hw_stats(ibdev, stats, port_num, index);
+ if (num_hw_counters < 0)
+ return num_hw_counters;
+
+ num_op_counters = do_get_op_stats(ibdev, stats, port_num);
+ if (num_op_counters < 0)
+ return num_op_counters;
+
+ return num_hw_counters + num_op_counters;
+}
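
The dispatch above relies on one flat index space shared by both counter families. Sketch of the assumed layout, where index 0 means "refresh everything" and falls through to the full re-read at the bottom:

/*
 * index == 0                       refresh all counters
 * 0 < index < num_hw_counters      one q/cong/ext_ppcnt counter
 * num_hw_counters <= index
 *         < num_counters           one optional (op) counter
 */
static int example_first_op_index(const struct mlx5_ib_counters *cnts)
{
	return cnts->num_q_counters + cnts->num_cong_counters +
	       cnts->num_ext_ppcnt_counters;
}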
+
static struct rdma_hw_stats *
mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
{
@@ -302,11 +413,7 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
const struct mlx5_ib_counters *cnts =
get_counters(dev, counter->port - 1);
- return rdma_alloc_hw_stats_struct(cnts->names,
- cnts->num_q_counters +
- cnts->num_cong_counters +
- cnts->num_ext_ppcnt_counters,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ return do_alloc_stats(cnts);
}
static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
@@ -371,67 +478,89 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
return mlx5_ib_qp_set_counter(qp, NULL);
}
-
static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
- const char **names,
- size_t *offsets)
+ struct rdma_stat_desc *descs, size_t *offsets)
{
int i;
int j = 0;
for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
- names[j] = basic_q_cnts[i].name;
+ descs[j].name = basic_q_cnts[i].name;
offsets[j] = basic_q_cnts[i].offset;
}
if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
- names[j] = out_of_seq_q_cnts[i].name;
+ descs[j].name = out_of_seq_q_cnts[i].name;
offsets[j] = out_of_seq_q_cnts[i].offset;
}
}
if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
- names[j] = retrans_q_cnts[i].name;
+ descs[j].name = retrans_q_cnts[i].name;
offsets[j] = retrans_q_cnts[i].offset;
}
}
if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
- names[j] = extended_err_cnts[i].name;
+ descs[j].name = extended_err_cnts[i].name;
offsets[j] = extended_err_cnts[i].offset;
}
}
if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) {
- names[j] = roce_accl_cnts[i].name;
+ descs[j].name = roce_accl_cnts[i].name;
offsets[j] = roce_accl_cnts[i].offset;
}
}
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
- names[j] = cong_cnts[i].name;
+ descs[j].name = cong_cnts[i].name;
offsets[j] = cong_cnts[i].offset;
}
}
if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
- names[j] = ext_ppcnt_cnts[i].name;
+ descs[j].name = ext_ppcnt_cnts[i].name;
offsets[j] = ext_ppcnt_cnts[i].offset;
}
}
+
+ for (i = 0; i < ARRAY_SIZE(basic_op_cnts); i++, j++) {
+ descs[j].name = basic_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &basic_op_cnts[i].type;
+ }
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode)) {
+ for (i = 0; i < ARRAY_SIZE(rdmarx_cnp_op_cnts); i++, j++) {
+ descs[j].name = rdmarx_cnp_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &rdmarx_cnp_op_cnts[i].type;
+ }
+ }
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode)) {
+ for (i = 0; i < ARRAY_SIZE(rdmatx_cnp_op_cnts); i++, j++) {
+ descs[j].name = rdmatx_cnp_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &rdmatx_cnp_op_cnts[i].type;
+ }
+ }
}
static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
struct mlx5_ib_counters *cnts)
{
- u32 num_counters;
+ u32 num_counters, num_op_counters;
num_counters = ARRAY_SIZE(basic_q_cnts);
@@ -457,20 +586,34 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
}
- cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
- if (!cnts->names)
+
+ num_op_counters = ARRAY_SIZE(basic_op_cnts);
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode))
+ num_op_counters += ARRAY_SIZE(rdmarx_cnp_op_cnts);
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode))
+ num_op_counters += ARRAY_SIZE(rdmatx_cnp_op_cnts);
+
+ cnts->num_op_counters = num_op_counters;
+ num_counters += num_op_counters;
+ cnts->descs = kcalloc(num_counters,
+ sizeof(struct rdma_stat_desc), GFP_KERNEL);
+ if (!cnts->descs)
return -ENOMEM;
cnts->offsets = kcalloc(num_counters,
sizeof(*cnts->offsets), GFP_KERNEL);
if (!cnts->offsets)
- goto err_names;
+ goto err;
return 0;
-err_names:
- kfree(cnts->names);
- cnts->names = NULL;
+err:
+ kfree(cnts->descs);
+ cnts->descs = NULL;
return -ENOMEM;
}
@@ -478,7 +621,7 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
int num_cnt_ports;
- int i;
+ int i, j;
num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
@@ -491,8 +634,20 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
dev->port[i].cnts.set_id);
mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
}
- kfree(dev->port[i].cnts.names);
+ kfree(dev->port[i].cnts.descs);
kfree(dev->port[i].cnts.offsets);
+
+ for (j = 0; j < MLX5_IB_OPCOUNTER_MAX; j++) {
+ if (!dev->port[i].cnts.opfcs[j].fc)
+ continue;
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
+ mlx5_ib_fs_remove_op_fc(dev,
+ &dev->port[i].cnts.opfcs[j], j);
+ mlx5_fc_destroy(dev->mdev,
+ dev->port[i].cnts.opfcs[j].fc);
+ dev->port[i].cnts.opfcs[j].fc = NULL;
+ }
}
}
@@ -514,7 +669,7 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
if (err)
goto err_alloc;
- mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
+ mlx5_ib_fill_counters(dev, dev->port[i].cnts.descs,
dev->port[i].cnts.offsets);
MLX5_SET(alloc_q_counter_in, in, uid,
@@ -672,6 +827,56 @@ void mlx5_ib_counters_clear_description(struct ib_counters *counters)
mutex_unlock(&mcounters->mcntrs_mutex);
}
+static int mlx5_ib_modify_stat(struct ib_device *device, u32 port,
+ unsigned int index, bool enable)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_ib_counters *cnts;
+ struct mlx5_ib_op_fc *opfc;
+ u32 num_hw_counters, type;
+ int ret;
+
+ cnts = &dev->port[port - 1].cnts;
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ if (index < num_hw_counters ||
+ index >= (num_hw_counters + cnts->num_op_counters))
+ return -EINVAL;
+
+ if (!(cnts->descs[index].flags & IB_STAT_FLAG_OPTIONAL))
+ return -EINVAL;
+
+ type = *(u32 *)cnts->descs[index].priv;
+ if (type >= MLX5_IB_OPCOUNTER_MAX)
+ return -EINVAL;
+
+ opfc = &cnts->opfcs[type];
+
+ if (enable) {
+ if (opfc->fc)
+ return -EEXIST;
+
+ opfc->fc = mlx5_fc_create(dev->mdev, false);
+ if (IS_ERR(opfc->fc))
+ return PTR_ERR(opfc->fc);
+
+ ret = mlx5_ib_fs_add_op_fc(dev, port, opfc, type);
+ if (ret) {
+ mlx5_fc_destroy(dev->mdev, opfc->fc);
+ opfc->fc = NULL;
+ }
+ return ret;
+ }
+
+ if (!opfc->fc)
+ return -EINVAL;
+
+ mlx5_ib_fs_remove_op_fc(dev, opfc, type);
+ mlx5_fc_destroy(dev->mdev, opfc->fc);
+ opfc->fc = NULL;
+ return 0;
+}
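
Userspace reaches this path through the rdma netlink statistics interface (rdmatool exposes it as optional-counters set/unset; the exact syntax is tool-version dependent). In-kernel it arrives via the device op registered below; a hypothetical toggle:

/* Hypothetical caller: enable, then disable, one optional counter. */
static int example_toggle(struct ib_device *ibdev, u32 port, u32 index)
{
	int ret;

	ret = ibdev->ops.modify_hw_stat(ibdev, port, index, true);
	if (ret)
		return ret;

	return ibdev->ops.modify_hw_stat(ibdev, port, index, false);
}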
+
static const struct ib_device_ops hw_stats_ops = {
.alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats,
.get_hw_stats = mlx5_ib_get_hw_stats,
@@ -680,6 +885,8 @@ static const struct ib_device_ops hw_stats_ops = {
.counter_dealloc = mlx5_ib_counter_dealloc,
.counter_alloc_stats = mlx5_ib_counter_alloc_stats,
.counter_update_stats = mlx5_ib_counter_update_stats,
+ .modify_hw_stat = IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) ?
+ mlx5_ib_modify_stat : NULL,
};
static const struct ib_device_ops hw_switchdev_stats_ops = {
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index e95967aefe78..08b7f6bc56c3 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1292,21 +1292,16 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
struct mlx5_ib_dev *dev,
void *in, void *out)
{
- struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
- struct mlx5_core_mkey *mkey;
+ struct mlx5_ib_mkey *mkey = &obj->mkey;
void *mkc;
u8 key;
- mkey = &devx_mr->mmkey;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
key = MLX5_GET(mkc, mkc, mkey_7_0);
mkey->key = mlx5_idx_to_mkey(
MLX5_GET(create_mkey_out, out, mkey_index)) | key;
mkey->type = MLX5_MKEY_INDIRECT_DEVX;
- mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
- mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->pd = MLX5_GET(mkc, mkc, pd);
- devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
+ mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
init_waitqueue_head(&mkey->wait);
return mlx5r_store_odp_mkey(dev, mkey);
@@ -1384,13 +1379,13 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
dev = mlx5_udata_to_mdev(&attrs->driver_udata);
if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
xa_erase(&obj->ib_dev->odp_mkeys,
- mlx5_base_mkey(obj->devx_mr.mmkey.key)))
+ mlx5_base_mkey(obj->mkey.key)))
/*
* The pagefault_single_data_segment() does commands against
* the mmkey; we must wait for that to stop before freeing the
* mkey, as another allocation could get the same mkey #.
*/
- mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);
+ mlx5r_deref_wait_odp_mkey(&obj->mkey);
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
diff --git a/drivers/infiniband/hw/mlx5/devx.h b/drivers/infiniband/hw/mlx5/devx.h
index 1f69866aed16..ee2213275fd6 100644
--- a/drivers/infiniband/hw/mlx5/devx.h
+++ b/drivers/infiniband/hw/mlx5/devx.h
@@ -16,7 +16,7 @@ struct devx_obj {
u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
u32 flags;
union {
- struct mlx5_ib_devx_mr devx_mr;
+ struct mlx5_ib_mkey mkey;
struct mlx5_core_dct core_dct;
struct mlx5_core_cq core_cq;
u32 flow_counter_bulk_size;
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 5fbc0a8454b9..b780185d9dc6 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -10,12 +10,14 @@
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <rdma/ib_hdrs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <linux/mlx5/eswitch.h>
+#include <net/inet_ecn.h>
#include "mlx5_ib.h"
#include "counters.h"
#include "devx.h"
@@ -847,6 +849,191 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
return prio;
}
+enum {
+ RDMA_RX_ECN_OPCOUNTER_PRIO,
+ RDMA_RX_CNP_OPCOUNTER_PRIO,
+};
+
+enum {
+ RDMA_TX_CNP_OPCOUNTER_PRIO,
+};
+
+static int set_vhca_port_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec)
+{
+ if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
+ ft_field_support.source_vhca_port) ||
+ !MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
+ ft_field_support.source_vhca_port))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, &spec->match_criteria,
+ misc_parameters.source_vhca_port);
+ MLX5_SET(fte_match_param, &spec->match_value,
+ misc_parameters.source_vhca_port, port_num);
+
+ return 0;
+}
+
+static int set_ecn_ce_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec, int ipv)
+{
+ if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
+ ft_field_support.outer_ip_version))
+ return -EOPNOTSUPP;
+
+ if (mlx5_core_mp_enabled(dev->mdev) &&
+ set_vhca_port_spec(dev, port_num, spec))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_ecn);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_ecn,
+ INET_ECN_CE);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
+ ipv);
+
+ spec->match_criteria_enable =
+ get_match_criteria_enable(spec->match_criteria);
+
+ return 0;
+}
+
+static int set_cnp_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec)
+{
+ if (mlx5_core_mp_enabled(dev->mdev) &&
+ set_vhca_port_spec(dev, port_num, spec))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.bth_opcode);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters.bth_opcode,
+ IB_BTH_OPCODE_CNP);
+
+ spec->match_criteria_enable =
+ get_match_criteria_enable(spec->match_criteria);
+
+ return 0;
+}
+
+int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type)
+{
+ enum mlx5_flow_namespace_type fn_type;
+ int priority, i, err, spec_num;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dst;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_flow_spec *spec;
+
+ spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS:
+ if (set_ecn_ce_spec(dev, port_num, &spec[0],
+ MLX5_FS_IPV4_VERSION) ||
+ set_ecn_ce_spec(dev, port_num, &spec[1],
+ MLX5_FS_IPV6_VERSION)) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 2;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_ECN_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS:
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_CNP_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS:
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_CNP_OPCOUNTER_PRIO;
+ break;
+
+ default:
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
+ if (!ns) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ prio = &dev->flow_db->opfcs[type];
+ if (!prio->flow_table) {
+ prio = _get_prio(ns, prio, priority,
+ dev->num_ports * MAX_OPFC_RULES, 1, 0);
+ if (IS_ERR(prio)) {
+ err = PTR_ERR(prio);
+ goto free;
+ }
+ }
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dst.counter_id = mlx5_fc_id(opfc->fc);
+
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ for (i = 0; i < spec_num; i++) {
+ opfc->rule[i] = mlx5_add_flow_rules(prio->flow_table, &spec[i],
+ &flow_act, &dst, 1);
+ if (IS_ERR(opfc->rule[i])) {
+ err = PTR_ERR(opfc->rule[i]);
+ goto del_rules;
+ }
+ }
+ prio->refcount += spec_num;
+ kfree(spec);
+
+ return 0;
+
+del_rules:
+ for (i -= 1; i >= 0; i--)
+ mlx5_del_flow_rules(opfc->rule[i]);
+ put_flow_table(dev, prio, false);
+free:
+ kfree(spec);
+ return err;
+}
+
+void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type)
+{
+ int i;
+
+ for (i = 0; i < MAX_OPFC_RULES && opfc->rule[i]; i++) {
+ mlx5_del_flow_rules(opfc->rule[i]);
+ put_flow_table(dev, &dev->flow_db->opfcs[type], true);
+ }
+}
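
Taken together with mlx5_ib_modify_stat() in counters.c, the expected lifecycle of one optional flow counter looks roughly like this (a sketch, not a real call site):

static int example_opfc_lifecycle(struct mlx5_ib_dev *dev, u32 port)
{
	struct mlx5_ib_op_fc opfc = {};
	u64 packets, bytes;
	int err;

	opfc.fc = mlx5_fc_create(dev->mdev, false);
	if (IS_ERR(opfc.fc))
		return PTR_ERR(opfc.fc);

	err = mlx5_ib_fs_add_op_fc(dev, port, &opfc,
				   MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS);
	if (err)
		goto destroy;

	/* counting is now active; read it back at any point */
	err = mlx5_fc_query(dev->mdev, opfc.fc, &packets, &bytes);

	mlx5_ib_fs_remove_op_fc(dev, &opfc,
				MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS);
destroy:
	mlx5_fc_destroy(dev->mdev, opfc.fc);
	return err;
}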
+
static void set_underlay_qp(struct mlx5_ib_dev *dev,
struct mlx5_flow_spec *spec,
u32 underlay_qpn)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8664bcf6d3f5..5ec8bd2f0b2f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1643,7 +1643,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte
bfregi = &context->bfregi;
for (i = 0; i < bfregi->num_static_sys_pages; i++) {
- err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
+ err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
+ context->devx_uid);
if (err)
goto error;
@@ -1657,7 +1658,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte
error:
for (--i; i >= 0; i--)
- if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
+ if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
+ context->devx_uid))
mlx5_ib_warn(dev, "failed to free uar %d\n", i);
return err;
@@ -1673,7 +1675,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
for (i = 0; i < bfregi->num_sys_pages; i++)
if (i < bfregi->num_static_sys_pages ||
bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
- mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+ mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
+ context->devx_uid);
}
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
@@ -1891,6 +1894,13 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
return -EINVAL;
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
+ err = mlx5_ib_devx_create(dev, true);
+ if (err < 0)
+ goto out_ctx;
+ context->devx_uid = err;
+ }
+
lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
bfregi = &context->bfregi;
@@ -1903,7 +1913,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
/* updates req->total_num_bfregs */
err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
if (err)
- goto out_ctx;
+ goto out_devx;
mutex_init(&bfregi->lock);
bfregi->lib_uar_4k = lib_uar_4k;
@@ -1911,7 +1921,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
GFP_KERNEL);
if (!bfregi->count) {
err = -ENOMEM;
- goto out_ctx;
+ goto out_devx;
}
bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
@@ -1927,17 +1937,10 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
goto out_sys_pages;
uar_done:
- if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
- err = mlx5_ib_devx_create(dev, true);
- if (err < 0)
- goto out_uars;
- context->devx_uid = err;
- }
-
err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
context->devx_uid);
if (err)
- goto out_devx;
+ goto out_uars;
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -1972,9 +1975,6 @@ uar_done:
out_mdev:
mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
-out_devx:
- if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
- mlx5_ib_devx_destroy(dev, context->devx_uid);
out_uars:
deallocate_uars(dev, context);
@@ -1985,6 +1985,10 @@ out_sys_pages:
out_count:
kfree(bfregi->count);
+out_devx:
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
+
out_ctx:
return err;
}
@@ -2021,12 +2025,12 @@ static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
bfregi = &context->bfregi;
mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
- if (context->devx_uid)
- mlx5_ib_devx_destroy(dev, context->devx_uid);
-
deallocate_uars(dev, context);
kfree(bfregi->sys_pages);
kfree(bfregi->count);
+
+ if (context->devx_uid)
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
}
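
The reordering in the alloc/dealloc paths above is deliberate: the DEVX uid must exist before the first UAR is allocated with it, and must outlive everything allocated under it. The resulting bracketing (sketch):

/*
 * alloc:   devx_create -> uar_alloc(uid) -> transport_domain(uid)
 * dealloc: transport_domain -> uar_dealloc(uid) -> devx_destroy
 *
 * The uid now brackets every firmware object owned by the ucontext.
 */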
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
@@ -2119,6 +2123,7 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
struct mlx5_var_table *var_table = &dev->var_table;
+ struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);
switch (mentry->mmap_flag) {
case MLX5_IB_MMAP_TYPE_MEMIC:
@@ -2133,7 +2138,8 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
break;
case MLX5_IB_MMAP_TYPE_UAR_WC:
case MLX5_IB_MMAP_TYPE_UAR_NC:
- mlx5_cmd_free_uar(dev->mdev, mentry->page_idx);
+ mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
+ context->devx_uid);
kfree(mentry);
break;
default:
@@ -2211,7 +2217,8 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
bfregi->count[bfreg_dyn_idx]++;
mutex_unlock(&bfregi->lock);
- err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+ err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
+ context->devx_uid);
if (err) {
mlx5_ib_warn(dev, "UAR alloc failed\n");
goto free_bfreg;
@@ -2240,7 +2247,7 @@ err:
if (!dyn_uar)
return err;
- mlx5_cmd_free_uar(dev->mdev, idx);
+ mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);
free_bfreg:
mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
@@ -3489,7 +3496,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c,
return ERR_PTR(-ENOMEM);
dev = to_mdev(c->ibucontext.device);
- err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+ err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
if (err)
goto end;
@@ -3507,7 +3514,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c,
return entry;
err_insert:
- mlx5_cmd_free_uar(dev->mdev, uar_index);
+ mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
end:
kfree(entry);
return ERR_PTR(err);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bf20a388eabe..e636e954f6bf 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -263,6 +263,14 @@ struct mlx5_ib_pp {
struct mlx5_core_dev *mdev;
};
+enum mlx5_ib_optional_counter_type {
+ MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
+ MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
+ MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,
+
+ MLX5_IB_OPCOUNTER_MAX,
+};
+
struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
@@ -271,6 +279,7 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio fdb;
struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
struct mlx5_flow_table *lag_demux_ft;
/* Protect flow steering bypass flow tables
* when add/del flow rules.
@@ -619,6 +628,20 @@ struct mlx5_user_mmap_entry {
u32 page_idx;
};
+enum mlx5_mkey_type {
+ MLX5_MKEY_MR = 1,
+ MLX5_MKEY_MW,
+ MLX5_MKEY_INDIRECT_DEVX,
+};
+
+struct mlx5_ib_mkey {
+ u32 key;
+ enum mlx5_mkey_type type;
+ unsigned int ndescs;
+ struct wait_queue_head wait;
+ refcount_t usecount;
+};
+
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
@@ -637,7 +660,7 @@ struct mlx5_user_mmap_entry {
struct mlx5_ib_mr {
struct ib_mr ibmr;
- struct mlx5_core_mkey mmkey;
+ struct mlx5_ib_mkey mmkey;
/* User MR data */
struct mlx5_cache_ent *cache_ent;
@@ -659,7 +682,6 @@ struct mlx5_ib_mr {
void *descs_alloc;
dma_addr_t desc_map;
int max_descs;
- int ndescs;
int desc_size;
int access_mode;
@@ -713,13 +735,7 @@ static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
struct mlx5_ib_mw {
struct ib_mw ibmw;
- struct mlx5_core_mkey mmkey;
- int ndescs;
-};
-
-struct mlx5_ib_devx_mr {
- struct mlx5_core_mkey mmkey;
- int ndescs;
+ struct mlx5_ib_mkey mmkey;
};
struct mlx5_ib_umr_context {
@@ -797,15 +813,32 @@ struct mlx5_ib_resources {
struct mlx5_ib_port_resources ports[2];
};
+#define MAX_OPFC_RULES 2
+
+struct mlx5_ib_op_fc {
+ struct mlx5_fc *fc;
+ struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
+};
+
struct mlx5_ib_counters {
- const char **names;
+ struct rdma_stat_desc *descs;
size_t *offsets;
u32 num_q_counters;
u32 num_cong_counters;
u32 num_ext_ppcnt_counters;
+ u32 num_op_counters;
u16 set_id;
+ struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
};
+int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type);
+
+void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type);
+
struct mlx5_ib_multiport_info;
struct mlx5_ib_multiport {
@@ -1579,7 +1612,7 @@ static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
}
static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
- struct mlx5_core_mkey *mmkey)
+ struct mlx5_ib_mkey *mmkey)
{
refcount_set(&mmkey->usecount, 1);
@@ -1588,14 +1621,14 @@ static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
}
/* deref an mkey that can participate in ODP flow */
-static inline void mlx5r_deref_odp_mkey(struct mlx5_core_mkey *mmkey)
+static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
if (refcount_dec_and_test(&mmkey->usecount))
wake_up(&mmkey->wait);
}
/* deref an mkey that can participate in ODP flow and wait for release */
-static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_core_mkey *mmkey)
+static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
mlx5r_deref_odp_mkey(mmkey);
wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3be36ebbf67a..157d862fb864 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -88,9 +88,8 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
MLX5_SET64(mkc, mkc, start_addr, start_addr);
}
-static void
-assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *in)
+static void assign_mkey_variant(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mkey, u32 *in)
{
u8 key = atomic_inc_return(&dev->mkey_var);
void *mkc;
@@ -100,17 +99,22 @@ assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
mkey->key = key;
}
-static int
-mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *in, int inlen)
+static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
{
+ int ret;
+
assign_mkey_variant(dev, mkey, in);
- return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
+ ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
+ if (!ret)
+ init_waitqueue_head(&mkey->wait);
+
+ return ret;
}
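
With mlx5_core_mkey folded into the driver-private mlx5_ib_mkey, this helper also guarantees the wait queue is initialized whenever creation succeeds. A hypothetical caller relying on that contract:

static int example_create(struct mlx5_ib_dev *dev,
			  struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
{
	int err = mlx5_ib_create_mkey(dev, mkey, in, inlen);

	if (err)
		return err;

	/* mkey->key is valid and mkey->wait is usable here; take a
	 * usecount only if the mkey will enter ODP flows.
	 */
	refcount_set(&mkey->usecount, 1);
	return 0;
}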
static int
mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
- struct mlx5_core_mkey *mkey,
+ struct mlx5_ib_mkey *mkey,
struct mlx5_async_ctx *async_ctx,
u32 *in, int inlen, u32 *out, int outlen,
struct mlx5_async_work *context)
@@ -133,7 +137,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
- return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}
static void create_mkey_callback(int status, struct mlx5_async_work *context)
@@ -260,10 +264,11 @@ static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
goto free_in;
}
- err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
+ err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen);
if (err)
goto free_mr;
+ init_waitqueue_head(&mr->mmkey.wait);
mr->mmkey.type = MLX5_MKEY_MR;
WRITE_ONCE(ent->dev->cache.last_add, jiffies);
spin_lock_irq(&ent->lock);
@@ -290,7 +295,7 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
ent->available_mrs--;
ent->total_mrs--;
spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
+ mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
kfree(mr);
spin_lock_irq(&ent->lock);
}
@@ -600,29 +605,21 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
/* Return a MR already available in the cache */
static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
{
- struct mlx5_ib_dev *dev = req_ent->dev;
struct mlx5_ib_mr *mr = NULL;
struct mlx5_cache_ent *ent = req_ent;
- /* Try larger MR pools from the cache to satisfy the allocation */
- for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
- mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
- ent - dev->cache.ent);
-
- spin_lock_irq(&ent->lock);
- if (!list_empty(&ent->head)) {
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
- list);
- list_del(&mr->list);
- ent->available_mrs--;
- queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
- mlx5_clear_mr(mr);
- return mr;
- }
+ spin_lock_irq(&ent->lock);
+ if (!list_empty(&ent->head)) {
+ mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
+ list_del(&mr->list);
+ ent->available_mrs--;
queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
+ mlx5_clear_mr(mr);
+ return mr;
}
+ queue_adjust_cache_locked(ent);
+ spin_unlock_irq(&ent->lock);
req_ent->miss++;
return NULL;
}
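
The rework narrows cache allocation to the exact entry requested; a miss no longer borrows from larger pools. Summary (sketch):

/*
 * Before: miss on ent -> scan every larger cache entry for a spare MR.
 * After:  miss on ent -> bump ent->miss, return NULL; the caller
 *         creates a fresh mkey of the right size instead.
 */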
@@ -658,7 +655,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
ent->available_mrs--;
ent->total_mrs--;
spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
@@ -911,12 +908,13 @@ static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- u64 length, int access_flags)
+ u64 length, int access_flags, u64 iova)
{
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
mr->ibmr.length = length;
mr->ibmr.device = &dev->ib_dev;
+ mr->ibmr.iova = iova;
mr->access_flags = access_flags;
}
@@ -974,11 +972,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->ibmr.pd = pd;
mr->umem = umem;
- mr->mmkey.iova = iova;
- mr->mmkey.size = umem->length;
- mr->mmkey.pd = to_mpd(pd)->pdn;
mr->page_shift = order_base_2(page_size);
- set_mr_fields(dev, mr, umem->length, access_flags);
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
return mr;
}
@@ -1087,8 +1082,8 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
wr->wr.opcode = MLX5_IB_WR_UMR;
wr->pd = mr->ibmr.pd;
wr->mkey = mr->mmkey.key;
- wr->length = mr->mmkey.size;
- wr->virt_addr = mr->mmkey.iova;
+ wr->length = mr->ibmr.length;
+ wr->virt_addr = mr->ibmr.iova;
wr->access_flags = mr->access_flags;
wr->page_shift = mr->page_shift;
wr->xlt_size = sg->length;
@@ -1339,9 +1334,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
goto err_2;
}
mr->mmkey.type = MLX5_MKEY_MR;
- mr->desc_size = sizeof(struct mlx5_mtt);
mr->umem = umem;
- set_mr_fields(dev, mr, umem->length, access_flags);
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
@@ -1388,7 +1382,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
kfree(in);
- set_mr_fields(dev, mr, length, acc);
+ set_mr_fields(dev, mr, length, acc, start_addr);
return &mr->ibmr;
@@ -1533,6 +1527,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
ib_umem_release(&odp->umem);
return ERR_CAST(mr);
}
+ xa_init(&mr->implicit_children);
odp->private = mr;
err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
@@ -1709,7 +1704,6 @@ static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
return err;
mr->access_flags = access_flags;
- mr->mmkey.pd = to_mpd(pd)->pdn;
return 0;
}
@@ -1754,7 +1748,6 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
if (flags & IB_MR_REREG_PD) {
mr->ibmr.pd = pd;
- mr->mmkey.pd = to_mpd(pd)->pdn;
upd_flags |= MLX5_IB_UPD_XLT_PD;
}
if (flags & IB_MR_REREG_ACCESS) {
@@ -1763,8 +1756,8 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
}
mr->ibmr.length = new_umem->length;
- mr->mmkey.iova = iova;
- mr->mmkey.size = new_umem->length;
+ mr->ibmr.iova = iova;
+ mr->ibmr.length = new_umem->length;
mr->page_shift = order_base_2(page_size);
mr->umem = new_umem;
err = mlx5_ib_update_mr_pas(mr, upd_flags);
@@ -1834,7 +1827,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
mr->umem = NULL;
atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
- return create_real_mr(new_pd, umem, mr->mmkey.iova,
+ return create_real_mr(new_pd, umem, mr->ibmr.iova,
new_access_flags);
}
@@ -2263,9 +2256,9 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mw *mw = to_mmw(ibmw);
+ unsigned int ndescs;
u32 *in = NULL;
void *mkc;
- int ndescs;
int err;
struct mlx5_ib_alloc_mw req = {};
struct {
@@ -2310,7 +2303,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
mw->mmkey.type = MLX5_MKEY_MW;
ibmw->rkey = mw->mmkey.key;
- mw->ndescs = ndescs;
+ mw->mmkey.ndescs = ndescs;
resp.response_length =
min(offsetofend(typeof(resp), response_length), udata->outlen);
@@ -2330,7 +2323,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
return 0;
free_mkey:
- mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
+ mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
free:
kfree(in);
return err;
@@ -2349,7 +2342,7 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
*/
mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
- return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
+ return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
@@ -2406,7 +2399,7 @@ mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
mr->meta_length = 0;
if (data_sg_nents == 1) {
n++;
- mr->ndescs = 1;
+ mr->mmkey.ndescs = 1;
if (data_sg_offset)
sg_offset = *data_sg_offset;
mr->data_length = sg_dma_len(data_sg) - sg_offset;
@@ -2459,7 +2452,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
if (sg_offset_p)
*sg_offset_p = sg_offset;
- mr->ndescs = i;
+ mr->mmkey.ndescs = i;
mr->data_length = mr->ibmr.length;
if (meta_sg_nents) {
@@ -2492,11 +2485,11 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
struct mlx5_ib_mr *mr = to_mmr(ibmr);
__be64 *descs;
- if (unlikely(mr->ndescs == mr->max_descs))
+ if (unlikely(mr->mmkey.ndescs == mr->max_descs))
return -ENOMEM;
descs = mr->descs;
- descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+ descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
return 0;
}
@@ -2506,11 +2499,11 @@ static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
struct mlx5_ib_mr *mr = to_mmr(ibmr);
__be64 *descs;
- if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
+ if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
return -ENOMEM;
descs = mr->descs;
- descs[mr->ndescs + mr->meta_ndescs++] =
+ descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
return 0;
@@ -2526,7 +2519,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
int n;
- pi_mr->ndescs = 0;
+ pi_mr->mmkey.ndescs = 0;
pi_mr->meta_ndescs = 0;
pi_mr->meta_length = 0;
@@ -2560,7 +2553,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
* metadata offset at the first metadata page
*/
pi_mr->pi_iova = (iova & page_mask) +
- pi_mr->ndescs * ibmr->page_size +
+ pi_mr->mmkey.ndescs * ibmr->page_size +
(pi_mr->ibmr.iova & ~page_mask);
/*
* In order to use one MTT MR for data and metadata, we register
@@ -2591,7 +2584,7 @@ mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
struct mlx5_ib_mr *pi_mr = mr->klm_mr;
int n;
- pi_mr->ndescs = 0;
+ pi_mr->mmkey.ndescs = 0;
pi_mr->meta_ndescs = 0;
pi_mr->meta_length = 0;
@@ -2626,7 +2619,7 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
- mr->ndescs = 0;
+ mr->mmkey.ndescs = 0;
mr->data_length = 0;
mr->data_iova = 0;
mr->meta_ndescs = 0;
@@ -2682,7 +2675,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
struct mlx5_ib_mr *mr = to_mmr(ibmr);
int n;
- mr->ndescs = 0;
+ mr->mmkey.ndescs = 0;
ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
mr->desc_size * mr->max_descs,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d0d98e584ebc..91eb615b89ee 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -430,7 +430,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
- mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
mr->parent = imr;
odp->private = mr;
@@ -500,7 +500,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
}
imr->ibmr.pd = &pd->ibpd;
- imr->mmkey.iova = 0;
+ imr->ibmr.iova = 0;
imr->umem = &umem_odp->umem;
imr->ibmr.lkey = imr->mmkey.key;
imr->ibmr.rkey = imr->mmkey.key;
@@ -738,7 +738,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- if (unlikely(io_virt < mr->mmkey.iova))
+ if (unlikely(io_virt < mr->ibmr.iova))
return -EFAULT;
if (mr->umem->is_dmabuf)
@@ -747,7 +747,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
if (!odp->is_implicit_odp) {
u64 user_va;
- if (check_add_overflow(io_virt - mr->mmkey.iova,
+ if (check_add_overflow(io_virt - mr->ibmr.iova,
(u64)odp->umem.address, &user_va))
return -EFAULT;
if (unlikely(user_va >= ib_umem_end(odp) ||
@@ -788,7 +788,7 @@ struct pf_frame {
int depth;
};
-static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
+static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{
if (!mmkey)
return false;
@@ -797,21 +797,6 @@ static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
return mmkey->key == key;
}
-static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
-{
- struct mlx5_ib_mw *mw;
- struct mlx5_ib_devx_mr *devx_mr;
-
- if (mmkey->type == MLX5_MKEY_MW) {
- mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
- return mw->ndescs;
- }
-
- devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
- mmkey);
- return devx_mr->ndescs;
-}
-
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
@@ -831,12 +816,11 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
{
int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
struct pf_frame *head = NULL, *frame;
- struct mlx5_core_mkey *mmkey;
+ struct mlx5_ib_mkey *mmkey;
struct mlx5_ib_mr *mr;
struct mlx5_klm *pklm;
u32 *out = NULL;
size_t offset;
- int ndescs;
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
@@ -885,8 +869,6 @@ next_mr:
case MLX5_MKEY_MW:
case MLX5_MKEY_INDIRECT_DEVX:
- ndescs = get_indirect_num_descs(mmkey);
-
if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
mlx5_ib_dbg(dev, "indirection level exceeded\n");
ret = -EFAULT;
@@ -894,7 +876,7 @@ next_mr:
}
outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
- sizeof(*pklm) * (ndescs - 2);
+ sizeof(*pklm) * (mmkey->ndescs - 2);
if (outlen > cur_outlen) {
kfree(out);
@@ -909,14 +891,14 @@ next_mr:
pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
bsf0_klm0_pas_mtt0_1);
- ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
+ ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
if (ret)
goto end;
offset = io_virt - MLX5_GET64(query_mkey_out, out,
memory_key_mkey_entry.start_addr);
- for (i = 0; bcnt && i < ndescs; i++, pklm++) {
+ for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
if (offset >= be32_to_cpu(pklm->bcount)) {
offset -= be32_to_cpu(pklm->bcount);
continue;
@@ -1559,6 +1541,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_IB_NUM_PF_EQE,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
@@ -1703,25 +1686,31 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
u32 lkey)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr = NULL;
+ struct mlx5_ib_mkey *mmkey;
xa_lock(&dev->odp_mkeys);
mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
- if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+ if (!mmkey || mmkey->key != lkey) {
+ mr = ERR_PTR(-ENOENT);
goto end;
+ }
+ if (mmkey->type != MLX5_MKEY_MR) {
+ mr = ERR_PTR(-EINVAL);
+ goto end;
+ }
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
if (mr->ibmr.pd != pd) {
- mr = NULL;
+ mr = ERR_PTR(-EPERM);
goto end;
}
/* prefetch with write-access must be supported by the MR */
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
!mr->umem->writable) {
- mr = NULL;
+ mr = ERR_PTR(-EPERM);
goto end;
}
@@ -1753,7 +1742,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
destroy_prefetch_work(work);
}
-static bool init_prefetch_work(struct ib_pd *pd,
+static int init_prefetch_work(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 pf_flags, struct prefetch_mr_work *work,
struct ib_sge *sg_list, u32 num_sge)
@@ -1764,17 +1753,19 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->pf_flags = pf_flags;
for (i = 0; i < num_sge; ++i) {
- work->frags[i].io_virt = sg_list[i].addr;
- work->frags[i].length = sg_list[i].length;
- work->frags[i].mr =
- get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!work->frags[i].mr) {
+ struct mlx5_ib_mr *mr;
+
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (IS_ERR(mr)) {
work->num_sge = i;
- return false;
+ return PTR_ERR(mr);
}
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr = mr;
}
work->num_sge = num_sge;
- return true;
+ return 0;
}
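
get_prefetchable_mr() now reports why a lookup failed (-ENOENT, -EINVAL or -EPERM) instead of returning NULL, and init_prefetch_work() propagates that code. A hypothetical caller, assuming the lookup pins the mkey as the deref in the surrounding callers suggests:

static int example_lookup(struct ib_pd *pd, u32 lkey)
{
	struct mlx5_ib_mr *mr;

	mr = get_prefetchable_mr(pd, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH,
				 lkey);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* ... use mr ... */

	mlx5r_deref_odp_mkey(&mr->mmkey);
	return 0;
}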
static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
@@ -1790,8 +1781,8 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
struct mlx5_ib_mr *mr;
mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!mr)
- return -ENOENT;
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
&bytes_mapped, pf_flags);
if (ret < 0) {
@@ -1811,6 +1802,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
{
u32 pf_flags = 0;
struct prefetch_mr_work *work;
+ int rc;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
@@ -1826,9 +1818,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
if (!work)
return -ENOMEM;
- if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+ rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
+ if (rc) {
destroy_prefetch_work(work);
- return -EINVAL;
+ return rc;
}
queue_work(system_unbound_wq, &work->work);
return 0;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index b2fca110346c..e5abbcfc1d57 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4458,6 +4458,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+ MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
MLX5_ST_SZ_BYTES(create_dct_in), out,
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index 8841620af82f..51e48ca9016e 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -217,7 +217,7 @@ static __be64 sig_mkey_mask(void)
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
- int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+ int size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
memset(umr, 0, sizeof(*umr));
@@ -374,7 +374,7 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
struct mlx5_ib_mr *mr,
u32 key, int access)
{
- int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
+ int ndescs = ALIGN(mr->mmkey.ndescs + mr->meta_ndescs, 8) >> 1;
memset(seg, 0, sizeof(*seg));
@@ -439,7 +439,7 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
struct mlx5_ib_mr *mr,
struct mlx5_ib_pd *pd)
{
- int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
+ int bcount = mr->desc_size * (mr->mmkey.ndescs + mr->meta_ndescs);
dseg->addr = cpu_to_be64(mr->desc_map);
dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
@@ -861,7 +861,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
- int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+ int mr_list_size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
u8 flags = 0;
@@ -1111,7 +1111,7 @@ static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev,
memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr));
/* No UMR, use local_dma_lkey */
pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey;
- pa_pi_mr.ndescs = mr->ndescs;
+ pa_pi_mr.mmkey.ndescs = mr->mmkey.ndescs;
pa_pi_mr.data_length = mr->data_length;
pa_pi_mr.data_iova = mr->data_iova;
if (mr->meta_ndescs) {
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 755930be01b8..65ce6d0f1885 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -228,7 +228,6 @@ static const struct ib_device_ops qedr_dev_ops = {
.query_srq = qedr_query_srq,
.reg_user_mr = qedr_reg_user_mr,
.req_notify_cq = qedr_arm_cq,
- .resize_cq = qedr_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
@@ -272,7 +271,7 @@ static int qedr_register_device(struct qedr_dev *dev)
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 3cb4febaad0f..8def88cfa300 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -455,6 +455,7 @@ struct qedr_qp {
/* synchronization objects used with iwarp ep */
struct kref refcnt;
struct completion iwarp_cm_comp;
+ struct completion qp_rel_comp;
unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
};
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 1715fbe0719d..a51fc6854984 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -83,7 +83,7 @@ static void qedr_iw_free_qp(struct kref *ref)
{
struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
- kfree(qp);
+ complete(&qp->qp_rel_comp);
}
static void
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 3fbf172dbbef..9100009f0a23 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1052,16 +1052,6 @@ err0:
return -EINVAL;
}
-int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
-{
- struct qedr_dev *dev = get_qedr_dev(ibcq->device);
- struct qedr_cq *cq = get_qedr_cq(ibcq);
-
- DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
-
- return 0;
-}
-
#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
#define QEDR_DESTROY_CQ_ITER_DURATION (10)
@@ -1357,6 +1347,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
kref_init(&qp->refcnt);
init_completion(&qp->iwarp_cm_comp);
+ init_completion(&qp->qp_rel_comp);
}
qp->pd = pd;
@@ -2743,15 +2734,18 @@ int qedr_query_qp(struct ib_qp *ibqp,
int rc = 0;
memset(&params, 0, sizeof(params));
-
- rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
- if (rc)
- goto err;
-
memset(qp_attr, 0, sizeof(*qp_attr));
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
- qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+ if (rc)
+ goto err;
+ qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ } else {
+ qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
+ }
+
qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
qp_attr->path_mig_state = IB_MIG_MIGRATED;
@@ -2857,8 +2851,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qedr_free_qp_resources(dev, qp, udata);
- if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
qedr_iw_qp_rem_ref(&qp->ibqp);
+ wait_for_completion(&qp->qp_rel_comp);
+ }
return 0;
}
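
The kref-plus-completion handoff used here is the standard kernel pattern for "wait until the last user is gone before the owner releases the memory" (the QP allocation itself is owned by the RDMA core). A generic, self-contained sketch:

struct obj {
	struct kref refcnt;
	struct completion released;
};

static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, refcnt);

	complete(&o->released);	/* signal, do not free */
}

static void obj_teardown(struct obj *o)
{
	kref_put(&o->refcnt, obj_release);
	wait_for_completion(&o->released);
	/* no holders remain; the owner may now free o */
}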
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 031687dafc61..081753df79ef 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -53,7 +53,6 @@ int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int qedr_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 84fc4dcc5399..bf3fa12fe935 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2021 Cornelis Networks. All rights reserved.
* Copyright (c) 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -62,8 +63,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
"Attempt pre-IBTA 1.2 DDR speed negotiation");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel <ibsupport@intel.com>");
-MODULE_DESCRIPTION("Intel IB driver");
+MODULE_AUTHOR("Cornelis <support@cornelisnetworks.com>");
+MODULE_DESCRIPTION("Cornelis IB driver");
/*
* QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index a67599b5a550..ac11943a5ddb 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -602,7 +602,7 @@ done:
/*
* How many pages in this iovec element?
*/
-static int qib_user_sdma_num_pages(const struct iovec *iov)
+static size_t qib_user_sdma_num_pages(const struct iovec *iov)
{
const unsigned long addr = (unsigned long) iov->iov_base;
const unsigned long len = iov->iov_len;
@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
struct qib_user_sdma_queue *pq,
struct qib_user_sdma_pkt *pkt,
- unsigned long addr, int tlen, int npages)
+ unsigned long addr, int tlen, size_t npages)
{
struct page *pages[8];
int i, j;
@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
unsigned long idx;
for (idx = 0; idx < niov; idx++) {
- const int npages = qib_user_sdma_num_pages(iov + idx);
+ const size_t npages = qib_user_sdma_num_pages(iov + idx);
const unsigned long addr = (unsigned long) iov[idx].iov_base;
ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
unsigned pktnw;
unsigned pktnwc;
int nfrags = 0;
- int npages = 0;
- int bytes_togo = 0;
+ size_t npages = 0;
+ size_t bytes_togo = 0;
int tiddma = 0;
int cfur;
@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
npages += qib_user_sdma_num_pages(&iov[idx]);
- bytes_togo += slen;
+ if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
+ bytes_togo > type_max(typeof(pkt->bytes_togo))) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
pktnwc += slen >> 2;
idx++;
nfrags++;
@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
}
if (frag_size) {
- int tidsmsize, n;
- size_t pktsize;
+ size_t tidsmsize, n, pktsize, sz, addrlimit;
n = npages*((2*PAGE_SIZE/frag_size)+1);
pktsize = struct_size(pkt, addr, n);
@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
else
tidsmsize = 0;
- pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
+ if (check_add_overflow(pktsize, tidsmsize, &sz)) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+ pkt = kmalloc(sz, GFP_KERNEL);
if (!pkt) {
ret = -ENOMEM;
goto free_pbc;
}
pkt->largepkt = 1;
pkt->frag_size = frag_size;
- pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
+ if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
+ &addrlimit) ||
+ addrlimit > type_max(typeof(pkt->addrlimit))) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+ pkt->addrlimit = addrlimit;
if (tiddma) {
char *tidsm = (char *)pkt + pktsize;
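
These hunks harden user-controlled size arithmetic in the SDMA path: sums are checked for wrap-around before use and rejected if they cannot fit the field they are stored into. The bare pattern, with hypothetical types:

static int example_checked_sum(size_t a, size_t b, u16 *out)
{
	size_t sum;

	if (check_add_overflow(a, b, &sum) ||
	    sum > type_max(typeof(*out)))
		return -EINVAL;

	*out = sum;	/* provably fits */
	return 0;
}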
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
index 398c4c00b932..18a70850b738 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.c
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -103,7 +103,7 @@ void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev)
kfree(ufdev);
}
-void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, const char mac[ETH_ALEN])
{
spin_lock(&ufdev->lock);
memcpy(&ufdev->mac, mac, sizeof(ufdev->mac));
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
index f0b71d593da5..a91200886922 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.h
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -74,7 +74,7 @@ struct usnic_filter_action {
struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
-void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, const char mac[ETH_ALEN]);
void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 49bdd78ac664..3305f2744bfa 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1223,7 +1223,7 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
spin_lock(&rdi->n_qps_lock);
if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
spin_unlock(&rdi->n_qps_lock);
- ret = ENOMEM;
+ ret = -ENOMEM;
goto bail_ip;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index da2e867a1ed9..38c7b6fb39d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -101,11 +101,29 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
{
+ struct rxe_ah *ah;
+ u32 ah_num;
+
if (!pkt || !pkt->qp)
return NULL;
if (qp_type(pkt->qp) == IB_QPT_RC || qp_type(pkt->qp) == IB_QPT_UC)
return &pkt->qp->pri_av;
- return (pkt->wqe) ? &pkt->wqe->av : NULL;
+ if (!pkt->wqe)
+ return NULL;
+
+ ah_num = pkt->wqe->wr.wr.ud.ah_num;
+ if (ah_num) {
+ /* only the new user provider or a kernel client */
+ ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num);
+ if (!ah || ah->ah_num != ah_num || rxe_ah_pd(ah) != pkt->qp->pd) {
+ pr_warn("Unable to find AH matching ah_num\n");
+ return NULL;
+ }
+ return &ah->av;
+ }
+
+ /* only the old user provider for UD sends */
+ return &pkt->wqe->wr.wr.ud.av;
}
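
Because pool indices can be recycled, the lookup above does not trust the index alone: it re-checks the stored ah_num and the protection domain before using the object. A generic sketch of that validate-after-lookup pattern (the demo pool is illustrative, not the rxe pool API):

	#include <stdio.h>

	#define DEMO_POOL_SIZE 64

	struct demo_obj { unsigned int index; void *owner; };
	struct demo_pool { struct demo_obj *slots[DEMO_POOL_SIZE]; };

	static struct demo_obj *demo_lookup(struct demo_pool *p,
					    unsigned int index, void *owner)
	{
		struct demo_obj *obj;

		if (index >= DEMO_POOL_SIZE)
			return NULL;
		obj = p->slots[index];
		/* a recycled slot may hold a different object: verify identity */
		if (!obj || obj->index != index || obj->owner != owner)
			return NULL;
		return obj;
	}

	int main(void)
	{
		static struct demo_pool pool;
		static struct demo_obj obj = { .index = 7 };

		obj.owner = &pool;
		pool.slots[7] = &obj;
		printf("%d\n", demo_lookup(&pool, 7, &pool) == &obj);	/* 1 */
		printf("%d\n", demo_lookup(&pool, 7, NULL) == NULL);	/* 1 */
		return 0;
	}
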
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index d2d802c776fd..d771ba8449a1 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -142,10 +142,7 @@ static inline enum comp_state get_wqe(struct rxe_qp *qp,
/* we come here whether or not we found a response packet to see if
* there are any posted WQEs
*/
- if (qp->is_user)
- wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_USER);
- else
- wqe = queue_head(qp->sq.queue, QUEUE_TYPE_KERNEL);
+ wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
*wqe_p = wqe;
/* no WQE or requester has not started it yet */
@@ -383,30 +380,35 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_cqe *cqe)
{
+ struct ib_wc *wc = &cqe->ibwc;
+ struct ib_uverbs_wc *uwc = &cqe->uibwc;
+
memset(cqe, 0, sizeof(*cqe));
if (!qp->is_user) {
- struct ib_wc *wc = &cqe->ibwc;
-
- wc->wr_id = wqe->wr.wr_id;
- wc->status = wqe->status;
- wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
- wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->byte_len = wqe->dma.length;
- wc->qp = &qp->ibqp;
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = wqe->status;
+ wc->qp = &qp->ibqp;
} else {
- struct ib_uverbs_wc *uwc = &cqe->uibwc;
-
- uwc->wr_id = wqe->wr.wr_id;
- uwc->status = wqe->status;
- uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
- wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
- uwc->wc_flags = IB_WC_WITH_IMM;
- uwc->byte_len = wqe->dma.length;
- uwc->qp_num = qp->ibqp.qp_num;
+ uwc->wr_id = wqe->wr.wr_id;
+ uwc->status = wqe->status;
+ uwc->qp_num = qp->ibqp.qp_num;
+ }
+
+ if (wqe->status == IB_WC_SUCCESS) {
+ if (!qp->is_user) {
+ wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->byte_len = wqe->dma.length;
+ } else {
+ uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ uwc->wc_flags = IB_WC_WITH_IMM;
+ uwc->byte_len = wqe->dma.length;
+ }
}
}
@@ -432,10 +434,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
if (post)
make_send_cqe(qp, wqe, &cqe);
- if (qp->is_user)
- advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_USER);
- else
- advance_consumer(qp->sq.queue, QUEUE_TYPE_KERNEL);
+ queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
if (post)
rxe_cq_post(qp->scq, &cqe, 0);
@@ -539,7 +538,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
wqe->status = IB_WC_WR_FLUSH_ERR;
do_complete(qp, wqe);
} else {
- advance_consumer(q, q->type);
+ queue_advance_consumer(q, q->type);
}
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index aef288f164fd..6848426c074f 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -25,11 +25,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
}
if (cq) {
- if (cq->is_user)
- count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
- else
- count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
-
+ count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
pr_warn("cqe(%d) < current # elements in queue (%d)",
cqe, count);
@@ -65,7 +61,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int err;
enum queue_type type;
- type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
+ type = QUEUE_TYPE_TO_CLIENT;
cq->queue = rxe_queue_init(rxe, &cqe,
sizeof(struct rxe_cqe), type);
if (!cq->queue) {
@@ -81,8 +77,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
return err;
}
- if (uresp)
- cq->is_user = 1;
+ cq->is_user = uresp;
cq->is_dying = false;
@@ -117,11 +112,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
spin_lock_irqsave(&cq->cq_lock, flags);
- if (cq->is_user)
- full = queue_full(cq->queue, QUEUE_TYPE_TO_USER);
- else
- full = queue_full(cq->queue, QUEUE_TYPE_KERNEL);
-
+ full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
@@ -134,17 +125,10 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
return -EBUSY;
}
- if (cq->is_user)
- addr = producer_addr(cq->queue, QUEUE_TYPE_TO_USER);
- else
- addr = producer_addr(cq->queue, QUEUE_TYPE_KERNEL);
-
+ addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
memcpy(addr, cqe, sizeof(*cqe));
- if (cq->is_user)
- advance_producer(cq->queue, QUEUE_TYPE_TO_USER);
- else
- advance_producer(cq->queue, QUEUE_TYPE_KERNEL);
+ queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
spin_unlock_irqrestore(&cq->cq_lock, flags);
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index d5ceb706d964..a012522b577a 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -6,22 +6,22 @@
#include "rxe.h"
#include "rxe_hw_counters.h"
-static const char * const rxe_counter_name[] = {
- [RXE_CNT_SENT_PKTS] = "sent_pkts",
- [RXE_CNT_RCVD_PKTS] = "rcvd_pkts",
- [RXE_CNT_DUP_REQ] = "duplicate_request",
- [RXE_CNT_OUT_OF_SEQ_REQ] = "out_of_seq_request",
- [RXE_CNT_RCV_RNR] = "rcvd_rnr_err",
- [RXE_CNT_SND_RNR] = "send_rnr_err",
- [RXE_CNT_RCV_SEQ_ERR] = "rcvd_seq_err",
- [RXE_CNT_COMPLETER_SCHED] = "ack_deferred",
- [RXE_CNT_RETRY_EXCEEDED] = "retry_exceeded_err",
- [RXE_CNT_RNR_RETRY_EXCEEDED] = "retry_rnr_exceeded_err",
- [RXE_CNT_COMP_RETRY] = "completer_retry_err",
- [RXE_CNT_SEND_ERR] = "send_err",
- [RXE_CNT_LINK_DOWNED] = "link_downed",
- [RXE_CNT_RDMA_SEND] = "rdma_sends",
- [RXE_CNT_RDMA_RECV] = "rdma_recvs",
+static const struct rdma_stat_desc rxe_counter_descs[] = {
+ [RXE_CNT_SENT_PKTS].name = "sent_pkts",
+ [RXE_CNT_RCVD_PKTS].name = "rcvd_pkts",
+ [RXE_CNT_DUP_REQ].name = "duplicate_request",
+ [RXE_CNT_OUT_OF_SEQ_REQ].name = "out_of_seq_request",
+ [RXE_CNT_RCV_RNR].name = "rcvd_rnr_err",
+ [RXE_CNT_SND_RNR].name = "send_rnr_err",
+ [RXE_CNT_RCV_SEQ_ERR].name = "rcvd_seq_err",
+ [RXE_CNT_COMPLETER_SCHED].name = "ack_deferred",
+ [RXE_CNT_RETRY_EXCEEDED].name = "retry_exceeded_err",
+ [RXE_CNT_RNR_RETRY_EXCEEDED].name = "retry_rnr_exceeded_err",
+ [RXE_CNT_COMP_RETRY].name = "completer_retry_err",
+ [RXE_CNT_SEND_ERR].name = "send_err",
+ [RXE_CNT_LINK_DOWNED].name = "link_downed",
+ [RXE_CNT_RDMA_SEND].name = "rdma_sends",
+ [RXE_CNT_RDMA_RECV].name = "rdma_recvs",
};
int rxe_ib_get_hw_stats(struct ib_device *ibdev,
@@ -34,18 +34,18 @@ int rxe_ib_get_hw_stats(struct ib_device *ibdev,
if (!port || !stats)
return -EINVAL;
- for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_name); cnt++)
+ for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_descs); cnt++)
stats->value[cnt] = atomic64_read(&dev->stats_counters[cnt]);
- return ARRAY_SIZE(rxe_counter_name);
+ return ARRAY_SIZE(rxe_counter_descs);
}
struct rdma_hw_stats *rxe_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
- BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_name) != RXE_NUM_OF_COUNTERS);
+ BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_descs) != RXE_NUM_OF_COUNTERS);
- return rdma_alloc_hw_stats_struct(rxe_counter_name,
- ARRAY_SIZE(rxe_counter_name),
+ return rdma_alloc_hw_stats_struct(rxe_counter_descs,
+ ARRAY_SIZE(rxe_counter_descs),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
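
The table conversion relies on C99 designated initializers that address one member of one array element, [IDX].name = "...", leaving any other rdma_stat_desc fields zero-initialized. A stand-alone sketch of the construct (demo types, not the RDMA core):

	#include <stdio.h>

	struct demo_stat_desc {
		const char *name;
		unsigned int flags;	/* stays 0 unless explicitly set */
	};

	enum { DEMO_CNT_SENT, DEMO_CNT_RCVD, DEMO_NUM_COUNTERS };

	static const struct demo_stat_desc demo_descs[] = {
		[DEMO_CNT_SENT].name = "sent_pkts",
		[DEMO_CNT_RCVD].name = "rcvd_pkts",
	};

	int main(void)
	{
		printf("%s flags=%u\n", demo_descs[DEMO_CNT_RCVD].name,
		       demo_descs[DEMO_CNT_RCVD].flags);	/* rcvd_pkts flags=0 */
		return 0;
	}
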
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index f0c954575bde..1ca43b859d80 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -86,6 +86,8 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
+int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
+int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
void rxe_mr_cleanup(struct rxe_pool_entry *arg);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 5890a8246216..53271df10e47 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -24,17 +24,22 @@ u8 rxe_get_next_key(u32 last_key)
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
+ struct rxe_map_set *set = mr->cur_map_set;
+
switch (mr->type) {
- case RXE_MR_TYPE_DMA:
+ case IB_MR_TYPE_DMA:
return 0;
- case RXE_MR_TYPE_MR:
- if (iova < mr->iova || length > mr->length ||
- iova > mr->iova + mr->length - length)
+ case IB_MR_TYPE_USER:
+ case IB_MR_TYPE_MEM_REG:
+ if (iova < set->iova || length > set->length ||
+ iova > set->iova + set->length - length)
return -EFAULT;
return 0;
default:
+ pr_warn("%s: mr type (%d) not supported\n",
+ __func__, mr->type);
return -EFAULT;
}
}
@@ -48,48 +53,101 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1);
u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
- mr->ibmr.lkey = lkey;
- mr->ibmr.rkey = rkey;
+ /* Set ibmr->lkey/rkey and also copy into the private lkey/rkey.
+ * For user MRs these will always be the same; in cases where the
+ * caller 'owns' the key portion they may differ until the REG_MR
+ * WQE is executed.
+ */
+ mr->lkey = mr->ibmr.lkey = lkey;
+ mr->rkey = mr->ibmr.rkey = rkey;
+
mr->state = RXE_MR_STATE_INVALID;
- mr->type = RXE_MR_TYPE_NONE;
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}
-static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
+static void rxe_mr_free_map_set(int num_map, struct rxe_map_set *set)
{
int i;
- int num_map;
- struct rxe_map **map = mr->map;
- num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
+ for (i = 0; i < num_map; i++)
+ kfree(set->map[i]);
- mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
- if (!mr->map)
- goto err1;
+ kfree(set->map);
+ kfree(set);
+}
+
+static int rxe_mr_alloc_map_set(int num_map, struct rxe_map_set **setp)
+{
+ int i;
+ struct rxe_map_set *set;
+
+ set = kmalloc(sizeof(*set), GFP_KERNEL);
+ if (!set)
+ goto err_out;
+
+ set->map = kmalloc_array(num_map, sizeof(struct rxe_map *), GFP_KERNEL);
+ if (!set->map)
+ goto err_free_set;
for (i = 0; i < num_map; i++) {
- mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
- if (!mr->map[i])
- goto err2;
+ set->map[i] = kmalloc(sizeof(struct rxe_map), GFP_KERNEL);
+ if (!set->map[i])
+ goto err_free_map;
}
+ *setp = set;
+
+ return 0;
+
+err_free_map:
+ for (i--; i >= 0; i--)
+ kfree(set->map[i]);
+
+ kfree(set->map);
+err_free_set:
+ kfree(set);
+err_out:
+ return -ENOMEM;
+}
+
+/**
+ * rxe_mr_alloc() - Allocate memory map array(s) for MR
+ * @mr: Memory region
+ * @num_buf: Number of buffer descriptors to support
+ * @both: If non-zero, allocate both mr->cur_map_set and
+ * mr->next_map_set; else allocate only mr->cur_map_set.
+ * Used for fast MRs.
+ *
+ * Return: 0 on success, else an error
+ */
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
+{
+ int ret;
+ int num_map;
+
BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
+ num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
mr->map_mask = RXE_BUF_PER_MAP - 1;
-
mr->num_buf = num_buf;
- mr->num_map = num_map;
mr->max_buf = num_map * RXE_BUF_PER_MAP;
+ mr->num_map = num_map;
- return 0;
+ ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
+ if (ret)
+ goto err_out;
-err2:
- for (i--; i >= 0; i--)
- kfree(mr->map[i]);
+ if (both) {
+ ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
+ if (ret) {
+ rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+ goto err_out;
+ }
+ }
- kfree(mr->map);
-err1:
+ return 0;
+
+err_out:
return -ENOMEM;
}
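
rxe_mr_alloc_map_set() above is the classic allocation ladder with reverse-order unwind labels: each failure frees exactly what succeeded before it. A compact user-space sketch of the same shape, with malloc/free standing in for kmalloc/kfree (demo names only):

	#include <stdlib.h>

	struct demo_set { void **map; int num_map; };

	static int demo_alloc_set(int num_map, struct demo_set **setp)
	{
		struct demo_set *set;
		int i;

		set = malloc(sizeof(*set));
		if (!set)
			goto err_out;

		set->map = malloc(num_map * sizeof(void *));
		if (!set->map)
			goto err_free_set;

		for (i = 0; i < num_map; i++) {
			set->map[i] = malloc(4096);
			if (!set->map[i])
				goto err_free_map;
		}

		set->num_map = num_map;
		*setp = set;
		return 0;

	err_free_map:
		for (i--; i >= 0; i--)	/* free only what was allocated */
			free(set->map[i]);
		free(set->map);
	err_free_set:
		free(set);
	err_out:
		return -1;		/* -ENOMEM in the kernel version */
	}

	int main(void)
	{
		struct demo_set *set;

		return demo_alloc_set(4, &set);
	}
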
@@ -100,12 +158,13 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
mr->ibmr.pd = &pd->ibpd;
mr->access = access;
mr->state = RXE_MR_STATE_VALID;
- mr->type = RXE_MR_TYPE_DMA;
+ mr->type = IB_MR_TYPE_DMA;
}
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
+ struct rxe_map_set *set;
struct rxe_map **map;
struct rxe_phys_buf *buf = NULL;
struct ib_umem *umem;
@@ -113,7 +172,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int num_buf;
void *vaddr;
int err;
- int i;
umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
@@ -127,18 +185,20 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
rxe_mr_init(access, mr);
- err = rxe_mr_alloc(mr, num_buf);
+ err = rxe_mr_alloc(mr, num_buf, 0);
if (err) {
pr_warn("%s: Unable to allocate memory for map\n",
__func__);
goto err_release_umem;
}
- mr->page_shift = PAGE_SHIFT;
- mr->page_mask = PAGE_SIZE - 1;
+ set = mr->cur_map_set;
+ set->page_shift = PAGE_SHIFT;
+ set->page_mask = PAGE_SIZE - 1;
+
+ num_buf = 0;
+ map = set->map;
- num_buf = 0;
- map = mr->map;
if (length > 0) {
buf = map[0]->buf;
@@ -161,26 +221,24 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
buf->size = PAGE_SIZE;
num_buf++;
buf++;
-
}
}
mr->ibmr.pd = &pd->ibpd;
mr->umem = umem;
mr->access = access;
- mr->length = length;
- mr->iova = iova;
- mr->va = start;
- mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID;
- mr->type = RXE_MR_TYPE_MR;
+ mr->type = IB_MR_TYPE_USER;
+
+ set->length = length;
+ set->iova = iova;
+ set->va = start;
+ set->offset = ib_umem_offset(umem);
return 0;
err_cleanup_map:
- for (i = 0; i < mr->num_map; i++)
- kfree(mr->map[i]);
- kfree(mr->map);
+ rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
err_release_umem:
ib_umem_release(umem);
err_out:
@@ -191,19 +249,17 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
{
int err;
- rxe_mr_init(0, mr);
+ /* always allow remote access for FMRs */
+ rxe_mr_init(IB_ACCESS_REMOTE, mr);
- /* In fastreg, we also set the rkey */
- mr->ibmr.rkey = mr->ibmr.lkey;
-
- err = rxe_mr_alloc(mr, max_pages);
+ err = rxe_mr_alloc(mr, max_pages, 1);
if (err)
goto err1;
mr->ibmr.pd = &pd->ibpd;
mr->max_buf = max_pages;
mr->state = RXE_MR_STATE_FREE;
- mr->type = RXE_MR_TYPE_MR;
+ mr->type = IB_MR_TYPE_MEM_REG;
return 0;
@@ -214,21 +270,24 @@ err1:
static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
size_t *offset_out)
{
- size_t offset = iova - mr->iova + mr->offset;
+ struct rxe_map_set *set = mr->cur_map_set;
+ size_t offset = iova - set->iova + set->offset;
int map_index;
int buf_index;
u64 length;
+ struct rxe_map *map;
- if (likely(mr->page_shift)) {
- *offset_out = offset & mr->page_mask;
- offset >>= mr->page_shift;
+ if (likely(set->page_shift)) {
+ *offset_out = offset & set->page_mask;
+ offset >>= set->page_shift;
*n_out = offset & mr->map_mask;
*m_out = offset >> mr->map_shift;
} else {
map_index = 0;
buf_index = 0;
- length = mr->map[map_index]->buf[buf_index].size;
+ map = set->map[map_index];
+ length = map->buf[buf_index].size;
while (offset >= length) {
offset -= length;
@@ -238,7 +297,8 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
map_index++;
buf_index = 0;
}
- length = mr->map[map_index]->buf[buf_index].size;
+ map = set->map[map_index];
+ length = map->buf[buf_index].size;
}
*m_out = map_index;
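
When page_shift is set, the fast path of lookup_iova() is pure shift-and-mask arithmetic. A worked example under assumed parameters (PAGE_SHIFT = 12 and RXE_BUF_PER_MAP = 256, so map_shift = 8; the numbers are illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long offset = 0x1234567; /* byte offset into the MR */
		unsigned int page_shift = 12;	/* PAGE_SHIFT */
		unsigned int map_shift = 8;	/* ilog2(RXE_BUF_PER_MAP) */
		unsigned int page_mask = (1u << page_shift) - 1;
		unsigned int map_mask = (1u << map_shift) - 1;

		printf("in-page offset = 0x%llx\n", offset & page_mask); /* 0x567 */
		printf("buf index n = %llu\n",
		       (offset >> page_shift) & map_mask);	/* 0x34 = 52 */
		printf("map index m = %llu\n",
		       (offset >> page_shift) >> map_shift);	/* 0x12 = 18 */
		return 0;
	}
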
@@ -259,7 +319,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
goto out;
}
- if (!mr->map) {
+ if (!mr->cur_map_set) {
addr = (void *)(uintptr_t)iova;
goto out;
}
@@ -272,13 +332,13 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
lookup_iova(mr, iova, &m, &n, &offset);
- if (offset + length > mr->map[m]->buf[n].size) {
+ if (offset + length > mr->cur_map_set->map[m]->buf[n].size) {
pr_warn("crosses page boundary\n");
addr = NULL;
goto out;
}
- addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
+ addr = (void *)(uintptr_t)mr->cur_map_set->map[m]->buf[n].addr + offset;
out:
return addr;
@@ -302,7 +362,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
if (length == 0)
return 0;
- if (mr->type == RXE_MR_TYPE_DMA) {
+ if (mr->type == IB_MR_TYPE_DMA) {
u8 *src, *dest;
src = (dir == RXE_TO_MR_OBJ) ? addr : ((void *)(uintptr_t)iova);
@@ -314,7 +374,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return 0;
}
- WARN_ON_ONCE(!mr->map);
+ WARN_ON_ONCE(!mr->cur_map_set);
err = mr_check_range(mr, iova, length);
if (err) {
@@ -324,7 +384,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
lookup_iova(mr, iova, &m, &i, &offset);
- map = mr->map + m;
+ map = mr->cur_map_set->map + m;
buf = map[0]->buf + i;
while (length > 0) {
@@ -507,8 +567,8 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
if (!mr)
return NULL;
- if (unlikely((type == RXE_LOOKUP_LOCAL && mr_lkey(mr) != key) ||
- (type == RXE_LOOKUP_REMOTE && mr_rkey(mr) != key) ||
+ if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) ||
+ (type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||
mr_pd(mr) != pd || (access && !(access & mr->access)) ||
mr->state != RXE_MR_STATE_VALID)) {
rxe_drop_ref(mr);
@@ -531,9 +591,9 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
goto err;
}
- if (rkey != mr->ibmr.rkey) {
- pr_err("%s: rkey (%#x) doesn't match mr->ibmr.rkey (%#x)\n",
- __func__, rkey, mr->ibmr.rkey);
+ if (rkey != mr->rkey) {
+ pr_err("%s: rkey (%#x) doesn't match mr->rkey (%#x)\n",
+ __func__, rkey, mr->rkey);
ret = -EINVAL;
goto err_drop_ref;
}
@@ -545,6 +605,12 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
goto err_drop_ref;
}
+ if (unlikely(mr->type != IB_MR_TYPE_MEM_REG)) {
+ pr_warn("%s: mr->type (%d) is wrong type\n", __func__, mr->type);
+ ret = -EINVAL;
+ goto err_drop_ref;
+ }
+
mr->state = RXE_MR_STATE_FREE;
ret = 0;
@@ -554,6 +620,67 @@ err:
return ret;
}
+/* A user can (re)register a fast MR by executing a REG_MR WQE.
+ * The user is expected to hold a reference on the ib mr until the
+ * WQE completes.
+ * Once a fast MR is created this is the only way to change the
+ * private keys. It is the responsibility of the user to keep the
+ * ib mr keys in sync with the rxe mr keys.
+ */
+int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
+ u32 key = wqe->wr.wr.reg.key & 0xff;
+ u32 access = wqe->wr.wr.reg.access;
+ struct rxe_map_set *set;
+
+ /* user can only register MR in free state */
+ if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
+ pr_warn("%s: mr->lkey = 0x%x not free\n",
+ __func__, mr->lkey);
+ return -EINVAL;
+ }
+
+ /* user can only register mr with qp in same protection domain */
+ if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
+ pr_warn("%s: qp->pd and mr->pd don't match\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ mr->access = access;
+ mr->lkey = (mr->lkey & ~0xff) | key;
+ mr->rkey = (access & IB_ACCESS_REMOTE) ? mr->lkey : 0;
+ mr->state = RXE_MR_STATE_VALID;
+
+ set = mr->cur_map_set;
+ mr->cur_map_set = mr->next_map_set;
+ mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova;
+ mr->next_map_set = set;
+
+ return 0;
+}
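
The WQE supplies only the low eight key bits; the pool index living in the upper bits is preserved, so the masked OR re-keys the MR without changing which object the key names. A sketch of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int lkey = (0x1234 << 8) | 0xab; /* index 0x1234, key 0xab */
		unsigned int new_key = 0xcd;		  /* wr.reg.key & 0xff */

		lkey = (lkey & ~0xffu) | new_key;
		printf("lkey = 0x%x\n", lkey);	/* 0x1234cd: same index, new key */
		return 0;
	}
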
+
+int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct rxe_mr *mr = to_rmr(ibmr);
+ struct rxe_map_set *set = mr->next_map_set;
+ struct rxe_map *map;
+ struct rxe_phys_buf *buf;
+
+ if (unlikely(set->nbuf == mr->num_buf))
+ return -ENOMEM;
+
+ map = set->map[set->nbuf / RXE_BUF_PER_MAP];
+ buf = &map->buf[set->nbuf % RXE_BUF_PER_MAP];
+
+ buf->addr = addr;
+ buf->size = ibmr->page_size;
+ set->nbuf++;
+
+ return 0;
+}
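
rxe_mr_set_page() has the per-page callback shape the RDMA core drives from ib_map_mr_sg(): the core walks the scatterlist and hands each page address to the driver, which appends it to the staging map set. A minimal sketch of that append-with-capacity-check shape (demo types, not the ib_core signature):

	#include <stdio.h>

	struct demo_builder {
		unsigned long long *pages;
		unsigned int nbuf;
		unsigned int max_buf;
	};

	static int demo_set_page(struct demo_builder *b, unsigned long long addr)
	{
		/* refuse once the preallocated array is full, as above */
		if (b->nbuf == b->max_buf)
			return -1;	/* -ENOMEM in the driver */
		b->pages[b->nbuf++] = addr;
		return 0;
	}

	int main(void)
	{
		unsigned long long pages[2];
		struct demo_builder b = { .pages = pages, .max_buf = 2 };

		demo_set_page(&b, 0x1000);
		demo_set_page(&b, 0x2000);
		printf("%d\n", demo_set_page(&b, 0x3000));	/* -1: full */
		return 0;
	}
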
+
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct rxe_mr *mr = to_rmr(ibmr);
@@ -564,7 +691,7 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return -EINVAL;
}
- mr->state = RXE_MR_STATE_ZOMBIE;
+ mr->state = RXE_MR_STATE_INVALID;
rxe_drop_ref(mr_pd(mr));
rxe_drop_index(mr);
rxe_drop_ref(mr);
@@ -575,14 +702,12 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
void rxe_mr_cleanup(struct rxe_pool_entry *arg)
{
struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
- int i;
ib_umem_release(mr->umem);
- if (mr->map) {
- for (i = 0; i < mr->num_map; i++)
- kfree(mr->map[i]);
+ if (mr->cur_map_set)
+ rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
- kfree(mr->map);
- }
+ if (mr->next_map_set)
+ rxe_mr_free_map_set(mr->num_map, mr->next_map_set);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 5ba77df7598e..9534a7fe1a98 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -21,7 +21,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
}
rxe_add_index(mw);
- ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
+ mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);
@@ -71,6 +71,8 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_mw *mw, struct rxe_mr *mr)
{
+ u32 key = wqe->wr.wr.mw.rkey & 0xff;
+
if (mw->ibmw.type == IB_MW_TYPE_1) {
if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
pr_err_once(
@@ -108,7 +110,7 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
}
- if (unlikely((wqe->wr.wr.mw.rkey & 0xff) == (mw->ibmw.rkey & 0xff))) {
+ if (unlikely(key == (mw->rkey & 0xff))) {
pr_err_once("attempt to bind MW with same key\n");
return -EINVAL;
}
@@ -140,15 +142,15 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
/* C10-75 */
if (mw->access & IB_ZERO_BASED) {
- if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
+ if (unlikely(wqe->wr.wr.mw.length > mr->cur_map_set->length)) {
pr_err_once(
"attempt to bind a ZB MW outside of the MR\n");
return -EINVAL;
}
} else {
- if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
+ if (unlikely((wqe->wr.wr.mw.addr < mr->cur_map_set->iova) ||
((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
- (mr->iova + mr->length)))) {
+ (mr->cur_map_set->iova + mr->cur_map_set->length)))) {
pr_err_once(
"attempt to bind a VA MW outside of the MR\n");
return -EINVAL;
@@ -161,13 +163,9 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_mw *mw, struct rxe_mr *mr)
{
- u32 rkey;
- u32 new_rkey;
-
- rkey = mw->ibmw.rkey;
- new_rkey = (rkey & 0xffffff00) | (wqe->wr.wr.mw.rkey & 0x000000ff);
+ u32 key = wqe->wr.wr.mw.rkey & 0xff;
- mw->ibmw.rkey = new_rkey;
+ mw->rkey = (mw->rkey & ~0xff) | key;
mw->access = wqe->wr.wr.mw.access;
mw->state = RXE_MW_STATE_VALID;
mw->addr = wqe->wr.wr.mw.addr;
@@ -197,29 +195,29 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
struct rxe_mw *mw;
struct rxe_mr *mr;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
+ u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
unsigned long flags;
- mw = rxe_pool_get_index(&rxe->mw_pool,
- wqe->wr.wr.mw.mw_rkey >> 8);
+ mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
if (unlikely(!mw)) {
ret = -EINVAL;
goto err;
}
- if (unlikely(mw->ibmw.rkey != wqe->wr.wr.mw.mw_rkey)) {
+ if (unlikely(mw->rkey != mw_rkey)) {
ret = -EINVAL;
goto err_drop_mw;
}
if (likely(wqe->wr.wr.mw.length)) {
- mr = rxe_pool_get_index(&rxe->mr_pool,
- wqe->wr.wr.mw.mr_lkey >> 8);
+ mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
if (unlikely(!mr)) {
ret = -EINVAL;
goto err_drop_mw;
}
- if (unlikely(mr->ibmr.lkey != wqe->wr.wr.mw.mr_lkey)) {
+ if (unlikely(mr->lkey != mr_lkey)) {
ret = -EINVAL;
goto err_drop_mr;
}
@@ -292,7 +290,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
goto err;
}
- if (rkey != mw->ibmw.rkey) {
+ if (rkey != mw->rkey) {
ret = -EINVAL;
goto err_drop_ref;
}
@@ -323,7 +321,7 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
if (!mw)
return NULL;
- if (unlikely((rxe_mw_rkey(mw) != rkey) || rxe_mw_pd(mw) != pd ||
+ if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
(mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
(mw->length == 0) ||
(access && !(access & mw->access)) ||
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h
index e02f039b8c44..8f9aaaf260f2 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.h
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.h
@@ -22,7 +22,6 @@ enum rxe_wr_mask {
WR_LOCAL_OP_MASK = BIT(5),
WR_READ_OR_WRITE_MASK = WR_READ_MASK | WR_WRITE_MASK,
- WR_READ_WRITE_OR_SEND_MASK = WR_READ_OR_WRITE_MASK | WR_SEND_MASK,
WR_WRITE_OR_SEND_MASK = WR_WRITE_MASK | WR_SEND_MASK,
WR_ATOMIC_OR_READ_MASK = WR_ATOMIC_MASK | WR_READ_MASK,
};
@@ -82,8 +81,9 @@ enum rxe_hdr_mask {
RXE_LOOPBACK_MASK = BIT(NUM_HDR_TYPES + 12),
- RXE_READ_OR_ATOMIC = (RXE_READ_MASK | RXE_ATOMIC_MASK),
- RXE_WRITE_OR_SEND = (RXE_WRITE_MASK | RXE_SEND_MASK),
+ RXE_READ_OR_ATOMIC_MASK = (RXE_READ_MASK | RXE_ATOMIC_MASK),
+ RXE_WRITE_OR_SEND_MASK = (RXE_WRITE_MASK | RXE_SEND_MASK),
+ RXE_READ_OR_WRITE_MASK = (RXE_READ_MASK | RXE_WRITE_MASK),
};
#define OPCODE_NONE (-1)
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 742e6ec93686..918270e34a35 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -9,6 +9,8 @@
#include <uapi/rdma/rdma_user_rxe.h>
+#define DEFAULT_MAX_VALUE (1 << 20)
+
static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)
{
if (mtu < 256)
@@ -37,7 +39,7 @@ static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)
enum rxe_device_param {
RXE_MAX_MR_SIZE = -1ull,
RXE_PAGE_SIZE_CAP = 0xfffff000,
- RXE_MAX_QP_WR = 0x4000,
+ RXE_MAX_QP_WR = DEFAULT_MAX_VALUE,
RXE_DEVICE_CAP_FLAGS = IB_DEVICE_BAD_PKEY_CNTR
| IB_DEVICE_BAD_QKEY_CNTR
| IB_DEVICE_AUTO_PATH_MIG
@@ -58,42 +60,44 @@ enum rxe_device_param {
RXE_MAX_INLINE_DATA = RXE_MAX_WQE_SIZE -
sizeof(struct rxe_send_wqe),
RXE_MAX_SGE_RD = 32,
- RXE_MAX_CQ = 16384,
+ RXE_MAX_CQ = DEFAULT_MAX_VALUE,
RXE_MAX_LOG_CQE = 15,
- RXE_MAX_PD = 0x7ffc,
+ RXE_MAX_PD = DEFAULT_MAX_VALUE,
RXE_MAX_QP_RD_ATOM = 128,
RXE_MAX_RES_RD_ATOM = 0x3f000,
RXE_MAX_QP_INIT_RD_ATOM = 128,
RXE_MAX_MCAST_GRP = 8192,
RXE_MAX_MCAST_QP_ATTACH = 56,
RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000,
- RXE_MAX_AH = 100,
- RXE_MAX_SRQ_WR = 0x4000,
+ RXE_MAX_AH = (1<<15) - 1, /* 32Ki - 1 */
+ RXE_MIN_AH_INDEX = 1,
+ RXE_MAX_AH_INDEX = RXE_MAX_AH,
+ RXE_MAX_SRQ_WR = DEFAULT_MAX_VALUE,
RXE_MIN_SRQ_WR = 1,
RXE_MAX_SRQ_SGE = 27,
RXE_MIN_SRQ_SGE = 1,
RXE_MAX_FMR_PAGE_LIST_LEN = 512,
- RXE_MAX_PKEYS = 1,
+ RXE_MAX_PKEYS = 64,
RXE_LOCAL_CA_ACK_DELAY = 15,
- RXE_MAX_UCONTEXT = 512,
+ RXE_MAX_UCONTEXT = DEFAULT_MAX_VALUE,
RXE_NUM_PORT = 1,
- RXE_MAX_QP = 0x10000,
RXE_MIN_QP_INDEX = 16,
- RXE_MAX_QP_INDEX = 0x00020000,
+ RXE_MAX_QP_INDEX = DEFAULT_MAX_VALUE,
+ RXE_MAX_QP = DEFAULT_MAX_VALUE - RXE_MIN_QP_INDEX,
- RXE_MAX_SRQ = 0x00001000,
RXE_MIN_SRQ_INDEX = 0x00020001,
- RXE_MAX_SRQ_INDEX = 0x00040000,
+ RXE_MAX_SRQ_INDEX = DEFAULT_MAX_VALUE,
+ RXE_MAX_SRQ = DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
- RXE_MAX_MR = 0x00001000,
- RXE_MAX_MW = 0x00001000,
RXE_MIN_MR_INDEX = 0x00000001,
- RXE_MAX_MR_INDEX = 0x00010000,
+ RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE,
+ RXE_MAX_MR = DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX,
RXE_MIN_MW_INDEX = 0x00010001,
RXE_MAX_MW_INDEX = 0x00020000,
+ RXE_MAX_MW = 0x00001000,
RXE_MAX_PKT_PER_ACK = 64,
@@ -113,7 +117,7 @@ enum rxe_device_param {
/* default/initial rxe port parameters */
enum rxe_port_param {
RXE_PORT_GID_TBL_LEN = 1024,
- RXE_PORT_PORT_CAP_FLAGS = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP,
+ RXE_PORT_PORT_CAP_FLAGS = IB_PORT_CM_SUP,
RXE_PORT_MAX_MSG_SZ = 0x800000,
RXE_PORT_BAD_PKEY_CNTR = 0,
RXE_PORT_QKEY_VIOL_CNTR = 0,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index ffa8420b4765..2e80bb6aa957 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -7,9 +7,17 @@
#include "rxe.h"
#include "rxe_loc.h"
-/* info about object pools
- */
-struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
+static const struct rxe_type_info {
+ const char *name;
+ size_t size;
+ size_t elem_offset;
+ void (*cleanup)(struct rxe_pool_entry *obj);
+ enum rxe_pool_flags flags;
+ u32 min_index;
+ u32 max_index;
+ size_t key_offset;
+ size_t key_size;
+} rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_UC] = {
.name = "rxe-uc",
.size = sizeof(struct rxe_ucontext),
@@ -26,7 +34,9 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
.name = "rxe-ah",
.size = sizeof(struct rxe_ah),
.elem_offset = offsetof(struct rxe_ah, pelem),
- .flags = RXE_POOL_NO_ALLOC,
+ .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
+ .min_index = RXE_MIN_AH_INDEX,
+ .max_index = RXE_MAX_AH_INDEX,
},
[RXE_TYPE_SRQ] = {
.name = "rxe-srq",
@@ -58,8 +68,8 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
.elem_offset = offsetof(struct rxe_mr, pelem),
.cleanup = rxe_mr_cleanup,
.flags = RXE_POOL_INDEX,
- .max_index = RXE_MAX_MR_INDEX,
.min_index = RXE_MIN_MR_INDEX,
+ .max_index = RXE_MAX_MR_INDEX,
},
[RXE_TYPE_MW] = {
.name = "rxe-mw",
@@ -67,8 +77,8 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
.elem_offset = offsetof(struct rxe_mw, pelem),
.cleanup = rxe_mw_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
- .max_index = RXE_MAX_MW_INDEX,
.min_index = RXE_MIN_MW_INDEX,
+ .max_index = RXE_MAX_MW_INDEX,
},
[RXE_TYPE_MC_GRP] = {
.name = "rxe-mc_grp",
@@ -94,7 +104,6 @@ static inline const char *pool_name(struct rxe_pool *pool)
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
int err = 0;
- size_t size;
if ((max - min + 1) < pool->max_elem) {
pr_warn("not enough indices for max_elem\n");
@@ -105,16 +114,12 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
pool->index.max_index = max;
pool->index.min_index = min;
- size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
- pool->index.table = kmalloc(size, GFP_KERNEL);
+ pool->index.table = bitmap_zalloc(max - min + 1, GFP_KERNEL);
if (!pool->index.table) {
err = -ENOMEM;
goto out;
}
- pool->index.table_size = size;
- bitmap_zero(pool->index.table, max - min + 1);
-
out:
return err;
}
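
bitmap_zalloc() folds the size computation in longs, the allocation and the zeroing into a single call, paired with bitmap_free() below. A user-space analogue of what the removed lines did by hand (calloc standing in for a zeroing kmalloc):

	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BITS_TO_LONGS(n) (((n) + CHAR_BIT * sizeof(long) - 1) / \
				  (CHAR_BIT * sizeof(long)))

	static unsigned long *demo_bitmap_zalloc(unsigned int nbits)
	{
		/* one zeroed allocation sized in longs */
		return calloc(BITS_TO_LONGS(nbits), sizeof(long));
	}

	int main(void)
	{
		unsigned long *bm = demo_bitmap_zalloc(200);

		printf("%d\n", bm != NULL);	/* 1 on success */
		free(bm);
		return 0;
	}
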
@@ -166,7 +171,7 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
pr_warn("%s pool destroyed with unfree'd elem\n",
pool_name(pool));
- kfree(pool->index.table);
+ bitmap_free(pool->index.table);
}
static u32 alloc_index(struct rxe_pool *pool)
@@ -327,7 +332,7 @@ void __rxe_drop_index(struct rxe_pool_entry *elem)
void *rxe_alloc_locked(struct rxe_pool *pool)
{
- struct rxe_type_info *info = &rxe_type_info[pool->type];
+ const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rxe_pool_entry *elem;
u8 *obj;
@@ -352,7 +357,7 @@ out_cnt:
void *rxe_alloc(struct rxe_pool *pool)
{
- struct rxe_type_info *info = &rxe_type_info[pool->type];
+ const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rxe_pool_entry *elem;
u8 *obj;
@@ -395,7 +400,7 @@ void rxe_elem_release(struct kref *kref)
struct rxe_pool_entry *elem =
container_of(kref, struct rxe_pool_entry, ref_cnt);
struct rxe_pool *pool = elem->pool;
- struct rxe_type_info *info = &rxe_type_info[pool->type];
+ const struct rxe_type_info *info = &rxe_type_info[pool->type];
u8 *obj;
if (pool->cleanup)
@@ -411,7 +416,7 @@ void rxe_elem_release(struct kref *kref)
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
- struct rxe_type_info *info = &rxe_type_info[pool->type];
+ const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
struct rxe_pool_entry *elem;
u8 *obj;
@@ -453,7 +458,7 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
- struct rxe_type_info *info = &rxe_type_info[pool->type];
+ const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
struct rxe_pool_entry *elem;
u8 *obj;
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 1feca1bffced..8ecd9f870aea 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -32,20 +32,6 @@ enum rxe_elem_type {
struct rxe_pool_entry;
-struct rxe_type_info {
- const char *name;
- size_t size;
- size_t elem_offset;
- void (*cleanup)(struct rxe_pool_entry *obj);
- enum rxe_pool_flags flags;
- u32 max_index;
- u32 min_index;
- size_t key_offset;
- size_t key_size;
-};
-
-extern struct rxe_type_info rxe_type_info[];
-
struct rxe_pool_entry {
struct rxe_pool *pool;
struct kref ref_cnt;
@@ -74,7 +60,6 @@ struct rxe_pool {
struct {
struct rb_root tree;
unsigned long *table;
- size_t table_size;
u32 last;
u32 max_index;
u32 min_index;
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 1ab6af7ddb25..975321812c87 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -190,8 +190,6 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
INIT_LIST_HEAD(&qp->grp_list);
- skb_queue_head_init(&qp->send_pkts);
-
spin_lock_init(&qp->grp_lock);
spin_lock_init(&qp->state_lock);
@@ -231,7 +229,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
wqe_size += sizeof(struct rxe_send_wqe);
- type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+ type = QUEUE_TYPE_FROM_CLIENT;
qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
wqe_size, type);
if (!qp->sq.queue)
@@ -248,12 +246,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
}
- if (qp->is_user)
- qp->req.wqe_index = producer_index(qp->sq.queue,
- QUEUE_TYPE_FROM_USER);
- else
- qp->req.wqe_index = producer_index(qp->sq.queue,
- QUEUE_TYPE_KERNEL);
+ qp->req.wqe_index = queue_get_producer(qp->sq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
qp->req.state = QP_STATE_RESET;
qp->req.opcode = -1;
@@ -293,7 +287,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
- type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+ type = QUEUE_TYPE_FROM_CLIENT;
qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
wqe_size, type);
if (!qp->rq.queue)
@@ -313,8 +307,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
spin_lock_init(&qp->rq.producer_lock);
spin_lock_init(&qp->rq.consumer_lock);
- qp->rq.is_user = qp->is_user;
-
skb_queue_head_init(&qp->resp_pkts);
rxe_init_task(rxe, &qp->resp.task, qp,
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 72d95398e604..6e6e023c1b45 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -111,17 +111,33 @@ err1:
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
unsigned int num_elem)
{
- if (!queue_empty(q, q->type) && (num_elem < queue_count(q, q->type)))
+ enum queue_type type = q->type;
+ u32 prod;
+ u32 cons;
+
+ if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type)))
return -EINVAL;
- while (!queue_empty(q, q->type)) {
- memcpy(producer_addr(new_q, new_q->type),
- consumer_addr(q, q->type),
- new_q->elem_size);
- advance_producer(new_q, new_q->type);
- advance_consumer(q, q->type);
+ prod = queue_get_producer(new_q, type);
+ cons = queue_get_consumer(q, type);
+
+ while (!queue_empty(q, type)) {
+ memcpy(queue_addr_from_index(new_q, prod),
+ queue_addr_from_index(q, cons), new_q->elem_size);
+ prod = queue_next_index(new_q, prod);
+ cons = queue_next_index(q, cons);
}
+ new_q->buf->producer_index = prod;
+ q->buf->consumer_index = cons;
+
+ /* update private index copies */
+ if (type == QUEUE_TYPE_TO_CLIENT)
+ new_q->index = new_q->buf->producer_index;
+ else
+ q->index = q->buf->consumer_index;
+
+ /* exchange rxe_queue headers */
swap(*q, *new_q);
return 0;
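
The closing swap(*q, *new_q) exchanges the queue headers by value rather than swapping pointers: existing holders of the struct rxe_queue pointer transparently see the new, larger storage, while the old storage rides out in new_q for the caller to free. A sketch of that by-value exchange (demo type only):

	#include <stdio.h>

	struct demo_ring { void *buf; unsigned int index_mask; };

	static void demo_swap_headers(struct demo_ring *q, struct demo_ring *new_q)
	{
		struct demo_ring tmp = *q;

		*q = *new_q;	/* users of q now see the new storage */
		*new_q = tmp;	/* old storage goes back for cleanup */
	}

	int main(void)
	{
		struct demo_ring a = { .index_mask = 7 };
		struct demo_ring b = { .index_mask = 15 };

		demo_swap_headers(&a, &b);
		printf("%u %u\n", a.index_mask, b.index_mask);	/* 15 7 */
		return 0;
	}
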
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 2702b0e55fc3..6227112ef7a2 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -10,34 +10,47 @@
/* for definition of shared struct rxe_queue_buf */
#include <uapi/rdma/rdma_user_rxe.h>
-/* implements a simple circular buffer that can optionally be
- * shared between user space and the kernel and can be resized
- * the requested element size is rounded up to a power of 2
- * and the number of elements in the buffer is also rounded
- * up to a power of 2. Since the queue is empty when the
- * producer and consumer indices match the maximum capacity
- * of the queue is one less than the number of element slots
+/* Implements a simple circular buffer that is shared between the
+ * user and the driver and can be resized. The requested element size
+ * is rounded up to a power of 2 and the number of elements in the
+ * buffer is also rounded up to a power of 2. Since the queue is empty
+ * when the producer and consumer indices match, the maximum capacity
+ * of the queue is one less than the number of element slots.
*
* Notes:
- * - Kernel space indices are always masked off to q->index_mask
- * before storing so do not need to be checked on reads.
- * - User space indices may be out of range and must be
- * masked before use when read.
- * - The kernel indices for shared queues must not be written
- * by user space so a local copy is used and a shared copy is
- * stored when the local copy changes.
+ * - The driver indices are always masked off to q->index_mask
+ *   before storing, so they do not need to be checked on reads.
+ * - The user, whether user space or a kernel client, is generally
+ *   not trusted, so its parameters are masked to make sure
+ *   they do not access the queue out of bounds on reads.
+ * - The driver indices for queues must not be written
+ *   by the user, so a local copy is used and a shared copy is
+ *   stored when the local copy changes.
* - By passing the type in the parameter list separate from q
- * the compiler can eliminate the switch statement when the
- * actual queue type is known when the function is called.
- * In the performance path this is done. In less critical
- * paths just q->type is passed.
+ * the compiler can eliminate the switch statement when the
+ * actual queue type is known at compile time.
+ * - These queues are lock-free. The user and driver must protect
+ * changes to their end of the queues with locks if more than one
+ * CPU can access that end at the same time.
*/
-/* type of queue */
+/**
+ * enum queue_type - type of queue
+ * @QUEUE_TYPE_TO_CLIENT: Queue is written by rxe driver and
+ * read by client. Used by rxe driver only.
+ * @QUEUE_TYPE_FROM_CLIENT: Queue is written by client and
+ * read by rxe driver. Used by rxe driver only.
+ * @QUEUE_TYPE_TO_DRIVER: Queue is written by client and
+ * read by rxe driver. Used by kernel client only.
+ * @QUEUE_TYPE_FROM_DRIVER: Queue is written by rxe driver and
+ * read by client. Used by kernel client only.
+ */
enum queue_type {
- QUEUE_TYPE_KERNEL,
- QUEUE_TYPE_TO_USER,
- QUEUE_TYPE_FROM_USER,
+ QUEUE_TYPE_TO_CLIENT,
+ QUEUE_TYPE_FROM_CLIENT,
+ QUEUE_TYPE_TO_DRIVER,
+ QUEUE_TYPE_FROM_DRIVER,
};
struct rxe_queue {
@@ -69,238 +82,171 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
unsigned int elem_size, struct ib_udata *udata,
struct mminfo __user *outbuf,
- /* Protect producers while resizing queue */
- spinlock_t *producer_lock,
- /* Protect consumers while resizing queue */
- spinlock_t *consumer_lock);
+ spinlock_t *producer_lock, spinlock_t *consumer_lock);
void rxe_queue_cleanup(struct rxe_queue *queue);
-static inline int next_index(struct rxe_queue *q, int index)
+static inline u32 queue_next_index(struct rxe_queue *q, int index)
{
- return (index + 1) & q->buf->index_mask;
+ return (index + 1) & q->index_mask;
}
-static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
+static inline u32 queue_get_producer(const struct rxe_queue *q,
+ enum queue_type type)
{
u32 prod;
- u32 cons;
switch (type) {
- case QUEUE_TYPE_FROM_USER:
- /* protect user space index */
+ case QUEUE_TYPE_FROM_CLIENT:
+ /* protect user index */
prod = smp_load_acquire(&q->buf->producer_index);
- cons = q->index;
break;
- case QUEUE_TYPE_TO_USER:
+ case QUEUE_TYPE_TO_CLIENT:
prod = q->index;
- /* protect user space index */
- cons = smp_load_acquire(&q->buf->consumer_index);
break;
- case QUEUE_TYPE_KERNEL:
+ case QUEUE_TYPE_FROM_DRIVER:
+ /* protect driver index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ break;
+ case QUEUE_TYPE_TO_DRIVER:
prod = q->buf->producer_index;
- cons = q->buf->consumer_index;
break;
}
- return ((prod - cons) & q->index_mask) == 0;
+ return prod;
}
-static inline int queue_full(struct rxe_queue *q, enum queue_type type)
+static inline u32 queue_get_consumer(const struct rxe_queue *q,
+ enum queue_type type)
{
- u32 prod;
u32 cons;
switch (type) {
- case QUEUE_TYPE_FROM_USER:
- /* protect user space index */
- prod = smp_load_acquire(&q->buf->producer_index);
+ case QUEUE_TYPE_FROM_CLIENT:
cons = q->index;
break;
- case QUEUE_TYPE_TO_USER:
- prod = q->index;
- /* protect user space index */
+ case QUEUE_TYPE_TO_CLIENT:
+ /* protect user index */
cons = smp_load_acquire(&q->buf->consumer_index);
break;
- case QUEUE_TYPE_KERNEL:
- prod = q->buf->producer_index;
+ case QUEUE_TYPE_FROM_DRIVER:
cons = q->buf->consumer_index;
break;
+ case QUEUE_TYPE_TO_DRIVER:
+ /* protect driver index */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+ break;
}
- return ((prod + 1 - cons) & q->index_mask) == 0;
+ return cons;
}
-static inline unsigned int queue_count(const struct rxe_queue *q,
- enum queue_type type)
+static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
{
- u32 prod;
- u32 cons;
-
- switch (type) {
- case QUEUE_TYPE_FROM_USER:
- /* protect user space index */
- prod = smp_load_acquire(&q->buf->producer_index);
- cons = q->index;
- break;
- case QUEUE_TYPE_TO_USER:
- prod = q->index;
- /* protect user space index */
- cons = smp_load_acquire(&q->buf->consumer_index);
- break;
- case QUEUE_TYPE_KERNEL:
- prod = q->buf->producer_index;
- cons = q->buf->consumer_index;
- break;
- }
+ u32 prod = queue_get_producer(q, type);
+ u32 cons = queue_get_consumer(q, type);
- return (prod - cons) & q->index_mask;
+ return ((prod - cons) & q->index_mask) == 0;
}
-static inline void advance_producer(struct rxe_queue *q, enum queue_type type)
+static inline int queue_full(struct rxe_queue *q, enum queue_type type)
{
- u32 prod;
+ u32 prod = queue_get_producer(q, type);
+ u32 cons = queue_get_consumer(q, type);
- switch (type) {
- case QUEUE_TYPE_FROM_USER:
- pr_warn_once("Normally kernel should not write user space index\n");
- /* protect user space index */
- prod = smp_load_acquire(&q->buf->producer_index);
- prod = (prod + 1) & q->index_mask;
- /* same */
- smp_store_release(&q->buf->producer_index, prod);
- break;
- case QUEUE_TYPE_TO_USER:
- prod = q->index;
- q->index = (prod + 1) & q->index_mask;
- q->buf->producer_index = q->index;
- break;
- case QUEUE_TYPE_KERNEL:
- prod = q->buf->producer_index;
- q->buf->producer_index = (prod + 1) & q->index_mask;
- break;
- }
+ return ((prod + 1 - cons) & q->index_mask) == 0;
}
-static inline void advance_consumer(struct rxe_queue *q, enum queue_type type)
+static inline u32 queue_count(const struct rxe_queue *q,
+ enum queue_type type)
{
- u32 cons;
+ u32 prod = queue_get_producer(q, type);
+ u32 cons = queue_get_consumer(q, type);
- switch (type) {
- case QUEUE_TYPE_FROM_USER:
- cons = q->index;
- q->index = (cons + 1) & q->index_mask;
- q->buf->consumer_index = q->index;
- break;
- case QUEUE_TYPE_TO_USER:
- pr_warn_once("Normally kernel should not write user space index\n");
- /* protect user space index */
- cons = smp_load_acquire(&q->buf->consumer_index);
- cons = (cons + 1) & q->index_mask;
- /* same */
- smp_store_release(&q->buf->consumer_index, cons);
- break;
- case QUEUE_TYPE_KERNEL:
- cons = q->buf->consumer_index;
- q->buf->consumer_index = (cons + 1) & q->index_mask;
- break;
- }
+ return (prod - cons) & q->index_mask;
}
-static inline void *producer_addr(struct rxe_queue *q, enum queue_type type)
+static inline void queue_advance_producer(struct rxe_queue *q,
+ enum queue_type type)
{
u32 prod;
switch (type) {
- case QUEUE_TYPE_FROM_USER:
- /* protect user space index */
- prod = smp_load_acquire(&q->buf->producer_index);
- prod &= q->index_mask;
+ case QUEUE_TYPE_FROM_CLIENT:
+ pr_warn("%s: attempt to advance client index\n",
+ __func__);
break;
- case QUEUE_TYPE_TO_USER:
+ case QUEUE_TYPE_TO_CLIENT:
prod = q->index;
+ prod = (prod + 1) & q->index_mask;
+ q->index = prod;
+ /* protect user index */
+ smp_store_release(&q->buf->producer_index, prod);
+ break;
+ case QUEUE_TYPE_FROM_DRIVER:
+ pr_warn("%s: attempt to advance driver index\n",
+ __func__);
break;
- case QUEUE_TYPE_KERNEL:
+ case QUEUE_TYPE_TO_DRIVER:
prod = q->buf->producer_index;
+ prod = (prod + 1) & q->index_mask;
+ q->buf->producer_index = prod;
break;
}
-
- return q->buf->data + (prod << q->log2_elem_size);
}
-static inline void *consumer_addr(struct rxe_queue *q, enum queue_type type)
+static inline void queue_advance_consumer(struct rxe_queue *q,
+ enum queue_type type)
{
u32 cons;
switch (type) {
- case QUEUE_TYPE_FROM_USER:
+ case QUEUE_TYPE_FROM_CLIENT:
cons = q->index;
+ cons = (cons + 1) & q->index_mask;
+ q->index = cons;
+ /* protect user index */
+ smp_store_release(&q->buf->consumer_index, cons);
break;
- case QUEUE_TYPE_TO_USER:
- /* protect user space index */
- cons = smp_load_acquire(&q->buf->consumer_index);
- cons &= q->index_mask;
+ case QUEUE_TYPE_TO_CLIENT:
+ pr_warn("%s: attempt to advance client index\n",
+ __func__);
break;
- case QUEUE_TYPE_KERNEL:
+ case QUEUE_TYPE_FROM_DRIVER:
cons = q->buf->consumer_index;
+ cons = (cons + 1) & q->index_mask;
+ q->buf->consumer_index = cons;
+ break;
+ case QUEUE_TYPE_TO_DRIVER:
+ pr_warn("%s: attempt to advance driver index\n",
+ __func__);
break;
}
-
- return q->buf->data + (cons << q->log2_elem_size);
}
-static inline unsigned int producer_index(struct rxe_queue *q,
- enum queue_type type)
+static inline void *queue_producer_addr(struct rxe_queue *q,
+ enum queue_type type)
{
- u32 prod;
+ u32 prod = queue_get_producer(q, type);
- switch (type) {
- case QUEUE_TYPE_FROM_USER:
- /* protect user space index */
- prod = smp_load_acquire(&q->buf->producer_index);
- prod &= q->index_mask;
- break;
- case QUEUE_TYPE_TO_USER:
- prod = q->index;
- break;
- case QUEUE_TYPE_KERNEL:
- prod = q->buf->producer_index;
- break;
- }
-
- return prod;
+ return q->buf->data + (prod << q->log2_elem_size);
}
-static inline unsigned int consumer_index(struct rxe_queue *q,
- enum queue_type type)
+static inline void *queue_consumer_addr(struct rxe_queue *q,
+ enum queue_type type)
{
- u32 cons;
-
- switch (type) {
- case QUEUE_TYPE_FROM_USER:
- cons = q->index;
- break;
- case QUEUE_TYPE_TO_USER:
- /* protect user space index */
- cons = smp_load_acquire(&q->buf->consumer_index);
- cons &= q->index_mask;
- break;
- case QUEUE_TYPE_KERNEL:
- cons = q->buf->consumer_index;
- break;
- }
+ u32 cons = queue_get_consumer(q, type);
- return cons;
+ return q->buf->data + (cons << q->log2_elem_size);
}
-static inline void *addr_from_index(struct rxe_queue *q,
- unsigned int index)
+static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
{
return q->buf->data + ((index & q->index_mask)
- << q->buf->log2_elem_size);
+ << q->log2_elem_size);
}
-static inline unsigned int index_from_addr(const struct rxe_queue *q,
+static inline u32 queue_index_from_addr(const struct rxe_queue *q,
const void *addr)
{
return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
@@ -309,7 +255,7 @@ static inline unsigned int index_from_addr(const struct rxe_queue *q,
static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
{
- return queue_empty(q, type) ? NULL : consumer_addr(q, type);
+ return queue_empty(q, type) ? NULL : queue_consumer_addr(q, type);
}
#endif /* RXE_QUEUE_H */
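
All of the helpers above now reduce to two primitives, queue_get_producer() and queue_get_consumer(), with smp_load_acquire()/smp_store_release() applied only to the index owned by the other side. A compressed stand-alone model of such a power-of-two ring, using C11 atomics in place of the kernel barrier helpers (illustrative only, not the rxe layout):

	#include <stdatomic.h>
	#include <stdio.h>

	#define RING_SLOTS 8	/* power of 2; usable capacity is RING_SLOTS - 1 */

	struct ring {
		_Atomic unsigned int prod;	/* written by the producer only */
		_Atomic unsigned int cons;	/* written by the consumer only */
		int data[RING_SLOTS];
	};

	static const unsigned int mask = RING_SLOTS - 1;

	static int ring_push(struct ring *r, int v)
	{
		unsigned int prod = atomic_load_explicit(&r->prod, memory_order_relaxed);
		unsigned int cons = atomic_load_explicit(&r->cons, memory_order_acquire);

		if (((prod + 1 - cons) & mask) == 0)
			return -1;		/* full */
		r->data[prod & mask] = v;	/* indices are kept pre-masked */
		/* publish the data before the new producer index */
		atomic_store_explicit(&r->prod, (prod + 1) & mask, memory_order_release);
		return 0;
	}

	static int ring_pop(struct ring *r, int *v)
	{
		unsigned int cons = atomic_load_explicit(&r->cons, memory_order_relaxed);
		unsigned int prod = atomic_load_explicit(&r->prod, memory_order_acquire);

		if (((prod - cons) & mask) == 0)
			return -1;		/* empty */
		*v = r->data[cons & mask];
		atomic_store_explicit(&r->cons, (cons + 1) & mask, memory_order_release);
		return 0;
	}

	int main(void)
	{
		struct ring r = { 0 };
		int v;

		ring_push(&r, 42);
		if (!ring_pop(&r, &v))
			printf("%d\n", v);	/* 42 */
		return 0;
	}

As in the rxe helpers, emptiness is ((prod - cons) & mask) == 0 and fullness is ((prod + 1 - cons) & mask) == 0, which is why the usable capacity is one slot less than the ring size.
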
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 3894197a82f6..0c9d2af15f3d 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -49,21 +49,16 @@ static void req_retry(struct rxe_qp *qp)
unsigned int cons;
unsigned int prod;
- if (qp->is_user) {
- cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
- prod = producer_index(q, QUEUE_TYPE_FROM_USER);
- } else {
- cons = consumer_index(q, QUEUE_TYPE_KERNEL);
- prod = producer_index(q, QUEUE_TYPE_KERNEL);
- }
+ cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+ prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
qp->req.wqe_index = cons;
qp->req.psn = qp->comp.psn;
qp->req.opcode = -1;
for (wqe_index = cons; wqe_index != prod;
- wqe_index = next_index(q, wqe_index)) {
- wqe = addr_from_index(qp->sq.queue, wqe_index);
+ wqe_index = queue_next_index(q, wqe_index)) {
+ wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
mask = wr_opcode_mask(wqe->wr.opcode, qp);
if (wqe->state == wqe_state_posted)
@@ -121,15 +116,9 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
unsigned int cons;
unsigned int prod;
- if (qp->is_user) {
- wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
- cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
- prod = producer_index(q, QUEUE_TYPE_FROM_USER);
- } else {
- wqe = queue_head(q, QUEUE_TYPE_KERNEL);
- cons = consumer_index(q, QUEUE_TYPE_KERNEL);
- prod = producer_index(q, QUEUE_TYPE_KERNEL);
- }
+ wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
+ cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+ prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
/* check to see if we are drained;
@@ -170,7 +159,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
if (index == prod)
return NULL;
- wqe = addr_from_index(q, index);
+ wqe = queue_addr_from_index(q, index);
if (unlikely((qp->req.state == QP_STATE_DRAIN ||
qp->req.state == QP_STATE_DRAINED) &&
@@ -390,9 +379,8 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
/* length from start of bth to end of icrc */
paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
- /* pkt->hdr, rxe, port_num and mask are initialized in ifc
- * layer
- */
+ /* pkt->hdr, port_num and mask are initialized in ifc layer */
+ pkt->rxe = rxe;
pkt->opcode = opcode;
pkt->qp = qp;
pkt->psn = qp->req.psn;
@@ -402,6 +390,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
/* init skb */
av = rxe_get_av(pkt);
+ if (!av)
+ return NULL;
+
skb = rxe_init_packet(rxe, av, paylen, pkt);
if (unlikely(!skb))
return NULL;
@@ -472,7 +463,7 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
if (err)
return err;
- if (pkt->mask & RXE_WRITE_OR_SEND) {
+ if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
if (wqe->wr.send_flags & IB_SEND_INLINE) {
u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
@@ -560,7 +551,8 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
qp->req.opcode = pkt->opcode;
if (pkt->mask & RXE_END_MASK)
- qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+ qp->req.wqe_index = queue_next_index(qp->sq.queue,
+ qp->req.wqe_index);
qp->need_req_skb = 0;
@@ -572,7 +564,6 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
u8 opcode = wqe->wr.opcode;
- struct rxe_mr *mr;
u32 rkey;
int ret;
@@ -590,14 +581,11 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
}
break;
case IB_WR_REG_MR:
- mr = to_rmr(wqe->wr.wr.reg.mr);
- rxe_add_ref(mr);
- mr->state = RXE_MR_STATE_VALID;
- mr->access = wqe->wr.wr.reg.access;
- mr->ibmr.lkey = wqe->wr.wr.reg.key;
- mr->ibmr.rkey = wqe->wr.wr.reg.key;
- mr->iova = wqe->wr.wr.reg.mr->iova;
- rxe_drop_ref(mr);
+ ret = rxe_reg_fast_mr(qp, wqe);
+ if (unlikely(ret)) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ return ret;
+ }
break;
case IB_WR_BIND_MW:
ret = rxe_bind_mw(qp, wqe);
@@ -614,7 +602,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+ qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
qp->sq_sig_type == IB_SIGNAL_ALL_WR)
@@ -645,7 +633,8 @@ next_wqe:
goto exit;
if (unlikely(qp->req.state == QP_STATE_RESET)) {
- qp->req.wqe_index = consumer_index(q, q->type);
+ qp->req.wqe_index = queue_get_consumer(q,
+ QUEUE_TYPE_FROM_CLIENT);
qp->req.opcode = -1;
qp->req.need_rd_atomic = 0;
qp->req.wait_psn = 0;
@@ -691,13 +680,13 @@ next_wqe:
}
mask = rxe_opcode[opcode].mask;
- if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
+ if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) {
if (check_init_depth(qp, wqe))
goto exit;
}
mtu = get_mtu(qp);
- payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
+ payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0;
if (payload > mtu) {
if (qp_type(qp) == IB_QPT_UD) {
/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
@@ -711,7 +700,7 @@ next_wqe:
wqe->last_psn = qp->req.psn;
qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
- qp->req.wqe_index = next_index(qp->sq.queue,
+ qp->req.wqe_index = queue_next_index(qp->sq.queue,
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5501227ddc65..e8f435fa6e4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -303,10 +303,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
spin_lock_bh(&srq->rq.consumer_lock);
- if (qp->is_user)
- wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
- else
- wqe = queue_head(q, QUEUE_TYPE_KERNEL);
+ wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
if (!wqe) {
spin_unlock_bh(&srq->rq.consumer_lock);
return RESPST_ERR_RNR;
@@ -322,13 +319,8 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
memcpy(&qp->resp.srq_wqe, wqe, size);
qp->resp.wqe = &qp->resp.srq_wqe.wqe;
- if (qp->is_user) {
- advance_consumer(q, QUEUE_TYPE_FROM_USER);
- count = queue_count(q, QUEUE_TYPE_FROM_USER);
- } else {
- advance_consumer(q, QUEUE_TYPE_KERNEL);
- count = queue_count(q, QUEUE_TYPE_KERNEL);
- }
+ queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+ count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);
if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
srq->limit = 0;
@@ -357,12 +349,8 @@ static enum resp_states check_resource(struct rxe_qp *qp,
qp->resp.status = IB_WC_WR_FLUSH_ERR;
return RESPST_COMPLETE;
} else if (!srq) {
- if (qp->is_user)
- qp->resp.wqe = queue_head(qp->rq.queue,
- QUEUE_TYPE_FROM_USER);
- else
- qp->resp.wqe = queue_head(qp->rq.queue,
- QUEUE_TYPE_KERNEL);
+ qp->resp.wqe = queue_head(qp->rq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
if (qp->resp.wqe) {
qp->resp.status = IB_WC_WR_FLUSH_ERR;
return RESPST_COMPLETE;
@@ -374,7 +362,7 @@ static enum resp_states check_resource(struct rxe_qp *qp,
}
}
- if (pkt->mask & RXE_READ_OR_ATOMIC) {
+ if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) {
/* it is the requesters job to not send
* too many read/atomic ops, we just
* recycle the responder resource queue
@@ -389,12 +377,8 @@ static enum resp_states check_resource(struct rxe_qp *qp,
if (srq)
return get_srq_wqe(qp);
- if (qp->is_user)
- qp->resp.wqe = queue_head(qp->rq.queue,
- QUEUE_TYPE_FROM_USER);
- else
- qp->resp.wqe = queue_head(qp->rq.queue,
- QUEUE_TYPE_KERNEL);
+ qp->resp.wqe = queue_head(qp->rq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
}
@@ -429,7 +413,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
enum resp_states state;
int access;
- if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
+ if (pkt->mask & RXE_READ_OR_WRITE_MASK) {
if (pkt->mask & RXE_RETH_MASK) {
qp->resp.va = reth_va(pkt);
qp->resp.offset = 0;
@@ -450,7 +434,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
}
/* A zero-byte op is not required to set an addr or rkey. */
- if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
+ if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
(pkt->mask & RXE_RETH_MASK) &&
reth_len(pkt) == 0) {
return RESPST_EXECUTE;
@@ -876,7 +860,6 @@ static enum resp_states do_complete(struct rxe_qp *qp,
wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
pkt->mask & RXE_WRITE_MASK) ?
IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
- wc->vendor_err = 0;
wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
pkt->mask & RXE_WRITE_MASK) ?
qp->resp.length : wqe->dma.length - wqe->dma.resid;
@@ -897,8 +880,6 @@ static enum resp_states do_complete(struct rxe_qp *qp,
uwc->ex.invalidate_rkey = ieth_rkey(pkt);
}
- uwc->qp_num = qp->ibqp.qp_num;
-
if (pkt->mask & RXE_DETH_MASK)
uwc->src_qp = deth_sqp(pkt);
@@ -930,18 +911,13 @@ static enum resp_states do_complete(struct rxe_qp *qp,
if (pkt->mask & RXE_DETH_MASK)
wc->src_qp = deth_sqp(pkt);
- wc->qp = &qp->ibqp;
wc->port_num = qp->attr.port_num;
}
}
/* have copy for srq and reference for !srq */
- if (!qp->srq) {
- if (qp->is_user)
- advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_USER);
- else
- advance_consumer(qp->rq.queue, QUEUE_TYPE_KERNEL);
- }
+ if (!qp->srq)
+ queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);
qp->resp.wqe = NULL;
@@ -1213,7 +1189,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
return;
while (!qp->srq && q && queue_head(q, q->type))
- advance_consumer(q, q->type);
+ queue_advance_consumer(q, q->type);
}
int rxe_responder(void *arg)
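
Aside: the pattern repeated throughout rxe_resp.c above is the point of the queue-type rework. The queue's perspective is fixed when it is created, so call sites no longer branch on qp->is_user. Before and after, distilled from the hunks:

	/* before: every access picked a queue type at run time */
	if (qp->is_user)
		wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
	else
		wqe = queue_head(q, QUEUE_TYPE_KERNEL);

	/* after: the type names who produces and who consumes
	 * (client vs. driver), which holds for kernel and user
	 * clients alike, so one unconditional call suffices
	 */
	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
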
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 610c98d24b5c..eb1c4c3b3a78 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -86,14 +86,13 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
srq->srq_num = srq->pelem.index;
srq->rq.max_wr = init->attr.max_wr;
srq->rq.max_sge = init->attr.max_sge;
- srq->rq.is_user = srq->is_user;
srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
spin_lock_init(&srq->rq.producer_lock);
spin_lock_init(&srq->rq.consumer_lock);
- type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+ type = QUEUE_TYPE_FROM_CLIENT;
q = rxe_queue_init(rxe, &srq->rq.max_wr,
srq_wqe_size, type);
if (!q) {
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 267b5a9c345d..0aa0d7e52773 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -29,13 +29,10 @@ static int rxe_query_port(struct ib_device *dev,
u32 port_num, struct ib_port_attr *attr)
{
struct rxe_dev *rxe = to_rdev(dev);
- struct rxe_port *port;
int rc;
- port = &rxe->port;
-
/* *attr is zeroed by the caller, avoid zeroing it here */
- *attr = port->attr;
+ *attr = rxe->port.attr;
mutex_lock(&rxe->usdev_lock);
rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
@@ -161,9 +158,19 @@ static int rxe_create_ah(struct ib_ah *ibah,
struct ib_udata *udata)
{
- int err;
struct rxe_dev *rxe = to_rdev(ibah->device);
struct rxe_ah *ah = to_rah(ibah);
+ struct rxe_create_ah_resp __user *uresp = NULL;
+ int err;
+
+ if (udata) {
+ /* test if new user provider */
+ if (udata->outlen >= sizeof(*uresp))
+ uresp = udata->outbuf;
+ ah->is_user = true;
+ } else {
+ ah->is_user = false;
+ }
err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
if (err)
@@ -173,6 +180,24 @@ static int rxe_create_ah(struct ib_ah *ibah,
if (err)
return err;
+ /* create index > 0 */
+ rxe_add_index(ah);
+ ah->ah_num = ah->pelem.index;
+
+ if (uresp) {
+ /* only if new user provider */
+ err = copy_to_user(&uresp->ah_num, &ah->ah_num,
+ sizeof(uresp->ah_num));
+ if (err) {
+ rxe_drop_index(ah);
+ rxe_drop_ref(ah);
+ return -EFAULT;
+ }
+ } else if (ah->is_user) {
+ /* only if old user provider */
+ ah->ah_num = 0;
+ }
+
rxe_init_av(init_attr->ah_attr, &ah->av);
return 0;
}
@@ -205,6 +230,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);
+ rxe_drop_index(ah);
rxe_drop_ref(ah);
return 0;
}
@@ -218,11 +244,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
int num_sge = ibwr->num_sge;
int full;
- if (rq->is_user)
- full = queue_full(rq->queue, QUEUE_TYPE_FROM_USER);
- else
- full = queue_full(rq->queue, QUEUE_TYPE_KERNEL);
-
+ full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER);
if (unlikely(full)) {
err = -ENOMEM;
goto err1;
@@ -237,11 +259,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
- if (rq->is_user)
- recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_FROM_USER);
- else
- recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_KERNEL);
-
+ recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
recv_wqe->wr_id = ibwr->wr_id;
recv_wqe->num_sge = num_sge;
@@ -254,10 +272,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
recv_wqe->dma.cur_sge = 0;
recv_wqe->dma.sge_offset = 0;
- if (rq->is_user)
- advance_producer(rq->queue, QUEUE_TYPE_FROM_USER);
- else
- advance_producer(rq->queue, QUEUE_TYPE_KERNEL);
+ queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER);
return 0;
@@ -281,9 +296,6 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (udata->outlen < sizeof(*uresp))
return -EINVAL;
uresp = udata->outbuf;
- srq->is_user = true;
- } else {
- srq->is_user = false;
}
err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
@@ -522,8 +534,11 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
if (qp_type(qp) == IB_QPT_UD ||
qp_type(qp) == IB_QPT_SMI ||
qp_type(qp) == IB_QPT_GSI) {
+ struct ib_ah *ibah = ud_wr(ibwr)->ah;
+
wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
+ wr->wr.ud.ah_num = to_rah(ibah)->ah_num;
if (qp_type(qp) == IB_QPT_GSI)
wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
if (wr->opcode == IB_WR_SEND_WITH_IMM)
@@ -595,11 +610,6 @@ static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
return;
}
- if (qp_type(qp) == IB_QPT_UD ||
- qp_type(qp) == IB_QPT_SMI ||
- qp_type(qp) == IB_QPT_GSI)
- memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
-
if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
copy_inline_data_to_wqe(wqe, ibwr);
else
@@ -633,27 +643,17 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
spin_lock_irqsave(&qp->sq.sq_lock, flags);
- if (qp->is_user)
- full = queue_full(sq->queue, QUEUE_TYPE_FROM_USER);
- else
- full = queue_full(sq->queue, QUEUE_TYPE_KERNEL);
+ full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
if (unlikely(full)) {
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
return -ENOMEM;
}
- if (qp->is_user)
- send_wqe = producer_addr(sq->queue, QUEUE_TYPE_FROM_USER);
- else
- send_wqe = producer_addr(sq->queue, QUEUE_TYPE_KERNEL);
-
+ send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_TO_DRIVER);
init_send_wqe(qp, ibwr, mask, length, send_wqe);
- if (qp->is_user)
- advance_producer(sq->queue, QUEUE_TYPE_FROM_USER);
- else
- advance_producer(sq->queue, QUEUE_TYPE_KERNEL);
+ queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
@@ -845,18 +845,12 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->cq_lock, flags);
for (i = 0; i < num_entries; i++) {
- if (cq->is_user)
- cqe = queue_head(cq->queue, QUEUE_TYPE_TO_USER);
- else
- cqe = queue_head(cq->queue, QUEUE_TYPE_KERNEL);
+ cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
if (!cqe)
break;
memcpy(wc++, &cqe->ibwc, sizeof(*wc));
- if (cq->is_user)
- advance_consumer(cq->queue, QUEUE_TYPE_TO_USER);
- else
- advance_consumer(cq->queue, QUEUE_TYPE_KERNEL);
+ queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
}
spin_unlock_irqrestore(&cq->cq_lock, flags);
@@ -868,10 +862,7 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
struct rxe_cq *cq = to_rcq(ibcq);
int count;
- if (cq->is_user)
- count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
- else
- count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
+ count = queue_count(cq->queue, QUEUE_TYPE_FROM_DRIVER);
return (count > wc_cnt) ? wc_cnt : count;
}
@@ -887,10 +878,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
- if (cq->is_user)
- empty = queue_empty(cq->queue, QUEUE_TYPE_TO_USER);
- else
- empty = queue_empty(cq->queue, QUEUE_TYPE_KERNEL);
+ empty = queue_empty(cq->queue, QUEUE_TYPE_FROM_DRIVER);
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
ret = 1;
@@ -987,41 +975,26 @@ err1:
return ERR_PTR(err);
}
-static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map *map;
- struct rxe_phys_buf *buf;
-
- if (unlikely(mr->nbuf == mr->num_buf))
- return -ENOMEM;
-
- map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
- buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
-
- buf->addr = addr;
- buf->size = ibmr->page_size;
- mr->nbuf++;
-
- return 0;
-}
-
+/* build next_map_set from scatterlist
+ * The IB_WR_REG_MR WR will swap map_sets
+ */
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rxe_mr *mr = to_rmr(ibmr);
+ struct rxe_map_set *set = mr->next_map_set;
int n;
- mr->nbuf = 0;
+ set->nbuf = 0;
- n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_mr_set_page);
- mr->va = ibmr->iova;
- mr->iova = ibmr->iova;
- mr->length = ibmr->length;
- mr->page_shift = ilog2(ibmr->page_size);
- mr->page_mask = ibmr->page_size - 1;
- mr->offset = mr->iova & mr->page_mask;
+ set->va = ibmr->iova;
+ set->iova = ibmr->iova;
+ set->length = ibmr->length;
+ set->page_shift = ilog2(ibmr->page_size);
+ set->page_mask = ibmr->page_size - 1;
+ set->offset = set->iova & set->page_mask;
return n;
}
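
Aside: the map-set conversion above implies a double-buffering scheme: rxe_map_mr_sg() populates mr->next_map_set, and the work request that completes the registration makes it current. A sketch of the swap the IB_WR_REG_MR path would perform (helper name hypothetical; the fields are the ones added in rxe_verbs.h below):

	static void rxe_mr_swap_map_sets(struct rxe_mr *mr)
	{
		struct rxe_map_set *set = mr->cur_map_set;

		/* next_map_set was staged by rxe_map_mr_sg(); publish it */
		mr->cur_map_set = mr->next_map_set;
		mr->next_map_set = set;
	}
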
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index ac2a2148027f..35e041450090 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -46,8 +46,9 @@ struct rxe_pd {
struct rxe_ah {
struct ib_ah ibah;
struct rxe_pool_entry pelem;
- struct rxe_pd *pd;
struct rxe_av av;
+ bool is_user;
+ int ah_num;
};
struct rxe_cqe {
@@ -64,7 +65,7 @@ struct rxe_cq {
spinlock_t cq_lock;
u8 notify;
bool is_dying;
- int is_user;
+ bool is_user;
struct tasklet_struct comp_task;
};
@@ -77,7 +78,6 @@ enum wqe_state {
};
struct rxe_sq {
- bool is_user;
int max_wr;
int max_sge;
int max_inline;
@@ -86,7 +86,6 @@ struct rxe_sq {
};
struct rxe_rq {
- bool is_user;
int max_wr;
int max_sge;
spinlock_t producer_lock; /* guard queue producer */
@@ -100,7 +99,6 @@ struct rxe_srq {
struct rxe_pd *pd;
struct rxe_rq rq;
u32 srq_num;
- bool is_user;
int limit;
int error;
@@ -240,7 +238,6 @@ struct rxe_qp {
struct sk_buff_head req_pkts;
struct sk_buff_head resp_pkts;
- struct sk_buff_head send_pkts;
struct rxe_req_info req;
struct rxe_comp_info comp;
@@ -267,18 +264,11 @@ struct rxe_qp {
};
enum rxe_mr_state {
- RXE_MR_STATE_ZOMBIE,
RXE_MR_STATE_INVALID,
RXE_MR_STATE_FREE,
RXE_MR_STATE_VALID,
};
-enum rxe_mr_type {
- RXE_MR_TYPE_NONE,
- RXE_MR_TYPE_DMA,
- RXE_MR_TYPE_MR,
-};
-
enum rxe_mr_copy_dir {
RXE_TO_MR_OBJ,
RXE_FROM_MR_OBJ,
@@ -300,6 +290,17 @@ struct rxe_map {
struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
+struct rxe_map_set {
+ struct rxe_map **map;
+ u64 va;
+ u64 iova;
+ size_t length;
+ u32 offset;
+ u32 nbuf;
+ int page_shift;
+ int page_mask;
+};
+
static inline int rkey_is_mw(u32 rkey)
{
u32 index = rkey >> 8;
@@ -313,28 +314,24 @@ struct rxe_mr {
struct ib_umem *umem;
+ u32 lkey;
+ u32 rkey;
enum rxe_mr_state state;
- enum rxe_mr_type type;
- u64 va;
- u64 iova;
- size_t length;
- u32 offset;
+ enum ib_mr_type type;
int access;
- int page_shift;
- int page_mask;
int map_shift;
int map_mask;
u32 num_buf;
- u32 nbuf;
u32 max_buf;
u32 num_map;
atomic_t num_mw;
- struct rxe_map **map;
+ struct rxe_map_set *cur_map_set;
+ struct rxe_map_set *next_map_set;
};
enum rxe_mw_state {
@@ -350,6 +347,7 @@ struct rxe_mw {
enum rxe_mw_state state;
struct rxe_qp *qp; /* Type 2 only */
struct rxe_mr *mr;
+ u32 rkey;
int access;
u64 addr;
u64 length;
@@ -469,19 +467,14 @@ static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}
-static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
+static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
{
- return to_rpd(mr->ibmr.pd);
+ return to_rpd(ah->ibah.pd);
}
-static inline u32 mr_lkey(struct rxe_mr *mr)
-{
- return mr->ibmr.lkey;
-}
-
-static inline u32 mr_rkey(struct rxe_mr *mr)
+static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
- return mr->ibmr.rkey;
+ return to_rpd(mr->ibmr.pd);
}
static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
@@ -489,11 +482,6 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
return to_rpd(mw->ibmw.pd);
}
-static inline u32 rxe_mw_rkey(struct rxe_mw *mw)
-{
- return mw->ibmw.rkey;
-}
-
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
void rxe_mc_cleanup(struct rxe_pool_entry *arg);
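
Aside: every to_r*() and *_pd() helper in this header, including the rxe_ah_pd() added above, is the same container_of idiom: the ib_* core structure is embedded in the driver structure, so the wrapper is recovered by subtracting the member offset. In miniature, mirroring the to_rmw() shown in the hunk:

	struct rxe_ah {
		struct ib_ah ibah;	/* embedded core object */
		/* ... driver-private fields ... */
	};

	static inline struct rxe_ah *to_rah(struct ib_ah *ibah)
	{
		/* container_of() works for any member position */
		return ibah ? container_of(ibah, struct rxe_ah, ibah) : NULL;
	}
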
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 7a5ed86ffc9f..7acdd3c3a599 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1951,8 +1951,6 @@ int siw_cm_init(void)
void siw_cm_exit(void)
{
- if (siw_cm_wq) {
- flush_workqueue(siw_cm_wq);
+ if (siw_cm_wq)
destroy_workqueue(siw_cm_wq);
- }
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 684c2ddb16f5..fd9d7f2c4d64 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1583,6 +1583,7 @@ int ipoib_cm_dev_init(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int max_srq_sge, i;
+ u8 addr;
INIT_LIST_HEAD(&priv->cm.passive_ids);
INIT_LIST_HEAD(&priv->cm.reap_list);
@@ -1636,7 +1637,8 @@ int ipoib_cm_dev_init(struct net_device *dev)
}
}
- priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
+ addr = IPOIB_FLAGS_RC;
+ dev_addr_mod(dev, 0, &addr, 1);
return 0;
}
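
Aside: this and the following ipoib hunks all replace direct writes into dev->dev_addr with dev_addr_mod()/__dev_addr_set(), part of the netdev work toward making dev->dev_addr const so the core can track address changes. At the time of this series the helper was essentially a bounded copy; a simplified sketch, not the exact upstream definition:

	/* simplified; the real helper lives in include/linux/netdevice.h */
	static inline void dev_addr_mod(struct net_device *dev, unsigned int offset,
					const void *addr, size_t len)
	{
		memcpy(&dev->dev_addr[offset], addr, len);
	}
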
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ceabfb0b0a83..2c3dca41d3bd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1057,13 +1057,11 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
union ib_gid search_gid;
union ib_gid gid0;
- union ib_gid *netdev_gid;
int err;
u16 index;
u32 port;
bool ret = false;
- netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
return false;
@@ -1073,7 +1071,8 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
* to do it later
*/
priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
- netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
+ dev_addr_mod(priv->dev, 4, (u8 *)&gid0.global.subnet_prefix,
+ sizeof(gid0.global.subnet_prefix));
search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
search_gid.global.interface_id = priv->local_gid.global.interface_id;
@@ -1135,8 +1134,8 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
memcpy(&priv->local_gid, &gid0,
sizeof(priv->local_gid));
- memcpy(priv->dev->dev_addr + 4, &gid0,
- sizeof(priv->local_gid));
+ dev_addr_mod(priv->dev, 4, (u8 *)&gid0,
+ sizeof(priv->local_gid));
ret = true;
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0aa8629fdf62..9934b8bd7f56 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1696,6 +1696,7 @@ static void ipoib_dev_uninit_default(struct net_device *dev)
static int ipoib_dev_init_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ u8 addr_mod[3];
ipoib_napi_add(dev);
@@ -1723,9 +1724,10 @@ static int ipoib_dev_init_default(struct net_device *dev)
}
/* after qp created set dev address */
- priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
- priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff;
- priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;
+ addr_mod[0] = (priv->qp->qp_num >> 16) & 0xff;
+ addr_mod[1] = (priv->qp->qp_num >> 8) & 0xff;
+ addr_mod[2] = (priv->qp->qp_num) & 0xff;
+ dev_addr_mod(priv->dev, 1, addr_mod, sizeof(addr_mod));
return 0;
@@ -1886,8 +1888,7 @@ static int ipoib_parent_init(struct net_device *ndev)
priv->ca->name, priv->port, result);
return result;
}
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
- sizeof(union ib_gid));
+ dev_addr_mod(priv->dev, 4, priv->local_gid.raw, sizeof(union ib_gid));
SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
priv->dev->dev_port = priv->port - 1;
@@ -1908,8 +1909,8 @@ static void ipoib_child_init(struct net_device *ndev)
memcpy(&priv->local_gid, priv->dev->dev_addr + 4,
sizeof(priv->local_gid));
else {
- memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr,
- INFINIBAND_ALEN);
+ __dev_addr_set(priv->dev, ppriv->dev->dev_addr,
+ INFINIBAND_ALEN);
memcpy(&priv->local_gid, &ppriv->local_gid,
sizeof(priv->local_gid));
}
@@ -1997,7 +1998,6 @@ static void ipoib_ndo_uninit(struct net_device *dev)
if (priv->wq) {
/* See ipoib_mcast_carrier_on_task() */
WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
- flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
priv->wq = NULL;
}
@@ -2327,7 +2327,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
memcpy(&priv->local_gid.global.interface_id,
&gid->global.interface_id,
sizeof(gid->global.interface_id));
- memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
+ dev_addr_mod(netdev, 4, (u8 *)&priv->local_gid, sizeof(priv->local_gid));
clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
netif_addr_unlock_bh(netdev);
diff --git a/drivers/infiniband/ulp/opa_vnic/Kconfig b/drivers/infiniband/ulp/opa_vnic/Kconfig
index e84248587187..4d43d055fa8e 100644
--- a/drivers/infiniband/ulp/opa_vnic/Kconfig
+++ b/drivers/infiniband/ulp/opa_vnic/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_OPA_VNIC
- tristate "Intel OPA VNIC support"
+ tristate "Cornelis OPX VNIC support"
depends on X86_64 && INFINIBAND
help
- This is Omni-Path (OPA) Virtual Network Interface Controller (VNIC)
+ This is Omni-Path Express (OPX) Virtual Network Interface Controller (VNIC)
driver for Ethernet over Omni-Path feature. It implements the HW
independent VNIC functionality. It interfaces with Linux stack for
data path and IB MAD for the control path.
diff --git a/drivers/infiniband/ulp/opa_vnic/Makefile b/drivers/infiniband/ulp/opa_vnic/Makefile
index a8c21d140ccb..196183817cdc 100644
--- a/drivers/infiniband/ulp/opa_vnic/Makefile
+++ b/drivers/infiniband/ulp/opa_vnic/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Makefile - Intel Omni-Path Virtual Network Controller driver
+# Makefile - Cornelis Omni-Path Express Virtual Network Controller driver
# Copyright(c) 2017, Intel Corporation.
+# Copyright(c) 2021, Cornelis Networks.
#
obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic.o
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index cecf0f7cadf9..21c6cea8b1db 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -1,5 +1,6 @@
/*
* Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -46,7 +47,7 @@
*/
/*
- * This file contains OPA Virtual Network Interface Controller (VNIC)
+ * This file contains OPX Virtual Network Interface Controller (VNIC)
* Ethernet Management Agent (EMA) driver
*/
@@ -1051,5 +1052,5 @@ static void opa_vnic_deinit(void)
module_exit(opa_vnic_deinit);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel OPA Virtual Network driver");
+MODULE_AUTHOR("Cornelis Networks");
+MODULE_DESCRIPTION("Cornelis OPX Virtual Network driver");
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index 5e780bdd763d..f7e459fe68be 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -37,46 +37,50 @@ void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
s->rdma.failover_cnt++;
}
-int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats,
- char *buf, size_t len)
+int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
struct rtrs_clt_stats_pcpu *s;
size_t used;
int cpu;
- used = scnprintf(buf, len, " ");
- for_each_possible_cpu(cpu)
- used += scnprintf(buf + used, len - used, " CPU%u", cpu);
-
- used += scnprintf(buf + used, len - used, "\nfrom:");
+ used = 0;
for_each_possible_cpu(cpu) {
s = per_cpu_ptr(stats->pcpu_stats, cpu);
- used += scnprintf(buf + used, len - used, " %d",
+ used += sysfs_emit_at(buf, used, "%d ",
atomic_read(&s->cpu_migr.from));
}
- used += scnprintf(buf + used, len - used, "\nto :");
+ used += sysfs_emit_at(buf, used, "\n");
+
+ return used;
+}
+
+int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ size_t used;
+ int cpu;
+
+ used = 0;
for_each_possible_cpu(cpu) {
s = per_cpu_ptr(stats->pcpu_stats, cpu);
- used += scnprintf(buf + used, len - used, " %d",
- s->cpu_migr.to);
+ used += sysfs_emit_at(buf, used, "%d ", s->cpu_migr.to);
}
- used += scnprintf(buf + used, len - used, "\n");
+
+ used += sysfs_emit_at(buf, used, "\n");
return used;
}
-int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
- size_t len)
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf)
{
- return scnprintf(buf, len, "%d %d\n",
- stats->reconnects.successful_cnt,
- stats->reconnects.fail_cnt);
+ return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt,
+ stats->reconnects.fail_cnt);
}
-ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
- char *page, size_t len)
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page)
{
struct rtrs_clt_stats_rdma sum;
struct rtrs_clt_stats_rdma *r;
@@ -94,16 +98,15 @@ ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
sum.failover_cnt += r->failover_cnt;
}
- return scnprintf(page, len, "%llu %llu %llu %llu %u %llu\n",
+ return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n",
sum.dir[READ].cnt, sum.dir[READ].size_total,
sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
atomic_read(&stats->inflight), sum.failover_cnt);
}
-ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s,
- char *page, size_t len)
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page)
{
- return scnprintf(page, len, "echo 1 to reset all statistics\n");
+ return sysfs_emit(page, "echo 1 to reset all statistics\n");
}
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
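
Aside: the scnprintf()-to-sysfs_emit() conversions above rely on sysfs handing show() a full PAGE_SIZE buffer: sysfs_emit()/sysfs_emit_at() bound the write internally, so a length no longer has to be threaded through every stats helper. The general shape (function name hypothetical):

	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *page)
	{
		int cpu, used = 0;

		/* sysfs_emit_at() returns the number of characters written
		 * and refuses to run past the end of the page
		 */
		for_each_possible_cpu(cpu)
			used += sysfs_emit_at(page, used, "%d ", cpu);

		used += sysfs_emit_at(page, used, "\n");
		return used;
	}
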
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index 4ee592ccf979..0e69180c3771 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -296,8 +296,12 @@ static struct kobj_attribute rtrs_clt_remove_path_attr =
__ATTR(remove_path, 0644, rtrs_clt_remove_path_show,
rtrs_clt_remove_path_store);
-STAT_ATTR(struct rtrs_clt_stats, cpu_migration,
- rtrs_clt_stats_migration_cnt_to_str,
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration_from,
+ rtrs_clt_stats_migration_from_cnt_to_str,
+ rtrs_clt_reset_cpu_migr_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration_to,
+ rtrs_clt_stats_migration_to_cnt_to_str,
rtrs_clt_reset_cpu_migr_stats);
STAT_ATTR(struct rtrs_clt_stats, reconnects,
@@ -313,7 +317,8 @@ STAT_ATTR(struct rtrs_clt_stats, reset_all,
rtrs_clt_reset_all_stats);
static struct attribute *rtrs_clt_stats_attrs[] = {
- &cpu_migration_attr.attr,
+ &cpu_migration_from_attr.attr,
+ &cpu_migration_to_attr.attr,
&reconnects_attr.attr,
&rdma_attr.attr,
&reset_all_attr.attr,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index bc8824b4ee0d..15c0077dd27e 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2788,6 +2788,12 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
struct rtrs_clt *clt;
int err, i;
+ if (strchr(sessname, '/') || strchr(sessname, '.')) {
+ pr_err("sessname cannot contain / and .\n");
+ err = -EINVAL;
+ goto out;
+ }
+
clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
ops->link_ev,
reconnect_delay_sec,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index 9dc819885ec7..9afffccff973 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -224,19 +224,18 @@ void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir);
int rtrs_clt_reset_rdma_lat_distr_stats(struct rtrs_clt_stats *stats,
bool enable);
ssize_t rtrs_clt_stats_rdma_lat_distr_to_str(struct rtrs_clt_stats *stats,
- char *page, size_t len);
+ char *page);
int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable);
-int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf,
- size_t len);
+int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf);
+int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf);
int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable);
-int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
- size_t len);
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf);
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable);
ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
- char *page, size_t len);
+ char *page);
int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *stats, bool enable);
ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
- char *page, size_t len);
+ char *page);
/* rtrs-clt-sysfs.c */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index d12ddfa50747..78eac9a4f703 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -398,7 +398,7 @@ static ssize_t get_value##_show(struct kobject *kobj, \
{ \
type *stats = container_of(kobj, type, kobj_stats); \
\
- return print(stats, page, PAGE_SIZE); \
+ return print(stats, page); \
}
#define STAT_ATTR(type, stat, print, reset) \
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
index 12c374b5eb6e..44b1c1652131 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -23,8 +23,7 @@ int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
return -EINVAL;
}
-ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
- char *page, size_t len)
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page)
{
struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 20efd44297fb..9c43ce5ba1c1 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -102,7 +102,7 @@ static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
sess = container_of(kobj, struct rtrs_srv_sess, kobj);
cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
page, PAGE_SIZE);
- return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+ return cnt + sysfs_emit_at(page, cnt, "\n");
}
static struct kobj_attribute rtrs_srv_src_addr_attr =
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 716ef7b23558..7df71f8cf149 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -803,6 +803,11 @@ static int process_info_req(struct rtrs_srv_con *con,
return err;
}
+ if (strchr(msg->sessname, '/') || strchr(msg->sessname, '.')) {
+ rtrs_err(s, "sessname cannot contain / and .\n");
+ return -EINVAL;
+ }
+
if (exist_sessname(sess->srv->ctx,
msg->sessname, &sess->srv->paths_uuid)) {
rtrs_err(s, "sessname is duplicated: %s\n", msg->sessname);
@@ -1766,6 +1771,7 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
strscpy(sess->s.sessname, str, sizeof(sess->s.sessname));
sess->s.con_num = con_num;
+ sess->s.irq_con_num = con_num;
sess->s.recon_cnt = recon_cnt;
uuid_copy(&sess->s.uuid, uuid);
spin_lock_init(&sess->state_lock);
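
Aside: the sessname checks added on both the client (rtrs-clt.c above) and the server side guard the same thing: the session name ends up backing a sysfs directory name, so '/' would nest paths, and '.' is rejected wholesale, which also rules out "." and "..". The shared predicate, as a sketch (helper name hypothetical):

	static bool sessname_is_valid(const char *sessname)
	{
		return !strchr(sessname, '/') && !strchr(sessname, '.');
	}
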
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 9d8d2a91a235..7d403c12faf3 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -136,8 +136,7 @@ static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
/* functions which are implemented in rtrs-srv-stats.c */
int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
-ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
- char *page, size_t len);
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page);
int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
char *page, size_t len);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index ca542e477d38..37952c8e768c 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -222,13 +222,23 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
}
}
+static bool is_pollqueue(struct rtrs_con *con)
+{
+ return con->cid >= con->sess->irq_con_num;
+}
+
static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
enum ib_poll_context poll_ctx)
{
struct rdma_cm_id *cm_id = con->cm_id;
struct ib_cq *cq;
- cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
+ if (is_pollqueue(con))
+ cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,
+ poll_ctx);
+ else
+ cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
+
if (IS_ERR(cq)) {
rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
PTR_ERR(cq));
@@ -269,6 +279,17 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
return ret;
}
+static void destroy_cq(struct rtrs_con *con)
+{
+ if (con->cq) {
+ if (is_pollqueue(con))
+ ib_free_cq(con->cq);
+ else
+ ib_cq_pool_put(con->cq, con->nr_cqe);
+ }
+ con->cq = NULL;
+}
+
int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
@@ -283,8 +304,7 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
max_send_sge);
if (err) {
- ib_cq_pool_put(con->cq, con->nr_cqe);
- con->cq = NULL;
+ destroy_cq(con);
return err;
}
con->sess = sess;
@@ -299,10 +319,7 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con)
rdma_destroy_qp(con->cm_id);
con->qp = NULL;
}
- if (con->cq) {
- ib_cq_pool_put(con->cq, con->nr_cqe);
- con->cq = NULL;
- }
+ destroy_cq(con);
}
EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
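
Aside: the is_pollqueue() split above keeps CQ allocation and teardown symmetric: connections with cid >= irq_con_num are polled directly by the ULP, so they get a dedicated CQ via ib_alloc_cq() and must be released with ib_free_cq(), while interrupt-driven connections keep using the shared pool (ib_cq_pool_get()/ib_cq_pool_put()). A hedged sketch of how a caller might pick the poll context; names beyond the diff are assumptions:

	static int example_create_con_cq(struct rtrs_con *con, int cq_vector,
					 int nr_cqe)
	{
		/* poll queues are driven by the caller, not by interrupts */
		enum ib_poll_context ctx = is_pollqueue(con) ? IB_POLL_DIRECT
							     : IB_POLL_SOFTIRQ;

		return create_cq(con, cq_vector, nr_cqe, ctx);
	}
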
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 9c6386b2af33..e09b1fae42e1 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/mfd/axp20x.h>
#include <linux/module.h>
+#include <linux/platform_data/x86/soc.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -255,41 +256,24 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
return 0;
}
-#ifdef CONFIG_ACPI
-static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
- struct platform_device *pdev)
+static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek)
{
- unsigned long long hrv = 0;
- acpi_status status;
-
if (IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY) &&
axp20x_pek->axp20x->variant == AXP288_ID) {
- status = acpi_evaluate_integer(ACPI_HANDLE(pdev->dev.parent),
- "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status))
- dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
-
/*
* On Cherry Trail platforms (hrv == 3), do not register the
* input device if there is an "INTCFD9" or "ACPI0011" gpio
* button ACPI device, as that handles the power button too,
* and otherwise we end up reporting all presses twice.
*/
- if (hrv == 3 && (acpi_dev_present("INTCFD9", NULL, -1) ||
+ if (soc_intel_is_cht() &&
+ (acpi_dev_present("INTCFD9", NULL, -1) ||
acpi_dev_present("ACPI0011", NULL, -1)))
return false;
-
}
return true;
}
-#else
-static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
- struct platform_device *pdev)
-{
- return true;
-}
-#endif
static int axp20x_pek_probe(struct platform_device *pdev)
{
@@ -321,7 +305,7 @@ static int axp20x_pek_probe(struct platform_device *pdev)
axp20x_pek->irq_dbf = regmap_irq_get_virq(
axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf);
- if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
+ if (axp20x_pek_should_register_input(axp20x_pek)) {
error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
if (error)
return error;
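
Aside: dropping the #ifdef CONFIG_ACPI pair works because every helper left in the function either has a !ACPI stub (acpi_dev_present()) or compiles down to a constant on non-x86 (soc_intel_is_cht(), from the header added above), letting the compiler discard the dead branch. The same pattern in miniature (function name hypothetical):

	static bool example_should_register_input(void)
	{
		if (IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY) &&
		    soc_intel_is_cht() &&
		    acpi_dev_present("ACPI0011", NULL, -1))
			return false;	/* soc_button_array handles the power key */

		return true;
	}
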
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 54de49ca7808..ef7999a08c8b 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -11,60 +11,231 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include "smd-rpm.h"
#include "icc-rpm.h"
-static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+/* BIMC QoS */
+#define M_BKE_REG_BASE(n) (0x300 + (0x4000 * n))
+#define M_BKE_EN_ADDR(n) (M_BKE_REG_BASE(n))
+#define M_BKE_HEALTH_CFG_ADDR(i, n) (M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))
+
+#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK 0x80000000
+#define M_BKE_HEALTH_CFG_AREQPRIO_MASK 0x300
+#define M_BKE_HEALTH_CFG_PRIOLVL_MASK 0x3
+#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT 0x8
+#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f
+
+#define M_BKE_EN_EN_BMASK 0x1
+
+/* NoC QoS */
+#define NOC_QOS_PRIORITYn_ADDR(n) (0x8 + (n * 0x1000))
+#define NOC_QOS_PRIORITY_P1_MASK 0xc
+#define NOC_QOS_PRIORITY_P0_MASK 0x3
+#define NOC_QOS_PRIORITY_P1_SHIFT 0x2
+
+#define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000))
+#define NOC_QOS_MODEn_MASK 0x3
+
+static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
+ struct qcom_icc_qos *qos,
+ int regnum)
+{
+ u32 val;
+ u32 mask;
+
+ val = qos->prio_level;
+ mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;
+
+ val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
+ mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;
+
+ /* LIMITCMDS is not present on M_BKE_HEALTH_3 */
+ if (regnum != 3) {
+ val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
+ mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
+ }
+
+ return regmap_update_bits(qp->regmap,
+ qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
+ mask, val);
+}
+
+static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
{
struct qcom_icc_provider *qp;
struct qcom_icc_node *qn;
struct icc_provider *provider;
- struct icc_node *n;
- u64 sum_bw;
- u64 max_peak_bw;
- u64 rate;
- u32 agg_avg = 0;
- u32 agg_peak = 0;
- int ret, i;
+ u32 mode = NOC_QOS_MODE_BYPASS;
+ u32 val = 0;
+ int i, rc = 0;
qn = src->data;
provider = src->provider;
qp = to_qcom_provider(provider);
- list_for_each_entry(n, &provider->nodes, node_list)
- provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
+ if (qn->qos.qos_mode != -1)
+ mode = qn->qos.qos_mode;
+
+ /* QoS Priority: the QoS Health parameters are only considered
+ * when we are NOT in Bypass Mode.
+ */
+ if (mode != NOC_QOS_MODE_BYPASS) {
+ for (i = 3; i >= 0; i--) {
+ rc = qcom_icc_bimc_set_qos_health(qp,
+ &qn->qos, i);
+ if (rc)
+ return rc;
+ }
- sum_bw = icc_units_to_bps(agg_avg);
- max_peak_bw = icc_units_to_bps(agg_peak);
+ /* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
+ val = 1;
+ }
+
+ return regmap_update_bits(qp->regmap,
+ qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
+ M_BKE_EN_EN_BMASK, val);
+}
+
+static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
+ struct qcom_icc_qos *qos)
+{
+ u32 val;
+ int rc;
+
+ /* Must be updated one at a time, P1 first, P0 last */
+ val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
+ rc = regmap_update_bits(qp->regmap,
+ qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
+ NOC_QOS_PRIORITY_P1_MASK, val);
+ if (rc)
+ return rc;
+
+ return regmap_update_bits(qp->regmap,
+ qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
+ NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
+}
+
+static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
+{
+ struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
+ struct icc_provider *provider;
+ u32 mode = NOC_QOS_MODE_BYPASS;
+ int rc = 0;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ if (qn->qos.qos_port < 0) {
+ dev_dbg(src->provider->dev,
+ "NoC QoS: Skipping %s: vote aggregated on parent.\n",
+ qn->name);
+ return 0;
+ }
+
+ if (qn->qos.qos_mode != -1)
+ mode = qn->qos.qos_mode;
+
+ if (mode == NOC_QOS_MODE_FIXED) {
+ dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
+ qn->name);
+ rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
+ if (rc)
+ return rc;
+ } else if (mode == NOC_QOS_MODE_BYPASS) {
+ dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
+ qn->name);
+ }
+
+ return regmap_update_bits(qp->regmap,
+ qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
+ NOC_QOS_MODEn_MASK, mode);
+}
+
+static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
+{
+ struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
+ struct qcom_icc_node *qn = node->data;
+
+ dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);
- /* send bandwidth request message to the RPM processor */
- if (qn->mas_rpm_id != -1) {
+ if (qp->is_bimc_node)
+ return qcom_icc_set_bimc_qos(node, sum_bw);
+
+ return qcom_icc_set_noc_qos(node, sum_bw);
+}
+
+static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
+{
+ int ret = 0;
+
+ if (mas_rpm_id != -1) {
ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
RPM_BUS_MASTER_REQ,
- qn->mas_rpm_id,
+ mas_rpm_id,
sum_bw);
if (ret) {
pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
- qn->mas_rpm_id, ret);
+ mas_rpm_id, ret);
return ret;
}
}
- if (qn->slv_rpm_id != -1) {
+ if (slv_rpm_id != -1) {
ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
RPM_BUS_SLAVE_REQ,
- qn->slv_rpm_id,
+ slv_rpm_id,
sum_bw);
if (ret) {
pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
- qn->slv_rpm_id, ret);
+ slv_rpm_id, ret);
return ret;
}
}
+ return ret;
+}
+
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ u64 sum_bw;
+ u64 max_peak_bw;
+ u64 rate;
+ u32 agg_avg = 0;
+ u32 agg_peak = 0;
+ int ret, i;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ if (!qn->qos.ap_owned) {
+ /* send bandwidth request message to the RPM processor */
+ ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
+ if (ret)
+ return ret;
+ } else if (qn->qos.qos_mode != -1) {
+ /* set bandwidth directly from the AP */
+ ret = qcom_icc_qos_set(src, sum_bw);
+ if (ret)
+ return ret;
+ }
+
rate = max(sum_bw, max_peak_bw);
do_div(rate, qn->buswidth);
@@ -86,8 +257,11 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
return 0;
}
-int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
- const struct clk_bulk_data *cd)
+static const char * const bus_clocks[] = {
+ "bus", "bus_a",
+};
+
+int qnoc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct qcom_icc_desc *desc;
@@ -97,6 +271,8 @@ int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i;
+ const char * const *cds;
+ int cd_num;
int ret;
/* wait for the RPM proxy */
@@ -110,7 +286,15 @@ int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
qnodes = desc->nodes;
num_nodes = desc->num_nodes;
- qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (desc->num_clocks) {
+ cds = desc->clocks;
+ cd_num = desc->num_clocks;
+ } else {
+ cds = bus_clocks;
+ cd_num = ARRAY_SIZE(bus_clocks);
+ }
+
+ qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
if (!qp)
return -ENOMEM;
@@ -119,12 +303,35 @@ int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
if (!data)
return -ENOMEM;
- qp->bus_clks = devm_kmemdup(dev, cd, cd_size,
- GFP_KERNEL);
- if (!qp->bus_clks)
- return -ENOMEM;
-
+ for (i = 0; i < cd_num; i++)
+ qp->bus_clks[i].id = cds[i];
qp->num_clks = cd_num;
+
+ qp->is_bimc_node = desc->is_bimc_node;
+ qp->qos_offset = desc->qos_offset;
+
+ if (desc->regmap_cfg) {
+ struct resource *res;
+ void __iomem *mmio;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ mmio = devm_ioremap_resource(dev, res);
+
+ if (IS_ERR(mmio)) {
+ dev_err(dev, "Cannot ioremap interconnect bus resource\n");
+ return PTR_ERR(mmio);
+ }
+
+ qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
+ if (IS_ERR(qp->regmap)) {
+ dev_err(dev, "Cannot regmap interconnect bus resource\n");
+ return PTR_ERR(qp->regmap);
+ }
+ }
+
ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
if (ret)
return ret;
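
Aside: two things worth noting in the icc-rpm.c rework. First, the split in qcom_icc_set(): nodes not owned by the AP are voted over SMD to the RPM processor, while ap_owned nodes have their QoS registers programmed directly through the new regmap. Second, every register writer reduces to the same regmap idiom, composing a (mask, value) pair that covers only the owned fields and letting regmap_update_bits() do the read-modify-write:

	/* distilled from qcom_icc_bimc_set_qos_health() above */
	static int example_set_qos_health(struct qcom_icc_provider *qp,
					  const struct qcom_icc_qos *qos,
					  int regnum)
	{
		u32 val = qos->prio_level |
			  (qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT);
		u32 mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK |
			   M_BKE_HEALTH_CFG_AREQPRIO_MASK;

		/* bits outside mask are preserved by the read-modify-write */
		return regmap_update_bits(qp->regmap,
				qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
				mask, val);
	}
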
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
index 79a6f68249c1..f5744de4da19 100644
--- a/drivers/interconnect/qcom/icc-rpm.h
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -9,8 +9,6 @@
#define RPM_BUS_MASTER_REQ 0x73616d62
#define RPM_BUS_SLAVE_REQ 0x766c7362
-#define QCOM_MAX_LINKS 12
-
#define to_qcom_provider(_provider) \
container_of(_provider, struct qcom_icc_provider, provider)
@@ -19,11 +17,35 @@
* @provider: generic interconnect provider
* @bus_clks: the clk_bulk_data table of bus clocks
* @num_clks: the total number of clk_bulk_data entries
+ * @is_bimc_node: indicates whether to use BIMC-specific settings
+ * @qos_offset: offset to QoS registers
+ * @regmap: regmap for QoS registers read/write access
*/
struct qcom_icc_provider {
struct icc_provider provider;
- struct clk_bulk_data *bus_clks;
int num_clks;
+ bool is_bimc_node;
+ struct regmap *regmap;
+ unsigned int qos_offset;
+ struct clk_bulk_data bus_clks[];
+};
+
+/**
+ * struct qcom_icc_qos - Qualcomm specific interconnect QoS parameters
+ * @areq_prio: node requests priority
+ * @prio_level: priority level for bus communication
+ * @limit_commands: activate/deactivate limiter mode during runtime
+ * @ap_owned: indicates if the node is owned by the AP or by the RPM
+ * @qos_mode: default qos mode for this node
+ * @qos_port: qos port number for finding qos registers of this node
+ */
+struct qcom_icc_qos {
+ u32 areq_prio;
+ u32 prio_level;
+ bool limit_commands;
+ bool ap_owned;
+ int qos_mode;
+ int qos_port;
};
/**
@@ -35,39 +57,37 @@ struct qcom_icc_provider {
* @buswidth: width of the interconnect between a node and the bus (bytes)
* @mas_rpm_id: RPM id for devices that are bus masters
* @slv_rpm_id: RPM id for devices that are bus slaves
+ * @qos: NoC QoS setting parameters
* @rate: current bus clock rate in Hz
*/
struct qcom_icc_node {
unsigned char *name;
u16 id;
- u16 links[QCOM_MAX_LINKS];
+ const u16 *links;
u16 num_links;
u16 buswidth;
int mas_rpm_id;
int slv_rpm_id;
+ struct qcom_icc_qos qos;
u64 rate;
};
struct qcom_icc_desc {
struct qcom_icc_node **nodes;
size_t num_nodes;
+ const char * const *clocks;
+ size_t num_clocks;
+ bool is_bimc_node;
+ const struct regmap_config *regmap_cfg;
+ unsigned int qos_offset;
};
-#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
- ...) \
- static struct qcom_icc_node _name = { \
- .name = #_name, \
- .id = _id, \
- .buswidth = _buswidth, \
- .mas_rpm_id = _mas_rpm_id, \
- .slv_rpm_id = _slv_rpm_id, \
- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
- .links = { __VA_ARGS__ }, \
- }
-
+/* Valid for both NoC and BIMC */
+#define NOC_QOS_MODE_INVALID -1
+#define NOC_QOS_MODE_FIXED 0x0
+#define NOC_QOS_MODE_BYPASS 0x2
-int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
- const struct clk_bulk_data *cd);
+int qnoc_probe(struct platform_device *pdev);
int qnoc_remove(struct platform_device *pdev);
#endif
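
Aside: the bus_clks[] flexible array member added to struct qcom_icc_provider pairs with the struct_size() allocation in qnoc_probe(): one devm allocation covers the header plus cd_num trailing clk_bulk_data entries, replacing the separate devm_kmemdup(). The generic shape (function name hypothetical):

	static struct qcom_icc_provider *example_alloc_provider(struct device *dev,
			const char * const *cds, int cd_num)
	{
		struct qcom_icc_provider *qp;
		int i;

		/* struct_size() guards header + n * elem against overflow */
		qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
		if (!qp)
			return NULL;

		for (i = 0; i < cd_num; i++)
			qp->bus_clks[i].id = cds[i];	/* ids point at static strings */
		qp->num_clks = cd_num;

		return qp;
	}
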
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index fc3689c8947a..e3c995b11357 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/of_device.h>
#include <dt-bindings/interconnect/qcom,msm8916.h>
@@ -105,96 +106,1090 @@ enum {
MSM8916_SNOC_PNOC_SLV,
};
-static const struct clk_bulk_data msm8916_bus_clocks[] = {
- { .id = "bus" },
- { .id = "bus_a" },
-};
-
-DEFINE_QNODE(bimc_snoc_mas, MSM8916_BIMC_SNOC_MAS, 8, -1, -1, MSM8916_BIMC_SNOC_SLV);
-DEFINE_QNODE(bimc_snoc_slv, MSM8916_BIMC_SNOC_SLV, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_1);
-DEFINE_QNODE(mas_apss, MSM8916_MASTER_AMPSS_M0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
-DEFINE_QNODE(mas_audio, MSM8916_MASTER_LPASS, 4, -1, -1, MSM8916_PNOC_MAS_0);
-DEFINE_QNODE(mas_blsp_1, MSM8916_MASTER_BLSP_1, 4, -1, -1, MSM8916_PNOC_MAS_1);
-DEFINE_QNODE(mas_dehr, MSM8916_MASTER_DEHR, 4, -1, -1, MSM8916_PNOC_MAS_0);
-DEFINE_QNODE(mas_gfx, MSM8916_MASTER_GRAPHICS_3D, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
-DEFINE_QNODE(mas_jpeg, MSM8916_MASTER_JPEG, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
-DEFINE_QNODE(mas_mdp, MSM8916_MASTER_MDP_PORT0, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
-DEFINE_QNODE(mas_pcnoc_crypto_0, MSM8916_MASTER_CRYPTO_CORE0, 8, -1, -1, MSM8916_PNOC_INT_1);
-DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8916_MASTER_SDCC_1, 8, -1, -1, MSM8916_PNOC_INT_1);
-DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8916_MASTER_SDCC_2, 8, -1, -1, MSM8916_PNOC_INT_1);
-DEFINE_QNODE(mas_qdss_bam, MSM8916_MASTER_QDSS_BAM, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_qdss_etr, MSM8916_MASTER_QDSS_ETR, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, -1, -1, MSM8916_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_spdm, MSM8916_MASTER_SPDM, 4, -1, -1, MSM8916_PNOC_MAS_0);
-DEFINE_QNODE(mas_tcu0, MSM8916_MASTER_TCU0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
-DEFINE_QNODE(mas_tcu1, MSM8916_MASTER_TCU1, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
-DEFINE_QNODE(mas_usb_hs, MSM8916_MASTER_USB_HS, 4, -1, -1, MSM8916_PNOC_MAS_1);
-DEFINE_QNODE(mas_vfe, MSM8916_MASTER_VFE, 16, -1, -1, MSM8916_SNOC_MM_INT_1, MSM8916_SNOC_MM_INT_2);
-DEFINE_QNODE(mas_video, MSM8916_MASTER_VIDEO_P0, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
-DEFINE_QNODE(mm_int_0, MSM8916_SNOC_MM_INT_0, 16, -1, -1, MSM8916_SNOC_MM_INT_BIMC);
-DEFINE_QNODE(mm_int_1, MSM8916_SNOC_MM_INT_1, 16, -1, -1, MSM8916_SNOC_MM_INT_BIMC);
-DEFINE_QNODE(mm_int_2, MSM8916_SNOC_MM_INT_2, 16, -1, -1, MSM8916_SNOC_INT_0);
-DEFINE_QNODE(mm_int_bimc, MSM8916_SNOC_MM_INT_BIMC, 16, -1, -1, MSM8916_SNOC_BIMC_1_MAS);
-DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS, MSM8916_PNOC_SLV_0, MSM8916_PNOC_SLV_1, MSM8916_PNOC_SLV_2, MSM8916_PNOC_SLV_3, MSM8916_PNOC_SLV_4, MSM8916_PNOC_SLV_8, MSM8916_PNOC_SLV_9);
-DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
-DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0);
-DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
-DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 4, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
-DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 4, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
-DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 4, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
-DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 4, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
-DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 4, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
-DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 4, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
-DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 4, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
-DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV);
-DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
-DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
-DEFINE_QNODE(slv_apps_l2, MSM8916_SLAVE_AMPSS_L2, 8, -1, -1, 0);
-DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, -1, 0);
-DEFINE_QNODE(slv_audio, MSM8916_SLAVE_LPASS, 4, -1, -1, 0);
-DEFINE_QNODE(slv_bimc_cfg, MSM8916_SLAVE_BIMC_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_blsp_1, MSM8916_SLAVE_BLSP_1, 4, -1, -1, 0);
-DEFINE_QNODE(slv_boot_rom, MSM8916_SLAVE_BOOT_ROM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_camera_cfg, MSM8916_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, -1, 0);
-DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, -1, 0);
-DEFINE_QNODE(slv_clk_ctl, MSM8916_SLAVE_CLK_CTL, 4, -1, -1, 0);
-DEFINE_QNODE(slv_crypto_0_cfg, MSM8916_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_dehr_cfg, MSM8916_SLAVE_DEHR_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_display_cfg, MSM8916_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_ebi_ch0, MSM8916_SLAVE_EBI_CH0, 8, -1, 0, 0);
-DEFINE_QNODE(slv_gfx_cfg, MSM8916_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_imem_cfg, MSM8916_SLAVE_IMEM_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_imem, MSM8916_SLAVE_IMEM, 8, -1, 26, 0);
-DEFINE_QNODE(slv_mpm, MSM8916_SLAVE_MPM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_msg_ram, MSM8916_SLAVE_MSG_RAM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_mss, MSM8916_SLAVE_MSS, 4, -1, -1, 0);
-DEFINE_QNODE(slv_pdm, MSM8916_SLAVE_PDM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_pmic_arb, MSM8916_SLAVE_PMIC_ARB, 4, -1, -1, 0);
-DEFINE_QNODE(slv_pcnoc_cfg, MSM8916_SLAVE_PNOC_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_prng, MSM8916_SLAVE_PRNG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_qdss_cfg, MSM8916_SLAVE_QDSS_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_qdss_stm, MSM8916_SLAVE_QDSS_STM, 4, -1, 30, 0);
-DEFINE_QNODE(slv_rbcpr_cfg, MSM8916_SLAVE_RBCPR_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_sdcc_1, MSM8916_SLAVE_SDCC_1, 4, -1, -1, 0);
-DEFINE_QNODE(slv_sdcc_2, MSM8916_SLAVE_SDCC_2, 4, -1, -1, 0);
-DEFINE_QNODE(slv_security, MSM8916_SLAVE_SECURITY, 4, -1, -1, 0);
-DEFINE_QNODE(slv_snoc_cfg, MSM8916_SLAVE_SNOC_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_spdm, MSM8916_SLAVE_SPDM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, -1, 0);
-DEFINE_QNODE(slv_tcsr, MSM8916_SLAVE_TCSR, 4, -1, -1, 0);
-DEFINE_QNODE(slv_tlmm, MSM8916_SLAVE_TLMM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_usb_hs, MSM8916_SLAVE_USB_HS, 4, -1, -1, 0);
-DEFINE_QNODE(slv_venus_cfg, MSM8916_SLAVE_VENUS_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(snoc_bimc_0_mas, MSM8916_SNOC_BIMC_0_MAS, 8, 3, -1, MSM8916_SNOC_BIMC_0_SLV);
-DEFINE_QNODE(snoc_bimc_0_slv, MSM8916_SNOC_BIMC_0_SLV, 8, -1, 24, MSM8916_SLAVE_EBI_CH0);
-DEFINE_QNODE(snoc_bimc_1_mas, MSM8916_SNOC_BIMC_1_MAS, 16, -1, -1, MSM8916_SNOC_BIMC_1_SLV);
-DEFINE_QNODE(snoc_bimc_1_slv, MSM8916_SNOC_BIMC_1_SLV, 8, -1, -1, MSM8916_SLAVE_EBI_CH0);
-DEFINE_QNODE(snoc_int_0, MSM8916_SNOC_INT_0, 8, 99, 130, MSM8916_SLAVE_QDSS_STM, MSM8916_SLAVE_IMEM, MSM8916_SNOC_PNOC_MAS);
-DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, -1, -1, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
-DEFINE_QNODE(snoc_int_bimc, MSM8916_SNOC_INT_BIMC, 8, 101, 132, MSM8916_SNOC_BIMC_0_MAS);
-DEFINE_QNODE(snoc_pcnoc_mas, MSM8916_SNOC_PNOC_MAS, 8, -1, -1, MSM8916_SNOC_PNOC_SLV);
-DEFINE_QNODE(snoc_pcnoc_slv, MSM8916_SNOC_PNOC_SLV, 8, -1, -1, MSM8916_PNOC_INT_0);
+static const u16 bimc_snoc_mas_links[] = {
+ MSM8916_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node bimc_snoc_mas = {
+ .name = "bimc_snoc_mas",
+ .id = MSM8916_BIMC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(bimc_snoc_mas_links),
+ .links = bimc_snoc_mas_links,
+};
+
+static const u16 bimc_snoc_slv_links[] = {
+ MSM8916_SNOC_INT_0,
+ MSM8916_SNOC_INT_1
+};
+
+static struct qcom_icc_node bimc_snoc_slv = {
+ .name = "bimc_snoc_slv",
+ .id = MSM8916_BIMC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(bimc_snoc_slv_links),
+ .links = bimc_snoc_slv_links,
+};
+
+static const u16 mas_apss_links[] = {
+ MSM8916_SLAVE_EBI_CH0,
+ MSM8916_BIMC_SNOC_MAS,
+ MSM8916_SLAVE_AMPSS_L2
+};
+
+static struct qcom_icc_node mas_apss = {
+ .name = "mas_apss",
+ .id = MSM8916_MASTER_AMPSS_M0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_apss_links),
+ .links = mas_apss_links,
+};
+
+static const u16 mas_audio_links[] = {
+ MSM8916_PNOC_MAS_0
+};
+
+static struct qcom_icc_node mas_audio = {
+ .name = "mas_audio",
+ .id = MSM8916_MASTER_LPASS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_audio_links),
+ .links = mas_audio_links,
+};
+
+static const u16 mas_blsp_1_links[] = {
+ MSM8916_PNOC_MAS_1
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = MSM8916_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_1_links),
+ .links = mas_blsp_1_links,
+};
+
+static const u16 mas_dehr_links[] = {
+ MSM8916_PNOC_MAS_0
+};
+
+static struct qcom_icc_node mas_dehr = {
+ .name = "mas_dehr",
+ .id = MSM8916_MASTER_DEHR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_dehr_links),
+ .links = mas_dehr_links,
+};
+
+static const u16 mas_gfx_links[] = {
+ MSM8916_SLAVE_EBI_CH0,
+ MSM8916_BIMC_SNOC_MAS,
+ MSM8916_SLAVE_AMPSS_L2
+};
+
+static struct qcom_icc_node mas_gfx = {
+ .name = "mas_gfx",
+ .id = MSM8916_MASTER_GRAPHICS_3D,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_gfx_links),
+ .links = mas_gfx_links,
+};
+
+static const u16 mas_jpeg_links[] = {
+ MSM8916_SNOC_MM_INT_0,
+ MSM8916_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_jpeg = {
+ .name = "mas_jpeg",
+ .id = MSM8916_MASTER_JPEG,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_jpeg_links),
+ .links = mas_jpeg_links,
+};
+
+static const u16 mas_mdp_links[] = {
+ MSM8916_SNOC_MM_INT_0,
+ MSM8916_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_mdp = {
+ .name = "mas_mdp",
+ .id = MSM8916_MASTER_MDP_PORT0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_mdp_links),
+ .links = mas_mdp_links,
+};
+
+static const u16 mas_pcnoc_crypto_0_links[] = {
+ MSM8916_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_pcnoc_crypto_0 = {
+ .name = "mas_pcnoc_crypto_0",
+ .id = MSM8916_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pcnoc_crypto_0_links),
+ .links = mas_pcnoc_crypto_0_links,
+};
+
+static const u16 mas_pcnoc_sdcc_1_links[] = {
+ MSM8916_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_pcnoc_sdcc_1 = {
+ .name = "mas_pcnoc_sdcc_1",
+ .id = MSM8916_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pcnoc_sdcc_1_links),
+ .links = mas_pcnoc_sdcc_1_links,
+};
+
+static const u16 mas_pcnoc_sdcc_2_links[] = {
+ MSM8916_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_pcnoc_sdcc_2 = {
+ .name = "mas_pcnoc_sdcc_2",
+ .id = MSM8916_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pcnoc_sdcc_2_links),
+ .links = mas_pcnoc_sdcc_2_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ MSM8916_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = MSM8916_MASTER_QDSS_BAM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ MSM8916_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = MSM8916_MASTER_QDSS_ETR,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 10,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 mas_snoc_cfg_links[] = {
+ MSM8916_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_snoc_cfg = {
+ .name = "mas_snoc_cfg",
+ .id = MSM8916_MASTER_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cfg_links),
+ .links = mas_snoc_cfg_links,
+};
+
+static const u16 mas_spdm_links[] = {
+ MSM8916_PNOC_MAS_0
+};
+
+static struct qcom_icc_node mas_spdm = {
+ .name = "mas_spdm",
+ .id = MSM8916_MASTER_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_spdm_links),
+ .links = mas_spdm_links,
+};
+
+static const u16 mas_tcu0_links[] = {
+ MSM8916_SLAVE_EBI_CH0,
+ MSM8916_BIMC_SNOC_MAS,
+ MSM8916_SLAVE_AMPSS_L2
+};
+
+static struct qcom_icc_node mas_tcu0 = {
+ .name = "mas_tcu0",
+ .id = MSM8916_MASTER_TCU0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .qos.prio_level = 2,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_tcu0_links),
+ .links = mas_tcu0_links,
+};
+
+static const u16 mas_tcu1_links[] = {
+ MSM8916_SLAVE_EBI_CH0,
+ MSM8916_BIMC_SNOC_MAS,
+ MSM8916_SLAVE_AMPSS_L2
+};
+
+static struct qcom_icc_node mas_tcu1 = {
+ .name = "mas_tcu1",
+ .id = MSM8916_MASTER_TCU1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .qos.prio_level = 2,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_tcu1_links),
+ .links = mas_tcu1_links,
+};
+
+static const u16 mas_usb_hs_links[] = {
+ MSM8916_PNOC_MAS_1
+};
+
+static struct qcom_icc_node mas_usb_hs = {
+ .name = "mas_usb_hs",
+ .id = MSM8916_MASTER_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb_hs_links),
+ .links = mas_usb_hs_links,
+};
+
+static const u16 mas_vfe_links[] = {
+ MSM8916_SNOC_MM_INT_1,
+ MSM8916_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_vfe = {
+ .name = "mas_vfe",
+ .id = MSM8916_MASTER_VFE,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_vfe_links),
+ .links = mas_vfe_links,
+};
+
+static const u16 mas_video_links[] = {
+ MSM8916_SNOC_MM_INT_0,
+ MSM8916_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_video = {
+ .name = "mas_video",
+ .id = MSM8916_MASTER_VIDEO_P0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_video_links),
+ .links = mas_video_links,
+};
+
+static const u16 mm_int_0_links[] = {
+ MSM8916_SNOC_MM_INT_BIMC
+};
+
+static struct qcom_icc_node mm_int_0 = {
+ .name = "mm_int_0",
+ .id = MSM8916_SNOC_MM_INT_0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_0_links),
+ .links = mm_int_0_links,
+};
+
+static const u16 mm_int_1_links[] = {
+ MSM8916_SNOC_MM_INT_BIMC
+};
+
+static struct qcom_icc_node mm_int_1 = {
+ .name = "mm_int_1",
+ .id = MSM8916_SNOC_MM_INT_1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_1_links),
+ .links = mm_int_1_links,
+};
+
+static const u16 mm_int_2_links[] = {
+ MSM8916_SNOC_INT_0
+};
+
+static struct qcom_icc_node mm_int_2 = {
+ .name = "mm_int_2",
+ .id = MSM8916_SNOC_MM_INT_2,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_2_links),
+ .links = mm_int_2_links,
+};
+
+static const u16 mm_int_bimc_links[] = {
+ MSM8916_SNOC_BIMC_1_MAS
+};
+
+static struct qcom_icc_node mm_int_bimc = {
+ .name = "mm_int_bimc",
+ .id = MSM8916_SNOC_MM_INT_BIMC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_bimc_links),
+ .links = mm_int_bimc_links,
+};
+
+static const u16 pcnoc_int_0_links[] = {
+ MSM8916_PNOC_SNOC_MAS,
+ MSM8916_PNOC_SLV_0,
+ MSM8916_PNOC_SLV_1,
+ MSM8916_PNOC_SLV_2,
+ MSM8916_PNOC_SLV_3,
+ MSM8916_PNOC_SLV_4,
+ MSM8916_PNOC_SLV_8,
+ MSM8916_PNOC_SLV_9
+};
+
+static struct qcom_icc_node pcnoc_int_0 = {
+ .name = "pcnoc_int_0",
+ .id = MSM8916_PNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_int_0_links),
+ .links = pcnoc_int_0_links,
+};
+
+static const u16 pcnoc_int_1_links[] = {
+ MSM8916_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node pcnoc_int_1 = {
+ .name = "pcnoc_int_1",
+ .id = MSM8916_PNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_int_1_links),
+ .links = pcnoc_int_1_links,
+};
+
+static const u16 pcnoc_m_0_links[] = {
+ MSM8916_PNOC_INT_0
+};
+
+static struct qcom_icc_node pcnoc_m_0 = {
+ .name = "pcnoc_m_0",
+ .id = MSM8916_PNOC_MAS_0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_m_0_links),
+ .links = pcnoc_m_0_links,
+};
+
+static const u16 pcnoc_m_1_links[] = {
+ MSM8916_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node pcnoc_m_1 = {
+ .name = "pcnoc_m_1",
+ .id = MSM8916_PNOC_MAS_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_m_1_links),
+ .links = pcnoc_m_1_links,
+};
+
+static const u16 pcnoc_s_0_links[] = {
+ MSM8916_SLAVE_CLK_CTL,
+ MSM8916_SLAVE_TLMM,
+ MSM8916_SLAVE_TCSR,
+ MSM8916_SLAVE_SECURITY,
+ MSM8916_SLAVE_MSS
+};
+
+static struct qcom_icc_node pcnoc_s_0 = {
+ .name = "pcnoc_s_0",
+ .id = MSM8916_PNOC_SLV_0,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_0_links),
+ .links = pcnoc_s_0_links,
+};
+
+static const u16 pcnoc_s_1_links[] = {
+ MSM8916_SLAVE_IMEM_CFG,
+ MSM8916_SLAVE_CRYPTO_0_CFG,
+ MSM8916_SLAVE_MSG_RAM,
+ MSM8916_SLAVE_PDM,
+ MSM8916_SLAVE_PRNG
+};
+
+static struct qcom_icc_node pcnoc_s_1 = {
+ .name = "pcnoc_s_1",
+ .id = MSM8916_PNOC_SLV_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_1_links),
+ .links = pcnoc_s_1_links,
+};
+
+static const u16 pcnoc_s_2_links[] = {
+ MSM8916_SLAVE_SPDM,
+ MSM8916_SLAVE_BOOT_ROM,
+ MSM8916_SLAVE_BIMC_CFG,
+ MSM8916_SLAVE_PNOC_CFG,
+ MSM8916_SLAVE_PMIC_ARB
+};
+
+static struct qcom_icc_node pcnoc_s_2 = {
+ .name = "pcnoc_s_2",
+ .id = MSM8916_PNOC_SLV_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_2_links),
+ .links = pcnoc_s_2_links,
+};
+
+static const u16 pcnoc_s_3_links[] = {
+ MSM8916_SLAVE_MPM,
+ MSM8916_SLAVE_SNOC_CFG,
+ MSM8916_SLAVE_RBCPR_CFG,
+ MSM8916_SLAVE_QDSS_CFG,
+ MSM8916_SLAVE_DEHR_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_3 = {
+ .name = "pcnoc_s_3",
+ .id = MSM8916_PNOC_SLV_3,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_3_links),
+ .links = pcnoc_s_3_links,
+};
+
+static const u16 pcnoc_s_4_links[] = {
+ MSM8916_SLAVE_VENUS_CFG,
+ MSM8916_SLAVE_CAMERA_CFG,
+ MSM8916_SLAVE_DISPLAY_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_4 = {
+ .name = "pcnoc_s_4",
+ .id = MSM8916_PNOC_SLV_4,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_4_links),
+ .links = pcnoc_s_4_links,
+};
+
+static const u16 pcnoc_s_8_links[] = {
+ MSM8916_SLAVE_USB_HS,
+ MSM8916_SLAVE_SDCC_1,
+ MSM8916_SLAVE_BLSP_1
+};
+
+static struct qcom_icc_node pcnoc_s_8 = {
+ .name = "pcnoc_s_8",
+ .id = MSM8916_PNOC_SLV_8,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_8_links),
+ .links = pcnoc_s_8_links,
+};
+
+static const u16 pcnoc_s_9_links[] = {
+ MSM8916_SLAVE_SDCC_2,
+ MSM8916_SLAVE_LPASS,
+ MSM8916_SLAVE_GRAPHICS_3D_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_9 = {
+ .name = "pcnoc_s_9",
+ .id = MSM8916_PNOC_SLV_9,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_9_links),
+ .links = pcnoc_s_9_links,
+};
+
+static const u16 pcnoc_snoc_mas_links[] = {
+ MSM8916_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_snoc_mas = {
+ .name = "pcnoc_snoc_mas",
+ .id = MSM8916_PNOC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 29,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_snoc_mas_links),
+ .links = pcnoc_snoc_mas_links,
+};
+
+static const u16 pcnoc_snoc_slv_links[] = {
+ MSM8916_SNOC_INT_0,
+ MSM8916_SNOC_INT_BIMC,
+ MSM8916_SNOC_INT_1
+};
+
+static struct qcom_icc_node pcnoc_snoc_slv = {
+ .name = "pcnoc_snoc_slv",
+ .id = MSM8916_PNOC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 45,
+ .num_links = ARRAY_SIZE(pcnoc_snoc_slv_links),
+ .links = pcnoc_snoc_slv_links,
+};
+
+static const u16 qdss_int_links[] = {
+ MSM8916_SNOC_INT_0,
+ MSM8916_SNOC_INT_BIMC
+};
+
+static struct qcom_icc_node qdss_int = {
+ .name = "qdss_int",
+ .id = MSM8916_SNOC_QDSS_INT,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(qdss_int_links),
+ .links = qdss_int_links,
+};
+
+static struct qcom_icc_node slv_apps_l2 = {
+ .name = "slv_apps_l2",
+ .id = MSM8916_SLAVE_AMPSS_L2,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_apss = {
+ .name = "slv_apss",
+ .id = MSM8916_SLAVE_APSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_audio = {
+ .name = "slv_audio",
+ .id = MSM8916_SLAVE_LPASS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_bimc_cfg = {
+ .name = "slv_bimc_cfg",
+ .id = MSM8916_SLAVE_BIMC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = MSM8916_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_boot_rom = {
+ .name = "slv_boot_rom",
+ .id = MSM8916_SLAVE_BOOT_ROM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_camera_cfg = {
+ .name = "slv_camera_cfg",
+ .id = MSM8916_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_cats_0 = {
+ .name = "slv_cats_0",
+ .id = MSM8916_SLAVE_CATS_128,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_cats_1 = {
+ .name = "slv_cats_1",
+ .id = MSM8916_SLAVE_OCMEM_64,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_clk_ctl = {
+ .name = "slv_clk_ctl",
+ .id = MSM8916_SLAVE_CLK_CTL,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = MSM8916_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_dehr_cfg = {
+ .name = "slv_dehr_cfg",
+ .id = MSM8916_SLAVE_DEHR_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_display_cfg = {
+ .name = "slv_display_cfg",
+ .id = MSM8916_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_ebi_ch0 = {
+ .name = "slv_ebi_ch0",
+ .id = MSM8916_SLAVE_EBI_CH0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static struct qcom_icc_node slv_gfx_cfg = {
+ .name = "slv_gfx_cfg",
+ .id = MSM8916_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_imem_cfg = {
+ .name = "slv_imem_cfg",
+ .id = MSM8916_SLAVE_IMEM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = MSM8916_SLAVE_IMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static struct qcom_icc_node slv_mpm = {
+ .name = "slv_mpm",
+ .id = MSM8916_SLAVE_MPM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_msg_ram = {
+ .name = "slv_msg_ram",
+ .id = MSM8916_SLAVE_MSG_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_mss = {
+ .name = "slv_mss",
+ .id = MSM8916_SLAVE_MSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = MSM8916_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = MSM8916_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_pcnoc_cfg = {
+ .name = "slv_pcnoc_cfg",
+ .id = MSM8916_SLAVE_PNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = MSM8916_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_qdss_cfg = {
+ .name = "slv_qdss_cfg",
+ .id = MSM8916_SLAVE_QDSS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = MSM8916_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_rbcpr_cfg = {
+ .name = "slv_rbcpr_cfg",
+ .id = MSM8916_SLAVE_RBCPR_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = MSM8916_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = MSM8916_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_security = {
+ .name = "slv_security",
+ .id = MSM8916_SLAVE_SECURITY,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = MSM8916_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_spdm = {
+ .name = "slv_spdm",
+ .id = MSM8916_SLAVE_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_srvc_snoc = {
+ .name = "slv_srvc_snoc",
+ .id = MSM8916_SLAVE_SRVC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = MSM8916_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_tlmm = {
+ .name = "slv_tlmm",
+ .id = MSM8916_SLAVE_TLMM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = MSM8916_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = MSM8916_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static const u16 snoc_bimc_0_mas_links[] = {
+ MSM8916_SNOC_BIMC_0_SLV
+};
+
+static struct qcom_icc_node snoc_bimc_0_mas = {
+ .name = "snoc_bimc_0_mas",
+ .id = MSM8916_SNOC_BIMC_0_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(snoc_bimc_0_mas_links),
+ .links = snoc_bimc_0_mas_links,
+};
+
+static const u16 snoc_bimc_0_slv_links[] = {
+ MSM8916_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node snoc_bimc_0_slv = {
+ .name = "snoc_bimc_0_slv",
+ .id = MSM8916_SNOC_BIMC_0_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .num_links = ARRAY_SIZE(snoc_bimc_0_slv_links),
+ .links = snoc_bimc_0_slv_links,
+};
+
+static const u16 snoc_bimc_1_mas_links[] = {
+ MSM8916_SNOC_BIMC_1_SLV
+};
+
+static struct qcom_icc_node snoc_bimc_1_mas = {
+ .name = "snoc_bimc_1_mas",
+ .id = MSM8916_SNOC_BIMC_1_MAS,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_bimc_1_mas_links),
+ .links = snoc_bimc_1_mas_links,
+};
+
+static const u16 snoc_bimc_1_slv_links[] = {
+ MSM8916_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node snoc_bimc_1_slv = {
+ .name = "snoc_bimc_1_slv",
+ .id = MSM8916_SNOC_BIMC_1_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_bimc_1_slv_links),
+ .links = snoc_bimc_1_slv_links,
+};
+
+static const u16 snoc_int_0_links[] = {
+ MSM8916_SLAVE_QDSS_STM,
+ MSM8916_SLAVE_IMEM,
+ MSM8916_SNOC_PNOC_MAS
+};
+
+static struct qcom_icc_node snoc_int_0 = {
+ .name = "snoc_int_0",
+ .id = MSM8916_SNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 99,
+ .slv_rpm_id = 130,
+ .num_links = ARRAY_SIZE(snoc_int_0_links),
+ .links = snoc_int_0_links,
+};
+
+static const u16 snoc_int_1_links[] = {
+ MSM8916_SLAVE_APSS,
+ MSM8916_SLAVE_CATS_128,
+ MSM8916_SLAVE_OCMEM_64
+};
+
+static struct qcom_icc_node snoc_int_1 = {
+ .name = "snoc_int_1",
+ .id = MSM8916_SNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(snoc_int_1_links),
+ .links = snoc_int_1_links,
+};
+
+static const u16 snoc_int_bimc_links[] = {
+ MSM8916_SNOC_BIMC_0_MAS
+};
+
+static struct qcom_icc_node snoc_int_bimc = {
+ .name = "snoc_int_bimc",
+ .id = MSM8916_SNOC_INT_BIMC,
+ .buswidth = 8,
+ .mas_rpm_id = 101,
+ .slv_rpm_id = 132,
+ .num_links = ARRAY_SIZE(snoc_int_bimc_links),
+ .links = snoc_int_bimc_links,
+};
+
+static const u16 snoc_pcnoc_mas_links[] = {
+ MSM8916_SNOC_PNOC_SLV
+};
+
+static struct qcom_icc_node snoc_pcnoc_mas = {
+ .name = "snoc_pcnoc_mas",
+ .id = MSM8916_SNOC_PNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(snoc_pcnoc_mas_links),
+ .links = snoc_pcnoc_mas_links,
+};
+
+static const u16 snoc_pcnoc_slv_links[] = {
+ MSM8916_PNOC_INT_0
+};
+
+static struct qcom_icc_node snoc_pcnoc_slv = {
+ .name = "snoc_pcnoc_slv",
+ .id = MSM8916_SNOC_PNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(snoc_pcnoc_slv_links),
+ .links = snoc_pcnoc_slv_links,
+};
static struct qcom_icc_node *msm8916_snoc_nodes[] = {
[BIMC_SNOC_SLV] = &bimc_snoc_slv,
@@ -225,9 +1220,19 @@ static struct qcom_icc_node *msm8916_snoc_nodes[] = {
[SNOC_QDSS_INT] = &qdss_int,
};
+static const struct regmap_config msm8916_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14000,
+ .fast_io = true,
+};
+
static struct qcom_icc_desc msm8916_snoc = {
.nodes = msm8916_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
+ .regmap_cfg = &msm8916_snoc_regmap_config,
+ .qos_offset = 0x7000,
};
static struct qcom_icc_node *msm8916_bimc_nodes[] = {
@@ -242,9 +1247,20 @@ static struct qcom_icc_node *msm8916_bimc_nodes[] = {
[SNOC_BIMC_1_SLV] = &snoc_bimc_1_slv,
};
+static const struct regmap_config msm8916_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x62000,
+ .fast_io = true,
+};
+
static struct qcom_icc_desc msm8916_bimc = {
.nodes = msm8916_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
+ .is_bimc_node = true,
+ .regmap_cfg = &msm8916_bimc_regmap_config,
+ .qos_offset = 0x8000,
};
static struct qcom_icc_node *msm8916_pcnoc_nodes[] = {
@@ -300,17 +1316,21 @@ static struct qcom_icc_node *msm8916_pcnoc_nodes[] = {
[SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
};
+static const struct regmap_config msm8916_pcnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11000,
+ .fast_io = true,
+};
+
static struct qcom_icc_desc msm8916_pcnoc = {
.nodes = msm8916_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
+ .regmap_cfg = &msm8916_pcnoc_regmap_config,
+ .qos_offset = 0x7000,
};
-static int msm8916_qnoc_probe(struct platform_device *pdev)
-{
- return qnoc_probe(pdev, sizeof(msm8916_bus_clocks),
- ARRAY_SIZE(msm8916_bus_clocks), msm8916_bus_clocks);
-}
-
static const struct of_device_id msm8916_noc_of_match[] = {
{ .compatible = "qcom,msm8916-bimc", .data = &msm8916_bimc },
{ .compatible = "qcom,msm8916-pcnoc", .data = &msm8916_pcnoc },
@@ -320,7 +1340,7 @@ static const struct of_device_id msm8916_noc_of_match[] = {
MODULE_DEVICE_TABLE(of, msm8916_noc_of_match);
static struct platform_driver msm8916_noc_driver = {
- .probe = msm8916_qnoc_probe,
+ .probe = qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-msm8916",
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
index 20f31a1b4192..16272a477bd8 100644
--- a/drivers/interconnect/qcom/msm8939.c
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/of_device.h>
#include <dt-bindings/interconnect/qcom,msm8939.h>
@@ -110,100 +111,1145 @@ enum {
MSM8939_SNOC_PNOC_SLV,
};
-static const struct clk_bulk_data msm8939_bus_clocks[] = {
- { .id = "bus" },
- { .id = "bus_a" },
-};
-
-DEFINE_QNODE(bimc_snoc_mas, MSM8939_BIMC_SNOC_MAS, 8, -1, -1, MSM8939_BIMC_SNOC_SLV);
-DEFINE_QNODE(bimc_snoc_slv, MSM8939_BIMC_SNOC_SLV, 16, -1, 2, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_1);
-DEFINE_QNODE(mas_apss, MSM8939_MASTER_AMPSS_M0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
-DEFINE_QNODE(mas_audio, MSM8939_MASTER_LPASS, 4, -1, -1, MSM8939_PNOC_MAS_0);
-DEFINE_QNODE(mas_blsp_1, MSM8939_MASTER_BLSP_1, 4, -1, -1, MSM8939_PNOC_MAS_1);
-DEFINE_QNODE(mas_dehr, MSM8939_MASTER_DEHR, 4, -1, -1, MSM8939_PNOC_MAS_0);
-DEFINE_QNODE(mas_gfx, MSM8939_MASTER_GRAPHICS_3D, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
-DEFINE_QNODE(mas_jpeg, MSM8939_MASTER_JPEG, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
-DEFINE_QNODE(mas_mdp0, MSM8939_MASTER_MDP_PORT0, 16, -1, -1, MSM8939_SNOC_MM_INT_1, MSM8939_SNOC_MM_INT_2);
-DEFINE_QNODE(mas_mdp1, MSM8939_MASTER_MDP_PORT1, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
-DEFINE_QNODE(mas_cpp, MSM8939_MASTER_CPP, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
-DEFINE_QNODE(mas_pcnoc_crypto_0, MSM8939_MASTER_CRYPTO_CORE0, 8, -1, -1, MSM8939_PNOC_INT_1);
-DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8939_MASTER_SDCC_1, 8, -1, -1, MSM8939_PNOC_INT_1);
-DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8939_MASTER_SDCC_2, 8, -1, -1, MSM8939_PNOC_INT_1);
-DEFINE_QNODE(mas_qdss_bam, MSM8939_MASTER_QDSS_BAM, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_qdss_etr, MSM8939_MASTER_QDSS_ETR, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, -1, -1, MSM8939_SLAVE_SRVC_SNOC);
-DEFINE_QNODE(mas_spdm, MSM8939_MASTER_SPDM, 4, -1, -1, MSM8939_PNOC_MAS_0);
-DEFINE_QNODE(mas_tcu0, MSM8939_MASTER_TCU0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
-DEFINE_QNODE(mas_usb_hs1, MSM8939_MASTER_USB_HS1, 4, -1, -1, MSM8939_PNOC_MAS_1);
-DEFINE_QNODE(mas_usb_hs2, MSM8939_MASTER_USB_HS2, 4, -1, -1, MSM8939_PNOC_MAS_1);
-DEFINE_QNODE(mas_vfe, MSM8939_MASTER_VFE, 16, -1, -1, MSM8939_SNOC_MM_INT_1, MSM8939_SNOC_MM_INT_2);
-DEFINE_QNODE(mas_video, MSM8939_MASTER_VIDEO_P0, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
-DEFINE_QNODE(mm_int_0, MSM8939_SNOC_MM_INT_0, 16, -1, -1, MSM8939_SNOC_BIMC_2_MAS);
-DEFINE_QNODE(mm_int_1, MSM8939_SNOC_MM_INT_1, 16, -1, -1, MSM8939_SNOC_BIMC_1_MAS);
-DEFINE_QNODE(mm_int_2, MSM8939_SNOC_MM_INT_2, 16, -1, -1, MSM8939_SNOC_INT_0);
-DEFINE_QNODE(pcnoc_int_0, MSM8939_PNOC_INT_0, 8, -1, -1, MSM8939_PNOC_SNOC_MAS, MSM8939_PNOC_SLV_0, MSM8939_PNOC_SLV_1, MSM8939_PNOC_SLV_2, MSM8939_PNOC_SLV_3, MSM8939_PNOC_SLV_4, MSM8939_PNOC_SLV_8, MSM8939_PNOC_SLV_9);
-DEFINE_QNODE(pcnoc_int_1, MSM8939_PNOC_INT_1, 8, -1, -1, MSM8939_PNOC_SNOC_MAS);
-DEFINE_QNODE(pcnoc_m_0, MSM8939_PNOC_MAS_0, 8, -1, -1, MSM8939_PNOC_INT_0);
-DEFINE_QNODE(pcnoc_m_1, MSM8939_PNOC_MAS_1, 8, -1, -1, MSM8939_PNOC_SNOC_MAS);
-DEFINE_QNODE(pcnoc_s_0, MSM8939_PNOC_SLV_0, 4, -1, -1, MSM8939_SLAVE_CLK_CTL, MSM8939_SLAVE_TLMM, MSM8939_SLAVE_TCSR, MSM8939_SLAVE_SECURITY, MSM8939_SLAVE_MSS);
-DEFINE_QNODE(pcnoc_s_1, MSM8939_PNOC_SLV_1, 4, -1, -1, MSM8939_SLAVE_IMEM_CFG, MSM8939_SLAVE_CRYPTO_0_CFG, MSM8939_SLAVE_MSG_RAM, MSM8939_SLAVE_PDM, MSM8939_SLAVE_PRNG);
-DEFINE_QNODE(pcnoc_s_2, MSM8939_PNOC_SLV_2, 4, -1, -1, MSM8939_SLAVE_SPDM, MSM8939_SLAVE_BOOT_ROM, MSM8939_SLAVE_BIMC_CFG, MSM8939_SLAVE_PNOC_CFG, MSM8939_SLAVE_PMIC_ARB);
-DEFINE_QNODE(pcnoc_s_3, MSM8939_PNOC_SLV_3, 4, -1, -1, MSM8939_SLAVE_MPM, MSM8939_SLAVE_SNOC_CFG, MSM8939_SLAVE_RBCPR_CFG, MSM8939_SLAVE_QDSS_CFG, MSM8939_SLAVE_DEHR_CFG);
-DEFINE_QNODE(pcnoc_s_4, MSM8939_PNOC_SLV_4, 4, -1, -1, MSM8939_SLAVE_VENUS_CFG, MSM8939_SLAVE_CAMERA_CFG, MSM8939_SLAVE_DISPLAY_CFG);
-DEFINE_QNODE(pcnoc_s_8, MSM8939_PNOC_SLV_8, 4, -1, -1, MSM8939_SLAVE_USB_HS1, MSM8939_SLAVE_SDCC_1, MSM8939_SLAVE_BLSP_1);
-DEFINE_QNODE(pcnoc_s_9, MSM8939_PNOC_SLV_9, 4, -1, -1, MSM8939_SLAVE_SDCC_2, MSM8939_SLAVE_LPASS, MSM8939_SLAVE_USB_HS2);
-DEFINE_QNODE(pcnoc_snoc_mas, MSM8939_PNOC_SNOC_MAS, 8, 29, -1, MSM8939_PNOC_SNOC_SLV);
-DEFINE_QNODE(pcnoc_snoc_slv, MSM8939_PNOC_SNOC_SLV, 8, -1, 45, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC, MSM8939_SNOC_INT_1);
-DEFINE_QNODE(qdss_int, MSM8939_SNOC_QDSS_INT, 8, -1, -1, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC);
-DEFINE_QNODE(slv_apps_l2, MSM8939_SLAVE_AMPSS_L2, 16, -1, -1, 0);
-DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, -1, 0);
-DEFINE_QNODE(slv_audio, MSM8939_SLAVE_LPASS, 4, -1, -1, 0);
-DEFINE_QNODE(slv_bimc_cfg, MSM8939_SLAVE_BIMC_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_blsp_1, MSM8939_SLAVE_BLSP_1, 4, -1, -1, 0);
-DEFINE_QNODE(slv_boot_rom, MSM8939_SLAVE_BOOT_ROM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_camera_cfg, MSM8939_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, -1, 0);
-DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, -1, 0);
-DEFINE_QNODE(slv_clk_ctl, MSM8939_SLAVE_CLK_CTL, 4, -1, -1, 0);
-DEFINE_QNODE(slv_crypto_0_cfg, MSM8939_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_dehr_cfg, MSM8939_SLAVE_DEHR_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_display_cfg, MSM8939_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_ebi_ch0, MSM8939_SLAVE_EBI_CH0, 16, -1, 0, 0);
-DEFINE_QNODE(slv_gfx_cfg, MSM8939_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_imem_cfg, MSM8939_SLAVE_IMEM_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_imem, MSM8939_SLAVE_IMEM, 8, -1, 26, 0);
-DEFINE_QNODE(slv_mpm, MSM8939_SLAVE_MPM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_msg_ram, MSM8939_SLAVE_MSG_RAM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_mss, MSM8939_SLAVE_MSS, 4, -1, -1, 0);
-DEFINE_QNODE(slv_pdm, MSM8939_SLAVE_PDM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_pmic_arb, MSM8939_SLAVE_PMIC_ARB, 4, -1, -1, 0);
-DEFINE_QNODE(slv_pcnoc_cfg, MSM8939_SLAVE_PNOC_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_prng, MSM8939_SLAVE_PRNG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_qdss_cfg, MSM8939_SLAVE_QDSS_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_qdss_stm, MSM8939_SLAVE_QDSS_STM, 4, -1, 30, 0);
-DEFINE_QNODE(slv_rbcpr_cfg, MSM8939_SLAVE_RBCPR_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_sdcc_1, MSM8939_SLAVE_SDCC_1, 4, -1, -1, 0);
-DEFINE_QNODE(slv_sdcc_2, MSM8939_SLAVE_SDCC_2, 4, -1, -1, 0);
-DEFINE_QNODE(slv_security, MSM8939_SLAVE_SECURITY, 4, -1, -1, 0);
-DEFINE_QNODE(slv_snoc_cfg, MSM8939_SLAVE_SNOC_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_spdm, MSM8939_SLAVE_SPDM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, -1, 0);
-DEFINE_QNODE(slv_tcsr, MSM8939_SLAVE_TCSR, 4, -1, -1, 0);
-DEFINE_QNODE(slv_tlmm, MSM8939_SLAVE_TLMM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_usb_hs1, MSM8939_SLAVE_USB_HS1, 4, -1, -1, 0);
-DEFINE_QNODE(slv_usb_hs2, MSM8939_SLAVE_USB_HS2, 4, -1, -1, 0);
-DEFINE_QNODE(slv_venus_cfg, MSM8939_SLAVE_VENUS_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_0_SLV);
-DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
-DEFINE_QNODE(snoc_bimc_1_mas, MSM8939_SNOC_BIMC_1_MAS, 16, 76, -1, MSM8939_SNOC_BIMC_1_SLV);
-DEFINE_QNODE(snoc_bimc_1_slv, MSM8939_SNOC_BIMC_1_SLV, 16, -1, 104, MSM8939_SLAVE_EBI_CH0);
-DEFINE_QNODE(snoc_bimc_2_mas, MSM8939_SNOC_BIMC_2_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_2_SLV);
-DEFINE_QNODE(snoc_bimc_2_slv, MSM8939_SNOC_BIMC_2_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
-DEFINE_QNODE(snoc_int_0, MSM8939_SNOC_INT_0, 8, 99, 130, MSM8939_SLAVE_QDSS_STM, MSM8939_SLAVE_IMEM, MSM8939_SNOC_PNOC_MAS);
-DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, -1, -1, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
-DEFINE_QNODE(snoc_int_bimc, MSM8939_SNOC_INT_BIMC, 8, 101, 132, MSM8939_SNOC_BIMC_1_MAS);
-DEFINE_QNODE(snoc_pcnoc_mas, MSM8939_SNOC_PNOC_MAS, 8, -1, -1, MSM8939_SNOC_PNOC_SLV);
-DEFINE_QNODE(snoc_pcnoc_slv, MSM8939_SNOC_PNOC_SLV, 8, -1, -1, MSM8939_PNOC_INT_0);
+static const u16 bimc_snoc_mas_links[] = {
+ MSM8939_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node bimc_snoc_mas = {
+ .name = "bimc_snoc_mas",
+ .id = MSM8939_BIMC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(bimc_snoc_mas_links),
+ .links = bimc_snoc_mas_links,
+};
+
+static const u16 bimc_snoc_slv_links[] = {
+ MSM8939_SNOC_INT_0,
+ MSM8939_SNOC_INT_1
+};
+
+static struct qcom_icc_node bimc_snoc_slv = {
+ .name = "bimc_snoc_slv",
+ .id = MSM8939_BIMC_SNOC_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(bimc_snoc_slv_links),
+ .links = bimc_snoc_slv_links,
+};
+
+static const u16 mas_apss_links[] = {
+ MSM8939_SLAVE_EBI_CH0,
+ MSM8939_BIMC_SNOC_MAS,
+ MSM8939_SLAVE_AMPSS_L2
+};
+
+static struct qcom_icc_node mas_apss = {
+ .name = "mas_apss",
+ .id = MSM8939_MASTER_AMPSS_M0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_apss_links),
+ .links = mas_apss_links,
+};
+
+static const u16 mas_audio_links[] = {
+ MSM8939_PNOC_MAS_0
+};
+
+static struct qcom_icc_node mas_audio = {
+ .name = "mas_audio",
+ .id = MSM8939_MASTER_LPASS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_audio_links),
+ .links = mas_audio_links,
+};
+
+static const u16 mas_blsp_1_links[] = {
+ MSM8939_PNOC_MAS_1
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = MSM8939_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_1_links),
+ .links = mas_blsp_1_links,
+};
+
+static const u16 mas_dehr_links[] = {
+ MSM8939_PNOC_MAS_0
+};
+
+static struct qcom_icc_node mas_dehr = {
+ .name = "mas_dehr",
+ .id = MSM8939_MASTER_DEHR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_dehr_links),
+ .links = mas_dehr_links,
+};
+
+static const u16 mas_gfx_links[] = {
+ MSM8939_SLAVE_EBI_CH0,
+ MSM8939_BIMC_SNOC_MAS,
+ MSM8939_SLAVE_AMPSS_L2
+};
+
+static struct qcom_icc_node mas_gfx = {
+ .name = "mas_gfx",
+ .id = MSM8939_MASTER_GRAPHICS_3D,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_gfx_links),
+ .links = mas_gfx_links,
+};
+
+static const u16 mas_jpeg_links[] = {
+ MSM8939_SNOC_MM_INT_0,
+ MSM8939_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_jpeg = {
+ .name = "mas_jpeg",
+ .id = MSM8939_MASTER_JPEG,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_jpeg_links),
+ .links = mas_jpeg_links,
+};
+
+static const u16 mas_mdp0_links[] = {
+ MSM8939_SNOC_MM_INT_1,
+ MSM8939_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_mdp0 = {
+ .name = "mas_mdp0",
+ .id = MSM8939_MASTER_MDP_PORT0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_mdp0_links),
+ .links = mas_mdp0_links,
+};
+
+static const u16 mas_mdp1_links[] = {
+ MSM8939_SNOC_MM_INT_0,
+ MSM8939_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_mdp1 = {
+ .name = "mas_mdp1",
+ .id = MSM8939_MASTER_MDP_PORT1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 13,
+ .num_links = ARRAY_SIZE(mas_mdp1_links),
+ .links = mas_mdp1_links,
+};
+
+static const u16 mas_cpp_links[] = {
+ MSM8939_SNOC_MM_INT_0,
+ MSM8939_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_cpp = {
+ .name = "mas_cpp",
+ .id = MSM8939_MASTER_CPP,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 12,
+ .num_links = ARRAY_SIZE(mas_cpp_links),
+ .links = mas_cpp_links,
+};
+
+static const u16 mas_pcnoc_crypto_0_links[] = {
+ MSM8939_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_pcnoc_crypto_0 = {
+ .name = "mas_pcnoc_crypto_0",
+ .id = MSM8939_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pcnoc_crypto_0_links),
+ .links = mas_pcnoc_crypto_0_links,
+};
+
+static const u16 mas_pcnoc_sdcc_1_links[] = {
+ MSM8939_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_pcnoc_sdcc_1 = {
+ .name = "mas_pcnoc_sdcc_1",
+ .id = MSM8939_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pcnoc_sdcc_1_links),
+ .links = mas_pcnoc_sdcc_1_links,
+};
+
+static const u16 mas_pcnoc_sdcc_2_links[] = {
+ MSM8939_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_pcnoc_sdcc_2 = {
+ .name = "mas_pcnoc_sdcc_2",
+ .id = MSM8939_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pcnoc_sdcc_2_links),
+ .links = mas_pcnoc_sdcc_2_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ MSM8939_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = MSM8939_MASTER_QDSS_BAM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ MSM8939_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = MSM8939_MASTER_QDSS_ETR,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 10,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 mas_snoc_cfg_links[] = {
+ MSM8939_SLAVE_SRVC_SNOC
+};
+
+static struct qcom_icc_node mas_snoc_cfg = {
+ .name = "mas_snoc_cfg",
+ .id = MSM8939_MASTER_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cfg_links),
+ .links = mas_snoc_cfg_links,
+};
+
+static const u16 mas_spdm_links[] = {
+ MSM8939_PNOC_MAS_0
+};
+
+static struct qcom_icc_node mas_spdm = {
+ .name = "mas_spdm",
+ .id = MSM8939_MASTER_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_spdm_links),
+ .links = mas_spdm_links,
+};
+
+static const u16 mas_tcu0_links[] = {
+ MSM8939_SLAVE_EBI_CH0,
+ MSM8939_BIMC_SNOC_MAS,
+ MSM8939_SLAVE_AMPSS_L2
+};
+
+static struct qcom_icc_node mas_tcu0 = {
+ .name = "mas_tcu0",
+ .id = MSM8939_MASTER_TCU0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .qos.prio_level = 2,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_tcu0_links),
+ .links = mas_tcu0_links,
+};
+
+static const u16 mas_usb_hs1_links[] = {
+ MSM8939_PNOC_MAS_1
+};
+
+static struct qcom_icc_node mas_usb_hs1 = {
+ .name = "mas_usb_hs1",
+ .id = MSM8939_MASTER_USB_HS1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb_hs1_links),
+ .links = mas_usb_hs1_links,
+};
+
+static const u16 mas_usb_hs2_links[] = {
+ MSM8939_PNOC_MAS_1
+};
+
+static struct qcom_icc_node mas_usb_hs2 = {
+ .name = "mas_usb_hs2",
+ .id = MSM8939_MASTER_USB_HS2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb_hs2_links),
+ .links = mas_usb_hs2_links,
+};
+
+static const u16 mas_vfe_links[] = {
+ MSM8939_SNOC_MM_INT_1,
+ MSM8939_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_vfe = {
+ .name = "mas_vfe",
+ .id = MSM8939_MASTER_VFE,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_vfe_links),
+ .links = mas_vfe_links,
+};
+
+static const u16 mas_video_links[] = {
+ MSM8939_SNOC_MM_INT_0,
+ MSM8939_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_video = {
+ .name = "mas_video",
+ .id = MSM8939_MASTER_VIDEO_P0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_video_links),
+ .links = mas_video_links,
+};
+
+static const u16 mm_int_0_links[] = {
+ MSM8939_SNOC_BIMC_2_MAS
+};
+
+static struct qcom_icc_node mm_int_0 = {
+ .name = "mm_int_0",
+ .id = MSM8939_SNOC_MM_INT_0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_0_links),
+ .links = mm_int_0_links,
+};
+
+static const u16 mm_int_1_links[] = {
+ MSM8939_SNOC_BIMC_1_MAS
+};
+
+static struct qcom_icc_node mm_int_1 = {
+ .name = "mm_int_1",
+ .id = MSM8939_SNOC_MM_INT_1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_1_links),
+ .links = mm_int_1_links,
+};
+
+static const u16 mm_int_2_links[] = {
+ MSM8939_SNOC_INT_0
+};
+
+static struct qcom_icc_node mm_int_2 = {
+ .name = "mm_int_2",
+ .id = MSM8939_SNOC_MM_INT_2,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_2_links),
+ .links = mm_int_2_links,
+};
+
+static const u16 pcnoc_int_0_links[] = {
+ MSM8939_PNOC_SNOC_MAS,
+ MSM8939_PNOC_SLV_0,
+ MSM8939_PNOC_SLV_1,
+ MSM8939_PNOC_SLV_2,
+ MSM8939_PNOC_SLV_3,
+ MSM8939_PNOC_SLV_4,
+ MSM8939_PNOC_SLV_8,
+ MSM8939_PNOC_SLV_9
+};
+
+static struct qcom_icc_node pcnoc_int_0 = {
+ .name = "pcnoc_int_0",
+ .id = MSM8939_PNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_int_0_links),
+ .links = pcnoc_int_0_links,
+};
+
+static const u16 pcnoc_int_1_links[] = {
+ MSM8939_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node pcnoc_int_1 = {
+ .name = "pcnoc_int_1",
+ .id = MSM8939_PNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_int_1_links),
+ .links = pcnoc_int_1_links,
+};
+
+static const u16 pcnoc_m_0_links[] = {
+ MSM8939_PNOC_INT_0
+};
+
+static struct qcom_icc_node pcnoc_m_0 = {
+ .name = "pcnoc_m_0",
+ .id = MSM8939_PNOC_MAS_0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_m_0_links),
+ .links = pcnoc_m_0_links,
+};
+
+static const u16 pcnoc_m_1_links[] = {
+ MSM8939_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node pcnoc_m_1 = {
+ .name = "pcnoc_m_1",
+ .id = MSM8939_PNOC_MAS_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_m_1_links),
+ .links = pcnoc_m_1_links,
+};
+
+static const u16 pcnoc_s_0_links[] = {
+ MSM8939_SLAVE_CLK_CTL,
+ MSM8939_SLAVE_TLMM,
+ MSM8939_SLAVE_TCSR,
+ MSM8939_SLAVE_SECURITY,
+ MSM8939_SLAVE_MSS
+};
+
+static struct qcom_icc_node pcnoc_s_0 = {
+ .name = "pcnoc_s_0",
+ .id = MSM8939_PNOC_SLV_0,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_0_links),
+ .links = pcnoc_s_0_links,
+};
+
+static const u16 pcnoc_s_1_links[] = {
+ MSM8939_SLAVE_IMEM_CFG,
+ MSM8939_SLAVE_CRYPTO_0_CFG,
+ MSM8939_SLAVE_MSG_RAM,
+ MSM8939_SLAVE_PDM,
+ MSM8939_SLAVE_PRNG
+};
+
+static struct qcom_icc_node pcnoc_s_1 = {
+ .name = "pcnoc_s_1",
+ .id = MSM8939_PNOC_SLV_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_1_links),
+ .links = pcnoc_s_1_links,
+};
+
+static const u16 pcnoc_s_2_links[] = {
+ MSM8939_SLAVE_SPDM,
+ MSM8939_SLAVE_BOOT_ROM,
+ MSM8939_SLAVE_BIMC_CFG,
+ MSM8939_SLAVE_PNOC_CFG,
+ MSM8939_SLAVE_PMIC_ARB
+};
+
+static struct qcom_icc_node pcnoc_s_2 = {
+ .name = "pcnoc_s_2",
+ .id = MSM8939_PNOC_SLV_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_2_links),
+ .links = pcnoc_s_2_links,
+};
+
+static const u16 pcnoc_s_3_links[] = {
+ MSM8939_SLAVE_MPM,
+ MSM8939_SLAVE_SNOC_CFG,
+ MSM8939_SLAVE_RBCPR_CFG,
+ MSM8939_SLAVE_QDSS_CFG,
+ MSM8939_SLAVE_DEHR_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_3 = {
+ .name = "pcnoc_s_3",
+ .id = MSM8939_PNOC_SLV_3,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_3_links),
+ .links = pcnoc_s_3_links,
+};
+
+static const u16 pcnoc_s_4_links[] = {
+ MSM8939_SLAVE_VENUS_CFG,
+ MSM8939_SLAVE_CAMERA_CFG,
+ MSM8939_SLAVE_DISPLAY_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_4 = {
+ .name = "pcnoc_s_4",
+ .id = MSM8939_PNOC_SLV_4,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_4_links),
+ .links = pcnoc_s_4_links,
+};
+
+static const u16 pcnoc_s_8_links[] = {
+ MSM8939_SLAVE_USB_HS1,
+ MSM8939_SLAVE_SDCC_1,
+ MSM8939_SLAVE_BLSP_1
+};
+
+static struct qcom_icc_node pcnoc_s_8 = {
+ .name = "pcnoc_s_8",
+ .id = MSM8939_PNOC_SLV_8,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_8_links),
+ .links = pcnoc_s_8_links,
+};
+
+static const u16 pcnoc_s_9_links[] = {
+ MSM8939_SLAVE_SDCC_2,
+ MSM8939_SLAVE_LPASS,
+ MSM8939_SLAVE_USB_HS2
+};
+
+static struct qcom_icc_node pcnoc_s_9 = {
+ .name = "pcnoc_s_9",
+ .id = MSM8939_PNOC_SLV_9,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_9_links),
+ .links = pcnoc_s_9_links,
+};
+
+static const u16 pcnoc_snoc_mas_links[] = {
+ MSM8939_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_snoc_mas = {
+ .name = "pcnoc_snoc_mas",
+ .id = MSM8939_PNOC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 29,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_snoc_mas_links),
+ .links = pcnoc_snoc_mas_links,
+};
+
+static const u16 pcnoc_snoc_slv_links[] = {
+ MSM8939_SNOC_INT_0,
+ MSM8939_SNOC_INT_BIMC,
+ MSM8939_SNOC_INT_1
+};
+
+static struct qcom_icc_node pcnoc_snoc_slv = {
+ .name = "pcnoc_snoc_slv",
+ .id = MSM8939_PNOC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 45,
+ .num_links = ARRAY_SIZE(pcnoc_snoc_slv_links),
+ .links = pcnoc_snoc_slv_links,
+};
+
+static const u16 qdss_int_links[] = {
+ MSM8939_SNOC_INT_0,
+ MSM8939_SNOC_INT_BIMC
+};
+
+static struct qcom_icc_node qdss_int = {
+ .name = "qdss_int",
+ .id = MSM8939_SNOC_QDSS_INT,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(qdss_int_links),
+ .links = qdss_int_links,
+};
+
+static struct qcom_icc_node slv_apps_l2 = {
+ .name = "slv_apps_l2",
+ .id = MSM8939_SLAVE_AMPSS_L2,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_apss = {
+ .name = "slv_apss",
+ .id = MSM8939_SLAVE_APSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_audio = {
+ .name = "slv_audio",
+ .id = MSM8939_SLAVE_LPASS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_bimc_cfg = {
+ .name = "slv_bimc_cfg",
+ .id = MSM8939_SLAVE_BIMC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = MSM8939_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_boot_rom = {
+ .name = "slv_boot_rom",
+ .id = MSM8939_SLAVE_BOOT_ROM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_camera_cfg = {
+ .name = "slv_camera_cfg",
+ .id = MSM8939_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_cats_0 = {
+ .name = "slv_cats_0",
+ .id = MSM8939_SLAVE_CATS_128,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_cats_1 = {
+ .name = "slv_cats_1",
+ .id = MSM8939_SLAVE_OCMEM_64,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_clk_ctl = {
+ .name = "slv_clk_ctl",
+ .id = MSM8939_SLAVE_CLK_CTL,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = MSM8939_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_dehr_cfg = {
+ .name = "slv_dehr_cfg",
+ .id = MSM8939_SLAVE_DEHR_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_display_cfg = {
+ .name = "slv_display_cfg",
+ .id = MSM8939_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_ebi_ch0 = {
+ .name = "slv_ebi_ch0",
+ .id = MSM8939_SLAVE_EBI_CH0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static struct qcom_icc_node slv_gfx_cfg = {
+ .name = "slv_gfx_cfg",
+ .id = MSM8939_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_imem_cfg = {
+ .name = "slv_imem_cfg",
+ .id = MSM8939_SLAVE_IMEM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = MSM8939_SLAVE_IMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static struct qcom_icc_node slv_mpm = {
+ .name = "slv_mpm",
+ .id = MSM8939_SLAVE_MPM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_msg_ram = {
+ .name = "slv_msg_ram",
+ .id = MSM8939_SLAVE_MSG_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_mss = {
+ .name = "slv_mss",
+ .id = MSM8939_SLAVE_MSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = MSM8939_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = MSM8939_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_pcnoc_cfg = {
+ .name = "slv_pcnoc_cfg",
+ .id = MSM8939_SLAVE_PNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = MSM8939_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_qdss_cfg = {
+ .name = "slv_qdss_cfg",
+ .id = MSM8939_SLAVE_QDSS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = MSM8939_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_rbcpr_cfg = {
+ .name = "slv_rbcpr_cfg",
+ .id = MSM8939_SLAVE_RBCPR_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = MSM8939_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = MSM8939_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_security = {
+ .name = "slv_security",
+ .id = MSM8939_SLAVE_SECURITY,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = MSM8939_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_spdm = {
+ .name = "slv_spdm",
+ .id = MSM8939_SLAVE_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_srvc_snoc = {
+ .name = "slv_srvc_snoc",
+ .id = MSM8939_SLAVE_SRVC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = MSM8939_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_tlmm = {
+ .name = "slv_tlmm",
+ .id = MSM8939_SLAVE_TLMM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_usb_hs1 = {
+ .name = "slv_usb_hs1",
+ .id = MSM8939_SLAVE_USB_HS1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_usb_hs2 = {
+ .name = "slv_usb_hs2",
+ .id = MSM8939_SLAVE_USB_HS2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = MSM8939_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static const u16 snoc_bimc_0_mas_links[] = {
+ MSM8939_SNOC_BIMC_0_SLV
+};
+
+static struct qcom_icc_node snoc_bimc_0_mas = {
+ .name = "snoc_bimc_0_mas",
+ .id = MSM8939_SNOC_BIMC_0_MAS,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_bimc_0_mas_links),
+ .links = snoc_bimc_0_mas_links,
+};
+
+static const u16 snoc_bimc_0_slv_links[] = {
+ MSM8939_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node snoc_bimc_0_slv = {
+ .name = "snoc_bimc_0_slv",
+ .id = MSM8939_SNOC_BIMC_0_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_bimc_0_slv_links),
+ .links = snoc_bimc_0_slv_links,
+};
+
+static const u16 snoc_bimc_1_mas_links[] = {
+ MSM8939_SNOC_BIMC_1_SLV
+};
+
+static struct qcom_icc_node snoc_bimc_1_mas = {
+ .name = "snoc_bimc_1_mas",
+ .id = MSM8939_SNOC_BIMC_1_MAS,
+ .buswidth = 16,
+ .mas_rpm_id = 76,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(snoc_bimc_1_mas_links),
+ .links = snoc_bimc_1_mas_links,
+};
+
+static const u16 snoc_bimc_1_slv_links[] = {
+ MSM8939_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node snoc_bimc_1_slv = {
+ .name = "snoc_bimc_1_slv",
+ .id = MSM8939_SNOC_BIMC_1_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 104,
+ .num_links = ARRAY_SIZE(snoc_bimc_1_slv_links),
+ .links = snoc_bimc_1_slv_links,
+};
+
+static const u16 snoc_bimc_2_mas_links[] = {
+ MSM8939_SNOC_BIMC_2_SLV
+};
+
+static struct qcom_icc_node snoc_bimc_2_mas = {
+ .name = "snoc_bimc_2_mas",
+ .id = MSM8939_SNOC_BIMC_2_MAS,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_bimc_2_mas_links),
+ .links = snoc_bimc_2_mas_links,
+};
+
+static const u16 snoc_bimc_2_slv_links[] = {
+ MSM8939_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node snoc_bimc_2_slv = {
+ .name = "snoc_bimc_2_slv",
+ .id = MSM8939_SNOC_BIMC_2_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_bimc_2_slv_links),
+ .links = snoc_bimc_2_slv_links,
+};
+
+static const u16 snoc_int_0_links[] = {
+ MSM8939_SLAVE_QDSS_STM,
+ MSM8939_SLAVE_IMEM,
+ MSM8939_SNOC_PNOC_MAS
+};
+
+static struct qcom_icc_node snoc_int_0 = {
+ .name = "snoc_int_0",
+ .id = MSM8939_SNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 99,
+ .slv_rpm_id = 130,
+ .num_links = ARRAY_SIZE(snoc_int_0_links),
+ .links = snoc_int_0_links,
+};
+
+static const u16 snoc_int_1_links[] = {
+ MSM8939_SLAVE_APSS,
+ MSM8939_SLAVE_CATS_128,
+ MSM8939_SLAVE_OCMEM_64
+};
+
+static struct qcom_icc_node snoc_int_1 = {
+ .name = "snoc_int_1",
+ .id = MSM8939_SNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(snoc_int_1_links),
+ .links = snoc_int_1_links,
+};
+
+static const u16 snoc_int_bimc_links[] = {
+ MSM8939_SNOC_BIMC_1_MAS
+};
+
+static struct qcom_icc_node snoc_int_bimc = {
+ .name = "snoc_int_bimc",
+ .id = MSM8939_SNOC_INT_BIMC,
+ .buswidth = 8,
+ .mas_rpm_id = 101,
+ .slv_rpm_id = 132,
+ .num_links = ARRAY_SIZE(snoc_int_bimc_links),
+ .links = snoc_int_bimc_links,
+};
+
+static const u16 snoc_pcnoc_mas_links[] = {
+ MSM8939_SNOC_PNOC_SLV
+};
+
+static struct qcom_icc_node snoc_pcnoc_mas = {
+ .name = "snoc_pcnoc_mas",
+ .id = MSM8939_SNOC_PNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(snoc_pcnoc_mas_links),
+ .links = snoc_pcnoc_mas_links,
+};
+
+static const u16 snoc_pcnoc_slv_links[] = {
+ MSM8939_PNOC_INT_0
+};
+
+static struct qcom_icc_node snoc_pcnoc_slv = {
+ .name = "snoc_pcnoc_slv",
+ .id = MSM8939_SNOC_PNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(snoc_pcnoc_slv_links),
+ .links = snoc_pcnoc_slv_links,
+};

static struct qcom_icc_node *msm8939_snoc_nodes[] = {
[BIMC_SNOC_SLV] = &bimc_snoc_slv,
@@ -227,9 +1273,19 @@ static struct qcom_icc_node *msm8939_snoc_nodes[] = {
[SNOC_QDSS_INT] = &qdss_int,
};

+static const struct regmap_config msm8939_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14080,
+ .fast_io = true,
+};
+
static struct qcom_icc_desc msm8939_snoc = {
.nodes = msm8939_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_nodes),
+ .regmap_cfg = &msm8939_snoc_regmap_config,
+ .qos_offset = 0x7000,
};

static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = {
@@ -244,9 +1300,19 @@ static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = {
[SNOC_MM_INT_2] = &mm_int_2,
};

+static const struct regmap_config msm8939_snoc_mm_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14080,
+ .fast_io = true,
+};
+
static struct qcom_icc_desc msm8939_snoc_mm = {
.nodes = msm8939_snoc_mm_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes),
+ .regmap_cfg = &msm8939_snoc_mm_regmap_config,
+ .qos_offset = 0x7000,
};

static struct qcom_icc_node *msm8939_bimc_nodes[] = {
@@ -261,9 +1327,20 @@ static struct qcom_icc_node *msm8939_bimc_nodes[] = {
[SNOC_BIMC_2_SLV] = &snoc_bimc_2_slv,
};

+static const struct regmap_config msm8939_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x62000,
+ .fast_io = true,
+};
+
static struct qcom_icc_desc msm8939_bimc = {
.nodes = msm8939_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_bimc_nodes),
+ .is_bimc_node = true,
+ .regmap_cfg = &msm8939_bimc_regmap_config,
+ .qos_offset = 0x8000,
};

static struct qcom_icc_node *msm8939_pcnoc_nodes[] = {
@@ -321,17 +1398,21 @@ static struct qcom_icc_node *msm8939_pcnoc_nodes[] = {
[SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
};

+static const struct regmap_config msm8939_pcnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11000,
+ .fast_io = true,
+};
+
static struct qcom_icc_desc msm8939_pcnoc = {
.nodes = msm8939_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes),
+ .regmap_cfg = &msm8939_pcnoc_regmap_config,
+ .qos_offset = 0x7000,
};

-static int msm8939_qnoc_probe(struct platform_device *pdev)
-{
- return qnoc_probe(pdev, sizeof(msm8939_bus_clocks),
- ARRAY_SIZE(msm8939_bus_clocks), msm8939_bus_clocks);
-}
-
static const struct of_device_id msm8939_noc_of_match[] = {
{ .compatible = "qcom,msm8939-bimc", .data = &msm8939_bimc },
{ .compatible = "qcom,msm8939-pcnoc", .data = &msm8939_pcnoc },
@@ -342,7 +1423,7 @@ static const struct of_device_id msm8939_noc_of_match[] = {
MODULE_DEVICE_TABLE(of, msm8939_noc_of_match);

static struct platform_driver msm8939_noc_driver = {
- .probe = msm8939_qnoc_probe,
+ .probe = qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-msm8939",
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 36a7e30a00be..416c8bff8efa 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -92,84 +92,887 @@ enum {
QCS404_SLAVE_LPASS,
};

-static const struct clk_bulk_data qcs404_bus_clocks[] = {
- { .id = "bus" },
- { .id = "bus_a" },
-};
-
-DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
-DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
-DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
-DEFINE_QNODE(mas_snoc_bimc_1, QCS404_SNOC_BIMC_1_MAS, 8, 76, -1, QCS404_SLAVE_EBI_CH0);
-DEFINE_QNODE(mas_tcu_0, QCS404_MASTER_TCU_0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
-DEFINE_QNODE(mas_spdm, QCS404_MASTER_SPDM, 4, -1, -1, QCS404_PNOC_INT_3);
-DEFINE_QNODE(mas_blsp_1, QCS404_MASTER_BLSP_1, 4, 41, -1, QCS404_PNOC_INT_3);
-DEFINE_QNODE(mas_blsp_2, QCS404_MASTER_BLSP_2, 4, 39, -1, QCS404_PNOC_INT_3);
-DEFINE_QNODE(mas_xi_usb_hs1, QCS404_MASTER_XM_USB_HS1, 8, 138, -1, QCS404_PNOC_INT_0);
-DEFINE_QNODE(mas_crypto, QCS404_MASTER_CRYPTO_CORE0, 8, 23, -1, QCS404_PNOC_SNOC_SLV, QCS404_PNOC_INT_2);
-DEFINE_QNODE(mas_sdcc_1, QCS404_MASTER_SDCC_1, 8, 33, -1, QCS404_PNOC_INT_0);
-DEFINE_QNODE(mas_sdcc_2, QCS404_MASTER_SDCC_2, 8, 35, -1, QCS404_PNOC_INT_0);
-DEFINE_QNODE(mas_snoc_pcnoc, QCS404_SNOC_PNOC_MAS, 8, 77, -1, QCS404_PNOC_INT_2);
-DEFINE_QNODE(mas_qpic, QCS404_MASTER_QPIC, 4, -1, -1, QCS404_PNOC_INT_0);
-DEFINE_QNODE(mas_qdss_bam, QCS404_MASTER_QDSS_BAM, 4, -1, -1, QCS404_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_bimc_snoc, QCS404_BIMC_SNOC_MAS, 8, 21, -1, QCS404_SLAVE_OCMEM_64, QCS404_SLAVE_CATS_128, QCS404_SNOC_INT_0, QCS404_SNOC_INT_1);
-DEFINE_QNODE(mas_pcnoc_snoc, QCS404_PNOC_SNOC_MAS, 8, 29, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_2, QCS404_SNOC_INT_0);
-DEFINE_QNODE(mas_qdss_etr, QCS404_MASTER_QDSS_ETR, 8, -1, -1, QCS404_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_emac, QCS404_MASTER_EMAC, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
-DEFINE_QNODE(mas_pcie, QCS404_MASTER_PCIE, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
-DEFINE_QNODE(mas_usb3, QCS404_MASTER_USB3, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
-DEFINE_QNODE(pcnoc_int_0, QCS404_PNOC_INT_0, 8, 85, 114, QCS404_PNOC_SNOC_SLV, QCS404_PNOC_INT_2);
-DEFINE_QNODE(pcnoc_int_2, QCS404_PNOC_INT_2, 8, 124, 184, QCS404_PNOC_SLV_10, QCS404_SLAVE_TCU, QCS404_PNOC_SLV_11, QCS404_PNOC_SLV_2, QCS404_PNOC_SLV_3, QCS404_PNOC_SLV_0, QCS404_PNOC_SLV_1, QCS404_PNOC_SLV_6, QCS404_PNOC_SLV_7, QCS404_PNOC_SLV_4, QCS404_PNOC_SLV_8, QCS404_PNOC_SLV_9);
-DEFINE_QNODE(pcnoc_int_3, QCS404_PNOC_INT_3, 8, 125, 185, QCS404_PNOC_SNOC_SLV);
-DEFINE_QNODE(pcnoc_s_0, QCS404_PNOC_SLV_0, 4, 89, 118, QCS404_SLAVE_PRNG, QCS404_SLAVE_SPDM_WRAPPER, QCS404_SLAVE_PDM);
-DEFINE_QNODE(pcnoc_s_1, QCS404_PNOC_SLV_1, 4, 90, 119, QCS404_SLAVE_TCSR);
-DEFINE_QNODE(pcnoc_s_2, QCS404_PNOC_SLV_2, 4, -1, -1, QCS404_SLAVE_GRAPHICS_3D_CFG);
-DEFINE_QNODE(pcnoc_s_3, QCS404_PNOC_SLV_3, 4, 92, 121, QCS404_SLAVE_MESSAGE_RAM);
-DEFINE_QNODE(pcnoc_s_4, QCS404_PNOC_SLV_4, 4, 93, 122, QCS404_SLAVE_SNOC_CFG);
-DEFINE_QNODE(pcnoc_s_6, QCS404_PNOC_SLV_6, 4, 94, 123, QCS404_SLAVE_BLSP_1, QCS404_SLAVE_TLMM_NORTH, QCS404_SLAVE_EMAC_CFG);
-DEFINE_QNODE(pcnoc_s_7, QCS404_PNOC_SLV_7, 4, 95, 124, QCS404_SLAVE_TLMM_SOUTH, QCS404_SLAVE_DISPLAY_CFG, QCS404_SLAVE_SDCC_1, QCS404_SLAVE_PCIE_1, QCS404_SLAVE_SDCC_2);
-DEFINE_QNODE(pcnoc_s_8, QCS404_PNOC_SLV_8, 4, 96, 125, QCS404_SLAVE_CRYPTO_0_CFG);
-DEFINE_QNODE(pcnoc_s_9, QCS404_PNOC_SLV_9, 4, 97, 126, QCS404_SLAVE_BLSP_2, QCS404_SLAVE_TLMM_EAST, QCS404_SLAVE_PMIC_ARB);
-DEFINE_QNODE(pcnoc_s_10, QCS404_PNOC_SLV_10, 4, 157, -1, QCS404_SLAVE_USB_HS);
-DEFINE_QNODE(pcnoc_s_11, QCS404_PNOC_SLV_11, 4, 158, 246, QCS404_SLAVE_USB3);
-DEFINE_QNODE(qdss_int, QCS404_SNOC_QDSS_INT, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
-DEFINE_QNODE(snoc_int_0, QCS404_SNOC_INT_0, 8, 99, 130, QCS404_SLAVE_LPASS, QCS404_SLAVE_APPSS, QCS404_SLAVE_WCSS);
-DEFINE_QNODE(snoc_int_1, QCS404_SNOC_INT_1, 8, 100, 131, QCS404_SNOC_PNOC_SLV, QCS404_SNOC_INT_2);
-DEFINE_QNODE(snoc_int_2, QCS404_SNOC_INT_2, 8, 134, 197, QCS404_SLAVE_QDSS_STM, QCS404_SLAVE_OCIMEM);
-DEFINE_QNODE(slv_ebi, QCS404_SLAVE_EBI_CH0, 8, -1, 0, 0);
-DEFINE_QNODE(slv_bimc_snoc, QCS404_BIMC_SNOC_SLV, 8, -1, 2, QCS404_BIMC_SNOC_MAS);
-DEFINE_QNODE(slv_spdm, QCS404_SLAVE_SPDM_WRAPPER, 4, -1, -1, 0);
-DEFINE_QNODE(slv_pdm, QCS404_SLAVE_PDM, 4, -1, 41, 0);
-DEFINE_QNODE(slv_prng, QCS404_SLAVE_PRNG, 4, -1, 44, 0);
-DEFINE_QNODE(slv_tcsr, QCS404_SLAVE_TCSR, 4, -1, 50, 0);
-DEFINE_QNODE(slv_snoc_cfg, QCS404_SLAVE_SNOC_CFG, 4, -1, 70, 0);
-DEFINE_QNODE(slv_message_ram, QCS404_SLAVE_MESSAGE_RAM, 4, -1, 55, 0);
-DEFINE_QNODE(slv_disp_ss_cfg, QCS404_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_gpu_cfg, QCS404_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_blsp_1, QCS404_SLAVE_BLSP_1, 4, -1, 39, 0);
-DEFINE_QNODE(slv_tlmm_north, QCS404_SLAVE_TLMM_NORTH, 4, -1, 214, 0);
-DEFINE_QNODE(slv_pcie, QCS404_SLAVE_PCIE_1, 4, -1, -1, 0);
-DEFINE_QNODE(slv_ethernet, QCS404_SLAVE_EMAC_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_blsp_2, QCS404_SLAVE_BLSP_2, 4, -1, 37, 0);
-DEFINE_QNODE(slv_tlmm_east, QCS404_SLAVE_TLMM_EAST, 4, -1, 213, 0);
-DEFINE_QNODE(slv_tcu, QCS404_SLAVE_TCU, 8, -1, -1, 0);
-DEFINE_QNODE(slv_pmic_arb, QCS404_SLAVE_PMIC_ARB, 4, -1, 59, 0);
-DEFINE_QNODE(slv_sdcc_1, QCS404_SLAVE_SDCC_1, 4, -1, 31, 0);
-DEFINE_QNODE(slv_sdcc_2, QCS404_SLAVE_SDCC_2, 4, -1, 33, 0);
-DEFINE_QNODE(slv_tlmm_south, QCS404_SLAVE_TLMM_SOUTH, 4, -1, -1, 0);
-DEFINE_QNODE(slv_usb_hs, QCS404_SLAVE_USB_HS, 4, -1, 40, 0);
-DEFINE_QNODE(slv_usb3, QCS404_SLAVE_USB3, 4, -1, 22, 0);
-DEFINE_QNODE(slv_crypto_0_cfg, QCS404_SLAVE_CRYPTO_0_CFG, 4, -1, 52, 0);
-DEFINE_QNODE(slv_pcnoc_snoc, QCS404_PNOC_SNOC_SLV, 8, -1, 45, QCS404_PNOC_SNOC_MAS);
-DEFINE_QNODE(slv_kpss_ahb, QCS404_SLAVE_APPSS, 4, -1, -1, 0);
-DEFINE_QNODE(slv_wcss, QCS404_SLAVE_WCSS, 4, -1, 23, 0);
-DEFINE_QNODE(slv_snoc_bimc_1, QCS404_SNOC_BIMC_1_SLV, 8, -1, 104, QCS404_SNOC_BIMC_1_MAS);
-DEFINE_QNODE(slv_imem, QCS404_SLAVE_OCIMEM, 8, -1, 26, 0);
-DEFINE_QNODE(slv_snoc_pcnoc, QCS404_SNOC_PNOC_SLV, 8, -1, 28, QCS404_SNOC_PNOC_MAS);
-DEFINE_QNODE(slv_qdss_stm, QCS404_SLAVE_QDSS_STM, 4, -1, 30, 0);
-DEFINE_QNODE(slv_cats_0, QCS404_SLAVE_CATS_128, 16, -1, -1, 0);
-DEFINE_QNODE(slv_cats_1, QCS404_SLAVE_OCMEM_64, 8, -1, -1, 0);
-DEFINE_QNODE(slv_lpass, QCS404_SLAVE_LPASS, 4, -1, -1, 0);
+static const u16 mas_apps_proc_links[] = {
+ QCS404_SLAVE_EBI_CH0,
+ QCS404_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_apps_proc = {
+ .name = "mas_apps_proc",
+ .id = QCS404_MASTER_AMPSS_M0,
+ .buswidth = 8,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_apps_proc_links),
+ .links = mas_apps_proc_links,
+};
+
+static const u16 mas_oxili_links[] = {
+ QCS404_SLAVE_EBI_CH0,
+ QCS404_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_oxili = {
+ .name = "mas_oxili",
+ .id = QCS404_MASTER_GRAPHICS_3D,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_oxili_links),
+ .links = mas_oxili_links,
+};
+
+static const u16 mas_mdp_links[] = {
+ QCS404_SLAVE_EBI_CH0,
+ QCS404_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_mdp = {
+ .name = "mas_mdp",
+ .id = QCS404_MASTER_MDP_PORT0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_mdp_links),
+ .links = mas_mdp_links,
+};
+
+static const u16 mas_snoc_bimc_1_links[] = {
+ QCS404_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc_1 = {
+ .name = "mas_snoc_bimc_1",
+ .id = QCS404_SNOC_BIMC_1_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 76,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_1_links),
+ .links = mas_snoc_bimc_1_links,
+};
+
+static const u16 mas_tcu_0_links[] = {
+ QCS404_SLAVE_EBI_CH0,
+ QCS404_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_tcu_0 = {
+ .name = "mas_tcu_0",
+ .id = QCS404_MASTER_TCU_0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_tcu_0_links),
+ .links = mas_tcu_0_links,
+};
+
+static const u16 mas_spdm_links[] = {
+ QCS404_PNOC_INT_3
+};
+
+static struct qcom_icc_node mas_spdm = {
+ .name = "mas_spdm",
+ .id = QCS404_MASTER_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_spdm_links),
+ .links = mas_spdm_links,
+};
+
+static const u16 mas_blsp_1_links[] = {
+ QCS404_PNOC_INT_3
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = QCS404_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = 41,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_1_links),
+ .links = mas_blsp_1_links,
+};
+
+static const u16 mas_blsp_2_links[] = {
+ QCS404_PNOC_INT_3
+};
+
+static struct qcom_icc_node mas_blsp_2 = {
+ .name = "mas_blsp_2",
+ .id = QCS404_MASTER_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = 39,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_2_links),
+ .links = mas_blsp_2_links,
+};
+
+static const u16 mas_xi_usb_hs1_links[] = {
+ QCS404_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_xi_usb_hs1 = {
+ .name = "mas_xi_usb_hs1",
+ .id = QCS404_MASTER_XM_USB_HS1,
+ .buswidth = 8,
+ .mas_rpm_id = 138,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_xi_usb_hs1_links),
+ .links = mas_xi_usb_hs1_links,
+};
+
+static const u16 mas_crypto_links[] = {
+ QCS404_PNOC_SNOC_SLV,
+ QCS404_PNOC_INT_2
+};
+
+static struct qcom_icc_node mas_crypto = {
+ .name = "mas_crypto",
+ .id = QCS404_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_crypto_links),
+ .links = mas_crypto_links,
+};
+
+static const u16 mas_sdcc_1_links[] = {
+ QCS404_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .name = "mas_sdcc_1",
+ .id = QCS404_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_sdcc_1_links),
+ .links = mas_sdcc_1_links,
+};
+
+static const u16 mas_sdcc_2_links[] = {
+ QCS404_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .name = "mas_sdcc_2",
+ .id = QCS404_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_sdcc_2_links),
+ .links = mas_sdcc_2_links,
+};
+
+static const u16 mas_snoc_pcnoc_links[] = {
+ QCS404_PNOC_INT_2
+};
+
+static struct qcom_icc_node mas_snoc_pcnoc = {
+ .name = "mas_snoc_pcnoc",
+ .id = QCS404_SNOC_PNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 77,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_pcnoc_links),
+ .links = mas_snoc_pcnoc_links,
+};
+
+static const u16 mas_qpic_links[] = {
+ QCS404_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_qpic = {
+ .name = "mas_qpic",
+ .id = QCS404_MASTER_QPIC,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qpic_links),
+ .links = mas_qpic_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ QCS404_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = QCS404_MASTER_QDSS_BAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ QCS404_SLAVE_OCMEM_64,
+ QCS404_SLAVE_CATS_128,
+ QCS404_SNOC_INT_0,
+ QCS404_SNOC_INT_1
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .name = "mas_bimc_snoc",
+ .id = QCS404_BIMC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_pcnoc_snoc_links[] = {
+ QCS404_SNOC_BIMC_1_SLV,
+ QCS404_SNOC_INT_2,
+ QCS404_SNOC_INT_0
+};
+
+static struct qcom_icc_node mas_pcnoc_snoc = {
+ .name = "mas_pcnoc_snoc",
+ .id = QCS404_PNOC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 29,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pcnoc_snoc_links),
+ .links = mas_pcnoc_snoc_links,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ QCS404_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = QCS404_MASTER_QDSS_ETR,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 mas_emac_links[] = {
+ QCS404_SNOC_BIMC_1_SLV,
+ QCS404_SNOC_INT_1
+};
+
+static struct qcom_icc_node mas_emac = {
+ .name = "mas_emac",
+ .id = QCS404_MASTER_EMAC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_emac_links),
+ .links = mas_emac_links,
+};
+
+static const u16 mas_pcie_links[] = {
+ QCS404_SNOC_BIMC_1_SLV,
+ QCS404_SNOC_INT_1
+};
+
+static struct qcom_icc_node mas_pcie = {
+ .name = "mas_pcie",
+ .id = QCS404_MASTER_PCIE,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pcie_links),
+ .links = mas_pcie_links,
+};
+
+static const u16 mas_usb3_links[] = {
+ QCS404_SNOC_BIMC_1_SLV,
+ QCS404_SNOC_INT_1
+};
+
+static struct qcom_icc_node mas_usb3 = {
+ .name = "mas_usb3",
+ .id = QCS404_MASTER_USB3,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb3_links),
+ .links = mas_usb3_links,
+};
+
+static const u16 pcnoc_int_0_links[] = {
+ QCS404_PNOC_SNOC_SLV,
+ QCS404_PNOC_INT_2
+};
+
+static struct qcom_icc_node pcnoc_int_0 = {
+ .name = "pcnoc_int_0",
+ .id = QCS404_PNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 85,
+ .slv_rpm_id = 114,
+ .num_links = ARRAY_SIZE(pcnoc_int_0_links),
+ .links = pcnoc_int_0_links,
+};
+
+static const u16 pcnoc_int_2_links[] = {
+ QCS404_PNOC_SLV_10,
+ QCS404_SLAVE_TCU,
+ QCS404_PNOC_SLV_11,
+ QCS404_PNOC_SLV_2,
+ QCS404_PNOC_SLV_3,
+ QCS404_PNOC_SLV_0,
+ QCS404_PNOC_SLV_1,
+ QCS404_PNOC_SLV_6,
+ QCS404_PNOC_SLV_7,
+ QCS404_PNOC_SLV_4,
+ QCS404_PNOC_SLV_8,
+ QCS404_PNOC_SLV_9
+};
+
+static struct qcom_icc_node pcnoc_int_2 = {
+ .name = "pcnoc_int_2",
+ .id = QCS404_PNOC_INT_2,
+ .buswidth = 8,
+ .mas_rpm_id = 124,
+ .slv_rpm_id = 184,
+ .num_links = ARRAY_SIZE(pcnoc_int_2_links),
+ .links = pcnoc_int_2_links,
+};
+
+static const u16 pcnoc_int_3_links[] = {
+ QCS404_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_int_3 = {
+ .name = "pcnoc_int_3",
+ .id = QCS404_PNOC_INT_3,
+ .buswidth = 8,
+ .mas_rpm_id = 125,
+ .slv_rpm_id = 185,
+ .num_links = ARRAY_SIZE(pcnoc_int_3_links),
+ .links = pcnoc_int_3_links,
+};
+
+static const u16 pcnoc_s_0_links[] = {
+ QCS404_SLAVE_PRNG,
+ QCS404_SLAVE_SPDM_WRAPPER,
+ QCS404_SLAVE_PDM
+};
+
+static struct qcom_icc_node pcnoc_s_0 = {
+ .name = "pcnoc_s_0",
+ .id = QCS404_PNOC_SLV_0,
+ .buswidth = 4,
+ .mas_rpm_id = 89,
+ .slv_rpm_id = 118,
+ .num_links = ARRAY_SIZE(pcnoc_s_0_links),
+ .links = pcnoc_s_0_links,
+};
+
+static const u16 pcnoc_s_1_links[] = {
+ QCS404_SLAVE_TCSR
+};
+
+static struct qcom_icc_node pcnoc_s_1 = {
+ .name = "pcnoc_s_1",
+ .id = QCS404_PNOC_SLV_1,
+ .buswidth = 4,
+ .mas_rpm_id = 90,
+ .slv_rpm_id = 119,
+ .num_links = ARRAY_SIZE(pcnoc_s_1_links),
+ .links = pcnoc_s_1_links,
+};
+
+static const u16 pcnoc_s_2_links[] = {
+ QCS404_SLAVE_GRAPHICS_3D_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_2 = {
+ .name = "pcnoc_s_2",
+ .id = QCS404_PNOC_SLV_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_2_links),
+ .links = pcnoc_s_2_links,
+};
+
+static const u16 pcnoc_s_3_links[] = {
+ QCS404_SLAVE_MESSAGE_RAM
+};
+
+static struct qcom_icc_node pcnoc_s_3 = {
+ .name = "pcnoc_s_3",
+ .id = QCS404_PNOC_SLV_3,
+ .buswidth = 4,
+ .mas_rpm_id = 92,
+ .slv_rpm_id = 121,
+ .num_links = ARRAY_SIZE(pcnoc_s_3_links),
+ .links = pcnoc_s_3_links,
+};
+
+static const u16 pcnoc_s_4_links[] = {
+ QCS404_SLAVE_SNOC_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_4 = {
+ .name = "pcnoc_s_4",
+ .id = QCS404_PNOC_SLV_4,
+ .buswidth = 4,
+ .mas_rpm_id = 93,
+ .slv_rpm_id = 122,
+ .num_links = ARRAY_SIZE(pcnoc_s_4_links),
+ .links = pcnoc_s_4_links,
+};
+
+static const u16 pcnoc_s_6_links[] = {
+ QCS404_SLAVE_BLSP_1,
+ QCS404_SLAVE_TLMM_NORTH,
+ QCS404_SLAVE_EMAC_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_6 = {
+ .name = "pcnoc_s_6",
+ .id = QCS404_PNOC_SLV_6,
+ .buswidth = 4,
+ .mas_rpm_id = 94,
+ .slv_rpm_id = 123,
+ .num_links = ARRAY_SIZE(pcnoc_s_6_links),
+ .links = pcnoc_s_6_links,
+};
+
+static const u16 pcnoc_s_7_links[] = {
+ QCS404_SLAVE_TLMM_SOUTH,
+ QCS404_SLAVE_DISPLAY_CFG,
+ QCS404_SLAVE_SDCC_1,
+ QCS404_SLAVE_PCIE_1,
+ QCS404_SLAVE_SDCC_2
+};
+
+static struct qcom_icc_node pcnoc_s_7 = {
+ .name = "pcnoc_s_7",
+ .id = QCS404_PNOC_SLV_7,
+ .buswidth = 4,
+ .mas_rpm_id = 95,
+ .slv_rpm_id = 124,
+ .num_links = ARRAY_SIZE(pcnoc_s_7_links),
+ .links = pcnoc_s_7_links,
+};
+
+static const u16 pcnoc_s_8_links[] = {
+ QCS404_SLAVE_CRYPTO_0_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_8 = {
+ .name = "pcnoc_s_8",
+ .id = QCS404_PNOC_SLV_8,
+ .buswidth = 4,
+ .mas_rpm_id = 96,
+ .slv_rpm_id = 125,
+ .num_links = ARRAY_SIZE(pcnoc_s_8_links),
+ .links = pcnoc_s_8_links,
+};
+
+static const u16 pcnoc_s_9_links[] = {
+ QCS404_SLAVE_BLSP_2,
+ QCS404_SLAVE_TLMM_EAST,
+ QCS404_SLAVE_PMIC_ARB
+};
+
+static struct qcom_icc_node pcnoc_s_9 = {
+ .name = "pcnoc_s_9",
+ .id = QCS404_PNOC_SLV_9,
+ .buswidth = 4,
+ .mas_rpm_id = 97,
+ .slv_rpm_id = 126,
+ .num_links = ARRAY_SIZE(pcnoc_s_9_links),
+ .links = pcnoc_s_9_links,
+};
+
+static const u16 pcnoc_s_10_links[] = {
+ QCS404_SLAVE_USB_HS
+};
+
+static struct qcom_icc_node pcnoc_s_10 = {
+ .name = "pcnoc_s_10",
+ .id = QCS404_PNOC_SLV_10,
+ .buswidth = 4,
+ .mas_rpm_id = 157,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_s_10_links),
+ .links = pcnoc_s_10_links,
+};
+
+static const u16 pcnoc_s_11_links[] = {
+ QCS404_SLAVE_USB3
+};
+
+static struct qcom_icc_node pcnoc_s_11 = {
+ .name = "pcnoc_s_11",
+ .id = QCS404_PNOC_SLV_11,
+ .buswidth = 4,
+ .mas_rpm_id = 158,
+ .slv_rpm_id = 246,
+ .num_links = ARRAY_SIZE(pcnoc_s_11_links),
+ .links = pcnoc_s_11_links,
+};
+
+static const u16 qdss_int_links[] = {
+ QCS404_SNOC_BIMC_1_SLV,
+ QCS404_SNOC_INT_1
+};
+
+static struct qcom_icc_node qdss_int = {
+ .name = "qdss_int",
+ .id = QCS404_SNOC_QDSS_INT,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(qdss_int_links),
+ .links = qdss_int_links,
+};
+
+static const u16 snoc_int_0_links[] = {
+ QCS404_SLAVE_LPASS,
+ QCS404_SLAVE_APPSS,
+ QCS404_SLAVE_WCSS
+};
+
+static struct qcom_icc_node snoc_int_0 = {
+ .name = "snoc_int_0",
+ .id = QCS404_SNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 99,
+ .slv_rpm_id = 130,
+ .num_links = ARRAY_SIZE(snoc_int_0_links),
+ .links = snoc_int_0_links,
+};
+
+static const u16 snoc_int_1_links[] = {
+ QCS404_SNOC_PNOC_SLV,
+ QCS404_SNOC_INT_2
+};
+
+static struct qcom_icc_node snoc_int_1 = {
+ .name = "snoc_int_1",
+ .id = QCS404_SNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 100,
+ .slv_rpm_id = 131,
+ .num_links = ARRAY_SIZE(snoc_int_1_links),
+ .links = snoc_int_1_links,
+};
+
+static const u16 snoc_int_2_links[] = {
+ QCS404_SLAVE_QDSS_STM,
+ QCS404_SLAVE_OCIMEM
+};
+
+static struct qcom_icc_node snoc_int_2 = {
+ .name = "snoc_int_2",
+ .id = QCS404_SNOC_INT_2,
+ .buswidth = 8,
+ .mas_rpm_id = 134,
+ .slv_rpm_id = 197,
+ .num_links = ARRAY_SIZE(snoc_int_2_links),
+ .links = snoc_int_2_links,
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = QCS404_SLAVE_EBI_CH0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ QCS404_BIMC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = QCS404_BIMC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node slv_spdm = {
+ .name = "slv_spdm",
+ .id = QCS404_SLAVE_SPDM_WRAPPER,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = QCS404_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = QCS404_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 44,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = QCS404_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = QCS404_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = QCS404_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55,
+};
+
+static struct qcom_icc_node slv_disp_ss_cfg = {
+ .name = "slv_disp_ss_cfg",
+ .id = QCS404_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_gpu_cfg = {
+ .name = "slv_gpu_cfg",
+ .id = QCS404_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = QCS404_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 39,
+};
+
+static struct qcom_icc_node slv_tlmm_north = {
+ .name = "slv_tlmm_north",
+ .id = QCS404_SLAVE_TLMM_NORTH,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 214,
+};
+
+static struct qcom_icc_node slv_pcie = {
+ .name = "slv_pcie",
+ .id = QCS404_SLAVE_PCIE_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_ethernet = {
+ .name = "slv_ethernet",
+ .id = QCS404_SLAVE_EMAC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_blsp_2 = {
+ .name = "slv_blsp_2",
+ .id = QCS404_SLAVE_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 37,
+};
+
+static struct qcom_icc_node slv_tlmm_east = {
+ .name = "slv_tlmm_east",
+ .id = QCS404_SLAVE_TLMM_EAST,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 213,
+};
+
+static struct qcom_icc_node slv_tcu = {
+ .name = "slv_tcu",
+ .id = QCS404_SLAVE_TCU,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = QCS404_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = QCS404_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = QCS404_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33,
+};
+
+static struct qcom_icc_node slv_tlmm_south = {
+ .name = "slv_tlmm_south",
+ .id = QCS404_SLAVE_TLMM_SOUTH,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = QCS404_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 40,
+};
+
+static struct qcom_icc_node slv_usb3 = {
+ .name = "slv_usb3",
+ .id = QCS404_SLAVE_USB3,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 22,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = QCS404_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+};
+
+static const u16 slv_pcnoc_snoc_links[] = {
+ QCS404_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_pcnoc_snoc = {
+ .name = "slv_pcnoc_snoc",
+ .id = QCS404_PNOC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 45,
+ .num_links = ARRAY_SIZE(slv_pcnoc_snoc_links),
+ .links = slv_pcnoc_snoc_links,
+};
+
+static struct qcom_icc_node slv_kpss_ahb = {
+ .name = "slv_kpss_ahb",
+ .id = QCS404_SLAVE_APPSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_wcss = {
+ .name = "slv_wcss",
+ .id = QCS404_SLAVE_WCSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 23,
+};
+
+static const u16 slv_snoc_bimc_1_links[] = {
+ QCS404_SNOC_BIMC_1_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_1 = {
+ .name = "slv_snoc_bimc_1",
+ .id = QCS404_SNOC_BIMC_1_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 104,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_1_links),
+ .links = slv_snoc_bimc_1_links,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = QCS404_SLAVE_OCIMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static const u16 slv_snoc_pcnoc_links[] = {
+ QCS404_SNOC_PNOC_MAS
+};
+
+static struct qcom_icc_node slv_snoc_pcnoc = {
+ .name = "slv_snoc_pcnoc",
+ .id = QCS404_SNOC_PNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 28,
+ .num_links = ARRAY_SIZE(slv_snoc_pcnoc_links),
+ .links = slv_snoc_pcnoc_links,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = QCS404_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_cats_0 = {
+ .name = "slv_cats_0",
+ .id = QCS404_SLAVE_CATS_128,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_cats_1 = {
+ .name = "slv_cats_1",
+ .id = QCS404_SLAVE_OCMEM_64,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node slv_lpass = {
+ .name = "slv_lpass",
+ .id = QCS404_SLAVE_LPASS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};

static struct qcom_icc_node *qcs404_bimc_nodes[] = {
[MASTER_AMPSS_M0] = &mas_apps_proc,
@@ -269,12 +1072,6 @@ static struct qcom_icc_desc qcs404_snoc = {
};

-static int qcs404_qnoc_probe(struct platform_device *pdev)
-{
- return qnoc_probe(pdev, sizeof(qcs404_bus_clocks),
- ARRAY_SIZE(qcs404_bus_clocks), qcs404_bus_clocks);
-}
-
static const struct of_device_id qcs404_noc_of_match[] = {
{ .compatible = "qcom,qcs404-bimc", .data = &qcs404_bimc },
{ .compatible = "qcom,qcs404-pcnoc", .data = &qcs404_pcnoc },
@@ -284,7 +1081,7 @@ static const struct of_device_id qcs404_noc_of_match[] = {
MODULE_DEVICE_TABLE(of, qcs404_noc_of_match);

static struct platform_driver qcs404_noc_driver = {
- .probe = qcs404_qnoc_probe,
+ .probe = qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-qcs404",
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index fb23a5b780a4..471bb88f8828 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -16,41 +16,9 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+#include "icc-rpm.h"
#include "smd-rpm.h"
-#define RPM_BUS_MASTER_REQ 0x73616d62
-#define RPM_BUS_SLAVE_REQ 0x766c7362
-
-/* BIMC QoS */
-#define M_BKE_REG_BASE(n) (0x300 + (0x4000 * n))
-#define M_BKE_EN_ADDR(n) (M_BKE_REG_BASE(n))
-#define M_BKE_HEALTH_CFG_ADDR(i, n) (M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))
-
-#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK 0x80000000
-#define M_BKE_HEALTH_CFG_AREQPRIO_MASK 0x300
-#define M_BKE_HEALTH_CFG_PRIOLVL_MASK 0x3
-#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT 0x8
-#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f
-
-#define M_BKE_EN_EN_BMASK 0x1
-
-/* Valid for both NoC and BIMC */
-#define NOC_QOS_MODE_FIXED 0x0
-#define NOC_QOS_MODE_LIMITER 0x1
-#define NOC_QOS_MODE_BYPASS 0x2
-
-/* NoC QoS */
-#define NOC_PERM_MODE_FIXED 1
-#define NOC_PERM_MODE_BYPASS (1 << NOC_QOS_MODE_BYPASS)
-
-#define NOC_QOS_PRIORITYn_ADDR(n) (0x8 + (n * 0x1000))
-#define NOC_QOS_PRIORITY_P1_MASK 0xc
-#define NOC_QOS_PRIORITY_P0_MASK 0x3
-#define NOC_QOS_PRIORITY_P1_SHIFT 0x2
-
-#define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000))
-#define NOC_QOS_MODEn_MASK 0x3
-
enum {
SDM660_MASTER_IPA = 1,
SDM660_MASTER_CNOC_A2NOC,
@@ -159,212 +127,1368 @@ enum {
SDM660_SNOC,
};

-#define to_qcom_provider(_provider) \
- container_of(_provider, struct qcom_icc_provider, provider)
+static const char * const bus_mm_clocks[] = {
+ "bus",
+ "bus_a",
+ "iface",
+};
-static const struct clk_bulk_data bus_clocks[] = {
- { .id = "bus" },
- { .id = "bus_a" },
+static const char * const bus_a2noc_clocks[] = {
+ "bus",
+ "bus_a",
+ "ipa",
+ "ufs_axi",
+ "aggre2_ufs_axi",
+ "aggre2_usb3_axi",
+ "cfg_noc_usb2_axi",
};

-static const struct clk_bulk_data bus_mm_clocks[] = {
- { .id = "bus" },
- { .id = "bus_a" },
- { .id = "iface" },
+static const u16 mas_ipa_links[] = {
+ SDM660_SLAVE_A2NOC_SNOC
};

-static const struct clk_bulk_data bus_a2noc_clocks[] = {
- { .id = "bus" },
- { .id = "bus_a" },
- { .id = "ipa" },
- { .id = "ufs_axi" },
- { .id = "aggre2_ufs_axi" },
- { .id = "aggre2_usb3_axi" },
- { .id = "cfg_noc_usb2_axi" },
+static struct qcom_icc_node mas_ipa = {
+ .name = "mas_ipa",
+ .id = SDM660_MASTER_IPA,
+ .buswidth = 8,
+ .mas_rpm_id = 59,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_ipa_links),
+ .links = mas_ipa_links,
};

-/**
- * struct qcom_icc_provider - Qualcomm specific interconnect provider
- * @provider: generic interconnect provider
- * @bus_clks: the clk_bulk_data table of bus clocks
- * @num_clks: the total number of clk_bulk_data entries
- * @is_bimc_node: indicates whether to use bimc specific setting
- * @regmap: regmap for QoS registers read/write access
- * @mmio: NoC base iospace
- */
-struct qcom_icc_provider {
- struct icc_provider provider;
- struct clk_bulk_data *bus_clks;
- int num_clks;
- bool is_bimc_node;
- struct regmap *regmap;
- void __iomem *mmio;
-};
-
-#define SDM660_MAX_LINKS 34
-
-/**
- * struct qcom_icc_qos - Qualcomm specific interconnect QoS parameters
- * @areq_prio: node requests priority
- * @prio_level: priority level for bus communication
- * @limit_commands: activate/deactivate limiter mode during runtime
- * @ap_owned: indicates if the node is owned by the AP or by the RPM
- * @qos_mode: default qos mode for this node
- * @qos_port: qos port number for finding qos registers of this node
- */
-struct qcom_icc_qos {
- u32 areq_prio;
- u32 prio_level;
- bool limit_commands;
- bool ap_owned;
- int qos_mode;
- int qos_port;
-};
-
-/**
- * struct qcom_icc_node - Qualcomm specific interconnect nodes
- * @name: the node name used in debugfs
- * @id: a unique node identifier
- * @links: an array of nodes where we can go next while traversing
- * @num_links: the total number of @links
- * @buswidth: width of the interconnect between a node and the bus (bytes)
- * @mas_rpm_id: RPM id for devices that are bus masters
- * @slv_rpm_id: RPM id for devices that are bus slaves
- * @qos: NoC QoS setting parameters
- * @rate: current bus clock rate in Hz
- */
-struct qcom_icc_node {
- unsigned char *name;
- u16 id;
- u16 links[SDM660_MAX_LINKS];
- u16 num_links;
- u16 buswidth;
- int mas_rpm_id;
- int slv_rpm_id;
- struct qcom_icc_qos qos;
- u64 rate;
-};
-
-struct qcom_icc_desc {
- struct qcom_icc_node **nodes;
- size_t num_nodes;
- const struct regmap_config *regmap_cfg;
-};
-
-#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
- _ap_owned, _qos_mode, _qos_prio, _qos_port, ...) \
- static struct qcom_icc_node _name = { \
- .name = #_name, \
- .id = _id, \
- .buswidth = _buswidth, \
- .mas_rpm_id = _mas_rpm_id, \
- .slv_rpm_id = _slv_rpm_id, \
- .qos.ap_owned = _ap_owned, \
- .qos.qos_mode = _qos_mode, \
- .qos.areq_prio = _qos_prio, \
- .qos.prio_level = _qos_prio, \
- .qos.qos_port = _qos_port, \
- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
- .links = { __VA_ARGS__ }, \
- }
-
-DEFINE_QNODE(mas_ipa, SDM660_MASTER_IPA, 8, 59, -1, true, NOC_QOS_MODE_FIXED, 1, 3, SDM660_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(mas_cnoc_a2noc, SDM660_MASTER_CNOC_A2NOC, 8, 146, -1, true, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(mas_sdcc_1, SDM660_MASTER_SDCC_1, 8, 33, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(mas_sdcc_2, SDM660_MASTER_SDCC_2, 8, 35, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(mas_blsp_1, SDM660_MASTER_BLSP_1, 4, 41, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(mas_blsp_2, SDM660_MASTER_BLSP_2, 4, 39, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(mas_ufs, SDM660_MASTER_UFS, 8, 68, -1, true, NOC_QOS_MODE_FIXED, 1, 4, SDM660_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(mas_usb_hs, SDM660_MASTER_USB_HS, 8, 42, -1, true, NOC_QOS_MODE_FIXED, 1, 1, SDM660_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(mas_usb3, SDM660_MASTER_USB3, 8, 32, -1, true, NOC_QOS_MODE_FIXED, 1, 2, SDM660_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(mas_crypto, SDM660_MASTER_CRYPTO_C0, 8, 23, -1, true, NOC_QOS_MODE_FIXED, 1, 11, SDM660_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(mas_gnoc_bimc, SDM660_MASTER_GNOC_BIMC, 4, 144, -1, true, NOC_QOS_MODE_FIXED, 0, 0, SDM660_SLAVE_EBI);
-DEFINE_QNODE(mas_oxili, SDM660_MASTER_OXILI, 4, 6, -1, true, NOC_QOS_MODE_BYPASS, 0, 1, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI, SDM660_SLAVE_BIMC_SNOC);
-DEFINE_QNODE(mas_mnoc_bimc, SDM660_MASTER_MNOC_BIMC, 4, 2, -1, true, NOC_QOS_MODE_BYPASS, 0, 2, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI, SDM660_SLAVE_BIMC_SNOC);
-DEFINE_QNODE(mas_snoc_bimc, SDM660_MASTER_SNOC_BIMC, 4, 3, -1, false, -1, 0, -1, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI);
-DEFINE_QNODE(mas_pimem, SDM660_MASTER_PIMEM, 4, 113, -1, true, NOC_QOS_MODE_FIXED, 1, 4, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI);
-DEFINE_QNODE(mas_snoc_cnoc, SDM660_MASTER_SNOC_CNOC, 8, 52, -1, true, -1, 0, -1, SDM660_SLAVE_CLK_CTL, SDM660_SLAVE_QDSS_CFG, SDM660_SLAVE_QM_CFG, SDM660_SLAVE_SRVC_CNOC, SDM660_SLAVE_UFS_CFG, SDM660_SLAVE_TCSR, SDM660_SLAVE_A2NOC_SMMU_CFG, SDM660_SLAVE_SNOC_CFG, SDM660_SLAVE_TLMM_SOUTH, SDM660_SLAVE_MPM, SDM660_SLAVE_CNOC_MNOC_MMSS_CFG, SDM660_SLAVE_SDCC_2, SDM660_SLAVE_SDCC_1, SDM660_SLAVE_SPDM, SDM660_SLAVE_PMIC_ARB, SDM660_SLAVE_PRNG, SDM660_SLAVE_MSS_CFG, SDM660_SLAVE_GPUSS_CFG, SDM660_SLAVE_IMEM_CFG, SDM660_SLAVE_USB3_0, SDM660_SLAVE_A2NOC_CFG, SDM660_SLAVE_TLMM_NORTH, SDM660_SLAVE_USB_HS, SDM660_SLAVE_PDM, SDM660_SLAVE_TLMM_CENTER, SDM660_SLAVE_AHB2PHY, SDM660_SLAVE_BLSP_2, SDM660_SLAVE_BLSP_1, SDM660_SLAVE_PIMEM_CFG, SDM660_SLAVE_GLM, SDM660_SLAVE_MESSAGE_RAM, SDM660_SLAVE_BIMC_CFG, SDM660_SLAVE_CNOC_MNOC_CFG);
-DEFINE_QNODE(mas_qdss_dap, SDM660_MASTER_QDSS_DAP, 8, 49, -1, true, -1, 0, -1, SDM660_SLAVE_CLK_CTL, SDM660_SLAVE_QDSS_CFG, SDM660_SLAVE_QM_CFG, SDM660_SLAVE_SRVC_CNOC, SDM660_SLAVE_UFS_CFG, SDM660_SLAVE_TCSR, SDM660_SLAVE_A2NOC_SMMU_CFG, SDM660_SLAVE_SNOC_CFG, SDM660_SLAVE_TLMM_SOUTH, SDM660_SLAVE_MPM, SDM660_SLAVE_CNOC_MNOC_MMSS_CFG, SDM660_SLAVE_SDCC_2, SDM660_SLAVE_SDCC_1, SDM660_SLAVE_SPDM, SDM660_SLAVE_PMIC_ARB, SDM660_SLAVE_PRNG, SDM660_SLAVE_MSS_CFG, SDM660_SLAVE_GPUSS_CFG, SDM660_SLAVE_IMEM_CFG, SDM660_SLAVE_USB3_0, SDM660_SLAVE_A2NOC_CFG, SDM660_SLAVE_TLMM_NORTH, SDM660_SLAVE_USB_HS, SDM660_SLAVE_PDM, SDM660_SLAVE_TLMM_CENTER, SDM660_SLAVE_AHB2PHY, SDM660_SLAVE_BLSP_2, SDM660_SLAVE_BLSP_1, SDM660_SLAVE_PIMEM_CFG, SDM660_SLAVE_GLM, SDM660_SLAVE_MESSAGE_RAM, SDM660_SLAVE_CNOC_A2NOC, SDM660_SLAVE_BIMC_CFG, SDM660_SLAVE_CNOC_MNOC_CFG);
-DEFINE_QNODE(mas_apss_proc, SDM660_MASTER_APPS_PROC, 16, 0, -1, true, -1, 0, -1, SDM660_SLAVE_GNOC_SNOC, SDM660_SLAVE_GNOC_BIMC);
-DEFINE_QNODE(mas_cnoc_mnoc_mmss_cfg, SDM660_MASTER_CNOC_MNOC_MMSS_CFG, 8, 4, -1, true, -1, 0, -1, SDM660_SLAVE_VENUS_THROTTLE_CFG, SDM660_SLAVE_VENUS_CFG, SDM660_SLAVE_CAMERA_THROTTLE_CFG, SDM660_SLAVE_SMMU_CFG, SDM660_SLAVE_CAMERA_CFG, SDM660_SLAVE_CSI_PHY_CFG, SDM660_SLAVE_DISPLAY_THROTTLE_CFG, SDM660_SLAVE_DISPLAY_CFG, SDM660_SLAVE_MMSS_CLK_CFG, SDM660_SLAVE_MNOC_MPU_CFG, SDM660_SLAVE_MISC_CFG, SDM660_SLAVE_MMSS_CLK_XPU_CFG);
-DEFINE_QNODE(mas_cnoc_mnoc_cfg, SDM660_MASTER_CNOC_MNOC_CFG, 4, 5, -1, true, -1, 0, -1, SDM660_SLAVE_SRVC_MNOC);
-DEFINE_QNODE(mas_cpp, SDM660_MASTER_CPP, 16, 115, -1, true, NOC_QOS_MODE_BYPASS, 0, 4, SDM660_SLAVE_MNOC_BIMC);
-DEFINE_QNODE(mas_jpeg, SDM660_MASTER_JPEG, 16, 7, -1, true, NOC_QOS_MODE_BYPASS, 0, 6, SDM660_SLAVE_MNOC_BIMC);
-DEFINE_QNODE(mas_mdp_p0, SDM660_MASTER_MDP_P0, 16, 8, -1, true, NOC_QOS_MODE_BYPASS, 0, 0, SDM660_SLAVE_MNOC_BIMC); /* vrail-comp???? */
-DEFINE_QNODE(mas_mdp_p1, SDM660_MASTER_MDP_P1, 16, 61, -1, true, NOC_QOS_MODE_BYPASS, 0, 1, SDM660_SLAVE_MNOC_BIMC); /* vrail-comp??? */
-DEFINE_QNODE(mas_venus, SDM660_MASTER_VENUS, 16, 9, -1, true, NOC_QOS_MODE_BYPASS, 0, 1, SDM660_SLAVE_MNOC_BIMC);
-DEFINE_QNODE(mas_vfe, SDM660_MASTER_VFE, 16, 11, -1, true, NOC_QOS_MODE_BYPASS, 0, 5, SDM660_SLAVE_MNOC_BIMC);
-DEFINE_QNODE(mas_qdss_etr, SDM660_MASTER_QDSS_ETR, 8, 31, -1, true, NOC_QOS_MODE_FIXED, 1, 1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IMEM, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_SNOC_BIMC);
-DEFINE_QNODE(mas_qdss_bam, SDM660_MASTER_QDSS_BAM, 4, 19, -1, true, NOC_QOS_MODE_FIXED, 1, 0, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IMEM, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_SNOC_BIMC);
-DEFINE_QNODE(mas_snoc_cfg, SDM660_MASTER_SNOC_CFG, 4, 20, -1, false, -1, 0, -1, SDM660_SLAVE_SRVC_SNOC);
-DEFINE_QNODE(mas_bimc_snoc, SDM660_MASTER_BIMC_SNOC, 8, 21, -1, false, -1, 0, -1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IPA, SDM660_SLAVE_QDSS_STM, SDM660_SLAVE_LPASS, SDM660_SLAVE_HMSS, SDM660_SLAVE_CDSP, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_WLAN, SDM660_SLAVE_IMEM);
-DEFINE_QNODE(mas_gnoc_snoc, SDM660_MASTER_GNOC_SNOC, 8, 150, -1, false, -1, 0, -1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IPA, SDM660_SLAVE_QDSS_STM, SDM660_SLAVE_LPASS, SDM660_SLAVE_HMSS, SDM660_SLAVE_CDSP, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_WLAN, SDM660_SLAVE_IMEM);
-DEFINE_QNODE(mas_a2noc_snoc, SDM660_MASTER_A2NOC_SNOC, 16, 112, -1, false, -1, 0, -1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IPA, SDM660_SLAVE_QDSS_STM, SDM660_SLAVE_LPASS, SDM660_SLAVE_HMSS, SDM660_SLAVE_SNOC_BIMC, SDM660_SLAVE_CDSP, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_WLAN, SDM660_SLAVE_IMEM);
-DEFINE_QNODE(slv_a2noc_snoc, SDM660_SLAVE_A2NOC_SNOC, 16, -1, 143, false, -1, 0, -1, SDM660_MASTER_A2NOC_SNOC);
-DEFINE_QNODE(slv_ebi, SDM660_SLAVE_EBI, 4, -1, 0, false, -1, 0, -1, 0);
-DEFINE_QNODE(slv_hmss_l3, SDM660_SLAVE_HMSS_L3, 4, -1, 160, false, -1, 0, -1, 0);
-DEFINE_QNODE(slv_bimc_snoc, SDM660_SLAVE_BIMC_SNOC, 4, -1, 2, false, -1, 0, -1, SDM660_MASTER_BIMC_SNOC);
-DEFINE_QNODE(slv_cnoc_a2noc, SDM660_SLAVE_CNOC_A2NOC, 8, -1, 208, true, -1, 0, -1, SDM660_MASTER_CNOC_A2NOC);
-DEFINE_QNODE(slv_mpm, SDM660_SLAVE_MPM, 4, -1, 62, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_pmic_arb, SDM660_SLAVE_PMIC_ARB, 4, -1, 59, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_tlmm_north, SDM660_SLAVE_TLMM_NORTH, 8, -1, 214, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_tcsr, SDM660_SLAVE_TCSR, 4, -1, 50, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_pimem_cfg, SDM660_SLAVE_PIMEM_CFG, 4, -1, 167, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_imem_cfg, SDM660_SLAVE_IMEM_CFG, 4, -1, 54, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_message_ram, SDM660_SLAVE_MESSAGE_RAM, 4, -1, 55, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_glm, SDM660_SLAVE_GLM, 4, -1, 209, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_bimc_cfg, SDM660_SLAVE_BIMC_CFG, 4, -1, 56, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_prng, SDM660_SLAVE_PRNG, 4, -1, 44, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_spdm, SDM660_SLAVE_SPDM, 4, -1, 60, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_qdss_cfg, SDM660_SLAVE_QDSS_CFG, 4, -1, 63, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_CNOC_MNOC_CFG, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(slv_snoc_cfg, SDM660_SLAVE_SNOC_CFG, 4, -1, 70, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_qm_cfg, SDM660_SLAVE_QM_CFG, 4, -1, 212, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_clk_ctl, SDM660_SLAVE_CLK_CTL, 4, -1, 47, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_mss_cfg, SDM660_SLAVE_MSS_CFG, 4, -1, 48, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_tlmm_south, SDM660_SLAVE_TLMM_SOUTH, 4, -1, 217, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_ufs_cfg, SDM660_SLAVE_UFS_CFG, 4, -1, 92, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_a2noc_cfg, SDM660_SLAVE_A2NOC_CFG, 4, -1, 150, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_a2noc_smmu_cfg, SDM660_SLAVE_A2NOC_SMMU_CFG, 8, -1, 152, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_gpuss_cfg, SDM660_SLAVE_GPUSS_CFG, 8, -1, 11, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_ahb2phy, SDM660_SLAVE_AHB2PHY, 4, -1, 163, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_blsp_1, SDM660_SLAVE_BLSP_1, 4, -1, 39, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_sdcc_1, SDM660_SLAVE_SDCC_1, 4, -1, 31, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_sdcc_2, SDM660_SLAVE_SDCC_2, 4, -1, 33, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_tlmm_center, SDM660_SLAVE_TLMM_CENTER, 4, -1, 218, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_blsp_2, SDM660_SLAVE_BLSP_2, 4, -1, 37, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_pdm, SDM660_SLAVE_PDM, 4, -1, 41, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_cnoc_mnoc_mmss_cfg, SDM660_SLAVE_CNOC_MNOC_MMSS_CFG, 8, -1, 58, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_MMSS_CFG);
-DEFINE_QNODE(slv_usb_hs, SDM660_SLAVE_USB_HS, 4, -1, 40, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_usb3_0, SDM660_SLAVE_USB3_0, 4, -1, 22, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_srvc_cnoc, SDM660_SLAVE_SRVC_CNOC, 4, -1, 76, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_gnoc_bimc, SDM660_SLAVE_GNOC_BIMC, 16, -1, 210, true, -1, 0, -1, SDM660_MASTER_GNOC_BIMC);
-DEFINE_QNODE(slv_gnoc_snoc, SDM660_SLAVE_GNOC_SNOC, 8, -1, 211, true, -1, 0, -1, SDM660_MASTER_GNOC_SNOC);
-DEFINE_QNODE(slv_camera_cfg, SDM660_SLAVE_CAMERA_CFG, 4, -1, 3, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_camera_throttle_cfg, SDM660_SLAVE_CAMERA_THROTTLE_CFG, 4, -1, 154, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_misc_cfg, SDM660_SLAVE_MISC_CFG, 4, -1, 8, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_venus_throttle_cfg, SDM660_SLAVE_VENUS_THROTTLE_CFG, 4, -1, 178, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_venus_cfg, SDM660_SLAVE_VENUS_CFG, 4, -1, 10, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_mmss_clk_xpu_cfg, SDM660_SLAVE_MMSS_CLK_XPU_CFG, 4, -1, 13, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_mmss_clk_cfg, SDM660_SLAVE_MMSS_CLK_CFG, 4, -1, 12, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_mnoc_mpu_cfg, SDM660_SLAVE_MNOC_MPU_CFG, 4, -1, 14, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_display_cfg, SDM660_SLAVE_DISPLAY_CFG, 4, -1, 4, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_csi_phy_cfg, SDM660_SLAVE_CSI_PHY_CFG, 4, -1, 224, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_display_throttle_cfg, SDM660_SLAVE_DISPLAY_THROTTLE_CFG, 4, -1, 156, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_smmu_cfg, SDM660_SLAVE_SMMU_CFG, 8, -1, 205, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_mnoc_bimc, SDM660_SLAVE_MNOC_BIMC, 16, -1, 16, true, -1, 0, -1, SDM660_MASTER_MNOC_BIMC);
-DEFINE_QNODE(slv_srvc_mnoc, SDM660_SLAVE_SRVC_MNOC, 8, -1, 17, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_hmss, SDM660_SLAVE_HMSS, 8, -1, 20, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_lpass, SDM660_SLAVE_LPASS, 4, -1, 21, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_wlan, SDM660_SLAVE_WLAN, 4, -1, 206, false, -1, 0, -1, 0);
-DEFINE_QNODE(slv_cdsp, SDM660_SLAVE_CDSP, 4, -1, 221, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_ipa, SDM660_SLAVE_IPA, 4, -1, 183, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_snoc_bimc, SDM660_SLAVE_SNOC_BIMC, 16, -1, 24, false, -1, 0, -1, SDM660_MASTER_SNOC_BIMC);
-DEFINE_QNODE(slv_snoc_cnoc, SDM660_SLAVE_SNOC_CNOC, 8, -1, 25, false, -1, 0, -1, SDM660_MASTER_SNOC_CNOC);
-DEFINE_QNODE(slv_imem, SDM660_SLAVE_IMEM, 8, -1, 26, false, -1, 0, -1, 0);
-DEFINE_QNODE(slv_pimem, SDM660_SLAVE_PIMEM, 8, -1, 166, false, -1, 0, -1, 0);
-DEFINE_QNODE(slv_qdss_stm, SDM660_SLAVE_QDSS_STM, 4, -1, 30, false, -1, 0, -1, 0);
-DEFINE_QNODE(slv_srvc_snoc, SDM660_SLAVE_SRVC_SNOC, 16, -1, 29, false, -1, 0, -1, 0);
+static const u16 mas_cnoc_a2noc_links[] = {
+ SDM660_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_cnoc_a2noc = {
+ .name = "mas_cnoc_a2noc",
+ .id = SDM660_MASTER_CNOC_A2NOC,
+ .buswidth = 8,
+ .mas_rpm_id = 146,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_cnoc_a2noc_links),
+ .links = mas_cnoc_a2noc_links,
+};
+
+static const u16 mas_sdcc_1_links[] = {
+ SDM660_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .name = "mas_sdcc_1",
+ .id = SDM660_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_sdcc_1_links),
+ .links = mas_sdcc_1_links,
+};
+
+static const u16 mas_sdcc_2_links[] = {
+ SDM660_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .name = "mas_sdcc_2",
+ .id = SDM660_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_sdcc_2_links),
+ .links = mas_sdcc_2_links,
+};
+
+static const u16 mas_blsp_1_links[] = {
+ SDM660_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = SDM660_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = 41,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_1_links),
+ .links = mas_blsp_1_links,
+};
+
+static const u16 mas_blsp_2_links[] = {
+ SDM660_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_blsp_2 = {
+ .name = "mas_blsp_2",
+ .id = SDM660_MASTER_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = 39,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_2_links),
+ .links = mas_blsp_2_links,
+};
+
+static const u16 mas_ufs_links[] = {
+ SDM660_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_ufs = {
+ .name = "mas_ufs",
+ .id = SDM660_MASTER_UFS,
+ .buswidth = 8,
+ .mas_rpm_id = 68,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_ufs_links),
+ .links = mas_ufs_links,
+};
+
+static const u16 mas_usb_hs_links[] = {
+ SDM660_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_usb_hs = {
+ .name = "mas_usb_hs",
+ .id = SDM660_MASTER_USB_HS,
+ .buswidth = 8,
+ .mas_rpm_id = 42,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_usb_hs_links),
+ .links = mas_usb_hs_links,
+};
+
+static const u16 mas_usb3_links[] = {
+ SDM660_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_usb3 = {
+ .name = "mas_usb3",
+ .id = SDM660_MASTER_USB3,
+ .buswidth = 8,
+ .mas_rpm_id = 32,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_usb3_links),
+ .links = mas_usb3_links,
+};
+
+static const u16 mas_crypto_links[] = {
+ SDM660_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_crypto = {
+ .name = "mas_crypto",
+ .id = SDM660_MASTER_CRYPTO_C0,
+ .buswidth = 8,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_crypto_links),
+ .links = mas_crypto_links,
+};
+
+static const u16 mas_gnoc_bimc_links[] = {
+ SDM660_SLAVE_EBI
+};
+
+static struct qcom_icc_node mas_gnoc_bimc = {
+ .name = "mas_gnoc_bimc",
+ .id = SDM660_MASTER_GNOC_BIMC,
+ .buswidth = 4,
+ .mas_rpm_id = 144,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_gnoc_bimc_links),
+ .links = mas_gnoc_bimc_links,
+};
+
+static const u16 mas_oxili_links[] = {
+ SDM660_SLAVE_HMSS_L3,
+ SDM660_SLAVE_EBI,
+ SDM660_SLAVE_BIMC_SNOC
+};
+
+static struct qcom_icc_node mas_oxili = {
+ .name = "mas_oxili",
+ .id = SDM660_MASTER_OXILI,
+ .buswidth = 4,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_oxili_links),
+ .links = mas_oxili_links,
+};
+
+static const u16 mas_mnoc_bimc_links[] = {
+ SDM660_SLAVE_HMSS_L3,
+ SDM660_SLAVE_EBI,
+ SDM660_SLAVE_BIMC_SNOC
+};
+
+static struct qcom_icc_node mas_mnoc_bimc = {
+ .name = "mas_mnoc_bimc",
+ .id = SDM660_MASTER_MNOC_BIMC,
+ .buswidth = 4,
+ .mas_rpm_id = 2,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_links),
+ .links = mas_mnoc_bimc_links,
+};
+
+static const u16 mas_snoc_bimc_links[] = {
+ SDM660_SLAVE_HMSS_L3,
+ SDM660_SLAVE_EBI
+};
+
+static struct qcom_icc_node mas_snoc_bimc = {
+ .name = "mas_snoc_bimc",
+ .id = SDM660_MASTER_SNOC_BIMC,
+ .buswidth = 4,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_links),
+ .links = mas_snoc_bimc_links,
+};
+
+static const u16 mas_pimem_links[] = {
+ SDM660_SLAVE_HMSS_L3,
+ SDM660_SLAVE_EBI
+};
+
+static struct qcom_icc_node mas_pimem = {
+ .name = "mas_pimem",
+ .id = SDM660_MASTER_PIMEM,
+ .buswidth = 4,
+ .mas_rpm_id = 113,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_pimem_links),
+ .links = mas_pimem_links,
+};
+
+static const u16 mas_snoc_cnoc_links[] = {
+ SDM660_SLAVE_CLK_CTL,
+ SDM660_SLAVE_QDSS_CFG,
+ SDM660_SLAVE_QM_CFG,
+ SDM660_SLAVE_SRVC_CNOC,
+ SDM660_SLAVE_UFS_CFG,
+ SDM660_SLAVE_TCSR,
+ SDM660_SLAVE_A2NOC_SMMU_CFG,
+ SDM660_SLAVE_SNOC_CFG,
+ SDM660_SLAVE_TLMM_SOUTH,
+ SDM660_SLAVE_MPM,
+ SDM660_SLAVE_CNOC_MNOC_MMSS_CFG,
+ SDM660_SLAVE_SDCC_2,
+ SDM660_SLAVE_SDCC_1,
+ SDM660_SLAVE_SPDM,
+ SDM660_SLAVE_PMIC_ARB,
+ SDM660_SLAVE_PRNG,
+ SDM660_SLAVE_MSS_CFG,
+ SDM660_SLAVE_GPUSS_CFG,
+ SDM660_SLAVE_IMEM_CFG,
+ SDM660_SLAVE_USB3_0,
+ SDM660_SLAVE_A2NOC_CFG,
+ SDM660_SLAVE_TLMM_NORTH,
+ SDM660_SLAVE_USB_HS,
+ SDM660_SLAVE_PDM,
+ SDM660_SLAVE_TLMM_CENTER,
+ SDM660_SLAVE_AHB2PHY,
+ SDM660_SLAVE_BLSP_2,
+ SDM660_SLAVE_BLSP_1,
+ SDM660_SLAVE_PIMEM_CFG,
+ SDM660_SLAVE_GLM,
+ SDM660_SLAVE_MESSAGE_RAM,
+ SDM660_SLAVE_BIMC_CFG,
+ SDM660_SLAVE_CNOC_MNOC_CFG
+};
+
+static struct qcom_icc_node mas_snoc_cnoc = {
+ .name = "mas_snoc_cnoc",
+ .id = SDM660_MASTER_SNOC_CNOC,
+ .buswidth = 8,
+ .mas_rpm_id = 52,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
+ .links = mas_snoc_cnoc_links,
+};
+
+static const u16 mas_qdss_dap_links[] = {
+ SDM660_SLAVE_CLK_CTL,
+ SDM660_SLAVE_QDSS_CFG,
+ SDM660_SLAVE_QM_CFG,
+ SDM660_SLAVE_SRVC_CNOC,
+ SDM660_SLAVE_UFS_CFG,
+ SDM660_SLAVE_TCSR,
+ SDM660_SLAVE_A2NOC_SMMU_CFG,
+ SDM660_SLAVE_SNOC_CFG,
+ SDM660_SLAVE_TLMM_SOUTH,
+ SDM660_SLAVE_MPM,
+ SDM660_SLAVE_CNOC_MNOC_MMSS_CFG,
+ SDM660_SLAVE_SDCC_2,
+ SDM660_SLAVE_SDCC_1,
+ SDM660_SLAVE_SPDM,
+ SDM660_SLAVE_PMIC_ARB,
+ SDM660_SLAVE_PRNG,
+ SDM660_SLAVE_MSS_CFG,
+ SDM660_SLAVE_GPUSS_CFG,
+ SDM660_SLAVE_IMEM_CFG,
+ SDM660_SLAVE_USB3_0,
+ SDM660_SLAVE_A2NOC_CFG,
+ SDM660_SLAVE_TLMM_NORTH,
+ SDM660_SLAVE_USB_HS,
+ SDM660_SLAVE_PDM,
+ SDM660_SLAVE_TLMM_CENTER,
+ SDM660_SLAVE_AHB2PHY,
+ SDM660_SLAVE_BLSP_2,
+ SDM660_SLAVE_BLSP_1,
+ SDM660_SLAVE_PIMEM_CFG,
+ SDM660_SLAVE_GLM,
+ SDM660_SLAVE_MESSAGE_RAM,
+ SDM660_SLAVE_CNOC_A2NOC,
+ SDM660_SLAVE_BIMC_CFG,
+ SDM660_SLAVE_CNOC_MNOC_CFG
+};
+
+static struct qcom_icc_node mas_qdss_dap = {
+ .name = "mas_qdss_dap",
+ .id = SDM660_MASTER_QDSS_DAP,
+ .buswidth = 8,
+ .mas_rpm_id = 49,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_qdss_dap_links),
+ .links = mas_qdss_dap_links,
+};
+
+static const u16 mas_apss_proc_links[] = {
+ SDM660_SLAVE_GNOC_SNOC,
+ SDM660_SLAVE_GNOC_BIMC
+};
+
+static struct qcom_icc_node mas_apss_proc = {
+ .name = "mas_apss_proc",
+ .id = SDM660_MASTER_APPS_PROC,
+ .buswidth = 16,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_apss_proc_links),
+ .links = mas_apss_proc_links,
+};
+
+static const u16 mas_cnoc_mnoc_mmss_cfg_links[] = {
+ SDM660_SLAVE_VENUS_THROTTLE_CFG,
+ SDM660_SLAVE_VENUS_CFG,
+ SDM660_SLAVE_CAMERA_THROTTLE_CFG,
+ SDM660_SLAVE_SMMU_CFG,
+ SDM660_SLAVE_CAMERA_CFG,
+ SDM660_SLAVE_CSI_PHY_CFG,
+ SDM660_SLAVE_DISPLAY_THROTTLE_CFG,
+ SDM660_SLAVE_DISPLAY_CFG,
+ SDM660_SLAVE_MMSS_CLK_CFG,
+ SDM660_SLAVE_MNOC_MPU_CFG,
+ SDM660_SLAVE_MISC_CFG,
+ SDM660_SLAVE_MMSS_CLK_XPU_CFG
+};
+
+static struct qcom_icc_node mas_cnoc_mnoc_mmss_cfg = {
+ .name = "mas_cnoc_mnoc_mmss_cfg",
+ .id = SDM660_MASTER_CNOC_MNOC_MMSS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = 4,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_cnoc_mnoc_mmss_cfg_links),
+ .links = mas_cnoc_mnoc_mmss_cfg_links,
+};
+
+static const u16 mas_cnoc_mnoc_cfg_links[] = {
+ SDM660_SLAVE_SRVC_MNOC
+};
+
+static struct qcom_icc_node mas_cnoc_mnoc_cfg = {
+ .name = "mas_cnoc_mnoc_cfg",
+ .id = SDM660_MASTER_CNOC_MNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = 5,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_cnoc_mnoc_cfg_links),
+ .links = mas_cnoc_mnoc_cfg_links,
+};
+
+static const u16 mas_cpp_links[] = {
+ SDM660_SLAVE_MNOC_BIMC
+};
+
+static struct qcom_icc_node mas_cpp = {
+ .name = "mas_cpp",
+ .id = SDM660_MASTER_CPP,
+ .buswidth = 16,
+ .mas_rpm_id = 115,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_cpp_links),
+ .links = mas_cpp_links,
+};
+
+static const u16 mas_jpeg_links[] = {
+ SDM660_SLAVE_MNOC_BIMC
+};
+
+static struct qcom_icc_node mas_jpeg = {
+ .name = "mas_jpeg",
+ .id = SDM660_MASTER_JPEG,
+ .buswidth = 16,
+ .mas_rpm_id = 7,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_jpeg_links),
+ .links = mas_jpeg_links,
+};
+
+static const u16 mas_mdp_p0_links[] = {
+ SDM660_SLAVE_MNOC_BIMC
+};
+
+static struct qcom_icc_node mas_mdp_p0 = {
+ .name = "mas_mdp_p0",
+ .id = SDM660_MASTER_MDP_P0,
+ .buswidth = 16,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_mdp_p0_links),
+ .links = mas_mdp_p0_links,
+};
+
+static const u16 mas_mdp_p1_links[] = {
+ SDM660_SLAVE_MNOC_BIMC
+};
+
+static struct qcom_icc_node mas_mdp_p1 = {
+ .name = "mas_mdp_p1",
+ .id = SDM660_MASTER_MDP_P1,
+ .buswidth = 16,
+ .mas_rpm_id = 61,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_mdp_p1_links),
+ .links = mas_mdp_p1_links,
+};
+
+static const u16 mas_venus_links[] = {
+ SDM660_SLAVE_MNOC_BIMC
+};
+
+static struct qcom_icc_node mas_venus = {
+ .name = "mas_venus",
+ .id = SDM660_MASTER_VENUS,
+ .buswidth = 16,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_venus_links),
+ .links = mas_venus_links,
+};
+
+static const u16 mas_vfe_links[] = {
+ SDM660_SLAVE_MNOC_BIMC
+};
+
+static struct qcom_icc_node mas_vfe = {
+ .name = "mas_vfe",
+ .id = SDM660_MASTER_VFE,
+ .buswidth = 16,
+ .mas_rpm_id = 11,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_vfe_links),
+ .links = mas_vfe_links,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ SDM660_SLAVE_PIMEM,
+ SDM660_SLAVE_IMEM,
+ SDM660_SLAVE_SNOC_CNOC,
+ SDM660_SLAVE_SNOC_BIMC
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = SDM660_MASTER_QDSS_ETR,
+ .buswidth = 8,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ SDM660_SLAVE_PIMEM,
+ SDM660_SLAVE_IMEM,
+ SDM660_SLAVE_SNOC_CNOC,
+ SDM660_SLAVE_SNOC_BIMC
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = SDM660_MASTER_QDSS_BAM,
+ .buswidth = 4,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_snoc_cfg_links[] = {
+ SDM660_SLAVE_SRVC_SNOC
+};
+
+static struct qcom_icc_node mas_snoc_cfg = {
+ .name = "mas_snoc_cfg",
+ .id = SDM660_MASTER_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = 20,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cfg_links),
+ .links = mas_snoc_cfg_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ SDM660_SLAVE_PIMEM,
+ SDM660_SLAVE_IPA,
+ SDM660_SLAVE_QDSS_STM,
+ SDM660_SLAVE_LPASS,
+ SDM660_SLAVE_HMSS,
+ SDM660_SLAVE_CDSP,
+ SDM660_SLAVE_SNOC_CNOC,
+ SDM660_SLAVE_WLAN,
+ SDM660_SLAVE_IMEM
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .name = "mas_bimc_snoc",
+ .id = SDM660_MASTER_BIMC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_gnoc_snoc_links[] = {
+ SDM660_SLAVE_PIMEM,
+ SDM660_SLAVE_IPA,
+ SDM660_SLAVE_QDSS_STM,
+ SDM660_SLAVE_LPASS,
+ SDM660_SLAVE_HMSS,
+ SDM660_SLAVE_CDSP,
+ SDM660_SLAVE_SNOC_CNOC,
+ SDM660_SLAVE_WLAN,
+ SDM660_SLAVE_IMEM
+};
+
+static struct qcom_icc_node mas_gnoc_snoc = {
+ .name = "mas_gnoc_snoc",
+ .id = SDM660_MASTER_GNOC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = 150,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_gnoc_snoc_links),
+ .links = mas_gnoc_snoc_links,
+};
+
+static const u16 mas_a2noc_snoc_links[] = {
+ SDM660_SLAVE_PIMEM,
+ SDM660_SLAVE_IPA,
+ SDM660_SLAVE_QDSS_STM,
+ SDM660_SLAVE_LPASS,
+ SDM660_SLAVE_HMSS,
+ SDM660_SLAVE_SNOC_BIMC,
+ SDM660_SLAVE_CDSP,
+ SDM660_SLAVE_SNOC_CNOC,
+ SDM660_SLAVE_WLAN,
+ SDM660_SLAVE_IMEM
+};
+
+static struct qcom_icc_node mas_a2noc_snoc = {
+ .name = "mas_a2noc_snoc",
+ .id = SDM660_MASTER_A2NOC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = 112,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_a2noc_snoc_links),
+ .links = mas_a2noc_snoc_links,
+};
+
+static const u16 slv_a2noc_snoc_links[] = {
+ SDM660_MASTER_A2NOC_SNOC
+};
+
+static struct qcom_icc_node slv_a2noc_snoc = {
+ .name = "slv_a2noc_snoc",
+ .id = SDM660_SLAVE_A2NOC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 143,
+ .num_links = ARRAY_SIZE(slv_a2noc_snoc_links),
+ .links = slv_a2noc_snoc_links,
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = SDM660_SLAVE_EBI,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static struct qcom_icc_node slv_hmss_l3 = {
+ .name = "slv_hmss_l3",
+ .id = SDM660_SLAVE_HMSS_L3,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 160,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ SDM660_MASTER_BIMC_SNOC
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = SDM660_SLAVE_BIMC_SNOC,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static const u16 slv_cnoc_a2noc_links[] = {
+ SDM660_MASTER_CNOC_A2NOC
+};
+
+static struct qcom_icc_node slv_cnoc_a2noc = {
+ .name = "slv_cnoc_a2noc",
+ .id = SDM660_SLAVE_CNOC_A2NOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 208,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_cnoc_a2noc_links),
+ .links = slv_cnoc_a2noc_links,
+};
+
+static struct qcom_icc_node slv_mpm = {
+ .name = "slv_mpm",
+ .id = SDM660_SLAVE_MPM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 62,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = SDM660_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_tlmm_north = {
+ .name = "slv_tlmm_north",
+ .id = SDM660_SLAVE_TLMM_NORTH,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 214,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = SDM660_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_pimem_cfg = {
+ .name = "slv_pimem_cfg",
+ .id = SDM660_SLAVE_PIMEM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 167,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_imem_cfg = {
+ .name = "slv_imem_cfg",
+ .id = SDM660_SLAVE_IMEM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 54,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = SDM660_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_glm = {
+ .name = "slv_glm",
+ .id = SDM660_SLAVE_GLM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 209,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_bimc_cfg = {
+ .name = "slv_bimc_cfg",
+ .id = SDM660_SLAVE_BIMC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 56,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = SDM660_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 44,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_spdm = {
+ .name = "slv_spdm",
+ .id = SDM660_SLAVE_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 60,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_qdss_cfg = {
+ .name = "slv_qdss_cfg",
+ .id = SDM660_SLAVE_QDSS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 63,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_cnoc_mnoc_cfg_links[] = {
+ SDM660_MASTER_CNOC_MNOC_CFG
+};
+
+static struct qcom_icc_node slv_cnoc_mnoc_cfg = {
+ .name = "slv_cnoc_mnoc_cfg",
+ .id = SDM660_SLAVE_CNOC_MNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 66,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_cnoc_mnoc_cfg_links),
+ .links = slv_cnoc_mnoc_cfg_links,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = SDM660_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_qm_cfg = {
+ .name = "slv_qm_cfg",
+ .id = SDM660_SLAVE_QM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 212,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_clk_ctl = {
+ .name = "slv_clk_ctl",
+ .id = SDM660_SLAVE_CLK_CTL,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 47,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_mss_cfg = {
+ .name = "slv_mss_cfg",
+ .id = SDM660_SLAVE_MSS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 48,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_tlmm_south = {
+ .name = "slv_tlmm_south",
+ .id = SDM660_SLAVE_TLMM_SOUTH,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 217,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_ufs_cfg = {
+ .name = "slv_ufs_cfg",
+ .id = SDM660_SLAVE_UFS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 92,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_a2noc_cfg = {
+ .name = "slv_a2noc_cfg",
+ .id = SDM660_SLAVE_A2NOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 150,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_a2noc_smmu_cfg = {
+ .name = "slv_a2noc_smmu_cfg",
+ .id = SDM660_SLAVE_A2NOC_SMMU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 152,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_gpuss_cfg = {
+ .name = "slv_gpuss_cfg",
+ .id = SDM660_SLAVE_GPUSS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 11,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_ahb2phy = {
+ .name = "slv_ahb2phy",
+ .id = SDM660_SLAVE_AHB2PHY,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 163,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = SDM660_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 39,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = SDM660_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = SDM660_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_tlmm_center = {
+ .name = "slv_tlmm_center",
+ .id = SDM660_SLAVE_TLMM_CENTER,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 218,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_blsp_2 = {
+ .name = "slv_blsp_2",
+ .id = SDM660_SLAVE_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 37,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = SDM660_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_cnoc_mnoc_mmss_cfg_links[] = {
+ SDM660_MASTER_CNOC_MNOC_MMSS_CFG
+};
+
+static struct qcom_icc_node slv_cnoc_mnoc_mmss_cfg = {
+ .name = "slv_cnoc_mnoc_mmss_cfg",
+ .id = SDM660_SLAVE_CNOC_MNOC_MMSS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 58,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_cnoc_mnoc_mmss_cfg_links),
+ .links = slv_cnoc_mnoc_mmss_cfg_links,
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = SDM660_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 40,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_usb3_0 = {
+ .name = "slv_usb3_0",
+ .id = SDM660_SLAVE_USB3_0,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 22,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_srvc_cnoc = {
+ .name = "slv_srvc_cnoc",
+ .id = SDM660_SLAVE_SRVC_CNOC,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 76,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_gnoc_bimc_links[] = {
+ SDM660_MASTER_GNOC_BIMC
+};
+
+static struct qcom_icc_node slv_gnoc_bimc = {
+ .name = "slv_gnoc_bimc",
+ .id = SDM660_SLAVE_GNOC_BIMC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 210,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_gnoc_bimc_links),
+ .links = slv_gnoc_bimc_links,
+};
+
+static const u16 slv_gnoc_snoc_links[] = {
+ SDM660_MASTER_GNOC_SNOC
+};
+
+static struct qcom_icc_node slv_gnoc_snoc = {
+ .name = "slv_gnoc_snoc",
+ .id = SDM660_SLAVE_GNOC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 211,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_gnoc_snoc_links),
+ .links = slv_gnoc_snoc_links,
+};
+
+static struct qcom_icc_node slv_camera_cfg = {
+ .name = "slv_camera_cfg",
+ .id = SDM660_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_camera_throttle_cfg = {
+ .name = "slv_camera_throttle_cfg",
+ .id = SDM660_SLAVE_CAMERA_THROTTLE_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 154,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_misc_cfg = {
+ .name = "slv_misc_cfg",
+ .id = SDM660_SLAVE_MISC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_venus_throttle_cfg = {
+ .name = "slv_venus_throttle_cfg",
+ .id = SDM660_SLAVE_VENUS_THROTTLE_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 178,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = SDM660_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_mmss_clk_xpu_cfg = {
+ .name = "slv_mmss_clk_xpu_cfg",
+ .id = SDM660_SLAVE_MMSS_CLK_XPU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 13,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_mmss_clk_cfg = {
+ .name = "slv_mmss_clk_cfg",
+ .id = SDM660_SLAVE_MMSS_CLK_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 12,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_mnoc_mpu_cfg = {
+ .name = "slv_mnoc_mpu_cfg",
+ .id = SDM660_SLAVE_MNOC_MPU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 14,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_display_cfg = {
+ .name = "slv_display_cfg",
+ .id = SDM660_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_csi_phy_cfg = {
+ .name = "slv_csi_phy_cfg",
+ .id = SDM660_SLAVE_CSI_PHY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 224,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_display_throttle_cfg = {
+ .name = "slv_display_throttle_cfg",
+ .id = SDM660_SLAVE_DISPLAY_THROTTLE_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 156,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_smmu_cfg = {
+ .name = "slv_smmu_cfg",
+ .id = SDM660_SLAVE_SMMU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 205,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_mnoc_bimc_links[] = {
+ SDM660_MASTER_MNOC_BIMC
+};
+
+static struct qcom_icc_node slv_mnoc_bimc = {
+ .name = "slv_mnoc_bimc",
+ .id = SDM660_SLAVE_MNOC_BIMC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 16,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_mnoc_bimc_links),
+ .links = slv_mnoc_bimc_links,
+};
+
+static struct qcom_icc_node slv_srvc_mnoc = {
+ .name = "slv_srvc_mnoc",
+ .id = SDM660_SLAVE_SRVC_MNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 17,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_hmss = {
+ .name = "slv_hmss",
+ .id = SDM660_SLAVE_HMSS,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_lpass = {
+ .name = "slv_lpass",
+ .id = SDM660_SLAVE_LPASS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 21,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_wlan = {
+ .name = "slv_wlan",
+ .id = SDM660_SLAVE_WLAN,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 206,
+};
+
+static struct qcom_icc_node slv_cdsp = {
+ .name = "slv_cdsp",
+ .id = SDM660_SLAVE_CDSP,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 221,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_ipa = {
+ .name = "slv_ipa",
+ .id = SDM660_SLAVE_IPA,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 183,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_snoc_bimc_links[] = {
+ SDM660_MASTER_SNOC_BIMC
+};
+
+static struct qcom_icc_node slv_snoc_bimc = {
+ .name = "slv_snoc_bimc",
+ .id = SDM660_SLAVE_SNOC_BIMC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_links),
+ .links = slv_snoc_bimc_links,
+};
+
+static const u16 slv_snoc_cnoc_links[] = {
+ SDM660_MASTER_SNOC_CNOC
+};
+
+static struct qcom_icc_node slv_snoc_cnoc = {
+ .name = "slv_snoc_cnoc",
+ .id = SDM660_SLAVE_SNOC_CNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 25,
+ .num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
+ .links = slv_snoc_cnoc_links,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = SDM660_SLAVE_IMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static struct qcom_icc_node slv_pimem = {
+ .name = "slv_pimem",
+ .id = SDM660_SLAVE_PIMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 166,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = SDM660_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_srvc_snoc = {
+ .name = "slv_srvc_snoc",
+ .id = SDM660_SLAVE_SRVC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 29,
+};
static struct qcom_icc_node *sdm660_a2noc_nodes[] = {
[MASTER_IPA] = &mas_ipa,
@@ -391,6 +1515,8 @@ static const struct regmap_config sdm660_a2noc_regmap_config = {
static struct qcom_icc_desc sdm660_a2noc = {
.nodes = sdm660_a2noc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes),
+ .clocks = bus_a2noc_clocks,
+ .num_clocks = ARRAY_SIZE(bus_a2noc_clocks),
.regmap_cfg = &sdm660_a2noc_regmap_config,
};
@@ -416,6 +1542,7 @@ static const struct regmap_config sdm660_bimc_regmap_config = {
static struct qcom_icc_desc sdm660_bimc = {
.nodes = sdm660_bimc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_bimc_nodes),
+ .is_bimc_node = true,
.regmap_cfg = &sdm660_bimc_regmap_config,
};
@@ -528,6 +1655,8 @@ static const struct regmap_config sdm660_mnoc_regmap_config = {
static struct qcom_icc_desc sdm660_mnoc = {
.nodes = sdm660_mnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes),
+ .clocks = bus_mm_clocks,
+ .num_clocks = ARRAY_SIZE(bus_mm_clocks),
.regmap_cfg = &sdm660_mnoc_regmap_config,
};
@@ -565,353 +1694,6 @@ static struct qcom_icc_desc sdm660_snoc = {
.regmap_cfg = &sdm660_snoc_regmap_config,
};
-static int qcom_icc_bimc_set_qos_health(struct regmap *rmap,
- struct qcom_icc_qos *qos,
- int regnum)
-{
- u32 val;
- u32 mask;
-
- val = qos->prio_level;
- mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;
-
- val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
- mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;
-
- /* LIMITCMDS is not present on M_BKE_HEALTH_3 */
- if (regnum != 3) {
- val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
- mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
- }
-
- return regmap_update_bits(rmap,
- M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
- mask, val);
-}
-
-static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw,
- bool bypass_mode)
-{
- struct qcom_icc_provider *qp;
- struct qcom_icc_node *qn;
- struct icc_provider *provider;
- u32 mode = NOC_QOS_MODE_BYPASS;
- u32 val = 0;
- int i, rc = 0;
-
- qn = src->data;
- provider = src->provider;
- qp = to_qcom_provider(provider);
-
- if (qn->qos.qos_mode != -1)
- mode = qn->qos.qos_mode;
-
- /* QoS Priority: The QoS Health parameters are getting considered
- * only if we are NOT in Bypass Mode.
- */
- if (mode != NOC_QOS_MODE_BYPASS) {
- for (i = 3; i >= 0; i--) {
- rc = qcom_icc_bimc_set_qos_health(qp->regmap,
- &qn->qos, i);
- if (rc)
- return rc;
- }
-
- /* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
- val = 1;
- }
-
- return regmap_update_bits(qp->regmap, M_BKE_EN_ADDR(qn->qos.qos_port),
- M_BKE_EN_EN_BMASK, val);
-}
-
-static int qcom_icc_noc_set_qos_priority(struct regmap *rmap,
- struct qcom_icc_qos *qos)
-{
- u32 val;
- int rc;
-
- /* Must be updated one at a time, P1 first, P0 last */
- val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
- rc = regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
- NOC_QOS_PRIORITY_P1_MASK, val);
- if (rc)
- return rc;
-
- return regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
- NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
-}
-
-static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
-{
- struct qcom_icc_provider *qp;
- struct qcom_icc_node *qn;
- struct icc_provider *provider;
- u32 mode = NOC_QOS_MODE_BYPASS;
- int rc = 0;
-
- qn = src->data;
- provider = src->provider;
- qp = to_qcom_provider(provider);
-
- if (qn->qos.qos_port < 0) {
- dev_dbg(src->provider->dev,
- "NoC QoS: Skipping %s: vote aggregated on parent.\n",
- qn->name);
- return 0;
- }
-
- if (qn->qos.qos_mode != -1)
- mode = qn->qos.qos_mode;
-
- if (mode == NOC_QOS_MODE_FIXED) {
- dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
- qn->name);
- rc = qcom_icc_noc_set_qos_priority(qp->regmap, &qn->qos);
- if (rc)
- return rc;
- } else if (mode == NOC_QOS_MODE_BYPASS) {
- dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
- qn->name);
- }
-
- return regmap_update_bits(qp->regmap,
- NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
- NOC_QOS_MODEn_MASK, mode);
-}
-
-static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
-{
- struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
- struct qcom_icc_node *qn = node->data;
-
- dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);
-
- if (qp->is_bimc_node)
- return qcom_icc_set_bimc_qos(node, sum_bw,
- (qn->qos.qos_mode == NOC_QOS_MODE_BYPASS));
-
- return qcom_icc_set_noc_qos(node, sum_bw);
-}
-
-static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
-{
- int ret = 0;
-
- if (mas_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_MASTER_REQ,
- mas_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
- mas_rpm_id, ret);
- return ret;
- }
- }
-
- if (slv_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_SLAVE_REQ,
- slv_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
- slv_rpm_id, ret);
- return ret;
- }
- }
-
- return ret;
-}
-
-static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
-{
- struct qcom_icc_provider *qp;
- struct qcom_icc_node *qn;
- struct icc_provider *provider;
- struct icc_node *n;
- u64 sum_bw;
- u64 max_peak_bw;
- u64 rate;
- u32 agg_avg = 0;
- u32 agg_peak = 0;
- int ret, i;
-
- qn = src->data;
- provider = src->provider;
- qp = to_qcom_provider(provider);
-
- list_for_each_entry(n, &provider->nodes, node_list)
- provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
-
- sum_bw = icc_units_to_bps(agg_avg);
- max_peak_bw = icc_units_to_bps(agg_peak);
-
- if (!qn->qos.ap_owned) {
- /* send bandwidth request message to the RPM processor */
- ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
- if (ret)
- return ret;
- } else if (qn->qos.qos_mode != -1) {
- /* set bandwidth directly from the AP */
- ret = qcom_icc_qos_set(src, sum_bw);
- if (ret)
- return ret;
- }
-
- rate = max(sum_bw, max_peak_bw);
-
- do_div(rate, qn->buswidth);
-
- if (qn->rate == rate)
- return 0;
-
- for (i = 0; i < qp->num_clks; i++) {
- ret = clk_set_rate(qp->bus_clks[i].clk, rate);
- if (ret) {
- pr_err("%s clk_set_rate error: %d\n",
- qp->bus_clks[i].id, ret);
- return ret;
- }
- }
-
- qn->rate = rate;
-
- return 0;
-}
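
The rate computation in this removed set() hook is its core: the larger of the aggregated average and peak votes, divided by the node's bus width in bytes, gives the minimum bus clock in Hz. A hedged worked example (the 1000/2000 kBps votes and the 16-byte width are made-up illustration, not values from this driver):

	u64 sum_bw = icc_units_to_bps(1000);      /* 1000 kBps avg  -> 1000000 Bps */
	u64 max_peak_bw = icc_units_to_bps(2000); /* 2000 kBps peak -> 2000000 Bps */
	u64 rate = max(sum_bw, max_peak_bw);      /* peak wins: 2000000 Bps */

	do_div(rate, 16);                         /* / buswidth -> rate = 125000 Hz */
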
-
-static int qnoc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- const struct qcom_icc_desc *desc;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct qcom_icc_node **qnodes;
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- struct resource *res;
- size_t num_nodes, i;
- int ret;
-
- /* wait for the RPM proxy */
- if (!qcom_icc_rpm_smd_available())
- return -EPROBE_DEFER;
-
- desc = of_device_get_match_data(dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (of_device_is_compatible(dev->of_node, "qcom,sdm660-mnoc")) {
- qp->bus_clks = devm_kmemdup(dev, bus_mm_clocks,
- sizeof(bus_mm_clocks), GFP_KERNEL);
- qp->num_clks = ARRAY_SIZE(bus_mm_clocks);
- } else if (of_device_is_compatible(dev->of_node, "qcom,sdm660-a2noc")) {
- qp->bus_clks = devm_kmemdup(dev, bus_a2noc_clocks,
- sizeof(bus_a2noc_clocks), GFP_KERNEL);
- qp->num_clks = ARRAY_SIZE(bus_a2noc_clocks);
- } else {
- if (of_device_is_compatible(dev->of_node, "qcom,sdm660-bimc"))
- qp->is_bimc_node = true;
-
- qp->bus_clks = devm_kmemdup(dev, bus_clocks, sizeof(bus_clocks),
- GFP_KERNEL);
- qp->num_clks = ARRAY_SIZE(bus_clocks);
- }
- if (!qp->bus_clks)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- qp->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(qp->mmio)) {
- dev_err(dev, "Cannot ioremap interconnect bus resource\n");
- return PTR_ERR(qp->mmio);
- }
-
- qp->regmap = devm_regmap_init_mmio(dev, qp->mmio, desc->regmap_cfg);
- if (IS_ERR(qp->regmap)) {
- dev_err(dev, "Cannot regmap interconnect bus resource\n");
- return PTR_ERR(qp->regmap);
- }
-
- ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- provider = &qp->provider;
- INIT_LIST_HEAD(&provider->nodes);
- provider->dev = dev;
- provider->set = qcom_icc_set;
- provider->aggregate = icc_std_aggregate;
- provider->xlate = of_icc_xlate_onecell;
- provider->data = data;
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return ret;
- }
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
- platform_set_drvdata(pdev, qp);
-
- return 0;
-err:
- icc_nodes_remove(provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- icc_provider_del(provider);
-
- return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
- struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return icc_provider_del(&qp->provider);
-}
-
static const struct of_device_id sdm660_noc_of_match[] = {
{ .compatible = "qcom,sdm660-a2noc", .data = &sdm660_a2noc },
{ .compatible = "qcom,sdm660-bimc", .data = &sdm660_bimc },
diff --git a/drivers/interconnect/samsung/Kconfig b/drivers/interconnect/samsung/Kconfig
index 6820e4f772cc..fbee87e379d0 100644
--- a/drivers/interconnect/samsung/Kconfig
+++ b/drivers/interconnect/samsung/Kconfig
@@ -6,8 +6,10 @@ config INTERCONNECT_SAMSUNG
Interconnect drivers for Samsung SoCs.
config INTERCONNECT_EXYNOS
- tristate "Exynos generic interconnect driver"
+ tristate "Exynos SoC generic interconnect driver"
depends on INTERCONNECT_SAMSUNG
default y if ARCH_EXYNOS
help
- Generic interconnect driver for Exynos SoCs.
+ Generic interconnect driver for Samsung Exynos SoCs (e.g. Exynos3250,
+ Exynos4210, Exynos4412, Exynos542x, Exynos5433).
+ Choose Y here only if you are building for such a Samsung SoC.
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 2a822b229bd0..1eacd43cb436 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -20,7 +20,7 @@
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
@@ -121,8 +121,10 @@ struct ivhd_entry {
u8 type;
u16 devid;
u8 flags;
- u32 ext;
- u32 hidh;
+ struct_group(ext_hid,
+ u32 ext;
+ u32 hidh;
+ );
u64 cid;
u8 uidf;
u8 uidl;
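
The struct_group() wrapper (defined in include/linux/stddef.h) is what lets the memcpy() later in this patch span both members without tripping fortified bounds checks. A minimal sketch of the pattern, using an illustrative struct rather than the real ivhd_entry:

	struct example {
		u8 type;
		struct_group(pair,	/* members stay individually addressable, */
			u32 a;		/* while 'pair' also names them as one    */
			u32 b;		/* 8-byte object for sizeof()/memcpy()    */
		);
	};

	/* sizeof(((struct example *)0)->pair) == 8, so a copy across both
	 * members is bounds-checked against one object instead of 'a' alone */
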
@@ -964,7 +966,7 @@ static bool copy_device_table(void)
pr_err("The address of old device table is above 4G, not trustworthy!\n");
return false;
}
- old_devtb = (sme_active() && is_kdump_kernel())
+ old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
? (__force void *)ioremap_encrypted(old_devtb_phys,
dev_table_size)
: memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
@@ -1377,7 +1379,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
- memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
+ BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
+ memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
hid[ACPIHID_HID_LEN - 1] = '\0';
if (!(*hid)) {
@@ -3032,7 +3035,8 @@ static int __init amd_iommu_init(void)
static bool amd_iommu_sme_check(void)
{
- if (!sme_active() || (boot_cpu_data.x86 != 0x17))
+ if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
+ (boot_cpu_data.x86 != 0x17))
return true;
/* For Fam17h, a specific level of support is required */
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 1722bb161841..9e5da037d949 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -31,6 +31,7 @@
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/io-pgtable.h>
+#include <linux/cc_platform.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -2238,7 +2239,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
* active, because some of those devices (AMD GPUs) don't have the
* encryption bit in their DMA-mask and require remapping.
*/
- if (!mem_encrypt_active() && dev_data->iommu_v2)
+ if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
return IOMMU_DOMAIN_IDENTITY;
return 0;
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index a9e568276c99..13cbeb997cc1 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -17,6 +17,7 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>
+#include <linux/cc_platform.h>
#include "amd_iommu.h"
@@ -742,7 +743,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
* When memory encryption is active the device is likely not in a
* direct-mapped domain. Forbid using IOMMUv2 functionality for now.
*/
- if (mem_encrypt_active())
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return -ENODEV;
if (!amd_iommu_v2_supported())
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3303d707bab4..e80261d17a49 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -25,6 +25,7 @@
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
+#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
@@ -130,7 +131,7 @@ static int __init iommu_subsys_init(void)
else
iommu_set_default_translated(false);
- if (iommu_default_passthrough() && mem_encrypt_active()) {
+ if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
iommu_set_default_translated(false);
}
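
The mem_encrypt_active()/sme_active() conversions in these hunks all follow the same shape; a hedged sketch of the replacement API (cc_platform_has() and the CC_ATTR_* attributes come from <linux/cc_platform.h>; the helper name here is made up):

	#include <linux/cc_platform.h>

	static bool example_needs_unencrypted_dma(void)
	{
		/* true when guest or host memory encryption (SME/SEV/...) is on */
		return cc_platform_has(CC_ATTR_MEM_ENCRYPT);
	}
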
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index c709861198e5..20d2b9ec1227 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -276,7 +276,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
{
int res;
int i;
- struct tty_driver *tty;
+ struct tty_driver *drv;
struct ipoctal_channel *channel;
struct ipack_region *region;
void __iomem *addr;
@@ -359,38 +359,38 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
/* Register the TTY device */
/* Each IP-OCTAL channel is a TTY port */
- tty = tty_alloc_driver(NR_CHANNELS, TTY_DRIVER_REAL_RAW |
+ drv = tty_alloc_driver(NR_CHANNELS, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
- if (IS_ERR(tty))
- return PTR_ERR(tty);
+ if (IS_ERR(drv))
+ return PTR_ERR(drv);
/* Fill struct tty_driver with ipoctal data */
- tty->owner = THIS_MODULE;
- tty->driver_name = KBUILD_MODNAME;
- tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
- if (!tty->name) {
+ drv->owner = THIS_MODULE;
+ drv->driver_name = KBUILD_MODNAME;
+ drv->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
+ if (!drv->name) {
res = -ENOMEM;
goto err_put_driver;
}
- tty->major = 0;
-
- tty->minor_start = 0;
- tty->type = TTY_DRIVER_TYPE_SERIAL;
- tty->subtype = SERIAL_TYPE_NORMAL;
- tty->init_termios = tty_std_termios;
- tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- tty->init_termios.c_ispeed = 9600;
- tty->init_termios.c_ospeed = 9600;
-
- tty_set_operations(tty, &ipoctal_fops);
- res = tty_register_driver(tty);
+ drv->major = 0;
+
+ drv->minor_start = 0;
+ drv->type = TTY_DRIVER_TYPE_SERIAL;
+ drv->subtype = SERIAL_TYPE_NORMAL;
+ drv->init_termios = tty_std_termios;
+ drv->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ drv->init_termios.c_ispeed = 9600;
+ drv->init_termios.c_ospeed = 9600;
+
+ tty_set_operations(drv, &ipoctal_fops);
+ res = tty_register_driver(drv);
if (res) {
dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n");
goto err_free_name;
}
/* Save the struct tty_driver for use when uninstalling the device */
- ipoctal->tty_drv = tty;
+ ipoctal->tty_drv = drv;
for (i = 0; i < NR_CHANNELS; i++) {
struct device *tty_dev;
@@ -407,7 +407,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
spin_lock_init(&channel->lock);
channel->pointer_read = 0;
channel->pointer_write = 0;
- tty_dev = tty_port_register_device_attr(&channel->tty_port, tty,
+ tty_dev = tty_port_register_device_attr(&channel->tty_port, drv,
i, NULL, channel, NULL);
if (IS_ERR(tty_dev)) {
dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n");
@@ -429,9 +429,9 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
return 0;
err_free_name:
- kfree(tty->name);
+ kfree(drv->name);
err_put_driver:
- tty_driver_kref_put(tty);
+ tty_driver_kref_put(drv);
return res;
}
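
The rename from tty to drv is cosmetic, but the unwind order it preserves is easy to get wrong; a minimal sketch of the same sequence (example_register() and example_ops are hypothetical names, error handling trimmed to essentials):

	static const struct tty_operations example_ops; /* normally populated */

	static struct tty_driver *example_register(void)
	{
		struct tty_driver *drv;
		int res;

		drv = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
		if (IS_ERR(drv))
			return drv;

		drv->driver_name = KBUILD_MODNAME;
		drv->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d", 0);
		if (!drv->name) {
			tty_driver_kref_put(drv);	/* drops the allocation */
			return ERR_PTR(-ENOMEM);
		}

		tty_set_operations(drv, &example_ops);
		res = tty_register_driver(drv);
		if (res) {
			kfree(drv->name);		/* undo kasprintf() first */
			tty_driver_kref_put(drv);
			return ERR_PTR(res);
		}
		return drv;
	}
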
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index aca7b595c4c7..7038957f4a77 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -115,18 +115,24 @@ config BCM6345_L1_IRQ
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config BCM7038_L1_IRQ
- bool
+ tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config BCM7120_L2_IRQ
- bool
+ tristate "Broadcom STB 7120-style L2 interrupt controller driver"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
config BRCMSTB_L2_IRQ
- bool
+ tristate "Broadcom STB generic L2 interrupt controller driver"
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
@@ -400,8 +406,9 @@ config IRQ_UNIPHIER_AIDET
Support for the UniPhier AIDET (ARM Interrupt Detector).
config MESON_IRQ_GPIO
- bool "Meson GPIO Interrupt Multiplexer"
- depends on ARCH_MESON
+ tristate "Meson GPIO Interrupt Multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ default ARCH_MESON
select IRQ_DOMAIN_HIERARCHY
help
Support Meson SoC Family GPIO Interrupt Multiplexer
@@ -602,4 +609,12 @@ config APPLE_AIC
Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
such as the M1.
+config MCHP_EIC
+ bool "Microchip External Interrupt Controller"
+ depends on ARCH_AT91 || COMPILE_TEST
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for Microchip External Interrupt Controller.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index f88cbf36a9d2..c1f611cbfbf8 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -116,3 +116,4 @@ obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o
obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o
+obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 6fc145aacaf0..3759dc36cc8f 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -245,7 +245,7 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
irq = FIELD_GET(AIC_EVENT_NUM, event);
if (type == AIC_EVENT_TYPE_HW)
- handle_domain_irq(aic_irqc->hw_domain, irq, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain, irq);
else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
aic_handle_ipi(regs);
else if (event != 0)
@@ -392,25 +392,25 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
}
if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL0_PHYS, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL0_PHYS);
if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL0_VIRT, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL0_VIRT);
if (is_kernel_in_hyp_mode()) {
uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);
if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL02_PHYS, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL02_PHYS);
if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL02_VIRT, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL02_VIRT);
}
if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
@@ -674,7 +674,7 @@ static void aic_handle_ipi(struct pt_regs *regs)
firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;
for_each_set_bit(i, &firing, AIC_NR_SWIPI)
- handle_domain_irq(aic_irqc->ipi_domain, i, regs);
+ generic_handle_domain_irq(aic_irqc->ipi_domain, i);
/*
* No ordering needed here; at worst this just changes the timing of
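
Every irqchip hunk from here on makes the same substitution, so it is worth spelling out once: handle_domain_irq() bundled the irq_enter()/irq_exit() and set_irq_regs() bookkeeping with the domain lookup, and that bookkeeping has moved into the architecture entry code. A hedged sketch of the call that remains (simplified; example_handler is an illustrative name):

	static void example_handler(struct irq_domain *domain, irq_hw_number_t hwirq)
	{
		/* maps hwirq to its irq_desc and runs the flow handler;
		 * returns -EINVAL when no mapping exists */
		if (generic_handle_domain_irq(domain, hwirq))
			pr_warn("spurious hwirq %lu\n", hwirq);
	}
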
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 53e0fb0562c1..80906bfec845 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -589,12 +589,7 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
irq = msinr - PCI_MSI_DOORBELL_START;
- if (is_chained)
- generic_handle_domain_irq(armada_370_xp_msi_inner_domain,
- irq);
- else
- handle_domain_irq(armada_370_xp_msi_inner_domain,
- irq, regs);
+ generic_handle_domain_irq(armada_370_xp_msi_inner_domain, irq);
}
}
#else
@@ -646,8 +641,8 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
break;
if (irqnr > 1) {
- handle_domain_irq(armada_370_xp_mpic_domain,
- irqnr, regs);
+ generic_handle_domain_irq(armada_370_xp_mpic_domain,
+ irqnr);
continue;
}
@@ -666,7 +661,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
& IPI_DOORBELL_MASK;
for_each_set_bit(ipi, &ipimask, IPI_DOORBELL_END)
- handle_domain_irq(ipi_domain, ipi, regs);
+ generic_handle_domain_irq(ipi_domain, ipi);
}
#endif
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
index 58717cd44f99..62ccf2c0c414 100644
--- a/drivers/irqchip/irq-aspeed-vic.c
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -100,7 +100,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
if (stat == 0)
break;
irq += ffs(stat) - 1;
- handle_domain_irq(vic->dom, irq, regs);
+ generic_handle_domain_irq(vic->dom, irq);
}
}
diff --git a/drivers/irqchip/irq-ativic32.c b/drivers/irqchip/irq-ativic32.c
index 476d6024aaf2..223dd2f97d28 100644
--- a/drivers/irqchip/irq-ativic32.c
+++ b/drivers/irqchip/irq-ativic32.c
@@ -5,11 +5,14 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <nds32_intrinsic.h>
+#include <asm/irq_regs.h>
+
unsigned long wake_mask;
static void ativic32_ack_irq(struct irq_data *data)
@@ -103,10 +106,25 @@ static irq_hw_number_t get_intr_src(void)
- NDS32_VECTOR_offINTERRUPT;
}
-asmlinkage void asm_do_IRQ(struct pt_regs *regs)
+static void ativic32_handle_irq(struct pt_regs *regs)
{
irq_hw_number_t hwirq = get_intr_src();
- handle_domain_irq(root_domain, hwirq, regs);
+ generic_handle_domain_irq(root_domain, hwirq);
+}
+
+/*
+ * TODO: convert nds32 to GENERIC_IRQ_MULTI_HANDLER so that this entry logic
+ * can live in arch code.
+ */
+asmlinkage void asm_do_IRQ(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+
+ irq_enter();
+ old_regs = set_irq_regs(regs);
+ ativic32_handle_irq(regs);
+ set_irq_regs(old_regs);
+ irq_exit();
}
int __init ativic32_init_irq(struct device_node *node, struct device_node *parent)
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 2c999dc310c1..4631f6847953 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -71,7 +71,7 @@ aic_handle(struct pt_regs *regs)
if (!irqstat)
irq_reg_writel(gc, 0, AT91_AIC_EOICR);
else
- handle_domain_irq(aic_domain, irqnr, regs);
+ generic_handle_domain_irq(aic_domain, irqnr);
}
static int aic_retrigger(struct irq_data *d)
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index fb4ad2aaa727..145535bd7560 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -80,7 +80,7 @@ aic5_handle(struct pt_regs *regs)
if (!irqstat)
irq_reg_writel(bgc, 0, AT91_AIC5_EOICR);
else
- handle_domain_irq(aic5_domain, irqnr, regs);
+ generic_handle_domain_irq(aic5_domain, irqnr);
}
static void aic5_mask(struct irq_data *d)
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index adc1556ed332..e94e2882286c 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -246,7 +246,7 @@ static void __exception_irq_entry bcm2835_handle_irq(
u32 hwirq;
while ((hwirq = get_next_armctrl_hwirq()) != ~0)
- handle_domain_irq(intc.domain, hwirq, regs);
+ generic_handle_domain_irq(intc.domain, hwirq);
}
static void bcm2836_chained_handle_irq(struct irq_desc *desc)
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 501facdb4570..51491c3c6fdd 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -143,7 +143,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
if (stat) {
u32 hwirq = ffs(stat) - 1;
- handle_domain_irq(intc.domain, hwirq, regs);
+ generic_handle_domain_irq(intc.domain, hwirq);
}
}
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index e3483789f4df..fd079215c17f 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -132,16 +132,12 @@ static void bcm6345_l1_irq_handle(struct irq_desc *desc)
int base = idx * IRQS_PER_WORD;
unsigned long pending;
irq_hw_number_t hwirq;
- unsigned int irq;
pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
- irq = irq_linear_revmap(intc->domain, base + hwirq);
- if (irq)
- do_IRQ(irq);
- else
+ if (generic_handle_domain_irq(intc->domain, base + hwirq))
spurious_interrupt();
}
}
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index a035c385ca7a..a62b96237b82 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -28,9 +28,6 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/syscore_ops.h>
-#ifdef CONFIG_ARM
-#include <asm/smp_plat.h>
-#endif
#define IRQS_PER_WORD 32
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
@@ -127,7 +124,7 @@ static void bcm7038_l1_irq_handle(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int idx;
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
cpu = intc->cpus[0];
@@ -194,6 +191,7 @@ static void bcm7038_l1_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&intc->lock, flags);
}
+#if defined(CONFIG_MIPS) && defined(CONFIG_SMP)
static int bcm7038_l1_set_affinity(struct irq_data *d,
const struct cpumask *dest,
bool force)
@@ -220,32 +218,6 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
return 0;
}
-
-#ifdef CONFIG_SMP
-static void bcm7038_l1_cpu_offline(struct irq_data *d)
-{
- struct cpumask *mask = irq_data_get_affinity_mask(d);
- int cpu = smp_processor_id();
- cpumask_t new_affinity;
-
- /* This CPU was not on the affinity mask */
- if (!cpumask_test_cpu(cpu, mask))
- return;
-
- if (cpumask_weight(mask) > 1) {
- /*
- * Multiple CPU affinity, remove this CPU from the affinity
- * mask
- */
- cpumask_copy(&new_affinity, mask);
- cpumask_clear_cpu(cpu, &new_affinity);
- } else {
- /* Only CPU, put on the lowest online CPU */
- cpumask_clear(&new_affinity);
- cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
- }
- irq_set_affinity_locked(d, &new_affinity, false);
-}
#endif
static int __init bcm7038_l1_init_one(struct device_node *dn,
@@ -328,7 +300,7 @@ static int bcm7038_l1_suspend(void)
u32 val;
/* Wakeup interrupt should only come from the boot cpu */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
boot_cpu = cpu_logical_map(0);
#else
boot_cpu = 0;
@@ -352,7 +324,7 @@ static void bcm7038_l1_resume(void)
struct bcm7038_l1_chip *intc;
int boot_cpu, word;
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
boot_cpu = cpu_logical_map(0);
#else
boot_cpu = 0;
@@ -395,9 +367,8 @@ static struct irq_chip bcm7038_l1_irq_chip = {
.name = "bcm7038-l1",
.irq_mask = bcm7038_l1_mask,
.irq_unmask = bcm7038_l1_unmask,
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
.irq_set_affinity = bcm7038_l1_set_affinity,
-#ifdef CONFIG_SMP
- .irq_cpu_offline = bcm7038_l1_cpu_offline,
#endif
#ifdef CONFIG_PM_SLEEP
.irq_set_wake = bcm7038_l1_set_wake,
@@ -416,7 +387,7 @@ static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
irq_set_chip_data(virq, d->host_data);
- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+ irqd_set_single_target(irq_get_irq_data(virq));
return 0;
}
@@ -484,4 +455,8 @@ out_free:
return ret;
}
-IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7038_l1)
+IRQCHIP_MATCH("brcm,bcm7038-l1-intc", bcm7038_l1_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(bcm7038_l1)
+MODULE_DESCRIPTION("Broadcom STB 7038-style L1/L2 interrupt controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index f23d7651ea84..d80e67a6aad2 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -220,6 +220,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct bcm7120_l2_intc_data *data;
+ struct platform_device *pdev;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
int ret = 0;
@@ -230,7 +231,13 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
if (!data)
return -ENOMEM;
- data->num_parent_irqs = of_irq_count(dn);
+ pdev = of_find_device_by_node(dn);
+ if (!pdev) {
+ ret = -ENODEV;
+ goto out_free_data;
+ }
+
+ data->num_parent_irqs = platform_irq_count(pdev);
if (data->num_parent_irqs <= 0) {
pr_err("invalid number of parent interrupts\n");
ret = -ENOMEM;
@@ -329,6 +336,7 @@ out_unmap:
if (data->map_base[idx])
iounmap(data->map_base[idx]);
}
+out_free_data:
kfree(data);
return ret;
}
@@ -347,8 +355,9 @@ static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
"BCM3380 L2");
}
-IRQCHIP_DECLARE(bcm7120_l2_intc, "brcm,bcm7120-l2-intc",
- bcm7120_l2_intc_probe_7120);
-
-IRQCHIP_DECLARE(bcm3380_l2_intc, "brcm,bcm3380-l2-intc",
- bcm7120_l2_intc_probe_3380);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7120_l2)
+IRQCHIP_MATCH("brcm,bcm7120-l2-intc", bcm7120_l2_intc_probe_7120)
+IRQCHIP_MATCH("brcm,bcm3380-l2-intc", bcm7120_l2_intc_probe_3380)
+IRQCHIP_PLATFORM_DRIVER_END(bcm7120_l2)
+MODULE_DESCRIPTION("Broadcom STB 7120-style L2 interrupt controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 8e0911561f2d..e4efc08ac594 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -275,16 +275,18 @@ static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
{
return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
}
-IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
-IRQCHIP_DECLARE(brcmstb_hif_spi_l2_intc, "brcm,hif-spi-l2-intc",
- brcmstb_l2_edge_intc_of_init);
-IRQCHIP_DECLARE(brcmstb_upg_aux_aon_l2_intc, "brcm,upg-aux-aon-l2-intc",
- brcmstb_l2_edge_intc_of_init);
static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
struct device_node *parent)
{
return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
}
-IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc",
- brcmstb_l2_lvl_intc_of_init);
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(brcmstb_l2)
+IRQCHIP_MATCH("brcm,l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,hif-spi-l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,upg-aux-aon-l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,bcm7271-l2-intc", brcmstb_l2_lvl_intc_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(brcmstb_l2)
+MODULE_DESCRIPTION("Broadcom STB generic L2 interrupt controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index d0da29aeedc8..77ebe7e47e0e 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -77,14 +77,14 @@ static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
irqstat = readw_relaxed(clps711x_intc->intmr[0]) &
readw_relaxed(clps711x_intc->intsr[0]);
if (irqstat)
- handle_domain_irq(clps711x_intc->domain,
- fls(irqstat) - 1, regs);
+ generic_handle_domain_irq(clps711x_intc->domain,
+ fls(irqstat) - 1);
irqstat = readw_relaxed(clps711x_intc->intmr[1]) &
readw_relaxed(clps711x_intc->intsr[1]);
if (irqstat)
- handle_domain_irq(clps711x_intc->domain,
- fls(irqstat) - 1 + 16, regs);
+ generic_handle_domain_irq(clps711x_intc->domain,
+ fls(irqstat) - 1 + 16);
} while (irqstat);
}
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index ab91afa86755..d36f536506ba 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -138,7 +138,7 @@ static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
if (hwirq == 0)
return 0;
- handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs);
+ generic_handle_domain_irq(root_domain, irq_base + __fls(hwirq));
return 1;
}
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
index a1534edef7fa..cb403c960ac0 100644
--- a/drivers/irqchip/irq-csky-mpintc.c
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -74,8 +74,8 @@ static void csky_mpintc_handler(struct pt_regs *regs)
{
void __iomem *reg_base = this_cpu_read(intcl_reg);
- handle_domain_irq(root_domain,
- readl_relaxed(reg_base + INTCL_RDYIR), regs);
+ generic_handle_domain_irq(root_domain,
+ readl_relaxed(reg_base + INTCL_RDYIR));
}
static void csky_mpintc_enable(struct irq_data *d)
diff --git a/drivers/irqchip/irq-davinci-aintc.c b/drivers/irqchip/irq-davinci-aintc.c
index 810ccc4fe476..123eb7bfc117 100644
--- a/drivers/irqchip/irq-davinci-aintc.c
+++ b/drivers/irqchip/irq-davinci-aintc.c
@@ -73,7 +73,7 @@ davinci_aintc_handle_irq(struct pt_regs *regs)
irqnr >>= 2;
irqnr -= 1;
- handle_domain_irq(davinci_aintc_irq_domain, irqnr, regs);
+ generic_handle_domain_irq(davinci_aintc_irq_domain, irqnr);
}
/* ARM Interrupt Controller Initialization */
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index 276da2772e7f..7482c8ed34b2 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -135,7 +135,7 @@ davinci_cp_intc_handle_irq(struct pt_regs *regs)
return;
}
- handle_domain_irq(davinci_cp_intc_irq_domain, irqnr, regs);
+ generic_handle_domain_irq(davinci_cp_intc_irq_domain, irqnr);
}
static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq,
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index fc38d2da11b9..3b0d78aac13b 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -50,7 +50,7 @@ static void __exception_irq_entry digicolor_handle_irq(struct pt_regs *regs)
return;
}
- handle_domain_irq(digicolor_irq_domain, hwirq, regs);
+ generic_handle_domain_irq(digicolor_irq_domain, hwirq);
} while (1);
}
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index a67266e44491..d5c1c750c8d2 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -42,7 +42,7 @@ static void __irq_entry dw_apb_ictl_handle_irq(struct pt_regs *regs)
while (stat) {
u32 hwirq = ffs(stat) - 1;
- handle_domain_irq(d, hwirq, regs);
+ generic_handle_domain_irq(d, hwirq);
stat &= ~BIT(hwirq);
}
}
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index 0bf98425dca5..5cc268880f8e 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -134,7 +134,7 @@ asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *r
while ((status = readl(FT010_IRQ_STATUS(f->base)))) {
irq = ffs(status) - 1;
- handle_domain_irq(f->domain, irq, regs);
+ generic_handle_domain_irq(f->domain, irq);
}
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index fd4e9a37fea6..daec3309b014 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -660,7 +660,7 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
* PSR.I will be restored when we ERET to the
* interrupted context.
*/
- err = handle_domain_nmi(gic_data.domain, irqnr, regs);
+ err = generic_handle_domain_nmi(gic_data.domain, irqnr);
if (err)
gic_deactivate_unhandled(irqnr);
@@ -728,7 +728,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
else
isb();
- if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
+ if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
WARN_ONCE(true, "Unexpected interrupt received!\n");
gic_deactivate_unhandled(irqnr);
}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 5f22c9d65e57..b8bb46c65a97 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -369,7 +369,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
this_cpu_write(sgi_intid, irqstat);
}
- handle_domain_irq(gic->domain, irqnr, regs);
+ generic_handle_domain_irq(gic->domain, irqnr);
} while (1);
}
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 058ebaebe2c4..46161f6ff289 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -206,7 +206,7 @@ static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
if (irqnr <= HIP04_MAX_IRQS)
- handle_domain_irq(hip04_data.domain, irqnr, regs);
+ generic_handle_domain_irq(hip04_data.domain, irqnr);
} while (irqnr > HIP04_MAX_IRQS);
}
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
index 37e0749215c7..fb68f8c59fbb 100644
--- a/drivers/irqchip/irq-ixp4xx.c
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -114,7 +114,7 @@ asmlinkage void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
status = __raw_readl(ixi->irqbase + IXP4XX_ICIP);
for_each_set_bit(i, &status, 32)
- handle_domain_irq(ixi->domain, i, regs);
+ generic_handle_domain_irq(ixi->domain, i);
/*
* IXP465/IXP435 has an upper IRQ status register
@@ -122,7 +122,7 @@ asmlinkage void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
if (ixi->is_356) {
status = __raw_readl(ixi->irqbase + IXP4XX_ICIP2);
for_each_set_bit(i, &status, 32)
- handle_domain_irq(ixi->domain, i + 32, regs);
+ generic_handle_domain_irq(ixi->domain, i + 32);
}
}
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index 5e6f6e25f2ae..a29357f39450 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -126,7 +126,7 @@ static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
while (hwirq) {
irq = __ffs(hwirq);
hwirq &= ~BIT(irq);
- handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs);
+ generic_handle_domain_irq(lpc32xx_mic_irqc->domain, irq);
}
}
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
new file mode 100644
index 000000000000..c726a19837d2
--- /dev/null
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Microchip External Interrupt Controller driver
+ *
+ * Copyright (C) 2021 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define MCHP_EIC_GFCS (0x0)
+#define MCHP_EIC_SCFG(x) (0x4 + (x) * 0x4)
+#define MCHP_EIC_SCFG_EN BIT(16)
+#define MCHP_EIC_SCFG_LVL BIT(9)
+#define MCHP_EIC_SCFG_POL BIT(8)
+
+#define MCHP_EIC_NIRQ (2)
+
+/*
+ * struct mchp_eic - EIC private data structure
+ * @base: base address
+ * @clk: peripheral clock
+ * @domain: irq domain
+ * @irqs: interrupts between the EIC and the GIC
+ * @scfg: backup for scfg registers (necessary for backup and self-refresh mode)
+ * @wakeup_source: wakeup source mask
+ */
+struct mchp_eic {
+ void __iomem *base;
+ struct clk *clk;
+ struct irq_domain *domain;
+ u32 irqs[MCHP_EIC_NIRQ];
+ u32 scfg[MCHP_EIC_NIRQ];
+ u32 wakeup_source;
+};
+
+static struct mchp_eic *eic;
+
+static void mchp_eic_irq_mask(struct irq_data *d)
+{
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp &= ~MCHP_EIC_SCFG_EN;
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ irq_chip_mask_parent(d);
+}
+
+static void mchp_eic_irq_unmask(struct irq_data *d)
+{
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp |= MCHP_EIC_SCFG_EN;
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ irq_chip_unmask_parent(d);
+}
+
+static int mchp_eic_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int parent_irq_type;
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp &= ~(MCHP_EIC_SCFG_POL | MCHP_EIC_SCFG_LVL);
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ tmp |= MCHP_EIC_SCFG_POL | MCHP_EIC_SCFG_LVL;
+ parent_irq_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ tmp |= MCHP_EIC_SCFG_LVL;
+ parent_irq_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ parent_irq_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ tmp |= MCHP_EIC_SCFG_POL;
+ parent_irq_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ return irq_chip_set_type_parent(d, parent_irq_type);
+}
+
+static int mchp_eic_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ irq_set_irq_wake(eic->irqs[d->hwirq], on);
+ if (on)
+ eic->wakeup_source |= BIT(d->hwirq);
+ else
+ eic->wakeup_source &= ~BIT(d->hwirq);
+
+ return 0;
+}
+
+static int mchp_eic_irq_suspend(void)
+{
+ unsigned int hwirq;
+
+ for (hwirq = 0; hwirq < MCHP_EIC_NIRQ; hwirq++)
+ eic->scfg[hwirq] = readl_relaxed(eic->base +
+ MCHP_EIC_SCFG(hwirq));
+
+ if (!eic->wakeup_source)
+ clk_disable_unprepare(eic->clk);
+
+ return 0;
+}
+
+static void mchp_eic_irq_resume(void)
+{
+ unsigned int hwirq;
+
+ if (!eic->wakeup_source)
+ clk_prepare_enable(eic->clk);
+
+ for (hwirq = 0; hwirq < MCHP_EIC_NIRQ; hwirq++)
+ writel_relaxed(eic->scfg[hwirq], eic->base +
+ MCHP_EIC_SCFG(hwirq));
+}
+
+static struct syscore_ops mchp_eic_syscore_ops = {
+ .suspend = mchp_eic_irq_suspend,
+ .resume = mchp_eic_irq_resume,
+};
+
+static struct irq_chip mchp_eic_chip = {
+ .name = "eic",
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED,
+ .irq_mask = mchp_eic_irq_mask,
+ .irq_unmask = mchp_eic_irq_unmask,
+ .irq_set_type = mchp_eic_irq_set_type,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_wake = mchp_eic_irq_set_wake,
+};
+
+static int mchp_eic_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+	if (hwirq >= MCHP_EIC_NIRQ)
+		return -EINVAL;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &mchp_eic_chip, eic);
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = GIC_SPI;
+ parent_fwspec.param[1] = eic->irqs[hwirq];
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static const struct irq_domain_ops mchp_eic_domain_ops = {
+ .translate = irq_domain_translate_twocell,
+ .alloc = mchp_eic_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int mchp_eic_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *parent_domain = NULL;
+ int ret, i;
+
+ eic = kzalloc(sizeof(*eic), GFP_KERNEL);
+ if (!eic)
+ return -ENOMEM;
+
+ eic->base = of_iomap(node, 0);
+ if (!eic->base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ ret = -ENODEV;
+ goto unmap;
+ }
+
+ eic->clk = of_clk_get_by_name(node, "pclk");
+ if (IS_ERR(eic->clk)) {
+ ret = PTR_ERR(eic->clk);
+ goto unmap;
+ }
+
+ ret = clk_prepare_enable(eic->clk);
+ if (ret)
+ goto unmap;
+
+ for (i = 0; i < MCHP_EIC_NIRQ; i++) {
+ struct of_phandle_args irq;
+
+		/* Start with the line disabled. */
+ writel_relaxed(0UL, eic->base + MCHP_EIC_SCFG(i));
+
+ ret = of_irq_parse_one(node, i, &irq);
+ if (ret)
+ goto clk_unprepare;
+
+ if (WARN_ON(irq.args_count != 3)) {
+ ret = -EINVAL;
+ goto clk_unprepare;
+ }
+
+ eic->irqs[i] = irq.args[1];
+ }
+
+ eic->domain = irq_domain_add_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ,
+ node, &mchp_eic_domain_ops, eic);
+ if (!eic->domain) {
+ pr_err("%pOF: Failed to add domain\n", node);
+ ret = -ENODEV;
+ goto clk_unprepare;
+ }
+
+ register_syscore_ops(&mchp_eic_syscore_ops);
+
+ pr_info("%pOF: EIC registered, nr_irqs %u\n", node, MCHP_EIC_NIRQ);
+
+ return 0;
+
+clk_unprepare:
+ clk_disable_unprepare(eic->clk);
+unmap:
+ iounmap(eic->base);
+free:
+ kfree(eic);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(mchp_eic)
+IRQCHIP_MATCH("microchip,sama7g5-eic", mchp_eic_init)
+IRQCHIP_PLATFORM_DRIVER_END(mchp_eic)
+
+MODULE_DESCRIPTION("Microchip External Interrupt Controller");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea@microchip.com>");
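/*
 * A minimal sketch of a consumer sitting behind this EIC, assuming its
 * interrupt line is described in the device tree; the probe function,
 * handler and request name below are illustrative, not part of the
 * driver:
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_eic_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	int ret;

	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, example_eic_handler,
			       IRQF_TRIGGER_LOW, "example", NULL);
	if (ret)
		return ret;

	/* Tracked by mchp_eic_irq_set_wake() as a wakeup source. */
	return enable_irq_wake(irq);
}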
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index e50676ce2ec8..d90ff0b92480 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -436,8 +436,7 @@ static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
.translate = meson_gpio_irq_domain_translate,
};
-static int __init meson_gpio_irq_parse_dt(struct device_node *node,
- struct meson_gpio_irq_controller *ctl)
+static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_irq_controller *ctl)
{
const struct of_device_id *match;
int ret;
@@ -463,8 +462,7 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node,
return 0;
}
-static int __init meson_gpio_irq_of_init(struct device_node *node,
- struct device_node *parent)
+static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *parent)
{
struct irq_domain *domain, *parent_domain;
struct meson_gpio_irq_controller *ctl;
@@ -521,5 +519,10 @@ free_ctl:
return ret;
}
-IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc",
- meson_gpio_irq_of_init);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(meson_gpio_intc)
+IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc)
+
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:meson-gpio-intc");
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 54c7092cc61d..d02b05a067d9 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -381,24 +381,35 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
spin_unlock_irqrestore(&gic_lock, flags);
}
-static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
+static void gic_all_vpes_irq_cpu_online(void)
{
- struct gic_all_vpes_chip_data *cd;
- unsigned int intr;
+ static const unsigned int local_intrs[] = {
+ GIC_LOCAL_INT_TIMER,
+ GIC_LOCAL_INT_PERFCTR,
+ GIC_LOCAL_INT_FDC,
+ };
+ unsigned long flags;
+ int i;
- intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- cd = irq_data_get_irq_chip_data(d);
+ spin_lock_irqsave(&gic_lock, flags);
- write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
- if (cd->mask)
- write_gic_vl_smask(BIT(intr));
+ for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
+ unsigned int intr = local_intrs[i];
+ struct gic_all_vpes_chip_data *cd;
+
+ cd = &gic_all_vpes_chip_data[intr];
+ write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
+ if (cd->mask)
+ write_gic_vl_smask(BIT(intr));
+ }
+
+ spin_unlock_irqrestore(&gic_lock, flags);
}
static struct irq_chip gic_all_vpes_local_irq_controller = {
.name = "MIPS GIC Local",
.irq_mask = gic_mask_local_irq_all_vpes,
.irq_unmask = gic_unmask_local_irq_all_vpes,
- .irq_cpu_online = gic_all_vpes_irq_cpu_online,
};
static void __gic_irq_dispatch(void)
@@ -477,6 +488,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
intr = GIC_HWIRQ_TO_LOCAL(hwirq);
map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
+ /*
+	 * If adding support for more per-cpu interrupts, keep the
+ * array in gic_all_vpes_irq_cpu_online() in sync.
+ */
switch (intr) {
case GIC_LOCAL_INT_TIMER:
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
@@ -663,8 +678,8 @@ static int gic_cpu_startup(unsigned int cpu)
/* Clear all local IRQ masks (ie. disable all local interrupts) */
write_gic_vl_rmask(~0);
- /* Invoke irq_cpu_online callbacks to enable desired interrupts */
- irq_cpu_online();
+ /* Enable desired interrupts */
+ gic_all_vpes_irq_cpu_online();
return 0;
}
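/*
 * gic_cpu_startup() runs from the CPU hotplug state machine, so the
 * per-CPU re-initialisation no longer depends on the deprecated
 * irq_cpu_online() walk over every interrupt descriptor. A minimal
 * sketch of how such a bring-up hook is registered; this mirrors what
 * the driver's init path does, but treat it as an illustration:
 */
static int __init example_register_startup(void)
{
	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
				 "irqchip/mips/gic:starting",
				 gic_cpu_startup, NULL);
}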
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 4a74ac7b7c42..83455ca72439 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -230,7 +230,7 @@ static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
if (!(hwirq & SEL_INT_PENDING))
return;
hwirq &= SEL_INT_NUM_MASK;
- handle_domain_irq(icu_data[0].domain, hwirq, regs);
+ generic_handle_domain_irq(icu_data[0].domain, hwirq);
}
static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
@@ -241,7 +241,7 @@ static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
if (!(hwirq & SEL_INT_PENDING))
return;
hwirq &= SEL_INT_NUM_MASK;
- handle_domain_irq(icu_data[0].domain, hwirq, regs);
+ generic_handle_domain_irq(icu_data[0].domain, hwirq);
}
/* MMP (ARMv5) */
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 090bc3f4f7d8..3e7297fc5948 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -347,7 +347,6 @@ builtin_platform_driver(mvebu_icu_subset_driver);
static int mvebu_icu_probe(struct platform_device *pdev)
{
struct mvebu_icu *icu;
- struct resource *res;
int i;
icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu),
@@ -357,8 +356,7 @@ static int mvebu_icu_probe(struct platform_device *pdev)
icu->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- icu->base = devm_ioremap_resource(&pdev->dev, res);
+ icu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(icu->base))
return PTR_ERR(icu->base);
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index dc1cee4b0fe1..870f9866b8da 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -121,14 +121,12 @@ static int mvebu_pic_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct mvebu_pic *pic;
struct irq_chip *irq_chip;
- struct resource *res;
pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL);
if (!pic)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pic->base = devm_ioremap_resource(&pdev->dev, res);
+ pic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pic->base))
return PTR_ERR(pic->base);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index d1f5740cd575..55cb6b5a686e 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -136,7 +136,7 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
irqnr = __raw_readl(icoll_priv.stat);
__raw_writel(irqnr, icoll_priv.vector);
- handle_domain_irq(icoll_domain, irqnr, regs);
+ generic_handle_domain_irq(icoll_domain, irqnr);
}
static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index b31c4cff4d3a..63bac3f78863 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -37,10 +37,25 @@
static struct irq_domain *nvic_irq_domain;
+static void __nvic_handle_irq(irq_hw_number_t hwirq)
+{
+ generic_handle_domain_irq(nvic_irq_domain, hwirq);
+}
+
+/*
+ * TODO: restructure the ARMv7M entry logic so that this handler can live
+ * in arch code.
+ */
asmlinkage void __exception_irq_entry
nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
{
- handle_domain_irq(nvic_irq_domain, hwirq, regs);
+ struct pt_regs *old_regs;
+
+ irq_enter();
+ old_regs = set_irq_regs(regs);
+ __nvic_handle_irq(hwirq);
+ set_irq_regs(old_regs);
+ irq_exit();
}
static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index d360a6eddd6d..dc82162ba763 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -357,7 +357,7 @@ omap_intc_handle_irq(struct pt_regs *regs)
}
irqnr &= ACTIVEIRQ_MASK;
- handle_domain_irq(domain, irqnr, regs);
+ generic_handle_domain_irq(domain, irqnr);
}
static int __init intc_of_init(struct device_node *node,
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
index 03d2366118dd..49b47e787644 100644
--- a/drivers/irqchip/irq-or1k-pic.c
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -116,7 +116,7 @@ static void or1k_pic_handle_irq(struct pt_regs *regs)
int irq = -1;
while ((irq = pic_get_irq(irq + 1)) != NO_IRQ)
- handle_domain_irq(root_domain, irq, regs);
+ generic_handle_domain_irq(root_domain, irq);
}
static int or1k_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index b6868f7b805a..17c2c7a07f10 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -42,8 +42,8 @@ __exception_irq_entry orion_handle_irq(struct pt_regs *regs)
gc->mask_cache;
while (stat) {
u32 hwirq = __fls(stat);
- handle_domain_irq(orion_irq_domain,
- gc->irq_base + hwirq, regs);
+ generic_handle_domain_irq(orion_irq_domain,
+ gc->irq_base + hwirq);
stat &= ~(1 << hwirq);
}
}
diff --git a/drivers/irqchip/irq-rda-intc.c b/drivers/irqchip/irq-rda-intc.c
index 960168303b73..9f0144a73777 100644
--- a/drivers/irqchip/irq-rda-intc.c
+++ b/drivers/irqchip/irq-rda-intc.c
@@ -53,7 +53,7 @@ static void __exception_irq_entry rda_handle_irq(struct pt_regs *regs)
while (stat) {
hwirq = __fls(stat);
- handle_domain_irq(rda_irq_domain, hwirq, regs);
+ generic_handle_domain_irq(rda_irq_domain, hwirq);
stat &= ~BIT(hwirq);
}
}
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index 8017f6d32d52..b65bd8878d4f 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -37,7 +37,7 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
break;
#endif
default:
- handle_domain_irq(intc_domain, cause, regs);
+ generic_handle_domain_irq(intc_domain, cause);
break;
}
}
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index dbccc7dafbf8..31c202a1ae62 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -140,8 +140,8 @@ sa1100_handle_irq(struct pt_regs *regs)
if (mask == 0)
break;
- handle_domain_irq(sa1100_normal_irqdomain,
- ffs(mask) - 1, regs);
+ generic_handle_domain_irq(sa1100_normal_irqdomain,
+ ffs(mask) - 1);
} while (1);
}
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 33c76710f845..b7cb2da71888 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -850,7 +850,6 @@ static int stm32_exti_probe(struct platform_device *pdev)
struct irq_domain *parent_domain, *domain;
struct stm32_exti_host_data *host_data;
const struct stm32_exti_drv_data *drv_data;
- struct resource *res;
host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
if (!host_data)
@@ -888,8 +887,7 @@ static int stm32_exti_probe(struct platform_device *pdev)
if (!host_data->chips_data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host_data->base = devm_ioremap_resource(dev, res);
+ host_data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host_data->base))
return PTR_ERR(host_data->base);
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 8a315d6a3399..dd506ebfdacb 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -195,7 +195,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
return;
do {
- handle_domain_irq(irq_ic_data->irq_domain, hwirq, regs);
+ generic_handle_domain_irq(irq_ic_data->irq_domain, hwirq);
hwirq = readl(irq_ic_data->irq_base +
SUN4I_IRQ_VECTOR_REG) >> 2;
} while (hwirq != 0);
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 97f454ec376b..8eba08db33b2 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -650,7 +650,6 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
struct device_node *parent_node, *node;
struct ti_sci_inta_irq_domain *inta;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
node = dev_of_node(dev);
@@ -694,8 +693,7 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
return PTR_ERR(inta->global_event);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- inta->base = devm_ioremap_resource(dev, res);
+ inta->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(inta->base))
return PTR_ERR(inta->base);
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 34337a61b1ef..f032db23b30f 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -93,15 +93,13 @@ static int ts4800_ic_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct ts4800_irq_data *data;
struct irq_chip *irq_chip;
- struct resource *res;
int parent_irq;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 75be350cf82f..f2757b6aecc8 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -105,7 +105,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
while ((status = readl(f->base + IRQ_STATUS))) {
irq = ffs(status) - 1;
- handle_domain_irq(f->domain, irq, regs);
+ generic_handle_domain_irq(f->domain, irq);
handled = 1;
}
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 1e1f2d115257..9e3d5561e04e 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -208,7 +208,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
irq = ffs(stat) - 1;
- handle_domain_irq(vic->domain, irq, regs);
+ generic_handle_domain_irq(vic->domain, irq);
handled = 1;
}
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index 5bce936af5d9..e17dd3a8c2d5 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -183,7 +183,7 @@ static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
continue;
}
- handle_domain_irq(intc[i].domain, irqnr, regs);
+ generic_handle_domain_irq(intc[i].domain, irqnr);
}
}
diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
index f3ac392d5bc8..0dcbeb1a05a1 100644
--- a/drivers/irqchip/irq-wpcm450-aic.c
+++ b/drivers/irqchip/irq-wpcm450-aic.c
@@ -69,7 +69,7 @@ static void __exception_irq_entry wpcm450_aic_handle_irq(struct pt_regs *regs)
/* Read IPER to signal that nIRQ can be de-asserted */
hwirq = readl(aic->regs + AIC_IPER) / 4;
- handle_domain_irq(aic->domain, hwirq, regs);
+ generic_handle_domain_irq(aic->domain, hwirq);
}
static void wpcm450_aic_eoi(struct irq_data *d)
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index 84163f1ebfcf..7a72620fc478 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -50,7 +50,7 @@ static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
while (readl(zevio_irq_io + IO_STATUS)) {
irqnr = readl(zevio_irq_io + IO_CURRENT);
- handle_domain_irq(zevio_irq_domain, irqnr, regs);
+ generic_handle_domain_irq(zevio_irq_domain, irqnr);
}
}
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e501cb03f211..bd087cca1c1d 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1994,14 +1994,14 @@ setup_hw(struct hfc_pci *hc)
pci_set_master(hc->pdev);
if (!hc->irq) {
printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
- return 1;
+ return -EINVAL;
}
hc->hw.pci_io =
(char __iomem *)(unsigned long)hc->pdev->resource[1].start;
if (!hc->hw.pci_io) {
printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
- return 1;
+ return -ENOMEM;
}
/* Allocate memory for FIFOS */
/* the memory needs to be on a 32k boundary within the first 4G */
@@ -2012,7 +2012,7 @@ setup_hw(struct hfc_pci *hc)
if (!buffer) {
printk(KERN_WARNING
"HFC-PCI: Error allocating memory for FIFO!\n");
- return 1;
+ return -ENOMEM;
}
hc->hw.fifos = buffer;
pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
@@ -2022,7 +2022,7 @@ setup_hw(struct hfc_pci *hc)
"HFC-PCI: Error in ioremap for PCI!\n");
dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
hc->hw.dmahandle);
- return 1;
+ return -ENOMEM;
}
printk(KERN_INFO
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
index 185e17055317..6fe9d700dfef 100644
--- a/drivers/leds/led-class-flash.c
+++ b/drivers/leds/led-class-flash.c
@@ -207,7 +207,7 @@ static ssize_t flash_fault_show(struct device *dev,
mask <<= 1;
}
- return sprintf(buf, "%s\n", buf);
+ return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(flash_fault);
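/*
 * The replaced line passed buf as both the destination and a source
 * argument to sprintf(), which is undefined behaviour for overlapping
 * buffers; strcat()+strlen() appends the trailing newline in place.
 * The same pattern in isolation (simplified signature, fault string
 * illustrative):
 */
static ssize_t example_show(char *buf)
{
	buf[0] = '\0';
	strcat(buf, "led-over-voltage");	/* accumulated fault names */
	return strlen(strcat(buf, "\n"));	/* append newline, return length */
}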
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 4e7b78a84149..072491d3e17b 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -157,7 +157,6 @@ EXPORT_SYMBOL_GPL(led_trigger_read);
/* Caller must ensure led_cdev->trigger_lock held */
int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
{
- unsigned long flags;
char *event = NULL;
char *envp[2];
const char *name;
@@ -171,10 +170,13 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
/* Remove any existing trigger */
if (led_cdev->trigger) {
- write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
- list_del(&led_cdev->trig_list);
- write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock,
- flags);
+ spin_lock(&led_cdev->trigger->leddev_list_lock);
+ list_del_rcu(&led_cdev->trig_list);
+ spin_unlock(&led_cdev->trigger->leddev_list_lock);
+
+ /* ensure it's no longer visible on the led_cdevs list */
+ synchronize_rcu();
+
cancel_work_sync(&led_cdev->set_brightness_work);
led_stop_software_blink(led_cdev);
if (led_cdev->trigger->deactivate)
@@ -186,9 +188,9 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
led_set_brightness(led_cdev, LED_OFF);
}
if (trig) {
- write_lock_irqsave(&trig->leddev_list_lock, flags);
- list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
- write_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ spin_lock(&trig->leddev_list_lock);
+ list_add_tail_rcu(&led_cdev->trig_list, &trig->led_cdevs);
+ spin_unlock(&trig->leddev_list_lock);
led_cdev->trigger = trig;
if (trig->activate)
@@ -223,9 +225,10 @@ err_add_groups:
trig->deactivate(led_cdev);
err_activate:
- write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
- list_del(&led_cdev->trig_list);
- write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
+ spin_lock(&led_cdev->trigger->leddev_list_lock);
+ list_del_rcu(&led_cdev->trig_list);
+ spin_unlock(&led_cdev->trigger->leddev_list_lock);
+ synchronize_rcu();
led_cdev->trigger = NULL;
led_cdev->trigger_data = NULL;
led_set_brightness(led_cdev, LED_OFF);
@@ -285,7 +288,7 @@ int led_trigger_register(struct led_trigger *trig)
struct led_classdev *led_cdev;
struct led_trigger *_trig;
- rwlock_init(&trig->leddev_list_lock);
+ spin_lock_init(&trig->leddev_list_lock);
INIT_LIST_HEAD(&trig->led_cdevs);
down_write(&triggers_list_lock);
@@ -378,15 +381,14 @@ void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
- unsigned long flags;
if (!trig)
return;
- read_lock_irqsave(&trig->leddev_list_lock, flags);
- list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
+ rcu_read_lock();
+ list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
- read_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(led_trigger_event);
@@ -397,20 +399,19 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
int invert)
{
struct led_classdev *led_cdev;
- unsigned long flags;
if (!trig)
return;
- read_lock_irqsave(&trig->leddev_list_lock, flags);
- list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
invert);
else
led_blink_set(led_cdev, delay_on, delay_off);
}
- read_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ rcu_read_unlock();
}
void led_trigger_blink(struct led_trigger *trig,
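/*
 * The rwlock is replaced with the standard recipe for a read-mostly
 * list: writers serialize on a spinlock and use the _rcu list helpers,
 * readers only take rcu_read_lock(), and writers call
 * synchronize_rcu() before the removed entry may be reused. A minimal,
 * self-contained sketch of the pattern (all names illustrative):
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct example_item {
	struct list_head node;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

static void example_add(struct example_item *item)
{
	spin_lock(&example_lock);
	list_add_tail_rcu(&item->node, &example_list);
	spin_unlock(&example_lock);
}

static void example_del(struct example_item *item)
{
	spin_lock(&example_lock);
	list_del_rcu(&item->node);
	spin_unlock(&example_lock);
	synchronize_rcu();	/* readers are gone once this returns */
}

static void example_visit(void (*fn)(struct example_item *))
{
	struct example_item *item;

	rcu_read_lock();
	list_for_each_entry_rcu(item, &example_list, node)
		fn(item);
	rcu_read_unlock();
}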
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 1f1d57288085..dc6816d36d06 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -64,6 +64,7 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
+ depends on !PREEMPT_RT
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index fe63d5ee201b..d33913d523c1 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -848,7 +848,8 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd)
cmd->read = cmd->info.devaddr & 0x01;
switch(cmd->info.type) {
case SMU_I2C_TRANSFER_SIMPLE:
- memset(&cmd->info.sublen, 0, 4);
+ cmd->info.sublen = 0;
+ memset(cmd->info.subaddr, 0, sizeof(cmd->info.subaddr));
break;
case SMU_I2C_TRANSFER_COMBINED:
cmd->info.devaddr &= 0xfe;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c9fc06c7e685..d9cd3606040e 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -8,6 +8,18 @@ menuconfig MAILBOX
if MAILBOX
+config APPLE_MAILBOX
+ tristate "Apple Mailbox driver"
+ depends on ARCH_APPLE || (ARM64 && COMPILE_TEST)
+ default ARCH_APPLE
+ help
+ Apple SoCs have various co-processors required for certain
+ peripherals to work (NVMe, display controller, etc.). This
+ driver adds support for the mailbox controller used to
+ communicate with those.
+
+	  Say Y here if you have an Apple SoC.
+
config ARM_MHU
tristate "ARM MHU Mailbox"
depends on ARM_AMBA
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index c2089f04887e..338cc05e5431 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -58,3 +58,5 @@ obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
+
+obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
new file mode 100644
index 000000000000..72942002a54a
--- /dev/null
+++ b/drivers/mailbox/apple-mailbox.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple mailbox driver
+ *
+ * Copyright (C) 2021 The Asahi Linux Contributors
+ *
+ * This driver adds support for two mailbox variants (called ASC and M3 by
+ * Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to
+ * exchange 64+32 bit messages between the main CPU and a co-processor.
+ * Various coprocessors implement different IPC protocols based on these simple
+ * messages and shared memory buffers.
+ *
+ * Both the main CPU and the co-processor see the same set of registers but
+ * the first FIFO (A2I) is always used to transfer messages from the application
+ * processor (us) to the I/O processor and the second one (I2A) for the
+ * other direction.
+ */
+
+#include <linux/apple-mailbox.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_ASC_MBOX_A2I_CONTROL 0x110
+#define APPLE_ASC_MBOX_A2I_SEND0 0x800
+#define APPLE_ASC_MBOX_A2I_SEND1 0x808
+#define APPLE_ASC_MBOX_A2I_RECV0 0x810
+#define APPLE_ASC_MBOX_A2I_RECV1 0x818
+
+#define APPLE_ASC_MBOX_I2A_CONTROL 0x114
+#define APPLE_ASC_MBOX_I2A_SEND0 0x820
+#define APPLE_ASC_MBOX_I2A_SEND1 0x828
+#define APPLE_ASC_MBOX_I2A_RECV0 0x830
+#define APPLE_ASC_MBOX_I2A_RECV1 0x838
+
+#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_M3_MBOX_A2I_CONTROL 0x50
+#define APPLE_M3_MBOX_A2I_SEND0 0x60
+#define APPLE_M3_MBOX_A2I_SEND1 0x68
+#define APPLE_M3_MBOX_A2I_RECV0 0x70
+#define APPLE_M3_MBOX_A2I_RECV1 0x78
+
+#define APPLE_M3_MBOX_I2A_CONTROL 0x80
+#define APPLE_M3_MBOX_I2A_SEND0 0x90
+#define APPLE_M3_MBOX_I2A_SEND1 0x98
+#define APPLE_M3_MBOX_I2A_RECV0 0xa0
+#define APPLE_M3_MBOX_I2A_RECV1 0xa8
+
+#define APPLE_M3_MBOX_IRQ_ENABLE 0x48
+#define APPLE_M3_MBOX_IRQ_ACK 0x4c
+#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0)
+#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1)
+#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2)
+#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3)
+
+#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52)
+#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48)
+#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44)
+#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40)
+#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0)
+
+struct apple_mbox_hw {
+ unsigned int control_full;
+ unsigned int control_empty;
+
+ unsigned int a2i_control;
+ unsigned int a2i_send0;
+ unsigned int a2i_send1;
+
+ unsigned int i2a_control;
+ unsigned int i2a_recv0;
+ unsigned int i2a_recv1;
+
+ bool has_irq_controls;
+ unsigned int irq_enable;
+ unsigned int irq_ack;
+ unsigned int irq_bit_recv_not_empty;
+ unsigned int irq_bit_send_empty;
+};
+
+struct apple_mbox {
+ void __iomem *regs;
+ const struct apple_mbox_hw *hw;
+
+ int irq_recv_not_empty;
+ int irq_send_empty;
+
+ struct mbox_chan chan;
+
+ struct device *dev;
+ struct mbox_controller controller;
+};
+
+static const struct of_device_id apple_mbox_of_match[];
+
+static bool apple_mbox_hw_can_send(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
+
+ return !(mbox_ctrl & apple_mbox->hw->control_full);
+}
+
+static int apple_mbox_hw_send(struct apple_mbox *apple_mbox,
+ struct apple_mbox_msg *msg)
+{
+ if (!apple_mbox_hw_can_send(apple_mbox))
+ return -EBUSY;
+
+ dev_dbg(apple_mbox->dev, "> TX %016llx %08x\n", msg->msg0, msg->msg1);
+
+ writeq_relaxed(msg->msg0, apple_mbox->regs + apple_mbox->hw->a2i_send0);
+ writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg->msg1),
+ apple_mbox->regs + apple_mbox->hw->a2i_send1);
+
+ return 0;
+}
+
+static bool apple_mbox_hw_can_recv(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_control);
+
+ return !(mbox_ctrl & apple_mbox->hw->control_empty);
+}
+
+static int apple_mbox_hw_recv(struct apple_mbox *apple_mbox,
+ struct apple_mbox_msg *msg)
+{
+ if (!apple_mbox_hw_can_recv(apple_mbox))
+ return -ENOMSG;
+
+ msg->msg0 = readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv0);
+ msg->msg1 = FIELD_GET(
+ APPLE_MBOX_MSG1_MSG,
+ readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv1));
+
+ dev_dbg(apple_mbox->dev, "< RX %016llx %08x\n", msg->msg0, msg->msg1);
+
+ return 0;
+}
+
+static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+ struct apple_mbox_msg *msg = data;
+ int ret;
+
+ ret = apple_mbox_hw_send(apple_mbox, msg);
+ if (ret)
+ return ret;
+
+ /*
+ * The interrupt is level triggered and will keep firing as long as the
+ * FIFO is empty. It will also keep firing if the FIFO was empty
+ * at any point in the past until it has been acknowledged at the
+ * mailbox level. By acknowledging it here we can ensure that we will
+ * only get the interrupt once the FIFO has been cleared again.
+	 * If the FIFO is already empty before the ack, it will fire again
+ * immediately after the ack.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_send_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_ack);
+ }
+ enable_irq(apple_mbox->irq_send_empty);
+
+ return 0;
+}
+
+static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
+{
+ struct apple_mbox *apple_mbox = data;
+
+ /*
+ * We don't need to acknowledge the interrupt at the mailbox level
+ * here even if supported by the hardware. It will keep firing but that
+ * doesn't matter since it's disabled at the main interrupt controller.
+ * apple_mbox_chan_send_data will acknowledge it before enabling
+ * it at the main controller again.
+ */
+ disable_irq_nosync(apple_mbox->irq_send_empty);
+ mbox_chan_txdone(&apple_mbox->chan, 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+{
+ struct apple_mbox *apple_mbox = data;
+ struct apple_mbox_msg msg;
+
+ while (apple_mbox_hw_recv(apple_mbox, &msg) == 0)
+ mbox_chan_received_data(&apple_mbox->chan, (void *)&msg);
+
+ /*
+ * The interrupt will keep firing even if there are no more messages
+ * unless we also acknowledge it at the mailbox level here.
+ * There's no race if a message comes in between the check in the while
+	 * loop above and the ack below: if a new message arrives in between
+	 * the two, the interrupt will just fire again immediately after the
+ * ack since it's level triggered.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_ack);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int apple_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+
+ /*
+ * Only some variants of this mailbox HW provide interrupt control
+ * at the mailbox level. We therefore need to handle enabling/disabling
+ * interrupts at the main interrupt controller anyway for hardware that
+ * doesn't. Just always keep the interrupts we care about enabled at
+ * the mailbox level so that both hardware revisions behave almost
+ * the same.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty |
+ apple_mbox->hw->irq_bit_send_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_enable);
+ }
+
+ enable_irq(apple_mbox->irq_recv_not_empty);
+ return 0;
+}
+
+static void apple_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+
+ disable_irq(apple_mbox->irq_recv_not_empty);
+}
+
+static const struct mbox_chan_ops apple_mbox_ops = {
+ .send_data = apple_mbox_chan_send_data,
+ .startup = apple_mbox_chan_startup,
+ .shutdown = apple_mbox_chan_shutdown,
+};
+
+static struct mbox_chan *apple_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ if (args->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ return &mbox->chans[0];
+}
+
+static int apple_mbox_probe(struct platform_device *pdev)
+{
+ int ret;
+ const struct of_device_id *match;
+ char *irqname;
+ struct apple_mbox *mbox;
+ struct device *dev = &pdev->dev;
+
+ match = of_match_node(apple_mbox_of_match, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+ if (!match->data)
+ return -EINVAL;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, mbox);
+
+ mbox->dev = dev;
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->regs))
+ return PTR_ERR(mbox->regs);
+
+ mbox->hw = match->data;
+ mbox->irq_recv_not_empty =
+ platform_get_irq_byname(pdev, "recv-not-empty");
+ if (mbox->irq_recv_not_empty < 0)
+ return -ENODEV;
+
+ mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty");
+ if (mbox->irq_send_empty < 0)
+ return -ENODEV;
+
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = &mbox->chan;
+ mbox->controller.ops = &apple_mbox_ops;
+ mbox->controller.txdone_irq = true;
+ mbox->controller.of_xlate = apple_mbox_of_xlate;
+ mbox->chan.con_priv = mbox;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dev, mbox->irq_recv_not_empty, NULL,
+ apple_mbox_recv_irq,
+ IRQF_NO_AUTOEN | IRQF_ONESHOT, irqname,
+ mbox);
+ if (ret)
+ return ret;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, mbox->irq_send_empty,
+ apple_mbox_send_empty_irq, IRQF_NO_AUTOEN,
+ irqname, mbox);
+ if (ret)
+ return ret;
+
+ return devm_mbox_controller_register(dev, &mbox->controller);
+}
+
+static const struct apple_mbox_hw apple_mbox_asc_hw = {
+ .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_ASC_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_ASC_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
+
+ .has_irq_controls = false,
+};
+
+static const struct apple_mbox_hw apple_mbox_m3_hw = {
+ .control_full = APPLE_M3_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_M3_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_M3_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_M3_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_M3_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_M3_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1,
+
+ .has_irq_controls = true,
+ .irq_enable = APPLE_M3_MBOX_IRQ_ENABLE,
+ .irq_ack = APPLE_M3_MBOX_IRQ_ACK,
+ .irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY,
+ .irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY,
+};
+
+static const struct of_device_id apple_mbox_of_match[] = {
+ { .compatible = "apple,t8103-asc-mailbox", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,t8103-m3-mailbox", .data = &apple_mbox_m3_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
+
+static struct platform_driver apple_mbox_driver = {
+ .driver = {
+ .name = "apple-mailbox",
+ .of_match_table = apple_mbox_of_match,
+ },
+ .probe = apple_mbox_probe,
+};
+module_platform_driver(apple_mbox_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple Mailbox driver");
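/*
 * A minimal sketch of a client driving this controller through the
 * generic mailbox API; the callback, device and message values are
 * illustrative:
 */
#include <linux/apple-mailbox.h>
#include <linux/mailbox_client.h>

static void example_rx(struct mbox_client *cl, void *data)
{
	struct apple_mbox_msg *msg = data;

	dev_info(cl->dev, "< %016llx %08x\n", msg->msg0, msg->msg1);
}

static int example_start(struct device *dev)
{
	static struct mbox_client cl;
	struct apple_mbox_msg msg = { .msg0 = 0x1234, .msg1 = 0 };
	struct mbox_chan *chan;
	int ret;

	cl.dev = dev;
	cl.rx_callback = example_rx;
	cl.tx_block = true;

	chan = mbox_request_channel(&cl, 0);	/* #mbox-cells = <0> */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &msg);	/* blocks until send-empty */
	return ret < 0 ? ret : 0;
}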
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index 86b7ce3549c5..fbfd0202047c 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -137,7 +137,6 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret = 0;
- struct resource *iomem;
struct bcm2835_mbox *mbox;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
@@ -153,8 +152,7 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
return -ENODEV;
}
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
return ret;
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
index 395ddc250828..e41bd2f5ea46 100644
--- a/drivers/mailbox/hi3660-mailbox.c
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -240,7 +240,6 @@ static int hi3660_mbox_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct hi3660_mbox *mbox;
struct mbox_chan *chan;
- struct resource *res;
unsigned long ch;
int err;
@@ -248,8 +247,7 @@ static int hi3660_mbox_probe(struct platform_device *pdev)
if (!mbox)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mbox->base = devm_ioremap_resource(dev, res);
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->base))
return PTR_ERR(mbox->base);
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index 560cd09538b1..fca61f5312d9 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -264,7 +264,6 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct hi6220_mbox *mbox;
- struct resource *res;
int i, err;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
@@ -287,15 +286,13 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
if (mbox->irq < 0)
return mbox->irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mbox->ipc = devm_ioremap_resource(dev, res);
+ mbox->ipc = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->ipc)) {
dev_err(dev, "ioremap ipc failed\n");
return PTR_ERR(mbox->ipc);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mbox->base = devm_ioremap_resource(dev, res);
+ mbox->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(mbox->base)) {
dev_err(dev, "ioremap buffer failed\n");
return PTR_ERR(mbox->base);
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 0ce75c6b36b6..ffe36a6bef9e 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/s4.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -18,6 +19,8 @@
#define IMX_MU_CHANS 16
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
+/* TX0/RX0 */
+#define IMX_MU_S4_CHANS 2
#define IMX_MU_CHAN_NAME_SIZE 20
enum imx_mu_chan_type {
@@ -47,6 +50,11 @@ struct imx_sc_rpc_msg_max {
u32 data[7];
};
+struct imx_s4_rpc_msg_max {
+ struct imx_s4_rpc_msg hdr;
+ u32 data[254];
+};
+
struct imx_mu_con_priv {
unsigned int idx;
char irq_desc[IMX_MU_CHAN_NAME_SIZE];
@@ -58,6 +66,7 @@ struct imx_mu_con_priv {
struct imx_mu_priv {
struct device *dev;
void __iomem *base;
+ void *msg;
spinlock_t xcr_lock; /* control register lock */
struct mbox_controller mbox;
@@ -75,7 +84,8 @@ struct imx_mu_priv {
enum imx_mu_type {
IMX_MU_V1,
- IMX_MU_V2,
+ IMX_MU_V2 = BIT(1),
+ IMX_MU_V2_S4 = BIT(15),
};
struct imx_mu_dcfg {
@@ -89,18 +99,18 @@ struct imx_mu_dcfg {
u32 xCR[4]; /* Control Registers */
};
-#define IMX_MU_xSR_GIPn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
-#define IMX_MU_xSR_RFn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
-#define IMX_MU_xSR_TEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+#define IMX_MU_xSR_RFn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xSR_TEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Enable */
-#define IMX_MU_xCR_GIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+#define IMX_MU_xCR_GIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
/* Receive Interrupt Enable */
-#define IMX_MU_xCR_RIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xCR_RIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
/* Transmit Interrupt Enable */
-#define IMX_MU_xCR_TIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
-#define IMX_MU_xCR_GIRn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
+#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
@@ -167,14 +177,22 @@ static int imx_mu_generic_rx(struct imx_mu_priv *priv,
return 0;
}
-static int imx_mu_scu_tx(struct imx_mu_priv *priv,
- struct imx_mu_con_priv *cp,
- void *data)
+static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
{
- struct imx_sc_rpc_msg_max *msg = data;
u32 *arg = data;
int i, ret;
u32 xsr;
+ u32 size, max_size, num_tr;
+
+ if (priv->dcfg->type & IMX_MU_V2_S4) {
+ size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
+ max_size = sizeof(struct imx_s4_rpc_msg_max);
+ num_tr = 8;
+ } else {
+ size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
+ max_size = sizeof(struct imx_sc_rpc_msg_max);
+ num_tr = 4;
+ }
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -183,27 +201,27 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
* sizeof yields bytes.
*/
- if (msg->hdr.size > sizeof(*msg) / 4) {
+ if (size > max_size / 4) {
/*
* The real message size can be different to
- * struct imx_sc_rpc_msg_max size
+ * struct imx_sc_rpc_msg_max/imx_s4_rpc_msg_max size
*/
- dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2);
+ dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on TX; got: %i bytes\n", max_size, size << 2);
return -EINVAL;
}
- for (i = 0; i < 4 && i < msg->hdr.size; i++)
- imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % 4) * 4);
- for (; i < msg->hdr.size; i++) {
+ for (i = 0; i < num_tr && i < size; i++)
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
+ for (; i < size; i++) {
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR],
xsr,
- xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % 4),
+ xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % num_tr),
0, 100);
if (ret) {
dev_err(priv->dev, "Send data index: %d timeout\n", i);
return ret;
}
- imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % 4) * 4);
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
}
imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
@@ -216,23 +234,32 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
return 0;
}
-static int imx_mu_scu_rx(struct imx_mu_priv *priv,
- struct imx_mu_con_priv *cp)
+static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
- struct imx_sc_rpc_msg_max msg;
- u32 *data = (u32 *)&msg;
+ u32 *data;
int i, ret;
u32 xsr;
+ u32 size, max_size;
+
+ data = (u32 *)priv->msg;
imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0));
*data++ = imx_mu_read(priv, priv->dcfg->xRR);
- if (msg.hdr.size > sizeof(msg) / 4) {
- dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
+ if (priv->dcfg->type & IMX_MU_V2_S4) {
+ size = ((struct imx_s4_rpc_msg_max *)priv->msg)->hdr.size;
+ max_size = sizeof(struct imx_s4_rpc_msg_max);
+ } else {
+ size = ((struct imx_sc_rpc_msg_max *)priv->msg)->hdr.size;
+ max_size = sizeof(struct imx_sc_rpc_msg_max);
+ }
+
+ if (size > max_size / 4) {
+ dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on RX; got: %i bytes\n", max_size, size << 2);
return -EINVAL;
}
- for (i = 1; i < msg.hdr.size; i++) {
+ for (i = 1; i < size; i++) {
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % 4), 0, 100);
if (ret) {
@@ -243,7 +270,7 @@ static int imx_mu_scu_rx(struct imx_mu_priv *priv,
}
imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
- mbox_chan_received_data(cp->chan, (void *)&msg);
+ mbox_chan_received_data(cp->chan, (void *)priv->msg);
return 0;
}
@@ -394,8 +421,8 @@ static const struct mbox_chan_ops imx_mu_ops = {
.shutdown = imx_mu_shutdown,
};
-static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
- const struct of_phandle_args *sp)
+static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
{
u32 type, idx, chan;
@@ -478,11 +505,12 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
}
-static void imx_mu_init_scu(struct imx_mu_priv *priv)
+static void imx_mu_init_specific(struct imx_mu_priv *priv)
{
unsigned int i;
+ int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;
- for (i = 0; i < IMX_MU_SCU_CHANS; i++) {
+ for (i = 0; i < num_chans; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
cp->idx = i < 2 ? 0 : i - 2;
@@ -493,8 +521,8 @@ static void imx_mu_init_scu(struct imx_mu_priv *priv)
"imx_mu_chan[%i-%i]", cp->type, cp->idx);
}
- priv->mbox.num_chans = IMX_MU_SCU_CHANS;
- priv->mbox.of_xlate = imx_mu_scu_xlate;
+ priv->mbox.num_chans = num_chans;
+ priv->mbox.of_xlate = imx_mu_specific_xlate;
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
@@ -508,6 +536,7 @@ static int imx_mu_probe(struct platform_device *pdev)
struct imx_mu_priv *priv;
const struct imx_mu_dcfg *dcfg;
int ret;
+ u32 size;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -528,6 +557,15 @@ static int imx_mu_probe(struct platform_device *pdev)
return -EINVAL;
priv->dcfg = dcfg;
+ if (priv->dcfg->type & IMX_MU_V2_S4)
+ size = sizeof(struct imx_s4_rpc_msg_max);
+ else
+ size = sizeof(struct imx_sc_rpc_msg_max);
+
+ priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!priv->msg)
+		return -ENOMEM;
+
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
if (PTR_ERR(priv->clk) != -ENOENT)
@@ -623,10 +661,21 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
.xCR = {0x110, 0x114, 0x120, 0x128},
};
+static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
+ .tx = imx_mu_specific_tx,
+ .rx = imx_mu_specific_rx,
+ .init = imx_mu_init_specific,
+ .type = IMX_MU_V2 | IMX_MU_V2_S4,
+ .xTR = 0x200,
+ .xRR = 0x280,
+ .xSR = {0xC, 0x118, 0x124, 0x12C},
+ .xCR = {0x110, 0x114, 0x120, 0x128},
+};
+
static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
- .tx = imx_mu_scu_tx,
- .rx = imx_mu_scu_rx,
- .init = imx_mu_init_scu,
+ .tx = imx_mu_specific_tx,
+ .rx = imx_mu_specific_rx,
+ .init = imx_mu_init_specific,
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
@@ -637,6 +686,7 @@ static const struct of_device_id imx_mu_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
{ .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
+ { .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
{ },
};
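/*
 * Note that "type" is now a bit mask rather than a plain enum value:
 * the S4 configuration sets IMX_MU_V2 | IMX_MU_V2_S4, so every version
 * check above tests bits with '&' instead of comparing with '=='. A
 * minimal illustration (these helpers are not in the driver):
 */
static bool example_is_v2(const struct imx_mu_dcfg *dcfg)
{
	return dcfg->type & IMX_MU_V2;		/* true for V2 and V2+S4 */
}

static bool example_is_s4(const struct imx_mu_dcfg *dcfg)
{
	return dcfg->type & IMX_MU_V2_S4;	/* true only for S4 MUs */
}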
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
index 75282666fb06..afb320e9d69c 100644
--- a/drivers/mailbox/mailbox-altera.c
+++ b/drivers/mailbox/mailbox-altera.c
@@ -285,7 +285,6 @@ static const struct mbox_chan_ops altera_mbox_ops = {
static int altera_mbox_probe(struct platform_device *pdev)
{
struct altera_mbox *mbox;
- struct resource *regs;
struct mbox_chan *chans;
int ret;
@@ -299,9 +298,7 @@ static int altera_mbox_probe(struct platform_device *pdev)
if (!chans)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- mbox->mbox_base = devm_ioremap_resource(&pdev->dev, regs);
+ mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->mbox_base))
return PTR_ERR(mbox->mbox_base);
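
devm_platform_ioremap_resource(), used in this conversion and the ones that follow, is a convenience wrapper; a sketch of the open-coded sequence it stands in for, assuming one MEM resource at index 0:

	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* map with devm lifetime */
	if (IS_ERR(base))
		return PTR_ERR(base);
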
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index ab3a6ab762d3..823061dd8c8e 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -408,7 +408,6 @@ static int sti_mbox_probe(struct platform_device *pdev)
struct sti_mbox_device *mdev;
struct device_node *np = pdev->dev.of_node;
struct mbox_chan *chans;
- struct resource *res;
int irq;
int ret;
@@ -425,8 +424,7 @@ static int sti_mbox_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mdev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->base = devm_ioremap_resource(&pdev->dev, res);
+ mdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->base))
return PTR_ERR(mdev->base);
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index 5b3a2dcd5955..946ea773ec33 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -170,7 +170,6 @@ static const struct mbox_chan_ops slimpro_mbox_ops = {
static int slimpro_mbox_probe(struct platform_device *pdev)
{
struct slimpro_mbox *ctx;
- struct resource *regs;
void __iomem *mb_base;
int rc;
int i;
@@ -181,8 +180,7 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mb_base = devm_ioremap_resource(&pdev->dev, regs);
+ mb_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mb_base))
return PTR_ERR(mb_base);
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 64175a893312..a8845b162dbf 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -195,7 +195,6 @@ static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
struct cmdq_task_cb *cb = &task->pkt->async_cb;
struct cmdq_cb_data data;
- WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
data.sta = sta;
data.data = cb->data;
data.pkt = task->pkt;
@@ -525,21 +524,20 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
static int cmdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct cmdq *cmdq;
int err, i;
struct gce_plat *plat_data;
struct device_node *phandle = dev->of_node;
struct device_node *node;
int alias_id = 0;
- char clk_name[4] = "gce";
+ static const char * const clk_name = "gce";
+ static const char * const clk_names[] = { "gce0", "gce1" };
cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cmdq->base = devm_ioremap_resource(dev, res);
+ cmdq->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cmdq->base))
return PTR_ERR(cmdq->base);
@@ -570,12 +568,9 @@ static int cmdq_probe(struct platform_device *pdev)
if (cmdq->gce_num > 1) {
for_each_child_of_node(phandle->parent, node) {
- char clk_id[8];
-
alias_id = of_alias_get_id(node, clk_name);
- if (alias_id < cmdq->gce_num) {
- snprintf(clk_id, sizeof(clk_id), "%s%d", clk_name, alias_id);
- cmdq->clocks[alias_id].id = clk_id;
+ if (alias_id >= 0 && alias_id < cmdq->gce_num) {
+ cmdq->clocks[alias_id].id = clk_names[alias_id];
cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
if (IS_ERR(cmdq->clocks[alias_id].clk)) {
dev_err(dev, "failed to get gce clk: %d\n", alias_id);
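
The clk_names change above removes a string-lifetime hazard: the deleted code stored a pointer to clk_id, a buffer local to the loop body, in cmdq->clocks[alias_id].id, where it would dangle once the iteration ended. Pointing at static storage keeps the name valid for the device's lifetime:

	/* static storage: the strings outlive probe */
	static const char * const clk_names[] = { "gce0", "gce1" };

	cmdq->clocks[alias_id].id = clk_names[alias_id];
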
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 7295e3835e30..58f3d569f095 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -699,7 +699,6 @@ static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
static int omap_mbox_probe(struct platform_device *pdev)
{
- struct resource *mem;
int ret;
struct mbox_chan *chnls;
struct omap_mbox **list, *mbox, *mboxblk;
@@ -776,8 +775,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
if (!mdev)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
+ mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->mbox_base))
return PTR_ERR(mdev->mbox_base);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 0296558f9e22..887a3704c12e 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -52,6 +52,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
@@ -62,31 +63,48 @@
#define MBOX_IRQ_NAME "pcc-mbox"
-static struct mbox_chan *pcc_mbox_channels;
-
-/* Array of cached virtual address for doorbell registers */
-static void __iomem **pcc_doorbell_vaddr;
-/* Array of cached virtual address for doorbell ack registers */
-static void __iomem **pcc_doorbell_ack_vaddr;
-/* Array of doorbell interrupts */
-static int *pcc_doorbell_irq;
+/**
+ * struct pcc_chan_reg - PCC register bundle
+ *
+ * @vaddr: cached virtual address for this register
+ * @gas: pointer to the generic address structure for this register
+ * @preserve_mask: bitmask to preserve when writing to this register
+ * @set_mask: bitmask to set when writing to this register
+ * @status_mask: bitmask to determine and/or update the status for this register
+ */
+struct pcc_chan_reg {
+ void __iomem *vaddr;
+ struct acpi_generic_address *gas;
+ u64 preserve_mask;
+ u64 set_mask;
+ u64 status_mask;
+};
-static struct mbox_controller pcc_mbox_ctrl = {};
/**
- * get_pcc_channel - Given a PCC subspace idx, get
- * the respective mbox_channel.
- * @id: PCC subspace index.
+ * struct pcc_chan_info - PCC channel specific information
*
- * Return: ERR_PTR(errno) if error, else pointer
- * to mbox channel.
+ * @chan: PCC channel information with Shared Memory Region info
+ * @db: PCC register bundle for the doorbell register
+ * @plat_irq_ack: PCC register bundle for the platform interrupt acknowledge
+ * register
+ * @cmd_complete: PCC register bundle for the command complete check register
+ * @cmd_update: PCC register bundle for the command complete update register
+ * @error: PCC register bundle for the error status register
+ * @plat_irq: platform interrupt
*/
-static struct mbox_chan *get_pcc_channel(int id)
-{
- if (id < 0 || id >= pcc_mbox_ctrl.num_chans)
- return ERR_PTR(-ENOENT);
+struct pcc_chan_info {
+ struct pcc_mbox_chan chan;
+ struct pcc_chan_reg db;
+ struct pcc_chan_reg plat_irq_ack;
+ struct pcc_chan_reg cmd_complete;
+ struct pcc_chan_reg cmd_update;
+ struct pcc_chan_reg error;
+ int plat_irq;
+};
- return &pcc_mbox_channels[id];
-}
+#define to_pcc_chan_info(c) container_of(c, struct pcc_chan_info, chan)
+static struct pcc_chan_info *chan_info;
+static int pcc_chan_count;
/*
* PCC can be used with perf critical drivers such as CPPC
@@ -96,10 +114,8 @@ static struct mbox_chan *get_pcc_channel(int id)
* The below read_register and write_registers are used to read and
* write from perf critical registers such as PCC doorbell register
*/
-static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
+static void read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
{
- int ret_val = 0;
-
switch (bit_width) {
case 8:
*val = readb(vaddr);
@@ -113,19 +129,11 @@ static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
case 64:
*val = readq(vaddr);
break;
- default:
- pr_debug("Error: Cannot read register of %u bit width",
- bit_width);
- ret_val = -EFAULT;
- break;
}
- return ret_val;
}
-static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
+static void write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
{
- int ret_val = 0;
-
switch (bit_width) {
case 8:
writeb(val, vaddr);
@@ -139,13 +147,54 @@ static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
case 64:
writeq(val, vaddr);
break;
- default:
- pr_debug("Error: Cannot write register of %u bit width",
- bit_width);
- ret_val = -EFAULT;
- break;
}
- return ret_val;
+}
+
+static int pcc_chan_reg_read(struct pcc_chan_reg *reg, u64 *val)
+{
+ int ret = 0;
+
+ if (!reg->gas) {
+ *val = 0;
+ return 0;
+ }
+
+ if (reg->vaddr)
+ read_register(reg->vaddr, val, reg->gas->bit_width);
+ else
+ ret = acpi_read(val, reg->gas);
+
+ return ret;
+}
+
+static int pcc_chan_reg_write(struct pcc_chan_reg *reg, u64 val)
+{
+ int ret = 0;
+
+ if (!reg->gas)
+ return 0;
+
+ if (reg->vaddr)
+ write_register(reg->vaddr, val, reg->gas->bit_width);
+ else
+ ret = acpi_write(val, reg->gas);
+
+ return ret;
+}
+
+static int pcc_chan_reg_read_modify_write(struct pcc_chan_reg *reg)
+{
+ int ret = 0;
+ u64 val;
+
+ ret = pcc_chan_reg_read(reg, &val);
+ if (ret)
+ return ret;
+
+ val &= reg->preserve_mask;
+ val |= reg->set_mask;
+
+ return pcc_chan_reg_write(reg, val);
}
/**
@@ -174,42 +223,42 @@ static int pcc_map_interrupt(u32 interrupt, u32 flags)
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
+ * @irq: interrupt number
+ * @p: data/cookie passed from the caller to identify the channel
+ *
+ * Return: IRQ_HANDLED if the interrupt is handled or IRQ_NONE if not
*/
static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
- struct acpi_generic_address *doorbell_ack;
- struct acpi_pcct_hw_reduced *pcct_ss;
+ struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
- u64 doorbell_ack_preserve;
- u64 doorbell_ack_write;
- u64 doorbell_ack_val;
+ u64 val;
int ret;
- pcct_ss = chan->con_priv;
+ pchan = chan->con_priv;
- mbox_chan_received_data(chan, NULL);
+ ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ if (ret)
+ return IRQ_NONE;
- if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
- struct acpi_pcct_hw_reduced_type2 *pcct2_ss = chan->con_priv;
- u32 id = chan - pcc_mbox_channels;
+ val &= pchan->cmd_complete.status_mask;
+ if (!val)
+ return IRQ_NONE;
- doorbell_ack = &pcct2_ss->platform_ack_register;
- doorbell_ack_preserve = pcct2_ss->ack_preserve_mask;
- doorbell_ack_write = pcct2_ss->ack_write_mask;
+ ret = pcc_chan_reg_read(&pchan->error, &val);
+ if (ret)
+ return IRQ_NONE;
+ val &= pchan->error.status_mask;
+ if (val) {
+ val &= ~pchan->error.status_mask;
+ pcc_chan_reg_write(&pchan->error, val);
+ return IRQ_NONE;
+ }
- ret = read_register(pcc_doorbell_ack_vaddr[id],
- &doorbell_ack_val,
- doorbell_ack->bit_width);
- if (ret)
- return IRQ_NONE;
+ if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+ return IRQ_NONE;
- ret = write_register(pcc_doorbell_ack_vaddr[id],
- (doorbell_ack_val & doorbell_ack_preserve)
- | doorbell_ack_write,
- doorbell_ack->bit_width);
- if (ret)
- return IRQ_NONE;
- }
+ mbox_chan_received_data(chan, NULL);
return IRQ_HANDLED;
}
@@ -224,29 +273,26 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
* ACPI package. This is used to lookup the array of PCC
* subspaces as parsed by the PCC Mailbox controller.
*
- * Return: Pointer to the Mailbox Channel if successful or
- * ERR_PTR.
+ * Return: Pointer to the PCC Mailbox Channel if successful or ERR_PTR.
*/
-struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id)
+struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
- struct device *dev = pcc_mbox_ctrl.dev;
+ struct pcc_chan_info *pchan;
struct mbox_chan *chan;
+ struct device *dev;
unsigned long flags;
- /*
- * Each PCC Subspace is a Mailbox Channel.
- * The PCC Clients get their PCC Subspace ID
- * from their own tables and pass it here.
- * This returns a pointer to the PCC subspace
- * for the Client to operate on.
- */
- chan = get_pcc_channel(subspace_id);
+ if (subspace_id < 0 || subspace_id >= pcc_chan_count)
+ return ERR_PTR(-ENOENT);
+ pchan = chan_info + subspace_id;
+ chan = pchan->chan.mchan;
if (IS_ERR(chan) || chan->cl) {
dev_err(dev, "Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
+ dev = chan->mbox->dev;
spin_lock_irqsave(&chan->lock, flags);
chan->msg_free = 0;
@@ -260,44 +306,40 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
spin_unlock_irqrestore(&chan->lock, flags);
- if (pcc_doorbell_irq[subspace_id] > 0) {
+ if (pchan->plat_irq > 0) {
int rc;
- rc = devm_request_irq(dev, pcc_doorbell_irq[subspace_id],
- pcc_mbox_irq, 0, MBOX_IRQ_NAME, chan);
+ rc = devm_request_irq(dev, pchan->plat_irq, pcc_mbox_irq, 0,
+ MBOX_IRQ_NAME, chan);
if (unlikely(rc)) {
dev_err(dev, "failed to register PCC interrupt %d\n",
- pcc_doorbell_irq[subspace_id]);
- pcc_mbox_free_channel(chan);
- chan = ERR_PTR(rc);
+ pchan->plat_irq);
+ pcc_mbox_free_channel(&pchan->chan);
+ return ERR_PTR(rc);
}
}
- return chan;
+ return &pchan->chan;
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
/**
* pcc_mbox_free_channel - Clients call this to free their Channel.
*
- * @chan: Pointer to the mailbox channel as returned by
- * pcc_mbox_request_channel()
+ * @pchan: Pointer to the PCC mailbox channel as returned by
+ * pcc_mbox_request_channel()
*/
-void pcc_mbox_free_channel(struct mbox_chan *chan)
+void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
{
- u32 id = chan - pcc_mbox_channels;
+ struct pcc_chan_info *pchan_info = to_pcc_chan_info(pchan);
+ struct mbox_chan *chan = pchan->mchan;
unsigned long flags;
if (!chan || !chan->cl)
return;
- if (id >= pcc_mbox_ctrl.num_chans) {
- pr_debug("pcc_mbox_free_channel: Invalid mbox_chan passed\n");
- return;
- }
-
- if (pcc_doorbell_irq[id] > 0)
- devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
+ if (pchan_info->plat_irq > 0)
+ devm_free_irq(chan->mbox->dev, pchan_info->plat_irq, chan);
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
@@ -323,40 +365,14 @@ EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
*/
static int pcc_send_data(struct mbox_chan *chan, void *data)
{
- struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
- struct acpi_generic_address *doorbell;
- u64 doorbell_preserve;
- u64 doorbell_val;
- u64 doorbell_write;
- u32 id = chan - pcc_mbox_channels;
- int ret = 0;
-
- if (id >= pcc_mbox_ctrl.num_chans) {
- pr_debug("pcc_send_data: Invalid mbox_chan passed\n");
- return -ENOENT;
- }
+ int ret;
+ struct pcc_chan_info *pchan = chan->con_priv;
- doorbell = &pcct_ss->doorbell_register;
- doorbell_preserve = pcct_ss->preserve_mask;
- doorbell_write = pcct_ss->write_mask;
+ ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+ if (ret)
+ return ret;
- /* Sync notification from OS to Platform. */
- if (pcc_doorbell_vaddr[id]) {
- ret = read_register(pcc_doorbell_vaddr[id], &doorbell_val,
- doorbell->bit_width);
- if (ret)
- return ret;
- ret = write_register(pcc_doorbell_vaddr[id],
- (doorbell_val & doorbell_preserve) | doorbell_write,
- doorbell->bit_width);
- } else {
- ret = acpi_read(&doorbell_val, doorbell);
- if (ret)
- return ret;
- ret = acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
- doorbell);
- }
- return ret;
+ return pcc_chan_reg_read_modify_write(&pchan->db);
}
static const struct mbox_chan_ops pcc_chan_ops = {
@@ -364,7 +380,7 @@ static const struct mbox_chan_ops pcc_chan_ops = {
};
/**
- * parse_pcc_subspaces -- Count PCC subspaces defined
+ * parse_pcc_subspace - Count PCC subspaces defined
* @header: Pointer to the ACPI subtable header under the PCCT.
* @end: End of subtable entry.
*
@@ -384,41 +400,172 @@ static int parse_pcc_subspace(union acpi_subtable_headers *header,
return -EINVAL;
}
+static int
+pcc_chan_reg_init(struct pcc_chan_reg *reg, struct acpi_generic_address *gas,
+ u64 preserve_mask, u64 set_mask, u64 status_mask, char *name)
+{
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ if (!(gas->bit_width >= 8 && gas->bit_width <= 64 &&
+ is_power_of_2(gas->bit_width))) {
+ pr_err("Error: Cannot access register of %u bit width",
+ gas->bit_width);
+ return -EFAULT;
+ }
+
+ reg->vaddr = acpi_os_ioremap(gas->address, gas->bit_width / 8);
+ if (!reg->vaddr) {
+ pr_err("Failed to ioremap PCC %s register\n", name);
+ return -ENOMEM;
+ }
+ }
+ reg->gas = gas;
+ reg->preserve_mask = preserve_mask;
+ reg->set_mask = set_mask;
+ reg->status_mask = status_mask;
+ return 0;
+}
+
/**
* pcc_parse_subspace_irq - Parse the PCC IRQ and PCC ACK register
- * There should be one entry per PCC client.
- * @id: PCC subspace index.
- * @pcct_ss: Pointer to the ACPI subtable header under the PCCT.
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
*
* Return: 0 for Success, else errno.
*
- * This gets called for each entry in the PCC table.
+ * There should be one entry per PCC channel. This gets called for each
+ * entry in the PCC table. It uses the PCCT Type 1 structure for all
+ * applicable types (Types 1-4) to fetch the irq.
*/
-static int pcc_parse_subspace_irq(int id,
- struct acpi_pcct_hw_reduced *pcct_ss)
+static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
{
- pcc_doorbell_irq[id] = pcc_map_interrupt(pcct_ss->platform_interrupt,
- (u32)pcct_ss->flags);
- if (pcc_doorbell_irq[id] <= 0) {
+ int ret = 0;
+ struct acpi_pcct_hw_reduced *pcct_ss;
+
+ if (pcct_entry->type < ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
+ pcct_entry->type > ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ return 0;
+
+ pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
+ pchan->plat_irq = pcc_map_interrupt(pcct_ss->platform_interrupt,
+ (u32)pcct_ss->flags);
+ if (pchan->plat_irq <= 0) {
pr_err("PCC GSI %d not registered\n",
pcct_ss->platform_interrupt);
return -EINVAL;
}
- if (pcct_ss->header.type
- == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
- pcc_doorbell_ack_vaddr[id] = acpi_os_ioremap(
- pcct2_ss->platform_ack_register.address,
- pcct2_ss->platform_ack_register.bit_width / 8);
- if (!pcc_doorbell_ack_vaddr[id]) {
- pr_err("Failed to ioremap PCC ACK register\n");
- return -ENOMEM;
- }
+ ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
+ &pcct2_ss->platform_ack_register,
+ pcct2_ss->ack_preserve_mask,
+ pcct2_ss->ack_write_mask, 0,
+ "PLAT IRQ ACK");
+
+ } else if (pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE ||
+ pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE) {
+ struct acpi_pcct_ext_pcc_master *pcct_ext = (void *)pcct_ss;
+
+ ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
+ &pcct_ext->platform_ack_register,
+ pcct_ext->ack_preserve_mask,
+ pcct_ext->ack_set_mask, 0,
+ "PLAT IRQ ACK");
}
- return 0;
+ return ret;
+}
+
+/**
+ * pcc_parse_subspace_db_reg - Parse the PCC doorbell register
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_parse_subspace_db_reg(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
+{
+ int ret = 0;
+
+ if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_subspace *pcct_ss;
+
+ pcct_ss = (struct acpi_pcct_subspace *)pcct_entry;
+
+ ret = pcc_chan_reg_init(&pchan->db,
+ &pcct_ss->doorbell_register,
+ pcct_ss->preserve_mask,
+ pcct_ss->write_mask, 0, "Doorbell");
+
+ } else {
+ struct acpi_pcct_ext_pcc_master *pcct_ext;
+
+ pcct_ext = (struct acpi_pcct_ext_pcc_master *)pcct_entry;
+
+ ret = pcc_chan_reg_init(&pchan->db,
+ &pcct_ext->doorbell_register,
+ pcct_ext->preserve_mask,
+ pcct_ext->write_mask, 0, "Doorbell");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->cmd_complete,
+ &pcct_ext->cmd_complete_register,
+ 0, 0, pcct_ext->cmd_complete_mask,
+ "Command Complete Check");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->cmd_update,
+ &pcct_ext->cmd_update_register,
+ pcct_ext->cmd_update_preserve_mask,
+ pcct_ext->cmd_update_set_mask, 0,
+ "Command Complete Update");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->error,
+ &pcct_ext->error_status_register,
+ 0, 0, pcct_ext->error_status_mask,
+ "Error Status");
+ }
+ return ret;
+}
+
+/**
+ * pcc_parse_subspace_shmem - Parse the PCC Shared Memory Region information
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
+ *
+ */
+static void pcc_parse_subspace_shmem(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
+{
+ if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_subspace *pcct_ss =
+ (struct acpi_pcct_subspace *)pcct_entry;
+
+ pchan->chan.shmem_base_addr = pcct_ss->base_address;
+ pchan->chan.shmem_size = pcct_ss->length;
+ pchan->chan.latency = pcct_ss->latency;
+ pchan->chan.max_access_rate = pcct_ss->max_access_rate;
+ pchan->chan.min_turnaround_time = pcct_ss->min_turnaround_time;
+ } else {
+ struct acpi_pcct_ext_pcc_master *pcct_ext =
+ (struct acpi_pcct_ext_pcc_master *)pcct_entry;
+
+ pchan->chan.shmem_base_addr = pcct_ext->base_address;
+ pchan->chan.shmem_size = pcct_ext->length;
+ pchan->chan.latency = pcct_ext->latency;
+ pchan->chan.max_access_rate = pcct_ext->max_access_rate;
+ pchan->chan.min_turnaround_time = pcct_ext->min_turnaround_time;
+ }
}
/**
@@ -428,16 +575,12 @@ static int pcc_parse_subspace_irq(int id,
*/
static int __init acpi_pcc_probe(void)
{
+ int count, i, rc = 0;
+ acpi_status status;
struct acpi_table_header *pcct_tbl;
- struct acpi_subtable_header *pcct_entry;
- struct acpi_table_pcct *acpi_pcct_tbl;
struct acpi_subtable_proc proc[ACPI_PCCT_TYPE_RESERVED];
- int count, i, rc;
- acpi_status status = AE_OK;
- /* Search for PCCT */
status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
-
if (ACPI_FAILURE(status) || !pcct_tbl)
return -ENODEV;
@@ -459,33 +602,60 @@ static int __init acpi_pcc_probe(void)
pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
rc = -EINVAL;
- goto err_put_pcct;
+ } else {
+ pcc_chan_count = count;
}
- pcc_mbox_channels = kcalloc(count, sizeof(struct mbox_chan),
- GFP_KERNEL);
- if (!pcc_mbox_channels) {
- pr_err("Could not allocate space for PCC mbox channels\n");
- rc = -ENOMEM;
- goto err_put_pcct;
- }
+ acpi_put_table(pcct_tbl);
+
+ return rc;
+}
+
+/**
+ * pcc_mbox_probe - Called when we find a match for the
+ * PCCT platform device. This is purely used to represent
+ * the PCCT as a virtual device for registering with the
+ * generic Mailbox framework.
+ *
+ * @pdev: Pointer to platform device returned when a match
+ * is found.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mbox_controller *pcc_mbox_ctrl;
+ struct mbox_chan *pcc_mbox_channels;
+ struct acpi_table_header *pcct_tbl;
+ struct acpi_subtable_header *pcct_entry;
+ struct acpi_table_pcct *acpi_pcct_tbl;
+ acpi_status status = AE_OK;
+ int i, rc, count = pcc_chan_count;
+
+ /* Search for PCCT */
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
+
+ if (ACPI_FAILURE(status) || !pcct_tbl)
+ return -ENODEV;
- pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
- if (!pcc_doorbell_vaddr) {
+ pcc_mbox_channels = devm_kcalloc(dev, count, sizeof(*pcc_mbox_channels),
+ GFP_KERNEL);
+ if (!pcc_mbox_channels) {
rc = -ENOMEM;
- goto err_free_mbox;
+ goto err;
}
- pcc_doorbell_ack_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
- if (!pcc_doorbell_ack_vaddr) {
+ chan_info = devm_kcalloc(dev, count, sizeof(*chan_info), GFP_KERNEL);
+ if (!chan_info) {
rc = -ENOMEM;
- goto err_free_db_vaddr;
+ goto err;
}
- pcc_doorbell_irq = kcalloc(count, sizeof(int), GFP_KERNEL);
- if (!pcc_doorbell_irq) {
+ pcc_mbox_ctrl = devm_kmalloc(dev, sizeof(*pcc_mbox_ctrl), GFP_KERNEL);
+ if (!pcc_mbox_ctrl) {
rc = -ENOMEM;
- goto err_free_db_ack_vaddr;
+ goto err;
}
/* Point to the first PCC subspace entry */
@@ -494,85 +664,55 @@ static int __init acpi_pcc_probe(void)
acpi_pcct_tbl = (struct acpi_table_pcct *) pcct_tbl;
if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL)
- pcc_mbox_ctrl.txdone_irq = true;
+ pcc_mbox_ctrl->txdone_irq = true;
for (i = 0; i < count; i++) {
- struct acpi_generic_address *db_reg;
- struct acpi_pcct_subspace *pcct_ss;
- pcc_mbox_channels[i].con_priv = pcct_entry;
+ struct pcc_chan_info *pchan = chan_info + i;
- if (pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
- pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
- struct acpi_pcct_hw_reduced *pcct_hrss;
+ pcc_mbox_channels[i].con_priv = pchan;
+ pchan->chan.mchan = &pcc_mbox_channels[i];
- pcct_hrss = (struct acpi_pcct_hw_reduced *) pcct_entry;
+ if (pcct_entry->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE &&
+ !pcc_mbox_ctrl->txdone_irq) {
+ pr_err("Plaform Interrupt flag must be set to 1");
+ rc = -EINVAL;
+ goto err;
+ }
- if (pcc_mbox_ctrl.txdone_irq) {
- rc = pcc_parse_subspace_irq(i, pcct_hrss);
- if (rc < 0)
- goto err;
- }
+ if (pcc_mbox_ctrl->txdone_irq) {
+ rc = pcc_parse_subspace_irq(pchan, pcct_entry);
+ if (rc < 0)
+ goto err;
}
- pcct_ss = (struct acpi_pcct_subspace *) pcct_entry;
+ rc = pcc_parse_subspace_db_reg(pchan, pcct_entry);
+ if (rc < 0)
+ goto err;
+
+ pcc_parse_subspace_shmem(pchan, pcct_entry);
- /* If doorbell is in system memory cache the virt address */
- db_reg = &pcct_ss->doorbell_register;
- if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
- db_reg->bit_width/8);
pcct_entry = (struct acpi_subtable_header *)
((unsigned long) pcct_entry + pcct_entry->length);
}
- pcc_mbox_ctrl.num_chans = count;
+ pcc_mbox_ctrl->num_chans = count;
- pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans);
+ pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl->num_chans);
- return 0;
+ pcc_mbox_ctrl->chans = pcc_mbox_channels;
+ pcc_mbox_ctrl->ops = &pcc_chan_ops;
+ pcc_mbox_ctrl->dev = dev;
+ pr_info("Registering PCC driver as Mailbox controller\n");
+ rc = mbox_controller_register(pcc_mbox_ctrl);
+ if (rc)
+ pr_err("Err registering PCC as Mailbox controller: %d\n", rc);
+ else
+ return 0;
err:
- kfree(pcc_doorbell_irq);
-err_free_db_ack_vaddr:
- kfree(pcc_doorbell_ack_vaddr);
-err_free_db_vaddr:
- kfree(pcc_doorbell_vaddr);
-err_free_mbox:
- kfree(pcc_mbox_channels);
-err_put_pcct:
acpi_put_table(pcct_tbl);
return rc;
}
-/**
- * pcc_mbox_probe - Called when we find a match for the
- * PCCT platform device. This is purely used to represent
- * the PCCT as a virtual device for registering with the
- * generic Mailbox framework.
- *
- * @pdev: Pointer to platform device returned when a match
- * is found.
- *
- * Return: 0 for Success, else errno.
- */
-static int pcc_mbox_probe(struct platform_device *pdev)
-{
- int ret = 0;
-
- pcc_mbox_ctrl.chans = pcc_mbox_channels;
- pcc_mbox_ctrl.ops = &pcc_chan_ops;
- pcc_mbox_ctrl.dev = &pdev->dev;
-
- pr_info("Registering PCC driver as Mailbox controller\n");
- ret = mbox_controller_register(&pcc_mbox_ctrl);
-
- if (ret) {
- pr_err("Err registering PCC as Mailbox controller: %d\n", ret);
- ret = -ENODEV;
- }
-
- return ret;
-}
-
static struct platform_driver pcc_mbox_driver = {
.probe = pcc_mbox_probe,
.driver = {
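
The pcc_chan_reg bundle introduced above folds the doorbell, interrupt-ack, and command registers into one access pattern, so ringing the doorbell reduces to a read-modify-write over the preserve/set masks. A sketch of what pcc_chan_reg_read_modify_write() does for the doorbell, error handling elided, using names from the hunks above:

	u64 val;

	pcc_chan_reg_read(&pchan->db, &val);	/* current register contents */
	val &= pchan->db.preserve_mask;		/* keep only the preserved bits */
	val |= pchan->db.set_mask;		/* OR in the doorbell write bits */
	pcc_chan_reg_write(&pchan->db, val);
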
diff --git a/drivers/mailbox/platform_mhu.c b/drivers/mailbox/platform_mhu.c
index b6e34952246b..a5922ac0b0bf 100644
--- a/drivers/mailbox/platform_mhu.c
+++ b/drivers/mailbox/platform_mhu.c
@@ -117,7 +117,6 @@ static int platform_mhu_probe(struct platform_device *pdev)
int i, err;
struct platform_mhu *mhu;
struct device *dev = &pdev->dev;
- struct resource *res;
int platform_mhu_reg[MHU_CHANS] = {
MHU_SEC_OFFSET, MHU_LP_OFFSET, MHU_HP_OFFSET
};
@@ -127,8 +126,7 @@ static int platform_mhu_probe(struct platform_device *pdev)
if (!mhu)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mhu->base = devm_ioremap_resource(dev, res);
+ mhu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mhu->base)) {
dev_err(dev, "ioremap failed\n");
return PTR_ERR(mhu->base);
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 82ccfaf14b24..9325d2abc745 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -33,10 +33,6 @@ static const struct qcom_apcs_ipc_data ipq6018_apcs_data = {
.offset = 8, .clk_name = "qcom,apss-ipq6018-clk"
};
-static const struct qcom_apcs_ipc_data ipq8074_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
static const struct qcom_apcs_ipc_data msm8916_apcs_data = {
.offset = 8, .clk_name = "qcom-apcs-msm8916-clk"
};
@@ -49,18 +45,6 @@ static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
.offset = 16, .clk_name = NULL
};
-static const struct qcom_apcs_ipc_data msm8998_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
-static const struct qcom_apcs_ipc_data sdm660_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
-static const struct qcom_apcs_ipc_data sm6125_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
.offset = 12, .clk_name = NULL
};
@@ -95,7 +79,6 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
struct qcom_apcs_ipc *apcs;
const struct qcom_apcs_ipc_data *apcs_data;
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
unsigned long i;
int ret;
@@ -104,8 +87,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
if (!apcs)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -160,21 +142,22 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
- { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq8074_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
- { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sdm660-apcs-hmss-global", .data = &sdm660_apcs_data },
+ { .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sm6125-apcs-hmss-global", .data = &sm6125_apcs_data },
+ { .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sm6115-apcs-hmss-global", .data = &sdm660_apcs_data },
+ { .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
{}
};
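
With the duplicate per-SoC structures collapsed into msm8994_apcs_data, each .data pointer reaches the driver the usual way; a sketch of the lookup at probe time (standard OF helper, not part of this hunk):

	const struct qcom_apcs_ipc_data *apcs_data;

	apcs_data = of_device_get_match_data(&pdev->dev);
	/* apcs_data->offset locates the IPC register inside the mapped block */
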
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index b84e0587937c..15d538fe2113 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -205,7 +205,6 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct stm32_ipcc *ipcc;
- struct resource *res;
unsigned long i;
int ret;
u32 ip_ver;
@@ -235,8 +234,7 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
}
/* regs */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ipcc->reg_base = devm_ioremap_resource(dev, res);
+ ipcc->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ipcc->reg_base))
return PTR_ERR(ipcc->reg_base);
diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c
index ccecf2e5941d..7f8d931042d3 100644
--- a/drivers/mailbox/sun6i-msgbox.c
+++ b/drivers/mailbox/sun6i-msgbox.c
@@ -197,7 +197,6 @@ static int sun6i_msgbox_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mbox_chan *chans;
struct reset_control *reset;
- struct resource *res;
struct sun6i_msgbox *mbox;
int i, ret;
@@ -246,13 +245,7 @@ static int sun6i_msgbox_probe(struct platform_device *pdev)
goto err_disable_unprepare;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto err_disable_unprepare;
- }
-
- mbox->regs = devm_ioremap_resource(&pdev->dev, res);
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
dev_err(dev, "Failed to map MMIO resource: %d\n", ret);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 5fc989a6d452..9ed9c955add7 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -178,7 +178,6 @@
#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
-#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -190,6 +189,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
+#include "bcache_ondisk.h"
#include "bset.h"
#include "util.h"
#include "closure.h"
@@ -395,8 +395,6 @@ struct cached_dev {
atomic_t io_errors;
unsigned int error_limit;
unsigned int offline_seconds;
-
- char backing_dev_name[BDEVNAME_SIZE];
};
enum alloc_reserve {
@@ -470,8 +468,6 @@ struct cache {
atomic_long_t meta_sectors_written;
atomic_long_t btree_sectors_written;
atomic_long_t sectors_written;
-
- char cache_dev_name[BDEVNAME_SIZE];
};
struct gc_stat {
diff --git a/drivers/md/bcache/bcache_ondisk.h b/drivers/md/bcache/bcache_ondisk.h
new file mode 100644
index 000000000000..97413586195b
--- /dev/null
+++ b/drivers/md/bcache/bcache_ondisk.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_BCACHE_H
+#define _LINUX_BCACHE_H
+
+/*
+ * Bcache on disk data structures
+ */
+
+#include <linux/types.h>
+
+#define BITMASK(name, type, field, offset, size) \
+static inline __u64 name(const type *k) \
+{ return (k->field >> offset) & ~(~0ULL << size); } \
+ \
+static inline void SET_##name(type *k, __u64 v) \
+{ \
+ k->field &= ~(~(~0ULL << size) << offset); \
+ k->field |= (v & ~(~0ULL << size)) << offset; \
+}
+
+/* Btree keys - all units are in sectors */
+
+struct bkey {
+ __u64 high;
+ __u64 low;
+ __u64 ptr[];
+};
+
+#define KEY_FIELD(name, field, offset, size) \
+ BITMASK(name, struct bkey, field, offset, size)
+
+#define PTR_FIELD(name, offset, size) \
+static inline __u64 name(const struct bkey *k, unsigned int i) \
+{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
+ \
+static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
+{ \
+ k->ptr[i] &= ~(~(~0ULL << size) << offset); \
+ k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
+}
+
+#define KEY_SIZE_BITS 16
+#define KEY_MAX_U64S 8
+
+KEY_FIELD(KEY_PTRS, high, 60, 3)
+KEY_FIELD(__PAD0, high, 58, 2)
+KEY_FIELD(KEY_CSUM, high, 56, 2)
+KEY_FIELD(__PAD1, high, 55, 1)
+KEY_FIELD(KEY_DIRTY, high, 36, 1)
+
+KEY_FIELD(KEY_SIZE, high, 20, KEY_SIZE_BITS)
+KEY_FIELD(KEY_INODE, high, 0, 20)
+
+/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
+
+static inline __u64 KEY_OFFSET(const struct bkey *k)
+{
+ return k->low;
+}
+
+static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
+{
+ k->low = v;
+}
+
+/*
+ * The high bit being set is a relic from when we used it to do binary
+ * searches - it told you where a key started. It's not used anymore,
+ * and can probably be safely dropped.
+ */
+#define KEY(inode, offset, size) \
+((struct bkey) { \
+ .high = (1ULL << 63) | ((__u64) (size) << 20) | (inode), \
+ .low = (offset) \
+})
+
+#define ZERO_KEY KEY(0, 0, 0)
+
+#define MAX_KEY_INODE (~(~0 << 20))
+#define MAX_KEY_OFFSET (~0ULL >> 1)
+#define MAX_KEY KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
+
+#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
+#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
+
+#define PTR_DEV_BITS 12
+
+PTR_FIELD(PTR_DEV, 51, PTR_DEV_BITS)
+PTR_FIELD(PTR_OFFSET, 8, 43)
+PTR_FIELD(PTR_GEN, 0, 8)
+
+#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
+
+#define MAKE_PTR(gen, offset, dev) \
+ ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
+
+/* Bkey utility code */
+
+static inline unsigned long bkey_u64s(const struct bkey *k)
+{
+ return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
+}
+
+static inline unsigned long bkey_bytes(const struct bkey *k)
+{
+ return bkey_u64s(k) * sizeof(__u64);
+}
+
+#define bkey_copy(_dest, _src) memcpy(_dest, _src, bkey_bytes(_src))
+
+static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
+{
+ SET_KEY_INODE(dest, KEY_INODE(src));
+ SET_KEY_OFFSET(dest, KEY_OFFSET(src));
+}
+
+static inline struct bkey *bkey_next(const struct bkey *k)
+{
+ __u64 *d = (void *) k;
+
+ return (struct bkey *) (d + bkey_u64s(k));
+}
+
+static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
+{
+ __u64 *d = (void *) k;
+
+ return (struct bkey *) (d + nr_keys);
+}
+/* Enough for a key with 6 pointers */
+#define BKEY_PAD 8
+
+#define BKEY_PADDED(key) \
+ union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
+
+/* Superblock */
+
+/* Version 0: Cache device
+ * Version 1: Backing device
+ * Version 2: Seed pointer into btree node checksum
+ * Version 3: Cache device with new UUID format
+ * Version 4: Backing device with data offset
+ */
+#define BCACHE_SB_VERSION_CDEV 0
+#define BCACHE_SB_VERSION_BDEV 1
+#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
+#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
+#define BCACHE_SB_VERSION_CDEV_WITH_FEATURES 5
+#define BCACHE_SB_VERSION_BDEV_WITH_FEATURES 6
+#define BCACHE_SB_MAX_VERSION 6
+
+#define SB_SECTOR 8
+#define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT)
+#define SB_SIZE 4096
+#define SB_LABEL_SIZE 32
+#define SB_JOURNAL_BUCKETS 256U
+/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
+#define MAX_CACHES_PER_SET 8
+
+#define BDEV_DATA_START_DEFAULT 16 /* sectors */
+
+struct cache_sb_disk {
+ __le64 csum;
+ __le64 offset; /* sector where this sb was written */
+ __le64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __le64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __le64 flags;
+ __le64 seq;
+
+ __le64 feature_compat;
+ __le64 feature_incompat;
+ __le64 feature_ro_compat;
+
+ __le64 pad[5];
+
+ union {
+ struct {
+ /* Cache devices */
+ __le64 nbuckets; /* device size */
+
+ __le16 block_size; /* sectors */
+ __le16 bucket_size; /* sectors */
+
+ __le16 nr_in_set;
+ __le16 nr_this_dev;
+ };
+ struct {
+ /* Backing devices */
+ __le64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __le32 last_mount; /* time overflow in y2106 */
+
+ __le16 first_bucket;
+ union {
+ __le16 njournal_buckets;
+ __le16 keys;
+ };
+ __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+ __le16 obso_bucket_size_hi; /* obsoleted */
+};
+
+/*
+ * This is for in-memory bcache super block.
+ * NOTE: cache_sb is NOT exactly mapping to cache_sb_disk, the member
+ * size, ordering and even whole struct size may be different
+ * from cache_sb_disk.
+ */
+struct cache_sb {
+ __u64 offset; /* sector where this sb was written */
+ __u64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __u64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __u64 flags;
+ __u64 seq;
+
+ __u64 feature_compat;
+ __u64 feature_incompat;
+ __u64 feature_ro_compat;
+
+ union {
+ struct {
+ /* Cache devices */
+ __u64 nbuckets; /* device size */
+
+ __u16 block_size; /* sectors */
+ __u16 nr_in_set;
+ __u16 nr_this_dev;
+ __u32 bucket_size; /* sectors */
+ };
+ struct {
+ /* Backing devices */
+ __u64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __u32 last_mount; /* time overflow in y2106 */
+
+ __u16 first_bucket;
+ union {
+ __u16 njournal_buckets;
+ __u16 keys;
+ };
+ __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+};
+
+static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
+{
+ return sb->version == BCACHE_SB_VERSION_BDEV
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
+}
+
+BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
+BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
+BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
+#define CACHE_REPLACEMENT_LRU 0U
+#define CACHE_REPLACEMENT_FIFO 1U
+#define CACHE_REPLACEMENT_RANDOM 2U
+
+BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
+#define CACHE_MODE_WRITETHROUGH 0U
+#define CACHE_MODE_WRITEBACK 1U
+#define CACHE_MODE_WRITEAROUND 2U
+#define CACHE_MODE_NONE 3U
+BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
+#define BDEV_STATE_NONE 0U
+#define BDEV_STATE_CLEAN 1U
+#define BDEV_STATE_DIRTY 2U
+#define BDEV_STATE_STALE 3U
+
+/*
+ * Magic numbers
+ *
+ * The various other data structures have their own magic numbers, which are
+ * xored with the first part of the cache set's UUID
+ */
+
+#define JSET_MAGIC 0x245235c1a3625032ULL
+#define PSET_MAGIC 0x6750e15f87337f91ULL
+#define BSET_MAGIC 0x90135c78b99e07f5ULL
+
+static inline __u64 jset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ JSET_MAGIC;
+}
+
+static inline __u64 pset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ PSET_MAGIC;
+}
+
+static inline __u64 bset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ BSET_MAGIC;
+}
+
+/*
+ * Journal
+ *
+ * On disk format for a journal entry:
+ * seq is monotonically increasing; every journal entry has its own unique
+ * sequence number.
+ *
+ * last_seq is the oldest journal entry that still has keys the btree hasn't
+ * flushed to disk yet.
+ *
+ * version is for on disk format changes.
+ */
+
+#define BCACHE_JSET_VERSION_UUIDv1 1
+#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
+#define BCACHE_JSET_VERSION 1
+
+struct jset {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 keys;
+
+ __u64 last_seq;
+
+ BKEY_PADDED(uuid_bucket);
+ BKEY_PADDED(btree_root);
+ __u16 btree_level;
+ __u16 pad[3];
+
+ __u64 prio_bucket[MAX_CACHES_PER_SET];
+
+ union {
+ struct bkey start[0];
+ __u64 d[0];
+ };
+};
+
+/* Bucket prios/gens */
+
+struct prio_set {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 pad;
+
+ __u64 next_bucket;
+
+ struct bucket_disk {
+ __u16 prio;
+ __u8 gen;
+ } __attribute((packed)) data[];
+};
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry {
+ union {
+ struct {
+ __u8 uuid[16];
+ __u8 label[32];
+ __u32 first_reg; /* time overflow in y2106 */
+ __u32 last_reg;
+ __u32 invalidated;
+
+ __u32 flags;
+ /* Size of flash only volumes */
+ __u64 sectors;
+ };
+
+ __u8 pad[128];
+ };
+};
+
+BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
+
+/* Btree nodes */
+
+/* Version 1: Seed pointer into btree node checksum
+ */
+#define BCACHE_BSET_CSUM 1
+#define BCACHE_BSET_VERSION 1
+
+/*
+ * Btree nodes
+ *
+ * On disk a btree node is a list/log of these; within each set the keys are
+ * sorted
+ */
+struct bset {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 keys;
+
+ union {
+ struct bkey start[0];
+ __u64 d[0];
+ };
+};
+
+/* OBSOLETE */
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry_v0 {
+ __u8 uuid[16];
+ __u8 label[32];
+ __u32 first_reg;
+ __u32 last_reg;
+ __u32 invalidated;
+ __u32 pad;
+};
+
+#endif /* _LINUX_BCACHE_H */
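
To make the BITMASK() generator in this header concrete: one invocation emits a paired getter and setter over the named bitfield. For example, BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1) expands (roughly) to:

	static inline __u64 CACHE_SYNC(const struct cache_sb *k)
	{ return (k->flags >> 0) & ~(~0ULL << 1); }

	static inline void SET_CACHE_SYNC(struct cache_sb *k, __u64 v)
	{
		k->flags &= ~(~(~0ULL << 1) << 0);
		k->flags |= (v & ~(~0ULL << 1)) << 0;
	}
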
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index a50dcfda656f..d795c84246b0 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -2,10 +2,10 @@
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H
-#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include "bcache_ondisk.h"
#include "util.h" /* for time_stats */
/*
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 0595559de174..93b67b8d31c3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -141,7 +141,7 @@ static uint64_t btree_csum_set(struct btree *b, struct bset *i)
uint64_t crc = b->key.ptr[0];
void *data = (void *) i + 8, *end = bset_bkey_last(i);
- crc = bch_crc64_update(crc, data, end - data);
+ crc = crc64_be(crc, data, end - data);
return crc ^ 0xffffffffffffffffULL;
}
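
crc64_be() is the generic library helper declared in <linux/crc64.h>, replacing bcache's private bch_crc64_update(); it has the same seed-and-accumulate shape (crc_seed below is an arbitrary starting value):

	#include <linux/crc64.h>

	u64 crc = crc64_be(crc_seed, data, len);	/* big-endian CRC-64 over len bytes */
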
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 116edda845c3..6230dfdd9286 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -127,21 +127,20 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
citer.bi_size = UINT_MAX;
bio_for_each_segment(bv, bio, iter) {
- void *p1 = kmap_atomic(bv.bv_page);
+ void *p1 = bvec_kmap_local(&bv);
void *p2;
cbv = bio_iter_iovec(check, citer);
- p2 = page_address(cbv.bv_page);
+ p2 = bvec_kmap_local(&cbv);
- cache_set_err_on(memcmp(p1 + bv.bv_offset,
- p2 + bv.bv_offset,
- bv.bv_len),
+ cache_set_err_on(memcmp(p1, p2, bv.bv_len),
dc->disk.c,
- "verify failed at dev %s sector %llu",
- dc->backing_dev_name,
+ "verify failed at dev %pg sector %llu",
+ dc->bdev,
(uint64_t) bio->bi_iter.bi_sector);
- kunmap_atomic(p1);
+ kunmap_local(p2);
+ kunmap_local(p1);
bio_advance_iter(check, &citer, bv.bv_len);
}
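
bvec_kmap_local() maps the bio_vec's page with bv_offset already applied, which is why the explicit offset arithmetic disappears in the hunk above; kmap_local mappings nest like a stack, so they must be released in reverse order, as the new code does:

	void *p1 = bvec_kmap_local(&bv);	/* offset already folded in */
	void *p2 = bvec_kmap_local(&cbv);
	/* ... compare p1 and p2 directly ... */
	kunmap_local(p2);	/* reverse order of mapping */
	kunmap_local(p1);
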
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
index 6d2b7b84a7b7..634922c5601d 100644
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -6,7 +6,7 @@
* Copyright 2020 Coly Li <colyli@suse.de>
*
*/
-#include <linux/bcache.h>
+#include "bcache_ondisk.h"
#include "bcache.h"
#include "features.h"
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
index d1c8fd3977fc..09161b89c63e 100644
--- a/drivers/md/bcache/features.h
+++ b/drivers/md/bcache/features.h
@@ -2,10 +2,11 @@
#ifndef _BCACHE_FEATURES_H
#define _BCACHE_FEATURES_H
-#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include "bcache_ondisk.h"
+
#define BCH_FEATURE_COMPAT 0
#define BCH_FEATURE_RO_COMPAT 1
#define BCH_FEATURE_INCOMPAT 2
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index e4388fe3ab7e..9c6f9ec55b72 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -65,15 +65,15 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
* we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
*/
if (bio->bi_opf & REQ_RAHEAD) {
- pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore\n",
- dc->backing_dev_name);
+ pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
+ dc->bdev);
return;
}
errors = atomic_add_return(1, &dc->io_errors);
if (errors < dc->error_limit)
- pr_err("%s: IO error on backing device, unrecoverable\n",
- dc->backing_dev_name);
+ pr_err("%pg: IO error on backing device, unrecoverable\n",
+ dc->bdev);
else
bch_cached_dev_error(dc);
}
@@ -123,13 +123,13 @@ void bch_count_io_errors(struct cache *ca,
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
- pr_err("%s: IO error on %s%s\n",
- ca->cache_dev_name, m,
+ pr_err("%pg: IO error on %s%s\n",
+ ca->bdev, m,
is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
- "%s: too many IO errors %s\n",
- ca->cache_dev_name, m);
+ "%pg: too many IO errors %s\n",
+ ca->bdev, m);
}
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 6d1de889baeb..d15aae6c51c1 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -46,7 +46,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
bio_for_each_segment(bv, bio, iter) {
void *d = kmap(bv.bv_page) + bv.bv_offset;
- csum = bch_crc64_update(csum, d, bv.bv_len);
+ csum = crc64_be(csum, d, bv.bv_len);
kunmap(bv.bv_page);
}
@@ -651,8 +651,8 @@ static void backing_request_endio(struct bio *bio)
*/
if (unlikely(s->iop.writeback &&
bio->bi_opf & REQ_PREFLUSH)) {
- pr_err("Can't flush %s: returned bi_status %i\n",
- dc->backing_dev_name, bio->bi_status);
+ pr_err("Can't flush %pg: returned bi_status %i\n",
+ dc->bdev, bio->bi_status);
} else {
/* set to orig_bio->bi_status in bio_complete() */
s->iop.status = bio->bi_status;
@@ -1163,7 +1163,7 @@ static void quit_max_writeback_rate(struct cache_set *c,
/* Cached devices - read & write stuff */
-blk_qc_t cached_dev_submit_bio(struct bio *bio)
+void cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct block_device *orig_bdev = bio->bi_bdev;
@@ -1176,7 +1176,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
dc->io_disable)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
if (likely(d->c)) {
@@ -1222,8 +1222,6 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
} else
/* I/O request sent to backing device */
detached_dev_do_request(d, bio, orig_bdev, start_time);
-
- return BLK_QC_T_NONE;
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
@@ -1273,7 +1271,7 @@ static void flash_dev_nodata(struct closure *cl)
continue_at(cl, search_free, NULL);
}
-blk_qc_t flash_dev_submit_bio(struct bio *bio)
+void flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
@@ -1282,7 +1280,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
@@ -1298,7 +1296,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
bcache_wq);
- return BLK_QC_T_NONE;
+ return;
} else if (bio_data_dir(bio)) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
@@ -1314,7 +1312,6 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
}
continue_at(cl, search_free, NULL);
- return BLK_QC_T_NONE;
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
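
These request.c hunks track the block-layer change that dropped the blk_qc_t polling cookie: a driver's submit_bio method now returns void, so the BLK_QC_T_NONE returns go away. For reference, the method slot in struct block_device_operations as of this series:

	void (*submit_bio)(struct bio *bio);
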
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 82b38366a95d..38ab4856eaab 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
-blk_qc_t cached_dev_submit_bio(struct bio *bio);
+void cached_dev_submit_bio(struct bio *bio);
void bch_flash_dev_request_init(struct bcache_device *d);
-blk_qc_t flash_dev_submit_bio(struct bio *bio);
+void flash_dev_submit_bio(struct bio *bio);
extern struct kmem_cache *bch_search_cache;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f2874c77ff79..4a9a65dff95e 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1002,7 +1002,7 @@ static void calc_cached_dev_sectors(struct cache_set *c)
struct cached_dev *dc;
list_for_each_entry(dc, &c->cached_devs, list)
- sectors += bdev_sectors(dc->bdev);
+ sectors += bdev_nr_sectors(dc->bdev);
c->cached_dev_sectors = sectors;
}
@@ -1026,8 +1026,8 @@ static int cached_dev_status_update(void *arg)
dc->offline_seconds = 0;
if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
- pr_err("%s: device offline for %d seconds\n",
- dc->backing_dev_name,
+ pr_err("%pg: device offline for %d seconds\n",
+ dc->bdev,
BACKING_DEV_OFFLINE_TIMEOUT);
pr_err("%s: disable I/O request due to backing device offline\n",
dc->disk.name);
@@ -1058,15 +1058,13 @@ int bch_cached_dev_run(struct cached_dev *dc)
};
if (dc->io_disable) {
- pr_err("I/O disabled on cached dev %s\n",
- dc->backing_dev_name);
+ pr_err("I/O disabled on cached dev %pg\n", dc->bdev);
ret = -EIO;
goto out;
}
if (atomic_xchg(&dc->running, 1)) {
- pr_info("cached dev %s is running already\n",
- dc->backing_dev_name);
+ pr_info("cached dev %pg is running already\n", dc->bdev);
ret = -EBUSY;
goto out;
}
@@ -1082,7 +1080,9 @@ int bch_cached_dev_run(struct cached_dev *dc)
closure_sync(&cl);
}
- add_disk(d->disk);
+ ret = add_disk(d->disk);
+ if (ret)
+ goto out;
bd_link_disk_holder(dc->bdev, dc->disk.disk);
/*
* won't show up in the uevent file, use udevadm monitor -e instead
@@ -1154,16 +1154,16 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
- calc_cached_dev_sectors(dc->disk.c);
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
+ calc_cached_dev_sectors(dc->disk.c);
clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
mutex_unlock(&bch_register_lock);
- pr_info("Caching disabled for %s\n", dc->backing_dev_name);
+ pr_info("Caching disabled for %pg\n", dc->bdev);
/* Drop ref we took in cached_dev_detach() */
closure_put(&dc->disk.cl);
@@ -1203,29 +1203,27 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
return -ENOENT;
if (dc->disk.c) {
- pr_err("Can't attach %s: already attached\n",
- dc->backing_dev_name);
+ pr_err("Can't attach %pg: already attached\n", dc->bdev);
return -EINVAL;
}
if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
- pr_err("Can't attach %s: shutting down\n",
- dc->backing_dev_name);
+ pr_err("Can't attach %pg: shutting down\n", dc->bdev);
return -EINVAL;
}
if (dc->sb.block_size < c->cache->sb.block_size) {
/* Will die */
- pr_err("Couldn't attach %s: block size less than set's block size\n",
- dc->backing_dev_name);
+ pr_err("Couldn't attach %pg: block size less than set's block size\n",
+ dc->bdev);
return -EINVAL;
}
/* Check whether already attached */
list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
- pr_err("Tried to attach %s but duplicate UUID already attached\n",
- dc->backing_dev_name);
+ pr_err("Tried to attach %pg but duplicate UUID already attached\n",
+ dc->bdev);
return -EINVAL;
}
@@ -1243,15 +1241,13 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
if (!u) {
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- pr_err("Couldn't find uuid for %s in set\n",
- dc->backing_dev_name);
+ pr_err("Couldn't find uuid for %pg in set\n", dc->bdev);
return -ENOENT;
}
u = uuid_find_empty(c);
if (!u) {
- pr_err("Not caching %s, no room for UUID\n",
- dc->backing_dev_name);
+ pr_err("Not caching %pg, no room for UUID\n", dc->bdev);
return -EINVAL;
}
}
@@ -1319,8 +1315,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
*/
kthread_stop(dc->writeback_thread);
cancel_writeback_rate_update_dwork(dc);
- pr_err("Couldn't run cached device %s\n",
- dc->backing_dev_name);
+ pr_err("Couldn't run cached device %pg\n", dc->bdev);
return ret;
}
@@ -1336,8 +1331,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
- pr_info("Caching %s as %s on set %pU\n",
- dc->backing_dev_name,
+ pr_info("Caching %pg as %s on set %pU\n",
+ dc->bdev,
dc->disk.disk->disk_name,
dc->disk.c->set_uuid);
return 0;
@@ -1461,7 +1456,6 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct cache_set *c;
int ret = -ENOMEM;
- bdevname(bdev, dc->backing_dev_name);
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
@@ -1476,7 +1470,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
- pr_info("registered backing device %s\n", dc->backing_dev_name);
+ pr_info("registered backing device %pg\n", dc->bdev);
list_add(&dc->list, &uncached_devices);
/* attach to a matched cache set if it exists */
@@ -1493,7 +1487,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
return 0;
err:
- pr_notice("error %s: %s\n", dc->backing_dev_name, err);
+ pr_notice("error %pg: %s\n", dc->bdev, err);
bcache_device_stop(&dc->disk);
return ret;
}
@@ -1534,10 +1528,11 @@ static void flash_dev_flush(struct closure *cl)
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
+ int err = -ENOMEM;
struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
GFP_KERNEL);
if (!d)
- return -ENOMEM;
+ goto err_ret;
closure_init(&d->cl, NULL);
set_closure_fn(&d->cl, flash_dev_flush, system_wq);
@@ -1551,9 +1546,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
bcache_device_attach(d, c, u - c->uuids);
bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
- add_disk(d->disk);
+ err = add_disk(d->disk);
+ if (err)
+ goto err;
- if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
+ err = kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache");
+ if (err)
goto err;
bcache_device_link(d, c, "volume");
@@ -1567,7 +1565,8 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
return 0;
err:
kobject_put(&d->kobj);
- return -ENOMEM;
+err_ret:
+ return err;
}
static int flash_devs_run(struct cache_set *c)
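
With add_disk() now returning an error code, flash_dev_run() above threads that code through a two-label unwind: err releases the half-built device through its kobject, err_ret returns when nothing has been set up yet. A self-contained sketch of the same shape, with hypothetical demo_* names:

/*
 * Sketch of the goto-based unwind pattern; demo_run() and its labels
 * are illustrative, not the bcache code. Each failure jumps to the
 * label that undoes exactly what was set up before it.
 */
#include <linux/genhd.h>
#include <linux/slab.h>

struct demo_dev {
	struct gendisk *disk;
};

static int demo_run(void)
{
	int err = -ENOMEM;
	struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		goto err_ret;			/* nothing allocated yet */

	d->disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!d->disk)
		goto err_free;

	err = add_disk(d->disk);		/* returns int in this series */
	if (err)
		goto err_disk;

	return 0;
err_disk:
	blk_cleanup_disk(d->disk);
err_free:
	kfree(d);
err_ret:
	return err;
}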
@@ -1621,8 +1620,8 @@ bool bch_cached_dev_error(struct cached_dev *dc)
/* make others know io_disable is true earlier */
smp_mb();
- pr_err("stop %s: too many IO errors on backing device %s\n",
- dc->disk.disk->disk_name, dc->backing_dev_name);
+ pr_err("stop %s: too many IO errors on backing device %pg\n",
+ dc->disk.disk->disk_name, dc->bdev);
bcache_device_stop(&dc->disk);
return true;
@@ -2338,7 +2337,7 @@ err_btree_alloc:
err_free:
module_put(THIS_MODULE);
if (err)
- pr_notice("error %s: %s\n", ca->cache_dev_name, err);
+ pr_notice("error %pg: %s\n", ca->bdev, err);
return ret;
}
@@ -2348,7 +2347,6 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
const char *err = NULL; /* must be set for any error case */
int ret = 0;
- bdevname(bdev, ca->cache_dev_name);
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
@@ -2390,14 +2388,14 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
- pr_info("registered cache device %s\n", ca->cache_dev_name);
+ pr_info("registered cache device %pg\n", ca->bdev);
out:
kobject_put(&ca->kobj);
err:
if (err)
- pr_notice("error %s: %s\n", ca->cache_dev_name, err);
+ pr_notice("error %pg: %s\n", ca->bdev, err);
return ret;
}
@@ -2617,8 +2615,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
- if (!dc)
+ if (!dc) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
goto out_put_sb_page;
+ }
mutex_lock(&bch_register_lock);
ret = register_bdev(sb, sb_disk, bdev, dc);
@@ -2629,11 +2630,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
- if (!ca)
+ if (!ca) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
goto out_put_sb_page;
+ }
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(sb, sb_disk, bdev, ca) != 0)
+ ret = register_cache(sb, sb_disk, bdev, ca);
+ if (ret)
goto out_free_sb;
}
@@ -2750,7 +2755,7 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
* The reason bch_register_lock is not held to call
* bch_cache_set_stop() and bcache_device_stop() is to
* avoid potential deadlock during reboot, because cache
- * set or bcache device stopping process will acqurie
+ * set or bcache device stopping process will acquire
* bch_register_lock too.
*
* We are safe here because bcache_is_reboot sets to
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 05ac1d6fbbf3..1f0dce30fa75 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -271,7 +271,7 @@ SHOW(__bch_cached_dev)
}
if (attr == &sysfs_backing_dev_name) {
- snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
+ snprintf(buf, BDEVNAME_SIZE + 1, "%pg", dc->bdev);
strcat(buf, "\n");
return strlen(buf);
}
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
index 215df32f567b..c1752ba2e05b 100644
--- a/drivers/md/bcache/sysfs.h
+++ b/drivers/md/bcache/sysfs.h
@@ -51,13 +51,27 @@ STORE(fn) \
#define sysfs_printf(file, fmt, ...) \
do { \
if (attr == &sysfs_ ## file) \
- return snprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__); \
+ return sysfs_emit(buf, fmt "\n", __VA_ARGS__); \
} while (0)
#define sysfs_print(file, var) \
do { \
if (attr == &sysfs_ ## file) \
- return snprint(buf, PAGE_SIZE, var); \
+ return sysfs_emit(buf, \
+ __builtin_types_compatible_p(typeof(var), int) \
+ ? "%i\n" : \
+ __builtin_types_compatible_p(typeof(var), unsigned int) \
+ ? "%u\n" : \
+ __builtin_types_compatible_p(typeof(var), long) \
+ ? "%li\n" : \
+ __builtin_types_compatible_p(typeof(var), unsigned long)\
+ ? "%lu\n" : \
+ __builtin_types_compatible_p(typeof(var), int64_t) \
+ ? "%lli\n" : \
+ __builtin_types_compatible_p(typeof(var), uint64_t) \
+ ? "%llu\n" : \
+ __builtin_types_compatible_p(typeof(var), const char *) \
+ ? "%s\n" : "%i\n", var); \
} while (0)
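
The rewritten sysfs_print folds the removed snprint() helper (see the util.h hunk below) into sysfs_emit(), picking the format string at compile time with __builtin_types_compatible_p. The same dispatch technique in a self-contained sketch; FMT_OF is a hypothetical helper, not kernel API:

/*
 * Compile-time format selection via __builtin_types_compatible_p
 * (a GCC/Clang builtin): the ternary chain collapses to a single
 * string constant for each variable's type.
 */
#include <stdio.h>

#define FMT_OF(var)							\
	(__builtin_types_compatible_p(typeof(var), int) ? "%i\n" :	\
	 __builtin_types_compatible_p(typeof(var), unsigned long)	\
	 ? "%lu\n" : "%i\n")

int main(void)
{
	int i = -3;
	unsigned long ul = 42;

	printf(FMT_OF(i), i);	/* selects "%i\n"  */
	printf(FMT_OF(ul), ul);	/* selects "%lu\n" */
	return 0;
}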
#define sysfs_hprint(file, val) \
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index b64460a76267..6f3cb7c92130 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -340,23 +340,6 @@ static inline int bch_strtoul_h(const char *cp, long *res)
_r; \
})
-#define snprint(buf, size, var) \
- snprintf(buf, size, \
- __builtin_types_compatible_p(typeof(var), int) \
- ? "%i\n" : \
- __builtin_types_compatible_p(typeof(var), unsigned int) \
- ? "%u\n" : \
- __builtin_types_compatible_p(typeof(var), long) \
- ? "%li\n" : \
- __builtin_types_compatible_p(typeof(var), unsigned long)\
- ? "%lu\n" : \
- __builtin_types_compatible_p(typeof(var), int64_t) \
- ? "%lli\n" : \
- __builtin_types_compatible_p(typeof(var), uint64_t) \
- ? "%llu\n" : \
- __builtin_types_compatible_p(typeof(var), const char *) \
- ? "%s\n" : "%i\n", var)
-
ssize_t bch_hprint(char *buf, int64_t v);
bool bch_is_zero(const char *p, size_t n);
@@ -548,14 +531,6 @@ static inline uint64_t bch_crc64(const void *p, size_t len)
return crc ^ 0xffffffffffffffffULL;
}
-static inline uint64_t bch_crc64_update(uint64_t crc,
- const void *p,
- size_t len)
-{
- crc = crc64_be(crc, p, len);
- return crc;
-}
-
/*
* A stepwise-linear pseudo-exponential. This returns 1 << (x >>
* frac_bits), with the less-significant bits filled in by linear
@@ -584,8 +559,4 @@ static inline unsigned int fract_exp_two(unsigned int x,
void bch_bio_map(struct bio *bio, void *base);
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
-static inline sector_t bdev_sectors(struct block_device *bdev)
-{
- return bdev->bd_inode->i_size >> 9;
-}
#endif /* _BCACHE_UTIL_H */
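
Most of the churn from here on replaces open-coded size reads through the block device's backing inode with the bdev_nr_sectors()/bdev_nr_bytes() helpers, as with the bdev_sectors() removal just above. The equivalence, sketched:

/*
 * Both functions return the device size in 512-byte sectors
 * (SECTOR_SHIFT == 9); the helper just hides the inode access.
 */
#include <linux/blkdev.h>
#include <linux/fs.h>

static sector_t size_open_coded(struct block_device *bdev)
{
	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
}

static sector_t size_with_helper(struct block_device *bdev)
{
	return bdev_nr_sectors(bdev);
}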
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 8120da278161..c7560f66dca8 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -45,7 +45,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
* backing volume uses about 2% of the cache for dirty data.
*/
uint32_t bdev_share =
- div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
+ div64_u64(bdev_nr_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
c->cached_dev_sectors);
uint64_t cache_dirty_target =
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index a3b71350eec8..745e3ab4aa0a 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -8,6 +8,7 @@
#define DM_BIO_RECORD_H
#include <linux/bio.h>
+#include <linux/blk-integrity.h>
/*
* There are lots of mutable fields in the bio struct that get
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 50f3e673729c..104ebc1f08dc 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1525,7 +1525,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
- sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t s = bdev_nr_sectors(c->bdev);
if (s >= c->start)
s -= c->start;
else
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 89a73204dbf4..2874f222c313 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -334,7 +334,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
int r;
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
- sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t bdev_size = bdev_nr_sectors(cmd->bdev);
/* FIXME: see if we can lose the max sectors limit */
if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index bdd500447dea..447d030036d1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1940,7 +1940,7 @@ static void cache_dtr(struct dm_target *ti)
static sector_t get_dev_size(struct dm_dev *dev)
{
- return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(dev->bdev);
}
/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index edd22e4d65df..4599632d7a84 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -1514,7 +1514,7 @@ error:
static sector_t get_dev_size(struct dm_dev *dev)
{
- return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(dev->bdev);
}
/*---------------------------------------------------------------------------*/
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 55dccdfbcb22..b855fef4f38a 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -13,7 +13,7 @@
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#include <trace/events/block.h>
@@ -200,7 +200,7 @@ struct dm_table {
struct dm_md_mempools *mempools;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
- struct blk_keyslot_manager *ksm;
+ struct blk_crypto_profile *crypto_profile;
#endif
};
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 916b7da16de2..292f7896f733 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -15,6 +15,7 @@
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 3163e2b1418e..03672204b0e3 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -415,7 +415,7 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
struct dust_device *dd = ti->private;
- sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t size = bdev_nr_sectors(dd->dev->bdev);
bool invalid_msg = false;
int r = -EINVAL;
unsigned long long tmp, block;
@@ -544,8 +544,7 @@ static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (dd->start ||
- ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (dd->start || ti->len != bdev_nr_sectors(dev->bdev))
return 1;
return 0;
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index d25989660a76..7ce5d509b940 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -416,7 +416,7 @@ static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
* Only pass ioctls through if the device sizes match exactly.
*/
*bdev = dev->bdev;
- return !!(ec->start || ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT);
+ return !!(ec->start || ti->len != bdev_nr_sectors(dev->bdev));
}
static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 2a78f6874143..1f6bf152b3c7 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1681,7 +1681,7 @@ static int era_message(struct dm_target *ti, unsigned argc, char **argv,
static sector_t get_dev_size(struct dm_dev *dev)
{
- return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(dev->bdev);
}
static int era_iterate_devices(struct dm_target *ti,
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 3f4139ac1f60..b5f20eba3641 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,7 +168,7 @@ static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
*/
static inline sector_t get_dev_size(struct block_device *bdev)
{
- return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(bdev);
}
static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 4b94ffe6f2d4..345229d7e59c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -456,8 +456,7 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (fc->start ||
- ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ if (fc->start || ti->len != bdev_nr_sectors((*bdev)))
return 1;
return 0;
}
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index 2c5edfbd7711..957999998d70 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -12,6 +12,7 @@
#include "dm-ima.h"
#include <linux/ima.h>
+#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <crypto/hash_info.h>
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index dc03b70f6e65..d0f788e72abf 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4113,11 +4113,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
}
- ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
if (!ic->meta_dev)
ic->meta_device_sectors = ic->data_device_sectors;
else
- ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);
if (!journal_sectors) {
journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
@@ -4367,7 +4367,7 @@ try_smaller_buffer:
DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
DEBUG_print(" journal_entries %u\n", ic->journal_entries);
DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
- DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
+ DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 679b4c0a2eea..66ba16713f69 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -135,8 +135,7 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (lc->start ||
- ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (lc->start || ti->len != bdev_nr_sectors(dev->bdev))
return 1;
return 0;
}
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index d93a4db23512..46de085a9670 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -446,7 +446,7 @@ static int log_super(struct log_writes_c *lc)
static inline sector_t logdev_last_sector(struct log_writes_c *lc)
{
- return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(lc->logdev->bdev);
}
static int log_writes_kthread(void *arg)
@@ -851,7 +851,7 @@ static int log_writes_prepare_ioctl(struct dm_target *ti,
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (ti->len != bdev_nr_sectors(dev->bdev))
return 1;
return 0;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 1ecf75ef276a..06f328928a7f 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -447,7 +447,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
bdev_logical_block_size(lc->header_location.
bdev));
- if (buf_size > i_size_read(dev->bdev->bd_inode)) {
+ if (buf_size > bdev_nr_bytes(dev->bdev)) {
DMWARN("log device %s too small: need %llu bytes",
dev->name, (unsigned long long)buf_size);
kfree(lc);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 694aaca4eea2..90dc9cc48881 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -530,7 +530,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
bdev = pgpath->path.dev->bdev;
q = bdev_get_queue(bdev);
- clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
+ clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
BLK_MQ_REQ_NOWAIT);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
@@ -579,7 +579,7 @@ static void multipath_release_clone(struct request *clone,
clone->io_start_time_ns);
}
- blk_put_request(clone);
+ blk_mq_free_request(clone);
}
/*
@@ -2061,7 +2061,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ if (!r && ti->len != bdev_nr_sectors((*bdev)))
return 1;
return r;
}
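
dm-mpath now calls the blk-mq allocation entry points directly, reflecting the removal of the legacy blk_get_request()/blk_put_request() wrappers. A minimal sketch, assuming the caller holds a reference on the queue (issue_example is illustrative):

/* Allocate a passthrough request without blocking, then release it. */
#include <linux/blk-mq.h>
#include <linux/err.h>

static int issue_example(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* e.g. -EBUSY under pressure */

	/* ... fill in and dispatch rq ... */

	blk_mq_free_request(rq);
	return 0;
}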
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index 1856a1b125cc..875bca30a0dd 100644
--- a/drivers/md/dm-ps-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
@@ -27,6 +27,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/sched/clock.h>
#define DM_MSG_PREFIX "multipath historical-service-time"
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index d9ef52159a22..2b26435a6946 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1261,7 +1261,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
md_rdev_init(jdev);
jdev->mddev = &rs->md;
jdev->bdev = rs->journal_dev.dev->bdev;
- jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
+ jdev->sectors = bdev_nr_sectors(jdev->bdev);
if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
rs->ti->error = "No space for raid4/5/6 journal";
return -ENOSPC;
@@ -1607,7 +1607,7 @@ static int _check_data_dev_sectors(struct raid_set *rs)
rdev_for_each(rdev, &rs->md)
if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
- ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
+ ds = min(ds, bdev_nr_sectors(rdev->bdev));
if (ds < rs->md.dev_sectors) {
rs->ti->error = "Component device(s) too small";
return -EINVAL;
@@ -2662,7 +2662,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
* Make sure we got a minimum amount of free sectors per device
*/
if (rs->data_offset &&
- to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
+ bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
rs->ti->error = data_offset ? "No space for forward reshape" :
"No space for backward reshape";
return -ENOSPC;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index a896dea9750e..579ab6183d4d 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -7,7 +7,6 @@
#include "dm-core.h"
#include "dm-rq.h"
-#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "core-rq"
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 028a92ff6d57..534dc2ca8bb0 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -529,7 +529,7 @@ static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
* Only pass ioctls through if the device sizes match exactly.
*/
if (ti->len + sctx->path_list[path_nr].start !=
- i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ bdev_nr_sectors((*bdev)))
return 1;
return 0;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2111daaacaba..bcddc5effd15 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
@@ -169,7 +170,7 @@ static void free_devices(struct list_head *devices, struct mapped_device *md)
}
}
-static void dm_table_destroy_keyslot_manager(struct dm_table *t);
+static void dm_table_destroy_crypto_profile(struct dm_table *t);
void dm_table_destroy(struct dm_table *t)
{
@@ -199,7 +200,7 @@ void dm_table_destroy(struct dm_table *t)
dm_free_md_mempools(t->mempools);
- dm_table_destroy_keyslot_manager(t);
+ dm_table_destroy_crypto_profile(t);
kfree(t);
}
@@ -226,8 +227,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
{
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
- sector_t dev_size =
- i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t dev_size = bdev_nr_sectors(bdev);
unsigned short logical_block_size_sectors =
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
@@ -1186,8 +1186,8 @@ static int dm_table_register_integrity(struct dm_table *t)
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-struct dm_keyslot_manager {
- struct blk_keyslot_manager ksm;
+struct dm_crypto_profile {
+ struct blk_crypto_profile profile;
struct mapped_device *md;
};
@@ -1213,13 +1213,11 @@ static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
* When an inline encryption key is evicted from a device-mapper device, evict
* it from all the underlying devices.
*/
-static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
+static int dm_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key, unsigned int slot)
{
- struct dm_keyslot_manager *dksm = container_of(ksm,
- struct dm_keyslot_manager,
- ksm);
- struct mapped_device *md = dksm->md;
+ struct mapped_device *md =
+ container_of(profile, struct dm_crypto_profile, profile)->md;
struct dm_keyslot_evict_args args = { key };
struct dm_table *t;
int srcu_idx;
@@ -1239,150 +1237,148 @@ static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
return args.err;
}
-static const struct blk_ksm_ll_ops dm_ksm_ll_ops = {
- .keyslot_evict = dm_keyslot_evict,
-};
-
-static int device_intersect_crypto_modes(struct dm_target *ti,
- struct dm_dev *dev, sector_t start,
- sector_t len, void *data)
+static int
+device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- struct blk_keyslot_manager *parent = data;
- struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
+ struct blk_crypto_profile *parent = data;
+ struct blk_crypto_profile *child =
+ bdev_get_queue(dev->bdev)->crypto_profile;
- blk_ksm_intersect_modes(parent, child);
+ blk_crypto_intersect_capabilities(parent, child);
return 0;
}
-void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
- struct dm_keyslot_manager *dksm = container_of(ksm,
- struct dm_keyslot_manager,
- ksm);
+ struct dm_crypto_profile *dmcp = container_of(profile,
+ struct dm_crypto_profile,
+ profile);
- if (!ksm)
+ if (!profile)
return;
- blk_ksm_destroy(ksm);
- kfree(dksm);
+ blk_crypto_profile_destroy(profile);
+ kfree(dmcp);
}
-static void dm_table_destroy_keyslot_manager(struct dm_table *t)
+static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
- dm_destroy_keyslot_manager(t->ksm);
- t->ksm = NULL;
+ dm_destroy_crypto_profile(t->crypto_profile);
+ t->crypto_profile = NULL;
}
/*
- * Constructs and initializes t->ksm with a keyslot manager that
- * represents the common set of crypto capabilities of the devices
- * described by the dm_table. However, if the constructed keyslot
- * manager does not support a superset of the crypto capabilities
- * supported by the current keyslot manager of the mapped_device,
- * it returns an error instead, since we don't support restricting
- * crypto capabilities on table changes. Finally, if the constructed
- * keyslot manager doesn't actually support any crypto modes at all,
- * it just returns NULL.
+ * Constructs and initializes t->crypto_profile with a crypto profile that
+ * represents the common set of crypto capabilities of the devices described by
+ * the dm_table. However, if the constructed crypto profile doesn't support all
+ * crypto capabilities that are supported by the current mapped_device, it
+ * returns an error instead, since we don't support removing crypto capabilities
+ * on table changes. Finally, if the constructed crypto profile is "empty" (has
+ * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
*/
-static int dm_table_construct_keyslot_manager(struct dm_table *t)
+static int dm_table_construct_crypto_profile(struct dm_table *t)
{
- struct dm_keyslot_manager *dksm;
- struct blk_keyslot_manager *ksm;
+ struct dm_crypto_profile *dmcp;
+ struct blk_crypto_profile *profile;
struct dm_target *ti;
unsigned int i;
- bool ksm_is_empty = true;
+ bool empty_profile = true;
- dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
- if (!dksm)
+ dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
+ if (!dmcp)
return -ENOMEM;
- dksm->md = t->md;
+ dmcp->md = t->md;
- ksm = &dksm->ksm;
- blk_ksm_init_passthrough(ksm);
- ksm->ksm_ll_ops = dm_ksm_ll_ops;
- ksm->max_dun_bytes_supported = UINT_MAX;
- memset(ksm->crypto_modes_supported, 0xFF,
- sizeof(ksm->crypto_modes_supported));
+ profile = &dmcp->profile;
+ blk_crypto_profile_init(profile, 0);
+ profile->ll_ops.keyslot_evict = dm_keyslot_evict;
+ profile->max_dun_bytes_supported = UINT_MAX;
+ memset(profile->modes_supported, 0xFF,
+ sizeof(profile->modes_supported));
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!dm_target_passes_crypto(ti->type)) {
- blk_ksm_intersect_modes(ksm, NULL);
+ blk_crypto_intersect_capabilities(profile, NULL);
break;
}
if (!ti->type->iterate_devices)
continue;
- ti->type->iterate_devices(ti, device_intersect_crypto_modes,
- ksm);
+ ti->type->iterate_devices(ti,
+ device_intersect_crypto_capabilities,
+ profile);
}
- if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
+ if (t->md->queue &&
+ !blk_crypto_has_capabilities(profile,
+ t->md->queue->crypto_profile)) {
DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
- dm_destroy_keyslot_manager(ksm);
+ dm_destroy_crypto_profile(profile);
return -EINVAL;
}
/*
- * If the new KSM doesn't actually support any crypto modes, we may as
- * well represent it with a NULL ksm.
+ * If the new profile doesn't actually support any crypto capabilities,
+ * we may as well represent it with a NULL profile.
*/
- ksm_is_empty = true;
- for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
- if (ksm->crypto_modes_supported[i]) {
- ksm_is_empty = false;
+ for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
+ if (profile->modes_supported[i]) {
+ empty_profile = false;
break;
}
}
- if (ksm_is_empty) {
- dm_destroy_keyslot_manager(ksm);
- ksm = NULL;
+ if (empty_profile) {
+ dm_destroy_crypto_profile(profile);
+ profile = NULL;
}
/*
- * t->ksm is only set temporarily while the table is being set
- * up, and it gets set to NULL after the capabilities have
- * been transferred to the request_queue.
+ * t->crypto_profile is only set temporarily while the table is being
+ * set up, and it gets set to NULL after the profile has been
+ * transferred to the request_queue.
*/
- t->ksm = ksm;
+ t->crypto_profile = profile;
return 0;
}
-static void dm_update_keyslot_manager(struct request_queue *q,
- struct dm_table *t)
+static void dm_update_crypto_profile(struct request_queue *q,
+ struct dm_table *t)
{
- if (!t->ksm)
+ if (!t->crypto_profile)
return;
- /* Make the ksm less restrictive */
- if (!q->ksm) {
- blk_ksm_register(t->ksm, q);
+ /* Make the crypto profile less restrictive. */
+ if (!q->crypto_profile) {
+ blk_crypto_register(t->crypto_profile, q);
} else {
- blk_ksm_update_capabilities(q->ksm, t->ksm);
- dm_destroy_keyslot_manager(t->ksm);
+ blk_crypto_update_capabilities(q->crypto_profile,
+ t->crypto_profile);
+ dm_destroy_crypto_profile(t->crypto_profile);
}
- t->ksm = NULL;
+ t->crypto_profile = NULL;
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static int dm_table_construct_keyslot_manager(struct dm_table *t)
+static int dm_table_construct_crypto_profile(struct dm_table *t)
{
return 0;
}
-void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}
-static void dm_table_destroy_keyslot_manager(struct dm_table *t)
+static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}
-static void dm_update_keyslot_manager(struct request_queue *q,
- struct dm_table *t)
+static void dm_update_crypto_profile(struct request_queue *q,
+ struct dm_table *t)
{
}
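
The keyslot-manager-to-crypto-profile rename keeps dm's embedding trick: the generic blk_crypto_profile sits inside a dm-private wrapper so that callbacks such as dm_keyslot_evict() can climb back to the owning mapped_device. The pattern in isolation, with the types copied from the hunk above:

/* container_of() recovers the wrapper from the embedded member. */
#include <linux/blk-crypto-profile.h>
#include <linux/kernel.h>

struct dm_crypto_profile {
	struct blk_crypto_profile profile;	/* must stay embedded */
	struct mapped_device *md;
};

static struct mapped_device *
profile_to_md(struct blk_crypto_profile *profile)
{
	return container_of(profile, struct dm_crypto_profile, profile)->md;
}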
@@ -1414,9 +1410,9 @@ int dm_table_complete(struct dm_table *t)
return r;
}
- r = dm_table_construct_keyslot_manager(t);
+ r = dm_table_construct_crypto_profile(t);
if (r) {
- DMERR("could not construct keyslot manager.");
+ DMERR("could not construct crypto profile.");
return r;
}
@@ -2070,7 +2066,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
return r;
}
- dm_update_keyslot_manager(q, t);
+ dm_update_crypto_profile(q, t);
disk_update_readahead(t->md->disk);
return 0;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index c88ed14d49e6..1a96a07cbf44 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -549,7 +549,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
int r;
struct dm_block *sblock;
struct thin_disk_superblock *disk_super;
- sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t bdev_size = bdev_nr_sectors(pmd->bdev);
if (bdev_size > THIN_METADATA_MAX_SECTORS)
bdev_size = THIN_METADATA_MAX_SECTORS;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4c67b77c23c1..ec119d2422d5 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3212,7 +3212,7 @@ static int metadata_pre_commit_callback(void *context)
static sector_t get_dev_size(struct block_device *bdev)
{
- return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(bdev);
}
static void warn_if_metadata_device_too_big(struct block_device *bdev)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 88288c8d6bc8..a7efe83aad29 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -18,6 +18,7 @@
#include "dm-verity-verify-sig.h"
#include <linux/module.h>
#include <linux/reboot.h>
+#include <linux/scatterlist.h>
#define DM_MSG_PREFIX "verity"
@@ -833,8 +834,7 @@ static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
*bdev = v->data_dev->bdev;
- if (v->data_start ||
- ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (v->data_start || ti->len != bdev_nr_sectors(v->data_dev->bdev))
return 1;
return 0;
}
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 18320444fb0a..017806096b91 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -2341,7 +2341,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Cache data device lookup failed";
goto bad;
}
- wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
+ wc->memory_map_size = bdev_nr_bytes(wc->ssd_dev->bdev);
/*
* Parse the cache block size
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index ae1bc48c0043..8dc21c09329f 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -733,7 +733,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path,
dev->dev_idx = idx;
(void)bdevname(dev->bdev, dev->name);
- dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ dev->capacity = bdev_nr_sectors(bdev);
if (ti->begin) {
ti->error = "Partial mapping is not supported";
goto err;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 76d9da49fda7..63aa52263658 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -29,7 +29,7 @@
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#define DM_MSG_PREFIX "core"
@@ -1183,14 +1183,13 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
mutex_unlock(&md->swap_bios_lock);
}
-static blk_qc_t __map_bio(struct dm_target_io *tio)
+static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
struct bio *clone = &tio->clone;
struct dm_io *io = tio->io;
struct dm_target *ti = tio->ti;
- blk_qc_t ret = BLK_QC_T_NONE;
clone->bi_end_io = clone_endio;
@@ -1226,7 +1225,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
- ret = submit_bio_noacct(clone);
+ submit_bio_noacct(clone);
break;
case DM_MAPIO_KILL:
if (unlikely(swap_bios_limit(ti, clone))) {
@@ -1248,8 +1247,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
-
- return ret;
}
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
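
These dm.c hunks track the block layer dropping the blk_qc_t cookie from the submission path: ->submit_bio and submit_bio_noacct() become void, so dm simply stops plumbing a return value through __map_bio() and friends. The resulting callback shape, as a sketch (example_submit_bio is illustrative, not the dm code):

#include <linux/blkdev.h>

static void example_submit_bio(struct bio *bio)
{
	/* split/remap as needed, then pass the bio down the stack */
	submit_bio_noacct(bio);		/* void in this series */
}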
@@ -1336,7 +1333,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
}
-static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
+static void __clone_and_map_simple_bio(struct clone_info *ci,
struct dm_target_io *tio, unsigned *len)
{
struct bio *clone = &tio->clone;
@@ -1346,8 +1343,7 @@ static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
__bio_clone_fast(clone, ci->bio);
if (len)
bio_setup_sector(clone, ci->sector, *len);
-
- return __map_bio(tio);
+ __map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
@@ -1361,7 +1357,7 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
while ((bio = bio_list_pop(&blist))) {
tio = container_of(bio, struct dm_target_io, clone);
- (void) __clone_and_map_simple_bio(ci, tio, len);
+ __clone_and_map_simple_bio(ci, tio, len);
}
}
@@ -1405,7 +1401,7 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
free_tio(tio);
return r;
}
- (void) __map_bio(tio);
+ __map_bio(tio);
return 0;
}
@@ -1520,11 +1516,10 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
-static blk_qc_t __split_and_process_bio(struct mapped_device *md,
+static void __split_and_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
- blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
init_clone_info(&ci, md, map, bio);
@@ -1567,19 +1562,17 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
bio_chain(b, bio);
trace_block_split(b, bio->bi_iter.bi_sector);
- ret = submit_bio_noacct(bio);
+ submit_bio_noacct(bio);
}
}
/* drop the extra reference count */
dm_io_dec_pending(ci.io, errno_to_blk_status(error));
- return ret;
}
-static blk_qc_t dm_submit_bio(struct bio *bio)
+static void dm_submit_bio(struct bio *bio)
{
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
- blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
@@ -1609,10 +1602,9 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
if (is_abnormal_io(bio))
blk_queue_split(&bio);
- ret = __split_and_process_bio(md, map, bio);
+ __split_and_process_bio(md, map, bio);
out:
dm_put_live_table(md, srcu_idx);
- return ret;
}
/*-----------------------------------------------------------------
@@ -1671,14 +1663,14 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-static void dm_queue_destroy_keyslot_manager(struct request_queue *q)
+static void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
- dm_destroy_keyslot_manager(q->ksm);
+ dm_destroy_crypto_profile(q->crypto_profile);
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q)
+static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
@@ -1704,7 +1696,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
dm_sysfs_exit(md);
del_gendisk(md->disk);
}
- dm_queue_destroy_keyslot_manager(md->queue);
+ dm_queue_destroy_crypto_profile(md->queue);
blk_cleanup_disk(md->disk);
}
@@ -2086,7 +2078,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
if (r)
return r;
- add_disk(md->disk);
+ r = add_disk(md->disk);
+ if (r)
+ return r;
r = dm_sysfs_init(md);
if (r) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6c0c3d0d905a..5111ed966947 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -41,6 +41,7 @@
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
@@ -51,6 +52,7 @@
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
+#include <linux/major.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
@@ -352,7 +354,7 @@ static bool create_on_open = true;
*/
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
-void md_new_event(struct mddev *mddev)
+void md_new_event(void)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
@@ -441,19 +443,19 @@ check_suspended:
}
EXPORT_SYMBOL(md_handle_request);
-static blk_qc_t md_submit_bio(struct bio *bio)
+static void md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
- return BLK_QC_T_NONE;
+ return;
}
if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
bio_io_error(bio);
- return BLK_QC_T_NONE;
+ return;
}
blk_queue_split(&bio);
@@ -462,15 +464,13 @@ static blk_qc_t md_submit_bio(struct bio *bio)
if (bio_sectors(bio) != 0)
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
/* bio could be mergeable after passing to underlayer */
bio->bi_opf &= ~REQ_NOMERGE;
md_handle_request(mddev, bio);
-
- return BLK_QC_T_NONE;
}
/* mddev_suspend makes sure no new requests are submitted
@@ -888,8 +888,7 @@ static struct md_personality *find_pers(int level, char *clevel)
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
- sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
- return MD_NEW_SIZE_SECTORS(num_sectors);
+ return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
}
static int alloc_disk_sb(struct md_rdev *rdev)
@@ -1631,8 +1630,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
*/
switch(minor_version) {
case 0:
- sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
- sb_start -= 8*2;
+ sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
sb_start &= ~(sector_t)(4*2-1);
break;
case 1:
@@ -1787,10 +1785,9 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
else
ret = 0;
}
- if (minor_version) {
- sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
- sectors -= rdev->data_offset;
- } else
+ if (minor_version)
+ sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
+ else
sectors = rdev->sb_start;
if (sectors < le64_to_cpu(sb->data_size))
return -EINVAL;
@@ -2168,8 +2165,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
return 0; /* too confusing */
if (rdev->sb_start < rdev->data_offset) {
/* minor versions 1 and 2; superblock before data */
- max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
- max_sectors -= rdev->data_offset;
+ max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
} else if (rdev->mddev->bitmap_info.offset) {
@@ -2178,7 +2174,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
} else {
/* minor version 0; superblock after data */
sector_t sb_start, bm_space;
- sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
+ sector_t dev_size = bdev_nr_sectors(rdev->bdev);
/* 8K is for superblock */
sb_start = dev_size - 8*2;
@@ -2886,7 +2882,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_new_event(mddev);
+ md_new_event();
md_wakeup_thread(mddev->thread);
return 0;
}
@@ -2976,7 +2972,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* -write_error - clears WriteErrorSeen
* {,-}failfast - set/clear FailFast
*/
+
+ struct mddev *mddev = rdev->mddev;
int err = -EINVAL;
+ bool need_update_sb = false;
+
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
@@ -2991,7 +2991,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
- struct mddev *mddev = rdev->mddev;
err = 0;
if (mddev_is_clustered(mddev))
err = md_cluster_ops->remove_disk(mddev, rdev);
@@ -3002,16 +3001,18 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
- md_new_event(mddev);
+ md_new_event();
}
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
mddev_create_serial_pool(rdev->mddev, rdev, false);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
mddev_destroy_serial_pool(rdev->mddev, rdev, false);
clear_bit(WriteMostly, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "blocked")) {
set_bit(Blocked, &rdev->flags);
@@ -3037,9 +3038,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
} else if (cmd_match(buf, "failfast")) {
set_bit(FailFast, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-failfast")) {
clear_bit(FailFast, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags)) {
@@ -3118,6 +3121,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
clear_bit(ExternalBbl, &rdev->flags);
err = 0;
}
+ if (need_update_sb)
+ md_update_sb(mddev, 1);
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
@@ -3382,7 +3387,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
if (!sectors)
return -EBUSY;
} else if (!sectors)
- sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
+ sectors = bdev_nr_sectors(rdev->bdev) -
rdev->data_offset;
if (!my_mddev->pers->resize)
/* Cannot change size for RAID0 or Linear etc */
@@ -3709,7 +3714,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
kobject_init(&rdev->kobj, &rdev_ktype);
- size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
+ size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
if (!size) {
pr_warn("md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
@@ -4099,7 +4104,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
if (!mddev->thread)
md_update_sb(mddev, 1);
sysfs_notify_dirent_safe(mddev->sysfs_level);
- md_new_event(mddev);
+ md_new_event();
rv = len;
out_unlock:
mddev_unlock(mddev);
@@ -4620,7 +4625,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
export_rdev(rdev);
mddev_unlock(mddev);
if (!err)
- md_new_event(mddev);
+ md_new_event();
return err ? err : len;
}
@@ -5490,6 +5495,10 @@ static struct attribute *md_default_attrs[] = {
NULL,
};
+static const struct attribute_group md_default_group = {
+ .attrs = md_default_attrs,
+};
+
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_last_scan_mode.attr,
@@ -5512,6 +5521,12 @@ static const struct attribute_group md_redundancy_group = {
.attrs = md_redundancy_attrs,
};
+static const struct attribute_group *md_attr_groups[] = {
+ &md_default_group,
+ &md_bitmap_group,
+ NULL,
+};
+
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
@@ -5587,7 +5602,7 @@ static const struct sysfs_ops md_sysfs_ops = {
static struct kobj_type md_ktype = {
.release = md_free,
.sysfs_ops = &md_sysfs_ops,
- .default_attrs = md_default_attrs,
+ .default_groups = md_attr_groups,
};
int mdp_major = 0;
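
md's kobj_type moves from the deprecated default_attrs pointer to default_groups, which also lets the bitmap group ride along instead of being added and removed with explicit sysfs_create_group()/sysfs_remove_group() calls. The registration pattern in miniature (all example_* names are hypothetical):

/* NULL-terminated attribute list, wrapped in a group list. */
#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	/* &example_attr.attr, ... */
	NULL,
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
};

static const struct attribute_group *example_groups[] = {
	&example_group,
	NULL,
};

static struct kobj_type example_ktype = {
	.default_groups = example_groups,
};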
@@ -5596,7 +5611,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
- sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
@@ -5663,7 +5677,7 @@ static int md_alloc(dev_t dev, char *name)
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
error = -EEXIST;
- goto abort;
+ goto out_unlock_disks_mutex;
}
spin_unlock(&all_mddevs_lock);
}
@@ -5676,7 +5690,7 @@ static int md_alloc(dev_t dev, char *name)
error = -ENOMEM;
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
- goto abort;
+ goto out_unlock_disks_mutex;
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -5700,27 +5714,25 @@ static int md_alloc(dev_t dev, char *name)
disk->flags |= GENHD_FL_EXT_DEVT;
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
- add_disk(disk);
+ error = add_disk(disk);
+ if (error)
+ goto out_cleanup_disk;
error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
- if (error) {
- /* This isn't possible, but as kobject_init_and_add is marked
- * __must_check, we must do something with the result
- */
- pr_debug("md: cannot register %s/md - name in use\n",
- disk->disk_name);
- error = 0;
- }
- if (mddev->kobj.sd &&
- sysfs_create_group(&mddev->kobj, &md_bitmap_group))
- pr_debug("pointless warning\n");
- abort:
+ if (error)
+ goto out_del_gendisk;
+
+ kobject_uevent(&mddev->kobj, KOBJ_ADD);
+ mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
+ mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
+ goto out_unlock_disks_mutex;
+
+out_del_gendisk:
+ del_gendisk(disk);
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
+out_unlock_disks_mutex:
mutex_unlock(&disks_mutex);
- if (!error && mddev->kobj.sd) {
- kobject_uevent(&mddev->kobj, KOBJ_ADD);
- mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
- mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
- }
mddev_put(mddev);
return error;
}
@@ -6034,7 +6046,7 @@ int md_run(struct mddev *mddev)
if (mddev->sb_flags)
md_update_sb(mddev, 0);
- md_new_event(mddev);
+ md_new_event();
return 0;
bitmap_abort:
@@ -6424,7 +6436,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
- md_new_event(mddev);
+ md_new_event();
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
@@ -6880,7 +6892,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
if (!mddev->persistent) {
pr_debug("md: nonpersistent superblock ...\n");
- rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+ rdev->sb_start = bdev_nr_sectors(rdev->bdev);
} else
rdev->sb_start = calc_dev_sboffset(rdev);
rdev->sectors = rdev->sb_start;
@@ -6928,7 +6940,7 @@ kick_rdev:
md_wakeup_thread(mddev->thread);
else
md_update_sb(mddev, 1);
- md_new_event(mddev);
+ md_new_event();
return 0;
busy:
@@ -6967,7 +6979,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
if (mddev->persistent)
rdev->sb_start = calc_dev_sboffset(rdev);
else
- rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+ rdev->sb_start = bdev_nr_sectors(rdev->bdev);
rdev->sectors = rdev->sb_start;
@@ -7001,7 +7013,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- md_new_event(mddev);
+ md_new_event();
return 0;
abort_export:
@@ -7975,7 +7987,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
- md_new_event(mddev);
+ md_new_event();
}
EXPORT_SYMBOL(md_error);
@@ -8859,7 +8871,7 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync = 3; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
- md_new_event(mddev);
+ md_new_event();
update_time = jiffies;
blk_start_plug(&plug);
@@ -8930,7 +8942,7 @@ void md_do_sync(struct md_thread *thread)
/* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
- md_new_event(mddev);
+ md_new_event();
if (last_check + window > io_sectors || j == max_sectors)
continue;
@@ -9154,7 +9166,7 @@ static int remove_and_add_spares(struct mddev *mddev,
sysfs_link_rdev(mddev, rdev);
if (!test_bit(Journal, &rdev->flags))
spares++;
- md_new_event(mddev);
+ md_new_event();
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
@@ -9188,7 +9200,7 @@ static void md_start_sync(struct work_struct *ws)
} else
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- md_new_event(mddev);
+ md_new_event();
}
/*
@@ -9447,7 +9459,7 @@ void md_reap_sync_thread(struct mddev *mddev)
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- md_new_event(mddev);
+ md_new_event();
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 4c96c36bd01a..53ea7a6961de 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -731,7 +731,7 @@ extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags,
bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
-extern void md_new_event(struct mddev *mddev);
+extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 19598bd38939..7dc8026cf6ee 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1496,7 +1496,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (!r1_bio->bios[i])
continue;
- if (first_clone) {
+ if (first_clone && test_bit(WriteMostly, &rdev->flags)) {
/* do behind I/O ?
* Not if there are too many, or cannot
* allocate memory, or a reader on WriteMostly
@@ -1529,13 +1529,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
r1_bio->bios[i] = mbio;
- mbio->bi_iter.bi_sector = (r1_bio->sector +
- conf->mirrors[i].rdev->data_offset);
- bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
+ mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
+ bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid1_end_write_request;
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
- if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
- !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
+ if (test_bit(FailFast, &rdev->flags) &&
+ !test_bit(WriteMostly, &rdev->flags) &&
conf->raid_disks - mddev->degraded > 1)
mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r1_bio;
@@ -1546,7 +1545,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
+ mbio->bi_bdev = (void *)rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index aa2636582841..dde98f65bd04 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4647,7 +4647,7 @@ out:
}
conf->reshape_checkpoint = jiffies;
md_wakeup_thread(mddev->sync_thread);
- md_new_event(mddev);
+ md_new_event();
return 0;
abort:
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 02ed53b20654..9c1a5877cf9f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7732,10 +7732,7 @@ static int raid5_run(struct mddev *mddev)
* discard data disk but write parity disk
*/
stripe = stripe * PAGE_SIZE;
- /* Round up to power of 2, as discard handling
- * currently assumes that */
- while ((stripe-1) & stripe)
- stripe = (stripe | (stripe-1)) + 1;
+ stripe = roundup_pow_of_two(stripe);
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
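
The loop removed above rounds stripe up to the next power of two by repeatedly filling in the bits below the highest set bit; roundup_pow_of_two() computes the same value in one step. Side by side, for stripe = 12 both yield 16:

#include <linux/log2.h>

static unsigned long round_up_open_coded(unsigned long stripe)
{
	/* e.g. 12 -> (12 | 11) + 1 = 16; loop exits once a power of 2 */
	while ((stripe - 1) & stripe)
		stripe = (stripe | (stripe - 1)) + 1;
	return stripe;
}

static unsigned long round_up_helper(unsigned long stripe)
{
	return roundup_pow_of_two(stripe);	/* 12 -> 16 as well */
}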
@@ -8282,7 +8279,7 @@ static int raid5_start_reshape(struct mddev *mddev)
}
conf->reshape_checkpoint = jiffies;
md_wakeup_thread(mddev->sync_thread);
- md_new_event(mddev);
+ md_new_event();
return 0;
}
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index 9ba3a00dce31..94ef3349b8d6 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -8,6 +8,8 @@ config CEC_NOTIFIER
config CEC_PIN
bool
+menu "CEC support"
+
config MEDIA_CEC_RC
bool "HDMI CEC RC integration"
depends on CEC_CORE && RC_CORE
@@ -37,3 +39,5 @@ source "drivers/media/cec/i2c/Kconfig"
source "drivers/media/cec/platform/Kconfig"
source "drivers/media/cec/usb/Kconfig"
endif
+
+endmenu
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index 8c613aa649c6..a60b6f03a6a1 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -957,7 +957,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
* so we can kick off the pending transmit.
*/
delta = ktime_us_delta(ts, pin->ts);
- if (delta / CEC_TIM_DATA_BIT_TOTAL >
+ if (delta / CEC_TIM_DATA_BIT_TOTAL >=
pin->tx_signal_free_time) {
pin->tx_nacked = false;
if (tx_custom_start(pin))
@@ -968,7 +968,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
cec_pin_low(pin);
break;
}
- if (delta / CEC_TIM_DATA_BIT_TOTAL >
+ if (delta / CEC_TIM_DATA_BIT_TOTAL >=
pin->tx_signal_free_time - 1)
pin->state = CEC_ST_TX_WAIT;
break;
diff --git a/drivers/media/cec/platform/meson/ao-cec-g12a.c b/drivers/media/cec/platform/meson/ao-cec-g12a.c
index 891533060d49..68fe6d6a8178 100644
--- a/drivers/media/cec/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/cec/platform/meson/ao-cec-g12a.c
@@ -633,7 +633,6 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
{
struct meson_ao_cec_g12a_device *ao_cec;
struct device *hdmi_dev;
- struct resource *res;
void __iomem *base;
int ret, irq;
@@ -664,8 +663,7 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
ao_cec->adap->owner = THIS_MODULE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_probe_adapter;
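
Each of the CEC probe conversions that follow collapses the platform_get_resource() + devm_ioremap_resource() pair into devm_platform_ioremap_resource(), dropping the local struct resource along the way. The before/after shape, sketched:

#include <linux/platform_device.h>

static void __iomem *map_regs(struct platform_device *pdev)
{
	/*
	 * Old two-step form:
	 *   res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   base = devm_ioremap_resource(&pdev->dev, res);
	 * New one-step helper, same error semantics (ERR_PTR on failure):
	 */
	return devm_platform_ioremap_resource(pdev, 0);
}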
diff --git a/drivers/media/cec/platform/meson/ao-cec.c b/drivers/media/cec/platform/meson/ao-cec.c
index 09aff82c3773..6b440f0635d9 100644
--- a/drivers/media/cec/platform/meson/ao-cec.c
+++ b/drivers/media/cec/platform/meson/ao-cec.c
@@ -602,7 +602,6 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
{
struct meson_ao_cec_device *ao_cec;
struct device *hdmi_dev;
- struct resource *res;
int ret, irq;
hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
@@ -626,8 +625,7 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
ao_cec->adap->owner = THIS_MODULE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
+ ao_cec->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ao_cec->base)) {
ret = PTR_ERR(ao_cec->base);
goto out_probe_adapter;
diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c
index 028a09a7531e..ce9a9d922f11 100644
--- a/drivers/media/cec/platform/s5p/s5p_cec.c
+++ b/drivers/media/cec/platform/s5p/s5p_cec.c
@@ -178,7 +178,6 @@ static int s5p_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *hdmi_dev;
- struct resource *res;
struct s5p_cec_dev *cec;
bool needs_hpd = of_property_read_bool(pdev->dev.of_node, "needs-hpd");
int ret;
@@ -212,8 +211,7 @@ static int s5p_cec_probe(struct platform_device *pdev)
if (IS_ERR(cec->pmu))
return -EPROBE_DEFER;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cec->reg = devm_ioremap_resource(dev, res);
+ cec->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cec->reg))
return PTR_ERR(cec->reg);
diff --git a/drivers/media/cec/platform/sti/stih-cec.c b/drivers/media/cec/platform/sti/stih-cec.c
index f0c73e64b586..abf8e8bcbb34 100644
--- a/drivers/media/cec/platform/sti/stih-cec.c
+++ b/drivers/media/cec/platform/sti/stih-cec.c
@@ -299,7 +299,6 @@ static const struct cec_adap_ops sti_cec_adap_ops = {
static int stih_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct stih_cec *cec;
struct device *hdmi_dev;
int ret;
@@ -315,8 +314,7 @@ static int stih_cec_probe(struct platform_device *pdev)
cec->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cec->regs = devm_ioremap_resource(dev, res);
+ cec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cec->regs))
return PTR_ERR(cec->regs);
diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
index 0ffd89712536..40db7911b437 100644
--- a/drivers/media/cec/platform/stm32/stm32-cec.c
+++ b/drivers/media/cec/platform/stm32/stm32-cec.c
@@ -255,7 +255,6 @@ static const struct regmap_config stm32_cec_regmap_cfg = {
static int stm32_cec_probe(struct platform_device *pdev)
{
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_MODE_MONITOR_ALL;
- struct resource *res;
struct stm32_cec *cec;
void __iomem *mmio;
int ret;
@@ -266,8 +265,7 @@ static int stm32_cec_probe(struct platform_device *pdev)
cec->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio = devm_ioremap_resource(&pdev->dev, res);
+ mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio))
return PTR_ERR(mmio);
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index bceaf91faa15..7d4bc2733f2b 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -414,10 +414,10 @@ struct smscore_registry_entry_t {
static struct list_head g_smscore_notifyees;
static struct list_head g_smscore_devices;
-static struct mutex g_smscore_deviceslock;
+static DEFINE_MUTEX(g_smscore_deviceslock);
static struct list_head g_smscore_registry;
-static struct mutex g_smscore_registrylock;
+static DEFINE_MUTEX(g_smscore_registrylock);
static int default_mode = DEVICE_MODE_NONE;
@@ -2119,10 +2119,7 @@ static int __init smscore_module_init(void)
{
INIT_LIST_HEAD(&g_smscore_notifyees);
INIT_LIST_HEAD(&g_smscore_devices);
- mutex_init(&g_smscore_deviceslock);
-
INIT_LIST_HEAD(&g_smscore_registry);
- mutex_init(&g_smscore_registrylock);
return 0;
}
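DEFINE_MUTEX() produces a mutex that is fully initialized at build time, which is what makes the mutex_init() calls removable from smscore_module_init(). A minimal sketch of the idiom:

#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* no runtime init needed */

static int __init example_init(void)
{
	mutex_lock(&example_lock);	/* safe without mutex_init() */
	mutex_unlock(&example_lock);
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");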
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 508ac295eb06..b203c1e26353 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -68,13 +68,13 @@ module_param(debug, int, 0644);
err; \
})
-#define call_ptr_memop(vb, op, args...) \
+#define call_ptr_memop(op, vb, args...) \
({ \
struct vb2_queue *_q = (vb)->vb2_queue; \
void *ptr; \
\
log_memop(vb, op); \
- ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \
+ ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL; \
if (!IS_ERR_OR_NULL(ptr)) \
(vb)->cnt_mem_ ## op++; \
ptr; \
@@ -144,9 +144,9 @@ module_param(debug, int, 0644);
((vb)->vb2_queue->mem_ops->op ? \
(vb)->vb2_queue->mem_ops->op(args) : 0)
-#define call_ptr_memop(vb, op, args...) \
+#define call_ptr_memop(op, vb, args...) \
((vb)->vb2_queue->mem_ops->op ? \
- (vb)->vb2_queue->mem_ops->op(args) : NULL)
+ (vb)->vb2_queue->mem_ops->op(vb, args) : NULL)
#define call_void_memop(vb, op, args...) \
do { \
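The reorder in both macro variants above makes call_ptr_memop() inject the vb2_buffer as the implicit first argument of every pointer-returning mem_ops callback; call_void_memop() continues unchanged. A call site under the new convention, with its rough non-debug expansion:

	void *vaddr = call_ptr_memop(vaddr, vb, vb->planes[0].mem_priv);

	/* expands to approximately:
	 *	vb->vb2_queue->mem_ops->vaddr
	 *		? vb->vb2_queue->mem_ops->vaddr(vb,
	 *					vb->planes[0].mem_priv)
	 *		: NULL;
	 */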
@@ -230,9 +230,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
if (size < vb->planes[plane].length)
goto free;
- mem_priv = call_ptr_memop(vb, alloc,
- q->alloc_devs[plane] ? : q->dev,
- q->dma_attrs, size, q->dma_dir, q->gfp_flags);
+ mem_priv = call_ptr_memop(alloc,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ size);
if (IS_ERR_OR_NULL(mem_priv)) {
if (mem_priv)
ret = PTR_ERR(mem_priv);
@@ -326,12 +327,9 @@ static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
if (vb->synced)
return;
- if (vb->need_cache_sync_on_prepare) {
- for (plane = 0; plane < vb->num_planes; ++plane)
- call_void_memop(vb, prepare,
- vb->planes[plane].mem_priv);
- }
vb->synced = 1;
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
}
/*
@@ -345,12 +343,9 @@ static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
if (!vb->synced)
return;
- if (vb->need_cache_sync_on_finish) {
- for (plane = 0; plane < vb->num_planes; ++plane)
- call_void_memop(vb, finish,
- vb->planes[plane].mem_priv);
- }
vb->synced = 0;
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, finish, vb->planes[plane].mem_priv);
}
/*
@@ -381,6 +376,27 @@ static void __setup_offsets(struct vb2_buffer *vb)
}
}
+static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
+{
+ /*
+ * DMA exporter should take care of cache syncs, so we can avoid
+	 * DMA exporter should take care of cache syncs, so we can avoid
+	 * explicit ->prepare()/->finish() syncs. For all other ->memory
+	 * types we always need ->prepare() and/or ->finish() cache syncs.
+ if (q->memory == VB2_MEMORY_DMABUF) {
+ vb->skip_cache_sync_on_finish = 1;
+ vb->skip_cache_sync_on_prepare = 1;
+ return;
+ }
+
+ /*
+ * ->finish() cache sync can be avoided when queue direction is
+ * TO_DEVICE.
+ */
+ if (q->dma_dir == DMA_TO_DEVICE)
+ vb->skip_cache_sync_on_finish = 1;
+}
+
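To make the resulting defaults concrete (the logic is exactly that of init_buffer_cache_hints() above; the queue below is hypothetical):

	struct vb2_queue out_q = {
		.memory  = VB2_MEMORY_MMAP,
		.dma_dir = DMA_TO_DEVICE,	/* output queue */
	};

	/* init_buffer_cache_hints(&out_q, vb) leaves:
	 *	vb->skip_cache_sync_on_prepare == 0  (CPU writes flushed)
	 *	vb->skip_cache_sync_on_finish  == 1  (device never writes)
	 * whereas a VB2_MEMORY_DMABUF queue would set both skip flags.
	 */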
/*
* __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
* video buffer memory for all buffers/planes on the queue and initializes the
@@ -414,17 +430,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
vb->index = q->num_buffers + buffer;
vb->type = q->type;
vb->memory = memory;
- /*
- * We need to set these flags here so that the videobuf2 core
- * will call ->prepare()/->finish() cache sync/flush on vb2
- * buffers when appropriate. However, we can avoid explicit
- * ->prepare() and ->finish() cache sync for DMABUF buffers,
- * because DMA exporter takes care of it.
- */
- if (q->memory != VB2_MEMORY_DMABUF) {
- vb->need_cache_sync_on_prepare = 1;
- vb->need_cache_sync_on_finish = 1;
- }
+ init_buffer_cache_hints(q, vb);
for (plane = 0; plane < num_planes; ++plane) {
vb->planes[plane].length = plane_sizes[plane];
vb->planes[plane].min_length = plane_sizes[plane];
@@ -732,11 +738,30 @@ int vb2_verify_memory_type(struct vb2_queue *q,
}
EXPORT_SYMBOL(vb2_verify_memory_type);
+static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
+{
+ q->non_coherent_mem = 0;
+
+ if (!vb2_queue_allows_cache_hints(q))
+ return;
+ q->non_coherent_mem = non_coherent_mem;
+}
+
+static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
+{
+ if (non_coherent_mem != q->non_coherent_mem) {
+ dprintk(q, 1, "memory coherency model mismatch\n");
+ return false;
+ }
+ return true;
+}
+
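set_queue_coherency() gates the userspace request on vb2_queue_allows_cache_hints(); for reference, that predicate (quoted from the videobuf2 core header from memory, so treat as approximate) only honours hints on MMAP queues whose driver opted in:

static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q)
{
	return q->allow_cache_hints && q->memory == VB2_MEMORY_MMAP;
}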
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count)
+ unsigned int flags, unsigned int *count)
{
unsigned int num_buffers, allocated_buffers, num_planes = 0;
unsigned plane_sizes[VB2_MAX_PLANES] = { };
+ bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
unsigned int i;
int ret;
@@ -751,7 +776,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
}
if (*count == 0 || q->num_buffers != 0 ||
- (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
+ (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
+ !verify_coherency_flags(q, non_coherent_mem)) {
/*
* We already have buffers allocated, so first check if they
* are not in use and can be freed.
@@ -788,6 +814,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
+ set_queue_coherency(q, non_coherent_mem);
/*
* Ask the driver how many buffers and planes per buffer it requires.
@@ -872,12 +899,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count,
+ unsigned int flags, unsigned int *count,
unsigned int requested_planes,
const unsigned int requested_sizes[])
{
unsigned int num_planes = 0, num_buffers, allocated_buffers;
unsigned plane_sizes[VB2_MAX_PLANES] = { };
+ bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
int ret;
if (q->num_buffers == VB2_MAX_FRAME) {
@@ -893,11 +921,14 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
q->waiting_for_buffers = !q->is_output;
+ set_queue_coherency(q, non_coherent_mem);
} else {
if (q->memory != memory) {
dprintk(q, 1, "memory model mismatch\n");
return -EINVAL;
}
+ if (!verify_coherency_flags(q, non_coherent_mem))
+ return -EINVAL;
}
num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
@@ -975,7 +1006,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
+ return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
@@ -985,7 +1016,7 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
+ return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);
@@ -1125,10 +1156,11 @@ static int __prepare_userptr(struct vb2_buffer *vb)
vb->planes[plane].data_offset = 0;
/* Acquire each plane's memory */
- mem_priv = call_ptr_memop(vb, get_userptr,
- q->alloc_devs[plane] ? : q->dev,
- planes[plane].m.userptr,
- planes[plane].length, q->dma_dir);
+ mem_priv = call_ptr_memop(get_userptr,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ planes[plane].m.userptr,
+ planes[plane].length);
if (IS_ERR(mem_priv)) {
dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
plane);
@@ -1249,9 +1281,11 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
vb->planes[plane].data_offset = 0;
/* Acquire each plane's memory */
- mem_priv = call_ptr_memop(vb, attach_dmabuf,
- q->alloc_devs[plane] ? : q->dev,
- dbuf, planes[plane].length, q->dma_dir);
+ mem_priv = call_ptr_memop(attach_dmabuf,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ dbuf,
+ planes[plane].length);
if (IS_ERR(mem_priv)) {
dprintk(q, 1, "failed to attach dmabuf\n");
ret = PTR_ERR(mem_priv);
@@ -1421,9 +1455,19 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
static void vb2_req_queue(struct media_request_object *obj)
{
struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+ int err;
mutex_lock(vb->vb2_queue->lock);
- vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
+ /*
+ * There is no method to propagate an error from vb2_core_qbuf(),
+ * so if this returns a non-0 value, then WARN.
+ *
+ * The only exception is -EIO which is returned if q->error is
+ * set. We just ignore that, and expect this will be caught the
+ * next time vb2_req_prepare() is called.
+ */
+ err = vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
+ WARN_ON_ONCE(err && err != -EIO);
mutex_unlock(vb->vb2_queue->lock);
}
@@ -2187,8 +2231,10 @@ int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
vb_plane = &vb->planes[plane];
- dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
- flags & O_ACCMODE);
+ dbuf = call_ptr_memop(get_dmabuf,
+ vb,
+ vb_plane->mem_priv,
+ flags & O_ACCMODE);
if (IS_ERR_OR_NULL(dbuf)) {
dprintk(q, 1, "failed to export buffer %d, plane %d\n",
index, plane);
@@ -2342,6 +2388,17 @@ int vb2_core_queue_init(struct vb2_queue *q)
if (WARN_ON(q->requires_requests && !q->supports_requests))
return -EINVAL;
+ /*
+ * This combination is not allowed since a non-zero value of
+ * q->min_buffers_needed can cause vb2_core_qbuf() to fail if
+ * it has to call start_streaming(), and the Request API expects
+ * that queueing a request (and thus queueing a buffer contained
+ * in that request) will always succeed. There is no method of
+ * propagating an error back to userspace.
+ */
+ if (WARN_ON(q->supports_requests && q->min_buffers_needed))
+ return -EINVAL;
+
INIT_LIST_HEAD(&q->queued_list);
INIT_LIST_HEAD(&q->done_list);
spin_lock_init(&q->done_lock);
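For concreteness, a hypothetical queue setup that this new check rejects:

	q->supports_requests = 1;
	q->min_buffers_needed = 2;	/* start_streaming() wants 2 bufs */

	/* vb2_core_queue_init(q) now WARNs and returns -EINVAL: queueing
	 * a request could force start_streaming(), and its failure could
	 * not be reported back to userspace. */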
@@ -2576,7 +2633,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
fileio->memory = VB2_MEMORY_MMAP;
fileio->type = q->type;
q->fileio = fileio;
- ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+ ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
if (ret)
goto err_kfree;
@@ -2633,7 +2690,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
err_reqbufs:
fileio->count = 0;
- vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+ vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
err_kfree:
q->fileio = NULL;
@@ -2653,7 +2710,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
vb2_core_streamoff(q, q->type);
q->fileio = NULL;
fileio->count = 0;
- vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+ vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
kfree(fileio);
dprintk(q, 3, "file io emulator closed\n");
}
@@ -2978,3 +3035,4 @@ EXPORT_SYMBOL_GPL(vb2_thread_stop);
MODULE_DESCRIPTION("Media buffer core framework");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(DMA_BUF);
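The MODULE_IMPORT_NS(DMA_BUF) additions in this series pair with the dma-buf exports having moved into the DMA_BUF symbol namespace this cycle. A module calling dma_buf_vmap() and friends without the import fails to load with a message along these lines (wording approximate):

/*
 *   videobuf2_common: module uses symbol dma_buf_vmap from namespace
 *   DMA_BUF, but does not import it.
 */
MODULE_IMPORT_NS(DMA_BUF);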
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index a7f61ba85440..556e42ba66e5 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
@@ -40,6 +41,9 @@ struct vb2_dc_buf {
/* DMABUF related */
struct dma_buf_attachment *db_attach;
+
+ struct vb2_buffer *vb;
+ bool non_coherent_mem;
};
/*********************************************/
@@ -66,24 +70,46 @@ static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
/* callbacks for all buffers */
/*********************************************/
-static void *vb2_dc_cookie(void *buf_priv)
+static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
return &buf->dma_addr;
}
-static void *vb2_dc_vaddr(void *buf_priv)
+/*
+ * This function may fail if:
+ *
+ * - dma_buf_vmap() fails
+ * E.g. due to lack of virtual mapping address space, or due to
+ * dmabuf->ops misconfiguration.
+ *
+ * - dma_vmap_noncontiguous() fails
+ *   For instance, when the requested buffer size is larger than totalram_pages().
+ * Relevant for buffers that use non-coherent memory.
+ *
+ * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
+ * Relevant for buffers that use coherent memory.
+ */
+static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
- struct dma_buf_map map;
- int ret;
- if (!buf->vaddr && buf->db_attach) {
- ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
- buf->vaddr = ret ? NULL : map.vaddr;
+ if (buf->vaddr)
+ return buf->vaddr;
+
+ if (buf->db_attach) {
+ struct dma_buf_map map;
+
+ if (!dma_buf_vmap(buf->db_attach->dmabuf, &map))
+ buf->vaddr = map.vaddr;
+
+ return buf->vaddr;
}
+ if (buf->non_coherent_mem)
+ buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
+ buf->dma_sgt);
return buf->vaddr;
}
@@ -99,10 +125,19 @@ static void vb2_dc_prepare(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- if (!sgt)
+ /* This takes care of DMABUF and user-enforced cache sync hint */
+ if (buf->vb->skip_cache_sync_on_prepare)
return;
+ if (!buf->non_coherent_mem)
+ return;
+
+ /* For both USERPTR and non-coherent MMAP */
dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
+
+ /* Non-coherent MMAP only */
+ if (buf->vaddr)
+ flush_kernel_vmap_range(buf->vaddr, buf->size);
}
static void vb2_dc_finish(void *buf_priv)
@@ -110,10 +145,19 @@ static void vb2_dc_finish(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- if (!sgt)
+ /* This takes care of DMABUF and user-enforced cache sync hint */
+ if (buf->vb->skip_cache_sync_on_finish)
+ return;
+
+ if (!buf->non_coherent_mem)
return;
+ /* For both USERPTR and non-coherent MMAP */
dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
+
+ /* Non-coherent MMAP only */
+ if (buf->vaddr)
+ invalidate_kernel_vmap_range(buf->vaddr, buf->size);
}
/*********************************************/
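dma_sync_sgtable_*() maintains coherency for the pages' primary kernel mapping; on architectures with virtually-aliasing caches, the separate vmap alias created by dma_vmap_noncontiguous() needs its own maintenance, which is what the extra calls above provide. The pairing, mirroring the two callbacks:

	/* prepare: CPU wrote the buffer, hand it to the device */
	dma_sync_sgtable_for_device(dev, sgt, dir);
	if (vaddr)
		flush_kernel_vmap_range(vaddr, size);

	/* finish: device wrote the buffer, hand it back to the CPU */
	dma_sync_sgtable_for_cpu(dev, sgt, dir);
	if (vaddr)
		invalidate_kernel_vmap_range(vaddr, size);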
@@ -127,21 +171,69 @@ static void vb2_dc_put(void *buf_priv)
if (!refcount_dec_and_test(&buf->refcount))
return;
- if (buf->sgt_base) {
- sg_free_table(buf->sgt_base);
- kfree(buf->sgt_base);
+ if (buf->non_coherent_mem) {
+ if (buf->vaddr)
+ dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
+ dma_free_noncontiguous(buf->dev, buf->size,
+ buf->dma_sgt, buf->dma_dir);
+ } else {
+ if (buf->sgt_base) {
+ sg_free_table(buf->sgt_base);
+ kfree(buf->sgt_base);
+ }
+ dma_free_attrs(buf->dev, buf->size, buf->cookie,
+ buf->dma_addr, buf->attrs);
}
- dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
- buf->attrs);
put_device(buf->dev);
kfree(buf);
}
-static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
+{
+ struct vb2_queue *q = buf->vb->vb2_queue;
+
+ buf->cookie = dma_alloc_attrs(buf->dev,
+ buf->size,
+ &buf->dma_addr,
+ GFP_KERNEL | q->gfp_flags,
+ buf->attrs);
+ if (!buf->cookie)
+ return -ENOMEM;
+
+ if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+ return 0;
+
+ buf->vaddr = buf->cookie;
+ return 0;
+}
+
+static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
+{
+ struct vb2_queue *q = buf->vb->vb2_queue;
+
+ buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
+ buf->size,
+ buf->dma_dir,
+ GFP_KERNEL | q->gfp_flags,
+ buf->attrs);
+ if (!buf->dma_sgt)
+ return -ENOMEM;
+
+ buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);
+
+ /*
+ * For non-coherent buffers the kernel mapping is created on demand
+ * in vb2_dc_vaddr().
+ */
+ return 0;
+}
+
+static void *vb2_dc_alloc(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long size)
{
struct vb2_dc_buf *buf;
+ int ret;
if (WARN_ON(!dev))
return ERR_PTR(-EINVAL);
@@ -150,22 +242,25 @@ static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->attrs = attrs;
- buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags, buf->attrs);
- if (!buf->cookie) {
- dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
- kfree(buf);
- return ERR_PTR(-ENOMEM);
- }
-
- if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
- buf->vaddr = buf->cookie;
+ buf->attrs = vb->vb2_queue->dma_attrs;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->vb = vb;
+ buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;
+ buf->size = size;
/* Prevent the device from being released while the buffer is used */
buf->dev = get_device(dev);
- buf->size = size;
- buf->dma_dir = dma_dir;
+
+ if (buf->non_coherent_mem)
+ ret = vb2_dc_alloc_non_coherent(buf);
+ else
+ ret = vb2_dc_alloc_coherent(buf);
+
+ if (ret) {
+ dev_err(dev, "dma alloc of size %ld failed\n", size);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dc_put;
@@ -186,9 +281,12 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
return -EINVAL;
}
- ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
- buf->dma_addr, buf->size, buf->attrs);
-
+ if (buf->non_coherent_mem)
+ ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
+ buf->dma_sgt);
+ else
+ ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
+ buf->size, buf->attrs);
if (ret) {
pr_err("Remapping memory failed, error: %d\n", ret);
return ret;
@@ -350,9 +448,15 @@ vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
- struct vb2_dc_buf *buf = dbuf->priv;
+ struct vb2_dc_buf *buf;
+ void *vaddr;
+
+ buf = dbuf->priv;
+ vaddr = vb2_dc_vaddr(buf->vb, buf);
+ if (!vaddr)
+ return -EINVAL;
- dma_buf_map_set_vaddr(map, buf->vaddr);
+ dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
@@ -380,6 +484,9 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
int ret;
struct sg_table *sgt;
+ if (buf->non_coherent_mem)
+ return buf->dma_sgt;
+
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
dev_err(buf->dev, "failed to alloc sg table\n");
@@ -397,7 +504,9 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
return sgt;
}
-static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
+static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
{
struct vb2_dc_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -459,8 +568,8 @@ static void vb2_dc_put_userptr(void *buf_priv)
kfree(buf);
}
-static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
{
struct vb2_dc_buf *buf;
struct frame_vector *vec;
@@ -490,7 +599,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
return ERR_PTR(-ENOMEM);
buf->dev = dev;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->vb = vb;
offset = lower_32_bits(offset_in_page(vaddr));
vec = vb2_create_framevec(vaddr, size);
@@ -555,6 +665,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
buf->dma_addr = sg_dma_address(sgt->sgl);
buf->dma_sgt = sgt;
+ buf->non_coherent_mem = 1;
+
out:
buf->size = size;
@@ -660,8 +772,8 @@ static void vb2_dc_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
+ struct dma_buf *dbuf, unsigned long size)
{
struct vb2_dc_buf *buf;
struct dma_buf_attachment *dba;
@@ -677,6 +789,8 @@ static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return ERR_PTR(-ENOMEM);
buf->dev = dev;
+ buf->vb = vb;
+
/* create attachment for the dmabuf with the user device */
dba = dma_buf_attach(dbuf, buf->dev);
if (IS_ERR(dba)) {
@@ -685,7 +799,7 @@ static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return dba;
}
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
buf->db_attach = dba;
@@ -755,3 +869,4 @@ EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(DMA_BUF);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index c5b06a509566..1094575abf95 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -51,6 +51,8 @@ struct vb2_dma_sg_buf {
struct vb2_vmarea_handler handler;
struct dma_buf_attachment *db_attach;
+
+ struct vb2_buffer *vb;
};
static void vb2_dma_sg_put(void *buf_priv);
@@ -96,9 +98,8 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
return 0;
}
-static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
+ unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
@@ -113,7 +114,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
return ERR_PTR(-ENOMEM);
buf->vaddr = NULL;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->offset = 0;
buf->size = size;
/* size is already page aligned */
@@ -130,7 +131,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
if (!buf->pages)
goto fail_pages_array_alloc;
- ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+ ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
if (ret)
goto fail_pages_alloc;
@@ -154,6 +155,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dma_sg_put;
buf->handler.arg = buf;
+ buf->vb = vb;
refcount_set(&buf->refcount, 1);
@@ -202,6 +204,9 @@ static void vb2_dma_sg_prepare(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
+ if (buf->vb->skip_cache_sync_on_prepare)
+ return;
+
dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}
@@ -210,12 +215,14 @@ static void vb2_dma_sg_finish(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
+ if (buf->vb->skip_cache_sync_on_finish)
+ return;
+
dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
-static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size,
- enum dma_data_direction dma_dir)
+static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
@@ -230,7 +237,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
buf->vaddr = NULL;
buf->dev = dev;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
buf->dma_sgt = &buf->sg_table;
@@ -292,7 +299,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
kfree(buf);
}
-static void *vb2_dma_sg_vaddr(void *buf_priv)
+static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct dma_buf_map map;
@@ -511,7 +518,9 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
.release = vb2_dma_sg_dmabuf_ops_release,
};
-static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
+static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -605,8 +614,8 @@ static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
+ struct dma_buf *dbuf, unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct dma_buf_attachment *dba;
@@ -630,14 +639,14 @@ static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return dba;
}
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
buf->db_attach = dba;
return buf;
}
-static void *vb2_dma_sg_cookie(void *buf_priv)
+static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
@@ -666,3 +675,4 @@ EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(DMA_BUF);
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 2988bb38ceb1..6edf4508c636 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -345,24 +345,6 @@ static void set_buffer_cache_hints(struct vb2_queue *q,
struct vb2_buffer *vb,
struct v4l2_buffer *b)
{
- /*
- * DMA exporter should take care of cache syncs, so we can avoid
- * explicit ->prepare()/->finish() syncs. For other ->memory types
- * we always need ->prepare() or/and ->finish() cache sync.
- */
- if (q->memory == VB2_MEMORY_DMABUF) {
- vb->need_cache_sync_on_finish = 0;
- vb->need_cache_sync_on_prepare = 0;
- return;
- }
-
- /*
- * Cache sync/invalidation flags are set by default in order to
- * preserve existing behaviour for old apps/drivers.
- */
- vb->need_cache_sync_on_prepare = 1;
- vb->need_cache_sync_on_finish = 1;
-
if (!vb2_queue_allows_cache_hints(q)) {
/*
* Clear buffer cache flags if queue does not support user
@@ -374,18 +356,11 @@ static void set_buffer_cache_hints(struct vb2_queue *q,
return;
}
- /*
- * ->finish() cache sync can be avoided when queue direction is
- * TO_DEVICE.
- */
- if (q->dma_dir == DMA_TO_DEVICE)
- vb->need_cache_sync_on_finish = 0;
-
if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
- vb->need_cache_sync_on_finish = 0;
+ vb->skip_cache_sync_on_finish = 1;
if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
- vb->need_cache_sync_on_prepare = 0;
+ vb->skip_cache_sync_on_prepare = 1;
}
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
@@ -717,12 +692,32 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
#endif
}
+static void validate_memory_flags(struct vb2_queue *q,
+ int memory,
+ u32 *flags)
+{
+ if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
+ /*
+ * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
+		 * Only V4L2_MEMORY_FLAG_NON_COHERENT needs to be cleared
+		 * here, but we zero out all bits to avoid future bugs.
+ *flags = 0;
+ } else {
+ /* Clear all unknown flags. */
+ *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+ }
+}
+
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
int ret = vb2_verify_memory_type(q, req->memory, req->type);
+ u32 flags = req->flags;
fill_buf_caps(q, &req->capabilities);
- return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
+ validate_memory_flags(q, req->memory, &flags);
+ req->flags = flags;
+ return ret ? ret : vb2_core_reqbufs(q, req->memory,
+ req->flags, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
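From userspace the negotiation looks like this (a minimal sketch; V4L2_MEMORY_FLAG_NON_COHERENT is the only flag this series defines, and the core clears it when the queue cannot honour it, so the field must be re-checked after the ioctl):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int request_non_coherent_bufs(int fd)
{
	struct v4l2_requestbuffers req = {
		.count  = 4,
		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
		.flags  = V4L2_MEMORY_FLAG_NON_COHERENT,
	};

	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;

	/* 0: non-coherent granted, 1: fell back to coherent memory */
	return !(req.flags & V4L2_MEMORY_FLAG_NON_COHERENT);
}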
@@ -754,6 +749,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
unsigned i;
fill_buf_caps(q, &create->capabilities);
+ validate_memory_flags(q, create->memory, &create->flags);
create->index = q->num_buffers;
if (create->count == 0)
return ret != -EBUSY ? ret : 0;
@@ -797,6 +793,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
if (requested_sizes[i] == 0)
return -EINVAL;
return ret ? ret : vb2_core_create_bufs(q, create->memory,
+ create->flags,
&create->count,
requested_planes,
requested_sizes);
@@ -993,13 +990,16 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv,
{
struct video_device *vdev = video_devdata(file);
int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
+ u32 flags = p->flags;
fill_buf_caps(vdev->queue, &p->capabilities);
+ validate_memory_flags(vdev->queue, p->memory, &flags);
+ p->flags = flags;
if (res)
return res;
if (vb2_queue_is_busy(vdev, file))
return -EBUSY;
- res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
+ res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count);
/* If count == 0, then the owner has released all buffers and he
is no longer owner of the queue. Otherwise we have a new owner. */
if (res == 0)
@@ -1017,6 +1017,7 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv,
p->index = vdev->queue->num_buffers;
fill_buf_caps(vdev->queue, &p->capabilities);
+ validate_memory_flags(vdev->queue, p->memory, &p->flags);
/*
* If count == 0, then just check if memory and type are valid.
* Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 83f95258ec8c..0bbfea66554f 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -34,13 +34,12 @@ struct vb2_vmalloc_buf {
static void vb2_vmalloc_put(void *buf_priv);
-static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
+ unsigned long size)
{
struct vb2_vmalloc_buf *buf;
- buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -52,7 +51,7 @@ static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
return ERR_PTR(-ENOMEM);
}
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_vmalloc_put;
buf->handler.arg = buf;
@@ -71,9 +70,8 @@ static void vb2_vmalloc_put(void *buf_priv)
}
}
-static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size,
- enum dma_data_direction dma_dir)
+static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
{
struct vb2_vmalloc_buf *buf;
struct frame_vector *vec;
@@ -84,7 +82,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
offset = vaddr & ~PAGE_MASK;
buf->size = size;
vec = vb2_create_framevec(vaddr, size);
@@ -147,7 +145,7 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
kfree(buf);
}
-static void *vb2_vmalloc_vaddr(void *buf_priv)
+static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_vmalloc_buf *buf = buf_priv;
@@ -339,7 +337,9 @@ static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
.release = vb2_vmalloc_dmabuf_ops_release,
};
-static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
+static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
{
struct vb2_vmalloc_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -403,8 +403,10 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
+ struct device *dev,
+ struct dma_buf *dbuf,
+ unsigned long size)
{
struct vb2_vmalloc_buf *buf;
@@ -416,7 +418,7 @@ static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return ERR_PTR(-ENOMEM);
buf->dbuf = dbuf;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
return buf;
@@ -444,3 +446,4 @@ EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(DMA_BUF);
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index dddebea644bb..8a2febf33ce2 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -1008,7 +1008,7 @@ static u8 mask_promisc[6]={0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
static int dvb_net_filter_sec_set(struct net_device *dev,
struct dmx_section_filter **secfilter,
- u8 *mac, u8 *mac_mask)
+ const u8 *mac, u8 *mac_mask)
{
struct dvb_net_priv *priv = netdev_priv(dev);
int ret;
@@ -1052,7 +1052,7 @@ static int dvb_net_feed_start(struct net_device *dev)
int ret = 0, i;
struct dvb_net_priv *priv = netdev_priv(dev);
struct dmx_demux *demux = priv->demux;
- unsigned char *mac = (unsigned char *) dev->dev_addr;
+ const unsigned char *mac = (const unsigned char *) dev->dev_addr;
netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
mutex_lock(&priv->mutex);
@@ -1272,7 +1272,7 @@ static int dvb_net_set_mac (struct net_device *dev, void *p)
struct dvb_net_priv *priv = netdev_priv(dev);
struct sockaddr *addr=p;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev))
schedule_work(&priv->restart_net_feed_wq);
@@ -1367,7 +1367,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
dvbnet->dvbdev->adapter->num, if_num);
net->addr_len = 6;
- memcpy(net->dev_addr, dvbnet->dvbdev->adapter->proposed_mac, 6);
+ eth_hw_addr_set(net, dvbnet->dvbdev->adapter->proposed_mac);
dvbnet->device[if_num] = net;
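These two conversions follow from netdev->dev_addr becoming const this cycle: drivers must no longer write it directly. The helper's initial definition is equivalent to the memcpy() it replaces (reproduced from memory, so treat as approximate):

static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
	memcpy(dev->dev_addr, addr, ETH_ALEN);
}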
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index 6974f1731529..959d110407a4 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -342,7 +342,7 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
ctx->buf_siz = req->size;
ctx->buf_cnt = req->count;
- ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, &req->count);
+ ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, 0, &req->count);
if (ret) {
ctx->state = DVB_VB2_STATE_NONE;
dprintk(1, "[%s] count=%d size=%d errno=%d\n", ctx->name,
diff --git a/drivers/media/dvb-frontends/cxd2099.c b/drivers/media/dvb-frontends/cxd2099.c
index f88b5355493e..1c8207ab8988 100644
--- a/drivers/media/dvb-frontends/cxd2099.c
+++ b/drivers/media/dvb-frontends/cxd2099.c
@@ -3,15 +3,6 @@
* cxd2099.c: Driver for the Sony CXD2099AR Common Interface Controller
*
* Copyright (C) 2010-2013 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/slab.h>
diff --git a/drivers/media/dvb-frontends/cxd2099.h b/drivers/media/dvb-frontends/cxd2099.h
index 0c101bdef01d..5d4060007c46 100644
--- a/drivers/media/dvb-frontends/cxd2099.h
+++ b/drivers/media/dvb-frontends/cxd2099.h
@@ -3,15 +3,6 @@
* cxd2099.h: Driver for the Sony CXD2099AR Common Interface Controller
*
* Copyright (C) 2010-2011 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CXD2099_H_
diff --git a/drivers/media/dvb-frontends/cxd2820r_priv.h b/drivers/media/dvb-frontends/cxd2820r_priv.h
index 7baf0162424f..09c42bcef971 100644
--- a/drivers/media/dvb-frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb-frontends/cxd2820r_priv.h
@@ -13,7 +13,7 @@
#include <media/dvb_frontend.h>
#include <media/dvb_math.h>
#include "cxd2820r.h"
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h> /* For gpio_chip */
#include <linux/math64.h>
#include <linux/regmap.h>
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index a7faf0cf8788..b74b9afed9a2 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -444,11 +444,11 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
unsigned layer)
{
int rc;
- int interleaving[] = {
+ static const int interleaving[] = {
0, 1, 2, 4, 8
};
- static unsigned char reg[] = {
+ static const unsigned char reg[] = {
[0] = 0x88, /* Layer A */
[1] = 0x8c, /* Layer B */
[2] = 0x90, /* Layer C */
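Making the lookup tables static const places them in .rodata once, instead of rebuilding them on the stack at every call; the difference in isolation:

int lookup_auto(unsigned int i)
{
	int t[] = { 0, 1, 2, 4, 8 };	/* re-initialized on every call */
	return t[i];
}

int lookup_rodata(unsigned int i)
{
	static const int t[] = { 0, 1, 2, 4, 8 }; /* one read-only copy */
	return t[i];
}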
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index e4528784f847..fff212c0bf3b 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -204,11 +204,18 @@ struct mn88443x_priv {
struct regmap *regmap_t;
};
-static void mn88443x_cmn_power_on(struct mn88443x_priv *chip)
+static int mn88443x_cmn_power_on(struct mn88443x_priv *chip)
{
+ struct device *dev = &chip->client_s->dev;
struct regmap *r_t = chip->regmap_t;
+ int ret;
- clk_prepare_enable(chip->mclk);
+ ret = clk_prepare_enable(chip->mclk);
+ if (ret) {
+ dev_err(dev, "Failed to prepare and enable mclk: %d\n",
+ ret);
+ return ret;
+ }
gpiod_set_value_cansleep(chip->reset_gpio, 1);
usleep_range(100, 1000);
@@ -222,6 +229,8 @@ static void mn88443x_cmn_power_on(struct mn88443x_priv *chip)
} else {
regmap_write(r_t, HIZSET3, 0x8f);
}
+
+ return 0;
}
static void mn88443x_cmn_power_off(struct mn88443x_priv *chip)
@@ -738,7 +747,10 @@ static int mn88443x_probe(struct i2c_client *client,
chip->fe.demodulator_priv = chip;
i2c_set_clientdata(client, chip);
- mn88443x_cmn_power_on(chip);
+ ret = mn88443x_cmn_power_on(chip);
+ if (ret)
+ goto err_i2c_t;
+
mn88443x_s_sleep(chip);
mn88443x_t_sleep(chip);
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 0b00a23436ed..934d1c0b214a 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -9,15 +9,6 @@
* based on code:
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/mxl5xx.h b/drivers/media/dvb-frontends/mxl5xx.h
index 706a2f5d8f97..139e16b2ecfc 100644
--- a/drivers/media/dvb-frontends/mxl5xx.h
+++ b/drivers/media/dvb-frontends/mxl5xx.h
@@ -9,15 +9,6 @@
* based on code:
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _MXL5XX_H_
diff --git a/drivers/media/dvb-frontends/mxl5xx_defs.h b/drivers/media/dvb-frontends/mxl5xx_defs.h
index 1442af8dc176..097271f73740 100644
--- a/drivers/media/dvb-frontends/mxl5xx_defs.h
+++ b/drivers/media/dvb-frontends/mxl5xx_defs.h
@@ -7,10 +7,6 @@
* based on code:
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
*/
enum MXL_BOOL_E {
diff --git a/drivers/media/dvb-frontends/mxl5xx_regs.h b/drivers/media/dvb-frontends/mxl5xx_regs.h
index 86d5317eba7a..b38a13847033 100644
--- a/drivers/media/dvb-frontends/mxl5xx_regs.h
+++ b/drivers/media/dvb-frontends/mxl5xx_regs.h
@@ -2,16 +2,6 @@
/*
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
*
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
- *
* This program may alternatively be licensed under a proprietary license from
* MaxLinear, Inc.
*
diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c
index a246db683cdf..dd7954e8f553 100644
--- a/drivers/media/dvb-frontends/mxl692.c
+++ b/drivers/media/dvb-frontends/mxl692.c
@@ -7,15 +7,6 @@
* based on code:
* Copyright (c) 2016 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/mutex.h>
diff --git a/drivers/media/dvb-frontends/mxl692.h b/drivers/media/dvb-frontends/mxl692.h
index 45bc48f1f12f..77764a047c07 100644
--- a/drivers/media/dvb-frontends/mxl692.h
+++ b/drivers/media/dvb-frontends/mxl692.h
@@ -7,15 +7,6 @@
* based on code:
* Copyright (c) 2016 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _MXL692_H_
diff --git a/drivers/media/dvb-frontends/mxl692_defs.h b/drivers/media/dvb-frontends/mxl692_defs.h
index 776ac407b4e7..c603f3d6f27f 100644
--- a/drivers/media/dvb-frontends/mxl692_defs.h
+++ b/drivers/media/dvb-frontends/mxl692_defs.h
@@ -7,15 +7,6 @@
* based on code:
* Copyright (c) 2016 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*****************************************************************************
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 1a2f0d2adadf..6a4f2997d6f5 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -376,8 +376,11 @@ static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_dev *dev)
dev_dbg(&pdev->dev, "alloc urb=%d\n", i);
dev->urb_list[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb_list[i]) {
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
usb_free_urb(dev->urb_list[j]);
+ dev->urb_list[j] = NULL;
+ }
+ dev->urbs_initialized = 0;
return -ENOMEM;
}
usb_fill_bulk_urb(dev->urb_list[i],
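Clearing each slot after usb_free_urb() (and resetting urbs_initialized) keeps any later full teardown pass from freeing the same URBs twice. Since usb_free_urb(NULL) is a no-op, the usual teardown loop stays safe; shape of such a loop (hypothetical helper):

static void free_all_urbs(struct urb **urb_list, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		usb_free_urb(urb_list[i]);	/* NULL-safe */
		urb_list[i] = NULL;
	}
}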
diff --git a/drivers/media/dvb-frontends/stv0910.c b/drivers/media/dvb-frontends/stv0910.c
index 68d7c7b41071..e517ff757744 100644
--- a/drivers/media/dvb-frontends/stv0910.c
+++ b/drivers/media/dvb-frontends/stv0910.c
@@ -5,15 +5,6 @@
* Copyright (C) 2014-2015 Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
* developed for Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/stv0910.h b/drivers/media/dvb-frontends/stv0910.h
index 24ecc6902235..0b6f02ad7910 100644
--- a/drivers/media/dvb-frontends/stv0910.h
+++ b/drivers/media/dvb-frontends/stv0910.h
@@ -5,15 +5,6 @@
* Copyright (C) 2014-2015 Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
* developed for Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _STV0910_H_
diff --git a/drivers/media/dvb-frontends/stv6111.c b/drivers/media/dvb-frontends/stv6111.c
index d5035dac4574..2d0adb6fcb08 100644
--- a/drivers/media/dvb-frontends/stv6111.c
+++ b/drivers/media/dvb-frontends/stv6111.c
@@ -3,15 +3,6 @@
* Driver for the ST STV6111 tuner
*
* Copyright (C) 2014 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/stv6111.h b/drivers/media/dvb-frontends/stv6111.h
index 49e821ac9954..f172c3e3d886 100644
--- a/drivers/media/dvb-frontends/stv6111.h
+++ b/drivers/media/dvb-frontends/stv6111.h
@@ -3,15 +3,6 @@
* Driver for the ST STV6111 tuner
*
* Copyright (C) 2014 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _STV6111_H_
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 2bf9467b917d..71991f8638e6 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -1165,7 +1165,11 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
read_pos += program_info_length;
write_pos += program_info_length;
}
- while (read_pos < length) {
+ while (read_pos + 4 < length) {
+ if (write_pos + 4 >= sizeof(c->operand) - 4) {
+ ret = -EINVAL;
+ goto out;
+ }
c->operand[write_pos++] = msg[read_pos++];
c->operand[write_pos++] = msg[read_pos++];
c->operand[write_pos++] = msg[read_pos++];
@@ -1177,13 +1181,17 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
c->operand[write_pos++] = es_info_length >> 8;
c->operand[write_pos++] = es_info_length & 0xff;
if (es_info_length > 0) {
+ if (read_pos >= length) {
+ ret = -EINVAL;
+ goto out;
+ }
pmt_cmd_id = msg[read_pos++];
if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
dev_err(fdtv->device, "invalid pmt_cmd_id %d at stream level\n",
pmt_cmd_id);
- if (es_info_length > sizeof(c->operand) - 4 -
- write_pos) {
+ if (es_info_length > sizeof(c->operand) - 4 - write_pos ||
+ es_info_length > length - read_pos) {
ret = -EINVAL;
goto out;
}
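The added checks bound both sides of the copy loop: the source must still hold the four bytes consumed per iteration plus the ES info that follows, and the destination must keep four bytes of headroom in c->operand. Collected into one predicate (names hypothetical, for illustration only):

static bool ca_pmt_copy_ok(size_t read_pos, size_t length,
			   size_t write_pos, size_t operand_size,
			   size_t es_info_length)
{
	return read_pos + 4 < length &&
	       write_pos + 4 < operand_size - 4 &&
	       es_info_length <= length - read_pos &&
	       es_info_length <= operand_size - 4 - write_pos;
}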
diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
index 9363d005e2b6..e0d57e09dab0 100644
--- a/drivers/media/firewire/firedtv-ci.c
+++ b/drivers/media/firewire/firedtv-ci.c
@@ -134,6 +134,8 @@ static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
} else {
data_length = msg->msg[3];
}
+ if (data_length > sizeof(msg->msg) - data_pos)
+ return -EINVAL;
return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length);
}
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 08feb3e8c1bf..d6a5d4ca439a 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -450,6 +450,7 @@ config VIDEO_TW9906
config VIDEO_TW9910
tristate "Techwell TW9910 video decoder"
depends on VIDEO_V4L2 && I2C
+ select V4L2_ASYNC
help
Support for Techwell TW9910 NTSC/PAL/SECAM video decoder.
@@ -597,6 +598,7 @@ config VIDEO_AK881X
config VIDEO_THS8200
tristate "Texas Instruments THS8200 video encoder"
depends on VIDEO_V4L2 && I2C
+ select V4L2_ASYNC
help
Support for the Texas Instruments THS8200 video encoder.
@@ -610,6 +612,7 @@ menu "Video improvement chips"
config VIDEO_UPD64031A
tristate "NEC Electronics uPD64031A Ghost Reduction"
depends on VIDEO_V4L2 && I2C
+ select V4L2_ASYNC
help
Support for the NEC Electronics uPD64031A Ghost Reduction
video chip. It is most often found in NTSC TV cards made for
@@ -742,6 +745,19 @@ config VIDEO_HI556
To compile this driver as a module, choose M here: the
module will be called hi556.
+config VIDEO_HI846
+ tristate "Hynix Hi-846 sensor support"
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Hynix
+ Hi-846 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hi846.
+
config VIDEO_IMX208
tristate "Sony IMX208 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
@@ -1186,6 +1202,16 @@ config VIDEO_OV13858
This is a Video4Linux2 sensor driver for the OmniVision
OV13858 camera.
+config VIDEO_OV13B10
+ tristate "OmniVision OV13B10 sensor support"
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV13B10 camera.
+
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
@@ -1229,6 +1255,7 @@ config VIDEO_MT9P031
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select VIDEO_APTINA_PLL
+ select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt9p031 5 Mpixel camera.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 83268f20aa3a..4d4fe08d7a6a 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_VIDEO_OV9640) += ov9640.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_OV9734) += ov9734.o
obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
+obj-$(CONFIG_VIDEO_OV13B10) += ov13b10.o
obj-$(CONFIG_VIDEO_MT9M001) += mt9m001.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
@@ -117,6 +118,7 @@ obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
obj-$(CONFIG_VIDEO_HI556) += hi556.o
+obj-$(CONFIG_VIDEO_HI846) += hi846.o
obj-$(CONFIG_VIDEO_IMX208) += imx208.o
obj-$(CONFIG_VIDEO_IMX214) += imx214.o
obj-$(CONFIG_VIDEO_IMX219) += imx219.o
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 122e1fdccd96..44768b59a6ff 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -41,7 +41,7 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
-MODULE_DESCRIPTION("Analog Devices ADV7604 video decoder driver");
+MODULE_DESCRIPTION("Analog Devices ADV7604/10/11/12 video decoder driver");
MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
MODULE_AUTHOR("Mats Randgaard <mats.randgaard@cisco.com>");
MODULE_LICENSE("GPL");
@@ -77,7 +77,7 @@ MODULE_LICENSE("GPL");
enum adv76xx_type {
ADV7604,
- ADV7611,
+ ADV7611, // including ADV7610
ADV7612,
};
@@ -3176,6 +3176,7 @@ static const struct adv76xx_chip_info adv76xx_chip_info[] = {
static const struct i2c_device_id adv76xx_i2c_id[] = {
{ "adv7604", (kernel_ulong_t)&adv76xx_chip_info[ADV7604] },
+ { "adv7610", (kernel_ulong_t)&adv76xx_chip_info[ADV7611] },
{ "adv7611", (kernel_ulong_t)&adv76xx_chip_info[ADV7611] },
{ "adv7612", (kernel_ulong_t)&adv76xx_chip_info[ADV7612] },
{ }
@@ -3183,6 +3184,7 @@ static const struct i2c_device_id adv76xx_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, adv76xx_i2c_id);
static const struct of_device_id adv76xx_of_id[] __maybe_unused = {
+ { .compatible = "adi,adv7610", .data = &adv76xx_chip_info[ADV7611] },
{ .compatible = "adi,adv7611", .data = &adv76xx_chip_info[ADV7611] },
{ .compatible = "adi,adv7612", .data = &adv76xx_chip_info[ADV7612] },
{ }
@@ -3500,8 +3502,8 @@ static int adv76xx_probe(struct i2c_client *client,
return -ENODEV;
}
if (val != 0x68) {
- v4l2_err(sd, "not an adv7604 on address 0x%x\n",
- client->addr << 1);
+ v4l2_err(sd, "not an ADV7604 on address 0x%x\n",
+ client->addr << 1);
return -ENODEV;
}
break;
@@ -3525,8 +3527,9 @@ static int adv76xx_probe(struct i2c_client *client,
val |= val2;
if ((state->info->type == ADV7611 && val != 0x2051) ||
(state->info->type == ADV7612 && val != 0x2041)) {
- v4l2_err(sd, "not an adv761x on address 0x%x\n",
- client->addr << 1);
+ v4l2_err(sd, "not an %s on address 0x%x\n",
+ state->info->type == ADV7611 ? "ADV7610/11" : "ADV7612",
+ client->addr << 1);
return -ENODEV;
}
break;
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index c8b4292512dc..3863dfeb8293 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#define DW9714_NAME "dw9714"
#define DW9714_MAX_FOCUS_POS 1023
@@ -100,7 +101,15 @@ static const struct v4l2_subdev_internal_ops dw9714_int_ops = {
.close = dw9714_close,
};
-static const struct v4l2_subdev_ops dw9714_ops = { };
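+/* Core ops wiring log_status and control-event subscription to the ctrl handler. */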
+static const struct v4l2_subdev_core_ops dw9714_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops dw9714_ops = {
+ .core = &dw9714_core_ops,
+};
static void dw9714_subdev_cleanup(struct dw9714_device *dw9714_dev)
{
@@ -137,7 +146,8 @@ static int dw9714_probe(struct i2c_client *client)
return -ENOMEM;
v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops);
- dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
dw9714_dev->sd.internal_ops = &dw9714_int_ops;
rval = dw9714_init_controls(dw9714_dev);
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
new file mode 100644
index 000000000000..822ce3021fde
--- /dev/null
+++ b/drivers/media/i2c/hi846.c
@@ -0,0 +1,2190 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2021 Purism SPC
+
+#include <asm/unaligned.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define HI846_MEDIA_BUS_FORMAT MEDIA_BUS_FMT_SGBRG10_1X10
+#define HI846_RGB_DEPTH 10
+
+/* Frame length lines / vertical timings */
+#define HI846_REG_FLL 0x0006
+#define HI846_FLL_MAX 0xffff
+
+/* Horizontal timing */
+#define HI846_REG_LLP 0x0008
+#define HI846_LINE_LENGTH 3800
+
+#define HI846_REG_BINNING_MODE 0x000c
+
+#define HI846_REG_IMAGE_ORIENTATION 0x000e
+
+#define HI846_REG_UNKNOWN_0022 0x0022
+
+#define HI846_REG_Y_ADDR_START_VACT_H 0x0026
+#define HI846_REG_Y_ADDR_START_VACT_L 0x0027
+#define HI846_REG_UNKNOWN_0028 0x0028
+
+#define HI846_REG_Y_ADDR_END_VACT_H 0x002c
+#define HI846_REG_Y_ADDR_END_VACT_L 0x002d
+
+#define HI846_REG_Y_ODD_INC_FOBP 0x002e
+#define HI846_REG_Y_EVEN_INC_FOBP 0x002f
+
+#define HI846_REG_Y_ODD_INC_VACT 0x0032
+#define HI846_REG_Y_EVEN_INC_VACT 0x0033
+
+#define HI846_REG_GROUPED_PARA_HOLD 0x0046
+
+#define HI846_REG_TG_ENABLE 0x004c
+
+#define HI846_REG_UNKNOWN_005C 0x005c
+
+#define HI846_REG_UNKNOWN_006A 0x006a
+
+/*
+ * Long exposure time. Actually, exposure is a 20 bit value that
+ * includes the lower 4 bits of 0x0073 too. Only 16 bits are used
+ * right now
+ */
+#define HI846_REG_EXPOSURE 0x0074
+#define HI846_EXPOSURE_MIN 6
+#define HI846_EXPOSURE_MAX_MARGIN 2
+#define HI846_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define HI846_REG_ANALOG_GAIN 0x0077
+#define HI846_ANAL_GAIN_MIN 0
+#define HI846_ANAL_GAIN_MAX 240
+#define HI846_ANAL_GAIN_STEP 8
+
+/* Digital gain controls from sensor */
+#define HI846_REG_MWB_GR_GAIN_H 0x0078
+#define HI846_REG_MWB_GR_GAIN_L 0x0079
+#define HI846_REG_MWB_GB_GAIN_H 0x007a
+#define HI846_REG_MWB_GB_GAIN_L 0x007b
+#define HI846_REG_MWB_R_GAIN_H 0x007c
+#define HI846_REG_MWB_R_GAIN_L 0x007d
+#define HI846_REG_MWB_B_GAIN_H 0x007e
+#define HI846_REG_MWB_B_GAIN_L 0x007f
+#define HI846_DGTL_GAIN_MIN 512
+#define HI846_DGTL_GAIN_MAX 8191
+#define HI846_DGTL_GAIN_STEP 1
+#define HI846_DGTL_GAIN_DEFAULT 512
+
+#define HI846_REG_X_ADDR_START_HACT_H 0x0120
+#define HI846_REG_X_ADDR_END_HACT_H 0x0122
+
+#define HI846_REG_UNKNOWN_012A 0x012a
+
+#define HI846_REG_UNKNOWN_0200 0x0200
+
+#define HI846_REG_UNKNOWN_021C 0x021c
+#define HI846_REG_UNKNOWN_021E 0x021e
+
+#define HI846_REG_UNKNOWN_0402 0x0402
+#define HI846_REG_UNKNOWN_0404 0x0404
+#define HI846_REG_UNKNOWN_0408 0x0408
+#define HI846_REG_UNKNOWN_0410 0x0410
+#define HI846_REG_UNKNOWN_0412 0x0412
+#define HI846_REG_UNKNOWN_0414 0x0414
+
+#define HI846_REG_UNKNOWN_0418 0x0418
+
+#define HI846_REG_UNKNOWN_051E 0x051e
+
+/* Formatter */
+#define HI846_REG_X_START_H 0x0804
+#define HI846_REG_X_START_L 0x0805
+
+/* MIPI */
+#define HI846_REG_UNKNOWN_0900 0x0900
+#define HI846_REG_MIPI_TX_OP_EN 0x0901
+#define HI846_REG_MIPI_TX_OP_MODE 0x0902
+#define HI846_RAW8 BIT(5)
+
+#define HI846_REG_UNKNOWN_090C 0x090c
+#define HI846_REG_UNKNOWN_090E 0x090e
+
+#define HI846_REG_UNKNOWN_0914 0x0914
+#define HI846_REG_TLPX 0x0915
+#define HI846_REG_TCLK_PREPARE 0x0916
+#define HI846_REG_TCLK_ZERO 0x0917
+#define HI846_REG_UNKNOWN_0918 0x0918
+#define HI846_REG_THS_PREPARE 0x0919
+#define HI846_REG_THS_ZERO 0x091a
+#define HI846_REG_THS_TRAIL 0x091b
+#define HI846_REG_TCLK_POST 0x091c
+#define HI846_REG_TCLK_TRAIL_MIN 0x091d
+#define HI846_REG_UNKNOWN_091E 0x091e
+
+#define HI846_REG_UNKNOWN_0954 0x0954
+#define HI846_REG_UNKNOWN_0956 0x0956
+#define HI846_REG_UNKNOWN_0958 0x0958
+#define HI846_REG_UNKNOWN_095A 0x095a
+
+/* ISP Common */
+#define HI846_REG_MODE_SELECT 0x0a00
+#define HI846_MODE_STANDBY 0x00
+#define HI846_MODE_STREAMING 0x01
+#define HI846_REG_FAST_STANDBY_MODE 0x0a02
+#define HI846_REG_ISP_EN_H 0x0a04
+
+/* Test Pattern Control */
+#define HI846_REG_ISP 0x0a05
+#define HI846_REG_ISP_TPG_EN 0x01
+#define HI846_REG_TEST_PATTERN 0x020a /* 1-9 */
+
+#define HI846_REG_UNKNOWN_0A0C 0x0a0c
+
+/* Windowing */
+#define HI846_REG_X_OUTPUT_SIZE_H 0x0a12
+#define HI846_REG_X_OUTPUT_SIZE_L 0x0a13
+#define HI846_REG_Y_OUTPUT_SIZE_H 0x0a14
+#define HI846_REG_Y_OUTPUT_SIZE_L 0x0a15
+
+/* ISP Common */
+#define HI846_REG_PEDESTAL_EN 0x0a1a
+
+#define HI846_REG_UNKNOWN_0A1E 0x0a1e
+
+/* Horizontal Binning Mode */
+#define HI846_REG_HBIN_MODE 0x0a22
+
+#define HI846_REG_UNKNOWN_0A24 0x0a24
+#define HI846_REG_UNKNOWN_0B02 0x0b02
+#define HI846_REG_UNKNOWN_0B10 0x0b10
+#define HI846_REG_UNKNOWN_0B12 0x0b12
+#define HI846_REG_UNKNOWN_0B14 0x0b14
+
+/* BLC (Black Level Calibration) */
+#define HI846_REG_BLC_CTL0 0x0c00
+
+#define HI846_REG_UNKNOWN_0C06 0x0c06
+#define HI846_REG_UNKNOWN_0C10 0x0c10
+#define HI846_REG_UNKNOWN_0C12 0x0c12
+#define HI846_REG_UNKNOWN_0C14 0x0c14
+#define HI846_REG_UNKNOWN_0C16 0x0c16
+
+#define HI846_REG_UNKNOWN_0E04 0x0e04
+
+#define HI846_REG_CHIP_ID_L 0x0f16
+#define HI846_REG_CHIP_ID_H 0x0f17
+#define HI846_CHIP_ID_L 0x46
+#define HI846_CHIP_ID_H 0x08
+
+#define HI846_REG_UNKNOWN_0F04 0x0f04
+#define HI846_REG_UNKNOWN_0F08 0x0f08
+
+/* PLL */
+#define HI846_REG_PLL_CFG_MIPI2_H 0x0f2a
+#define HI846_REG_PLL_CFG_MIPI2_L 0x0f2b
+
+#define HI846_REG_UNKNOWN_0F30 0x0f30
+#define HI846_REG_PLL_CFG_RAMP1_H 0x0f32
+#define HI846_REG_UNKNOWN_0F36 0x0f36
+#define HI846_REG_PLL_CFG_MIPI1_H 0x0f38
+
+#define HI846_REG_UNKNOWN_2008 0x2008
+#define HI846_REG_UNKNOWN_326E 0x326e
+
+struct hi846_reg {
+ u16 address;
+ u16 val;
+};
+
+struct hi846_reg_list {
+ u32 num_of_regs;
+ const struct hi846_reg *regs;
+};
+
+struct hi846_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 llp;
+
+ /* Link frequency needed for this resolution */
+ u8 link_freq_index;
+
+ u16 fps;
+
+ /* Vertical timing size */
+ u16 frame_len;
+
+ const struct hi846_reg_list reg_list_config;
+ const struct hi846_reg_list reg_list_2lane;
+ const struct hi846_reg_list reg_list_4lane;
+
+ /* Position inside of the 3264x2448 pixel array */
+ struct v4l2_rect crop;
+};
+
+static const struct hi846_reg hi846_init_2lane[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ /* regs below are unknown */
+ {0x2000, 0x100a},
+ {0x2002, 0x00ff},
+ {0x2004, 0x0007},
+ {0x2006, 0x3fff},
+ {0x2008, 0x3fff},
+ {0x200a, 0xc216},
+ {0x200c, 0x1292},
+ {0x200e, 0xc01a},
+ {0x2010, 0x403d},
+ {0x2012, 0x000e},
+ {0x2014, 0x403e},
+ {0x2016, 0x0b80},
+ {0x2018, 0x403f},
+ {0x201a, 0x82ae},
+ {0x201c, 0x1292},
+ {0x201e, 0xc00c},
+ {0x2020, 0x4130},
+ {0x2022, 0x43e2},
+ {0x2024, 0x0180},
+ {0x2026, 0x4130},
+ {0x2028, 0x7400},
+ {0x202a, 0x5000},
+ {0x202c, 0x0253},
+ {0x202e, 0x0ad1},
+ {0x2030, 0x2360},
+ {0x2032, 0x0009},
+ {0x2034, 0x5020},
+ {0x2036, 0x000b},
+ {0x2038, 0x0002},
+ {0x203a, 0x0044},
+ {0x203c, 0x0016},
+ {0x203e, 0x1792},
+ {0x2040, 0x7002},
+ {0x2042, 0x154f},
+ {0x2044, 0x00d5},
+ {0x2046, 0x000b},
+ {0x2048, 0x0019},
+ {0x204a, 0x1698},
+ {0x204c, 0x000e},
+ {0x204e, 0x099a},
+ {0x2050, 0x0058},
+ {0x2052, 0x7000},
+ {0x2054, 0x1799},
+ {0x2056, 0x0310},
+ {0x2058, 0x03c3},
+ {0x205a, 0x004c},
+ {0x205c, 0x064a},
+ {0x205e, 0x0001},
+ {0x2060, 0x0007},
+ {0x2062, 0x0bc7},
+ {0x2064, 0x0055},
+ {0x2066, 0x7000},
+ {0x2068, 0x1550},
+ {0x206a, 0x158a},
+ {0x206c, 0x0004},
+ {0x206e, 0x1488},
+ {0x2070, 0x7010},
+ {0x2072, 0x1508},
+ {0x2074, 0x0004},
+ {0x2076, 0x0016},
+ {0x2078, 0x03d5},
+ {0x207a, 0x0055},
+ {0x207c, 0x08ca},
+ {0x207e, 0x2019},
+ {0x2080, 0x0007},
+ {0x2082, 0x7057},
+ {0x2084, 0x0fc7},
+ {0x2086, 0x5041},
+ {0x2088, 0x12c8},
+ {0x208a, 0x5060},
+ {0x208c, 0x5080},
+ {0x208e, 0x2084},
+ {0x2090, 0x12c8},
+ {0x2092, 0x7800},
+ {0x2094, 0x0802},
+ {0x2096, 0x040f},
+ {0x2098, 0x1007},
+ {0x209a, 0x0803},
+ {0x209c, 0x080b},
+ {0x209e, 0x3803},
+ {0x20a0, 0x0807},
+ {0x20a2, 0x0404},
+ {0x20a4, 0x0400},
+ {0x20a6, 0xffff},
+ {0x20a8, 0xf0b2},
+ {0x20aa, 0xffef},
+ {0x20ac, 0x0a84},
+ {0x20ae, 0x1292},
+ {0x20b0, 0xc02e},
+ {0x20b2, 0x4130},
+ {0x23fe, 0xc056},
+ {0x3232, 0xfc0c},
+ {0x3236, 0xfc22},
+ {0x3248, 0xfca8},
+ {0x326a, 0x8302},
+ {0x326c, 0x830a},
+ {0x326e, 0x0000},
+ {0x32ca, 0xfc28},
+ {0x32cc, 0xc3bc},
+ {0x32ce, 0xc34c},
+ {0x32d0, 0xc35a},
+ {0x32d2, 0xc368},
+ {0x32d4, 0xc376},
+ {0x32d6, 0xc3c2},
+ {0x32d8, 0xc3e6},
+ {0x32da, 0x0003},
+ {0x32dc, 0x0003},
+ {0x32de, 0x00c7},
+ {0x32e0, 0x0031},
+ {0x32e2, 0x0031},
+ {0x32e4, 0x0031},
+ {0x32e6, 0xfc28},
+ {0x32e8, 0xc3bc},
+ {0x32ea, 0xc384},
+ {0x32ec, 0xc392},
+ {0x32ee, 0xc3a0},
+ {0x32f0, 0xc3ae},
+ {0x32f2, 0xc3c4},
+ {0x32f4, 0xc3e6},
+ {0x32f6, 0x0003},
+ {0x32f8, 0x0003},
+ {0x32fa, 0x00c7},
+ {0x32fc, 0x0031},
+ {0x32fe, 0x0031},
+ {0x3300, 0x0031},
+ {0x3302, 0x82ca},
+ {0x3304, 0xc164},
+ {0x3306, 0x82e6},
+ {0x3308, 0xc19c},
+ {0x330a, 0x001f},
+ {0x330c, 0x001a},
+ {0x330e, 0x0034},
+ {0x3310, 0x0000},
+ {0x3312, 0x0000},
+ {0x3314, 0xfc94},
+ {0x3316, 0xc3d8},
+ /* regs above are unknown */
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_UNKNOWN_0E04, 0x0012},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x1111},
+ {HI846_REG_Y_ODD_INC_VACT, 0x1111},
+ {HI846_REG_UNKNOWN_0022, 0x0008},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0040},
+ {HI846_REG_UNKNOWN_0028, 0x0017},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x09cf},
+ {HI846_REG_UNKNOWN_005C, 0x2101},
+ {HI846_REG_FLL, 0x09de},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_IMAGE_ORIENTATION, 0x0100},
+ {HI846_REG_BINNING_MODE, 0x0022},
+ {HI846_REG_HBIN_MODE, 0x0000},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x0000},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0cc0},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x0990},
+ {HI846_REG_EXPOSURE, 0x09d8},
+ {HI846_REG_ANALOG_GAIN, 0x0000},
+ {HI846_REG_GROUPED_PARA_HOLD, 0x0000},
+ {HI846_REG_UNKNOWN_051E, 0x0000},
+ {HI846_REG_UNKNOWN_0200, 0x0400},
+ {HI846_REG_PEDESTAL_EN, 0x0c00},
+ {HI846_REG_UNKNOWN_0A0C, 0x0010},
+ {HI846_REG_UNKNOWN_0A1E, 0x0ccf},
+ {HI846_REG_UNKNOWN_0402, 0x0110},
+ {HI846_REG_UNKNOWN_0404, 0x00f4},
+ {HI846_REG_UNKNOWN_0408, 0x0000},
+ {HI846_REG_UNKNOWN_0410, 0x008d},
+ {HI846_REG_UNKNOWN_0412, 0x011a},
+ {HI846_REG_UNKNOWN_0414, 0x864c},
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+ {HI846_REG_BLC_CTL0, 0x9150},
+ {HI846_REG_UNKNOWN_0C06, 0x0021},
+ {HI846_REG_UNKNOWN_0C10, 0x0040},
+ {HI846_REG_UNKNOWN_0C12, 0x0040},
+ {HI846_REG_UNKNOWN_0C14, 0x0040},
+ {HI846_REG_UNKNOWN_0C16, 0x0040},
+ {HI846_REG_FAST_STANDBY_MODE, 0x0100},
+ {HI846_REG_ISP_EN_H, 0x014a},
+ {HI846_REG_UNKNOWN_0418, 0x0000},
+ {HI846_REG_UNKNOWN_012A, 0x03b4},
+ {HI846_REG_X_ADDR_START_HACT_H, 0x0046},
+ {HI846_REG_X_ADDR_END_HACT_H, 0x0376},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x6821},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0001},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+ {HI846_REG_UNKNOWN_0900, 0x0320},
+ {HI846_REG_MIPI_TX_OP_MODE, 0xc31a},
+ {HI846_REG_UNKNOWN_0914, 0xc109},
+ {HI846_REG_TCLK_PREPARE, 0x061a},
+ {HI846_REG_UNKNOWN_0918, 0x0306},
+ {HI846_REG_THS_ZERO, 0x0b09},
+ {HI846_REG_TCLK_POST, 0x0c07},
+ {HI846_REG_UNKNOWN_091E, 0x0a00},
+ {HI846_REG_UNKNOWN_090C, 0x042a},
+ {HI846_REG_UNKNOWN_090E, 0x006b},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca00},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_UNKNOWN_0F08, 0x2f04},
+ {HI846_REG_UNKNOWN_0F30, 0x001f},
+ {HI846_REG_UNKNOWN_0F36, 0x001f},
+ {HI846_REG_UNKNOWN_0F04, 0x3a00},
+ {HI846_REG_PLL_CFG_RAMP1_H, 0x025a},
+ {HI846_REG_PLL_CFG_MIPI1_H, 0x025a},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x0024},
+ {HI846_REG_UNKNOWN_006A, 0x0100},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg hi846_init_4lane[] = {
+ {0x2000, 0x987a},
+ {0x2002, 0x00ff},
+ {0x2004, 0x0047},
+ {0x2006, 0x3fff},
+ {0x2008, 0x3fff},
+ {0x200a, 0xc216},
+ {0x200c, 0x1292},
+ {0x200e, 0xc01a},
+ {0x2010, 0x403d},
+ {0x2012, 0x000e},
+ {0x2014, 0x403e},
+ {0x2016, 0x0b80},
+ {0x2018, 0x403f},
+ {0x201a, 0x82ae},
+ {0x201c, 0x1292},
+ {0x201e, 0xc00c},
+ {0x2020, 0x4130},
+ {0x2022, 0x43e2},
+ {0x2024, 0x0180},
+ {0x2026, 0x4130},
+ {0x2028, 0x7400},
+ {0x202a, 0x5000},
+ {0x202c, 0x0253},
+ {0x202e, 0x0ad1},
+ {0x2030, 0x2360},
+ {0x2032, 0x0009},
+ {0x2034, 0x5020},
+ {0x2036, 0x000b},
+ {0x2038, 0x0002},
+ {0x203a, 0x0044},
+ {0x203c, 0x0016},
+ {0x203e, 0x1792},
+ {0x2040, 0x7002},
+ {0x2042, 0x154f},
+ {0x2044, 0x00d5},
+ {0x2046, 0x000b},
+ {0x2048, 0x0019},
+ {0x204a, 0x1698},
+ {0x204c, 0x000e},
+ {0x204e, 0x099a},
+ {0x2050, 0x0058},
+ {0x2052, 0x7000},
+ {0x2054, 0x1799},
+ {0x2056, 0x0310},
+ {0x2058, 0x03c3},
+ {0x205a, 0x004c},
+ {0x205c, 0x064a},
+ {0x205e, 0x0001},
+ {0x2060, 0x0007},
+ {0x2062, 0x0bc7},
+ {0x2064, 0x0055},
+ {0x2066, 0x7000},
+ {0x2068, 0x1550},
+ {0x206a, 0x158a},
+ {0x206c, 0x0004},
+ {0x206e, 0x1488},
+ {0x2070, 0x7010},
+ {0x2072, 0x1508},
+ {0x2074, 0x0004},
+ {0x2076, 0x0016},
+ {0x2078, 0x03d5},
+ {0x207a, 0x0055},
+ {0x207c, 0x08ca},
+ {0x207e, 0x2019},
+ {0x2080, 0x0007},
+ {0x2082, 0x7057},
+ {0x2084, 0x0fc7},
+ {0x2086, 0x5041},
+ {0x2088, 0x12c8},
+ {0x208a, 0x5060},
+ {0x208c, 0x5080},
+ {0x208e, 0x2084},
+ {0x2090, 0x12c8},
+ {0x2092, 0x7800},
+ {0x2094, 0x0802},
+ {0x2096, 0x040f},
+ {0x2098, 0x1007},
+ {0x209a, 0x0803},
+ {0x209c, 0x080b},
+ {0x209e, 0x3803},
+ {0x20a0, 0x0807},
+ {0x20a2, 0x0404},
+ {0x20a4, 0x0400},
+ {0x20a6, 0xffff},
+ {0x20a8, 0xf0b2},
+ {0x20aa, 0xffef},
+ {0x20ac, 0x0a84},
+ {0x20ae, 0x1292},
+ {0x20b0, 0xc02e},
+ {0x20b2, 0x4130},
+ {0x20b4, 0xf0b2},
+ {0x20b6, 0xffbf},
+ {0x20b8, 0x2004},
+ {0x20ba, 0x403f},
+ {0x20bc, 0x00c3},
+ {0x20be, 0x4fe2},
+ {0x20c0, 0x8318},
+ {0x20c2, 0x43cf},
+ {0x20c4, 0x0000},
+ {0x20c6, 0x9382},
+ {0x20c8, 0xc314},
+ {0x20ca, 0x2003},
+ {0x20cc, 0x12b0},
+ {0x20ce, 0xcab0},
+ {0x20d0, 0x4130},
+ {0x20d2, 0x12b0},
+ {0x20d4, 0xc90a},
+ {0x20d6, 0x4130},
+ {0x20d8, 0x42d2},
+ {0x20da, 0x8318},
+ {0x20dc, 0x00c3},
+ {0x20de, 0x9382},
+ {0x20e0, 0xc314},
+ {0x20e2, 0x2009},
+ {0x20e4, 0x120b},
+ {0x20e6, 0x120a},
+ {0x20e8, 0x1209},
+ {0x20ea, 0x1208},
+ {0x20ec, 0x1207},
+ {0x20ee, 0x1206},
+ {0x20f0, 0x4030},
+ {0x20f2, 0xc15e},
+ {0x20f4, 0x4130},
+ {0x20f6, 0x1292},
+ {0x20f8, 0xc008},
+ {0x20fa, 0x4130},
+ {0x20fc, 0x42d2},
+ {0x20fe, 0x82a1},
+ {0x2100, 0x00c2},
+ {0x2102, 0x1292},
+ {0x2104, 0xc040},
+ {0x2106, 0x4130},
+ {0x2108, 0x1292},
+ {0x210a, 0xc006},
+ {0x210c, 0x42a2},
+ {0x210e, 0x7324},
+ {0x2110, 0x9382},
+ {0x2112, 0xc314},
+ {0x2114, 0x2011},
+ {0x2116, 0x425f},
+ {0x2118, 0x82a1},
+ {0x211a, 0xf25f},
+ {0x211c, 0x00c1},
+ {0x211e, 0xf35f},
+ {0x2120, 0x2406},
+ {0x2122, 0x425f},
+ {0x2124, 0x00c0},
+ {0x2126, 0xf37f},
+ {0x2128, 0x522f},
+ {0x212a, 0x4f82},
+ {0x212c, 0x7324},
+ {0x212e, 0x425f},
+ {0x2130, 0x82d4},
+ {0x2132, 0xf35f},
+ {0x2134, 0x4fc2},
+ {0x2136, 0x01b3},
+ {0x2138, 0x93c2},
+ {0x213a, 0x829f},
+ {0x213c, 0x2421},
+ {0x213e, 0x403e},
+ {0x2140, 0xfffe},
+ {0x2142, 0x40b2},
+ {0x2144, 0xec78},
+ {0x2146, 0x831c},
+ {0x2148, 0x40b2},
+ {0x214a, 0xec78},
+ {0x214c, 0x831e},
+ {0x214e, 0x40b2},
+ {0x2150, 0xec78},
+ {0x2152, 0x8320},
+ {0x2154, 0xb3d2},
+ {0x2156, 0x008c},
+ {0x2158, 0x2405},
+ {0x215a, 0x4e0f},
+ {0x215c, 0x503f},
+ {0x215e, 0xffd8},
+ {0x2160, 0x4f82},
+ {0x2162, 0x831c},
+ {0x2164, 0x90f2},
+ {0x2166, 0x0003},
+ {0x2168, 0x008c},
+ {0x216a, 0x2401},
+ {0x216c, 0x4130},
+ {0x216e, 0x421f},
+ {0x2170, 0x831c},
+ {0x2172, 0x5e0f},
+ {0x2174, 0x4f82},
+ {0x2176, 0x831e},
+ {0x2178, 0x5e0f},
+ {0x217a, 0x4f82},
+ {0x217c, 0x8320},
+ {0x217e, 0x3ff6},
+ {0x2180, 0x432e},
+ {0x2182, 0x3fdf},
+ {0x2184, 0x421f},
+ {0x2186, 0x7100},
+ {0x2188, 0x4f0e},
+ {0x218a, 0x503e},
+ {0x218c, 0xffd8},
+ {0x218e, 0x4e82},
+ {0x2190, 0x7a04},
+ {0x2192, 0x421e},
+ {0x2194, 0x831c},
+ {0x2196, 0x5f0e},
+ {0x2198, 0x4e82},
+ {0x219a, 0x7a06},
+ {0x219c, 0x0b00},
+ {0x219e, 0x7304},
+ {0x21a0, 0x0050},
+ {0x21a2, 0x40b2},
+ {0x21a4, 0xd081},
+ {0x21a6, 0x0b88},
+ {0x21a8, 0x421e},
+ {0x21aa, 0x831e},
+ {0x21ac, 0x5f0e},
+ {0x21ae, 0x4e82},
+ {0x21b0, 0x7a0e},
+ {0x21b2, 0x521f},
+ {0x21b4, 0x8320},
+ {0x21b6, 0x4f82},
+ {0x21b8, 0x7a10},
+ {0x21ba, 0x0b00},
+ {0x21bc, 0x7304},
+ {0x21be, 0x007a},
+ {0x21c0, 0x40b2},
+ {0x21c2, 0x0081},
+ {0x21c4, 0x0b88},
+ {0x21c6, 0x4392},
+ {0x21c8, 0x7a0a},
+ {0x21ca, 0x0800},
+ {0x21cc, 0x7a0c},
+ {0x21ce, 0x0b00},
+ {0x21d0, 0x7304},
+ {0x21d2, 0x022b},
+ {0x21d4, 0x40b2},
+ {0x21d6, 0xd081},
+ {0x21d8, 0x0b88},
+ {0x21da, 0x0b00},
+ {0x21dc, 0x7304},
+ {0x21de, 0x0255},
+ {0x21e0, 0x40b2},
+ {0x21e2, 0x0081},
+ {0x21e4, 0x0b88},
+ {0x21e6, 0x4130},
+ {0x23fe, 0xc056},
+ {0x3232, 0xfc0c},
+ {0x3236, 0xfc22},
+ {0x3238, 0xfcfc},
+ {0x323a, 0xfd84},
+ {0x323c, 0xfd08},
+ {0x3246, 0xfcd8},
+ {0x3248, 0xfca8},
+ {0x324e, 0xfcb4},
+ {0x326a, 0x8302},
+ {0x326c, 0x830a},
+ {0x326e, 0x0000},
+ {0x32ca, 0xfc28},
+ {0x32cc, 0xc3bc},
+ {0x32ce, 0xc34c},
+ {0x32d0, 0xc35a},
+ {0x32d2, 0xc368},
+ {0x32d4, 0xc376},
+ {0x32d6, 0xc3c2},
+ {0x32d8, 0xc3e6},
+ {0x32da, 0x0003},
+ {0x32dc, 0x0003},
+ {0x32de, 0x00c7},
+ {0x32e0, 0x0031},
+ {0x32e2, 0x0031},
+ {0x32e4, 0x0031},
+ {0x32e6, 0xfc28},
+ {0x32e8, 0xc3bc},
+ {0x32ea, 0xc384},
+ {0x32ec, 0xc392},
+ {0x32ee, 0xc3a0},
+ {0x32f0, 0xc3ae},
+ {0x32f2, 0xc3c4},
+ {0x32f4, 0xc3e6},
+ {0x32f6, 0x0003},
+ {0x32f8, 0x0003},
+ {0x32fa, 0x00c7},
+ {0x32fc, 0x0031},
+ {0x32fe, 0x0031},
+ {0x3300, 0x0031},
+ {0x3302, 0x82ca},
+ {0x3304, 0xc164},
+ {0x3306, 0x82e6},
+ {0x3308, 0xc19c},
+ {0x330a, 0x001f},
+ {0x330c, 0x001a},
+ {0x330e, 0x0034},
+ {0x3310, 0x0000},
+ {0x3312, 0x0000},
+ {0x3314, 0xfc94},
+ {0x3316, 0xc3d8},
+
+ {0x0a00, 0x0000},
+ {0x0e04, 0x0012},
+ {0x002e, 0x1111},
+ {0x0032, 0x1111},
+ {0x0022, 0x0008},
+ {0x0026, 0x0040},
+ {0x0028, 0x0017},
+ {0x002c, 0x09cf},
+ {0x005c, 0x2101},
+ {0x0006, 0x09de},
+ {0x0008, 0x0ed8},
+ {0x000e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0a22, 0x0000},
+ {0x0a24, 0x0000},
+ {0x0804, 0x0000},
+ {0x0a12, 0x0cc0},
+ {0x0a14, 0x0990},
+ {0x0074, 0x09d8},
+ {0x0076, 0x0000},
+ {0x051e, 0x0000},
+ {0x0200, 0x0400},
+ {0x0a1a, 0x0c00},
+ {0x0a0c, 0x0010},
+ {0x0a1e, 0x0ccf},
+ {0x0402, 0x0110},
+ {0x0404, 0x00f4},
+ {0x0408, 0x0000},
+ {0x0410, 0x008d},
+ {0x0412, 0x011a},
+ {0x0414, 0x864c},
+ /* for OTP */
+ {0x021c, 0x0003},
+ {0x021e, 0x0235},
+ /* for OTP */
+ {0x0c00, 0x9950},
+ {0x0c06, 0x0021},
+ {0x0c10, 0x0040},
+ {0x0c12, 0x0040},
+ {0x0c14, 0x0040},
+ {0x0c16, 0x0040},
+ {0x0a02, 0x0100},
+ {0x0a04, 0x015a},
+ {0x0418, 0x0000},
+ {0x0128, 0x0028},
+ {0x012a, 0xffff},
+ {0x0120, 0x0046},
+ {0x0122, 0x0376},
+ {0x012c, 0x0020},
+ {0x012e, 0xffff},
+ {0x0124, 0x0040},
+ {0x0126, 0x0378},
+ {0x0746, 0x0050},
+ {0x0748, 0x01d5},
+ {0x074a, 0x022b},
+ {0x074c, 0x03b0},
+ {0x0756, 0x043f},
+ {0x0758, 0x3f1d},
+ {0x0b02, 0xe04d},
+ {0x0b10, 0x6821},
+ {0x0b12, 0x0120},
+ {0x0b14, 0x0001},
+ {0x2008, 0x38fd},
+ {0x326e, 0x0000},
+ {0x0900, 0x0300},
+ {0x0902, 0xc319},
+ {0x0914, 0xc109},
+ {0x0916, 0x061a},
+ {0x0918, 0x0407},
+ {0x091a, 0x0a0b},
+ {0x091c, 0x0e08},
+ {0x091e, 0x0a00},
+ {0x090c, 0x0427},
+ {0x090e, 0x0059},
+ {0x0954, 0x0089},
+ {0x0956, 0x0000},
+ {0x0958, 0xca80},
+ {0x095a, 0x9240},
+ {0x0f08, 0x2f04},
+ {0x0f30, 0x001f},
+ {0x0f36, 0x001f},
+ {0x0f04, 0x3a00},
+ {0x0f32, 0x025a},
+ {0x0f38, 0x025a},
+ {0x0f2a, 0x4124},
+ {0x006a, 0x0100},
+ {0x004c, 0x0100},
+ {0x0044, 0x0001},
+};
+
+static const struct hi846_reg mode_640x480_config[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x7711},
+ {HI846_REG_Y_ODD_INC_VACT, 0x7711},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0148},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x08c7},
+ {HI846_REG_UNKNOWN_005C, 0x4404},
+ {HI846_REG_FLL, 0x0277},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_BINNING_MODE, 0x0322},
+ {HI846_REG_HBIN_MODE, 0x0200},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x0058},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0280},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x01e0},
+
+ /* For OTP */
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+
+ {HI846_REG_ISP_EN_H, 0x016a},
+ {HI846_REG_UNKNOWN_0418, 0x0210},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x7021},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0001},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+};
+
+static const struct hi846_reg mode_640x480_mipi_2lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0x4319},
+ {HI846_REG_UNKNOWN_0914, 0xc105},
+ {HI846_REG_TCLK_PREPARE, 0x030c},
+ {HI846_REG_UNKNOWN_0918, 0x0304},
+ {HI846_REG_THS_ZERO, 0x0708},
+ {HI846_REG_TCLK_POST, 0x0b04},
+ {HI846_REG_UNKNOWN_091E, 0x0500},
+ {HI846_REG_UNKNOWN_090C, 0x0208},
+ {HI846_REG_UNKNOWN_090E, 0x009a},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4924},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1280x720_config[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x3311},
+ {HI846_REG_Y_ODD_INC_VACT, 0x3311},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0238},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x07d7},
+ {HI846_REG_UNKNOWN_005C, 0x4202},
+ {HI846_REG_FLL, 0x034a},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_BINNING_MODE, 0x0122},
+ {HI846_REG_HBIN_MODE, 0x0100},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x00b0},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0500},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x02d0},
+ {HI846_REG_EXPOSURE, 0x0344},
+
+ /* For OTP */
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+
+ {HI846_REG_ISP_EN_H, 0x016a},
+ {HI846_REG_UNKNOWN_0418, 0x0410},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x6c21},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0005},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+};
+
+static const struct hi846_reg mode_1280x720_mipi_2lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0x4319},
+ {HI846_REG_UNKNOWN_0914, 0xc109},
+ {HI846_REG_TCLK_PREPARE, 0x061a},
+ {HI846_REG_UNKNOWN_0918, 0x0407},
+ {HI846_REG_THS_ZERO, 0x0a0b},
+ {HI846_REG_TCLK_POST, 0x0e08},
+ {HI846_REG_UNKNOWN_091E, 0x0a00},
+ {HI846_REG_UNKNOWN_090C, 0x0427},
+ {HI846_REG_UNKNOWN_090E, 0x0145},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4124},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1280x720_mipi_4lane[] = {
+ /* 360Mbps */
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0xc319},
+ {HI846_REG_UNKNOWN_0914, 0xc105},
+ {HI846_REG_TCLK_PREPARE, 0x030c},
+ {HI846_REG_UNKNOWN_0918, 0x0304},
+ {HI846_REG_THS_ZERO, 0x0708},
+ {HI846_REG_TCLK_POST, 0x0b04},
+ {HI846_REG_UNKNOWN_091E, 0x0500},
+ {HI846_REG_UNKNOWN_090C, 0x0208},
+ {HI846_REG_UNKNOWN_090E, 0x008a},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4924},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1632x1224_config[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x3311},
+ {HI846_REG_Y_ODD_INC_VACT, 0x3311},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0040},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x09cf},
+ {HI846_REG_UNKNOWN_005C, 0x4202},
+ {HI846_REG_FLL, 0x09de},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_BINNING_MODE, 0x0122},
+ {HI846_REG_HBIN_MODE, 0x0100},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x0000},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0660},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x04c8},
+ {HI846_REG_EXPOSURE, 0x09d8},
+
+ /* For OTP */
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+
+ {HI846_REG_ISP_EN_H, 0x016a},
+ {HI846_REG_UNKNOWN_0418, 0x0000},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x6c21},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0005},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+};
+
+static const struct hi846_reg mode_1632x1224_mipi_2lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0x4319},
+ {HI846_REG_UNKNOWN_0914, 0xc109},
+ {HI846_REG_TCLK_PREPARE, 0x061a},
+ {HI846_REG_UNKNOWN_0918, 0x0407},
+ {HI846_REG_THS_ZERO, 0x0a0b},
+ {HI846_REG_TCLK_POST, 0x0e08},
+ {HI846_REG_UNKNOWN_091E, 0x0a00},
+ {HI846_REG_UNKNOWN_090C, 0x0427},
+ {HI846_REG_UNKNOWN_090E, 0x0069},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4124},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1632x1224_mipi_4lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0xc319},
+ {HI846_REG_UNKNOWN_0914, 0xc105},
+ {HI846_REG_TCLK_PREPARE, 0x030c},
+ {HI846_REG_UNKNOWN_0918, 0x0304},
+ {HI846_REG_THS_ZERO, 0x0708},
+ {HI846_REG_TCLK_POST, 0x0b04},
+ {HI846_REG_UNKNOWN_091E, 0x0500},
+ {HI846_REG_UNKNOWN_090C, 0x0208},
+ {HI846_REG_UNKNOWN_090E, 0x001c},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4924},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const char * const hi846_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Colour",
+ "100% Colour Bars",
+ "Fade To Grey Colour Bars",
+ "PN9",
+ "Gradient Horizontal",
+ "Gradient Vertical",
+ "Check Board",
+ "Slant Pattern",
+ "Resolution Pattern",
+};
+
+#define FREQ_INDEX_640 0
+#define FREQ_INDEX_1280 1
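+/* Link frequencies in Hz: the 640x480 mode runs at 80 MHz, the larger modes at 200 MHz. */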
+static const s64 hi846_link_freqs[] = {
+ [FREQ_INDEX_640] = 80000000,
+ [FREQ_INDEX_1280] = 200000000,
+};
+
+static const struct hi846_reg_list hi846_init_regs_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(hi846_init_2lane),
+ .regs = hi846_init_2lane,
+};
+
+static const struct hi846_reg_list hi846_init_regs_list_4lane = {
+ .num_of_regs = ARRAY_SIZE(hi846_init_4lane),
+ .regs = hi846_init_4lane,
+};
+
+static const struct hi846_mode supported_modes[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .link_freq_index = FREQ_INDEX_640,
+ .fps = 120,
+ .frame_len = 631,
+ .llp = HI846_LINE_LENGTH,
+ .reg_list_config = {
+ .num_of_regs = ARRAY_SIZE(mode_640x480_config),
+ .regs = mode_640x480_config,
+ },
+ .reg_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(mode_640x480_mipi_2lane),
+ .regs = mode_640x480_mipi_2lane,
+ },
+ .reg_list_4lane = {
+ .num_of_regs = 0,
+ },
+ .crop = {
+ .left = 0x58,
+ .top = 0x148,
+ .width = 640 * 4,
+ .height = 480 * 4,
+ },
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .link_freq_index = FREQ_INDEX_1280,
+ .fps = 90,
+ .frame_len = 842,
+ .llp = HI846_LINE_LENGTH,
+ .reg_list_config = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_config),
+ .regs = mode_1280x720_config,
+ },
+ .reg_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_mipi_2lane),
+ .regs = mode_1280x720_mipi_2lane,
+ },
+ .reg_list_4lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_mipi_4lane),
+ .regs = mode_1280x720_mipi_4lane,
+ },
+ .crop = {
+ .left = 0xb0,
+ .top = 0x238,
+ .width = 1280 * 2,
+ .height = 720 * 2,
+ },
+ },
+ {
+ .width = 1632,
+ .height = 1224,
+ .link_freq_index = FREQ_INDEX_1280,
+ .fps = 30,
+ .frame_len = 2526,
+ .llp = HI846_LINE_LENGTH,
+ .reg_list_config = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_config),
+ .regs = mode_1632x1224_config,
+ },
+ .reg_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_mipi_2lane),
+ .regs = mode_1632x1224_mipi_2lane,
+ },
+ .reg_list_4lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_mipi_4lane),
+ .regs = mode_1632x1224_mipi_4lane,
+ },
+ .crop = {
+ .left = 0x0,
+ .top = 0x0,
+ .width = 1632 * 2,
+ .height = 1224 * 2,
+ },
+ }
+};
+
+struct hi846_datafmt {
+ u32 code;
+ enum v4l2_colorspace colorspace;
+};
+
+static const char * const hi846_supply_names[] = {
+ "vddio", /* Digital I/O (1.8V or 2.8V) */
+ "vdda", /* Analog (2.8V) */
+ "vddd", /* Digital Core (1.2V) */
+};
+
+#define HI846_NUM_SUPPLIES ARRAY_SIZE(hi846_supply_names)
+
+struct hi846 {
+ struct gpio_desc *rst_gpio;
+ struct gpio_desc *shutdown_gpio;
+ struct regulator_bulk_data supplies[HI846_NUM_SUPPLIES];
+ struct clk *clock;
+ const struct hi846_datafmt *fmt;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+ u8 nr_lanes;
+
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ struct mutex mutex; /* protect cur_mode, streaming and chip access */
+ const struct hi846_mode *cur_mode;
+ bool streaming;
+};
+
+static inline struct hi846 *to_hi846(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct hi846, sd);
+}
+
+static const struct hi846_datafmt hi846_colour_fmts[] = {
+ { HI846_MEDIA_BUS_FORMAT, V4L2_COLORSPACE_RAW },
+};
+
+static const struct hi846_datafmt *hi846_find_datafmt(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hi846_colour_fmts); i++)
+ if (hi846_colour_fmts[i].code == code)
+ return &hi846_colour_fmts[i];
+
+ return NULL;
+}
+
+static inline u8 hi846_get_link_freq_index(struct hi846 *hi846)
+{
+ return hi846->cur_mode->link_freq_index;
+}
+
+static u64 hi846_get_link_freq(struct hi846 *hi846)
+{
+ u8 index = hi846_get_link_freq_index(hi846);
+
+ return hi846_link_freqs[index];
+}
+
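+/*
+ * The CSI-2 D-PHY link is double data rate: each lane carries 2 * link_freq
+ * bits per second, and HI846_RGB_DEPTH bits make up one pixel.
+ */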
+static u64 hi846_calc_pixel_rate(struct hi846 *hi846)
+{
+ u64 link_freq = hi846_get_link_freq(hi846);
+ u64 pixel_rate = link_freq * 2 * hi846->nr_lanes;
+
+ do_div(pixel_rate, HI846_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
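+/* Read a single 8-bit value from a 16-bit big-endian register address. */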
+static int hi846_read_reg(struct hi846 *hi846, u16 reg, u8 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[1] = {0};
+ int ret;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = 1;
+ msgs[1].buf = data_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "i2c read error: %d\n", ret);
+ return -EIO;
+ }
+
+ *val = data_buf[0];
+
+ return 0;
+}
+
+static int hi846_write_reg(struct hi846 *hi846, u16 reg, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ u8 buf[3] = { reg >> 8, reg & 0xff, val };
+ struct i2c_msg msg[] = {
+ { .addr = client->addr, .flags = 0,
+ .len = ARRAY_SIZE(buf), .buf = buf },
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ dev_err(&client->dev, "i2c write error\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
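+/*
+ * 16-bit register write that threads an error pointer: once *err is set,
+ * later calls become no-ops, so a whole sequence of writes needs only a
+ * single error check at the end.
+ */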
+static void hi846_write_reg_16(struct hi846 *hi846, u16 reg, u16 val, int *err)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ u8 buf[4];
+ int ret;
+
+ if (*err < 0)
+ return;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be16(val, buf + 2);
+ ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ dev_err(&client->dev, "i2c_master_send != %zu: %d\n",
+ sizeof(buf), ret);
+ *err = -EIO;
+ }
+}
+
+static int hi846_write_reg_list(struct hi846 *hi846,
+ const struct hi846_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ hi846_write_reg_16(hi846, r_list->regs[i].address,
+ r_list->regs[i].val, &ret);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x: %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
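+/* Apply the same digital gain to all four Bayer channels (Gr, Gb, R, B). */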
+static int hi846_update_digital_gain(struct hi846 *hi846, u16 d_gain)
+{
+ int ret = 0;
+
+ hi846_write_reg_16(hi846, HI846_REG_MWB_GR_GAIN_H, d_gain, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_MWB_GB_GAIN_H, d_gain, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_MWB_R_GAIN_H, d_gain, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_MWB_B_GAIN_H, d_gain, &ret);
+
+ return ret;
+}
+
+static int hi846_test_pattern(struct hi846 *hi846, u32 pattern)
+{
+ int ret;
+ u8 val;
+
+ if (pattern) {
+ ret = hi846_read_reg(hi846, HI846_REG_ISP, &val);
+ if (ret)
+ return ret;
+
+ ret = hi846_write_reg(hi846, HI846_REG_ISP,
+ val | HI846_REG_ISP_TPG_EN);
+ if (ret)
+ return ret;
+ }
+
+ return hi846_write_reg(hi846, HI846_REG_TEST_PATTERN, pattern);
+}
+
+static int hi846_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hi846 *hi846 = container_of(ctrl->handler,
+ struct hi846, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ s64 exposure_max;
+ int ret = 0;
+ u32 shutter, frame_len;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = hi846->cur_mode->height + ctrl->val -
+ HI846_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(hi846->exposure,
+ hi846->exposure->minimum,
+ exposure_max, hi846->exposure->step,
+ exposure_max);
+ }
+
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = hi846_write_reg(hi846, HI846_REG_ANALOG_GAIN, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = hi846_update_digital_gain(hi846, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ shutter = ctrl->val;
+ frame_len = hi846->cur_mode->frame_len;
+
+ if (shutter > frame_len - 6) { /* margin */
+ frame_len = shutter + 6;
+ if (frame_len > 0xffff) { /* max frame len */
+ frame_len = 0xffff;
+ }
+ }
+
+ if (shutter < 6)
+ shutter = 6;
+ if (shutter > (0xffff - 6))
+ shutter = 0xffff - 6;
+
+ hi846_write_reg_16(hi846, HI846_REG_FLL, frame_len, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_EXPOSURE, shutter, &ret);
+ break;
+
+ case V4L2_CID_VBLANK:
+ /* Update FLL that meets expected vertical blanking */
+ hi846_write_reg_16(hi846, HI846_REG_FLL,
+ hi846->cur_mode->height + ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = hi846_test_pattern(hi846, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops hi846_ctrl_ops = {
+ .s_ctrl = hi846_set_ctrl,
+};
+
+static int hi846_init_controls(struct hi846 *hi846)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ struct v4l2_fwnode_device_properties props;
+
+ ctrl_hdlr = &hi846->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &hi846->mutex;
+
+ hi846->link_freq =
+ v4l2_ctrl_new_int_menu(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(hi846_link_freqs) - 1,
+ 0, hi846_link_freqs);
+ if (hi846->link_freq)
+ hi846->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hi846->pixel_rate =
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ hi846_calc_pixel_rate(hi846), 1,
+ hi846_calc_pixel_rate(hi846));
+ hi846->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_VBLANK,
+ hi846->cur_mode->frame_len -
+ hi846->cur_mode->height,
+ HI846_FLL_MAX -
+ hi846->cur_mode->height, 1,
+ hi846->cur_mode->frame_len -
+ hi846->cur_mode->height);
+
+ h_blank = hi846->cur_mode->llp - hi846->cur_mode->width;
+
+ hi846->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (hi846->hblank)
+ hi846->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ HI846_ANAL_GAIN_MIN, HI846_ANAL_GAIN_MAX,
+ HI846_ANAL_GAIN_STEP, HI846_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ HI846_DGTL_GAIN_MIN, HI846_DGTL_GAIN_MAX,
+ HI846_DGTL_GAIN_STEP, HI846_DGTL_GAIN_DEFAULT);
+ exposure_max = hi846->cur_mode->frame_len - HI846_EXPOSURE_MAX_MARGIN;
+ hi846->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ HI846_EXPOSURE_MIN, exposure_max,
+ HI846_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(hi846_test_pattern_menu) - 1,
+ 0, 0, hi846_test_pattern_menu);
+ if (ctrl_hdlr->error) {
+ dev_err(&client->dev, "v4l ctrl handler error: %d\n",
+ ctrl_hdlr->error);
+ ret = ctrl_hdlr->error;
+ goto error;
+ }
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &hi846_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
+ hi846->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ return ret;
+}
+
+static int hi846_set_video_mode(struct hi846 *hi846, int fps)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ u64 frame_length;
+ int ret = 0;
+ int dummy_lines;
+ u64 link_freq = hi846_get_link_freq(hi846);
+
+ dev_dbg(&client->dev, "%s: link freq: %llu\n", __func__,
+ hi846_get_link_freq(hi846));
+
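+ /* Lines per frame at the requested fps: link_freq / (fps * line length) */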
+ do_div(link_freq, fps);
+ frame_length = link_freq;
+ do_div(frame_length, HI846_LINE_LENGTH);
+
+ dummy_lines = (frame_length > hi846->cur_mode->frame_len) ?
+ (frame_length - hi846->cur_mode->frame_len) : 0;
+
+ frame_length = hi846->cur_mode->frame_len + dummy_lines;
+
+ dev_dbg(&client->dev, "%s: frame length calculated: %llu\n", __func__,
+ frame_length);
+
+ hi846_write_reg_16(hi846, HI846_REG_FLL, frame_length & 0xFFFF, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_LLP,
+ HI846_LINE_LENGTH & 0xFFFF, &ret);
+
+ return ret;
+}
+
+static int hi846_start_streaming(struct hi846 *hi846)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ int ret = 0;
+ u8 val;
+
+ if (hi846->nr_lanes == 2)
+ ret = hi846_write_reg_list(hi846, &hi846_init_regs_list_2lane);
+ else
+ ret = hi846_write_reg_list(hi846, &hi846_init_regs_list_4lane);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls: %d\n", ret);
+ return ret;
+ }
+
+ ret = hi846_write_reg_list(hi846, &hi846->cur_mode->reg_list_config);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode: %d\n", ret);
+ return ret;
+ }
+
+ if (hi846->nr_lanes == 2)
+ ret = hi846_write_reg_list(hi846,
+ &hi846->cur_mode->reg_list_2lane);
+ else
+ ret = hi846_write_reg_list(hi846,
+ &hi846->cur_mode->reg_list_4lane);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mipi mode: %d\n", ret);
+ return ret;
+ }
+
+ ret = hi846_set_video_mode(hi846, hi846->cur_mode->fps);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_handler_setup(hi846->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ /*
+ * Reading 0x0034 is done purely for debugging: it is not documented
+ * in the datasheet and is mentioned only once:
+ * "If 0x0034[2] bit is disabled , Visible pixel width and height is 0."
+ * Even though that sounds like nothing would be visible, we don't know
+ * more about it, so in that case just inform the user and do nothing
+ * more.
+ */
+ ret = hi846_read_reg(hi846, 0x0034, &val);
+ if (ret)
+ return ret;
+ if (!(val & BIT(2)))
+ dev_info(&client->dev, "visible pixel width and height is 0\n");
+
+ ret = hi846_write_reg(hi846, HI846_REG_MODE_SELECT,
+ HI846_MODE_STREAMING);
+ if (ret) {
+ dev_err(&client->dev, "failed to start stream");
+ return ret;
+ }
+
+ hi846->streaming = true;
+
+ dev_dbg(&client->dev, "%s: started streaming successfully\n", __func__);
+
+ return ret;
+}
+
+static void hi846_stop_streaming(struct hi846 *hi846)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+
+ if (hi846_write_reg(hi846, HI846_REG_MODE_SELECT, HI846_MODE_STANDBY))
+ dev_err(&client->dev, "failed to stop stream");
+
+ hi846->streaming = false;
+}
+
+static int hi846_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (hi846->streaming == enable)
+ return 0;
+
+ mutex_lock(&hi846->mutex);
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto out;
+ }
+
+ ret = hi846_start_streaming(hi846);
+ }
+
+ if (!enable || ret) {
+ hi846_stop_streaming(hi846);
+ pm_runtime_put(&client->dev);
+ }
+
+out:
+ mutex_unlock(&hi846->mutex);
+
+ return ret;
+}
+
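+/* Power-up sequence: enable regulators and MCLK, then release shutdown and reset. */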
+static int hi846_power_on(struct hi846 *hi846)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(HI846_NUM_SUPPLIES, hi846->supplies);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(hi846->clock);
+ if (ret < 0)
+ goto err_reg;
+
+ if (hi846->shutdown_gpio)
+ gpiod_set_value_cansleep(hi846->shutdown_gpio, 0);
+
+ /* 30us = 2400 cycles at 80 MHz */
+ usleep_range(30, 60);
+ if (hi846->rst_gpio)
+ gpiod_set_value_cansleep(hi846->rst_gpio, 0);
+ usleep_range(30, 60);
+
+ return 0;
+
+err_reg:
+ regulator_bulk_disable(HI846_NUM_SUPPLIES, hi846->supplies);
+
+ return ret;
+}
+
+static void hi846_power_off(struct hi846 *hi846)
+{
+ if (hi846->rst_gpio)
+ gpiod_set_value_cansleep(hi846->rst_gpio, 1);
+
+ if (hi846->shutdown_gpio)
+ gpiod_set_value_cansleep(hi846->shutdown_gpio, 1);
+
+ clk_disable_unprepare(hi846->clock);
+ regulator_bulk_disable(HI846_NUM_SUPPLIES, hi846->supplies);
+}
+
+static int __maybe_unused hi846_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi846 *hi846 = to_hi846(sd);
+
+ if (hi846->streaming)
+ hi846_stop_streaming(hi846);
+
+ hi846_power_off(hi846);
+
+ return 0;
+}
+
+static int __maybe_unused hi846_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi846 *hi846 = to_hi846(sd);
+ int ret;
+
+ ret = hi846_power_on(hi846);
+ if (ret)
+ return ret;
+
+ if (hi846->streaming) {
+ ret = hi846_start_streaming(hi846);
+ if (ret) {
+ dev_err(dev, "%s: start streaming failed: %d\n",
+ __func__, ret);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ hi846_power_off(hi846);
+ return ret;
+}
+
+static int hi846_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct hi846_datafmt *fmt = hi846_find_datafmt(mf->code);
+ u32 tgt_fps;
+ s32 vblank_def, h_blank;
+
+ if (!fmt) {
+ mf->code = hi846_colour_fmts[0].code;
+ mf->colorspace = hi846_colour_fmts[0].colorspace;
+ fmt = &hi846_colour_fmts[0];
+ }
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = *mf;
+ return 0;
+ }
+
+ if (hi846->nr_lanes == 2) {
+ if (!hi846->cur_mode->reg_list_2lane.num_of_regs) {
+ dev_err(&client->dev,
+ "this mode is not supported for 2 lanes\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!hi846->cur_mode->reg_list_4lane.num_of_regs) {
+ dev_err(&client->dev,
+ "this mode is not supported for 4 lanes\n");
+ return -EINVAL;
+ }
+ }
+
+ mutex_lock(&hi846->mutex);
+
+ if (hi846->streaming) {
+ mutex_unlock(&hi846->mutex);
+ return -EBUSY;
+ }
+
+ hi846->fmt = fmt;
+
+ hi846->cur_mode =
+ v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height, mf->width, mf->height);
+ dev_dbg(&client->dev, "%s: found mode: %dx%d\n", __func__,
+ hi846->cur_mode->width, hi846->cur_mode->height);
+
+ tgt_fps = hi846->cur_mode->fps;
+ dev_dbg(&client->dev, "%s: target fps: %d\n", __func__, tgt_fps);
+
+ mf->width = hi846->cur_mode->width;
+ mf->height = hi846->cur_mode->height;
+ mf->code = HI846_MEDIA_BUS_FORMAT;
+ mf->field = V4L2_FIELD_NONE;
+
+ __v4l2_ctrl_s_ctrl(hi846->link_freq, hi846_get_link_freq_index(hi846));
+ __v4l2_ctrl_s_ctrl_int64(hi846->pixel_rate,
+ hi846_calc_pixel_rate(hi846));
+
+ /* Update limits and set FPS to default */
+ vblank_def = hi846->cur_mode->frame_len - hi846->cur_mode->height;
+ __v4l2_ctrl_modify_range(hi846->vblank,
+ hi846->cur_mode->frame_len -
+ hi846->cur_mode->height,
+ HI846_FLL_MAX - hi846->cur_mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(hi846->vblank, vblank_def);
+
+ h_blank = hi846->cur_mode->llp - hi846->cur_mode->width;
+
+ __v4l2_ctrl_modify_range(hi846->hblank, h_blank, h_blank, 1,
+ h_blank);
+
+ dev_dbg(&client->dev, "Set fmt w=%d h=%d code=0x%x colorspace=0x%x\n",
+ mf->width, mf->height,
+ fmt->code, fmt->colorspace);
+
+ mutex_unlock(&hi846->mutex);
+
+ return 0;
+}
+
+static int hi846_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ format->format = *v4l2_subdev_get_try_format(&hi846->sd,
+ sd_state,
+ format->pad);
+ return 0;
+ }
+
+ mutex_lock(&hi846->mutex);
+ mf->code = HI846_MEDIA_BUS_FORMAT;
+ mf->colorspace = V4L2_COLORSPACE_RAW;
+ mf->field = V4L2_FIELD_NONE;
+ mf->width = hi846->cur_mode->width;
+ mf->height = hi846->cur_mode->height;
+ mutex_unlock(&hi846->mutex);
+ dev_dbg(&client->dev,
+ "Get format w=%d h=%d code=0x%x colorspace=0x%x\n",
+ mf->width, mf->height, mf->code, mf->colorspace);
+
+ return 0;
+}
+
+static int hi846_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index > 0)
+ return -EINVAL;
+
+ code->code = HI846_MEDIA_BUS_FORMAT;
+
+ return 0;
+}
+
+static int hi846_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (fse->pad || fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != HI846_MEDIA_BUS_FORMAT) {
+ dev_err(&client->dev, "frame size enum not matching\n");
+ return -EINVAL;
+ }
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = supported_modes[fse->index].width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = supported_modes[fse->index].height;
+
+ dev_dbg(&client->dev, "%s: max width: %d max height: %d\n", __func__,
+ fse->max_width, fse->max_height);
+
+ return 0;
+}
+
+static int hi846_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ mutex_lock(&hi846->mutex);
+ switch (sel->which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
+ break;
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ sel->r = hi846->cur_mode->crop;
+ break;
+ }
+ mutex_unlock(&hi846->mutex);
+ return 0;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = 3264;
+ sel->r.height = 2448;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hi846_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+
+ mutex_lock(&hi846->mutex);
+ mf->code = HI846_MEDIA_BUS_FORMAT;
+ mf->colorspace = V4L2_COLORSPACE_RAW;
+ mf->field = V4L2_FIELD_NONE;
+ mf->width = hi846->cur_mode->width;
+ mf->height = hi846->cur_mode->height;
+ mutex_unlock(&hi846->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops hi846_video_ops = {
+ .s_stream = hi846_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops hi846_pad_ops = {
+ .init_cfg = hi846_init_cfg,
+ .enum_frame_size = hi846_enum_frame_size,
+ .enum_mbus_code = hi846_enum_mbus_code,
+ .set_fmt = hi846_set_format,
+ .get_fmt = hi846_get_format,
+ .get_selection = hi846_get_selection,
+};
+
+static const struct v4l2_subdev_ops hi846_subdev_ops = {
+ .video = &hi846_video_ops,
+ .pad = &hi846_pad_ops,
+};
+
+static const struct media_entity_operations hi846_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int hi846_identify_module(struct hi846 *hi846)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ int ret;
+ u8 hi, lo;
+
+ ret = hi846_read_reg(hi846, HI846_REG_CHIP_ID_L, &lo);
+ if (ret)
+ return ret;
+
+ if (lo != HI846_CHIP_ID_L) {
+ dev_err(&client->dev, "wrong chip id low byte: %x", lo);
+ return -ENXIO;
+ }
+
+ ret = hi846_read_reg(hi846, HI846_REG_CHIP_ID_H, &hi);
+ if (ret)
+ return ret;
+
+ if (hi != HI846_CHIP_ID_H) {
+ dev_err(&client->dev, "wrong chip id high byte: %x", hi);
+ return -ENXIO;
+ }
+
+ dev_info(&client->dev, "chip id %02X %02X using %d mipi lanes\n",
+ hi, lo, hi846->nr_lanes);
+
+ return 0;
+}
+
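+/*
+ * Return the first supported link frequency that is missing from the
+ * endpoint's link-frequencies list, or 0 if all of them are present.
+ */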
+static s64 hi846_check_link_freqs(struct hi846 *hi846,
+ struct v4l2_fwnode_endpoint *ep)
+{
+ const s64 *freqs = hi846_link_freqs;
+ int freqs_count = ARRAY_SIZE(hi846_link_freqs);
+ int i, j;
+
+ for (i = 0; i < freqs_count; i++) {
+ for (j = 0; j < ep->nr_of_link_frequencies; j++)
+ if (freqs[i] == ep->link_frequencies[j])
+ break;
+ if (j == ep->nr_of_link_frequencies)
+ return freqs[i];
+ }
+
+ return 0;
+}
+
+static int hi846_parse_dt(struct hi846 *hi846, struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ int ret;
+ s64 fq;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep) {
+ dev_err(dev, "unable to find endpoint node\n");
+ return -ENXIO;
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret) {
+ dev_err(dev, "failed to parse endpoint node: %d\n", ret);
+ return ret;
+ }
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2 &&
+ bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return -EINVAL;
+ }
+
+ hi846->nr_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "link-frequencies property not found in DT\n");
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return -EINVAL;
+ }
+
+ /* Check that the link frequencies for all modes are present in the device tree */
+ fq = hi846_check_link_freqs(hi846, &bus_cfg);
+ if (fq) {
+ dev_err(dev, "Link frequency of %lld is not supported\n", fq);
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return -EINVAL;
+ }
+
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ hi846->rst_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(hi846->rst_gpio)) {
+ dev_err(dev, "failed to get reset gpio: %pe\n",
+ hi846->rst_gpio);
+ return PTR_ERR(hi846->rst_gpio);
+ }
+
+ hi846->shutdown_gpio = devm_gpiod_get_optional(dev, "shutdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(hi846->shutdown_gpio)) {
+ dev_err(dev, "failed to get shutdown gpio: %pe\n",
+ hi846->shutdown_gpio);
+ return PTR_ERR(hi846->shutdown_gpio);
+ }
+
+ return 0;
+}
+
+static int hi846_probe(struct i2c_client *client)
+{
+ struct hi846 *hi846;
+ int ret;
+ int i;
+ u32 mclk_freq;
+
+ hi846 = devm_kzalloc(&client->dev, sizeof(*hi846), GFP_KERNEL);
+ if (!hi846)
+ return -ENOMEM;
+
+ ret = hi846_parse_dt(hi846, &client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ hi846->clock = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(hi846->clock)) {
+ dev_err(&client->dev, "failed to get clock: %pe\n",
+ hi846->clock);
+ return PTR_ERR(hi846->clock);
+ }
+
+ mclk_freq = clk_get_rate(hi846->clock);
+ if (mclk_freq != 25000000)
+ dev_warn(&client->dev,
+ "External clock freq should be 25000000, not %u.\n",
+ mclk_freq);
+
+ for (i = 0; i < HI846_NUM_SUPPLIES; i++)
+ hi846->supplies[i].supply = hi846_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&client->dev, HI846_NUM_SUPPLIES,
+ hi846->supplies);
+ if (ret < 0)
+ return ret;
+
+ v4l2_i2c_subdev_init(&hi846->sd, client, &hi846_subdev_ops);
+
+ mutex_init(&hi846->mutex);
+
+ ret = hi846_power_on(hi846);
+ if (ret)
+ goto err_mutex;
+
+ ret = hi846_identify_module(hi846);
+ if (ret)
+ goto err_power_off;
+
+ hi846->cur_mode = &supported_modes[0];
+
+ ret = hi846_init_controls(hi846);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto err_power_off;
+ }
+
+ hi846->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ hi846->sd.entity.ops = &hi846_subdev_entity_ops;
+ hi846->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ hi846->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&hi846->sd.entity, 1, &hi846->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto err_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&hi846->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto err_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+err_media_entity_cleanup:
+ media_entity_cleanup(&hi846->sd.entity);
+
+err_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(hi846->sd.ctrl_handler);
+
+err_power_off:
+ hi846_power_off(hi846);
+
+err_mutex:
+ mutex_destroy(&hi846->mutex);
+
+ return ret;
+}
+
+static int hi846_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi846 *hi846 = to_hi846(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ hi846_suspend(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ mutex_destroy(&hi846->mutex);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(hi846_pm_ops, hi846_suspend, hi846_resume, NULL);
+
+static const struct of_device_id hi846_of_match[] = {
+ { .compatible = "hynix,hi846", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hi846_of_match);
+
+static struct i2c_driver hi846_i2c_driver = {
+ .driver = {
+ .name = "hi846",
+ .pm = &hi846_pm_ops,
+ .of_match_table = of_match_ptr(hi846_of_match),
+ },
+ .probe_new = hi846_probe,
+ .remove = hi846_remove,
+};
+
+module_i2c_driver(hi846_i2c_driver);
+
+MODULE_AUTHOR("Angus Ainslie <angus@akkea.ca>");
+MODULE_AUTHOR("Martin Kepplinger <martin.kepplinger@puri.sm>");
+MODULE_DESCRIPTION("Hynix HI846 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index 81cdf37216ca..c249507aa2db 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -1260,18 +1260,18 @@ static int imx258_probe(struct i2c_client *client)
return -ENOMEM;
imx258->clk = devm_clk_get_optional(&client->dev, NULL);
+ if (IS_ERR(imx258->clk))
+ return dev_err_probe(&client->dev, PTR_ERR(imx258->clk),
+ "error getting clock\n");
if (!imx258->clk) {
dev_dbg(&client->dev,
"no clock provided, using clock-frequency property\n");
device_property_read_u32(&client->dev, "clock-frequency", &val);
- if (val != IMX258_INPUT_CLOCK_FREQ)
- return -EINVAL;
- } else if (IS_ERR(imx258->clk)) {
- return dev_err_probe(&client->dev, PTR_ERR(imx258->clk),
- "error getting clock\n");
+ } else {
+ val = clk_get_rate(imx258->clk);
}
- if (clk_get_rate(imx258->clk) != IMX258_INPUT_CLOCK_FREQ) {
+ if (val != IMX258_INPUT_CLOCK_FREQ) {
dev_err(&client->dev, "input clock frequency not supported\n");
return -EINVAL;
}
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 92376592455e..56674173524f 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -791,6 +791,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
rc_proto = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE |
RC_PROTO_BIT_RC6_6A_32;
ir_codes = RC_MAP_HAUPPAUGE;
+ ir->polling_interval = 125;
probe_tx = true;
break;
}
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 1aa2c58fd38c..7c663fd587bb 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -606,19 +606,18 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
if (!priv->nsources)
return 0;
- v4l2_async_notifier_init(&priv->notifier);
+ v4l2_async_nf_init(&priv->notifier);
for_each_source(priv, source) {
unsigned int i = to_index(priv, source);
struct max9286_asd *mas;
- mas = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
- source->fwnode,
- struct max9286_asd);
+ mas = v4l2_async_nf_add_fwnode(&priv->notifier, source->fwnode,
+ struct max9286_asd);
if (IS_ERR(mas)) {
dev_err(dev, "Failed to add subdev for source %u: %ld",
i, PTR_ERR(mas));
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
return PTR_ERR(mas);
}
@@ -627,10 +626,10 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
priv->notifier.ops = &max9286_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&priv->sd, &priv->notifier);
+ ret = v4l2_async_subdev_nf_register(&priv->sd, &priv->notifier);
if (ret) {
dev_err(dev, "Failed to register subdev_notifier");
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
return ret;
}
@@ -642,8 +641,8 @@ static void max9286_v4l2_notifier_unregister(struct max9286_priv *priv)
if (!priv->nsources)
return;
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
}
static int max9286_s_stream(struct v4l2_subdev *sd, int enable)
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 6eb88ef99783..cbce8b88dbcf 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -27,6 +27,7 @@
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include "aptina-pll.h"
@@ -75,38 +76,38 @@
#define MT9P031_PLL_CONFIG_1 0x11
#define MT9P031_PLL_CONFIG_2 0x12
#define MT9P031_PIXEL_CLOCK_CONTROL 0x0a
-#define MT9P031_PIXEL_CLOCK_INVERT (1 << 15)
+#define MT9P031_PIXEL_CLOCK_INVERT BIT(15)
#define MT9P031_PIXEL_CLOCK_SHIFT(n) ((n) << 8)
#define MT9P031_PIXEL_CLOCK_DIVIDE(n) ((n) << 0)
-#define MT9P031_FRAME_RESTART 0x0b
+#define MT9P031_RESTART 0x0b
+#define MT9P031_FRAME_PAUSE_RESTART BIT(1)
+#define MT9P031_FRAME_RESTART BIT(0)
#define MT9P031_SHUTTER_DELAY 0x0c
#define MT9P031_RST 0x0d
-#define MT9P031_RST_ENABLE 1
-#define MT9P031_RST_DISABLE 0
+#define MT9P031_RST_ENABLE BIT(0)
#define MT9P031_READ_MODE_1 0x1e
#define MT9P031_READ_MODE_2 0x20
-#define MT9P031_READ_MODE_2_ROW_MIR (1 << 15)
-#define MT9P031_READ_MODE_2_COL_MIR (1 << 14)
-#define MT9P031_READ_MODE_2_ROW_BLC (1 << 6)
+#define MT9P031_READ_MODE_2_ROW_MIR BIT(15)
+#define MT9P031_READ_MODE_2_COL_MIR BIT(14)
+#define MT9P031_READ_MODE_2_ROW_BLC BIT(6)
#define MT9P031_ROW_ADDRESS_MODE 0x22
#define MT9P031_COLUMN_ADDRESS_MODE 0x23
#define MT9P031_GLOBAL_GAIN 0x35
#define MT9P031_GLOBAL_GAIN_MIN 8
#define MT9P031_GLOBAL_GAIN_MAX 1024
#define MT9P031_GLOBAL_GAIN_DEF 8
-#define MT9P031_GLOBAL_GAIN_MULT (1 << 6)
+#define MT9P031_GLOBAL_GAIN_MULT BIT(6)
#define MT9P031_ROW_BLACK_TARGET 0x49
#define MT9P031_ROW_BLACK_DEF_OFFSET 0x4b
#define MT9P031_GREEN1_OFFSET 0x60
#define MT9P031_GREEN2_OFFSET 0x61
#define MT9P031_BLACK_LEVEL_CALIBRATION 0x62
-#define MT9P031_BLC_MANUAL_BLC (1 << 0)
+#define MT9P031_BLC_MANUAL_BLC BIT(0)
#define MT9P031_RED_OFFSET 0x63
#define MT9P031_BLUE_OFFSET 0x64
#define MT9P031_TEST_PATTERN 0xa0
#define MT9P031_TEST_PATTERN_SHIFT 3
-#define MT9P031_TEST_PATTERN_ENABLE (1 << 0)
-#define MT9P031_TEST_PATTERN_DISABLE (0 << 0)
+#define MT9P031_TEST_PATTERN_ENABLE BIT(0)
#define MT9P031_TEST_PATTERN_GREEN 0xa1
#define MT9P031_TEST_PATTERN_RED 0xa2
#define MT9P031_TEST_PATTERN_BLUE 0xa3
@@ -196,7 +197,7 @@ static int mt9p031_reset(struct mt9p031 *mt9p031)
ret = mt9p031_write(client, MT9P031_RST, MT9P031_RST_ENABLE);
if (ret < 0)
return ret;
- ret = mt9p031_write(client, MT9P031_RST, MT9P031_RST_DISABLE);
+ ret = mt9p031_write(client, MT9P031_RST, 0);
if (ret < 0)
return ret;
@@ -229,6 +230,7 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
struct mt9p031_platform_data *pdata = mt9p031->pdata;
+ unsigned long ext_freq;
int ret;
mt9p031->clk = devm_clk_get(&client->dev, NULL);
@@ -239,13 +241,15 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
if (ret < 0)
return ret;
+ ext_freq = clk_get_rate(mt9p031->clk);
+
/* If the external clock frequency is out of bounds for the PLL use the
* pixel clock divider only and disable the PLL.
*/
- if (pdata->ext_freq > limits.ext_clock_max) {
+ if (ext_freq > limits.ext_clock_max) {
unsigned int div;
- div = DIV_ROUND_UP(pdata->ext_freq, pdata->target_freq);
+ div = DIV_ROUND_UP(ext_freq, pdata->target_freq);
div = roundup_pow_of_two(div) / 2;
mt9p031->clk_div = min_t(unsigned int, div, 64);
@@ -254,7 +258,7 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
return 0;
}
- mt9p031->pll.ext_clock = pdata->ext_freq;
+ mt9p031->pll.ext_clock = ext_freq;
mt9p031->pll.pix_clock = pdata->target_freq;
mt9p031->use_pll = true;
@@ -369,6 +373,14 @@ static int __mt9p031_set_power(struct mt9p031 *mt9p031, bool on)
return ret;
}
+ /* Configure the pixel clock polarity */
+ if (mt9p031->pdata && mt9p031->pdata->pixclk_pol) {
+ ret = mt9p031_write(client, MT9P031_PIXEL_CLOCK_CONTROL,
+ MT9P031_PIXEL_CLOCK_INVERT);
+ if (ret < 0)
+ return ret;
+ }
+
return v4l2_ctrl_handler_setup(&mt9p031->ctrls);
}
@@ -444,9 +456,23 @@ static int mt9p031_set_params(struct mt9p031 *mt9p031)
static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int val;
int ret;
if (!enable) {
+ /* enable pause restart */
+ val = MT9P031_FRAME_PAUSE_RESTART;
+ ret = mt9p031_write(client, MT9P031_RESTART, val);
+ if (ret < 0)
+ return ret;
+
+ /* enable restart + keep pause restart set */
+ val |= MT9P031_FRAME_RESTART;
+ ret = mt9p031_write(client, MT9P031_RESTART, val);
+ if (ret < 0)
+ return ret;
+
/* Stop sensor readout */
ret = mt9p031_set_output_control(mt9p031,
MT9P031_OUTPUT_CONTROL_CEN, 0);
@@ -466,6 +492,16 @@ static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
if (ret < 0)
return ret;
+ /*
+	 * - clear pause restart
+	 * - don't clear restart, as clearing it manually can cause
+	 *   undefined behavior
+ */
+ val = MT9P031_FRAME_RESTART;
+ ret = mt9p031_write(client, MT9P031_RESTART, val);
+ if (ret < 0)
+ return ret;
+
return mt9p031_pll_enable(mt9p031);
}
@@ -756,8 +792,7 @@ static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret < 0)
return ret;
- return mt9p031_write(client, MT9P031_TEST_PATTERN,
- MT9P031_TEST_PATTERN_DISABLE);
+ return mt9p031_write(client, MT9P031_TEST_PATTERN, 0);
}
ret = mt9p031_write(client, MT9P031_TEST_PATTERN_GREEN, 0x05a0);
@@ -1011,8 +1046,11 @@ static const struct v4l2_subdev_internal_ops mt9p031_subdev_internal_ops = {
static struct mt9p031_platform_data *
mt9p031_get_pdata(struct i2c_client *client)
{
- struct mt9p031_platform_data *pdata;
+ struct mt9p031_platform_data *pdata = NULL;
struct device_node *np;
+ struct v4l2_fwnode_endpoint endpoint = {
+ .bus_type = V4L2_MBUS_PARALLEL
+ };
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
@@ -1021,6 +1059,9 @@ mt9p031_get_pdata(struct i2c_client *client)
if (!np)
return NULL;
+ if (v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &endpoint) < 0)
+ goto done;
+
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
goto done;
@@ -1028,6 +1069,9 @@ mt9p031_get_pdata(struct i2c_client *client)
of_property_read_u32(np, "input-clock-frequency", &pdata->ext_freq);
of_property_read_u32(np, "pixel-clock-frequency", &pdata->target_freq);
+ pdata->pixclk_pol = !!(endpoint.bus.parallel.flags &
+ V4L2_MBUS_PCLK_SAMPLE_RISING);
+
done:
of_node_put(np);
return pdata;
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 7fc70af53e45..b4d22f5d9933 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#define OV13858_REG_VALUE_08BIT 1
@@ -1553,6 +1554,12 @@ static int ov13858_identify_module(struct ov13858 *ov13858)
return 0;
}
+static const struct v4l2_subdev_core_ops ov13858_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops ov13858_video_ops = {
.s_stream = ov13858_set_stream,
};
@@ -1569,6 +1576,7 @@ static const struct v4l2_subdev_sensor_ops ov13858_sensor_ops = {
};
static const struct v4l2_subdev_ops ov13858_subdev_ops = {
+ .core = &ov13858_core_ops,
.video = &ov13858_video_ops,
.pad = &ov13858_pad_ops,
.sensor = &ov13858_sensor_ops,
@@ -1724,7 +1732,8 @@ static int ov13858_probe(struct i2c_client *client,
/* Initialize subdev */
ov13858->sd.internal_ops = &ov13858_internal_ops;
- ov13858->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov13858->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
ov13858->sd.entity.ops = &ov13858_subdev_entity_ops;
ov13858->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
new file mode 100644
index 000000000000..7caeae641051
--- /dev/null
+++ b/drivers/media/i2c/ov13b10.c
@@ -0,0 +1,1491 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2021 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV13B10_REG_VALUE_08BIT 1
+#define OV13B10_REG_VALUE_16BIT 2
+#define OV13B10_REG_VALUE_24BIT 3
+
+#define OV13B10_REG_MODE_SELECT 0x0100
+#define OV13B10_MODE_STANDBY 0x00
+#define OV13B10_MODE_STREAMING 0x01
+
+#define OV13B10_REG_SOFTWARE_RST 0x0103
+#define OV13B10_SOFTWARE_RST 0x01
+
+/* Chip ID */
+#define OV13B10_REG_CHIP_ID 0x300a
+#define OV13B10_CHIP_ID 0x560d42
+
+/* V_TIMING internal */
+#define OV13B10_REG_VTS 0x380e
+#define OV13B10_VTS_30FPS 0x0c7c
+#define OV13B10_VTS_60FPS 0x063e
+#define OV13B10_VTS_MAX 0x7fff
+
+/* HBLANK control - read only */
+#define OV13B10_PPL_560MHZ 4704
+
+/* Exposure control */
+#define OV13B10_REG_EXPOSURE 0x3500
+#define OV13B10_EXPOSURE_MIN 4
+#define OV13B10_EXPOSURE_STEP 1
+#define OV13B10_EXPOSURE_DEFAULT 0x40
+
+/* Analog gain control */
+#define OV13B10_REG_ANALOG_GAIN 0x3508
+#define OV13B10_ANA_GAIN_MIN 0x80
+#define OV13B10_ANA_GAIN_MAX 0x07c0
+#define OV13B10_ANA_GAIN_STEP 1
+#define OV13B10_ANA_GAIN_DEFAULT 0x80
+
+/* Digital gain control */
+#define OV13B10_REG_DGTL_GAIN_H 0x350a
+#define OV13B10_REG_DGTL_GAIN_M 0x350b
+#define OV13B10_REG_DGTL_GAIN_L 0x350c
+
+#define OV13B10_DGTL_GAIN_MIN 1024 /* Min = 1 X */
+#define OV13B10_DGTL_GAIN_MAX (4096 - 1) /* Max = 4 X */
+#define OV13B10_DGTL_GAIN_DEFAULT 2560 /* Default gain = 2.5 X */
+#define OV13B10_DGTL_GAIN_STEP 1 /* Each step = 1/1024 */
+
+#define OV13B10_DGTL_GAIN_L_SHIFT 6
+#define OV13B10_DGTL_GAIN_L_MASK 0x3
+#define OV13B10_DGTL_GAIN_M_SHIFT 2
+#define OV13B10_DGTL_GAIN_M_MASK 0xff
+#define OV13B10_DGTL_GAIN_H_SHIFT 10
+#define OV13B10_DGTL_GAIN_H_MASK 0x3
+
+/* Test Pattern Control */
+#define OV13B10_REG_TEST_PATTERN 0x5080
+#define OV13B10_TEST_PATTERN_ENABLE BIT(7)
+#define OV13B10_TEST_PATTERN_MASK 0xf3
+#define OV13B10_TEST_PATTERN_BAR_SHIFT 2
+
+/* Flip Control */
+#define OV13B10_REG_FORMAT1 0x3820
+#define OV13B10_REG_FORMAT2 0x3821
+
+/* Horizontal Window Offset */
+#define OV13B10_REG_H_WIN_OFFSET 0x3811
+
+/* Vertical Window Offset */
+#define OV13B10_REG_V_WIN_OFFSET 0x3813
+
+struct ov13b10_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov13b10_reg_list {
+ u32 num_of_regs;
+ const struct ov13b10_reg *regs;
+};
+
+/* Link frequency config */
+struct ov13b10_link_freq_config {
+ u32 pixels_per_line;
+
+ /* registers for this link frequency */
+ struct ov13b10_reg_list reg_list;
+};
+
+/* Mode: resolution and related configuration values */
+struct ov13b10_mode {
+ /* Frame width */
+ u32 width;
+ /* Frame height */
+ u32 height;
+
+ /* V-timing */
+ u32 vts_def;
+ u32 vts_min;
+
+ /* Index of Link frequency config to be used */
+ u32 link_freq_index;
+ /* Default register values */
+ struct ov13b10_reg_list reg_list;
+};
+
+/* 4208x3120 needs 1120Mbps/lane, 4 lanes */
+static const struct ov13b10_reg mipi_data_rate_1120mbps[] = {
+ {0x0103, 0x01},
+ {0x0303, 0x04},
+ {0x0305, 0xaf},
+ {0x0321, 0x00},
+ {0x0323, 0x04},
+ {0x0324, 0x01},
+ {0x0325, 0xa4},
+ {0x0326, 0x81},
+ {0x0327, 0x04},
+ {0x3012, 0x07},
+ {0x3013, 0x32},
+ {0x3107, 0x23},
+ {0x3501, 0x0c},
+ {0x3502, 0x10},
+ {0x3504, 0x08},
+ {0x3508, 0x07},
+ {0x3509, 0xc0},
+ {0x3600, 0x16},
+ {0x3601, 0x54},
+ {0x3612, 0x4e},
+ {0x3620, 0x00},
+ {0x3621, 0x68},
+ {0x3622, 0x66},
+ {0x3623, 0x03},
+ {0x3662, 0x92},
+ {0x3666, 0xbb},
+ {0x3667, 0x44},
+ {0x366e, 0xff},
+ {0x366f, 0xf3},
+ {0x3675, 0x44},
+ {0x3676, 0x00},
+ {0x367f, 0xe9},
+ {0x3681, 0x32},
+ {0x3682, 0x1f},
+ {0x3683, 0x0b},
+ {0x3684, 0x0b},
+ {0x3704, 0x0f},
+ {0x3706, 0x40},
+ {0x3708, 0x3b},
+ {0x3709, 0x72},
+ {0x370b, 0xa2},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3725, 0x42},
+ {0x3739, 0x12},
+ {0x3767, 0x00},
+ {0x377a, 0x0d},
+ {0x3789, 0x18},
+ {0x3790, 0x40},
+ {0x3791, 0xa2},
+ {0x37c2, 0x04},
+ {0x37c3, 0xf1},
+ {0x37d9, 0x0c},
+ {0x37da, 0x02},
+ {0x37dc, 0x02},
+ {0x37e1, 0x04},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x70},
+ {0x380a, 0x0c},
+ {0x380b, 0x30},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3811, 0x0f},
+ {0x3813, 0x09},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x01},
+ {0x3817, 0x01},
+ {0x381f, 0x08},
+ {0x3820, 0x88},
+ {0x3821, 0x00},
+ {0x3822, 0x14},
+ {0x382e, 0xe6},
+ {0x3c80, 0x00},
+ {0x3c87, 0x01},
+ {0x3c8c, 0x19},
+ {0x3c8d, 0x1c},
+ {0x3ca0, 0x00},
+ {0x3ca1, 0x00},
+ {0x3ca2, 0x00},
+ {0x3ca3, 0x00},
+ {0x3ca4, 0x50},
+ {0x3ca5, 0x11},
+ {0x3ca6, 0x01},
+ {0x3ca7, 0x00},
+ {0x3ca8, 0x00},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x400a, 0x01},
+ {0x400b, 0x19},
+ {0x4011, 0x21},
+ {0x4017, 0x08},
+ {0x4019, 0x04},
+ {0x401a, 0x58},
+ {0x4032, 0x1e},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x405e, 0x00},
+ {0x4066, 0x02},
+ {0x4501, 0x00},
+ {0x4502, 0x10},
+ {0x4505, 0x00},
+ {0x4800, 0x64},
+ {0x481b, 0x3e},
+ {0x481f, 0x30},
+ {0x4825, 0x34},
+ {0x4837, 0x0e},
+ {0x484b, 0x01},
+ {0x4883, 0x02},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+ {0x5045, 0x20},
+ {0x5046, 0x20},
+ {0x5047, 0xa4},
+ {0x5048, 0x20},
+ {0x5049, 0xa4},
+ {0x0100, 0x01},
+};
+
+static const struct ov13b10_reg mode_4208x3120_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x0c},
+ {0x3662, 0x92},
+ {0x3714, 0x24},
+ {0x3739, 0x12},
+ {0x37c2, 0x04},
+ {0x37d9, 0x0c},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x70},
+ {0x380a, 0x0c},
+ {0x380b, 0x30},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3810, 0x00},
+ {0x3811, 0x0f},
+ {0x3812, 0x00},
+ {0x3813, 0x09},
+ {0x3814, 0x01},
+ {0x3816, 0x01},
+ {0x3820, 0x88},
+ {0x3c8c, 0x19},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x4501, 0x00},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+};
+
+static const struct ov13b10_reg mode_4160x3120_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x0c},
+ {0x3662, 0x92},
+ {0x3714, 0x24},
+ {0x3739, 0x12},
+ {0x37c2, 0x04},
+ {0x37d9, 0x0c},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x40},
+ {0x380a, 0x0c},
+ {0x380b, 0x30},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3810, 0x00},
+ {0x3811, 0x27},
+ {0x3812, 0x00},
+ {0x3813, 0x09},
+ {0x3814, 0x01},
+ {0x3816, 0x01},
+ {0x3820, 0x88},
+ {0x3c8c, 0x19},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x4501, 0x00},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+};
+
+static const struct ov13b10_reg mode_4160x2340_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x0c},
+ {0x3662, 0x92},
+ {0x3714, 0x24},
+ {0x3739, 0x12},
+ {0x37c2, 0x04},
+ {0x37d9, 0x0c},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x40},
+ {0x380a, 0x09},
+ {0x380b, 0x24},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3810, 0x00},
+ {0x3811, 0x27},
+ {0x3812, 0x01},
+ {0x3813, 0x8f},
+ {0x3814, 0x01},
+ {0x3816, 0x01},
+ {0x3820, 0x88},
+ {0x3c8c, 0x19},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x4501, 0x00},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+};
+
+static const struct ov13b10_reg mode_2104x1560_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x06},
+ {0x3662, 0x88},
+ {0x3714, 0x28},
+ {0x3739, 0x10},
+ {0x37c2, 0x14},
+ {0x37d9, 0x06},
+ {0x37e2, 0x0c},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x08},
+ {0x3809, 0x38},
+ {0x380a, 0x06},
+ {0x380b, 0x18},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x06},
+ {0x380f, 0x3e},
+ {0x3810, 0x00},
+ {0x3811, 0x07},
+ {0x3812, 0x00},
+ {0x3813, 0x05},
+ {0x3814, 0x03},
+ {0x3816, 0x03},
+ {0x3820, 0x8b},
+ {0x3c8c, 0x18},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x4050, 0x00},
+ {0x4051, 0x05},
+ {0x4501, 0x08},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xfd},
+ {0x5001, 0x0d},
+};
+
+static const struct ov13b10_reg mode_2080x1170_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x06},
+ {0x3662, 0x88},
+ {0x3714, 0x28},
+ {0x3739, 0x10},
+ {0x37c2, 0x14},
+ {0x37d9, 0x06},
+ {0x37e2, 0x0c},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x08},
+ {0x3809, 0x20},
+ {0x380a, 0x04},
+ {0x380b, 0x92},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x06},
+ {0x380f, 0x3e},
+ {0x3810, 0x00},
+ {0x3811, 0x13},
+ {0x3812, 0x00},
+ {0x3813, 0xc9},
+ {0x3814, 0x03},
+ {0x3816, 0x03},
+ {0x3820, 0x8b},
+ {0x3c8c, 0x18},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x4050, 0x00},
+ {0x4051, 0x05},
+ {0x4501, 0x08},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xfd},
+ {0x5001, 0x0d},
+};
+
+static const char * const ov13b10_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color Bar Type 1",
+ "Vertical Color Bar Type 2",
+ "Vertical Color Bar Type 3",
+ "Vertical Color Bar Type 4"
+};
+
+/* Configurations for supported link frequencies */
+#define OV13B10_LINK_FREQ_560MHZ 560000000ULL
+#define OV13B10_LINK_FREQ_INDEX_0 0
+
+#define OV13B10_EXT_CLK 19200000
+#define OV13B10_DATA_LANES 4
+
+/*
+ * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample
+ * data rate => double data rate; number of lanes => 4; bits per sample => 10
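+ * e.g. 560 MHz link: 560000000 * 2 * 4 / 10 = 448000000 pixels per second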
+ */
+static u64 link_freq_to_pixel_rate(u64 f)
+{
+ f *= 2 * OV13B10_DATA_LANES;
+ do_div(f, 10);
+
+ return f;
+}
+
+/* Menu items for LINK_FREQ V4L2 control */
+static const s64 link_freq_menu_items[] = {
+ OV13B10_LINK_FREQ_560MHZ
+};
+
+/* Link frequency configs */
+static const struct ov13b10_link_freq_config
+ link_freq_configs[] = {
+ {
+ .pixels_per_line = OV13B10_PPL_560MHZ,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_1120mbps),
+ .regs = mipi_data_rate_1120mbps,
+ }
+ }
+};
+
+/* Mode configs */
+static const struct ov13b10_mode supported_modes[] = {
+ {
+ .width = 4208,
+ .height = 3120,
+ .vts_def = OV13B10_VTS_30FPS,
+ .vts_min = OV13B10_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_4208x3120_regs),
+ .regs = mode_4208x3120_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 4160,
+ .height = 3120,
+ .vts_def = OV13B10_VTS_30FPS,
+ .vts_min = OV13B10_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_4160x3120_regs),
+ .regs = mode_4160x3120_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 4160,
+ .height = 2340,
+ .vts_def = OV13B10_VTS_30FPS,
+ .vts_min = OV13B10_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_4160x2340_regs),
+ .regs = mode_4160x2340_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 2104,
+ .height = 1560,
+ .vts_def = OV13B10_VTS_60FPS,
+ .vts_min = OV13B10_VTS_60FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2104x1560_regs),
+ .regs = mode_2104x1560_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 2080,
+ .height = 1170,
+ .vts_def = OV13B10_VTS_60FPS,
+ .vts_min = OV13B10_VTS_60FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2080x1170_regs),
+ .regs = mode_2080x1170_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ }
+};
+
+struct ov13b10 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov13b10_mode *cur_mode;
+
+ /* Mutex for serialized access */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+#define to_ov13b10(_sd) container_of(_sd, struct ov13b10, sd)
+
+/* Read up to 4 bytes at a time from consecutive registers */
+static int ov13b10_read_reg(struct ov13b10 *ov13b,
+ u16 reg, u32 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ struct i2c_msg msgs[2];
+ u8 *data_be_p;
+ int ret;
+ __be32 data_be = 0;
+ __be16 reg_addr_be = cpu_to_be16(reg);
+
+ if (len > 4)
+ return -EINVAL;
+
+ data_be_p = (u8 *)&data_be;
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = (u8 *)&reg_addr_be;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_be_p[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
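+	/*
+	 * The bytes were read into the low-order end of data_be, so
+	 * be32_to_cpu() yields the value right-aligned for any len <= 4.
+	 */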
+ *val = be32_to_cpu(data_be);
+
+ return 0;
+}
+
+/* Write up to 4 bytes at a time to consecutive registers */
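+/* e.g. writing 16-bit 0x0c7c to reg 0x380e sends {0x38, 0x0e, 0x0c, 0x7c} */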
+static int ov13b10_write_reg(struct ov13b10 *ov13b,
+ u16 reg, u32 len, u32 __val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ int buf_i, val_i;
+ u8 buf[6], *val_p;
+ __be32 val;
+
+ if (len > 4)
+ return -EINVAL;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+
+ val = cpu_to_be32(__val);
+ val_p = (u8 *)&val;
+ buf_i = 2;
+ val_i = 4 - len;
+
+ while (val_i < 4)
+ buf[buf_i++] = val_p[val_i++];
+
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+/* Write a list of registers */
+static int ov13b10_write_regs(struct ov13b10 *ov13b,
+ const struct ov13b10_reg *regs, u32 len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ int ret;
+ u32 i;
+
+ for (i = 0; i < len; i++) {
+ ret = ov13b10_write_reg(ov13b, regs[i].address, 1,
+ regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "Failed to write reg 0x%4.4x. error = %d\n",
+ regs[i].address, ret);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov13b10_write_reg_list(struct ov13b10 *ov13b,
+ const struct ov13b10_reg_list *r_list)
+{
+ return ov13b10_write_regs(ov13b, r_list->regs, r_list->num_of_regs);
+}
+
+/* Open sub-device */
+static int ov13b10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ const struct ov13b10_mode *default_mode = &supported_modes[0];
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
+
+ mutex_lock(&ov13b->mutex);
+
+ /* Initialize try_fmt */
+ try_fmt->width = default_mode->width;
+ try_fmt->height = default_mode->height;
+ try_fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ try_fmt->field = V4L2_FIELD_NONE;
+
+ /* No crop or compose */
+ mutex_unlock(&ov13b->mutex);
+
+ return 0;
+}
+
+static int ov13b10_update_digital_gain(struct ov13b10 *ov13b, u32 d_gain)
+{
+ int ret;
+ u32 val;
+
+	/*
+	 * The 12-bit digital gain is split across three registers:
+	 * 0x350A[1:0] (high), 0x350B[7:0] (middle), 0x350C[7:6] (low).
+	 */
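+	/* e.g. d_gain = 1024 (1x) writes 0x350a = 0x01, 0x350b = 0x00, 0x350c = 0x00 */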
+
+ val = (d_gain & OV13B10_DGTL_GAIN_L_MASK) << OV13B10_DGTL_GAIN_L_SHIFT;
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_DGTL_GAIN_L,
+ OV13B10_REG_VALUE_08BIT, val);
+ if (ret)
+ return ret;
+
+ val = (d_gain >> OV13B10_DGTL_GAIN_M_SHIFT) & OV13B10_DGTL_GAIN_M_MASK;
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_DGTL_GAIN_M,
+ OV13B10_REG_VALUE_08BIT, val);
+ if (ret)
+ return ret;
+
+ val = (d_gain >> OV13B10_DGTL_GAIN_H_SHIFT) & OV13B10_DGTL_GAIN_H_MASK;
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_DGTL_GAIN_H,
+ OV13B10_REG_VALUE_08BIT, val);
+
+ return ret;
+}
+
+static int ov13b10_enable_test_pattern(struct ov13b10 *ov13b, u32 pattern)
+{
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_TEST_PATTERN,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
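+	/*
+	 * Menu index N (N > 0) selects bar type N: bits [3:2] are set to
+	 * N - 1 and the enable bit (7) is set; index 0 clears the enable
+	 * bit and leaves the other bits untouched.
+	 */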
+ if (pattern) {
+ val &= OV13B10_TEST_PATTERN_MASK;
+ val |= ((pattern - 1) << OV13B10_TEST_PATTERN_BAR_SHIFT) |
+ OV13B10_TEST_PATTERN_ENABLE;
+ } else {
+ val &= ~OV13B10_TEST_PATTERN_ENABLE;
+ }
+
+ return ov13b10_write_reg(ov13b, OV13B10_REG_TEST_PATTERN,
+ OV13B10_REG_VALUE_08BIT, val);
+}
+
+static int ov13b10_set_ctrl_hflip(struct ov13b10 *ov13b, u32 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? val & ~BIT(3) : val);
+
+ if (ret)
+ return ret;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_H_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+	/*
+	 * Mirroring moves readout onto the opposite color-filter column,
+	 * so apply a one-pixel cropping offset to keep the GRBG Bayer
+	 * order unchanged after mirroring the image.
+	 */
+ return ov13b10_write_reg(ov13b, OV13B10_REG_H_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? ++val : val);
+}
+
+static int ov13b10_set_ctrl_vflip(struct ov13b10 *ov13b, u32 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? val | BIT(4) | BIT(5) : val);
+
+ if (ret)
+ return ret;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_V_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+	/*
+	 * Flipping moves readout onto the opposite color-filter row, so
+	 * apply a one-pixel cropping offset to keep the GRBG Bayer order
+	 * unchanged after flipping the image.
+	 */
+ return ov13b10_write_reg(ov13b, OV13B10_REG_V_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? --val : val);
+}
+
+static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov13b10 *ov13b = container_of(ctrl->handler,
+ struct ov13b10, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ s64 max;
+ int ret;
+
+ /* Propagate change of current control to all related controls */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ /* Update max exposure while meeting expected vblanking */
+ max = ov13b->cur_mode->height + ctrl->val - 8;
+ __v4l2_ctrl_modify_range(ov13b->exposure,
+ ov13b->exposure->minimum,
+ max, ov13b->exposure->step, max);
+ break;
+ }
+
+	/*
+	 * V4L2 control values are applied to the hardware only while the
+	 * sensor is powered up for streaming.
+	 */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ ret = 0;
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_ANALOG_GAIN,
+ OV13B10_REG_VALUE_16BIT,
+ ctrl->val << 1);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov13b10_update_digital_gain(ov13b, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_EXPOSURE,
+ OV13B10_REG_VALUE_24BIT,
+ ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_VTS,
+ OV13B10_REG_VALUE_16BIT,
+ ov13b->cur_mode->height
+ + ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov13b10_enable_test_pattern(ov13b, ctrl->val);
+ break;
+	case V4L2_CID_HFLIP:
+		ret = ov13b10_set_ctrl_hflip(ov13b, ctrl->val);
+		break;
+	case V4L2_CID_VFLIP:
+		ret = ov13b10_set_ctrl_vflip(ov13b, ctrl->val);
+		break;
+ default:
+ dev_info(&client->dev,
+ "ctrl(id:0x%x,val:0x%x) is not handled\n",
+ ctrl->id, ctrl->val);
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov13b10_ctrl_ops = {
+ .s_ctrl = ov13b10_set_ctrl,
+};
+
+static int ov13b10_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+	/* Only one Bayer order (GRBG) is supported */
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov13b10_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void ov13b10_update_pad_format(const struct ov13b10_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->format.field = V4L2_FIELD_NONE;
+}
+
+static int ov13b10_do_get_pad_format(struct ov13b10 *ov13b,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *framefmt;
+ struct v4l2_subdev *sd = &ov13b->sd;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ fmt->format = *framefmt;
+ } else {
+ ov13b10_update_pad_format(ov13b->cur_mode, fmt);
+ }
+
+ return 0;
+}
+
+static int ov13b10_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ int ret;
+
+ mutex_lock(&ov13b->mutex);
+ ret = ov13b10_do_get_pad_format(ov13b, sd_state, fmt);
+ mutex_unlock(&ov13b->mutex);
+
+ return ret;
+}
+
+static int
+ov13b10_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ const struct ov13b10_mode *mode;
+ struct v4l2_mbus_framefmt *framefmt;
+ s32 vblank_def;
+ s32 vblank_min;
+ s64 h_blank;
+ s64 pixel_rate;
+ s64 link_freq;
+
+ mutex_lock(&ov13b->mutex);
+
+	/* Only one raw Bayer (GRBG) order is supported */
+ if (fmt->format.code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+ ov13b10_update_pad_format(mode, fmt);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ *framefmt = fmt->format;
+ } else {
+ ov13b->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov13b->link_freq, mode->link_freq_index);
+ link_freq = link_freq_menu_items[mode->link_freq_index];
+ pixel_rate = link_freq_to_pixel_rate(link_freq);
+ __v4l2_ctrl_s_ctrl_int64(ov13b->pixel_rate, pixel_rate);
+
+ /* Update limits and set FPS to default */
+ vblank_def = ov13b->cur_mode->vts_def -
+ ov13b->cur_mode->height;
+ vblank_min = ov13b->cur_mode->vts_min -
+ ov13b->cur_mode->height;
+ __v4l2_ctrl_modify_range(ov13b->vblank, vblank_min,
+ OV13B10_VTS_MAX
+ - ov13b->cur_mode->height,
+ 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov13b->vblank, vblank_def);
+ h_blank =
+ link_freq_configs[mode->link_freq_index].pixels_per_line
+ - ov13b->cur_mode->width;
+ __v4l2_ctrl_modify_range(ov13b->hblank, h_blank,
+ h_blank, 1, h_blank);
+ }
+
+ mutex_unlock(&ov13b->mutex);
+
+ return 0;
+}
+
+static int ov13b10_start_streaming(struct ov13b10 *ov13b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ const struct ov13b10_reg_list *reg_list;
+ int ret, link_freq_index;
+
+	/* Take the sensor out of software reset */
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_SOFTWARE_RST,
+ OV13B10_REG_VALUE_08BIT, OV13B10_SOFTWARE_RST);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set powerup registers\n",
+ __func__);
+ return ret;
+ }
+
+ link_freq_index = ov13b->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov13b10_write_reg_list(ov13b, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ reg_list = &ov13b->cur_mode->reg_list;
+ ret = ov13b10_write_reg_list(ov13b, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ return ret;
+ }
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(ov13b->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ return ov13b10_write_reg(ov13b, OV13B10_REG_MODE_SELECT,
+ OV13B10_REG_VALUE_08BIT,
+ OV13B10_MODE_STREAMING);
+}
+
+/* Stop streaming */
+static int ov13b10_stop_streaming(struct ov13b10 *ov13b)
+{
+ return ov13b10_write_reg(ov13b, OV13B10_REG_MODE_SELECT,
+ OV13B10_REG_VALUE_08BIT, OV13B10_MODE_STANDBY);
+}
+
+static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&ov13b->mutex);
+ if (ov13b->streaming == enable) {
+ mutex_unlock(&ov13b->mutex);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
+ goto err_unlock;
+
+ /*
+ * Apply default & customized values
+ * and then start streaming.
+ */
+ ret = ov13b10_start_streaming(ov13b);
+ if (ret)
+ goto err_rpm_put;
+ } else {
+ ov13b10_stop_streaming(ov13b);
+ pm_runtime_put(&client->dev);
+ }
+
+ ov13b->streaming = enable;
+ mutex_unlock(&ov13b->mutex);
+
+ return ret;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+err_unlock:
+ mutex_unlock(&ov13b->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused ov13b10_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+
+ if (ov13b->streaming)
+ ov13b10_stop_streaming(ov13b);
+
+ return 0;
+}
+
+static int __maybe_unused ov13b10_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ int ret;
+
+ if (ov13b->streaming) {
+ ret = ov13b10_start_streaming(ov13b);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ ov13b10_stop_streaming(ov13b);
+ ov13b->streaming = false;
+ return ret;
+}
+
+/* Verify chip ID */
+static int ov13b10_identify_module(struct ov13b10 *ov13b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_CHIP_ID,
+ OV13B10_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV13B10_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV13B10_CHIP_ID, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov13b10_video_ops = {
+ .s_stream = ov13b10_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov13b10_pad_ops = {
+ .enum_mbus_code = ov13b10_enum_mbus_code,
+ .get_fmt = ov13b10_get_pad_format,
+ .set_fmt = ov13b10_set_pad_format,
+ .enum_frame_size = ov13b10_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov13b10_subdev_ops = {
+ .video = &ov13b10_video_ops,
+ .pad = &ov13b10_pad_ops,
+};
+
+static const struct media_entity_operations ov13b10_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov13b10_internal_ops = {
+ .open = ov13b10_open,
+};
+
+/* Initialize control handlers */
+static int ov13b10_init_controls(struct ov13b10 *ov13b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ struct v4l2_fwnode_device_properties props;
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max;
+ s64 vblank_def;
+ s64 vblank_min;
+ s64 hblank;
+ s64 pixel_rate_min;
+ s64 pixel_rate_max;
+ const struct ov13b10_mode *mode;
+ u32 max;
+ int ret;
+
+ ctrl_hdlr = &ov13b->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ if (ret)
+ return ret;
+
+ mutex_init(&ov13b->mutex);
+ ctrl_hdlr->lock = &ov13b->mutex;
+ max = ARRAY_SIZE(link_freq_menu_items) - 1;
+ ov13b->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov13b10_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ max,
+ 0,
+ link_freq_menu_items);
+ if (ov13b->link_freq)
+ ov13b->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
+ pixel_rate_min = 0;
+ /* By default, PIXEL_RATE is read only */
+ ov13b->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ pixel_rate_min, pixel_rate_max,
+ 1, pixel_rate_max);
+
+ mode = ov13b->cur_mode;
+ vblank_def = mode->vts_def - mode->height;
+ vblank_min = mode->vts_min - mode->height;
+ ov13b->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_VBLANK,
+ vblank_min,
+ OV13B10_VTS_MAX - mode->height, 1,
+ vblank_def);
+
+ hblank = link_freq_configs[mode->link_freq_index].pixels_per_line -
+ mode->width;
+ ov13b->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_HBLANK,
+ hblank, hblank, 1, hblank);
+ if (ov13b->hblank)
+ ov13b->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ exposure_max = mode->vts_def - 8;
+ ov13b->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV13B10_EXPOSURE_MIN,
+ exposure_max, OV13B10_EXPOSURE_STEP,
+ exposure_max);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV13B10_ANA_GAIN_MIN, OV13B10_ANA_GAIN_MAX,
+ OV13B10_ANA_GAIN_STEP, OV13B10_ANA_GAIN_DEFAULT);
+
+ /* Digital gain */
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV13B10_DGTL_GAIN_MIN, OV13B10_DGTL_GAIN_MAX,
+ OV13B10_DGTL_GAIN_STEP, OV13B10_DGTL_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov13b10_test_pattern_menu) - 1,
+ 0, 0, ov13b10_test_pattern_menu);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(&client->dev, "%s control init failed (%d)\n",
+ __func__, ret);
+ goto error;
+ }
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov13b10_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
+ ov13b->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ mutex_destroy(&ov13b->mutex);
+
+ return ret;
+}
+
+static void ov13b10_free_controls(struct ov13b10 *ov13b)
+{
+ v4l2_ctrl_handler_free(ov13b->sd.ctrl_handler);
+ mutex_destroy(&ov13b->mutex);
+}
+
+static int ov13b10_check_hwcfg(struct device *dev)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ unsigned int i, j;
+ int ret;
+ u32 ext_clk;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &ext_clk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ if (ext_clk != OV13B10_EXT_CLK) {
+ dev_err(dev, "external clock %d is not supported",
+ ext_clk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV13B10_DATA_LANES) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
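+	/*
+	 * Every link frequency the driver supports must also be listed in
+	 * the firmware-provided link frequencies.
+	 */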
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto out_err;
+ }
+ }
+
+out_err:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov13b10_probe(struct i2c_client *client)
+{
+ struct ov13b10 *ov13b;
+ int ret;
+
+ /* Check HW config */
+ ret = ov13b10_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check hwcfg: %d", ret);
+ return ret;
+ }
+
+ ov13b = devm_kzalloc(&client->dev, sizeof(*ov13b), GFP_KERNEL);
+ if (!ov13b)
+ return -ENOMEM;
+
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
+
+ /* Check module identity */
+ ret = ov13b10_identify_module(ov13b);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ return ret;
+ }
+
+ /* Set default mode to max resolution */
+ ov13b->cur_mode = &supported_modes[0];
+
+ ret = ov13b10_init_controls(ov13b);
+ if (ret)
+ return ret;
+
+ /* Initialize subdev */
+ ov13b->sd.internal_ops = &ov13b10_internal_ops;
+ ov13b->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov13b->sd.entity.ops = &ov13b10_subdev_entity_ops;
+ ov13b->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Initialize source pad */
+ ov13b->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov13b->sd.entity, 1, &ov13b->pad);
+ if (ret) {
+ dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ goto error_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
+ if (ret < 0)
+ goto error_media_entity;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+error_media_entity:
+ media_entity_cleanup(&ov13b->sd.entity);
+
+error_handler_free:
+ ov13b10_free_controls(ov13b);
+ dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int ov13b10_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ ov13b10_free_controls(ov13b);
+
+ pm_runtime_disable(&client->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ov13b10_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov13b10_suspend, ov13b10_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov13b10_acpi_ids[] = {
+ {"OVTIDB10"},
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov13b10_acpi_ids);
+#endif
+
+static struct i2c_driver ov13b10_i2c_driver = {
+ .driver = {
+ .name = "ov13b10",
+ .pm = &ov13b10_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov13b10_acpi_ids),
+ },
+ .probe_new = ov13b10_probe,
+ .remove = ov13b10_remove,
+};
+
+module_i2c_driver(ov13b10_i2c_driver);
+
+MODULE_AUTHOR("Kao, Arec <arec.kao@intel.com>");
+MODULE_DESCRIPTION("Omnivision ov13b10 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 49189926afd6..251f459ab484 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#define OV5670_REG_CHIP_ID 0x300a
@@ -2420,6 +2421,12 @@ static int ov5670_identify_module(struct ov5670 *ov5670)
return 0;
}
+static const struct v4l2_subdev_core_ops ov5670_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops ov5670_video_ops = {
.s_stream = ov5670_set_stream,
};
@@ -2436,6 +2443,7 @@ static const struct v4l2_subdev_sensor_ops ov5670_sensor_ops = {
};
static const struct v4l2_subdev_ops ov5670_subdev_ops = {
+ .core = &ov5670_core_ops,
.video = &ov5670_video_ops,
.pad = &ov5670_pad_ops,
.sensor = &ov5670_sensor_ops,
@@ -2489,7 +2497,8 @@ static int ov5670_probe(struct i2c_client *client)
}
ov5670->sd.internal_ops = &ov5670_internal_ops;
- ov5670->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov5670->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
ov5670->sd.entity.ops = &ov5670_subdev_entity_ops;
ov5670->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index aa74744b91c7..c6c6050cda1a 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -107,6 +107,11 @@ static const char * const ov8856_supply_names[] = {
"dvdd", /* Digital core power */
};
+enum {
+ OV8856_MEDIA_BUS_FMT_SBGGR10_1X10,
+ OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
+};
+
struct ov8856_reg {
u16 address;
u8 val;
@@ -145,6 +150,9 @@ struct ov8856_mode {
/* Number of data lanes */
u8 data_lanes;
+
+ /* Default MEDIA_BUS_FMT for this mode */
+ u32 default_mbus_index;
};
struct ov8856_mipi_data_rates {
@@ -1055,7 +1063,7 @@ static const struct ov8856_reg lane_4_mode_3264x2448[] = {
{0x3810, 0x00},
{0x3811, 0x04},
{0x3812, 0x00},
- {0x3813, 0x01},
+ {0x3813, 0x02},
{0x3814, 0x01},
{0x3815, 0x01},
{0x3816, 0x00},
@@ -1259,7 +1267,7 @@ static const struct ov8856_reg lane_4_mode_1632x1224[] = {
{0x3810, 0x00},
{0x3811, 0x02},
{0x3812, 0x00},
- {0x3813, 0x01},
+ {0x3813, 0x02},
{0x3814, 0x03},
{0x3815, 0x01},
{0x3816, 0x00},
@@ -1372,6 +1380,19 @@ static const struct ov8856_reg lane_4_mode_1632x1224[] = {
{0x5e10, 0xfc}
};
+static const struct ov8856_reg mipi_data_mbus_sbggr10_1x10[] = {
+ {0x3813, 0x02},
+};
+
+static const struct ov8856_reg mipi_data_mbus_sgrbg10_1x10[] = {
+ {0x3813, 0x01},
+};
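+
+/*
+ * 0x3813 appears to be the vertical window offset on this sensor too
+ * (the same register is OV13B10_REG_V_WIN_OFFSET in ov13b10.c above);
+ * a one-row shift in the readout start is what toggles the Bayer
+ * phase between BGGR and GRBG.
+ */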
+
+static const u32 ov8856_mbus_codes[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10
+};
+
static const char * const ov8856_test_pattern_menu[] = {
"Disabled",
"Standard Color Bar",
@@ -1380,6 +1401,17 @@ static const char * const ov8856_test_pattern_menu[] = {
"Bottom-Top Darker Color Bar"
};
+static const struct ov8856_reg_list bayer_offset_configs[] = {
+ [OV8856_MEDIA_BUS_FMT_SBGGR10_1X10] = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_mbus_sbggr10_1x10),
+ .regs = mipi_data_mbus_sbggr10_1x10,
+ },
+ [OV8856_MEDIA_BUS_FMT_SGRBG10_1X10] = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_mbus_sgrbg10_1x10),
+ .regs = mipi_data_mbus_sgrbg10_1x10,
+ }
+};
+
struct ov8856 {
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1399,6 +1431,9 @@ struct ov8856 {
/* Current mode */
const struct ov8856_mode *cur_mode;
+ /* Application specified mbus format */
+ u32 cur_mbus_index;
+
	/* To serialize asynchronous callbacks */
struct mutex mutex;
@@ -1450,6 +1485,7 @@ static const struct ov8856_lane_cfg lane_cfg_2 = {
},
.link_freq_index = 0,
.data_lanes = 2,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
},
{
.width = 1640,
@@ -1464,6 +1500,7 @@ static const struct ov8856_lane_cfg lane_cfg_2 = {
},
.link_freq_index = 1,
.data_lanes = 2,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
}}
};
@@ -1499,6 +1536,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 0,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
},
{
.width = 1640,
@@ -1513,6 +1551,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 1,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
},
{
.width = 3264,
@@ -1527,6 +1566,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 0,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SBGGR10_1X10,
},
{
.width = 1632,
@@ -1541,6 +1581,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 1,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SBGGR10_1X10,
}}
};
@@ -1904,12 +1945,21 @@ static int ov8856_init_controls(struct ov8856 *ov8856)
return 0;
}
-static void ov8856_update_pad_format(const struct ov8856_mode *mode,
+static void ov8856_update_pad_format(struct ov8856 *ov8856,
+ const struct ov8856_mode *mode,
struct v4l2_mbus_framefmt *fmt)
{
+ int index;
+
fmt->width = mode->width;
fmt->height = mode->height;
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ for (index = 0; index < ARRAY_SIZE(ov8856_mbus_codes); ++index)
+ if (ov8856_mbus_codes[index] == fmt->code)
+ break;
+ if (index == ARRAY_SIZE(ov8856_mbus_codes))
+ index = mode->default_mbus_index;
+ fmt->code = ov8856_mbus_codes[index];
+ ov8856->cur_mbus_index = index;
fmt->field = V4L2_FIELD_NONE;
}
@@ -1935,6 +1985,13 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
return ret;
}
+ reg_list = &bayer_offset_configs[ov8856->cur_mbus_index];
+ ret = ov8856_write_reg_list(ov8856, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mbus format");
+ return ret;
+ }
+
ret = __v4l2_ctrl_handler_setup(ov8856->sd.ctrl_handler);
if (ret)
return ret;
@@ -2096,7 +2153,7 @@ static int ov8856_set_format(struct v4l2_subdev *sd,
fmt->format.height);
mutex_lock(&ov8856->mutex);
- ov8856_update_pad_format(mode, &fmt->format);
+ ov8856_update_pad_format(ov8856, mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
*v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
@@ -2140,7 +2197,7 @@ static int ov8856_get_format(struct v4l2_subdev *sd,
sd_state,
fmt->pad);
else
- ov8856_update_pad_format(ov8856->cur_mode, &fmt->format);
+ ov8856_update_pad_format(ov8856, ov8856->cur_mode, &fmt->format);
mutex_unlock(&ov8856->mutex);
@@ -2151,11 +2208,10 @@ static int ov8856_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- /* Only one bayer order GRBG is supported */
- if (code->index > 0)
+ if (code->index >= ARRAY_SIZE(ov8856_mbus_codes))
return -EINVAL;
- code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ code->code = ov8856_mbus_codes[code->index];
return 0;
}
@@ -2165,11 +2221,15 @@ static int ov8856_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_size_enum *fse)
{
struct ov8856 *ov8856 = to_ov8856(sd);
+ int index;
if (fse->index >= ov8856->modes_size)
return -EINVAL;
- if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ for (index = 0; index < ARRAY_SIZE(ov8856_mbus_codes); ++index)
+ if (fse->code == ov8856_mbus_codes[index])
+ break;
+ if (index == ARRAY_SIZE(ov8856_mbus_codes))
return -EINVAL;
fse->min_width = ov8856->priv_lane->supported_modes[fse->index].width;
@@ -2185,7 +2245,7 @@ static int ov8856_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
struct ov8856 *ov8856 = to_ov8856(sd);
mutex_lock(&ov8856->mutex);
- ov8856_update_pad_format(&ov8856->priv_lane->supported_modes[0],
+ ov8856_update_pad_format(ov8856, &ov8856->priv_lane->supported_modes[0],
v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&ov8856->mutex);
@@ -2426,6 +2486,7 @@ static int ov8856_probe(struct i2c_client *client)
mutex_init(&ov8856->mutex);
ov8856->cur_mode = &ov8856->priv_lane->supported_modes[0];
+ ov8856->cur_mbus_index = ov8856->cur_mode->default_mbus_index;
ret = ov8856_init_controls(ov8856);
if (ret) {
dev_err(&client->dev, "failed to init controls: %d", ret);
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index f630b88cbfaa..ef976d085d72 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -876,11 +876,10 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
bridge->rx = ep;
	/* register async notifier so we get notified when the sensor is connected */
- v4l2_async_notifier_init(&bridge->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &bridge->notifier,
- of_fwnode_handle(ep_node),
- struct v4l2_async_subdev);
+ v4l2_async_nf_init(&bridge->notifier);
+ asd = v4l2_async_nf_add_fwnode_remote(&bridge->notifier,
+ of_fwnode_handle(ep_node),
+ struct v4l2_async_subdev);
of_node_put(ep_node);
if (IS_ERR(asd)) {
@@ -890,10 +889,9 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
}
bridge->notifier.ops = &mipid02_notifier_ops;
- ret = v4l2_async_subdev_notifier_register(&bridge->sd,
- &bridge->notifier);
+ ret = v4l2_async_subdev_nf_register(&bridge->sd, &bridge->notifier);
if (ret)
- v4l2_async_notifier_cleanup(&bridge->notifier);
+ v4l2_async_nf_cleanup(&bridge->notifier);
return ret;
@@ -1031,8 +1029,8 @@ static int mipid02_probe(struct i2c_client *client)
return 0;
unregister_notifier:
- v4l2_async_notifier_unregister(&bridge->notifier);
- v4l2_async_notifier_cleanup(&bridge->notifier);
+ v4l2_async_nf_unregister(&bridge->notifier);
+ v4l2_async_nf_cleanup(&bridge->notifier);
power_off:
mipid02_set_power_off(bridge);
entity_cleanup:
@@ -1048,8 +1046,8 @@ static int mipid02_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct mipid02_dev *bridge = to_mipid02_dev(sd);
- v4l2_async_notifier_unregister(&bridge->notifier);
- v4l2_async_notifier_cleanup(&bridge->notifier);
+ v4l2_async_nf_unregister(&bridge->notifier);
+ v4l2_async_nf_cleanup(&bridge->notifier);
v4l2_async_unregister_subdev(&bridge->sd);
mipid02_set_power_off(bridge);
media_entity_cleanup(&bridge->sd.entity);
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 6070aaf0b32e..8fafce26d62f 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1092,67 +1092,82 @@ tda1997x_detect_std(struct tda1997x_state *state,
struct v4l2_dv_timings *timings)
{
struct v4l2_subdev *sd = &state->sd;
- u32 vper;
- u16 hper;
- u16 hsper;
- int i;
/*
* Read the FMT registers
- * REG_V_PER: Period of a frame (or two fields) in MCLK(27MHz) cycles
- * REG_H_PER: Period of a line in MCLK(27MHz) cycles
- * REG_HS_WIDTH: Period of horiz sync pulse in MCLK(27MHz) cycles
+ * REG_V_PER: Period of a frame (or field) in MCLK (27MHz) cycles
+ * REG_H_PER: Period of a line in MCLK (27MHz) cycles
+ * REG_HS_WIDTH: Period of horiz sync pulse in MCLK (27MHz) cycles
*/
- vper = io_read24(sd, REG_V_PER) & MASK_VPER;
- hper = io_read16(sd, REG_H_PER) & MASK_HPER;
- hsper = io_read16(sd, REG_HS_WIDTH) & MASK_HSWIDTH;
- v4l2_dbg(1, debug, sd, "Signal Timings: %u/%u/%u\n", vper, hper, hsper);
+ u32 vper, vsync_pos;
+ u16 hper, hsync_pos, hsper, interlaced;
+ u16 htot, hact, hfront, hsync, hback;
+ u16 vtot, vact, vfront1, vfront2, vsync, vback1, vback2;
if (!state->input_detect[0] && !state->input_detect[1])
return -ENOLINK;
- for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
- const struct v4l2_bt_timings *bt;
- u32 lines, width, _hper, _hsper;
- u32 vmin, vmax, hmin, hmax, hsmin, hsmax;
- bool vmatch, hmatch, hsmatch;
-
- bt = &v4l2_dv_timings_presets[i].bt;
- width = V4L2_DV_BT_FRAME_WIDTH(bt);
- lines = V4L2_DV_BT_FRAME_HEIGHT(bt);
- _hper = (u32)bt->pixelclock / width;
- if (bt->interlaced)
- lines /= 2;
- /* vper +/- 0.7% */
- vmin = ((27000000 / 1000) * 993) / _hper * lines;
- vmax = ((27000000 / 1000) * 1007) / _hper * lines;
- /* hper +/- 1.0% */
- hmin = ((27000000 / 100) * 99) / _hper;
- hmax = ((27000000 / 100) * 101) / _hper;
- /* hsper +/- 2 (take care to avoid 32bit overflow) */
- _hsper = 27000 * bt->hsync / ((u32)bt->pixelclock/1000);
- hsmin = _hsper - 2;
- hsmax = _hsper + 2;
-
- /* vmatch matches the framerate */
- vmatch = ((vper <= vmax) && (vper >= vmin)) ? 1 : 0;
- /* hmatch matches the width */
- hmatch = ((hper <= hmax) && (hper >= hmin)) ? 1 : 0;
- /* hsmatch matches the hswidth */
- hsmatch = ((hsper <= hsmax) && (hsper >= hsmin)) ? 1 : 0;
- if (hmatch && vmatch && hsmatch) {
- v4l2_print_dv_timings(sd->name, "Detected format: ",
- &v4l2_dv_timings_presets[i],
- false);
- if (timings)
- *timings = v4l2_dv_timings_presets[i];
- return 0;
- }
- }
+ vper = io_read24(sd, REG_V_PER);
+ hper = io_read16(sd, REG_H_PER);
+ hsper = io_read16(sd, REG_HS_WIDTH);
+ vsync_pos = vper & MASK_VPER_SYNC_POS;
+ hsync_pos = hper & MASK_HPER_SYNC_POS;
+ interlaced = hsper & MASK_HSWIDTH_INTERLACED;
+ vper &= MASK_VPER;
+ hper &= MASK_HPER;
+ hsper &= MASK_HSWIDTH;
+ v4l2_dbg(1, debug, sd, "Signal Timings: %u/%u/%u\n", vper, hper, hsper);
+
+ htot = io_read16(sd, REG_FMT_H_TOT);
+ hact = io_read16(sd, REG_FMT_H_ACT);
+ hfront = io_read16(sd, REG_FMT_H_FRONT);
+ hsync = io_read16(sd, REG_FMT_H_SYNC);
+ hback = io_read16(sd, REG_FMT_H_BACK);
+
+ vtot = io_read16(sd, REG_FMT_V_TOT);
+ vact = io_read16(sd, REG_FMT_V_ACT);
+ vfront1 = io_read(sd, REG_FMT_V_FRONT_F1);
+ vfront2 = io_read(sd, REG_FMT_V_FRONT_F2);
+ vsync = io_read(sd, REG_FMT_V_SYNC);
+ vback1 = io_read(sd, REG_FMT_V_BACK_F1);
+ vback2 = io_read(sd, REG_FMT_V_BACK_F2);
+
+ v4l2_dbg(1, debug, sd, "Geometry: H %u %u %u %u %u Sync%c V %u %u %u %u %u %u %u Sync%c\n",
+ htot, hact, hfront, hsync, hback, hsync_pos ? '+' : '-',
+ vtot, vact, vfront1, vfront2, vsync, vback1, vback2, vsync_pos ? '+' : '-');
+
+ if (!timings)
+ return 0;
- v4l_err(state->client, "no resolution match for timings: %d/%d/%d\n",
- vper, hper, hsper);
- return -ERANGE;
+ timings->type = V4L2_DV_BT_656_1120;
+ timings->bt.width = hact;
+ timings->bt.hfrontporch = hfront;
+ timings->bt.hsync = hsync;
+ timings->bt.hbackporch = hback;
+ timings->bt.height = vact;
+ timings->bt.vfrontporch = vfront1;
+ timings->bt.vsync = vsync;
+ timings->bt.vbackporch = vback1;
+ timings->bt.interlaced = interlaced ? V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
+ timings->bt.polarities = vsync_pos ? V4L2_DV_VSYNC_POS_POL : 0;
+ timings->bt.polarities |= hsync_pos ? V4L2_DV_HSYNC_POS_POL : 0;
+
+ timings->bt.pixelclock = (u64)htot * vtot * 27000000;
+ if (interlaced) {
+ timings->bt.il_vfrontporch = vfront2;
+ timings->bt.il_vsync = timings->bt.vsync;
+ timings->bt.il_vbackporch = vback2;
+ do_div(timings->bt.pixelclock, vper * 2 /* full frame */);
+ } else {
+ timings->bt.il_vfrontporch = 0;
+ timings->bt.il_vsync = 0;
+ timings->bt.il_vbackporch = 0;
+ do_div(timings->bt.pixelclock, vper);
+ }
+ v4l2_find_dv_timings_cap(timings, &tda1997x_dv_timings_cap,
+ (u32)timings->bt.pixelclock / 500, NULL, NULL);
+ v4l2_print_dv_timings(sd->name, "Detected format: ", timings, false);
+ return 0;
}
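As a sanity check of the new pixel clock derivation, with illustrative numbers for a standard 1080p60 timing (not values read from the chip):

/*
 * vper counts 27 MHz cycles per frame in the progressive case:
 *   1080p60: htot = 2200, vtot = 1125, vper = 27000000 / 60 = 450000
 *   pixelclock = 2200 * 1125 * 27000000 / 450000 = 148500000 Hz (148.5 MHz)
 */
u64 pixelclock = (u64)htot * vtot * 27000000;
do_div(pixelclock, vper);	/* the interlaced case divides by vper * 2 */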
/* some sort of errata workaround for chip revision 0 (N1) */
@@ -1248,13 +1263,13 @@ tda1997x_parse_infoframe(struct tda1997x_state *state, u16 addr)
{
struct v4l2_subdev *sd = &state->sd;
union hdmi_infoframe frame;
- u8 buffer[40];
+ u8 buffer[40] = { 0 };
u8 reg;
int len, err;
/* read data */
len = io_readn(sd, addr, sizeof(buffer), buffer);
- err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
+ err = hdmi_infoframe_unpack(&frame, buffer, len);
if (err) {
v4l_err(state->client,
"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
@@ -1928,13 +1943,13 @@ static int tda1997x_log_infoframe(struct v4l2_subdev *sd, int addr)
{
struct tda1997x_state *state = to_state(sd);
union hdmi_infoframe frame;
- u8 buffer[40];
+ u8 buffer[40] = { 0 };
int len, err;
/* read data */
len = io_readn(sd, addr, sizeof(buffer), buffer);
v4l2_dbg(1, debug, sd, "infoframe: addr=%d len=%d\n", addr, len);
- err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
+ err = hdmi_infoframe_unpack(&frame, buffer, len);
if (err) {
v4l_err(state->client,
"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
@@ -2450,7 +2465,8 @@ static const struct media_entity_operations tda1997x_media_ops = {
static int tda1997x_pcm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct tda1997x_state *state = snd_soc_dai_get_drvdata(dai);
+ struct v4l2_subdev *sd = snd_soc_dai_get_drvdata(dai);
+ struct tda1997x_state *state = to_state(sd);
struct snd_soc_component *component = dai->component;
struct snd_pcm_runtime *rtd = substream->runtime;
int rate, err;
@@ -2759,7 +2775,6 @@ static int tda1997x_probe(struct i2c_client *client,
dev_err(&client->dev, "register audio codec failed\n");
goto err_free_media;
}
- dev_set_drvdata(&state->client->dev, state);
v4l_info(state->client, "registered audio codec\n");
}
diff --git a/drivers/media/i2c/tda1997x_regs.h b/drivers/media/i2c/tda1997x_regs.h
index d9b3daada07d..115371ba33f0 100644
--- a/drivers/media/i2c/tda1997x_regs.h
+++ b/drivers/media/i2c/tda1997x_regs.h
@@ -117,9 +117,12 @@
#define REG_CURPAGE_00H 0xFF
#define MASK_VPER 0x3fffff
+#define MASK_VPER_SYNC_POS 0x800000
#define MASK_VHREF 0x3fff
#define MASK_HPER 0x0fff
+#define MASK_HPER_SYNC_POS 0x8000
#define MASK_HSWIDTH 0x03ff
+#define MASK_HSWIDTH_INTERLACED 0x8000
/* HPD Detection */
#define DETECT_UTIL BIT(7) /* utility of HDMI level */
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index de12f38f347c..cb660b4bfd4b 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -441,14 +441,15 @@ static void buffer_queue(struct vb2_buffer *vb)
static int video_i2c_thread_vid_cap(void *priv)
{
struct video_i2c_data *data = priv;
- unsigned int delay = mult_frac(HZ, data->frame_interval.numerator,
- data->frame_interval.denominator);
+ u32 delay = mult_frac(1000000UL, data->frame_interval.numerator,
+ data->frame_interval.denominator);
+ s64 end_us = ktime_to_us(ktime_get());
set_freezable();
do {
- unsigned long start_jiffies = jiffies;
struct video_i2c_buffer *vid_cap_buf = NULL;
+ s64 current_us;
int schedule_delay;
try_to_freeze();
@@ -475,12 +476,14 @@ static int video_i2c_thread_vid_cap(void *priv)
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
- schedule_delay = delay - (jiffies - start_jiffies);
-
- if (time_after(jiffies, start_jiffies + delay))
- schedule_delay = delay;
-
- schedule_timeout_interruptible(schedule_delay);
+ end_us += delay;
+ current_us = ktime_to_us(ktime_get());
+ if (current_us < end_us) {
+ schedule_delay = end_us - current_us;
+ usleep_range(schedule_delay * 3 / 4, schedule_delay);
+ } else {
+ end_us = current_us;
+ }
} while (!kthread_should_stop());
return 0;
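The new pacing keeps an absolute deadline so that sleep overshoot does not accumulate frame over frame. A minimal sketch of the scheme (capture elided; "interval" stands for the configured struct v4l2_fract):

u32 period_us = mult_frac(1000000UL, interval.numerator,
			  interval.denominator);
s64 deadline = ktime_to_us(ktime_get());
s64 now;

for (;;) {
	/* ... capture and queue one frame ... */
	deadline += period_us;
	now = ktime_to_us(ktime_get());
	if (now < deadline)
		usleep_range((deadline - now) * 3 / 4, deadline - now);
	else
		deadline = now;	/* fell behind: resync instead of bursting */
}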
diff --git a/drivers/media/mc/Kconfig b/drivers/media/mc/Kconfig
index 4815b9dde9af..375b09612981 100644
--- a/drivers/media/mc/Kconfig
+++ b/drivers/media/mc/Kconfig
@@ -16,13 +16,5 @@ config MEDIA_CONTROLLER_REQUEST_API
bool
depends on MEDIA_CONTROLLER
help
- DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
-
This option enables the Request API for the Media controller and V4L2
interfaces. It is currently needed by a few stateless codec drivers.
-
- There is currently no intention to provide API or ABI stability for
- this new API as of yet.
-
-comment "Please notice that the enabled Media controller Request API is EXPERIMENTAL"
- depends on MEDIA_CONTROLLER_REQUEST_API
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 16af58f2f93c..74edcc76d12f 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -332,8 +332,8 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
}
}
- if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
- ret = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64))) {
+ ret = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (ret) {
cobalt_err("no suitable DMA available\n");
goto err_disable;
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index f2440eb38820..59497ba6bf1f 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -804,7 +804,7 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
CX18_ERR("Can't enable device %d!\n", cx->instance);
return -EIO;
}
- if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
CX18_ERR("No suitable DMA available, card %d\n", cx->instance);
return -EIO;
}
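This conversion (repeated below for ddbridge, ivtv, netup and pluto2) is mechanical: the pci_set_dma_mask() wrapper is deprecated in favor of the generic DMA API. As a sketch of the usual modern form, dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, while the hunks here keep plain dma_set_mask() to stay behavior-identical:

if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
	return -ENODEV;	/* no usable DMA addressing */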
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 4864def20676..ce3f0141f94e 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -276,7 +276,7 @@ static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
s->pixelformat = fmt->fmt.pix.pixelformat;
/* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12) {
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16) {
s->vb_bytes_per_frame = h * 720 * 3 / 2;
s->vb_bytes_per_line = 720; /* First plane */
} else {
@@ -470,7 +470,7 @@ static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
.index = 0,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.description = "HM12 (YUV 4:1:1)",
- .pixelformat = V4L2_PIX_FMT_HM12,
+ .pixelformat = V4L2_PIX_FMT_NV12_16L16,
},
{
.index = 1,
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 2f5df471dada..013694bfcb1c 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -325,8 +325,8 @@ void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)
struct cx18_buffer *buf;
list_for_each_entry(buf, &mdl->buf_list, list)
- pci_dma_sync_single_for_device(pci_dev, buf->dma_handle,
- buf_size, dma);
+ dma_sync_single_for_device(&pci_dev->dev, buf->dma_handle,
+ buf_size, dma);
}
int cx18_stream_alloc(struct cx18_stream *s)
@@ -385,8 +385,9 @@ int cx18_stream_alloc(struct cx18_stream *s)
cx18_enqueue(s, mdl, &s->q_idle);
INIT_LIST_HEAD(&buf->list);
- buf->dma_handle = pci_map_single(s->cx->pci_dev,
- buf->buf, s->buf_size, s->dma);
+ buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
+ buf->buf, s->buf_size,
+ s->dma);
cx18_buf_sync_for_cpu(s, buf);
list_add_tail(&buf->list, &s->buf_pool);
}
@@ -419,8 +420,8 @@ void cx18_stream_free(struct cx18_stream *s)
buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);
list_del_init(&buf->list);
- pci_unmap_single(s->cx->pci_dev, buf->dma_handle,
- s->buf_size, s->dma);
+ dma_unmap_single(&s->cx->pci_dev->dev, buf->dma_handle,
+ s->buf_size, s->dma);
kfree(buf->buf);
kfree(buf);
}
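The conversion keeps the existing behavior of not checking the mapping result; for reference, the fuller idiom (which the pluto2 hunk at the end of this series already follows via dma_mapping_error()) looks like:

dma_addr_t handle;

handle = dma_map_single(&pdev->dev, buf, size, DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, handle))
	return -ENOMEM;	/* do not hand an invalid address to hardware */
/* ... DMA runs ... */
dma_unmap_single(&pdev->dev, handle, size, DMA_FROM_DEVICE);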
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index c41bae118415..87ff554bb2d2 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -49,44 +49,44 @@ static struct {
{ /* CX18_ENC_STREAM_TYPE_MPG */
"encoder MPEG",
VFL_TYPE_VIDEO, 0,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_AUDIO | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_TS */
"TS",
VFL_TYPE_VIDEO, -1,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
},
{ /* CX18_ENC_STREAM_TYPE_YUV */
"encoder YUV",
VFL_TYPE_VIDEO, CX18_V4L2_ENC_YUV_OFFSET,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING | V4L2_CAP_AUDIO | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_VBI */
"encoder VBI",
VFL_TYPE_VBI, 0,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE |
V4L2_CAP_READWRITE | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_PCM */
"encoder PCM audio",
VFL_TYPE_VIDEO, CX18_V4L2_ENC_PCM_OFFSET,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
},
{ /* CX18_ENC_STREAM_TYPE_IDX */
"encoder IDX",
VFL_TYPE_VIDEO, -1,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
},
{ /* CX18_ENC_STREAM_TYPE_RAD */
"encoder radio",
VFL_TYPE_RADIO, 0,
- PCI_DMA_NONE,
+ DMA_NONE,
V4L2_CAP_RADIO | V4L2_CAP_TUNER
},
};
@@ -133,7 +133,7 @@ static int cx18_prepare_buffer(struct videobuf_queue *q,
/* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16)
s->vb_bytes_per_frame = height * 720 * 3 / 2;
else
s->vb_bytes_per_frame = height * 720 * 2;
@@ -155,7 +155,7 @@ static int cx18_prepare_buffer(struct videobuf_queue *q,
/* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16)
s->vb_bytes_per_frame = height * 720 * 3 / 2;
else
s->vb_bytes_per_frame = height * 720 * 2;
@@ -287,7 +287,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
s, &cx->serialize_lock);
/* Assume the previous pixel default */
- s->pixelformat = V4L2_PIX_FMT_HM12;
+ s->pixelformat = V4L2_PIX_FMT_NV12_16L16;
s->vb_bytes_per_frame = cx->cxhdl.height * 720 * 3 / 2;
s->vb_bytes_per_line = 720;
}
@@ -324,7 +324,7 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
/* User explicitly selected 0 buffers for these streams, so don't
create them. */
- if (cx18_stream_info[type].dma != PCI_DMA_NONE &&
+ if (cx18_stream_info[type].dma != DMA_NONE &&
cx->stream_buffers[type] == 0) {
CX18_INFO("Disabled %s device\n", cx18_stream_info[type].name);
return 0;
@@ -733,7 +733,7 @@ static void cx18_stream_configure_mdls(struct cx18_stream *s)
* Set the MDL size to the exact size needed for one frame.
* Use enough buffers per MDL to cover the MDL size
*/
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16)
s->mdl_size = 720 * s->cx->cxhdl.height * 3 / 2;
else
s->mdl_size = 720 * s->cx->cxhdl.height * 2;
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index ab14d35214aa..25dc8d4dc5b7 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -550,7 +550,7 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
THIS_MODULE, sizeof(struct cx23885_audio_dev), &card);
if (err < 0)
- goto error;
+ goto error_msg;
chip = (struct cx23885_audio_dev *) card->private_data;
chip->dev = dev;
@@ -576,6 +576,7 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
error:
snd_card_free(card);
+error_msg:
pr_err("%s(): Failed to register analog audio adapter\n",
__func__);
diff --git a/drivers/media/pci/ddbridge/ddbridge-main.c b/drivers/media/pci/ddbridge/ddbridge-main.c
index 03dc9924fa2c..25d0d6745b52 100644
--- a/drivers/media/pci/ddbridge/ddbridge-main.c
+++ b/drivers/media/pci/ddbridge/ddbridge-main.c
@@ -180,8 +180,8 @@ static int ddb_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
return -ENODEV;
dev = vzalloc(sizeof(*dev));
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
index 30d29b96a339..67c467d3c81f 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
@@ -29,6 +29,7 @@ static const struct cio2_sensor_config cio2_supported_sensors[] = {
static const struct cio2_property_names prop_names = {
.clock_frequency = "clock-frequency",
.rotation = "rotation",
+ .orientation = "orientation",
.bus_type = "bus-type",
.data_lanes = "data-lanes",
.remote_endpoint = "remote-endpoint",
@@ -72,11 +73,51 @@ out_free_buff:
return ret;
}
+static u32 cio2_bridge_parse_rotation(struct cio2_sensor *sensor)
+{
+ switch (sensor->ssdb.degree) {
+ case CIO2_SENSOR_ROTATION_NORMAL:
+ return 0;
+ case CIO2_SENSOR_ROTATION_INVERTED:
+ return 180;
+ default:
+ dev_warn(&sensor->adev->dev,
+ "Unknown rotation %d. Assume 0 degree rotation\n",
+ sensor->ssdb.degree);
+ return 0;
+ }
+}
+
+static enum v4l2_fwnode_orientation cio2_bridge_parse_orientation(struct cio2_sensor *sensor)
+{
+ switch (sensor->pld->panel) {
+ case ACPI_PLD_PANEL_FRONT:
+ return V4L2_FWNODE_ORIENTATION_FRONT;
+ case ACPI_PLD_PANEL_BACK:
+ return V4L2_FWNODE_ORIENTATION_BACK;
+ case ACPI_PLD_PANEL_TOP:
+ case ACPI_PLD_PANEL_LEFT:
+ case ACPI_PLD_PANEL_RIGHT:
+ case ACPI_PLD_PANEL_UNKNOWN:
+ return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ default:
+ dev_warn(&sensor->adev->dev, "Unknown _PLD panel value %d\n",
+ sensor->pld->panel);
+ return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ }
+}
+
static void cio2_bridge_create_fwnode_properties(
struct cio2_sensor *sensor,
struct cio2_bridge *bridge,
const struct cio2_sensor_config *cfg)
{
+ u32 rotation;
+ enum v4l2_fwnode_orientation orientation;
+
+ rotation = cio2_bridge_parse_rotation(sensor);
+ orientation = cio2_bridge_parse_orientation(sensor);
+
sensor->prop_names = prop_names;
sensor->local_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_CIO2_ENDPOINT]);
@@ -85,9 +126,12 @@ static void cio2_bridge_create_fwnode_properties(
sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
sensor->prop_names.clock_frequency,
sensor->ssdb.mclkspeed);
- sensor->dev_properties[1] = PROPERTY_ENTRY_U8(
+ sensor->dev_properties[1] = PROPERTY_ENTRY_U32(
sensor->prop_names.rotation,
- sensor->ssdb.degree);
+ rotation);
+ sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.orientation,
+ orientation);
sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
sensor->prop_names.bus_type,
@@ -159,6 +203,7 @@ static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
for (i = 0; i < bridge->n_sensors; i++) {
sensor = &bridge->sensors[i];
software_node_unregister_nodes(sensor->swnodes);
+ ACPI_FREE(sensor->pld);
acpi_dev_put(sensor->adev);
}
}
@@ -170,6 +215,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
struct fwnode_handle *fwnode;
struct cio2_sensor *sensor;
struct acpi_device *adev;
+ acpi_status status;
int ret;
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
@@ -191,11 +237,15 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
if (ret)
goto err_put_adev;
+ status = acpi_get_physical_device_location(adev->handle, &sensor->pld);
+ if (ACPI_FAILURE(status))
+ goto err_put_adev;
+
if (sensor->ssdb.lanes > CIO2_MAX_LANES) {
dev_err(&adev->dev,
"Number of lanes in SSDB is invalid\n");
ret = -EINVAL;
- goto err_put_adev;
+ goto err_free_pld;
}
cio2_bridge_create_fwnode_properties(sensor, bridge, cfg);
@@ -203,7 +253,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
ret = software_node_register_nodes(sensor->swnodes);
if (ret)
- goto err_put_adev;
+ goto err_free_pld;
fwnode = software_node_fwnode(&sensor->swnodes[
SWNODE_SENSOR_HID]);
@@ -225,6 +275,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
err_free_swnodes:
software_node_unregister_nodes(sensor->swnodes);
+err_free_pld:
+ ACPI_FREE(sensor->pld);
err_put_adev:
acpi_dev_put(adev);
return ret;
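The _PLD handling follows one ownership rule: the ACPI core allocates the struct acpi_pld_info, so every exit path after a successful lookup must release it, which is what the new err_free_pld label and the ACPI_FREE() in the unregister loop implement. Condensed:

struct acpi_pld_info *pld;
acpi_status status;

status = acpi_get_physical_device_location(adev->handle, &pld);
if (ACPI_FAILURE(status))
	return -ENODEV;	/* sensor without a usable _PLD */
/* ... map pld->panel to a v4l2 orientation ... */
ACPI_FREE(pld);		/* caller owns the buffer */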
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/drivers/media/pci/intel/ipu3/cio2-bridge.h
index dd0ffcafa489..202c7d494f7a 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.h
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.h
@@ -12,6 +12,10 @@
#define CIO2_MAX_LANES 4
#define MAX_NUM_LINK_FREQS 3
+/* Values are educated guesses as we don't have a spec */
+#define CIO2_SENSOR_ROTATION_NORMAL 0
+#define CIO2_SENSOR_ROTATION_INVERTED 1
+
#define CIO2_SENSOR_CONFIG(_HID, _NR, ...) \
(const struct cio2_sensor_config) { \
.hid = _HID, \
@@ -80,6 +84,7 @@ struct cio2_sensor_ssdb {
struct cio2_property_names {
char clock_frequency[16];
char rotation[9];
+ char orientation[12];
char bus_type[9];
char data_lanes[11];
char remote_endpoint[16];
@@ -106,9 +111,11 @@ struct cio2_sensor {
struct cio2_node_names node_names;
struct cio2_sensor_ssdb ssdb;
+ struct acpi_pld_info *pld;
+
struct cio2_property_names prop_names;
struct property_entry ep_properties[5];
- struct property_entry dev_properties[3];
+ struct property_entry dev_properties[4];
struct property_entry cio2_properties[3];
struct software_node_ref_args local_ref[1];
struct software_node_ref_args remote_ref[1];
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
index 47db0ee0fcbf..356ea966cf8d 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
@@ -11,6 +11,7 @@
* et al.
*/
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
@@ -102,26 +103,29 @@ static inline u32 cio2_bytesperline(const unsigned int width)
static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
+ struct device *dev = &cio2->pci_dev->dev;
+
if (cio2->dummy_lop) {
- dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
- cio2->dummy_lop, cio2->dummy_lop_bus_addr);
+ dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
+ cio2->dummy_lop_bus_addr);
cio2->dummy_lop = NULL;
}
if (cio2->dummy_page) {
- dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
- cio2->dummy_page, cio2->dummy_page_bus_addr);
+ dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
+ cio2->dummy_page_bus_addr);
cio2->dummy_page = NULL;
}
}
static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
+ struct device *dev = &cio2->pci_dev->dev;
unsigned int i;
- cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
+ cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
&cio2->dummy_page_bus_addr,
GFP_KERNEL);
- cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
+ cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
&cio2->dummy_lop_bus_addr,
GFP_KERNEL);
if (!cio2->dummy_page || !cio2->dummy_lop) {
@@ -497,6 +501,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
+ struct device *dev = &cio2->pci_dev->dev;
void __iomem *const base = cio2->base;
unsigned int i;
u32 value;
@@ -514,8 +519,7 @@ static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
value, value & CIO2_CDMAC0_DMA_HALTED,
4000, 2000000);
if (ret)
- dev_err(&cio2->pci_dev->dev,
- "DMA %i can not be halted\n", CIO2_DMA_CHAN);
+ dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
for (i = 0; i < CIO2_NUM_PORTS; i++) {
writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
@@ -539,8 +543,7 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
- dev_warn(&cio2->pci_dev->dev,
- "no ready buffers found on DMA channel %u\n",
+ dev_warn(dev, "no ready buffers found on DMA channel %u\n",
dma_chan);
return;
}
@@ -557,8 +560,7 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
q->bufs[q->bufs_first] = NULL;
atomic_dec(&q->bufs_queued);
- dev_dbg(&cio2->pci_dev->dev,
- "buffer %i done\n", b->vbb.vb2_buf.index);
+ dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
b->vbb.vb2_buf.timestamp = ns;
b->vbb.field = V4L2_FIELD_NONE;
@@ -612,6 +614,20 @@ static const char *const cio2_irq_errs[] = {
"non-matching Long Packet stalled",
};
+static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
+{
+ unsigned long csi2_status = status;
+ unsigned int i;
+
+ for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
+ dev_err(dev, "CSI-2 receiver port %i: %s\n",
+ port, cio2_irq_errs[i]);
+
+ if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
+ dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
+ csi2_status, port);
+}
+
static const char *const cio2_port_errs[] = {
"ECC recoverable",
"DPHY not recoverable",
@@ -622,10 +638,19 @@ static const char *const cio2_port_errs[] = {
"PKT2LONG",
};
+static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
+{
+ unsigned long port_status = status;
+ unsigned int i;
+
+ for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
+ dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
+}
+
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
- void __iomem *const base = cio2->base;
struct device *dev = &cio2->pci_dev->dev;
+ void __iomem *const base = cio2->base;
if (int_status & CIO2_INT_IOOE) {
/*
@@ -687,59 +712,32 @@ static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
/* CSI2 receiver (error) interrupt */
- u32 ie_status, ie_clear;
unsigned int port;
+ u32 ie_status;
- ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
- ie_status = ie_clear;
+ ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
for (port = 0; port < CIO2_NUM_PORTS; port++) {
u32 port_status = (ie_status >> (port * 8)) & 0xff;
- u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
- void __iomem *const csi_rx_base =
- base + CIO2_REG_PIPE_BASE(port);
- unsigned int i;
-
- while (port_status & err_mask) {
- i = ffs(port_status) - 1;
- dev_err(dev, "port %i error %s\n",
- port, cio2_port_errs[i]);
- ie_status &= ~BIT(port * 8 + i);
- port_status &= ~BIT(i);
- }
+
+ cio2_irq_log_port_errs(dev, port, port_status);
if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
- u32 csi2_status, csi2_clear;
+ void __iomem *csi_rx_base =
+ base + CIO2_REG_PIPE_BASE(port);
+ u32 csi2_status;
csi2_status = readl(csi_rx_base +
CIO2_REG_IRQCTRL_STATUS);
- csi2_clear = csi2_status;
- err_mask =
- BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
-
- while (csi2_status & err_mask) {
- i = ffs(csi2_status) - 1;
- dev_err(dev,
- "CSI-2 receiver port %i: %s\n",
- port, cio2_irq_errs[i]);
- csi2_status &= ~BIT(i);
- }
-
- writel(csi2_clear,
- csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
- if (csi2_status)
- dev_warn(dev,
- "unknown CSI2 error 0x%x on port %i\n",
- csi2_status, port);
- ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
+ cio2_irq_log_irq_errs(dev, port, csi2_status);
+
+ writel(csi2_status,
+ csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
}
}
- writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
- if (ie_status)
- dev_warn(dev, "unknown interrupt 0x%x on IE\n",
- ie_status);
+ writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
}
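The refactor replaces the open-coded ffs()-and-clear loops with for_each_set_bit(), which needs the bitops.h include added above. A generic sketch (err_names and the status value are illustrative, not hardware-derived):

static const char *const err_names[] = {
	"ECC recoverable",
	"CRC error",
};
unsigned long status = 0x3;	/* example status word */
unsigned int i;

for_each_set_bit(i, &status, ARRAY_SIZE(err_names))
	pr_err("error %u: %s\n", i, err_names[i]);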
@@ -795,16 +793,21 @@ static int cio2_vb2_queue_setup(struct vb2_queue *vq,
struct device *alloc_devs[])
{
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
unsigned int i;
- *num_planes = q->format.num_planes;
+ if (*num_planes && *num_planes < q->format.num_planes)
+ return -EINVAL;
- for (i = 0; i < *num_planes; ++i) {
+ for (i = 0; i < q->format.num_planes; ++i) {
+ if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
+ return -EINVAL;
sizes[i] = q->format.plane_fmt[i].sizeimage;
- alloc_devs[i] = &cio2->pci_dev->dev;
+ alloc_devs[i] = dev;
}
+ *num_planes = q->format.num_planes;
*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
/* Initialize buffer queue */
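For context on the hunk above: vb2 invokes queue_setup() with a non-zero *num_planes for VIDIOC_CREATE_BUFS, in which case sizes[] carries the sizes requested by userspace and the driver must validate rather than overwrite them. The contract, sketched with illustrative fmt_* names:

if (*num_planes && *num_planes < fmt_num_planes)
	return -EINVAL;	/* fewer planes than the format needs */
for (i = 0; i < fmt_num_planes; i++) {
	if (*num_planes && sizes[i] < fmt_sizeimage[i])
		return -EINVAL;	/* requested buffer too small */
	sizes[i] = fmt_sizeimage[i];
}
*num_planes = fmt_num_planes;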
@@ -824,8 +827,7 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
struct device *dev = &cio2->pci_dev->dev;
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+ struct cio2_buffer *b = to_cio2_buffer(vb);
unsigned int pages = PFN_UP(vb->planes[0].length);
unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
struct sg_table *sg;
@@ -879,17 +881,17 @@ fail:
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = &cio2->pci_dev->dev;
struct cio2_queue *q =
container_of(vb->vb2_queue, struct cio2_queue, vbq);
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+ struct cio2_buffer *b = to_cio2_buffer(vb);
struct cio2_fbpt_entry *entry;
unsigned long flags;
unsigned int i, j, next = q->bufs_next;
int bufs_queued = atomic_inc_return(&q->bufs_queued);
u32 fbpt_rp;
- dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
+ dev_dbg(dev, "queue buffer %d\n", vb->index);
/*
* This code queues the buffer to the CIO2 DMA engine, which starts
@@ -940,12 +942,12 @@ static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
return;
}
- dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
+ dev_dbg(dev, "entry %i was full!\n", next);
next = (next + 1) % CIO2_MAX_BUFFERS;
}
local_irq_restore(flags);
- dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
+ dev_err(dev, "error: all cio2 entries were full!\n");
atomic_dec(&q->bufs_queued);
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
@@ -954,14 +956,14 @@ static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+ struct device *dev = &cio2->pci_dev->dev;
+ struct cio2_buffer *b = to_cio2_buffer(vb);
unsigned int i;
/* Free LOP table */
for (i = 0; i < CIO2_MAX_LOPS; i++) {
if (b->lop[i])
- dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
+ dma_free_coherent(dev, PAGE_SIZE,
b->lop[i], b->lop_bus_addr[i]);
}
}
@@ -970,14 +972,15 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
int r;
cio2->cur_queue = q;
atomic_set(&q->frame_sequence, 0);
- r = pm_runtime_resume_and_get(&cio2->pci_dev->dev);
+ r = pm_runtime_resume_and_get(dev);
if (r < 0) {
- dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
+ dev_info(dev, "failed to set power %d\n", r);
return r;
}
@@ -1003,9 +1006,9 @@ fail_csi2_subdev:
fail_hw:
media_pipeline_stop(&q->vdev.entity);
fail_pipeline:
- dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
+ dev_dbg(dev, "failed to start streaming (%d)\n", r);
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
- pm_runtime_put(&cio2->pci_dev->dev);
+ pm_runtime_put(dev);
return r;
}
@@ -1014,16 +1017,16 @@ static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
- dev_err(&cio2->pci_dev->dev,
- "failed to stop sensor streaming\n");
+ dev_err(dev, "failed to stop sensor streaming\n");
cio2_hw_exit(cio2, q);
synchronize_irq(cio2->pci_dev->irq);
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
media_pipeline_stop(&q->vdev.entity);
- pm_runtime_put(&cio2->pci_dev->dev);
+ pm_runtime_put(dev);
cio2->streaming = false;
}
@@ -1311,16 +1314,16 @@ static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
static int cio2_video_link_validate(struct media_link *link)
{
- struct video_device *vd = container_of(link->sink->entity,
- struct video_device, entity);
+ struct media_entity *entity = link->sink->entity;
+ struct video_device *vd = media_entity_to_video_device(entity);
struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
struct cio2_device *cio2 = video_get_drvdata(vd);
+ struct device *dev = &cio2->pci_dev->dev;
struct v4l2_subdev_format source_fmt;
int ret;
- if (!media_entity_remote_pad(link->sink->entity->pads)) {
- dev_info(&cio2->pci_dev->dev,
- "video node %s pad not connected\n", vd->name);
+ if (!media_entity_remote_pad(entity->pads)) {
+ dev_info(dev, "video node %s pad not connected\n", vd->name);
return -ENOTCONN;
}
@@ -1330,8 +1333,7 @@ static int cio2_video_link_validate(struct media_link *link)
if (source_fmt.format.width != q->format.width ||
source_fmt.format.height != q->format.height) {
- dev_err(&cio2->pci_dev->dev,
- "Wrong width or height %ux%u (%ux%u expected)\n",
+ dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
q->format.width, q->format.height,
source_fmt.format.width, source_fmt.format.height);
return -EINVAL;
@@ -1371,15 +1373,15 @@ struct sensor_async_subdev {
struct csi2_bus_info csi2;
};
+#define to_sensor_asd(asd) container_of(asd, struct sensor_async_subdev, asd)
+
/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_subdev *asd)
{
- struct cio2_device *cio2 = container_of(notifier,
- struct cio2_device, notifier);
- struct sensor_async_subdev *s_asd = container_of(asd,
- struct sensor_async_subdev, asd);
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
struct cio2_queue *q;
if (cio2->queue[s_asd->csi2.port].sensor)
@@ -1399,10 +1401,8 @@ static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_subdev *asd)
{
- struct cio2_device *cio2 = container_of(notifier,
- struct cio2_device, notifier);
- struct sensor_async_subdev *s_asd = container_of(asd,
- struct sensor_async_subdev, asd);
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
cio2->queue[s_asd->csi2.port].sensor = NULL;
}
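The to_cio2_device() and to_sensor_asd() helpers used here (and to_cio2_buffer() in the header) are plain container_of() wrappers; generically:

/* recover the enclosing object from a pointer to an embedded member */
struct wrapper {
	int payload;
	struct v4l2_async_notifier notifier;
};
#define to_wrapper(n) container_of(n, struct wrapper, notifier)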
@@ -1410,8 +1410,8 @@ static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
- struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
- notifier);
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct device *dev = &cio2->pci_dev->dev;
struct sensor_async_subdev *s_asd;
struct v4l2_async_subdev *asd;
struct cio2_queue *q;
@@ -1419,7 +1419,7 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
int ret;
list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
- s_asd = container_of(asd, struct sensor_async_subdev, asd);
+ s_asd = to_sensor_asd(asd);
q = &cio2->queue[s_asd->csi2.port];
for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
@@ -1428,8 +1428,7 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
break;
if (pad == q->sensor->entity.num_pads) {
- dev_err(&cio2->pci_dev->dev,
- "failed to find src pad for %s\n",
+ dev_err(dev, "failed to find src pad for %s\n",
q->sensor->name);
return -ENXIO;
}
@@ -1439,8 +1438,7 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
&q->subdev.entity, CIO2_PAD_SINK,
0);
if (ret) {
- dev_err(&cio2->pci_dev->dev,
- "failed to create link for %s\n",
+ dev_err(dev, "failed to create link for %s\n",
q->sensor->name);
return ret;
}
@@ -1457,6 +1455,7 @@ static const struct v4l2_async_notifier_operations cio2_async_ops = {
static int cio2_parse_firmware(struct cio2_device *cio2)
{
+ struct device *dev = &cio2->pci_dev->dev;
unsigned int i;
int ret;
@@ -1467,10 +1466,8 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
struct sensor_async_subdev *s_asd;
struct fwnode_handle *ep;
- ep = fwnode_graph_get_endpoint_by_id(
- dev_fwnode(&cio2->pci_dev->dev), i, 0,
- FWNODE_GRAPH_ENDPOINT_NEXT);
-
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
if (!ep)
continue;
@@ -1478,8 +1475,9 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
if (ret)
goto err_parse;
- s_asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &cio2->notifier, ep, struct sensor_async_subdev);
+ s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
+ struct
+ sensor_async_subdev);
if (IS_ERR(s_asd)) {
ret = PTR_ERR(s_asd);
goto err_parse;
@@ -1502,10 +1500,9 @@ err_parse:
* suspend.
*/
cio2->notifier.ops = &cio2_async_ops;
- ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
+ ret = v4l2_async_nf_register(&cio2->v4l2_dev, &cio2->notifier);
if (ret)
- dev_err(&cio2->pci_dev->dev,
- "failed to register async notifier : %d\n", ret);
+ dev_err(dev, "failed to register async notifier : %d\n", ret);
return ret;
}
@@ -1524,7 +1521,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
static const u32 default_width = 1936;
static const u32 default_height = 1096;
const struct ipu3_cio2_fmt dflt_fmt = formats[0];
-
+ struct device *dev = &cio2->pci_dev->dev;
struct video_device *vdev = &q->vdev;
struct vb2_queue *vbq = &q->vbq;
struct v4l2_subdev *subdev = &q->subdev;
@@ -1566,8 +1563,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
subdev->internal_ops = &cio2_subdev_internal_ops;
r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed initialize subdev media entity (%d)\n", r);
+ dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
goto fail_subdev_media_entity;
}
@@ -1575,8 +1571,8 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
vdev->entity.ops = &cio2_video_entity_ops;
r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed initialize videodev media entity (%d)\n", r);
+ dev_err(dev, "failed initialize videodev media entity (%d)\n",
+ r);
goto fail_vdev_media_entity;
}
@@ -1590,8 +1586,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
v4l2_set_subdevdata(subdev, cio2);
r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed initialize subdev (%d)\n", r);
+ dev_err(dev, "failed initialize subdev (%d)\n", r);
goto fail_subdev;
}
@@ -1607,8 +1602,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
vbq->lock = &q->lock;
r = vb2_queue_init(vbq);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed to initialize videobuf2 queue (%d)\n", r);
+ dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
goto fail_subdev;
}
@@ -1625,8 +1619,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
video_set_drvdata(vdev, cio2);
r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed to register video device (%d)\n", r);
+ dev_err(dev, "failed to register video device (%d)\n", r);
goto fail_vdev;
}
@@ -1648,7 +1641,7 @@ fail_subdev:
fail_vdev_media_entity:
media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
- cio2_fbpt_exit(q, &cio2->pci_dev->dev);
+ cio2_fbpt_exit(q, dev);
fail_fbpt:
mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
@@ -1715,11 +1708,12 @@ static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
static int cio2_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
- struct fwnode_handle *fwnode = dev_fwnode(&pci_dev->dev);
+ struct device *dev = &pci_dev->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct cio2_device *cio2;
int r;
- cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
+ cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
if (!cio2)
return -ENOMEM;
cio2->pci_dev = pci_dev;
@@ -1732,7 +1726,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
r = cio2_check_fwnode_graph(fwnode);
if (r) {
if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
- dev_err(&pci_dev->dev, "fwnode graph has no endpoints connected\n");
+ dev_err(dev, "fwnode graph has no endpoints connected\n");
return -EINVAL;
}
@@ -1743,16 +1737,16 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
r = pcim_enable_device(pci_dev);
if (r) {
- dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
+ dev_err(dev, "failed to enable device (%d)\n", r);
return r;
}
- dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
+ dev_info(dev, "device 0x%x (rev: 0x%x)\n",
pci_dev->device, pci_dev->revision);
r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
if (r) {
- dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
+ dev_err(dev, "failed to remap I/O memory (%d)\n", r);
return -ENODEV;
}
@@ -1762,15 +1756,15 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
- r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
+ r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
if (r) {
- dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
+ dev_err(dev, "failed to set DMA mask (%d)\n", r);
return -ENODEV;
}
r = pci_enable_msi(pci_dev);
if (r) {
- dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
+ dev_err(dev, "failed to enable MSI (%d)\n", r);
return r;
}
@@ -1780,7 +1774,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
mutex_init(&cio2->lock);
- cio2->media_dev.dev = &cio2->pci_dev->dev;
+ cio2->media_dev.dev = dev;
strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
sizeof(cio2->media_dev.model));
snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
@@ -1793,10 +1787,9 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
goto fail_mutex_destroy;
cio2->v4l2_dev.mdev = &cio2->media_dev;
- r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
+ r = v4l2_device_register(dev, &cio2->v4l2_dev);
if (r) {
- dev_err(&pci_dev->dev,
- "failed to register V4L2 device (%d)\n", r);
+ dev_err(dev, "failed to register V4L2 device (%d)\n", r);
goto fail_media_device_unregister;
}
@@ -1804,28 +1797,28 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
if (r)
goto fail_v4l2_device_unregister;
- v4l2_async_notifier_init(&cio2->notifier);
+ v4l2_async_nf_init(&cio2->notifier);
/* Register notifier for subdevices we care */
r = cio2_parse_firmware(cio2);
if (r)
goto fail_clean_notifier;
- r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
- IRQF_SHARED, CIO2_NAME, cio2);
+ r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
+ CIO2_NAME, cio2);
if (r) {
- dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
+ dev_err(dev, "failed to request IRQ (%d)\n", r);
goto fail_clean_notifier;
}
- pm_runtime_put_noidle(&pci_dev->dev);
- pm_runtime_allow(&pci_dev->dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_allow(dev);
return 0;
fail_clean_notifier:
- v4l2_async_notifier_unregister(&cio2->notifier);
- v4l2_async_notifier_cleanup(&cio2->notifier);
+ v4l2_async_nf_unregister(&cio2->notifier);
+ v4l2_async_nf_cleanup(&cio2->notifier);
cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
v4l2_device_unregister(&cio2->v4l2_dev);
@@ -1844,8 +1837,8 @@ static void cio2_pci_remove(struct pci_dev *pci_dev)
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
media_device_unregister(&cio2->media_dev);
- v4l2_async_notifier_unregister(&cio2->notifier);
- v4l2_async_notifier_cleanup(&cio2->notifier);
+ v4l2_async_nf_unregister(&cio2->notifier);
+ v4l2_async_nf_cleanup(&cio2->notifier);
cio2_queues_exit(cio2);
cio2_fbpt_exit_dummy(cio2);
v4l2_device_unregister(&cio2->v4l2_dev);
@@ -2005,10 +1998,9 @@ static int __maybe_unused cio2_resume(struct device *dev)
if (!cio2->streaming)
return 0;
/* Start stream */
- r = pm_runtime_force_resume(&cio2->pci_dev->dev);
+ r = pm_runtime_force_resume(dev);
if (r < 0) {
- dev_err(&cio2->pci_dev->dev,
- "failed to set power %d\n", r);
+ dev_err(dev, "failed to set power %d\n", r);
return r;
}
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index 3806d7f04d69..3a1f394e05aa 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -338,6 +338,8 @@ struct cio2_buffer {
unsigned int offset;
};
+#define to_cio2_buffer(vb) container_of(vb, struct cio2_buffer, vbb.vb2_buf)
+
struct csi2_bus_info {
u32 port;
u32 lanes;
@@ -399,6 +401,8 @@ struct cio2_device {
dma_addr_t dummy_lop_bus_addr;
};
+#define to_cio2_device(n) container_of(n, struct cio2_device, notifier)
+
/**************** Virtual channel ****************/
/*
* This should come from sensor driver. No
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 8ebc97ebf1a2..57d4d5485d7a 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -837,7 +837,7 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
IVTV_ERR("Can't enable device!\n");
return -EIO;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
IVTV_ERR("No suitable DMA available.\n");
return -EIO;
}
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index da19b2e95e6c..0cdf6b3210c2 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -339,7 +339,7 @@ static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
pixfmt->field = V4L2_FIELD_INTERLACED;
if (id->type == IVTV_ENC_STREAM_TYPE_YUV) {
- pixfmt->pixelformat = V4L2_PIX_FMT_HM12;
+ pixfmt->pixelformat = V4L2_PIX_FMT_NV12_16L16;
/* YUV size is (Y=(h*720) + UV=(h*(720/2))) */
pixfmt->sizeimage = pixfmt->height * 720 * 3 / 2;
pixfmt->bytesperline = 720;
@@ -417,7 +417,7 @@ static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *f
pixfmt->field = V4L2_FIELD_ANY;
break;
}
- pixfmt->pixelformat = V4L2_PIX_FMT_HM12;
+ pixfmt->pixelformat = V4L2_PIX_FMT_NV12_16L16;
pixfmt->bytesperline = 720;
pixfmt->width = itv->yuv_info.v4l2_src_w;
pixfmt->height = itv->yuv_info.v4l2_src_h;
@@ -917,7 +917,7 @@ static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
static const struct v4l2_fmtdesc hm12 = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.description = "HM12 (YUV 4:2:0)",
- .pixelformat = V4L2_PIX_FMT_HM12,
+ .pixelformat = V4L2_PIX_FMT_NV12_16L16,
};
static const struct v4l2_fmtdesc mpeg = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
@@ -944,7 +944,7 @@ static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdes
static const struct v4l2_fmtdesc hm12 = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
.description = "HM12 (YUV 4:2:0)",
- .pixelformat = V4L2_PIX_FMT_HM12,
+ .pixelformat = V4L2_PIX_FMT_NV12_16L16,
};
static const struct v4l2_fmtdesc mpeg = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
diff --git a/drivers/media/pci/ivtv/ivtv-queue.c b/drivers/media/pci/ivtv/ivtv-queue.c
index 7ac4615e92ea..f9b192ab7e7c 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.c
+++ b/drivers/media/pci/ivtv/ivtv-queue.c
@@ -188,7 +188,7 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
return 0;
IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
- s->dma != PCI_DMA_NONE ? "DMA " : "",
+ s->dma != DMA_NONE ? "DMA " : "",
s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);
s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
@@ -218,8 +218,9 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
return -ENOMEM;
}
if (ivtv_might_use_dma(s)) {
- s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
- sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
+ s->sg_handle = dma_map_single(&itv->pdev->dev, s->sg_dma,
+ sizeof(struct ivtv_sg_element),
+ DMA_TO_DEVICE);
ivtv_stream_sync_for_cpu(s);
}
@@ -237,7 +238,7 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
}
INIT_LIST_HEAD(&buf->list);
if (ivtv_might_use_dma(s)) {
- buf->dma_handle = pci_map_single(s->itv->pdev,
+ buf->dma_handle = dma_map_single(&s->itv->pdev->dev,
buf->buf, s->buf_size + 256, s->dma);
ivtv_buf_sync_for_cpu(s, buf);
}
@@ -260,8 +261,8 @@ void ivtv_stream_free(struct ivtv_stream *s)
/* empty q_free */
while ((buf = ivtv_dequeue(s, &s->q_free))) {
if (ivtv_might_use_dma(s))
- pci_unmap_single(s->itv->pdev, buf->dma_handle,
- s->buf_size + 256, s->dma);
+ dma_unmap_single(&s->itv->pdev->dev, buf->dma_handle,
+ s->buf_size + 256, s->dma);
kfree(buf->buf);
kfree(buf);
}
@@ -269,8 +270,9 @@ void ivtv_stream_free(struct ivtv_stream *s)
/* Free SG Array/Lists */
if (s->sg_dma != NULL) {
if (s->sg_handle != IVTV_DMA_UNMAPPED) {
- pci_unmap_single(s->itv->pdev, s->sg_handle,
- sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
+ dma_unmap_single(&s->itv->pdev->dev, s->sg_handle,
+ sizeof(struct ivtv_sg_element),
+ DMA_TO_DEVICE);
s->sg_handle = IVTV_DMA_UNMAPPED;
}
kfree(s->sg_pending);
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index f04ee84bab5f..6e455948cc77 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -100,7 +100,7 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_MPG */
"encoder MPG",
VFL_TYPE_VIDEO, 0,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -108,7 +108,7 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_YUV */
"encoder YUV",
VFL_TYPE_VIDEO, IVTV_V4L2_ENC_YUV_OFFSET,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -116,7 +116,7 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_VBI */
"encoder VBI",
VFL_TYPE_VBI, 0,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -124,42 +124,42 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_PCM */
"encoder PCM",
VFL_TYPE_VIDEO, IVTV_V4L2_ENC_PCM_OFFSET,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_ENC_STREAM_TYPE_RAD */
"encoder radio",
VFL_TYPE_RADIO, 0,
- PCI_DMA_NONE, 1,
+ DMA_NONE, 1,
V4L2_CAP_RADIO | V4L2_CAP_TUNER,
&ivtv_v4l2_radio_fops
},
{ /* IVTV_DEC_STREAM_TYPE_MPG */
"decoder MPG",
VFL_TYPE_VIDEO, IVTV_V4L2_DEC_MPG_OFFSET,
- PCI_DMA_TODEVICE, 0,
+ DMA_TO_DEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
},
{ /* IVTV_DEC_STREAM_TYPE_VBI */
"decoder VBI",
VFL_TYPE_VBI, IVTV_V4L2_DEC_VBI_OFFSET,
- PCI_DMA_NONE, 1,
+ DMA_NONE, 1,
V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_DEC_STREAM_TYPE_VOUT */
"decoder VOUT",
VFL_TYPE_VBI, IVTV_V4L2_DEC_VOUT_OFFSET,
- PCI_DMA_NONE, 1,
+ DMA_NONE, 1,
V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
},
{ /* IVTV_DEC_STREAM_TYPE_YUV */
"decoder YUV",
VFL_TYPE_VIDEO, IVTV_V4L2_DEC_YUV_OFFSET,
- PCI_DMA_TODEVICE, 0,
+ DMA_TO_DEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
}
@@ -179,7 +179,7 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
s->caps = ivtv_stream_info[type].v4l2_caps;
if (ivtv_stream_info[type].pio)
- s->dma = PCI_DMA_NONE;
+ s->dma = DMA_NONE;
else
s->dma = ivtv_stream_info[type].dma;
s->buf_size = itv->stream_buf_size[type];
@@ -217,7 +217,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
/* User explicitly selected 0 buffers for these streams, so don't
create them. */
- if (ivtv_stream_info[type].dma != PCI_DMA_NONE &&
+ if (ivtv_stream_info[type].dma != DMA_NONE &&
itv->options.kilobytes[type] == 0) {
IVTV_INFO("Disabled %s device\n", ivtv_stream_info[type].name);
return 0;
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 0d8372cc364a..210be8290f24 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -81,8 +81,10 @@ void ivtv_udma_alloc(struct ivtv *itv)
{
if (itv->udma.SG_handle == 0) {
/* Map DMA Page Array Buffer */
- itv->udma.SG_handle = pci_map_single(itv->pdev, itv->udma.SGarray,
- sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
+ itv->udma.SG_handle = dma_map_single(&itv->pdev->dev,
+ itv->udma.SGarray,
+ sizeof(itv->udma.SGarray),
+ DMA_TO_DEVICE);
ivtv_udma_sync_for_cpu(itv);
}
}
@@ -135,7 +137,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
}
/* Map SG List */
- dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
+ dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
+ dma->page_count, DMA_TO_DEVICE);
/* Fill SG Array with new values */
ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);
@@ -159,7 +162,8 @@ void ivtv_udma_unmap(struct ivtv *itv)
/* Unmap Scatterlist */
if (dma->SG_length) {
- pci_unmap_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
+ dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
+ DMA_TO_DEVICE);
dma->SG_length = 0;
}
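/*
 * Note on the scatterlist conversions in this file: dma_map_sg() returns
 * the number of mapped entries (0 on failure), but dma_unmap_sg() must be
 * passed the original entry count, not the returned one, which is why
 * these hunks keep page_count on the unmap side. Sketch:
 *
 *	int nents = dma_map_sg(&pdev->dev, sglist, page_count, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -EIO;
 *	...program the hardware with nents entries...
 *	dma_unmap_sg(&pdev->dev, sglist, page_count, DMA_TO_DEVICE);
 */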
/* sync DMA */
@@ -175,13 +179,14 @@ void ivtv_udma_free(struct ivtv *itv)
/* Unmap SG Array */
if (itv->udma.SG_handle) {
- pci_unmap_single(itv->pdev, itv->udma.SG_handle,
- sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
+ dma_unmap_single(&itv->pdev->dev, itv->udma.SG_handle,
+ sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
}
/* Unmap Scatterlist */
if (itv->udma.SG_length) {
- pci_unmap_sg(itv->pdev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
+ dma_unmap_sg(&itv->pdev->dev, itv->udma.SGlist,
+ itv->udma.page_count, DMA_TO_DEVICE);
}
for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 5f7dc9771f8d..e79e8a5a744a 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -113,7 +113,8 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
dma->page_count = 0;
return -ENOMEM;
}
- dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
+ dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
+ dma->page_count, DMA_TO_DEVICE);
/* Fill SG Array with new values */
ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
@@ -920,7 +921,9 @@ static void ivtv_yuv_init(struct ivtv *itv)
/* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */
yi->blanking_ptr = kzalloc(720 * 16, GFP_ATOMIC|__GFP_NOWARN);
if (yi->blanking_ptr) {
- yi->blanking_dmaptr = pci_map_single(itv->pdev, yi->blanking_ptr, 720*16, PCI_DMA_TODEVICE);
+ yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev,
+ yi->blanking_ptr,
+ 720 * 16, DMA_TO_DEVICE);
} else {
yi->blanking_dmaptr = 0;
IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
@@ -1264,7 +1267,8 @@ void ivtv_yuv_close(struct ivtv *itv)
if (yi->blanking_ptr) {
kfree(yi->blanking_ptr);
yi->blanking_ptr = NULL;
- pci_unmap_single(itv->pdev, yi->blanking_dmaptr, 720*16, PCI_DMA_TODEVICE);
+ dma_unmap_single(&itv->pdev->dev, yi->blanking_dmaptr,
+ 720 * 16, DMA_TO_DEVICE);
}
/* Invalidate the old dimension information */
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index e2d56dca5be4..2c43ebf83966 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -36,7 +36,7 @@
#include <linux/fb.h>
#include <linux/ivtvfb.h>
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
#include <asm/memtype.h>
#endif
@@ -48,8 +48,8 @@ static bool osd_laced;
static int osd_depth;
static int osd_upper;
static int osd_left;
-static int osd_yres;
-static int osd_xres;
+static unsigned int osd_yres;
+static unsigned int osd_xres;
module_param(ivtvfb_card_id, int, 0444);
module_param_named(debug,ivtvfb_debug, int, 0644);
@@ -58,8 +58,8 @@ module_param(osd_laced, bool, 0444);
module_param(osd_depth, int, 0444);
module_param(osd_upper, int, 0444);
module_param(osd_left, int, 0444);
-module_param(osd_yres, int, 0444);
-module_param(osd_xres, int, 0444);
+module_param(osd_yres, uint, 0444);
+module_param(osd_xres, uint, 0444);
MODULE_PARM_DESC(ivtvfb_card_id,
"Only use framebuffer of the specified ivtv card (0-31)\n"
@@ -1157,7 +1157,7 @@ static int ivtvfb_init_card(struct ivtv *itv)
{
int rc;
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
if (pat_enabled()) {
if (ivtvfb_force_pat) {
pr_info("PAT is enabled. Write-combined framebuffer caching will be disabled.\n");
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 6f3125c2d097..8287851b5ffd 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -258,19 +258,24 @@ static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
/* IRQ is being signaled */
reg_isr = readw(ndev->bmmio0 + REG_ISR);
- if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
- iret = netup_i2c_interrupt(&ndev->i2c[0]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
- iret = netup_i2c_interrupt(&ndev->i2c[1]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
+ if (reg_isr & NETUP_UNIDVB_IRQ_SPI)
iret = netup_spi_interrupt(ndev->spi);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
- iret = netup_dma_interrupt(&ndev->dma[0]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
- iret = netup_dma_interrupt(&ndev->dma[1]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
- iret = netup_ci_interrupt(ndev);
+ else if (!ndev->old_fw) {
+ if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
+ iret = netup_i2c_interrupt(&ndev->i2c[0]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
+ iret = netup_i2c_interrupt(&ndev->i2c[1]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
+ iret = netup_dma_interrupt(&ndev->dma[0]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
+ iret = netup_dma_interrupt(&ndev->dma[1]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
+ iret = netup_ci_interrupt(ndev);
+ } else {
+ goto err;
+ }
} else {
+err:
dev_err(&pci_dev->dev,
"%s(): unknown interrupt 0x%x\n",
__func__, reg_isr);
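The restructured handler dispatches the I2C, DMA and CI sources only when the firmware is new enough to report them, while unknown status bits still fall through to the error path. The shape of the pattern, reduced to a sketch (all names here are illustrative):

        /* Sketch: gate optional IRQ sources on a firmware capability flag. */
        struct example_dev {
                struct device *dev;
                bool old_fw;
        };

        static irqreturn_t example_dispatch(struct example_dev *ndev, u16 isr)
        {
                if (isr & BIT(0))                       /* valid on all firmware */
                        return IRQ_HANDLED;
                if (!ndev->old_fw && (isr & BIT(1)))    /* new firmware only */
                        return IRQ_HANDLED;
                dev_err(ndev->dev, "unknown interrupt 0x%x\n", isr);
                return IRQ_NONE;
        }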
@@ -841,7 +846,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
"%s(): board vendor 0x%x, revision 0x%x\n",
__func__, board_vendor, board_revision);
pci_set_master(pci_dev);
- if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
+ if (dma_set_mask(&pci_dev->dev, 0xffffffff) < 0) {
dev_err(&pci_dev->dev,
"%s(): 32bit PCI DMA is not supported\n", __func__);
goto pci_detect_err;
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index f1f4793a4452..6ac9b9bd7435 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -228,16 +228,16 @@ static void pluto_set_dma_addr(struct pluto *pluto)
static int pluto_dma_map(struct pluto *pluto)
{
- pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ pluto->dma_addr = dma_map_single(&pluto->pdev->dev, pluto->dma_buf,
+ TS_DMA_BYTES, DMA_FROM_DEVICE);
- return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
+ return dma_mapping_error(&pluto->pdev->dev, pluto->dma_addr);
}
static void pluto_dma_unmap(struct pluto *pluto)
{
- pci_unmap_single(pluto->pdev, pluto->dma_addr,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pluto->pdev->dev, pluto->dma_addr, TS_DMA_BYTES,
+ DMA_FROM_DEVICE);
}
static int pluto_start_feed(struct dvb_demux_feed *f)
@@ -276,8 +276,8 @@ static void pluto_dma_end(struct pluto *pluto, unsigned int nbpackets)
{
/* synchronize the DMA transfer with the CPU
* first so that we see updated contents. */
- pci_dma_sync_single_for_cpu(pluto->pdev, pluto->dma_addr,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pluto->pdev->dev, pluto->dma_addr,
+ TS_DMA_BYTES, DMA_FROM_DEVICE);
/* Workaround for broken hardware:
* [1] On startup NBPACKETS seems to contain an uninitialized value,
@@ -310,8 +310,8 @@ static void pluto_dma_end(struct pluto *pluto, unsigned int nbpackets)
pluto_set_dma_addr(pluto);
/* sync the buffer and give it back to the card */
- pci_dma_sync_single_for_device(pluto->pdev, pluto->dma_addr,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pluto->pdev->dev, pluto->dma_addr,
+ TS_DMA_BYTES, DMA_FROM_DEVICE);
}
static irqreturn_t pluto_irq(int irq, void *dev_id)
@@ -595,7 +595,7 @@ static int pluto2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable interrupts */
pci_write_config_dword(pdev, 0x6c, 0x8000);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret < 0)
goto err_pci_disable_device;
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index f2aa36814fba..121a4a92ea10 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1340,7 +1340,7 @@ static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto err;
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret < 0)
goto err_pci_disable_device;
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index ce449c941171..0d82a4b27d5b 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5765,6 +5765,33 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x0200000,
},
},
+ [SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H] = {
+ .name = "Leadtek Winfast HDTV200 H",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .mpeg = SAA7134_MPEG_DVB,
+ .ts_type = SAA7134_MPEG_TS_PARALLEL,
+ .gpiomask = 0x00200700,
+ .inputs = { {
+ .type = SAA7134_INPUT_TV,
+ .vmux = 1,
+ .amux = TV,
+ .gpio = 0x00000300,
+ }, {
+ .type = SAA7134_INPUT_COMPOSITE,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x00200300,
+ }, {
+ .type = SAA7134_INPUT_SVIDEO,
+ .vmux = 8,
+ .amux = LINE1,
+ .gpio = 0x00200300,
+ } },
+ },
};
const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -7041,6 +7068,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0x13cf,
.driver_data = SAA7134_BOARD_SNAZIO_TVPVR_PRO,
}, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x107d,
+ .subdevice = 0x6f2e,
+ .driver_data = SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H,
+ }, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -7245,6 +7278,22 @@ static int saa7134_kworld_pc150u_toggle_agc(struct saa7134_dev *dev,
return 0;
}
+static int saa7134_leadtek_hdtv200h_toggle_agc(struct saa7134_dev *dev,
+ enum tda18271_mode mode)
+{
+ switch (mode) {
+ case TDA18271_ANALOG:
+ saa7134_set_gpio(dev, 10, 0);
+ break;
+ case TDA18271_DIGITAL:
+ saa7134_set_gpio(dev, 10, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int saa7134_tda8290_18271_callback(struct saa7134_dev *dev,
int command, int arg)
{
@@ -7264,6 +7313,9 @@ static int saa7134_tda8290_18271_callback(struct saa7134_dev *dev,
case SAA7134_BOARD_KWORLD_PC150U:
ret = saa7134_kworld_pc150u_toggle_agc(dev, arg);
break;
+ case SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H:
+ ret = saa7134_leadtek_hdtv200h_toggle_agc(dev, arg);
+ break;
default:
break;
}
@@ -7287,6 +7339,7 @@ static int saa7134_tda8290_callback(struct saa7134_dev *dev,
case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
case SAA7134_BOARD_KWORLD_PC150U:
case SAA7134_BOARD_MAGICPRO_PROHDTV_PRO2:
+ case SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H:
/* tda8290 + tda18271 */
ret = saa7134_tda8290_18271_callback(dev, command, arg);
break;
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index f359cd5c006a..d17a1b15faee 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1189,6 +1189,22 @@ static struct s5h1411_config kworld_s5h1411_config = {
S5H1411_MPEGTIMING_CONTINUOUS_NONINVERTING_CLOCK,
};
+static struct tda18271_config hdtv200h_tda18271_config = {
+ .gate = TDA18271_GATE_ANALOG,
+ .config = 3 /* Use tuner callback for AGC */
+};
+
+static struct s5h1411_config hdtv200h_s5h1411_config = {
+ .output_mode = S5H1411_PARALLEL_OUTPUT,
+ .gpio = S5H1411_GPIO_OFF,
+ .qam_if = S5H1411_IF_4000,
+ .vsb_if = S5H1411_IF_3250,
+ .inversion = S5H1411_INVERSION_ON,
+ .status_mode = S5H1411_DEMODLOCKING,
+ .mpeg_timing =
+ S5H1411_MPEGTIMING_CONTINUOUS_NONINVERTING_CLOCK,
+};
+
/* ==================================================================
* Core code
@@ -1854,6 +1870,19 @@ static int dvb_init(struct saa7134_dev *dev)
__func__);
}
break;
+ case SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H:
+ fe0->dvb.frontend = dvb_attach(s5h1411_attach,
+ &hdtv200h_s5h1411_config,
+ &dev->i2c_adap);
+ if (fe0->dvb.frontend) {
+ dvb_attach(tda829x_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, 0x4b,
+ &tda829x_no_probe);
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_adap,
+ &hdtv200h_tda18271_config);
+ }
+ break;
default:
pr_warn("Huh? unknown DVB card?\n");
break;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index d29499cd7370..49fe0f6bacba 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -328,6 +328,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_AVERMEDIA_505 194
#define SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM 195
#define SAA7134_BOARD_SNAZIO_TVPVR_PRO 196
+#define SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H 197
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c
index 4ddd0f5b50f1..5526bcc7a9bd 100644
--- a/drivers/media/pci/saa7164/saa7164-api.c
+++ b/drivers/media/pci/saa7164/saa7164-api.c
@@ -1057,8 +1057,6 @@ static int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
dprintk(DBGLVL_API, " numformats = 0x%x\n",
vcoutputtermhdr->numformats);
- t = (struct tmComResDescrHeader *)
- ((struct tmComResDMATermDescrHeader *)(buf + idx));
next_offset = idx + (vcoutputtermhdr->len);
for (i = 0; i < vcoutputtermhdr->numformats; i++) {
t = (struct tmComResDescrHeader *)
diff --git a/drivers/media/pci/tw5864/tw5864-core.c b/drivers/media/pci/tw5864/tw5864-core.c
index 282f7dfb7aaf..23d3cae54a5d 100644
--- a/drivers/media/pci/tw5864/tw5864-core.c
+++ b/drivers/media/pci/tw5864/tw5864-core.c
@@ -262,7 +262,7 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
- err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&dev->pci->dev, "32 bit PCI DMA is not supported\n");
goto disable_pci;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 80321e03809a..cf4adc64c953 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -200,6 +200,22 @@ config VIDEO_TI_CAL_MC
endif # VIDEO_TI_CAL
+config VIDEO_RCAR_ISP
+ tristate "R-Car Image Signal Processor (ISP)"
+ depends on VIDEO_V4L2 && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select RESET_CONTROLLER
+ select V4L2_FWNODE
+ help
+	  Support for the Renesas R-Car Image Signal Processor (ISP).
+	  Enable this option to use the ISP found in Renesas R-Car SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-isp.
+
endif # V4L_PLATFORM_DRIVERS
menuconfig V4L_MEM2MEM_DRIVERS
@@ -314,6 +330,9 @@ config VIDEO_MEDIATEK_VCODEC
select V4L2_MEM2MEM_DEV
select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
select VIDEO_MEDIATEK_VCODEC_SCP if MTK_SCP
+ select V4L2_H264
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
help
Mediatek video codec driver provides HW capability to
encode and decode in a range of video formats on MT8173
@@ -635,6 +654,7 @@ config VIDEO_RCAR_DRIF
depends on VIDEO_V4L2
depends on ARCH_RENESAS || COMPILE_TEST
select VIDEOBUF2_VMALLOC
+ select V4L2_ASYNC
help
Say Y if you want to enable R-Car Gen3 DRIF support. DRIF is Digital
Radio Interface that interfaces with an RF front end chip. It is a
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 73ce083c2fc6..a148553babfc 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
obj-$(CONFIG_VIDEO_XILINX) += xilinx/
+obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin/
obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel/
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index 887b492e4ad1..c8156da33043 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -6,16 +6,20 @@
*/
#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/gcd.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/log2.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/xlnx-vcu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -101,6 +105,12 @@
#define BETA_OFFSET_DIV_2 -1
#define TC_OFFSET_DIV_2 -1
+/*
+ * This control allows applications to explicitly disable the encoder buffer.
+ * This value is Allegro specific.
+ */
+#define V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER (V4L2_CID_USER_ALLEGRO_BASE + 0)
+
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
@@ -125,6 +135,13 @@ struct allegro_mbox {
struct mutex lock;
};
+struct allegro_encoder_buffer {
+ unsigned int size;
+ unsigned int color_depth;
+ unsigned int num_cores;
+ unsigned int clk_rate;
+};
+
struct allegro_dev {
struct v4l2_device v4l2_dev;
struct video_device video_dev;
@@ -136,12 +153,19 @@ struct allegro_dev {
struct regmap *regmap;
struct regmap *sram;
+ struct regmap *settings;
+
+ struct clk *clk_core;
+ struct clk *clk_mcu;
const struct fw_info *fw_info;
struct allegro_buffer firmware;
struct allegro_buffer suballocator;
+ bool has_encoder_buffer;
+ struct allegro_encoder_buffer encoder_buffer;
struct completion init_complete;
+ bool initialized;
/* The mailbox interface */
struct allegro_mbox *mbox_command;
@@ -257,6 +281,8 @@ struct allegro_channel {
struct v4l2_ctrl *mpeg_video_cpb_size;
struct v4l2_ctrl *mpeg_video_gop_size;
+ struct v4l2_ctrl *encoder_buffer;
+
/* user_id is used to identify the channel during CREATE_CHANNEL */
/* not sure what to set here or if this is actually required */
int user_id;
@@ -921,6 +947,52 @@ out:
kfree(msg);
}
+static int allegro_encoder_buffer_init(struct allegro_dev *dev,
+ struct allegro_encoder_buffer *buffer)
+{
+ int err;
+ struct regmap *settings = dev->settings;
+ unsigned int supports_10_bit;
+ unsigned int memory_depth;
+ unsigned int num_cores;
+ unsigned int color_depth;
+ unsigned long clk_rate;
+
+	/* The encoder buffer is not supported before firmware version 2019.2 */
+ if (dev->fw_info->mailbox_version < MCU_MSG_VERSION_2019_2)
+ return -ENODEV;
+
+ if (!settings)
+ return -EINVAL;
+
+ err = regmap_read(settings, VCU_ENC_COLOR_DEPTH, &supports_10_bit);
+ if (err < 0)
+ return err;
+ err = regmap_read(settings, VCU_MEMORY_DEPTH, &memory_depth);
+ if (err < 0)
+ return err;
+ err = regmap_read(settings, VCU_NUM_CORE, &num_cores);
+ if (err < 0)
+ return err;
+
+ clk_rate = clk_get_rate(dev->clk_core);
+ if (clk_rate == 0)
+ return -EINVAL;
+
+ color_depth = supports_10_bit ? 10 : 8;
+ /* The firmware expects the encoder buffer size in bits. */
+ buffer->size = color_depth * 32 * memory_depth;
+ buffer->color_depth = color_depth;
+ buffer->num_cores = num_cores;
+ buffer->clk_rate = clk_rate;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "using %d bits encoder buffer with %d-bit color depth\n",
+ buffer->size, color_depth);
+
+ return 0;
+}
+
static void allegro_mcu_send_init(struct allegro_dev *dev,
dma_addr_t suballoc_dma, size_t suballoc_size)
{
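The size computed by allegro_encoder_buffer_init() above is easy to sanity-check by hand; with 10-bit color support and an illustrative memory depth of 512 (the real value is read from the xlnx,vcu-settings syscon):

        unsigned int color_depth = 10;          /* supports_10_bit set */
        unsigned int memory_depth = 512;        /* illustrative syscon value */
        unsigned int size = color_depth * 32 * memory_depth;    /* 163840 bits */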
@@ -934,10 +1006,17 @@ static void allegro_mcu_send_init(struct allegro_dev *dev,
msg.suballoc_dma = to_mcu_addr(dev, suballoc_dma);
msg.suballoc_size = to_mcu_size(dev, suballoc_size);
- /* disable L2 cache */
- msg.l2_cache[0] = -1;
- msg.l2_cache[1] = -1;
- msg.l2_cache[2] = -1;
+ if (dev->has_encoder_buffer) {
+ msg.encoder_buffer_size = dev->encoder_buffer.size;
+ msg.encoder_buffer_color_depth = dev->encoder_buffer.color_depth;
+ msg.num_cores = dev->encoder_buffer.num_cores;
+ msg.clk_rate = dev->encoder_buffer.clk_rate;
+ } else {
+ msg.encoder_buffer_size = -1;
+ msg.encoder_buffer_color_depth = -1;
+ msg.num_cores = -1;
+ msg.clk_rate = -1;
+ }
allegro_mbox_send(dev->mbox_command, &msg);
}
@@ -1184,9 +1263,8 @@ static int fill_create_channel_param(struct allegro_channel *channel,
param->max_transfo_depth_intra = channel->max_transfo_depth_intra;
param->max_transfo_depth_inter = channel->max_transfo_depth_inter;
- param->prefetch_auto = 0;
- param->prefetch_mem_offset = 0;
- param->prefetch_mem_size = 0;
+ param->encoder_buffer_enabled = v4l2_ctrl_g_ctrl(channel->encoder_buffer);
+ param->encoder_buffer_offset = 0;
param->rate_control_mode = channel->frame_rc_enable ?
v4l2_bitrate_mode_to_mcu_mode(bitrate_mode) : 0;
@@ -1311,6 +1389,7 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
u64 src_handle)
{
struct mcu_msg_encode_frame msg;
+ bool use_encoder_buffer = v4l2_ctrl_g_ctrl(channel->encoder_buffer);
memset(&msg, 0, sizeof(msg));
@@ -1319,6 +1398,8 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
msg.channel_id = channel->mcu_channel_id;
msg.encoding_options = AL_OPT_FORCE_LOAD;
+ if (use_encoder_buffer)
+ msg.encoding_options |= AL_OPT_USE_L2;
msg.pps_qp = 26; /* qp are relative to 26 */
msg.user_param = 0; /* copied to mcu_msg_encode_frame_response */
/* src_handle is copied to mcu_msg_encode_frame_response */
@@ -1326,8 +1407,6 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
msg.src_y = to_codec_addr(dev, src_y);
msg.src_uv = to_codec_addr(dev, src_uv);
msg.stride = channel->stride;
- msg.ep2 = 0x0;
- msg.ep2_v = to_mcu_addr(dev, msg.ep2);
allegro_mbox_send(dev->mbox_command, &msg);
@@ -1509,14 +1588,14 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_profile);
level = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_level);
- sps->profile_idc = nal_h264_profile_from_v4l2(profile);
+ sps->profile_idc = nal_h264_profile(profile);
sps->constraint_set0_flag = 0;
sps->constraint_set1_flag = 1;
sps->constraint_set2_flag = 0;
sps->constraint_set3_flag = 0;
sps->constraint_set4_flag = 0;
sps->constraint_set5_flag = 0;
- sps->level_idc = nal_h264_level_from_v4l2(level);
+ sps->level_idc = nal_h264_level(level);
sps->seq_parameter_set_id = 0;
sps->log2_max_frame_num_minus4 = LOG2_MAX_FRAME_NUM - 4;
sps->pic_order_cnt_type = 0;
@@ -1541,13 +1620,17 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
sps->vui_parameters_present_flag = 1;
sps->vui.aspect_ratio_info_present_flag = 0;
sps->vui.overscan_info_present_flag = 0;
+
sps->vui.video_signal_type_present_flag = 1;
- sps->vui.video_format = 1;
- sps->vui.video_full_range_flag = 0;
+ sps->vui.video_format = 5; /* unspecified */
+ sps->vui.video_full_range_flag = nal_h264_full_range(channel->quantization);
sps->vui.colour_description_present_flag = 1;
- sps->vui.colour_primaries = 5;
- sps->vui.transfer_characteristics = 5;
- sps->vui.matrix_coefficients = 5;
+ sps->vui.colour_primaries = nal_h264_color_primaries(channel->colorspace);
+ sps->vui.transfer_characteristics =
+ nal_h264_transfer_characteristics(channel->colorspace, channel->xfer_func);
+ sps->vui.matrix_coefficients =
+ nal_h264_matrix_coeffs(channel->colorspace, channel->ycbcr_enc);
+
sps->vui.chroma_loc_info_present_flag = 1;
sps->vui.chroma_sample_loc_type_top_field = 0;
sps->vui.chroma_sample_loc_type_bottom_field = 0;
@@ -1560,8 +1643,9 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
sps->vui.nal_hrd_parameters_present_flag = 0;
sps->vui.vcl_hrd_parameters_present_flag = 1;
sps->vui.vcl_hrd_parameters.cpb_cnt_minus1 = 0;
- sps->vui.vcl_hrd_parameters.bit_rate_scale = 0;
/* See Rec. ITU-T H.264 (04/2017) p. 410 E-53 */
+ sps->vui.vcl_hrd_parameters.bit_rate_scale =
+ ffs(channel->bitrate_peak) - 6;
sps->vui.vcl_hrd_parameters.bit_rate_value_minus1[0] =
channel->bitrate_peak / (1 << (6 + sps->vui.vcl_hrd_parameters.bit_rate_scale)) - 1;
/* See Rec. ITU-T H.264 (04/2017) p. 410 E-54 */
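The E-53 relation referenced above is BitRate = (bit_rate_value_minus1 + 1) << (6 + bit_rate_scale), so deriving bit_rate_scale from ffs() (the kernel helper from <linux/bitops.h>) keeps bit_rate_value_minus1 small. A worked example with an illustrative 10 Mbit/s peak rate:

        unsigned int bitrate_peak = 10000000;           /* illustrative */
        int scale = ffs(bitrate_peak) - 6;              /* ffs() = 8 -> scale = 2 */
        unsigned int value_minus1 =
                bitrate_peak / (1 << (6 + scale)) - 1;  /* 39061 */
        /* Decoders reconstruct (39061 + 1) << 8 = 9999872 bit/s, i.e. the
         * peak rate rounded down to the granularity implied by the scale. */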
@@ -1654,12 +1738,12 @@ static ssize_t allegro_hevc_write_vps(struct allegro_channel *channel,
vps->temporal_id_nesting_flag = 1;
ptl = &vps->profile_tier_level;
- ptl->general_profile_idc = nal_hevc_profile_from_v4l2(profile);
+ ptl->general_profile_idc = nal_hevc_profile(profile);
ptl->general_profile_compatibility_flag[ptl->general_profile_idc] = 1;
- ptl->general_tier_flag = nal_hevc_tier_from_v4l2(tier);
+ ptl->general_tier_flag = nal_hevc_tier(tier);
ptl->general_progressive_source_flag = 1;
ptl->general_frame_only_constraint_flag = 1;
- ptl->general_level_idc = nal_hevc_level_from_v4l2(level);
+ ptl->general_level_idc = nal_hevc_level(level);
vps->sub_layer_ordering_info_present_flag = 0;
vps->max_dec_pic_buffering_minus1[0] = num_ref_frames;
@@ -1678,7 +1762,10 @@ static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
struct allegro_dev *dev = channel->dev;
struct nal_hevc_sps *sps;
struct nal_hevc_profile_tier_level *ptl;
+ struct nal_hevc_vui_parameters *vui;
+ struct nal_hevc_hrd_parameters *hrd;
ssize_t size;
+ unsigned int cpb_size;
unsigned int num_ref_frames = channel->num_ref_idx_l0;
s32 profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_profile);
s32 level = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level);
@@ -1691,12 +1778,12 @@ static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
sps->temporal_id_nesting_flag = 1;
ptl = &sps->profile_tier_level;
- ptl->general_profile_idc = nal_hevc_profile_from_v4l2(profile);
+ ptl->general_profile_idc = nal_hevc_profile(profile);
ptl->general_profile_compatibility_flag[ptl->general_profile_idc] = 1;
- ptl->general_tier_flag = nal_hevc_tier_from_v4l2(tier);
+ ptl->general_tier_flag = nal_hevc_tier(tier);
ptl->general_progressive_source_flag = 1;
ptl->general_frame_only_constraint_flag = 1;
- ptl->general_level_idc = nal_hevc_level_from_v4l2(level);
+ ptl->general_level_idc = nal_hevc_level(level);
sps->seq_parameter_set_id = 0;
sps->chroma_format_idc = 1; /* Only 4:2:0 sampling supported */
@@ -1731,6 +1818,50 @@ static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
sps->sps_temporal_mvp_enabled_flag = channel->temporal_mvp_enable;
sps->strong_intra_smoothing_enabled_flag = channel->max_cu_size > 4;
+ sps->vui_parameters_present_flag = 1;
+ vui = &sps->vui;
+
+ vui->video_signal_type_present_flag = 1;
+ vui->video_format = 5; /* unspecified */
+ vui->video_full_range_flag = nal_hevc_full_range(channel->quantization);
+ vui->colour_description_present_flag = 1;
+ vui->colour_primaries = nal_hevc_color_primaries(channel->colorspace);
+ vui->transfer_characteristics = nal_hevc_transfer_characteristics(channel->colorspace,
+ channel->xfer_func);
+ vui->matrix_coeffs = nal_hevc_matrix_coeffs(channel->colorspace, channel->ycbcr_enc);
+
+ vui->chroma_loc_info_present_flag = 1;
+ vui->chroma_sample_loc_type_top_field = 0;
+ vui->chroma_sample_loc_type_bottom_field = 0;
+
+ vui->vui_timing_info_present_flag = 1;
+ vui->vui_num_units_in_tick = channel->framerate.denominator;
+ vui->vui_time_scale = channel->framerate.numerator;
+
+ vui->bitstream_restriction_flag = 1;
+ vui->motion_vectors_over_pic_boundaries_flag = 1;
+ vui->restricted_ref_pic_lists_flag = 1;
+ vui->log2_max_mv_length_horizontal = 15;
+ vui->log2_max_mv_length_vertical = 15;
+
+ vui->vui_hrd_parameters_present_flag = 1;
+ hrd = &vui->nal_hrd_parameters;
+ hrd->vcl_hrd_parameters_present_flag = 1;
+
+ hrd->initial_cpb_removal_delay_length_minus1 = 31;
+ hrd->au_cpb_removal_delay_length_minus1 = 30;
+ hrd->dpb_output_delay_length_minus1 = 30;
+
+ hrd->bit_rate_scale = ffs(channel->bitrate_peak) - 6;
+ hrd->vcl_hrd[0].bit_rate_value_minus1[0] =
+ (channel->bitrate_peak >> (6 + hrd->bit_rate_scale)) - 1;
+
+ cpb_size = v4l2_ctrl_g_ctrl(channel->mpeg_video_cpb_size) * 1000;
+ hrd->cpb_size_scale = ffs(cpb_size) - 4;
+ hrd->vcl_hrd[0].cpb_size_value_minus1[0] = (cpb_size >> (4 + hrd->cpb_size_scale)) - 1;
+
+ hrd->vcl_hrd[0].cbr_flag[0] = !v4l2_ctrl_g_ctrl(channel->mpeg_video_frame_rc_enable);
+
size = nal_hevc_write_sps(&dev->plat_dev->dev, dest, n, sps);
kfree(sps);
@@ -2185,6 +2316,15 @@ static irqreturn_t allegro_irq_thread(int irq, void *data)
{
struct allegro_dev *dev = data;
+	/*
+	 * The firmware is initialized after the mailbox is set up. We also
+	 * check the AL5_ITC_CPU_IRQ_STA register to see whether the firmware
+	 * actually triggered the interrupt. Although this should not happen,
+	 * make sure to ignore interrupts while the mailbox is not initialized.
+	 */
+ if (!dev->mbox_status)
+ return IRQ_NONE;
+
allegro_mbox_notify(dev->mbox_status);
return IRQ_HANDLED;
@@ -2384,6 +2524,8 @@ static void allegro_destroy_channel(struct allegro_channel *channel)
v4l2_ctrl_grab(channel->mpeg_video_cpb_size, false);
v4l2_ctrl_grab(channel->mpeg_video_gop_size, false);
+ v4l2_ctrl_grab(channel->encoder_buffer, false);
+
if (channel->user_id != -1) {
clear_bit(channel->user_id, &dev->channel_user_ids);
channel->user_id = -1;
@@ -2450,6 +2592,8 @@ static int allegro_create_channel(struct allegro_channel *channel)
v4l2_ctrl_grab(channel->mpeg_video_cpb_size, true);
v4l2_ctrl_grab(channel->mpeg_video_gop_size, true);
+ v4l2_ctrl_grab(channel->encoder_buffer, true);
+
reinit_completion(&channel->completion);
allegro_mcu_send_create_channel(dev, channel);
timeout = wait_for_completion_timeout(&channel->completion,
@@ -2833,6 +2977,10 @@ static int allegro_try_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
allegro_clamp_bitrate(channel, ctrl);
break;
+ case V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER:
+ if (!channel->dev->has_encoder_buffer)
+ ctrl->val = 0;
+ break;
}
return 0;
@@ -2873,6 +3021,16 @@ static const struct v4l2_ctrl_ops allegro_ctrl_ops = {
.s_ctrl = allegro_s_ctrl,
};
+static const struct v4l2_ctrl_config allegro_encoder_buffer_ctrl_config = {
+ .id = V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER,
+ .name = "Encoder Buffer Enable",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
static int allegro_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
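Since the control is registered with v4l2_ctrl_new_custom(), userspace toggles it through the ordinary V4L2 control ioctls. A usage sketch, assuming an already-opened device fd (the numeric ID comes from V4L2_CID_USER_ALLEGRO_BASE in the UAPI headers):

        #include <sys/ioctl.h>
        #include <linux/videodev2.h>
        #include <linux/v4l2-controls.h>

        struct v4l2_control ctrl = {
                .id = V4L2_CID_USER_ALLEGRO_BASE + 0,   /* the control added above */
                .value = 0,                             /* disable the encoder buffer */
        };

        if (ioctl(fd, VIDIOC_S_CTRL, &ctrl))
                perror("VIDIOC_S_CTRL");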
@@ -3024,6 +3182,8 @@ static int allegro_open(struct file *file)
V4L2_CID_MPEG_VIDEO_GOP_SIZE,
0, ALLEGRO_GOP_SIZE_MAX,
1, ALLEGRO_GOP_SIZE_DEFAULT);
+ channel->encoder_buffer = v4l2_ctrl_new_custom(handler,
+ &allegro_encoder_buffer_ctrl_config, NULL);
v4l2_ctrl_new_std(handler,
&allegro_ctrl_ops,
V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
@@ -3504,6 +3664,11 @@ static int allegro_mcu_hw_init(struct allegro_dev *dev,
return -EIO;
}
+ err = allegro_encoder_buffer_init(dev, &dev->encoder_buffer);
+ dev->has_encoder_buffer = (err == 0);
+ if (!dev->has_encoder_buffer)
+ v4l2_info(&dev->v4l2_dev, "encoder buffer not available\n");
+
allegro_mcu_enable_interrupts(dev);
/* The mcu sends INIT after reset. */
@@ -3591,11 +3756,16 @@ static void allegro_fw_callback(const struct firmware *fw, void *context)
v4l2_info(&dev->v4l2_dev,
"using mcu firmware version '%s'\n", dev->fw_info->version);
+ pm_runtime_enable(&dev->plat_dev->dev);
+ err = pm_runtime_resume_and_get(&dev->plat_dev->dev);
+ if (err)
+ goto err_release_firmware_codec;
+
/* Ensure that the mcu is sleeping at the reset vector */
err = allegro_mcu_reset(dev);
if (err) {
v4l2_err(&dev->v4l2_dev, "failed to reset mcu\n");
- goto err_release_firmware_codec;
+ goto err_suspend;
}
allegro_copy_firmware(dev, fw->data, fw->size);
@@ -3623,6 +3793,8 @@ static void allegro_fw_callback(const struct firmware *fw, void *context)
"allegro codec registered as /dev/video%d\n",
dev->video_dev.num);
+ dev->initialized = true;
+
release_firmware(fw_codec);
release_firmware(fw);
@@ -3635,6 +3807,9 @@ err_mcu_hw_deinit:
allegro_mcu_hw_deinit(dev);
err_free_fw_codec:
allegro_free_fw_codec(dev);
+err_suspend:
+ pm_runtime_put(&dev->plat_dev->dev);
+ pm_runtime_disable(&dev->plat_dev->dev);
err_release_firmware_codec:
release_firmware(fw_codec);
err_release_firmware:
@@ -3669,6 +3844,8 @@ static int allegro_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
+ dev->initialized = false;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!res) {
dev_err(&pdev->dev,
@@ -3707,6 +3884,18 @@ static int allegro_probe(struct platform_device *pdev)
return PTR_ERR(dev->sram);
}
+ dev->settings = syscon_regmap_lookup_by_compatible("xlnx,vcu-settings");
+ if (IS_ERR(dev->settings))
+ dev_warn(&pdev->dev, "failed to open settings\n");
+
+ dev->clk_core = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(dev->clk_core))
+ return PTR_ERR(dev->clk_core);
+
+ dev->clk_mcu = devm_clk_get(&pdev->dev, "mcu_clk");
+ if (IS_ERR(dev->clk_mcu))
+ return PTR_ERR(dev->clk_mcu);
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -3739,17 +3928,75 @@ static int allegro_remove(struct platform_device *pdev)
{
struct allegro_dev *dev = platform_get_drvdata(pdev);
- video_unregister_device(&dev->video_dev);
- if (dev->m2m_dev)
- v4l2_m2m_release(dev->m2m_dev);
- allegro_mcu_hw_deinit(dev);
- allegro_free_fw_codec(dev);
+ if (dev->initialized) {
+ video_unregister_device(&dev->video_dev);
+ if (dev->m2m_dev)
+ v4l2_m2m_release(dev->m2m_dev);
+ allegro_mcu_hw_deinit(dev);
+ allegro_free_fw_codec(dev);
+ }
+
+ pm_runtime_put(&dev->plat_dev->dev);
+ pm_runtime_disable(&dev->plat_dev->dev);
v4l2_device_unregister(&dev->v4l2_dev);
return 0;
}
+static int allegro_runtime_resume(struct device *device)
+{
+ struct allegro_dev *dev = dev_get_drvdata(device);
+ struct regmap *settings = dev->settings;
+ unsigned int clk_mcu;
+ unsigned int clk_core;
+ int err;
+
+ if (!settings)
+ return -EINVAL;
+
+#define MHZ_TO_HZ(freq) ((freq) * 1000 * 1000)
+
+ err = regmap_read(settings, VCU_CORE_CLK, &clk_core);
+ if (err < 0)
+ return err;
+ err = clk_set_rate(dev->clk_core, MHZ_TO_HZ(clk_core));
+ if (err < 0)
+ return err;
+ err = clk_prepare_enable(dev->clk_core);
+ if (err)
+ return err;
+
+ err = regmap_read(settings, VCU_MCU_CLK, &clk_mcu);
+ if (err < 0)
+ goto disable_clk_core;
+ err = clk_set_rate(dev->clk_mcu, MHZ_TO_HZ(clk_mcu));
+ if (err < 0)
+ goto disable_clk_core;
+ err = clk_prepare_enable(dev->clk_mcu);
+ if (err)
+ goto disable_clk_core;
+
+#undef MHZ_TO_HZ
+
+ return 0;
+
+disable_clk_core:
+ clk_disable_unprepare(dev->clk_core);
+
+ return err;
+}
+
+static int allegro_runtime_suspend(struct device *device)
+{
+ struct allegro_dev *dev = dev_get_drvdata(device);
+
+ clk_disable_unprepare(dev->clk_mcu);
+ clk_disable_unprepare(dev->clk_core);
+
+ return 0;
+}
+
static const struct of_device_id allegro_dt_ids[] = {
{ .compatible = "allegro,al5e-1.1" },
{ /* sentinel */ }
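With only runtime_resume/runtime_suspend implemented, the clocks are controlled entirely by the pm_runtime_* bracketing added in allegro_fw_callback() and allegro_remove(); the pairing, reduced to its skeleton:

        /* Sketch: runtime-PM bracketing around device bring-up/teardown. */
        pm_runtime_enable(dev);                 /* allow the callbacks */
        err = pm_runtime_resume_and_get(dev);   /* calls ->runtime_resume() */
        if (err) {
                pm_runtime_disable(dev);
                return err;
        }
        /* ... device in use, clocks guaranteed enabled ... */
        pm_runtime_put(dev);                    /* may call ->runtime_suspend() */
        pm_runtime_disable(dev);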
@@ -3757,12 +4004,18 @@ static const struct of_device_id allegro_dt_ids[] = {
MODULE_DEVICE_TABLE(of, allegro_dt_ids);
+static const struct dev_pm_ops allegro_pm_ops = {
+ .runtime_resume = allegro_runtime_resume,
+ .runtime_suspend = allegro_runtime_suspend,
+};
+
static struct platform_driver allegro_driver = {
.probe = allegro_probe,
.remove = allegro_remove,
.driver = {
.name = "allegro",
.of_match_table = of_match_ptr(allegro_dt_ids),
+ .pm = &allegro_pm_ops,
},
};
diff --git a/drivers/media/platform/allegro-dvt/allegro-mail.c b/drivers/media/platform/allegro-dvt/allegro-mail.c
index 7e08c5050f2e..16effad10746 100644
--- a/drivers/media/platform/allegro-dvt/allegro-mail.c
+++ b/drivers/media/platform/allegro-dvt/allegro-mail.c
@@ -49,11 +49,11 @@ allegro_enc_init(u32 *dst, struct mcu_msg_init_request *msg)
dst[i++] = msg->reserved0;
dst[i++] = msg->suballoc_dma;
dst[i++] = msg->suballoc_size;
- dst[i++] = msg->l2_cache[0];
- dst[i++] = msg->l2_cache[1];
- dst[i++] = msg->l2_cache[2];
+ dst[i++] = msg->encoder_buffer_size;
+ dst[i++] = msg->encoder_buffer_color_depth;
+ dst[i++] = msg->num_cores;
if (version >= MCU_MSG_VERSION_2019_2) {
- dst[i++] = -1;
+ dst[i++] = msg->clk_rate;
dst[i++] = 0;
}
@@ -146,13 +146,10 @@ allegro_encode_config_blob(u32 *dst, struct create_channel_param *param)
FIELD_PREP(GENMASK(7, 0), param->tc_offset);
dst[i++] = param->unknown11;
dst[i++] = param->unknown12;
- if (version >= MCU_MSG_VERSION_2019_2)
- dst[i++] = param->num_slices;
- else
- dst[i++] = FIELD_PREP(GENMASK(31, 16), param->prefetch_auto) |
- FIELD_PREP(GENMASK(15, 0), param->num_slices);
- dst[i++] = param->prefetch_mem_offset;
- dst[i++] = param->prefetch_mem_size;
+ dst[i++] = param->num_slices;
+ dst[i++] = param->encoder_buffer_offset;
+ dst[i++] = param->encoder_buffer_enabled;
+
dst[i++] = FIELD_PREP(GENMASK(31, 16), param->clip_vrt_range) |
FIELD_PREP(GENMASK(15, 0), param->clip_hrz_range);
dst[i++] = FIELD_PREP(GENMASK(31, 16), param->me_range[1]) |
@@ -429,8 +426,8 @@ allegro_dec_encode_frame(struct mcu_msg_encode_frame_response *msg, u32 *src)
msg->frame_tag_size = src[i++];
msg->stuffing = src[i++];
msg->filler = src[i++];
- msg->num_column = FIELD_GET(GENMASK(31, 16), src[i]);
- msg->num_row = FIELD_GET(GENMASK(15, 0), src[i++]);
+ msg->num_row = FIELD_GET(GENMASK(31, 16), src[i]);
+ msg->num_column = FIELD_GET(GENMASK(15, 0), src[i++]);
msg->num_ref_idx_l1 = FIELD_GET(GENMASK(31, 24), src[i]);
msg->num_ref_idx_l0 = FIELD_GET(GENMASK(23, 16), src[i]);
msg->qp = FIELD_GET(GENMASK(15, 0), src[i++]);
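The last hunk fixes a swapped decode: num_row lives in the upper half-word and num_column in the lower one. FIELD_GET() extracts a GENMASK()-described field from a word, for example:

        #include <linux/bitfield.h>

        u32 word = 0x00200010;                          /* illustrative value */
        u16 rows = FIELD_GET(GENMASK(31, 16), word);    /* 0x20 = 32 rows */
        u16 cols = FIELD_GET(GENMASK(15, 0), word);     /* 0x10 = 16 columns */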
diff --git a/drivers/media/platform/allegro-dvt/allegro-mail.h b/drivers/media/platform/allegro-dvt/allegro-mail.h
index 2c7bc509eac3..a5686058d754 100644
--- a/drivers/media/platform/allegro-dvt/allegro-mail.h
+++ b/drivers/media/platform/allegro-dvt/allegro-mail.h
@@ -37,7 +37,10 @@ struct mcu_msg_init_request {
u32 reserved0; /* maybe a unused channel id */
u32 suballoc_dma;
u32 suballoc_size;
- s32 l2_cache[3];
+ s32 encoder_buffer_size;
+ s32 encoder_buffer_color_depth;
+ s32 num_cores;
+ s32 clk_rate;
};
struct mcu_msg_init_response {
@@ -79,9 +82,8 @@ struct create_channel_param {
u32 unknown11;
u32 unknown12;
u16 num_slices;
- u16 prefetch_auto;
- u32 prefetch_mem_offset;
- u32 prefetch_mem_size;
+ u32 encoder_buffer_offset;
+ u32 encoder_buffer_enabled;
u16 clip_hrz_range;
u16 clip_vrt_range;
u16 me_range[4];
diff --git a/drivers/media/platform/allegro-dvt/nal-h264.c b/drivers/media/platform/allegro-dvt/nal-h264.c
index 0ab2fcbee1b9..32663766340f 100644
--- a/drivers/media/platform/allegro-dvt/nal-h264.c
+++ b/drivers/media/platform/allegro-dvt/nal-h264.c
@@ -34,80 +34,6 @@ enum nal_unit_type {
FILLER_DATA = 12,
};
-/**
- * nal_h264_profile_from_v4l2() - Get profile_idc for v4l2 h264 profile
- * @profile: the profile as &enum v4l2_mpeg_video_h264_profile
- *
- * Convert the &enum v4l2_mpeg_video_h264_profile to profile_idc as specified
- * in Rec. ITU-T H.264 (04/2017) A.2.
- *
- * Return: the profile_idc for the passed level
- */
-int nal_h264_profile_from_v4l2(enum v4l2_mpeg_video_h264_profile profile)
-{
- switch (profile) {
- case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
- return 66;
- case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
- return 77;
- case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
- return 88;
- case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
- return 100;
- default:
- return -EINVAL;
- }
-}
-
-/**
- * nal_h264_level_from_v4l2() - Get level_idc for v4l2 h264 level
- * @level: the level as &enum v4l2_mpeg_video_h264_level
- *
- * Convert the &enum v4l2_mpeg_video_h264_level to level_idc as specified in
- * Rec. ITU-T H.264 (04/2017) A.3.2.
- *
- * Return: the level_idc for the passed level
- */
-int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level)
-{
- switch (level) {
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
- return 10;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
- return 9;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
- return 11;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
- return 12;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
- return 13;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
- return 20;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
- return 21;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
- return 22;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
- return 30;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
- return 31;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
- return 32;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
- return 40;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
- return 41;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
- return 42;
- case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
- return 50;
- case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
- return 51;
- default:
- return -EINVAL;
- }
-}
-
static void nal_h264_write_start_code_prefix(struct rbsp *rbsp)
{
u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
diff --git a/drivers/media/platform/allegro-dvt/nal-h264.h b/drivers/media/platform/allegro-dvt/nal-h264.h
index a19634fe8c0b..34db07cda652 100644
--- a/drivers/media/platform/allegro-dvt/nal-h264.h
+++ b/drivers/media/platform/allegro-dvt/nal-h264.h
@@ -8,8 +8,11 @@
#ifndef __NAL_H264_H__
#define __NAL_H264_H__
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
/*
* struct nal_h264_hrd_parameters - HRD parameters
@@ -187,8 +190,201 @@ struct nal_h264_pps {
};
};
-int nal_h264_profile_from_v4l2(enum v4l2_mpeg_video_h264_profile profile);
-int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level);
+/**
+ * nal_h264_profile() - Get profile_idc for v4l2 h264 profile
+ * @profile: the profile as &enum v4l2_mpeg_video_h264_profile
+ *
+ * Convert the &enum v4l2_mpeg_video_h264_profile to profile_idc as specified
+ * in Rec. ITU-T H.264 (04/2017) A.2.
+ *
+ * Return: the profile_idc for the passed profile
+ */
+static inline int nal_h264_profile(enum v4l2_mpeg_video_h264_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ return 66;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return 77;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ return 88;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ return 100;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_h264_level() - Get level_idc for v4l2 h264 level
+ * @level: the level as &enum v4l2_mpeg_video_h264_level
+ *
+ * Convert the &enum v4l2_mpeg_video_h264_level to level_idc as specified in
+ * Rec. ITU-T H.264 (04/2017) A.3.2.
+ *
+ * Return: the level_idc for the passed level
+ */
+static inline int nal_h264_level(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 10;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return 9;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 20;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 30;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 40;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 50;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ return 51;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_h264_full_range() - Get video_full_range_flag for v4l2 quantization
+ * @quantization: the quantization type as &enum v4l2_quantization
+ *
+ * Convert the &enum v4l2_quantization to video_full_range_flag as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the video_full_range_flag value for the passed quantization
+ */
+static inline int nal_h264_full_range(enum v4l2_quantization quantization)
+{
+ switch (quantization) {
+ case V4L2_QUANTIZATION_FULL_RANGE:
+ return 1;
+ case V4L2_QUANTIZATION_LIM_RANGE:
+ return 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * nal_h264_color_primaries() - Get color_primaries for v4l2 colorspace
+ * @colorspace: the color space as &enum v4l2_colorspace
+ *
+ * Convert the &enum v4l2_colorspace to color_primaries as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the color_primaries value for the passed colorspace
+ */
+static inline int nal_h264_color_primaries(enum v4l2_colorspace colorspace)
+{
+ switch (colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ return 6;
+ case V4L2_COLORSPACE_SMPTE240M:
+ return 7;
+ case V4L2_COLORSPACE_REC709:
+ return 1;
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ return 4;
+ case V4L2_COLORSPACE_JPEG:
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return 5;
+ case V4L2_COLORSPACE_BT2020:
+ return 9;
+ case V4L2_COLORSPACE_DEFAULT:
+ case V4L2_COLORSPACE_OPRGB:
+ case V4L2_COLORSPACE_RAW:
+ case V4L2_COLORSPACE_DCI_P3:
+ default:
+ return 2;
+ }
+}
+
+/**
+ * nal_h264_transfer_characteristics() - Get transfer_characteristics for v4l2 xfer_func
+ * @colorspace: the color space as &enum v4l2_colorspace
+ * @xfer_func: the transfer function as &enum v4l2_xfer_func
+ *
+ * Convert the &enum v4l2_xfer_func to transfer_characteristics as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the transfer_characteristics value for the passed transfer function
+ */
+static inline int nal_h264_transfer_characteristics(enum v4l2_colorspace colorspace,
+ enum v4l2_xfer_func xfer_func)
+{
+ if (xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(colorspace);
+
+ switch (xfer_func) {
+ case V4L2_XFER_FUNC_709:
+ return 6;
+ case V4L2_XFER_FUNC_SMPTE2084:
+ return 16;
+ case V4L2_XFER_FUNC_SRGB:
+ case V4L2_XFER_FUNC_OPRGB:
+ case V4L2_XFER_FUNC_NONE:
+ case V4L2_XFER_FUNC_DCI_P3:
+ case V4L2_XFER_FUNC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
+
+/**
+ * nal_h264_matrix_coeffs() - Get matrix_coefficients for v4l2 ycbcr_encoding
+ * @colorspace: the color space as &enum v4l2_colorspace
+ * @ycbcr_encoding: the ycbcr encoding as &enum v4l2_ycbcr_encoding
+ *
+ * Convert the &enum v4l2_ycbcr_encoding to matrix_coefficients as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the matrix_coefficients value for the passed encoding
+ */
+static inline int nal_h264_matrix_coeffs(enum v4l2_colorspace colorspace,
+ enum v4l2_ycbcr_encoding ycbcr_encoding)
+{
+ if (ycbcr_encoding == V4L2_YCBCR_ENC_DEFAULT)
+ ycbcr_encoding = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
+
+ switch (ycbcr_encoding) {
+ case V4L2_YCBCR_ENC_601:
+ case V4L2_YCBCR_ENC_XV601:
+ return 5;
+ case V4L2_YCBCR_ENC_709:
+ case V4L2_YCBCR_ENC_XV709:
+ return 1;
+ case V4L2_YCBCR_ENC_BT2020:
+ return 9;
+ case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+ return 10;
+ case V4L2_YCBCR_ENC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
ssize_t nal_h264_write_sps(const struct device *dev,
void *dest, size_t n, struct nal_h264_sps *sps);
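With the conversions moved into the header as static inline functions, callers no longer need the exported symbols; typical use, taking the values straight from the tables above:

        struct nal_h264_sps sps = { };

        sps.profile_idc = nal_h264_profile(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH); /* 100 */
        sps.level_idc = nal_h264_level(V4L2_MPEG_VIDEO_H264_LEVEL_4_0);        /* 40 */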
diff --git a/drivers/media/platform/allegro-dvt/nal-hevc.c b/drivers/media/platform/allegro-dvt/nal-hevc.c
index 15a352e45831..9cdf2756e0a3 100644
--- a/drivers/media/platform/allegro-dvt/nal-hevc.c
+++ b/drivers/media/platform/allegro-dvt/nal-hevc.c
@@ -35,76 +35,6 @@ enum nal_unit_type {
FD_NUT = 38,
};
-int nal_hevc_profile_from_v4l2(enum v4l2_mpeg_video_hevc_profile profile)
-{
- switch (profile) {
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
- return 1;
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
- return 2;
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
- return 3;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(nal_hevc_profile_from_v4l2);
-
-int nal_hevc_tier_from_v4l2(enum v4l2_mpeg_video_hevc_tier tier)
-{
- switch (tier) {
- case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
- return 0;
- case V4L2_MPEG_VIDEO_HEVC_TIER_HIGH:
- return 1;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(nal_hevc_tier_from_v4l2);
-
-int nal_hevc_level_from_v4l2(enum v4l2_mpeg_video_hevc_level level)
-{
- /*
- * T-Rec-H.265 p. 280: general_level_idc and sub_layer_level_idc[ i ]
- * shall be set equal to a value of 30 times the level number
- * specified in Table A.6.
- */
- int factor = 30 / 10;
-
- switch (level) {
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
- return factor * 10;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
- return factor * 20;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
- return factor * 21;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
- return factor * 30;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
- return factor * 31;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
- return factor * 40;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
- return factor * 41;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
- return factor * 50;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
- return factor * 51;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
- return factor * 52;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
- return factor * 60;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
- return factor * 61;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
- return factor * 62;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(nal_hevc_level_from_v4l2);
-
static void nal_hevc_write_start_code_prefix(struct rbsp *rbsp)
{
u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
@@ -277,6 +207,136 @@ static void nal_hevc_rbsp_vps(struct rbsp *rbsp, struct nal_hevc_vps *vps)
rbsp_unsupported(rbsp);
}
+static void nal_hevc_rbsp_sub_layer_hrd_parameters(struct rbsp *rbsp,
+ struct nal_hevc_sub_layer_hrd_parameters *hrd)
+{
+ unsigned int i;
+ unsigned int cpb_cnt = 1;
+
+ for (i = 0; i < cpb_cnt; i++) {
+ rbsp_uev(rbsp, &hrd->bit_rate_value_minus1[i]);
+ rbsp_uev(rbsp, &hrd->cpb_size_value_minus1[i]);
+ rbsp_bit(rbsp, &hrd->cbr_flag[i]);
+ }
+}
+
+static void nal_hevc_rbsp_hrd_parameters(struct rbsp *rbsp,
+ struct nal_hevc_hrd_parameters *hrd)
+{
+ unsigned int i;
+ unsigned int max_num_sub_layers_minus_1 = 0;
+
+ rbsp_bit(rbsp, &hrd->nal_hrd_parameters_present_flag);
+ rbsp_bit(rbsp, &hrd->vcl_hrd_parameters_present_flag);
+ if (hrd->nal_hrd_parameters_present_flag || hrd->vcl_hrd_parameters_present_flag) {
+ rbsp_bit(rbsp, &hrd->sub_pic_hrd_params_present_flag);
+ if (hrd->sub_pic_hrd_params_present_flag) {
+ rbsp_bits(rbsp, 8, &hrd->tick_divisor_minus2);
+ rbsp_bits(rbsp, 5, &hrd->du_cpb_removal_delay_increment_length_minus1);
+ rbsp_bit(rbsp, &hrd->sub_pic_cpb_params_in_pic_timing_sei_flag);
+ rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_du_length_minus1);
+ }
+ rbsp_bits(rbsp, 4, &hrd->bit_rate_scale);
+ rbsp_bits(rbsp, 4, &hrd->cpb_size_scale);
+ if (hrd->sub_pic_hrd_params_present_flag)
+ rbsp_bits(rbsp, 4, &hrd->cpb_size_du_scale);
+ rbsp_bits(rbsp, 5, &hrd->initial_cpb_removal_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->au_cpb_removal_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_length_minus1);
+ }
+ for (i = 0; i <= max_num_sub_layers_minus_1; i++) {
+ rbsp_bit(rbsp, &hrd->fixed_pic_rate_general_flag[i]);
+ if (!hrd->fixed_pic_rate_general_flag[i])
+ rbsp_bit(rbsp, &hrd->fixed_pic_rate_within_cvs_flag[i]);
+ if (hrd->fixed_pic_rate_within_cvs_flag[i])
+ rbsp_uev(rbsp, &hrd->elemental_duration_in_tc_minus1[i]);
+ else
+ rbsp_bit(rbsp, &hrd->low_delay_hrd_flag[i]);
+ if (!hrd->low_delay_hrd_flag[i])
+ rbsp_uev(rbsp, &hrd->cpb_cnt_minus1[i]);
+ if (hrd->nal_hrd_parameters_present_flag)
+ nal_hevc_rbsp_sub_layer_hrd_parameters(rbsp, &hrd->vcl_hrd[i]);
+ if (hrd->vcl_hrd_parameters_present_flag)
+ nal_hevc_rbsp_sub_layer_hrd_parameters(rbsp, &hrd->vcl_hrd[i]);
+ }
+}
+
+static void nal_hevc_rbsp_vui_parameters(struct rbsp *rbsp,
+ struct nal_hevc_vui_parameters *vui)
+{
+ if (!vui) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp_bit(rbsp, &vui->aspect_ratio_info_present_flag);
+ if (vui->aspect_ratio_info_present_flag) {
+ rbsp_bits(rbsp, 8, &vui->aspect_ratio_idc);
+ if (vui->aspect_ratio_idc == 255) {
+ rbsp_bits(rbsp, 16, &vui->sar_width);
+ rbsp_bits(rbsp, 16, &vui->sar_height);
+ }
+ }
+
+ rbsp_bit(rbsp, &vui->overscan_info_present_flag);
+ if (vui->overscan_info_present_flag)
+ rbsp_bit(rbsp, &vui->overscan_appropriate_flag);
+
+ rbsp_bit(rbsp, &vui->video_signal_type_present_flag);
+ if (vui->video_signal_type_present_flag) {
+ rbsp_bits(rbsp, 3, &vui->video_format);
+ rbsp_bit(rbsp, &vui->video_full_range_flag);
+
+ rbsp_bit(rbsp, &vui->colour_description_present_flag);
+ if (vui->colour_description_present_flag) {
+ rbsp_bits(rbsp, 8, &vui->colour_primaries);
+ rbsp_bits(rbsp, 8, &vui->transfer_characteristics);
+ rbsp_bits(rbsp, 8, &vui->matrix_coeffs);
+ }
+ }
+
+ rbsp_bit(rbsp, &vui->chroma_loc_info_present_flag);
+ if (vui->chroma_loc_info_present_flag) {
+ rbsp_uev(rbsp, &vui->chroma_sample_loc_type_top_field);
+ rbsp_uev(rbsp, &vui->chroma_sample_loc_type_bottom_field);
+ }
+
+ rbsp_bit(rbsp, &vui->neutral_chroma_indication_flag);
+ rbsp_bit(rbsp, &vui->field_seq_flag);
+ rbsp_bit(rbsp, &vui->frame_field_info_present_flag);
+ rbsp_bit(rbsp, &vui->default_display_window_flag);
+ if (vui->default_display_window_flag) {
+ rbsp_uev(rbsp, &vui->def_disp_win_left_offset);
+ rbsp_uev(rbsp, &vui->def_disp_win_right_offset);
+ rbsp_uev(rbsp, &vui->def_disp_win_top_offset);
+ rbsp_uev(rbsp, &vui->def_disp_win_bottom_offset);
+ }
+
+ rbsp_bit(rbsp, &vui->vui_timing_info_present_flag);
+ if (vui->vui_timing_info_present_flag) {
+ rbsp_bits(rbsp, 32, &vui->vui_num_units_in_tick);
+ rbsp_bits(rbsp, 32, &vui->vui_time_scale);
+ rbsp_bit(rbsp, &vui->vui_poc_proportional_to_timing_flag);
+ if (vui->vui_poc_proportional_to_timing_flag)
+ rbsp_uev(rbsp, &vui->vui_num_ticks_poc_diff_one_minus1);
+ rbsp_bit(rbsp, &vui->vui_hrd_parameters_present_flag);
+ if (vui->vui_hrd_parameters_present_flag)
+ nal_hevc_rbsp_hrd_parameters(rbsp, &vui->nal_hrd_parameters);
+ }
+
+ rbsp_bit(rbsp, &vui->bitstream_restriction_flag);
+ if (vui->bitstream_restriction_flag) {
+ rbsp_bit(rbsp, &vui->tiles_fixed_structure_flag);
+ rbsp_bit(rbsp, &vui->motion_vectors_over_pic_boundaries_flag);
+ rbsp_bit(rbsp, &vui->restricted_ref_pic_lists_flag);
+ rbsp_uev(rbsp, &vui->min_spatial_segmentation_idc);
+ rbsp_uev(rbsp, &vui->max_bytes_per_pic_denom);
+ rbsp_uev(rbsp, &vui->max_bits_per_min_cu_denom);
+ rbsp_uev(rbsp, &vui->log2_max_mv_length_horizontal);
+ rbsp_uev(rbsp, &vui->log2_max_mv_length_vertical);
+ }
+}
+
static void nal_hevc_rbsp_sps(struct rbsp *rbsp, struct nal_hevc_sps *sps)
{
unsigned int i;
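The rbsp_uev() calls above emit unsigned Exp-Golomb (ue(v)) codes: a value v is written as N leading zero bits followed by the (N + 1)-bit binary form of v + 1. A standalone sketch of that encoding (rbsp_put_bit() stands in for the driver's internal bit writer):

        #include <linux/bitops.h>

        static void example_write_uev(unsigned int value)
        {
                unsigned int v = value + 1;
                int bits = fls(v);              /* significant bits in v */
                int i;

                for (i = 0; i < bits - 1; i++)  /* leading zeros */
                        rbsp_put_bit(0);
                for (i = bits - 1; i >= 0; i--) /* v itself, MSB first */
                        rbsp_put_bit((v >> i) & 1);
        }
        /* e.g. value 3: v = 4 = 100b, so the emitted code is 00100 */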
@@ -345,7 +405,7 @@ static void nal_hevc_rbsp_sps(struct rbsp *rbsp, struct nal_hevc_sps *sps)
rbsp_bit(rbsp, &sps->strong_intra_smoothing_enabled_flag);
rbsp_bit(rbsp, &sps->vui_parameters_present_flag);
if (sps->vui_parameters_present_flag)
- rbsp_unsupported(rbsp);
+ nal_hevc_rbsp_vui_parameters(rbsp, &sps->vui);
rbsp_bit(rbsp, &sps->extension_present_flag);
if (sps->extension_present_flag) {
diff --git a/drivers/media/platform/allegro-dvt/nal-hevc.h b/drivers/media/platform/allegro-dvt/nal-hevc.h
index c09bbe5446aa..eb46f12aae80 100644
--- a/drivers/media/platform/allegro-dvt/nal-hevc.h
+++ b/drivers/media/platform/allegro-dvt/nal-hevc.h
@@ -8,9 +8,11 @@
#ifndef __NAL_HEVC_H__
#define __NAL_HEVC_H__
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
struct nal_hevc_profile_tier_level {
unsigned int general_profile_space;
@@ -318,16 +320,183 @@ struct nal_hevc_pps {
};
};
-int nal_hevc_profile_from_v4l2(enum v4l2_mpeg_video_hevc_profile profile);
-int nal_hevc_tier_from_v4l2(enum v4l2_mpeg_video_hevc_tier tier);
-int nal_hevc_level_from_v4l2(enum v4l2_mpeg_video_hevc_level level);
+/**
+ * nal_hevc_profile() - Get profile_idc for v4l2 hevc profile
+ * @profile: the profile as &enum v4l2_mpeg_video_hevc_profile
+ *
+ * Convert the &enum v4l2_mpeg_video_hevc_profile to profile_idc as specified
+ * in Rec. ITU-T H.265 (02/2018) A.3.
+ *
+ * Return: the profile_idc for the passed profile
+ */
+static inline int nal_hevc_profile(enum v4l2_mpeg_video_hevc_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ return 1;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
+ return 2;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ return 3;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_hevc_tier() - Get tier_flag for v4l2 hevc tier
+ * @tier: the tier as &enum v4l2_mpeg_video_hevc_tier
+ *
+ * Convert the &enum v4l2_mpeg_video_hevc_tier to tier_flag as specified
+ * in Rec. ITU-T H.265 (02/2018) A.4.1.
+ *
+ * Return: the tier_flag for the passed tier
+ */
+static inline int nal_hevc_tier(enum v4l2_mpeg_video_hevc_tier tier)
+{
+ switch (tier) {
+ case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
+ return 0;
+ case V4L2_MPEG_VIDEO_HEVC_TIER_HIGH:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_hevc_level() - Get level_idc for v4l2 hevc level
+ * @level: the level as &enum v4l2_mpeg_video_hevc_level
+ *
+ * Convert the &enum v4l2_mpeg_video_hevc_level to level_idc as specified in
+ * Rec. ITU-T H.265 (02/2018) A.4.1.
+ *
+ * Return: the level_idc for the passed level
+ */
+static inline int nal_hevc_level(enum v4l2_mpeg_video_hevc_level level)
+{
+ /*
+ * T-Rec-H.265 p. 280: general_level_idc and sub_layer_level_idc[ i ]
+ * shall be set equal to a value of 30 times the level number
+ * specified in Table A.6.
+ */
+ int factor = 30 / 10;
+
+ switch (level) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ return factor * 10;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return factor * 20;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return factor * 21;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return factor * 30;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return factor * 31;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return factor * 40;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return factor * 41;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return factor * 50;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return factor * 51;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+ return factor * 52;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
+ return factor * 60;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
+ return factor * 61;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
+ return factor * 62;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int nal_hevc_full_range(enum v4l2_quantization quantization)
+{
+ switch (quantization) {
+ case V4L2_QUANTIZATION_FULL_RANGE:
+ return 1;
+ case V4L2_QUANTIZATION_LIM_RANGE:
+ return 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline int nal_hevc_color_primaries(enum v4l2_colorspace colorspace)
+{
+ switch (colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ return 6;
+ case V4L2_COLORSPACE_SMPTE240M:
+ return 7;
+ case V4L2_COLORSPACE_REC709:
+ return 1;
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ return 4;
+ case V4L2_COLORSPACE_JPEG:
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return 5;
+ case V4L2_COLORSPACE_BT2020:
+ return 9;
+ case V4L2_COLORSPACE_DEFAULT:
+ case V4L2_COLORSPACE_OPRGB:
+ case V4L2_COLORSPACE_RAW:
+ case V4L2_COLORSPACE_DCI_P3:
+ default:
+ return 2;
+ }
+}
+
+static inline int nal_hevc_transfer_characteristics(enum v4l2_colorspace colorspace,
+ enum v4l2_xfer_func xfer_func)
+{
+ if (xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(colorspace);
+
+ switch (xfer_func) {
+ case V4L2_XFER_FUNC_709:
+ return 6;
+ case V4L2_XFER_FUNC_SMPTE2084:
+ return 16;
+ case V4L2_XFER_FUNC_SRGB:
+ case V4L2_XFER_FUNC_OPRGB:
+ case V4L2_XFER_FUNC_NONE:
+ case V4L2_XFER_FUNC_DCI_P3:
+ case V4L2_XFER_FUNC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
+
+static inline int nal_hevc_matrix_coeffs(enum v4l2_colorspace colorspace,
+ enum v4l2_ycbcr_encoding ycbcr_encoding)
+{
+ if (ycbcr_encoding == V4L2_YCBCR_ENC_DEFAULT)
+ ycbcr_encoding = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
-int nal_range_from_v4l2(enum v4l2_quantization quantization);
-int nal_color_primaries_from_v4l2(enum v4l2_colorspace colorspace);
-int nal_transfer_characteristics_from_v4l2(enum v4l2_colorspace colorspace,
- enum v4l2_xfer_func xfer_func);
-int nal_matrix_coeffs_from_v4l2(enum v4l2_colorspace colorspace,
- enum v4l2_ycbcr_encoding ycbcr_encoding);
+ switch (ycbcr_encoding) {
+ case V4L2_YCBCR_ENC_601:
+ case V4L2_YCBCR_ENC_XV601:
+ return 5;
+ case V4L2_YCBCR_ENC_709:
+ case V4L2_YCBCR_ENC_XV709:
+ return 1;
+ case V4L2_YCBCR_ENC_BT2020:
+ return 9;
+ case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+ return 10;
+ case V4L2_YCBCR_ENC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
ssize_t nal_hevc_write_vps(const struct device *dev,
void *dest, size_t n, struct nal_hevc_vps *vps);
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 1c9cb9e05fdf..2dfae9bc0bba 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -2297,7 +2297,7 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
dev_dbg(dev, "vpfe_get_pdata\n");
- v4l2_async_notifier_init(&vpfe->notifier);
+ v4l2_async_nf_init(&vpfe->notifier);
if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
return dev->platform_data;
@@ -2365,9 +2365,10 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
goto cleanup;
}
- pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
- &vpfe->notifier, of_fwnode_handle(rem),
- struct v4l2_async_subdev);
+ pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpfe->notifier,
+ of_fwnode_handle(rem),
+ struct v4l2_async_subdev);
of_node_put(rem);
if (IS_ERR(pdata->asd[i]))
goto cleanup;
@@ -2377,7 +2378,7 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
return pdata;
cleanup:
- v4l2_async_notifier_cleanup(&vpfe->notifier);
+ v4l2_async_nf_cleanup(&vpfe->notifier);
of_node_put(endpoint);
return NULL;
}
@@ -2392,7 +2393,6 @@ static int vpfe_probe(struct platform_device *pdev)
struct vpfe_config *vpfe_cfg;
struct vpfe_device *vpfe;
struct vpfe_ccdc *ccdc;
- struct resource *res;
int ret;
vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
@@ -2410,8 +2410,7 @@ static int vpfe_probe(struct platform_device *pdev)
vpfe->cfg = vpfe_cfg;
ccdc = &vpfe->ccdc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
+ ccdc->ccdc_cfg.base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ccdc->ccdc_cfg.base_addr)) {
ret = PTR_ERR(ccdc->ccdc_cfg.base_addr);
goto probe_out_cleanup;
@@ -2465,7 +2464,7 @@ static int vpfe_probe(struct platform_device *pdev)
}
vpfe->notifier.ops = &vpfe_async_ops;
- ret = v4l2_async_notifier_register(&vpfe->v4l2_dev, &vpfe->notifier);
+ ret = v4l2_async_nf_register(&vpfe->v4l2_dev, &vpfe->notifier);
if (ret) {
vpfe_err(vpfe, "Error registering async notifier\n");
ret = -EINVAL;
@@ -2477,7 +2476,7 @@ static int vpfe_probe(struct platform_device *pdev)
probe_out_v4l2_unregister:
v4l2_device_unregister(&vpfe->v4l2_dev);
probe_out_cleanup:
- v4l2_async_notifier_cleanup(&vpfe->notifier);
+ v4l2_async_nf_cleanup(&vpfe->notifier);
return ret;
}
@@ -2490,8 +2489,8 @@ static int vpfe_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- v4l2_async_notifier_unregister(&vpfe->notifier);
- v4l2_async_notifier_cleanup(&vpfe->notifier);
+ v4l2_async_nf_unregister(&vpfe->notifier);
+ v4l2_async_nf_cleanup(&vpfe->notifier);
v4l2_device_unregister(&vpfe->v4l2_dev);
video_unregister_device(&vpfe->video_dev);
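
The hunks above are part of a tree-wide mechanical rename of the v4l2-async API (v4l2_async_notifier_* becomes v4l2_async_nf_*); behaviour is unchanged. For reference, the notifier lifecycle under the new names, sketched with placeholder names (my_dev, my_async_ops):

    v4l2_async_nf_init(&my_dev->notifier);

    asd = v4l2_async_nf_add_fwnode_remote(&my_dev->notifier, ep,
                                          struct v4l2_async_subdev);
    if (IS_ERR(asd)) {
            ret = PTR_ERR(asd);
            goto err_cleanup;
    }

    my_dev->notifier.ops = &my_async_ops;
    ret = v4l2_async_nf_register(&my_dev->v4l2_dev, &my_dev->notifier);
    if (ret)
            goto err_cleanup;
    return 0;

    err_cleanup:
            v4l2_async_nf_cleanup(&my_dev->notifier);
            return ret;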
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index 7bb6babdcade..cad3f97515ae 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -23,6 +23,8 @@
#include <linux/videodev2.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
@@ -201,6 +203,14 @@ struct aspeed_video_buffer {
struct list_head link;
};
+struct aspeed_video_perf {
+ ktime_t last_sample;
+ u32 totaltime;
+ u32 duration;
+ u32 duration_min;
+ u32 duration_max;
+};
+
#define to_aspeed_video_buffer(x) \
container_of((x), struct aspeed_video_buffer, vb)
@@ -242,6 +252,8 @@ struct aspeed_video {
unsigned int frame_left;
unsigned int frame_right;
unsigned int frame_top;
+
+ struct aspeed_video_perf perf;
};
#define to_aspeed_video(x) container_of((x), struct aspeed_video, v4l2_dev)
@@ -422,6 +434,21 @@ static void aspeed_video_init_jpeg_table(u32 *table, bool yuv420)
}
}
+/* Update only the subsampling field of each JPEG DCT table (4:2:0 vs 4:4:4) */
+static void aspeed_video_update_jpeg_table(u32 *table, bool yuv420)
+{
+ int i;
+ unsigned int base;
+
+ for (i = 0; i < ASPEED_VIDEO_JPEG_NUM_QUALITIES; i++) {
+ base = 256 * i; /* AST HW requires this header spacing */
+ base += ASPEED_VIDEO_JPEG_HEADER_SIZE +
+ ASPEED_VIDEO_JPEG_DCT_SIZE;
+
+ table[base + 2] = (yuv420) ? 0x00220103 : 0x00110103;
+ }
+}
+
static void aspeed_video_update(struct aspeed_video *video, u32 reg, u32 clear,
u32 bits)
{
@@ -450,6 +477,16 @@ static void aspeed_video_write(struct aspeed_video *video, u32 reg, u32 val)
readl(video->base + reg));
}
+static void update_perf(struct aspeed_video_perf *p)
+{
+ p->duration =
+ ktime_to_ms(ktime_sub(ktime_get(), p->last_sample));
+ p->totaltime += p->duration;
+
+ p->duration_max = max(p->duration, p->duration_max);
+ p->duration_min = min(p->duration, p->duration_min);
+}
+
static int aspeed_video_start_frame(struct aspeed_video *video)
{
dma_addr_t addr;
@@ -488,6 +525,8 @@ static int aspeed_video_start_frame(struct aspeed_video *video)
aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
VE_INTERRUPT_COMP_COMPLETE);
+ video->perf.last_sample = ktime_get();
+
aspeed_video_update(video, VE_SEQ_CTRL, 0,
VE_SEQ_CTRL_TRIG_CAPTURE | VE_SEQ_CTRL_TRIG_COMP);
@@ -564,6 +603,12 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
u32 sts = aspeed_video_read(video, VE_INTERRUPT_STATUS);
/*
+ * Hardware sometimes asserts interrupts that we haven't actually
+ * enabled; ignore them if so.
+ */
+ sts &= aspeed_video_read(video, VE_INTERRUPT_CTRL);
+
+ /*
* Resolution changed or signal was lost; reset the engine and
* re-initialize
*/
@@ -597,6 +642,8 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
u32 frame_size = aspeed_video_read(video,
video->comp_size_read);
+ update_perf(&video->perf);
+
spin_lock(&video->lock);
clear_bit(VIDEO_FRAME_INPRG, &video->flags);
buf = list_first_entry_or_null(&video->buffers,
@@ -629,16 +676,6 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
aspeed_video_start_frame(video);
}
- /*
- * CAPTURE_COMPLETE and FRAME_COMPLETE interrupts come even when these
- * are disabled in the VE_INTERRUPT_CTRL register so clear them to
- * prevent unnecessary interrupt calls.
- */
- if (sts & VE_INTERRUPT_CAPTURE_COMPLETE)
- sts &= ~VE_INTERRUPT_CAPTURE_COMPLETE;
- if (sts & VE_INTERRUPT_FRAME_COMPLETE)
- sts &= ~VE_INTERRUPT_FRAME_COMPLETE;
-
return sts ? IRQ_NONE : IRQ_HANDLED;
}
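
The mask added at the top of the handler subsumes the per-bit clearing removed below: ANDing the raw status with VE_INTERRUPT_CTRL drops any bit that was never enabled, so spurious CAPTURE_COMPLETE/FRAME_COMPLETE assertions no longer survive to the final check. In effect:

    sts &= aspeed_video_read(video, VE_INTERRUPT_CTRL); /* keep only enabled bits */
    /* ... handle each expected bit, clearing it from sts ... */
    return sts ? IRQ_NONE : IRQ_HANDLED; /* leftover bits: not our interrupt */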
@@ -764,6 +801,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
det->width = MIN_WIDTH;
det->height = MIN_HEIGHT;
video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+ memset(&video->perf, 0, sizeof(video->perf));
do {
if (tries) {
@@ -1293,7 +1331,7 @@ static void aspeed_video_update_jpeg_quality(struct aspeed_video *video)
static void aspeed_video_update_subsampling(struct aspeed_video *video)
{
if (video->jpeg.virt)
- aspeed_video_init_jpeg_table(video->jpeg.virt, video->yuv420);
+ aspeed_video_update_jpeg_table(video->jpeg.virt, video->yuv420);
if (video->yuv420)
aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_YUV420);
@@ -1454,6 +1492,8 @@ static int aspeed_video_start_streaming(struct vb2_queue *q,
struct aspeed_video *video = vb2_get_drv_priv(q);
video->sequence = 0;
+ video->perf.duration_max = 0;
+ video->perf.duration_min = 0xffffffff;
rc = aspeed_video_start_frame(video);
if (rc) {
@@ -1521,6 +1561,71 @@ static const struct vb2_ops aspeed_video_vb2_ops = {
.buf_queue = aspeed_video_buf_queue,
};
+#ifdef CONFIG_DEBUG_FS
+static int aspeed_video_debugfs_show(struct seq_file *s, void *data)
+{
+ struct aspeed_video *v = s->private;
+
+ seq_puts(s, "\n");
+
+ seq_printf(s, " %-20s:\t%s\n", "Signal",
+ v->v4l2_input_status ? "Unlock" : "Lock");
+ seq_printf(s, " %-20s:\t%d\n", "Width", v->pix_fmt.width);
+ seq_printf(s, " %-20s:\t%d\n", "Height", v->pix_fmt.height);
+ seq_printf(s, " %-20s:\t%d\n", "FRC", v->frame_rate);
+
+ seq_puts(s, "\n");
+
+ seq_puts(s, "Performance:\n");
+ seq_printf(s, " %-20s:\t%d\n", "Frame#", v->sequence);
+ seq_printf(s, " %-20s:\n", "Frame Duration(ms)");
+ seq_printf(s, " %-18s:\t%d\n", "Now", v->perf.duration);
+ seq_printf(s, " %-18s:\t%d\n", "Min", v->perf.duration_min);
+ seq_printf(s, " %-18s:\t%d\n", "Max", v->perf.duration_max);
+ if (v->sequence && v->perf.totaltime / v->sequence)
+ seq_printf(s, " %-20s:\t%d\n", "FPS",
+ 1000 / (v->perf.totaltime / v->sequence));
+
+ return 0;
+}
+
+static int aspeed_video_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aspeed_video_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations aspeed_video_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = aspeed_video_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *debugfs_entry;
+
+static void aspeed_video_debugfs_remove(struct aspeed_video *video)
+{
+ debugfs_remove_recursive(debugfs_entry);
+ debugfs_entry = NULL;
+}
+
+static int aspeed_video_debugfs_create(struct aspeed_video *video)
+{
+ debugfs_entry = debugfs_create_file(DEVICE_NAME, 0444, NULL, video,
+ &aspeed_video_debugfs_ops);
+ /* debugfs_create_file() returns an ERR_PTR on failure, never NULL */
+ if (IS_ERR(debugfs_entry)) {
+ debugfs_entry = NULL;
+ return -EIO;
+ }
+
+ return 0;
+}
+#else
+static void aspeed_video_debugfs_remove(struct aspeed_video *video) { }
+static int aspeed_video_debugfs_create(struct aspeed_video *video)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
static int aspeed_video_setup_video(struct aspeed_video *video)
{
const u64 mask = ~(BIT(V4L2_JPEG_CHROMA_SUBSAMPLING_444) |
@@ -1725,6 +1830,10 @@ static int aspeed_video_probe(struct platform_device *pdev)
return rc;
}
+ rc = aspeed_video_debugfs_create(video);
+ if (rc)
+ dev_err(video->dev, "debugfs create failed\n");
+
return 0;
}
@@ -1736,6 +1845,8 @@ static int aspeed_video_remove(struct platform_device *pdev)
aspeed_video_off(video);
+ aspeed_video_debugfs_remove(video);
+
clk_unprepare(video->vclk);
clk_unprepare(video->eclk);
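
A worked example of the figures the new debugfs view derives, assuming 300 frames completed over 10 s of capture:

    avg duration = totaltime / sequence = 10000 ms / 300 ≈ 33 ms
    FPS          = 1000 / avg duration  = 1000 / 33      ≈ 30

update_perf() feeds this by timestamping each frame start in perf.last_sample and accumulating the per-frame delta into totaltime while tracking the running min/max.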
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index 136ab7cf36ed..660cd0ab6749 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -123,11 +123,9 @@ static int isc_clk_prepare(struct clk_hw *hw)
struct isc_clk *isc_clk = to_isc_clk(hw);
int ret;
- if (isc_clk->id == ISC_ISPCK) {
- ret = pm_runtime_resume_and_get(isc_clk->dev);
- if (ret < 0)
- return ret;
- }
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return ret;
return isc_wait_clk_stable(hw);
}
@@ -138,8 +136,7 @@ static void isc_clk_unprepare(struct clk_hw *hw)
isc_wait_clk_stable(hw);
- if (isc_clk->id == ISC_ISPCK)
- pm_runtime_put_sync(isc_clk->dev);
+ pm_runtime_put_sync(isc_clk->dev);
}
static int isc_clk_enable(struct clk_hw *hw)
@@ -186,16 +183,13 @@ static int isc_clk_is_enabled(struct clk_hw *hw)
u32 status;
int ret;
- if (isc_clk->id == ISC_ISPCK) {
- ret = pm_runtime_resume_and_get(isc_clk->dev);
- if (ret < 0)
- return 0;
- }
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return 0;
regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
- if (isc_clk->id == ISC_ISPCK)
- pm_runtime_put_sync(isc_clk->dev);
+ pm_runtime_put_sync(isc_clk->dev);
return status & ISC_CLK(isc_clk->id) ? 1 : 0;
}
@@ -325,6 +319,9 @@ static int isc_clk_register(struct isc_device *isc, unsigned int id)
const char *parent_names[3];
int num_parents;
+ if (id == ISC_ISPCK && !isc->ispck_required)
+ return 0;
+
num_parents = of_clk_get_parent_count(np);
if (num_parents < 1 || num_parents > 3)
return -EINVAL;
@@ -2222,8 +2219,8 @@ void isc_subdev_cleanup(struct isc_device *isc)
struct isc_subdev_entity *subdev_entity;
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
- v4l2_async_notifier_unregister(&subdev_entity->notifier);
- v4l2_async_notifier_cleanup(&subdev_entity->notifier);
+ v4l2_async_nf_unregister(&subdev_entity->notifier);
+ v4l2_async_nf_cleanup(&subdev_entity->notifier);
}
INIT_LIST_HEAD(&isc->subdev_entities);
diff --git a/drivers/media/platform/atmel/atmel-isc.h b/drivers/media/platform/atmel/atmel-isc.h
index 19cc60dfcbe0..2bfcb135ef13 100644
--- a/drivers/media/platform/atmel/atmel-isc.h
+++ b/drivers/media/platform/atmel/atmel-isc.h
@@ -178,6 +178,7 @@ struct isc_reg_offsets {
* @hclock: Hclock clock input (refer datasheet)
* @ispck: iscpck clock (refer datasheet)
* @isc_clks: ISC clocks
+ * @ispck_required: ISC requires ISP Clock initialization
* @dcfg: DMA master configuration, architecture dependent
*
* @dev: Registered device driver
@@ -252,6 +253,7 @@ struct isc_device {
struct clk *hclock;
struct clk *ispck;
struct isc_clk isc_clks[2];
+ bool ispck_required;
u32 dcfg;
struct device *dev;
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 095d80c4f59e..4d15814e4481 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -1159,12 +1159,11 @@ static int isi_graph_init(struct atmel_isi *isi)
if (!ep)
return -EINVAL;
- v4l2_async_notifier_init(&isi->notifier);
+ v4l2_async_nf_init(&isi->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isi->notifier,
- of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&isi->notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
of_node_put(ep);
if (IS_ERR(asd))
@@ -1172,10 +1171,10 @@ static int isi_graph_init(struct atmel_isi *isi)
isi->notifier.ops = &isi_graph_notify_ops;
- ret = v4l2_async_notifier_register(&isi->v4l2_dev, &isi->notifier);
+ ret = v4l2_async_nf_register(&isi->v4l2_dev, &isi->notifier);
if (ret < 0) {
dev_err(isi->dev, "Notifier registration failed\n");
- v4l2_async_notifier_cleanup(&isi->notifier);
+ v4l2_async_nf_cleanup(&isi->notifier);
return ret;
}
@@ -1327,8 +1326,8 @@ static int atmel_isi_remove(struct platform_device *pdev)
isi->p_fb_descriptors,
isi->fb_descriptors_phys);
pm_runtime_disable(&pdev->dev);
- v4l2_async_notifier_unregister(&isi->notifier);
- v4l2_async_notifier_cleanup(&isi->notifier);
+ v4l2_async_nf_unregister(&isi->notifier);
+ v4l2_async_nf_cleanup(&isi->notifier);
v4l2_device_unregister(&isi->v4l2_dev);
return 0;
diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
index b66f1d174e9d..1b2063cce0f7 100644
--- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
@@ -454,6 +454,9 @@ static int atmel_isc_probe(struct platform_device *pdev)
/* sama5d2-isc - 8 bits per beat */
isc->dcfg = ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
+ /* sama5d2-isc: ISPCK is mandatory */
+ isc->ispck_required = true;
+
ret = isc_pipeline_init(isc);
if (ret)
return ret;
@@ -476,22 +479,6 @@ static int atmel_isc_probe(struct platform_device *pdev)
dev_err(dev, "failed to init isc clock: %d\n", ret);
goto unprepare_hclk;
}
-
- isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
-
- ret = clk_prepare_enable(isc->ispck);
- if (ret) {
- dev_err(dev, "failed to enable ispck: %d\n", ret);
- goto unprepare_hclk;
- }
-
- /* ispck should be greater or equal to hclock */
- ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
- if (ret) {
- dev_err(dev, "failed to set ispck rate: %d\n", ret);
- goto unprepare_clk;
- }
-
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
@@ -512,13 +499,14 @@ static int atmel_isc_probe(struct platform_device *pdev)
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
struct v4l2_async_subdev *asd;
+ struct fwnode_handle *fwnode =
+ of_fwnode_handle(subdev_entity->epn);
- v4l2_async_notifier_init(&subdev_entity->notifier);
+ v4l2_async_nf_init(&subdev_entity->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &subdev_entity->notifier,
- of_fwnode_handle(subdev_entity->epn),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
+ fwnode,
+ struct v4l2_async_subdev);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
@@ -530,8 +518,8 @@ static int atmel_isc_probe(struct platform_device *pdev)
subdev_entity->notifier.ops = &isc_async_ops;
- ret = v4l2_async_notifier_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
+ ret = v4l2_async_nf_register(&isc->v4l2_dev,
+ &subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
@@ -545,19 +533,35 @@ static int atmel_isc_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_request_idle(dev);
+ isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret) {
+ dev_err(dev, "failed to enable ispck: %d\n", ret);
+ goto cleanup_subdev;
+ }
+
+ /* ispck must be greater than or equal to hclock */
+ ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
+ if (ret) {
+ dev_err(dev, "failed to set ispck rate: %d\n", ret);
+ goto unprepare_clk;
+ }
+
regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver);
dev_info(dev, "Microchip ISC version %x\n", ver);
return 0;
+unprepare_clk:
+ clk_disable_unprepare(isc->ispck);
+
cleanup_subdev:
isc_subdev_cleanup(isc);
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
-unprepare_clk:
- clk_disable_unprepare(isc->ispck);
unprepare_hclk:
clk_disable_unprepare(isc->hclock);
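
The reorder keeps the probe error path unwinding in reverse order of acquisition: ispck is now enabled last, after pm_runtime_enable(), so its unprepare_clk label moves to the top of the ladder. The resulting unwind order, annotated:

    unprepare_clk:                          /* newest: ispck enabled last */
            clk_disable_unprepare(isc->ispck);
    cleanup_subdev:
            isc_subdev_cleanup(isc);
    unregister_v4l2_device:
            v4l2_device_unregister(&isc->v4l2_dev);
    unprepare_hclk:                         /* oldest: hclock enabled first */
            clk_disable_unprepare(isc->hclock);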
diff --git a/drivers/media/platform/atmel/atmel-sama7g5-isc.c b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
index f2785131ff56..5d1c76f680f3 100644
--- a/drivers/media/platform/atmel/atmel-sama7g5-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
@@ -447,6 +447,9 @@ static int microchip_xisc_probe(struct platform_device *pdev)
/* sama7g5-isc RAM access port is full AXI4 - 32 bits per beat */
isc->dcfg = ISC_DCFG_YMBSIZE_BEATS32 | ISC_DCFG_CMBSIZE_BEATS32;
+ /* sama7g5-isc: ISPCK does not exist; the ISC is clocked by MCK */
+ isc->ispck_required = false;
+
ret = isc_pipeline_init(isc);
if (ret)
return ret;
@@ -470,25 +473,10 @@ static int microchip_xisc_probe(struct platform_device *pdev)
goto unprepare_hclk;
}
- isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
-
- ret = clk_prepare_enable(isc->ispck);
- if (ret) {
- dev_err(dev, "failed to enable ispck: %d\n", ret);
- goto unprepare_hclk;
- }
-
- /* ispck should be greater or equal to hclock */
- ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
- if (ret) {
- dev_err(dev, "failed to set ispck rate: %d\n", ret);
- goto unprepare_clk;
- }
-
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
- goto unprepare_clk;
+ goto unprepare_hclk;
}
ret = xisc_parse_dt(dev, isc);
@@ -505,13 +493,14 @@ static int microchip_xisc_probe(struct platform_device *pdev)
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
struct v4l2_async_subdev *asd;
+ struct fwnode_handle *fwnode =
+ of_fwnode_handle(subdev_entity->epn);
- v4l2_async_notifier_init(&subdev_entity->notifier);
+ v4l2_async_nf_init(&subdev_entity->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &subdev_entity->notifier,
- of_fwnode_handle(subdev_entity->epn),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
+ fwnode,
+ struct v4l2_async_subdev);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
@@ -523,8 +512,8 @@ static int microchip_xisc_probe(struct platform_device *pdev)
subdev_entity->notifier.ops = &isc_async_ops;
- ret = v4l2_async_notifier_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
+ ret = v4l2_async_nf_register(&isc->v4l2_dev,
+ &subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
@@ -549,8 +538,6 @@ cleanup_subdev:
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
-unprepare_clk:
- clk_disable_unprepare(isc->ispck);
unprepare_hclk:
clk_disable_unprepare(isc->hclock);
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index f2b4ddd31177..cc3ebb0d96f6 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -279,13 +279,11 @@ static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
struct platform_device *pdev)
{
- struct resource *res;
unsigned char i;
u32 dev_cfg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csi2rx->base = devm_ioremap_resource(&pdev->dev, res);
+ csi2rx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi2rx->base))
return PTR_ERR(csi2rx->base);
@@ -401,21 +399,19 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
return -EINVAL;
}
- v4l2_async_notifier_init(&csi2rx->notifier);
+ v4l2_async_nf_init(&csi2rx->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&csi2rx->notifier,
- fwh,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
+ struct v4l2_async_subdev);
of_node_put(ep);
if (IS_ERR(asd))
return PTR_ERR(asd);
csi2rx->notifier.ops = &csi2rx_notifier_ops;
- ret = v4l2_async_subdev_notifier_register(&csi2rx->subdev,
- &csi2rx->notifier);
+ ret = v4l2_async_subdev_nf_register(&csi2rx->subdev, &csi2rx->notifier);
if (ret)
- v4l2_async_notifier_cleanup(&csi2rx->notifier);
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
return ret;
}
@@ -471,7 +467,7 @@ static int csi2rx_probe(struct platform_device *pdev)
return 0;
err_cleanup:
- v4l2_async_notifier_cleanup(&csi2rx->notifier);
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
err_free_priv:
kfree(csi2rx);
return ret;
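
This pattern, repeated across many of these hunks, folds the two-step ioremap idiom into devm_platform_ioremap_resource(). The helper is a thin wrapper, so the replacement is behaviour-preserving; roughly, it expands to:

    /* Rough expansion of devm_platform_ioremap_resource(pdev, index) */
    struct resource *res;
    void __iomem *base;

    res = platform_get_resource(pdev, IORESOURCE_MEM, index);
    base = devm_ioremap_resource(&pdev->dev, res);
    /* base is an ERR_PTR() on failure, never NULL */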
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
index 5a67fba73ddd..8f8c36056354 100644
--- a/drivers/media/platform/cadence/cdns-csi2tx.c
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -433,13 +433,11 @@ static const struct v4l2_subdev_ops csi2tx_subdev_ops = {
static int csi2tx_get_resources(struct csi2tx_priv *csi2tx,
struct platform_device *pdev)
{
- struct resource *res;
unsigned int i;
u32 dev_cfg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csi2tx->base = devm_ioremap_resource(&pdev->dev, res);
+ csi2tx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi2tx->base))
return PTR_ERR(csi2tx->base);
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 8bc0d8371819..6996d4571e36 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -301,8 +301,7 @@ static int vdoa_probe(struct platform_device *pdev)
return PTR_ERR(vdoa->vdoa_clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vdoa->regs = devm_ioremap_resource(vdoa->dev, res);
+ vdoa->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vdoa->regs))
return PTR_ERR(vdoa->regs);
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index bde241c26d79..4c8e31de12b1 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -621,7 +621,6 @@ static int venc_probe(struct platform_device *pdev)
{
const struct platform_device_id *pdev_id;
struct venc_state *venc;
- struct resource *res;
if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "No platform data for VENC sub device");
@@ -640,16 +639,12 @@ static int venc_probe(struct platform_device *pdev)
venc->pdev = &pdev->dev;
venc->pdata = pdev->dev.platform_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- venc->venc_base = devm_ioremap_resource(&pdev->dev, res);
+ venc->venc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(venc->venc_base))
return PTR_ERR(venc->venc_base);
if (venc->venc_type != VPBE_VERSION_1) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- venc->vdaccfg_reg = devm_ioremap_resource(&pdev->dev, res);
+ venc->vdaccfg_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(venc->vdaccfg_reg))
return PTR_ERR(venc->vdaccfg_reg);
}
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index f1ce10828b8e..5a89d885d0e3 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -425,12 +425,11 @@ EXPORT_SYMBOL(vpif_channel_getfid);
static int vpif_probe(struct platform_device *pdev)
{
- static struct resource *res, *res_irq;
+ static struct resource *res_irq;
struct platform_device *pdev_capture, *pdev_display;
struct device_node *endpoint = NULL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vpif_base = devm_ioremap_resource(&pdev->dev, res);
+ vpif_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vpif_base))
return PTR_ERR(vpif_base);
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index c034e25dd9aa..ae92e2c206d0 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1506,7 +1506,7 @@ vpif_capture_get_pdata(struct platform_device *pdev)
struct vpif_capture_chan_config *chan;
unsigned int i;
- v4l2_async_notifier_init(&vpif_obj.notifier);
+ v4l2_async_nf_init(&vpif_obj.notifier);
/*
* DT boot: OF node from parent device contains
@@ -1582,9 +1582,10 @@ vpif_capture_get_pdata(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Remote device %pOF found\n", rem);
sdinfo->name = rem->full_name;
- pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
- &vpif_obj.notifier, of_fwnode_handle(rem),
- struct v4l2_async_subdev);
+ pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpif_obj.notifier,
+ of_fwnode_handle(rem),
+ struct v4l2_async_subdev);
if (IS_ERR(pdata->asd[i]))
goto err_cleanup;
@@ -1602,7 +1603,7 @@ done:
err_cleanup:
of_node_put(rem);
of_node_put(endpoint);
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
+ v4l2_async_nf_cleanup(&vpif_obj.notifier);
return NULL;
}
@@ -1692,8 +1693,8 @@ static __init int vpif_probe(struct platform_device *pdev)
goto probe_subdev_out;
} else {
vpif_obj.notifier.ops = &vpif_async_ops;
- err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
- &vpif_obj.notifier);
+ err = v4l2_async_nf_register(&vpif_obj.v4l2_dev,
+ &vpif_obj.notifier);
if (err) {
vpif_err("Error registering async notifier\n");
err = -EINVAL;
@@ -1711,7 +1712,7 @@ vpif_unregister:
vpif_free:
free_vpif_objs();
cleanup:
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
+ v4l2_async_nf_cleanup(&vpif_obj.notifier);
return err;
}
@@ -1727,8 +1728,8 @@ static int vpif_remove(struct platform_device *device)
struct channel_obj *ch;
int i;
- v4l2_async_notifier_unregister(&vpif_obj.notifier);
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
+ v4l2_async_nf_unregister(&vpif_obj.notifier);
+ v4l2_async_nf_cleanup(&vpif_obj.notifier);
v4l2_device_unregister(&vpif_obj.v4l2_dev);
kfree(vpif_obj.sd);
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 7000f0bf0b35..d15b991ab17c 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -392,7 +392,6 @@ EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
static int vpss_probe(struct platform_device *pdev)
{
- struct resource *res;
char *platform_name;
if (!pdev->dev.platform_data) {
@@ -413,17 +412,12 @@ static int vpss_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%s vpss probed\n", platform_name);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- oper_cfg.vpss_regs_base0 = devm_ioremap_resource(&pdev->dev, res);
+ oper_cfg.vpss_regs_base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(oper_cfg.vpss_regs_base0))
return PTR_ERR(oper_cfg.vpss_regs_base0);
if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- oper_cfg.vpss_regs_base1 = devm_ioremap_resource(&pdev->dev,
- res);
+ oper_cfg.vpss_regs_base1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(oper_cfg.vpss_regs_base1))
return PTR_ERR(oper_cfg.vpss_regs_base1);
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index f49f3322f835..cfd6ae70b8d8 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -1137,8 +1137,7 @@ static int gsc_probe(struct platform_device *pdev)
spin_lock_init(&gsc->slock);
mutex_init(&gsc->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gsc->regs = devm_ioremap_resource(dev, res);
+ gsc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gsc->regs))
return PTR_ERR(gsc->regs);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index fa648721eaab..544b54e428c9 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -464,9 +464,9 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
return -EINVAL;
}
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &fmd->subdev_notifier, of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&fmd->subdev_notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
of_node_put(ep);
@@ -557,7 +557,7 @@ rpm_put:
cleanup:
of_node_put(ports);
- v4l2_async_notifier_cleanup(&fmd->subdev_notifier);
+ v4l2_async_nf_cleanup(&fmd->subdev_notifier);
pm_runtime_put(fmd->pmf);
return ret;
}
@@ -1481,7 +1481,7 @@ static int fimc_md_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fmd);
- v4l2_async_notifier_init(&fmd->subdev_notifier);
+ v4l2_async_nf_init(&fmd->subdev_notifier);
ret = fimc_md_register_platform_entities(fmd, dev->of_node);
if (ret)
@@ -1509,8 +1509,8 @@ static int fimc_md_probe(struct platform_device *pdev)
fmd->subdev_notifier.ops = &subdev_notifier_ops;
fmd->num_sensors = 0;
- ret = v4l2_async_notifier_register(&fmd->v4l2_dev,
- &fmd->subdev_notifier);
+ ret = v4l2_async_nf_register(&fmd->v4l2_dev,
+ &fmd->subdev_notifier);
if (ret)
goto err_clk_p;
}
@@ -1522,7 +1522,7 @@ err_clk_p:
err_attr:
device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
err_cleanup:
- v4l2_async_notifier_cleanup(&fmd->subdev_notifier);
+ v4l2_async_nf_cleanup(&fmd->subdev_notifier);
err_m_ent:
fimc_md_unregister_entities(fmd);
err_clk:
@@ -1542,8 +1542,8 @@ static int fimc_md_remove(struct platform_device *pdev)
return 0;
fimc_md_unregister_clk_provider(fmd);
- v4l2_async_notifier_unregister(&fmd->subdev_notifier);
- v4l2_async_notifier_cleanup(&fmd->subdev_notifier);
+ v4l2_async_nf_unregister(&fmd->subdev_notifier);
+ v4l2_async_nf_cleanup(&fmd->subdev_notifier);
v4l2_device_unregister(&fmd->v4l2_dev);
device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 32b23329b033..27a214936cb0 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -766,7 +766,6 @@ static int s5pcsis_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
const struct csis_drvdata *drv_data;
struct device *dev = &pdev->dev;
- struct resource *mem_res;
struct csis_state *state;
int ret = -ENOMEM;
int i;
@@ -800,8 +799,7 @@ static int s5pcsis_probe(struct platform_device *pdev)
if (IS_ERR(state->phy))
return PTR_ERR(state->phy);
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->regs = devm_ioremap_resource(dev, mem_res);
+ state->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/imx-jpeg/mxc-jpeg.c
index 755138063ee6..4ca96cf9def7 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/imx-jpeg/mxc-jpeg.c
@@ -49,6 +49,7 @@
#include <linux/slab.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/string.h>
@@ -282,6 +283,20 @@ static const unsigned char jpeg_sos_maximal[] = {
0x11, 0x04, 0x11, 0x00, 0x3F, 0x00
};
+static const unsigned char jpeg_image_red[] = {
+ 0xFC, 0x5F, 0xA2, 0xBF, 0xCA, 0x73, 0xFE, 0xFE,
+ 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
+ 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
+ 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
+ 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
+ 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0,
+ 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
+ 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
+ 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
+ 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
+ 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00
+};
+
static const unsigned char jpeg_eoi[] = {
0xFF, 0xD9
};
@@ -575,6 +590,10 @@ static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ if (!dst_buf || !src_buf) {
+ dev_err(dev, "No source or destination buffer.\n");
+ goto job_unlock;
+ }
jpeg_src_buf = vb2_to_mxc_buf(&src_buf->vb2_buf);
if (dec_ret & SLOT_STATUS_ENC_CONFIG_ERR) {
@@ -760,6 +779,9 @@ static unsigned int mxc_jpeg_setup_cfg_stream(void *cfg_stream_vaddr,
sos = (struct mxc_jpeg_sos *)(cfg + offset);
offset += mxc_jpeg_fixup_sos(sos, fourcc);
+ memcpy(cfg + offset, jpeg_image_red, sizeof(jpeg_image_red));
+ offset += sizeof(jpeg_image_red);
+
memcpy(cfg + offset, jpeg_eoi, sizeof(jpeg_eoi));
offset += sizeof(jpeg_eoi);
@@ -795,6 +817,7 @@ static void mxc_jpeg_config_dec_desc(struct vb2_buffer *out_buf,
img_fmt = mxc_jpeg_fourcc_to_imgfmt(q_data_cap->fmt->fourcc);
desc->stm_ctrl &= ~STM_CTRL_IMAGE_FORMAT(0xF); /* clear image format */
desc->stm_ctrl |= STM_CTRL_IMAGE_FORMAT(img_fmt);
+ desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
desc->line_pitch = q_data_cap->bytesperline[0];
mxc_jpeg_addrs(desc, dst_buf, src_buf, 0);
mxc_jpeg_set_bufsize(desc, ALIGN(vb2_plane_size(src_buf, 0), 1024));
@@ -821,6 +844,7 @@ static void mxc_jpeg_config_dec_desc(struct vb2_buffer *out_buf,
cfg_desc->imgsize |= MXC_JPEG_MIN_HEIGHT;
cfg_desc->line_pitch = MXC_JPEG_MIN_WIDTH * 2;
cfg_desc->stm_ctrl = STM_CTRL_IMAGE_FORMAT(MXC_JPEG_YUV422);
+ cfg_desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
cfg_desc->stm_bufbase = cfg_stream_handle;
cfg_desc->stm_bufsize = ALIGN(*cfg_size, 1024);
print_descriptor_info(jpeg->dev, cfg_desc);
@@ -864,6 +888,7 @@ static void mxc_jpeg_config_enc_desc(struct vb2_buffer *out_buf,
cfg_desc->stm_bufsize = 0x0;
cfg_desc->imgsize = 0;
cfg_desc->stm_ctrl = STM_CTRL_CONFIG_MOD(1);
+ cfg_desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
desc->next_descpt_ptr = 0; /* end of chain */
@@ -878,6 +903,7 @@ static void mxc_jpeg_config_enc_desc(struct vb2_buffer *out_buf,
dev_err(jpeg->dev, "No valid image format detected\n");
desc->stm_ctrl = STM_CTRL_CONFIG_MOD(0) |
STM_CTRL_IMAGE_FORMAT(img_fmt);
+ desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
mxc_jpeg_addrs(desc, src_buf, dst_buf, 0);
dev_dbg(jpeg->dev, "cfg_desc:\n");
print_descriptor_info(jpeg->dev, cfg_desc);
@@ -933,11 +959,6 @@ static void mxc_jpeg_device_run(void *priv)
return;
}
- /*
- * TODO: this reset should be removed, once we figure out
- * how to overcome hardware issues both on encoder and decoder
- */
- mxc_jpeg_sw_reset(reg);
mxc_jpeg_enable(reg);
mxc_jpeg_set_l_endian(reg, 1);
@@ -1058,10 +1079,17 @@ static int mxc_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mxc_jpeg_ctx *ctx = vb2_get_drv_priv(q);
struct mxc_jpeg_q_data *q_data = mxc_jpeg_get_q_data(ctx, q->type);
+ int ret;
dev_dbg(ctx->mxc_jpeg->dev, "Start streaming ctx=%p", ctx);
q_data->sequence = 0;
+ ret = pm_runtime_resume_and_get(ctx->mxc_jpeg->dev);
+ if (ret < 0) {
+ dev_err(ctx->mxc_jpeg->dev, "Failed to power up jpeg\n");
+ return ret;
+ }
+
return 0;
}
@@ -1079,9 +1107,10 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
else
vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (!vbuf)
- return;
+ break;
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}
+ pm_runtime_put_sync(&ctx->mxc_jpeg->pdev->dev);
}
static int mxc_jpeg_valid_comp_id(struct device *dev,
@@ -1941,8 +1970,7 @@ static int mxc_jpeg_attach_pm_domains(struct mxc_jpeg_dev *jpeg)
jpeg->pd_link[i] = device_link_add(dev, jpeg->pd_dev[i],
DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_RPM_ACTIVE);
+ DL_FLAG_PM_RUNTIME);
if (!jpeg->pd_link[i]) {
ret = -EINVAL;
goto fail;
@@ -1959,7 +1987,6 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
{
struct mxc_jpeg_dev *jpeg;
struct device *dev = &pdev->dev;
- struct resource *res;
int dec_irq;
int ret;
int mode;
@@ -1982,8 +2009,7 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
goto err_irq;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpeg->base_reg = devm_ioremap_resource(&pdev->dev, res);
+ jpeg->base_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpeg->base_reg))
return PTR_ERR(jpeg->base_reg);
@@ -2007,6 +2033,19 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
jpeg->dev = dev;
jpeg->mode = mode;
+ /* Get clocks */
+ jpeg->clk_ipg = devm_clk_get(dev, "ipg");
+ if (IS_ERR(jpeg->clk_ipg)) {
+ dev_err(dev, "failed to get clock: ipg\n");
+ goto err_clk;
+ }
+
+ jpeg->clk_per = devm_clk_get(dev, "per");
+ if (IS_ERR(jpeg->clk_per)) {
+ dev_err(dev, "failed to get clock: per\n");
+ goto err_clk;
+ }
+
ret = mxc_jpeg_attach_pm_domains(jpeg);
if (ret < 0) {
dev_err(dev, "failed to attach power domains %d\n", ret);
@@ -2075,6 +2114,7 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
jpeg->dec_vdev->minor);
platform_set_drvdata(pdev, jpeg);
+ pm_runtime_enable(dev);
return 0;
@@ -2088,10 +2128,55 @@ err_m2m:
v4l2_device_unregister(&jpeg->v4l2_dev);
err_register:
+ mxc_jpeg_detach_pm_domains(jpeg);
+
err_irq:
+err_clk:
return ret;
}
+#ifdef CONFIG_PM
+static int mxc_jpeg_runtime_resume(struct device *dev)
+{
+ struct mxc_jpeg_dev *jpeg = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(jpeg->clk_ipg);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock: ipg\n");
+ goto err_ipg;
+ }
+
+ ret = clk_prepare_enable(jpeg->clk_per);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock: per\n");
+ goto err_per;
+ }
+
+ return 0;
+
+err_per:
+ clk_disable_unprepare(jpeg->clk_ipg);
+err_ipg:
+ return ret;
+}
+
+static int mxc_jpeg_runtime_suspend(struct device *dev)
+{
+ struct mxc_jpeg_dev *jpeg = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(jpeg->clk_ipg);
+ clk_disable_unprepare(jpeg->clk_per);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops mxc_jpeg_pm_ops = {
+ SET_RUNTIME_PM_OPS(mxc_jpeg_runtime_suspend,
+ mxc_jpeg_runtime_resume, NULL)
+};
+
static int mxc_jpeg_remove(struct platform_device *pdev)
{
unsigned int slot;
@@ -2100,6 +2185,7 @@ static int mxc_jpeg_remove(struct platform_device *pdev)
for (slot = 0; slot < MXC_MAX_SLOTS; slot++)
mxc_jpeg_free_slot_data(jpeg, slot);
+ pm_runtime_disable(&pdev->dev);
video_unregister_device(jpeg->dec_vdev);
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
@@ -2116,6 +2202,7 @@ static struct platform_driver mxc_jpeg_driver = {
.driver = {
.name = "mxc-jpeg",
.of_match_table = mxc_jpeg_match,
+ .pm = &mxc_jpeg_pm_ops,
},
};
module_platform_driver(mxc_jpeg_driver);
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/imx-jpeg/mxc-jpeg.h
index 4c210852e876..9fb2a5aaa941 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/imx-jpeg/mxc-jpeg.h
@@ -109,6 +109,8 @@ struct mxc_jpeg_dev {
spinlock_t hw_lock; /* hardware access lock */
unsigned int mode;
struct mutex lock; /* v4l2 ioctls serialization */
+ struct clk *clk_ipg;
+ struct clk *clk_per;
struct platform_device *pdev;
struct device *dev;
void __iomem *base_reg;
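
With the ipg/per clocks now owned by runtime PM, the driver's contract is a balanced get/put around streaming. The calls below are the real APIs as used in the hunks above; the sketch just spells out the pairing:

    /* start_streaming: powers up and runs mxc_jpeg_runtime_resume() */
    ret = pm_runtime_resume_and_get(ctx->mxc_jpeg->dev);
    if (ret < 0)
            return ret;

    /* stop_streaming: drop the reference; the clocks gate off again */
    pm_runtime_put_sync(&ctx->mxc_jpeg->pdev->dev);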
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c
index 4321edc0c23d..723b096fedd1 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/imx-pxp.c
@@ -1636,7 +1636,6 @@ static int pxp_soft_reset(struct pxp_dev *dev)
static int pxp_probe(struct platform_device *pdev)
{
struct pxp_dev *dev;
- struct resource *res;
struct video_device *vfd;
int irq;
int ret;
@@ -1652,8 +1651,7 @@ static int pxp_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->mmio = devm_ioremap_resource(&pdev->dev, res);
+ dev->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->mmio))
return PTR_ERR(dev->mmio);
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index 9aa374fa8b36..b61b9d9551af 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -544,12 +544,11 @@ static int cafe_pci_probe(struct pci_dev *pdev,
if (ret)
goto out_pdown;
- v4l2_async_notifier_init(&mcam->notifier);
+ v4l2_async_nf_init(&mcam->notifier);
- asd = v4l2_async_notifier_add_i2c_subdev(&mcam->notifier,
- i2c_adapter_id(cam->i2c_adapter),
- ov7670_info.addr,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_i2c(&mcam->notifier,
+ i2c_adapter_id(cam->i2c_adapter),
+ ov7670_info.addr, struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out_smbus_shutdown;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 58f9463f3b8c..ad4a7922d0d7 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1877,7 +1877,7 @@ int mccic_register(struct mcam_camera *cam)
cam->mbus_code = mcam_def_mbus_code;
cam->notifier.ops = &mccic_notify_ops;
- ret = v4l2_async_notifier_register(&cam->v4l2_dev, &cam->notifier);
+ ret = v4l2_async_nf_register(&cam->v4l2_dev, &cam->notifier);
if (ret < 0) {
cam_warn(cam, "failed to register a sensor notifier");
goto out;
@@ -1914,9 +1914,9 @@ int mccic_register(struct mcam_camera *cam)
return 0;
out:
- v4l2_async_notifier_unregister(&cam->notifier);
+ v4l2_async_nf_unregister(&cam->notifier);
v4l2_device_unregister(&cam->v4l2_dev);
- v4l2_async_notifier_cleanup(&cam->notifier);
+ v4l2_async_nf_cleanup(&cam->notifier);
return ret;
}
EXPORT_SYMBOL_GPL(mccic_register);
@@ -1936,9 +1936,9 @@ void mccic_shutdown(struct mcam_camera *cam)
if (cam->buffer_mode == B_vmalloc)
mcam_free_dma_bufs(cam);
v4l2_ctrl_handler_free(&cam->ctrl_handler);
- v4l2_async_notifier_unregister(&cam->notifier);
+ v4l2_async_nf_unregister(&cam->notifier);
v4l2_device_unregister(&cam->v4l2_dev);
- v4l2_async_notifier_cleanup(&cam->notifier);
+ v4l2_async_nf_cleanup(&cam->notifier);
}
EXPORT_SYMBOL_GPL(mccic_shutdown);
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index f2f09cea751d..343ab4f7d807 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -239,10 +239,10 @@ static int mmpcam_probe(struct platform_device *pdev)
if (!ep)
return -ENODEV;
- v4l2_async_notifier_init(&mcam->notifier);
+ v4l2_async_nf_init(&mcam->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&mcam->notifier, ep,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&mcam->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
index a1393fefa8ae..ccda18e5a377 100644
--- a/drivers/media/platform/meson/ge2d/ge2d.c
+++ b/drivers/media/platform/meson/ge2d/ge2d.c
@@ -779,11 +779,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
* If the rotation parameter changes the OUTPUT frames
* parameters, take them in account
*/
- if (fmt.width != ctx->out.pix_fmt.width ||
- fmt.height != ctx->out.pix_fmt.width ||
- fmt.bytesperline > ctx->out.pix_fmt.bytesperline ||
- fmt.sizeimage > ctx->out.pix_fmt.sizeimage)
- ctx->out.pix_fmt = fmt;
+ ctx->out.pix_fmt = fmt;
break;
}
@@ -926,7 +922,6 @@ static int ge2d_probe(struct platform_device *pdev)
struct reset_control *rst;
struct video_device *vfd;
struct meson_ge2d *ge2d;
- struct resource *res;
void __iomem *regs;
int ret = 0;
int irq;
@@ -941,8 +936,7 @@ static int ge2d_probe(struct platform_device *pdev)
ge2d->dev = &pdev->dev;
mutex_init(&ge2d->mutex);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(ge2d->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index a89c7b206eef..af994b9913a6 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -1341,7 +1341,6 @@ static inline void mtk_jpeg_clk_release(struct mtk_jpeg_dev *jpeg)
static int mtk_jpeg_probe(struct platform_device *pdev)
{
struct mtk_jpeg_dev *jpeg;
- struct resource *res;
int jpeg_irq;
int ret;
@@ -1355,8 +1354,7 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
jpeg->variant = of_device_get_match_data(jpeg->dev);
INIT_DELAYED_WORK(&jpeg->job_timeout_work, mtk_jpeg_job_timeout_work);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpeg->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ jpeg->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpeg->reg_base)) {
ret = PTR_ERR(jpeg->reg_base);
return ret;
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
index 4618d43dbbc8..ca8e9e7a9c4e 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -7,10 +7,13 @@ obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
vdec/vdec_vp8_if.o \
vdec/vdec_vp9_if.o \
+ vdec/vdec_h264_req_if.o \
mtk_vcodec_dec_drv.o \
vdec_drv_if.o \
vdec_vpu_if.o \
mtk_vcodec_dec.o \
+ mtk_vcodec_dec_stateful.o \
+ mtk_vcodec_dec_stateless.o \
mtk_vcodec_dec_pm.o \
mtk-vcodec-enc-y := venc/venc_vp8_if.o \
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 56d86e59421e..2b334a8a81c6 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -16,68 +16,18 @@
#include "vdec_drv_if.h"
#include "mtk_vcodec_dec_pm.h"
-#define OUT_FMT_IDX 0
-#define CAP_FMT_IDX 3
-
-#define MTK_VDEC_MIN_W 64U
-#define MTK_VDEC_MIN_H 64U
#define DFT_CFG_WIDTH MTK_VDEC_MIN_W
#define DFT_CFG_HEIGHT MTK_VDEC_MIN_H
-static const struct mtk_video_fmt mtk_video_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .type = MTK_FMT_DEC,
- .num_planes = 1,
- .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8,
- .type = MTK_FMT_DEC,
- .num_planes = 1,
- .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- },
- {
- .fourcc = V4L2_PIX_FMT_VP9,
- .type = MTK_FMT_DEC,
- .num_planes = 1,
- .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- },
- {
- .fourcc = V4L2_PIX_FMT_MT21C,
- .type = MTK_FMT_FRAME,
- .num_planes = 2,
- },
-};
-
-static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP9,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
-};
-
-#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
-#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
-
-static const struct mtk_video_fmt *mtk_vdec_find_format(struct v4l2_format *f)
+static const struct mtk_video_fmt *
+mtk_vdec_find_format(struct v4l2_format *f,
+ const struct mtk_vcodec_dec_pdata *dec_pdata)
{
const struct mtk_video_fmt *fmt;
unsigned int k;
- for (k = 0; k < NUM_FORMATS; k++) {
- fmt = &mtk_video_formats[k];
+ for (k = 0; k < dec_pdata->num_formats; k++) {
+ fmt = &dec_pdata->vdec_formats[k];
if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
return fmt;
}
@@ -94,408 +44,17 @@ static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
return &ctx->q_data[MTK_Q_DATA_DST];
}
-/*
- * This function tries to clean all display buffers, the buffers will return
- * in display order.
- * Note the buffers returned from codec driver may still be in driver's
- * reference list.
- */
-static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct vdec_fb *disp_frame_buffer = NULL;
- struct mtk_video_dec_buf *dstbuf;
- struct vb2_v4l2_buffer *vb;
-
- mtk_v4l2_debug(3, "[%d]", ctx->id);
- if (vdec_if_get_param(ctx,
- GET_PARAM_DISP_FRAME_BUFFER,
- &disp_frame_buffer)) {
- mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER",
- ctx->id);
- return NULL;
- }
-
- if (disp_frame_buffer == NULL) {
- mtk_v4l2_debug(3, "No display frame buffer");
- return NULL;
- }
-
- dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
- frame_buffer);
- vb = &dstbuf->m2m_buf.vb;
- mutex_lock(&ctx->lock);
- if (dstbuf->used) {
- vb2_set_plane_payload(&vb->vb2_buf, 0,
- ctx->picinfo.fb_sz[0]);
- if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
- vb2_set_plane_payload(&vb->vb2_buf, 1,
- ctx->picinfo.fb_sz[1]);
-
- mtk_v4l2_debug(2,
- "[%d]status=%x queue id=%d to done_list %d",
- ctx->id, disp_frame_buffer->status,
- vb->vb2_buf.index,
- dstbuf->queued_in_vb2);
-
- v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
- ctx->decoded_frame_cnt++;
- }
- mutex_unlock(&ctx->lock);
- return &vb->vb2_buf;
-}
-
-/*
- * This function tries to clean all capture buffers that are not used as
- * reference buffers by codec driver any more
- * In this case, we need re-queue buffer to vb2 buffer if user space
- * already returns this buffer to v4l2 or this buffer is just the output of
- * previous sps/pps/resolution change decode, or do nothing if user
- * space still owns this buffer
- */
-static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct mtk_video_dec_buf *dstbuf;
- struct vdec_fb *free_frame_buffer = NULL;
- struct vb2_v4l2_buffer *vb;
-
- if (vdec_if_get_param(ctx,
- GET_PARAM_FREE_FRAME_BUFFER,
- &free_frame_buffer)) {
- mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
- return NULL;
- }
- if (free_frame_buffer == NULL) {
- mtk_v4l2_debug(3, " No free frame buffer");
- return NULL;
- }
-
- mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p",
- ctx->id, free_frame_buffer);
-
- dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
- frame_buffer);
- vb = &dstbuf->m2m_buf.vb;
-
- mutex_lock(&ctx->lock);
- if (dstbuf->used) {
- if ((dstbuf->queued_in_vb2) &&
- (dstbuf->queued_in_v4l2) &&
- (free_frame_buffer->status == FB_ST_FREE)) {
- /*
- * After decode sps/pps or non-display buffer, we don't
- * need to return capture buffer to user space, but
- * just re-queue this capture buffer to vb2 queue.
- * This reduce overheads that dq/q unused capture
- * buffer. In this case, queued_in_vb2 = true.
- */
- mtk_v4l2_debug(2,
- "[%d]status=%x queue id=%d to rdy_queue %d",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index,
- dstbuf->queued_in_vb2);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
- } else if (!dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2) {
- /*
- * If buffer in v4l2 driver but not in vb2 queue yet,
- * and we get this buffer from free_list, it means
- * that codec driver do not use this buffer as
- * reference buffer anymore. We should q buffer to vb2
- * queue, so later work thread could get this buffer
- * for decode. In this case, queued_in_vb2 = false
- * means this buffer is not from previous decode
- * output.
- */
- mtk_v4l2_debug(2,
- "[%d]status=%x queue id=%d to rdy_queue",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
- dstbuf->queued_in_vb2 = true;
- } else {
- /*
- * Codec driver do not need to reference this capture
- * buffer and this buffer is not in v4l2 driver.
- * Then we don't need to do any thing, just add log when
- * we need to debug buffer flow.
- * When this buffer q from user space, it could
- * directly q to vb2 buffer
- */
- mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index,
- dstbuf->queued_in_vb2,
- dstbuf->queued_in_v4l2);
- }
- dstbuf->used = false;
- }
- mutex_unlock(&ctx->lock);
- return &vb->vb2_buf;
-}
-
-static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct vb2_buffer *framptr;
-
- do {
- framptr = get_display_buffer(ctx);
- } while (framptr);
-}
-
-static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct vb2_buffer *framptr;
-
- do {
- framptr = get_free_buffer(ctx);
- } while (framptr);
-}
-
-static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
-{
- static const struct v4l2_event ev_src_ch = {
- .type = V4L2_EVENT_SOURCE_CHANGE,
- .u.src_change.changes =
- V4L2_EVENT_SRC_CH_RESOLUTION,
- };
-
- mtk_v4l2_debug(1, "[%d]", ctx->id);
- v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
-}
-
-static void mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
-{
- bool res_chg;
- int ret = 0;
-
- ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
- if (ret)
- mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
-
- clean_display_buffer(ctx);
- clean_free_buffer(ctx);
-}
-
-static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
- unsigned int pixelformat)
-{
- const struct mtk_video_fmt *fmt;
- struct mtk_q_data *dst_q_data;
- unsigned int k;
-
- dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
- for (k = 0; k < NUM_FORMATS; k++) {
- fmt = &mtk_video_formats[k];
- if (fmt->fourcc == pixelformat) {
- mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
- dst_q_data->fmt->fourcc, pixelformat);
- dst_q_data->fmt = fmt;
- return;
- }
- }
-
- mtk_v4l2_err("Cannot get fourcc(%d), using init value", pixelformat);
-}
-
-static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
-{
- unsigned int dpbsize = 0;
- int ret;
-
- if (vdec_if_get_param(ctx,
- GET_PARAM_PIC_INFO,
- &ctx->last_decoded_picinfo)) {
- mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
- ctx->id);
- return -EINVAL;
- }
-
- if (ctx->last_decoded_picinfo.pic_w == 0 ||
- ctx->last_decoded_picinfo.pic_h == 0 ||
- ctx->last_decoded_picinfo.buf_w == 0 ||
- ctx->last_decoded_picinfo.buf_h == 0) {
- mtk_v4l2_err("Cannot get correct pic info");
- return -EINVAL;
- }
-
- if (ctx->last_decoded_picinfo.cap_fourcc != ctx->picinfo.cap_fourcc &&
- ctx->picinfo.cap_fourcc != 0)
- mtk_vdec_update_fmt(ctx, ctx->picinfo.cap_fourcc);
-
- if ((ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w) ||
- (ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h))
- return 0;
-
- mtk_v4l2_debug(1,
- "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
- ctx->id, ctx->last_decoded_picinfo.pic_w,
- ctx->last_decoded_picinfo.pic_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- ctx->last_decoded_picinfo.buf_w,
- ctx->last_decoded_picinfo.buf_h);
-
- ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
- if (dpbsize == 0)
- mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
-
- ctx->dpb_size = dpbsize;
-
- return ret;
-}
-
-static void mtk_vdec_worker(struct work_struct *work)
-{
- struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
- decode_work);
- struct mtk_vcodec_dev *dev = ctx->dev;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct mtk_vcodec_mem buf;
- struct vdec_fb *pfb;
- bool res_chg = false;
- int ret;
- struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
-
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- if (src_buf == NULL) {
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
- return;
- }
-
- dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- if (dst_buf == NULL) {
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
- return;
- }
-
- src_buf_info = container_of(src_buf, struct mtk_video_dec_buf,
- m2m_buf.vb);
- dst_buf_info = container_of(dst_buf, struct mtk_video_dec_buf,
- m2m_buf.vb);
-
- pfb = &dst_buf_info->frame_buffer;
- pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
- pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
- pfb->base_y.size = ctx->picinfo.fb_sz[0];
-
- pfb->base_c.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 1);
- pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
- pfb->base_c.size = ctx->picinfo.fb_sz[1];
- pfb->status = 0;
- mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
-
- mtk_v4l2_debug(3,
- "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
- dst_buf->vb2_buf.index, pfb,
- pfb->base_y.va, &pfb->base_y.dma_addr,
- &pfb->base_c.dma_addr, pfb->base_y.size);
-
- if (src_buf_info->lastframe) {
- mtk_v4l2_debug(1, "Got empty flush input buffer.");
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
-
- /* update dst buf status */
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
- mutex_lock(&ctx->lock);
- dst_buf_info->used = false;
- mutex_unlock(&ctx->lock);
-
- vdec_if_decode(ctx, NULL, NULL, &res_chg);
- clean_display_buffer(ctx);
- vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
- if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
- vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
- dst_buf->flags |= V4L2_BUF_FLAG_LAST;
- v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
- clean_free_buffer(ctx);
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- return;
- }
- buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
- buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
- buf.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
- if (!buf.va) {
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
- ctx->id, src_buf->vb2_buf.index);
- return;
- }
- mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
- ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
- dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
- dst_buf->timecode = src_buf->timecode;
- mutex_lock(&ctx->lock);
- dst_buf_info->used = true;
- mutex_unlock(&ctx->lock);
- src_buf_info->used = true;
-
- ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
-
- if (ret) {
- mtk_v4l2_err(
- " <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
- ctx->id,
- src_buf->vb2_buf.index,
- buf.size,
- src_buf->vb2_buf.timestamp,
- dst_buf->vb2_buf.index,
- ret, res_chg);
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- if (ret == -EIO) {
- mutex_lock(&ctx->lock);
- src_buf_info->error = true;
- mutex_unlock(&ctx->lock);
- }
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
- } else if (!res_chg) {
- /*
- * we only return src buffer with VB2_BUF_STATE_DONE
- * when decode success without resolution change
- */
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
- }
-
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
- clean_display_buffer(ctx);
- clean_free_buffer(ctx);
-
- if (!ret && res_chg) {
- mtk_vdec_pic_info_update(ctx);
- /*
- * On encountering a resolution change in the stream.
- * The driver must first process and decode all
- * remaining buffers from before the resolution change
- * point, so call flush decode here
- */
- mtk_vdec_flush_decoder(ctx);
- /*
- * After all buffers containing decoded frames from
- * before the resolution change point ready to be
- * dequeued on the CAPTURE queue, the driver sends a
- * V4L2_EVENT_SOURCE_CHANGE event for source change
- * type V4L2_EVENT_SRC_CH_RESOLUTION
- */
- mtk_vdec_queue_res_chg_event(ctx);
- }
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
-}
-
static int vidioc_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *cmd)
{
- switch (cmd->cmd) {
- case V4L2_DEC_CMD_STOP:
- case V4L2_DEC_CMD_START:
- if (cmd->flags != 0) {
- mtk_v4l2_err("cmd->flags=%u", cmd->flags);
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
- return 0;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ /* Use M2M stateless helper if relevant */
+ if (ctx->dev->vdec_pdata->uses_stateless_api)
+ return v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv,
+ cmd);
+ else
+ return v4l2_m2m_ioctl_try_decoder_cmd(file, priv, cmd);
}
@@ -510,6 +69,10 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
if (ret)
return ret;
+ /* Use M2M stateless helper if relevant */
+ if (ctx->dev->vdec_pdata->uses_stateless_api)
+ return v4l2_m2m_ioctl_stateless_decoder_cmd(file, priv, cmd);
+
mtk_v4l2_debug(1, "decoder cmd=%u", cmd->cmd);
dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
@@ -525,8 +88,7 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
return 0;
}
- v4l2_m2m_buf_queue(ctx->m2m_ctx,
- &ctx->empty_flush_buf->m2m_buf.vb);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf.vb);
v4l2_m2m_try_schedule(ctx->m2m_ctx);
break;
@@ -561,10 +123,12 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
{
struct mtk_q_data *q_data;
+ ctx->dev->vdec_pdata->init_vdec_params(ctx);
+
ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
ctx->fh.m2m_ctx = ctx->m2m_ctx;
ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
- INIT_WORK(&ctx->decode_work, mtk_vdec_worker);
+ INIT_WORK(&ctx->decode_work, ctx->dev->vdec_pdata->worker);
ctx->colorspace = V4L2_COLORSPACE_REC709;
ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
@@ -574,7 +138,7 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
memset(q_data, 0, sizeof(struct mtk_q_data));
q_data->visible_width = DFT_CFG_WIDTH;
q_data->visible_height = DFT_CFG_HEIGHT;
- q_data->fmt = &mtk_video_formats[OUT_FMT_IDX];
+ q_data->fmt = ctx->dev->vdec_pdata->default_out_fmt;
q_data->field = V4L2_FIELD_NONE;
q_data->sizeimage[0] = DFT_CFG_WIDTH * DFT_CFG_HEIGHT;
@@ -586,7 +150,7 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
q_data->visible_height = DFT_CFG_HEIGHT;
q_data->coded_width = DFT_CFG_WIDTH;
q_data->coded_height = DFT_CFG_HEIGHT;
- q_data->fmt = &mtk_video_formats[CAP_FMT_IDX];
+ q_data->fmt = ctx->dev->vdec_pdata->default_cap_fmt;
q_data->field = V4L2_FIELD_NONE;
v4l_bound_align_image(&q_data->coded_width,
@@ -660,19 +224,17 @@ static int vidioc_try_fmt(struct v4l2_format *f,
pix_fmt_mp->field = V4L2_FIELD_NONE;
+ pix_fmt_mp->width =
+ clamp(pix_fmt_mp->width, MTK_VDEC_MIN_W, MTK_VDEC_MAX_W);
+ pix_fmt_mp->height =
+ clamp(pix_fmt_mp->height, MTK_VDEC_MIN_H, MTK_VDEC_MAX_H);
+
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pix_fmt_mp->num_planes = 1;
pix_fmt_mp->plane_fmt[0].bytesperline = 0;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
int tmp_w, tmp_h;
- pix_fmt_mp->height = clamp(pix_fmt_mp->height,
- MTK_VDEC_MIN_H,
- MTK_VDEC_MAX_H);
- pix_fmt_mp->width = clamp(pix_fmt_mp->width,
- MTK_VDEC_MIN_W,
- MTK_VDEC_MAX_W);
-
/*
	 * Find the next closest rectangle with width, height and size
	 * aligned to 64
@@ -722,11 +284,14 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
- fmt = mtk_vdec_find_format(f);
+ fmt = mtk_vdec_find_format(f, dec_pdata);
if (!fmt) {
- f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ f->fmt.pix.pixelformat =
+ ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
}
return vidioc_try_fmt(f, fmt);
@@ -737,11 +302,14 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
const struct mtk_video_fmt *fmt;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
- fmt = mtk_vdec_find_format(f);
+ fmt = mtk_vdec_find_format(f, dec_pdata);
if (!fmt) {
- f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ f->fmt.pix.pixelformat =
+ ctx->q_data[MTK_Q_DATA_SRC].fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
}
if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
@@ -831,6 +399,7 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
struct mtk_q_data *q_data;
int ret = 0;
const struct mtk_video_fmt *fmt;
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
mtk_v4l2_debug(3, "[%d]", ctx->id);
@@ -843,7 +412,8 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
* Setting OUTPUT format after OUTPUT buffers are allocated is invalid
* if using the stateful API.
*/
- if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ if (!dec_pdata->uses_stateless_api &&
+ f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
mtk_v4l2_err("out_q_ctx buffers already requested");
ret = -EBUSY;
@@ -859,16 +429,16 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
ret = -EBUSY;
}
- fmt = mtk_vdec_find_format(f);
+ fmt = mtk_vdec_find_format(f, dec_pdata);
if (fmt == NULL) {
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
f->fmt.pix.pixelformat =
- mtk_video_formats[OUT_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ dec_pdata->default_out_fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
f->fmt.pix.pixelformat =
- mtk_video_formats[CAP_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ dec_pdata->default_cap_fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
}
}
if (fmt == NULL)
@@ -886,6 +456,7 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
ctx->quantization = pix_mp->quantization;
ctx->xfer_func = pix_mp->xfer_func;
+ ctx->current_codec = fmt->fourcc;
if (ctx->state == MTK_STATE_FREE) {
ret = vdec_if_init(ctx, q_data->fmt->fourcc);
if (ret) {
@@ -897,6 +468,48 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
}
}
+ /*
+ * If using the stateless API, S_FMT should have the effect of setting
+ * the CAPTURE queue resolution no matter which queue it was called on.
+ */
+ if (dec_pdata->uses_stateless_api) {
+ ctx->picinfo.pic_w = pix_mp->width;
+ ctx->picinfo.pic_h = pix_mp->height;
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo);
+ if (ret) {
+ mtk_v4l2_err("[%d]Error!! Get GET_PARAM_PICTURE_INFO Fail",
+ ctx->id);
+ return -EINVAL;
+ }
+
+ ctx->last_decoded_picinfo = ctx->picinfo;
+
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1) {
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ ctx->picinfo.fb_sz[0] +
+ ctx->picinfo.fb_sz[1];
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
+ ctx->picinfo.buf_w;
+ } else {
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ ctx->picinfo.fb_sz[0];
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
+ ctx->picinfo.buf_w;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1] =
+ ctx->picinfo.fb_sz[1];
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[1] =
+ ctx->picinfo.buf_w;
+ }
+
+ ctx->q_data[MTK_Q_DATA_DST].coded_width = ctx->picinfo.buf_w;
+ ctx->q_data[MTK_Q_DATA_DST].coded_height = ctx->picinfo.buf_h;
+ mtk_v4l2_debug(2, "[%d] vdec_if_init() num_plane = %d wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id, pix_mp->num_planes, ctx->picinfo.buf_w, ctx->picinfo.buf_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0],
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1]);
+ }
return 0;
}
@@ -905,16 +518,17 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
{
int i = 0;
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
if (fsize->index != 0)
return -EINVAL;
- for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
- if (fsize->pixel_format != mtk_vdec_framesizes[i].fourcc)
+ for (i = 0; i < dec_pdata->num_framesizes; ++i) {
+ if (fsize->pixel_format != dec_pdata->vdec_framesizes[i].fourcc)
continue;
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = mtk_vdec_framesizes[i].stepwise;
+ fsize->stepwise = dec_pdata->vdec_framesizes[i].stepwise;
if (!(ctx->dev->dec_capability &
VCODEC_CAPABILITY_4K_DISABLED)) {
mtk_v4l2_debug(3, "4K is enabled");
@@ -937,16 +551,20 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
return -EINVAL;
}
-static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, void *priv,
+ bool output_queue)
{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
const struct mtk_video_fmt *fmt;
int i, j = 0;
- for (i = 0; i < NUM_FORMATS; i++) {
- if (output_queue && (mtk_video_formats[i].type != MTK_FMT_DEC))
+ for (i = 0; i < dec_pdata->num_formats; i++) {
+ if (output_queue &&
+ dec_pdata->vdec_formats[i].type != MTK_FMT_DEC)
continue;
if (!output_queue &&
- (mtk_video_formats[i].type != MTK_FMT_FRAME))
+ dec_pdata->vdec_formats[i].type != MTK_FMT_FRAME)
continue;
if (j == f->index)
@@ -954,10 +572,10 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
++j;
}
- if (i == NUM_FORMATS)
+ if (i == dec_pdata->num_formats)
return -EINVAL;
- fmt = &mtk_video_formats[i];
+ fmt = &dec_pdata->vdec_formats[i];
f->pixelformat = fmt->fourcc;
f->flags = fmt->flags;
@@ -967,13 +585,13 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
static int vidioc_vdec_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, false);
+ return vidioc_enum_fmt(f, priv, false);
}
static int vidioc_vdec_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, true);
+ return vidioc_enum_fmt(f, priv, true);
}
static int vidioc_vdec_g_fmt(struct file *file, void *priv,
@@ -1064,11 +682,9 @@ static int vidioc_vdec_g_fmt(struct file *file, void *priv,
return 0;
}
-static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers,
- unsigned int *nplanes,
- unsigned int sizes[],
- struct device *alloc_devs[])
+int vb2ops_vdec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
struct mtk_q_data *q_data;
@@ -1088,7 +704,7 @@ static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
}
} else {
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- *nplanes = 2;
+ *nplanes = q_data->fmt->num_planes;
else
*nplanes = 1;
@@ -1104,7 +720,7 @@ static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
return 0;
}
-static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
+int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct mtk_q_data *q_data;
@@ -1126,128 +742,7 @@ static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
return 0;
}
-static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *src_buf;
- struct mtk_vcodec_mem src_mem;
- bool res_chg = false;
- int ret = 0;
- unsigned int dpbsize = 1, i = 0;
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
- struct mtk_video_dec_buf *buf = NULL;
- struct mtk_q_data *dst_q_data;
-
- mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p",
- ctx->id, vb->vb2_queue->type,
- vb->index, vb);
- /*
- * check if this buffer is ready to be used after decode
- */
- if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- vb2_v4l2 = to_vb2_v4l2_buffer(vb);
- buf = container_of(vb2_v4l2, struct mtk_video_dec_buf,
- m2m_buf.vb);
- mutex_lock(&ctx->lock);
- if (!buf->used) {
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
- buf->queued_in_vb2 = true;
- buf->queued_in_v4l2 = true;
- } else {
- buf->queued_in_vb2 = false;
- buf->queued_in_v4l2 = true;
- }
- mutex_unlock(&ctx->lock);
- return;
- }
-
- v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
-
- if (ctx->state != MTK_STATE_INIT) {
- mtk_v4l2_debug(3, "[%d] already init driver %d",
- ctx->id, ctx->state);
- return;
- }
-
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- if (!src_buf) {
- mtk_v4l2_err("No src buffer");
- return;
- }
- buf = container_of(src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
- if (buf->lastframe) {
- /* This shouldn't happen. Just in case. */
- mtk_v4l2_err("Invalid flush buffer.");
- v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- return;
- }
-
- src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
- src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
- src_mem.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
- mtk_v4l2_debug(2,
- "[%d] buf id=%d va=%p dma=%pad size=%zx",
- ctx->id, src_buf->vb2_buf.index,
- src_mem.va, &src_mem.dma_addr,
- src_mem.size);
-
- ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
- if (ret || !res_chg) {
- /*
- * fb == NULL means to parse SPS/PPS header or
- * resolution info in src_mem. Decode can fail
- * if there is no SPS header or picture info
- * in bs
- */
-
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- if (ret == -EIO) {
- mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.",
- ctx->id);
- ctx->state = MTK_STATE_ABORT;
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
- } else {
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
- }
- mtk_v4l2_debug(ret ? 0 : 1,
- "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
- ctx->id, src_buf->vb2_buf.index,
- src_mem.size, ret, res_chg);
- return;
- }
-
- if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
- mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
- ctx->id);
- return;
- }
-
- ctx->last_decoded_picinfo = ctx->picinfo;
- dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
- for (i = 0; i < dst_q_data->fmt->num_planes; i++) {
- dst_q_data->sizeimage[i] = ctx->picinfo.fb_sz[i];
- dst_q_data->bytesperline[i] = ctx->picinfo.buf_w;
- }
-
- mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
- ctx->id,
- ctx->picinfo.buf_w, ctx->picinfo.buf_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- dst_q_data->sizeimage[0],
- dst_q_data->sizeimage[1]);
-
- ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
- if (dpbsize == 0)
- mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
-
- ctx->dpb_size = dpbsize;
- ctx->state = MTK_STATE_HEADER;
- mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
-
- mtk_vdec_queue_res_chg_event(ctx);
-}
-
-static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
+void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2_v4l2;
@@ -1270,7 +765,7 @@ static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
}
}
-static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
+int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
struct vb2_v4l2_buffer, vb2_buf);
@@ -1280,14 +775,12 @@ static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
buf->used = false;
buf->queued_in_v4l2 = false;
- } else {
- buf->lastframe = false;
}
return 0;
}
-static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
@@ -1297,21 +790,25 @@ static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
}
-static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
+void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
{
struct vb2_v4l2_buffer *src_buf = NULL, *dst_buf = NULL;
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ int ret;
mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d",
ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
- struct mtk_video_dec_buf *buf_info = container_of(
- src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
- if (!buf_info->lastframe)
+ if (src_buf != &ctx->empty_flush_buf.vb) {
+ struct media_request *req =
+ src_buf->vb2_buf.req_obj.req;
v4l2_m2m_buf_done(src_buf,
VB2_BUF_STATE_ERROR);
+ if (req)
+ v4l2_ctrl_request_complete(req, &ctx->ctrl_hdl);
+ }
}
return;
}
@@ -1334,7 +831,9 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
ctx->last_decoded_picinfo.buf_w,
ctx->last_decoded_picinfo.buf_h);
- mtk_vdec_flush_decoder(ctx);
+ ret = ctx->dev->vdec_pdata->flush_decoder(ctx);
+ if (ret)
+ mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
}
ctx->state = MTK_STATE_FLUSH;
@@ -1381,75 +880,12 @@ static void m2mops_vdec_job_abort(void *priv)
ctx->state = MTK_STATE_ABORT;
}
-static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
- if (ctx->state >= MTK_STATE_HEADER) {
- ctrl->val = ctx->dpb_size;
- } else {
- mtk_v4l2_debug(0, "Seqinfo not ready");
- ctrl->val = 0;
- }
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
- .g_volatile_ctrl = mtk_vdec_g_v_ctrl,
-};
-
-int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
-{
- struct v4l2_ctrl *ctrl;
-
- v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 1);
-
- ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl,
- &mtk_vcodec_dec_ctrl_ops,
- V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
- 0, 32, 1, 1);
- ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
- v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl,
- &mtk_vcodec_dec_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
- V4L2_MPEG_VIDEO_VP9_PROFILE_0,
- 0, V4L2_MPEG_VIDEO_VP9_PROFILE_0);
-
- if (ctx->ctrl_hdl.error) {
- mtk_v4l2_err("Adding control failed %d",
- ctx->ctrl_hdl.error);
- return ctx->ctrl_hdl.error;
- }
-
- v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
- return 0;
-}
-
const struct v4l2_m2m_ops mtk_vdec_m2m_ops = {
.device_run = m2mops_vdec_device_run,
.job_ready = m2mops_vdec_job_ready,
.job_abort = m2mops_vdec_job_abort,
};
-static const struct vb2_ops mtk_vdec_vb2_ops = {
- .queue_setup = vb2ops_vdec_queue_setup,
- .buf_prepare = vb2ops_vdec_buf_prepare,
- .buf_queue = vb2ops_vdec_buf_queue,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
- .buf_init = vb2ops_vdec_buf_init,
- .buf_finish = vb2ops_vdec_buf_finish,
- .start_streaming = vb2ops_vdec_start_streaming,
- .stop_streaming = vb2ops_vdec_stop_streaming,
-};
-
const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
@@ -1496,7 +932,7 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
- src_vq->ops = &mtk_vdec_vb2_ops;
+ src_vq->ops = ctx->dev->vdec_pdata->vdec_vb2_ops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->dev_mutex;
@@ -1511,7 +947,7 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
- dst_vq->ops = &mtk_vdec_vb2_ops;
+ dst_vq->ops = ctx->dev->vdec_pdata->vdec_vb2_ops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->dev_mutex;
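
The vidioc_try_fmt() hunk above hoists the width/height clamp() so it runs for both the OUTPUT and CAPTURE queues before the CAPTURE-only align-to-64 step. A minimal userspace sketch of the clamp-then-align arithmetic (illustrative helpers, not the kernel's clamp()/v4l_bound_align_image()):

#include <stdio.h>

#define MTK_VDEC_MIN_W 64U
#define MTK_VDEC_MAX_W 2048U

/* Same semantics as the kernel's clamp() for unsigned ints. */
static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Round up to the next multiple of align (a power of two). */
static unsigned int align_up(unsigned int v, unsigned int align)
{
	return (v + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned int w;

	w = align_up(clamp_u(1920, MTK_VDEC_MIN_W, MTK_VDEC_MAX_W), 64);
	printf("1920 -> %u\n", w);	/* 1920, already aligned */

	w = align_up(clamp_u(4096, MTK_VDEC_MIN_W, MTK_VDEC_MAX_W), 64);
	printf("4096 -> %u\n", w);	/* clamped to 2048 */
	return 0;
}

Clamping before aligning is safe here because MTK_VDEC_MAX_W (2048) and MTK_VDEC_MAX_H (1088) are themselves multiples of 64, so the aligned result never exceeds the limits.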
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
index cf26b6c1486a..46783516b84a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -16,6 +16,8 @@
#define VCODEC_DEC_4K_CODED_HEIGHT 2304U
#define MTK_VDEC_MAX_W 2048U
#define MTK_VDEC_MAX_H 1088U
+#define MTK_VDEC_MIN_W 64U
+#define MTK_VDEC_MIN_H 64U
#define MTK_VDEC_IRQ_STATUS_DEC_SUCCESS 0x10000
@@ -40,9 +42,9 @@ struct vdec_fb {
* @queued_in_vb2: Capture buffer is queue in vb2
* @queued_in_v4l2: Capture buffer is in v4l2 driver, but not in vb2
* queue yet
- * @lastframe: Intput buffer is last buffer - EOS
* @error: An unrecoverable error occurs on this buffer.
* @frame_buffer: Decode status, and buffer information of Capture buffer
+ * @bs_buffer: Bitstream buffer information of the OUTPUT queue
*
* Note : These status information help us track and debug buffer state
*/
@@ -52,13 +54,19 @@ struct mtk_video_dec_buf {
bool used;
bool queued_in_vb2;
bool queued_in_v4l2;
- bool lastframe;
bool error;
- struct vdec_fb frame_buffer;
+
+ union {
+ struct vdec_fb frame_buffer;
+ struct mtk_vcodec_mem bs_buffer;
+ };
};
extern const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops;
extern const struct v4l2_m2m_ops mtk_vdec_m2m_ops;
+extern const struct media_device_ops mtk_vcodec_media_ops;
+extern const struct mtk_vcodec_dec_pdata mtk_vdec_8173_pdata;
+extern const struct mtk_vcodec_dec_pdata mtk_vdec_8183_pdata;
/*
@@ -73,7 +81,18 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx);
void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx);
-int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx);
+
+/*
+ * VB2 ops
+ */
+int vb2ops_vdec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[]);
+int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb);
+void vb2ops_vdec_buf_finish(struct vb2_buffer *vb);
+int vb2ops_vdec_buf_init(struct vb2_buffer *vb);
+int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count);
+void vb2ops_vdec_stop_streaming(struct vb2_queue *q);
#endif /* _MTK_VCODEC_DEC_H_ */
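
The new frame_buffer/bs_buffer union works because each decoder flavor uses only one member per buffer: the stateful path wraps CAPTURE frames in frame_buffer, the stateless path wraps OUTPUT bitstream data in bs_buffer, and both sides recover the enclosing mtk_video_dec_buf with container_of(). A self-contained sketch of that recovery pattern, using simplified stand-in structures rather than the real definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vdec_fb { unsigned long status; };	/* stand-in */
struct mtk_vcodec_mem { size_t size; };		/* stand-in */

struct mtk_video_dec_buf {
	int used;
	union {
		struct vdec_fb frame_buffer;	 /* stateful: decoded frame */
		struct mtk_vcodec_mem bs_buffer; /* stateless: bitstream */
	};
};

int main(void)
{
	struct mtk_video_dec_buf buf = { .used = 1 };
	struct vdec_fb *fb = &buf.frame_buffer;

	/* The codec layer hands back fb; recover the wrapper: */
	struct mtk_video_dec_buf *owner =
		container_of(fb, struct mtk_video_dec_buf, frame_buffer);

	printf("owner->used = %d\n", owner->used);
	return 0;
}

Since the two members are never live on the same buffer at the same time, the union costs nothing in clarity while trimming the per-buffer footprint.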
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index f87dc47d9e63..e6e6a8203eeb 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -14,6 +14,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_dec.h"
@@ -81,21 +82,14 @@ static int fops_vcodec_open(struct file *file)
{
struct mtk_vcodec_dev *dev = video_drvdata(file);
struct mtk_vcodec_ctx *ctx = NULL;
- struct mtk_video_dec_buf *mtk_buf = NULL;
int ret = 0;
struct vb2_queue *src_vq;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- mtk_buf = kzalloc(sizeof(*mtk_buf), GFP_KERNEL);
- if (!mtk_buf) {
- kfree(ctx);
- return -ENOMEM;
- }
mutex_lock(&dev->dev_mutex);
- ctx->empty_flush_buf = mtk_buf;
ctx->id = dev->id_counter++;
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
@@ -106,7 +100,7 @@ static int fops_vcodec_open(struct file *file)
mutex_init(&ctx->lock);
ctx->type = MTK_INST_DECODER;
- ret = mtk_vcodec_dec_ctrls_setup(ctx);
+ ret = dev->vdec_pdata->ctrls_setup(ctx);
if (ret) {
mtk_v4l2_err("Failed to setup mt vcodec controls");
goto err_ctrls_setup;
@@ -121,8 +115,7 @@ static int fops_vcodec_open(struct file *file)
}
src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- ctx->empty_flush_buf->m2m_buf.vb.vb2_buf.vb2_queue = src_vq;
- ctx->empty_flush_buf->lastframe = true;
+ ctx->empty_flush_buf.vb.vb2_buf.vb2_queue = src_vq;
mtk_vcodec_dec_set_default_params(ctx);
if (v4l2_fh_is_singular(&ctx->fh)) {
@@ -162,7 +155,6 @@ err_m2m_ctx_init:
err_ctrls_setup:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
- kfree(ctx->empty_flush_buf);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -193,7 +185,6 @@ static int fops_vcodec_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
list_del_init(&ctx->list);
- kfree(ctx->empty_flush_buf);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -224,6 +215,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dev->ctx_list);
dev->plat_dev = pdev;
+ dev->vdec_pdata = of_device_get_match_data(&pdev->dev);
if (!of_property_read_u32(pdev->dev.of_node, "mediatek,vpu",
&rproc_phandle)) {
fw_type = VPU;
@@ -325,18 +317,47 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_event_workq;
}
+ if (dev->vdec_pdata->uses_stateless_api) {
+ dev->mdev_dec.dev = &pdev->dev;
+ strscpy(dev->mdev_dec.model, MTK_VCODEC_DEC_NAME,
+ sizeof(dev->mdev_dec.model));
+
+ media_device_init(&dev->mdev_dec);
+ dev->mdev_dec.ops = &mtk_vcodec_media_ops;
+ dev->v4l2_dev.mdev = &dev->mdev_dec;
+
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev_dec, dev->vfd_dec,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ mtk_v4l2_err("Failed to register media controller");
+ goto err_reg_cont;
+ }
+
+ ret = media_device_register(&dev->mdev_dec);
+ if (ret) {
+ mtk_v4l2_err("Failed to register media device");
+ goto err_media_reg;
+ }
+
+ mtk_v4l2_debug(0, "media registered as /dev/media%d", vfd_dec->minor);
+ }
ret = video_register_device(vfd_dec, VFL_TYPE_VIDEO, 0);
if (ret) {
mtk_v4l2_err("Failed to register video device");
goto err_dec_reg;
}
- mtk_v4l2_debug(0, "decoder registered as /dev/video%d",
- vfd_dec->num);
+ mtk_v4l2_debug(0, "decoder registered as /dev/video%d", vfd_dec->minor);
return 0;
err_dec_reg:
+ if (dev->vdec_pdata->uses_stateless_api)
+ media_device_unregister(&dev->mdev_dec);
+err_media_reg:
+ if (dev->vdec_pdata->uses_stateless_api)
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev_dec);
+err_reg_cont:
destroy_workqueue(dev->decode_workqueue);
err_event_workq:
v4l2_m2m_release(dev->m2m_dev_dec);
@@ -352,7 +373,14 @@ err_dec_pm:
}
static const struct of_device_id mtk_vcodec_match[] = {
- {.compatible = "mediatek,mt8173-vcodec-dec",},
+ {
+ .compatible = "mediatek,mt8173-vcodec-dec",
+ .data = &mtk_vdec_8173_pdata,
+ },
+ {
+ .compatible = "mediatek,mt8183-vcodec-dec",
+ .data = &mtk_vdec_8183_pdata,
+ },
{},
};
@@ -364,6 +392,13 @@ static int mtk_vcodec_dec_remove(struct platform_device *pdev)
flush_workqueue(dev->decode_workqueue);
destroy_workqueue(dev->decode_workqueue);
+
+ if (media_devnode_is_registered(dev->mdev_dec.devnode)) {
+ media_device_unregister(&dev->mdev_dec);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev_dec);
+ media_device_cleanup(&dev->mdev_dec);
+ }
+
if (dev->m2m_dev_dec)
v4l2_m2m_release(dev->m2m_dev_dec);
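
Note how the probe error path above unwinds in reverse order: err_dec_reg undoes the media device registration, err_media_reg the media-controller registration, and err_reg_cont falls through to the pre-existing workqueue cleanup, with the first two guarded by uses_stateless_api because stateful platforms never registered those objects. A generic sketch of this goto-unwind idiom (illustrative step names, not the driver's functions):

#include <stdio.h>

static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return -1; /* simulated failure */ }
static int setup_c(void) { puts("setup c"); return 0; }
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int probe(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto err_a;
	ret = setup_b();
	if (ret)
		goto err_b;	/* taken: only setup_a needs undoing */
	ret = setup_c();
	if (ret)
		goto err_c;
	return 0;

err_c:
	undo_b();
err_b:
	undo_a();
err_a:
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}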
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c
new file mode 100644
index 000000000000..bef49244e61b
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "vdec_drv_if.h"
+
+static const struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MT21C,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+#define DEFAULT_OUT_FMT_IDX 0
+#define DEFAULT_CAP_FMT_IDX 3
+
+static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
+
+/*
+ * This function tries to clean all display buffers; the buffers are returned
+ * in display order.
+ * Note that buffers returned by the codec driver may still be on the
+ * driver's reference list.
+ */
+static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_fb *disp_frame_buffer = NULL;
+ struct mtk_video_dec_buf *dstbuf;
+ struct vb2_v4l2_buffer *vb;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+ if (vdec_if_get_param(ctx, GET_PARAM_DISP_FRAME_BUFFER,
+ &disp_frame_buffer)) {
+ mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER", ctx->id);
+ return NULL;
+ }
+
+ if (!disp_frame_buffer) {
+ mtk_v4l2_debug(3, "No display frame buffer");
+ return NULL;
+ }
+
+ dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+ vb = &dstbuf->m2m_buf.vb;
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ vb2_set_plane_payload(&vb->vb2_buf, 0, ctx->picinfo.fb_sz[0]);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
+ vb2_set_plane_payload(&vb->vb2_buf, 1,
+ ctx->picinfo.fb_sz[1]);
+
+ mtk_v4l2_debug(2, "[%d]status=%x queue id=%d to done_list %d",
+ ctx->id, disp_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2);
+
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
+ ctx->decoded_frame_cnt++;
+ }
+ mutex_unlock(&ctx->lock);
+ return &vb->vb2_buf;
+}
+
+/*
+ * This function tries to clean all capture buffers that the codec driver no
+ * longer uses as reference buffers.
+ * In this case, we re-queue the buffer to the vb2 queue if user space has
+ * already returned it to v4l2, or if it is just the output of a previous
+ * SPS/PPS/resolution-change decode, and do nothing if user space still owns
+ * the buffer.
+ */
+static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_video_dec_buf *dstbuf;
+ struct vdec_fb *free_frame_buffer = NULL;
+ struct vb2_v4l2_buffer *vb;
+
+ if (vdec_if_get_param(ctx, GET_PARAM_FREE_FRAME_BUFFER,
+ &free_frame_buffer)) {
+ mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
+ return NULL;
+ }
+ if (!free_frame_buffer) {
+ mtk_v4l2_debug(3, " No free frame buffer");
+ return NULL;
+ }
+
+ mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p", ctx->id,
+ free_frame_buffer);
+
+ dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+ vb = &dstbuf->m2m_buf.vb;
+
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ if (dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2 &&
+ free_frame_buffer->status == FB_ST_FREE) {
+ /*
+			 * After decoding SPS/PPS or a non-display buffer, we
+			 * don't need to return the capture buffer to user
+			 * space; just re-queue it to the vb2 queue. This
+			 * avoids the overhead of dequeuing/queuing an unused
+			 * capture buffer. In this case, queued_in_vb2 = true.
+ */
+ mtk_v4l2_debug(2, "[%d]status=%x queue id=%d to rdy_queue %d",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ } else if (!dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2) {
+ /*
+			 * If the buffer is in the v4l2 driver but not yet in
+			 * the vb2 queue, and we get it from the free list, it
+			 * means the codec driver no longer uses it as a
+			 * reference buffer. We should queue it to the vb2
+			 * queue so the work thread can later pick it up for
+			 * decoding. In this case, queued_in_vb2 = false means
+			 * this buffer is not the output of a previous decode.
+ */
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to rdy_queue",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ dstbuf->queued_in_vb2 = true;
+ } else {
+ /*
+			 * The codec driver no longer references this capture
+			 * buffer and it is not in the v4l2 driver, so there is
+			 * nothing to do; just log it for debugging the buffer
+			 * flow. When user space queues this buffer, it can go
+			 * directly to the vb2 queue.
+ */
+ mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2,
+ dstbuf->queued_in_v4l2);
+ }
+ dstbuf->used = false;
+ }
+ mutex_unlock(&ctx->lock);
+ return &vb->vb2_buf;
+}
+
+static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ while (get_display_buffer(ctx))
+ ;
+}
+
+static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ while (get_free_buffer(ctx))
+ ;
+}
+
+static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
+{
+ static const struct v4l2_event ev_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ mtk_v4l2_debug(1, "[%d]", ctx->id);
+ v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+}
+
+static int mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+{
+ bool res_chg;
+ int ret;
+
+ ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ if (ret)
+ mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
+
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+
+ return 0;
+}
+
+static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
+ unsigned int pixelformat)
+{
+ const struct mtk_video_fmt *fmt;
+ struct mtk_q_data *dst_q_data;
+ unsigned int k;
+
+ dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &mtk_video_formats[k];
+ if (fmt->fourcc == pixelformat) {
+ mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
+ dst_q_data->fmt->fourcc, pixelformat);
+ dst_q_data->fmt = fmt;
+ return;
+ }
+ }
+
+ mtk_v4l2_err("Cannot get fourcc(%d), using init value", pixelformat);
+}
+
+static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
+{
+ unsigned int dpbsize = 0;
+ int ret;
+
+ if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO,
+ &ctx->last_decoded_picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR", ctx->id);
+ return -EINVAL;
+ }
+
+ if (ctx->last_decoded_picinfo.pic_w == 0 ||
+ ctx->last_decoded_picinfo.pic_h == 0 ||
+ ctx->last_decoded_picinfo.buf_w == 0 ||
+ ctx->last_decoded_picinfo.buf_h == 0) {
+ mtk_v4l2_err("Cannot get correct pic info");
+ return -EINVAL;
+ }
+
+ if (ctx->last_decoded_picinfo.cap_fourcc != ctx->picinfo.cap_fourcc &&
+ ctx->picinfo.cap_fourcc != 0)
+ mtk_vdec_update_fmt(ctx, ctx->picinfo.cap_fourcc);
+
+ if (ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w ||
+ ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h)
+ return 0;
+
+ mtk_v4l2_debug(1, "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)", ctx->id,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h, ctx->picinfo.pic_w,
+ ctx->picinfo.pic_h, ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
+
+ ctx->dpb_size = dpbsize;
+
+ return ret;
+}
+
+static void mtk_vdec_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx =
+ container_of(work, struct mtk_vcodec_ctx, decode_work);
+ struct mtk_vcodec_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct mtk_vcodec_mem buf;
+ struct vdec_fb *pfb;
+ bool res_chg = false;
+ int ret;
+ struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!src_buf) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
+ return;
+ }
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ if (!dst_buf) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
+ return;
+ }
+
+ dst_buf_info =
+ container_of(dst_buf, struct mtk_video_dec_buf, m2m_buf.vb);
+
+ pfb = &dst_buf_info->frame_buffer;
+ pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ pfb->base_y.dma_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ pfb->base_y.size = ctx->picinfo.fb_sz[0];
+
+ pfb->base_c.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 1);
+ pfb->base_c.dma_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
+ pfb->base_c.size = ctx->picinfo.fb_sz[1];
+ pfb->status = 0;
+ mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
+
+ mtk_v4l2_debug(3,
+ "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
+ dst_buf->vb2_buf.index, pfb, pfb->base_y.va,
+ &pfb->base_y.dma_addr, &pfb->base_c.dma_addr, pfb->base_y.size);
+
+ if (src_buf == &ctx->empty_flush_buf.vb) {
+ mtk_v4l2_debug(1, "Got empty flush input buffer.");
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+
+ /* update dst buf status */
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ mutex_lock(&ctx->lock);
+ dst_buf_info->used = false;
+ mutex_unlock(&ctx->lock);
+
+ vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ clean_display_buffer(ctx);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ clean_free_buffer(ctx);
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ return;
+ }
+
+ src_buf_info =
+ container_of(src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
+
+ buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ buf.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
+ if (!buf.va) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_err("[%d] id=%d src_addr is NULL!!", ctx->id,
+ src_buf->vb2_buf.index);
+ return;
+ }
+ mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->timecode = src_buf->timecode;
+ mutex_lock(&ctx->lock);
+ dst_buf_info->used = true;
+ mutex_unlock(&ctx->lock);
+ src_buf_info->used = true;
+
+ ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
+
+ if (ret) {
+ mtk_v4l2_err(" <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
+ ctx->id, src_buf->vb2_buf.index, buf.size,
+ src_buf->vb2_buf.timestamp, dst_buf->vb2_buf.index, ret, res_chg);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ if (ret == -EIO) {
+ mutex_lock(&ctx->lock);
+ src_buf_info->error = true;
+ mutex_unlock(&ctx->lock);
+ }
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ } else if (!res_chg) {
+ /*
+		 * We only return the src buffer with VB2_BUF_STATE_DONE
+		 * when decoding succeeds without a resolution change.
+ */
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ }
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+
+ if (!ret && res_chg) {
+ mtk_vdec_pic_info_update(ctx);
+ /*
+		 * On encountering a resolution change in the stream,
+		 * the driver must first process and decode all
+		 * remaining buffers from before the resolution change
+		 * point, so flush the decoder here.
+ */
+ mtk_vdec_flush_decoder(ctx);
+ /*
+		 * After all buffers containing decoded frames from
+		 * before the resolution change point are ready to be
+		 * dequeued on the CAPTURE queue, the driver sends a
+		 * V4L2_EVENT_SOURCE_CHANGE event with source change
+		 * type V4L2_EVENT_SRC_CH_RESOLUTION.
+ */
+ mtk_vdec_queue_res_chg_event(ctx);
+ }
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+}
+
+static void vb2ops_vdec_stateful_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *src_buf;
+ struct mtk_vcodec_mem src_mem;
+ bool res_chg = false;
+ int ret;
+ unsigned int dpbsize = 1, i;
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ struct mtk_q_data *dst_q_data;
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id,
+ vb->vb2_queue->type, vb->index, vb);
+ /*
+	 * Check whether this buffer is ready to be used after decoding.
+ */
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct mtk_video_dec_buf *buf;
+
+ vb2_v4l2 = to_vb2_v4l2_buffer(vb);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf,
+ m2m_buf.vb);
+ mutex_lock(&ctx->lock);
+ if (!buf->used) {
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+ buf->queued_in_vb2 = true;
+ buf->queued_in_v4l2 = true;
+ } else {
+ buf->queued_in_vb2 = false;
+ buf->queued_in_v4l2 = true;
+ }
+ mutex_unlock(&ctx->lock);
+ return;
+ }
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+
+ if (ctx->state != MTK_STATE_INIT) {
+ mtk_v4l2_debug(3, "[%d] already init driver %d", ctx->id,
+ ctx->state);
+ return;
+ }
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!src_buf) {
+ mtk_v4l2_err("No src buffer");
+ return;
+ }
+
+ if (src_buf == &ctx->empty_flush_buf.vb) {
+ /* This shouldn't happen. Just in case. */
+ mtk_v4l2_err("Invalid flush buffer.");
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ return;
+ }
+
+ src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ src_mem.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
+ mtk_v4l2_debug(2, "[%d] buf id=%d va=%p dma=%pad size=%zx", ctx->id,
+ src_buf->vb2_buf.index, src_mem.va, &src_mem.dma_addr,
+ src_mem.size);
+
+ ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
+ if (ret || !res_chg) {
+ /*
+		 * fb == NULL means we are parsing the SPS/PPS header or
+		 * resolution info in src_mem. Decoding can fail if
+		 * there is no SPS header or picture info in the
+		 * bitstream.
+ */
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ if (ret == -EIO) {
+ mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.", ctx->id);
+ ctx->state = MTK_STATE_ABORT;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ }
+ mtk_v4l2_debug(ret ? 0 : 1,
+ "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
+ ctx->id, src_buf->vb2_buf.index, src_mem.size, ret, res_chg);
+ return;
+ }
+
+ if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR", ctx->id);
+ return;
+ }
+
+ ctx->last_decoded_picinfo = ctx->picinfo;
+ dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ for (i = 0; i < dst_q_data->fmt->num_planes; i++) {
+ dst_q_data->sizeimage[i] = ctx->picinfo.fb_sz[i];
+ dst_q_data->bytesperline[i] = ctx->picinfo.buf_w;
+ }
+
+ mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id, ctx->picinfo.buf_w, ctx->picinfo.buf_h, ctx->picinfo.pic_w,
+ ctx->picinfo.pic_h, dst_q_data->sizeimage[0], dst_q_data->sizeimage[1]);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
+
+ ctx->dpb_size = dpbsize;
+ ctx->state = MTK_STATE_HEADER;
+ mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
+
+ mtk_vdec_queue_res_chg_event(ctx);
+}
+
+static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (ctx->state >= MTK_STATE_HEADER) {
+ ctrl->val = ctx->dpb_size;
+ } else {
+ mtk_v4l2_debug(0, "Seqinfo not ready");
+ ctrl->val = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
+ .g_volatile_ctrl = mtk_vdec_g_v_ctrl,
+};
+
+static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 1);
+
+ ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl, &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 0, 32, 1, 1);
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl, &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0, 0,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0);
+ /*
+ * H264. Baseline / Extended decoding is not supported.
+ */
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl, &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
+
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("Adding control failed %d", ctx->ctrl_hdl.error);
+ return ctx->ctrl_hdl.error;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+ return 0;
+}
+
+static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+{
+}
+
+static struct vb2_ops mtk_vdec_frame_vb2_ops = {
+ .queue_setup = vb2ops_vdec_queue_setup,
+ .buf_prepare = vb2ops_vdec_buf_prepare,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vb2ops_vdec_start_streaming,
+
+ .buf_queue = vb2ops_vdec_stateful_buf_queue,
+ .buf_init = vb2ops_vdec_buf_init,
+ .buf_finish = vb2ops_vdec_buf_finish,
+ .stop_streaming = vb2ops_vdec_stop_streaming,
+};
+
+const struct mtk_vcodec_dec_pdata mtk_vdec_8173_pdata = {
+ .chip = MTK_MT8173,
+ .init_vdec_params = mtk_init_vdec_params,
+ .ctrls_setup = mtk_vcodec_dec_ctrls_setup,
+ .vdec_vb2_ops = &mtk_vdec_frame_vb2_ops,
+ .vdec_formats = mtk_video_formats,
+ .num_formats = NUM_FORMATS,
+ .default_out_fmt = &mtk_video_formats[DEFAULT_OUT_FMT_IDX],
+ .default_cap_fmt = &mtk_video_formats[DEFAULT_CAP_FMT_IDX],
+ .vdec_framesizes = mtk_vdec_framesizes,
+ .num_framesizes = NUM_SUPPORTED_FRAMESIZE,
+ .worker = mtk_vdec_worker,
+ .flush_decoder = mtk_vdec_flush_decoder,
+};
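
With the stateful pdata in place, the drain flow is the one mtk_vdec_worker() implements around empty_flush_buf: user space sends V4L2_DEC_CMD_STOP, the driver queues the size-0 sentinel on the OUTPUT queue, and the worker eventually completes a zero-payload CAPTURE buffer flagged V4L2_BUF_FLAG_LAST. A hedged userspace sketch of that sequence (error handling and buffer setup elided; fd is assumed to be an open, streaming decoder node):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int drain_decoder(int fd)
{
	struct v4l2_decoder_cmd cmd;
	struct v4l2_buffer buf;
	struct v4l2_plane planes[2];

	/* Ask the decoder to process everything already queued. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = V4L2_DEC_CMD_STOP;
	if (ioctl(fd, VIDIOC_DECODER_CMD, &cmd))
		return -1;

	/* Dequeue CAPTURE buffers until the driver marks the last one. */
	for (;;) {
		memset(&buf, 0, sizeof(buf));
		memset(planes, 0, sizeof(planes));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.m.planes = planes;
		buf.length = 2;
		if (ioctl(fd, VIDIOC_DQBUF, &buf))
			return -1;
		/* ... consume the decoded frame here ... */
		if (buf.flags & V4L2_BUF_FLAG_LAST)
			break;	/* drain complete */
	}
	return 0;
}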
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c
new file mode 100644
index 000000000000..8f4a1f0a0769
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/module.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "vdec_drv_if.h"
+
+/**
+ * struct mtk_stateless_control - CID control type
+ * @cfg: control configuration
+ * @codec_type: codec type (V4L2 pixel format) for CID control type
+ */
+struct mtk_stateless_control {
+ struct v4l2_ctrl_config cfg;
+ int codec_type;
+};
+
+static const struct mtk_stateless_control mtk_stateless_controls[] = {
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SPS,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_PPS,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .menu_skip_mask =
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_MODE,
+ .min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_START_CODE,
+ .min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ }
+};
+
+#define NUM_CTRLS ARRAY_SIZE(mtk_stateless_controls)
+
+static const struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MM21,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+#define DEFAULT_OUT_FMT_IDX 0
+#define DEFAULT_CAP_FMT_IDX 1
+
+static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
+
+static void mtk_vdec_stateless_set_dst_payload(struct mtk_vcodec_ctx *ctx,
+ struct vdec_fb *fb)
+{
+ struct mtk_video_dec_buf *vdec_frame_buf =
+ container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+ struct vb2_v4l2_buffer *vb = &vdec_frame_buf->m2m_buf.vb;
+ unsigned int cap_y_size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[0];
+
+ vb2_set_plane_payload(&vb->vb2_buf, 0, cap_y_size);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2) {
+ unsigned int cap_c_size =
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1];
+
+ vb2_set_plane_payload(&vb->vb2_buf, 1, cap_c_size);
+ }
+}
+
+static struct vdec_fb *vdec_get_cap_buffer(struct mtk_vcodec_ctx *ctx,
+ struct vb2_v4l2_buffer *vb2_v4l2)
+{
+ struct mtk_video_dec_buf *framebuf =
+ container_of(vb2_v4l2, struct mtk_video_dec_buf, m2m_buf.vb);
+ struct vdec_fb *pfb = &framebuf->frame_buffer;
+ struct vb2_buffer *dst_buf = &vb2_v4l2->vb2_buf;
+
+ pfb->base_y.va = NULL;
+ pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pfb->base_y.size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[0];
+
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2) {
+ pfb->base_c.va = NULL;
+ pfb->base_c.dma_addr =
+ vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ pfb->base_c.size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[1];
+ }
+ mtk_v4l2_debug(1, "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx frame_count = %d",
+ dst_buf->index, pfb, pfb->base_y.va, &pfb->base_y.dma_addr,
+ &pfb->base_c.dma_addr, pfb->base_y.size, ctx->decoded_frame_cnt);
+
+ return pfb;
+}
+
+static void vb2ops_vdec_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_hdl);
+}
+
+static void mtk_vdec_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx =
+ container_of(work, struct mtk_vcodec_ctx, decode_work);
+ struct mtk_vcodec_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_v4l2_src, *vb2_v4l2_dst;
+ struct vb2_buffer *vb2_src;
+ struct mtk_vcodec_mem *bs_src;
+ struct mtk_video_dec_buf *dec_buf_src;
+ struct media_request *src_buf_req;
+ struct vdec_fb *dst_buf;
+ bool res_chg = false;
+ int ret;
+
+ vb2_v4l2_src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!vb2_v4l2_src) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] no available source buffer", ctx->id);
+ return;
+ }
+
+ vb2_v4l2_dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ if (!vb2_v4l2_dst) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] no available destination buffer", ctx->id);
+ return;
+ }
+
+ vb2_src = &vb2_v4l2_src->vb2_buf;
+ dec_buf_src = container_of(vb2_v4l2_src, struct mtk_video_dec_buf,
+ m2m_buf.vb);
+ bs_src = &dec_buf_src->bs_buffer;
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id,
+ vb2_src->vb2_queue->type, vb2_src->index, vb2_src);
+
+ bs_src->va = NULL;
+ bs_src->dma_addr = vb2_dma_contig_plane_dma_addr(vb2_src, 0);
+ bs_src->size = (size_t)vb2_src->planes[0].bytesused;
+
+ mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, bs_src->va, &bs_src->dma_addr, bs_src->size, vb2_src);
+ /* Apply request controls. */
+ src_buf_req = vb2_src->req_obj.req;
+ if (src_buf_req)
+ v4l2_ctrl_request_setup(src_buf_req, &ctx->ctrl_hdl);
+ else
+ mtk_v4l2_err("vb2 buffer media request is NULL");
+
+ dst_buf = vdec_get_cap_buffer(ctx, vb2_v4l2_dst);
+ v4l2_m2m_buf_copy_metadata(vb2_v4l2_src, vb2_v4l2_dst, true);
+ ret = vdec_if_decode(ctx, bs_src, dst_buf, &res_chg);
+ if (ret) {
+ mtk_v4l2_err(" <===[%d], src_buf[%d] sz=0x%zx pts=%llu vdec_if_decode() ret=%d res_chg=%d===>",
+ ctx->id, vb2_src->index, bs_src->size,
+ vb2_src->timestamp, ret, res_chg);
+ if (ret == -EIO) {
+ mutex_lock(&ctx->lock);
+ dec_buf_src->error = true;
+ mutex_unlock(&ctx->lock);
+ }
+ }
+
+ mtk_vdec_stateless_set_dst_payload(ctx, dst_buf);
+
+ v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx,
+ ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+ v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
+}
+
+static void vb2ops_vdec_stateless_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2 = to_vb2_v4l2_buffer(vb);
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id, vb->vb2_queue->type, vb->index, vb);
+
+ mutex_lock(&ctx->lock);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+ mutex_unlock(&ctx->lock);
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return;
+
+ /* If an OUTPUT buffer, we may need to update the state */
+ if (ctx->state == MTK_STATE_INIT) {
+ ctx->state = MTK_STATE_HEADER;
+ mtk_v4l2_debug(1, "Init driver from init to header.");
+ } else {
+ mtk_v4l2_debug(3, "[%d] already init driver %d", ctx->id, ctx->state);
+ }
+}
+
+static int mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+{
+ bool res_chg;
+
+ return vdec_if_decode(ctx, NULL, NULL, &res_chg);
+}
+
+static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ unsigned int i;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, NUM_CTRLS);
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("v4l2_ctrl_handler_init failed\n");
+ return ctx->ctrl_hdl.error;
+ }
+
+ for (i = 0; i < NUM_CTRLS; i++) {
+ struct v4l2_ctrl_config cfg = mtk_stateless_controls[i].cfg;
+
+ v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &cfg, NULL);
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("Adding control %d failed %d", i, ctx->ctrl_hdl.error);
+ return ctx->ctrl_hdl.error;
+ }
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+
+ return 0;
+}
+
+static int fops_media_request_validate(struct media_request *mreq)
+{
+ const unsigned int buffer_cnt = vb2_request_buffer_cnt(mreq);
+
+ switch (buffer_cnt) {
+ case 1:
+ /* We expect exactly one buffer with the request */
+ break;
+ case 0:
+ mtk_v4l2_debug(1, "No buffer provided with the request");
+ return -ENOENT;
+ default:
+ mtk_v4l2_debug(1, "Too many buffers (%d) provided with the request",
+ buffer_cnt);
+ return -EINVAL;
+ }
+
+ return vb2_request_validate(mreq);
+}
+
+const struct media_device_ops mtk_vcodec_media_ops = {
+ .req_validate = fops_media_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
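
For context, a stateless client drives this validate/queue pair through the Request API. A minimal userspace sketch (illustrative only, not part of the patch; media_fd/video_fd are assumed open with queues already set up, the codec controls are supplied by the caller, and error handling is omitted):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/media.h>
	#include <linux/videodev2.h>

	static void submit_one_frame(int media_fd, int video_fd,
				     unsigned int out_index, unsigned int bs_size,
				     struct v4l2_ext_control *codec_ctrls,
				     unsigned int nctrls)
	{
		struct v4l2_ext_controls ctrls;
		struct v4l2_plane plane;
		struct v4l2_buffer buf;
		int req_fd;

		ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd);

		/* Attach the SPS/PPS/decode-params controls to the request. */
		memset(&ctrls, 0, sizeof(ctrls));
		ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
		ctrls.request_fd = req_fd;
		ctrls.count = nctrls;
		ctrls.controls = codec_ctrls;
		ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);

		/* Queue exactly one OUTPUT buffer against the request; this is
		 * the single-buffer case fops_media_request_validate() accepts. */
		memset(&buf, 0, sizeof(buf));
		memset(&plane, 0, sizeof(plane));
		plane.bytesused = bs_size;
		buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = out_index;
		buf.length = 1;
		buf.m.planes = &plane;
		buf.flags = V4L2_BUF_FLAG_REQUEST_FD;
		buf.request_fd = req_fd;
		ioctl(video_fd, VIDIOC_QBUF, &buf);

		/* Hand the request to the driver; v4l2_m2m_request_queue() then
		 * schedules the decode job that mtk_vdec_worker() runs. */
		ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);
	}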
+
+static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+{
+ struct vb2_queue *src_vq;
+
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+	/* Support the request API for the output plane */
+ src_vq->supports_requests = true;
+ src_vq->requires_requests = true;
+}
+
+static int vb2ops_vdec_out_buf_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static struct vb2_ops mtk_vdec_request_vb2_ops = {
+ .queue_setup = vb2ops_vdec_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vb2ops_vdec_start_streaming,
+ .stop_streaming = vb2ops_vdec_stop_streaming,
+
+ .buf_queue = vb2ops_vdec_stateless_buf_queue,
+ .buf_out_validate = vb2ops_vdec_out_buf_validate,
+ .buf_init = vb2ops_vdec_buf_init,
+ .buf_prepare = vb2ops_vdec_buf_prepare,
+ .buf_finish = vb2ops_vdec_buf_finish,
+ .buf_request_complete = vb2ops_vdec_buf_request_complete,
+};
+
+const struct mtk_vcodec_dec_pdata mtk_vdec_8183_pdata = {
+ .chip = MTK_MT8183,
+ .init_vdec_params = mtk_init_vdec_params,
+ .ctrls_setup = mtk_vcodec_dec_ctrls_setup,
+ .vdec_vb2_ops = &mtk_vdec_request_vb2_ops,
+ .vdec_formats = mtk_video_formats,
+ .num_formats = NUM_FORMATS,
+ .default_out_fmt = &mtk_video_formats[DEFAULT_OUT_FMT_IDX],
+ .default_cap_fmt = &mtk_video_formats[DEFAULT_CAP_FMT_IDX],
+ .vdec_framesizes = mtk_vdec_framesizes,
+ .num_framesizes = NUM_SUPPORTED_FRAMESIZE,
+ .uses_stateless_api = true,
+ .worker = mtk_vdec_worker,
+ .flush_decoder = mtk_vdec_flush_decoder,
+};
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index c6c7672fecfb..581522177308 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -13,6 +13,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include "mtk_vcodec_util.h"
@@ -249,7 +250,10 @@ struct vdec_pic_info {
* @decode_work: worker for the decoding
* @encode_work: worker for the encoding
* @last_decoded_picinfo: pic information get from latest decode
- * @empty_flush_buf: a fake size-0 capture buffer that indicates flush
+ * @empty_flush_buf: a fake size-0 capture buffer that indicates flush. Only
+ * to be used with encoder and stateful decoder.
+ * @is_flushing: set to true if flushing is in progress.
+ * @current_codec: current set input codec, in V4L2 pixel format
*
* @colorspace: enum v4l2_colorspace; supplemental to pixelformat
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
@@ -288,7 +292,10 @@ struct mtk_vcodec_ctx {
struct work_struct decode_work;
struct work_struct encode_work;
struct vdec_pic_info last_decoded_picinfo;
- struct mtk_video_dec_buf *empty_flush_buf;
+ struct v4l2_m2m_buffer empty_flush_buf;
+ bool is_flushing;
+
+ u32 current_codec;
enum v4l2_colorspace colorspace;
enum v4l2_ycbcr_encoding ycbcr_enc;
@@ -304,6 +311,50 @@ enum mtk_chip {
MTK_MT8173,
MTK_MT8183,
MTK_MT8192,
+ MTK_MT8195,
+};
+
+/**
+ * struct mtk_vcodec_dec_pdata - compatible data for each IC
+ * @init_vdec_params: init vdec params
+ * @ctrls_setup: init vcodec dec ctrls
+ * @worker: worker to start a decode job
+ * @flush_decoder: function that flushes the decoder
+ *
+ * @vdec_vb2_ops: struct vb2_ops
+ *
+ * @vdec_formats: supported video decoder formats
+ * @num_formats: count of video decoder formats
+ * @default_out_fmt: default output buffer format
+ * @default_cap_fmt: default capture buffer format
+ *
+ * @vdec_framesizes: supported video decoder frame sizes
+ * @num_framesizes: count of video decoder frame sizes
+ *
+ * @chip: chip this decoder is compatible with
+ *
+ * @uses_stateless_api: whether the decoder uses the stateless API with requests
+ */
+
+struct mtk_vcodec_dec_pdata {
+ void (*init_vdec_params)(struct mtk_vcodec_ctx *ctx);
+ int (*ctrls_setup)(struct mtk_vcodec_ctx *ctx);
+ void (*worker)(struct work_struct *work);
+ int (*flush_decoder)(struct mtk_vcodec_ctx *ctx);
+
+ struct vb2_ops *vdec_vb2_ops;
+
+ const struct mtk_video_fmt *vdec_formats;
+ const int num_formats;
+ const struct mtk_video_fmt *default_out_fmt;
+ const struct mtk_video_fmt *default_cap_fmt;
+
+ const struct mtk_codec_framesizes *vdec_framesizes;
+ const int num_framesizes;
+
+ enum mtk_chip chip;
+
+ bool uses_stateless_api;
};
/**
@@ -339,6 +390,7 @@ struct mtk_vcodec_enc_pdata {
* struct mtk_vcodec_dev - driver data
* @v4l2_dev: V4L2 device to register video devices for.
* @vfd_dec: Video device for decoder
+ * @mdev_dec: Media device for decoder
* @vfd_enc: Video device for encoder.
*
* @m2m_dev_dec: m2m device for decoder
@@ -349,6 +401,7 @@ struct mtk_vcodec_enc_pdata {
* @curr_ctx: The context that is waiting for codec hardware
*
* @reg_base: Mapped address of MTK Vcodec registers.
+ * @vdec_pdata: decoder IC-specific data
* @venc_pdata: encoder IC-specific data
*
* @fw_handler: used to communicate with the firmware.
@@ -375,6 +428,7 @@ struct mtk_vcodec_enc_pdata {
struct mtk_vcodec_dev {
struct v4l2_device v4l2_dev;
struct video_device *vfd_dec;
+ struct media_device mdev_dec;
struct video_device *vfd_enc;
struct v4l2_m2m_dev *m2m_dev_dec;
@@ -384,6 +438,7 @@ struct mtk_vcodec_dev {
spinlock_t irqlock;
struct mtk_vcodec_ctx *curr_ctx;
void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
+ const struct mtk_vcodec_dec_pdata *vdec_pdata;
const struct mtk_vcodec_enc_pdata *venc_pdata;
struct mtk_vcodec_fw *fw_handler;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 416f356af363..7457451ebff0 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -672,6 +672,7 @@ static int vidioc_venc_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ int ret;
if (ctx->state == MTK_STATE_ABORT) {
mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
@@ -679,7 +680,83 @@ static int vidioc_venc_dqbuf(struct file *file, void *priv,
return -EIO;
}
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+ ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+ if (ret)
+ return ret;
+
+ /*
+ * Complete flush if the user dequeued the 0-payload LAST buffer.
+ * We check the payload because a buffer with the LAST flag can also
+ * be seen during resolution changes. If we happen to be flushing at
+	 * that time, the last buffer before the resolution change could be
+	 * mistaken for the buffer generated by the flush and terminate the
+	 * flush earlier than intended.
+ */
+ if (!V4L2_TYPE_IS_OUTPUT(buf->type) &&
+ buf->flags & V4L2_BUF_FLAG_LAST &&
+ buf->m.planes[0].bytesused == 0 &&
+ ctx->is_flushing) {
+ /*
+ * Last CAPTURE buffer is dequeued, we can allow another flush
+ * to take place.
+ */
+ ctx->is_flushing = false;
+ }
+
+ return 0;
+}
+
+static int vidioc_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *cmd)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *src_vq, *dst_vq;
+ int ret;
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call to CMD after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, cmd);
+ if (ret)
+ return ret;
+
+ /* Calling START or STOP is invalid if a flush is in progress */
+ if (ctx->is_flushing)
+ return -EBUSY;
+
+ mtk_v4l2_debug(1, "encoder cmd=%u", cmd->cmd);
+
+ dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ switch (cmd->cmd) {
+ case V4L2_ENC_CMD_STOP:
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!vb2_is_streaming(src_vq)) {
+ mtk_v4l2_debug(1, "Output stream is off. No need to flush.");
+ return 0;
+ }
+ if (!vb2_is_streaming(dst_vq)) {
+ mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
+ return 0;
+ }
+ ctx->is_flushing = true;
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf.vb);
+ v4l2_m2m_try_schedule(ctx->m2m_ctx);
+ break;
+
+ case V4L2_ENC_CMD_START:
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
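
Seen from userspace, the drain sequence these handlers implement is: send V4L2_ENC_CMD_STOP, then dequeue CAPTURE buffers until the 0-payload LAST buffer arrives. A sketch of that sequence (illustrative only; fd is an open encoder node with both MPLANE queues streaming, error handling elided):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static void drain_encoder(int fd)
	{
		struct v4l2_encoder_cmd cmd;
		struct v4l2_plane plane;
		struct v4l2_buffer buf;

		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = V4L2_ENC_CMD_STOP;
		/* Queues ctx->empty_flush_buf and sets ctx->is_flushing. */
		ioctl(fd, VIDIOC_ENCODER_CMD, &cmd);

		for (;;) {
			memset(&buf, 0, sizeof(buf));
			memset(&plane, 0, sizeof(plane));
			buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
			buf.memory = V4L2_MEMORY_MMAP;
			buf.length = 1;
			buf.m.planes = &plane;
			if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
				break;
			/* The 0-payload LAST buffer completes the flush and
			 * lets vidioc_venc_dqbuf() clear ctx->is_flushing. */
			if ((buf.flags & V4L2_BUF_FLAG_LAST) &&
			    buf.m.planes[0].bytesused == 0)
				break;
		}
	}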
const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = {
@@ -715,6 +792,9 @@ const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = {
.vidioc_g_selection = vidioc_venc_g_selection,
.vidioc_s_selection = vidioc_venc_s_selection,
+
+ .vidioc_encoder_cmd = vidioc_encoder_cmd,
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
};
static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
@@ -793,7 +873,7 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
struct venc_enc_param param;
- int ret;
+ int ret, pm_ret;
int i;
/* Once state turn into MTK_STATE_ABORT, we need stop_streaming
@@ -845,9 +925,9 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
err_set_param:
- ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
- if (ret < 0)
- mtk_v4l2_err("pm_runtime_put fail %d", ret);
+ pm_ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
+ if (pm_ret < 0)
+ mtk_v4l2_err("pm_runtime_put fail %d", pm_ret);
err_start_stream:
for (i = 0; i < q->num_buffers; ++i) {
@@ -882,9 +962,38 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
dst_buf->vb2_buf.planes[0].bytesused = 0;
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
}
+ /* STREAMOFF on the CAPTURE queue completes any ongoing flush */
+ if (ctx->is_flushing) {
+ struct v4l2_m2m_buffer *b, *n;
+
+ mtk_v4l2_debug(1, "STREAMOFF called while flushing");
+ /*
+ * STREAMOFF could be called before the flush buffer is
+ * dequeued. Check whether empty flush buf is still in
+ * queue before removing it.
+ */
+ v4l2_m2m_for_each_src_buf_safe(ctx->m2m_ctx, b, n) {
+ if (b == &ctx->empty_flush_buf) {
+ v4l2_m2m_src_buf_remove_by_buf(ctx->m2m_ctx, &b->vb);
+ break;
+ }
+ }
+ ctx->is_flushing = false;
+ }
} else {
- while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
+ if (src_buf != &ctx->empty_flush_buf.vb)
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ }
+ if (ctx->is_flushing) {
+ /*
+ * If we are in the middle of a flush, put the flush
+ * buffer back into the queue so the next CAPTURE
+ * buffer gets returned with the LAST flag set.
+ */
+ v4l2_m2m_buf_queue(ctx->m2m_ctx,
+ &ctx->empty_flush_buf.vb);
+ }
}
if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
@@ -984,12 +1093,15 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
{
struct venc_enc_param enc_prm;
struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- struct mtk_video_enc_buf *mtk_buf =
- container_of(vb2_v4l2, struct mtk_video_enc_buf,
- m2m_buf.vb);
-
+ struct mtk_video_enc_buf *mtk_buf;
int ret = 0;
+ /* Don't upcast the empty flush buffer */
+ if (vb2_v4l2 == &ctx->empty_flush_buf.vb)
+ return 0;
+
+ mtk_buf = container_of(vb2_v4l2, struct mtk_video_enc_buf, m2m_buf.vb);
+
memset(&enc_prm, 0, sizeof(enc_prm));
if (mtk_buf->param_change == MTK_ENCODE_PARAM_NONE)
return 0;
@@ -1075,6 +1187,20 @@ static void mtk_venc_worker(struct work_struct *work)
}
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+
+ /*
+ * If we see the flush buffer, send an empty buffer with the LAST flag
+ * to the client. is_flushing will be reset at the time the buffer
+ * is dequeued.
+ */
+ if (src_buf == &ctx->empty_flush_buf.vb) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
+ return;
+ }
+
memset(&frm_buf, 0, sizeof(frm_buf));
for (i = 0; i < src_buf->vb2_buf.num_planes ; i++) {
frm_buf.fb_addr[i].dma_addr =
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index 45d1870c83dd..eed67394cf46 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -26,7 +26,7 @@
module_param(mtk_v4l2_dbg_level, int, S_IRUGO | S_IWUSR);
module_param(mtk_vcodec_dbg, bool, S_IRUGO | S_IWUSR);
-static const struct mtk_video_fmt mtk_video_formats_output_mt8173[] = {
+static const struct mtk_video_fmt mtk_video_formats_output[] = {
{
.fourcc = V4L2_PIX_FMT_NV12M,
.type = MTK_FMT_FRAME,
@@ -49,7 +49,7 @@ static const struct mtk_video_fmt mtk_video_formats_output_mt8173[] = {
},
};
-static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_avc[] = {
+static const struct mtk_video_fmt mtk_video_formats_capture_h264[] = {
{
.fourcc = V4L2_PIX_FMT_H264,
.type = MTK_FMT_ENC,
@@ -57,7 +57,7 @@ static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_avc[] = {
},
};
-static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_vp8[] = {
+static const struct mtk_video_fmt mtk_video_formats_capture_vp8[] = {
{
.fourcc = V4L2_PIX_FMT_VP8,
.type = MTK_FMT_ENC,
@@ -65,14 +65,6 @@ static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_vp8[] = {
},
};
-static const struct mtk_video_fmt mtk_video_formats_capture_mt8183[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .type = MTK_FMT_ENC,
- .num_planes = 1,
- },
-};
-
/* Wake up context wait_queue */
static void wake_up_ctx(struct mtk_vcodec_ctx *ctx, unsigned int reason)
{
@@ -131,6 +123,7 @@ static int fops_vcodec_open(struct file *file)
struct mtk_vcodec_dev *dev = video_drvdata(file);
struct mtk_vcodec_ctx *ctx = NULL;
int ret = 0;
+ struct vb2_queue *src_vq;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -157,13 +150,16 @@ static int fops_vcodec_open(struct file *file)
goto err_ctrls_setup;
}
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_enc, ctx,
- &mtk_vcodec_enc_queue_init);
+ &mtk_vcodec_enc_queue_init);
if (IS_ERR((__force void *)ctx->m2m_ctx)) {
ret = PTR_ERR((__force void *)ctx->m2m_ctx);
mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
ret);
goto err_m2m_ctx_init;
}
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ ctx->empty_flush_buf.vb.vb2_buf.vb2_queue = src_vq;
mtk_vcodec_enc_set_default_params(ctx);
if (v4l2_fh_is_singular(&ctx->fh)) {
@@ -392,34 +388,33 @@ err_enc_pm:
static const struct mtk_vcodec_enc_pdata mt8173_avc_pdata = {
.chip = MTK_MT8173,
- .capture_formats = mtk_video_formats_capture_mt8173_avc,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8173_avc),
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
- .min_bitrate = 1,
- .max_bitrate = 4000000,
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
+ .min_bitrate = 64,
+ .max_bitrate = 60000000,
.core_id = VENC_SYS,
};
static const struct mtk_vcodec_enc_pdata mt8173_vp8_pdata = {
.chip = MTK_MT8173,
- .capture_formats = mtk_video_formats_capture_mt8173_vp8,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8173_vp8),
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .capture_formats = mtk_video_formats_capture_vp8,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_vp8),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
.min_bitrate = 64,
- .max_bitrate = 4000000,
+ .max_bitrate = 9000000,
.core_id = VENC_LT_SYS,
};
static const struct mtk_vcodec_enc_pdata mt8183_pdata = {
.chip = MTK_MT8183,
.uses_ext = true,
- .capture_formats = mtk_video_formats_capture_mt8183,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8183),
- /* MT8183 supports the same output formats as MT8173 */
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
.min_bitrate = 64,
.max_bitrate = 40000000,
.core_id = VENC_SYS,
@@ -428,16 +423,27 @@ static const struct mtk_vcodec_enc_pdata mt8183_pdata = {
static const struct mtk_vcodec_enc_pdata mt8192_pdata = {
.chip = MTK_MT8192,
.uses_ext = true,
- /* MT8192 supports the same capture formats as MT8183 */
- .capture_formats = mtk_video_formats_capture_mt8183,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8183),
- /* MT8192 supports the same output formats as MT8173 */
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
.min_bitrate = 64,
.max_bitrate = 100000000,
.core_id = VENC_SYS,
};
+
+static const struct mtk_vcodec_enc_pdata mt8195_pdata = {
+ .chip = MTK_MT8195,
+ .uses_ext = true,
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
+ .min_bitrate = 64,
+ .max_bitrate = 100000000,
+ .core_id = VENC_SYS,
+};
+
static const struct of_device_id mtk_vcodec_enc_match[] = {
{.compatible = "mediatek,mt8173-vcodec-enc",
.data = &mt8173_avc_pdata},
@@ -445,6 +451,7 @@ static const struct of_device_id mtk_vcodec_enc_match[] = {
.data = &mt8173_vp8_pdata},
{.compatible = "mediatek,mt8183-vcodec-enc", .data = &mt8183_pdata},
{.compatible = "mediatek,mt8192-vcodec-enc", .data = &mt8192_pdata},
+ {.compatible = "mediatek,mt8195-vcodec-enc", .data = &mt8195_pdata},
{},
};
MODULE_DEVICE_TABLE(of, mtk_vcodec_enc_match);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c
new file mode 100644
index 000000000000..946c23088308
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c
@@ -0,0 +1,774 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-h264.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_drv_base.h"
+#include "../vdec_drv_if.h"
+#include "../vdec_vpu_if.h"
+
+#define BUF_PREDICTION_SZ (64 * 4096)
+#define MB_UNIT_LEN 16
+
+/* get used parameters for sps/pps */
+#define GET_MTK_VDEC_FLAG(cond, flag) \
+ { dst_param->cond = ((src_param->flags & (flag)) ? (1) : (0)); }
+#define GET_MTK_VDEC_PARAM(param) \
+ { dst_param->param = src_param->param; }
+/* motion vector size (bytes) for every macroblock */
+#define HW_MB_STORE_SZ 64
+
+#define H264_MAX_FB_NUM 17
+#define H264_MAX_MV_NUM 32
+#define HDR_PARSING_BUF_SZ 1024
+
+/**
+ * struct mtk_h264_dpb_info - h264 dpb information
+ * @y_dma_addr: Y bitstream physical address
+ * @c_dma_addr: CbCr bitstream physical address
+ * @reference_flag: reference picture flag (short/long term reference picture)
+ * @field: field picture flag
+ */
+struct mtk_h264_dpb_info {
+ dma_addr_t y_dma_addr;
+ dma_addr_t c_dma_addr;
+ int reference_flag;
+ int field;
+};
+
+/*
+ * struct mtk_h264_sps_param - parameters for sps
+ */
+struct mtk_h264_sps_param {
+ unsigned char chroma_format_idc;
+ unsigned char bit_depth_luma_minus8;
+ unsigned char bit_depth_chroma_minus8;
+ unsigned char log2_max_frame_num_minus4;
+ unsigned char pic_order_cnt_type;
+ unsigned char log2_max_pic_order_cnt_lsb_minus4;
+ unsigned char max_num_ref_frames;
+ unsigned char separate_colour_plane_flag;
+ unsigned short pic_width_in_mbs_minus1;
+ unsigned short pic_height_in_map_units_minus1;
+ unsigned int max_frame_nums;
+ unsigned char qpprime_y_zero_transform_bypass_flag;
+ unsigned char delta_pic_order_always_zero_flag;
+ unsigned char frame_mbs_only_flag;
+ unsigned char mb_adaptive_frame_field_flag;
+ unsigned char direct_8x8_inference_flag;
+ unsigned char reserved[3];
+};
+
+/*
+ * struct mtk_h264_pps_param - parameters for pps
+ */
+struct mtk_h264_pps_param {
+ unsigned char num_ref_idx_l0_default_active_minus1;
+ unsigned char num_ref_idx_l1_default_active_minus1;
+ unsigned char weighted_bipred_idc;
+ char pic_init_qp_minus26;
+ char chroma_qp_index_offset;
+ char second_chroma_qp_index_offset;
+ unsigned char entropy_coding_mode_flag;
+ unsigned char pic_order_present_flag;
+ unsigned char deblocking_filter_control_present_flag;
+ unsigned char constrained_intra_pred_flag;
+ unsigned char weighted_pred_flag;
+ unsigned char redundant_pic_cnt_present_flag;
+ unsigned char transform_8x8_mode_flag;
+ unsigned char scaling_matrix_present_flag;
+ unsigned char reserved[2];
+};
+
+struct slice_api_h264_scaling_matrix {
+ unsigned char scaling_list_4x4[6][16];
+ unsigned char scaling_list_8x8[6][64];
+};
+
+struct slice_h264_dpb_entry {
+ unsigned long long reference_ts;
+ unsigned short frame_num;
+ unsigned short pic_num;
+ /* Note that field is indicated by v4l2_buffer.field */
+ int top_field_order_cnt;
+ int bottom_field_order_cnt;
+ unsigned int flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */
+};
+
+/*
+ * struct slice_api_h264_decode_param - parameters for decode.
+ */
+struct slice_api_h264_decode_param {
+ struct slice_h264_dpb_entry dpb[16];
+ unsigned short num_slices;
+ unsigned short nal_ref_idc;
+ unsigned char ref_pic_list_p0[32];
+ unsigned char ref_pic_list_b0[32];
+ unsigned char ref_pic_list_b1[32];
+ int top_field_order_cnt;
+ int bottom_field_order_cnt;
+ unsigned int flags; /* V4L2_H264_DECODE_PARAM_FLAG_* */
+};
+
+/*
+ * struct mtk_h264_dec_slice_param - parameters for decode current frame
+ */
+struct mtk_h264_dec_slice_param {
+ struct mtk_h264_sps_param sps;
+ struct mtk_h264_pps_param pps;
+ struct slice_api_h264_scaling_matrix scaling_matrix;
+ struct slice_api_h264_decode_param decode_params;
+ struct mtk_h264_dpb_info h264_dpb_info[16];
+};
+
+/**
+ * struct h264_fb - h264 decode frame buffer information
+ * @vdec_fb_va : virtual address of struct vdec_fb
+ * @y_fb_dma : dma address of Y frame buffer (luma)
+ * @c_fb_dma : dma address of C frame buffer (chroma)
+ * @poc : picture order count of frame buffer
+ * @reserved : for 8-byte alignment
+ */
+struct h264_fb {
+ u64 vdec_fb_va;
+ u64 y_fb_dma;
+ u64 c_fb_dma;
+ s32 poc;
+ u32 reserved;
+};
+
+/**
+ * struct vdec_h264_dec_info - decode information
+ * @dpb_sz : decoding picture buffer size
+ * @resolution_changed : resolution change happened
+ * @realloc_mv_buf : flag to notify driver to re-allocate mv buffer
+ * @cap_num_planes : number of planes of capture buffer
+ * @bs_dma : Input bit-stream buffer dma address
+ * @y_fb_dma : Y frame buffer dma address
+ * @c_fb_dma : C frame buffer dma address
+ * @vdec_fb_va : VDEC frame buffer struct virtual address
+ */
+struct vdec_h264_dec_info {
+ u32 dpb_sz;
+ u32 resolution_changed;
+ u32 realloc_mv_buf;
+ u32 cap_num_planes;
+ u64 bs_dma;
+ u64 y_fb_dma;
+ u64 c_fb_dma;
+ u64 vdec_fb_va;
+};
+
+/**
+ * struct vdec_h264_vsi - shared memory for decode information exchange
+ * between VPU and Host.
+ * The memory is allocated by VPU then mapping to Host
+ * in vpu_dec_init() and freed in vpu_dec_deinit()
+ * by VPU.
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is write/reader on this item
+ * @pred_buf_dma : HW working predication buffer dma address (AP-W, VPU-R)
+ * @mv_buf_dma : HW working motion vector buffer dma address (AP-W, VPU-R)
+ * @dec : decode information (AP-R, VPU-W)
+ * @pic : picture information (AP-R, VPU-W)
+ * @crop : crop information (AP-R, VPU-W)
+ * @h264_slice_params : the parameters that the hardware uses to decode
+ */
+struct vdec_h264_vsi {
+ u64 pred_buf_dma;
+ u64 mv_buf_dma[H264_MAX_MV_NUM];
+ struct vdec_h264_dec_info dec;
+ struct vdec_pic_info pic;
+ struct v4l2_rect crop;
+ struct mtk_h264_dec_slice_param h264_slice_params;
+};
+
+/**
+ * struct vdec_h264_slice_inst - h264 decoder instance
+ * @num_nalu : number of NAL units decoded so far
+ * @ctx : pointer to mtk_vcodec_ctx
+ * @pred_buf : HW working predication buffer
+ * @mv_buf : HW working motion vector buffer
+ * @vpu : VPU instance
+ * @vsi_ctx : Local VSI data for this decoding context
+ * @h264_slice_param : the parameters that the hardware uses to decode
+ * @dpb : decoded picture buffer used to store reference buffer information
+ */
+struct vdec_h264_slice_inst {
+ unsigned int num_nalu;
+ struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_mem pred_buf;
+ struct mtk_vcodec_mem mv_buf[H264_MAX_MV_NUM];
+ struct vdec_vpu_inst vpu;
+ struct vdec_h264_vsi vsi_ctx;
+ struct mtk_h264_dec_slice_param h264_slice_param;
+
+ struct v4l2_h264_dpb_entry dpb[16];
+};
+
+static void *get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);
+
+ return ctrl->p_cur.p;
+}
+
+static void get_h264_dpb_list(struct vdec_h264_slice_inst *inst,
+ struct mtk_h264_dec_slice_param *slice_param)
+{
+ struct vb2_queue *vq;
+ struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ u64 index;
+
+ vq = v4l2_m2m_get_vq(inst->ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ for (index = 0; index < ARRAY_SIZE(slice_param->decode_params.dpb); index++) {
+ const struct slice_h264_dpb_entry *dpb;
+ int vb2_index;
+
+ dpb = &slice_param->decode_params.dpb[index];
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)) {
+ slice_param->h264_dpb_info[index].reference_flag = 0;
+ continue;
+ }
+
+ vb2_index = vb2_find_timestamp(vq, dpb->reference_ts, 0);
+ if (vb2_index < 0) {
+ mtk_vcodec_err(inst, "Reference invalid: dpb_index(%lld) reference_ts(%lld)",
+ index, dpb->reference_ts);
+ continue;
+ }
+ /* 1 for short term reference, 2 for long term reference */
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
+ slice_param->h264_dpb_info[index].reference_flag = 1;
+ else
+ slice_param->h264_dpb_info[index].reference_flag = 2;
+
+ vb = vq->bufs[vb2_index];
+ vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ slice_param->h264_dpb_info[index].field = vb2_v4l2->field;
+
+ slice_param->h264_dpb_info[index].y_dma_addr =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2) {
+ slice_param->h264_dpb_info[index].c_dma_addr =
+ vb2_dma_contig_plane_dma_addr(vb, 1);
+ }
+ }
+}
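
The reference_ts values that get_h264_dpb_list() resolves with vb2_find_timestamp() are the nanosecond form of the reference frame's CAPTURE buffer timestamp. A hypothetical userspace helper showing the expected conversion (assuming the classic timeval-based v4l2_buffer timestamp layout):

	/* Illustrative only: compute the reference_ts for a DPB entry from
	 * the v4l2_buffer of the reference frame's CAPTURE buffer. */
	#include <linux/videodev2.h>

	static __u64 v4l2_buffer_timestamp_ns(const struct v4l2_buffer *buf)
	{
		return (__u64)buf->timestamp.tv_sec * 1000000000ULL +
		       (__u64)buf->timestamp.tv_usec * 1000ULL;
	}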
+
+static void get_h264_sps_parameters(struct mtk_h264_sps_param *dst_param,
+ const struct v4l2_ctrl_h264_sps *src_param)
+{
+ GET_MTK_VDEC_PARAM(chroma_format_idc);
+ GET_MTK_VDEC_PARAM(bit_depth_luma_minus8);
+ GET_MTK_VDEC_PARAM(bit_depth_chroma_minus8);
+ GET_MTK_VDEC_PARAM(log2_max_frame_num_minus4);
+ GET_MTK_VDEC_PARAM(pic_order_cnt_type);
+ GET_MTK_VDEC_PARAM(log2_max_pic_order_cnt_lsb_minus4);
+ GET_MTK_VDEC_PARAM(max_num_ref_frames);
+ GET_MTK_VDEC_PARAM(pic_width_in_mbs_minus1);
+ GET_MTK_VDEC_PARAM(pic_height_in_map_units_minus1);
+
+ GET_MTK_VDEC_FLAG(separate_colour_plane_flag,
+ V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE);
+ GET_MTK_VDEC_FLAG(qpprime_y_zero_transform_bypass_flag,
+ V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS);
+ GET_MTK_VDEC_FLAG(delta_pic_order_always_zero_flag,
+ V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO);
+ GET_MTK_VDEC_FLAG(frame_mbs_only_flag,
+ V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY);
+ GET_MTK_VDEC_FLAG(mb_adaptive_frame_field_flag,
+ V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
+ GET_MTK_VDEC_FLAG(direct_8x8_inference_flag,
+ V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE);
+}
+
+static void get_h264_pps_parameters(struct mtk_h264_pps_param *dst_param,
+ const struct v4l2_ctrl_h264_pps *src_param)
+{
+ GET_MTK_VDEC_PARAM(num_ref_idx_l0_default_active_minus1);
+ GET_MTK_VDEC_PARAM(num_ref_idx_l1_default_active_minus1);
+ GET_MTK_VDEC_PARAM(weighted_bipred_idc);
+ GET_MTK_VDEC_PARAM(pic_init_qp_minus26);
+ GET_MTK_VDEC_PARAM(chroma_qp_index_offset);
+ GET_MTK_VDEC_PARAM(second_chroma_qp_index_offset);
+
+ GET_MTK_VDEC_FLAG(entropy_coding_mode_flag,
+ V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE);
+ GET_MTK_VDEC_FLAG(pic_order_present_flag,
+ V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT);
+ GET_MTK_VDEC_FLAG(weighted_pred_flag,
+ V4L2_H264_PPS_FLAG_WEIGHTED_PRED);
+ GET_MTK_VDEC_FLAG(deblocking_filter_control_present_flag,
+ V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT);
+ GET_MTK_VDEC_FLAG(constrained_intra_pred_flag,
+ V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED);
+ GET_MTK_VDEC_FLAG(redundant_pic_cnt_present_flag,
+ V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT);
+ GET_MTK_VDEC_FLAG(transform_8x8_mode_flag,
+ V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE);
+ GET_MTK_VDEC_FLAG(scaling_matrix_present_flag,
+ V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT);
+}
+
+static void
+get_h264_scaling_matrix(struct slice_api_h264_scaling_matrix *dst_matrix,
+ const struct v4l2_ctrl_h264_scaling_matrix *src_matrix)
+{
+ memcpy(dst_matrix->scaling_list_4x4, src_matrix->scaling_list_4x4,
+ sizeof(dst_matrix->scaling_list_4x4));
+
+ memcpy(dst_matrix->scaling_list_8x8, src_matrix->scaling_list_8x8,
+ sizeof(dst_matrix->scaling_list_8x8));
+}
+
+static void
+get_h264_decode_parameters(struct slice_api_h264_decode_param *dst_params,
+ const struct v4l2_ctrl_h264_decode_params *src_params,
+ const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES])
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dst_params->dpb); i++) {
+ struct slice_h264_dpb_entry *dst_entry = &dst_params->dpb[i];
+ const struct v4l2_h264_dpb_entry *src_entry = &dpb[i];
+
+ dst_entry->reference_ts = src_entry->reference_ts;
+ dst_entry->frame_num = src_entry->frame_num;
+ dst_entry->pic_num = src_entry->pic_num;
+ dst_entry->top_field_order_cnt = src_entry->top_field_order_cnt;
+ dst_entry->bottom_field_order_cnt =
+ src_entry->bottom_field_order_cnt;
+ dst_entry->flags = src_entry->flags;
+ }
+
+ /*
+ * num_slices is a leftover from the old H.264 support and is ignored
+ * by the firmware.
+ */
+ dst_params->num_slices = 0;
+ dst_params->nal_ref_idc = src_params->nal_ref_idc;
+ dst_params->top_field_order_cnt = src_params->top_field_order_cnt;
+ dst_params->bottom_field_order_cnt = src_params->bottom_field_order_cnt;
+ dst_params->flags = src_params->flags;
+}
+
+static bool dpb_entry_match(const struct v4l2_h264_dpb_entry *a,
+ const struct v4l2_h264_dpb_entry *b)
+{
+ return a->top_field_order_cnt == b->top_field_order_cnt &&
+ a->bottom_field_order_cnt == b->bottom_field_order_cnt;
+}
+
+/*
+ * Move DPB entries of dec_param that refer to frames already present in dpb
+ * into their existing slots, and move the remaining entries into free slots.
+ *
+ * This function is an adaptation of the similarly-named function in
+ * hantro_h264.c.
+ */
+static void update_dpb(const struct v4l2_ctrl_h264_decode_params *dec_param,
+ struct v4l2_h264_dpb_entry *dpb)
+{
+ DECLARE_BITMAP(new, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ DECLARE_BITMAP(in_use, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ DECLARE_BITMAP(used, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ unsigned int i, j;
+
+ /* Disable all entries by default, and mark the ones in use. */
+ for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ set_bit(i, in_use);
+ dpb[i].flags &= ~V4L2_H264_DPB_ENTRY_FLAG_ACTIVE;
+ }
+
+ /* Try to match new DPB entries with existing ones by their POCs. */
+ for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+
+ if (!(ndpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ /*
+	 * To cut off some comparisons, iterate only over target DPB
+	 * entries that were already in use.
+ */
+ for_each_set_bit(j, in_use, ARRAY_SIZE(dec_param->dpb)) {
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ cdpb = &dpb[j];
+ if (!dpb_entry_match(cdpb, ndpb))
+ continue;
+
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ /* Don't reiterate on this one. */
+ clear_bit(j, in_use);
+ break;
+ }
+
+ if (j == ARRAY_SIZE(dec_param->dpb))
+ set_bit(i, new);
+ }
+
+ /* For entries that could not be matched, use remaining free slots. */
+ for_each_set_bit(i, new, ARRAY_SIZE(dec_param->dpb)) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ /*
+		 * Both arrays are the same size, so there is no way
+		 * we can run out of space in the target array unless
+		 * something is buggy.
+ */
+ j = find_first_zero_bit(used, ARRAY_SIZE(dec_param->dpb));
+ if (WARN_ON(j >= ARRAY_SIZE(dec_param->dpb)))
+ return;
+
+ cdpb = &dpb[j];
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ }
+}
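
As a walk-through of update_dpb() with assumed POC values: if the previous dpb has active slots 0 (POC 0/1) and 1 (POC 4/5), and dec_param carries active entries with POCs 4/5 and 8/9, the first pass refreshes slot 1 in place with the matching 4/5 entry, while the unmatched 8/9 entry is recorded in the new bitmap and, in the second pass, lands in slot 0, the first slot not yet marked used.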
+
+/*
+ * The firmware expects unused reflist entries to have the value 0x20.
+ */
+static void fixup_ref_list(u8 *ref_list, size_t num_valid)
+{
+ memset(&ref_list[num_valid], 0x20, 32 - num_valid);
+}
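
For example, if the reflist builder reports num_valid = 2, entries 0 and 1 of each 32-byte list are kept and entries 2 through 31 are set to 0x20, presumably letting the firmware tell valid entries from padding.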
+
+static void get_vdec_decode_parameters(struct vdec_h264_slice_inst *inst)
+{
+ const struct v4l2_ctrl_h264_decode_params *dec_params =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
+ const struct v4l2_ctrl_h264_sps *sps =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_SPS);
+ const struct v4l2_ctrl_h264_pps *pps =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_PPS);
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_SCALING_MATRIX);
+ struct mtk_h264_dec_slice_param *slice_param = &inst->h264_slice_param;
+ struct v4l2_h264_reflist_builder reflist_builder;
+ u8 *p0_reflist = slice_param->decode_params.ref_pic_list_p0;
+ u8 *b0_reflist = slice_param->decode_params.ref_pic_list_b0;
+ u8 *b1_reflist = slice_param->decode_params.ref_pic_list_b1;
+
+ update_dpb(dec_params, inst->dpb);
+
+ get_h264_sps_parameters(&slice_param->sps, sps);
+ get_h264_pps_parameters(&slice_param->pps, pps);
+ get_h264_scaling_matrix(&slice_param->scaling_matrix, scaling_matrix);
+ get_h264_decode_parameters(&slice_param->decode_params, dec_params,
+ inst->dpb);
+ get_h264_dpb_list(inst, slice_param);
+
+ /* Build the reference lists */
+ v4l2_h264_init_reflist_builder(&reflist_builder, dec_params, sps,
+ inst->dpb);
+ v4l2_h264_build_p_ref_list(&reflist_builder, p0_reflist);
+ v4l2_h264_build_b_ref_lists(&reflist_builder, b0_reflist, b1_reflist);
+ /* Adapt the built lists to the firmware's expectations */
+ fixup_ref_list(p0_reflist, reflist_builder.num_valid);
+ fixup_ref_list(b0_reflist, reflist_builder.num_valid);
+ fixup_ref_list(b1_reflist, reflist_builder.num_valid);
+
+ memcpy(&inst->vsi_ctx.h264_slice_params, slice_param,
+ sizeof(inst->vsi_ctx.h264_slice_params));
+}
+
+static unsigned int get_mv_buf_size(unsigned int width, unsigned int height)
+{
+ int unit_size = (width / MB_UNIT_LEN) * (height / MB_UNIT_LEN) + 8;
+
+ return HW_MB_STORE_SZ * unit_size;
+}
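
As a worked example, using the alignment applied in get_pic_info() below: a 1920x1080 stream pads to buf_w = 1920 and buf_h = 1088, so unit_size = (1920 / 16) * (1088 / 16) + 8 = 8168 and each motion-vector buffer is 64 * 8168 = 522752 bytes, allocated H264_MAX_MV_NUM (32) times by alloc_mv_buf().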
+
+static int allocate_predication_buf(struct vdec_h264_slice_inst *inst)
+{
+ int err;
+
+ inst->pred_buf.size = BUF_PREDICTION_SZ;
+ err = mtk_vcodec_mem_alloc(inst->ctx, &inst->pred_buf);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate ppl buf");
+ return err;
+ }
+
+ inst->vsi_ctx.pred_buf_dma = inst->pred_buf.dma_addr;
+ return 0;
+}
+
+static void free_predication_buf(struct vdec_h264_slice_inst *inst)
+{
+ struct mtk_vcodec_mem *mem = &inst->pred_buf;
+
+ mtk_vcodec_debug_enter(inst);
+
+ inst->vsi_ctx.pred_buf_dma = 0;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+}
+
+static int alloc_mv_buf(struct vdec_h264_slice_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ int i;
+ int err;
+ struct mtk_vcodec_mem *mem = NULL;
+ unsigned int buf_sz = get_mv_buf_size(pic->buf_w, pic->buf_h);
+
+ mtk_v4l2_debug(3, "size = 0x%lx", buf_sz);
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ mem->size = buf_sz;
+ err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate mv buf");
+ return err;
+ }
+ inst->vsi_ctx.mv_buf_dma[i] = mem->dma_addr;
+ }
+
+ return 0;
+}
+
+static void free_mv_buf(struct vdec_h264_slice_inst *inst)
+{
+ int i;
+ struct mtk_vcodec_mem *mem;
+
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ inst->vsi_ctx.mv_buf_dma[i] = 0;
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ }
+}
+
+static void get_pic_info(struct vdec_h264_slice_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+
+ ctx->picinfo.buf_w = (ctx->picinfo.pic_w + 15) & 0xFFFFFFF0;
+ ctx->picinfo.buf_h = (ctx->picinfo.pic_h + 31) & 0xFFFFFFE0;
+ ctx->picinfo.fb_sz[0] = ctx->picinfo.buf_w * ctx->picinfo.buf_h;
+ ctx->picinfo.fb_sz[1] = ctx->picinfo.fb_sz[0] >> 1;
+ inst->vsi_ctx.dec.cap_num_planes =
+ ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes;
+
+ *pic = ctx->picinfo;
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h);
+ mtk_vcodec_debug(inst, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
+ ctx->picinfo.fb_sz[1]);
+
+ if (ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w ||
+ ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h) {
+ inst->vsi_ctx.dec.resolution_changed = true;
+ if (ctx->last_decoded_picinfo.buf_w != ctx->picinfo.buf_w ||
+ ctx->last_decoded_picinfo.buf_h != ctx->picinfo.buf_h)
+ inst->vsi_ctx.dec.realloc_mv_buf = true;
+
+ mtk_v4l2_debug(1, "ResChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
+ inst->vsi_ctx.dec.resolution_changed,
+ inst->vsi_ctx.dec.realloc_mv_buf,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h);
+ }
+}
+
+static void get_crop_info(struct vdec_h264_slice_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = inst->vsi_ctx.crop.left;
+ cr->top = inst->vsi_ctx.crop.top;
+ cr->width = inst->vsi_ctx.crop.width;
+ cr->height = inst->vsi_ctx.crop.height;
+
+ mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static void get_dpb_size(struct vdec_h264_slice_inst *inst, unsigned int *dpb_sz)
+{
+ *dpb_sz = inst->vsi_ctx.dec.dpb_sz;
+ mtk_vcodec_debug(inst, "sz=%d", *dpb_sz);
+}
+
+static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_h264_slice_inst *inst;
+ int err;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = SCP_IPI_VDEC_H264;
+ inst->vpu.ctx = ctx;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
+ goto error_free_inst;
+ }
+
+ memcpy(&inst->vsi_ctx, inst->vpu.vsi, sizeof(inst->vsi_ctx));
+ inst->vsi_ctx.dec.resolution_changed = true;
+ inst->vsi_ctx.dec.realloc_mv_buf = true;
+
+ err = allocate_predication_buf(inst);
+ if (err)
+ goto error_deinit;
+
+ mtk_vcodec_debug(inst, "struct size = %d,%d,%d,%d\n",
+ sizeof(struct mtk_h264_sps_param),
+ sizeof(struct mtk_h264_pps_param),
+ sizeof(struct mtk_h264_dec_slice_param),
+ sizeof(struct mtk_h264_dpb_info));
+
+ mtk_vcodec_debug(inst, "H264 Instance >> %p", inst);
+
+ ctx->drv_handle = inst;
+ return 0;
+
+error_deinit:
+ vpu_dec_deinit(&inst->vpu);
+
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static void vdec_h264_slice_deinit(void *h_vdec)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+
+ mtk_vcodec_debug_enter(inst);
+
+ vpu_dec_deinit(&inst->vpu);
+ free_predication_buf(inst);
+ free_mv_buf(inst);
+
+ kfree(inst);
+}
+
+static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+ const struct v4l2_ctrl_h264_decode_params *dec_params =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ u32 data[2];
+ u64 y_fb_dma;
+ u64 c_fb_dma;
+ int err;
+
+ /* bs NULL means flush decoder */
+ if (!bs)
+ return vpu_dec_reset(vpu);
+
+ y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+ mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+ ++inst->num_nalu, y_fb_dma, c_fb_dma, fb);
+
+ inst->vsi_ctx.dec.bs_dma = (uint64_t)bs->dma_addr;
+ inst->vsi_ctx.dec.y_fb_dma = y_fb_dma;
+ inst->vsi_ctx.dec.c_fb_dma = c_fb_dma;
+ inst->vsi_ctx.dec.vdec_fb_va = (u64)(uintptr_t)fb;
+
+ get_vdec_decode_parameters(inst);
+ data[0] = bs->size;
+ /*
+ * Reconstruct the first byte of the NAL unit, as the firmware requests
+ * that information to be passed even though it is present in the stream
+ * itself...
+ */
+ data[1] = (dec_params->nal_ref_idc << 5) |
+ ((dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC)
+ ? 0x5 : 0x1);
+
+ *res_chg = inst->vsi_ctx.dec.resolution_changed;
+ if (*res_chg) {
+ mtk_vcodec_debug(inst, "- resolution changed -");
+ if (inst->vsi_ctx.dec.realloc_mv_buf) {
+ err = alloc_mv_buf(inst, &inst->ctx->picinfo);
+ inst->vsi_ctx.dec.realloc_mv_buf = false;
+ if (err)
+ goto err_free_fb_out;
+ }
+ *res_chg = false;
+ }
+
+ memcpy(inst->vpu.vsi, &inst->vsi_ctx, sizeof(inst->vsi_ctx));
+ err = vpu_dec_start(vpu, data, 2);
+ if (err)
+ goto err_free_fb_out;
+
+	/* wait for the decoder-done interrupt */
+ err = mtk_vcodec_wait_for_done_ctx(inst->ctx,
+ MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+ if (err)
+ goto err_free_fb_out;
+ vpu_dec_end(vpu);
+
+ memcpy(&inst->vsi_ctx, inst->vpu.vsi, sizeof(inst->vsi_ctx));
+ mtk_vcodec_debug(inst, "\n - NALU[%d]", inst->num_nalu);
+ return 0;
+
+err_free_fb_out:
+ mtk_vcodec_err(inst, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
+ return err;
+}
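
As a worked example of the data[] header reconstruction above: an IDR slice with nal_ref_idc = 3 gives data[1] = (3 << 5) | 0x5 = 0x65, exactly the first NAL unit byte in the bitstream (forbidden_zero_bit 0, nal_ref_idc 3, nal_unit_type 5), while a non-reference, non-IDR slice yields 0x01.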
+
+static int vdec_h264_slice_get_param(void *h_vdec, enum vdec_get_param_type type, void *out)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+
+ switch (type) {
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ get_dpb_size(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ default:
+ mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct vdec_common_if vdec_h264_slice_if = {
+ .init = vdec_h264_slice_init,
+ .decode = vdec_h264_slice_decode,
+ .get_param = vdec_h264_slice_get_param,
+ .deinit = vdec_h264_slice_deinit,
+};
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
index b18743b906ea..42008243ceac 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
@@ -19,6 +19,9 @@ int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
int ret = 0;
switch (fourcc) {
+ case V4L2_PIX_FMT_H264_SLICE:
+ ctx->dec_if = &vdec_h264_slice_if;
+ break;
case V4L2_PIX_FMT_H264:
ctx->dec_if = &vdec_h264_if;
break;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
index ec8f4e8d3d23..d467e8af4a84 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
@@ -55,6 +55,7 @@ struct vdec_fb_node {
};
extern const struct vdec_common_if vdec_h264_if;
+extern const struct vdec_common_if vdec_h264_slice_if;
extern const struct vdec_common_if vdec_vp8_if;
extern const struct vdec_common_if vdec_vp9_if;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
index 68e8d5cb16d7..5f45a537beb4 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
@@ -29,11 +29,15 @@ enum vdec_ipi_msgid {
/**
* struct vdec_ap_ipi_cmd - generic AP to VPU ipi command format
* @msg_id : vdec_ipi_msgid
- * @vpu_inst_addr : VPU decoder instance address
+ * @vpu_inst_addr : VPU decoder instance address. Used if ABI version < 2.
+ * @inst_id : instance ID. Used if the ABI version >= 2.
*/
struct vdec_ap_ipi_cmd {
uint32_t msg_id;
- uint32_t vpu_inst_addr;
+ union {
+ uint32_t vpu_inst_addr;
+ uint32_t inst_id;
+ };
};
/**
@@ -63,7 +67,8 @@ struct vdec_ap_ipi_init {
/**
* struct vdec_ap_ipi_dec_start - for AP_IPIMSG_DEC_START
* @msg_id : AP_IPIMSG_DEC_START
- * @vpu_inst_addr : VPU decoder instance address
+ * @vpu_inst_addr : VPU decoder instance address. Used if ABI version < 2.
+ * @inst_id : instance ID. Used if the ABI version >= 2.
* @data : Header info
* H264 decoder [0]:buf_sz [1]:nal_start
* VP8 decoder [0]:width/height
@@ -72,7 +77,10 @@ struct vdec_ap_ipi_init {
*/
struct vdec_ap_ipi_dec_start {
uint32_t msg_id;
- uint32_t vpu_inst_addr;
+ union {
+ uint32_t vpu_inst_addr;
+ uint32_t inst_id;
+ };
uint32_t data[3];
uint32_t reserved;
};
@@ -83,12 +91,19 @@ struct vdec_ap_ipi_dec_start {
* @status : VPU execution result
* @ap_inst_addr : AP vcodec_vpu_inst instance address
* @vpu_inst_addr : VPU decoder instance address
+ * @vdec_abi_version: ABI version of the firmware. Kernel can use it to
+ * ensure that it is compatible with the firmware.
+ * This field is not valid for MT8173 and must not be
+ * accessed for this chip.
+ * @inst_id : instance ID. Valid only if the ABI version >= 2.
*/
struct vdec_vpu_ipi_init_ack {
uint32_t msg_id;
int32_t status;
uint64_t ap_inst_addr;
uint32_t vpu_inst_addr;
+ uint32_t vdec_abi_version;
+ uint32_t inst_id;
};
#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 58b0e6fa8fd2..5dffc459a33d 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -24,6 +24,34 @@ static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
vpu->inst_addr = msg->vpu_inst_addr;
mtk_vcodec_debug(vpu, "- vpu_inst_addr = 0x%x", vpu->inst_addr);
+
+ /* Set default ABI version if dealing with unversioned firmware. */
+ vpu->fw_abi_version = 0;
+ /*
+ * Instance ID is only used if ABI version >= 2. Initialize it with
+ * garbage by default.
+ */
+ vpu->inst_id = 0xdeadbeef;
+
+ /* Firmware version field does not exist on MT8173. */
+ if (vpu->ctx->dev->vdec_pdata->chip == MTK_MT8173)
+ return;
+
+ /* Check firmware version. */
+ vpu->fw_abi_version = msg->vdec_abi_version;
+ mtk_vcodec_debug(vpu, "firmware version 0x%x\n", vpu->fw_abi_version);
+ switch (vpu->fw_abi_version) {
+ case 1:
+ break;
+ case 2:
+ vpu->inst_id = msg->inst_id;
+ break;
+ default:
+ mtk_vcodec_err(vpu, "unhandled firmware version 0x%x\n",
+ vpu->fw_abi_version);
+ vpu->failure = 1;
+ break;
+ }
}
/*
@@ -44,6 +72,9 @@ static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
mtk_vcodec_debug(vpu, "+ id=%X", msg->msg_id);
+ vpu->failure = msg->status;
+ vpu->signaled = 1;
+
if (msg->status == 0) {
switch (msg->msg_id) {
case VPU_IPIMSG_DEC_INIT_ACK:
@@ -63,8 +94,6 @@ static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
}
mtk_vcodec_debug(vpu, "- id=%X", msg->msg_id);
- vpu->failure = msg->status;
- vpu->signaled = 1;
}
static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
@@ -96,7 +125,10 @@ static int vcodec_send_ap_ipi(struct vdec_vpu_inst *vpu, unsigned int msg_id)
memset(&msg, 0, sizeof(msg));
msg.msg_id = msg_id;
- msg.vpu_inst_addr = vpu->inst_addr;
+ if (vpu->fw_abi_version < 2)
+ msg.vpu_inst_addr = vpu->inst_addr;
+ else
+ msg.inst_id = vpu->inst_id;
err = vcodec_vpu_send_msg(vpu, &msg, sizeof(msg));
mtk_vcodec_debug(vpu, "- id=%X ret=%d", msg_id, err);
@@ -146,7 +178,10 @@ int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len)
memset(&msg, 0, sizeof(msg));
msg.msg_id = AP_IPIMSG_DEC_START;
- msg.vpu_inst_addr = vpu->inst_addr;
+ if (vpu->fw_abi_version < 2)
+ msg.vpu_inst_addr = vpu->inst_addr;
+ else
+ msg.inst_id = vpu->inst_id;
for (i = 0; i < len; i++)
msg.data[i] = data[i];
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
index 85224eb7e34b..c2ed5b6cab8b 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -18,6 +18,9 @@ struct mtk_vcodec_ctx;
* for control and info share
* @failure : VPU execution result status, 0: success, others: fail
* @inst_addr : VPU decoder instance address
+ * @fw_abi_version : ABI version of the firmware.
+ * @inst_id : if fw_abi_version >= 2, contains the instance ID to be given
+ * in place of inst_addr in messages.
* @signaled : 1 - Host has received ack message from VPU, 0 - not received
* @ctx : context for v4l2 layer integration
* @dev : platform device of VPU
@@ -29,6 +32,8 @@ struct vdec_vpu_inst {
void *vsi;
int32_t failure;
uint32_t inst_addr;
+ uint32_t fw_abi_version;
+ uint32_t inst_id;
unsigned int signaled;
struct mtk_vcodec_ctx *ctx;
wait_queue_head_t wq;
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index ec290dde59cf..7f1647da0ade 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -848,7 +848,8 @@ static int mtk_vpu_probe(struct platform_device *pdev)
vpu->wdt.wq = create_singlethread_workqueue("vpu_wdt");
if (!vpu->wdt.wq) {
dev_err(dev, "initialize wdt workqueue failed\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto clk_unprepare;
}
INIT_WORK(&vpu->wdt.ws, vpu_wdt_reset_func);
mutex_init(&vpu->vpu_mutex);
@@ -942,6 +943,8 @@ disable_vpu_clk:
vpu_clock_disable(vpu);
workqueue_destroy:
destroy_workqueue(vpu->wdt.wq);
+clk_unprepare:
+ clk_unprepare(vpu->clk);
return ret;
}
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 08a5473b5610..3ce84d0f078c 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -804,7 +804,6 @@ static int emmaprp_probe(struct platform_device *pdev)
{
struct emmaprp_dev *pcdev;
struct video_device *vfd;
- struct resource *res;
int irq, ret;
pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
@@ -822,8 +821,7 @@ static int emmaprp_probe(struct platform_device *pdev)
if (IS_ERR(pcdev->clk_emma_ahb))
return PTR_ERR(pcdev->clk_emma_ahb);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pcdev->base_emma = devm_ioremap_resource(&pdev->dev, res);
+ pcdev->base_emma = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcdev->base_emma))
return PTR_ERR(pcdev->base_emma);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 21193f0b7f61..3e0d9af7ffec 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -277,7 +277,7 @@ static int video_mode_to_dss_mode(struct omap_vout_device *vout)
*/
static int omapvid_setup_overlay(struct omap_vout_device *vout,
struct omap_overlay *ovl, int posx, int posy, int outw,
- int outh, u32 addr)
+ int outh, dma_addr_t addr)
{
int ret = 0;
struct omap_overlay_info info;
@@ -352,7 +352,7 @@ setup_ovl_err:
/*
* Initialize the overlay structure
*/
-static int omapvid_init(struct omap_vout_device *vout, u32 addr)
+static int omapvid_init(struct omap_vout_device *vout, dma_addr_t addr)
{
int ret = 0, i;
struct v4l2_window *win;
@@ -479,7 +479,8 @@ err:
static void omap_vout_isr(void *arg, unsigned int irqstatus)
{
int ret, fid, mgr_id;
- u32 addr, irq;
+ dma_addr_t addr;
+ u32 irq;
struct omap_overlay *ovl;
u64 ts;
struct omapvideo_info *ovid;
@@ -543,7 +544,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
struct omap_vout_buffer, queue);
list_del(&vout->next_frm->queue);
- addr = (unsigned long)vout->queued_buf_addr[vout->next_frm->vbuf.vb2_buf.index]
+ addr = vout->queued_buf_addr[vout->next_frm->vbuf.vb2_buf.index]
+ vout->cropped_offset;
/* First save the configuration in ovelray structure */
@@ -976,7 +977,7 @@ static int omap_vout_vb2_prepare(struct vb2_buffer *vb)
vb2_set_plane_payload(vb, 0, vout->pix.sizeimage);
voutbuf->vbuf.field = V4L2_FIELD_NONE;
- vout->queued_buf_addr[vb->index] = (u8 *)buf_phy_addr;
+ vout->queued_buf_addr[vb->index] = buf_phy_addr;
if (ovid->rotation_type == VOUT_ROT_VRFB)
return omap_vout_prepare_vrfb(vout, vb);
return 0;
@@ -995,7 +996,8 @@ static int omap_vout_vb2_start_streaming(struct vb2_queue *vq, unsigned int coun
struct omap_vout_device *vout = vb2_get_drv_priv(vq);
struct omapvideo_info *ovid = &vout->vid_info;
struct omap_vout_buffer *buf, *tmp;
- u32 addr = 0, mask = 0;
+ dma_addr_t addr = 0;
+ u32 mask = 0;
int ret, j;
/* Get the next frame from the buffer queue */
@@ -1018,7 +1020,7 @@ static int omap_vout_vb2_start_streaming(struct vb2_queue *vq, unsigned int coun
goto out;
}
- addr = (unsigned long)vout->queued_buf_addr[vout->cur_frm->vbuf.vb2_buf.index]
+ addr = vout->queued_buf_addr[vout->cur_frm->vbuf.vb2_buf.index]
+ vout->cropped_offset;
mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
@@ -1476,7 +1478,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
* To be precise: fbuf.base should match smem_start of
* struct fb_fix_screeninfo.
*/
- vout->fbuf.base = (void *)info.paddr;
+ vout->fbuf.base = (void *)(uintptr_t)info.paddr;
/* Set VRFB as rotation_type for omap2 and omap3 */
if (omap_vout_dss_omap24xx() || omap_vout_dss_omap34xx())
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 6bd672cbdb62..0cfa0169875f 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -305,7 +305,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
/* Store buffers physical address into an array. Addresses
* from this array will be used to configure DSS */
rotation = calc_rotation(vout);
- vout->queued_buf_addr[vb->index] = (u8 *)
+ vout->queued_buf_addr[vb->index] =
vout->vrfb_context[vb->index].paddr[rotation];
return 0;
}
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 1cff6dea1879..b586193341d2 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -170,7 +170,7 @@ struct omap_vout_device {
struct omap_vout_buffer *cur_frm, *next_frm;
spinlock_t vbq_lock; /* spinlock for dma_queue */
struct list_head dma_queue;
- u8 *queued_buf_addr[VIDEO_MAX_FRAME];
+ dma_addr_t queued_buf_addr[VIDEO_MAX_FRAME];
u32 cropped_offset;
s32 tv_field1_offset;
void *isr_handle;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 20f59c59ff8a..6de377ce281d 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2003,7 +2003,7 @@ static int isp_remove(struct platform_device *pdev)
{
struct isp_device *isp = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&isp->notifier);
+ v4l2_async_nf_unregister(&isp->notifier);
isp_unregister_entities(isp);
isp_cleanup_modules(isp);
isp_xclk_cleanup(isp);
@@ -2013,7 +2013,7 @@ static int isp_remove(struct platform_device *pdev)
__omap3isp_put(isp, false);
media_entity_enum_cleanup(&isp->crashed);
- v4l2_async_notifier_cleanup(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
kfree(isp);
@@ -2172,8 +2172,9 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
if (!ret) {
- isd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isp->notifier, ep, struct isp_async_subdev);
+		isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep,
+						      struct isp_async_subdev);
if (!IS_ERR(isd))
isp_parse_of_parallel_endpoint(isp->dev, &vep, &isd->bus);
}
@@ -2211,8 +2212,10 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
}
if (!ret) {
- isd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isp->notifier, ep, struct isp_async_subdev);
+		isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep,
+						      struct isp_async_subdev);
if (!IS_ERR(isd)) {
switch (vep.bus_type) {
@@ -2289,7 +2292,7 @@ static int isp_probe(struct platform_device *pdev)
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
- v4l2_async_notifier_init(&isp->notifier);
+ v4l2_async_nf_init(&isp->notifier);
isp->dev = &pdev->dev;
ret = isp_parse_of_endpoints(isp);
@@ -2418,7 +2421,7 @@ static int isp_probe(struct platform_device *pdev)
isp->notifier.ops = &isp_subdev_notifier_ops;
- ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
+ ret = v4l2_async_nf_register(&isp->v4l2_dev, &isp->notifier);
if (ret)
goto error_register_entities;
@@ -2437,7 +2440,7 @@ error_isp:
isp_xclk_cleanup(isp);
__omap3isp_put(isp, false);
error:
- v4l2_async_notifier_cleanup(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
mutex_destroy(&isp->isp_mutex);
error_release_isp:
kfree(isp);
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index ec4c010644ca..3ba00b0f9320 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2249,10 +2249,9 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
pcdev->platform_flags |= PXA_CAMERA_PCLK_EN;
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &pcdev->notifier,
- of_fwnode_handle(np),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&pcdev->notifier,
+ of_fwnode_handle(np),
+ struct v4l2_async_subdev);
if (IS_ERR(asd))
err = PTR_ERR(asd);
out:
@@ -2289,7 +2288,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
if (IS_ERR(pcdev->clk))
return PTR_ERR(pcdev->clk);
- v4l2_async_notifier_init(&pcdev->notifier);
+ v4l2_async_nf_init(&pcdev->notifier);
pcdev->res = res;
pcdev->pdata = pdev->dev.platform_data;
if (pcdev->pdata) {
@@ -2297,11 +2296,10 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->platform_flags = pcdev->pdata->flags;
pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
- asd = v4l2_async_notifier_add_i2c_subdev(
- &pcdev->notifier,
- pcdev->pdata->sensor_i2c_adapter_id,
- pcdev->pdata->sensor_i2c_address,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_i2c(&pcdev->notifier,
+ pcdev->pdata->sensor_i2c_adapter_id,
+ pcdev->pdata->sensor_i2c_address,
+ struct v4l2_async_subdev);
if (IS_ERR(asd))
err = PTR_ERR(asd);
} else if (pdev->dev.of_node) {
@@ -2402,13 +2400,13 @@ static int pxa_camera_probe(struct platform_device *pdev)
goto exit_notifier_cleanup;
pcdev->notifier.ops = &pxa_camera_sensor_ops;
- err = v4l2_async_notifier_register(&pcdev->v4l2_dev, &pcdev->notifier);
+ err = v4l2_async_nf_register(&pcdev->v4l2_dev, &pcdev->notifier);
if (err)
goto exit_notifier_cleanup;
return 0;
exit_notifier_cleanup:
- v4l2_async_notifier_cleanup(&pcdev->notifier);
+ v4l2_async_nf_cleanup(&pcdev->notifier);
v4l2_device_unregister(&pcdev->v4l2_dev);
exit_deactivate:
pxa_camera_deactivate(pcdev);
@@ -2432,8 +2430,8 @@ static int pxa_camera_remove(struct platform_device *pdev)
dma_release_channel(pcdev->dma_chans[1]);
dma_release_channel(pcdev->dma_chans[2]);
- v4l2_async_notifier_unregister(&pcdev->notifier);
- v4l2_async_notifier_cleanup(&pcdev->notifier);
+ v4l2_async_nf_unregister(&pcdev->notifier);
+ v4l2_async_nf_cleanup(&pcdev->notifier);
v4l2_device_unregister(&pcdev->v4l2_dev);
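
The drivers in this section all perform the same mechanical rename from the long v4l2_async_notifier_*() names to the shorter v4l2_async_nf_*() ones. As a reference, a minimal sketch of the whole notifier lifecycle with the new names, assuming the 5.16-era signatures used in this series (my_drv and my_drv_notifier_ops are hypothetical):

	static int my_drv_register_sensor(struct my_drv *priv,
					  struct fwnode_handle *ep)
	{
		struct v4l2_async_subdev *asd;
		int ret;

		v4l2_async_nf_init(&priv->notifier);

		/* describe the remote sub-device at the far end of 'ep' */
		asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
						      struct v4l2_async_subdev);
		if (IS_ERR(asd)) {
			ret = PTR_ERR(asd);
			goto cleanup;
		}

		priv->notifier.ops = &my_drv_notifier_ops;
		ret = v4l2_async_nf_register(&priv->v4l2_dev, &priv->notifier);
		if (!ret)
			return 0;
	cleanup:
		v4l2_async_nf_cleanup(&priv->notifier);
		return ret;
	}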
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
index 8594d275b41d..5c083d70d495 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
@@ -177,7 +177,7 @@
#define VFE_BUS_WM_FRAME_INC(n) (0x2258 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n) (0x225c + (n) * 0x100)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
@@ -185,7 +185,10 @@ static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
u32 rev = (hw_version >> 16) & 0xFFF;
u32 step = hw_version & 0xFFFF;
- dev_err(dev, "VFE HW Version = %u.%u.%u\n", gen, rev, step);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n",
+ gen, rev, step);
+
+ return hw_version;
}
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
@@ -771,7 +774,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_170 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index 53c56a8d4545..42047b11ba52 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -210,11 +210,13 @@
#define MSM_VFE_VFE0_UB_SIZE 1023
#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_dbg(dev, "VFE HW Version = 0x%08x\n", hw_version);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
+
+ return hw_version;
}
static u16 vfe_get_ub_size(u8 vfe_id)
@@ -288,22 +290,14 @@ static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
- switch (pix->pixelformat) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
- *bytesperline = pix->plane_fmt[0].bytesperline;
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+
+ if (pix->pixelformat == V4L2_PIX_FMT_NV12 ||
+ pix->pixelformat == V4L2_PIX_FMT_NV21)
if (plane == 1)
*height /= 2;
- break;
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
- *bytesperline = pix->plane_fmt[0].bytesperline;
- break;
- }
}
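
The vfe_get_wm_sizes() rewrite here (and the matching ones in the 4-7 and 4-8 files below) hoists the assignments that every case performed identically above the switch, leaving only the per-format differences inside it. Condensed to a standalone sketch with illustrative names:

	static void get_plane_size(const struct v4l2_pix_format_mplane *pix,
				   u8 plane, u16 *width, u16 *height)
	{
		*width = pix->width;	/* common to every pixel format */
		*height = pix->height;

		/* NV12/NV21 carry chroma at half height in plane 1 */
		if ((pix->pixelformat == V4L2_PIX_FMT_NV12 ||
		     pix->pixelformat == V4L2_PIX_FMT_NV21) && plane == 1)
			*height /= 2;
	}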
static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
@@ -1004,7 +998,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_4_1 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index a59635217758..ab2d57bdf5e7 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -254,11 +254,13 @@
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_err(dev, "VFE HW Version = 0x%08x\n", hw_version);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
+
+ return hw_version;
}
static u16 vfe_get_ub_size(u8 vfe_id)
@@ -368,30 +370,26 @@ static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
+ *width = pix->width;
+ *height = pix->height;
+
switch (pix->pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
if (plane == 1)
*height /= 2;
break;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_UYVY:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[plane].bytesperline;
break;
-
}
}
@@ -1196,7 +1194,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_4_7 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
index 998429dbb65c..7e6b62c930ac 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
@@ -247,11 +247,13 @@
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_err(dev, "VFE HW Version = 0x%08x\n", hw_version);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
+
+ return hw_version;
}
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
@@ -341,27 +343,24 @@ static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
+ *width = pix->width;
+ *height = pix->height;
+
switch (pix->pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
if (plane == 1)
*height /= 2;
break;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_UYVY:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[plane].bytesperline;
break;
}
@@ -1180,7 +1179,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_4_8 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index e0f3a36f3f3f..71f78b40e7f5 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -604,6 +604,8 @@ static int vfe_get(struct vfe_device *vfe)
vfe_reset_output_maps(vfe);
vfe_init_outputs(vfe);
+
+ vfe->ops->hw_version(vfe);
} else {
ret = vfe_check_clock_rates(vfe);
if (ret < 0)
@@ -713,8 +715,6 @@ static int vfe_set_power(struct v4l2_subdev *sd, int on)
ret = vfe_get(vfe);
if (ret < 0)
return ret;
-
- vfe->ops->hw_version_read(vfe, vfe->camss->dev);
} else {
vfe_put(vfe);
}
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index 844b9275031d..f166d176cb77 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -103,7 +103,7 @@ struct vfe_device;
struct vfe_hw_ops {
void (*enable_irq_common)(struct vfe_device *vfe);
void (*global_reset)(struct vfe_device *vfe);
- void (*hw_version_read)(struct vfe_device *vfe, struct device *dev);
+ u32 (*hw_version)(struct vfe_device *vfe);
irqreturn_t (*isr)(int irq, void *dev);
void (*isr_read)(struct vfe_device *vfe, u32 *value0, u32 *value1);
void (*pm_domain_off)(struct vfe_device *vfe);
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index ef100d5f7763..be091c50a3c0 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -886,9 +886,9 @@ static int camss_of_parse_ports(struct camss *camss)
goto err_cleanup;
}
- csd = v4l2_async_notifier_add_fwnode_subdev(
- &camss->notifier, of_fwnode_handle(remote),
- struct camss_async_subdev);
+ csd = v4l2_async_nf_add_fwnode(&camss->notifier,
+ of_fwnode_handle(remote),
+ struct camss_async_subdev);
of_node_put(remote);
if (IS_ERR(csd)) {
ret = PTR_ERR(csd);
@@ -1361,7 +1361,7 @@ static int camss_probe(struct platform_device *pdev)
goto err_free;
}
- v4l2_async_notifier_init(&camss->notifier);
+ v4l2_async_nf_init(&camss->notifier);
num_subdevs = camss_of_parse_ports(camss);
if (num_subdevs < 0) {
@@ -1397,8 +1397,8 @@ static int camss_probe(struct platform_device *pdev)
if (num_subdevs) {
camss->notifier.ops = &camss_subdev_notifier_ops;
- ret = v4l2_async_notifier_register(&camss->v4l2_dev,
- &camss->notifier);
+ ret = v4l2_async_nf_register(&camss->v4l2_dev,
+ &camss->notifier);
if (ret) {
dev_err(dev,
"Failed to register async subdev nodes: %d\n",
@@ -1436,7 +1436,7 @@ err_register_subdevs:
err_register_entities:
v4l2_device_unregister(&camss->v4l2_dev);
err_cleanup:
- v4l2_async_notifier_cleanup(&camss->notifier);
+ v4l2_async_nf_cleanup(&camss->notifier);
err_free:
kfree(camss);
@@ -1478,8 +1478,8 @@ static int camss_remove(struct platform_device *pdev)
{
struct camss *camss = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&camss->notifier);
- v4l2_async_notifier_cleanup(&camss->notifier);
+ v4l2_async_nf_unregister(&camss->notifier);
+ v4l2_async_nf_cleanup(&camss->notifier);
camss_unregister_entities(camss);
if (atomic_read(&camss->ref_count) == 0)
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 91b15842c555..f5fa81896012 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -65,7 +65,7 @@ static void venus_event_notify(struct venus_core *core, u32 event)
}
mutex_lock(&core->lock);
- core->sys_error = true;
+ set_bit(0, &core->sys_error);
list_for_each_entry(inst, &core->instances, list)
inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL);
mutex_unlock(&core->lock);
@@ -95,9 +95,8 @@ static void venus_sys_error_handler(struct work_struct *work)
failed = true;
}
- hfi_core_deinit(core, true);
-
- mutex_lock(&core->lock);
+ core->ops->core_deinit(core);
+ core->state = CORE_UNINIT;
for (i = 0; i < max_attempts; i++) {
if (!pm_runtime_active(core->dev_dec) && !pm_runtime_active(core->dev_enc))
@@ -105,6 +104,8 @@ static void venus_sys_error_handler(struct work_struct *work)
msleep(10);
}
+ mutex_lock(&core->lock);
+
venus_shutdown(core);
venus_coredump(core);
@@ -161,7 +162,8 @@ static void venus_sys_error_handler(struct work_struct *work)
dev_warn(core->dev, "system error has occurred (recovered)\n");
mutex_lock(&core->lock);
- core->sys_error = false;
+ clear_bit(0, &core->sys_error);
+ wake_up_all(&core->sys_err_done);
mutex_unlock(&core->lock);
}
@@ -267,7 +269,6 @@ static int venus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct venus_core *core;
- struct resource *r;
int ret;
core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
@@ -276,8 +277,7 @@ static int venus_probe(struct platform_device *pdev)
core->dev = dev;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- core->base = devm_ioremap_resource(dev, r);
+ core->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(core->base))
return PTR_ERR(core->base);
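
devm_platform_ioremap_resource() used above folds the platform_get_resource() + devm_ioremap_resource() pair into a single call with the same error semantics. A minimal probe excerpt for a hypothetical driver:

	static int my_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		/* looks up IORESOURCE_MEM index 0 and maps it, device-managed */
		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		return 0;
	}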
@@ -318,6 +318,7 @@ static int venus_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&core->instances);
mutex_init(&core->lock);
INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
+ init_waitqueue_head(&core->sys_err_done);
ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, hfi_isr_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
@@ -567,6 +568,69 @@ static const struct venus_resources msm8996_res = {
.fwname = "qcom/venus-4.2/venus.mdt",
};
+static const struct freq_tbl sdm660_freq_table[] = {
+ { 979200, 518400000 },
+ { 489600, 441600000 },
+ { 432000, 404000000 },
+ { 244800, 320000000 },
+ { 216000, 269330000 },
+ { 108000, 133330000 },
+};
+
+static const struct reg_val sdm660_reg_preset[] = {
+ { 0x80010, 0x001f001f },
+ { 0x80018, 0x00000156 },
+ { 0x8001c, 0x00000156 },
+};
+
+static const struct bw_tbl sdm660_bw_table_enc[] = {
+ { 979200, 1044000, 0, 2446336, 0 }, /* 4k UHD @ 30 */
+ { 864000, 887000, 0, 2108416, 0 }, /* 720p @ 240 */
+ { 489600, 666000, 0, 1207296, 0 }, /* 1080p @ 60 */
+ { 432000, 578000, 0, 1058816, 0 }, /* 720p @ 120 */
+ { 244800, 346000, 0, 616448, 0 }, /* 1080p @ 30 */
+ { 216000, 293000, 0, 534528, 0 }, /* 720p @ 60 */
+ { 108000, 151000, 0, 271360, 0 }, /* 720p @ 30 */
+};
+
+static const struct bw_tbl sdm660_bw_table_dec[] = {
+ { 979200, 2365000, 0, 1892000, 0 }, /* 4k UHD @ 30 */
+ { 864000, 1978000, 0, 1554000, 0 }, /* 720p @ 240 */
+ { 489600, 1133000, 0, 895000, 0 }, /* 1080p @ 60 */
+ { 432000, 994000, 0, 781000, 0 }, /* 720p @ 120 */
+ { 244800, 580000, 0, 460000, 0 }, /* 1080p @ 30 */
+ { 216000, 501000, 0, 301000, 0 }, /* 720p @ 60 */
+ { 108000, 255000, 0, 202000, 0 }, /* 720p @ 30 */
+};
+
+static const struct venus_resources sdm660_res = {
+ .freq_tbl = sdm660_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sdm660_freq_table),
+ .reg_tbl = sdm660_reg_preset,
+ .reg_tbl_size = ARRAY_SIZE(sdm660_reg_preset),
+ .bw_tbl_enc = sdm660_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sdm660_bw_table_enc),
+ .bw_tbl_dec = sdm660_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sdm660_bw_table_dec),
+ .clks = { "core", "iface", "bus", "bus_throttle" },
+ .clks_num = 4,
+ .vcodec0_clks = { "vcodec0_core" },
+ .vcodec1_clks = { "vcodec0_core" },
+ .vcodec_clks_num = 1,
+ .vcodec_num = 1,
+ .max_load = 1036800,
+ .hfi_version = HFI_VERSION_3XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .cp_start = 0,
+ .cp_size = 0x79000000,
+ .cp_nonpixel_start = 0x1000000,
+ .cp_nonpixel_size = 0x28000000,
+ .dma_mask = 0xd9000000 - 1,
+ .fwname = "qcom/venus-4.4/venus.mdt",
+};
+
static const struct freq_tbl sdm845_freq_table[] = {
{ 3110400, 533000000 }, /* 4096x2160@90 */
{ 2073600, 444000000 }, /* 4096x2160@60 */
@@ -729,6 +793,7 @@ static const struct venus_resources sm8250_res = {
.vcodec_num = 1,
.max_load = 7833600,
.hfi_version = HFI_VERSION_6XX,
+ .num_vpp_pipes = 4,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
@@ -736,12 +801,66 @@ static const struct venus_resources sm8250_res = {
.fwname = "qcom/vpu-1.0/venus.mdt",
};
+static const struct freq_tbl sc7280_freq_table[] = {
+ { 0, 460000000 },
+ { 0, 424000000 },
+ { 0, 335000000 },
+ { 0, 240000000 },
+ { 0, 133333333 },
+};
+
+static const struct bw_tbl sc7280_bw_table_enc[] = {
+ { 1944000, 1896000, 0, 3657000, 0 }, /* 3840x2160@60 */
+ { 972000, 968000, 0, 1848000, 0 }, /* 3840x2160@30 */
+ { 489600, 618000, 0, 941000, 0 }, /* 1920x1080@60 */
+ { 244800, 318000, 0, 480000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct bw_tbl sc7280_bw_table_dec[] = {
+ { 2073600, 2128000, 0, 3831000, 0 }, /* 4096x2160@60 */
+ { 1036800, 1085000, 0, 1937000, 0 }, /* 4096x2160@30 */
+ { 489600, 779000, 0, 998000, 0 }, /* 1920x1080@60 */
+ { 244800, 400000, 0, 509000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct reg_val sm7280_reg_preset[] = {
+ { 0xb0088, 0 },
+};
+
+static const struct venus_resources sc7280_res = {
+ .freq_tbl = sc7280_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sc7280_freq_table),
+ .reg_tbl = sm7280_reg_preset,
+ .reg_tbl_size = ARRAY_SIZE(sm7280_reg_preset),
+ .bw_tbl_enc = sc7280_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sc7280_bw_table_enc),
+ .bw_tbl_dec = sc7280_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sc7280_bw_table_dec),
+ .clks = {"core", "bus", "iface"},
+ .clks_num = 3,
+ .vcodec0_clks = {"vcodec_core", "vcodec_bus"},
+ .vcodec_clks_num = 2,
+ .vcodec_pmdomains = { "venus", "vcodec0" },
+ .vcodec_pmdomains_num = 2,
+ .opp_pmdomain = (const char *[]) { "cx", NULL },
+ .vcodec_num = 1,
+ .hfi_version = HFI_VERSION_6XX,
+ .num_vpp_pipes = 1,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu-2.0/venus.mbn",
+};
+
static const struct of_device_id venus_dt_match[] = {
{ .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
{ .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
+ { .compatible = "qcom,sdm660-venus", .data = &sdm660_res, },
{ .compatible = "qcom,sdm845-venus", .data = &sdm845_res, },
{ .compatible = "qcom,sdm845-venus-v2", .data = &sdm845_res_v2, },
{ .compatible = "qcom,sc7180-venus", .data = &sc7180_res, },
+ { .compatible = "qcom,sc7280-venus", .data = &sc7280_res, },
{ .compatible = "qcom,sm8250-venus", .data = &sm8250_res, },
{ }
};
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 5ec851115eca..7c3bac01cd49 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -7,6 +7,7 @@
#ifndef __VENUS_CORE_H_
#define __VENUS_CORE_H_
+#include <linux/bitops.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-ctrls.h>
@@ -68,6 +69,7 @@ struct venus_resources {
const char * const resets[VIDC_RESETS_NUM_MAX];
unsigned int resets_num;
enum hfi_version hfi_version;
+ u8 num_vpp_pipes;
u32 max_load;
unsigned int vmem_id;
u32 vmem_size;
@@ -181,7 +183,8 @@ struct venus_core {
unsigned int state;
struct completion done;
unsigned int error;
- bool sys_error;
+ unsigned long sys_error;
+ wait_queue_head_t sys_err_done;
const struct hfi_core_ops *core_ops;
const struct venus_pm_ops *pm_ops;
struct mutex pm_lock;
@@ -334,6 +337,7 @@ enum venus_inst_modes {
* @registeredbufs: a list of registered capture buffers
* @delayed_process: a list of delayed buffers
* @delayed_process_work: a work_struct for process delayed buffers
* @nonblock: true if the instance was opened with O_NONBLOCK
* @ctrl_handler: v4l control handler
* @controls: a union of decoder and encoder control parameters
* @fh: a holder of v4l file handle structure
@@ -397,6 +401,7 @@ struct venus_inst {
struct list_head registeredbufs;
struct list_head delayed_process;
struct work_struct delayed_process_work;
+ bool nonblock;
struct v4l2_ctrl_handler ctrl_handler;
union {
@@ -408,6 +413,7 @@ struct venus_inst {
u32 width;
u32 height;
struct v4l2_rect crop;
+ u32 fw_min_cnt;
u32 out_width;
u32 out_height;
u32 colorspace;
@@ -452,6 +458,7 @@ struct venus_inst {
bool next_buf_last;
bool drain_active;
enum venus_inst_modes flags;
+ struct ida dpb_ids;
};
#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
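
Taken together, the sys_error changes in core.c and core.h above form a small recovery handshake: the event path sets bit 0, the recovery worker clears it and wakes waiters, and entry points either bail out with -EAGAIN for nonblocking opens or sleep until recovery completes. A condensed sketch of the three sides, assuming the fields declared above (locking trimmed):

	static void sys_error_enter(struct venus_core *core)
	{
		set_bit(0, &core->sys_error);
	}

	static void sys_error_leave(struct venus_core *core)
	{
		clear_bit(0, &core->sys_error);
		wake_up_all(&core->sys_err_done);
	}

	static int wait_for_recovery(struct venus_core *core, bool nonblock)
	{
		if (!test_bit(0, &core->sys_error))
			return 0;
		if (nonblock)
			return -EAGAIN;
		return wait_event_interruptible(core->sys_err_done,
						!test_bit(0, &core->sys_error));
	}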
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 227bd3b3f84c..14b6f1d05991 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -27,7 +27,12 @@
static void venus_reset_cpu(struct venus_core *core)
{
u32 fw_size = core->fw.mapped_mem_size;
- void __iomem *wrapper_base = core->wrapper_base;
+ void __iomem *wrapper_base;
+
+ if (IS_V6(core))
+ wrapper_base = core->wrapper_tz_base;
+ else
+ wrapper_base = core->wrapper_base;
writel(0, wrapper_base + WRAPPER_FW_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR);
@@ -35,11 +40,17 @@ static void venus_reset_cpu(struct venus_core *core)
writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
- writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS);
- writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG);
- /* Bring ARM9 out of reset */
- writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET);
+ if (IS_V6(core)) {
+ /* Bring XTSS out of reset */
+ writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET);
+ } else {
+ writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS);
+ writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG);
+
+ /* Bring ARM9 out of reset */
+ writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET);
+ }
}
int venus_set_hw_state(struct venus_core *core, bool resume)
@@ -56,7 +67,9 @@ int venus_set_hw_state(struct venus_core *core, bool resume)
if (resume) {
venus_reset_cpu(core);
} else {
- if (!IS_V6(core))
+ if (IS_V6(core))
+ writel(1, core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ else
writel(1, core->wrapper_base + WRAPPER_A9SS_SW_RESET);
}
@@ -162,12 +175,19 @@ static int venus_shutdown_no_tz(struct venus_core *core)
u32 reg;
struct device *dev = core->fw.dev;
void __iomem *wrapper_base = core->wrapper_base;
+ void __iomem *wrapper_tz_base = core->wrapper_tz_base;
- /* Assert the reset to ARM9 */
- reg = readl_relaxed(wrapper_base + WRAPPER_A9SS_SW_RESET);
- reg |= WRAPPER_A9SS_SW_RESET_BIT;
- writel_relaxed(reg, wrapper_base + WRAPPER_A9SS_SW_RESET);
-
+ if (IS_V6(core)) {
+ /* Assert the reset to XTSS */
+ reg = readl_relaxed(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ reg |= WRAPPER_XTSS_SW_RESET_BIT;
+ writel_relaxed(reg, wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ } else {
+ /* Assert the reset to ARM9 */
+ reg = readl_relaxed(wrapper_base + WRAPPER_A9SS_SW_RESET);
+ reg |= WRAPPER_A9SS_SW_RESET_BIT;
+ writel_relaxed(reg, wrapper_base + WRAPPER_A9SS_SW_RESET);
+ }
/* Make sure reset is asserted before the mapping is removed */
mb();
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 8012f5c7bf34..84c3a511ec31 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
+#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -18,8 +19,13 @@
#include "hfi_platform.h"
#include "hfi_parser.h"
-#define NUM_MBS_720P (((1280 + 15) >> 4) * ((720 + 15) >> 4))
-#define NUM_MBS_4K (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
+#define NUM_MBS_720P (((ALIGN(1280, 16)) >> 4) * ((ALIGN(736, 16)) >> 4))
+#define NUM_MBS_4K (((ALIGN(4096, 16)) >> 4) * ((ALIGN(2304, 16)) >> 4))
+
+enum dpb_buf_owner {
+ DRIVER,
+ FIRMWARE,
+};
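+
+/*
+ * The NUM_MBS_720P correction is easy to verify by hand: buffers are sized
+ * in 16x16 macroblocks and 720 is not a multiple of 16, so the height
+ * operand becomes the macroblock-aligned 736. The count is then
+ * (1280/16) * (736/16) = 80 * 46 = 3680 macroblocks, where the old
+ * (720 + 15) >> 4 expression yielded 80 * 45 = 3600. NUM_MBS_4K is
+ * numerically unchanged since 4096 and 2304 were already 16-aligned.
+ */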
struct intbuf {
struct list_head list;
@@ -28,6 +34,8 @@ struct intbuf {
void *va;
dma_addr_t da;
unsigned long attrs;
+ enum dpb_buf_owner owned_by;
+ u32 dpb_out_tag;
};
bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
@@ -95,9 +103,16 @@ int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
fdata.device_addr = buf->da;
fdata.buffer_type = buf->type;
+ if (buf->owned_by == FIRMWARE)
+ continue;
+
+ fdata.clnt_data = buf->dpb_out_tag;
+
ret = hfi_session_process_buf(inst, &fdata);
if (ret)
goto fail;
+
+ buf->owned_by = FIRMWARE;
}
fail:
@@ -110,13 +125,19 @@ int venus_helper_free_dpb_bufs(struct venus_inst *inst)
struct intbuf *buf, *n;
list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
+ if (buf->owned_by == FIRMWARE)
+ continue;
+
+ ida_free(&inst->dpb_ids, buf->dpb_out_tag);
+
list_del_init(&buf->list);
dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
buf->attrs);
kfree(buf);
}
- INIT_LIST_HEAD(&inst->dpbbufs);
+ if (list_empty(&inst->dpbbufs))
+ INIT_LIST_HEAD(&inst->dpbbufs);
return 0;
}
@@ -134,6 +155,7 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
unsigned int i;
u32 count;
int ret;
+ int id;
/* no need to allocate dpb buffers */
if (!inst->dpb_fmt)
@@ -171,6 +193,15 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
ret = -ENOMEM;
goto fail;
}
+ buf->owned_by = DRIVER;
+
+ id = ida_alloc_min(&inst->dpb_ids, VB2_MAX_FRAME, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto fail;
+ }
+
+ buf->dpb_out_tag = id;
list_add_tail(&buf->list, &inst->dpbbufs);
}
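
Each DPB buffer now carries an owner flag plus an IDA-allocated tag; because tags start at VB2_MAX_FRAME they can never collide with vb2 buffer indices, so a firmware done event can be attributed unambiguously to either pool. A sketch of the tag helpers, as illustrative wrappers around the calls used above:

	#include <linux/idr.h>

	static int tag_dpb_buf(struct ida *ids, u32 *tag)
	{
		/* first id handed out is VB2_MAX_FRAME, above any vb2 index */
		int id = ida_alloc_min(ids, VB2_MAX_FRAME, GFP_KERNEL);

		if (id < 0)
			return id;
		*tag = id;
		return 0;
	}

	static void untag_dpb_buf(struct ida *ids, u32 tag)
	{
		ida_free(ids, tag);
	}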
@@ -583,7 +614,7 @@ static int platform_get_bufreq(struct venus_inst *inst, u32 buftype,
return -EINVAL;
params.version = version;
- params.num_vpp_pipes = hfi_platform_num_vpp_pipes(version);
+ params.num_vpp_pipes = inst->core->res->num_vpp_pipes;
if (is_dec) {
params.width = inst->width;
@@ -623,9 +654,15 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
if (req)
memset(req, 0, sizeof(*req));
+ if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
+ req->count_min = inst->fw_min_cnt;
+
ret = platform_get_bufreq(inst, type, req);
- if (!ret)
+ if (!ret) {
+ if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
+ inst->fw_min_cnt = req->count_min;
return 0;
+ }
ret = hfi_session_get_property(inst, ptype, &hprop);
if (ret)
@@ -1365,6 +1402,24 @@ venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
}
EXPORT_SYMBOL_GPL(venus_helper_find_buf);
+void venus_helper_change_dpb_owner(struct venus_inst *inst,
+ struct vb2_v4l2_buffer *vbuf, unsigned int type,
+ unsigned int buf_type, u32 tag)
+{
+ struct intbuf *dpb_buf;
+
+ if (!V4L2_TYPE_IS_CAPTURE(type) ||
+ buf_type != inst->dpb_buftype)
+ return;
+
+ list_for_each_entry(dpb_buf, &inst->dpbbufs, list)
+ if (dpb_buf->dpb_out_tag == tag) {
+ dpb_buf->owned_by = DRIVER;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_change_dpb_owner);
+
int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
@@ -1480,7 +1535,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
ret |= venus_helper_intbufs_free(inst);
ret |= hfi_session_deinit(inst);
- if (inst->session_error || core->sys_error)
+ if (inst->session_error || test_bit(0, &core->sys_error))
ret = -EIO;
if (ret)
@@ -1504,10 +1559,24 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
venus_pm_release_core(inst);
+ inst->session_error = 0;
+
mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
+void venus_helper_vb2_queue_error(struct venus_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct vb2_queue *q;
+
+ q = v4l2_m2m_get_src_vq(m2m_ctx);
+ vb2_queue_error(q);
+ q = v4l2_m2m_get_dst_vq(m2m_ctx);
+ vb2_queue_error(q);
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_queue_error);
+
int venus_helper_process_initial_cap_bufs(struct venus_inst *inst)
{
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index e6269b4be3af..32619c3e8c97 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -14,6 +14,9 @@ struct venus_core;
bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt);
struct vb2_v4l2_buffer *venus_helper_find_buf(struct venus_inst *inst,
unsigned int type, u32 idx);
+void venus_helper_change_dpb_owner(struct venus_inst *inst,
+ struct vb2_v4l2_buffer *vbuf, unsigned int type,
+ unsigned int buf_type, u32 idx);
void venus_helper_buffers_done(struct venus_inst *inst, unsigned int type,
enum vb2_buffer_state state);
int venus_helper_vb2_buf_init(struct vb2_buffer *vb);
@@ -21,6 +24,7 @@ int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb);
void venus_helper_vb2_buf_queue(struct vb2_buffer *vb);
void venus_helper_vb2_stop_streaming(struct vb2_queue *q);
int venus_helper_vb2_start_streaming(struct venus_inst *inst);
+void venus_helper_vb2_queue_error(struct venus_inst *inst);
void venus_helper_m2m_device_run(void *priv);
void venus_helper_m2m_job_abort(void *priv);
int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index 0f2482367e06..4e2151fb47f0 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -187,6 +187,11 @@ int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
mutex_lock(&core->lock);
+ if (test_bit(0, &inst->core->sys_error)) {
+ ret = -EIO;
+ goto unlock;
+ }
+
max = atomic_add_unless(&core->insts_count, 1,
core->max_sessions_supported);
if (!max) {
@@ -196,6 +201,7 @@ int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
ret = 0;
}
+unlock:
mutex_unlock(&core->lock);
return ret;
@@ -214,7 +220,7 @@ int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
* session_init() can't pass successfully
*/
mutex_lock(&core->lock);
- if (!core->ops || core->sys_error) {
+ if (!core->ops || test_bit(0, &inst->core->sys_error)) {
mutex_unlock(&core->lock);
return -EIO;
}
@@ -263,6 +269,9 @@ int hfi_session_deinit(struct venus_inst *inst)
if (inst->state < INST_INIT)
return -EINVAL;
+ if (test_bit(0, &inst->core->sys_error))
+ goto done;
+
reinit_completion(&inst->done);
ret = ops->session_end(inst);
@@ -273,6 +282,7 @@ int hfi_session_deinit(struct venus_inst *inst)
if (ret)
return ret;
+done:
inst->state = INST_UNINIT;
return 0;
@@ -284,6 +294,9 @@ int hfi_session_start(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_LOAD_RESOURCES)
return -EINVAL;
@@ -308,6 +321,9 @@ int hfi_session_stop(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_START)
return -EINVAL;
@@ -331,6 +347,9 @@ int hfi_session_continue(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (core->res->hfi_version == HFI_VERSION_1XX)
return 0;
@@ -343,6 +362,9 @@ int hfi_session_abort(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
reinit_completion(&inst->done);
ret = ops->session_abort(inst);
@@ -362,6 +384,9 @@ int hfi_session_load_res(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_INIT)
return -EINVAL;
@@ -385,6 +410,9 @@ int hfi_session_unload_res(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_STOP)
return -EINVAL;
@@ -409,6 +437,9 @@ int hfi_session_flush(struct venus_inst *inst, u32 type, bool block)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
reinit_completion(&inst->done);
ret = ops->session_flush(inst, type);
@@ -429,6 +460,9 @@ int hfi_session_set_buffers(struct venus_inst *inst, struct hfi_buffer_desc *bd)
{
const struct hfi_ops *ops = inst->core->ops;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
return ops->session_set_buffers(inst, bd);
}
@@ -438,6 +472,9 @@ int hfi_session_unset_buffers(struct venus_inst *inst,
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
reinit_completion(&inst->done);
ret = ops->session_unset_buffers(inst, bd);
@@ -460,6 +497,9 @@ int hfi_session_get_property(struct venus_inst *inst, u32 ptype,
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state < INST_INIT || inst->state >= INST_STOP)
return -EINVAL;
@@ -483,6 +523,9 @@ int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata)
{
const struct hfi_ops *ops = inst->core->ops;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state < INST_INIT || inst->state >= INST_STOP)
return -EINVAL;
@@ -494,6 +537,9 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
{
const struct hfi_ops *ops = inst->core->ops;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (fd->buffer_type == HFI_BUFFER_INPUT)
return ops->session_etb(inst, fd);
else if (fd->buffer_type == HFI_BUFFER_OUTPUT ||
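
Every HFI entry point in this file gains the same test_bit() guard. The repeated check could equally be written once as a helper; a sketch, not part of the patch:

	static inline int hfi_bail_on_sys_error(struct venus_core *core)
	{
		/* bit 0 stays set from the error event until recovery completes */
		return test_bit(0, &core->sys_error) ? -EIO : 0;
	}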
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 60f4b8e4b8d0..5aea07307e02 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -1299,6 +1299,13 @@ pkt_session_set_property_6xx(struct hfi_session_set_property_pkt *pkt,
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cq);
break;
}
+ case HFI_PROPERTY_PARAM_WORK_ROUTE: {
+ struct hfi_video_work_route *in = pdata, *wr = prop_data;
+
+ wr->video_work_route = in->video_work_route;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wr);
+ break;
+ }
default:
return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
}
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index bec4feb63ceb..2daa88e3df9f 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -167,6 +167,7 @@
#define HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA 0x120300c
#define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE 0x120300d
#define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY 0x120300e
+#define HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS 0x120300e
#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA 0x1203011
#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA 0x1203012
#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA 0x1203013
@@ -448,6 +449,7 @@
#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT 0x100f
#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED 0x1010
#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015
+#define HFI_PROPERTY_PARAM_WORK_ROUTE 0x1017
/*
* HFI_PROPERTY_CONFIG_COMMON_START
@@ -873,6 +875,10 @@ struct hfi_video_work_mode {
u32 video_work_mode;
};
+struct hfi_video_work_route {
+ u32 video_work_route;
+};
+
struct hfi_h264_vui_timing_info {
u32 enable;
u32 fixed_framerate;
@@ -910,6 +916,14 @@ struct hfi_extradata_input_crop {
u32 height;
};
+struct hfi_dpb_counts {
+ u32 max_dpb_count;
+ u32 max_ref_frames;
+ u32 max_dec_buffering;
+ u32 max_reorder_frames;
+ u32 fw_min_cnt;
+};
+
#define HFI_COLOR_FORMAT_MONOCHROME 0x01
#define HFI_COLOR_FORMAT_NV12 0x02
#define HFI_COLOR_FORMAT_NV21 0x03
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 9a2bdb002edc..df96db3761a7 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -32,6 +32,7 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
struct hfi_colour_space *colour_info;
struct hfi_buffer_requirements *bufreq;
struct hfi_extradata_input_crop *crop;
+ struct hfi_dpb_counts *dpb_count;
u8 *data_ptr;
u32 ptype;
@@ -110,6 +111,12 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
event.input_crop.height = crop->height;
data_ptr += sizeof(*crop);
break;
+ case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS:
+ data_ptr += sizeof(u32);
+ dpb_count = (struct hfi_dpb_counts *)data_ptr;
+ event.buf_count = dpb_count->fw_min_cnt;
+ data_ptr += sizeof(*dpb_count);
+ break;
default:
break;
}
diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
index 479178b0600d..ea25c451222b 100644
--- a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
@@ -1164,7 +1164,7 @@ static int output_buffer_count(u32 session_type, u32 codec)
output_min_count = 6;
break;
case V4L2_PIX_FMT_VP9:
- output_min_count = 9;
+ output_min_count = 11;
break;
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_HEVC:
@@ -1213,6 +1213,8 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
}
out_min_count = output_buffer_count(VIDC_SESSION_TYPE_DEC, codec);
+ /* Max of driver and FW count */
+ out_min_count = max(out_min_count, bufreq->count_min);
bufreq->type = buftype;
bufreq->region_size = 0;
@@ -1237,7 +1239,7 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) {
bufreq->size = dec_ops->scratch(width, height, is_interlaced);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_1(version)) {
- bufreq->size = dec_ops->scratch1(width, height, out_min_count,
+ bufreq->size = dec_ops->scratch1(width, height, VB2_MAX_FRAME,
is_secondary_output,
num_vpp_pipes);
} else if (buftype == HFI_BUFFER_INTERNAL_PERSIST_1) {
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.c b/drivers/media/platform/qcom/venus/hfi_platform.c
index f5b4e1f4764f..f16f8962273c 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform.c
@@ -66,16 +66,3 @@ hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_
return freq;
}
-u8 hfi_platform_num_vpp_pipes(enum hfi_version version)
-{
- const struct hfi_platform *plat;
-
- plat = hfi_platform_get(version);
- if (!plat)
- return 0;
-
- if (plat->num_vpp_pipes)
- return plat->num_vpp_pipes();
-
- return 0;
-}
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.h b/drivers/media/platform/qcom/venus/hfi_platform.h
index 2dbe608c53af..1dcf4085928c 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.h
+++ b/drivers/media/platform/qcom/venus/hfi_platform.h
@@ -52,7 +52,6 @@ struct hfi_platform {
unsigned long (*codec_lp_freq)(u32 session_type, u32 codec);
void (*codecs)(u32 *enc_codecs, u32 *dec_codecs, u32 *count);
const struct hfi_plat_caps *(*capabilities)(unsigned int *entries);
- u8 (*num_vpp_pipes)(void);
int (*bufreq)(struct hfi_plat_buffers_params *params, u32 session_type,
u32 buftype, struct hfi_buffer_requirements *bufreq);
};
@@ -67,5 +66,4 @@ unsigned long hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 code
u32 session_type);
unsigned long hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec,
u32 session_type);
-u8 hfi_platform_num_vpp_pipes(enum hfi_version version);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v6.c b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
index d8243b22568a..c10618e44f5d 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
@@ -322,17 +322,11 @@ static unsigned long codec_lp_freq(u32 session_type, u32 codec)
return 0;
}
-static u8 num_vpp_pipes(void)
-{
- return 4;
-}
-
const struct hfi_platform hfi_plat_v6 = {
.codec_vpp_freq = codec_vpp_freq,
.codec_vsp_freq = codec_vsp_freq,
.codec_lp_freq = codec_lp_freq,
.codecs = get_codecs,
.capabilities = get_capabilities,
- .num_vpp_pipes = num_vpp_pipes,
.bufreq = hfi_plat_bufreq_v6,
};
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index ce98c523b3c6..3a75a27632fb 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -551,6 +551,9 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
if (IS_V6(hdev->core)) {
writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
+ if (hdev->core->res->num_vpp_pipes == 1)
+ goto skip_aon_mvp_noc;
+
writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
val,
@@ -560,6 +563,7 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
if (ret)
return -ETIMEDOUT;
+skip_aon_mvp_noc:
mask_val = (BIT(2) | BIT(1) | BIT(0));
writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h
index 300c6e47e72f..9735a246ce36 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus_io.h
+++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h
@@ -149,6 +149,8 @@
/* Wrapper TZ 6xx */
#define WRAPPER_TZ_BASE_V6 0x000c0000
#define WRAPPER_TZ_CPU_STATUS_V6 0x10
+#define WRAPPER_TZ_XTSS_SW_RESET 0x1000
+#define WRAPPER_XTSS_SW_RESET_BIT BIT(0)
/* Venus AON */
#define AON_BASE_V6 0x000e0000
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 3e2345eb47f7..cedc664ba755 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -1085,12 +1085,16 @@ static unsigned long calculate_inst_freq(struct venus_inst *inst,
if (inst->state != INST_START)
return 0;
- if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
vpp_freq_per_mb = inst->flags & VENUS_LOW_POWER ?
inst->clk_data.low_power_freq :
inst->clk_data.vpp_freq;
- vpp_freq = mbs_per_sec * vpp_freq_per_mb;
+ vpp_freq = mbs_per_sec * vpp_freq_per_mb;
+ } else {
+ vpp_freq = mbs_per_sec * inst->clk_data.vpp_freq;
+ }
+
/* 21 / 20 is overhead factor */
vpp_freq += vpp_freq / 20;
vsp_freq = mbs_per_sec * inst->clk_data.vsp_freq;
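
The restructured branches keep the existing overhead model: vpp_freq += vpp_freq / 20 scales the estimate by 21/20, i.e. a flat 5% allowance on top of the per-macroblock VPP cycle cost, now applied after each session type computes its own base vpp_freq.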
@@ -1139,9 +1143,10 @@ static int load_scale_v4(struct venus_inst *inst)
freq = max(freq_core1, freq_core2);
if (freq > table[0].freq) {
+ dev_dbg(dev, VDBGL "requested clock rate: %lu, scaling clock rate: %lu\n",
+ freq, table[0].freq);
+
freq = table[0].freq;
- dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
- freq, table[0].freq);
goto set_freq;
}
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 198e47eb63f4..91da3f509724 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -332,8 +332,11 @@ static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->fmt_out = fmt;
- else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
inst->fmt_cap = fmt;
+ inst->output2_buf_size =
+ venus_helper_get_framesz(pixfmt_cap, orig_pixmp.width, orig_pixmp.height);
+ }
return 0;
}
@@ -653,6 +656,19 @@ static int vdec_set_properties(struct venus_inst *inst)
return 0;
}
+static int vdec_set_work_route(struct venus_inst *inst)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_WORK_ROUTE;
+ struct hfi_video_work_route wr;
+
+ if (!IS_V6(inst->core))
+ return 0;
+
+ wr.video_work_route = inst->core->res->num_vpp_pipes;
+
+ return hfi_session_set_property(inst, ptype, &wr);
+}
+
#define is_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_UBWC_BASE))
static int vdec_output_conf(struct venus_inst *inst)
@@ -830,6 +846,7 @@ static int vdec_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
+ struct venus_core *core = inst->core;
unsigned int in_num, out_num;
int ret = 0;
@@ -855,6 +872,16 @@ static int vdec_queue_setup(struct vb2_queue *q,
return 0;
}
+ if (test_bit(0, &core->sys_error)) {
+ if (inst->nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(core->sys_err_done,
+ !test_bit(0, &core->sys_error));
+ if (ret)
+ return ret;
+ }
+
ret = vdec_pm_get(inst);
if (ret)
return ret;
@@ -970,23 +997,23 @@ reconfigure:
if (ret)
goto err;
+ venus_pm_load_scale(inst);
+
+ inst->next_buf_last = false;
+
ret = venus_helper_alloc_dpb_bufs(inst);
if (ret)
goto err;
- ret = venus_helper_queue_dpb_bufs(inst);
+ ret = hfi_session_continue(inst);
if (ret)
goto free_dpb_bufs;
- ret = venus_helper_process_initial_cap_bufs(inst);
+ ret = venus_helper_queue_dpb_bufs(inst);
if (ret)
goto free_dpb_bufs;
- venus_pm_load_scale(inst);
-
- inst->next_buf_last = false;
-
- ret = hfi_session_continue(inst);
+ ret = venus_helper_process_initial_cap_bufs(inst);
if (ret)
goto free_dpb_bufs;
@@ -1039,6 +1066,10 @@ static int vdec_start_output(struct venus_inst *inst)
if (ret)
return ret;
+ ret = vdec_set_work_route(inst);
+ if (ret)
+ return ret;
+
ret = vdec_output_conf(inst);
if (ret)
return ret;
@@ -1178,6 +1209,8 @@ static void vdec_stop_streaming(struct vb2_queue *q)
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_ERROR);
+ inst->session_error = 0;
+
if (ret)
goto unlock;
@@ -1211,7 +1244,7 @@ static void vdec_session_release(struct venus_inst *inst)
ret = hfi_session_deinit(inst);
abort = (ret && ret != -EINVAL) ? 1 : 0;
- if (inst->session_error || core->sys_error)
+ if (inst->session_error || test_bit(0, &core->sys_error))
abort = 1;
if (abort)
@@ -1306,8 +1339,10 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
vbuf = venus_helper_find_buf(inst, type, tag);
- if (!vbuf)
+ if (!vbuf) {
+ venus_helper_change_dpb_owner(inst, vbuf, type, buf_type, tag);
return;
+ }
vbuf->flags = flags;
vbuf->field = V4L2_FIELD_NONE;
@@ -1389,6 +1424,11 @@ static void vdec_event_change(struct venus_inst *inst,
inst->crop.height = ev_data->height;
}
+ inst->fw_min_cnt = ev_data->buf_count;
+ /* override to 11 for VP9 due to a firmware bug */
+ if (inst->hfi_codec == HFI_VIDEO_CODEC_VP9)
+ inst->fw_min_cnt = 11;
+
inst->out_width = ev_data->width;
inst->out_height = ev_data->height;
@@ -1448,6 +1488,7 @@ static void vdec_event_notify(struct venus_inst *inst, u32 event,
switch (event) {
case EVT_SESSION_ERROR:
inst->session_error = true;
+ venus_helper_vb2_queue_error(inst);
dev_err(dev, "dec: event session error %x\n", inst->error);
break;
case EVT_SYS_EVENT_CHANGE:
@@ -1492,6 +1533,7 @@ static void vdec_inst_init(struct venus_inst *inst)
inst->crop.top = 0;
inst->crop.width = inst->width;
inst->crop.height = inst->height;
+ inst->fw_min_cnt = 8;
inst->out_width = frame_width_min(inst);
inst->out_height = frame_height_min(inst);
inst->fps = 30;
@@ -1568,6 +1610,8 @@ static int vdec_open(struct file *file)
inst->bit_depth = VIDC_BITDEPTH_8;
inst->pic_struct = HFI_INTERLACE_FRAME_PROGRESSIVE;
init_waitqueue_head(&inst->reconf_wait);
+ inst->nonblock = file->f_flags & O_NONBLOCK;
+
venus_helper_init_instance(inst);
ret = vdec_ctrl_init(inst);
@@ -1580,6 +1624,8 @@ static int vdec_open(struct file *file)
vdec_inst_init(inst);
+ ida_init(&inst->dpb_ids);
+
/*
* create m2m device for every instance, the m2m context scheduling
* is made by firmware side so we do not need to care about.
@@ -1625,6 +1671,7 @@ static int vdec_close(struct file *file)
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
vdec_ctrl_deinit(inst);
+ ida_destroy(&inst->dpb_ids);
hfi_session_destroy(inst);
mutex_destroy(&inst->lock);
v4l2_fh_del(&inst->fh);
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index bc1c42dd53c0..84bafc3118cc 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -538,6 +538,64 @@ static const struct v4l2_ioctl_ops venc_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
+static int venc_pm_get(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_enc;
+ int ret;
+
+ mutex_lock(&core->pm_lock);
+ ret = pm_runtime_resume_and_get(dev);
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int venc_pm_put(struct venus_inst *inst, bool autosuspend)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_enc;
+ int ret;
+
+ mutex_lock(&core->pm_lock);
+
+ if (autosuspend)
+ ret = pm_runtime_put_autosuspend(dev);
+ else
+ ret = pm_runtime_put_sync(dev);
+
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int venc_pm_get_put(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_enc;
+ int ret = 0;
+
+ mutex_lock(&core->pm_lock);
+
+ if (pm_runtime_suspended(dev)) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto error;
+
+ ret = pm_runtime_put_autosuspend(dev);
+ }
+
+error:
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static void venc_pm_touch(struct venus_inst *inst)
+{
+ pm_runtime_mark_last_busy(inst->core->dev_enc);
+}
+
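+/*
+ * The helpers above serialize encoder runtime-PM transitions under
+ * pm_lock. A typical pairing around a hardware-touching operation might
+ * look like this (illustrative sketch; error handling trimmed):
+ *
+ *	static int do_powered_op(struct venus_inst *inst)
+ *	{
+ *		int ret;
+ *
+ *		ret = venc_pm_get(inst);
+ *		if (ret)
+ *			return ret;
+ *
+ *		... program the encoder ...
+ *
+ *		autosuspend=true lets the delayed timer power it back down
+ *		return venc_pm_put(inst, true);
+ *	}
+ */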
static int venc_set_properties(struct venus_inst *inst)
{
struct venc_controls *ctr = &inst->controls.enc;
@@ -908,6 +966,7 @@ static int venc_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
+ struct venus_core *core = inst->core;
unsigned int num, min = 4;
int ret;
@@ -931,11 +990,29 @@ static int venc_queue_setup(struct vb2_queue *q,
return 0;
}
+ if (test_bit(0, &core->sys_error)) {
+ if (inst->nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(core->sys_err_done,
+ !test_bit(0, &core->sys_error));
+ if (ret)
+ return ret;
+ }
+
+ ret = venc_pm_get(inst);
+ if (ret)
+ return ret;
+
mutex_lock(&inst->lock);
ret = venc_init_session(inst);
mutex_unlock(&inst->lock);
if (ret)
+ goto put_power;
+
+ ret = venc_pm_put(inst, false);
+ if (ret)
return ret;
switch (q->type) {
@@ -971,6 +1048,9 @@ static int venc_queue_setup(struct vb2_queue *q,
}
return ret;
+put_power:
+ venc_pm_put(inst, false);
+ return ret;
}
static int venc_buf_init(struct vb2_buffer *vb)
@@ -986,6 +1066,8 @@ static void venc_release_session(struct venus_inst *inst)
{
int ret;
+ venc_pm_get(inst);
+
mutex_lock(&inst->lock);
ret = hfi_session_deinit(inst);
@@ -997,6 +1079,8 @@ static void venc_release_session(struct venus_inst *inst)
venus_pm_load_scale(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
venus_pm_release_core(inst);
+
+ venc_pm_put(inst, false);
}
static void venc_buf_cleanup(struct vb2_buffer *vb)
@@ -1066,8 +1150,16 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
inst->sequence_cap = 0;
inst->sequence_out = 0;
+ ret = venc_pm_get(inst);
+ if (ret)
+ goto error;
+
ret = venus_pm_acquire_core(inst);
if (ret)
+ goto put_power;
+
+ ret = venc_pm_put(inst, true);
+ if (ret)
goto error;
ret = venc_set_properties(inst);
@@ -1091,6 +1183,8 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
+put_power:
+ venc_pm_put(inst, false);
error:
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
@@ -1105,6 +1199,8 @@ static void venc_vb2_buf_queue(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ venc_pm_get_put(inst);
+
mutex_lock(&inst->lock);
venus_helper_vb2_buf_queue(vb);
mutex_unlock(&inst->lock);
@@ -1128,6 +1224,8 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
struct vb2_buffer *vb;
unsigned int type;
+ venc_pm_touch(inst);
+
if (buf_type == HFI_BUFFER_INPUT)
type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
else
@@ -1157,8 +1255,11 @@ static void venc_event_notify(struct venus_inst *inst, u32 event,
{
struct device *dev = inst->core->dev_enc;
+ venc_pm_touch(inst);
+
if (event == EVT_SESSION_ERROR) {
inst->session_error = true;
+ venus_helper_vb2_queue_error(inst);
dev_err(dev, "enc: event session error %x\n", inst->error);
}
}
@@ -1242,16 +1343,13 @@ static int venc_open(struct file *file)
inst->session_type = VIDC_SESSION_TYPE_ENC;
inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
inst->core_acquired = false;
+ inst->nonblock = file->f_flags & O_NONBLOCK;
venus_helper_init_instance(inst);
- ret = pm_runtime_resume_and_get(core->dev_enc);
- if (ret < 0)
- goto err_free;
-
ret = venc_ctrl_init(inst);
if (ret)
- goto err_put_sync;
+ goto err_free;
ret = hfi_session_create(inst, &venc_hfi_ops);
if (ret)
@@ -1290,8 +1388,6 @@ err_session_destroy:
hfi_session_destroy(inst);
err_ctrl_deinit:
venc_ctrl_deinit(inst);
-err_put_sync:
- pm_runtime_put_sync(core->dev_enc);
err_free:
kfree(inst);
return ret;
@@ -1301,6 +1397,8 @@ static int venc_close(struct file *file)
{
struct venus_inst *inst = to_inst(file);
+ venc_pm_get(inst);
+
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
venc_ctrl_deinit(inst);
@@ -1309,7 +1407,7 @@ static int venc_close(struct file *file)
v4l2_fh_del(&inst->fh);
v4l2_fh_exit(&inst->fh);
- pm_runtime_put_sync(inst->core->dev_enc);
+ venc_pm_put(inst, false);
kfree(inst);
return 0;
@@ -1366,6 +1464,8 @@ static int venc_probe(struct platform_device *pdev)
core->dev_enc = dev;
video_set_drvdata(vdev, core);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
+ pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/media/platform/rcar-isp.c b/drivers/media/platform/rcar-isp.c
new file mode 100644
index 000000000000..2ffab30bc011
--- /dev/null
+++ b/drivers/media/platform/rcar-isp.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * Driver for Renesas R-Car ISP Channel Selector
+ *
+ * The ISP hardware is capable of more than just channel selection; features
+ * such as demosaicing, white balance control and color space conversion are
+ * also possible. These more advanced features are not supported by the driver
+ * due to a lack of documentation.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-subdev.h>
+
+#define ISPINPUTSEL0_REG 0x0008
+#define ISPINPUTSEL0_SEL_CSI0 BIT(31)
+
+#define ISPSTART_REG 0x0014
+#define ISPSTART_START 0xffff
+#define ISPSTART_STOP 0x0000
+
+#define ISPPROCMODE_DT_REG(n) (0x1100 + (0x4 * (n)))
+#define ISPPROCMODE_DT_PROC_MODE_VC3(pm) (((pm) & 0x3f) << 24)
+#define ISPPROCMODE_DT_PROC_MODE_VC2(pm) (((pm) & 0x3f) << 16)
+#define ISPPROCMODE_DT_PROC_MODE_VC1(pm) (((pm) & 0x3f) << 8)
+#define ISPPROCMODE_DT_PROC_MODE_VC0(pm) ((pm) & 0x3f)
+
+#define ISPCS_FILTER_ID_CH_REG(n) (0x3000 + (0x0100 * (n)))
+
+#define ISPCS_DT_CODE03_CH_REG(n) (0x3008 + (0x100 * (n)))
+#define ISPCS_DT_CODE03_EN3 BIT(31)
+#define ISPCS_DT_CODE03_DT3(dt) (((dt) & 0x3f) << 24)
+#define ISPCS_DT_CODE03_EN2 BIT(23)
+#define ISPCS_DT_CODE03_DT2(dt) (((dt) & 0x3f) << 16)
+#define ISPCS_DT_CODE03_EN1 BIT(15)
+#define ISPCS_DT_CODE03_DT1(dt) (((dt) & 0x3f) << 8)
+#define ISPCS_DT_CODE03_EN0 BIT(7)
+#define ISPCS_DT_CODE03_DT0(dt) ((dt) & 0x3f)
+
+struct rcar_isp_format {
+ u32 code;
+ unsigned int datatype;
+ unsigned int procmode;
+};
+
+static const struct rcar_isp_format rcar_isp_formats[] = {
+ { .code = MEDIA_BUS_FMT_RGB888_1X24, .datatype = 0x24, .procmode = 0x15 },
+ { .code = MEDIA_BUS_FMT_Y10_1X10, .datatype = 0x2b, .procmode = 0x10 },
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .procmode = 0x0c },
+ { .code = MEDIA_BUS_FMT_YUYV8_1X16, .datatype = 0x1e, .procmode = 0x0c },
+ { .code = MEDIA_BUS_FMT_UYVY8_2X8, .datatype = 0x1e, .procmode = 0x0c },
+ { .code = MEDIA_BUS_FMT_YUYV10_2X10, .datatype = 0x1e, .procmode = 0x0c },
+};
+
+static const struct rcar_isp_format *risp_code_to_fmt(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rcar_isp_formats); i++) {
+ if (rcar_isp_formats[i].code == code)
+ return &rcar_isp_formats[i];
+ }
+
+ return NULL;
+}
+
+enum rcar_isp_input {
+ RISP_CSI_INPUT0,
+ RISP_CSI_INPUT1,
+};
+
+enum rcar_isp_pads {
+ RCAR_ISP_SINK,
+ RCAR_ISP_PORT0,
+ RCAR_ISP_PORT1,
+ RCAR_ISP_PORT2,
+ RCAR_ISP_PORT3,
+ RCAR_ISP_PORT4,
+ RCAR_ISP_PORT5,
+ RCAR_ISP_PORT6,
+ RCAR_ISP_PORT7,
+ RCAR_ISP_NUM_PADS,
+};
+
+struct rcar_isp {
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *rstc;
+
+ enum rcar_isp_input csi_input;
+
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RCAR_ISP_NUM_PADS];
+
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *remote;
+
+ struct mutex lock; /* Protects mf and stream_count. */
+ struct v4l2_mbus_framefmt mf;
+ int stream_count;
+};
+
+static inline struct rcar_isp *sd_to_isp(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct rcar_isp, subdev);
+}
+
+static inline struct rcar_isp *notifier_to_isp(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct rcar_isp, notifier);
+}
+
+static void risp_write(struct rcar_isp *isp, u32 offset, u32 value)
+{
+ iowrite32(value, isp->base + offset);
+}
+
+static u32 risp_read(struct rcar_isp *isp, u32 offset)
+{
+ return ioread32(isp->base + offset);
+}
+
+static int risp_power_on(struct rcar_isp *isp)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(isp->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = reset_control_deassert(isp->rstc);
+ if (ret < 0) {
+ pm_runtime_put(isp->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void risp_power_off(struct rcar_isp *isp)
+{
+ reset_control_assert(isp->rstc);
+ pm_runtime_put(isp->dev);
+}
+
+static int risp_start(struct rcar_isp *isp)
+{
+ const struct rcar_isp_format *format;
+ unsigned int vc;
+ u32 sel_csi = 0;
+ int ret;
+
+ format = risp_code_to_fmt(isp->mf.code);
+ if (!format) {
+ dev_err(isp->dev, "Unsupported bus format\n");
+ return -EINVAL;
+ }
+
+ ret = risp_power_on(isp);
+ if (ret) {
+ dev_err(isp->dev, "Failed to power on ISP\n");
+ return ret;
+ }
+
+ /* Select CSI-2 input source. */
+ if (isp->csi_input == RISP_CSI_INPUT1)
+ sel_csi = ISPINPUTSEL0_SEL_CSI0;
+
+ risp_write(isp, ISPINPUTSEL0_REG,
+ risp_read(isp, ISPINPUTSEL0_REG) | sel_csi);
+
+ /* Configure Channel Selector. */
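+ /* Each virtual channel vc is routed to channel selector output vc + 4. */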
+ for (vc = 0; vc < 4; vc++) {
+ u8 ch = vc + 4;
+ u8 dt = format->datatype;
+
+ risp_write(isp, ISPCS_FILTER_ID_CH_REG(ch), BIT(vc));
+ risp_write(isp, ISPCS_DT_CODE03_CH_REG(ch),
+ ISPCS_DT_CODE03_EN3 | ISPCS_DT_CODE03_DT3(dt) |
+ ISPCS_DT_CODE03_EN2 | ISPCS_DT_CODE03_DT2(dt) |
+ ISPCS_DT_CODE03_EN1 | ISPCS_DT_CODE03_DT1(dt) |
+ ISPCS_DT_CODE03_EN0 | ISPCS_DT_CODE03_DT0(dt));
+ }
+
+ /* Setup processing method. */
+ risp_write(isp, ISPPROCMODE_DT_REG(format->datatype),
+ ISPPROCMODE_DT_PROC_MODE_VC3(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC2(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC1(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC0(format->procmode));
+
+ /* Start ISP. */
+ risp_write(isp, ISPSTART_REG, ISPSTART_START);
+
+ ret = v4l2_subdev_call(isp->remote, video, s_stream, 1);
+ if (ret)
+ risp_power_off(isp);
+
+ return ret;
+}
+
+static void risp_stop(struct rcar_isp *isp)
+{
+ v4l2_subdev_call(isp->remote, video, s_stream, 0);
+
+ /* Stop ISP. */
+ risp_write(isp, ISPSTART_REG, ISPSTART_STOP);
+
+ risp_power_off(isp);
+}
+
+static int risp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+ int ret = 0;
+
+ mutex_lock(&isp->lock);
+
+ if (!isp->remote) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (enable && isp->stream_count == 0) {
+ ret = risp_start(isp);
+ if (ret)
+ goto out;
+ } else if (!enable && isp->stream_count == 1) {
+ risp_stop(isp);
+ }
+
+ isp->stream_count += enable ? 1 : -1;
+out:
+ mutex_unlock(&isp->lock);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops risp_video_ops = {
+ .s_stream = risp_s_stream,
+};
+
+static int risp_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ mutex_lock(&isp->lock);
+
+ if (!risp_code_to_fmt(format->format.code))
+ format->format.code = rcar_isp_formats[0].code;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ isp->mf = format->format;
+ } else {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ *framefmt = format->format;
+ }
+
+ mutex_unlock(&isp->lock);
+
+ return 0;
+}
+
+static int risp_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+
+ mutex_lock(&isp->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ format->format = isp->mf;
+ else
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+
+ mutex_unlock(&isp->lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops risp_pad_ops = {
+ .set_fmt = risp_set_pad_format,
+ .get_fmt = risp_get_pad_format,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+static const struct v4l2_subdev_ops rcar_isp_subdev_ops = {
+ .video = &risp_video_ops,
+ .pad = &risp_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Async handling and registration of subdevices and links
+ */
+
+static int risp_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_isp *isp = notifier_to_isp(notifier);
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ dev_err(isp->dev, "Failed to find pad for %s\n", subdev->name);
+ return pad;
+ }
+
+ isp->remote = subdev;
+
+ dev_dbg(isp->dev, "Bound %s pad: %d\n", subdev->name, pad);
+
+ return media_create_pad_link(&subdev->entity, pad,
+ &isp->subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static void risp_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_isp *isp = notifier_to_isp(notifier);
+
+ isp->remote = NULL;
+
+ dev_dbg(isp->dev, "Unbind %s\n", subdev->name);
+}
+
+static const struct v4l2_async_notifier_operations risp_notify_ops = {
+ .bound = risp_notify_bound,
+ .unbind = risp_notify_unbind,
+};
+
+static int risp_parse_dt(struct rcar_isp *isp)
+{
+ struct v4l2_async_subdev *asd;
+ struct fwnode_handle *fwnode;
+ struct fwnode_handle *ep;
+ unsigned int id;
+ int ret;
+
+ for (id = 0; id < 2; id++) {
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(isp->dev),
+ 0, id, 0);
+ if (ep)
+ break;
+ }
+
+ if (!ep) {
+ dev_err(isp->dev, "Not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ if (id == 1)
+ isp->csi_input = RISP_CSI_INPUT1;
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ fwnode_handle_put(ep);
+
+ dev_dbg(isp->dev, "Found '%pOF'\n", to_of_node(fwnode));
+
+ v4l2_async_nf_init(&isp->notifier);
+ isp->notifier.ops = &risp_notify_ops;
+
+ asd = v4l2_async_nf_add_fwnode(&isp->notifier, fwnode,
+ struct v4l2_async_subdev);
+ fwnode_handle_put(fwnode);
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
+
+ ret = v4l2_async_subdev_nf_register(&isp->subdev, &isp->notifier);
+ if (ret)
+ v4l2_async_nf_cleanup(&isp->notifier);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static const struct media_entity_operations risp_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int risp_probe_resources(struct rcar_isp *isp,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ isp->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(isp->base))
+ return PTR_ERR(isp->base);
+
+ isp->rstc = devm_reset_control_get(&pdev->dev, NULL);
+
+ return PTR_ERR_OR_ZERO(isp->rstc);
+}
+
+static const struct of_device_id risp_of_id_table[] = {
+ { .compatible = "renesas,r8a779a0-isp" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, risp_of_id_table);
+
+static int risp_probe(struct platform_device *pdev)
+{
+ struct rcar_isp *isp;
+ unsigned int i;
+ int ret;
+
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->dev = &pdev->dev;
+
+ mutex_init(&isp->lock);
+
+ ret = risp_probe_resources(isp, pdev);
+ if (ret) {
+ dev_err(isp->dev, "Failed to get resources\n");
+ goto error_mutex;
+ }
+
+ platform_set_drvdata(pdev, isp);
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = risp_parse_dt(isp);
+ if (ret)
+ goto error_pm;
+
+ isp->subdev.owner = THIS_MODULE;
+ isp->subdev.dev = &pdev->dev;
+ v4l2_subdev_init(&isp->subdev, &rcar_isp_subdev_ops);
+ v4l2_set_subdevdata(&isp->subdev, &pdev->dev);
+ snprintf(isp->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s",
+ KBUILD_MODNAME, dev_name(&pdev->dev));
+ isp->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ isp->subdev.entity.function = MEDIA_ENT_F_VID_MUX;
+ isp->subdev.entity.ops = &risp_entity_ops;
+
+ isp->pads[RCAR_ISP_SINK].flags = MEDIA_PAD_FL_SINK;
+ for (i = RCAR_ISP_PORT0; i < RCAR_ISP_NUM_PADS; i++)
+ isp->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&isp->subdev.entity, RCAR_ISP_NUM_PADS,
+ isp->pads);
+ if (ret)
+ goto error_notifier;
+
+ ret = v4l2_async_register_subdev(&isp->subdev);
+ if (ret < 0)
+ goto error_notifier;
+
+ dev_info(isp->dev, "Using CSI-2 input: %u\n", isp->csi_input);
+
+ return 0;
+error_notifier:
+ v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
+error_pm:
+ pm_runtime_disable(&pdev->dev);
+error_mutex:
+ mutex_destroy(&isp->lock);
+
+ return ret;
+}
+
+static int risp_remove(struct platform_device *pdev)
+{
+ struct rcar_isp *isp = platform_get_drvdata(pdev);
+
+ v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
+
+ v4l2_async_unregister_subdev(&isp->subdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ mutex_destroy(&isp->lock);
+
+ return 0;
+}
+
+static struct platform_driver rcar_isp_driver = {
+ .driver = {
+ .name = "rcar-isp",
+ .of_match_table = risp_of_id_table,
+ },
+ .probe = risp_probe,
+ .remove = risp_remove,
+};
+
+module_platform_driver(rcar_isp_driver);
+
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_DESCRIPTION("Renesas R-Car ISP Channel Selector driver");
+MODULE_LICENSE("GPL");
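
The stream_count bookkeeping in risp_s_stream() above lets several downstream users share one ISP instance: only the 0 -> 1 transition starts the hardware and only the 1 -> 0 transition stops it. A sketch of what two callers would observe (the isp_sd subdev pointer is assumed to be already bound):

v4l2_subdev_call(isp_sd, video, s_stream, 1);	/* count 0 -> 1: risp_start() */
v4l2_subdev_call(isp_sd, video, s_stream, 1);	/* count 1 -> 2: no hardware access */
v4l2_subdev_call(isp_sd, video, s_stream, 0);	/* count 2 -> 1: no hardware access */
v4l2_subdev_call(isp_sd, video, s_stream, 0);	/* count 1 -> 0: risp_stop() */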
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 33957cc9118c..1d92cc8ede8f 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -45,188 +45,7 @@
#define v4l2_dev_to_vin(d) container_of(d, struct rvin_dev, v4l2_dev)
/* -----------------------------------------------------------------------------
- * Media Controller link notification
- */
-
-/* group lock should be held when calling this function. */
-static int rvin_group_entity_to_csi_id(struct rvin_group *group,
- struct media_entity *entity)
-{
- struct v4l2_subdev *sd;
- unsigned int i;
-
- sd = media_entity_to_v4l2_subdev(entity);
-
- for (i = 0; i < RVIN_CSI_MAX; i++)
- if (group->csi[i].subdev == sd)
- return i;
-
- return -ENODEV;
-}
-
-static unsigned int rvin_group_get_mask(struct rvin_dev *vin,
- enum rvin_csi_id csi_id,
- unsigned char channel)
-{
- const struct rvin_group_route *route;
- unsigned int mask = 0;
-
- for (route = vin->info->routes; route->mask; route++) {
- if (route->vin == vin->id &&
- route->csi == csi_id &&
- route->channel == channel) {
- vin_dbg(vin,
- "Adding route: vin: %d csi: %d channel: %d\n",
- route->vin, route->csi, route->channel);
- mask |= route->mask;
- }
- }
-
- return mask;
-}
-
-/*
- * Link setup for the links between a VIN and a CSI-2 receiver is a bit
- * complex. The reason for this is that the register controlling routing
- * is not present in each VIN instance. There are special VINs which
- * control routing for themselves and other VINs. There are not many
- * different possible links combinations that can be enabled at the same
- * time, therefor all already enabled links which are controlled by a
- * master VIN need to be taken into account when making the decision
- * if a new link can be enabled or not.
- *
- * 1. Find out which VIN the link the user tries to enable is connected to.
- * 2. Lookup which master VIN controls the links for this VIN.
- * 3. Start with a bitmask with all bits set.
- * 4. For each previously enabled link from the master VIN bitwise AND its
- * route mask (see documentation for mask in struct rvin_group_route)
- * with the bitmask.
- * 5. Bitwise AND the mask for the link the user tries to enable to the bitmask.
- * 6. If the bitmask is not empty at this point the new link can be enabled
- * while keeping all previous links enabled. Update the CHSEL value of the
- * master VIN and inform the user that the link could be enabled.
- *
- * Please note that no link can be enabled if any VIN in the group is
- * currently open.
- */
-static int rvin_group_link_notify(struct media_link *link, u32 flags,
- unsigned int notification)
-{
- struct rvin_group *group = container_of(link->graph_obj.mdev,
- struct rvin_group, mdev);
- unsigned int master_id, channel, mask_new, i;
- unsigned int mask = ~0;
- struct media_entity *entity;
- struct video_device *vdev;
- struct media_pad *csi_pad;
- struct rvin_dev *vin = NULL;
- int csi_id, ret;
-
- ret = v4l2_pipeline_link_notify(link, flags, notification);
- if (ret)
- return ret;
-
- /* Only care about link enablement for VIN nodes. */
- if (!(flags & MEDIA_LNK_FL_ENABLED) ||
- !is_media_entity_v4l2_video_device(link->sink->entity))
- return 0;
-
- /*
- * Don't allow link changes if any entity in the graph is
- * streaming, modifying the CHSEL register fields can disrupt
- * running streams.
- */
- media_device_for_each_entity(entity, &group->mdev)
- if (entity->stream_count)
- return -EBUSY;
-
- mutex_lock(&group->lock);
-
- /* Find the master VIN that controls the routes. */
- vdev = media_entity_to_video_device(link->sink->entity);
- vin = container_of(vdev, struct rvin_dev, vdev);
- master_id = rvin_group_id_to_master(vin->id);
-
- if (WARN_ON(!group->vin[master_id])) {
- ret = -ENODEV;
- goto out;
- }
-
- /* Build a mask for already enabled links. */
- for (i = master_id; i < master_id + 4; i++) {
- if (!group->vin[i])
- continue;
-
- /* Get remote CSI-2, if any. */
- csi_pad = media_entity_remote_pad(
- &group->vin[i]->vdev.entity.pads[0]);
- if (!csi_pad)
- continue;
-
- csi_id = rvin_group_entity_to_csi_id(group, csi_pad->entity);
- channel = rvin_group_csi_pad_to_channel(csi_pad->index);
-
- mask &= rvin_group_get_mask(group->vin[i], csi_id, channel);
- }
-
- /* Add the new link to the existing mask and check if it works. */
- csi_id = rvin_group_entity_to_csi_id(group, link->source->entity);
-
- if (csi_id == -ENODEV) {
- struct v4l2_subdev *sd;
-
- /*
- * Make sure the source entity subdevice is registered as
- * a parallel input of one of the enabled VINs if it is not
- * one of the CSI-2 subdevices.
- *
- * No hardware configuration required for parallel inputs,
- * we can return here.
- */
- sd = media_entity_to_v4l2_subdev(link->source->entity);
- for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (group->vin[i] &&
- group->vin[i]->parallel.subdev == sd) {
- group->vin[i]->is_csi = false;
- ret = 0;
- goto out;
- }
- }
-
- vin_err(vin, "Subdevice %s not registered to any VIN\n",
- link->source->entity->name);
- ret = -ENODEV;
- goto out;
- }
-
- channel = rvin_group_csi_pad_to_channel(link->source->index);
- mask_new = mask & rvin_group_get_mask(vin, csi_id, channel);
- vin_dbg(vin, "Try link change mask: 0x%x new: 0x%x\n", mask, mask_new);
-
- if (!mask_new) {
- ret = -EMLINK;
- goto out;
- }
-
- /* New valid CHSEL found, set the new value. */
- ret = rvin_set_channel_routing(group->vin[master_id], __ffs(mask_new));
- if (ret)
- goto out;
-
- vin->is_csi = true;
-
-out:
- mutex_unlock(&group->lock);
-
- return ret;
-}
-
-static const struct media_device_ops rvin_media_ops = {
- .link_notify = rvin_group_link_notify,
-};
-
-/* -----------------------------------------------------------------------------
- * Gen3 CSI2 Group Allocator
+ * Gen3 Group Allocator
*/
/* FIXME: This should if we find a system that supports more
@@ -247,7 +66,9 @@ static void rvin_group_cleanup(struct rvin_group *group)
mutex_destroy(&group->lock);
}
-static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin)
+static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin,
+ int (*link_setup)(struct rvin_dev *),
+ const struct media_device_ops *ops)
{
struct media_device *mdev = &group->mdev;
const struct of_device_id *match;
@@ -263,8 +84,10 @@ static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin)
vin_dbg(vin, "found %u enabled VIN's in DT", group->count);
+ group->link_setup = link_setup;
+
mdev->dev = vin->dev;
- mdev->ops = &rvin_media_ops;
+ mdev->ops = ops;
match = of_match_node(vin->dev->driver->of_match_table,
vin->dev->of_node);
@@ -295,7 +118,9 @@ static void rvin_group_release(struct kref *kref)
mutex_unlock(&rvin_group_lock);
}
-static int rvin_group_get(struct rvin_dev *vin)
+static int rvin_group_get(struct rvin_dev *vin,
+ int (*link_setup)(struct rvin_dev *),
+ const struct media_device_ops *ops)
{
struct rvin_group *group;
u32 id;
@@ -327,7 +152,7 @@ static int rvin_group_get(struct rvin_dev *vin)
goto err_group;
}
- ret = rvin_group_init(group, vin);
+ ret = rvin_group_init(group, vin, link_setup, ops);
if (ret) {
kfree(group);
vin_err(vin, "Failed to initialize group\n");
@@ -383,6 +208,213 @@ out:
kref_put(&group->refcount, rvin_group_release);
}
+/* group lock should be held when calling this function. */
+static int rvin_group_entity_to_remote_id(struct rvin_group *group,
+ struct media_entity *entity)
+{
+ struct v4l2_subdev *sd;
+ unsigned int i;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ for (i = 0; i < RVIN_REMOTES_MAX; i++)
+ if (group->remotes[i].subdev == sd)
+ return i;
+
+ return -ENODEV;
+}
+
+static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+ int ret;
+
+ ret = media_device_register(&vin->group->mdev);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
+ if (ret) {
+ vin_err(vin, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ /* Register all video nodes for the group. */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (vin->group->vin[i] &&
+ !video_is_registered(&vin->group->vin[i]->vdev)) {
+ ret = rvin_v4l2_register(vin->group->vin[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return vin->group->link_setup(vin);
+}
+
+static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+
+ for (i = 0; i < RCAR_VIN_NUM; i++)
+ if (vin->group->vin[i])
+ rvin_v4l2_unregister(vin->group->vin[i]);
+
+ mutex_lock(&vin->group->lock);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++) {
+ if (vin->group->remotes[i].asd != asd)
+ continue;
+ vin->group->remotes[i].subdev = NULL;
+ vin_dbg(vin, "Unbind %s from slot %u\n", subdev->name, i);
+ break;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ media_device_unregister(&vin->group->mdev);
+}
+
+static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+
+ mutex_lock(&vin->group->lock);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++) {
+ if (vin->group->remotes[i].asd != asd)
+ continue;
+ vin->group->remotes[i].subdev = subdev;
+ vin_dbg(vin, "Bound %s to slot %u\n", subdev->name, i);
+ break;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations rvin_group_notify_ops = {
+ .bound = rvin_group_notify_bound,
+ .unbind = rvin_group_notify_unbind,
+ .complete = rvin_group_notify_complete,
+};
+
+static int rvin_group_parse_of(struct rvin_dev *vin, unsigned int port,
+ unsigned int id)
+{
+ struct fwnode_handle *ep, *fwnode;
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_subdev *asd;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), port, id, 0);
+ if (!ep)
+ return 0;
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ fwnode_handle_put(ep);
+ if (ret) {
+ vin_err(vin, "Failed to parse %pOF\n", to_of_node(fwnode));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ asd = v4l2_async_nf_add_fwnode(&vin->group->notifier, fwnode,
+ struct v4l2_async_subdev);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto out;
+ }
+
+ vin->group->remotes[vep.base.id].asd = asd;
+
+ vin_dbg(vin, "Add group OF device %pOF to slot %u\n",
+ to_of_node(fwnode), vep.base.id);
+out:
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+static void rvin_group_notifier_cleanup(struct rvin_dev *vin)
+{
+ mutex_lock(&vin->group->lock);
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_nf_unregister(&vin->group->notifier);
+ v4l2_async_nf_cleanup(&vin->group->notifier);
+ }
+ mutex_unlock(&vin->group->lock);
+}
+
+static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port,
+ unsigned int max_id)
+{
+ unsigned int count = 0, vin_mask = 0;
+ unsigned int i, id;
+ int ret;
+
+ mutex_lock(&vin->group->lock);
+
+ /* If not all VINs are registered, don't register the notifier. */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (vin->group->vin[i]) {
+ count++;
+ vin_mask |= BIT(i);
+ }
+ }
+
+ if (vin->group->count != count) {
+ mutex_unlock(&vin->group->lock);
+ return 0;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ v4l2_async_nf_init(&vin->group->notifier);
+
+ /*
+ * Some subdevices may overlap, but the parser function can handle it;
+ * each subdevice will only be registered once with the group notifier.
+ */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (!(vin_mask & BIT(i)))
+ continue;
+
+ for (id = 0; id < max_id; id++) {
+ if (vin->group->remotes[id].asd)
+ continue;
+
+ ret = rvin_group_parse_of(vin->group->vin[i], port, id);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (list_empty(&vin->group->notifier.asd_list))
+ return 0;
+
+ vin->group->notifier.ops = &rvin_group_notify_ops;
+ ret = v4l2_async_nf_register(&vin->v4l2_dev, &vin->group->notifier);
+ if (ret < 0) {
+ vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_nf_cleanup(&vin->group->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* Controls
*/
@@ -405,6 +437,45 @@ static const struct v4l2_ctrl_ops rvin_ctrl_ops = {
.s_ctrl = rvin_s_ctrl,
};
+static void rvin_free_controls(struct rvin_dev *vin)
+{
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ vin->vdev.ctrl_handler = NULL;
+}
+
+static int rvin_create_controls(struct rvin_dev *vin, struct v4l2_subdev *subdev)
+{
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16);
+ if (ret < 0)
+ return ret;
+
+ /* The VIN directly deals with alpha component. */
+ v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ if (vin->ctrl_handler.error) {
+ ret = vin->ctrl_handler.error;
+ rvin_free_controls(vin);
+ return ret;
+ }
+
+ /* For the non-MC mode add controls from the subdevice. */
+ if (subdev) {
+ ret = v4l2_ctrl_add_handler(&vin->ctrl_handler,
+ subdev->ctrl_handler, NULL, true);
+ if (ret < 0) {
+ rvin_free_controls(vin);
+ return ret;
+ }
+ }
+
+ vin->vdev.ctrl_handler = &vin->ctrl_handler;
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* Async notifier
*/
@@ -490,28 +561,10 @@ static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
return ret;
/* Add the controls */
- ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16);
+ ret = rvin_create_controls(vin, subdev);
if (ret < 0)
return ret;
- v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
- V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
-
- if (vin->ctrl_handler.error) {
- ret = vin->ctrl_handler.error;
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- return ret;
- }
-
- ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, subdev->ctrl_handler,
- NULL, true);
- if (ret < 0) {
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- return ret;
- }
-
- vin->vdev.ctrl_handler = &vin->ctrl_handler;
-
vin->parallel.subdev = subdev;
return 0;
@@ -522,10 +575,8 @@ static void rvin_parallel_subdevice_detach(struct rvin_dev *vin)
rvin_v4l2_unregister(vin);
vin->parallel.subdev = NULL;
- if (!vin->info->use_mc) {
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- vin->vdev.ctrl_handler = NULL;
- }
+ if (!vin->info->use_mc)
+ rvin_free_controls(vin);
}
static int rvin_parallel_notify_complete(struct v4l2_async_notifier *notifier)
@@ -641,8 +692,8 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
goto out;
}
- asd = v4l2_async_notifier_add_fwnode_subdev(&vin->notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(&vin->notifier, fwnode,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
@@ -657,28 +708,33 @@ out:
return ret;
}
+static void rvin_parallel_cleanup(struct rvin_dev *vin)
+{
+ v4l2_async_nf_unregister(&vin->notifier);
+ v4l2_async_nf_cleanup(&vin->notifier);
+}
+
static int rvin_parallel_init(struct rvin_dev *vin)
{
int ret;
- v4l2_async_notifier_init(&vin->notifier);
+ v4l2_async_nf_init(&vin->notifier);
ret = rvin_parallel_parse_of(vin);
if (ret)
return ret;
- /* If using mc, it's fine not to have any input registered. */
if (!vin->parallel.asd)
- return vin->info->use_mc ? 0 : -ENODEV;
+ return -ENODEV;
vin_dbg(vin, "Found parallel subdevice %pOF\n",
to_of_node(vin->parallel.asd->match.fwnode));
vin->notifier.ops = &rvin_parallel_notify_ops;
- ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
+ ret = v4l2_async_nf_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
- v4l2_async_notifier_cleanup(&vin->notifier);
+ v4l2_async_nf_cleanup(&vin->notifier);
return ret;
}
@@ -686,36 +742,175 @@ static int rvin_parallel_init(struct rvin_dev *vin)
}
/* -----------------------------------------------------------------------------
- * Group async notifier
+ * CSI-2
*/
-static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
+static unsigned int rvin_csi2_get_mask(struct rvin_dev *vin,
+ enum rvin_csi_id csi_id,
+ unsigned char channel)
{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
const struct rvin_group_route *route;
- unsigned int i;
- int ret;
+ unsigned int mask = 0;
- ret = media_device_register(&vin->group->mdev);
+ for (route = vin->info->routes; route->mask; route++) {
+ if (route->vin == vin->id &&
+ route->csi == csi_id &&
+ route->channel == channel) {
+ vin_dbg(vin,
+ "Adding route: vin: %d csi: %d channel: %d\n",
+ route->vin, route->csi, route->channel);
+ mask |= route->mask;
+ }
+ }
+
+ return mask;
+}
+
+/*
+ * Link setup for the links between a VIN and a CSI-2 receiver is a bit
+ * complex. The reason for this is that the register controlling routing
+ * is not present in each VIN instance. There are special VINs which
+ * control routing for themselves and other VINs. There are not many
+ * different possible link combinations that can be enabled at the same
+ * time; therefore all already enabled links which are controlled by a
+ * master VIN need to be taken into account when deciding whether a new
+ * link can be enabled.
+ *
+ * 1. Find out which VIN the link the user tries to enable is connected to.
+ * 2. Lookup which master VIN controls the links for this VIN.
+ * 3. Start with a bitmask with all bits set.
+ * 4. For each previously enabled link from the master VIN bitwise AND its
+ * route mask (see documentation for mask in struct rvin_group_route)
+ * with the bitmask.
+ * 5. Bitwise AND the mask of the link the user tries to enable with the bitmask.
+ * 6. If the bitmask is not empty at this point the new link can be enabled
+ * while keeping all previous links enabled. Update the CHSEL value of the
+ * master VIN and inform the user that the link could be enabled.
+ *
+ * Please note that no link can be enabled if any VIN in the group is
+ * currently open. A worked example of the mask logic follows this comment.
+ */
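+/*
+ * A worked example with hypothetical mask values: assume the master VIN
+ * already has one enabled link whose route mask is 0x0005 and the user
+ * tries to enable a link whose mask is 0x0006. Starting from a bitmask of
+ * ~0, the result is ~0 & 0x0005 & 0x0006 = 0x0004. The bitmask is not
+ * empty, so the new link is accepted and the master VIN's CHSEL value is
+ * set to __ffs(0x0004) = 2.
+ */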
+static int rvin_csi2_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct rvin_group *group = container_of(link->graph_obj.mdev,
+ struct rvin_group, mdev);
+ unsigned int master_id, channel, mask_new, i;
+ unsigned int mask = ~0;
+ struct media_entity *entity;
+ struct video_device *vdev;
+ struct media_pad *csi_pad;
+ struct rvin_dev *vin = NULL;
+ int csi_id, ret;
+
+ ret = v4l2_pipeline_link_notify(link, flags, notification);
if (ret)
return ret;
- ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
- if (ret) {
- vin_err(vin, "Failed to register subdev nodes\n");
- return ret;
+ /* Only care about link enablement for VIN nodes. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED) ||
+ !is_media_entity_v4l2_video_device(link->sink->entity))
+ return 0;
+
+ /*
+ * Don't allow link changes if any entity in the graph is
+ * streaming, modifying the CHSEL register fields can disrupt
+ * running streams.
+ */
+ media_device_for_each_entity(entity, &group->mdev)
+ if (entity->stream_count)
+ return -EBUSY;
+
+ mutex_lock(&group->lock);
+
+ /* Find the master VIN that controls the routes. */
+ vdev = media_entity_to_video_device(link->sink->entity);
+ vin = container_of(vdev, struct rvin_dev, vdev);
+ master_id = rvin_group_id_to_master(vin->id);
+
+ if (WARN_ON(!group->vin[master_id])) {
+ ret = -ENODEV;
+ goto out;
}
- /* Register all video nodes for the group. */
- for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (vin->group->vin[i] &&
- !video_is_registered(&vin->group->vin[i]->vdev)) {
- ret = rvin_v4l2_register(vin->group->vin[i]);
- if (ret)
- return ret;
+ /* Build a mask for already enabled links. */
+ for (i = master_id; i < master_id + 4; i++) {
+ if (!group->vin[i])
+ continue;
+
+ /* Get remote CSI-2, if any. */
+ csi_pad = media_entity_remote_pad(
+ &group->vin[i]->vdev.entity.pads[0]);
+ if (!csi_pad)
+ continue;
+
+ csi_id = rvin_group_entity_to_remote_id(group, csi_pad->entity);
+ channel = rvin_group_csi_pad_to_channel(csi_pad->index);
+
+ mask &= rvin_csi2_get_mask(group->vin[i], csi_id, channel);
+ }
+
+ /* Add the new link to the existing mask and check if it works. */
+ csi_id = rvin_group_entity_to_remote_id(group, link->source->entity);
+
+ if (csi_id == -ENODEV) {
+ struct v4l2_subdev *sd;
+
+ /*
+ * Make sure the source entity subdevice is registered as
+ * a parallel input of one of the enabled VINs if it is not
+ * one of the CSI-2 subdevices.
+ *
+ * No hardware configuration required for parallel inputs,
+ * we can return here.
+ */
+ sd = media_entity_to_v4l2_subdev(link->source->entity);
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (group->vin[i] &&
+ group->vin[i]->parallel.subdev == sd) {
+ group->vin[i]->is_csi = false;
+ ret = 0;
+ goto out;
+ }
}
+
+ vin_err(vin, "Subdevice %s not registered to any VIN\n",
+ link->source->entity->name);
+ ret = -ENODEV;
+ goto out;
}
+ channel = rvin_group_csi_pad_to_channel(link->source->index);
+ mask_new = mask & rvin_csi2_get_mask(vin, csi_id, channel);
+ vin_dbg(vin, "Try link change mask: 0x%x new: 0x%x\n", mask, mask_new);
+
+ if (!mask_new) {
+ ret = -EMLINK;
+ goto out;
+ }
+
+ /* New valid CHSEL found, set the new value. */
+ ret = rvin_set_channel_routing(group->vin[master_id], __ffs(mask_new));
+ if (ret)
+ goto out;
+
+ vin->is_csi = true;
+
+out:
+ mutex_unlock(&group->lock);
+
+ return ret;
+}
+
+static const struct media_device_ops rvin_csi2_media_ops = {
+ .link_notify = rvin_csi2_link_notify,
+};
+
+static int rvin_csi2_setup_links(struct rvin_dev *vin)
+{
+ const struct rvin_group_route *route;
+ int ret = -EINVAL;
+
/* Create all media device links between VINs and CSI-2's. */
mutex_lock(&vin->group->lock);
for (route = vin->info->routes; route->mask; route++) {
@@ -732,10 +927,10 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
continue;
/* Check that CSI-2 is part of the group. */
- if (!vin->group->csi[route->csi].subdev)
+ if (!vin->group->remotes[route->csi].subdev)
continue;
- source = &vin->group->csi[route->csi].subdev->entity;
+ source = &vin->group->remotes[route->csi].subdev->entity;
source_idx = rvin_group_csi_channel_to_pad(route->channel);
source_pad = &source->pads[source_idx];
@@ -758,167 +953,107 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
return ret;
}
-static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
-{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- unsigned int i;
-
- for (i = 0; i < RCAR_VIN_NUM; i++)
- if (vin->group->vin[i])
- rvin_v4l2_unregister(vin->group->vin[i]);
-
- mutex_lock(&vin->group->lock);
-
- for (i = 0; i < RVIN_CSI_MAX; i++) {
- if (vin->group->csi[i].asd != asd)
- continue;
- vin->group->csi[i].subdev = NULL;
- vin_dbg(vin, "Unbind CSI-2 %s from slot %u\n", subdev->name, i);
- break;
- }
-
- mutex_unlock(&vin->group->lock);
-
- media_device_unregister(&vin->group->mdev);
-}
-
-static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static void rvin_csi2_cleanup(struct rvin_dev *vin)
{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- unsigned int i;
-
- mutex_lock(&vin->group->lock);
-
- for (i = 0; i < RVIN_CSI_MAX; i++) {
- if (vin->group->csi[i].asd != asd)
- continue;
- vin->group->csi[i].subdev = subdev;
- vin_dbg(vin, "Bound CSI-2 %s to slot %u\n", subdev->name, i);
- break;
- }
-
- mutex_unlock(&vin->group->lock);
-
- return 0;
+ rvin_parallel_cleanup(vin);
+ rvin_group_notifier_cleanup(vin);
+ rvin_group_put(vin);
+ rvin_free_controls(vin);
}
-static const struct v4l2_async_notifier_operations rvin_group_notify_ops = {
- .bound = rvin_group_notify_bound,
- .unbind = rvin_group_notify_unbind,
- .complete = rvin_group_notify_complete,
-};
-
-static int rvin_mc_parse_of(struct rvin_dev *vin, unsigned int id)
+static int rvin_csi2_init(struct rvin_dev *vin)
{
- struct fwnode_handle *ep, *fwnode;
- struct v4l2_fwnode_endpoint vep = {
- .bus_type = V4L2_MBUS_CSI2_DPHY,
- };
- struct v4l2_async_subdev *asd;
int ret;
- ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), 1, id, 0);
- if (!ep)
- return 0;
+ vin->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
+ if (ret)
+ return ret;
- fwnode = fwnode_graph_get_remote_endpoint(ep);
- ret = v4l2_fwnode_endpoint_parse(ep, &vep);
- fwnode_handle_put(ep);
- if (ret) {
- vin_err(vin, "Failed to parse %pOF\n", to_of_node(fwnode));
- ret = -EINVAL;
- goto out;
- }
+ ret = rvin_create_controls(vin, NULL);
+ if (ret < 0)
+ return ret;
- if (!of_device_is_available(to_of_node(fwnode))) {
- vin_dbg(vin, "OF device %pOF disabled, ignoring\n",
- to_of_node(fwnode));
- ret = -ENOTCONN;
- goto out;
- }
+ ret = rvin_group_get(vin, rvin_csi2_setup_links, &rvin_csi2_media_ops);
+ if (ret)
+ goto err_controls;
- asd = v4l2_async_notifier_add_fwnode_subdev(&vin->group->notifier,
- fwnode,
- struct v4l2_async_subdev);
- if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
- goto out;
- }
+ /* It's OK to not have a parallel subdevice. */
+ ret = rvin_parallel_init(vin);
+ if (ret && ret != -ENODEV)
+ goto err_group;
- vin->group->csi[vep.base.id].asd = asd;
+ ret = rvin_group_notifier_init(vin, 1, RVIN_CSI_MAX);
+ if (ret)
+ goto err_parallel;
- vin_dbg(vin, "Add group OF device %pOF to slot %u\n",
- to_of_node(fwnode), vep.base.id);
-out:
- fwnode_handle_put(fwnode);
+ return 0;
+err_parallel:
+ rvin_parallel_cleanup(vin);
+err_group:
+ rvin_group_put(vin);
+err_controls:
+ rvin_free_controls(vin);
return ret;
}
-static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
+/* -----------------------------------------------------------------------------
+ * ISP
+ */
+
+static int rvin_isp_setup_links(struct rvin_dev *vin)
{
- unsigned int count = 0, vin_mask = 0;
- unsigned int i, id;
- int ret;
+ unsigned int i;
+ int ret = -EINVAL;
+ /* Create all media device links between VINs and ISPs. */
mutex_lock(&vin->group->lock);
-
- /* If not all VIN's are registered don't register the notifier. */
for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (vin->group->vin[i]) {
- count++;
- vin_mask |= BIT(i);
- }
- }
+ struct media_pad *source_pad, *sink_pad;
+ struct media_entity *source, *sink;
+ unsigned int source_slot = i / 8;
+ unsigned int source_idx = i % 8 + 1;
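+ /* e.g. i = 9 (VIN9): slot 9 / 8 = 1, source pad 9 % 8 + 1 = 2. */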
- if (vin->group->count != count) {
- mutex_unlock(&vin->group->lock);
- return 0;
- }
+ if (!vin->group->vin[i])
+ continue;
- mutex_unlock(&vin->group->lock);
+ /* Check that ISP is part of the group. */
+ if (!vin->group->remotes[source_slot].subdev)
+ continue;
- v4l2_async_notifier_init(&vin->group->notifier);
+ source = &vin->group->remotes[source_slot].subdev->entity;
+ source_pad = &source->pads[source_idx];
- /*
- * Have all VIN's look for CSI-2 subdevices. Some subdevices will
- * overlap but the parser function can handle it, so each subdevice
- * will only be registered once with the group notifier.
- */
- for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (!(vin_mask & BIT(i)))
- continue;
+ sink = &vin->group->vin[i]->vdev.entity;
+ sink_pad = &sink->pads[0];
- for (id = 0; id < RVIN_CSI_MAX; id++) {
- if (vin->group->csi[id].asd)
- continue;
+ /* Skip if link already exists. */
+ if (media_entity_find_link(source_pad, sink_pad))
+ continue;
- ret = rvin_mc_parse_of(vin->group->vin[i], id);
- if (ret)
- return ret;
+ ret = media_create_pad_link(source, source_idx, sink, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ vin_err(vin, "Error adding link from %s to %s\n",
+ source->name, sink->name);
+ break;
}
}
+ mutex_unlock(&vin->group->lock);
- if (list_empty(&vin->group->notifier.asd_list))
- return 0;
-
- vin->group->notifier.ops = &rvin_group_notify_ops;
- ret = v4l2_async_notifier_register(&vin->v4l2_dev,
- &vin->group->notifier);
- if (ret < 0) {
- vin_err(vin, "Notifier registration failed\n");
- v4l2_async_notifier_cleanup(&vin->group->notifier);
- return ret;
- }
+ return ret;
+}
- return 0;
+static void rvin_isp_cleanup(struct rvin_dev *vin)
+{
+ rvin_group_notifier_cleanup(vin);
+ rvin_group_put(vin);
+ rvin_free_controls(vin);
}
-static int rvin_mc_init(struct rvin_dev *vin)
+static int rvin_isp_init(struct rvin_dev *vin)
{
int ret;
@@ -927,28 +1062,23 @@ static int rvin_mc_init(struct rvin_dev *vin)
if (ret)
return ret;
- ret = rvin_group_get(vin);
- if (ret)
- return ret;
-
- ret = rvin_mc_parse_of_graph(vin);
- if (ret)
- rvin_group_put(vin);
-
- ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 1);
+ ret = rvin_create_controls(vin, NULL);
if (ret < 0)
return ret;
- v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
- V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+ ret = rvin_group_get(vin, rvin_isp_setup_links, NULL);
+ if (ret)
+ goto err_controls;
- if (vin->ctrl_handler.error) {
- ret = vin->ctrl_handler.error;
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- return ret;
- }
+ ret = rvin_group_notifier_init(vin, 2, RVIN_ISP_MAX);
+ if (ret)
+ goto err_group;
- vin->vdev.ctrl_handler = &vin->ctrl_handler;
+ return 0;
+err_group:
+ rvin_group_put(vin);
+err_controls:
+ rvin_free_controls(vin);
return ret;
}
@@ -1325,6 +1455,15 @@ static const struct rvin_info rcar_info_r8a77995 = {
.routes = rcar_info_r8a77995_routes,
};
+static const struct rvin_info rcar_info_r8a779a0 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .use_isp = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+};
+
static const struct of_device_id rvin_of_id_table[] = {
{
.compatible = "renesas,vin-r8a774a1",
@@ -1386,6 +1525,10 @@ static const struct of_device_id rvin_of_id_table[] = {
.compatible = "renesas,vin-r8a77995",
.data = &rcar_info_r8a77995,
},
+ {
+ .compatible = "renesas,vin-r8a779a0",
+ .data = &rcar_info_r8a779a0,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, rvin_of_id_table);
@@ -1434,38 +1577,22 @@ static int rcar_vin_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vin);
- if (vin->info->use_mc) {
- ret = rvin_mc_init(vin);
- if (ret)
- goto error_dma_unregister;
- }
+ if (vin->info->use_isp)
+ ret = rvin_isp_init(vin);
+ else if (vin->info->use_mc)
+ ret = rvin_csi2_init(vin);
+ else
+ ret = rvin_parallel_init(vin);
- ret = rvin_parallel_init(vin);
- if (ret)
- goto error_group_unregister;
+ if (ret) {
+ rvin_dma_unregister(vin);
+ return ret;
+ }
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
return 0;
-
-error_group_unregister:
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
-
- if (vin->info->use_mc) {
- mutex_lock(&vin->group->lock);
- if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
- v4l2_async_notifier_unregister(&vin->group->notifier);
- v4l2_async_notifier_cleanup(&vin->group->notifier);
- }
- mutex_unlock(&vin->group->lock);
- rvin_group_put(vin);
- }
-
-error_dma_unregister:
- rvin_dma_unregister(vin);
-
- return ret;
}
static int rcar_vin_remove(struct platform_device *pdev)
@@ -1476,16 +1603,12 @@ static int rcar_vin_remove(struct platform_device *pdev)
rvin_v4l2_unregister(vin);
- v4l2_async_notifier_unregister(&vin->notifier);
- v4l2_async_notifier_cleanup(&vin->notifier);
-
- if (vin->info->use_mc) {
- v4l2_async_notifier_unregister(&vin->group->notifier);
- v4l2_async_notifier_cleanup(&vin->group->notifier);
- rvin_group_put(vin);
- }
-
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ if (vin->info->use_isp)
+ rvin_isp_cleanup(vin);
+ else if (vin->info->use_mc)
+ rvin_csi2_cleanup(vin);
+ else
+ rvin_parallel_cleanup(vin);
rvin_dma_unregister(vin);
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index e28eff039688..11848d0c4a55 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -126,6 +126,12 @@ struct rcar_csi2;
#define PHTW_CWEN BIT(8)
#define PHTW_TESTDIN_CODE(n) ((n & 0xff))
+#define PHYFRX_REG 0x64
+#define PHYFRX_FORCERX_MODE_3 BIT(3)
+#define PHYFRX_FORCERX_MODE_2 BIT(2)
+#define PHYFRX_FORCERX_MODE_1 BIT(1)
+#define PHYFRX_FORCERX_MODE_0 BIT(0)
+
struct phtw_value {
u16 data;
u16 code;
@@ -136,6 +142,31 @@ struct rcsi2_mbps_reg {
u16 reg;
};
+static const struct rcsi2_mbps_reg phtw_mbps_v3u[] = {
+ { .mbps = 1500, .reg = 0xcc },
+ { .mbps = 1550, .reg = 0x1d },
+ { .mbps = 1600, .reg = 0x27 },
+ { .mbps = 1650, .reg = 0x30 },
+ { .mbps = 1700, .reg = 0x39 },
+ { .mbps = 1750, .reg = 0x42 },
+ { .mbps = 1800, .reg = 0x4b },
+ { .mbps = 1850, .reg = 0x55 },
+ { .mbps = 1900, .reg = 0x5e },
+ { .mbps = 1950, .reg = 0x67 },
+ { .mbps = 2000, .reg = 0x71 },
+ { .mbps = 2050, .reg = 0x79 },
+ { .mbps = 2100, .reg = 0x83 },
+ { .mbps = 2150, .reg = 0x8c },
+ { .mbps = 2200, .reg = 0x95 },
+ { .mbps = 2250, .reg = 0x9e },
+ { .mbps = 2300, .reg = 0xa7 },
+ { .mbps = 2350, .reg = 0xb0 },
+ { .mbps = 2400, .reg = 0xba },
+ { .mbps = 2450, .reg = 0xc3 },
+ { .mbps = 2500, .reg = 0xcc },
+ { /* sentinel */ },
+};
+
static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = {
{ .mbps = 80, .reg = 0x86 },
{ .mbps = 90, .reg = 0x86 },
@@ -200,6 +231,72 @@ static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = {
#define PHYPLL_REG 0x68
#define PHYPLL_HSFREQRANGE(n) ((n) << 16)
+static const struct rcsi2_mbps_reg hsfreqrange_v3u[] = {
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x10 },
+ { .mbps = 100, .reg = 0x20 },
+ { .mbps = 110, .reg = 0x30 },
+ { .mbps = 120, .reg = 0x01 },
+ { .mbps = 130, .reg = 0x11 },
+ { .mbps = 140, .reg = 0x21 },
+ { .mbps = 150, .reg = 0x31 },
+ { .mbps = 160, .reg = 0x02 },
+ { .mbps = 170, .reg = 0x12 },
+ { .mbps = 180, .reg = 0x22 },
+ { .mbps = 190, .reg = 0x32 },
+ { .mbps = 205, .reg = 0x03 },
+ { .mbps = 220, .reg = 0x13 },
+ { .mbps = 235, .reg = 0x23 },
+ { .mbps = 250, .reg = 0x33 },
+ { .mbps = 275, .reg = 0x04 },
+ { .mbps = 300, .reg = 0x14 },
+ { .mbps = 325, .reg = 0x25 },
+ { .mbps = 350, .reg = 0x35 },
+ { .mbps = 400, .reg = 0x05 },
+ { .mbps = 450, .reg = 0x16 },
+ { .mbps = 500, .reg = 0x26 },
+ { .mbps = 550, .reg = 0x37 },
+ { .mbps = 600, .reg = 0x07 },
+ { .mbps = 650, .reg = 0x18 },
+ { .mbps = 700, .reg = 0x28 },
+ { .mbps = 750, .reg = 0x39 },
+ { .mbps = 800, .reg = 0x09 },
+ { .mbps = 850, .reg = 0x19 },
+ { .mbps = 900, .reg = 0x29 },
+ { .mbps = 950, .reg = 0x3a },
+ { .mbps = 1000, .reg = 0x0a },
+ { .mbps = 1050, .reg = 0x1a },
+ { .mbps = 1100, .reg = 0x2a },
+ { .mbps = 1150, .reg = 0x3b },
+ { .mbps = 1200, .reg = 0x0b },
+ { .mbps = 1250, .reg = 0x1b },
+ { .mbps = 1300, .reg = 0x2b },
+ { .mbps = 1350, .reg = 0x3c },
+ { .mbps = 1400, .reg = 0x0c },
+ { .mbps = 1450, .reg = 0x1c },
+ { .mbps = 1500, .reg = 0x2c },
+ { .mbps = 1550, .reg = 0x3d },
+ { .mbps = 1600, .reg = 0x0d },
+ { .mbps = 1650, .reg = 0x1d },
+ { .mbps = 1700, .reg = 0x2e },
+ { .mbps = 1750, .reg = 0x3e },
+ { .mbps = 1800, .reg = 0x0e },
+ { .mbps = 1850, .reg = 0x1e },
+ { .mbps = 1900, .reg = 0x2f },
+ { .mbps = 1950, .reg = 0x3f },
+ { .mbps = 2000, .reg = 0x0f },
+ { .mbps = 2050, .reg = 0x40 },
+ { .mbps = 2100, .reg = 0x41 },
+ { .mbps = 2150, .reg = 0x42 },
+ { .mbps = 2200, .reg = 0x43 },
+ { .mbps = 2300, .reg = 0x45 },
+ { .mbps = 2350, .reg = 0x46 },
+ { .mbps = 2400, .reg = 0x47 },
+ { .mbps = 2450, .reg = 0x48 },
+ { .mbps = 2500, .reg = 0x49 },
+ { /* sentinel */ },
+};
+
static const struct rcsi2_mbps_reg hsfreqrange_h3_v3h_m3n[] = {
{ .mbps = 80, .reg = 0x00 },
{ .mbps = 90, .reg = 0x10 },
@@ -355,6 +452,7 @@ struct rcar_csi2_info {
unsigned int csi0clkfreqrange;
unsigned int num_channels;
bool clear_ulps;
+ bool use_isp;
};
struct rcar_csi2 {
@@ -370,9 +468,8 @@ struct rcar_csi2 {
struct v4l2_subdev *remote;
unsigned int remote_pad;
+ struct mutex lock; /* Protects mf and stream_count. */
struct v4l2_mbus_framefmt mf;
-
- struct mutex lock;
int stream_count;
unsigned short lanes;
@@ -553,6 +650,8 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
/* Code is validated in set_fmt. */
format = rcsi2_code_to_fmt(priv->mf.code);
+ if (!format)
+ return -EINVAL;
/*
* Enable all supported CSI-2 channels with virtual channel and
@@ -609,9 +708,12 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
rcsi2_write(priv, PHTC_REG, 0);
/* Configure */
- rcsi2_write(priv, VCDT_REG, vcdt);
- if (vcdt2)
- rcsi2_write(priv, VCDT2_REG, vcdt2);
+ if (!priv->info->use_isp) {
+ rcsi2_write(priv, VCDT_REG, vcdt);
+ if (vcdt2)
+ rcsi2_write(priv, VCDT2_REG, vcdt2);
+ }
+
/* Lanes are zero indexed. */
rcsi2_write(priv, LSWAP_REG,
LSWAP_L0SEL(priv->lane_swap[0] - 1) |
@@ -636,6 +738,11 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
rcsi2_write(priv, CSI0CLKFCPR_REG,
CSI0CLKFREQRANGE(priv->info->csi0clkfreqrange));
+ if (priv->info->use_isp)
+ rcsi2_write(priv, PHYFRX_REG,
+ PHYFRX_FORCERX_MODE_3 | PHYFRX_FORCERX_MODE_2 |
+ PHYFRX_FORCERX_MODE_1 | PHYFRX_FORCERX_MODE_0);
+
rcsi2_write(priv, PHYCNT_REG, phycnt);
rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN |
LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP);
@@ -647,6 +754,9 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
if (ret)
return ret;
+ if (priv->info->use_isp)
+ rcsi2_write(priv, PHYFRX_REG, 0);
+
/* Run post PHY start initialization, if needed. */
if (priv->info->phy_post_init) {
ret = priv->info->phy_post_init(priv);
@@ -725,6 +835,8 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
struct rcar_csi2 *priv = sd_to_csi2(sd);
struct v4l2_mbus_framefmt *framefmt;
+ mutex_lock(&priv->lock);
+
if (!rcsi2_code_to_fmt(format->format.code))
format->format.code = rcar_csi2_formats[0].code;
@@ -735,6 +847,8 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
*framefmt = format->format;
}
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -744,11 +858,15 @@ static int rcsi2_get_pad_format(struct v4l2_subdev *sd,
{
struct rcar_csi2 *priv = sd_to_csi2(sd);
+ mutex_lock(&priv->lock);
+
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
format->format = priv->mf;
else
format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -917,19 +1035,18 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
- v4l2_async_notifier_init(&priv->notifier);
+ v4l2_async_nf_init(&priv->notifier);
priv->notifier.ops = &rcar_csi2_notify_ops;
- asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(&priv->notifier, fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
- ret = v4l2_async_subdev_notifier_register(&priv->subdev,
- &priv->notifier);
+ ret = v4l2_async_subdev_nf_register(&priv->subdev, &priv->notifier);
if (ret)
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
return ret;
}
@@ -1063,6 +1180,62 @@ static int rcsi2_phy_post_init_v3m_e3(struct rcar_csi2 *priv)
return rcsi2_phtw_write_array(priv, step1);
}
+static int rcsi2_init_phtw_v3u(struct rcar_csi2 *priv,
+ unsigned int mbps)
+{
+ /* In case of 1500Mbps or less */
+ static const struct phtw_value step1[] = {
+ { .data = 0xcc, .code = 0xe2 },
+ { /* sentinel */ },
+ };
+
+ static const struct phtw_value step2[] = {
+ { .data = 0x01, .code = 0xe3 },
+ { .data = 0x11, .code = 0xe4 },
+ { .data = 0x01, .code = 0xe5 },
+ { /* sentinel */ },
+ };
+
+ /* In case of 1500Mbps or less */
+ static const struct phtw_value step3[] = {
+ { .data = 0x38, .code = 0x08 },
+ { /* sentinel */ },
+ };
+
+ static const struct phtw_value step4[] = {
+ { .data = 0x01, .code = 0x00 },
+ { .data = 0x4b, .code = 0xac },
+ { .data = 0x03, .code = 0x00 },
+ { .data = 0x80, .code = 0x07 },
+ { /* sentinel */ },
+ };
+
+ int ret;
+
+ if (mbps != 0 && mbps <= 1500)
+ ret = rcsi2_phtw_write_array(priv, step1);
+ else
+ ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3u, 0xe2);
+ if (ret)
+ return ret;
+
+ ret = rcsi2_phtw_write_array(priv, step2);
+ if (ret)
+ return ret;
+
+ if (mbps != 0 && mbps <= 1500) {
+ ret = rcsi2_phtw_write_array(priv, step3);
+ if (ret)
+ return ret;
+ }
+
+ return rcsi2_phtw_write_array(priv, step4);
+}
+
/* -----------------------------------------------------------------------------
* Platform Device Driver.
*/
@@ -1074,11 +1247,9 @@ static const struct media_entity_operations rcar_csi2_entity_ops = {
static int rcsi2_probe_resources(struct rcar_csi2 *priv,
struct platform_device *pdev)
{
- struct resource *res;
int irq, ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -1155,6 +1326,14 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = {
.num_channels = 2,
};
+static const struct rcar_csi2_info rcar_csi2_info_r8a779a0 = {
+ .init_phtw = rcsi2_init_phtw_v3u,
+ .hsfreqrange = hsfreqrange_v3u,
+ .csi0clkfreqrange = 0x20,
+ .clear_ulps = true,
+ .use_isp = true,
+};
+
static const struct of_device_id rcar_csi2_of_table[] = {
{
.compatible = "renesas,r8a774a1-csi2",
@@ -1200,6 +1379,10 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.compatible = "renesas,r8a77990-csi2",
.data = &rcar_csi2_info_r8a77990,
},
+ {
+ .compatible = "renesas,r8a779a0-csi2",
+ .data = &rcar_csi2_info_r8a779a0,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rcar_csi2_of_table);
@@ -1220,7 +1403,7 @@ static int rcsi2_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rcar_csi2 *priv;
- unsigned int i;
+ unsigned int i, num_pads;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -1245,14 +1428,14 @@ static int rcsi2_probe(struct platform_device *pdev)
ret = rcsi2_probe_resources(priv, pdev);
if (ret) {
dev_err(priv->dev, "Failed to get resources\n");
- return ret;
+ goto error_mutex;
}
platform_set_drvdata(pdev, priv);
ret = rcsi2_parse_dt(priv);
if (ret)
- return ret;
+ goto error_mutex;
priv->subdev.owner = THIS_MODULE;
priv->subdev.dev = &pdev->dev;
@@ -1265,28 +1448,32 @@ static int rcsi2_probe(struct platform_device *pdev)
priv->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
priv->subdev.entity.ops = &rcar_csi2_entity_ops;
+ num_pads = priv->info->use_isp ? 2 : NR_OF_RCAR_CSI2_PAD;
+
priv->pads[RCAR_CSI2_SINK].flags = MEDIA_PAD_FL_SINK;
- for (i = RCAR_CSI2_SOURCE_VC0; i < NR_OF_RCAR_CSI2_PAD; i++)
+ for (i = RCAR_CSI2_SOURCE_VC0; i < num_pads; i++)
priv->pads[i].flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_pads_init(&priv->subdev.entity, NR_OF_RCAR_CSI2_PAD,
+ ret = media_entity_pads_init(&priv->subdev.entity, num_pads,
priv->pads);
if (ret)
- goto error;
+ goto error_async;
pm_runtime_enable(&pdev->dev);
ret = v4l2_async_register_subdev(&priv->subdev);
if (ret < 0)
- goto error;
+ goto error_async;
dev_info(priv->dev, "%d lanes found\n", priv->lanes);
return 0;
-error:
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+error_async:
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+error_mutex:
+ mutex_destroy(&priv->lock);
return ret;
}
@@ -1295,12 +1482,14 @@ static int rcsi2_remove(struct platform_device *pdev)
{
struct rcar_csi2 *priv = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
v4l2_async_unregister_subdev(&priv->subdev);
pm_runtime_disable(&pdev->dev);
+ mutex_destroy(&priv->lock);
+
return 0;
}
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index f5f722ab1d4e..25ead9333d00 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -114,6 +114,7 @@
/* Video n Data Mode Register bits */
#define VNDMR_A8BIT(n) (((n) & 0xff) << 24)
#define VNDMR_A8BIT_MASK (0xff << 24)
+#define VNDMR_YMODE_Y8 (1 << 12)
#define VNDMR_EXRGB (1 << 8)
#define VNDMR_BPSM (1 << 4)
#define VNDMR_ABIT (1 << 2)
@@ -603,6 +604,7 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_GREY:
stride /= 2;
break;
default:
@@ -695,6 +697,7 @@ static int rvin_setup(struct rvin_dev *vin)
case MEDIA_BUS_FMT_SGBRG8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
vnmc |= VNMC_INF_RAW8;
break;
default:
@@ -774,6 +777,14 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_PIX_FMT_SRGGB8:
dmr = 0;
break;
+ case V4L2_PIX_FMT_GREY:
+ if (input_is_yuv) {
+ dmr = VNDMR_DTMD_YCSEP | VNDMR_YMODE_Y8;
+ output_is_yuv = true;
+ } else {
+ dmr = 0;
+ }
+ break;
default:
vin_err(vin, "Invalid pixelformat (0x%x)\n",
vin->format.pixelformat);
@@ -783,16 +794,18 @@ static int rvin_setup(struct rvin_dev *vin)
/* Always update on field change */
vnmc |= VNMC_VUP;
- /* If input and output use the same colorspace, use bypass mode */
- if (input_is_yuv == output_is_yuv)
- vnmc |= VNMC_BPS;
-
- if (vin->info->model == RCAR_GEN3) {
- /* Select between CSI-2 and parallel input */
- if (vin->is_csi)
- vnmc &= ~VNMC_DPINE;
- else
- vnmc |= VNMC_DPINE;
+ if (!vin->info->use_isp) {
+ /* If input and output use the same colorspace, use bypass mode */
+ if (input_is_yuv == output_is_yuv)
+ vnmc |= VNMC_BPS;
+
+ if (vin->info->model == RCAR_GEN3) {
+ /* Select between CSI-2 and parallel input */
+ if (vin->is_csi)
+ vnmc &= ~VNMC_DPINE;
+ else
+ vnmc |= VNMC_DPINE;
+ }
}
/* Progressive or interlaced mode */
@@ -904,7 +917,8 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
vin->format.sizeimage / 2;
break;
}
- } else if (vin->state != RUNNING || list_empty(&vin->buf_list)) {
+ } else if ((vin->state != STOPPED && vin->state != RUNNING) ||
+ list_empty(&vin->buf_list)) {
vin->buf_hw[slot].buffer = NULL;
vin->buf_hw[slot].type = FULL;
phys_addr = vin->scratch_phys;
@@ -1145,6 +1159,10 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
if (vin->format.pixelformat != V4L2_PIX_FMT_SRGGB8)
return -EPIPE;
break;
+ case MEDIA_BUS_FMT_Y8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_GREY)
+ return -EPIPE;
+ break;
default:
return -EPIPE;
}
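
Each media bus code is tied to the single pixel format it may feed, so the new MEDIA_BUS_FMT_Y8_1X8 case only accepts V4L2_PIX_FMT_GREY. The same contract could be expressed as a lookup table; an editor's sketch of that alternative (not what the driver does):

static const struct {
	u32 code;	/* MEDIA_BUS_FMT_* on the subdev pad */
	u32 fourcc;	/* the only V4L2_PIX_FMT_* it may produce */
} example_raw_pairs[] = {
	{ MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8 },
	{ MEDIA_BUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY },
};
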
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 0d141155f0e3..a5bfa76fdac6 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -82,6 +82,10 @@ static const struct rvin_video_format rvin_formats[] = {
.fourcc = V4L2_PIX_FMT_SRGGB8,
.bpp = 1,
},
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .bpp = 1,
+ },
};
const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
@@ -523,6 +527,24 @@ static int rvin_s_selection(struct file *file, void *fh,
return 0;
}
+static int rvin_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_g_parm_cap(&vin->vdev, sd, parm);
+}
+
+static int rvin_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_s_parm_cap(&vin->vdev, sd, parm);
+}
+
static int rvin_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
@@ -739,6 +761,9 @@ static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
.vidioc_g_selection = rvin_g_selection,
.vidioc_s_selection = rvin_s_selection,
+ .vidioc_g_parm = rvin_g_parm,
+ .vidioc_s_parm = rvin_s_parm,
+
.vidioc_g_pixelaspect = rvin_g_pixelaspect,
.vidioc_enum_input = rvin_enum_input,
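
Wiring vidioc_g/s_parm to v4l2_g/s_parm_cap() forwards frame-interval handling to the source subdevice, so the capture frame rate becomes reachable through the video node. A hypothetical userspace sketch (the device path is an assumption):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_30fps(const char *dev)
{
	struct v4l2_streamparm parm = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
	};
	int fd = open(dev, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;

	parm.parm.capture.timeperframe.numerator = 1;
	parm.parm.capture.timeperframe.denominator = 30;
	ret = ioctl(fd, VIDIOC_S_PARM, &parm);
	close(fd);
	return ret;
}

/* e.g. set_30fps("/dev/video0"); */
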
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index b263ead4db2b..6c06320174a2 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -29,7 +29,7 @@
#define HW_BUFFER_MASK 0x7f
/* Max number of VIN instances that can be in a system */
-#define RCAR_VIN_NUM 8
+#define RCAR_VIN_NUM 32
struct rvin_group;
@@ -48,6 +48,18 @@ enum rvin_csi_id {
RVIN_CSI_MAX,
};
+enum rvin_isp_id {
+ RVIN_ISP0,
+ RVIN_ISP1,
+ RVIN_ISP2,
+ RVIN_ISP4,
+ RVIN_ISP_MAX,
+};
+
+#define RVIN_REMOTES_MAX \
+ (((unsigned int)RVIN_CSI_MAX) > ((unsigned int)RVIN_ISP_MAX) ? \
+ RVIN_CSI_MAX : RVIN_ISP_MAX)
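
RVIN_REMOTES_MAX is a compile-time maximum of two enum constants so it can size the remotes[] array declared further down; the kernel's max() helper expands to a statement expression and is not a constant expression at file scope, which presumably rules it out here, and the casts keep the comparison in unsigned arithmetic. Equivalent toy illustration (names are hypothetical):

enum { EXAMPLE_A_MAX = 5, EXAMPLE_B_MAX = 4 };

#define EXAMPLE_MAX \
	(((unsigned int)EXAMPLE_A_MAX) > ((unsigned int)EXAMPLE_B_MAX) ? \
	 EXAMPLE_A_MAX : EXAMPLE_B_MAX)

static int example_array[EXAMPLE_MAX];	/* 5 entries */
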
+
/**
* enum rvin_dma_state - DMA states
* @STOPPED: No operation in progress
@@ -147,6 +159,7 @@ struct rvin_group_route {
* struct rvin_info - Information about the particular VIN implementation
* @model: VIN model
* @use_mc: use media controller instead of controlling subdevice
+ * @use_isp: the VIN is connected to the ISP and not to the CSI-2
* @nv12: support outputting NV12 pixel format
* @max_width: max input width the VIN supports
* @max_height: max input height the VIN supports
@@ -156,6 +169,7 @@ struct rvin_group_route {
struct rvin_info {
enum model_id model;
bool use_mc;
+ bool use_isp;
bool nv12;
unsigned int max_width;
@@ -267,8 +281,9 @@ struct rvin_dev {
* @count: number of enabled VIN instances found in DT
* @notifier: group notifier for CSI-2 async subdevices
* @vin: VIN instances which are part of the group
- * @csi: array of pairs of fwnode and subdev pointers
- * to all CSI-2 subdevices.
+ * @link_setup: Callback to create all links for the media graph
+ * @remotes: array of pairs of fwnode and subdev pointers
+ * to all remote subdevices.
*/
struct rvin_group {
struct kref refcount;
@@ -280,10 +295,12 @@ struct rvin_group {
struct v4l2_async_notifier notifier;
struct rvin_dev *vin[RCAR_VIN_NUM];
+ int (*link_setup)(struct rvin_dev *vin);
+
struct {
struct v4l2_async_subdev *asd;
struct v4l2_subdev *subdev;
- } csi[RVIN_CSI_MAX];
+ } remotes[RVIN_REMOTES_MAX];
};
int rvin_dma_register(struct rvin_dev *vin, int irq);
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 1e3b68a8743a..9a0982fa5c6b 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -1212,7 +1212,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
struct fwnode_handle *fwnode, *ep;
struct v4l2_async_subdev *asd;
- v4l2_async_notifier_init(notifier);
+ v4l2_async_nf_init(notifier);
ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(sdr->dev->of_node),
NULL);
@@ -1229,8 +1229,8 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
return -EINVAL;
}
- asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
@@ -1346,7 +1346,7 @@ static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
sdr->notifier.ops = &rcar_drif_notify_ops;
/* Register notifier */
- ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier);
+ ret = v4l2_async_nf_register(&sdr->v4l2_dev, &sdr->notifier);
if (ret < 0) {
dev_err(sdr->dev, "failed: notifier register ret %d\n", ret);
goto cleanup;
@@ -1355,7 +1355,7 @@ static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
return ret;
cleanup:
- v4l2_async_notifier_cleanup(&sdr->notifier);
+ v4l2_async_nf_cleanup(&sdr->notifier);
error:
v4l2_device_unregister(&sdr->v4l2_dev);
@@ -1365,8 +1365,8 @@ error:
/* V4L2 SDR device remove */
static void rcar_drif_sdr_remove(struct rcar_drif_sdr *sdr)
{
- v4l2_async_notifier_unregister(&sdr->notifier);
- v4l2_async_notifier_cleanup(&sdr->notifier);
+ v4l2_async_nf_unregister(&sdr->notifier);
+ v4l2_async_nf_cleanup(&sdr->notifier);
v4l2_device_unregister(&sdr->v4l2_dev);
}
@@ -1395,8 +1395,7 @@ static int rcar_drif_probe(struct platform_device *pdev)
}
/* Register map */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ch->base = devm_ioremap_resource(&pdev->dev, res);
+ ch->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ch->base))
return PTR_ERR(ch->base);
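
devm_platform_get_and_ioremap_resource() is shorthand for the two-call sequence it replaces here, while also handing the resource back through its last argument. Roughly the following (sketch, assuming the usual platform-device context):

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_ioremap(struct platform_device *pdev,
				     struct resource **res)
{
	/* Look up the first MEM resource, then do a managed ioremap. */
	*res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	return devm_ioremap_resource(&pdev->dev, *res);
}
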
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 89aac60066d9..19de3c19bcca 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2256,7 +2256,6 @@ static int fdp1_probe(struct platform_device *pdev)
struct fdp1_dev *fdp1;
struct video_device *vfd;
struct device_node *fcp_node;
- struct resource *res;
struct clk *clk;
unsigned int i;
@@ -2283,8 +2282,7 @@ static int fdp1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fdp1);
/* Memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
+ fdp1->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fdp1->regs))
return PTR_ERR(fdp1->regs);
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index f57158bf2b11..56bb464629ed 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1590,7 +1590,6 @@ MODULE_DEVICE_TABLE(of, jpu_dt_ids);
static int jpu_probe(struct platform_device *pdev)
{
struct jpu *jpu;
- struct resource *res;
int ret;
unsigned int i;
@@ -1603,8 +1602,7 @@ static int jpu_probe(struct platform_device *pdev)
jpu->dev = &pdev->dev;
/* memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpu->regs = devm_ioremap_resource(&pdev->dev, res);
+ jpu->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpu->regs))
return PTR_ERR(jpu->regs);
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index f432032c7084..2e8dbacc414e 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -1513,12 +1513,12 @@ static int ceu_parse_platform_data(struct ceu_device *ceudev,
/* Setup the ceu subdevice and the async subdevice. */
async_sd = &pdata->subdevs[i];
- ceu_sd = v4l2_async_notifier_add_i2c_subdev(&ceudev->notifier,
- async_sd->i2c_adapter_id,
- async_sd->i2c_address,
- struct ceu_subdev);
+ ceu_sd = v4l2_async_nf_add_i2c(&ceudev->notifier,
+ async_sd->i2c_adapter_id,
+ async_sd->i2c_address,
+ struct ceu_subdev);
if (IS_ERR(ceu_sd)) {
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
return PTR_ERR(ceu_sd);
}
ceu_sd->mbus_flags = async_sd->flags;
@@ -1576,9 +1576,9 @@ static int ceu_parse_dt(struct ceu_device *ceudev)
}
/* Setup the ceu subdevice and the async subdevice. */
- ceu_sd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &ceudev->notifier, of_fwnode_handle(ep),
- struct ceu_subdev);
+ ceu_sd = v4l2_async_nf_add_fwnode_remote(&ceudev->notifier,
+ of_fwnode_handle(ep),
+ struct ceu_subdev);
if (IS_ERR(ceu_sd)) {
ret = PTR_ERR(ceu_sd);
goto error_cleanup;
@@ -1592,7 +1592,7 @@ static int ceu_parse_dt(struct ceu_device *ceudev)
return num_ep;
error_cleanup:
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
of_node_put(ep);
return ret;
}
@@ -1628,7 +1628,6 @@ static int ceu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct ceu_data *ceu_data;
struct ceu_device *ceudev;
- struct resource *res;
unsigned int irq;
int num_subdevs;
int ret;
@@ -1644,8 +1643,7 @@ static int ceu_probe(struct platform_device *pdev)
spin_lock_init(&ceudev->lock);
mutex_init(&ceudev->mlock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ceudev->base = devm_ioremap_resource(dev, res);
+ ceudev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ceudev->base)) {
ret = PTR_ERR(ceudev->base);
goto error_free_ceudev;
@@ -1669,7 +1667,7 @@ static int ceu_probe(struct platform_device *pdev)
if (ret)
goto error_pm_disable;
- v4l2_async_notifier_init(&ceudev->notifier);
+ v4l2_async_nf_init(&ceudev->notifier);
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
ceu_data = of_device_get_match_data(dev);
@@ -1691,8 +1689,7 @@ static int ceu_probe(struct platform_device *pdev)
ceudev->notifier.v4l2_dev = &ceudev->v4l2_dev;
ceudev->notifier.ops = &ceu_notify_ops;
- ret = v4l2_async_notifier_register(&ceudev->v4l2_dev,
- &ceudev->notifier);
+ ret = v4l2_async_nf_register(&ceudev->v4l2_dev, &ceudev->notifier);
if (ret)
goto error_cleanup;
@@ -1701,7 +1698,7 @@ static int ceu_probe(struct platform_device *pdev)
return 0;
error_cleanup:
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
error_v4l2_unregister:
v4l2_device_unregister(&ceudev->v4l2_dev);
error_pm_disable:
@@ -1718,9 +1715,9 @@ static int ceu_remove(struct platform_device *pdev)
pm_runtime_disable(ceudev->dev);
- v4l2_async_notifier_unregister(&ceudev->notifier);
+ v4l2_async_nf_unregister(&ceudev->notifier);
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
v4l2_device_unregister(&ceudev->v4l2_dev);
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 6759091b15e0..4de5e8d2b261 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -800,7 +800,6 @@ static int rga_probe(struct platform_device *pdev)
{
struct rockchip_rga *rga;
struct video_device *vfd;
- struct resource *res;
int ret = 0;
int irq;
@@ -821,9 +820,7 @@ static int rga_probe(struct platform_device *pdev)
pm_runtime_enable(rga->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- rga->regs = devm_ioremap_resource(rga->dev, res);
+ rga->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rga->regs)) {
ret = PTR_ERR(rga->regs);
goto err_put_clk;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index 41988eb0ec0a..768987d5f2dd 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -685,12 +685,17 @@ static void rkisp1_handle_buffer(struct rkisp1_capture *cap)
spin_unlock(&cap->buf.lock);
}
-void rkisp1_capture_isr(struct rkisp1_device *rkisp1)
+irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
unsigned int i;
u32 status;
status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
+ if (!status)
+ return IRQ_NONE;
+
rkisp1_write(rkisp1, status, RKISP1_CIF_MI_ICR);
for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); ++i) {
@@ -718,6 +723,8 @@ void rkisp1_capture_isr(struct rkisp1_device *rkisp1)
cap->is_streaming = false;
wake_up(&cap->done);
}
+
+ return IRQ_HANDLED;
}
/* ----------------------------------------------------------------------------
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index bb73f4e17b66..d8fa3f1a5a85 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -12,6 +12,7 @@
#define _RKISP1_COMMON_H
#include <linux/clk.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/rkisp1-config.h>
#include <media/media-device.h>
@@ -231,6 +232,16 @@ struct rkisp1_capture {
} pix;
};
+struct rkisp1_stats;
+struct rkisp1_stats_ops {
+ void (*get_awb_meas)(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf);
+ void (*get_aec_meas)(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf);
+ void (*get_hst_meas)(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf);
+};
+
/*
* struct rkisp1_stats - ISP Statistics device
*
@@ -243,17 +254,42 @@ struct rkisp1_capture {
struct rkisp1_stats {
struct rkisp1_vdev_node vnode;
struct rkisp1_device *rkisp1;
+ const struct rkisp1_stats_ops *ops;
spinlock_t lock; /* locks the buffers list 'stats' */
struct list_head stat;
struct v4l2_format vdev_fmt;
};
+struct rkisp1_params;
+struct rkisp1_params_ops {
+ void (*lsc_matrix_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *pconfig);
+ void (*goc_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_goc_config *arg);
+ void (*awb_meas_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg);
+ void (*awb_meas_enable)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg,
+ bool en);
+ void (*awb_gain_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_gain_config *arg);
+ void (*aec_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_aec_config *arg);
+ void (*hst_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg);
+ void (*hst_enable)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg, bool en);
+ void (*afm_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_afc_config *arg);
+};
+
/*
* struct rkisp1_params - ISP input parameters device
*
* @vnode: video node
* @rkisp1: pointer to the rkisp1 device
+ * @ops: pointer to the variant-specific operations
* @config_lock: locks the buffer list 'params'
* @params: queue of rkisp1_buffer
* @vdev_fmt: v4l2_format of the metadata format
@@ -263,6 +299,7 @@ struct rkisp1_stats {
struct rkisp1_params {
struct rkisp1_vdev_node vnode;
struct rkisp1_device *rkisp1;
+ const struct rkisp1_params_ops *ops;
spinlock_t config_lock; /* locks the buffers list 'params' */
struct list_head params;
@@ -348,7 +385,6 @@ struct rkisp1_debug {
*/
struct rkisp1_device {
void __iomem *base_addr;
- int irq;
struct device *dev;
unsigned int clk_size;
struct clk_bulk_data clks[RKISP1_MAX_BUS_CLK];
@@ -456,9 +492,9 @@ void rkisp1_params_configure(struct rkisp1_params *params,
void rkisp1_params_disable(struct rkisp1_params *params);
/* irq handlers */
-void rkisp1_isp_isr(struct rkisp1_device *rkisp1);
-void rkisp1_mipi_isr(struct rkisp1_device *rkisp1);
-void rkisp1_capture_isr(struct rkisp1_device *rkisp1);
+irqreturn_t rkisp1_isp_isr(int irq, void *ctx);
+irqreturn_t rkisp1_mipi_isr(int irq, void *ctx);
+irqreturn_t rkisp1_capture_isr(int irq, void *ctx);
void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris);
void rkisp1_params_isr(struct rkisp1_device *rkisp1);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 7474150b94ed..50b166c49a03 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -101,9 +101,16 @@
* +-----------+ +-----------+
*/
+struct rkisp1_isr_data {
+ const char *name;
+ irqreturn_t (*isr)(int irq, void *ctx);
+};
+
struct rkisp1_match_data {
const char * const *clks;
- unsigned int size;
+ unsigned int clk_size;
+ const struct rkisp1_isr_data *isrs;
+ unsigned int isr_size;
enum rkisp1_cif_isp_version isp_ver;
};
@@ -246,7 +253,7 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
unsigned int next_id = 0;
int ret;
- v4l2_async_notifier_init(ntf);
+ v4l2_async_nf_init(ntf);
while (1) {
struct v4l2_fwnode_endpoint vep = {
@@ -265,8 +272,9 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
if (ret)
goto err_parse;
- rk_asd = v4l2_async_notifier_add_fwnode_remote_subdev(ntf, ep,
- struct rkisp1_sensor_async);
+ rk_asd = v4l2_async_nf_add_fwnode_remote(ntf, ep,
+ struct rkisp1_sensor_async);
if (IS_ERR(rk_asd)) {
ret = PTR_ERR(rk_asd);
goto err_parse;
@@ -286,16 +294,16 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
continue;
err_parse:
fwnode_handle_put(ep);
- v4l2_async_notifier_cleanup(ntf);
+ v4l2_async_nf_cleanup(ntf);
return ret;
}
if (next_id == 0)
dev_dbg(rkisp1->dev, "no remote subdevice found\n");
ntf->ops = &rkisp1_subdev_notifier_ops;
- ret = v4l2_async_notifier_register(&rkisp1->v4l2_dev, ntf);
+ ret = v4l2_async_nf_register(&rkisp1->v4l2_dev, ntf);
if (ret) {
- v4l2_async_notifier_cleanup(ntf);
+ v4l2_async_nf_cleanup(ntf);
return ret;
}
return 0;
@@ -385,36 +393,64 @@ err_unreg_isp_subdev:
static irqreturn_t rkisp1_isr(int irq, void *ctx)
{
- struct device *dev = ctx;
- struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
-
/*
* Call rkisp1_capture_isr() first so that a frame which completed
* with the current frame_sequence number is handled before
* rkisp1_isp_isr() potentially increments that number at vertical
* sync.
*/
- rkisp1_capture_isr(rkisp1);
- rkisp1_isp_isr(rkisp1);
- rkisp1_mipi_isr(rkisp1);
+ rkisp1_capture_isr(irq, ctx);
+ rkisp1_isp_isr(irq, ctx);
+ rkisp1_mipi_isr(irq, ctx);
return IRQ_HANDLED;
}
+static const char * const px30_isp_clks[] = {
+ "isp",
+ "aclk",
+ "hclk",
+ "pclk",
+};
+
+static const struct rkisp1_isr_data px30_isp_isrs[] = {
+ { "isp", rkisp1_isp_isr },
+ { "mi", rkisp1_capture_isr },
+ { "mipi", rkisp1_mipi_isr },
+};
+
+static const struct rkisp1_match_data px30_isp_match_data = {
+ .clks = px30_isp_clks,
+ .clk_size = ARRAY_SIZE(px30_isp_clks),
+ .isrs = px30_isp_isrs,
+ .isr_size = ARRAY_SIZE(px30_isp_isrs),
+ .isp_ver = RKISP1_V12,
+};
+
static const char * const rk3399_isp_clks[] = {
"isp",
"aclk",
"hclk",
};
+static const struct rkisp1_isr_data rk3399_isp_isrs[] = {
+ { NULL, rkisp1_isr },
+};
+
static const struct rkisp1_match_data rk3399_isp_match_data = {
.clks = rk3399_isp_clks,
- .size = ARRAY_SIZE(rk3399_isp_clks),
+ .clk_size = ARRAY_SIZE(rk3399_isp_clks),
+ .isrs = rk3399_isp_isrs,
+ .isr_size = ARRAY_SIZE(rk3399_isp_isrs),
.isp_ver = RKISP1_V10,
};
static const struct of_device_id rkisp1_of_match[] = {
{
+ .compatible = "rockchip,px30-cif-isp",
+ .data = &px30_isp_match_data,
+ },
+ {
.compatible = "rockchip,rk3399-cif-isp",
.data = &rk3399_isp_match_data,
},
@@ -478,25 +514,27 @@ static int rkisp1_probe(struct platform_device *pdev)
if (IS_ERR(rkisp1->base_addr))
return PTR_ERR(rkisp1->base_addr);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(dev, irq, rkisp1_isr, IRQF_SHARED,
- dev_driver_string(dev), dev);
- if (ret) {
- dev_err(dev, "request irq failed: %d\n", ret);
- return ret;
+ for (i = 0; i < match_data->isr_size; i++) {
+ irq = (match_data->isrs[i].name) ?
+ platform_get_irq_byname(pdev, match_data->isrs[i].name) :
+ platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, match_data->isrs[i].isr, IRQF_SHARED,
+ dev_driver_string(dev), dev);
+ if (ret) {
+ dev_err(dev, "request irq failed: %d\n", ret);
+ return ret;
+ }
}
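
On PX30 each block gets its own line looked up by name, while rk3399 keeps the single combined line (name == NULL, index 0); with IRQF_SHARED the per-block handlers must report whether the interrupt was theirs. Sketch of the contract the converted handlers now follow (read_status/ack/handle are hypothetical):

static u32 read_status(void *ctx);
static void ack(void *ctx, u32 status);
static void handle(void *ctx, u32 status);

static irqreturn_t example_isr(int irq, void *ctx)
{
	u32 status = read_status(ctx);

	if (!status)
		return IRQ_NONE;	/* not ours; let sharers run */

	ack(ctx, status);		/* clear before handling */
	handle(ctx, status);
	return IRQ_HANDLED;
}
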
- rkisp1->irq = irq;
-
- for (i = 0; i < match_data->size; i++)
+ for (i = 0; i < match_data->clk_size; i++)
rkisp1->clks[i].id = match_data->clks[i];
- ret = devm_clk_bulk_get(dev, match_data->size, rkisp1->clks);
+ ret = devm_clk_bulk_get(dev, match_data->clk_size, rkisp1->clks);
if (ret)
return ret;
- rkisp1->clk_size = match_data->size;
+ rkisp1->clk_size = match_data->clk_size;
pm_runtime_enable(&pdev->dev);
@@ -542,8 +580,8 @@ static int rkisp1_remove(struct platform_device *pdev)
{
struct rkisp1_device *rkisp1 = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&rkisp1->notifier);
- v4l2_async_notifier_cleanup(&rkisp1->notifier);
+ v4l2_async_nf_unregister(&rkisp1->notifier);
+ v4l2_async_nf_cleanup(&rkisp1->notifier);
rkisp1_params_unregister(rkisp1);
rkisp1_stats_unregister(rkisp1);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index d596bc040005..2a35bf24e54e 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -414,6 +414,10 @@ static int rkisp1_config_mipi(struct rkisp1_device *rkisp1)
rkisp1_write(rkisp1, mipi_ctrl, RKISP1_CIF_MIPI_CTRL);
+ /* V12 could also use a newer csi2-host, but we don't want that yet */
+ if (rkisp1->media_dev.hw_revision == RKISP1_V12)
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_CSI0_CTRL0);
+
/* Configure Data Type and Virtual Channel */
rkisp1_write(rkisp1,
RKISP1_CIF_MIPI_DATA_SEL_DT(sink_fmt->mipi_dt) |
@@ -533,6 +537,15 @@ static void rkisp1_config_clk(struct rkisp1_device *rkisp1)
RKISP1_CIF_ICCL_DCROP_CLK;
rkisp1_write(rkisp1, val, RKISP1_CIF_ICCL);
+
+ /* ensure the SP and MP paths can run at the same time on V12 */
+ if (rkisp1->media_dev.hw_revision == RKISP1_V12) {
+ val = RKISP1_CIF_CLK_CTRL_MI_Y12 | RKISP1_CIF_CLK_CTRL_MI_SP |
+ RKISP1_CIF_CLK_CTRL_MI_RAW0 | RKISP1_CIF_CLK_CTRL_MI_RAW1 |
+ RKISP1_CIF_CLK_CTRL_MI_READ | RKISP1_CIF_CLK_CTRL_MI_RAWRD |
+ RKISP1_CIF_CLK_CTRL_CP | RKISP1_CIF_CLK_CTRL_IE;
+ rkisp1_write(rkisp1, val, RKISP1_CIF_VI_ISP_CLK_CTRL_V12);
+ }
}
static void rkisp1_isp_start(struct rkisp1_device *rkisp1)
@@ -1106,13 +1119,15 @@ void rkisp1_isp_unregister(struct rkisp1_device *rkisp1)
* Interrupt handlers
*/
-void rkisp1_mipi_isr(struct rkisp1_device *rkisp1)
+irqreturn_t rkisp1_mipi_isr(int irq, void *ctx)
{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 val, status;
status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
if (!status)
- return;
+ return IRQ_NONE;
rkisp1_write(rkisp1, status, RKISP1_CIF_MIPI_ICR);
@@ -1147,6 +1162,8 @@ void rkisp1_mipi_isr(struct rkisp1_device *rkisp1)
} else {
rkisp1->debug.mipi_error++;
}
+
+ return IRQ_HANDLED;
}
static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
@@ -1159,13 +1176,15 @@ static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
v4l2_event_queue(isp->sd.devnode, &event);
}
-void rkisp1_isp_isr(struct rkisp1_device *rkisp1)
+irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 status, isp_err;
status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
if (!status)
- return;
+ return IRQ_NONE;
rkisp1_write(rkisp1, status, RKISP1_CIF_ISP_ICR);
@@ -1207,4 +1226,6 @@ void rkisp1_isp_isr(struct rkisp1_device *rkisp1)
*/
rkisp1_params_isr(rkisp1);
}
+
+ return IRQ_HANDLED;
}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index 8fa5b0abf1f9..8f62f09e635f 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -185,8 +185,8 @@ static void rkisp1_bls_config(struct rkisp1_params *params,
/* ISP LS correction interface function */
static void
-rkisp1_lsc_correct_matrix_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_lsc_config *pconfig)
+rkisp1_lsc_matrix_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *pconfig)
{
unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data;
@@ -212,39 +212,111 @@ rkisp1_lsc_correct_matrix_config(struct rkisp1_params *params,
* DWORDs (2nd value of last DWORD unused)
*/
for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i][j],
- pconfig->r_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j],
+ pconfig->r_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i][j],
- pconfig->gr_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j],
+ pconfig->gr_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i][j],
- pconfig->gb_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j],
+ pconfig->gb_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i][j],
- pconfig->b_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j],
+ pconfig->b_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
}
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ }
+ isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+ RKISP1_CIF_ISP_LSC_TABLE_0 :
+ RKISP1_CIF_ISP_LSC_TABLE_1;
+ rkisp1_write(params->rkisp1, isp_lsc_table_sel,
+ RKISP1_CIF_ISP_LSC_TABLE_SEL);
+}
+
+static void
+rkisp1_lsc_matrix_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *pconfig)
+{
+ unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data;
+
+ isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
+
+ /* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = (17 * 18) >> 1 */
+ sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+ RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
+ RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR);
+
+ /* program data tables (table size is 9 * 17 = 153) */
+ for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) {
+ /*
+ * 17 sectors with 2 values in one DWORD = 9
+ * DWORDs (2nd value of last DWORD unused)
+ */
+ for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->r_data_tbl[i][j],
+ pconfig->r_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->gr_data_tbl[i][j],
+ pconfig->gr_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->gb_data_tbl[i][j],
+ pconfig->gb_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->b_data_tbl[i][j],
+ pconfig->b_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ }
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->r_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gr_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gb_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->b_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
}
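
Both variants stream each 17-sample LSC table row as pairs packed two to a 32-bit write: 8 full words plus one word whose second half is unused, the "9 DWORDs" of the comments above. Only the per-sample field width differs between the _V10 and _V12 packing macros (see rkisp1-regs.h). Toy illustration, assuming a 12-bit field:

#define EXAMPLE_LSC_PACK(v0, v1) \
	(((u32)(v0) & 0xfff) | (((u32)(v1) & 0xfff) << 12))

/* row[0..15] go out as EXAMPLE_LSC_PACK(row[j], row[j + 1]) for even j,
 * and row[16] rides alone as EXAMPLE_LSC_PACK(row[16], 0). */
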
@@ -265,7 +337,7 @@ static void rkisp1_lsc_config(struct rkisp1_params *params,
lsc_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_CTRL);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
RKISP1_CIF_ISP_LSC_CTRL_ENA);
- rkisp1_lsc_correct_matrix_config(params, arg);
+ params->ops->lsc_matrix_config(params, arg);
for (i = 0; i < RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE / 2; i++) {
/* program x size tables */
@@ -382,18 +454,37 @@ static void rkisp1_sdg_config(struct rkisp1_params *params,
}
/* ISP GAMMA correction interface function */
-static void rkisp1_goc_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_goc_config *arg)
+static void rkisp1_goc_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_goc_config *arg)
{
unsigned int i;
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
- rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE);
+ rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10);
for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++)
rkisp1_write(params->rkisp1, arg->gamma_y[i],
- RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4);
+ RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 + i * 4);
+}
+
+static void rkisp1_goc_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_goc_config *arg)
+{
+ unsigned int i;
+ u32 value;
+
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+ rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12);
+
+ for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 / 2; i++) {
+ value = RKISP1_CIF_ISP_GAMMA_VALUE_V12(
+ arg->gamma_y[2 * i + 1],
+ arg->gamma_y[2 * i]);
+ rkisp1_write(params->rkisp1, value,
+ RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12 + i * 4);
+ }
}
/* ISP Cross Talk */
@@ -433,8 +524,8 @@ static void rkisp1_ctk_enable(struct rkisp1_params *params, bool en)
}
/* ISP White Balance Mode */
-static void rkisp1_awb_meas_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_awb_meas_config *arg)
+static void rkisp1_awb_meas_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg)
{
u32 reg_val = 0;
/* based on the mode, configure the awb module */
@@ -442,43 +533,111 @@ static void rkisp1_awb_meas_config(struct rkisp1_params *params,
/* Reference Cb and Cr */
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
- arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF);
+ arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF_V10);
/* Yc Threshold */
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
- arg->min_c, RKISP1_CIF_ISP_AWB_THRESH);
+ arg->min_c, RKISP1_CIF_ISP_AWB_THRESH_V10);
}
- reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP);
+ reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10);
if (arg->enable_ymax_cmp)
reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
else
reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
- rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP);
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
/* window offset */
rkisp1_write(params->rkisp1,
- arg->awb_wnd.v_offs, RKISP1_CIF_ISP_AWB_WND_V_OFFS);
+ arg->awb_wnd.v_offs, RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10);
rkisp1_write(params->rkisp1,
- arg->awb_wnd.h_offs, RKISP1_CIF_ISP_AWB_WND_H_OFFS);
+ arg->awb_wnd.h_offs, RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10);
/* AWB window size */
rkisp1_write(params->rkisp1,
- arg->awb_wnd.v_size, RKISP1_CIF_ISP_AWB_WND_V_SIZE);
+ arg->awb_wnd.v_size, RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10);
rkisp1_write(params->rkisp1,
- arg->awb_wnd.h_size, RKISP1_CIF_ISP_AWB_WND_H_SIZE);
+ arg->awb_wnd.h_size, RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10);
/* Number of frames */
rkisp1_write(params->rkisp1,
- arg->frames, RKISP1_CIF_ISP_AWB_FRAMES);
+ arg->frames, RKISP1_CIF_ISP_AWB_FRAMES_V10);
+}
+
+static void rkisp1_awb_meas_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg)
+{
+ u32 reg_val = 0;
+ /* based on the mode, configure the awb module */
+ if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_YCBCR) {
+ /* Reference Cb and Cr */
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
+ arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF_V12);
+ /* Yc Threshold */
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
+ RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
+ RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
+ arg->min_c, RKISP1_CIF_ISP_AWB_THRESH_V12);
+ }
+
+ reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12);
+ if (arg->enable_ymax_cmp)
+ reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
+ else
+ reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
+ reg_val &= ~RKISP1_CIF_ISP_AWB_SET_FRAMES_MASK_V12;
+ reg_val |= RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(arg->frames);
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
+
+ /* window offset */
+ rkisp1_write(params->rkisp1,
+ arg->awb_wnd.v_offs << 16 |
+ arg->awb_wnd.h_offs,
+ RKISP1_CIF_ISP_AWB_OFFS_V12);
+ /* AWB window size */
+ rkisp1_write(params->rkisp1,
+ arg->awb_wnd.v_size << 16 |
+ arg->awb_wnd.h_size,
+ RKISP1_CIF_ISP_AWB_SIZE_V12);
+}
+
+static void
+rkisp1_awb_meas_enable_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg,
+ bool en)
+{
+ u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10);
+
+ /* switch off */
+ reg_val &= RKISP1_CIF_ISP_AWB_MODE_MASK_NONE;
+
+ if (en) {
+ if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_RGB)
+ reg_val |= RKISP1_CIF_ISP_AWB_MODE_RGB_EN;
+ else
+ reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
+
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
+
+ /* Measurements require the AWB block to be active. */
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ } else {
+ rkisp1_write(params->rkisp1,
+ reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ }
}
static void
-rkisp1_awb_meas_enable(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_awb_meas_config *arg,
- bool en)
+rkisp1_awb_meas_enable_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg,
+ bool en)
{
- u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP);
+ u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12);
/* switch off */
reg_val &= RKISP1_CIF_ISP_AWB_MODE_MASK_NONE;
@@ -489,34 +648,47 @@ rkisp1_awb_meas_enable(struct rkisp1_params *params,
else
reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
- rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP);
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
/* Measurements require the AWB block to be active. */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
} else {
rkisp1_write(params->rkisp1,
- reg_val, RKISP1_CIF_ISP_AWB_PROP);
+ reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
}
}
static void
-rkisp1_awb_gain_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_awb_gain_config *arg)
+rkisp1_awb_gain_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_gain_config *arg)
+{
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
+ arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G_V10);
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
+ arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB_V10);
+}
+
+static void
+rkisp1_awb_gain_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_gain_config *arg)
{
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
- arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G);
+ arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G_V12);
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
- arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB);
+ arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB_V12);
}
-static void rkisp1_aec_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_aec_config *arg)
+static void rkisp1_aec_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_aec_config *arg)
{
unsigned int block_hsize, block_vsize;
u32 exp_ctrl;
@@ -531,21 +703,53 @@ static void rkisp1_aec_config(struct rkisp1_params *params,
rkisp1_write(params->rkisp1, exp_ctrl, RKISP1_CIF_ISP_EXP_CTRL);
rkisp1_write(params->rkisp1,
- arg->meas_window.h_offs, RKISP1_CIF_ISP_EXP_H_OFFSET);
+ arg->meas_window.h_offs, RKISP1_CIF_ISP_EXP_H_OFFSET_V10);
rkisp1_write(params->rkisp1,
- arg->meas_window.v_offs, RKISP1_CIF_ISP_EXP_V_OFFSET);
+ arg->meas_window.v_offs, RKISP1_CIF_ISP_EXP_V_OFFSET_V10);
block_hsize = arg->meas_window.h_size /
- RKISP1_CIF_ISP_EXP_COLUMN_NUM - 1;
+ RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 - 1;
block_vsize = arg->meas_window.v_size /
- RKISP1_CIF_ISP_EXP_ROW_NUM - 1;
+ RKISP1_CIF_ISP_EXP_ROW_NUM_V10 - 1;
rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_EXP_H_SIZE_SET(block_hsize),
- RKISP1_CIF_ISP_EXP_H_SIZE);
+ RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(block_hsize),
+ RKISP1_CIF_ISP_EXP_H_SIZE_V10);
rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_EXP_V_SIZE_SET(block_vsize),
- RKISP1_CIF_ISP_EXP_V_SIZE);
+ RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(block_vsize),
+ RKISP1_CIF_ISP_EXP_V_SIZE_V10);
+}
+
+static void rkisp1_aec_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_aec_config *arg)
+{
+ u32 exp_ctrl;
+ u32 block_hsize, block_vsize;
+ u32 wnd_num_idx = 1;
+ const u32 ae_wnd_num[] = { 5, 9, 15, 15 };
+
+ /* avoid overriding the old enable value */
+ exp_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL);
+ exp_ctrl &= RKISP1_CIF_ISP_EXP_ENA;
+ if (arg->autostop)
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP;
+ if (arg->mode == RKISP1_CIF_ISP_EXP_MEASURING_MODE_1)
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1;
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_WNDNUM_SET_V12(wnd_num_idx);
+ rkisp1_write(params->rkisp1, exp_ctrl, RKISP1_CIF_ISP_EXP_CTRL);
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(arg->meas_window.v_offs) |
+ RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(arg->meas_window.h_offs),
+ RKISP1_CIF_ISP_EXP_OFFS_V12);
+
+ block_hsize = arg->meas_window.h_size / ae_wnd_num[wnd_num_idx] - 1;
+ block_vsize = arg->meas_window.v_size / ae_wnd_num[wnd_num_idx] - 1;
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(block_vsize) |
+ RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(block_hsize),
+ RKISP1_CIF_ISP_EXP_SIZE_V12);
}
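
The V12 grid dimensions come from ae_wnd_num[wnd_num_idx]; with the fixed 9x9 grid, each block dimension is the window size divided by 9, minus one, which suggests the size registers hold size-minus-one. A worked example with illustrative numbers:

/*
 * meas_window 1926 x 1080 on the 9x9 grid:
 *   block_hsize = 1926 / 9 - 1 = 213
 *   block_vsize = 1080 / 9 - 1 = 119
 * i.e. each AE block covers (213 + 1) x (119 + 1) pixels.
 */
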
static void rkisp1_cproc_config(struct rkisp1_params *params,
@@ -578,73 +782,151 @@ static void rkisp1_cproc_config(struct rkisp1_params *params,
}
}
-static void rkisp1_hst_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_hst_config *arg)
+static void rkisp1_hst_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg)
{
unsigned int block_hsize, block_vsize;
static const u32 hist_weight_regs[] = {
- RKISP1_CIF_ISP_HIST_WEIGHT_00TO30,
- RKISP1_CIF_ISP_HIST_WEIGHT_40TO21,
- RKISP1_CIF_ISP_HIST_WEIGHT_31TO12,
- RKISP1_CIF_ISP_HIST_WEIGHT_22TO03,
- RKISP1_CIF_ISP_HIST_WEIGHT_13TO43,
- RKISP1_CIF_ISP_HIST_WEIGHT_04TO34,
+ RKISP1_CIF_ISP_HIST_WEIGHT_00TO30_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_40TO21_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_22TO03_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_13TO43_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_04TO34_V10,
};
const u8 *weight;
unsigned int i;
u32 hist_prop;
/* avoid overriding the old enable value */
- hist_prop = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP);
- hist_prop &= RKISP1_CIF_ISP_HIST_PROP_MODE_MASK;
- hist_prop |= RKISP1_CIF_ISP_HIST_PREDIV_SET(arg->histogram_predivider);
- rkisp1_write(params->rkisp1, hist_prop, RKISP1_CIF_ISP_HIST_PROP);
+ hist_prop = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP_V10);
+ hist_prop &= RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10;
+ hist_prop |= RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(arg->histogram_predivider);
+ rkisp1_write(params->rkisp1, hist_prop, RKISP1_CIF_ISP_HIST_PROP_V10);
rkisp1_write(params->rkisp1,
arg->meas_window.h_offs,
- RKISP1_CIF_ISP_HIST_H_OFFS);
+ RKISP1_CIF_ISP_HIST_H_OFFS_V10);
rkisp1_write(params->rkisp1,
arg->meas_window.v_offs,
- RKISP1_CIF_ISP_HIST_V_OFFS);
+ RKISP1_CIF_ISP_HIST_V_OFFS_V10);
block_hsize = arg->meas_window.h_size /
- RKISP1_CIF_ISP_HIST_COLUMN_NUM - 1;
- block_vsize = arg->meas_window.v_size / RKISP1_CIF_ISP_HIST_ROW_NUM - 1;
+ RKISP1_CIF_ISP_HIST_COLUMN_NUM_V10 - 1;
+ block_vsize = arg->meas_window.v_size / RKISP1_CIF_ISP_HIST_ROW_NUM_V10 - 1;
- rkisp1_write(params->rkisp1, block_hsize, RKISP1_CIF_ISP_HIST_H_SIZE);
- rkisp1_write(params->rkisp1, block_vsize, RKISP1_CIF_ISP_HIST_V_SIZE);
+ rkisp1_write(params->rkisp1, block_hsize, RKISP1_CIF_ISP_HIST_H_SIZE_V10);
+ rkisp1_write(params->rkisp1, block_vsize, RKISP1_CIF_ISP_HIST_V_SIZE_V10);
weight = arg->hist_weight;
for (i = 0; i < ARRAY_SIZE(hist_weight_regs); ++i, weight += 4)
rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_HIST_WEIGHT_SET(weight[0],
+ RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(weight[0],
weight[1],
weight[2],
weight[3]),
hist_weight_regs[i]);
- rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44);
+ rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44_V10);
+}
+
+static void rkisp1_hst_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg)
+{
+ unsigned int i, j;
+ u32 block_hsize, block_vsize;
+ u32 wnd_num_idx, hist_weight_num, hist_ctrl, value;
+ u8 weight15x15[RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12];
+ const u32 hist_wnd_num[] = { 5, 9, 15, 15 };
+
+ /* only the 9x9 window is supported for now */
+ wnd_num_idx = 1;
+ memset(weight15x15, 0x00, sizeof(weight15x15));
+ /* avoid overriding the old enable value */
+ hist_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_CTRL_V12);
+ hist_ctrl &= RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 |
+ RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12;
+ hist_ctrl = hist_ctrl |
+ RKISP1_CIF_ISP_HIST_CTRL_INTRSEL_SET_V12(1) |
+ RKISP1_CIF_ISP_HIST_CTRL_DATASEL_SET_V12(0) |
+ RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(0) |
+ RKISP1_CIF_ISP_HIST_CTRL_AUTOSTOP_SET_V12(0) |
+ RKISP1_CIF_ISP_HIST_CTRL_WNDNUM_SET_V12(1) |
+ RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(arg->histogram_predivider);
+ rkisp1_write(params->rkisp1, hist_ctrl, RKISP1_CIF_ISP_HIST_CTRL_V12);
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_OFFS_SET_V12(arg->meas_window.h_offs,
+ arg->meas_window.v_offs),
+ RKISP1_CIF_ISP_HIST_OFFS_V12);
+
+ block_hsize = arg->meas_window.h_size / hist_wnd_num[wnd_num_idx] - 1;
+ block_vsize = arg->meas_window.v_size / hist_wnd_num[wnd_num_idx] - 1;
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_SIZE_SET_V12(block_hsize, block_vsize),
+ RKISP1_CIF_ISP_HIST_SIZE_V12);
+
+ for (i = 0; i < hist_wnd_num[wnd_num_idx]; i++) {
+ for (j = 0; j < hist_wnd_num[wnd_num_idx]; j++) {
+ weight15x15[i * RKISP1_CIF_ISP_HIST_ROW_NUM_V12 + j] =
+ arg->hist_weight[i * hist_wnd_num[wnd_num_idx] + j];
+ }
+ }
+
+ hist_weight_num = RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12;
+ for (i = 0; i < (hist_weight_num / 4); i++) {
+ value = RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(
+ weight15x15[4 * i + 0],
+ weight15x15[4 * i + 1],
+ weight15x15[4 * i + 2],
+ weight15x15[4 * i + 3]);
+ rkisp1_write(params->rkisp1, value,
+ RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i);
+ }
+ value = RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(weight15x15[4 * i + 0], 0, 0, 0);
+ rkisp1_write(params->rkisp1, value,
+ RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i);
}
static void
-rkisp1_hst_enable(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_hst_config *arg, bool en)
+rkisp1_hst_enable_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg, bool en)
{
if (en) {
u32 hist_prop = rkisp1_read(params->rkisp1,
- RKISP1_CIF_ISP_HIST_PROP);
+ RKISP1_CIF_ISP_HIST_PROP_V10);
- hist_prop &= ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK;
+ hist_prop &= ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10;
hist_prop |= arg->mode;
- rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
hist_prop);
} else {
- rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_PROP,
- RKISP1_CIF_ISP_HIST_PROP_MODE_MASK);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
+ RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10);
+ }
+}
+
+static void
+rkisp1_hst_enable_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg, bool en)
+{
+ if (en) {
+ u32 hist_ctrl = rkisp1_read(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_CTRL_V12);
+
+ hist_ctrl &= ~RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12;
+ hist_ctrl |= RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(arg->mode);
+ hist_ctrl |= RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(1);
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_CTRL_V12,
+ hist_ctrl);
+ } else {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_CTRL_V12,
+ RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 |
+ RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12);
}
}
-static void rkisp1_afm_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_afc_config *arg)
+static void rkisp1_afm_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_afc_config *arg)
{
size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
arg->num_afm_win);
@@ -674,6 +956,45 @@ static void rkisp1_afm_config(struct rkisp1_params *params,
rkisp1_write(params->rkisp1, afm_ctrl, RKISP1_CIF_ISP_AFM_CTRL);
}
+static void rkisp1_afm_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_afc_config *arg)
+{
+ size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
+ arg->num_afm_win);
+ u32 afm_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL);
+ u32 lum_var_shift, afm_var_shift;
+ unsigned int i;
+
+ /* Switch off to configure. */
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+
+ for (i = 0; i < num_of_win; i++) {
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_offs) |
+ RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs),
+ RKISP1_CIF_ISP_AFM_LT_A + i * 8);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_size +
+ arg->afm_win[i].h_offs) |
+ RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_size +
+ arg->afm_win[i].v_offs),
+ RKISP1_CIF_ISP_AFM_RB_A + i * 8);
+ }
+ rkisp1_write(params->rkisp1, arg->thres, RKISP1_CIF_ISP_AFM_THRES);
+
+ lum_var_shift = RKISP1_CIF_ISP_AFM_GET_LUM_SHIFT_a_V12(arg->var_shift);
+ afm_var_shift = RKISP1_CIF_ISP_AFM_GET_AFM_SHIFT_a_V12(arg->var_shift);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_SET_SHIFT_a_V12(lum_var_shift, afm_var_shift) |
+ RKISP1_CIF_ISP_AFM_SET_SHIFT_b_V12(lum_var_shift, afm_var_shift) |
+ RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(lum_var_shift, afm_var_shift),
+ RKISP1_CIF_ISP_AFM_VAR_SHIFT);
+
+ /* restore afm status */
+ rkisp1_write(params->rkisp1, afm_ctrl, RKISP1_CIF_ISP_AFM_CTRL);
+}
+
static void rkisp1_ie_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_ie_config *arg)
{
@@ -955,7 +1276,7 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
/* update awb gains */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN)
- rkisp1_awb_gain_config(params, &new_params->others.awb_gain_config);
+ params->ops->awb_gain_config(params, &new_params->others.awb_gain_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AWB_GAIN)
@@ -1010,8 +1331,7 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
/* update goc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_GOC)
- rkisp1_goc_config(params,
- &new_params->others.goc_config);
+ params->ops->goc_config(params, &new_params->others.goc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_GOC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_GOC)
@@ -1081,17 +1401,17 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
/* update awb config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB)
- rkisp1_awb_meas_config(params, &new_params->meas.awb_meas_config);
+ params->ops->awb_meas_config(params, &new_params->meas.awb_meas_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB)
- rkisp1_awb_meas_enable(params,
- &new_params->meas.awb_meas_config,
- !!(module_ens & RKISP1_CIF_ISP_MODULE_AWB));
+ params->ops->awb_meas_enable(params,
+ &new_params->meas.awb_meas_config,
+ !!(module_ens & RKISP1_CIF_ISP_MODULE_AWB));
/* update afc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AFC)
- rkisp1_afm_config(params,
- &new_params->meas.afc_config);
+ params->ops->afm_config(params,
+ &new_params->meas.afc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AFC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AFC)
@@ -1106,18 +1426,18 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
/* update hst config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_HST)
- rkisp1_hst_config(params,
- &new_params->meas.hst_config);
+ params->ops->hst_config(params,
+ &new_params->meas.hst_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_HST)
- rkisp1_hst_enable(params,
- &new_params->meas.hst_config,
- !!(module_ens & RKISP1_CIF_ISP_MODULE_HST));
+ params->ops->hst_enable(params,
+ &new_params->meas.hst_config,
+ !!(module_ens & RKISP1_CIF_ISP_MODULE_HST));
/* update aec config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AEC)
- rkisp1_aec_config(params,
- &new_params->meas.aec_config);
+ params->ops->aec_config(params,
+ &new_params->meas.aec_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AEC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AEC)
@@ -1218,21 +1538,21 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params)
{
struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config;
- rkisp1_awb_meas_config(params, &rkisp1_awb_params_default_config);
- rkisp1_awb_meas_enable(params, &rkisp1_awb_params_default_config,
- true);
+ params->ops->awb_meas_config(params, &rkisp1_awb_params_default_config);
+ params->ops->awb_meas_enable(params, &rkisp1_awb_params_default_config,
+ true);
- rkisp1_aec_config(params, &rkisp1_aec_params_default_config);
+ params->ops->aec_config(params, &rkisp1_aec_params_default_config);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
RKISP1_CIF_ISP_EXP_ENA);
- rkisp1_afm_config(params, &rkisp1_afc_params_default_config);
+ params->ops->afm_config(params, &rkisp1_afc_params_default_config);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
memset(hst.hist_weight, 0x01, sizeof(hst.hist_weight));
- rkisp1_hst_config(params, &hst);
- rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+ params->ops->hst_config(params, &hst);
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
rkisp1_hst_params_default_config.mode);
/* set the range */
@@ -1278,7 +1598,7 @@ void rkisp1_params_disable(struct rkisp1_params *params)
RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_FILT_MODE,
RKISP1_CIF_ISP_FLT_ENA);
- rkisp1_awb_meas_enable(params, NULL, false);
+ params->ops->awb_meas_enable(params, NULL, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
@@ -1286,7 +1606,7 @@ void rkisp1_params_disable(struct rkisp1_params *params)
rkisp1_ctk_enable(params, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
RKISP1_CIF_C_PROC_CTR_ENABLE);
- rkisp1_hst_enable(params, NULL, false);
+ params->ops->hst_enable(params, NULL, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
rkisp1_ie_enable(params, false);
@@ -1294,6 +1614,30 @@ void rkisp1_params_disable(struct rkisp1_params *params)
RKISP1_CIF_ISP_DPF_MODE_EN);
}
+static const struct rkisp1_params_ops rkisp1_v10_params_ops = {
+ .lsc_matrix_config = rkisp1_lsc_matrix_config_v10,
+ .goc_config = rkisp1_goc_config_v10,
+ .awb_meas_config = rkisp1_awb_meas_config_v10,
+ .awb_meas_enable = rkisp1_awb_meas_enable_v10,
+ .awb_gain_config = rkisp1_awb_gain_config_v10,
+ .aec_config = rkisp1_aec_config_v10,
+ .hst_config = rkisp1_hst_config_v10,
+ .hst_enable = rkisp1_hst_enable_v10,
+ .afm_config = rkisp1_afm_config_v10,
+};
+
+static const struct rkisp1_params_ops rkisp1_v12_params_ops = {
+ .lsc_matrix_config = rkisp1_lsc_matrix_config_v12,
+ .goc_config = rkisp1_goc_config_v12,
+ .awb_meas_config = rkisp1_awb_meas_config_v12,
+ .awb_meas_enable = rkisp1_awb_meas_enable_v12,
+ .awb_gain_config = rkisp1_awb_gain_config_v12,
+ .aec_config = rkisp1_aec_config_v12,
+ .hst_config = rkisp1_hst_config_v12,
+ .hst_enable = rkisp1_hst_enable_v12,
+ .afm_config = rkisp1_afm_config_v12,
+};
+
static int rkisp1_params_enum_fmt_meta_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
@@ -1459,6 +1803,11 @@ static void rkisp1_init_params(struct rkisp1_params *params)
V4L2_META_FMT_RK_ISP1_PARAMS;
params->vdev_fmt.fmt.meta.buffersize =
sizeof(struct rkisp1_params_cfg);
+
+ if (params->rkisp1->media_dev.hw_revision == RKISP1_V12)
+ params->ops = &rkisp1_v12_params_ops;
+ else
+ params->ops = &rkisp1_v10_params_ops;
}
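
rkisp1_init_params() resolves the variant exactly once; every later call site dispatches through params->ops and stays variant-agnostic. Sketch (example_configure is a hypothetical wrapper):

static void example_configure(struct rkisp1_params *params,
			      const struct rkisp1_cif_isp_aec_config *cfg)
{
	/* Resolves to the v10 or v12 implementation picked at init. */
	params->ops->aec_config(params, cfg);
}

A hypothetical future variant would only add one more ops table and one more branch in rkisp1_init_params().
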
int rkisp1_params_register(struct rkisp1_device *rkisp1)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index fa33080f51db..d326214c7e07 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -212,6 +212,35 @@
/* CCL */
#define RKISP1_CIF_CCL_CIF_CLK_DIS BIT(2)
+/* VI_ISP_CLK_CTRL */
+#define RKISP1_CIF_CLK_CTRL_ISP_RAW BIT(0)
+#define RKISP1_CIF_CLK_CTRL_ISP_RGB BIT(1)
+#define RKISP1_CIF_CLK_CTRL_ISP_YUV BIT(2)
+#define RKISP1_CIF_CLK_CTRL_ISP_3A BIT(3)
+#define RKISP1_CIF_CLK_CTRL_MIPI_RAW BIT(4)
+#define RKISP1_CIF_CLK_CTRL_ISP_IE BIT(5)
+#define RKISP1_CIF_CLK_CTRL_RSZ_RAM BIT(6)
+#define RKISP1_CIF_CLK_CTRL_JPEG_RAM BIT(7)
+#define RKISP1_CIF_CLK_CTRL_ACLK_ISP BIT(8)
+#define RKISP1_CIF_CLK_CTRL_MI_IDC BIT(9)
+#define RKISP1_CIF_CLK_CTRL_MI_MP BIT(10)
+#define RKISP1_CIF_CLK_CTRL_MI_JPEG BIT(11)
+#define RKISP1_CIF_CLK_CTRL_MI_DP BIT(12)
+#define RKISP1_CIF_CLK_CTRL_MI_Y12 BIT(13)
+#define RKISP1_CIF_CLK_CTRL_MI_SP BIT(14)
+#define RKISP1_CIF_CLK_CTRL_MI_RAW0 BIT(15)
+#define RKISP1_CIF_CLK_CTRL_MI_RAW1 BIT(16)
+#define RKISP1_CIF_CLK_CTRL_MI_READ BIT(17)
+#define RKISP1_CIF_CLK_CTRL_MI_RAWRD BIT(18)
+#define RKISP1_CIF_CLK_CTRL_CP BIT(19)
+#define RKISP1_CIF_CLK_CTRL_IE BIT(20)
+#define RKISP1_CIF_CLK_CTRL_SI BIT(21)
+#define RKISP1_CIF_CLK_CTRL_RSZM BIT(22)
+#define RKISP1_CIF_CLK_CTRL_DPMUX BIT(23)
+#define RKISP1_CIF_CLK_CTRL_JPEG BIT(24)
+#define RKISP1_CIF_CLK_CTRL_RSZS BIT(25)
+#define RKISP1_CIF_CLK_CTRL_MIPI BIT(26)
+#define RKISP1_CIF_CLK_CTRL_MARVINMI BIT(27)
/* ICCL */
#define RKISP1_CIF_ICCL_ISP_CLK BIT(0)
#define RKISP1_CIF_ICCL_CP_CLK BIT(1)
@@ -346,26 +375,58 @@
#define RKISP1_CIF_SUPER_IMP_CTRL_TRANSP_DIS BIT(2)
/* ISP HISTOGRAM CALCULATION : ISP_HIST_PROP */
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_DIS (0 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_RGB BIT(0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_RED (2 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_GREEN (3 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE (4 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_LUM (5 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_MASK 0x7
-#define RKISP1_CIF_ISP_HIST_PREDIV_SET(x) (((x) & 0x7F) << 3)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_SET(v0, v1, v2, v3) \
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_DIS_V10 (0 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RGB_V10 BIT(0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RED_V10 (2 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_GREEN_V10 (3 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE_V10 (4 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_LUM_V10 (5 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10 0x7
+#define RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(x) (((x) & 0x7F) << 3)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(v0, v1, v2, v3) \
(((v0) & 0x1F) | (((v1) & 0x1F) << 8) |\
(((v2) & 0x1F) << 16) | \
(((v3) & 0x1F) << 24))
-#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED 0xFFFFF000
-#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED 0xFFFFF800
-#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED 0xE0E0E0E0
-#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER 0x0000007F
-#define RKISP1_CIF_ISP_HIST_ROW_NUM 5
-#define RKISP1_CIF_ISP_HIST_COLUMN_NUM 5
-#define RKISP1_CIF_ISP_HIST_GET_BIN(x) ((x) & 0x000FFFFF)
+#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED_V10 0xFFFFF000
+#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED_V10 0xFFFFF800
+#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED_V10 0xE0E0E0E0
+#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER_V10 0x0000007F
+#define RKISP1_CIF_ISP_HIST_ROW_NUM_V10 5
+#define RKISP1_CIF_ISP_HIST_COLUMN_NUM_V10 5
+#define RKISP1_CIF_ISP_HIST_GET_BIN_V10(x) ((x) & 0x000FFFFF)
+
+/* ISP HISTOGRAM CALCULATION : CIF_ISP_HIST */
+#define RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(x) (((x) & 0x01) << 0)
+#define RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12 RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(0x01)
+#define RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(x) (((x) & 0x7F) << 1)
+#define RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(x) (((x) & 0x07) << 8)
+#define RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(0x07)
+#define RKISP1_CIF_ISP_HIST_CTRL_AUTOSTOP_SET_V12(x) (((x) & 0x01) << 11)
+#define RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(x) (((x) & 0xFFF) << 12)
+#define RKISP1_CIF_ISP_HIST_CTRL_DATASEL_SET_V12(x) (((x) & 0x07) << 24)
+#define RKISP1_CIF_ISP_HIST_CTRL_INTRSEL_SET_V12(x) (((x) & 0x01) << 27)
+#define RKISP1_CIF_ISP_HIST_CTRL_WNDNUM_SET_V12(x) (((x) & 0x03) << 28)
+#define RKISP1_CIF_ISP_HIST_CTRL_DBGEN_SET_V12(x) (((x) & 0x01) << 30)
+#define RKISP1_CIF_ISP_HIST_ROW_NUM_V12 15
+#define RKISP1_CIF_ISP_HIST_COLUMN_NUM_V12 15
+#define RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12 \
+ (RKISP1_CIF_ISP_HIST_ROW_NUM_V12 * RKISP1_CIF_ISP_HIST_COLUMN_NUM_V12)
+
+#define RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(v0, v1, v2, v3) \
+ (((v0) & 0x3F) | (((v1) & 0x3F) << 8) |\
+ (((v2) & 0x3F) << 16) |\
+ (((v3) & 0x3F) << 24))
+
+#define RKISP1_CIF_ISP_HIST_OFFS_SET_V12(v0, v1) \
+ (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 16))
+#define RKISP1_CIF_ISP_HIST_SIZE_SET_V12(v0, v1) \
+ (((v0) & 0x7FF) | (((v1) & 0x7FF) << 16))
+
+#define RKISP1_CIF_ISP_HIST_GET_BIN0_V12(x) \
+ ((x) & 0xFFFF)
+#define RKISP1_CIF_ISP_HIST_GET_BIN1_V12(x) \
+ (((x) >> 16) & 0xFFFF)
/* AUTO FOCUS MEASUREMENT: ISP_AFM_CTRL */
#define RKISP1_ISP_AFM_CTRL_ENABLE BIT(0)
@@ -401,6 +462,8 @@
#define RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN ((0 << 31) | (0x2 << 0))
#define RKISP1_CIF_ISP_AWB_MODE_MASK_NONE 0xFFFFFFFC
#define RKISP1_CIF_ISP_AWB_MODE_READ(x) ((x) & 3)
+#define RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(x) (((x) & 0x07) << 28)
+#define RKISP1_CIF_ISP_AWB_SET_FRAMES_MASK_V12 RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(0x07)
/* ISP_AWB_GAIN_RB, ISP_AWB_GAIN_G */
#define RKISP1_CIF_ISP_AWB_GAIN_R_SET(x) (((x) & 0x3FF) << 16)
#define RKISP1_CIF_ISP_AWB_GAIN_R_READ(x) (((x) >> 16) & 0x3FF)
@@ -435,6 +498,7 @@
/* ISP_EXP_CTRL */
#define RKISP1_CIF_ISP_EXP_ENA BIT(0)
#define RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP BIT(1)
+#define RKISP1_CIF_ISP_EXP_CTRL_WNDNUM_SET_V12(x) (((x) & 0x03) << 2)
/*
 * '1': luminance calculation according to Y = (R+G+B) x 0.332 (85/256)
 * '0': luminance calculation according to Y = 16 + 0.25R + 0.5G + 0.1094B
@@ -442,42 +506,76 @@
#define RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1 BIT(31)
/* ISP_EXP_H_SIZE */
-#define RKISP1_CIF_ISP_EXP_H_SIZE_SET(x) ((x) & 0x7FF)
-#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK 0x000007FF
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(x) ((x) & 0x7FF)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V10 0x000007FF
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(x) ((x) & 0x7FF)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V12 0x000007FF
/* ISP_EXP_V_SIZE: vertical size must be a multiple of 2. */
-#define RKISP1_CIF_ISP_EXP_V_SIZE_SET(x) ((x) & 0x7FE)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(x) ((x) & 0x7FE)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(x) (((x) & 0x7FE) << 16)
/* ISP_EXP_H_OFFSET */
-#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET(x) ((x) & 0x1FFF)
-#define RKISP1_CIF_ISP_EXP_MAX_HOFFS 2424
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V10(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V10 2424
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V12 0x1FFF
/* ISP_EXP_V_OFFSET */
-#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET(x) ((x) & 0x1FFF)
-#define RKISP1_CIF_ISP_EXP_MAX_VOFFS 1806
-
-#define RKISP1_CIF_ISP_EXP_ROW_NUM 5
-#define RKISP1_CIF_ISP_EXP_COLUMN_NUM 5
-#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS \
- (RKISP1_CIF_ISP_EXP_ROW_NUM * RKISP1_CIF_ISP_EXP_COLUMN_NUM)
-#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE 516
-#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE 35
-#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE 390
-#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE 28
-#define RKISP1_CIF_ISP_EXP_MAX_HSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE * RKISP1_CIF_ISP_EXP_COLUMN_NUM + 1)
-#define RKISP1_CIF_ISP_EXP_MIN_HSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE * RKISP1_CIF_ISP_EXP_COLUMN_NUM + 1)
-#define RKISP1_CIF_ISP_EXP_MAX_VSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE * RKISP1_CIF_ISP_EXP_ROW_NUM + 1)
-#define RKISP1_CIF_ISP_EXP_MIN_VSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE * RKISP1_CIF_ISP_EXP_ROW_NUM + 1)
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V10(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V10 1806
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(x) (((x) & 0x1FFF) << 16)
+#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V12 0x1FFF
+
+#define RKISP1_CIF_ISP_EXP_ROW_NUM_V10 5
+#define RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 5
+#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS_V10 \
+ (RKISP1_CIF_ISP_EXP_ROW_NUM_V10 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10)
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V10 516
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V10 35
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V10 390
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V10 28
+#define RKISP1_CIF_ISP_EXP_MAX_HSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V10 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_HSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V10 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 + 1)
+#define RKISP1_CIF_ISP_EXP_MAX_VSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V10 * RKISP1_CIF_ISP_EXP_ROW_NUM_V10 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_VSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V10 * RKISP1_CIF_ISP_EXP_ROW_NUM_V10 + 1)
+
+#define RKISP1_CIF_ISP_EXP_ROW_NUM_V12 15
+#define RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 15
+#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS_V12 \
+ (RKISP1_CIF_ISP_EXP_ROW_NUM_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12)
+
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12 0x7FF
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V12 0xE
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V12 0x7FE
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12 0xE
+#define RKISP1_CIF_ISP_EXP_MAX_HSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_HSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 + 1)
+#define RKISP1_CIF_ISP_EXP_MAX_VSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V12 * RKISP1_CIF_ISP_EXP_ROW_NUM_V12 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_VSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12 * RKISP1_CIF_ISP_EXP_ROW_NUM_V12 + 1)
+
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(x) ((x) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(x) (((x) >> 8) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(x) (((x) >> 16) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(x) (((x) >> 24) & 0xFF)
/* LSC: ISP_LSC_CTRL */
#define RKISP1_CIF_ISP_LSC_CTRL_ENA BIT(0)
#define RKISP1_CIF_ISP_LSC_SECT_SIZE_RESERVED 0xFC00FC00
-#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED 0xF000F000
-#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED 0xF000F000
-#define RKISP1_CIF_ISP_LSC_TABLE_DATA(v0, v1) \
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V10 0xF000F000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V10 0xF000F000
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V12 0xE000E000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V12 0xE000E000
+#define RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(v0, v1) \
(((v0) & 0xFFF) | (((v1) & 0xFFF) << 12))
+#define RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(v0, v1) \
+ (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 13))
#define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1) \
(((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
#define RKISP1_CIF_ISP_LSC_GRAD_SIZE(v0, v1) \
@@ -550,6 +648,10 @@
(1 << 15) | (1 << 11) | (1 << 7) | (1 << 3))
#define RKISP1_CIFISP_DEGAMMA_Y_RESERVED 0xFFFFF000
+/* GAMMA-OUT */
+#define RKISP1_CIF_ISP_GAMMA_VALUE_V12(x, y) \
+ (((x) & 0xFFF) << 16 | ((y) & 0xFFF) << 0)
+
/* AFM */
#define RKISP1_CIF_ISP_AFM_ENA BIT(0)
#define RKISP1_CIF_ISP_AFM_THRES_RESERVED 0xFFFF0000
@@ -560,6 +662,11 @@
#define RKISP1_CIF_ISP_AFM_WINDOW_Y_MIN 0x2
#define RKISP1_CIF_ISP_AFM_WINDOW_X(x) (((x) & 0x1FFF) << 16)
#define RKISP1_CIF_ISP_AFM_WINDOW_Y(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_AFM_SET_SHIFT_a_V12(x, y) (((x) & 0x7) << 16 | ((y) & 0x7) << 0)
+#define RKISP1_CIF_ISP_AFM_SET_SHIFT_b_V12(x, y) (((x) & 0x7) << 20 | ((y) & 0x7) << 4)
+#define RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(x, y) (((x) & 0x7) << 24 | ((y) & 0x7) << 8)
+#define RKISP1_CIF_ISP_AFM_GET_LUM_SHIFT_a_V12(x) (((x) & 0x70000) >> 16)
+#define RKISP1_CIF_ISP_AFM_GET_AFM_SHIFT_a_V12(x) ((x) & 0x7)
/* DPF */
#define RKISP1_CIF_ISP_DPF_MODE_EN BIT(0)
@@ -582,6 +689,7 @@
#define RKISP1_CIF_CTRL_BASE 0x00000000
#define RKISP1_CIF_CCL (RKISP1_CIF_CTRL_BASE + 0x00000000)
#define RKISP1_CIF_VI_ID (RKISP1_CIF_CTRL_BASE + 0x00000008)
+#define RKISP1_CIF_VI_ISP_CLK_CTRL_V12 (RKISP1_CIF_CTRL_BASE + 0x0000000C)
#define RKISP1_CIF_ICCL (RKISP1_CIF_CTRL_BASE + 0x00000010)
#define RKISP1_CIF_IRCL (RKISP1_CIF_CTRL_BASE + 0x00000014)
#define RKISP1_CIF_VI_DPCL (RKISP1_CIF_CTRL_BASE + 0x00000018)
@@ -667,18 +775,35 @@
#define RKISP1_CIF_ISP_GAMMA_B_Y14 (RKISP1_CIF_ISP_BASE + 0x000000E4)
#define RKISP1_CIF_ISP_GAMMA_B_Y15 (RKISP1_CIF_ISP_BASE + 0x000000E8)
#define RKISP1_CIF_ISP_GAMMA_B_Y16 (RKISP1_CIF_ISP_BASE + 0x000000EC)
-#define RKISP1_CIF_ISP_AWB_PROP (RKISP1_CIF_ISP_BASE + 0x00000110)
-#define RKISP1_CIF_ISP_AWB_WND_H_OFFS (RKISP1_CIF_ISP_BASE + 0x00000114)
-#define RKISP1_CIF_ISP_AWB_WND_V_OFFS (RKISP1_CIF_ISP_BASE + 0x00000118)
-#define RKISP1_CIF_ISP_AWB_WND_H_SIZE (RKISP1_CIF_ISP_BASE + 0x0000011C)
-#define RKISP1_CIF_ISP_AWB_WND_V_SIZE (RKISP1_CIF_ISP_BASE + 0x00000120)
-#define RKISP1_CIF_ISP_AWB_FRAMES (RKISP1_CIF_ISP_BASE + 0x00000124)
-#define RKISP1_CIF_ISP_AWB_REF (RKISP1_CIF_ISP_BASE + 0x00000128)
-#define RKISP1_CIF_ISP_AWB_THRESH (RKISP1_CIF_ISP_BASE + 0x0000012C)
-#define RKISP1_CIF_ISP_AWB_GAIN_G (RKISP1_CIF_ISP_BASE + 0x00000138)
-#define RKISP1_CIF_ISP_AWB_GAIN_RB (RKISP1_CIF_ISP_BASE + 0x0000013C)
-#define RKISP1_CIF_ISP_AWB_WHITE_CNT (RKISP1_CIF_ISP_BASE + 0x00000140)
-#define RKISP1_CIF_ISP_AWB_MEAN (RKISP1_CIF_ISP_BASE + 0x00000144)
+#define RKISP1_CIF_ISP_AWB_PROP_V10 (RKISP1_CIF_ISP_BASE + 0x00000110)
+#define RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10 (RKISP1_CIF_ISP_BASE + 0x00000114)
+#define RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10 (RKISP1_CIF_ISP_BASE + 0x00000118)
+#define RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10 (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10 (RKISP1_CIF_ISP_BASE + 0x00000120)
+#define RKISP1_CIF_ISP_AWB_FRAMES_V10 (RKISP1_CIF_ISP_BASE + 0x00000124)
+#define RKISP1_CIF_ISP_AWB_REF_V10 (RKISP1_CIF_ISP_BASE + 0x00000128)
+#define RKISP1_CIF_ISP_AWB_THRESH_V10 (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_GAIN_G_V10 (RKISP1_CIF_ISP_BASE + 0x00000138)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB_V10 (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V10 (RKISP1_CIF_ISP_BASE + 0x00000140)
+#define RKISP1_CIF_ISP_AWB_MEAN_V10 (RKISP1_CIF_ISP_BASE + 0x00000144)
+#define RKISP1_CIF_ISP_AWB_PROP_V12 (RKISP1_CIF_ISP_BASE + 0x00000110)
+#define RKISP1_CIF_ISP_AWB_SIZE_V12 (RKISP1_CIF_ISP_BASE + 0x00000114)
+#define RKISP1_CIF_ISP_AWB_OFFS_V12 (RKISP1_CIF_ISP_BASE + 0x00000118)
+#define RKISP1_CIF_ISP_AWB_REF_V12 (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_THRESH_V12 (RKISP1_CIF_ISP_BASE + 0x00000120)
+#define RKISP1_CIF_ISP_X_COOR12_V12 (RKISP1_CIF_ISP_BASE + 0x00000124)
+#define RKISP1_CIF_ISP_X_COOR34_V12 (RKISP1_CIF_ISP_BASE + 0x00000128)
+#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V12 (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_MEAN_V12 (RKISP1_CIF_ISP_BASE + 0x00000130)
+#define RKISP1_CIF_ISP_DEGAIN_V12 (RKISP1_CIF_ISP_BASE + 0x00000134)
+#define RKISP1_CIF_ISP_AWB_GAIN_G_V12 (RKISP1_CIF_ISP_BASE + 0x00000138)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB_V12 (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_REGION_LINE_V12 (RKISP1_CIF_ISP_BASE + 0x00000140)
+#define RKISP1_CIF_ISP_WP_CNT_REGION0_V12 (RKISP1_CIF_ISP_BASE + 0x00000160)
+#define RKISP1_CIF_ISP_WP_CNT_REGION1_V12 (RKISP1_CIF_ISP_BASE + 0x00000164)
+#define RKISP1_CIF_ISP_WP_CNT_REGION2_V12 (RKISP1_CIF_ISP_BASE + 0x00000168)
+#define RKISP1_CIF_ISP_WP_CNT_REGION3_V12 (RKISP1_CIF_ISP_BASE + 0x0000016C)
#define RKISP1_CIF_ISP_CC_COEFF_0 (RKISP1_CIF_ISP_BASE + 0x00000170)
#define RKISP1_CIF_ISP_CC_COEFF_1 (RKISP1_CIF_ISP_BASE + 0x00000174)
#define RKISP1_CIF_ISP_CC_COEFF_2 (RKISP1_CIF_ISP_BASE + 0x00000178)
@@ -712,30 +837,32 @@
#define RKISP1_CIF_ISP_CT_COEFF_6 (RKISP1_CIF_ISP_BASE + 0x000001E8)
#define RKISP1_CIF_ISP_CT_COEFF_7 (RKISP1_CIF_ISP_BASE + 0x000001EC)
#define RKISP1_CIF_ISP_CT_COEFF_8 (RKISP1_CIF_ISP_BASE + 0x000001F0)
-#define RKISP1_CIF_ISP_GAMMA_OUT_MODE (RKISP1_CIF_ISP_BASE + 0x000001F4)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0 (RKISP1_CIF_ISP_BASE + 0x000001F8)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1 (RKISP1_CIF_ISP_BASE + 0x000001FC)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_2 (RKISP1_CIF_ISP_BASE + 0x00000200)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_3 (RKISP1_CIF_ISP_BASE + 0x00000204)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_4 (RKISP1_CIF_ISP_BASE + 0x00000208)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5 (RKISP1_CIF_ISP_BASE + 0x0000020C)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_6 (RKISP1_CIF_ISP_BASE + 0x00000210)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_7 (RKISP1_CIF_ISP_BASE + 0x00000214)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_8 (RKISP1_CIF_ISP_BASE + 0x00000218)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9 (RKISP1_CIF_ISP_BASE + 0x0000021C)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_10 (RKISP1_CIF_ISP_BASE + 0x00000220)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_11 (RKISP1_CIF_ISP_BASE + 0x00000224)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_12 (RKISP1_CIF_ISP_BASE + 0x00000228)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13 (RKISP1_CIF_ISP_BASE + 0x0000022C)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_14 (RKISP1_CIF_ISP_BASE + 0x00000230)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_15 (RKISP1_CIF_ISP_BASE + 0x00000234)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_16 (RKISP1_CIF_ISP_BASE + 0x00000238)
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10 (RKISP1_CIF_ISP_BASE + 0x000001F4)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 (RKISP1_CIF_ISP_BASE + 0x000001F8)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1_V10 (RKISP1_CIF_ISP_BASE + 0x000001FC)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_2_V10 (RKISP1_CIF_ISP_BASE + 0x00000200)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_3_V10 (RKISP1_CIF_ISP_BASE + 0x00000204)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_4_V10 (RKISP1_CIF_ISP_BASE + 0x00000208)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5_V10 (RKISP1_CIF_ISP_BASE + 0x0000020C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_6_V10 (RKISP1_CIF_ISP_BASE + 0x00000210)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_7_V10 (RKISP1_CIF_ISP_BASE + 0x00000214)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_8_V10 (RKISP1_CIF_ISP_BASE + 0x00000218)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9_V10 (RKISP1_CIF_ISP_BASE + 0x0000021C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_10_V10 (RKISP1_CIF_ISP_BASE + 0x00000220)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_11_V10 (RKISP1_CIF_ISP_BASE + 0x00000224)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_12_V10 (RKISP1_CIF_ISP_BASE + 0x00000228)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13_V10 (RKISP1_CIF_ISP_BASE + 0x0000022C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_14_V10 (RKISP1_CIF_ISP_BASE + 0x00000230)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_15_V10 (RKISP1_CIF_ISP_BASE + 0x00000234)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_16_V10 (RKISP1_CIF_ISP_BASE + 0x00000238)
#define RKISP1_CIF_ISP_ERR (RKISP1_CIF_ISP_BASE + 0x0000023C)
#define RKISP1_CIF_ISP_ERR_CLR (RKISP1_CIF_ISP_BASE + 0x00000240)
#define RKISP1_CIF_ISP_FRAME_COUNT (RKISP1_CIF_ISP_BASE + 0x00000244)
#define RKISP1_CIF_ISP_CT_OFFSET_R (RKISP1_CIF_ISP_BASE + 0x00000248)
#define RKISP1_CIF_ISP_CT_OFFSET_G (RKISP1_CIF_ISP_BASE + 0x0000024C)
#define RKISP1_CIF_ISP_CT_OFFSET_B (RKISP1_CIF_ISP_BASE + 0x00000250)
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12 (RKISP1_CIF_ISP_BASE + 0x00000300)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12 (RKISP1_CIF_ISP_BASE + 0x00000304)
#define RKISP1_CIF_ISP_FLASH_BASE 0x00000660
#define RKISP1_CIF_ISP_FLASH_CMD (RKISP1_CIF_ISP_FLASH_BASE + 0x00000000)
@@ -1005,36 +1132,35 @@
#define RKISP1_CIF_ISP_IS_H_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x0000002C)
#define RKISP1_CIF_ISP_IS_V_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_HIST_BASE 0x00002400
-
-#define RKISP1_CIF_ISP_HIST_PROP (RKISP1_CIF_ISP_HIST_BASE + 0x00000000)
-#define RKISP1_CIF_ISP_HIST_H_OFFS (RKISP1_CIF_ISP_HIST_BASE + 0x00000004)
-#define RKISP1_CIF_ISP_HIST_V_OFFS (RKISP1_CIF_ISP_HIST_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_HIST_H_SIZE (RKISP1_CIF_ISP_HIST_BASE + 0x0000000C)
-#define RKISP1_CIF_ISP_HIST_V_SIZE (RKISP1_CIF_ISP_HIST_BASE + 0x00000010)
-#define RKISP1_CIF_ISP_HIST_BIN_0 (RKISP1_CIF_ISP_HIST_BASE + 0x00000014)
-#define RKISP1_CIF_ISP_HIST_BIN_1 (RKISP1_CIF_ISP_HIST_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_HIST_BIN_2 (RKISP1_CIF_ISP_HIST_BASE + 0x0000001C)
-#define RKISP1_CIF_ISP_HIST_BIN_3 (RKISP1_CIF_ISP_HIST_BASE + 0x00000020)
-#define RKISP1_CIF_ISP_HIST_BIN_4 (RKISP1_CIF_ISP_HIST_BASE + 0x00000024)
-#define RKISP1_CIF_ISP_HIST_BIN_5 (RKISP1_CIF_ISP_HIST_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_HIST_BIN_6 (RKISP1_CIF_ISP_HIST_BASE + 0x0000002C)
-#define RKISP1_CIF_ISP_HIST_BIN_7 (RKISP1_CIF_ISP_HIST_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_HIST_BIN_8 (RKISP1_CIF_ISP_HIST_BASE + 0x00000034)
-#define RKISP1_CIF_ISP_HIST_BIN_9 (RKISP1_CIF_ISP_HIST_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_HIST_BIN_10 (RKISP1_CIF_ISP_HIST_BASE + 0x0000003C)
-#define RKISP1_CIF_ISP_HIST_BIN_11 (RKISP1_CIF_ISP_HIST_BASE + 0x00000040)
-#define RKISP1_CIF_ISP_HIST_BIN_12 (RKISP1_CIF_ISP_HIST_BASE + 0x00000044)
-#define RKISP1_CIF_ISP_HIST_BIN_13 (RKISP1_CIF_ISP_HIST_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_HIST_BIN_14 (RKISP1_CIF_ISP_HIST_BASE + 0x0000004C)
-#define RKISP1_CIF_ISP_HIST_BIN_15 (RKISP1_CIF_ISP_HIST_BASE + 0x00000050)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_00TO30 (RKISP1_CIF_ISP_HIST_BASE + 0x00000054)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_40TO21 (RKISP1_CIF_ISP_HIST_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12 (RKISP1_CIF_ISP_HIST_BASE + 0x0000005C)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_22TO03 (RKISP1_CIF_ISP_HIST_BASE + 0x00000060)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_13TO43 (RKISP1_CIF_ISP_HIST_BASE + 0x00000064)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_04TO34 (RKISP1_CIF_ISP_HIST_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_44 (RKISP1_CIF_ISP_HIST_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_HIST_BASE_V10 0x00002400
+#define RKISP1_CIF_ISP_HIST_PROP_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000000)
+#define RKISP1_CIF_ISP_HIST_H_OFFS_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000004)
+#define RKISP1_CIF_ISP_HIST_V_OFFS_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000008)
+#define RKISP1_CIF_ISP_HIST_H_SIZE_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000000C)
+#define RKISP1_CIF_ISP_HIST_V_SIZE_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000010)
+#define RKISP1_CIF_ISP_HIST_BIN_0_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000014)
+#define RKISP1_CIF_ISP_HIST_BIN_1_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000018)
+#define RKISP1_CIF_ISP_HIST_BIN_2_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000001C)
+#define RKISP1_CIF_ISP_HIST_BIN_3_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000020)
+#define RKISP1_CIF_ISP_HIST_BIN_4_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000024)
+#define RKISP1_CIF_ISP_HIST_BIN_5_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000028)
+#define RKISP1_CIF_ISP_HIST_BIN_6_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000002C)
+#define RKISP1_CIF_ISP_HIST_BIN_7_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000030)
+#define RKISP1_CIF_ISP_HIST_BIN_8_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000034)
+#define RKISP1_CIF_ISP_HIST_BIN_9_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000038)
+#define RKISP1_CIF_ISP_HIST_BIN_10_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_BIN_11_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000040)
+#define RKISP1_CIF_ISP_HIST_BIN_12_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000044)
+#define RKISP1_CIF_ISP_HIST_BIN_13_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000048)
+#define RKISP1_CIF_ISP_HIST_BIN_14_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000004C)
+#define RKISP1_CIF_ISP_HIST_BIN_15_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000050)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_00TO30_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000054)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_40TO21_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000058)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000005C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_22TO03_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000060)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_13TO43_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000064)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_04TO34_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000068)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_44_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000006C)
#define RKISP1_CIF_ISP_FILT_BASE 0x00002500
#define RKISP1_CIF_ISP_FILT_MODE (RKISP1_CIF_ISP_FILT_BASE + 0x00000000)
@@ -1060,35 +1186,38 @@
#define RKISP1_CIF_ISP_EXP_BASE 0x00002600
#define RKISP1_CIF_ISP_EXP_CTRL (RKISP1_CIF_ISP_EXP_BASE + 0x00000000)
-#define RKISP1_CIF_ISP_EXP_H_OFFSET (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
-#define RKISP1_CIF_ISP_EXP_V_OFFSET (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_EXP_H_SIZE (RKISP1_CIF_ISP_EXP_BASE + 0x0000000C)
-#define RKISP1_CIF_ISP_EXP_V_SIZE (RKISP1_CIF_ISP_EXP_BASE + 0x00000010)
-#define RKISP1_CIF_ISP_EXP_MEAN_00 (RKISP1_CIF_ISP_EXP_BASE + 0x00000014)
-#define RKISP1_CIF_ISP_EXP_MEAN_10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_EXP_MEAN_20 (RKISP1_CIF_ISP_EXP_BASE + 0x0000001c)
-#define RKISP1_CIF_ISP_EXP_MEAN_30 (RKISP1_CIF_ISP_EXP_BASE + 0x00000020)
-#define RKISP1_CIF_ISP_EXP_MEAN_40 (RKISP1_CIF_ISP_EXP_BASE + 0x00000024)
-#define RKISP1_CIF_ISP_EXP_MEAN_01 (RKISP1_CIF_ISP_EXP_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_EXP_MEAN_11 (RKISP1_CIF_ISP_EXP_BASE + 0x0000002c)
-#define RKISP1_CIF_ISP_EXP_MEAN_21 (RKISP1_CIF_ISP_EXP_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_EXP_MEAN_31 (RKISP1_CIF_ISP_EXP_BASE + 0x00000034)
-#define RKISP1_CIF_ISP_EXP_MEAN_41 (RKISP1_CIF_ISP_EXP_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_EXP_MEAN_02 (RKISP1_CIF_ISP_EXP_BASE + 0x0000003c)
-#define RKISP1_CIF_ISP_EXP_MEAN_12 (RKISP1_CIF_ISP_EXP_BASE + 0x00000040)
-#define RKISP1_CIF_ISP_EXP_MEAN_22 (RKISP1_CIF_ISP_EXP_BASE + 0x00000044)
-#define RKISP1_CIF_ISP_EXP_MEAN_32 (RKISP1_CIF_ISP_EXP_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_EXP_MEAN_42 (RKISP1_CIF_ISP_EXP_BASE + 0x0000004c)
-#define RKISP1_CIF_ISP_EXP_MEAN_03 (RKISP1_CIF_ISP_EXP_BASE + 0x00000050)
-#define RKISP1_CIF_ISP_EXP_MEAN_13 (RKISP1_CIF_ISP_EXP_BASE + 0x00000054)
-#define RKISP1_CIF_ISP_EXP_MEAN_23 (RKISP1_CIF_ISP_EXP_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_EXP_MEAN_33 (RKISP1_CIF_ISP_EXP_BASE + 0x0000005c)
-#define RKISP1_CIF_ISP_EXP_MEAN_43 (RKISP1_CIF_ISP_EXP_BASE + 0x00000060)
-#define RKISP1_CIF_ISP_EXP_MEAN_04 (RKISP1_CIF_ISP_EXP_BASE + 0x00000064)
-#define RKISP1_CIF_ISP_EXP_MEAN_14 (RKISP1_CIF_ISP_EXP_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_EXP_MEAN_24 (RKISP1_CIF_ISP_EXP_BASE + 0x0000006c)
-#define RKISP1_CIF_ISP_EXP_MEAN_34 (RKISP1_CIF_ISP_EXP_BASE + 0x00000070)
-#define RKISP1_CIF_ISP_EXP_MEAN_44 (RKISP1_CIF_ISP_EXP_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_EXP_H_SIZE_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_EXP_MEAN_00_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_EXP_MEAN_10_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_EXP_MEAN_20_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000001c)
+#define RKISP1_CIF_ISP_EXP_MEAN_30_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_EXP_MEAN_40_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_EXP_MEAN_01_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_EXP_MEAN_11_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000002c)
+#define RKISP1_CIF_ISP_EXP_MEAN_21_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_EXP_MEAN_31_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_EXP_MEAN_41_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_EXP_MEAN_02_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000003c)
+#define RKISP1_CIF_ISP_EXP_MEAN_12_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_EXP_MEAN_22_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_EXP_MEAN_32_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_EXP_MEAN_42_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000004c)
+#define RKISP1_CIF_ISP_EXP_MEAN_03_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_EXP_MEAN_13_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_EXP_MEAN_23_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_EXP_MEAN_33_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000005c)
+#define RKISP1_CIF_ISP_EXP_MEAN_43_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_EXP_MEAN_04_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_EXP_MEAN_14_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_EXP_MEAN_24_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000006c)
+#define RKISP1_CIF_ISP_EXP_MEAN_34_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_EXP_MEAN_44_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_EXP_SIZE_V12 (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_EXP_OFFS_V12 (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_EXP_MEAN_V12 (RKISP1_CIF_ISP_EXP_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_BLS_BASE 0x00002700
#define RKISP1_CIF_ISP_BLS_CTRL (RKISP1_CIF_ISP_BLS_BASE + 0x00000000)
@@ -1249,6 +1378,16 @@
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000012C)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000130)
+#define RKISP1_CIF_ISP_HIST_BASE_V12 0x00002C00
+#define RKISP1_CIF_ISP_HIST_CTRL_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000000)
+#define RKISP1_CIF_ISP_HIST_SIZE_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000004)
+#define RKISP1_CIF_ISP_HIST_OFFS_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000008)
+#define RKISP1_CIF_ISP_HIST_DBG1_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000000C)
+#define RKISP1_CIF_ISP_HIST_DBG2_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000001C)
+#define RKISP1_CIF_ISP_HIST_DBG3_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000002C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_BIN_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000120)
+
#define RKISP1_CIF_ISP_VSM_BASE 0x00002F00
#define RKISP1_CIF_ISP_VSM_MODE (RKISP1_CIF_ISP_VSM_BASE + 0x00000000)
#define RKISP1_CIF_ISP_VSM_H_OFFS (RKISP1_CIF_ISP_VSM_BASE + 0x00000004)
@@ -1260,4 +1399,7 @@
#define RKISP1_CIF_ISP_VSM_DELTA_H (RKISP1_CIF_ISP_VSM_BASE + 0x0000001C)
#define RKISP1_CIF_ISP_VSM_DELTA_V (RKISP1_CIF_ISP_VSM_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_CSI0_BASE 0x00007000
+#define RKISP1_CIF_ISP_CSI0_CTRL0 (RKISP1_CIF_ISP_CSI0_BASE + 0x00000000)
+
#endif /* _RKISP1_REGS_H */
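
A consistent convention runs through the new register macros: each *_SET_* helper masks a value to its field width and shifts it to its bit offset, and the matching *_GET_* helper undoes the shift and mask. A quick self-contained check of the V12 histogram-weight packing defined above (four 6-bit weights per 32-bit word; the macro name is shortened here):

#include <inttypes.h>
#include <stdio.h>

/* same shape as RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12 above */
#define HIST_WEIGHT_SET(v0, v1, v2, v3) \
	(((v0) & 0x3F) | (((v1) & 0x3F) << 8) | \
	 (((v2) & 0x3F) << 16) | (((v3) & 0x3F) << 24))

int main(void)
{
	uint32_t reg = HIST_WEIGHT_SET(1, 2, 3, 4);

	/* each 6-bit weight lands in its own byte: prints 0x04030201 */
	printf("0x%08" PRIx32 "\n", reg);
	return 0;
}
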
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
index e88bdd612d71..be5777c65bfb 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
@@ -174,18 +174,18 @@ rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
return vb2_queue_init(q);
}
-static void rkisp1_stats_get_awb_meas(struct rkisp1_stats *stats,
- struct rkisp1_stat_buffer *pbuf)
+static void rkisp1_stats_get_awb_meas_v10(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
{
/* Protect against concurrent access from ISR? */
struct rkisp1_device *rkisp1 = stats->rkisp1;
u32 reg_val;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
- reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT);
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT_V10);
pbuf->params.awb.awb_mean[0].cnt =
RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
- reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN);
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN_V10);
pbuf->params.awb.awb_mean[0].mean_cr_or_r =
RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
@@ -195,8 +195,29 @@ static void rkisp1_stats_get_awb_meas(struct rkisp1_stats *stats,
RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
}
-static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
- struct rkisp1_stat_buffer *pbuf)
+static void rkisp1_stats_get_awb_meas_v12(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ /* Protect against concurrent access from ISR? */
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ u32 reg_val;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT_V12);
+ pbuf->params.awb.awb_mean[0].cnt =
+ RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN_V12);
+
+ pbuf->params.awb.awb_mean[0].mean_cr_or_r =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
+ pbuf->params.awb.awb_mean[0].mean_cb_or_b =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(reg_val);
+ pbuf->params.awb.awb_mean[0].mean_y_or_g =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
+}
+
+static void rkisp1_stats_get_aec_meas_v10(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
unsigned int i;
@@ -205,7 +226,31 @@ static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V10; i++)
pbuf->params.ae.exp_mean[i] =
(u8)rkisp1_read(rkisp1,
- RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4);
+ RKISP1_CIF_ISP_EXP_MEAN_00_V10 + i * 4);
+}
+
+static void rkisp1_stats_get_aec_meas_v12(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ u32 value;
+ int i;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
+ for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V12 / 4; i++) {
+ value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_EXP_MEAN_V12 + i * 4);
+ pbuf->params.ae.exp_mean[4 * i + 0] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(value);
+ pbuf->params.ae.exp_mean[4 * i + 1] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(value);
+ pbuf->params.ae.exp_mean[4 * i + 2] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(value);
+ pbuf->params.ae.exp_mean[4 * i + 3] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(value);
+ }
+
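+	/* the mean count is 4 * n + 1, so one value is left over after the loop */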
+ value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_EXP_MEAN_V12 + i * 4);
+ pbuf->params.ae.exp_mean[4 * i + 0] = RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(value);
}
static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats,
@@ -225,17 +270,34 @@ static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats,
af->window[2].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_C);
}
-static void rkisp1_stats_get_hst_meas(struct rkisp1_stats *stats,
- struct rkisp1_stat_buffer *pbuf)
+static void rkisp1_stats_get_hst_meas_v10(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10; i++) {
- u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+ u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0_V10 + i * 4);
+
+ pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN_V10(reg_val);
+ }
+}
- pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN(reg_val);
+static void rkisp1_stats_get_hst_meas_v12(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ u32 value;
+ int i;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
+ for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 / 2; i++) {
+ value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_V12 + i * 4);
+ pbuf->params.hist.hist_bins[2 * i] =
+ RKISP1_CIF_ISP_HIST_GET_BIN0_V12(value);
+ pbuf->params.hist.hist_bins[2 * i + 1] =
+ RKISP1_CIF_ISP_HIST_GET_BIN1_V12(value);
}
}
@@ -286,6 +348,18 @@ static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
}
}
+static const struct rkisp1_stats_ops rkisp1_v10_stats_ops = {
+ .get_awb_meas = rkisp1_stats_get_awb_meas_v10,
+ .get_aec_meas = rkisp1_stats_get_aec_meas_v10,
+ .get_hst_meas = rkisp1_stats_get_hst_meas_v10,
+};
+
+static const struct rkisp1_stats_ops rkisp1_v12_stats_ops = {
+ .get_awb_meas = rkisp1_stats_get_awb_meas_v12,
+ .get_aec_meas = rkisp1_stats_get_aec_meas_v12,
+ .get_hst_meas = rkisp1_stats_get_hst_meas_v12,
+};
+
static void
rkisp1_stats_send_measurement(struct rkisp1_stats *stats, u32 isp_ris)
{
@@ -307,18 +381,18 @@ rkisp1_stats_send_measurement(struct rkisp1_stats *stats, u32 isp_ris)
cur_stat_buf = (struct rkisp1_stat_buffer *)
vb2_plane_vaddr(&cur_buf->vb.vb2_buf, 0);
if (isp_ris & RKISP1_CIF_ISP_AWB_DONE)
- rkisp1_stats_get_awb_meas(stats, cur_stat_buf);
+ stats->ops->get_awb_meas(stats, cur_stat_buf);
if (isp_ris & RKISP1_CIF_ISP_AFM_FIN)
rkisp1_stats_get_afc_meas(stats, cur_stat_buf);
if (isp_ris & RKISP1_CIF_ISP_EXP_END) {
- rkisp1_stats_get_aec_meas(stats, cur_stat_buf);
+ stats->ops->get_aec_meas(stats, cur_stat_buf);
rkisp1_stats_get_bls_meas(stats, cur_stat_buf);
}
if (isp_ris & RKISP1_CIF_ISP_HIST_MEASURE_RDY)
- rkisp1_stats_get_hst_meas(stats, cur_stat_buf);
+ stats->ops->get_hst_meas(stats, cur_stat_buf);
vb2_set_plane_payload(&cur_buf->vb.vb2_buf, 0,
sizeof(struct rkisp1_stat_buffer));
@@ -352,6 +426,11 @@ static void rkisp1_init_stats(struct rkisp1_stats *stats)
V4L2_META_FMT_RK_ISP1_STAT_3A;
stats->vdev_fmt.fmt.meta.buffersize =
sizeof(struct rkisp1_stat_buffer);
+
+ if (stats->rkisp1->media_dev.hw_revision == RKISP1_V12)
+ stats->ops = &rkisp1_v12_stats_ops;
+ else
+ stats->ops = &rkisp1_v10_stats_ops;
}
int rkisp1_stats_register(struct rkisp1_device *rkisp1)
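
The V12 readout helpers mirror that packing on the way out: two 16-bit histogram bins arrive per 32-bit register and four 8-bit exposure means per register, so each loop iteration reads one word and fans it out into the stat buffer. A hedged userspace sketch of the bin unpacking, assuming a V12 bin count of 32 and a fake register read:

#include <inttypes.h>
#include <stdio.h>

#define HIST_BIN_N_MAX 32	/* assumed value of RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 */

/* stand-in for rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_V12 + i * 4) */
static uint32_t read_bin_reg(unsigned int i)
{
	return ((2 * i + 1) << 16) | (2 * i);	/* fake data: bin1 high, bin0 low */
}

int main(void)
{
	uint32_t bins[HIST_BIN_N_MAX];
	unsigned int i;

	for (i = 0; i < HIST_BIN_N_MAX / 2; i++) {
		uint32_t v = read_bin_reg(i);

		bins[2 * i] = v & 0xFFFF;		/* ..._GET_BIN0_V12() */
		bins[2 * i + 1] = (v >> 16) & 0xFFFF;	/* ..._GET_BIN1_V12() */
	}
	printf("bin[5] = %" PRIu32 "\n", bins[5]);	/* prints 5 */
	return 0;
}
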
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index e1d51fd3e700..32892ab359ee 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -23,7 +23,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <media/media-device.h>
#include <media/v4l2-ctrls.h>
@@ -402,7 +401,6 @@ static int s3c_camif_probe(struct platform_device *pdev)
struct s3c_camif_plat_data *pdata = dev->platform_data;
struct s3c_camif_drvdata *drvdata;
struct camif_dev *camif;
- struct resource *mres;
int ret = 0;
camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
@@ -423,9 +421,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
drvdata = (void *)platform_get_device_id(pdev)->driver_data;
camif->variant = drvdata->variant;
- mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- camif->io_base = devm_ioremap_resource(dev, mres);
+ camif->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(camif->io_base))
return PTR_ERR(camif->io_base);
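
This is the first of several identical conversions in the series (g2d, s5p-jpeg, s5p-mfc, bdisp, hva, sun4i-csi, sun6i-csi and sun8i-di follow): the platform_get_resource() plus devm_ioremap_resource() pair collapses into one devm_platform_ioremap_resource() call and the local struct resource * goes away. The resulting probe shape, sketched with a hypothetical driver name:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)	/* hypothetical driver */
{
	void __iomem *base;

	/* one call replaces platform_get_resource() + devm_ioremap_resource() */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program the mapped registers ... */
	return 0;
}
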
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 1cb5eaabf340..fa0bb31bd2b9 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -635,9 +635,7 @@ static int g2d_probe(struct platform_device *pdev)
mutex_init(&dev->mutex);
atomic_set(&dev->num_inst, 0);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs))
return PTR_ERR(dev->regs);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 7d0ab19c38bb..ebdfd24e9cd5 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2850,7 +2850,6 @@ static void *jpeg_get_drv_data(struct device *dev);
static int s5p_jpeg_probe(struct platform_device *pdev)
{
struct s5p_jpeg *jpeg;
- struct resource *res;
int i, ret;
/* JPEG IP abstraction struct */
@@ -2867,9 +2866,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->dev = &pdev->dev;
/* memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- jpeg->regs = devm_ioremap_resource(&pdev->dev, res);
+ jpeg->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpeg->regs))
return PTR_ERR(jpeg->regs);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index eba2b9f040df..fc85e4e2d020 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1283,14 +1283,17 @@ static int s5p_mfc_probe(struct platform_device *pdev)
spin_lock_init(&dev->condlock);
dev->plat_dev = pdev;
if (!dev->plat_dev) {
- dev_err(&pdev->dev, "No platform data specified\n");
+ mfc_err("No platform data specified\n");
return -ENODEV;
}
dev->variant = of_device_get_match_data(&pdev->dev);
+ if (!dev->variant) {
+ dev_err(&pdev->dev, "Failed to get device MFC hardware variant information\n");
+ return -ENOENT;
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 6413cd279125..7d467f2ba072 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1315,8 +1315,7 @@ static int bdisp_probe(struct platform_device *pdev)
mutex_init(&bdisp->lock);
/* get resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bdisp->regs = devm_ioremap_resource(dev, res);
+ bdisp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdisp->regs)) {
ret = PTR_ERR(bdisp->regs);
goto err_wq;
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 338b205ae3a7..02dc78bd7fab 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -28,7 +28,6 @@
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/wait.h>
#include <linux/pinctrl/pinctrl.h>
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
index 0560a9cb004b..feb48cb546d7 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/version.h>
#include <dt-bindings/media/c8sectpfe.h>
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index 30fb1aa4a351..15e8f83b1b56 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -298,15 +298,13 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
{
struct device *dev = &pdev->dev;
- struct resource *regs;
struct resource *esram;
int ret;
WARN_ON(!hva);
/* get memory for registers */
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hva->regs = devm_ioremap_resource(dev, regs);
+ hva->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hva->regs)) {
dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
return PTR_ERR(hva->regs);
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index d914ccef9831..e1b17c05229c 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -128,6 +128,7 @@ struct stm32_dcmi {
int sequence;
struct list_head buffers;
struct dcmi_buf *active;
+ int irq;
struct v4l2_device v4l2_dev;
struct video_device *vdev;
@@ -1759,6 +1760,14 @@ static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
return ret;
}
+ ret = devm_request_threaded_irq(dcmi->dev, dcmi->irq, dcmi_irq_callback,
+ dcmi_irq_thread, IRQF_ONESHOT,
+ dev_name(dcmi->dev), dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "Unable to request irq %d\n", dcmi->irq);
+ return ret;
+ }
+
return 0;
}
@@ -1824,11 +1833,11 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
return -EINVAL;
}
- v4l2_async_notifier_init(&dcmi->notifier);
+ v4l2_async_nf_init(&dcmi->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &dcmi->notifier, of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&dcmi->notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
of_node_put(ep);
@@ -1839,10 +1848,10 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
dcmi->notifier.ops = &dcmi_graph_notify_ops;
- ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
+ ret = v4l2_async_nf_register(&dcmi->v4l2_dev, &dcmi->notifier);
if (ret < 0) {
dev_err(dcmi->dev, "Failed to register notifier\n");
- v4l2_async_notifier_cleanup(&dcmi->notifier);
+ v4l2_async_nf_cleanup(&dcmi->notifier);
return ret;
}
@@ -1914,6 +1923,8 @@ static int dcmi_probe(struct platform_device *pdev)
if (irq <= 0)
return irq ? irq : -ENXIO;
+ dcmi->irq = irq;
+
dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!dcmi->res) {
dev_err(&pdev->dev, "Could not get resource\n");
@@ -1926,14 +1937,6 @@ static int dcmi_probe(struct platform_device *pdev)
return PTR_ERR(dcmi->regs);
}
- ret = devm_request_threaded_irq(&pdev->dev, irq, dcmi_irq_callback,
- dcmi_irq_thread, IRQF_ONESHOT,
- dev_name(&pdev->dev), dcmi);
- if (ret) {
- dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
- return ret;
- }
-
mclk = devm_clk_get(&pdev->dev, "mclk");
if (IS_ERR(mclk)) {
if (PTR_ERR(mclk) != -EPROBE_DEFER)
@@ -2060,7 +2063,7 @@ static int dcmi_probe(struct platform_device *pdev)
return 0;
err_cleanup:
- v4l2_async_notifier_cleanup(&dcmi->notifier);
+ v4l2_async_nf_cleanup(&dcmi->notifier);
err_media_entity_cleanup:
media_entity_cleanup(&dcmi->vdev->entity);
err_device_release:
@@ -2080,8 +2083,8 @@ static int dcmi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- v4l2_async_notifier_unregister(&dcmi->notifier);
- v4l2_async_notifier_cleanup(&dcmi->notifier);
+ v4l2_async_nf_unregister(&dcmi->notifier);
+ v4l2_async_nf_cleanup(&dcmi->notifier);
media_entity_cleanup(&dcmi->vdev->entity);
v4l2_device_unregister(&dcmi->v4l2_dev);
media_device_cleanup(&dcmi->mdev);
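
Beyond the v4l2_async_nf_*() renames, the stm32-dcmi hunks move the threaded-IRQ request from probe time into the notifier's complete callback, parking the IRQ number in struct stm32_dcmi in the meantime, so no interrupt can arrive before the subdev graph is bound. A sketch of the reordering, with hypothetical names throughout:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct demo_dev {				/* hypothetical, like struct stm32_dcmi */
	struct device *dev;
	int irq;				/* saved in probe, requested later */
};

static irqreturn_t demo_irq(int irq, void *arg)
{
	return IRQ_WAKE_THREAD;			/* defer real work to the thread */
}

static irqreturn_t demo_irq_thread(int irq, void *arg)
{
	return IRQ_HANDLED;
}

/* runs once every subdev is bound, like dcmi_graph_notify_complete() */
static int demo_notify_complete(struct demo_dev *d)
{
	/* safe now: everything the handler touches has been set up */
	return devm_request_threaded_irq(d->dev, d->irq, demo_irq,
					 demo_irq_thread, IRQF_ONESHOT,
					 dev_name(d->dev), d);
}

static int demo_probe_irq(struct platform_device *pdev, struct demo_dev *d)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;
	d->irq = irq;				/* request deferred to notify_complete() */
	return 0;
}
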
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index 8d40a7acba9c..80a10f238bbe 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -122,7 +122,7 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
struct fwnode_handle *ep;
int ret;
- v4l2_async_notifier_init(&csi->notifier);
+ v4l2_async_nf_init(&csi->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -135,8 +135,8 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
csi->bus = vep.bus.parallel;
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&csi->notifier, ep,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
@@ -154,7 +154,6 @@ static int sun4i_csi_probe(struct platform_device *pdev)
struct v4l2_subdev *subdev;
struct video_device *vdev;
struct sun4i_csi *csi;
- struct resource *res;
int ret;
int irq;
@@ -179,8 +178,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
media_device_init(&csi->mdev);
csi->v4l.mdev = &csi->mdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csi->regs = devm_ioremap_resource(&pdev->dev, res);
+ csi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi->regs))
return PTR_ERR(csi->regs);
@@ -244,7 +242,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_media;
- ret = v4l2_async_notifier_register(&csi->v4l, &csi->notifier);
+ ret = v4l2_async_nf_register(&csi->v4l, &csi->notifier);
if (ret) {
dev_err(csi->dev, "Couldn't register our notifier.\n");
goto err_unregister_media;
@@ -268,8 +266,8 @@ static int sun4i_csi_remove(struct platform_device *pdev)
{
struct sun4i_csi *csi = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&csi->notifier);
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
vb2_video_unregister_device(&csi->vdev);
media_device_unregister(&csi->mdev);
sun4i_csi_dma_unregister(csi);
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index 27935f1e9555..fc96921b0583 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -61,7 +61,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
|| sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_BT656)
&& sdev->csi.v4l2_ep.bus.parallel.bus_width == 16) {
switch (pixformat) {
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
@@ -124,7 +124,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
case V4L2_PIX_FMT_VYUY:
return (mbus_code == MEDIA_BUS_FMT_VYUY8_2X8);
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
@@ -269,7 +269,7 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev,
case V4L2_PIX_FMT_VYUY:
return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8;
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
return buf_interlaced ? CSI_FRAME_MB_YUV420 :
CSI_FIELD_MB_YUV420;
case V4L2_PIX_FMT_NV12:
@@ -311,7 +311,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev,
return 0;
switch (pixformat) {
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_YUV420:
@@ -526,7 +526,7 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev)
planar_offset[0] = 0;
switch (config->pixelformat) {
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
@@ -717,8 +717,8 @@ static int sun6i_csi_fwnode_parse(struct device *dev,
static void sun6i_csi_v4l2_cleanup(struct sun6i_csi *csi)
{
media_device_unregister(&csi->media_dev);
- v4l2_async_notifier_unregister(&csi->notifier);
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
sun6i_video_cleanup(&csi->video);
v4l2_device_unregister(&csi->v4l2_dev);
v4l2_ctrl_handler_free(&csi->ctrl_handler);
@@ -737,7 +737,7 @@ static int sun6i_csi_v4l2_init(struct sun6i_csi *csi)
"platform:%s", dev_name(csi->dev));
media_device_init(&csi->media_dev);
- v4l2_async_notifier_init(&csi->notifier);
+ v4l2_async_nf_init(&csi->notifier);
ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 0);
if (ret) {
@@ -759,16 +759,17 @@ static int sun6i_csi_v4l2_init(struct sun6i_csi *csi)
if (ret)
goto unreg_v4l2;
- ret = v4l2_async_notifier_parse_fwnode_endpoints(csi->dev,
- &csi->notifier,
- sizeof(struct v4l2_async_subdev),
- sun6i_csi_fwnode_parse);
+	ret = v4l2_async_nf_parse_fwnode_endpoints(csi->dev, &csi->notifier,
+						   sizeof(struct v4l2_async_subdev),
+						   sun6i_csi_fwnode_parse);
if (ret)
goto clean_video;
csi->notifier.ops = &sun6i_csi_async_ops;
- ret = v4l2_async_notifier_register(&csi->v4l2_dev, &csi->notifier);
+ ret = v4l2_async_nf_register(&csi->v4l2_dev, &csi->notifier);
if (ret) {
dev_err(csi->dev, "notifier registration failed\n");
goto clean_video;
@@ -783,7 +784,7 @@ unreg_v4l2:
free_ctrl:
v4l2_ctrl_handler_free(&csi->ctrl_handler);
clean_media:
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
media_device_cleanup(&csi->media_dev);
return ret;
@@ -832,13 +833,11 @@ static const struct regmap_config sun6i_csi_regmap_config = {
static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
struct platform_device *pdev)
{
- struct resource *res;
void __iomem *io_base;
int ret;
int irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io_base = devm_ioremap_resource(&pdev->dev, res);
+ io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
index c626821aaedb..3a38d107ae3f 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
@@ -105,7 +105,7 @@ static inline int sun6i_csi_get_bpp(unsigned int pixformat)
case V4L2_PIX_FMT_SGBRG12:
case V4L2_PIX_FMT_SGRBG12:
case V4L2_PIX_FMT_SRGGB12:
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_YUV420:
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index 07b2161392d2..607a8d39fbe2 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -48,7 +48,7 @@ static const u32 supported_pixformats[] = {
V4L2_PIX_FMT_YVYU,
V4L2_PIX_FMT_UYVY,
V4L2_PIX_FMT_VYUY,
- V4L2_PIX_FMT_HM12,
+ V4L2_PIX_FMT_NV12_16L16,
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_NV21,
V4L2_PIX_FMT_YUV420,
@@ -467,7 +467,7 @@ static const struct v4l2_ioctl_ops sun6i_video_ioctl_ops = {
static int sun6i_video_open(struct file *file)
{
struct sun6i_video *video = video_drvdata(file);
- int ret;
+ int ret = 0;
if (mutex_lock_interruptible(&video->lock))
return -ERESTARTSYS;
@@ -481,10 +481,8 @@ static int sun6i_video_open(struct file *file)
goto fh_release;
/* check if already powered */
- if (!v4l2_fh_is_singular_file(file)) {
- ret = -EBUSY;
+ if (!v4l2_fh_is_singular_file(file))
goto unlock;
- }
ret = sun6i_csi_set_power(video->csi, true);
if (ret < 0)
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index 671e4a928993..aa65d70b6270 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -803,7 +803,6 @@ static int deinterlace_probe(struct platform_device *pdev)
{
struct deinterlace_dev *dev;
struct video_device *vfd;
- struct resource *res;
int irq, ret;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -825,8 +824,7 @@ static int deinterlace_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(&pdev->dev, res);
+ dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->base))
return PTR_ERR(dev->base);
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 8e469d518a74..4a4a6c5983f7 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -781,7 +781,7 @@ static int cal_async_notifier_register(struct cal_dev *cal)
unsigned int i;
int ret;
- v4l2_async_notifier_init(&cal->notifier);
+ v4l2_async_nf_init(&cal->notifier);
cal->notifier.ops = &cal_async_notifier_ops;
for (i = 0; i < cal->data->num_csi2_phy; ++i) {
@@ -793,9 +793,9 @@ static int cal_async_notifier_register(struct cal_dev *cal)
continue;
fwnode = of_fwnode_handle(phy->source_node);
- casd = v4l2_async_notifier_add_fwnode_subdev(&cal->notifier,
- fwnode,
- struct cal_v4l2_async_subdev);
+ casd = v4l2_async_nf_add_fwnode(&cal->notifier,
+ fwnode,
+ struct cal_v4l2_async_subdev);
if (IS_ERR(casd)) {
phy_err(phy, "Failed to add subdev to notifier\n");
ret = PTR_ERR(casd);
@@ -805,7 +805,7 @@ static int cal_async_notifier_register(struct cal_dev *cal)
casd->phy = phy;
}
- ret = v4l2_async_notifier_register(&cal->v4l2_dev, &cal->notifier);
+ ret = v4l2_async_nf_register(&cal->v4l2_dev, &cal->notifier);
if (ret) {
cal_err(cal, "Error registering async notifier\n");
goto error;
@@ -814,14 +814,14 @@ static int cal_async_notifier_register(struct cal_dev *cal)
return 0;
error:
- v4l2_async_notifier_cleanup(&cal->notifier);
+ v4l2_async_nf_cleanup(&cal->notifier);
return ret;
}
static void cal_async_notifier_unregister(struct cal_dev *cal)
{
- v4l2_async_notifier_unregister(&cal->notifier);
- v4l2_async_notifier_cleanup(&cal->notifier);
+ v4l2_async_nf_unregister(&cal->notifier);
+ v4l2_async_nf_cleanup(&cal->notifier);
}
/* ------------------------------------------------------------------
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 3655573e8581..95483c84c3f2 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -132,11 +132,11 @@ static struct via_camera *via_cam_info;
* Debugging and related.
*/
#define cam_err(cam, fmt, arg...) \
- dev_err(&(cam)->platdev->dev, fmt, ##arg);
+ dev_err(&(cam)->platdev->dev, fmt, ##arg)
#define cam_warn(cam, fmt, arg...) \
- dev_warn(&(cam)->platdev->dev, fmt, ##arg);
+ dev_warn(&(cam)->platdev->dev, fmt, ##arg)
#define cam_dbg(cam, fmt, arg...) \
- dev_dbg(&(cam)->platdev->dev, fmt, ##arg);
+ dev_dbg(&(cam)->platdev->dev, fmt, ##arg)
/*
* Format handling. This is ripped almost directly from Hans's changes
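
The via-camera hunk is a macro-hygiene fix: a statement-like macro must not supply its own trailing semicolon, because the caller adds one too, leaving an empty extra statement that detaches a following else from its if. A standalone illustration (the cam_log_* names are made-up stand-ins; ##__VA_ARGS__ is the GNU extension the kernel relies on):

#include <stdio.h>

#define cam_log_bad(fmt, ...)	printf(fmt, ##__VA_ARGS__);	/* stray ';' */
#define cam_log_good(fmt, ...)	printf(fmt, ##__VA_ARGS__)

int main(void)
{
	int err = 0;

	/*
	 * Swap cam_log_good() for cam_log_bad() below and this stops
	 * compiling: the macro's own ';' plus the caller's ';' forms an
	 * empty statement, so the 'else' no longer pairs with the 'if'.
	 */
	if (err)
		cam_log_good("error\n");
	else
		cam_log_good("ok\n");
	return 0;
}
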
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 905005e271ca..fda8fc0e4814 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -360,7 +360,7 @@ static int video_mux_async_register(struct video_mux *vmux,
unsigned int i;
int ret;
- v4l2_async_notifier_init(&vmux->notifier);
+ v4l2_async_nf_init(&vmux->notifier);
for (i = 0; i < num_input_pads; i++) {
struct v4l2_async_subdev *asd;
@@ -380,8 +380,8 @@ static int video_mux_async_register(struct video_mux *vmux,
}
fwnode_handle_put(remote_ep);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &vmux->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&vmux->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
@@ -395,8 +395,7 @@ static int video_mux_async_register(struct video_mux *vmux,
vmux->notifier.ops = &video_mux_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&vmux->subdev,
- &vmux->notifier);
+ ret = v4l2_async_subdev_nf_register(&vmux->subdev, &vmux->notifier);
if (ret)
return ret;
@@ -477,8 +476,8 @@ static int video_mux_probe(struct platform_device *pdev)
ret = video_mux_async_register(vmux, num_pads - 1);
if (ret) {
- v4l2_async_notifier_unregister(&vmux->notifier);
- v4l2_async_notifier_cleanup(&vmux->notifier);
+ v4l2_async_nf_unregister(&vmux->notifier);
+ v4l2_async_nf_cleanup(&vmux->notifier);
}
return ret;
@@ -489,8 +488,8 @@ static int video_mux_remove(struct platform_device *pdev)
struct video_mux *vmux = platform_get_drvdata(pdev);
struct v4l2_subdev *sd = &vmux->subdev;
- v4l2_async_notifier_unregister(&vmux->notifier);
- v4l2_async_notifier_cleanup(&vmux->notifier);
+ v4l2_async_nf_unregister(&vmux->notifier);
+ v4l2_async_nf_cleanup(&vmux->notifier);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index 06f74d410973..0c2507dc03d6 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -455,6 +455,10 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
dev_err(vsp1->dev, "%s: failed to setup UIF after %s\n",
__func__, BRX_NAME(pipe->brx));
+ /* If the DRM pipe does not have a UIF there is nothing we can update. */
+ if (!drm_pipe->uif)
+ return 0;
+
/*
* If the UIF is not in use schedule it for removal by setting its pipe
* pointer to NULL, vsp1_du_pipeline_configure() will remove it from the
@@ -462,9 +466,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
* make sure it is present in the pipeline's list of entities if it
* wasn't already.
*/
- if (drm_pipe->uif && !use_uif) {
+ if (!use_uif) {
drm_pipe->uif->pipe = NULL;
- } else if (drm_pipe->uif && !drm_pipe->uif->pipe) {
+ } else if (!drm_pipe->uif->pipe) {
drm_pipe->uif->pipe = pipe;
list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities);
}
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index de442d6c9926..c9044785b903 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -44,7 +44,7 @@
static irqreturn_t vsp1_irq_handler(int irq, void *data)
{
- u32 mask = VI6_WFP_IRQ_STA_DFE | VI6_WFP_IRQ_STA_FRE;
+ u32 mask = VI6_WPF_IRQ_STA_DFE | VI6_WPF_IRQ_STA_FRE;
struct vsp1_device *vsp1 = data;
irqreturn_t ret = IRQ_NONE;
unsigned int i;
@@ -59,7 +59,7 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
- if (status & VI6_WFP_IRQ_STA_DFE) {
+ if (status & VI6_WPF_IRQ_STA_DFE) {
vsp1_pipeline_frame_end(wpf->entity.pipe);
ret = IRQ_HANDLED;
}
@@ -777,6 +777,16 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uif_count = 2,
.wpf_count = 2,
.num_bru_inputs = 5,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V3U,
+ .model = "VSP2-D",
+ .gen = 3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 2,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
},
};
@@ -785,7 +795,6 @@ static int vsp1_probe(struct platform_device *pdev)
struct vsp1_device *vsp1;
struct device_node *fcp_node;
struct resource *irq;
- struct resource *io;
unsigned int i;
int ret;
@@ -800,8 +809,7 @@ static int vsp1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vsp1);
/* I/O and IRQ resources (clock managed by the clock PM domain). */
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vsp1->mmio = devm_ioremap_resource(&pdev->dev, io);
+ vsp1->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vsp1->mmio))
return PTR_ERR(vsp1->mmio);
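Note: the VI6_WFP_IRQ_* to VI6_WPF_IRQ_* changes in this file fix the spelling of the Write Pixel Formatter macros only; the bit values are identical, so behaviour is unchanged. The ack idiom in the handler is worth spelling out, assuming (as the code implies) a write-zero-to-clear status convention:

	/* Bits read back as asserted are written back as 0, clearing
	 * them, so only events raised after the read stay pending. */
	status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
	vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);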
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index fe3130db1fa2..fae7286eb01e 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -32,12 +32,12 @@
#define VI6_STATUS_SYS_ACT(n) BIT((n) + 8)
#define VI6_WPF_IRQ_ENB(n) (0x0048 + (n) * 12)
-#define VI6_WFP_IRQ_ENB_DFEE BIT(1)
-#define VI6_WFP_IRQ_ENB_FREE BIT(0)
+#define VI6_WPF_IRQ_ENB_DFEE BIT(1)
+#define VI6_WPF_IRQ_ENB_FREE BIT(0)
#define VI6_WPF_IRQ_STA(n) (0x004c + (n) * 12)
-#define VI6_WFP_IRQ_STA_DFE BIT(1)
-#define VI6_WFP_IRQ_STA_FRE BIT(0)
+#define VI6_WPF_IRQ_STA_DFE BIT(1)
+#define VI6_WPF_IRQ_STA_FRE BIT(0)
#define VI6_DISP_IRQ_ENB(n) (0x0078 + (n) * 60)
#define VI6_DISP_IRQ_ENB_DSTE BIT(8)
@@ -766,6 +766,8 @@
#define VI6_IP_VERSION_MODEL_VSPD_V3 (0x18 << 8)
#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8)
#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_V3U (0x1c << 8)
+
#define VI6_IP_VERSION_SOC_MASK (0xff << 0)
#define VI6_IP_VERSION_SOC_H2 (0x01 << 0)
#define VI6_IP_VERSION_SOC_V2H (0x01 << 0)
@@ -777,6 +779,7 @@
#define VI6_IP_VERSION_SOC_D3 (0x04 << 0)
#define VI6_IP_VERSION_SOC_M3N (0x04 << 0)
#define VI6_IP_VERSION_SOC_E3 (0x04 << 0)
+#define VI6_IP_VERSION_SOC_V3U (0x05 << 0)
/* -----------------------------------------------------------------------------
* RPF CLUT Registers
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 208498fa6ed7..94e91d7bb56c 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -342,7 +342,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
/* Enable interrupts. */
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_STA(index), 0);
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(index),
- VI6_WFP_IRQ_ENB_DFEE);
+ VI6_WPF_IRQ_ENB_DFEE);
/*
* Configure writeback for display pipelines (the wpf writeback flag is
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 425a32dd5d19..a0073122798f 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -205,10 +205,8 @@ EXPORT_SYMBOL_GPL(xvip_clr_and_set);
int xvip_init_resources(struct xvip_device *xvip)
{
struct platform_device *pdev = to_platform_device(xvip->dev);
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xvip->iomem = devm_ioremap_resource(xvip->dev, res);
+ xvip->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xvip->iomem))
return PTR_ERR(xvip->iomem);
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index 2ce31d7ce1a6..f34f8b077e03 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -382,9 +382,8 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
continue;
}
- xge = v4l2_async_notifier_add_fwnode_subdev(
- &xdev->notifier, remote,
- struct xvip_graph_entity);
+ xge = v4l2_async_nf_add_fwnode(&xdev->notifier, remote,
+ struct xvip_graph_entity);
fwnode_handle_put(remote);
if (IS_ERR(xge)) {
ret = PTR_ERR(xge);
@@ -395,7 +394,7 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
return 0;
err_notifier_cleanup:
- v4l2_async_notifier_cleanup(&xdev->notifier);
+ v4l2_async_nf_cleanup(&xdev->notifier);
fwnode_handle_put(ep);
return ret;
}
@@ -420,7 +419,7 @@ static int xvip_graph_parse(struct xvip_composite_device *xdev)
entity = to_xvip_entity(asd);
ret = xvip_graph_parse_one(xdev, entity->asd.match.fwnode);
if (ret < 0) {
- v4l2_async_notifier_cleanup(&xdev->notifier);
+ v4l2_async_nf_cleanup(&xdev->notifier);
break;
}
}
@@ -496,8 +495,8 @@ static void xvip_graph_cleanup(struct xvip_composite_device *xdev)
struct xvip_dma *dmap;
struct xvip_dma *dma;
- v4l2_async_notifier_unregister(&xdev->notifier);
- v4l2_async_notifier_cleanup(&xdev->notifier);
+ v4l2_async_nf_unregister(&xdev->notifier);
+ v4l2_async_nf_cleanup(&xdev->notifier);
list_for_each_entry_safe(dma, dmap, &xdev->dmas, list) {
xvip_dma_cleanup(dma);
@@ -532,7 +531,7 @@ static int xvip_graph_init(struct xvip_composite_device *xdev)
/* Register the subdevices notifier. */
xdev->notifier.ops = &xvip_graph_notify_ops;
- ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
+ ret = v4l2_async_nf_register(&xdev->v4l2_dev, &xdev->notifier);
if (ret < 0) {
dev_err(xdev->dev, "notifier registration failed\n");
goto done;
@@ -596,7 +595,7 @@ static int xvip_composite_probe(struct platform_device *pdev)
xdev->dev = &pdev->dev;
INIT_LIST_HEAD(&xdev->dmas);
- v4l2_async_notifier_init(&xdev->notifier);
+ v4l2_async_nf_init(&xdev->notifier);
ret = xvip_composite_v4l2_init(xdev);
if (ret < 0)
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 112376873167..484046471c03 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1279,7 +1279,7 @@ static int wl1273_fm_vidioc_querycap(struct file *file, void *priv,
strscpy(capability->driver, WL1273_FM_DRIVER_NAME,
sizeof(capability->driver));
- strscpy(capability->card, "Texas Instruments Wl1273 FM Radio",
+ strscpy(capability->card, "TI Wl1273 FM Radio",
sizeof(capability->card));
strscpy(capability->bus_info, radio->bus_type,
sizeof(capability->bus_info));
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index f491420d7b53..a972c0705ac7 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -11,7 +11,7 @@
/* driver definitions */
#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
-#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
+#define DRIVER_CARD "Silicon Labs Si470x FM Radio"
#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
#define DRIVER_VERSION "1.0.2"
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index fedff68d8c49..3f8634a46573 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -16,7 +16,7 @@
/* driver definitions */
#define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>"
-#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
+#define DRIVER_CARD "Silicon Labs Si470x FM Radio"
#define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers"
#define DRIVER_VERSION "1.0.10"
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index fd5a7a058714..9506baf3c4c1 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -453,14 +453,6 @@ config IR_SERIAL_TRANSMITTER
help
Serial Port Transmitter support
-config IR_SIR
- tristate "Built-in SIR IrDA port"
- help
- Say Y if you want to use a IrDA SIR port Transceivers.
-
- To compile this driver as a module, choose M here: the module will
- be called sir-ir.
-
config RC_XBOX_DVD
tristate "Xbox DVD Movie Playback Kit"
depends on USB
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 0db51fad27d6..378d62d21e06 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_RC_ST) += st_rc.o
obj-$(CONFIG_IR_SUNXI) += sunxi-cir.o
obj-$(CONFIG_IR_IMG) += img-ir/
obj-$(CONFIG_IR_SERIAL) += serial_ir.o
-obj-$(CONFIG_IR_SIR) += sir_ir.o
obj-$(CONFIG_IR_MTK) += mtk-cir.o
obj-$(CONFIG_RC_XBOX_DVD) += xbox_remote.o
obj-$(CONFIG_IR_TOY) += ir_toy.o
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index 094aa6a06315..6f8464872033 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -76,7 +76,6 @@ static void img_ir_ident(struct img_ir_priv *priv)
static int img_ir_probe(struct platform_device *pdev)
{
struct img_ir_priv *priv;
- struct resource *res_regs;
int irq, error, error2;
/* Get resources from platform device */
@@ -94,8 +93,7 @@ static int img_ir_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
/* Ioremap the registers */
- res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->reg_base = devm_ioremap_resource(&pdev->dev, res_regs);
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 2ca4e86c7b9f..54da6f60079b 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -2358,8 +2358,10 @@ urb_submit_failed:
touch_setup_failed:
find_endpoint_failed:
usb_put_dev(ictx->usbdev_intf1);
+ ictx->usbdev_intf1 = NULL;
mutex_unlock(&ictx->lock);
usb_free_urb(rx_urb);
+ ictx->rx_urb_intf1 = NULL;
rx_urb_alloc_failed:
dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret);
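Note: the two added assignments in imon appear to close a stale-pointer window: on this error path the references have already been dropped, but the pointers stayed behind in ictx, so a later disconnect or retry could free or dereference them again. Clearing the pointer right after releasing it is the standard defensive pattern; a generic, hypothetical illustration:

	usb_free_urb(ctx->rx_urb);	/* reference gone ... */
	ctx->rx_urb = NULL;		/* ... so clear the pointer; later
					 * teardown sees NULL and skips it */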
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index 4609fb4519e9..e0be6471afe5 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -249,7 +249,6 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
{
struct rc_dev *rdev;
struct device *dev = &pdev->dev;
- struct resource *res;
struct hix5hd2_ir_priv *priv;
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *of_id;
@@ -274,8 +273,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
priv->regmap = NULL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 48d52baec1a1..7e98e7e3aace 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -4,7 +4,9 @@
* Infrared Toy and IR Droid RC core driver
*
* Copyright (C) 2020 Sean Young <sean@mess.org>
-
+ *
+ * http://dangerousprototypes.com/docs/USB_IR_Toy:_Sampling_mode
+ *
* This driver is based on the lirc driver which can be found here:
* https://sourceforge.net/p/lirc/git/ci/master/tree/plugins/irtoy.c
* Copyright (C) 2011 Peter Kooiman <pkooiman@gmail.com>
@@ -46,7 +48,7 @@ static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
enum state {
STATE_IRDATA,
- STATE_RESET,
+ STATE_COMMAND_NO_RESP,
STATE_COMMAND,
STATE_TX,
};
@@ -121,6 +123,7 @@ static void irtoy_response(struct irtoy *irtoy, u32 len)
len, irtoy->in);
}
break;
+ case STATE_COMMAND_NO_RESP:
case STATE_IRDATA: {
struct ir_raw_event rawir = { .pulse = irtoy->pulse };
__be16 *in = (__be16 *)irtoy->in;
@@ -166,10 +169,8 @@ static void irtoy_response(struct irtoy *irtoy, u32 len)
int err;
if (len != 1 || space > MAX_PACKET || space == 0) {
- dev_err(irtoy->dev, "packet length expected: %*phN\n",
+ dev_dbg(irtoy->dev, "packet length expected: %*phN\n",
len, irtoy->in);
- irtoy->state = STATE_IRDATA;
- complete(&irtoy->command_done);
break;
}
@@ -193,9 +194,6 @@ static void irtoy_response(struct irtoy *irtoy, u32 len)
irtoy->tx_len -= buf_len;
}
break;
- case STATE_RESET:
- dev_err(irtoy->dev, "unexpected response to reset: %*phN\n",
- len, irtoy->in);
}
}
@@ -204,7 +202,7 @@ static void irtoy_out_callback(struct urb *urb)
struct irtoy *irtoy = urb->context;
if (urb->status == 0) {
- if (irtoy->state == STATE_RESET)
+ if (irtoy->state == STATE_COMMAND_NO_RESP)
complete(&irtoy->command_done);
} else {
dev_warn(irtoy->dev, "out urb status: %d\n", urb->status);
@@ -216,10 +214,20 @@ static void irtoy_in_callback(struct urb *urb)
struct irtoy *irtoy = urb->context;
int ret;
- if (urb->status == 0)
+ switch (urb->status) {
+ case 0:
irtoy_response(irtoy, urb->actual_length);
- else
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ case -EPIPE:
+ usb_unlink_urb(urb);
+ return;
+ default:
dev_dbg(irtoy->dev, "in urb status: %d\n", urb->status);
+ }
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret && ret != -ENODEV)
@@ -256,7 +264,7 @@ static int irtoy_setup(struct irtoy *irtoy)
int err;
err = irtoy_command(irtoy, COMMAND_RESET, sizeof(COMMAND_RESET),
- STATE_RESET);
+ STATE_COMMAND_NO_RESP);
if (err != 0) {
dev_err(irtoy->dev, "could not write reset command: %d\n",
err);
@@ -310,7 +318,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
buf[i] = cpu_to_be16(v);
}
- buf[count] = 0xffff;
+ buf[count] = cpu_to_be16(0xffff);
irtoy->tx_buf = buf;
irtoy->tx_len = size;
@@ -321,7 +329,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
// with its led on. It does not respond to any command when this
// happens. To work around this, re-enter sample mode.
err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
- sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
+ sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP);
if (err) {
dev_err(irtoy->dev, "exit sample mode: %d\n", err);
return err;
@@ -357,6 +365,27 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
return count;
}
+static int irtoy_tx_carrier(struct rc_dev *rc, uint32_t carrier)
+{
+ struct irtoy *irtoy = rc->priv;
+ u8 buf[3];
+ int err;
+
+ if (carrier < 11800)
+ return -EINVAL;
+
+ buf[0] = 0x06;
+ buf[1] = DIV_ROUND_CLOSEST(48000000, 16 * carrier) - 1;
+ buf[2] = 0;
+
+ err = irtoy_command(irtoy, buf, sizeof(buf), STATE_COMMAND_NO_RESP);
+ if (err)
+ dev_err(irtoy->dev, "could not write carrier command: %d\n",
+ err);
+
+ return err;
+}
+
static int irtoy_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -436,8 +465,9 @@ static int irtoy_probe(struct usb_interface *intf,
if (err)
goto free_rcdev;
- dev_info(irtoy->dev, "version: hardware %u, firmware %u, protocol %u",
- irtoy->hw_version, irtoy->sw_version, irtoy->proto_version);
+ dev_info(irtoy->dev, "version: hardware %u, firmware %u.%u, protocol %u",
+ irtoy->hw_version, irtoy->sw_version / 10,
+ irtoy->sw_version % 10, irtoy->proto_version);
if (irtoy->sw_version < MIN_FW_VERSION) {
dev_err(irtoy->dev, "need firmware V%02u or higher",
@@ -455,6 +485,7 @@ static int irtoy_probe(struct usb_interface *intf,
rc->dev.parent = &intf->dev;
rc->priv = irtoy;
rc->tx_ir = irtoy_tx;
+ rc->s_tx_carrier = irtoy_tx_carrier;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->map_name = RC_MAP_RC6_MCE;
rc->rx_resolution = UNIT_US;
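Note: the new irtoy_tx_carrier() handler programs the device's carrier generator through what is assumed to be a 48 MHz reference divided by 16 times an 8-bit divider (the 0x06 command byte and the formula come straight from the hunk above). Worked through for a common 38 kHz carrier:

	buf[1] = DIV_ROUND_CLOSEST(48000000, 16 * 38000) - 1
	       = DIV_ROUND_CLOSEST(48000000, 608000) - 1
	       = 79 - 1
	       = 78	(0x4e)

	actual carrier = 48000000 / (16 * (78 + 1)) ~= 37975 Hz

The 11800 Hz floor keeps the divider inside the single byte sent in buf[1]: an 8-bit divider bottoms out at 48 MHz / (16 * 256) ~= 11719 Hz, so anything lower must be rejected with -EINVAL.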
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 5bc23e8c6d91..4f77d4ebacdc 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -242,7 +242,7 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
}
/* check for the receive interrupt */
- if (iflags & ITE_IRQ_RX_FIFO) {
+ if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
/* read the FIFO bytes */
rx_bytes = dev->params->get_rx_bytes(dev, rx_buf,
ITE_RX_FIFO_LEN);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index e03dd1f0144f..d09bee82c04c 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1386,6 +1386,7 @@ static void mceusb_dev_recv(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
case -EILSEQ:
+ case -EPROTO:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
@@ -1612,6 +1613,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->dev.parent = dev;
rc->priv = ir;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
+ rc->rx_resolution = MCE_TIME_UNIT;
rc->min_timeout = MCE_TIME_UNIT;
rc->timeout = MS_TO_US(100);
if (!mceusb_model[ir->model].broken_irtimeout) {
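Note: the -EPROTO addition to mceusb_dev_recv() extends the list of URB statuses treated as fatal, where resubmitting is pointless; -EPROTO is commonly what host controllers report on a surprise disconnect or bus babble. The ir_toy in-URB callback earlier in this series gets the equivalent treatment. A hedged sketch of the idiom, with process_payload() standing in for the driver's real handler:

	switch (urb->status) {
	case 0:
		process_payload(urb);	/* hypothetical */
		break;
	case -ECONNRESET:		/* urb unlinked asynchronously */
	case -ENOENT:			/* urb killed synchronously */
	case -EILSEQ:
	case -EPROTO:			/* low-level error, often disconnect */
	case -ESHUTDOWN:		/* host controller shutting down */
		usb_unlink_urb(urb);
		return;			/* do not resubmit */
	default:
		break;			/* transient; fall through */
	}
	usb_submit_urb(urb, GFP_ATOMIC);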
diff --git a/drivers/media/rc/meson-ir-tx.c b/drivers/media/rc/meson-ir-tx.c
index 3055f8e1b6ff..c22cd26a5c07 100644
--- a/drivers/media/rc/meson-ir-tx.c
+++ b/drivers/media/rc/meson-ir-tx.c
@@ -395,7 +395,6 @@ static struct platform_driver meson_irtx_pd = {
.remove = meson_irtx_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = meson_irtx_dt_match,
},
};
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index dad55950dfc6..4b769111f78e 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -102,7 +102,6 @@ static int meson_ir_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
const char *map_name;
struct meson_ir *ir;
int irq, ret;
@@ -111,8 +110,7 @@ static int meson_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ir->reg = devm_ioremap_resource(dev, res);
+ ir->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->reg))
return PTR_ERR(ir->reg);
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index 65a136c0fac2..840e7aec5c21 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -292,7 +292,6 @@ static int mtk_ir_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
- struct resource *res;
struct mtk_ir *ir;
u32 val;
int ret = 0;
@@ -320,8 +319,7 @@ static int mtk_ir_probe(struct platform_device *pdev)
ir->bus = ir->clk;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ir->base = devm_ioremap_resource(dev, res);
+ ir->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->base))
return PTR_ERR(ir->base);
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
deleted file mode 100644
index 6ec96dc34586..000000000000
--- a/drivers/media/rc/sir_ir.c
+++ /dev/null
@@ -1,438 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * IR SIR driver, (C) 2000 Milan Pikula <www@fornax.sk>
- *
- * sir_ir - Device driver for use with SIR (serial infra red)
- * mode of IrDA on many notebooks.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/ktime.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-
-#include <media/rc-core.h>
-
-/* SECTION: Definitions */
-#define PULSE '['
-
-/* 9bit * 1s/115200bit in milli seconds = 78.125ms*/
-#define TIME_CONST (9000000ul / 115200ul)
-
-/* timeout for sequences in jiffies (=5/100s), must be longer than TIME_CONST */
-#define SIR_TIMEOUT (HZ * 5 / 100)
-
-/* onboard sir ports are typically com3 */
-static int io = 0x3e8;
-static int irq = 4;
-static int threshold = 3;
-
-static DEFINE_SPINLOCK(timer_lock);
-static struct timer_list timerlist;
-/* time of last signal change detected */
-static ktime_t last;
-/* time of last UART data ready interrupt */
-static ktime_t last_intr_time;
-static int last_value;
-static struct rc_dev *rcdev;
-
-static struct platform_device *sir_ir_dev;
-
-static DEFINE_SPINLOCK(hardware_lock);
-
-/* SECTION: Prototypes */
-
-/* Communication with user-space */
-static void add_read_queue(int flag, unsigned long val);
-/* Hardware */
-static irqreturn_t sir_interrupt(int irq, void *dev_id);
-static void send_space(unsigned long len);
-static void send_pulse(unsigned long len);
-static int init_hardware(void);
-static void drop_hardware(void);
-/* Initialisation */
-
-static inline unsigned int sinp(int offset)
-{
- return inb(io + offset);
-}
-
-static inline void soutp(int offset, int value)
-{
- outb(value, io + offset);
-}
-
-/* SECTION: Communication with user-space */
-static int sir_tx_ir(struct rc_dev *dev, unsigned int *tx_buf,
- unsigned int count)
-{
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < count;) {
- if (tx_buf[i])
- send_pulse(tx_buf[i]);
- i++;
- if (i >= count)
- break;
- if (tx_buf[i])
- send_space(tx_buf[i]);
- i++;
- }
- local_irq_restore(flags);
-
- return count;
-}
-
-static void add_read_queue(int flag, unsigned long val)
-{
- struct ir_raw_event ev = {};
-
- pr_debug("add flag %d with val %lu\n", flag, val);
-
- /*
- * statistically, pulses are ~TIME_CONST/2 too long. we could
- * maybe make this more exact, but this is good enough
- */
- if (flag) {
- /* pulse */
- if (val > TIME_CONST / 2)
- val -= TIME_CONST / 2;
- else /* should not ever happen */
- val = 1;
- ev.pulse = true;
- } else {
- val += TIME_CONST / 2;
- }
- ev.duration = val;
-
- ir_raw_event_store_with_filter(rcdev, &ev);
-}
-
-/* SECTION: Hardware */
-static void sir_timeout(struct timer_list *unused)
-{
- /*
- * if last received signal was a pulse, but receiving stopped
- * within the 9 bit frame, we need to finish this pulse and
- * simulate a signal change to from pulse to space. Otherwise
- * upper layers will receive two sequences next time.
- */
-
- unsigned long flags;
- unsigned long pulse_end;
-
- /* avoid interference with interrupt */
- spin_lock_irqsave(&timer_lock, flags);
- if (last_value) {
- /* clear unread bits in UART and restart */
- outb(UART_FCR_CLEAR_RCVR, io + UART_FCR);
- /* determine 'virtual' pulse end: */
- pulse_end = min_t(unsigned long,
- ktime_us_delta(last, last_intr_time),
- IR_MAX_DURATION);
- dev_dbg(&sir_ir_dev->dev, "timeout add %d for %lu usec\n",
- last_value, pulse_end);
- add_read_queue(last_value, pulse_end);
- last_value = 0;
- last = last_intr_time;
- }
- spin_unlock_irqrestore(&timer_lock, flags);
- ir_raw_event_handle(rcdev);
-}
-
-static irqreturn_t sir_interrupt(int irq, void *dev_id)
-{
- unsigned char data;
- ktime_t curr_time;
- unsigned long delt;
- unsigned long deltintr;
- unsigned long flags;
- int counter = 0;
- int iir, lsr;
-
- while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
- if (++counter > 256) {
- dev_err(&sir_ir_dev->dev, "Trapped in interrupt");
- break;
- }
-
- switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */
- case UART_IIR_MSI:
- (void)inb(io + UART_MSR);
- break;
- case UART_IIR_RLSI:
- case UART_IIR_THRI:
- (void)inb(io + UART_LSR);
- break;
- case UART_IIR_RDI:
- /* avoid interference with timer */
- spin_lock_irqsave(&timer_lock, flags);
- do {
- del_timer(&timerlist);
- data = inb(io + UART_RX);
- curr_time = ktime_get();
- delt = min_t(unsigned long,
- ktime_us_delta(last, curr_time),
- IR_MAX_DURATION);
- deltintr = min_t(unsigned long,
- ktime_us_delta(last_intr_time,
- curr_time),
- IR_MAX_DURATION);
- dev_dbg(&sir_ir_dev->dev, "t %lu, d %d\n",
- deltintr, (int)data);
- /*
- * if nothing came in last X cycles,
- * it was gap
- */
- if (deltintr > TIME_CONST * threshold) {
- if (last_value) {
- dev_dbg(&sir_ir_dev->dev, "GAP\n");
- /* simulate signal change */
- add_read_queue(last_value,
- delt -
- deltintr);
- last_value = 0;
- last = last_intr_time;
- delt = deltintr;
- }
- }
- data = 1;
- if (data ^ last_value) {
- /*
- * deltintr > 2*TIME_CONST, remember?
- * the other case is timeout
- */
- add_read_queue(last_value,
- delt - TIME_CONST);
- last_value = data;
- last = curr_time;
- last = ktime_sub_us(last,
- TIME_CONST);
- }
- last_intr_time = curr_time;
- if (data) {
- /*
- * start timer for end of
- * sequence detection
- */
- timerlist.expires = jiffies +
- SIR_TIMEOUT;
- add_timer(&timerlist);
- }
-
- lsr = inb(io + UART_LSR);
- } while (lsr & UART_LSR_DR); /* data ready */
- spin_unlock_irqrestore(&timer_lock, flags);
- break;
- default:
- break;
- }
- }
- ir_raw_event_handle(rcdev);
- return IRQ_RETVAL(IRQ_HANDLED);
-}
-
-static void send_space(unsigned long len)
-{
- usleep_range(len, len + 25);
-}
-
-static void send_pulse(unsigned long len)
-{
- long bytes_out = len / TIME_CONST;
-
- if (bytes_out == 0)
- bytes_out++;
-
- while (bytes_out--) {
- outb(PULSE, io + UART_TX);
- /* FIXME treba seriozne cakanie z char/serial.c */
- while (!(inb(io + UART_LSR) & UART_LSR_THRE))
- ;
- }
-}
-
-static int init_hardware(void)
-{
- u8 scratch, scratch2, scratch3;
- unsigned long flags;
-
- spin_lock_irqsave(&hardware_lock, flags);
-
- /*
- * This is a simple port existence test, borrowed from the autoconfig
- * function in drivers/tty/serial/8250/8250_port.c
- */
- scratch = sinp(UART_IER);
- soutp(UART_IER, 0);
-#ifdef __i386__
- outb(0xff, 0x080);
-#endif
- scratch2 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, 0x0f);
-#ifdef __i386__
- outb(0x00, 0x080);
-#endif
- scratch3 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, scratch);
- if (scratch2 != 0 || scratch3 != 0x0f) {
- /* we fail, there's nothing here */
- spin_unlock_irqrestore(&hardware_lock, flags);
- pr_err("port existence test failed, cannot continue\n");
- return -ENODEV;
- }
-
- /* reset UART */
- outb(0, io + UART_MCR);
- outb(0, io + UART_IER);
- /* init UART */
- /* set DLAB, speed = 115200 */
- outb(UART_LCR_DLAB | UART_LCR_WLEN7, io + UART_LCR);
- outb(1, io + UART_DLL); outb(0, io + UART_DLM);
- /* 7N1+start = 9 bits at 115200 ~ 3 bits at 44000 */
- outb(UART_LCR_WLEN7, io + UART_LCR);
- /* FIFO operation */
- outb(UART_FCR_ENABLE_FIFO, io + UART_FCR);
- /* interrupts */
- /* outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, io + UART_IER); */
- outb(UART_IER_RDI, io + UART_IER);
- /* turn on UART */
- outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
- spin_unlock_irqrestore(&hardware_lock, flags);
-
- return 0;
-}
-
-static void drop_hardware(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hardware_lock, flags);
-
- /* turn off interrupts */
- outb(0, io + UART_IER);
-
- spin_unlock_irqrestore(&hardware_lock, flags);
-}
-
-/* SECTION: Initialisation */
-static int sir_ir_probe(struct platform_device *dev)
-{
- int retval;
-
- rcdev = devm_rc_allocate_device(&sir_ir_dev->dev, RC_DRIVER_IR_RAW);
- if (!rcdev)
- return -ENOMEM;
-
- rcdev->device_name = "SIR IrDA port";
- rcdev->input_phys = KBUILD_MODNAME "/input0";
- rcdev->input_id.bustype = BUS_HOST;
- rcdev->input_id.vendor = 0x0001;
- rcdev->input_id.product = 0x0001;
- rcdev->input_id.version = 0x0100;
- rcdev->tx_ir = sir_tx_ir;
- rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- rcdev->driver_name = KBUILD_MODNAME;
- rcdev->map_name = RC_MAP_RC6_MCE;
- rcdev->timeout = IR_DEFAULT_TIMEOUT;
- rcdev->dev.parent = &sir_ir_dev->dev;
-
- timer_setup(&timerlist, sir_timeout, 0);
-
- /* get I/O port access and IRQ line */
- if (!devm_request_region(&sir_ir_dev->dev, io, 8, KBUILD_MODNAME)) {
- pr_err("i/o port 0x%.4x already in use.\n", io);
- return -EBUSY;
- }
- retval = devm_request_irq(&sir_ir_dev->dev, irq, sir_interrupt, 0,
- KBUILD_MODNAME, NULL);
- if (retval < 0) {
- pr_err("IRQ %d already in use.\n", irq);
- return retval;
- }
-
- retval = init_hardware();
- if (retval) {
- del_timer_sync(&timerlist);
- return retval;
- }
-
- pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
-
- retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev);
- if (retval < 0)
- return retval;
-
- return 0;
-}
-
-static int sir_ir_remove(struct platform_device *dev)
-{
- drop_hardware();
- del_timer_sync(&timerlist);
- return 0;
-}
-
-static struct platform_driver sir_ir_driver = {
- .probe = sir_ir_probe,
- .remove = sir_ir_remove,
- .driver = {
- .name = "sir_ir",
- },
-};
-
-static int __init sir_ir_init(void)
-{
- int retval;
-
- retval = platform_driver_register(&sir_ir_driver);
- if (retval)
- return retval;
-
- sir_ir_dev = platform_device_alloc("sir_ir", 0);
- if (!sir_ir_dev) {
- retval = -ENOMEM;
- goto pdev_alloc_fail;
- }
-
- retval = platform_device_add(sir_ir_dev);
- if (retval)
- goto pdev_add_fail;
-
- return 0;
-
-pdev_add_fail:
- platform_device_put(sir_ir_dev);
-pdev_alloc_fail:
- platform_driver_unregister(&sir_ir_driver);
- return retval;
-}
-
-static void __exit sir_ir_exit(void)
-{
- platform_device_unregister(sir_ir_dev);
- platform_driver_unregister(&sir_ir_driver);
-}
-
-module_init(sir_ir_init);
-module_exit(sir_ir_exit);
-
-MODULE_DESCRIPTION("Infrared receiver driver for SIR type serial ports");
-MODULE_AUTHOR("Milan Pikula");
-MODULE_LICENSE("GPL");
-
-module_param_hw(io, int, ioport, 0444);
-MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
-
-module_param_hw(irq, int, irq, 0444);
-MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
-
-module_param(threshold, int, 0444);
-MODULE_PARM_DESC(threshold, "space detection threshold (3)");
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index d79d1e3996b2..4e419dbbacd3 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -231,7 +231,6 @@ static int st_rc_probe(struct platform_device *pdev)
int ret = -EINVAL;
struct rc_dev *rdev;
struct device *dev = &pdev->dev;
- struct resource *res;
struct st_rc_device *rc_dev;
struct device_node *np = pdev->dev.of_node;
const char *rx_mode;
@@ -274,9 +273,7 @@ static int st_rc_probe(struct platform_device *pdev)
goto err;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- rc_dev->base = devm_ioremap_resource(dev, res);
+ rc_dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rc_dev->base)) {
ret = PTR_ERR(rc_dev->base);
goto err;
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 9cd765e31c49..1cc5ebb85b6c 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -293,6 +293,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rdev->driver_name = DRIVER_NAME;
rdev->map_name = RC_MAP_STREAMZAP;
+ rdev->rx_resolution = SZ_RESOLUTION;
ret = rc_register_device(rdev);
if (ret < 0) {
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 168e1d2c876a..391a591c1b75 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -255,7 +255,6 @@ static int sunxi_ir_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
const struct sunxi_ir_quirks *quirks;
- struct resource *res;
struct sunxi_ir *ir;
u32 b_clk_freq = SUNXI_IR_BASE_CLK;
@@ -301,8 +300,7 @@ static int sunxi_ir_probe(struct platform_device *pdev)
dev_dbg(dev, "set base clock frequency to %d Hz.\n", b_clk_freq);
/* IO */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ir->base = devm_ioremap_resource(dev, res);
+ ir->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->base)) {
return PTR_ERR(ir->base);
}
diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
index b91a1e845b97..506f52c1af10 100644
--- a/drivers/media/spi/cxd2880-spi.c
+++ b/drivers/media/spi/cxd2880-spi.c
@@ -618,7 +618,7 @@ fail_frontend:
fail_attach:
dvb_unregister_adapter(&dvb_spi->adapter);
fail_adapter:
- if (!dvb_spi->vcc_supply)
+ if (dvb_spi->vcc_supply)
regulator_disable(dvb_spi->vcc_supply);
fail_regulator:
kfree(dvb_spi);
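Note: the one-character cxd2880-spi fix inverts a backwards NULL check. As written before, the error path called regulator_disable() only when no regulator had been obtained, which both passed a NULL supply to regulator_disable() and leaked the enable count when a real supply was held; the corrected test disables the regulator exactly when one exists.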
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
index 75617709c8ce..82620613d56b 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
@@ -564,6 +564,10 @@ static int vidtv_bridge_remove(struct platform_device *pdev)
static void vidtv_bridge_dev_release(struct device *dev)
{
+ struct vidtv_dvb *dvb;
+
+ dvb = dev_get_drvdata(dev);
+ kfree(dvb);
}
static struct platform_device vidtv_bridge_dev = {
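Note: moving the kfree() into the release callback follows the driver-model lifetime rule: memory reachable from a struct device must stay valid until the last reference to that device is dropped, and the release callback is the only point where that is guaranteed. Freeing from remove() instead risks a use-after-free for anyone still holding a reference, and an empty release function merely silences the core's warning without plugging the leak.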
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index d714fe50afe5..47575490e74a 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -12,11 +12,6 @@
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <pawel@osciak.com>
* Marek Szyprowski, <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version
*/
#include <linux/module.h>
#include <linux/delay.h>
diff --git a/drivers/media/test-drivers/vimc/vimc-scaler.c b/drivers/media/test-drivers/vimc/vimc-scaler.c
index 06880dd0b6ac..820b8f5b502f 100644
--- a/drivers/media/test-drivers/vimc/vimc-scaler.c
+++ b/drivers/media/test-drivers/vimc/vimc-scaler.c
@@ -6,6 +6,7 @@
*/
#include <linux/moduleparam.h>
+#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-rect.h>
@@ -13,11 +14,11 @@
#include "vimc-common.h"
-static unsigned int sca_mult = 3;
-module_param(sca_mult, uint, 0000);
-MODULE_PARM_DESC(sca_mult, " the image size multiplier");
-
-#define MAX_ZOOM 8
+/* Pad identifier */
+enum vic_sca_pad {
+ VIMC_SCA_SINK = 0,
+ VIMC_SCA_SRC = 1,
+};
#define VIMC_SCA_FMT_WIDTH_DEFAULT 640
#define VIMC_SCA_FMT_HEIGHT_DEFAULT 480
@@ -25,19 +26,16 @@ MODULE_PARM_DESC(sca_mult, " the image size multiplier");
struct vimc_sca_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- /* NOTE: the source fmt is the same as the sink
- * with the width and hight multiplied by mult
- */
- struct v4l2_mbus_framefmt sink_fmt;
struct v4l2_rect crop_rect;
+ /* Frame format for both sink and src pad */
+ struct v4l2_mbus_framefmt fmt[2];
/* Values calculated when the stream starts */
u8 *src_frame;
- unsigned int src_line_size;
unsigned int bpp;
struct media_pad pads[2];
};
-static const struct v4l2_mbus_framefmt sink_fmt_default = {
+static const struct v4l2_mbus_framefmt fmt_default = {
.width = VIMC_SCA_FMT_WIDTH_DEFAULT,
.height = VIMC_SCA_FMT_HEIGHT_DEFAULT,
.code = MEDIA_BUS_FMT_RGB888_1X24,
@@ -72,17 +70,6 @@ vimc_sca_get_crop_bound_sink(const struct v4l2_mbus_framefmt *sink_fmt)
return r;
}
-static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
- const struct v4l2_mbus_framefmt *sink_fmt)
-{
- const struct v4l2_rect sink_rect =
- vimc_sca_get_crop_bound_sink(sink_fmt);
-
- /* Disallow rectangles smaller than the minimal one. */
- v4l2_rect_set_min_size(r, &crop_rect_min);
- v4l2_rect_map_inside(r, &sink_rect);
-}
-
static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
@@ -90,19 +77,14 @@ static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
struct v4l2_rect *r;
unsigned int i;
- mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
- *mf = sink_fmt_default;
-
- r = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- *r = crop_rect_default;
-
- for (i = 1; i < sd->entity.num_pads; i++) {
+ for (i = 0; i < sd->entity.num_pads; i++) {
mf = v4l2_subdev_get_try_format(sd, sd_state, i);
- *mf = sink_fmt_default;
- mf->width = mf->width * sca_mult;
- mf->height = mf->height * sca_mult;
+ *mf = fmt_default;
}
+ r = v4l2_subdev_get_try_crop(sd, sd_state, VIMC_SCA_SINK);
+ *r = crop_rect_default;
+
return 0;
}
@@ -144,112 +126,108 @@ static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
- if (VIMC_IS_SINK(fse->pad)) {
- fse->max_width = VIMC_FRAME_MAX_WIDTH;
- fse->max_height = VIMC_FRAME_MAX_HEIGHT;
- } else {
- fse->max_width = VIMC_FRAME_MAX_WIDTH * MAX_ZOOM;
- fse->max_height = VIMC_FRAME_MAX_HEIGHT * MAX_ZOOM;
- }
+ fse->max_width = VIMC_FRAME_MAX_WIDTH;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT;
return 0;
}
+static struct v4l2_mbus_framefmt *
+vimc_sca_pad_format(struct vimc_sca_device *vsca,
+ struct v4l2_subdev_state *sd_state, u32 pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&vsca->sd, sd_state, pad);
+ else
+ return &vsca->fmt[pad];
+}
+
+static struct v4l2_rect *
+vimc_sca_pad_crop(struct vimc_sca_device *vsca,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&vsca->sd, sd_state,
+ VIMC_SCA_SINK);
+ else
+ return &vsca->crop_rect;
+}
+
static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
- struct v4l2_rect *crop_rect;
-
- /* Get the current sink format */
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- } else {
- format->format = vsca->sink_fmt;
- crop_rect = &vsca->crop_rect;
- }
-
- /* Scale the frame size for the source pad */
- if (VIMC_IS_SRC(format->pad)) {
- format->format.width = crop_rect->width * sca_mult;
- format->format.height = crop_rect->height * sca_mult;
- }
+ format->format = *vimc_sca_pad_format(vsca, sd_state, format->pad,
+ format->which);
return 0;
}
-static void vimc_sca_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
-{
- const struct vimc_pix_map *vpix;
-
- /* Only accept code in the pix map table in non bayer format */
- vpix = vimc_pix_map_by_code(fmt->code);
- if (!vpix || vpix->bayer)
- fmt->code = sink_fmt_default.code;
-
- fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
- VIMC_FRAME_MAX_WIDTH) & ~1;
- fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
- VIMC_FRAME_MAX_HEIGHT) & ~1;
-
- if (fmt->field == V4L2_FIELD_ANY)
- fmt->field = sink_fmt_default.field;
-
- vimc_colorimetry_clamp(fmt);
-}
-
static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_format *format)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *sink_fmt;
- struct v4l2_rect *crop_rect;
+ struct v4l2_mbus_framefmt *fmt;
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- /* Do not change the format while stream is on */
- if (vsca->src_frame)
- return -EBUSY;
+ /* Do not change the active format while stream is on */
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && vsca->src_frame)
+ return -EBUSY;
- sink_fmt = &vsca->sink_fmt;
- crop_rect = &vsca->crop_rect;
- } else {
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
+ fmt = vimc_sca_pad_format(vsca, sd_state, format->pad, format->which);
+
+ /*
+ * The media bus code and colorspace can only be changed on the sink
+ * pad, the source pad only follows.
+ */
+ if (format->pad == VIMC_SCA_SINK) {
+ const struct vimc_pix_map *vpix;
+
+ /* Only accept code in the pix map table in non bayer format. */
+ vpix = vimc_pix_map_by_code(format->format.code);
+ if (vpix && !vpix->bayer)
+ fmt->code = format->format.code;
+ else
+ fmt->code = fmt_default.code;
+
+ /* Clamp the colorspace to valid values. */
+ fmt->colorspace = format->format.colorspace;
+ fmt->ycbcr_enc = format->format.ycbcr_enc;
+ fmt->quantization = format->format.quantization;
+ fmt->xfer_func = format->format.xfer_func;
+ vimc_colorimetry_clamp(fmt);
}
+ /* Clamp and align the width and height */
+ fmt->width = clamp_t(u32, format->format.width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, format->format.height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
/*
- * Do not change the format of the source pad,
- * it is propagated from the sink
+ * Propagate the sink pad format to the crop rectangle and the source
+ * pad.
*/
- if (VIMC_IS_SRC(fmt->pad)) {
- fmt->format = *sink_fmt;
- fmt->format.width = crop_rect->width * sca_mult;
- fmt->format.height = crop_rect->height * sca_mult;
- } else {
- /* Set the new format in the sink pad */
- vimc_sca_adjust_sink_fmt(&fmt->format);
-
- dev_dbg(vsca->ved.dev, "%s: sink format update: "
- "old:%dx%d (0x%x, %d, %d, %d, %d) "
- "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsca->sd.name,
- /* old */
- sink_fmt->width, sink_fmt->height, sink_fmt->code,
- sink_fmt->colorspace, sink_fmt->quantization,
- sink_fmt->xfer_func, sink_fmt->ycbcr_enc,
- /* new */
- fmt->format.width, fmt->format.height, fmt->format.code,
- fmt->format.colorspace, fmt->format.quantization,
- fmt->format.xfer_func, fmt->format.ycbcr_enc);
-
- *sink_fmt = fmt->format;
-
- /* Do the crop, but respect the current bounds */
- vimc_sca_adjust_sink_crop(crop_rect, sink_fmt);
+ if (format->pad == VIMC_SCA_SINK) {
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_rect *crop;
+
+ crop = vimc_sca_pad_crop(vsca, sd_state, format->which);
+ crop->width = fmt->width;
+ crop->height = fmt->height;
+ crop->top = 0;
+ crop->left = 0;
+
+ src_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SRC,
+ format->which);
+ *src_fmt = *fmt;
}
+ format->format = *fmt;
+
return 0;
}
@@ -259,24 +237,17 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
- struct v4l2_rect *crop_rect;
if (VIMC_IS_SRC(sel->pad))
return -EINVAL;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- sink_fmt = &vsca->sink_fmt;
- crop_rect = &vsca->crop_rect;
- } else {
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- }
-
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- sel->r = *crop_rect;
+ sel->r = *vimc_sca_pad_crop(vsca, sd_state, sel->which);
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
+ sink_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SINK,
+ sel->which);
sel->r = vimc_sca_get_crop_bound_sink(sink_fmt);
break;
default:
@@ -286,6 +257,17 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
return 0;
}
+static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
+ const struct v4l2_mbus_framefmt *sink_fmt)
+{
+ const struct v4l2_rect sink_rect =
+ vimc_sca_get_crop_bound_sink(sink_fmt);
+
+ /* Disallow rectangles smaller than the minimal one. */
+ v4l2_rect_set_min_size(r, &crop_rect_min);
+ v4l2_rect_map_inside(r, &sink_rect);
+}
+
static int vimc_sca_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
@@ -294,30 +276,18 @@ static int vimc_sca_set_selection(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *crop_rect;
- if (VIMC_IS_SRC(sel->pad))
+ /* Only support setting the crop of the sink pad */
+ if (VIMC_IS_SRC(sel->pad) || sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- /* Do not change the format while stream is on */
- if (vsca->src_frame)
- return -EBUSY;
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE && vsca->src_frame)
+ return -EBUSY;
- crop_rect = &vsca->crop_rect;
- sink_fmt = &vsca->sink_fmt;
- } else {
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- }
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP:
- /* Do the crop, but respect the current bounds */
- vimc_sca_adjust_sink_crop(&sel->r, sink_fmt);
- *crop_rect = sel->r;
- break;
- default:
- return -EINVAL;
- }
+ crop_rect = vimc_sca_pad_crop(vsca, sd_state, sel->which);
+ sink_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SINK,
+ sel->which);
+ vimc_sca_adjust_sink_crop(&sel->r, sink_fmt);
+ *crop_rect = sel->r;
return 0;
}
@@ -344,16 +314,12 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
/* Save the bytes per pixel of the sink */
- vpix = vimc_pix_map_by_code(vsca->sink_fmt.code);
+ vpix = vimc_pix_map_by_code(vsca->fmt[VIMC_SCA_SINK].code);
vsca->bpp = vpix->bpp;
- /* Calculate the width in bytes of the src frame */
- vsca->src_line_size = vsca->crop_rect.width *
- sca_mult * vsca->bpp;
-
/* Calculate the frame size of the source pad */
- frame_size = vsca->src_line_size * vsca->crop_rect.height *
- sca_mult;
+ frame_size = vsca->fmt[VIMC_SCA_SRC].width
+ * vsca->fmt[VIMC_SCA_SRC].height * vsca->bpp;
/* Allocate the frame buffer. Use vmalloc to be able to
* allocate a large amount of memory
@@ -382,77 +348,32 @@ static const struct v4l2_subdev_ops vimc_sca_ops = {
.video = &vimc_sca_video_ops,
};
-static void vimc_sca_fill_pix(u8 *const ptr,
- const u8 *const pixel,
- const unsigned int bpp)
-{
- unsigned int i;
-
- /* copy the pixel to the pointer */
- for (i = 0; i < bpp; i++)
- ptr[i] = pixel[i];
-}
-
-static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
- unsigned int lin, unsigned int col,
- const u8 *const sink_frame)
-{
- const struct v4l2_rect crop_rect = vsca->crop_rect;
- unsigned int i, j, index;
- const u8 *pixel;
-
- /* Point to the pixel value in position (lin, col) in the sink frame */
- index = VIMC_FRAME_INDEX(lin, col,
- vsca->sink_fmt.width,
- vsca->bpp);
- pixel = &sink_frame[index];
-
- dev_dbg(vsca->ved.dev,
- "sca: %s: --- scale_pix sink pos %dx%d, index %d ---\n",
- vsca->sd.name, lin, col, index);
-
- /* point to the place we are going to put the first pixel
- * in the scaled src frame
- */
- lin -= crop_rect.top;
- col -= crop_rect.left;
- index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
- crop_rect.width * sca_mult, vsca->bpp);
-
- dev_dbg(vsca->ved.dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
- vsca->sd.name, lin * sca_mult, col * sca_mult, index);
-
- /* Repeat this pixel mult times */
- for (i = 0; i < sca_mult; i++) {
- /* Iterate through each beginning of a
- * pixel repetition in a line
- */
- for (j = 0; j < sca_mult * vsca->bpp; j += vsca->bpp) {
- dev_dbg(vsca->ved.dev,
- "sca: %s: sca: scale_pix src pos %d\n",
- vsca->sd.name, index + j);
-
- /* copy the pixel to the position index + j */
- vimc_sca_fill_pix(&vsca->src_frame[index + j],
- pixel, vsca->bpp);
- }
-
- /* move the index to the next line */
- index += vsca->src_line_size;
- }
-}
-
static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
const u8 *const sink_frame)
{
- const struct v4l2_rect r = vsca->crop_rect;
- unsigned int i, j;
-
- /* Scale each pixel from the original sink frame */
- /* TODO: implement scale down, only scale up is supported for now */
- for (i = r.top; i < r.top + r.height; i++)
- for (j = r.left; j < r.left + r.width; j++)
- vimc_sca_scale_pix(vsca, i, j, sink_frame);
+ const struct v4l2_mbus_framefmt *src_fmt = &vsca->fmt[VIMC_SCA_SRC];
+ const struct v4l2_rect *r = &vsca->crop_rect;
+ unsigned int snk_width = vsca->fmt[VIMC_SCA_SINK].width;
+ unsigned int src_x, src_y;
+ u8 *walker = vsca->src_frame;
+
+ /* Set each pixel at the src_frame to its sink_frame equivalent */
+ for (src_y = 0; src_y < src_fmt->height; src_y++) {
+ unsigned int snk_y, y_offset;
+
+ snk_y = (src_y * r->height) / src_fmt->height + r->top;
+ y_offset = snk_y * snk_width * vsca->bpp;
+
+ for (src_x = 0; src_x < src_fmt->width; src_x++) {
+ unsigned int snk_x, x_offset, index;
+
+ snk_x = (src_x * r->width) / src_fmt->width + r->left;
+ x_offset = snk_x * vsca->bpp;
+ index = y_offset + x_offset;
+ memcpy(walker, &sink_frame[index], vsca->bpp);
+ walker += vsca->bpp;
+ }
+ }
}
static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
@@ -492,8 +413,8 @@ static struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
return ERR_PTR(-ENOMEM);
/* Initialize ved and sd */
- vsca->pads[0].flags = MEDIA_PAD_FL_SINK;
- vsca->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ vsca->pads[VIMC_SCA_SINK].flags = MEDIA_PAD_FL_SINK;
+ vsca->pads[VIMC_SCA_SRC].flags = MEDIA_PAD_FL_SOURCE;
ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
vcfg_name,
@@ -508,7 +429,8 @@ static struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
vsca->ved.dev = vimc->mdev.dev;
/* Initialize the frame format */
- vsca->sink_fmt = sink_fmt_default;
+ vsca->fmt[VIMC_SCA_SINK] = fmt_default;
+ vsca->fmt[VIMC_SCA_SRC] = fmt_default;
/* Initialize the crop selection */
vsca->crop_rect = crop_rect_default;
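Note: the rewritten vimc_sca_fill_src_frame() replaces the old integer-multiplier upscaler with a general truncating (nearest-neighbour-style) mapping, so the source pad can now be sized freely in both directions. Each source pixel samples the crop rectangle proportionally; worked through for a hypothetical 2x upscale of a cropped region:

	crop_rect = { .left = 20, .top = 10, .width = 320, .height = 240 }
	source format = 640x480

	source pixel (x = 100, y = 100) reads sink pixel:
	snk_x = (100 * 320) / 640 + 20 = 50 + 20 = 70
	snk_y = (100 * 240) / 480 + 10 = 50 + 10 = 60

Because the division truncates, sink pixels are repeated when scaling up and skipped when scaling down, with no interpolation, which matches the simple memcpy-per-pixel inner loop.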
diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c
index 55ea039fe5b2..1f7469ff04d5 100644
--- a/drivers/media/test-drivers/vivid/vivid-cec.c
+++ b/drivers/media/test-drivers/vivid/vivid-cec.c
@@ -5,40 +5,23 @@
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/delay.h>
#include <media/cec.h>
#include "vivid-core.h"
#include "vivid-cec.h"
-#define CEC_TIM_START_BIT_TOTAL 4500
-#define CEC_TIM_START_BIT_LOW 3700
-#define CEC_TIM_START_BIT_HIGH 800
-#define CEC_TIM_DATA_BIT_TOTAL 2400
-#define CEC_TIM_DATA_BIT_0_LOW 1500
-#define CEC_TIM_DATA_BIT_0_HIGH 900
-#define CEC_TIM_DATA_BIT_1_LOW 600
-#define CEC_TIM_DATA_BIT_1_HIGH 1800
+#define CEC_START_BIT_US 4500
+#define CEC_DATA_BIT_US 2400
+#define CEC_MARGIN_US 350
-void vivid_cec_bus_free_work(struct vivid_dev *dev)
-{
- spin_lock(&dev->cec_slock);
- while (!list_empty(&dev->cec_work_list)) {
- struct vivid_cec_work *cw =
- list_first_entry(&dev->cec_work_list,
- struct vivid_cec_work, list);
-
- spin_unlock(&dev->cec_slock);
- cancel_delayed_work_sync(&cw->work);
- spin_lock(&dev->cec_slock);
- list_del(&cw->list);
- cec_transmit_attempt_done(cw->adap, CEC_TX_STATUS_LOW_DRIVE);
- kfree(cw);
- }
- spin_unlock(&dev->cec_slock);
-}
+struct xfer_on_bus {
+ struct cec_adapter *adap;
+ u8 status;
+};
-static bool vivid_cec_find_dest_adap(struct vivid_dev *dev,
- struct cec_adapter *adap, u8 dest)
+static bool find_dest_adap(struct vivid_dev *dev,
+ struct cec_adapter *adap, u8 dest)
{
unsigned int i;
@@ -61,116 +44,187 @@ static bool vivid_cec_find_dest_adap(struct vivid_dev *dev,
return false;
}
-static void vivid_cec_pin_adap_events(struct cec_adapter *adap, ktime_t ts,
- const struct cec_msg *msg, bool nacked)
+static bool xfer_ready(struct vivid_dev *dev)
{
- unsigned int len = nacked ? 1 : msg->len;
unsigned int i;
- bool bit;
-
- if (adap == NULL)
- return;
+ bool ready = false;
- /*
- * Suffix ULL on constant 10 makes the expression
- * CEC_TIM_START_BIT_TOTAL + 10ULL * len * CEC_TIM_DATA_BIT_TOTAL
- * to be evaluated using 64-bit unsigned arithmetic (u64), which
- * is what ktime_sub_us expects as second argument.
- */
- ts = ktime_sub_us(ts, CEC_TIM_START_BIT_TOTAL +
- 10ULL * len * CEC_TIM_DATA_BIT_TOTAL);
- cec_queue_pin_cec_event(adap, false, false, ts);
- ts = ktime_add_us(ts, CEC_TIM_START_BIT_LOW);
- cec_queue_pin_cec_event(adap, true, false, ts);
- ts = ktime_add_us(ts, CEC_TIM_START_BIT_HIGH);
-
- for (i = 0; i < 10 * len; i++) {
- switch (i % 10) {
- case 0 ... 7:
- bit = msg->msg[i / 10] & (0x80 >> (i % 10));
- break;
- case 8: /* EOM */
- bit = i / 10 == msg->len - 1;
- break;
- case 9: /* ACK */
- bit = cec_msg_is_broadcast(msg) ^ nacked;
+ spin_lock(&dev->cec_xfers_slock);
+ for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) {
+ if (dev->xfers[i].sft &&
+ dev->xfers[i].sft <= dev->cec_sft) {
+ ready = true;
break;
}
- cec_queue_pin_cec_event(adap, false, false, ts);
- if (bit)
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_1_LOW);
- else
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_0_LOW);
- cec_queue_pin_cec_event(adap, true, false, ts);
- if (bit)
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_1_HIGH);
- else
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_0_HIGH);
}
-}
+ spin_unlock(&dev->cec_xfers_slock);
-static void vivid_cec_pin_events(struct vivid_dev *dev,
- const struct cec_msg *msg, bool nacked)
-{
- ktime_t ts = ktime_get();
- unsigned int i;
-
- vivid_cec_pin_adap_events(dev->cec_rx_adap, ts, msg, nacked);
- for (i = 0; i < MAX_OUTPUTS; i++)
- vivid_cec_pin_adap_events(dev->cec_tx_adap[i], ts, msg, nacked);
+ return ready;
}
-static void vivid_cec_xfer_done_worker(struct work_struct *work)
+/*
+ * If an adapter tries to send successive messages, it must wait for the
+ * longest signal-free time between its transmissions. But, if another
+ * adapter sends a message in the interim, then the wait can be reduced
+ * because the messages are no longer successive. Make these adjustments
+ * if necessary. Should be called holding cec_xfers_slock.
+ */
+static void adjust_sfts(struct vivid_dev *dev)
{
- struct vivid_cec_work *cw =
- container_of(work, struct vivid_cec_work, work.work);
- struct vivid_dev *dev = cw->dev;
- struct cec_adapter *adap = cw->adap;
- u8 dest = cec_msg_destination(&cw->msg);
- bool valid_dest;
unsigned int i;
+ u8 initiator;
- valid_dest = cec_msg_is_broadcast(&cw->msg);
- if (!valid_dest)
- valid_dest = vivid_cec_find_dest_adap(dev, adap, dest);
-
- cw->tx_status = valid_dest ? CEC_TX_STATUS_OK : CEC_TX_STATUS_NACK;
- spin_lock(&dev->cec_slock);
- dev->cec_xfer_time_jiffies = 0;
- dev->cec_xfer_start_jiffies = 0;
- list_del(&cw->list);
- spin_unlock(&dev->cec_slock);
- vivid_cec_pin_events(dev, &cw->msg, !valid_dest);
- cec_transmit_attempt_done(cw->adap, cw->tx_status);
-
- /* Broadcast message */
- if (adap != dev->cec_rx_adap)
- cec_received_msg(dev->cec_rx_adap, &cw->msg);
- for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
- if (adap != dev->cec_tx_adap[i])
- cec_received_msg(dev->cec_tx_adap[i], &cw->msg);
- kfree(cw);
+ for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) {
+ if (dev->xfers[i].sft <= CEC_SIGNAL_FREE_TIME_RETRY)
+ continue;
+ initiator = dev->xfers[i].msg[0] >> 4;
+ if (initiator == dev->last_initiator)
+ dev->xfers[i].sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ else
+ dev->xfers[i].sft = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
+ }
}
-static void vivid_cec_xfer_try_worker(struct work_struct *work)
+/*
+ * The main emulation of the bus on which CEC adapters attempt to send
+ * messages to each other. The bus keeps track of how long it has been
+ * signal-free and accepts a pending transmission only if the state of
+ * the bus matches the transmission's signal-free requirements. It calls
+ * cec_transmit_attempt_done() for all transmits that enter the bus and
+ * cec_received_msg() for successful transmits.
+ */
+int vivid_cec_bus_thread(void *_dev)
{
- struct vivid_cec_work *cw =
- container_of(work, struct vivid_cec_work, work.work);
- struct vivid_dev *dev = cw->dev;
-
- spin_lock(&dev->cec_slock);
- if (dev->cec_xfer_time_jiffies) {
- list_del(&cw->list);
- spin_unlock(&dev->cec_slock);
- cec_transmit_attempt_done(cw->adap, CEC_TX_STATUS_ARB_LOST);
- kfree(cw);
- } else {
- INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
- dev->cec_xfer_start_jiffies = jiffies;
- dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
- spin_unlock(&dev->cec_slock);
- schedule_delayed_work(&cw->work, dev->cec_xfer_time_jiffies);
+ u32 last_sft;
+ unsigned int i;
+ unsigned int dest;
+ ktime_t start, end;
+ s64 delta_us, retry_us;
+ struct vivid_dev *dev = _dev;
+
+ dev->cec_sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ for (;;) {
+ bool first = true;
+ int wait_xfer_us = 0;
+ bool valid_dest = false;
+ int wait_arb_lost_us = 0;
+ unsigned int first_idx = 0;
+ unsigned int first_status = 0;
+ struct cec_msg first_msg = {};
+ struct xfer_on_bus xfers_on_bus[MAX_OUTPUTS] = {};
+
+ wait_event_interruptible(dev->kthread_waitq_cec, xfer_ready(dev) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+ last_sft = dev->cec_sft;
+ dev->cec_sft = 0;
+ /*
+ * Move the messages that are ready onto the bus. The adapter with
+ * the most leading zeros will win control of the bus and any other
+ * adapters will lose arbitration.
+ */
+ spin_lock(&dev->cec_xfers_slock);
+ for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) {
+ if (!dev->xfers[i].sft || dev->xfers[i].sft > last_sft)
+ continue;
+ if (first) {
+ first = false;
+ first_idx = i;
+ xfers_on_bus[first_idx].adap = dev->xfers[i].adap;
+ memcpy(first_msg.msg, dev->xfers[i].msg, dev->xfers[i].len);
+ first_msg.len = dev->xfers[i].len;
+ } else {
+ xfers_on_bus[i].adap = dev->xfers[i].adap;
+ xfers_on_bus[i].status = CEC_TX_STATUS_ARB_LOST;
+ /*
+				 * For simplicity, wait for all 4 bits of the initiator's
+				 * address even though the HDMI specification uses
+				 * bit-level precision.
+ */
+ wait_arb_lost_us = 4 * CEC_DATA_BIT_US + CEC_START_BIT_US;
+ }
+ dev->xfers[i].sft = 0;
+ }
+ dev->last_initiator = cec_msg_initiator(&first_msg);
+ adjust_sfts(dev);
+ spin_unlock(&dev->cec_xfers_slock);
+
+ dest = cec_msg_destination(&first_msg);
+ valid_dest = cec_msg_is_broadcast(&first_msg);
+ if (!valid_dest)
+ valid_dest = find_dest_adap(dev, xfers_on_bus[first_idx].adap, dest);
+ if (valid_dest) {
+ first_status = CEC_TX_STATUS_OK;
+ /*
+ * Message length is in bytes, but each byte is transmitted in
+ * a block of 10 bits.
+ */
+ wait_xfer_us = first_msg.len * 10 * CEC_DATA_BIT_US;
+ } else {
+ first_status = CEC_TX_STATUS_NACK;
+ /*
+ * A message that is not acknowledged stops transmitting after
+ * the header block of 10 bits.
+ */
+ wait_xfer_us = 10 * CEC_DATA_BIT_US;
+ }
+ wait_xfer_us += CEC_START_BIT_US;
+ xfers_on_bus[first_idx].status = first_status;
+
+ /* Sleep as if sending messages on a real hardware bus. */
+ start = ktime_get();
+ if (wait_arb_lost_us) {
+ usleep_range(wait_arb_lost_us - CEC_MARGIN_US, wait_arb_lost_us);
+ for (i = 0; i < ARRAY_SIZE(xfers_on_bus); i++) {
+ if (xfers_on_bus[i].status != CEC_TX_STATUS_ARB_LOST)
+ continue;
+ cec_transmit_attempt_done(xfers_on_bus[i].adap,
+ CEC_TX_STATUS_ARB_LOST);
+ }
+ if (kthread_should_stop())
+ break;
+ }
+ wait_xfer_us -= wait_arb_lost_us;
+ usleep_range(wait_xfer_us - CEC_MARGIN_US, wait_xfer_us);
+ cec_transmit_attempt_done(xfers_on_bus[first_idx].adap, first_status);
+ if (kthread_should_stop())
+ break;
+ if (first_status == CEC_TX_STATUS_OK) {
+ if (xfers_on_bus[first_idx].adap != dev->cec_rx_adap)
+ cec_received_msg(dev->cec_rx_adap, &first_msg);
+ for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
+ if (xfers_on_bus[first_idx].adap != dev->cec_tx_adap[i])
+ cec_received_msg(dev->cec_tx_adap[i], &first_msg);
+ }
+ end = ktime_get();
+ /*
+ * If the emulated transfer took more or less time than it should
+ * have, then compensate by adjusting the wait time needed for the
+ * bus to be signal-free for 3 bit periods (the retry time).
+ */
+ delta_us = div_s64(end - start, 1000);
+ delta_us -= wait_xfer_us + wait_arb_lost_us;
+ retry_us = CEC_SIGNAL_FREE_TIME_RETRY * CEC_DATA_BIT_US - delta_us;
+ if (retry_us > CEC_MARGIN_US)
+ usleep_range(retry_us - CEC_MARGIN_US, retry_us);
+ dev->cec_sft = CEC_SIGNAL_FREE_TIME_RETRY;
+ /*
+ * If there are no messages that need to be retried, check if any
+ * adapters that did not just transmit a message are ready to
+ * transmit. If none of these adapters are ready, then increase
+ * the signal-free time so that the bus is available to all
+ * adapters and go back to waiting for a transmission.
+ */
+ while (dev->cec_sft >= CEC_SIGNAL_FREE_TIME_RETRY &&
+ dev->cec_sft < CEC_SIGNAL_FREE_TIME_NEXT_XFER &&
+ !xfer_ready(dev) && !kthread_should_stop()) {
+ usleep_range(2 * CEC_DATA_BIT_US - CEC_MARGIN_US,
+ 2 * CEC_DATA_BIT_US);
+ dev->cec_sft += 2;
+ }
}
+ return 0;
}
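
The arbitration wait computed in the loop above works out as follows with the driver's nominal CEC timings (a start bit occupies 4500 us and a data bit 2400 us — assumed values of CEC_START_BIT_US and CEC_DATA_BIT_US):

	/*
	 * Worked example (sketch): a losing adapter backs off once the
	 * start bit plus the 4 initiator-address bits have been driven:
	 *
	 *   wait_arb_lost_us = CEC_START_BIT_US + 4 * CEC_DATA_BIT_US
	 *                    = 4500 + 4 * 2400 = 14100 us
	 *
	 * A full 4-byte message, by contrast, holds the bus for
	 * CEC_START_BIT_US + 4 * 10 * CEC_DATA_BIT_US = 100500 us.
	 */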
static int vivid_cec_adap_enable(struct cec_adapter *adap, bool enable)
@@ -184,41 +238,26 @@ static int vivid_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
return 0;
}
-/*
- * One data bit takes 2400 us, each byte needs 10 bits so that's 24000 us
- * per byte.
- */
-#define USECS_PER_BYTE 24000
-
static int vivid_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct vivid_dev *dev = cec_get_drvdata(adap);
- struct vivid_cec_work *cw = kzalloc(sizeof(*cw), GFP_KERNEL);
- long delta_jiffies = 0;
-
- if (cw == NULL)
- return -ENOMEM;
- cw->dev = dev;
- cw->adap = adap;
- cw->usecs = CEC_FREE_TIME_TO_USEC(signal_free_time) +
- msg->len * USECS_PER_BYTE;
- cw->msg = *msg;
-
- spin_lock(&dev->cec_slock);
- list_add(&cw->list, &dev->cec_work_list);
- if (dev->cec_xfer_time_jiffies == 0) {
- INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
- dev->cec_xfer_start_jiffies = jiffies;
- dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
- delta_jiffies = dev->cec_xfer_time_jiffies;
- } else {
- INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_try_worker);
- delta_jiffies = dev->cec_xfer_start_jiffies +
- dev->cec_xfer_time_jiffies - jiffies;
+ u8 idx = cec_msg_initiator(msg);
+
+ spin_lock(&dev->cec_xfers_slock);
+ dev->xfers[idx].adap = adap;
+ memcpy(dev->xfers[idx].msg, msg->msg, CEC_MAX_MSG_SIZE);
+ dev->xfers[idx].len = msg->len;
+ dev->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_RETRY;
+ if (signal_free_time > CEC_SIGNAL_FREE_TIME_RETRY) {
+ if (idx == dev->last_initiator)
+ dev->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ else
+ dev->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
}
- spin_unlock(&dev->cec_slock);
- schedule_delayed_work(&cw->work, delta_jiffies < 0 ? 0 : delta_jiffies);
+ spin_unlock(&dev->cec_xfers_slock);
+ wake_up_interruptible(&dev->kthread_waitq_cec);
+
return 0;
}
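
One slot per possible initiator suffices because a CEC adapter never has more than one transmit in flight, and the slot index is simply the high nibble of the header byte — a sketch of what cec_msg_initiator() returns:

	/*
	 * Sketch: a header byte of 0x4f means initiator 4, destination
	 * 15 (broadcast), so this message lands in dev->xfers[4].
	 */
	u8 idx = msg->msg[0] >> 4;	/* 0x4f >> 4 == 0x4 */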
diff --git a/drivers/media/test-drivers/vivid/vivid-cec.h b/drivers/media/test-drivers/vivid/vivid-cec.h
index 7524ed48a914..b2bcddb50b83 100644
--- a/drivers/media/test-drivers/vivid/vivid-cec.h
+++ b/drivers/media/test-drivers/vivid/vivid-cec.h
@@ -9,12 +9,5 @@
struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
bool is_source);
-void vivid_cec_bus_free_work(struct vivid_dev *dev);
-
-#else
-
-static inline void vivid_cec_bus_free_work(struct vivid_dev *dev)
-{
-}
-
+int vivid_cec_bus_thread(void *_dev);
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index d2bd2653cf54..04b75666bad4 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -177,6 +177,15 @@ MODULE_PARM_DESC(cache_hints, " user-space cache hints, default is 0.\n"
"\t\t 0 == forbid\n"
"\t\t 1 == allow");
+static unsigned int supports_requests[VIVID_MAX_DEVS] = {
+ [0 ... (VIVID_MAX_DEVS - 1)] = 1
+};
+module_param_array(supports_requests, uint, NULL, 0444);
+MODULE_PARM_DESC(supports_requests, " support for requests, default is 1.\n"
+ "\t\t 0 == no support\n"
+ "\t\t 1 == supports requests\n"
+ "\t\t 2 == requires requests");
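
A hypothetical invocation showing the per-instance semantics of the new parameter:

	# Instance 0 requires requests, instance 1 merely supports them,
	# and instance 2 offers no request support at all.
	modprobe vivid n_devs=3 supports_requests=2,1,0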
+
static struct vivid_dev *vivid_devs[VIVID_MAX_DEVS];
const struct v4l2_rect vivid_min_rect = {
@@ -883,10 +892,11 @@ static int vivid_create_queue(struct vivid_dev *dev,
q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
&vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = min_buffers_needed;
+ q->min_buffers_needed = supports_requests[dev->inst] ? 0 : min_buffers_needed;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
+ q->supports_requests = supports_requests[dev->inst];
+ q->requires_requests = supports_requests[dev->inst] >= 2;
q->allow_cache_hints = (cache_hints[dev->inst] == 1);
return vb2_queue_init(q);
@@ -1878,18 +1888,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
INIT_LIST_HEAD(&dev->meta_out_active);
INIT_LIST_HEAD(&dev->touch_cap_active);
- INIT_LIST_HEAD(&dev->cec_work_list);
- spin_lock_init(&dev->cec_slock);
- /*
- * Same as create_singlethread_workqueue, but now I can use the
- * string formatting of alloc_ordered_workqueue.
- */
- dev->cec_workqueue = alloc_ordered_workqueue("vivid-%03d-cec",
- WQ_MEM_RECLAIM, inst);
- if (!dev->cec_workqueue) {
- ret = -ENOMEM;
- goto unreg_dev;
- }
+ spin_lock_init(&dev->cec_xfers_slock);
if (allocators[inst] == 1)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -1929,6 +1928,19 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
cec_tx_bus_cnt++;
}
}
+
+ if (dev->cec_rx_adap || cec_tx_bus_cnt) {
+ init_waitqueue_head(&dev->kthread_waitq_cec);
+ dev->kthread_cec = kthread_run(vivid_cec_bus_thread, dev,
+ "vivid_cec-%s", dev->v4l2_dev.name);
+ if (IS_ERR(dev->kthread_cec)) {
+ ret = PTR_ERR(dev->kthread_cec);
+ dev->kthread_cec = NULL;
+			v4l2_err(&dev->v4l2_dev, "kthread_run() failed\n");
+ goto unreg_dev;
+ }
+ }
+
#endif
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap);
@@ -1968,10 +1980,8 @@ unreg_dev:
cec_unregister_adapter(dev->cec_rx_adap);
for (i = 0; i < MAX_OUTPUTS; i++)
cec_unregister_adapter(dev->cec_tx_adap[i]);
- if (dev->cec_workqueue) {
- vivid_cec_bus_free_work(dev);
- destroy_workqueue(dev->cec_workqueue);
- }
+ if (dev->kthread_cec)
+ kthread_stop(dev->kthread_cec);
free_dev:
v4l2_device_put(&dev->v4l2_dev);
return ret;
@@ -2093,10 +2103,8 @@ static int vivid_remove(struct platform_device *pdev)
cec_unregister_adapter(dev->cec_rx_adap);
for (j = 0; j < MAX_OUTPUTS; j++)
cec_unregister_adapter(dev->cec_tx_adap[j]);
- if (dev->cec_workqueue) {
- vivid_cec_bus_free_work(dev);
- destroy_workqueue(dev->cec_workqueue);
- }
+ if (dev->kthread_cec)
+ kthread_stop(dev->kthread_cec);
v4l2_device_put(&dev->v4l2_dev);
vivid_devs[i] = NULL;
}
diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index 1e3c4f5a9413..45f96706edde 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
@@ -110,15 +110,11 @@ enum vivid_colorspace {
#define VIVID_INVALID_SIGNAL(mode) \
((mode) == NO_SIGNAL || (mode) == NO_LOCK || (mode) == OUT_OF_RANGE)
-struct vivid_cec_work {
- struct list_head list;
- struct delayed_work work;
+struct vivid_cec_xfer {
struct cec_adapter *adap;
- struct vivid_dev *dev;
- unsigned int usecs;
- unsigned int timeout_ms;
- u8 tx_status;
- struct cec_msg msg;
+ u8 msg[CEC_MAX_MSG_SIZE];
+ u32 len;
+ u32 sft;
};
struct vivid_dev {
@@ -560,12 +556,13 @@ struct vivid_dev {
/* CEC */
struct cec_adapter *cec_rx_adap;
struct cec_adapter *cec_tx_adap[MAX_OUTPUTS];
- struct workqueue_struct *cec_workqueue;
- spinlock_t cec_slock;
- struct list_head cec_work_list;
- unsigned int cec_xfer_time_jiffies;
- unsigned long cec_xfer_start_jiffies;
u8 cec_output2bus_map[MAX_OUTPUTS];
+ struct task_struct *kthread_cec;
+ wait_queue_head_t kthread_waitq_cec;
+ struct vivid_cec_xfer xfers[MAX_OUTPUTS];
+ spinlock_t cec_xfers_slock; /* read and write cec messages */
+ u32 cec_sft; /* bus signal free time, in bit periods */
+ u8 last_initiator;
/* CEC OSD String */
char osd[14];
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 26a277975cb1..03c46a62bf26 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -172,7 +172,6 @@ static void set_reg_bits(struct reg_pair_t *reg_pair, u8 reg, u8 mask, u8 val)
i++;
}
- return;
}
static void copy_reg_bits(struct reg_pair_t *reg_pair1,
@@ -193,7 +192,6 @@ static void copy_reg_bits(struct reg_pair_t *reg_pair1,
}
i++;
}
- return;
}
/* ------------------------------------------------------------------------- */
@@ -221,7 +219,6 @@ static void mxl5007t_set_mode_bits(struct mxl5007t_state *state,
default:
mxl_fail(-EINVAL);
}
- return;
}
static void mxl5007t_set_if_freq_bits(struct mxl5007t_state *state,
@@ -274,8 +271,6 @@ static void mxl5007t_set_if_freq_bits(struct mxl5007t_state *state,
set_reg_bits(state->tab_init, 0x02, 0x10, invert_if ? 0x10 : 0x00);
state->if_freq = if_freq;
-
- return;
}
static void mxl5007t_set_xtal_freq_bits(struct mxl5007t_state *state,
@@ -343,8 +338,6 @@ static void mxl5007t_set_xtal_freq_bits(struct mxl5007t_state *state,
mxl_fail(-EINVAL);
return;
}
-
- return;
}
static struct reg_pair_t *mxl5007t_calc_init_regs(struct mxl5007t_state *state,
@@ -398,8 +391,6 @@ static void mxl5007t_set_bw_bits(struct mxl5007t_state *state,
return;
}
set_reg_bits(state->tab_rftune, 0x0c, 0x3f, val);
-
- return;
}
static struct
diff --git a/drivers/media/tuners/tuner-types.c b/drivers/media/tuners/tuner-types.c
index 01f61ebabd56..0ed2c5bc082e 100644
--- a/drivers/media/tuners/tuner-types.c
+++ b/drivers/media/tuners/tuner-types.c
@@ -1942,6 +1942,10 @@ struct tunertype tuners[] = {
.params = tuner_sony_btf_pg463z_params,
.count = ARRAY_SIZE(tuner_sony_btf_pg463z_params),
},
+ [TUNER_SI2157] = {
+ .name = "Silicon Labs Si2157 tuner",
+ /* see si2157.c for details */
+ },
};
EXPORT_SYMBOL(tuners);
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 7a81be7970b2..d568452618d1 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -415,8 +415,11 @@ static int airspy_alloc_urbs(struct airspy *s)
dev_dbg(s->dev, "alloc urb=%d\n", i);
s->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
if (!s->urb_list[i]) {
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
usb_free_urb(s->urb_list[j]);
+ s->urb_list[j] = NULL;
+ }
+ s->urbs_initialized = 0;
return -ENOMEM;
}
usb_fill_bulk_urb(s->urb_list[i],
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 7865fa0a8295..cd5861a30b6f 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -931,8 +931,6 @@ static int mxl111sf_init(struct dvb_usb_device *d)
.len = sizeof(eeprom), .buf = eeprom },
};
- mutex_init(&state->msg_lock);
-
ret = get_chip_info(state);
if (mxl_fail(ret))
pr_err("failed to get chip info during probe");
@@ -1074,6 +1072,14 @@ static int mxl111sf_get_stream_config_dvbt(struct dvb_frontend *fe,
return 0;
}
+static int mxl111sf_probe(struct dvb_usb_device *dev)
+{
+ struct mxl111sf_state *state = d_to_priv(dev);
+
+ mutex_init(&state->msg_lock);
+ return 0;
+}
+
static struct dvb_usb_device_properties mxl111sf_props_dvbt = {
.driver_name = KBUILD_MODNAME,
.owner = THIS_MODULE,
@@ -1083,6 +1089,7 @@ static struct dvb_usb_device_properties mxl111sf_props_dvbt = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_dvbt,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1124,6 +1131,7 @@ static struct dvb_usb_device_properties mxl111sf_props_atsc = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_atsc,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1165,6 +1173,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mh = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_mh,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1233,6 +1242,7 @@ static struct dvb_usb_device_properties mxl111sf_props_atsc_mh = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_atsc_mh,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1311,6 +1321,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mercury = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_mercury,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1381,6 +1392,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mercury_mh = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_mercury_mh,
.tuner_attach = mxl111sf_attach_tuner,
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 1c39b61cde29..86788771175b 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -391,6 +391,7 @@ static struct rc_map_table rc_map_az6027_table[] = {
/* remote control stuff (does not work with my box) */
static int az6027_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
+ *state = REMOTE_NO_KEY_PRESSED;
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 02b51d1a1b67..aff60c10cb0b 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -223,7 +223,7 @@ int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
u8 *buf;
int rc;
- buf = kmalloc(2, GFP_KERNEL);
+ buf = kzalloc(2, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index c1e0dccb7408..b207f34af5c6 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2508,12 +2508,17 @@ const struct em28xx_board em28xx_boards[] = {
.def_i2c_bus = 1,
.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = TUNER_SI2157,
.tuner_gpio = hauppauge_dualhd_dvb,
.has_dvb = 1,
.has_dual_ts = 1,
.ir_codes = RC_MAP_HAUPPAUGE,
.leds = hauppauge_dualhd_leds,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
},
/*
* 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Isoc.
@@ -4139,8 +4144,11 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
em28xx_close_extension(dev);
- if (dev->dev_next)
+ if (dev->dev_next) {
+ em28xx_close_extension(dev->dev_next);
em28xx_release_resources(dev->dev_next);
+ }
+
em28xx_release_resources(dev);
if (dev->dev_next) {
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 584fa400cd7d..acc0bf7dbe2b 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -1154,8 +1154,9 @@ int em28xx_suspend_extension(struct em28xx *dev)
dev_info(&dev->intf->dev, "Suspending extensions\n");
mutex_lock(&em28xx_devlist_mutex);
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
- if (ops->suspend)
- ops->suspend(dev);
+ if (!ops->suspend)
+ continue;
+ ops->suspend(dev);
if (dev->dev_next)
ops->suspend(dev->dev_next);
}
diff --git a/drivers/media/usb/gspca/gl860/gl860-mi1320.c b/drivers/media/usb/gspca/gl860/gl860-mi1320.c
index 0749fe13160f..d6a540ed378c 100644
--- a/drivers/media/usb/gspca/gl860/gl860-mi1320.c
+++ b/drivers/media/usb/gspca/gl860/gl860-mi1320.c
@@ -50,42 +50,69 @@ static struct validx tbl_post_unset_alt[] = {
};
static u8 *tbl_1280[] = {
- "\x0d\x80\xf1\x08\x03\x04\xf1\x00" "\x04\x05\xf1\x02\x05\x00\xf1\xf1"
- "\x06\x00\xf1\x0d\x20\x01\xf1\x00" "\x21\x84\xf1\x00\x0d\x00\xf1\x08"
- "\xf0\x00\xf1\x01\x34\x00\xf1\x00" "\x9b\x43\xf1\x00\xa6\x05\xf1\x00"
- "\xa9\x04\xf1\x00\xa1\x05\xf1\x00" "\xa4\x04\xf1\x00\xae\x0a\xf1\x08"
- ,
- "\xf0\x00\xf1\x02\x3a\x05\xf1\xf1" "\x3c\x05\xf1\xf1\x59\x01\xf1\x47"
- "\x5a\x01\xf1\x88\x5c\x0a\xf1\x06" "\x5d\x0e\xf1\x0a\x64\x5e\xf1\x1c"
- "\xd2\x00\xf1\xcf\xcb\x00\xf1\x01"
- ,
- "\xd3\x02\xd4\x28\xd5\x01\xd0\x02" "\xd1\x18\xd2\xc1"
+ (u8[]){
+ 0x0d, 0x80, 0xf1, 0x08, 0x03, 0x04, 0xf1, 0x00,
+ 0x04, 0x05, 0xf1, 0x02, 0x05, 0x00, 0xf1, 0xf1,
+ 0x06, 0x00, 0xf1, 0x0d, 0x20, 0x01, 0xf1, 0x00,
+ 0x21, 0x84, 0xf1, 0x00, 0x0d, 0x00, 0xf1, 0x08,
+ 0xf0, 0x00, 0xf1, 0x01, 0x34, 0x00, 0xf1, 0x00,
+ 0x9b, 0x43, 0xf1, 0x00, 0xa6, 0x05, 0xf1, 0x00,
+ 0xa9, 0x04, 0xf1, 0x00, 0xa1, 0x05, 0xf1, 0x00,
+ 0xa4, 0x04, 0xf1, 0x00, 0xae, 0x0a, 0xf1, 0x08
+ }, (u8[]){
+ 0xf0, 0x00, 0xf1, 0x02, 0x3a, 0x05, 0xf1, 0xf1,
+ 0x3c, 0x05, 0xf1, 0xf1, 0x59, 0x01, 0xf1, 0x47,
+ 0x5a, 0x01, 0xf1, 0x88, 0x5c, 0x0a, 0xf1, 0x06,
+ 0x5d, 0x0e, 0xf1, 0x0a, 0x64, 0x5e, 0xf1, 0x1c,
+ 0xd2, 0x00, 0xf1, 0xcf, 0xcb, 0x00, 0xf1, 0x01
+ }, (u8[]){
+ 0xd3, 0x02, 0xd4, 0x28, 0xd5, 0x01, 0xd0, 0x02,
+ 0xd1, 0x18, 0xd2, 0xc1
+ }
};
static u8 *tbl_800[] = {
- "\x0d\x80\xf1\x08\x03\x03\xf1\xc0" "\x04\x05\xf1\x02\x05\x00\xf1\xf1"
- "\x06\x00\xf1\x0d\x20\x01\xf1\x00" "\x21\x84\xf1\x00\x0d\x00\xf1\x08"
- "\xf0\x00\xf1\x01\x34\x00\xf1\x00" "\x9b\x43\xf1\x00\xa6\x05\xf1\x00"
- "\xa9\x03\xf1\xc0\xa1\x03\xf1\x20" "\xa4\x02\xf1\x5a\xae\x0a\xf1\x08"
- ,
- "\xf0\x00\xf1\x02\x3a\x05\xf1\xf1" "\x3c\x05\xf1\xf1\x59\x01\xf1\x47"
- "\x5a\x01\xf1\x88\x5c\x0a\xf1\x06" "\x5d\x0e\xf1\x0a\x64\x5e\xf1\x1c"
- "\xd2\x00\xf1\xcf\xcb\x00\xf1\x01"
- ,
- "\xd3\x02\xd4\x18\xd5\x21\xd0\x02" "\xd1\x10\xd2\x59"
+ (u8[]){
+ 0x0d, 0x80, 0xf1, 0x08, 0x03, 0x03, 0xf1, 0xc0,
+ 0x04, 0x05, 0xf1, 0x02, 0x05, 0x00, 0xf1, 0xf1,
+ 0x06, 0x00, 0xf1, 0x0d, 0x20, 0x01, 0xf1, 0x00,
+ 0x21, 0x84, 0xf1, 0x00, 0x0d, 0x00, 0xf1, 0x08,
+ 0xf0, 0x00, 0xf1, 0x01, 0x34, 0x00, 0xf1, 0x00,
+ 0x9b, 0x43, 0xf1, 0x00, 0xa6, 0x05, 0xf1, 0x00,
+ 0xa9, 0x03, 0xf1, 0xc0, 0xa1, 0x03, 0xf1, 0x20,
+ 0xa4, 0x02, 0xf1, 0x5a, 0xae, 0x0a, 0xf1, 0x08
+ }, (u8[]){
+ 0xf0, 0x00, 0xf1, 0x02, 0x3a, 0x05, 0xf1, 0xf1,
+ 0x3c, 0x05, 0xf1, 0xf1, 0x59, 0x01, 0xf1, 0x47,
+ 0x5a, 0x01, 0xf1, 0x88, 0x5c, 0x0a, 0xf1, 0x06,
+ 0x5d, 0x0e, 0xf1, 0x0a, 0x64, 0x5e, 0xf1, 0x1c,
+ 0xd2, 0x00, 0xf1, 0xcf, 0xcb, 0x00, 0xf1, 0x01
+ }, (u8[]){
+ 0xd3, 0x02, 0xd4, 0x18, 0xd5, 0x21, 0xd0, 0x02,
+ 0xd1, 0x10, 0xd2, 0x59
+ }
};
static u8 *tbl_640[] = {
- "\x0d\x80\xf1\x08\x03\x04\xf1\x04" "\x04\x05\xf1\x02\x07\x01\xf1\x7c"
- "\x08\x00\xf1\x0e\x21\x80\xf1\x00" "\x0d\x00\xf1\x08\xf0\x00\xf1\x01"
- "\x34\x10\xf1\x10\x3a\x43\xf1\x00" "\xa6\x05\xf1\x02\xa9\x04\xf1\x04"
- "\xa7\x02\xf1\x81\xaa\x01\xf1\xe2" "\xae\x0c\xf1\x09"
- ,
- "\xf0\x00\xf1\x02\x39\x03\xf1\xfc" "\x3b\x04\xf1\x04\x57\x01\xf1\xb6"
- "\x58\x02\xf1\x0d\x5c\x1f\xf1\x19" "\x5d\x24\xf1\x1e\x64\x5e\xf1\x1c"
- "\xd2\x00\xf1\x00\xcb\x00\xf1\x01"
- ,
- "\xd3\x02\xd4\x10\xd5\x81\xd0\x02" "\xd1\x08\xd2\xe1"
+ (u8[]){
+ 0x0d, 0x80, 0xf1, 0x08, 0x03, 0x04, 0xf1, 0x04,
+ 0x04, 0x05, 0xf1, 0x02, 0x07, 0x01, 0xf1, 0x7c,
+ 0x08, 0x00, 0xf1, 0x0e, 0x21, 0x80, 0xf1, 0x00,
+ 0x0d, 0x00, 0xf1, 0x08, 0xf0, 0x00, 0xf1, 0x01,
+ 0x34, 0x10, 0xf1, 0x10, 0x3a, 0x43, 0xf1, 0x00,
+ 0xa6, 0x05, 0xf1, 0x02, 0xa9, 0x04, 0xf1, 0x04,
+ 0xa7, 0x02, 0xf1, 0x81, 0xaa, 0x01, 0xf1, 0xe2,
+ 0xae, 0x0c, 0xf1, 0x09
+ }, (u8[]){
+ 0xf0, 0x00, 0xf1, 0x02, 0x39, 0x03, 0xf1, 0xfc,
+ 0x3b, 0x04, 0xf1, 0x04, 0x57, 0x01, 0xf1, 0xb6,
+ 0x58, 0x02, 0xf1, 0x0d, 0x5c, 0x1f, 0xf1, 0x19,
+ 0x5d, 0x24, 0xf1, 0x1e, 0x64, 0x5e, 0xf1, 0x1c,
+ 0xd2, 0x00, 0xf1, 0x00, 0xcb, 0x00, 0xf1, 0x01
+ }, (u8[]){
+ 0xd3, 0x02, 0xd4, 0x10, 0xd5, 0x81, 0xd0, 0x02,
+ 0xd1, 0x08, 0xd2, 0xe1
+ }
};
static s32 tbl_sat[] = {0x25, 0x1d, 0x15, 0x0d, 0x05, 0x4d, 0x55, 0x5d, 0x2d};
diff --git a/drivers/media/usb/gspca/gl860/gl860-ov9655.c b/drivers/media/usb/gspca/gl860/gl860-ov9655.c
index 59b87d066187..766677ebcb34 100644
--- a/drivers/media/usb/gspca/gl860/gl860-ov9655.c
+++ b/drivers/media/usb/gspca/gl860/gl860-ov9655.c
@@ -25,69 +25,118 @@ static struct validx tbl_commmon[] = {
static s32 tbl_length[] = {12, 56, 52, 54, 56, 42, 32, 12};
static u8 *tbl_640[] = {
- "\x00\x40\x07\x6a\x06\xf3\x0d\x6a" "\x10\x10\xc1\x01"
- ,
- "\x12\x80\x00\x00\x01\x98\x02\x80" "\x03\x12\x04\x03\x0b\x57\x0e\x61"
- "\x0f\x42\x11\x01\x12\x60\x13\x00" "\x14\x3a\x16\x24\x17\x14\x18\x00"
- "\x19\x01\x1a\x3d\x1e\x04\x24\x3c" "\x25\x36\x26\x72\x27\x08\x28\x08"
- "\x29\x15\x2a\x00\x2b\x00\x2c\x08"
- ,
- "\x32\xff\x33\x00\x34\x3d\x35\x00" "\x36\xfa\x38\x72\x39\x57\x3a\x00"
- "\x3b\x0c\x3d\x99\x3e\x0c\x3f\xc1" "\x40\xc0\x41\x00\x42\xc0\x43\x0a"
- "\x44\xf0\x45\x46\x46\x62\x47\x2a" "\x48\x3c\x4a\xee\x4b\xe7\x4c\xe7"
- "\x4d\xe7\x4e\xe7"
- ,
- "\x4f\x98\x50\x98\x51\x00\x52\x28" "\x53\x70\x54\x98\x58\x1a\x59\x85"
- "\x5a\xa9\x5b\x64\x5c\x84\x5d\x53" "\x5e\x0e\x5f\xf0\x60\xf0\x61\xf0"
- "\x62\x00\x63\x00\x64\x02\x65\x20" "\x66\x00\x69\x0a\x6b\x5a\x6c\x04"
- "\x6d\x55\x6e\x00\x6f\x9d"
- ,
- "\x70\x15\x71\x78\x72\x00\x73\x00" "\x74\x3a\x75\x35\x76\x01\x77\x02"
- "\x7a\x24\x7b\x04\x7c\x07\x7d\x10" "\x7e\x28\x7f\x36\x80\x44\x81\x52"
- "\x82\x60\x83\x6c\x84\x78\x85\x8c" "\x86\x9e\x87\xbb\x88\xd2\x89\xe5"
- "\x8a\x23\x8c\x8d\x90\x7c\x91\x7b"
- ,
- "\x9d\x02\x9e\x02\x9f\x74\xa0\x73" "\xa1\x40\xa4\x50\xa5\x68\xa6\x70"
- "\xa8\xc1\xa9\xef\xaa\x92\xab\x04" "\xac\x80\xad\x80\xae\x80\xaf\x80"
- "\xb2\xf2\xb3\x20\xb4\x20\xb5\x00" "\xb6\xaf"
- ,
- "\xbb\xae\xbc\x4f\xbd\x4e\xbe\x6a" "\xbf\x68\xc0\xaa\xc1\xc0\xc2\x01"
- "\xc3\x4e\xc6\x85\xc7\x81\xc9\xe0" "\xca\xe8\xcb\xf0\xcc\xd8\xcd\x93"
- ,
- "\xd0\x01\xd1\x08\xd2\xe0\xd3\x01" "\xd4\x10\xd5\x80"
+ (u8[]){
+ 0x00, 0x40, 0x07, 0x6a, 0x06, 0xf3, 0x0d, 0x6a,
+ 0x10, 0x10, 0xc1, 0x01
+ }, (u8[]){
+ 0x12, 0x80, 0x00, 0x00, 0x01, 0x98, 0x02, 0x80,
+ 0x03, 0x12, 0x04, 0x03, 0x0b, 0x57, 0x0e, 0x61,
+ 0x0f, 0x42, 0x11, 0x01, 0x12, 0x60, 0x13, 0x00,
+ 0x14, 0x3a, 0x16, 0x24, 0x17, 0x14, 0x18, 0x00,
+ 0x19, 0x01, 0x1a, 0x3d, 0x1e, 0x04, 0x24, 0x3c,
+ 0x25, 0x36, 0x26, 0x72, 0x27, 0x08, 0x28, 0x08,
+ 0x29, 0x15, 0x2a, 0x00, 0x2b, 0x00, 0x2c, 0x08
+ }, (u8[]){
+ 0x32, 0xff, 0x33, 0x00, 0x34, 0x3d, 0x35, 0x00,
+ 0x36, 0xfa, 0x38, 0x72, 0x39, 0x57, 0x3a, 0x00,
+ 0x3b, 0x0c, 0x3d, 0x99, 0x3e, 0x0c, 0x3f, 0xc1,
+ 0x40, 0xc0, 0x41, 0x00, 0x42, 0xc0, 0x43, 0x0a,
+ 0x44, 0xf0, 0x45, 0x46, 0x46, 0x62, 0x47, 0x2a,
+ 0x48, 0x3c, 0x4a, 0xee, 0x4b, 0xe7, 0x4c, 0xe7,
+ 0x4d, 0xe7, 0x4e, 0xe7
+ }, (u8[]){
+ 0x4f, 0x98, 0x50, 0x98, 0x51, 0x00, 0x52, 0x28,
+ 0x53, 0x70, 0x54, 0x98, 0x58, 0x1a, 0x59, 0x85,
+ 0x5a, 0xa9, 0x5b, 0x64, 0x5c, 0x84, 0x5d, 0x53,
+ 0x5e, 0x0e, 0x5f, 0xf0, 0x60, 0xf0, 0x61, 0xf0,
+ 0x62, 0x00, 0x63, 0x00, 0x64, 0x02, 0x65, 0x20,
+ 0x66, 0x00, 0x69, 0x0a, 0x6b, 0x5a, 0x6c, 0x04,
+ 0x6d, 0x55, 0x6e, 0x00, 0x6f, 0x9d
+ }, (u8[]){
+ 0x70, 0x15, 0x71, 0x78, 0x72, 0x00, 0x73, 0x00,
+ 0x74, 0x3a, 0x75, 0x35, 0x76, 0x01, 0x77, 0x02,
+ 0x7a, 0x24, 0x7b, 0x04, 0x7c, 0x07, 0x7d, 0x10,
+ 0x7e, 0x28, 0x7f, 0x36, 0x80, 0x44, 0x81, 0x52,
+ 0x82, 0x60, 0x83, 0x6c, 0x84, 0x78, 0x85, 0x8c,
+ 0x86, 0x9e, 0x87, 0xbb, 0x88, 0xd2, 0x89, 0xe5,
+ 0x8a, 0x23, 0x8c, 0x8d, 0x90, 0x7c, 0x91, 0x7b
+ }, (u8[]){
+ 0x9d, 0x02, 0x9e, 0x02, 0x9f, 0x74, 0xa0, 0x73,
+ 0xa1, 0x40, 0xa4, 0x50, 0xa5, 0x68, 0xa6, 0x70,
+ 0xa8, 0xc1, 0xa9, 0xef, 0xaa, 0x92, 0xab, 0x04,
+ 0xac, 0x80, 0xad, 0x80, 0xae, 0x80, 0xaf, 0x80,
+ 0xb2, 0xf2, 0xb3, 0x20, 0xb4, 0x20, 0xb5, 0x00,
+ 0xb6, 0xaf
+ }, (u8[]){
+ 0xbb, 0xae, 0xbc, 0x4f, 0xbd, 0x4e, 0xbe, 0x6a,
+ 0xbf, 0x68, 0xc0, 0xaa, 0xc1, 0xc0, 0xc2, 0x01,
+ 0xc3, 0x4e, 0xc6, 0x85, 0xc7, 0x81, 0xc9, 0xe0,
+ 0xca, 0xe8, 0xcb, 0xf0, 0xcc, 0xd8, 0xcd, 0x93
+ }, (u8[]){
+ 0xd0, 0x01, 0xd1, 0x08, 0xd2, 0xe0, 0xd3, 0x01,
+ 0xd4, 0x10, 0xd5, 0x80
+ }
};
static u8 *tbl_1280[] = {
- "\x00\x40\x07\x6a\x06\xf3\x0d\x6a" "\x10\x10\xc1\x01"
- ,
- "\x12\x80\x00\x00\x01\x98\x02\x80" "\x03\x12\x04\x01\x0b\x57\x0e\x61"
- "\x0f\x42\x11\x00\x12\x00\x13\x00" "\x14\x3a\x16\x24\x17\x1b\x18\xbb"
- "\x19\x01\x1a\x81\x1e\x04\x24\x3c" "\x25\x36\x26\x72\x27\x08\x28\x08"
- "\x29\x15\x2a\x00\x2b\x00\x2c\x08"
- ,
- "\x32\xa4\x33\x00\x34\x3d\x35\x00" "\x36\xf8\x38\x72\x39\x57\x3a\x00"
- "\x3b\x0c\x3d\x99\x3e\x0c\x3f\xc2" "\x40\xc0\x41\x00\x42\xc0\x43\x0a"
- "\x44\xf0\x45\x46\x46\x62\x47\x2a" "\x48\x3c\x4a\xec\x4b\xe8\x4c\xe8"
- "\x4d\xe8\x4e\xe8"
- ,
- "\x4f\x98\x50\x98\x51\x00\x52\x28" "\x53\x70\x54\x98\x58\x1a\x59\x85"
- "\x5a\xa9\x5b\x64\x5c\x84\x5d\x53" "\x5e\x0e\x5f\xf0\x60\xf0\x61\xf0"
- "\x62\x00\x63\x00\x64\x02\x65\x20" "\x66\x00\x69\x02\x6b\x5a\x6c\x04"
- "\x6d\x55\x6e\x00\x6f\x9d"
- ,
- "\x70\x08\x71\x78\x72\x00\x73\x01" "\x74\x3a\x75\x35\x76\x01\x77\x02"
- "\x7a\x24\x7b\x04\x7c\x07\x7d\x10" "\x7e\x28\x7f\x36\x80\x44\x81\x52"
- "\x82\x60\x83\x6c\x84\x78\x85\x8c" "\x86\x9e\x87\xbb\x88\xd2\x89\xe5"
- "\x8a\x23\x8c\x0d\x90\x90\x91\x90"
- ,
- "\x9d\x02\x9e\x02\x9f\x94\xa0\x94" "\xa1\x01\xa4\x50\xa5\x68\xa6\x70"
- "\xa8\xc1\xa9\xef\xaa\x92\xab\x04" "\xac\x80\xad\x80\xae\x80\xaf\x80"
- "\xb2\xf2\xb3\x20\xb4\x20\xb5\x00" "\xb6\xaf"
- ,
- "\xbb\xae\xbc\x38\xbd\x39\xbe\x01" "\xbf\x01\xc0\xe2\xc1\xc0\xc2\x01"
- "\xc3\x4e\xc6\x85\xc7\x81\xc9\xe0" "\xca\xe8\xcb\xf0\xcc\xd8\xcd\x93"
- ,
- "\xd0\x21\xd1\x18\xd2\xe0\xd3\x01" "\xd4\x28\xd5\x00"
+ (u8[]){
+ 0x00, 0x40, 0x07, 0x6a, 0x06, 0xf3, 0x0d, 0x6a,
+ 0x10, 0x10, 0xc1, 0x01
+ },
+ (u8[]){
+ 0x12, 0x80, 0x00, 0x00, 0x01, 0x98, 0x02, 0x80,
+ 0x03, 0x12, 0x04, 0x01, 0x0b, 0x57, 0x0e, 0x61,
+ 0x0f, 0x42, 0x11, 0x00, 0x12, 0x00, 0x13, 0x00,
+ 0x14, 0x3a, 0x16, 0x24, 0x17, 0x1b, 0x18, 0xbb,
+ 0x19, 0x01, 0x1a, 0x81, 0x1e, 0x04, 0x24, 0x3c,
+ 0x25, 0x36, 0x26, 0x72, 0x27, 0x08, 0x28, 0x08,
+ 0x29, 0x15, 0x2a, 0x00, 0x2b, 0x00, 0x2c, 0x08
+ },
+ (u8[]){
+ 0x32, 0xa4, 0x33, 0x00, 0x34, 0x3d, 0x35, 0x00,
+ 0x36, 0xf8, 0x38, 0x72, 0x39, 0x57, 0x3a, 0x00,
+ 0x3b, 0x0c, 0x3d, 0x99, 0x3e, 0x0c, 0x3f, 0xc2,
+ 0x40, 0xc0, 0x41, 0x00, 0x42, 0xc0, 0x43, 0x0a,
+ 0x44, 0xf0, 0x45, 0x46, 0x46, 0x62, 0x47, 0x2a,
+ 0x48, 0x3c, 0x4a, 0xec, 0x4b, 0xe8, 0x4c, 0xe8,
+ 0x4d, 0xe8, 0x4e, 0xe8
+ },
+ (u8[]){
+ 0x4f, 0x98, 0x50, 0x98, 0x51, 0x00, 0x52, 0x28,
+ 0x53, 0x70, 0x54, 0x98, 0x58, 0x1a, 0x59, 0x85,
+ 0x5a, 0xa9, 0x5b, 0x64, 0x5c, 0x84, 0x5d, 0x53,
+ 0x5e, 0x0e, 0x5f, 0xf0, 0x60, 0xf0, 0x61, 0xf0,
+ 0x62, 0x00, 0x63, 0x00, 0x64, 0x02, 0x65, 0x20,
+ 0x66, 0x00, 0x69, 0x02, 0x6b, 0x5a, 0x6c, 0x04,
+ 0x6d, 0x55, 0x6e, 0x00, 0x6f, 0x9d
+ },
+ (u8[]){
+ 0x70, 0x08, 0x71, 0x78, 0x72, 0x00, 0x73, 0x01,
+ 0x74, 0x3a, 0x75, 0x35, 0x76, 0x01, 0x77, 0x02,
+ 0x7a, 0x24, 0x7b, 0x04, 0x7c, 0x07, 0x7d, 0x10,
+ 0x7e, 0x28, 0x7f, 0x36, 0x80, 0x44, 0x81, 0x52,
+ 0x82, 0x60, 0x83, 0x6c, 0x84, 0x78, 0x85, 0x8c,
+ 0x86, 0x9e, 0x87, 0xbb, 0x88, 0xd2, 0x89, 0xe5,
+ 0x8a, 0x23, 0x8c, 0x0d, 0x90, 0x90, 0x91, 0x90
+ },
+ (u8[]){
+ 0x9d, 0x02, 0x9e, 0x02, 0x9f, 0x94, 0xa0, 0x94,
+ 0xa1, 0x01, 0xa4, 0x50, 0xa5, 0x68, 0xa6, 0x70,
+ 0xa8, 0xc1, 0xa9, 0xef, 0xaa, 0x92, 0xab, 0x04,
+ 0xac, 0x80, 0xad, 0x80, 0xae, 0x80, 0xaf, 0x80,
+ 0xb2, 0xf2, 0xb3, 0x20, 0xb4, 0x20, 0xb5, 0x00,
+ 0xb6, 0xaf
+ },
+ (u8[]){
+ 0xbb, 0xae, 0xbc, 0x38, 0xbd, 0x39, 0xbe, 0x01,
+ 0xbf, 0x01, 0xc0, 0xe2, 0xc1, 0xc0, 0xc2, 0x01,
+ 0xc3, 0x4e, 0xc6, 0x85, 0xc7, 0x81, 0xc9, 0xe0,
+ 0xca, 0xe8, 0xcb, 0xf0, 0xcc, 0xd8, 0xcd, 0x93
+ },
+ (u8[]){
+ 0xd0, 0x21, 0xd1, 0x18, 0xd2, 0xe0, 0xd3, 0x01,
+ 0xd4, 0x28, 0xd5, 0x00
+ }
};
static u8 c04[] = {0x04};
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 47d8f28bfdfc..770714c34295 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -444,6 +444,8 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
* next first packet, wake up the application and advance
* in the queue */
if (packet_type == LAST_PACKET) {
+ if (gspca_dev->image_len > gspca_dev->pixfmt.sizeimage)
+ gspca_dev->image_len = gspca_dev->pixfmt.sizeimage;
spin_lock_irqsave(&gspca_dev->qlock, flags);
list_del(&buf->list);
spin_unlock_irqrestore(&gspca_dev->qlock, flags);
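
A worked example of the overflow the new clamp guards against (numbers hypothetical):

	/*
	 * A 640x480 YUYV format negotiates sizeimage = 640 * 480 * 2 =
	 * 614400 bytes. If a misbehaving camera keeps streaming packets,
	 * image_len can exceed that; clamping it to sizeimage before the
	 * buffer is completed keeps user space within the vb2 buffer it
	 * actually mapped.
	 */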
diff --git a/drivers/media/usb/gspca/m5602/m5602_ov7660.h b/drivers/media/usb/gspca/m5602/m5602_ov7660.h
index d60247e10c2c..6146e8ef17c0 100644
--- a/drivers/media/usb/gspca/m5602/m5602_ov7660.h
+++ b/drivers/media/usb/gspca/m5602/m5602_ov7660.h
@@ -86,7 +86,6 @@ extern bool dump_sensor;
int ov7660_probe(struct sd *sd);
int ov7660_init(struct sd *sd);
-int ov7660_init(struct sd *sd);
int ov7660_init_controls(struct sd *sd);
int ov7660_start(struct sd *sd);
int ov7660_stop(struct sd *sd);
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index bfd194c61819..da916127a896 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL");
#define HAS_NO_BUTTON 0x1
#define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */
#define FLIP_DETECT 0x4
+#define HAS_LED_TORCH 0x8
/* specific webcam descriptor */
struct sd {
@@ -77,6 +78,8 @@ struct sd {
};
struct v4l2_ctrl *jpegqual;
+ struct v4l2_ctrl *led_mode;
+
struct work_struct work;
u32 pktsz; /* (used by pkt_scan) */
@@ -1533,6 +1536,12 @@ static void set_gain(struct gspca_dev *gspca_dev, s32 g)
i2c_w(gspca_dev, gain);
}
+static void set_led_mode(struct gspca_dev *gspca_dev, s32 val)
+{
+ reg_w1(gspca_dev, 0x1007, 0x60);
+ reg_w1(gspca_dev, 0x1006, val ? 0x40 : 0x00);
+}
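
With the control registered further below, the torch can be toggled from user space; a hypothetical invocation (device node and control name depend on the system):

	# V4L2_FLASH_LED_MODE_TORCH == 2; the skip mask ~0x5 leaves only
	# the NONE (0) and TORCH (2) menu entries selectable.
	v4l2-ctl -d /dev/video0 --set-ctrl=led_mode=2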
+
static void set_quality(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1699,6 +1708,9 @@ static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_JPEG_COMPRESSION_QUALITY:
set_quality(gspca_dev, ctrl->val);
break;
+ case V4L2_CID_FLASH_LED_MODE:
+ set_led_mode(gspca_dev, ctrl->val);
+ break;
}
return gspca_dev->usb_err;
}
@@ -1757,6 +1769,12 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+
+ if (sd->flags & HAS_LED_TORCH)
+ sd->led_mode = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_FLASH_LED_MODE, V4L2_FLASH_LED_MODE_TORCH,
+ ~0x5, V4L2_FLASH_LED_MODE_NONE);
+
if (hdl->error) {
pr_err("Could not initialize controls\n");
return hdl->error;
@@ -2048,6 +2066,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
sd->pktsz = sd->npkt = 0;
sd->nchg = 0;
}
+ if (sd->led_mode)
+ v4l2_ctrl_s_ctrl(sd->led_mode, 0);
return gspca_dev->usb_err;
}
@@ -2325,7 +2345,7 @@ static const struct sd_desc sd_desc = {
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
- {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
+ {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, HAS_LED_TORCH)},
{USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x624c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, LED_REVERSE)},
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
index 9f71d8c2a3c6..8ae3ad80cccb 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
@@ -355,11 +355,8 @@ static int parse_token(const char *ptr,unsigned int len,
int *valptr,
const char * const *names, unsigned int namecnt)
{
- char buf[33];
unsigned int slen;
unsigned int idx;
- int negfl;
- char *p2;
*valptr = 0;
if (!names) namecnt = 0;
for (idx = 0; idx < namecnt; idx++) {
@@ -370,18 +367,7 @@ static int parse_token(const char *ptr,unsigned int len,
*valptr = idx;
return 0;
}
- negfl = 0;
- if ((*ptr == '-') || (*ptr == '+')) {
- negfl = (*ptr == '-');
- ptr++; len--;
- }
- if (len >= sizeof(buf)) return -EINVAL;
- memcpy(buf,ptr,len);
- buf[len] = 0;
- *valptr = simple_strtol(buf,&p2,0);
- if (negfl) *valptr = -(*valptr);
- if (*p2) return -EINVAL;
- return 1;
+ return kstrtoint(ptr, 0, valptr) ? -EINVAL : 1;
}
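
kstrtoint() with base 0 covers everything the removed hand-rolled parser did: it accepts an optional leading sign, auto-detects 0x/0 prefixes, and rejects trailing garbage. A quick sketch of the semantics:

	int v;

	kstrtoint("42", 0, &v);		/* returns 0, v == 42  */
	kstrtoint("-42", 0, &v);	/* returns 0, v == -42 */
	kstrtoint("0x2a", 0, &v);	/* returns 0, v == 42  */
	kstrtoint("42abc", 0, &v);	/* returns -EINVAL     */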
@@ -389,10 +375,8 @@ static int parse_mtoken(const char *ptr,unsigned int len,
int *valptr,
const char **names,int valid_bits)
{
- char buf[33];
unsigned int slen;
unsigned int idx;
- char *p2;
int msk;
*valptr = 0;
for (idx = 0, msk = 1; valid_bits; idx++, msk <<= 1) {
@@ -405,12 +389,7 @@ static int parse_mtoken(const char *ptr,unsigned int len,
*valptr = msk;
return 0;
}
- if (len >= sizeof(buf)) return -EINVAL;
- memcpy(buf,ptr,len);
- buf[len] = 0;
- *valptr = simple_strtol(buf,&p2,0);
- if (*p2) return -EINVAL;
- return 0;
+ return kstrtoint(ptr, 0, valptr);
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 9657c1883311..c04ab7258d64 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -640,10 +640,6 @@ static int pvr2_s_ext_ctrls(struct file *file, void *priv,
unsigned int idx;
int ret;
- /* Default value cannot be changed */
- if (ctls->which == V4L2_CTRL_WHICH_DEF_VAL)
- return -EINVAL;
-
ret = 0;
for (idx = 0; idx < ctls->count; idx++) {
ctrl = ctls->controls + idx;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 0e231e576dc3..9f445e6ab5fa 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -1234,6 +1234,11 @@ static void stk_v4l_dev_release(struct video_device *vd)
if (dev->sio_bufs != NULL || dev->isobufs != NULL)
pr_err("We are leaking memory\n");
usb_put_intf(dev->interface);
+ usb_put_dev(dev->udev);
+
+ v4l2_ctrl_handler_free(&dev->hdl);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ kfree(dev);
}
static const struct video_device stk_v4l_data = {
@@ -1309,7 +1314,7 @@ static int stk_camera_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->wait_frame);
dev->first_init = 1; /* webcam LED management */
- dev->udev = udev;
+ dev->udev = usb_get_dev(udev);
dev->interface = interface;
usb_get_intf(interface);
@@ -1365,6 +1370,7 @@ static int stk_camera_probe(struct usb_interface *interface,
error_put:
usb_put_intf(interface);
+ usb_put_dev(dev->udev);
error:
v4l2_ctrl_handler_free(hdl);
v4l2_device_unregister(&dev->v4l2_dev);
@@ -1385,9 +1391,6 @@ static void stk_camera_disconnect(struct usb_interface *interface)
video_device_node_name(&dev->vdev));
video_unregister_device(&dev->vdev);
- v4l2_ctrl_handler_free(&dev->hdl);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
}
#ifdef CONFIG_PM
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 3f650ede0c3d..e293f6f3d1bc 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -852,8 +852,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
strscpy(cap->driver, "tm6000", sizeof(cap->driver));
- strscpy(cap->card, "Trident TVMaster TM5600/6000/6010",
- sizeof(cap->card));
+ strscpy(cap->card, "Trident TM5600/6000/6010", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_DEVICE_CAPS;
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index bfda46a36dc5..38822cedd93a 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -327,7 +327,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
result = mutex_lock_interruptible(&dec->usb_mutex);
if (result) {
printk("%s: Failed to lock usb mutex.\n", __func__);
- goto err;
+ goto err_free;
}
b[0] = 0xaa;
@@ -349,7 +349,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
if (result) {
printk("%s: command bulk message failed: error %d\n",
__func__, result);
- goto err;
+ goto err_mutex_unlock;
}
result = usb_bulk_msg(dec->udev, dec->result_pipe, b,
@@ -358,7 +358,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
if (result) {
printk("%s: result bulk message failed: error %d\n",
__func__, result);
- goto err;
+ goto err_mutex_unlock;
} else {
if (debug) {
printk(KERN_DEBUG "%s: result: %*ph\n",
@@ -371,9 +371,9 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
memcpy(cmd_result, &b[4], b[3]);
}
-err:
+err_mutex_unlock:
mutex_unlock(&dec->usb_mutex);
-
+err_free:
kfree(b);
return result;
}
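
The relabelled exits follow the usual kernel convention of one label per acquired resource, unwound in reverse order of acquisition. A sketch of the pattern with invented names (struct ctx and do_transfer() are hypothetical, not from this driver):

	static int send_cmd(struct ctx *c)
	{
		u8 *b = kmalloc(64, GFP_KERNEL);
		int ret;

		if (!b)
			return -ENOMEM;
		ret = mutex_lock_interruptible(&c->lock);
		if (ret)
			goto err_free;		/* lock was never taken */
		ret = do_transfer(c, b);
		if (ret)
			goto err_mutex_unlock;	/* unlock, then free */
	err_mutex_unlock:
		mutex_unlock(&c->lock);
	err_free:
		kfree(b);
		return ret;
	}

On success the code simply falls through both labels, just as ttusb_dec_send_command() does above.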
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index b3dde98499f4..30bfe9069a1f 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -357,6 +357,11 @@ static const struct uvc_control_info uvc_ctrls[] = {
},
};
+static const u32 uvc_control_classes[] = {
+ V4L2_CID_CAMERA_CLASS,
+ V4L2_CID_USER_CLASS,
+};
+
static const struct uvc_menu_info power_line_frequency_controls[] = {
{ 0, "Disabled" },
{ 1, "50 Hz" },
@@ -427,7 +432,6 @@ static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,
static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
{
.id = V4L2_CID_BRIGHTNESS,
- .name = "Brightness",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_BRIGHTNESS_CONTROL,
.size = 16,
@@ -437,7 +441,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_CONTRAST,
- .name = "Contrast",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_CONTRAST_CONTROL,
.size = 16,
@@ -447,7 +450,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_HUE,
- .name = "Hue",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_CONTROL,
.size = 16,
@@ -459,7 +461,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_SATURATION,
- .name = "Saturation",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_SATURATION_CONTROL,
.size = 16,
@@ -469,7 +470,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_SHARPNESS,
- .name = "Sharpness",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_SHARPNESS_CONTROL,
.size = 16,
@@ -479,7 +479,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_GAMMA,
- .name = "Gamma",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_GAMMA_CONTROL,
.size = 16,
@@ -489,7 +488,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_BACKLIGHT_COMPENSATION,
- .name = "Backlight Compensation",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL,
.size = 16,
@@ -499,7 +497,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_GAIN,
- .name = "Gain",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_GAIN_CONTROL,
.size = 16,
@@ -509,7 +506,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
- .name = "Power Line Frequency",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
.size = 2,
@@ -521,7 +517,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_HUE_AUTO,
- .name = "Hue, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_AUTO_CONTROL,
.size = 1,
@@ -532,7 +527,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_EXPOSURE_AUTO,
- .name = "Exposure, Auto",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_AE_MODE_CONTROL,
.size = 4,
@@ -545,7 +539,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
- .name = "Exposure, Auto Priority",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_AE_PRIORITY_CONTROL,
.size = 1,
@@ -555,7 +548,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_EXPOSURE_ABSOLUTE,
- .name = "Exposure (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL,
.size = 32,
@@ -567,7 +559,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
- .name = "White Balance Temperature, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL,
.size = 1,
@@ -578,7 +569,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
- .name = "White Balance Temperature",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL,
.size = 16,
@@ -590,7 +580,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
- .name = "White Balance Component, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL,
.size = 1,
@@ -602,7 +591,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_BLUE_BALANCE,
- .name = "White Balance Blue Component",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.size = 16,
@@ -614,7 +602,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_RED_BALANCE,
- .name = "White Balance Red Component",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.size = 16,
@@ -626,7 +613,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_FOCUS_ABSOLUTE,
- .name = "Focus (absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL,
.size = 16,
@@ -638,7 +624,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_FOCUS_AUTO,
- .name = "Focus, Auto",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_AUTO_CONTROL,
.size = 1,
@@ -649,7 +634,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_IRIS_ABSOLUTE,
- .name = "Iris, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_IRIS_ABSOLUTE_CONTROL,
.size = 16,
@@ -659,7 +643,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_IRIS_RELATIVE,
- .name = "Iris, Relative",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_IRIS_RELATIVE_CONTROL,
.size = 8,
@@ -669,7 +652,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_ZOOM_ABSOLUTE,
- .name = "Zoom, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL,
.size = 16,
@@ -679,7 +661,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_ZOOM_CONTINUOUS,
- .name = "Zoom, Continuous",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ZOOM_RELATIVE_CONTROL,
.size = 0,
@@ -691,7 +672,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PAN_ABSOLUTE,
- .name = "Pan (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL,
.size = 32,
@@ -701,7 +681,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_TILT_ABSOLUTE,
- .name = "Tilt (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL,
.size = 32,
@@ -711,7 +690,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PAN_SPEED,
- .name = "Pan (Speed)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.size = 16,
@@ -723,7 +701,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_TILT_SPEED,
- .name = "Tilt (Speed)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.size = 16,
@@ -735,7 +712,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PRIVACY,
- .name = "Privacy",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PRIVACY_CONTROL,
.size = 1,
@@ -745,7 +721,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PRIVACY,
- .name = "Privacy",
.entity = UVC_GUID_EXT_GPIO_CONTROLLER,
.selector = UVC_CT_PRIVACY_CONTROL,
.size = 1,
@@ -1024,6 +999,85 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
return 0;
}
+static int __uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
+ u32 found_id)
+{
+ bool find_next = req_id & V4L2_CTRL_FLAG_NEXT_CTRL;
+ unsigned int i;
+
+ req_id &= V4L2_CTRL_ID_MASK;
+
+ for (i = 0; i < ARRAY_SIZE(uvc_control_classes); i++) {
+ if (!(chain->ctrl_class_bitmap & BIT(i)))
+ continue;
+ if (!find_next) {
+ if (uvc_control_classes[i] == req_id)
+ return i;
+ continue;
+ }
+ if (uvc_control_classes[i] > req_id &&
+ uvc_control_classes[i] < found_id)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
+ u32 found_id, struct v4l2_queryctrl *v4l2_ctrl)
+{
+ int idx;
+
+ idx = __uvc_query_v4l2_class(chain, req_id, found_id);
+ if (idx < 0)
+ return -ENODEV;
+
+ memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
+ v4l2_ctrl->id = uvc_control_classes[idx];
+ strscpy(v4l2_ctrl->name, v4l2_ctrl_get_name(v4l2_ctrl->id),
+ sizeof(v4l2_ctrl->name));
+ v4l2_ctrl->type = V4L2_CTRL_TYPE_CTRL_CLASS;
+ v4l2_ctrl->flags = V4L2_CTRL_FLAG_WRITE_ONLY
+ | V4L2_CTRL_FLAG_READ_ONLY;
+ return 0;
+}
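
From user space, the visible effect is that class controls now appear during flag-based enumeration. A standard V4L2 enumeration loop (hypothetical file descriptor; nothing here is specific to uvcvideo):

	struct v4l2_queryctrl qc = { .id = V4L2_CTRL_FLAG_NEXT_CTRL };

	/* Walks every control, including the synthesized class controls. */
	while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
		printf("0x%08x %s\n", qc.id, qc.name);
		qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
	}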
+
+int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+ bool read)
+{
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl;
+
+ if (__uvc_query_v4l2_class(chain, v4l2_id, 0) >= 0)
+ return -EACCES;
+
+ ctrl = uvc_find_control(chain, v4l2_id, &mapping);
+ if (!ctrl)
+ return -EINVAL;
+
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) && read)
+ return -EACCES;
+
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) && !read)
+ return -EACCES;
+
+ return 0;
+}
+
+static const char *uvc_map_get_name(const struct uvc_control_mapping *map)
+{
+ const char *name;
+
+ if (map->name)
+ return map->name;
+
+ name = v4l2_ctrl_get_name(map->id);
+ if (name)
+ return name;
+
+ return "Unknown Control";
+}
+
static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct uvc_control *ctrl,
struct uvc_control_mapping *mapping,
@@ -1037,7 +1091,8 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
v4l2_ctrl->id = mapping->id;
v4l2_ctrl->type = mapping->v4l2_type;
- strscpy(v4l2_ctrl->name, mapping->name, sizeof(v4l2_ctrl->name));
+ strscpy(v4l2_ctrl->name, uvc_map_get_name(mapping),
+ sizeof(v4l2_ctrl->name));
v4l2_ctrl->flags = 0;
if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
@@ -1127,12 +1182,31 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
if (ret < 0)
return -ERESTARTSYS;
+	/* Check if the ctrl is a known class */
+ if (!(v4l2_ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL)) {
+ ret = uvc_query_v4l2_class(chain, v4l2_ctrl->id, 0, v4l2_ctrl);
+ if (!ret)
+ goto done;
+ }
+
ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
if (ctrl == NULL) {
ret = -EINVAL;
goto done;
}
+ /*
+	 * If we're enumerating controls with V4L2_CTRL_FLAG_NEXT_CTRL, check if
+ * a class should be inserted between the previous control and the one
+ * we have just found.
+ */
+ if (v4l2_ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
+ ret = uvc_query_v4l2_class(chain, v4l2_ctrl->id, mapping->id,
+ v4l2_ctrl);
+ if (!ret)
+ goto done;
+ }
+
ret = __uvc_query_v4l2_ctrl(chain, ctrl, mapping, v4l2_ctrl);
done:
mutex_unlock(&chain->ctrl_mutex);
@@ -1426,6 +1500,11 @@ static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
if (ret < 0)
return -ERESTARTSYS;
+ if (__uvc_query_v4l2_class(handle->chain, sev->id, 0) >= 0) {
+ ret = 0;
+ goto done;
+ }
+
ctrl = uvc_find_control(handle->chain, sev->id, &mapping);
if (ctrl == NULL) {
ret = -EINVAL;
@@ -1459,7 +1538,10 @@ static void uvc_ctrl_del_event(struct v4l2_subscribed_event *sev)
struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh);
mutex_lock(&handle->chain->ctrl_mutex);
+ if (__uvc_query_v4l2_class(handle->chain, sev->id, 0) >= 0)
+ goto done;
list_del(&sev->node);
+done:
mutex_unlock(&handle->chain->ctrl_mutex);
}
@@ -1500,7 +1582,7 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
}
static int uvc_ctrl_commit_entity(struct uvc_device *dev,
- struct uvc_entity *entity, int rollback)
+ struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl)
{
struct uvc_control *ctrl;
unsigned int i;
@@ -1542,31 +1624,59 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
ctrl->dirty = 0;
- if (ret < 0)
+ if (ret < 0) {
+ if (err_ctrl)
+ *err_ctrl = ctrl;
return ret;
+ }
}
return 0;
}
+static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity,
+ struct v4l2_ext_controls *ctrls,
+ struct uvc_control *uvc_control)
+{
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl_found;
+ unsigned int i;
+
+ if (!entity)
+ return ctrls->count;
+
+ for (i = 0; i < ctrls->count; i++) {
+ __uvc_find_control(entity, ctrls->controls[i].id, &mapping,
+ &ctrl_found, 0);
+ if (uvc_control == ctrl_found)
+ return i;
+ }
+
+ return ctrls->count;
+}
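
uvc_ctrl_find_ctrl_idx() exists so VIDIOC_S_EXT_CTRLS can report which element of the caller's array failed, as the V4L2 API requires:

	/*
	 * Hypothetical outcome: committing three controls where the
	 * second is rejected by the device yields error_idx == 1; if
	 * the failing control cannot be matched back to an entry,
	 * error_idx is set to ctrls->count (3 here) instead.
	 */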
+
int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
- const struct v4l2_ext_control *xctrls,
- unsigned int xctrls_count)
+ struct v4l2_ext_controls *ctrls)
{
struct uvc_video_chain *chain = handle->chain;
+ struct uvc_control *err_ctrl;
struct uvc_entity *entity;
int ret = 0;
/* Find the control. */
list_for_each_entry(entity, &chain->entities, chain) {
- ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback);
+ ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback,
+ &err_ctrl);
if (ret < 0)
goto done;
}
if (!rollback)
- uvc_ctrl_send_events(handle, xctrls, xctrls_count);
+ uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count);
done:
+ if (ret < 0 && ctrls)
+ ctrls->error_idx = uvc_ctrl_find_ctrl_idx(entity, ctrls,
+ err_ctrl);
mutex_unlock(&chain->ctrl_mutex);
return ret;
}
@@ -1577,6 +1687,9 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
+ if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
+ return -EACCES;
+
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
if (ctrl == NULL)
return -EINVAL;
@@ -1596,6 +1709,9 @@ int uvc_ctrl_set(struct uvc_fh *handle,
s32 max;
int ret;
+ if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
+ return -EACCES;
+
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
if (ctrl == NULL)
return -EINVAL;
@@ -2011,14 +2127,14 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
if (!ctrl->initialized || !ctrl->modified ||
(ctrl->info.flags & UVC_CTRL_FLAG_RESTORE) == 0)
continue;
- dev_info(&dev->udev->dev,
- "restoring control %pUl/%u/%u\n",
- ctrl->info.entity, ctrl->info.index,
- ctrl->info.selector);
+ dev_dbg(&dev->udev->dev,
+ "restoring control %pUl/%u/%u\n",
+ ctrl->info.entity, ctrl->info.index,
+ ctrl->info.selector);
ctrl->dirty = 1;
}
- ret = uvc_ctrl_commit_entity(dev, entity, 0);
+ ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL);
if (ret < 0)
return ret;
}
@@ -2057,11 +2173,12 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
/*
* Add a control mapping to a given control.
*/
-static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
+static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
struct uvc_control *ctrl, const struct uvc_control_mapping *mapping)
{
struct uvc_control_mapping *map;
unsigned int size;
+ unsigned int i;
/* Most mappings come from static kernel data and need to be duplicated.
* Mappings that come from userspace will be unnecessarily duplicated,
@@ -2085,9 +2202,18 @@ static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
if (map->set == NULL)
map->set = uvc_set_le_value;
+ for (i = 0; i < ARRAY_SIZE(uvc_control_classes); i++) {
+ if (V4L2_CTRL_ID2WHICH(uvc_control_classes[i]) ==
+ V4L2_CTRL_ID2WHICH(map->id)) {
+ chain->ctrl_class_bitmap |= BIT(i);
+ break;
+ }
+ }
+
list_add_tail(&map->list, &ctrl->info.mappings);
- uvc_dbg(dev, CONTROL, "Adding mapping '%s' to control %pUl/%u\n",
- map->name, ctrl->info.entity, ctrl->info.selector);
+ uvc_dbg(chain->dev, CONTROL, "Adding mapping '%s' to control %pUl/%u\n",
+ uvc_map_get_name(map), ctrl->info.entity,
+ ctrl->info.selector);
return 0;
}
@@ -2105,7 +2231,7 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
if (mapping->id & ~V4L2_CTRL_ID_MASK) {
uvc_dbg(dev, CONTROL,
"Can't add mapping '%s', control id 0x%08x is invalid\n",
- mapping->name, mapping->id);
+ uvc_map_get_name(mapping), mapping->id);
return -EINVAL;
}
@@ -2152,7 +2278,7 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
if (mapping->id == map->id) {
uvc_dbg(dev, CONTROL,
"Can't add mapping '%s', control id 0x%08x already exists\n",
- mapping->name, mapping->id);
+ uvc_map_get_name(mapping), mapping->id);
ret = -EEXIST;
goto done;
}
@@ -2163,12 +2289,12 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
atomic_dec(&dev->nmappings);
uvc_dbg(dev, CONTROL,
"Can't add mapping '%s', maximum mappings count (%u) exceeded\n",
- mapping->name, UVC_MAX_CONTROL_MAPPINGS);
+ uvc_map_get_name(mapping), UVC_MAX_CONTROL_MAPPINGS);
ret = -ENOMEM;
goto done;
}
- ret = __uvc_ctrl_add_mapping(dev, ctrl, mapping);
+ ret = __uvc_ctrl_add_mapping(chain, ctrl, mapping);
if (ret < 0)
atomic_dec(&dev->nmappings);
@@ -2244,7 +2370,8 @@ static void uvc_ctrl_prune_entity(struct uvc_device *dev,
* Add control information and hardcoded stock control mappings to the given
* device.
*/
-static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
+static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl)
{
const struct uvc_control_info *info = uvc_ctrls;
const struct uvc_control_info *iend = info + ARRAY_SIZE(uvc_ctrls);
@@ -2263,14 +2390,14 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
for (; info < iend; ++info) {
if (uvc_entity_match_guid(ctrl->entity, info->entity) &&
ctrl->index == info->index) {
- uvc_ctrl_add_info(dev, ctrl, info);
+ uvc_ctrl_add_info(chain->dev, ctrl, info);
/*
* Retrieve control flags from the device. Ignore errors
* and work with default flag values from the uvc_ctrl
* array when the device doesn't properly implement
* GET_INFO on standard controls.
*/
- uvc_ctrl_get_flags(dev, ctrl, &ctrl->info);
+ uvc_ctrl_get_flags(chain->dev, ctrl, &ctrl->info);
break;
}
}
@@ -2281,22 +2408,20 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
for (; mapping < mend; ++mapping) {
if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
ctrl->info.selector == mapping->selector)
- __uvc_ctrl_add_mapping(dev, ctrl, mapping);
+ __uvc_ctrl_add_mapping(chain, ctrl, mapping);
}
}
/*
* Initialize device controls.
*/
-int uvc_ctrl_init_device(struct uvc_device *dev)
+static int uvc_ctrl_init_chain(struct uvc_video_chain *chain)
{
struct uvc_entity *entity;
unsigned int i;
- INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work);
-
/* Walk the entities list and instantiate controls */
- list_for_each_entry(entity, &dev->entities, list) {
+ list_for_each_entry(entity, &chain->entities, chain) {
struct uvc_control *ctrl;
unsigned int bControlSize = 0, ncontrols;
u8 *bmControls = NULL;
@@ -2316,7 +2441,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
}
/* Remove bogus/blacklisted controls */
- uvc_ctrl_prune_entity(dev, entity);
+ uvc_ctrl_prune_entity(chain->dev, entity);
/* Count supported controls and allocate the controls array */
ncontrols = memweight(bmControls, bControlSize);
@@ -2338,7 +2463,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
ctrl->entity = entity;
ctrl->index = i;
- uvc_ctrl_init_ctrl(dev, ctrl);
+ uvc_ctrl_init_ctrl(chain, ctrl);
ctrl++;
}
}
@@ -2346,6 +2471,22 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
return 0;
}
+int uvc_ctrl_init_device(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+ int ret;
+
+ INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work);
+
+ list_for_each_entry(chain, &dev->chains, list) {
+ ret = uvc_ctrl_init_chain(chain);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* Cleanup device controls.
*/
@@ -2357,6 +2498,7 @@ static void uvc_ctrl_cleanup_mappings(struct uvc_device *dev,
list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) {
list_del(&mapping->list);
kfree(mapping->menu_info);
+ kfree(mapping->name);
kfree(mapping);
}
}
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 9a791d8ef200..7c007426e082 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -16,7 +16,6 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <linux/version.h>
#include <asm/unaligned.h>
#include <media/v4l2-common.h>
@@ -2194,6 +2193,7 @@ int uvc_register_video_device(struct uvc_device *dev,
const struct v4l2_file_operations *fops,
const struct v4l2_ioctl_ops *ioctl_ops)
{
+ const char *name;
int ret;
/* Initialize the video buffers queue. */
@@ -2222,16 +2222,20 @@ int uvc_register_video_device(struct uvc_device *dev,
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
default:
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ name = "Video Capture";
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ name = "Video Output";
break;
case V4L2_BUF_TYPE_META_CAPTURE:
vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
+ name = "Metadata";
break;
}
- strscpy(vdev->name, dev->name, sizeof(vdev->name));
+ snprintf(vdev->name, sizeof(vdev->name), "%s %u", name,
+ stream->header.bTerminalLink);
/*
* Set the driver data before calling video_register_device, otherwise
@@ -2455,14 +2459,14 @@ static int uvc_probe(struct usb_interface *intf,
if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
goto error;
- /* Initialize controls. */
- if (uvc_ctrl_init_device(dev) < 0)
- goto error;
-
/* Scan the device for video chains. */
if (uvc_scan_device(dev) < 0)
goto error;
+ /* Initialize controls. */
+ if (uvc_ctrl_init_device(dev) < 0)
+ goto error;
+
/* Register video device nodes. */
if (uvc_register_chains(dev) < 0)
goto error;
diff --git a/drivers/media/usb/uvc/uvc_metadata.c b/drivers/media/usb/uvc/uvc_metadata.c
index b6279ad7ac84..82de7781f5b6 100644
--- a/drivers/media/usb/uvc/uvc_metadata.c
+++ b/drivers/media/usb/uvc/uvc_metadata.c
@@ -30,7 +30,7 @@ static int uvc_meta_v4l2_querycap(struct file *file, void *fh,
struct uvc_video_chain *chain = stream->chain;
strscpy(cap->driver, "uvcvideo", sizeof(cap->driver));
- strscpy(cap->card, vfh->vdev->name, sizeof(cap->card));
+ strscpy(cap->card, stream->dev->name, sizeof(cap->card));
usb_make_path(stream->dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
| chain->caps;
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 6acb8013de08..f4e4aff8ddf7 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -40,7 +40,13 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
return -ENOMEM;
map->id = xmap->id;
- memcpy(map->name, xmap->name, sizeof(map->name));
+ /* Non-standard control id. */
+ if (v4l2_ctrl_get_name(map->id) == NULL) {
+ map->name = kmemdup(xmap->name, sizeof(xmap->name),
+ GFP_KERNEL);
+ if (!map->name)
+ return -ENOMEM;
+ }
memcpy(map->entity, xmap->entity, sizeof(map->entity));
map->selector = xmap->selector;
map->size = xmap->size;
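
uvc_map_get_name() is used throughout this series but its definition falls outside this excerpt. A sketch consistent with the callers above, where map->name is now allocated only for non-standard IDs:

const char *uvc_map_get_name(const struct uvc_control_mapping *map)
{
	const char *name;

	/* Prefer the name supplied by user space, if any. */
	if (map->name)
		return map->name;

	/* Standard control IDs have a canonical V4L2 name. */
	name = v4l2_ctrl_get_name(map->id);
	if (name)
		return name;

	return "Unknown Control";
}
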
@@ -472,10 +478,13 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
uvc_simplify_fraction(&timeperframe.numerator,
&timeperframe.denominator, 8, 333);
- if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
parm->parm.capture.timeperframe = timeperframe;
- else
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ } else {
parm->parm.output.timeperframe = timeperframe;
+ parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ }
return 0;
}
@@ -614,13 +623,12 @@ static int uvc_v4l2_release(struct file *file)
static int uvc_ioctl_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
struct uvc_fh *handle = file->private_data;
struct uvc_video_chain *chain = handle->chain;
struct uvc_streaming *stream = handle->stream;
strscpy(cap->driver, "uvcvideo", sizeof(cap->driver));
- strscpy(cap->card, vdev->name, sizeof(cap->card));
+ strscpy(cap->card, handle->stream->dev->name, sizeof(cap->card));
usb_make_path(stream->dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
| chain->caps;
@@ -995,58 +1003,24 @@ static int uvc_ioctl_query_ext_ctrl(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_g_ctrl(struct file *file, void *fh,
- struct v4l2_control *ctrl)
+static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
+ struct v4l2_ext_controls *ctrls,
+ unsigned long ioctl)
{
- struct uvc_fh *handle = fh;
- struct uvc_video_chain *chain = handle->chain;
- struct v4l2_ext_control xctrl;
- int ret;
-
- memset(&xctrl, 0, sizeof(xctrl));
- xctrl.id = ctrl->id;
-
- ret = uvc_ctrl_begin(chain);
- if (ret < 0)
- return ret;
-
- ret = uvc_ctrl_get(chain, &xctrl);
- uvc_ctrl_rollback(handle);
- if (ret < 0)
- return ret;
-
- ctrl->value = xctrl.value;
- return 0;
-}
-
-static int uvc_ioctl_s_ctrl(struct file *file, void *fh,
- struct v4l2_control *ctrl)
-{
- struct uvc_fh *handle = fh;
- struct uvc_video_chain *chain = handle->chain;
- struct v4l2_ext_control xctrl;
- int ret;
-
- memset(&xctrl, 0, sizeof(xctrl));
- xctrl.id = ctrl->id;
- xctrl.value = ctrl->value;
-
- ret = uvc_ctrl_begin(chain);
- if (ret < 0)
- return ret;
+ struct v4l2_ext_control *ctrl = ctrls->controls;
+ unsigned int i;
+ int ret = 0;
- ret = uvc_ctrl_set(handle, &xctrl);
- if (ret < 0) {
- uvc_ctrl_rollback(handle);
- return ret;
+ for (i = 0; i < ctrls->count; ++ctrl, ++i) {
+ ret = uvc_ctrl_is_accessible(chain, ctrl->id,
+ ioctl == VIDIOC_G_EXT_CTRLS);
+ if (ret)
+ break;
}
- ret = uvc_ctrl_commit(handle, &xctrl, 1);
- if (ret < 0)
- return ret;
+ ctrls->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i : ctrls->count;
- ctrl->value = xctrl.value;
- return 0;
+ return ret;
}
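
uvc_ctrl_check_access() walks the request before any hardware access and fails fast on a permission problem. The error_idx convention it implements follows the V4L2 spec:

/*
 *   VIDIOC_TRY_EXT_CTRLS   -> error_idx = i            (failing control)
 *   VIDIOC_G/S_EXT_CTRLS   -> error_idx = ctrls->count (failed during
 *                             validation, before touching the device)
 */
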
static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
@@ -1058,6 +1032,10 @@ static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
unsigned int i;
int ret;
+ ret = uvc_ctrl_check_access(chain, ctrls, VIDIOC_G_EXT_CTRLS);
+ if (ret < 0)
+ return ret;
+
if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL) {
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
struct v4l2_queryctrl qc = { .id = ctrl->id };
@@ -1094,16 +1072,16 @@ static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
struct v4l2_ext_controls *ctrls,
- bool commit)
+ unsigned long ioctl)
{
struct v4l2_ext_control *ctrl = ctrls->controls;
struct uvc_video_chain *chain = handle->chain;
unsigned int i;
int ret;
- /* Default value cannot be changed */
- if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL)
- return -EINVAL;
+ ret = uvc_ctrl_check_access(chain, ctrls, ioctl);
+ if (ret < 0)
+ return ret;
ret = uvc_ctrl_begin(chain);
if (ret < 0)
@@ -1113,15 +1091,16 @@ static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
ret = uvc_ctrl_set(handle, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
- ctrls->error_idx = commit ? ctrls->count : i;
+ ctrls->error_idx = ioctl == VIDIOC_S_EXT_CTRLS ?
+ ctrls->count : i;
return ret;
}
}
ctrls->error_idx = 0;
- if (commit)
- return uvc_ctrl_commit(handle, ctrls->controls, ctrls->count);
+ if (ioctl == VIDIOC_S_EXT_CTRLS)
+ return uvc_ctrl_commit(handle, ctrls);
else
return uvc_ctrl_rollback(handle);
}
@@ -1131,7 +1110,7 @@ static int uvc_ioctl_s_ext_ctrls(struct file *file, void *fh,
{
struct uvc_fh *handle = fh;
- return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, true);
+ return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, VIDIOC_S_EXT_CTRLS);
}
static int uvc_ioctl_try_ext_ctrls(struct file *file, void *fh,
@@ -1139,7 +1118,7 @@ static int uvc_ioctl_try_ext_ctrls(struct file *file, void *fh,
{
struct uvc_fh *handle = fh;
- return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, false);
+ return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, VIDIOC_TRY_EXT_CTRLS);
}
static int uvc_ioctl_querymenu(struct file *file, void *fh,
@@ -1538,8 +1517,6 @@ const struct v4l2_ioctl_ops uvc_ioctl_ops = {
.vidioc_s_input = uvc_ioctl_s_input,
.vidioc_queryctrl = uvc_ioctl_queryctrl,
.vidioc_query_ext_ctrl = uvc_ioctl_query_ext_ctrl,
- .vidioc_g_ctrl = uvc_ioctl_g_ctrl,
- .vidioc_s_ctrl = uvc_ioctl_s_ctrl,
.vidioc_g_ext_ctrls = uvc_ioctl_g_ext_ctrls,
.vidioc_s_ext_ctrls = uvc_ioctl_s_ext_ctrls,
.vidioc_try_ext_ctrls = uvc_ioctl_try_ext_ctrls,
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index e16464606b14..9f37eaf28ce7 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -115,6 +115,11 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
case 5: /* Invalid unit */
case 6: /* Invalid control */
case 7: /* Invalid Request */
+ /*
+ * The firmware has not properly implemented
+ * the control or there has been a HW error.
+ */
+ return -EIO;
case 8: /* Invalid value within range */
return -EINVAL;
default: /* reserved or unknown */
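
Error codes 5 to 7 now map to -EIO rather than falling through to -EINVAL, so callers can tell a device-side failure from a bad request:

/*
 * Resulting errno mapping for the UVC request error codes above:
 *
 *   5 Invalid unit, 6 Invalid control, 7 Invalid request  -> -EIO
 *   8 Invalid value within range                          -> -EINVAL
 *   reserved or unknown                                   -> default case
 */
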
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index cce5e38133cd..2e5366143b81 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -241,7 +241,7 @@ struct uvc_control_mapping {
struct list_head ev_subs;
u32 id;
- u8 name[32];
+ char *name;
u8 entity[16];
u8 selector;
@@ -476,6 +476,7 @@ struct uvc_video_chain {
struct v4l2_prio_state prio; /* V4L2 priority state */
u32 caps; /* V4L2 chain-wide caps */
+ u8 ctrl_class_bitmap; /* Bitmap of valid classes */
};
struct uvc_stats_frame {
@@ -523,7 +524,7 @@ struct uvc_stats_stream {
unsigned int max_sof; /* Maximum STC.SOF value */
};
-#define UVC_METADATA_BUF_SIZE 1024
+#define UVC_METADATA_BUF_SIZE 10240
/**
* struct uvc_copy_op: Context structure to schedule asynchronous memcpy
@@ -885,21 +886,21 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
int uvc_ctrl_begin(struct uvc_video_chain *chain);
int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
- const struct v4l2_ext_control *xctrls,
- unsigned int xctrls_count);
+ struct v4l2_ext_controls *ctrls);
static inline int uvc_ctrl_commit(struct uvc_fh *handle,
- const struct v4l2_ext_control *xctrls,
- unsigned int xctrls_count)
+ struct v4l2_ext_controls *ctrls)
{
- return __uvc_ctrl_commit(handle, 0, xctrls, xctrls_count);
+ return __uvc_ctrl_commit(handle, 0, ctrls);
}
static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
{
- return __uvc_ctrl_commit(handle, 1, NULL, 0);
+ return __uvc_ctrl_commit(handle, 1, NULL);
}
int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
+int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+ bool read);
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
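
uvc_ctrl_is_accessible() is only declared here; its body lives in uvc_ctrl.c, outside this excerpt. A plausible sketch, consistent with the -EACCES checks added to uvc_ctrl_get() and uvc_ctrl_set() above (the exact flag checks are an assumption):

int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
			   bool read)
{
	struct uvc_control_mapping *mapping;
	struct uvc_control *ctrl;

	/* Control-class IDs are never readable or writable themselves. */
	if (__uvc_query_v4l2_class(chain, v4l2_id, 0) >= 0)
		return -EACCES;

	ctrl = uvc_find_control(chain, v4l2_id, &mapping);
	if (!ctrl)
		return -EINVAL;

	if (read && !(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
		return -EACCES;

	if (!read && !(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
		return -EACCES;

	return 0;
}
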
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index cd9e78c63791..0404267f1ae4 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -24,9 +24,9 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
{
if (!n->ops || !n->ops->bound)
return 0;
@@ -34,9 +34,9 @@ static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
return n->ops->bound(n, subdev, asd);
}
-static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
{
if (!n->ops || !n->ops->unbind)
return;
@@ -44,7 +44,7 @@ static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
n->ops->unbind(n, subdev, asd);
}
-static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
+static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
if (!n->ops || !n->ops->complete)
return 0;
@@ -215,7 +215,7 @@ v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
-v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
while (notifier->parent)
notifier = notifier->parent;
@@ -227,7 +227,7 @@ v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
* Return true if all child sub-device notifiers are complete, false otherwise.
*/
static bool
-v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd;
@@ -239,7 +239,7 @@ v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier &&
- !v4l2_async_notifier_can_complete(subdev_notifier))
+ !v4l2_async_nf_can_complete(subdev_notifier))
return false;
}
@@ -251,7 +251,7 @@ v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
* sub-devices have been bound; v4l2_device is also available then.
*/
static int
-v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
/* Quick check whether there are still more sub-devices here. */
if (!list_empty(&notifier->waiting))
@@ -266,14 +266,14 @@ v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
return 0;
/* Is everything ready? */
- if (!v4l2_async_notifier_can_complete(notifier))
+ if (!v4l2_async_nf_can_complete(notifier))
return 0;
- return v4l2_async_notifier_call_complete(notifier);
+ return v4l2_async_nf_call_complete(notifier);
}
static int
-v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
struct v4l2_device *v4l2_dev,
@@ -287,7 +287,7 @@ static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
if (ret < 0)
return ret;
- ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
+ ret = v4l2_async_nf_call_bound(notifier, sd, asd);
if (ret < 0) {
v4l2_device_unregister_subdev(sd);
return ret;
@@ -315,15 +315,15 @@ static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
*/
subdev_notifier->parent = notifier;
- return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
+ return v4l2_async_nf_try_all_subdevs(subdev_notifier);
}
/* Test all async sub-devices in a notifier for a match. */
static int
-v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
struct v4l2_device *v4l2_dev =
- v4l2_async_notifier_find_v4l2_dev(notifier);
+ v4l2_async_nf_find_v4l2_dev(notifier);
struct v4l2_subdev *sd;
if (!v4l2_dev)
@@ -367,7 +367,7 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd)
/* Unbind all sub-devices in the notifier tree. */
static void
-v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd, *tmp;
@@ -376,9 +376,9 @@ v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier)
- v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
- v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
v4l2_async_cleanup(sd);
list_move(&sd->async_list, &subdev_list);
@@ -389,8 +389,8 @@ v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
/* See if an async sub-device can be found in a notifier's lists. */
static bool
-__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd)
+__v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd)
{
struct v4l2_async_subdev *asd_y;
struct v4l2_subdev *sd;
@@ -416,9 +416,8 @@ __v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
* If @this_index < 0, search the notifier's entire @asd_list.
*/
static bool
-v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd,
- int this_index)
+v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd, int this_index)
{
struct v4l2_async_subdev *asd_y;
int j = 0;
@@ -435,15 +434,15 @@ v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
/* Check that an asd does not exist in other notifiers. */
list_for_each_entry(notifier, &notifier_list, list)
- if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
+ if (__v4l2_async_nf_has_async_subdev(notifier, asd))
return true;
return false;
}
-static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd,
- int this_index)
+static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd,
+ int this_index)
{
struct device *dev =
notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;
@@ -454,8 +453,7 @@ static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
switch (asd->match_type) {
case V4L2_ASYNC_MATCH_I2C:
case V4L2_ASYNC_MATCH_FWNODE:
- if (v4l2_async_notifier_has_async_subdev(notifier, asd,
- this_index)) {
+ if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
return -EEXIST;
}
@@ -469,13 +467,13 @@ static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
return 0;
}
-void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
{
INIT_LIST_HEAD(&notifier->asd_list);
}
-EXPORT_SYMBOL(v4l2_async_notifier_init);
+EXPORT_SYMBOL(v4l2_async_nf_init);
-static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
+static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_subdev *asd;
int ret, i = 0;
@@ -486,18 +484,18 @@ static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
mutex_lock(&list_lock);
list_for_each_entry(asd, &notifier->asd_list, asd_list) {
- ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
+ ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
if (ret)
goto err_unlock;
list_add_tail(&asd->list, &notifier->waiting);
}
- ret = v4l2_async_notifier_try_all_subdevs(notifier);
+ ret = v4l2_async_nf_try_all_subdevs(notifier);
if (ret < 0)
goto err_unbind;
- ret = v4l2_async_notifier_try_complete(notifier);
+ ret = v4l2_async_nf_try_complete(notifier);
if (ret < 0)
goto err_unbind;
@@ -512,7 +510,7 @@ err_unbind:
/*
* On failure, unbind all sub-devices registered through this notifier.
*/
- v4l2_async_notifier_unbind_all_subdevs(notifier);
+ v4l2_async_nf_unbind_all_subdevs(notifier);
err_unlock:
mutex_unlock(&list_lock);
@@ -520,8 +518,8 @@ err_unlock:
return ret;
}
-int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier)
+int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
+ struct v4l2_async_notifier *notifier)
{
int ret;
@@ -530,16 +528,16 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
notifier->v4l2_dev = v4l2_dev;
- ret = __v4l2_async_notifier_register(notifier);
+ ret = __v4l2_async_nf_register(notifier);
if (ret)
notifier->v4l2_dev = NULL;
return ret;
}
-EXPORT_SYMBOL(v4l2_async_notifier_register);
+EXPORT_SYMBOL(v4l2_async_nf_register);
-int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
- struct v4l2_async_notifier *notifier)
+int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
+ struct v4l2_async_notifier *notifier)
{
int ret;
@@ -548,21 +546,21 @@ int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
notifier->sd = sd;
- ret = __v4l2_async_notifier_register(notifier);
+ ret = __v4l2_async_nf_register(notifier);
if (ret)
notifier->sd = NULL;
return ret;
}
-EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);
+EXPORT_SYMBOL(v4l2_async_subdev_nf_register);
static void
-__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
return;
- v4l2_async_notifier_unbind_all_subdevs(notifier);
+ v4l2_async_nf_unbind_all_subdevs(notifier);
notifier->sd = NULL;
notifier->v4l2_dev = NULL;
@@ -570,17 +568,17 @@ __v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
list_del(&notifier->list);
}
-void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
mutex_lock(&list_lock);
- __v4l2_async_notifier_unregister(notifier);
+ __v4l2_async_nf_unregister(notifier);
mutex_unlock(&list_lock);
}
-EXPORT_SYMBOL(v4l2_async_notifier_unregister);
+EXPORT_SYMBOL(v4l2_async_nf_unregister);
-static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_subdev *asd, *tmp;
@@ -601,24 +599,24 @@ static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
}
}
-void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
mutex_lock(&list_lock);
- __v4l2_async_notifier_cleanup(notifier);
+ __v4l2_async_nf_cleanup(notifier);
mutex_unlock(&list_lock);
}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);
+EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);
-int __v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd)
+int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd)
{
int ret;
mutex_lock(&list_lock);
- ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
+ ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
if (ret)
goto unlock;
@@ -628,12 +626,12 @@ unlock:
mutex_unlock(&list_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
- struct fwnode_handle *fwnode,
- unsigned int asd_struct_size)
+__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
int ret;
@@ -645,7 +643,7 @@ __v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
asd->match.fwnode = fwnode_handle_get(fwnode);
- ret = __v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_nf_add_subdev(notifier, asd);
if (ret) {
fwnode_handle_put(fwnode);
kfree(asd);
@@ -654,12 +652,12 @@ __v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
return asd;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
- struct fwnode_handle *endpoint,
- unsigned int asd_struct_size)
+__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
+ struct fwnode_handle *endpoint,
+ unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
struct fwnode_handle *remote;
@@ -668,21 +666,19 @@ __v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif
if (!remote)
return ERR_PTR(-ENOTCONN);
- asd = __v4l2_async_notifier_add_fwnode_subdev(notif, remote,
- asd_struct_size);
+ asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
/*
- * Calling __v4l2_async_notifier_add_fwnode_subdev grabs a refcount,
+ * Calling __v4l2_async_nf_add_fwnode grabs a refcount,
* so drop the one we got in fwnode_graph_get_remote_port_parent.
*/
fwnode_handle_put(remote);
return asd;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_remote_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
- int adapter_id, unsigned short address,
- unsigned int asd_struct_size)
+__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
+ unsigned short address, unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
int ret;
@@ -695,7 +691,7 @@ __v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
asd->match.i2c.adapter_id = adapter_id;
asd->match.i2c.address = address;
- ret = __v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_nf_add_subdev(notifier, asd);
if (ret) {
kfree(asd);
return ERR_PTR(ret);
@@ -703,7 +699,7 @@ __v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
return asd;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_i2c_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
@@ -725,7 +721,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
list_for_each_entry(notifier, &notifier_list, list) {
struct v4l2_device *v4l2_dev =
- v4l2_async_notifier_find_v4l2_dev(notifier);
+ v4l2_async_nf_find_v4l2_dev(notifier);
struct v4l2_async_subdev *asd;
if (!v4l2_dev)
@@ -739,7 +735,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
if (ret)
goto err_unbind;
- ret = v4l2_async_notifier_try_complete(notifier);
+ ret = v4l2_async_nf_try_complete(notifier);
if (ret)
goto err_unbind;
@@ -761,10 +757,10 @@ err_unbind:
*/
subdev_notifier = v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier)
- v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
if (sd->asd)
- v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
v4l2_async_cleanup(sd);
mutex_unlock(&list_lock);
@@ -780,8 +776,8 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
mutex_lock(&list_lock);
- __v4l2_async_notifier_unregister(sd->subdev_notifier);
- __v4l2_async_notifier_cleanup(sd->subdev_notifier);
+ __v4l2_async_nf_unregister(sd->subdev_notifier);
+ __v4l2_async_nf_cleanup(sd->subdev_notifier);
kfree(sd->subdev_notifier);
sd->subdev_notifier = NULL;
@@ -790,7 +786,7 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
list_add(&sd->asd->list, &notifier->waiting);
- v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
}
v4l2_async_cleanup(sd);
@@ -825,7 +821,7 @@ static void print_waiting_subdev(struct seq_file *s,
}
static const char *
-v4l2_async_notifier_name(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
if (notifier->v4l2_dev)
return notifier->v4l2_dev->name;
@@ -843,7 +839,7 @@ static int pending_subdevs_show(struct seq_file *s, void *data)
mutex_lock(&list_lock);
list_for_each_entry(notif, &notifier_list, list) {
- seq_printf(s, "%s:\n", v4l2_async_notifier_name(notif));
+ seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
list_for_each_entry(asd, &notif->waiting, list)
print_waiting_subdev(s, asd);
}
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 04af03285a20..df34b2a283bc 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -275,6 +275,9 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ /* Tiled YUV formats */
+ { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+
/* YUV planar formats, non contiguous variant */
{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
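
From the new table entry, plane sizes for V4L2_PIX_FMT_NV12_4L4 follow the usual NV12 arithmetic; only the in-memory tiling of samples differs:

/*
 * Worked example for a 64x32 frame (bpp = {1, 2}, hdiv = vdiv = 2,
 * one memory plane):
 *
 *   luma   = 64 * 32                 = 2048 bytes
 *   chroma = (64 / 2) * (32 / 2) * 2 = 1024 bytes
 *   total  = 3072 bytes, i.e. 3/2 bytes per pixel as for linear NV12
 */
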
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 47aff3b19742..8176769a89fa 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -126,6 +126,9 @@ struct v4l2_format32 {
* @memory: buffer memory type
* @format: frame format, for which buffers are requested
* @capabilities: capabilities of this buffer type.
+ * @flags: additional buffer management attributes (ignored unless the
+ * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and
+ * is configured for MMAP streaming I/O).
* @reserved: future extensions
*/
struct v4l2_create_buffers32 {
@@ -134,7 +137,8 @@ struct v4l2_create_buffers32 {
__u32 memory; /* enum v4l2_memory */
struct v4l2_format32 format;
__u32 capabilities;
- __u32 reserved[7];
+ __u32 flags;
+ __u32 reserved[6];
};
static int get_v4l2_format32(struct v4l2_format *p64,
@@ -182,6 +186,8 @@ static int get_v4l2_create32(struct v4l2_create_buffers *p64,
if (copy_from_user(p64, p32,
offsetof(struct v4l2_create_buffers32, format)))
return -EFAULT;
+ if (copy_from_user(&p64->flags, &p32->flags, sizeof(p32->flags)))
+ return -EFAULT;
return get_v4l2_format32(&p64->format, &p32->format);
}
@@ -227,6 +233,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers *p64,
if (copy_to_user(p32, p64,
offsetof(struct v4l2_create_buffers32, format)) ||
put_user(p64->capabilities, &p32->capabilities) ||
+ put_user(p64->flags, &p32->flags) ||
copy_to_user(p32->reserved, p64->reserved, sizeof(p64->reserved)))
return -EFAULT;
return put_v4l2_format32(&p64->format, &p32->format);
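
The new flags field takes over the first reserved word, so the 32-bit layout keeps its size; it needs the explicit copies above only because it sits past the offsetof(format) boundary used for the bulk copy:

/*
 * struct v4l2_create_buffers32 layout after this change:
 *
 *   __u32 index, count, memory;    bulk-copied up to offsetof(format)
 *   struct v4l2_format32 format;   converted by get/put_v4l2_format32()
 *   __u32 capabilities;            output only, copied back to user
 *   __u32 flags;                   was reserved[0]; copied both ways
 *   __u32 reserved[6];             shrunk from 7 to preserve the size
 */
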
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index c4b5082849b6..70adfc1b9c81 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -687,6 +687,9 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ break;
+
case V4L2_CTRL_TYPE_AREA:
area = p;
if (!area->width || !area->height)
@@ -1240,6 +1243,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_scaling_matrix);
+ break;
case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params);
break;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
index 421300e13a41..ebe82b6ba6e6 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-defs.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -997,6 +997,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_HEVC_SPS: return "HEVC Sequence Parameter Set";
case V4L2_CID_MPEG_VIDEO_HEVC_PPS: return "HEVC Picture Parameter Set";
case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix";
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters";
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE: return "HEVC Decode Mode";
case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE: return "HEVC Start Code";
@@ -1107,6 +1108,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value";
case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value";
case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value";
+ case V4L2_CID_NOTIFY_GAINS: return "Notify Gains";
/* Image processing controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1490,6 +1492,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX:
+ *type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX;
+ break;
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS;
break;
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 843259c304bb..00457e1e93f6 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -780,11 +780,11 @@ int v4l2_fwnode_device_parse(struct device *dev,
EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse);
static int
-v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
- struct v4l2_async_notifier *notifier,
- struct fwnode_handle *endpoint,
- unsigned int asd_struct_size,
- parse_endpoint_func parse_endpoint)
+v4l2_async_nf_fwnode_parse_endpoint(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *endpoint,
+ unsigned int asd_struct_size,
+ parse_endpoint_func parse_endpoint)
{
struct v4l2_fwnode_endpoint vep = { .bus_type = 0 };
struct v4l2_async_subdev *asd;
@@ -822,7 +822,7 @@ v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
if (ret < 0)
goto out_err;
- ret = __v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_nf_add_subdev(notifier, asd);
if (ret < 0) {
/* not an error if asd already exists */
if (ret == -EEXIST)
@@ -839,13 +839,11 @@ out_err:
return ret == -ENOTCONN ? 0 : ret;
}
-static int
-__v4l2_async_notifier_parse_fwnode_ep(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- unsigned int port,
- bool has_port,
- parse_endpoint_func parse_endpoint)
+int
+v4l2_async_nf_parse_fwnode_endpoints(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ parse_endpoint_func parse_endpoint)
{
struct fwnode_handle *fwnode;
int ret = 0;
@@ -863,22 +861,11 @@ __v4l2_async_notifier_parse_fwnode_ep(struct device *dev,
if (!is_available)
continue;
- if (has_port) {
- struct fwnode_endpoint ep;
-
- ret = fwnode_graph_parse_endpoint(fwnode, &ep);
- if (ret)
- break;
-
- if (ep.port != port)
- continue;
- }
- ret = v4l2_async_notifier_fwnode_parse_endpoint(dev,
- notifier,
- fwnode,
- asd_struct_size,
- parse_endpoint);
+ ret = v4l2_async_nf_fwnode_parse_endpoint(dev, notifier,
+ fwnode,
+ asd_struct_size,
+ parse_endpoint);
if (ret < 0)
break;
}
@@ -887,18 +874,7 @@ __v4l2_async_notifier_parse_fwnode_ep(struct device *dev,
return ret;
}
-
-int
-v4l2_async_notifier_parse_fwnode_endpoints(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint)
-{
- return __v4l2_async_notifier_parse_fwnode_ep(dev, notifier,
- asd_struct_size, 0,
- false, parse_endpoint);
-}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints);
+EXPORT_SYMBOL_GPL(v4l2_async_nf_parse_fwnode_endpoints);
/*
* v4l2_fwnode_reference_parse - parse references for async sub-devices
@@ -942,9 +918,8 @@ static int v4l2_fwnode_reference_parse(struct device *dev,
index++) {
struct v4l2_async_subdev *asd;
- asd = v4l2_async_notifier_add_fwnode_subdev(notifier,
- args.fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(notifier, args.fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(args.fwnode);
if (IS_ERR(asd)) {
/* not an error if asd already exists */
@@ -1243,8 +1218,8 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
index++) {
struct v4l2_async_subdev *asd;
- asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
@@ -1260,7 +1235,7 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
}
/**
- * v4l2_async_notifier_parse_fwnode_sensor - parse common references on
+ * v4l2_async_nf_parse_fwnode_sensor - parse common references on
* sensors for async sub-devices
* @dev: the device node the properties of which are parsed for references
* @notifier: the async notifier where the async subdevs will be added
@@ -1269,7 +1244,7 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
* sensor and set up async sub-devices for them.
*
* Any notifier populated using this function must be released with a call to
- * v4l2_async_notifier_release() after it has been unregistered and the async
+ * v4l2_async_nf_release() after it has been unregistered and the async
* sub-devices are no longer in use, even in the case the function returned an
* error.
*
@@ -1278,8 +1253,8 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
* -EINVAL if property parsing failed
*/
static int
-v4l2_async_notifier_parse_fwnode_sensor(struct device *dev,
- struct v4l2_async_notifier *notifier)
+v4l2_async_nf_parse_fwnode_sensor(struct device *dev,
+ struct v4l2_async_notifier *notifier)
{
static const char * const led_props[] = { "led" };
static const struct v4l2_fwnode_int_props props[] = {
@@ -1320,13 +1295,13 @@ int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
if (!notifier)
return -ENOMEM;
- v4l2_async_notifier_init(notifier);
+ v4l2_async_nf_init(notifier);
- ret = v4l2_async_notifier_parse_fwnode_sensor(sd->dev, notifier);
+ ret = v4l2_async_nf_parse_fwnode_sensor(sd->dev, notifier);
if (ret < 0)
goto out_cleanup;
- ret = v4l2_async_subdev_notifier_register(sd, notifier);
+ ret = v4l2_async_subdev_nf_register(sd, notifier);
if (ret < 0)
goto out_cleanup;
@@ -1339,10 +1314,10 @@ int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
return 0;
out_unregister:
- v4l2_async_notifier_unregister(notifier);
+ v4l2_async_nf_unregister(notifier);
out_cleanup:
- v4l2_async_notifier_cleanup(notifier);
+ v4l2_async_nf_cleanup(notifier);
kfree(notifier);
return ret;
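
With the rename done, a bridge driver using the shortened API would look roughly as below; demo_dev, its fields, and demo_async_ops are assumptions for illustration, not taken from this patch:

static int demo_parse_sensor(struct demo_dev *demo)
{
	struct v4l2_async_subdev *asd;
	int ret;

	v4l2_async_nf_init(&demo->notifier);

	asd = v4l2_async_nf_add_fwnode(&demo->notifier, demo->sensor_fwnode,
				       struct v4l2_async_subdev);
	if (IS_ERR(asd))
		return PTR_ERR(asd);

	demo->notifier.ops = &demo_async_ops;

	ret = v4l2_async_nf_register(&demo->v4l2_dev, &demo->notifier);
	if (ret)
		v4l2_async_nf_cleanup(&demo->notifier);

	return ret;
}
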
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 05d5db3d85e5..31d0109ce5a8 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -869,7 +869,7 @@ static void v4l_print_default(const void *arg, bool write_only)
pr_cont("driver-specific ioctl\n");
}
-static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
+static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl)
{
__u32 i;
@@ -878,23 +878,41 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
for (i = 0; i < c->count; i++)
c->controls[i].reserved2[0] = 0;
- /* V4L2_CID_PRIVATE_BASE cannot be used as control class
- when using extended controls.
- Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
- is it allowed for backwards compatibility.
- */
- if (!allow_priv && c->which == V4L2_CID_PRIVATE_BASE)
- return 0;
- if (!c->which)
- return 1;
+ switch (c->which) {
+ case V4L2_CID_PRIVATE_BASE:
+ /*
+ * V4L2_CID_PRIVATE_BASE cannot be used as control class
+ * when using extended controls.
+ * Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
+ * is it allowed for backwards compatibility.
+ */
+ if (ioctl == VIDIOC_G_CTRL || ioctl == VIDIOC_S_CTRL)
+ return false;
+ break;
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ /* Default value cannot be changed */
+ if (ioctl == VIDIOC_S_EXT_CTRLS ||
+ ioctl == VIDIOC_TRY_EXT_CTRLS) {
+ c->error_idx = c->count;
+ return false;
+ }
+ return true;
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ return true;
+ case V4L2_CTRL_WHICH_REQUEST_VAL:
+ c->error_idx = c->count;
+ return false;
+ }
+
/* Check that all controls are from the same control class. */
for (i = 0; i < c->count; i++) {
if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) {
- c->error_idx = i;
- return 0;
+ c->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i :
+ c->count;
+ return false;
}
}
- return 1;
+ return true;
}
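
The switch replaces the old ad-hoc checks with one validation path per "which" value:

/*
 * Validation summary for the switch above:
 *
 *   V4L2_CTRL_WHICH_DEF_VAL      read-only; rejected for S/TRY_EXT_CTRLS
 *                                with error_idx = count
 *   V4L2_CTRL_WHICH_CUR_VAL      always accepted
 *   V4L2_CTRL_WHICH_REQUEST_VAL  rejected here (requests unsupported)
 *   control-class values         every control must belong to that class;
 *                                a mismatch sets error_idx to i for
 *                                VIDIOC_TRY_EXT_CTRLS, count otherwise
 */
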
static int check_fmt(struct file *file, enum v4l2_buf_type type)
@@ -1274,7 +1292,6 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break;
case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break;
case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
- case V4L2_PIX_FMT_HM12: descr = "YUV 4:2:0 (16x16 Macroblocks)"; break;
case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break;
case V4L2_PIX_FMT_NV12: descr = "Y/CbCr 4:2:0"; break;
case V4L2_PIX_FMT_NV21: descr = "Y/CrCb 4:2:0"; break;
@@ -1282,6 +1299,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_NV61: descr = "Y/CrCb 4:2:2"; break;
case V4L2_PIX_FMT_NV24: descr = "Y/CbCr 4:4:4"; break;
case V4L2_PIX_FMT_NV42: descr = "Y/CrCb 4:4:4"; break;
+ case V4L2_PIX_FMT_NV12_4L4: descr = "Y/CbCr 4:2:0 (4x4 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_16L16: descr = "Y/CbCr 4:2:0 (16x16 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_32L32: descr = "Y/CbCr 4:2:0 (32x32 Linear)"; break;
case V4L2_PIX_FMT_NV12M: descr = "Y/CbCr 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV21M: descr = "Y/CrCb 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV16M: descr = "Y/CbCr 4:2:2 (N-C)"; break;
@@ -1346,6 +1366,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break;
case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break;
case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break;
+ case V4L2_PIX_FMT_MM21: descr = "Mediatek 8-bit Block Format"; break;
case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break;
case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break;
case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break;
@@ -1415,7 +1436,6 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break;
case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
- case V4L2_PIX_FMT_SUNXI_TILED_NV12: descr = "Sunxi Tiled NV12 Format"; break;
default:
if (fmt->description[0])
return;
@@ -2004,7 +2024,7 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(p, capabilities);
+ CLEAR_AFTER_FIELD(p, flags);
return ops->vidioc_reqbufs(file, fh, p);
}
@@ -2045,7 +2065,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(create, capabilities);
+ CLEAR_AFTER_FIELD(create, flags);
v4l_sanitize_format(&create->format);
@@ -2187,7 +2207,7 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
ctrls.controls = &ctrl;
ctrl.id = p->id;
ctrl.value = p->value;
- if (check_ext_ctrls(&ctrls, 1)) {
+ if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) {
int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
if (ret == 0)
@@ -2206,6 +2226,7 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
+ int ret;
if (vfh && vfh->ctrl_handler)
return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
@@ -2221,9 +2242,11 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
ctrls.controls = &ctrl;
ctrl.id = p->id;
ctrl.value = p->value;
- if (check_ext_ctrls(&ctrls, 1))
- return ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
- return -EINVAL;
+ if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL))
+ return -EINVAL;
+ ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
+ p->value = ctrl.value;
+ return ret;
}
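
Copying ctrl.value back into p->value makes VIDIOC_S_CTRL report the value the driver actually applied, which may be clamped or rounded. A user-space fragment; fd and the rounding behaviour are hypothetical:

struct v4l2_control c = { .id = V4L2_CID_BRIGHTNESS, .value = 130 };

if (ioctl(fd, VIDIOC_S_CTRL, &c) == 0)
	/* e.g. 128 on a driver that rounds to multiples of 4 */
	printf("applied: %d\n", c.value);
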
static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
@@ -2243,8 +2266,8 @@ static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ?
+ ops->vidioc_g_ext_ctrls(file, fh, p) : -EINVAL;
}
static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
@@ -2264,8 +2287,8 @@ static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ?
+ ops->vidioc_s_ext_ctrls(file, fh, p) : -EINVAL;
}
static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
@@ -2285,8 +2308,8 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ?
+ ops->vidioc_try_ext_ctrls(file, fh, p) : -EINVAL;
}
/*
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 72c0df129d5c..30bff6cb1b8d 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -55,8 +55,8 @@ config ATMEL_EBI
SRAMs, ATA devices, etc.
config BRCMSTB_DPFE
- bool "Broadcom STB DPFE driver" if COMPILE_TEST
- default y if ARCH_BRCMSTB
+ tristate "Broadcom STB DPFE driver"
+ default ARCH_BRCMSTB
depends on ARCH_BRCMSTB || COMPILE_TEST
help
This driver provides access to the DPFE interface of Broadcom
@@ -210,6 +210,7 @@ config RENESAS_RPCIF
tristate "Renesas RPC-IF driver"
depends on ARCH_RENESAS || COMPILE_TEST
select REGMAP_MMIO
+ select RESET_CONTROLLER
help
This supports Renesas R-Car Gen3 or RZ/G2 RPC-IF which provides
either SPI host or HyperFlash. You'll have to select individual
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index d062c2f8250f..75a8c38df939 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -263,7 +263,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev);
if (ret < 0)
- goto err;
+ goto err_unmap_nandirq;
init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait);
@@ -272,7 +272,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
if (ret != 0) {
dev_err(&dev->dev, "failed to install irq (%d)\n",
fsl_ifc_ctrl_dev->irq);
- goto err_irq;
+ goto err_unmap_nandirq;
}
if (fsl_ifc_ctrl_dev->nand_irq) {
@@ -281,17 +281,16 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
if (ret != 0) {
dev_err(&dev->dev, "failed to install irq (%d)\n",
fsl_ifc_ctrl_dev->nand_irq);
- goto err_nandirq;
+ goto err_free_irq;
}
}
return 0;
-err_nandirq:
- free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev);
- irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
-err_irq:
+err_free_irq:
free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
+err_unmap_nandirq:
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
err:
iounmap(fsl_ifc_ctrl_dev->gregs);
diff --git a/drivers/memory/jedec_ddr.h b/drivers/memory/jedec_ddr.h
index e59ccbd982d0..6cd508478b14 100644
--- a/drivers/memory/jedec_ddr.h
+++ b/drivers/memory/jedec_ddr.h
@@ -112,6 +112,26 @@
#define NUM_DDR_ADDR_TABLE_ENTRIES 11
#define NUM_DDR_TIMING_TABLE_ENTRIES 4
+#define LPDDR2_MANID_SAMSUNG 1
+#define LPDDR2_MANID_QIMONDA 2
+#define LPDDR2_MANID_ELPIDA 3
+#define LPDDR2_MANID_ETRON 4
+#define LPDDR2_MANID_NANYA 5
+#define LPDDR2_MANID_HYNIX 6
+#define LPDDR2_MANID_MOSEL 7
+#define LPDDR2_MANID_WINBOND 8
+#define LPDDR2_MANID_ESMT 9
+#define LPDDR2_MANID_SPANSION 11
+#define LPDDR2_MANID_SST 12
+#define LPDDR2_MANID_ZMOS 13
+#define LPDDR2_MANID_INTEL 14
+#define LPDDR2_MANID_NUMONYX 254
+#define LPDDR2_MANID_MICRON 255
+
+#define LPDDR2_TYPE_S4 0
+#define LPDDR2_TYPE_S2 1
+#define LPDDR2_TYPE_NVM 2
+
/* Structure for DDR addressing info from the JEDEC spec */
struct lpddr2_addressing {
u32 num_banks;
@@ -170,6 +190,33 @@ extern const struct lpddr2_timings
lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES];
extern const struct lpddr2_min_tck lpddr2_jedec_min_tck;
+/* Structure of MR8 */
+union lpddr2_basic_config4 {
+ u32 value;
+
+ struct {
+ unsigned int arch_type : 2;
+ unsigned int density : 4;
+ unsigned int io_width : 2;
+ } __packed;
+};
+
+/*
+ * Structure for information about an LPDDR2 chip. All parameters
+ * match the raw values of the standard mode register bitfields, or
+ * are set to -ENOENT if the info is unavailable.
+ */
+struct lpddr2_info {
+ int arch_type;
+ int density;
+ int io_width;
+ int manufacturer_id;
+ int revision_id1;
+ int revision_id2;
+};
+
+const char *lpddr2_jedec_manufacturer(unsigned int manufacturer_id);
+
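
A hedged usage sketch for the MR8 union and the manufacturer helper declared above; reading the mode registers from hardware is the memory-controller driver's job and is assumed here:

static void demo_fill_lpddr2_info(u32 mr5, u32 mr8, struct lpddr2_info *info)
{
	union lpddr2_basic_config4 cfg4 = { .value = mr8 };

	info->arch_type = cfg4.arch_type;	/* LPDDR2_TYPE_S4/S2/NVM */
	info->density = cfg4.density;		/* raw JESD209-2 encoding */
	info->io_width = cfg4.io_width;
	info->manufacturer_id = mr5;

	pr_info("LPDDR2 chip from %s\n", lpddr2_jedec_manufacturer(mr5));
}
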
/*
* Structure for timings for LPDDR3 based on LPDDR2 plus additional fields.
* All parameters are in pico seconds(ps) excluding max_freq, min_freq which
diff --git a/drivers/memory/jedec_ddr_data.c b/drivers/memory/jedec_ddr_data.c
index ed601d813175..2cca4fa188f9 100644
--- a/drivers/memory/jedec_ddr_data.c
+++ b/drivers/memory/jedec_ddr_data.c
@@ -131,3 +131,44 @@ const struct lpddr2_min_tck lpddr2_jedec_min_tck = {
.tFAW = 8
};
EXPORT_SYMBOL_GPL(lpddr2_jedec_min_tck);
+
+const char *lpddr2_jedec_manufacturer(unsigned int manufacturer_id)
+{
+ switch (manufacturer_id) {
+ case LPDDR2_MANID_SAMSUNG:
+ return "Samsung";
+ case LPDDR2_MANID_QIMONDA:
+ return "Qimonda";
+ case LPDDR2_MANID_ELPIDA:
+ return "Elpida";
+ case LPDDR2_MANID_ETRON:
+ return "Etron";
+ case LPDDR2_MANID_NANYA:
+ return "Nanya";
+ case LPDDR2_MANID_HYNIX:
+ return "Hynix";
+ case LPDDR2_MANID_MOSEL:
+ return "Mosel";
+ case LPDDR2_MANID_WINBOND:
+ return "Winbond";
+ case LPDDR2_MANID_ESMT:
+ return "ESMT";
+ case LPDDR2_MANID_SPANSION:
+ return "Spansion";
+ case LPDDR2_MANID_SST:
+ return "SST";
+ case LPDDR2_MANID_ZMOS:
+ return "ZMOS";
+ case LPDDR2_MANID_INTEL:
+ return "Intel";
+ case LPDDR2_MANID_NUMONYX:
+ return "Numonyx";
+ case LPDDR2_MANID_MICRON:
+ return "Micron";
+ default:
+ break;
+ }
+
+ return "invalid";
+}
+EXPORT_SYMBOL_GPL(lpddr2_jedec_manufacturer);
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index c5fb51f73b34..b883dcc0bbfa 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -17,13 +17,33 @@
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <dt-bindings/memory/mtk-memory-port.h>
-/* mt8173 */
-#define SMI_LARB_MMU_EN 0xf00
+/* SMI COMMON */
+#define SMI_L1LEN 0x100
-/* mt8167 */
-#define MT8167_SMI_LARB_MMU_EN 0xfc0
+#define SMI_BUS_SEL 0x220
+#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
+/* All default to MMU0. Only specialize mmu1 here. */
+#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+
+#define SMI_M4U_TH 0x234
+#define SMI_FIFO_TH1 0x238
+#define SMI_FIFO_TH2 0x23c
+#define SMI_DCM 0x300
+#define SMI_DUMMY 0x444
-/* mt2701 */
+/* SMI LARB */
+#define SMI_LARB_CMD_THRT_CON 0x24
+#define SMI_LARB_THRT_RD_NU_LMT_MSK GENMASK(7, 4)
+#define SMI_LARB_THRT_RD_NU_LMT (5 << 4)
+
+#define SMI_LARB_SW_FLAG 0x40
+#define SMI_LARB_SW_FLAG_1 0x1
+
+#define SMI_LARB_OSTDL_PORT 0x200
+#define SMI_LARB_OSTDL_PORTx(id) (SMI_LARB_OSTDL_PORT + (((id) & 0x1f) << 2))
+
+/* The mmu enable registers below differ between SoCs */
+/* gen1: mt2701 */
#define REG_SMI_SECUR_CON_BASE 0x5c0
/* every register control 8 port, register offset 0x4 */
@@ -41,99 +61,94 @@
/* mt2701 domain should be set to 3 */
#define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1))
-/* mt2712 */
-#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
-#define F_MMU_EN BIT(0)
-#define BANK_SEL(id) ({ \
+/* gen2: */
+/* mt8167 */
+#define MT8167_SMI_LARB_MMU_EN 0xfc0
+
+/* mt8173 */
+#define MT8173_SMI_LARB_MMU_EN 0xf00
+
+/* general */
+#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
+#define F_MMU_EN BIT(0)
+#define BANK_SEL(id) ({ \
u32 _id = (id) & 0x3; \
(_id << 8 | _id << 10 | _id << 12 | _id << 14); \
})
-/* SMI COMMON */
-#define SMI_BUS_SEL 0x220
-#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
-/* All are MMU0 defaultly. Only specialize mmu1 here. */
-#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+#define SMI_COMMON_INIT_REGS_NR 6
+#define SMI_LARB_PORT_NR_MAX 32
+
+#define MTK_SMI_FLAG_THRT_UPDATE BIT(0)
+#define MTK_SMI_FLAG_SW_FLAG BIT(1)
+#define MTK_SMI_CAPS(flags, _x) (!!((flags) & (_x)))
+
+struct mtk_smi_reg_pair {
+ unsigned int offset;
+ u32 value;
+};
-enum mtk_smi_gen {
+enum mtk_smi_type {
MTK_SMI_GEN1,
- MTK_SMI_GEN2
+ MTK_SMI_GEN2, /* gen2 smi common */
+ MTK_SMI_GEN2_SUB_COMM, /* gen2 smi sub common */
};
+#define MTK_SMI_CLK_NR_MAX 4
+
+/* larbs: Require apb/smi clocks while gals is optional. */
+static const char * const mtk_smi_larb_clks[] = {"apb", "smi", "gals"};
+#define MTK_SMI_LARB_REQ_CLK_NR 2
+#define MTK_SMI_LARB_OPT_CLK_NR 1
+
+/*
+ * common: Requires all four clocks in the has_gals case; otherwise only apb/smi are required.
+ * sub common: Requires the apb/smi/gals0 clocks in the has_gals case; otherwise only apb/smi are required.
+ */
+static const char * const mtk_smi_common_clks[] = {"apb", "smi", "gals0", "gals1"};
+#define MTK_SMI_COM_REQ_CLK_NR 2
+#define MTK_SMI_COM_GALS_REQ_CLK_NR MTK_SMI_CLK_NR_MAX
+#define MTK_SMI_SUB_COM_GALS_REQ_CLK_NR 3
+
struct mtk_smi_common_plat {
- enum mtk_smi_gen gen;
- bool has_gals;
- u32 bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
+ enum mtk_smi_type type;
+ bool has_gals;
+ u32 bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
+
+ const struct mtk_smi_reg_pair *init;
};
struct mtk_smi_larb_gen {
int port_in_larb[MTK_LARB_NR_MAX + 1];
void (*config_port)(struct device *dev);
unsigned int larb_direct_to_common_mask;
- bool has_gals;
+ unsigned int flags_general;
+ const u8 (*ostd)[SMI_LARB_PORT_NR_MAX];
};
struct mtk_smi {
struct device *dev;
- struct clk *clk_apb, *clk_smi;
- struct clk *clk_gals0, *clk_gals1;
+ unsigned int clk_num;
+ struct clk_bulk_data clks[MTK_SMI_CLK_NR_MAX];
struct clk *clk_async; /*only needed by mt2701*/
union {
void __iomem *smi_ao_base; /* only for gen1 */
void __iomem *base; /* only for gen2 */
};
+ struct device *smi_common_dev; /* for sub common */
const struct mtk_smi_common_plat *plat;
};
struct mtk_smi_larb { /* larb: local arbiter */
struct mtk_smi smi;
void __iomem *base;
- struct device *smi_common_dev;
+ struct device *smi_common_dev; /* common or sub-common dev */
const struct mtk_smi_larb_gen *larb_gen;
int larbid;
u32 *mmu;
unsigned char *bank;
};
-static int mtk_smi_clk_enable(const struct mtk_smi *smi)
-{
- int ret;
-
- ret = clk_prepare_enable(smi->clk_apb);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(smi->clk_smi);
- if (ret)
- goto err_disable_apb;
-
- ret = clk_prepare_enable(smi->clk_gals0);
- if (ret)
- goto err_disable_smi;
-
- ret = clk_prepare_enable(smi->clk_gals1);
- if (ret)
- goto err_disable_gals0;
-
- return 0;
-
-err_disable_gals0:
- clk_disable_unprepare(smi->clk_gals0);
-err_disable_smi:
- clk_disable_unprepare(smi->clk_smi);
-err_disable_apb:
- clk_disable_unprepare(smi->clk_apb);
- return ret;
-}
-
-static void mtk_smi_clk_disable(const struct mtk_smi *smi)
-{
- clk_disable_unprepare(smi->clk_gals1);
- clk_disable_unprepare(smi->clk_gals0);
- clk_disable_unprepare(smi->clk_smi);
- clk_disable_unprepare(smi->clk_apb);
-}
-
int mtk_smi_larb_get(struct device *larbdev)
{
int ret = pm_runtime_resume_and_get(larbdev);
@@ -166,36 +181,16 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
-static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
-{
- struct mtk_smi_larb *larb = dev_get_drvdata(dev);
- u32 reg;
- int i;
-
- if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
- return;
-
- for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
- reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
- reg |= F_MMU_EN;
- reg |= BANK_SEL(larb->bank[i]);
- writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
- }
-}
-
-static void mtk_smi_larb_config_port_mt8173(struct device *dev)
+static void
+mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
{
- struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-
- writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
+ /* Do nothing as the iommu is always enabled. */
}
-static void mtk_smi_larb_config_port_mt8167(struct device *dev)
-{
- struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-
- writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
-}
+static const struct component_ops mtk_smi_larb_component_ops = {
+ .bind = mtk_smi_larb_bind,
+ .unbind = mtk_smi_larb_unbind,
+};
static void mtk_smi_larb_config_port_gen1(struct device *dev)
{
@@ -228,25 +223,94 @@ static void mtk_smi_larb_config_port_gen1(struct device *dev)
}
}
-static void
-mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
+static void mtk_smi_larb_config_port_mt8167(struct device *dev)
{
- /* Do nothing as the iommu is always enabled. */
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+ writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
}
-static const struct component_ops mtk_smi_larb_component_ops = {
- .bind = mtk_smi_larb_bind,
- .unbind = mtk_smi_larb_unbind,
-};
+static void mtk_smi_larb_config_port_mt8173(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
- /* mt8173 do not need the port in larb */
- .config_port = mtk_smi_larb_config_port_mt8173,
-};
+ writel(*larb->mmu, larb->base + MT8173_SMI_LARB_MMU_EN);
+}
-static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
- /* mt8167 do not need the port in larb */
- .config_port = mtk_smi_larb_config_port_mt8167,
+static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ u32 reg, flags_general = larb->larb_gen->flags_general;
+ const u8 *larbostd = larb->larb_gen->ostd[larb->larbid];
+ int i;
+
+ if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
+ return;
+
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_THRT_UPDATE)) {
+ reg = readl_relaxed(larb->base + SMI_LARB_CMD_THRT_CON);
+ reg &= ~SMI_LARB_THRT_RD_NU_LMT_MSK;
+ reg |= SMI_LARB_THRT_RD_NU_LMT;
+ writel_relaxed(reg, larb->base + SMI_LARB_CMD_THRT_CON);
+ }
+
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_SW_FLAG))
+ writel_relaxed(SMI_LARB_SW_FLAG_1, larb->base + SMI_LARB_SW_FLAG);
+
+ for (i = 0; i < SMI_LARB_PORT_NR_MAX && larbostd && !!larbostd[i]; i++)
+ writel_relaxed(larbostd[i], larb->base + SMI_LARB_OSTDL_PORTx(i));
+
+ for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
+ reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
+ reg |= F_MMU_EN;
+ reg |= BANK_SEL(larb->bank[i]);
+ writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
+ }
+}
+
+static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */
+ [1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */
+ [2] = {0x12, 0x12, 0x12, 0x12, 0x0a,}, /* ... */
+ [3] = {0x12, 0x12, 0x12, 0x12, 0x28, 0x28, 0x0a,},
+ [4] = {0x06, 0x01, 0x17, 0x06, 0x0a,},
+ [5] = {0x06, 0x01, 0x17, 0x06, 0x06, 0x01, 0x06, 0x0a,},
+ [6] = {0x06, 0x01, 0x06, 0x0a,},
+ [7] = {0x0c, 0x0c, 0x12,},
+ [8] = {0x0c, 0x0c, 0x12,},
+ [9] = {0x0a, 0x08, 0x04, 0x06, 0x01, 0x01, 0x10, 0x18, 0x11, 0x0a,
+ 0x08, 0x04, 0x11, 0x06, 0x02, 0x06, 0x01, 0x11, 0x11, 0x06,},
+ [10] = {0x18, 0x08, 0x01, 0x01, 0x20, 0x12, 0x18, 0x06, 0x05, 0x10,
+ 0x08, 0x08, 0x10, 0x08, 0x08, 0x18, 0x0c, 0x09, 0x0b, 0x0d,
+ 0x0d, 0x06, 0x10, 0x10,},
+ [11] = {0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x01, 0x01, 0x01, 0x01,},
+ [12] = {0x09, 0x09, 0x05, 0x05, 0x0c, 0x18, 0x02, 0x02, 0x04, 0x02,},
+ [13] = {0x02, 0x02, 0x12, 0x12, 0x02, 0x02, 0x02, 0x02, 0x08, 0x01,},
+ [14] = {0x12, 0x12, 0x02, 0x02, 0x02, 0x02, 0x16, 0x01, 0x16, 0x01,
+ 0x01, 0x02, 0x02, 0x08, 0x02,},
+ [15] = {},
+ [16] = {0x28, 0x02, 0x02, 0x12, 0x02, 0x12, 0x10, 0x02, 0x02, 0x0a,
+ 0x12, 0x02, 0x0a, 0x16, 0x02, 0x04,},
+ [17] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+ [18] = {0x12, 0x06, 0x12, 0x06,},
+ [19] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [20] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [21] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,},
+ [22] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,},
+ [23] = {0x18, 0x01,},
+ [24] = {0x01, 0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x01,
+ 0x01, 0x01,},
+ [25] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [26] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [27] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [28] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
@@ -269,8 +333,17 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = {
/* DUMMY | IPU0 | IPU1 | CCU | MDLA */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
+ /* mt8167 does not need the port in larb */
+ .config_port = mtk_smi_larb_config_port_mt8167,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
+ /* mt8173 does not need the port in larb */
+ .config_port = mtk_smi_larb_config_port_mt8173,
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
- .has_gals = true,
.config_port = mtk_smi_larb_config_port_gen2_general,
.larb_direct_to_common_mask = BIT(2) | BIT(3) | BIT(7),
/* IPU0 | IPU1 | CCU */
@@ -280,99 +353,114 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG,
+ .ostd = mtk_smi_larb_mt8195_ostd,
+};
+
static const struct of_device_id mtk_smi_larb_of_ids[] = {
- {
- .compatible = "mediatek,mt8167-smi-larb",
- .data = &mtk_smi_larb_mt8167
- },
- {
- .compatible = "mediatek,mt8173-smi-larb",
- .data = &mtk_smi_larb_mt8173
- },
- {
- .compatible = "mediatek,mt2701-smi-larb",
- .data = &mtk_smi_larb_mt2701
- },
- {
- .compatible = "mediatek,mt2712-smi-larb",
- .data = &mtk_smi_larb_mt2712
- },
- {
- .compatible = "mediatek,mt6779-smi-larb",
- .data = &mtk_smi_larb_mt6779
- },
- {
- .compatible = "mediatek,mt8183-smi-larb",
- .data = &mtk_smi_larb_mt8183
- },
- {
- .compatible = "mediatek,mt8192-smi-larb",
- .data = &mtk_smi_larb_mt8192
- },
+ {.compatible = "mediatek,mt2701-smi-larb", .data = &mtk_smi_larb_mt2701},
+ {.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712},
+ {.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779},
+ {.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167},
+ {.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
+ {.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
+ {.compatible = "mediatek,mt8192-smi-larb", .data = &mtk_smi_larb_mt8192},
+ {.compatible = "mediatek,mt8195-smi-larb", .data = &mtk_smi_larb_mt8195},
{}
};
-static int mtk_smi_larb_probe(struct platform_device *pdev)
+static int mtk_smi_device_link_common(struct device *dev, struct device **com_dev)
{
- struct mtk_smi_larb *larb;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct device_node *smi_node;
- struct platform_device *smi_pdev;
+ struct platform_device *smi_com_pdev;
+ struct device_node *smi_com_node;
+ struct device *smi_com_dev;
struct device_link *link;
- larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
- if (!larb)
- return -ENOMEM;
-
- larb->larb_gen = of_device_get_match_data(dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- larb->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(larb->base))
- return PTR_ERR(larb->base);
-
- larb->smi.clk_apb = devm_clk_get(dev, "apb");
- if (IS_ERR(larb->smi.clk_apb))
- return PTR_ERR(larb->smi.clk_apb);
-
- larb->smi.clk_smi = devm_clk_get(dev, "smi");
- if (IS_ERR(larb->smi.clk_smi))
- return PTR_ERR(larb->smi.clk_smi);
-
- if (larb->larb_gen->has_gals) {
- /* The larbs may still haven't gals even if the SoC support.*/
- larb->smi.clk_gals0 = devm_clk_get(dev, "gals");
- if (PTR_ERR(larb->smi.clk_gals0) == -ENOENT)
- larb->smi.clk_gals0 = NULL;
- else if (IS_ERR(larb->smi.clk_gals0))
- return PTR_ERR(larb->smi.clk_gals0);
- }
- larb->smi.dev = dev;
-
- smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
- if (!smi_node)
+ smi_com_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
+ if (!smi_com_node)
return -EINVAL;
- smi_pdev = of_find_device_by_node(smi_node);
- of_node_put(smi_node);
- if (smi_pdev) {
- if (!platform_get_drvdata(smi_pdev))
+ smi_com_pdev = of_find_device_by_node(smi_com_node);
+ of_node_put(smi_com_node);
+ if (smi_com_pdev) {
+ /* smi-common is the supplier; make sure it is ready before linking */
+ if (!platform_get_drvdata(smi_com_pdev))
return -EPROBE_DEFER;
- larb->smi_common_dev = &smi_pdev->dev;
- link = device_link_add(dev, larb->smi_common_dev,
+ smi_com_dev = &smi_com_pdev->dev;
+ link = device_link_add(dev, smi_com_dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
if (!link) {
dev_err(dev, "Unable to link smi-common dev\n");
return -ENODEV;
}
+ *com_dev = smi_com_dev;
} else {
dev_err(dev, "Failed to get the smi_common device\n");
return -EINVAL;
}
+ return 0;
+}
+
+static int mtk_smi_dts_clk_init(struct device *dev, struct mtk_smi *smi,
+ const char * const clks[],
+ unsigned int clk_nr_required,
+ unsigned int clk_nr_optional)
+{
+ int i, ret;
+
+ for (i = 0; i < clk_nr_required; i++)
+ smi->clks[i].id = clks[i];
+ ret = devm_clk_bulk_get(dev, clk_nr_required, smi->clks);
+ if (ret)
+ return ret;
+
+ for (i = clk_nr_required; i < clk_nr_required + clk_nr_optional; i++)
+ smi->clks[i].id = clks[i];
+ ret = devm_clk_bulk_get_optional(dev, clk_nr_optional,
+ smi->clks + clk_nr_required);
+ smi->clk_num = clk_nr_required + clk_nr_optional;
+ return ret;
+}
+
+static int mtk_smi_larb_probe(struct platform_device *pdev)
+{
+ struct mtk_smi_larb *larb;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
+ if (!larb)
+ return -ENOMEM;
+
+ larb->larb_gen = of_device_get_match_data(dev);
+ larb->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(larb->base))
+ return PTR_ERR(larb->base);
+
+ ret = mtk_smi_dts_clk_init(dev, &larb->smi, mtk_smi_larb_clks,
+ MTK_SMI_LARB_REQ_CLK_NR, MTK_SMI_LARB_OPT_CLK_NR);
+ if (ret)
+ return ret;
+
+ larb->smi.dev = dev;
+
+ ret = mtk_smi_device_link_common(dev, &larb->smi_common_dev);
+ if (ret < 0)
+ return ret;
pm_runtime_enable(dev);
platform_set_drvdata(pdev, larb);
- return component_add(dev, &mtk_smi_larb_component_ops);
+ ret = component_add(dev, &mtk_smi_larb_component_ops);
+ if (ret)
+ goto err_pm_disable;
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+ device_link_remove(dev, larb->smi_common_dev);
+ return ret;
}
static int mtk_smi_larb_remove(struct platform_device *pdev)
@@ -391,11 +479,9 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
int ret;
- ret = mtk_smi_clk_enable(&larb->smi);
- if (ret < 0) {
- dev_err(dev, "Failed to enable clock(%d).\n", ret);
+ ret = clk_bulk_prepare_enable(larb->smi.clk_num, larb->smi.clks);
+ if (ret < 0)
return ret;
- }
/* Configure the basic setting for this larb */
larb_gen->config_port(dev);
@@ -407,7 +493,7 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
- mtk_smi_clk_disable(&larb->smi);
+ clk_bulk_disable_unprepare(larb->smi.clk_num, larb->smi.clks);
return 0;
}
@@ -427,64 +513,75 @@ static struct platform_driver mtk_smi_larb_driver = {
}
};
+static const struct mtk_smi_reg_pair mtk_smi_common_mt8195_init[SMI_COMMON_INIT_REGS_NR] = {
+ {SMI_L1LEN, 0xb},
+ {SMI_M4U_TH, 0xe100e10},
+ {SMI_FIFO_TH1, 0x506090a},
+ {SMI_FIFO_TH2, 0x506090a},
+ {SMI_DCM, 0x4f1},
+ {SMI_DUMMY, 0x1},
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_gen1 = {
- .gen = MTK_SMI_GEN1,
+ .type = MTK_SMI_GEN1,
};
static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
- .gen = MTK_SMI_GEN2,
+ .type = MTK_SMI_GEN2,
};
static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = {
- .gen = MTK_SMI_GEN2,
- .has_gals = true,
- .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
- F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
+ F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
};
static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
- .gen = MTK_SMI_GEN2,
+ .type = MTK_SMI_GEN2,
.has_gals = true,
.bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
F_MMU1_LARB(7),
};
static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = {
- .gen = MTK_SMI_GEN2,
+ .type = MTK_SMI_GEN2,
.has_gals = true,
.bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
F_MMU1_LARB(6),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt8195_vdo = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(3) | F_MMU1_LARB(5) |
+ F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8195_vpp = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_sub_common_mt8195 = {
+ .type = MTK_SMI_GEN2_SUB_COMM,
+ .has_gals = true,
+};
+
static const struct of_device_id mtk_smi_common_of_ids[] = {
- {
- .compatible = "mediatek,mt8173-smi-common",
- .data = &mtk_smi_common_gen2,
- },
- {
- .compatible = "mediatek,mt8167-smi-common",
- .data = &mtk_smi_common_gen2,
- },
- {
- .compatible = "mediatek,mt2701-smi-common",
- .data = &mtk_smi_common_gen1,
- },
- {
- .compatible = "mediatek,mt2712-smi-common",
- .data = &mtk_smi_common_gen2,
- },
- {
- .compatible = "mediatek,mt6779-smi-common",
- .data = &mtk_smi_common_mt6779,
- },
- {
- .compatible = "mediatek,mt8183-smi-common",
- .data = &mtk_smi_common_mt8183,
- },
- {
- .compatible = "mediatek,mt8192-smi-common",
- .data = &mtk_smi_common_mt8192,
- },
+ {.compatible = "mediatek,mt2701-smi-common", .data = &mtk_smi_common_gen1},
+ {.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779},
+ {.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
+ {.compatible = "mediatek,mt8192-smi-common", .data = &mtk_smi_common_mt8192},
+ {.compatible = "mediatek,mt8195-smi-common-vdo", .data = &mtk_smi_common_mt8195_vdo},
+ {.compatible = "mediatek,mt8195-smi-common-vpp", .data = &mtk_smi_common_mt8195_vpp},
+ {.compatible = "mediatek,mt8195-smi-sub-common", .data = &mtk_smi_sub_common_mt8195},
{}
};
@@ -492,8 +589,7 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_smi *common;
- struct resource *res;
- int ret;
+ int ret, clk_required = MTK_SMI_COM_REQ_CLK_NR;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
if (!common)
@@ -501,23 +597,15 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
common->dev = dev;
common->plat = of_device_get_match_data(dev);
- common->clk_apb = devm_clk_get(dev, "apb");
- if (IS_ERR(common->clk_apb))
- return PTR_ERR(common->clk_apb);
-
- common->clk_smi = devm_clk_get(dev, "smi");
- if (IS_ERR(common->clk_smi))
- return PTR_ERR(common->clk_smi);
-
if (common->plat->has_gals) {
- common->clk_gals0 = devm_clk_get(dev, "gals0");
- if (IS_ERR(common->clk_gals0))
- return PTR_ERR(common->clk_gals0);
-
- common->clk_gals1 = devm_clk_get(dev, "gals1");
- if (IS_ERR(common->clk_gals1))
- return PTR_ERR(common->clk_gals1);
+ if (common->plat->type == MTK_SMI_GEN2)
+ clk_required = MTK_SMI_COM_GALS_REQ_CLK_NR;
+ else if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ clk_required = MTK_SMI_SUB_COM_GALS_REQ_CLK_NR;
}
+ ret = mtk_smi_dts_clk_init(dev, common, mtk_smi_common_clks, clk_required, 0);
+ if (ret)
+ return ret;
/*
* for mtk smi gen 1, we need to get the ao(always on) base to config
@@ -525,9 +613,8 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
* clock into emi clock domain, but for mtk smi gen2, there's no smi ao
* base.
*/
- if (common->plat->gen == MTK_SMI_GEN1) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- common->smi_ao_base = devm_ioremap_resource(dev, res);
+ if (common->plat->type == MTK_SMI_GEN1) {
+ common->smi_ao_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(common->smi_ao_base))
return PTR_ERR(common->smi_ao_base);
@@ -539,11 +626,18 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
if (ret)
return ret;
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- common->base = devm_ioremap_resource(dev, res);
+ common->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(common->base))
return PTR_ERR(common->base);
}
+
+ /* link its smi-common if this is smi-sub-common */
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM) {
+ ret = mtk_smi_device_link_common(dev, &common->smi_common_dev);
+ if (ret < 0)
+ return ret;
+ }
+
pm_runtime_enable(dev);
platform_set_drvdata(pdev, common);
return 0;
@@ -551,6 +645,10 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
static int mtk_smi_common_remove(struct platform_device *pdev)
{
+ struct mtk_smi *common = dev_get_drvdata(&pdev->dev);
+
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ device_link_remove(&pdev->dev, common->smi_common_dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -558,17 +656,21 @@ static int mtk_smi_common_remove(struct platform_device *pdev)
static int __maybe_unused mtk_smi_common_resume(struct device *dev)
{
struct mtk_smi *common = dev_get_drvdata(dev);
- u32 bus_sel = common->plat->bus_sel;
- int ret;
+ const struct mtk_smi_reg_pair *init = common->plat->init;
+ u32 bus_sel = common->plat->bus_sel; /* default is 0 */
+ int ret, i;
- ret = mtk_smi_clk_enable(common);
- if (ret) {
- dev_err(common->dev, "Failed to enable clock(%d).\n", ret);
+ ret = clk_bulk_prepare_enable(common->clk_num, common->clks);
+ if (ret)
return ret;
- }
- if (common->plat->gen == MTK_SMI_GEN2 && bus_sel)
- writel(bus_sel, common->base + SMI_BUS_SEL);
+ if (common->plat->type != MTK_SMI_GEN2)
+ return 0;
+
+ for (i = 0; i < SMI_COMMON_INIT_REGS_NR && init && init[i].offset; i++)
+ writel_relaxed(init[i].value, common->base + init[i].offset);
+
+ writel(bus_sel, common->base + SMI_BUS_SEL);
return 0;
}
@@ -576,7 +678,7 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
{
struct mtk_smi *common = dev_get_drvdata(dev);
- mtk_smi_clk_disable(common);
+ clk_bulk_disable_unprepare(common->clk_num, common->clks);
return 0;
}
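The clock rework above replaces four open-coded clk_prepare_enable() calls with the clk_bulk API, splitting the list into a required head and an optional tail. A minimal standalone sketch of that pattern (the clock names are illustrative, not taken from the driver):

#include <linux/clk.h>
#include <linux/device.h>

#define DEMO_NR_REQUIRED	2
#define DEMO_NR_OPTIONAL	1

static struct clk_bulk_data demo_clks[DEMO_NR_REQUIRED + DEMO_NR_OPTIONAL] = {
	{ .id = "apb" }, { .id = "smi" }, { .id = "gals" },
};

static int demo_get_clks(struct device *dev)
{
	int ret;

	/* required clocks: fail if any of them is missing */
	ret = devm_clk_bulk_get(dev, DEMO_NR_REQUIRED, demo_clks);
	if (ret)
		return ret;

	/* optional tail: a missing clock becomes a NULL clk (a no-op) */
	return devm_clk_bulk_get_optional(dev, DEMO_NR_OPTIONAL,
					  demo_clks + DEMO_NR_REQUIRED);
}

At resume time a single clk_bulk_prepare_enable(ARRAY_SIZE(demo_clks), demo_clks) then replaces the old unwind ladder.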
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index d9f5437d3bce..b94408954d85 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -298,3 +298,90 @@ default_timings:
return NULL;
}
EXPORT_SYMBOL(of_lpddr3_get_ddr_timings);
+
+/**
+ * of_lpddr2_get_info() - extracts information about the lpddr2 chip.
+ * @np: Pointer to device tree node containing lpddr2 info
+ * @dev: Device requesting info
+ *
+ * Populates the lpddr2_info structure by extracting data from the
+ * device tree node. Returns a pointer to the populated structure.
+ * If an error occurs while populating, NULL is returned. If a property
+ * is missing from the device tree, the corresponding value is set to -ENOENT.
+ */
+const struct lpddr2_info
+*of_lpddr2_get_info(struct device_node *np, struct device *dev)
+{
+ struct lpddr2_info *ret_info, info = {};
+ struct property *prop;
+ const char *cp;
+ int err;
+
+ err = of_property_read_u32(np, "revision-id1", &info.revision_id1);
+ if (err)
+ info.revision_id1 = -ENOENT;
+
+ err = of_property_read_u32(np, "revision-id2", &info.revision_id2);
+ if (err)
+ info.revision_id2 = -ENOENT;
+
+ err = of_property_read_u32(np, "io-width", &info.io_width);
+ if (err)
+ return NULL;
+
+ info.io_width = 32 / info.io_width - 1;
+
+ err = of_property_read_u32(np, "density", &info.density);
+ if (err)
+ return NULL;
+
+ info.density = ffs(info.density) - 7;
+
+ if (of_device_is_compatible(np, "jedec,lpddr2-s4"))
+ info.arch_type = LPDDR2_TYPE_S4;
+ else if (of_device_is_compatible(np, "jedec,lpddr2-s2"))
+ info.arch_type = LPDDR2_TYPE_S2;
+ else if (of_device_is_compatible(np, "jedec,lpddr2-nvm"))
+ info.arch_type = LPDDR2_TYPE_NVM;
+ else
+ return NULL;
+
+ prop = of_find_property(np, "compatible", NULL);
+ for (cp = of_prop_next_string(prop, NULL); cp;
+ cp = of_prop_next_string(prop, cp)) {
+
+#define OF_LPDDR2_VENDOR_CMP(compat, ID) \
+ if (!of_compat_cmp(cp, compat ",", strlen(compat ","))) { \
+ info.manufacturer_id = LPDDR2_MANID_##ID; \
+ break; \
+ }
+
+ OF_LPDDR2_VENDOR_CMP("samsung", SAMSUNG)
+ OF_LPDDR2_VENDOR_CMP("qimonda", QIMONDA)
+ OF_LPDDR2_VENDOR_CMP("elpida", ELPIDA)
+ OF_LPDDR2_VENDOR_CMP("etron", ETRON)
+ OF_LPDDR2_VENDOR_CMP("nanya", NANYA)
+ OF_LPDDR2_VENDOR_CMP("hynix", HYNIX)
+ OF_LPDDR2_VENDOR_CMP("mosel", MOSEL)
+ OF_LPDDR2_VENDOR_CMP("winbond", WINBOND)
+ OF_LPDDR2_VENDOR_CMP("esmt", ESMT)
+ OF_LPDDR2_VENDOR_CMP("spansion", SPANSION)
+ OF_LPDDR2_VENDOR_CMP("sst", SST)
+ OF_LPDDR2_VENDOR_CMP("zmos", ZMOS)
+ OF_LPDDR2_VENDOR_CMP("intel", INTEL)
+ OF_LPDDR2_VENDOR_CMP("numonyx", NUMONYX)
+ OF_LPDDR2_VENDOR_CMP("micron", MICRON)
+
+#undef OF_LPDDR2_VENDOR_CMP
+ }
+
+ if (!info.manufacturer_id)
+ info.manufacturer_id = -ENOENT;
+
+ ret_info = devm_kzalloc(dev, sizeof(*ret_info), GFP_KERNEL);
+ if (ret_info)
+ *ret_info = info;
+
+ return ret_info;
+}
+EXPORT_SYMBOL(of_lpddr2_get_info);
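The io_width and density conversions above fold the DT values into the JESD209-2 MR8 field encodings so they can later be compared against values read back from the chip. A hedged sketch of the same arithmetic, pulled out for clarity:

#include <linux/kernel.h>	/* ffs() */

/* Illustrative only: mirrors the conversions in of_lpddr2_get_info(). */
static int demo_encode_io_width(u32 io_width_bits)	/* e.g. 32 or 16 */
{
	return 32 / io_width_bits - 1;	/* 32 -> 0, 16 -> 1 */
}

static int demo_encode_density(u32 density_mbits)
{
	/* powers of two only: 64 Mb -> 0, 128 Mb -> 1, ..., 2048 Mb -> 5 */
	return ffs(density_mbits) - 7;
}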
diff --git a/drivers/memory/of_memory.h b/drivers/memory/of_memory.h
index 4a99b232ab0a..1c4e47fede8a 100644
--- a/drivers/memory/of_memory.h
+++ b/drivers/memory/of_memory.h
@@ -20,6 +20,9 @@ const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
const struct lpddr3_timings *
of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
struct device *dev, u32 device_type, u32 *nr_frequencies);
+
+const struct lpddr2_info *of_lpddr2_get_info(struct device_node *np,
+ struct device *dev);
#else
static inline const struct lpddr2_min_tck
*of_get_min_tck(struct device_node *np, struct device *dev)
@@ -46,6 +49,12 @@ static inline const struct lpddr3_timings
{
return NULL;
}
+
+static inline const struct lpddr2_info
+ *of_lpddr2_get_info(struct device_node *np, struct device *dev)
+{
+ return NULL;
+}
#endif /* CONFIG_OF && CONFIG_DDR */
#endif /* __LINUX_MEMORY_OF_REG_ */
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 45eed659b0c6..7435baad0007 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -160,10 +160,61 @@ static const struct regmap_access_table rpcif_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges),
};
+
+/*
+ * Custom accessor functions to ensure SMRDR0 and SMWDR0 are always accessed
+ * with the proper width. SMENR_SPIDE must be set correctly beforehand!
+ */
+static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct rpcif *rpc = context;
+
+ if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
+ u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
+
+ if (spide == 0x8) {
+ *val = readb(rpc->base + reg);
+ return 0;
+ } else if (spide == 0xC) {
+ *val = readw(rpc->base + reg);
+ return 0;
+ } else if (spide != 0xF) {
+ return -EILSEQ;
+ }
+ }
+
+ *val = readl(rpc->base + reg);
+ return 0;
+}
+
+static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct rpcif *rpc = context;
+
+ if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
+ u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
+
+ if (spide == 0x8) {
+ writeb(val, rpc->base + reg);
+ return 0;
+ } else if (spide == 0xC) {
+ writew(val, rpc->base + reg);
+ return 0;
+ } else if (spide != 0xF) {
+ return -EILSEQ;
+ }
+ }
+
+ writel(val, rpc->base + reg);
+ return 0;
+}
+
static const struct regmap_config rpcif_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
+ .reg_read = rpcif_reg_read,
+ .reg_write = rpcif_reg_write,
.fast_io = true,
.max_register = RPCIF_PHYINT,
.volatile_table = &rpcif_volatile_table,
@@ -173,17 +224,15 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
- void __iomem *base;
rpc->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ rpc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rpc->base))
+ return PTR_ERR(rpc->base);
- rpc->regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &rpcif_regmap_config);
+ rpc->regmap = devm_regmap_init(&pdev->dev, NULL, rpc, &rpcif_regmap_config);
if (IS_ERR(rpc->regmap)) {
dev_err(&pdev->dev,
"failed to init regmap for rpcif, error %ld\n",
@@ -354,20 +403,16 @@ void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
nbytes = op->data.nbytes;
rpc->xferlen = nbytes;
- rpc->enable |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes)) |
- RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth));
+ rpc->enable |= RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth));
}
}
EXPORT_SYMBOL(rpcif_prepare);
int rpcif_manual_xfer(struct rpcif *rpc)
{
- u32 smenr, smcr, pos = 0, max = 4;
+ u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 8 : 4;
int ret = 0;
- if (rpc->bus_size == 2)
- max = 8;
-
pm_runtime_get_sync(rpc->dev);
regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
@@ -378,37 +423,36 @@ int rpcif_manual_xfer(struct rpcif *rpc)
regmap_write(rpc->regmap, RPCIF_SMOPR, rpc->option);
regmap_write(rpc->regmap, RPCIF_SMDMCR, rpc->dummy);
regmap_write(rpc->regmap, RPCIF_SMDRENR, rpc->ddr);
+ regmap_write(rpc->regmap, RPCIF_SMADR, rpc->smadr);
smenr = rpc->enable;
switch (rpc->dir) {
case RPCIF_DATA_OUT:
while (pos < rpc->xferlen) {
- u32 nbytes = rpc->xferlen - pos;
- u32 data[2];
+ u32 bytes_left = rpc->xferlen - pos;
+ u32 nbytes, data[2];
smcr = rpc->smcr | RPCIF_SMCR_SPIE;
- if (nbytes > max) {
- nbytes = max;
+
+ /* nbytes may only be 1, 2, 4, or 8 */
+ nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
+ if (bytes_left > nbytes)
smcr |= RPCIF_SMCR_SSLKP;
- }
+
+ smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
+ regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
memcpy(data, rpc->buffer + pos, nbytes);
- if (nbytes > 4) {
+ if (nbytes == 8) {
regmap_write(rpc->regmap, RPCIF_SMWDR1,
data[0]);
regmap_write(rpc->regmap, RPCIF_SMWDR0,
data[1]);
- } else if (nbytes > 2) {
+ } else {
regmap_write(rpc->regmap, RPCIF_SMWDR0,
data[0]);
- } else {
- regmap_write(rpc->regmap, RPCIF_SMWDR0,
- data[0] << 16);
}
- regmap_write(rpc->regmap, RPCIF_SMADR,
- rpc->smadr + pos);
- regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
regmap_write(rpc->regmap, RPCIF_SMCR, smcr);
ret = wait_msg_xfer_end(rpc);
if (ret)
@@ -448,14 +492,16 @@ int rpcif_manual_xfer(struct rpcif *rpc)
break;
}
while (pos < rpc->xferlen) {
- u32 nbytes = rpc->xferlen - pos;
- u32 data[2];
+ u32 bytes_left = rpc->xferlen - pos;
+ u32 nbytes, data[2];
- if (nbytes > max)
- nbytes = max;
+ /* nbytes may only be 1, 2, 4, or 8 */
+ nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
regmap_write(rpc->regmap, RPCIF_SMADR,
rpc->smadr + pos);
+ smenr &= ~RPCIF_SMENR_SPIDE(0xF);
+ smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
regmap_write(rpc->regmap, RPCIF_SMCR,
rpc->smcr | RPCIF_SMCR_SPIE);
@@ -463,18 +509,14 @@ int rpcif_manual_xfer(struct rpcif *rpc)
if (ret)
goto err_out;
- if (nbytes > 4) {
+ if (nbytes == 8) {
regmap_read(rpc->regmap, RPCIF_SMRDR1,
&data[0]);
regmap_read(rpc->regmap, RPCIF_SMRDR0,
&data[1]);
- } else if (nbytes > 2) {
- regmap_read(rpc->regmap, RPCIF_SMRDR0,
- &data[0]);
- } else {
+ } else {
regmap_read(rpc->regmap, RPCIF_SMRDR0,
&data[0]);
- data[0] >>= 16;
}
memcpy(rpc->buffer + pos, data, nbytes);
@@ -502,6 +544,48 @@ err_out:
}
EXPORT_SYMBOL(rpcif_manual_xfer);
+static void memcpy_fromio_readw(void *to,
+ const void __iomem *from,
+ size_t count)
+{
+ const int maxw = (IS_ENABLED(CONFIG_64BIT)) ? 8 : 4;
+ u8 buf[2];
+
+ if (count && ((unsigned long)from & 1)) {
+ *(u16 *)buf = __raw_readw((void __iomem *)((unsigned long)from & ~1));
+ *(u8 *)to = buf[1];
+ from++;
+ to++;
+ count--;
+ }
+ while (count >= 2 && !IS_ALIGNED((unsigned long)from, maxw)) {
+ *(u16 *)to = __raw_readw(from);
+ from += 2;
+ to += 2;
+ count -= 2;
+ }
+ while (count >= maxw) {
+#ifdef CONFIG_64BIT
+ *(u64 *)to = __raw_readq(from);
+#else
+ *(u32 *)to = __raw_readl(from);
+#endif
+ from += maxw;
+ to += maxw;
+ count -= maxw;
+ }
+ while (count >= 2) {
+ *(u16 *)to = __raw_readw(from);
+ from += 2;
+ to += 2;
+ count -= 2;
+ }
+ if (count) {
+ *(u16 *)buf = __raw_readw(from);
+ *(u8 *)to = buf[0];
+ }
+}
+
ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
{
loff_t from = offs & (RPCIF_DIRMAP_SIZE - 1);
@@ -523,7 +607,10 @@ ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy);
regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr);
- memcpy_fromio(buf, rpc->dirmap + from, len);
+ if (rpc->bus_size == 2)
+ memcpy_fromio_readw(buf, rpc->dirmap + from, len);
+ else
+ memcpy_fromio(buf, rpc->dirmap + from, len);
pm_runtime_put(rpc->dev);
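The rewritten transfer loops size each beat as the largest power of two that still fits, because SMENR_SPIDE can only express 1-, 2-, 4- or 8-byte units. The computation, isolated as a sketch:

#include <linux/log2.h>
#include <linux/types.h>

/* "max" is 8 on a 16-bit wide bus, 4 otherwise; bytes_left must be > 0 */
static u32 demo_next_xfer_size(u32 bytes_left, u32 max)
{
	/* result is always 1, 2, 4 or 8 */
	return bytes_left >= max ? max : (1 << ilog2(bytes_left));
}

For example, a 5-byte remainder on a 32-bit bus transfers as 4 + 1; a 3-byte remainder as 2 + 1.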
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
index 8e240f078afc..7fb70f573031 100644
--- a/drivers/memory/samsung/Kconfig
+++ b/drivers/memory/samsung/Kconfig
@@ -14,11 +14,12 @@ config EXYNOS5422_DMC
depends on DEVFREQ_GOV_SIMPLE_ONDEMAND
depends on (PM_DEVFREQ && PM_DEVFREQ_EVENT)
help
- This adds driver for Exynos5422 DMC (Dynamic Memory Controller).
- The driver provides support for Dynamic Voltage and Frequency Scaling in
- DMC and DRAM. It also supports changing timings of DRAM running with
- different frequency. The timings are calculated based on DT memory
- information.
+ This adds the driver for the Samsung Exynos5422 SoC DMC (Dynamic
+ Memory Controller). The driver provides support for Dynamic Voltage
+ and Frequency Scaling in the DMC and DRAM. It also supports changing
+ the DRAM timings when running at a different frequency. The timings
+ are calculated based on DT memory information.
+ If unsure, say Y on devices with Samsung Exynos SoCs.
config EXYNOS_SROM
bool "Exynos SROM controller driver" if COMPILE_TEST
@@ -29,6 +30,6 @@ config EXYNOS_SROM
during suspend. If however appropriate device tree configuration
is provided, the driver enables support for external memory
or external devices.
- If unsure, say Y on devices with Samsung Exynos SocS.
+ If unsure, say Y on devices with Samsung Exynos SoCs.
endif
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index f9bae36c03a3..7951764b4efe 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -16,6 +16,7 @@ config TEGRA20_EMC
depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ
+ select DDR
help
This driver is for the External Memory Controller (EMC) found on
Tegra20 chips. The EMC controls the external DRAM on the board.
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 3c5aae7abf35..44b4a4080920 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -87,11 +87,9 @@ struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
return ERR_PTR(-EPROBE_DEFER);
}
- err = devm_add_action(dev, tegra_mc_devm_action_put_device, mc);
- if (err) {
- put_device(mc->dev);
+ err = devm_add_action_or_reset(dev, tegra_mc_devm_action_put_device, mc);
+ if (err)
return ERR_PTR(err);
- }
return mc;
}
@@ -706,15 +704,6 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
goto remove_nodes;
}
- /*
- * MC driver is registered too early, so early that generic driver
- * syncing doesn't work for the MC. But it doesn't really matter
- * since syncing works for the EMC drivers, hence we can sync the
- * MC driver by ourselves and then EMC will complete syncing of
- * the whole ICC state.
- */
- icc_sync_state(mc->dev);
-
return 0;
remove_nodes:
@@ -835,6 +824,15 @@ static int __maybe_unused tegra_mc_resume(struct device *dev)
return 0;
}
+static void tegra_mc_sync_state(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+
+ /* check whether ICC provider is registered */
+ if (mc->provider.dev == dev)
+ icc_sync_state(dev);
+}
+
static const struct dev_pm_ops tegra_mc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume)
};
@@ -845,6 +843,7 @@ static struct platform_driver tegra_mc_driver = {
.of_match_table = tegra_mc_of_match,
.pm = &tegra_mc_pm_ops,
.suppress_bind_attrs = true,
+ .sync_state = tegra_mc_sync_state,
},
.prevent_deferred_probe = true,
.probe = tegra_mc_probe,
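Moving icc_sync_state() out of probe and into a driver-level sync_state callback means it now runs only once all consumers have probed. The registration idiom, reduced to a sketch (names are illustrative):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_sync_state(struct device *dev)
{
	/* invoked once every consumer with a device link to us has probed */
	dev_dbg(dev, "all consumers ready\n");
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-sync-state",
		.sync_state = demo_sync_state,
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");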
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index d65e7c2a580b..746c4ef2c0af 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -197,6 +197,11 @@ static int tegra186_emc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to EMC DVFS pairs: %d\n", err);
goto put_bpmp;
}
+ if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "EMC DVFS MRQ failed: %d (BPMP error code)\n", msg.rx.ret);
+ goto put_bpmp;
+ }
emc->debugfs.min_rate = ULONG_MAX;
emc->debugfs.max_rate = 0;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index c3462dbc8c22..497b6edbf3ca 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -5,6 +5,7 @@
* Author: Dmitry Osipenko <digetx@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
@@ -27,11 +28,15 @@
#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
#include "mc.h"
#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
#define EMC_DBG 0x008
+#define EMC_ADR_CFG_0 0x010
#define EMC_TIMING_CONTROL 0x028
#define EMC_RC 0x02c
#define EMC_RFC 0x030
@@ -68,6 +73,7 @@
#define EMC_QUSE_EXTRA 0x0ac
#define EMC_ODT_WRITE 0x0b0
#define EMC_ODT_READ 0x0b4
+#define EMC_MRR 0x0ec
#define EMC_FBIO_CFG5 0x104
#define EMC_FBIO_CFG6 0x114
#define EMC_STAT_CONTROL 0x160
@@ -94,6 +100,7 @@
#define EMC_REFRESH_OVERFLOW_INT BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+#define EMC_MRR_DIVLD_INT BIT(5)
#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
@@ -102,11 +109,25 @@
#define EMC_DBG_CFG_PRIORITY BIT(24)
#define EMC_FBIO_CFG5_DRAM_WIDTH_X16 BIT(4)
+#define EMC_FBIO_CFG5_DRAM_TYPE GENMASK(1, 0)
+
+#define EMC_MRR_DEV_SELECTN GENMASK(31, 30)
+#define EMC_MRR_MRR_MA GENMASK(23, 16)
+#define EMC_MRR_MRR_DATA GENMASK(15, 0)
+
+#define EMC_ADR_CFG_0_EMEM_NUMDEV GENMASK(25, 24)
#define EMC_PWR_GATHER_CLEAR (1 << 8)
#define EMC_PWR_GATHER_DISABLE (2 << 8)
#define EMC_PWR_GATHER_ENABLE (3 << 8)
+enum emc_dram_type {
+ DRAM_TYPE_RESERVED,
+ DRAM_TYPE_DDR1,
+ DRAM_TYPE_LPDDR2,
+ DRAM_TYPE_DDR2,
+};
+
static const u16 emc_timing_registers[] = {
EMC_RC,
EMC_RFC,
@@ -201,6 +222,14 @@ struct tegra_emc {
struct mutex rate_lock;
struct devfreq_simple_ondemand_data ondemand_data;
+
+ /* memory chip identity information */
+ union lpddr2_basic_config4 basic_conf4;
+ unsigned int manufacturer_id;
+ unsigned int revision_id1;
+ unsigned int revision_id2;
+
+ bool mrr_error;
};
static irqreturn_t tegra_emc_isr(int irq, void *data)
@@ -397,15 +426,19 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
if (!emc->timings)
return -ENOMEM;
- emc->num_timings = child_count;
timing = emc->timings;
for_each_child_of_node(node, child) {
+ if (of_node_name_eq(child, "lpddr2"))
+ continue;
+
err = load_one_timing_from_dt(emc, timing++, child);
if (err) {
of_node_put(child);
return err;
}
+
+ emc->num_timings++;
}
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
@@ -422,12 +455,18 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
}
static struct device_node *
-tegra_emc_find_node_by_ram_code(struct device *dev)
+tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
{
+ struct device *dev = emc->dev;
struct device_node *np;
u32 value, ram_code;
int err;
+ if (emc->mrr_error) {
+ dev_warn(dev, "memory timings skipped due to MRR error\n");
+ return NULL;
+ }
+
if (of_get_child_count(dev->of_node) == 0) {
dev_info_once(dev, "device-tree doesn't have memory timings\n");
return NULL;
@@ -442,8 +481,49 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
np = of_find_node_by_name(np, "emc-tables")) {
err = of_property_read_u32(np, "nvidia,ram-code", &value);
if (err || value != ram_code) {
- of_node_put(np);
- continue;
+ struct device_node *lpddr2_np;
+ bool cfg_mismatches = false;
+
+ lpddr2_np = of_find_node_by_name(np, "lpddr2");
+ if (lpddr2_np) {
+ const struct lpddr2_info *info;
+
+ info = of_lpddr2_get_info(lpddr2_np, dev);
+ if (info) {
+ if (info->manufacturer_id >= 0 &&
+ info->manufacturer_id != emc->manufacturer_id)
+ cfg_mismatches = true;
+
+ if (info->revision_id1 >= 0 &&
+ info->revision_id1 != emc->revision_id1)
+ cfg_mismatches = true;
+
+ if (info->revision_id2 >= 0 &&
+ info->revision_id2 != emc->revision_id2)
+ cfg_mismatches = true;
+
+ if (info->density != emc->basic_conf4.density)
+ cfg_mismatches = true;
+
+ if (info->io_width != emc->basic_conf4.io_width)
+ cfg_mismatches = true;
+
+ if (info->arch_type != emc->basic_conf4.arch_type)
+ cfg_mismatches = true;
+ } else {
+ dev_err(dev, "failed to parse %pOF\n", lpddr2_np);
+ cfg_mismatches = true;
+ }
+
+ of_node_put(lpddr2_np);
+ } else {
+ cfg_mismatches = true;
+ }
+
+ if (cfg_mismatches) {
+ of_node_put(np);
+ continue;
+ }
}
return np;
@@ -455,10 +535,72 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
return NULL;
}
+static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ unsigned int register_addr,
+ unsigned int *register_data)
+{
+ u32 memory_dev = emem_dev + 1;
+ u32 val, mr_mask = 0xff;
+ int err;
+
+ /* clear data-valid interrupt status */
+ writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);
+
+ /* issue mode register read request */
+ val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
+ val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);
+
+ writel_relaxed(val, emc->regs + EMC_MRR);
+
+ /* wait for the LPDDR2 data-valid interrupt */
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
+ val & EMC_MRR_DIVLD_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "mode register %u read failed: %d\n",
+ register_addr, err);
+ emc->mrr_error = true;
+ return err;
+ }
+
+ /* read out mode register data */
+ val = readl_relaxed(emc->regs + EMC_MRR);
+ *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;
+
+ return 0;
+}
+
+static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ bool print_out)
+{
+ /* these registers are standard for all LPDDR JEDEC memory chips */
+ emc_read_lpddr_mode_register(emc, emem_dev, 5, &emc->manufacturer_id);
+ emc_read_lpddr_mode_register(emc, emem_dev, 6, &emc->revision_id1);
+ emc_read_lpddr_mode_register(emc, emem_dev, 7, &emc->revision_id2);
+ emc_read_lpddr_mode_register(emc, emem_dev, 8, &emc->basic_conf4.value);
+
+ if (!print_out)
+ return;
+
+ dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
+ emem_dev, emc->manufacturer_id,
+ lpddr2_jedec_manufacturer(emc->manufacturer_id),
+ emc->revision_id1, emc->revision_id2,
+ 4 >> emc->basic_conf4.arch_type,
+ 64 << emc->basic_conf4.density,
+ 32 >> emc->basic_conf4.io_width);
+}
+
static int emc_setup_hw(struct tegra_emc *emc)
{
+ u32 emc_cfg, emc_dbg, emc_fbio, emc_adr_cfg;
u32 intmask = EMC_REFRESH_OVERFLOW_INT;
- u32 emc_cfg, emc_dbg, emc_fbio;
+ static bool print_sdram_info_once;
+ enum emc_dram_type dram_type;
+ const char *dram_type_str;
+ unsigned int emem_numdev;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -496,7 +638,36 @@ static int emc_setup_hw(struct tegra_emc *emc)
else
emc->dram_bus_width = 32;
- dev_info_once(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+ dram_type = FIELD_GET(EMC_FBIO_CFG5_DRAM_TYPE, emc_fbio);
+
+ switch (dram_type) {
+ case DRAM_TYPE_RESERVED:
+ dram_type_str = "INVALID";
+ break;
+ case DRAM_TYPE_DDR1:
+ dram_type_str = "DDR1";
+ break;
+ case DRAM_TYPE_LPDDR2:
+ dram_type_str = "LPDDR2";
+ break;
+ case DRAM_TYPE_DDR2:
+ dram_type_str = "DDR2";
+ break;
+ }
+
+ emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG_0);
+ emem_numdev = FIELD_GET(EMC_ADR_CFG_0_EMEM_NUMDEV, emc_adr_cfg) + 1;
+
+ dev_info_once(emc->dev, "%ubit DRAM bus, %u %s %s attached\n",
+ emc->dram_bus_width, emem_numdev, dram_type_str,
+ emem_numdev == 2 ? "devices" : "device");
+
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ while (emem_numdev--)
+ emc_read_lpddr_sdram_info(emc, emem_numdev,
+ !print_sdram_info_once);
+ print_sdram_info_once = true;
+ }
return 0;
}
@@ -1049,14 +1220,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
emc->dev = &pdev->dev;
- np = tegra_emc_find_node_by_ram_code(&pdev->dev);
- if (np) {
- err = tegra_emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
- }
-
emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
@@ -1065,6 +1228,14 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (err)
return err;
+ np = tegra_emc_find_node_by_ram_code(emc);
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
+
err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
dev_name(&pdev->dev), emc);
if (err) {
@@ -1117,4 +1288,5 @@ module_platform_driver(tegra_emc_driver);
MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
index 0ebfa8eccf0c..cc76adb8d7e8 100644
--- a/drivers/memory/tegra/tegra210-emc-cc-r21021.c
+++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
@@ -478,7 +478,7 @@ static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
{
u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value;
- u32 list[] = {
+ static const u32 list[] = {
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
index 06c0f17fa429..13584f9317a4 100644
--- a/drivers/memory/tegra/tegra210-emc-core.c
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -1662,7 +1662,7 @@ static int tegra210_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
tegra210_emc_debug_min_rate_get,
tegra210_emc_debug_min_rate_set, "%llu\n");
@@ -1692,7 +1692,7 @@ static int tegra210_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
tegra210_emc_debug_max_rate_get,
tegra210_emc_debug_max_rate_set, "%llu\n");
@@ -1723,7 +1723,7 @@ static int tegra210_emc_debug_temperature_set(void *data, u64 temperature)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
tegra210_emc_debug_temperature_get,
tegra210_emc_debug_temperature_set, "%llu\n");
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 7e21a852f2e1..80f98d717e13 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -1289,7 +1289,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
tegra_emc_debug_min_rate_get,
tegra_emc_debug_min_rate_set, "%llu\n");
@@ -1319,7 +1319,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
tegra_emc_debug_max_rate_get,
tegra_emc_debug_max_rate_set, "%llu\n");
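DEFINE_DEBUGFS_ATTRIBUTE pairs with debugfs_create_file_unsafe(): the generated fops take the debugfs file reference themselves via debugfs_file_get()/put(), so no lifetime proxy is needed, unlike DEFINE_SIMPLE_ATTRIBUTE. The idiom in miniature (all names illustrative):

#include <linux/debugfs.h>

static u64 demo_value;

static int demo_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int demo_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

static void demo_register(struct dentry *root)
{
	/* the _unsafe variant relies on the attribute's own protection */
	debugfs_create_file_unsafe("demo", 0644, root, &demo_value,
				   &demo_fops);
}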
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index acf36676e388..0cda6c6baefc 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1736,7 +1736,7 @@ static int msb_init_card(struct memstick_dev *card)
msb->pages_in_block = boot_block->attr.block_size * 2;
msb->block_size = msb->page_size * msb->pages_in_block;
- if (msb->page_size > PAGE_SIZE) {
+ if ((size_t)msb->page_size > PAGE_SIZE) {
/* this isn't supported by linux at all, anyway*/
dbg("device page %d size isn't supported", msb->page_size);
return -EINVAL;
@@ -2156,10 +2156,14 @@ static int msb_init_disk(struct memstick_dev *card)
set_disk_ro(msb->disk, 1);
msb_start(card);
- device_add_disk(&card->dev, msb->disk, NULL);
+ rc = device_add_disk(&card->dev, msb->disk, NULL);
+ if (rc)
+ goto out_cleanup_disk;
dbg("Disk added");
return 0;
+out_cleanup_disk:
+ blk_cleanup_disk(msb->disk);
out_free_tag_set:
blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 22778d0e24f5..c0450397b673 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1239,10 +1239,14 @@ static int mspro_block_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
- device_add_disk(&card->dev, msb->disk, NULL);
+ rc = device_add_disk(&card->dev, msb->disk, NULL);
+ if (rc)
+ goto out_cleanup_disk;
msb->active = 1;
return 0;
+out_cleanup_disk:
+ blk_cleanup_disk(msb->disk);
out_free_tag_set:
blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index f9a93b0565e1..21cb2a786058 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -882,7 +882,7 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt)
iounmap(host->addr);
err_out_free:
- kfree(msh);
+ memstick_free_host(msh);
return NULL;
}
@@ -927,8 +927,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
goto err_out_int;
}
- jm = kzalloc(sizeof(struct jmb38x_ms)
- + cnt * sizeof(struct memstick_host *), GFP_KERNEL);
+ jm = kzalloc(struct_size(jm, hosts, cnt), GFP_KERNEL);
if (!jm) {
rc = -ENOMEM;
goto err_out_int;
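struct_size() computes the size of a structure with a trailing flexible array and saturates on overflow instead of silently wrapping, which is why it replaces the open-coded sizeof arithmetic above. A minimal sketch of the pattern (types are illustrative):

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_ctrl {
	int nhosts;
	void *hosts[];	/* flexible array member */
};

static struct demo_ctrl *demo_alloc(int cnt)
{
	/*
	 * struct_size(ctrl, hosts, cnt) is sizeof(*ctrl) plus
	 * cnt * sizeof(void *), saturating to SIZE_MAX on overflow
	 * so the allocation fails instead of being undersized.
	 */
	struct demo_ctrl *ctrl = kzalloc(struct_size(ctrl, hosts, cnt),
					 GFP_KERNEL);

	if (ctrl)
		ctrl->nhosts = cnt;
	return ctrl;
}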
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index e79a0218c492..1d35d147552d 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -838,15 +838,15 @@ static void r592_remove(struct pci_dev *pdev)
}
memstick_remove_host(dev->host);
+ if (dev->dummy_dma_page)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
+ dev->dummy_dma_page_physical_address);
+
free_irq(dev->irq, dev);
iounmap(dev->mmio);
pci_release_regions(pdev);
pci_disable_device(pdev);
memstick_free_host(dev->host);
-
- if (dev->dummy_dma_page)
- dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
- dev->dummy_dma_page_physical_address);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 3261cac762de..acdc257a900e 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -1350,7 +1350,7 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
HWaddr[5] = a[0];
dev->addr_len = FC_ALEN;
- memcpy(dev->dev_addr, HWaddr, FC_ALEN);
+ dev_addr_set(dev, HWaddr);
memset(dev->broadcast, 0xff, FC_ALEN);
/* The Tx queue is 127 deep on the 909.
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index bd869ec5edba..0ee0c6d808c3 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -69,7 +69,8 @@ static int ad_dpot_i2c_probe(struct i2c_client *client,
static int ad_dpot_i2c_remove(struct i2c_client *client)
{
- return ad_dpot_remove(&client->dev);
+ ad_dpot_remove(&client->dev);
+ return 0;
}
static const struct i2c_device_id ad_dpot_id[] = {
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index aea931dd272e..a9e75d80ad36 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -92,7 +92,8 @@ static int ad_dpot_spi_probe(struct spi_device *spi)
static int ad_dpot_spi_remove(struct spi_device *spi)
{
- return ad_dpot_remove(&spi->dev);
+ ad_dpot_remove(&spi->dev);
+ return 0;
}
static const struct spi_device_id ad_dpot_spi_id[] = {
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 5d8f3f6a95f2..756ef6912b5a 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -743,7 +743,7 @@ exit:
}
EXPORT_SYMBOL(ad_dpot_probe);
-int ad_dpot_remove(struct device *dev)
+void ad_dpot_remove(struct device *dev)
{
struct dpot_data *data = dev_get_drvdata(dev);
int i;
@@ -753,8 +753,6 @@ int ad_dpot_remove(struct device *dev)
ad_dpot_remove_files(dev, data->feat, i);
kfree(data);
-
- return 0;
}
EXPORT_SYMBOL(ad_dpot_remove);
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index ee8dc9f5a45a..72a9d6801937 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -209,6 +209,6 @@ struct ad_dpot_bus_data {
int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata,
unsigned long devid, const char *name);
-int ad_dpot_remove(struct device *dev);
+void ad_dpot_remove(struct device *dev);
#endif
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index baf83594a01d..8c72eb590f79 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1536,7 +1536,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
(int)pcidev->revision);
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
if (ret < 0)
return ret;
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index f950d0155876..1b010d9267c9 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -426,7 +426,7 @@ static ssize_t components_show(struct device *cdev,
{
struct enclosure_device *edev = to_enclosure_device(cdev);
- return snprintf(buf, 40, "%d\n", edev->components);
+ return sysfs_emit(buf, "%d\n", edev->components);
}
static DEVICE_ATTR_RO(components);
@@ -481,7 +481,7 @@ static ssize_t get_component_fault(struct device *cdev,
if (edev->cb->get_fault)
edev->cb->get_fault(edev, ecomp);
- return snprintf(buf, 40, "%d\n", ecomp->fault);
+ return sysfs_emit(buf, "%d\n", ecomp->fault);
}
static ssize_t set_component_fault(struct device *cdev,
@@ -505,7 +505,7 @@ static ssize_t get_component_status(struct device *cdev,
if (edev->cb->get_status)
edev->cb->get_status(edev, ecomp);
- return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]);
+ return sysfs_emit(buf, "%s\n", enclosure_status[ecomp->status]);
}
static ssize_t set_component_status(struct device *cdev,
@@ -539,7 +539,7 @@ static ssize_t get_component_active(struct device *cdev,
if (edev->cb->get_active)
edev->cb->get_active(edev, ecomp);
- return snprintf(buf, 40, "%d\n", ecomp->active);
+ return sysfs_emit(buf, "%d\n", ecomp->active);
}
static ssize_t set_component_active(struct device *cdev,
@@ -563,7 +563,7 @@ static ssize_t get_component_locate(struct device *cdev,
if (edev->cb->get_locate)
edev->cb->get_locate(edev, ecomp);
- return snprintf(buf, 40, "%d\n", ecomp->locate);
+ return sysfs_emit(buf, "%d\n", ecomp->locate);
}
static ssize_t set_component_locate(struct device *cdev,
@@ -593,7 +593,7 @@ static ssize_t get_component_power_status(struct device *cdev,
if (ecomp->power_status == -1)
return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
- return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
+ return sysfs_emit(buf, "%s\n", ecomp->power_status ? "on" : "off");
}
static ssize_t set_component_power_status(struct device *cdev,
@@ -623,7 +623,7 @@ static ssize_t get_component_type(struct device *cdev,
{
struct enclosure_component *ecomp = to_enclosure_component(cdev);
- return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
+ return sysfs_emit(buf, "%s\n", enclosure_type[ecomp->type]);
}
static ssize_t get_component_slot(struct device *cdev,
@@ -638,7 +638,7 @@ static ssize_t get_component_slot(struct device *cdev,
else
slot = ecomp->number;
- return snprintf(buf, 40, "%d\n", slot);
+ return sysfs_emit(buf, "%d\n", slot);
}
static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
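Each snprintf(buf, 40, ...) replaced above hard-coded an arbitrary 40-byte cap, while sysfs guarantees show() callbacks a full PAGE_SIZE buffer; sysfs_emit() encodes that guarantee and checks for misuse. A minimal sketch of the idiom, with an illustrative attribute and value:

```c
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* buf is a PAGE_SIZE sysfs buffer; sysfs_emit() validates that
	 * and returns the number of characters written.
	 */
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);
```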
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index ad6ced454655..39aca7753719 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -24,7 +24,7 @@
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/
-#define FASTRPC_MAX_SESSIONS 9 /*8 compute, 1 cpz*/
+#define FASTRPC_MAX_SESSIONS 13 /* 12 compute, 1 cpz */
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
#define FASTRPC_MAX_CRCLIST 64
@@ -892,15 +892,17 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
for (i = inbufs; i < ctx->nbufs; ++i) {
- void *src = (void *)(uintptr_t)rpra[i].pv;
- void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
- u64 len = rpra[i].len;
+ if (!ctx->maps[i]) {
+ void *src = (void *)(uintptr_t)rpra[i].pv;
+ void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
+ u64 len = rpra[i].len;
- if (!kernel) {
- if (copy_to_user((void __user *)dst, src, len))
- return -EFAULT;
- } else {
- memcpy(dst, src, len);
+ if (!kernel) {
+ if (copy_to_user((void __user *)dst, src, len))
+ return -EFAULT;
+ } else {
+ memcpy(dst, src, len);
+ }
}
}
@@ -1765,3 +1767,4 @@ static void fastrpc_exit(void)
module_exit(fastrpc_exit);
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(DMA_BUF);
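The added MODULE_IMPORT_NS(DMA_BUF) line is needed because, as of this kernel series, the dma-buf core's exported symbols were moved into the DMA_BUF symbol namespace; a module that calls them without importing the namespace triggers a modpost warning at build time and fails to load. The general shape:

```c
#include <linux/module.h>
#include <linux/dma-buf.h>

/* Any module calling namespaced dma-buf exports (dma_buf_get(),
 * dma_buf_put(), dma_buf_attach(), ...) must declare the import once.
 */
MODULE_IMPORT_NS(DMA_BUF);
```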
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 039b923d1d60..1167463f26fb 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -233,8 +233,8 @@ static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
struct pci_dev *pci_dev = cd->pci_dev;
for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
- pci_unmap_page(pci_dev, dma_list[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&pci_dev->dev, dma_list[i], PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
dma_list[i] = 0x0;
}
}
@@ -251,12 +251,12 @@ static int genwqe_map_pages(struct genwqe_dev *cd,
dma_addr_t daddr;
dma_list[i] = 0x0;
- daddr = pci_map_page(pci_dev, page_list[i],
+ daddr = dma_map_page(&pci_dev->dev, page_list[i],
0, /* map_offs */
PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL); /* FIXME rd/rw */
+ DMA_BIDIRECTIONAL); /* FIXME rd/rw */
- if (pci_dma_mapping_error(pci_dev, daddr)) {
+ if (dma_mapping_error(&pci_dev->dev, daddr)) {
dev_err(&pci_dev->dev,
"[%s] err: no dma addr daddr=%016llx!\n",
__func__, (long long)daddr);
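This is the same mechanical conversion seen in rtsx_pcr.c above: the legacy pci_map_page()/pci_unmap_page()/pci_set_dma_mask() helpers were thin wrappers over the generic DMA API applied to &pdev->dev. A sketch of the converted mapping step (the helper name is illustrative):

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static dma_addr_t map_one_page(struct pci_dev *pdev, struct page *page)
{
	dma_addr_t daddr = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

	/* The handle is opaque; only dma_mapping_error() may judge it */
	if (dma_mapping_error(&pdev->dev, daddr))
		return DMA_MAPPING_ERROR;

	return daddr;
}
```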
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
index 293d79811372..861c81006c6d 100644
--- a/drivers/misc/habanalabs/Kconfig
+++ b/drivers/misc/habanalabs/Kconfig
@@ -8,6 +8,8 @@ config HABANA_AI
depends on PCI && HAS_IOMEM
select GENERIC_ALLOCATOR
select HWMON
+ select DMA_SHARED_BUFFER
+ select CRC32
help
Enables PCIe card driver for Habana's AI Processors (AIP) that are
designed to accelerate Deep Learning inference and training workloads.
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index 6ebe3c7001ff..82c3824cad00 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -11,4 +11,4 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
common/command_submission.o common/firmware_if.o \
- common/state_dump.o
+ common/state_dump.o common/hwmgr.o
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 6dafff375f1c..4c8000fd246c 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -143,6 +143,7 @@ static void hl_fence_init(struct hl_fence *fence, u64 sequence)
fence->cs_sequence = sequence;
fence->error = 0;
fence->timestamp = ktime_set(0, 0);
+ fence->mcs_handling_done = false;
init_completion(&fence->completion);
}
@@ -431,11 +432,10 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
/* Don't cancel TDR in case this CS timed out, because we might be
* running from the TDR context
*/
- if (cs && (cs->timedout ||
- hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT))
+ if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
return;
- if (cs && cs->tdr_active)
+ if (cs->tdr_active)
cancel_delayed_work_sync(&cs->work_tdr);
spin_lock(&hdev->cs_mirror_lock);
@@ -536,10 +536,21 @@ static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
mcs_compl->timestamp =
ktime_to_ns(fence->timestamp);
complete_all(&mcs_compl->completion);
+
+ /*
+ * Setting mcs_handling_done inside the lock ensures that
+ * at least one fence has mcs_handling_done set to true
+ * before the wait for mcs finishes. This guarantees that
+ * at least one CS will be marked as completed when the
+ * mcs fences are polled.
+ */
+ fence->mcs_handling_done = true;
}
spin_unlock(&mcs_compl->lock);
}
+ /* In case CS completed without mcs completion initialized */
+ fence->mcs_handling_done = true;
}
static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
@@ -2371,32 +2382,48 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
break;
}
- mcs_data->stream_master_qid_map |= fence->stream_master_qid_map;
-
- if (status == CS_WAIT_STATUS_BUSY)
- continue;
-
- mcs_data->completion_bitmap |= BIT(i);
-
- /*
- * best effort to extract timestamp. few notes:
- * - if even single fence is gone we cannot extract timestamp
- * (as fence not exist anymore)
- * - for all completed CSs we take the earliest timestamp.
- * for this we have to validate that:
- * 1. given timestamp was indeed set
- * 2. the timestamp is earliest of all timestamps so far
- */
+ switch (status) {
+ case CS_WAIT_STATUS_BUSY:
+ /* CS has not finished, keep waiting on its QID */
+ mcs_data->stream_master_qid_map |=
+ fence->stream_master_qid_map;
+ break;
+ case CS_WAIT_STATUS_COMPLETED:
+ /*
+ * Use mcs_handling_done to avoid the possibility of mcs_data
+ * being returned to the user, indicating the CS completed,
+ * before it finished all of its mcs handling. This avoids a
+ * race the next time the user waits for mcs.
+ */
+ if (!fence->mcs_handling_done)
+ break;
- if (status == CS_WAIT_STATUS_GONE) {
+ mcs_data->completion_bitmap |= BIT(i);
+ /*
+ * For all completed CSs we take the earliest timestamp.
+ * For this we have to validate that the timestamp is the
+ * earliest of all timestamps so far.
+ */
+ if (mcs_data->update_ts &&
+ (ktime_compare(fence->timestamp, first_cs_time) < 0))
+ first_cs_time = fence->timestamp;
+ break;
+ case CS_WAIT_STATUS_GONE:
mcs_data->update_ts = false;
mcs_data->gone_cs = true;
- } else if (mcs_data->update_ts &&
- (ktime_compare(fence->timestamp,
- ktime_set(0, 0)) > 0) &&
- (ktime_compare(fence->timestamp, first_cs_time) < 0)) {
- first_cs_time = fence->timestamp;
+ /*
+ * It is possible to get old sequence numbers from the user
+ * which relate to already-completed CSs whose fences are
+ * already gone. In this case, the CS is set as completed,
+ * but there is no need to consider its QID for mcs completion.
+ */
+ mcs_data->completion_bitmap |= BIT(i);
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid fence status\n");
+ return -EINVAL;
}
}
hl_fences_put(mcs_data->fence_arr, arr_len);
@@ -2740,13 +2767,14 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
u32 timeout_us, u64 user_address,
- u32 target_value, u16 interrupt_offset,
- enum hl_cs_wait_status *status)
+ u64 target_value, u16 interrupt_offset,
+ enum hl_cs_wait_status *status,
+ u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
struct hl_user_interrupt *interrupt;
unsigned long timeout, flags;
- u32 completion_value;
+ u64 completion_value;
long completion_rc;
int rc = 0;
@@ -2780,15 +2808,17 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
/* We check for completion value as interrupt could have been received
* before we added the node to the wait list
*/
- if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
+ if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
goto remove_pending_user_interrupt;
}
- if (completion_value >= target_value)
+ if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
- else
+ /* There was no interrupt, we assume the completion is now. */
+ pend->fence.timestamp = ktime_get();
+ } else
*status = CS_WAIT_STATUS_BUSY;
if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
@@ -2812,7 +2842,7 @@ wait_again:
reinit_completion(&pend->fence.completion);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
- if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
+ if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
@@ -2839,6 +2869,8 @@ remove_pending_user_interrupt:
list_del(&pend->wait_list_node);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ *timestamp = ktime_to_ns(pend->fence.timestamp);
+
kfree(pend);
hl_ctx_put(ctx);
@@ -2852,6 +2884,7 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
struct asic_fixed_properties *prop;
union hl_wait_cs_args *args = data;
enum hl_cs_wait_status status;
+ u64 timestamp;
int rc;
prop = &hdev->asic_prop;
@@ -2881,7 +2914,8 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
args->in.interrupt_timeout_us, args->in.addr,
- args->in.target, interrupt_offset, &status);
+ args->in.target, interrupt_offset, &status,
+ &timestamp);
if (rc) {
if (rc != -EINTR)
@@ -2893,6 +2927,11 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
memset(args, 0, sizeof(*args));
+ if (timestamp) {
+ args->out.timestamp_nsec = timestamp;
+ args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
+ }
+
switch (status) {
case CS_WAIT_STATUS_COMPLETED:
args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
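Since a wait can now complete without an interrupt ever stamping the fence, the timestamp returned by the interrupt-wait ioctl is only meaningful when HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set, as done above. A sketch of how a caller is expected to gate on it (the helper is illustrative, not from the patch):

```c
#include <linux/types.h>
#include <uapi/misc/habanalabs.h>

static __u64 wait_timestamp_ns(__u32 out_flags, __u64 out_timestamp_nsec)
{
	/* Without the VLD flag the timestamp field carries no information */
	if (!(out_flags & HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD))
		return 0;

	return out_timestamp_nsec;
}
```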
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index 22978303ad63..d0aaccd4df2c 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -181,12 +181,6 @@ out_err:
return rc;
}
-void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
-{
- if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
- return;
-}
-
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
int rc = 0;
@@ -392,7 +386,7 @@ void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
idp = &mgr->ctx_handles;
idr_for_each_entry(idp, ctx, id)
- hl_ctx_free(hdev, ctx);
+ kref_put(&ctx->refcount, hl_ctx_do_release);
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->ctx_lock);
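hl_ctx_free() was removed above because it merely wrapped kref_put() and discarded the result; the caller can say the same thing directly. The underlying refcount pattern, for reference:

```c
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref refcount;
	/* ... payload ... */
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, refcount));
}

static void obj_put(struct obj *o)
{
	/* kref_put() invokes obj_release() when the count hits zero;
	 * its return value rarely needs checking.
	 */
	kref_put(&o->refcount, obj_release);
}
```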
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 985f1f3dbd20..1f2a3dc6c4e2 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -1167,6 +1167,45 @@ static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_timeout_locked_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ sprintf(tmp_buf, "%d\n",
+ jiffies_to_msecs(hdev->timeout_jiffies) / 1000);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ if (value)
+ hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
+ else
+ hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+ return count;
+}
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -1240,6 +1279,12 @@ static const struct file_operations hl_state_dump_fops = {
.write = hl_state_dump_write
};
+static const struct file_operations hl_timeout_locked_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_timeout_locked_read,
+ .write = hl_timeout_locked_write
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
@@ -1421,6 +1466,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_state_dump_fops);
+ debugfs_create_file("timeout_locked",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_timeout_locked_fops);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
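The new timeout_locked knob follows the usual debugfs read/write conventions; the only wrinkle is the sentinel where 0 means "wait forever". The conversion used by the write handler above, factored out as a sketch:

```c
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

static unsigned long timeout_secs_to_jiffies(u32 secs)
{
	/* 0 disables the timeout entirely, matching the write handler */
	return secs ? msecs_to_jiffies(secs * 1000) : MAX_SCHEDULE_TIMEOUT;
}
```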
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 97c7c86580e6..2022e5d7b3ad 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -69,13 +69,6 @@ static void hpriv_release(struct kref *ref)
mutex_destroy(&hpriv->restore_phase_mutex);
- mutex_lock(&hdev->fpriv_list_lock);
- list_del(&hpriv->dev_node);
- hdev->compute_ctx = NULL;
- mutex_unlock(&hdev->fpriv_list_lock);
-
- kfree(hpriv);
-
if ((!hdev->pldm) && (hdev->pdev) &&
(!hdev->asic_funcs->is_device_idle(hdev,
idle_mask,
@@ -87,9 +80,32 @@ static void hpriv_release(struct kref *ref)
device_is_idle = false;
}
+ /* We need to remove the user from the list to make sure the reset process won't
+ * try to kill the user process. If we got here, it means there are no
+ * more driver/device resources that the user process is occupying, so there
+ * is no need to kill it.
+ *
+ * However, we can't set the compute_ctx to NULL at this stage. This is to prevent
+ * a race between the release and opening the device again. We don't want to let
+ * a user open the device while a reset is about to happen.
+ */
+ mutex_lock(&hdev->fpriv_list_lock);
+ list_del(&hpriv->dev_node);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
if ((hdev->reset_if_device_not_idle && !device_is_idle)
|| hdev->reset_upon_device_release)
hl_device_reset(hdev, HL_RESET_DEVICE_RELEASE);
+
+ /* Now we can mark the compute_ctx as empty. Even if a reset is running in a different
+ * thread, we don't care because in_reset is marked, so if a user tries to open
+ * the device it will fail on that, even though compute_ctx is NULL.
+ */
+ mutex_lock(&hdev->fpriv_list_lock);
+ hdev->compute_ctx = NULL;
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ kfree(hpriv);
}
void hl_hpriv_get(struct hl_fpriv *hpriv)
@@ -530,6 +546,19 @@ static void hl_device_heartbeat(struct work_struct *work)
return;
reschedule:
+ /*
+ * prev_reset_trigger tracks consecutive fatal h/w errors until the
+ * first heartbeat immediately after a reset.
+ * If control reached here, then at least one heartbeat work has been
+ * scheduled since the last reset/init cycle.
+ * So if the device is not already in a reset cycle, reset the
+ * prev_reset_trigger flag, as no reset occurred with the
+ * HL_RESET_FW_FATAL_ERR status for at least one heartbeat. From this
+ * point the driver restarts tracking future consecutive fatal errors.
+ */
+ if (!(atomic_read(&hdev->in_reset)))
+ hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
+
schedule_delayed_work(&hdev->work_heartbeat,
usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}
@@ -909,6 +938,65 @@ static void device_disable_open_processes(struct hl_device *hdev)
mutex_unlock(&hdev->fpriv_list_lock);
}
+static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
+{
+ u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
+
+ /*
+ * 'reset cause' is being updated here, because getting here
+ * means that it's the 1st time and the last time we're here
+ * ('in_reset' makes sure of it). This makes sure that
+ * 'reset_cause' will continue holding its 1st recorded reason!
+ */
+ if (flags & HL_RESET_HEARTBEAT) {
+ hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
+ cur_reset_trigger = HL_RESET_HEARTBEAT;
+ } else if (flags & HL_RESET_TDR) {
+ hdev->curr_reset_cause = HL_RESET_CAUSE_TDR;
+ cur_reset_trigger = HL_RESET_TDR;
+ } else if (flags & HL_RESET_FW_FATAL_ERR) {
+ hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ cur_reset_trigger = HL_RESET_FW_FATAL_ERR;
+ } else {
+ hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ }
+
+ /*
+ * If reset cause is same twice, then reset_trigger_repeated
+ * is set and if this reset is due to a fatal FW error
+ * device is set to an unstable state.
+ */
+ if (hdev->prev_reset_trigger != cur_reset_trigger) {
+ hdev->prev_reset_trigger = cur_reset_trigger;
+ hdev->reset_trigger_repeated = 0;
+ } else {
+ hdev->reset_trigger_repeated = 1;
+ }
+
+ /* If the reset is due to heartbeat, the device CPU is not responsive,
+ * in which case there is no point in sending it a PCI disable message.
+ *
+ * If F/W is performing the reset, no need to send it a message to disable
+ * PCI access
+ */
+ if ((flags & HL_RESET_HARD) &&
+ !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) {
+ /* Disable PCI access from the device F/W so it won't send
+ * us additional interrupts. We disable MSI/MSI-X in
+ * the halt_engines function and we can't have the F/W
+ * sending us interrupts after that. We need to disable
+ * the access here because if the device is marked
+ * disabled, the message won't be sent. Also, in case
+ * of heartbeat, the device CPU is marked as disabled,
+ * so this message won't be sent.
+ */
+ if (hl_fw_send_pci_access_msg(hdev,
+ CPUCP_PACKET_DISABLE_PCI_ACCESS))
+ dev_warn(hdev->dev,
+ "Failed to disable PCI access by F/W\n");
+ }
+}
+
/*
* hl_device_reset - reset the device
*
@@ -954,7 +1042,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
goto do_reset;
}
- if (!hard_reset && !hdev->allow_external_soft_reset) {
+ if (!hard_reset && !hdev->allow_inference_soft_reset) {
hard_instead_soft = true;
hard_reset = true;
}
@@ -978,47 +1066,21 @@ do_reset:
if (rc)
return 0;
- /*
- * 'reset cause' is being updated here, because getting here
- * means that it's the 1st time and the last time we're here
- * ('in_reset' makes sure of it). This makes sure that
- * 'reset_cause' will continue holding its 1st recorded reason!
- */
- if (flags & HL_RESET_HEARTBEAT)
- hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
- else if (flags & HL_RESET_TDR)
- hdev->curr_reset_cause = HL_RESET_CAUSE_TDR;
- else
- hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
-
- /* If reset is due to heartbeat, device CPU is no responsive in
- * which case no point sending PCI disable message to it.
- *
- * If F/W is performing the reset, no need to send it a message to disable
- * PCI access
- */
- if (hard_reset && !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) {
- /* Disable PCI access from device F/W so he won't send
- * us additional interrupts. We disable MSI/MSI-X at
- * the halt_engines function and we can't have the F/W
- * sending us interrupts after that. We need to disable
- * the access here because if the device is marked
- * disable, the message won't be send. Also, in case
- * of heartbeat, the device CPU is marked as disable
- * so this message won't be sent
- */
- if (hl_fw_send_pci_access_msg(hdev,
- CPUCP_PACKET_DISABLE_PCI_ACCESS))
- dev_warn(hdev->dev,
- "Failed to disable PCI access by F/W\n");
- }
+ handle_reset_trigger(hdev, flags);
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
take_release_locks(hdev);
- dev_err(hdev->dev, "Going to RESET device!\n");
+ if (hard_reset)
+ dev_info(hdev->dev, "Going to reset device\n");
+ else if (flags & HL_RESET_DEVICE_RELEASE)
+ dev_info(hdev->dev,
+ "Going to reset device after it was released by user\n");
+ else
+ dev_info(hdev->dev,
+ "Going to reset compute engines of inference device\n");
}
again:
@@ -1108,6 +1170,17 @@ kill_processes:
hdev->device_cpu_disabled = false;
hdev->hard_reset_pending = false;
+ if (hdev->reset_trigger_repeated &&
+ (hdev->prev_reset_trigger == HL_RESET_FW_FATAL_ERR)) {
+ /* If there are 2 back-to-back resets from the FW,
+ * ensure the driver puts the device in an unusable state
+ */
+ dev_crit(hdev->dev,
+ "Consecutive FW fatal errors received, stopping hard reset\n");
+ rc = -EIO;
+ goto out_err;
+ }
+
if (hdev->kernel_ctx) {
dev_crit(hdev->dev,
"kernel ctx was alive during hard reset, something is terribly wrong\n");
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 8d2568c63f19..4e68fb9d2a6b 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -2162,18 +2162,17 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
}
/**
- * hl_fw_dynamic_report_reset_cause - send a COMMS message with the cause
- * of the newly triggered hard reset
+ * hl_fw_dynamic_send_msg - send a COMMS message with attached data
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
- * @reset_cause: enumerated cause for the recent hard reset
+ * @msg_type: message type
+ * @data: data to be sent
*
* @return 0 on success, otherwise non-zero error code
*/
-static int hl_fw_dynamic_report_reset_cause(struct hl_device *hdev,
- struct fw_load_mgr *fw_loader,
- enum comms_reset_cause reset_cause)
+static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
{
struct lkd_msg_comms msg;
int rc;
@@ -2181,11 +2180,20 @@ static int hl_fw_dynamic_report_reset_cause(struct hl_device *hdev,
memset(&msg, 0, sizeof(msg));
/* create message to be sent */
- msg.header.type = HL_COMMS_RESET_CAUSE_TYPE;
+ msg.header.type = msg_type;
msg.header.size = cpu_to_le16(sizeof(struct comms_msg_header));
msg.header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
- msg.reset_cause = reset_cause;
+ switch (msg_type) {
+ case HL_COMMS_RESET_CAUSE_TYPE:
+ msg.reset_cause = *(__u8 *) data;
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Send COMMS message - invalid message type %u\n",
+ msg_type);
+ return -EINVAL;
+ }
rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
sizeof(struct lkd_msg_comms));
@@ -2252,8 +2260,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
goto protocol_err;
if (hdev->curr_reset_cause) {
- rc = hl_fw_dynamic_report_reset_cause(hdev, fw_loader,
- hdev->curr_reset_cause);
+ rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
+ HL_COMMS_RESET_CAUSE_TYPE, &hdev->curr_reset_cause);
if (rc)
goto protocol_err;
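hl_fw_dynamic_send_msg() generalizes the old single-purpose sender: the header is built once and a switch fills the typed payload, failing fast on unknown types instead of sending a half-built message. The shape of that pattern, reduced to a sketch (the struct and constant are illustrative stand-ins, not the real lkd_msg_comms layout):

```c
#include <linux/errno.h>
#include <linux/types.h>

struct msg {
	u8 type;
	u8 reset_cause;
};

static int fill_msg_payload(struct msg *m, u8 msg_type, const void *data)
{
	m->type = msg_type;

	switch (msg_type) {
	case 0:	/* e.g. HL_COMMS_RESET_CAUSE_TYPE */
		m->reset_cause = *(const u8 *)data;
		return 0;
	default:
		return -EINVAL;	/* never send an unrecognized type */
	}
}
```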
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index bebebcb163ee..a2002cbf794b 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -26,6 +26,7 @@
#include <linux/sched/signal.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/coresight.h>
+#include <linux/dma-buf.h>
#define HL_NAME "habanalabs"
@@ -68,6 +69,9 @@
#define HL_STATE_DUMP_HIST_LEN 5
+/* Default value for device reset trigger, an invalid value */
+#define HL_RESET_TRIGGER_DEFAULT 0xFF
+
#define OBJ_NAMES_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
#define SYNC_TO_ENGINE_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
@@ -132,13 +136,18 @@ enum hl_mmu_page_table_location {
* - HL_RESET_FW
* F/W will perform the reset. No need to ask it to reset the device. This is relevant
* only when running with secured f/w
+ *
+ * - HL_RESET_FW_FATAL_ERR
+ * Set if reset is due to a fatal error from FW
*/
+
#define HL_RESET_HARD (1 << 0)
#define HL_RESET_FROM_RESET_THREAD (1 << 1)
#define HL_RESET_HEARTBEAT (1 << 2)
#define HL_RESET_TDR (1 << 3)
#define HL_RESET_DEVICE_RELEASE (1 << 4)
#define HL_RESET_FW (1 << 5)
+#define HL_RESET_FW_FATAL_ERR (1 << 6)
#define HL_MAX_SOBS_PER_MONITOR 8
@@ -447,6 +456,9 @@ struct hl_hints_range {
* for hints validity check.
* device_dma_offset_for_host_access: the offset to add to host DMA addresses
* to enable the device to access them.
+ * @max_freq_value: current max clk frequency.
+ * @clk_pll_index: clock PLL index that specifies which PLL determines the
+ * clock we display to the user.
* @mmu_pgt_size: MMU page tables total size.
* @mmu_pte_size: PTE size in MMU page tables.
* @mmu_hop_table_size: MMU hop table size.
@@ -543,6 +555,8 @@ struct asic_fixed_properties {
u64 cb_va_end_addr;
u64 dram_hints_align_mask;
u64 device_dma_offset_for_host_access;
+ u64 max_freq_value;
+ u32 clk_pll_index;
u32 mmu_pgt_size;
u32 mmu_pte_size;
u32 mmu_hop_table_size;
@@ -601,6 +615,9 @@ struct asic_fixed_properties {
* masters QIDs that multi cs is waiting on
* @error: mark this fence with error
* @timestamp: timestamp upon completion
+ * @mcs_handling_done: indicates that the corresponding command submission has
+ * finished mcs handling; this does not mean it was part
+ * of the mcs
*/
struct hl_fence {
struct completion completion;
@@ -609,6 +626,7 @@ struct hl_fence {
u32 stream_master_qid_map;
int error;
ktime_t timestamp;
+ u8 mcs_handling_done;
};
/**
@@ -1353,6 +1371,23 @@ struct hl_cs_counters_atomic {
};
/**
+ * struct hl_dmabuf_priv - a dma-buf private object.
+ * @dmabuf: pointer to dma-buf object.
+ * @ctx: pointer to the dma-buf owner's context.
+ * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported for
+ * memory allocation handle.
+ * @device_address: physical address of the device's memory. Relevant only
+ * if phys_pg_pack is NULL (dma-buf was exported from address).
+ * The total size can be taken from the dmabuf object.
+ */
+struct hl_dmabuf_priv {
+ struct dma_buf *dmabuf;
+ struct hl_ctx *ctx;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ uint64_t device_address;
+};
+
+/**
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
@@ -1662,6 +1697,7 @@ struct hl_vm_hw_block_list_node {
* @npages: num physical pages in the pack.
* @total_size: total size of all the pages in this list.
* @mapping_cnt: number of shared mappings.
+ * @exporting_cnt: number of active dma-buf exports.
* @asid: the context related to this list.
* @page_size: size of each page in the pack.
* @flags: HL_MEM_* flags related to this list.
@@ -1676,6 +1712,7 @@ struct hl_vm_phys_pg_pack {
u64 npages;
u64 total_size;
atomic_t mapping_cnt;
+ u32 exporting_cnt;
u32 asid;
u32 page_size;
u32 flags;
@@ -2396,6 +2433,7 @@ struct multi_cs_data {
* the error will be ignored by the driver during
* device initialization. Mainly used to debug and
* workaround firmware bugs
+ * @dram_pci_bar_start: start bus address of PCIe bar towards DRAM.
* @last_successful_open_jif: timestamp (jiffies) of the last successful
* device open.
* @last_open_session_duration_jif: duration (jiffies) of the last device open
@@ -2440,8 +2478,12 @@ struct multi_cs_data {
* @collective_mon_idx: helper index for collective initialization
* @supports_coresight: is CoreSight supported.
* @supports_soft_reset: is soft reset supported.
- * @allow_external_soft_reset: true if soft reset initiated by user or TDR is
- * allowed.
+ * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
+ * initiated by user or TDR. This is only true
+ * in inference ASICs, as there is no real-world
+ * use-case of doing soft-reset in training (due
+ * to the fact that training runs on multiple
+ * devices)
* @supports_cb_mapping: is mapping a CB to the device's MMU supported.
* @needs_reset: true if reset_on_lockup is false and device should be reset
* due to lockup.
@@ -2452,6 +2494,10 @@ struct multi_cs_data {
* @supports_staged_submission: true if staged submissions are supported
* @curr_reset_cause: saves an enumerated reset cause when a hard reset is
* triggered, and cleared after it is shared with preboot.
+ * @prev_reset_trigger: saves the previous trigger which caused a reset, overridden
+ * with a new value on the next reset
+ * @reset_trigger_repeated: set if device reset is triggered more than once with
+ * same cause.
* @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
* complete instead.
* @device_cpu_is_halted: Flag to indicate whether the device CPU was already
@@ -2537,6 +2583,7 @@ struct hl_device {
u64 max_power;
u64 clock_gating_mask;
u64 boot_error_status_mask;
+ u64 dram_pci_bar_start;
u64 last_successful_open_jif;
u64 last_open_session_duration_jif;
u64 open_counter;
@@ -2572,13 +2619,15 @@ struct hl_device {
u8 collective_mon_idx;
u8 supports_coresight;
u8 supports_soft_reset;
- u8 allow_external_soft_reset;
+ u8 allow_inference_soft_reset;
u8 supports_cb_mapping;
u8 needs_reset;
u8 process_kill_trial_cnt;
u8 device_fini_pending;
u8 supports_staged_submission;
u8 curr_reset_cause;
+ u8 prev_reset_trigger;
+ u8 reset_trigger_repeated;
u8 skip_reset_on_timeout;
u8 device_cpu_is_halted;
u8 supports_wait_for_multi_cs;
@@ -2956,6 +3005,15 @@ int hl_set_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
+int hl_set_power(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value);
+int hl_get_power(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
+int hl_get_clk_rate(struct hl_device *hdev,
+ u32 *cur_clk, u32 *max_clk);
+void hl_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
+void hl_add_device_attr(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp);
void hw_sob_get(struct hl_hw_sob *hw_sob);
void hw_sob_put(struct hl_hw_sob *hw_sob);
void hl_encaps_handle_do_release(struct kref *ref);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index a75e4fceb9d8..949d1b5c5c41 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -225,6 +225,17 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
if (!hpriv)
return -ENOMEM;
+ /* Prevent other routines from reading partial hpriv data by
+ * initializing hpriv fields before inserting it into the list
+ */
+ hpriv->hdev = hdev;
+ filp->private_data = hpriv;
+ hpriv->filp = filp;
+ hpriv->is_control = true;
+ nonseekable_open(inode, filp);
+
+ hpriv->taskpid = find_get_pid(current->pid);
+
mutex_lock(&hdev->fpriv_list_lock);
if (!hl_device_operational(hdev, NULL)) {
@@ -238,19 +249,15 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
list_add(&hpriv->dev_node, &hdev->fpriv_list);
mutex_unlock(&hdev->fpriv_list_lock);
- hpriv->hdev = hdev;
- filp->private_data = hpriv;
- hpriv->filp = filp;
- hpriv->is_control = true;
- nonseekable_open(inode, filp);
-
- hpriv->taskpid = find_get_pid(current->pid);
-
return 0;
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
+ filp->private_data = NULL;
+ put_pid(hpriv->taskpid);
+
kfree(hpriv);
+
return rc;
}
@@ -339,6 +346,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
set_driver_behavior_per_device(hdev);
hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
if (timeout_locked)
hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
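The hl_device_open_ctrl() reordering above is the classic init-before-publish fix: all hpriv fields are filled in before the object is linked onto fpriv_list, so another thread walking the list under the lock can never observe partially initialized data. The pattern in isolation:

```c
#include <linux/list.h>
#include <linux/mutex.h>

struct item {
	struct list_head node;
	/* ... fields that list walkers may read ... */
};

static void publish_item(struct item *it, struct list_head *head,
			 struct mutex *lock)
{
	/* all fields of *it must be initialized before this point */
	mutex_lock(lock);
	list_add(&it->node, head);	/* now visible to list walkers */
	mutex_unlock(lock);
}
```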
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/common/hwmgr.c
index 9b60eadd4c35..5451019f143f 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
+++ b/drivers/misc/habanalabs/common/hwmgr.c
@@ -1,29 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2018 HabanaLabs, Ltd.
+ * Copyright 2019-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
-#include "gaudiP.h"
-#include "../include/gaudi/gaudi_fw_if.h"
+#include "habanalabs.h"
-void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
+void hl_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
{
- struct gaudi_device *gaudi = hdev->asic_specific;
-
- if (freq == PLL_LAST)
- hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
+ hl_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
+ hdev->asic_prop.max_freq_value);
}
-int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
+int hl_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
{
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
+ value = hl_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
if (value < 0) {
dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
@@ -33,7 +30,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
*max_clk = (value / 1000 / 1000);
- value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
+ value = hl_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
if (value < 0) {
dev_err(hdev->dev,
@@ -51,15 +48,14 @@ static ssize_t clk_max_freq_mhz_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
- struct gaudi_device *gaudi = hdev->asic_specific;
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
+ value = hl_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
- gaudi->max_freq_value = value;
+ hdev->asic_prop.max_freq_value = value;
return sprintf(buf, "%lu\n", (value / 1000 / 1000));
}
@@ -68,7 +64,6 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
- struct gaudi_device *gaudi = hdev->asic_specific;
int rc;
u64 value;
@@ -83,9 +78,10 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev,
goto fail;
}
- gaudi->max_freq_value = value * 1000 * 1000;
+ hdev->asic_prop.max_freq_value = value * 1000 * 1000;
- hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
+ hl_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
+ hdev->asic_prop.max_freq_value);
fail:
return count;
@@ -100,7 +96,7 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
+ value = hl_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
return sprintf(buf, "%lu\n", (value / 1000 / 1000));
}
@@ -108,14 +104,14 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev,
static DEVICE_ATTR_RW(clk_max_freq_mhz);
static DEVICE_ATTR_RO(clk_cur_freq_mhz);
-static struct attribute *gaudi_dev_attrs[] = {
+static struct attribute *hl_dev_attrs[] = {
&dev_attr_clk_max_freq_mhz.attr,
&dev_attr_clk_cur_freq_mhz.attr,
NULL,
};
-void gaudi_add_device_attr(struct hl_device *hdev,
+void hl_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp)
{
- dev_attr_grp->attrs = gaudi_dev_attrs;
+ dev_attr_grp->attrs = hl_dev_attrs;
}
diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index 6b421d76b311..e33f65be8a00 100644
--- a/drivers/misc/habanalabs/common/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
@@ -113,6 +113,9 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
{
struct hl_device *hdev = dev_get_drvdata(dev);
int rc;
+ u32 cpucp_attr;
+ bool use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
@@ -121,65 +124,134 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
+ cpucp_attr = cpucp_temp_input;
+ break;
case hwmon_temp_max:
+ cpucp_attr = cpucp_temp_max;
+ break;
case hwmon_temp_crit:
+ cpucp_attr = cpucp_temp_crit;
+ break;
case hwmon_temp_max_hyst:
+ cpucp_attr = cpucp_temp_max_hyst;
+ break;
case hwmon_temp_crit_hyst:
+ cpucp_attr = cpucp_temp_crit_hyst;
+ break;
case hwmon_temp_offset:
+ cpucp_attr = cpucp_temp_offset;
+ break;
case hwmon_temp_highest:
+ cpucp_attr = cpucp_temp_highest;
break;
default:
return -EINVAL;
}
- rc = hl_get_temperature(hdev, channel, attr, val);
+ if (use_cpucp_enum)
+ rc = hl_get_temperature(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_temperature(hdev, channel, attr, val);
break;
case hwmon_in:
switch (attr) {
case hwmon_in_input:
+ cpucp_attr = cpucp_in_input;
+ break;
case hwmon_in_min:
+ cpucp_attr = cpucp_in_min;
+ break;
case hwmon_in_max:
+ cpucp_attr = cpucp_in_max;
+ break;
case hwmon_in_highest:
+ cpucp_attr = cpucp_in_highest;
break;
default:
return -EINVAL;
}
- rc = hl_get_voltage(hdev, channel, attr, val);
+ if (use_cpucp_enum)
+ rc = hl_get_voltage(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_voltage(hdev, channel, attr, val);
break;
case hwmon_curr:
switch (attr) {
case hwmon_curr_input:
+ cpucp_attr = cpucp_curr_input;
+ break;
case hwmon_curr_min:
+ cpucp_attr = cpucp_curr_min;
+ break;
case hwmon_curr_max:
+ cpucp_attr = cpucp_curr_max;
+ break;
case hwmon_curr_highest:
+ cpucp_attr = cpucp_curr_highest;
break;
default:
return -EINVAL;
}
- rc = hl_get_current(hdev, channel, attr, val);
+ if (use_cpucp_enum)
+ rc = hl_get_current(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_current(hdev, channel, attr, val);
break;
case hwmon_fan:
switch (attr) {
case hwmon_fan_input:
+ cpucp_attr = cpucp_fan_input;
+ break;
case hwmon_fan_min:
+ cpucp_attr = cpucp_fan_min;
+ break;
case hwmon_fan_max:
+ cpucp_attr = cpucp_fan_max;
break;
default:
return -EINVAL;
}
- rc = hl_get_fan_speed(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ rc = hl_get_fan_speed(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_fan_speed(hdev, channel, attr, val);
break;
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
+ cpucp_attr = cpucp_pwm_input;
+ break;
case hwmon_pwm_enable:
+ cpucp_attr = cpucp_pwm_enable;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (use_cpucp_enum)
+ rc = hl_get_pwm_info(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_pwm_info(hdev, channel, attr, val);
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ cpucp_attr = CPUCP_POWER_INPUT;
+ break;
+ case hwmon_power_input_highest:
+ cpucp_attr = CPUCP_POWER_INPUT_HIGHEST;
break;
default:
return -EINVAL;
}
- rc = hl_get_pwm_info(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ rc = hl_get_power(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_power(hdev, channel, attr, val);
break;
default:
return -EINVAL;
@@ -191,6 +263,9 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
struct hl_device *hdev = dev_get_drvdata(dev);
+ u32 cpucp_attr;
+ bool use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
@@ -199,40 +274,78 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_offset:
+ cpucp_attr = cpucp_temp_offset;
+ break;
case hwmon_temp_reset_history:
+ cpucp_attr = cpucp_temp_reset_history;
break;
default:
return -EINVAL;
}
- hl_set_temperature(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ hl_set_temperature(hdev, channel, cpucp_attr, val);
+ else
+ hl_set_temperature(hdev, channel, attr, val);
break;
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
+ cpucp_attr = cpucp_pwm_input;
+ break;
case hwmon_pwm_enable:
+ cpucp_attr = cpucp_pwm_enable;
break;
default:
return -EINVAL;
}
- hl_set_pwm_info(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ hl_set_pwm_info(hdev, channel, cpucp_attr, val);
+ else
+ hl_set_pwm_info(hdev, channel, attr, val);
break;
case hwmon_in:
switch (attr) {
case hwmon_in_reset_history:
+ cpucp_attr = cpucp_in_reset_history;
break;
default:
return -EINVAL;
}
- hl_set_voltage(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ hl_set_voltage(hdev, channel, cpucp_attr, val);
+ else
+ hl_set_voltage(hdev, channel, attr, val);
break;
case hwmon_curr:
switch (attr) {
case hwmon_curr_reset_history:
+ cpucp_attr = cpucp_curr_reset_history;
break;
default:
return -EINVAL;
}
- hl_set_current(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ hl_set_current(hdev, channel, cpucp_attr, val);
+ else
+ hl_set_current(hdev, channel, attr, val);
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_reset_history:
+ cpucp_attr = CPUCP_POWER_RESET_INPUT_HISTORY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (use_cpucp_enum)
+ hl_set_power(hdev, channel, cpucp_attr, val);
+ else
+ hl_set_power(hdev, channel, attr, val);
break;
default:
return -EINVAL;
@@ -296,6 +409,15 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
return 0644;
}
break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_input_highest:
+ return 0444;
+ case hwmon_power_reset_history:
+ return 0200;
+ }
+ break;
default:
break;
}
@@ -551,6 +673,60 @@ int hl_set_current(struct hl_device *hdev,
return rc;
}
+int hl_set_power(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value)
+{
+ struct cpucp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ 0, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set power of sensor %d, error %d\n",
+ sensor_index, rc);
+
+ return rc;
+}
+
+int hl_get_power(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
+{
+ struct cpucp_packet pkt;
+ u64 result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ 0, &result);
+
+ *value = (long) result;
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get power of sensor %d, error %d\n",
+ sensor_index, rc);
+ *value = 0;
+ }
+
+ return rc;
+}
+
int hl_hwmon_init(struct hl_device *hdev)
{
struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
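The hl_read()/hl_write() changes above all follow one translation rule: newer firmware (advertising CPU_BOOT_DEV_STS0_MAP_HWMON_EN) speaks the stable cpucp_* attribute enum, while older firmware expects the raw hwmon enum values. Reduced to a sketch (the helper is illustrative):

```c
#include <linux/types.h>

static u32 sensor_attr_to_wire(bool fw_has_cpucp_map, u32 hwmon_attr,
			       u32 cpucp_attr)
{
	/* The boot-status bit decides which enum goes on the wire */
	return fw_has_cpucp_map ? cpucp_attr : hwmon_attr;
}
```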
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index 39b14a933393..96d82b682674 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -141,10 +141,13 @@ static void handle_user_cq(struct hl_device *hdev,
struct hl_user_interrupt *user_cq)
{
struct hl_user_pending_interrupt *pend;
+ ktime_t now = ktime_get();
spin_lock(&user_cq->wait_list_lock);
- list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node)
+ list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node) {
+ pend->fence.timestamp = now;
complete_all(&pend->fence.completion);
+ }
spin_unlock(&user_cq->wait_list_lock);
}
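handle_user_cq() now samples ktime_get() once, outside the list walk, and stamps every pending waiter with the same value before completing it; one sample keeps all completions of a single interrupt coherent and avoids repeated clock reads while holding the spinlock. The stamped-completion step, as a sketch:

```c
#include <linux/completion.h>
#include <linux/ktime.h>

struct pending_wait {
	ktime_t timestamp;
	struct completion done;
};

static void complete_stamped(struct pending_wait *p, ktime_t now)
{
	p->timestamp = now;	/* stamp before waking the waiter */
	complete_all(&p->done);
}
```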
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 33986933aa9e..9bd626a00de3 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -11,6 +11,9 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/pci-p2pdma.h>
+
+MODULE_IMPORT_NS(DMA_BUF);
#define HL_MMU_DEBUG 0
@@ -347,6 +350,12 @@ static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
return -EINVAL;
}
+ if (phys_pg_pack->exporting_cnt) {
+ dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
+ spin_unlock(&vm->idr_lock);
+ return -EINVAL;
+ }
+
/*
* must remove from idr before the freeing of the physical
* pages as the refcount of the pool is also the trigger of the
@@ -1487,13 +1496,487 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
return 0;
}
+static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
+ struct device *dev, enum dma_data_direction dir)
+{
+ dma_addr_t addr;
+ int rc;
+
+ addr = dma_map_resource(dev, bar_address, chunk_size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ rc = dma_mapping_error(dev, addr);
+ if (rc)
+ return rc;
+
+ sg_set_page(sg, NULL, chunk_size, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = chunk_size;
+
+ return 0;
+}
+
+static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
+ u64 page_size, struct device *dev,
+ enum dma_data_direction dir)
+{
+ u64 chunk_size, bar_address, dma_max_seg_size;
+ struct asic_fixed_properties *prop;
+ int rc, i, j, nents, cur_page;
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+
+ prop = &hdev->asic_prop;
+
+ dma_max_seg_size = dma_get_max_seg_size(dev);
+
+ /* We would like to align the max segment size to PAGE_SIZE, so the
+ * SGL will contain aligned addresses that can be easily mapped to
+ * an MMU
+ */
+ dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
+ if (dma_max_seg_size < PAGE_SIZE) {
+ dev_err_ratelimited(hdev->dev,
+ "dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
+ dma_max_seg_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ /* If the size of each page is larger than the dma max segment size,
+ * then we can't combine pages and the number of entries in the SGL
+ * will just be the
+ * <number of pages> * <chunks of max segment size in each page>
+ */
+ if (page_size > dma_max_seg_size)
+ nents = npages * DIV_ROUND_UP_ULL(page_size, dma_max_seg_size);
+ else
+ /* Get number of non-contiguous chunks */
+ for (i = 1, nents = 1, chunk_size = page_size ; i < npages ; i++) {
+ if (pages[i - 1] + page_size != pages[i] ||
+ chunk_size + page_size > dma_max_seg_size) {
+ nents++;
+ chunk_size = page_size;
+ continue;
+ }
+
+ chunk_size += page_size;
+ }
+
+ rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
+ if (rc)
+ goto error_free;
+
+ cur_page = 0;
+
+ if (page_size > dma_max_seg_size) {
+ u64 size_left, cur_device_address = 0;
+
+ size_left = page_size;
+
+ /* Need to split each page into the number of chunks of
+ * dma_max_seg_size
+ */
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (size_left == page_size)
+ cur_device_address =
+ pages[cur_page] - prop->dram_base_address;
+ else
+ cur_device_address += dma_max_seg_size;
+
+ chunk_size = min(size_left, dma_max_seg_size);
+
+ bar_address = hdev->dram_pci_bar_start + cur_device_address;
+
+ rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
+ if (rc)
+ goto error_unmap;
+
+ if (size_left > dma_max_seg_size) {
+ size_left -= dma_max_seg_size;
+ } else {
+ cur_page++;
+ size_left = page_size;
+ }
+ }
+ } else {
+ /* Merge pages and put them into the scatterlist */
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ chunk_size = page_size;
+ for (j = cur_page + 1 ; j < npages ; j++) {
+ if (pages[j - 1] + page_size != pages[j] ||
+ chunk_size + page_size > dma_max_seg_size)
+ break;
+
+ chunk_size += page_size;
+ }
+
+ bar_address = hdev->dram_pci_bar_start +
+ (pages[cur_page] - prop->dram_base_address);
+
+ rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
+ if (rc)
+ goto error_unmap;
+
+ cur_page = j;
+ }
+ }
+
+ /* Because we are not going to include a CPU list, set orig_nents
+ * to 0 so that other users have some chance of detecting this, and
+ * use only nents (the length of the DMA list) when going over the
+ * sgl
+ */
+ sgt->orig_nents = 0;
+
+ return sgt;
+
+error_unmap:
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (!sg_dma_len(sg))
+ continue;
+
+ dma_unmap_resource(dev, sg_dma_address(sg),
+ sg_dma_len(sg), dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+
+ sg_free_table(sgt);
+
+error_free:
+ kfree(sgt);
+ return ERR_PTR(rc);
+}
+
+static int hl_dmabuf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct hl_dmabuf_priv *hl_dmabuf;
+ struct hl_device *hdev;
+ int rc;
+
+ hl_dmabuf = dmabuf->priv;
+ hdev = hl_dmabuf->ctx->hdev;
+
+ rc = pci_p2pdma_distance_many(hdev->pdev, &attachment->dev, 1, true);
+
+ if (rc < 0)
+ attachment->peer2peer = false;
+ return 0;
+}
+
+static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct dma_buf *dma_buf = attachment->dmabuf;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct hl_dmabuf_priv *hl_dmabuf;
+ struct hl_device *hdev;
+ struct sg_table *sgt;
+
+ hl_dmabuf = dma_buf->priv;
+ hdev = hl_dmabuf->ctx->hdev;
+ phys_pg_pack = hl_dmabuf->phys_pg_pack;
+
+ if (!attachment->peer2peer) {
+ dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
+ return ERR_PTR(-EPERM);
+ }
+
+ if (phys_pg_pack)
+ sgt = alloc_sgt_from_device_pages(hdev,
+ phys_pg_pack->pages,
+ phys_pg_pack->npages,
+ phys_pg_pack->page_size,
+ attachment->dev,
+ dir);
+ else
+ sgt = alloc_sgt_from_device_pages(hdev,
+ &hl_dmabuf->device_address,
+ 1,
+ hl_dmabuf->dmabuf->size,
+ attachment->dev,
+ dir);
+
+ if (IS_ERR(sgt))
+ dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));
+
+ return sgt;
+}
+
+static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ /* The memory behind the dma-buf has *always* resided on the device itself, i.e. it lives
+ * only in the 'device' domain (after all, it maps a PCI bar address which points to the
+ * device memory).
+ *
+ * Therefore, it was never in the 'CPU' domain and hence, there is no need to perform
+ * a sync of the memory to the CPU's cache, as it never resided inside that cache.
+ */
+ for_each_sgtable_dma_sg(sgt, sg, i)
+ dma_unmap_resource(attachment->dev, sg_dma_address(sg),
+ sg_dma_len(sg), dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
+ /* Need to restore orig_nents because sg_free_table uses that field */
+ sgt->orig_nents = sgt->nents;
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void hl_release_dmabuf(struct dma_buf *dmabuf)
+{
+ struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
+ struct hl_ctx *ctx = hl_dmabuf->ctx;
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+
+ if (hl_dmabuf->phys_pg_pack) {
+ spin_lock(&vm->idr_lock);
+ hl_dmabuf->phys_pg_pack->exporting_cnt--;
+ spin_unlock(&vm->idr_lock);
+ }
+
+ hl_ctx_put(hl_dmabuf->ctx);
+
+ kfree(hl_dmabuf);
+}
+
+static const struct dma_buf_ops habanalabs_dmabuf_ops = {
+ .attach = hl_dmabuf_attach,
+ .map_dma_buf = hl_map_dmabuf,
+ .unmap_dma_buf = hl_unmap_dmabuf,
+ .release = hl_release_dmabuf,
+};
+
+static int export_dmabuf_common(struct hl_ctx *ctx,
+ struct hl_dmabuf_priv *hl_dmabuf,
+ u64 total_size, int flags, int *dmabuf_fd)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct hl_device *hdev = ctx->hdev;
+ int rc, fd;
+
+ exp_info.ops = &habanalabs_dmabuf_ops;
+ exp_info.size = total_size;
+ exp_info.flags = flags;
+ exp_info.priv = hl_dmabuf;
+
+ hl_dmabuf->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(hl_dmabuf->dmabuf)) {
+ dev_err(hdev->dev, "failed to export dma-buf\n");
+ return PTR_ERR(hl_dmabuf->dmabuf);
+ }
+
+ fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
+ if (fd < 0) {
+ dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf\n");
+ rc = fd;
+ goto err_dma_buf_put;
+ }
+
+ hl_dmabuf->ctx = ctx;
+ hl_ctx_get(hdev, hl_dmabuf->ctx);
+
+ *dmabuf_fd = fd;
+
+ return 0;
+
+err_dma_buf_put:
+ dma_buf_put(hl_dmabuf->dmabuf);
+ return rc;
+}
+
+/**
+ * export_dmabuf_from_addr() - export a dma-buf object for the given memory
+ * address and size.
+ * @ctx: pointer to the context structure.
+ * @device_addr: device memory physical address.
+ * @size: size of device memory.
+ * @flags: DMA-BUF file/FD flags.
+ * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
+ *
+ * Create and export a dma-buf object for an existing memory allocation inside
+ * the device memory, and return a FD which is associated with the dma-buf
+ * object.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr,
+ u64 size, int flags, int *dmabuf_fd)
+{
+ struct hl_dmabuf_priv *hl_dmabuf;
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop;
+ u64 bar_address;
+ int rc;
+
+ prop = &hdev->asic_prop;
+
+ if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
+ dev_dbg(hdev->dev,
+ "exported device memory address 0x%llx should be aligned to 0x%lx\n",
+ device_addr, PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ if (size < PAGE_SIZE) {
+ dev_dbg(hdev->dev,
+ "exported device memory size %llu should be equal to or greater than %lu\n",
+ size, PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ if (device_addr < prop->dram_user_base_address ||
+ device_addr + size > prop->dram_end_address ||
+ device_addr + size < device_addr) {
+ dev_dbg(hdev->dev,
+ "DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
+ device_addr, size);
+ return -EINVAL;
+ }
+
+ bar_address = hdev->dram_pci_bar_start +
+ (device_addr - prop->dram_base_address);
+
+ if (bar_address + size >
+ hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
+ bar_address + size < bar_address) {
+ dev_dbg(hdev->dev,
+ "DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
+ device_addr, size);
+ return -EINVAL;
+ }
+
+ hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
+ if (!hl_dmabuf)
+ return -ENOMEM;
+
+ hl_dmabuf->device_address = device_addr;
+
+ rc = export_dmabuf_common(ctx, hl_dmabuf, size, flags, dmabuf_fd);
+ if (rc)
+ goto err_free_dmabuf_wrapper;
+
+ return 0;
+
+err_free_dmabuf_wrapper:
+ kfree(hl_dmabuf);
+ return rc;
+}
+
+/**
+ * export_dmabuf_from_handle() - export a dma-buf object for the given memory
+ * handle.
+ * @ctx: pointer to the context structure.
+ * @handle: device memory allocation handle.
+ * @flags: DMA-BUF file/FD flags.
+ * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
+ *
+ * Create and export a dma-buf object for an existing memory allocation inside
+ * the device memory, and return a FD which is associated with the dma-buf
+ * object.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+static int export_dmabuf_from_handle(struct hl_ctx *ctx, u64 handle, int flags,
+ int *dmabuf_fd)
+{
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct hl_dmabuf_priv *hl_dmabuf;
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop;
+ struct hl_vm *vm = &hdev->vm;
+ u64 bar_address;
+ int rc, i;
+
+ prop = &hdev->asic_prop;
+
+ if (upper_32_bits(handle)) {
+ dev_dbg(hdev->dev, "no match for handle 0x%llx\n", handle);
+ return -EINVAL;
+ }
+
+ spin_lock(&vm->idr_lock);
+
+ phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) handle);
+ if (!phys_pg_pack) {
+ spin_unlock(&vm->idr_lock);
+ dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) handle);
+ return -EINVAL;
+ }
+
+ /* increment now to avoid freeing device memory while exporting */
+ phys_pg_pack->exporting_cnt++;
+
+ spin_unlock(&vm->idr_lock);
+
+ if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
+ dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", handle);
+ rc = -EINVAL;
+ goto err_dec_exporting_cnt;
+ }
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+
+ bar_address = hdev->dram_pci_bar_start +
+ (phys_pg_pack->pages[i] -
+ prop->dram_base_address);
+
+ if (bar_address + phys_pg_pack->page_size >
+ hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
+ bar_address + phys_pg_pack->page_size < bar_address) {
+
+ dev_dbg(hdev->dev,
+ "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
+ phys_pg_pack->pages[i],
+ phys_pg_pack->page_size);
+
+ rc = -EINVAL;
+ goto err_dec_exporting_cnt;
+ }
+ }
+
+ hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
+ if (!hl_dmabuf) {
+ rc = -ENOMEM;
+ goto err_dec_exporting_cnt;
+ }
+
+ hl_dmabuf->phys_pg_pack = phys_pg_pack;
+
+ rc = export_dmabuf_common(ctx, hl_dmabuf, phys_pg_pack->total_size,
+ flags, dmabuf_fd);
+ if (rc)
+ goto err_free_dmabuf_wrapper;
+
+ return 0;
+
+err_free_dmabuf_wrapper:
+ kfree(hl_dmabuf);
+
+err_dec_exporting_cnt:
+ spin_lock(&vm->idr_lock);
+ phys_pg_pack->exporting_cnt--;
+ spin_unlock(&vm->idr_lock);
+
+ return rc;
+}
+
static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
u64 block_handle, device_addr = 0;
u32 handle = 0, block_size;
- int rc;
+ int rc, dmabuf_fd = -EBADF;
switch (args->in.op) {
case HL_MEM_OP_ALLOC:
@@ -1542,6 +2025,16 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
args->out.block_size = block_size;
break;
+ case HL_MEM_OP_EXPORT_DMABUF_FD:
+ rc = export_dmabuf_from_addr(ctx,
+ args->in.export_dmabuf_fd.handle,
+ args->in.export_dmabuf_fd.mem_size,
+ args->in.flags,
+ &dmabuf_fd);
+ memset(args, 0, sizeof(*args));
+ args->out.fd = dmabuf_fd;
+ break;
+
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
rc = -ENOTTY;
@@ -1560,7 +2053,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
struct hl_ctx *ctx = hpriv->ctx;
u64 block_handle, device_addr = 0;
u32 handle = 0, block_size;
- int rc;
+ int rc, dmabuf_fd = -EBADF;
if (!hl_device_operational(hdev, &status)) {
dev_warn_ratelimited(hdev->dev,
@@ -1651,6 +2144,22 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
args->out.block_size = block_size;
break;
+ case HL_MEM_OP_EXPORT_DMABUF_FD:
+ if (hdev->asic_prop.dram_supports_virtual_memory)
+ rc = export_dmabuf_from_handle(ctx,
+ args->in.export_dmabuf_fd.handle,
+ args->in.flags,
+ &dmabuf_fd);
+ else
+ rc = export_dmabuf_from_addr(ctx,
+ args->in.export_dmabuf_fd.handle,
+ args->in.export_dmabuf_fd.mem_size,
+ args->in.flags,
+ &dmabuf_fd);
+ memset(args, 0, sizeof(*args));
+ args->out.fd = dmabuf_fd;
+ break;
+
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
rc = -ENOTTY;
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index 792d25b79ea6..aa96917f62e5 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -501,23 +501,25 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
!is_power_of_2(prop->dram_page_size)) {
- unsigned long dram_page_size = prop->dram_page_size;
- u64 page_offset_mask;
- u64 phys_addr_mask;
- u32 bit;
+ u64 dram_page_size, dram_base, abs_phys_addr, abs_virt_addr,
+ page_id, page_start;
+ u32 page_off;
/*
- * find last set bit in page_size to cover all bits of page
- * offset. note that 1 has to be added to bit index.
- * note that the internal ulong variable is used to avoid
- * alignment issue.
+		 * Bit arithmetic cannot be used for non-power-of-two page
+		 * sizes, and without it the DRAM base can no longer be
+		 * ignored; both must be accounted for explicitly.
*/
- bit = find_last_bit(&dram_page_size,
- sizeof(dram_page_size) * BITS_PER_BYTE) + 1;
- page_offset_mask = (BIT_ULL(bit) - 1);
- phys_addr_mask = ~page_offset_mask;
- *phys_addr = (tmp_phys_addr & phys_addr_mask) |
- (virt_addr & page_offset_mask);
+
+ dram_page_size = prop->dram_page_size;
+ dram_base = prop->dram_base_address;
+ abs_phys_addr = tmp_phys_addr - dram_base;
+ abs_virt_addr = virt_addr - dram_base;
+ page_id = DIV_ROUND_DOWN_ULL(abs_phys_addr, dram_page_size);
+ page_start = page_id * dram_page_size;
+ div_u64_rem(abs_virt_addr, dram_page_size, &page_off);
+
+ *phys_addr = page_start + page_off + dram_base;
} else {
/*
* find the correct hop shift field in hl_mmu_properties
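A worked instance of the division-based translation above, with assumed numbers (DRAM base 0x1000000000 and a non-power-of-two page size of 0x2400000, i.e. 36 MB; virt_addr stands in for the function's argument); masking with BIT_ULL() would be wrong here since the bits below the top set bit of such a size do not form a contiguous page-offset field:

	u64 dram_base = 0x1000000000ULL, page_size = 0x2400000ULL;
	u64 tmp_phys_addr = dram_base + 0x5000000;	/* lands in page 2 */
	u64 page_id, page_start, phys;
	u32 page_off;

	page_id = DIV_ROUND_DOWN_ULL(tmp_phys_addr - dram_base, page_size); /* 2 */
	page_start = page_id * page_size;		/* 2 * 0x2400000 = 0x4800000 */
	div_u64_rem(virt_addr - dram_base, page_size, &page_off);
	phys = page_start + page_off + dram_base;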
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 34f9f2779962..42c1769ad25d 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -206,12 +206,12 @@ static ssize_t soft_reset_store(struct device *dev,
goto out;
}
- if (!hdev->allow_external_soft_reset) {
- dev_err(hdev->dev, "Device does not support soft-reset\n");
+ if (!hdev->allow_inference_soft_reset) {
+ dev_err(hdev->dev, "Device does not support inference soft-reset\n");
goto out;
}
- dev_warn(hdev->dev, "Soft-Reset requested through sysfs\n");
+ dev_warn(hdev->dev, "Inference Soft-Reset requested through sysfs\n");
hl_device_reset(hdev, 0);
diff --git a/drivers/misc/habanalabs/gaudi/Makefile b/drivers/misc/habanalabs/gaudi/Makefile
index c9f4703cff24..10577c33a816 100644
--- a/drivers/misc/habanalabs/gaudi/Makefile
+++ b/drivers/misc/habanalabs/gaudi/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-HL_GAUDI_FILES := gaudi/gaudi.o gaudi/gaudi_hwmgr.o gaudi/gaudi_security.o \
+HL_GAUDI_FILES := gaudi/gaudi.o gaudi/gaudi_security.o \
gaudi/gaudi_coresight.o
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 14da87b38e83..825737dfe381 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -661,6 +661,9 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+ prop->clk_pll_index = HL_GAUDI_MME_PLL;
+ prop->max_freq_value = GAUDI_MAX_CLK_FREQ;
+
return 0;
}
@@ -795,6 +798,7 @@ static int gaudi_early_init(struct hl_device *hdev)
}
prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
+ hdev->dram_pci_bar_start = pci_resource_start(pdev, HBM_BAR_ID);
/* If FW security is enabled at this point it means no access to ELBI */
if (hdev->asic_prop.fw_security_enabled) {
@@ -1837,8 +1841,6 @@ static int gaudi_sw_init(struct hl_device *hdev)
gaudi->cpucp_info_get = gaudi_cpucp_info_get;
- gaudi->max_freq_value = GAUDI_MAX_CLK_FREQ;
-
hdev->asic_specific = gaudi;
/* Create DMA pool for small allocations */
@@ -2616,7 +2618,7 @@ static void gaudi_init_e2e(struct hl_device *hdev)
static void gaudi_init_hbm_cred(struct hl_device *hdev)
{
- uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
+ u32 hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
if (hdev->asic_prop.fw_security_enabled)
return;
@@ -7932,6 +7934,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
+ u32 fw_fatal_err_flag = 0;
u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
>> EQ_CTL_EVENT_TYPE_SHIFT);
bool reset_required;
@@ -7972,6 +7975,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_GIC500:
@@ -7979,6 +7983,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
gaudi_print_irq_info(hdev, event_type, false);
+ fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_0:
@@ -7989,6 +7994,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
+ fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_1:
@@ -8171,9 +8177,9 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
reset_device:
if (hdev->asic_prop.fw_security_enabled)
- hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW);
+ hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW | fw_fatal_err_flag);
else if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, HL_RESET_HARD);
+ hl_device_reset(hdev, HL_RESET_HARD | fw_fatal_err_flag);
else
hl_fw_unmask_irq(hdev, event_type);
}
@@ -9439,9 +9445,9 @@ static const struct hl_asic_funcs gaudi_funcs = {
.debugfs_read64 = gaudi_debugfs_read64,
.debugfs_write64 = gaudi_debugfs_write64,
.debugfs_read_dma = gaudi_debugfs_read_dma,
- .add_device_attr = gaudi_add_device_attr,
+ .add_device_attr = hl_add_device_attr,
.handle_eqe = gaudi_handle_eqe,
- .set_pll_profile = gaudi_set_pll_profile,
+ .set_pll_profile = hl_set_pll_profile,
.get_events_stat = gaudi_get_events_stat,
.read_pte = gaudi_read_pte,
.write_pte = gaudi_write_pte,
@@ -9465,7 +9471,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.halt_coresight = gaudi_halt_coresight,
.ctx_init = gaudi_ctx_init,
.ctx_fini = gaudi_ctx_fini,
- .get_clk_rate = gaudi_get_clk_rate,
+ .get_clk_rate = hl_get_clk_rate,
.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
.load_firmware_to_device = gaudi_load_firmware_to_device,
.load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index bbbf1c343e75..f325e36a71e6 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -319,7 +319,6 @@ struct gaudi_internal_qman_info {
* the actual number of internal queues because they are not in
* consecutive order.
* @hbm_bar_cur_addr: current address of HBM PCI bar.
- * @max_freq_value: current max clk frequency.
* @events: array that holds all event id's
* @events_stat: array that holds histogram of all received events.
* @events_stat_aggregate: same as events_stat but doesn't get cleared on reset
@@ -345,7 +344,6 @@ struct gaudi_device {
struct gaudi_collective_properties collective_props;
u64 hbm_bar_cur_addr;
- u64 max_freq_value;
u32 events[GAUDI_EVENT_SIZE];
u32 events_stat[GAUDI_EVENT_SIZE];
@@ -359,10 +357,8 @@ void gaudi_init_security(struct hl_device *hdev);
void gaudi_ack_protection_bits_errors(struct hl_device *hdev);
void gaudi_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
-void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
int gaudi_debug_coresight(struct hl_device *hdev, void *data);
void gaudi_halt_coresight(struct hl_device *hdev);
-int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid);
#endif /* GAUDIP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 031c1849da14..5536e8c27bd5 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -471,6 +471,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+ prop->clk_pll_index = HL_GOYA_MME_PLL;
+
return 0;
}
@@ -622,6 +624,7 @@ static int goya_early_init(struct hl_device *hdev)
}
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
+ hdev->dram_pci_bar_start = pci_resource_start(pdev, DDR_BAR_ID);
/* If FW security is enabled at this point it means no access to ELBI */
if (hdev->asic_prop.fw_security_enabled) {
@@ -959,7 +962,7 @@ static int goya_sw_init(struct hl_device *hdev)
spin_lock_init(&goya->hw_queues_lock);
hdev->supports_coresight = true;
hdev->supports_soft_reset = true;
- hdev->allow_external_soft_reset = true;
+ hdev->allow_inference_soft_reset = true;
hdev->supports_wait_for_multi_cs = false;
hdev->asic_funcs->set_pci_memory_regions(hdev);
@@ -4829,6 +4832,12 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
case GOYA_ASYNC_EVENT_ID_AXI_ECC:
case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
+ goya_print_irq_info(hdev, event_type, false);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, (HL_RESET_HARD |
+ HL_RESET_FW_FATAL_ERR));
+ break;
+
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
goya_print_irq_info(hdev, event_type, false);
if (hdev->hard_reset_on_fw_events)
@@ -5649,7 +5658,7 @@ static const struct hl_asic_funcs goya_funcs = {
.halt_coresight = goya_halt_coresight,
.ctx_init = goya_ctx_init,
.ctx_fini = goya_ctx_fini,
- .get_clk_rate = goya_get_clk_rate,
+ .get_clk_rate = hl_get_clk_rate,
.get_queue_id_for_cq = goya_get_queue_id_for_cq,
.load_firmware_to_device = goya_load_firmware_to_device,
.load_boot_fit_to_device = goya_load_boot_fit_to_device,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 0b05da614729..97add7b04f82 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -235,7 +235,6 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
-int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx);
u64 goya_get_device_time(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 7d007125727f..59b2624ff81a 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -32,37 +32,6 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
}
}
-int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
-{
- long value;
-
- if (!hl_device_operational(hdev, NULL))
- return -ENODEV;
-
- value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, false);
-
- if (value < 0) {
- dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
- value);
- return value;
- }
-
- *max_clk = (value / 1000 / 1000);
-
- value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, true);
-
- if (value < 0) {
- dev_err(hdev->dev,
- "Failed to retrieve device current clock %ld\n",
- value);
- return value;
- }
-
- *cur_clk = (value / 1000 / 1000);
-
- return 0;
-}
-
static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 9ff6a448f0d4..ae13231fda94 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -542,11 +542,14 @@ enum cpucp_packet_rc {
*/
enum cpucp_temp_type {
cpucp_temp_input,
+ cpucp_temp_min = 4,
+ cpucp_temp_min_hyst,
cpucp_temp_max = 6,
cpucp_temp_max_hyst,
cpucp_temp_crit,
cpucp_temp_crit_hyst,
cpucp_temp_offset = 19,
+ cpucp_temp_lowest = 21,
cpucp_temp_highest = 22,
cpucp_temp_reset_history = 23
};
@@ -555,6 +558,7 @@ enum cpucp_in_attributes {
cpucp_in_input,
cpucp_in_min,
cpucp_in_max,
+ cpucp_in_lowest = 6,
cpucp_in_highest = 7,
cpucp_in_reset_history
};
@@ -563,6 +567,7 @@ enum cpucp_curr_attributes {
cpucp_curr_input,
cpucp_curr_min,
cpucp_curr_max,
+ cpucp_curr_lowest = 6,
cpucp_curr_highest = 7,
cpucp_curr_reset_history
};
@@ -599,6 +604,16 @@ enum cpucp_pll_type_attributes {
};
/*
+ * cpucp_power_type aligns with the hwmon_power_attributes enum
+ * defined in the Linux kernel's hwmon.h
+ */
+enum cpucp_power_type {
+ CPUCP_POWER_INPUT = 8,
+ CPUCP_POWER_INPUT_HIGHEST = 9,
+ CPUCP_POWER_RESET_INPUT_HISTORY = 11
+};
+
+/*
* MSI type enumeration table for all ASICs and future SW versions.
* For future ASIC-LKD compatibility, we can only add new enumerations.
* at the end of the table (before CPUCP_NUM_OF_MSI_TYPES).
@@ -731,6 +746,9 @@ struct cpucp_security_info {
* @pll_map: Bit map of supported PLLs for current ASIC version.
* @mme_binning_mask: MME binning mask,
* (0 = functional, 1 = binned)
+ * @dram_binning_mask: DRAM binning mask, 1 bit per dram instance
+ *			(0 = functional, 1 = binned)
+ * @memory_repair_flag: eFuse flag indicating memory repair
*/
struct cpucp_info {
struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
@@ -749,7 +767,9 @@ struct cpucp_info {
__le64 reserved3;
__le64 reserved4;
__u8 reserved5;
- __u8 pad[7];
+ __u8 dram_binning_mask;
+ __u8 memory_repair_flag;
+ __u8 pad[5];
struct cpucp_security_info sec_info;
__le32 reserved6;
__u8 pll_map[PLL_MAP_LEN];
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index 3099653234e4..2626df6ef3ef 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -15,6 +15,28 @@
#define VERSION_MAX_LEN 128
+enum cpu_boot_err {
+ CPU_BOOT_ERR_DRAM_INIT_FAIL = 0,
+ CPU_BOOT_ERR_FIT_CORRUPTED = 1,
+ CPU_BOOT_ERR_TS_INIT_FAIL = 2,
+ CPU_BOOT_ERR_DRAM_SKIPPED = 3,
+ CPU_BOOT_ERR_BMC_WAIT_SKIPPED = 4,
+ CPU_BOOT_ERR_NIC_DATA_NOT_RDY = 5,
+ CPU_BOOT_ERR_NIC_FW_FAIL = 6,
+ CPU_BOOT_ERR_SECURITY_NOT_RDY = 7,
+ CPU_BOOT_ERR_SECURITY_FAIL = 8,
+ CPU_BOOT_ERR_EFUSE_FAIL = 9,
+ CPU_BOOT_ERR_PRI_IMG_VER_FAIL = 10,
+ CPU_BOOT_ERR_SEC_IMG_VER_FAIL = 11,
+ CPU_BOOT_ERR_PLL_FAIL = 12,
+ CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13,
+ CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18,
+ CPU_BOOT_ERR_BINNING_FAIL = 19,
+ CPU_BOOT_ERR_ENABLED = 31,
+ CPU_BOOT_ERR_SCND_EN = 63,
+ CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
/*
* CPU error bits in BOOT_ERROR registers
*
@@ -78,25 +100,13 @@
* CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
* should be contacted.
*
- * CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD HALT ACK from ARC0 is not received
- * within specified retries after issuing
- * HALT request. ARC0 appears to be in bad
- * reset.
- *
- * CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD HALT ACK from ARC1 is not received
- * within specified retries after issuing
- * HALT request. ARC1 appears to be in bad
- * reset.
+ * CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR Critical error was detected during
+ * the execution of ppboot or preboot.
+ *					For example: a stack overflow.
*
- * CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD RUN ACK from ARC0 is not received
- * within specified timeout after issuing
- * RUN request. ARC0 appears to be in bad
- * reset.
- *
- * CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD RUN ACK from ARC1 is not received
- * within specified timeout after issuing
- * RUN request. ARC1 appears to be in bad
- * reset.
+ * CPU_BOOT_ERR0_BINNING_FAIL Binning settings failed, meaning
+ * malfunctioning components might still be
+ * in use.
*
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
@@ -104,26 +114,57 @@
* registers. Meaning the error bits are
* not garbage, but actual error statuses.
*/
-#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << 0)
-#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << 1)
-#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << 2)
-#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << 3)
-#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << 4)
-#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << 5)
-#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << 6)
-#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << 7)
-#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << 8)
-#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << 9)
-#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << 10)
-#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << 11)
-#define CPU_BOOT_ERR0_PLL_FAIL (1 << 12)
-#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << 13)
-#define CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD (1 << 14)
-#define CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD (1 << 15)
-#define CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD (1 << 16)
-#define CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD (1 << 17)
-#define CPU_BOOT_ERR0_ENABLED (1 << 31)
-#define CPU_BOOT_ERR1_ENABLED (1 << 31)
+#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << CPU_BOOT_ERR_DRAM_INIT_FAIL)
+#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << CPU_BOOT_ERR_FIT_CORRUPTED)
+#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << CPU_BOOT_ERR_TS_INIT_FAIL)
+#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << CPU_BOOT_ERR_DRAM_SKIPPED)
+#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << CPU_BOOT_ERR_BMC_WAIT_SKIPPED)
+#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << CPU_BOOT_ERR_NIC_DATA_NOT_RDY)
+#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << CPU_BOOT_ERR_NIC_FW_FAIL)
+#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << CPU_BOOT_ERR_SECURITY_NOT_RDY)
+#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << CPU_BOOT_ERR_SECURITY_FAIL)
+#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << CPU_BOOT_ERR_EFUSE_FAIL)
+#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << CPU_BOOT_ERR_PRI_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << CPU_BOOT_ERR_SEC_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_PLL_FAIL (1 << CPU_BOOT_ERR_PLL_FAIL)
+#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL)
+#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR)
+#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
+#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+
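With the masks now derived from a single enum, bit index and mask stay in sync by construction; a minimal hedged example of consuming them, where err0 and boot_err0_reg are hypothetical placeholders for a snapshot of the BOOT_ERR0 register:

	u32 err0 = readl(boot_err0_reg);	/* hypothetical MMIO read */

	/* identical to testing BIT(CPU_BOOT_ERR_BINNING_FAIL) */
	if (err0 & CPU_BOOT_ERR0_BINNING_FAIL)
		dev_err(dev, "boot FW binning configuration failed\n");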
+enum cpu_boot_dev_sts {
+ CPU_BOOT_DEV_STS_SECURITY_EN = 0,
+ CPU_BOOT_DEV_STS_DEBUG_EN = 1,
+ CPU_BOOT_DEV_STS_WATCHDOG_EN = 2,
+ CPU_BOOT_DEV_STS_DRAM_INIT_EN = 3,
+ CPU_BOOT_DEV_STS_BMC_WAIT_EN = 4,
+ CPU_BOOT_DEV_STS_E2E_CRED_EN = 5,
+ CPU_BOOT_DEV_STS_HBM_CRED_EN = 6,
+ CPU_BOOT_DEV_STS_RL_EN = 7,
+ CPU_BOOT_DEV_STS_SRAM_SCR_EN = 8,
+ CPU_BOOT_DEV_STS_DRAM_SCR_EN = 9,
+ CPU_BOOT_DEV_STS_FW_HARD_RST_EN = 10,
+ CPU_BOOT_DEV_STS_PLL_INFO_EN = 11,
+ CPU_BOOT_DEV_STS_SP_SRAM_EN = 12,
+ CPU_BOOT_DEV_STS_CLK_GATE_EN = 13,
+ CPU_BOOT_DEV_STS_HBM_ECC_EN = 14,
+ CPU_BOOT_DEV_STS_PKT_PI_ACK_EN = 15,
+ CPU_BOOT_DEV_STS_FW_LD_COM_EN = 16,
+ CPU_BOOT_DEV_STS_FW_IATU_CONF_EN = 17,
+ CPU_BOOT_DEV_STS_FW_NIC_MAC_EN = 18,
+ CPU_BOOT_DEV_STS_DYN_PLL_EN = 19,
+ CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN = 20,
+ CPU_BOOT_DEV_STS_EQ_INDEX_EN = 21,
+ CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN = 22,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN = 23,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24,
+ CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25,
+ CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26,
+ CPU_BOOT_DEV_STS_ENABLED = 31,
+ CPU_BOOT_DEV_STS_SCND_EN = 63,
+ CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */
+};
/*
* BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers
@@ -233,7 +274,7 @@
* was not served before.
* Initialized in: linux
*
- * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to
+ * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to
* prevent IRQs overriding each other.
* Initialized in: linux
*
@@ -252,6 +293,11 @@
* where a bit is set if the engine is not idle.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_MAP_HWMON_EN
+ *					If set, the f/w supports mapping its
+ *					proprietary HWMON enums to cpucp enums.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -261,34 +307,35 @@
* Initialized in: preboot
*
*/
-#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << 0)
-#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << 1)
-#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << 2)
-#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << 3)
-#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << 4)
-#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << 5)
-#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << 6)
-#define CPU_BOOT_DEV_STS0_RL_EN (1 << 7)
-#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << 8)
-#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9)
-#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10)
-#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11)
-#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << 12)
-#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13)
-#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << 14)
-#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << 15)
-#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << 16)
-#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << 17)
-#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << 18)
-#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << 19)
-#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << 20)
-#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << 21)
-#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << 22)
-#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << 23)
-#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << 24)
-#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << 25)
-#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
-#define CPU_BOOT_DEV_STS1_ENABLED (1 << 31)
+#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << CPU_BOOT_DEV_STS_SECURITY_EN)
+#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << CPU_BOOT_DEV_STS_DEBUG_EN)
+#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << CPU_BOOT_DEV_STS_WATCHDOG_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << CPU_BOOT_DEV_STS_DRAM_INIT_EN)
+#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << CPU_BOOT_DEV_STS_BMC_WAIT_EN)
+#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << CPU_BOOT_DEV_STS_E2E_CRED_EN)
+#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << CPU_BOOT_DEV_STS_HBM_CRED_EN)
+#define CPU_BOOT_DEV_STS0_RL_EN (1 << CPU_BOOT_DEV_STS_RL_EN)
+#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_SRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_DRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << CPU_BOOT_DEV_STS_FW_HARD_RST_EN)
+#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << CPU_BOOT_DEV_STS_PLL_INFO_EN)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << CPU_BOOT_DEV_STS_SP_SRAM_EN)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << CPU_BOOT_DEV_STS_CLK_GATE_EN)
+#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << CPU_BOOT_DEV_STS_HBM_ECC_EN)
+#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << CPU_BOOT_DEV_STS_PKT_PI_ACK_EN)
+#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << CPU_BOOT_DEV_STS_FW_LD_COM_EN)
+#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << CPU_BOOT_DEV_STS_FW_IATU_CONF_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_MAC_EN)
+#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << CPU_BOOT_DEV_STS_DYN_PLL_EN)
+#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN)
+#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << CPU_BOOT_DEV_STS_EQ_INDEX_EN)
+#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN)
+#define CPU_BOOT_DEV_STS0_MAP_HWMON_EN (1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN)
+#define CPU_BOOT_DEV_STS0_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
+#define CPU_BOOT_DEV_STS1_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
enum cpu_boot_status {
CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
@@ -405,6 +452,8 @@ struct cpu_dyn_regs {
enum comms_msg_type {
HL_COMMS_DESC_TYPE = 0,
HL_COMMS_RESET_CAUSE_TYPE = 1,
+ HL_COMMS_FW_CFG_SKIP_TYPE = 2,
+ HL_COMMS_BINNING_CONF_TYPE = 3,
};
/* TODO: remove this struct after the code is updated to use message */
@@ -464,6 +513,9 @@ struct lkd_fw_comms_msg {
struct {
__u8 reset_cause;
};
+ struct {
+ __u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
+ };
};
};
@@ -507,8 +559,6 @@ struct lkd_fw_comms_msg {
* COMMS_SKIP_BMC Perform actions required for BMC-less servers.
* Do not wait for BMC response.
*
- * COMMS_LOW_PLL_OPP Initialize PLLs for low OPP.
- *
* COMMS_PREP_DESC_ELBI Same as COMMS_PREP_DESC only that the memory
* space is allocated in a ELBI access only
* address range.
@@ -524,7 +574,6 @@ enum comms_cmd {
COMMS_RST_DEV = 6,
COMMS_GOTO_WFE = 7,
COMMS_SKIP_BMC = 8,
- COMMS_LOW_PLL_OPP = 9,
COMMS_PREP_DESC_ELBI = 10,
COMMS_INVLD_LAST
};
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
index 34ca4fe50d91..2dba02757d37 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
@@ -8,8 +8,6 @@
#ifndef GAUDI_FW_IF_H
#define GAUDI_FW_IF_H
-#include <linux/types.h>
-
#define GAUDI_EVENT_QUEUE_MSI_IDX 8
#define GAUDI_NIC_PORT1_MSI_IDX 10
#define GAUDI_NIC_PORT3_MSI_IDX 12
@@ -78,13 +76,13 @@ struct gaudi_nic_status {
__u32 high_ber_cnt;
};
-struct gaudi_flops_2_data {
+struct gaudi_cold_rst_data {
union {
struct {
- __u32 spsram_init_done : 1;
- __u32 reserved : 31;
+ u32 spsram_init_done : 1;
+ u32 reserved : 31;
};
- __u32 data;
+ __le32 data;
};
};
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
index b9bd5a7f71eb..92f25c2ae083 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
@@ -33,6 +33,7 @@
#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
#define mmPREBOOT_PCIE_EN mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1
+#define mmCOLD_RST_DATA mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_2
#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3
#endif /* GAUDI_REG_MAP_H_ */
diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c
index 989d7d129469..2165ec35a343 100644
--- a/drivers/misc/hisi_hikey_usb.c
+++ b/drivers/misc/hisi_hikey_usb.c
@@ -34,7 +34,6 @@ struct hisi_hikey_usb {
struct device *dev;
struct gpio_desc *otg_switch;
struct gpio_desc *typec_vbus;
- struct gpio_desc *hub_vbus;
struct gpio_desc *reset;
struct regulator *regulator;
@@ -54,9 +53,6 @@ static void hub_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, int value)
{
int ret, status;
- if (hisi_hikey_usb->hub_vbus)
- gpiod_set_value_cansleep(hisi_hikey_usb->hub_vbus, value);
-
if (!hisi_hikey_usb->regulator)
return;
@@ -147,75 +143,50 @@ static int hub_usb_role_switch_set(struct usb_role_switch *sw, enum usb_role rol
return 0;
}
-static int hisi_hikey_usb_parse_kirin970(struct platform_device *pdev,
+static int hisi_hikey_usb_of_role_switch(struct platform_device *pdev,
struct hisi_hikey_usb *hisi_hikey_usb)
{
- struct regulator *regulator;
-
- regulator = devm_regulator_get(&pdev->dev, "hub-vdd");
- if (IS_ERR(regulator)) {
- if (PTR_ERR(regulator) == -EPROBE_DEFER) {
- dev_info(&pdev->dev,
- "waiting for hub-vdd-supply to be probed\n");
- return PTR_ERR(regulator);
- }
- dev_err(&pdev->dev,
- "get hub-vdd-supply failed with error %ld\n",
- PTR_ERR(regulator));
- return PTR_ERR(regulator);
- }
- hisi_hikey_usb->regulator = regulator;
-
- hisi_hikey_usb->reset = devm_gpiod_get(&pdev->dev, "hub_reset_en_gpio",
- GPIOD_OUT_HIGH);
- return PTR_ERR_OR_ZERO(hisi_hikey_usb->reset);
-}
-
-static int hisi_hikey_usb_probe(struct platform_device *pdev)
-{
struct device *dev = &pdev->dev;
- struct hisi_hikey_usb *hisi_hikey_usb;
struct usb_role_switch_desc hub_role_switch = {NULL};
- int ret;
- hisi_hikey_usb = devm_kzalloc(dev, sizeof(*hisi_hikey_usb), GFP_KERNEL);
- if (!hisi_hikey_usb)
- return -ENOMEM;
-
- hisi_hikey_usb->dev = &pdev->dev;
+ if (!device_property_read_bool(dev, "usb-role-switch"))
+ return 0;
hisi_hikey_usb->otg_switch = devm_gpiod_get(dev, "otg-switch",
GPIOD_OUT_HIGH);
- if (IS_ERR(hisi_hikey_usb->otg_switch))
+ if (IS_ERR(hisi_hikey_usb->otg_switch)) {
+ dev_err(dev, "get otg-switch failed with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->otg_switch));
return PTR_ERR(hisi_hikey_usb->otg_switch);
+ }
hisi_hikey_usb->typec_vbus = devm_gpiod_get(dev, "typec-vbus",
GPIOD_OUT_LOW);
- if (IS_ERR(hisi_hikey_usb->typec_vbus))
+ if (IS_ERR(hisi_hikey_usb->typec_vbus)) {
+ dev_err(dev, "get typec-vbus failed with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->typec_vbus));
return PTR_ERR(hisi_hikey_usb->typec_vbus);
+ }
- /* Parse Kirin 970-specific OF data */
- if (of_device_is_compatible(pdev->dev.of_node,
- "hisilicon,kirin970_hikey_usbhub")) {
- ret = hisi_hikey_usb_parse_kirin970(pdev, hisi_hikey_usb);
- if (ret)
- return ret;
- } else {
- /* hub-vdd33-en is optional */
- hisi_hikey_usb->hub_vbus = devm_gpiod_get_optional(dev, "hub-vdd33-en",
- GPIOD_OUT_HIGH);
- if (IS_ERR(hisi_hikey_usb->hub_vbus))
- return PTR_ERR(hisi_hikey_usb->hub_vbus);
+ hisi_hikey_usb->reset = devm_gpiod_get_optional(dev,
+ "hub-reset-en",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(hisi_hikey_usb->reset)) {
+ dev_err(dev, "get hub-reset-en failed with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->reset));
+ return PTR_ERR(hisi_hikey_usb->reset);
}
hisi_hikey_usb->dev_role_sw = usb_role_switch_get(dev);
if (!hisi_hikey_usb->dev_role_sw)
return -EPROBE_DEFER;
- if (IS_ERR(hisi_hikey_usb->dev_role_sw))
+ if (IS_ERR(hisi_hikey_usb->dev_role_sw)) {
+ dev_err(dev, "get device role switch failed with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->dev_role_sw));
return PTR_ERR(hisi_hikey_usb->dev_role_sw);
+ }
INIT_WORK(&hisi_hikey_usb->work, relay_set_role_switch);
- mutex_init(&hisi_hikey_usb->lock);
hub_role_switch.fwnode = dev_fwnode(dev);
hub_role_switch.set = hub_usb_role_switch_set;
@@ -225,10 +196,44 @@ static int hisi_hikey_usb_probe(struct platform_device *pdev)
&hub_role_switch);
if (IS_ERR(hisi_hikey_usb->hub_role_sw)) {
+ dev_err(dev,
+ "failed to register hub role with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->hub_role_sw));
usb_role_switch_put(hisi_hikey_usb->dev_role_sw);
return PTR_ERR(hisi_hikey_usb->hub_role_sw);
}
+ return 0;
+}
+
+static int hisi_hikey_usb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_hikey_usb *hisi_hikey_usb;
+ int ret;
+
+ hisi_hikey_usb = devm_kzalloc(dev, sizeof(*hisi_hikey_usb), GFP_KERNEL);
+ if (!hisi_hikey_usb)
+ return -ENOMEM;
+
+ hisi_hikey_usb->dev = &pdev->dev;
+ mutex_init(&hisi_hikey_usb->lock);
+
+ hisi_hikey_usb->regulator = devm_regulator_get(dev, "hub-vdd");
+ if (IS_ERR(hisi_hikey_usb->regulator)) {
+ if (PTR_ERR(hisi_hikey_usb->regulator) == -EPROBE_DEFER) {
+ dev_info(dev, "waiting for hub-vdd-supply\n");
+ return PTR_ERR(hisi_hikey_usb->regulator);
+ }
+ dev_err(dev, "get hub-vdd-supply failed with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->regulator));
+ return PTR_ERR(hisi_hikey_usb->regulator);
+ }
+
+ ret = hisi_hikey_usb_of_role_switch(pdev, hisi_hikey_usb);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, hisi_hikey_usb);
return 0;
@@ -238,18 +243,20 @@ static int hisi_hikey_usb_remove(struct platform_device *pdev)
{
struct hisi_hikey_usb *hisi_hikey_usb = platform_get_drvdata(pdev);
- if (hisi_hikey_usb->hub_role_sw)
+ if (hisi_hikey_usb->hub_role_sw) {
usb_role_switch_unregister(hisi_hikey_usb->hub_role_sw);
- if (hisi_hikey_usb->dev_role_sw)
- usb_role_switch_put(hisi_hikey_usb->dev_role_sw);
+ if (hisi_hikey_usb->dev_role_sw)
+ usb_role_switch_put(hisi_hikey_usb->dev_role_sw);
+ } else {
+ hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF);
+ }
return 0;
}
static const struct of_device_id id_table_hisi_hikey_usb[] = {
- { .compatible = "hisilicon,gpio_hubv1" },
- { .compatible = "hisilicon,kirin970_hikey_usbhub" },
+ { .compatible = "hisilicon,usbhub" },
{}
};
MODULE_DEVICE_TABLE(of, id_table_hisi_hikey_usb);
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 70c5bb1e6f49..3a7808b796b1 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -878,7 +878,7 @@ static int lis3lv02d_add_fs(struct lis3lv02d *lis3)
return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
}
-int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
+void lis3lv02d_remove_fs(struct lis3lv02d *lis3)
{
sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
platform_device_unregister(lis3->pdev);
@@ -894,7 +894,6 @@ int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
pm_runtime_set_suspended(lis3->pm_dev);
}
kfree(lis3->reg_cache);
- return 0;
}
EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index c394c0b08519..195bd2fd2eb5 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -312,7 +312,7 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3);
void lis3lv02d_joystick_disable(struct lis3lv02d *lis3);
void lis3lv02d_poweroff(struct lis3lv02d *lis3);
int lis3lv02d_poweron(struct lis3lv02d *lis3);
-int lis3lv02d_remove_fs(struct lis3lv02d *lis3);
+void lis3lv02d_remove_fs(struct lis3lv02d *lis3);
int lis3lv02d_init_dt(struct lis3lv02d *lis3);
extern struct lis3lv02d lis3_dev;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index f664ed123730..9e40dfb60742 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -102,7 +102,9 @@ static int lis302dl_spi_remove(struct spi_device *spi)
lis3lv02d_joystick_disable(lis3);
lis3lv02d_poweroff(lis3);
- return lis3lv02d_remove_fs(&lis3_dev);
+ lis3lv02d_remove_fs(&lis3_dev);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 4282b625200f..f4cb94a9aa9c 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -151,6 +151,83 @@ void lkdtm_REPORT_STACK(void)
pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}
+static pid_t stack_canary_pid;
+static unsigned long stack_canary;
+static unsigned long stack_canary_offset;
+
+static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
+{
+ int i = 0;
+ pid_t pid = task_pid_nr(current);
+ unsigned long *canary = (unsigned long *)stack;
+ unsigned long current_offset = 0, init_offset = 0;
+
+ /* Do our best to find the canary in a 16 word window ... */
+ for (i = 1; i < 16; i++) {
+ canary = (unsigned long *)stack + i;
+#ifdef CONFIG_STACKPROTECTOR
+ if (*canary == current->stack_canary)
+ current_offset = i;
+ if (*canary == init_task.stack_canary)
+ init_offset = i;
+#endif
+ }
+
+ if (current_offset == 0) {
+ /*
+ * If the canary doesn't match what's in the task_struct,
+ * we're either using a global canary or the stack frame
+ * layout changed.
+ */
+ if (init_offset != 0) {
+ pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
+ init_offset, pid);
+ } else {
+ pr_warn("FAIL: did not correctly locate stack canary :(\n");
+ pr_expected_config(CONFIG_STACKPROTECTOR);
+ }
+
+ return;
+ } else if (init_offset != 0) {
+ pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
+ }
+
+ canary = (unsigned long *)stack + current_offset;
+ if (stack_canary_pid == 0) {
+ stack_canary = *canary;
+ stack_canary_pid = pid;
+ stack_canary_offset = current_offset;
+ pr_info("Recorded stack canary for pid %d at offset %ld\n",
+ stack_canary_pid, stack_canary_offset);
+ } else if (pid == stack_canary_pid) {
+ pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
+ } else {
+ if (current_offset != stack_canary_offset) {
+ pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
+ stack_canary_offset, current_offset);
+ return;
+ }
+
+ if (*canary == stack_canary) {
+ pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
+ stack_canary_pid, pid, current_offset);
+ } else {
+ pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
+ stack_canary_pid, pid, current_offset);
+ /* Reset the test. */
+ stack_canary_pid = 0;
+ }
+ }
+}
+
+void lkdtm_REPORT_STACK_CANARY(void)
+{
+ /* Use default char array length that triggers stack protection. */
+ char data[8] __aligned(sizeof(void *)) = { };
+
+ __lkdtm_REPORT_STACK_CANARY((void *)&data);
+}
+
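Usage note: like the other crashtypes, this is driven from debugfs by writing REPORT_STACK_CANARY to /sys/kernel/debug/provoke-crash/DIRECT; the logic above records the canary on the first write and expects a second write from a different pid so the two canaries can be compared.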
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index fe6fd34b8caf..609d9ee2acc0 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -111,6 +111,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(CORRUPT_STACK_STRONG),
CRASHTYPE(REPORT_STACK),
+ CRASHTYPE(REPORT_STACK_CANARY),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index c212a253edde..d6137c70ebbe 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -69,6 +69,7 @@ void lkdtm_EXHAUST_STACK(void);
void lkdtm_CORRUPT_STACK(void);
void lkdtm_CORRUPT_STACK_STRONG(void);
void lkdtm_REPORT_STACK(void);
+void lkdtm_REPORT_STACK_CANARY(void);
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index f5fd5b786607..0e0bcd0da852 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -47,3 +47,5 @@ config INTEL_MEI_TXE
Intel Bay Trail
source "drivers/misc/mei/hdcp/Kconfig"
+source "drivers/misc/mei/pxp/Kconfig"
+
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index f1c76f7ee804..d8e5165917f2 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -26,3 +26,4 @@ mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
CFLAGS_mei-trace.o = -I$(src)
obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
+obj-$(CONFIG_INTEL_MEI_PXP) += pxp/
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index aec0483b8e72..fa20d9a27813 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -69,9 +69,9 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto end;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto end;
diff --git a/drivers/misc/mei/pxp/Kconfig b/drivers/misc/mei/pxp/Kconfig
new file mode 100644
index 000000000000..4029b96afc04
--- /dev/null
+++ b/drivers/misc/mei/pxp/Kconfig
@@ -0,0 +1,13 @@
+
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2020, Intel Corporation. All rights reserved.
+#
+config INTEL_MEI_PXP
+ tristate "Intel PXP services of ME Interface"
+ select INTEL_MEI_ME
+ depends on DRM_I915
+ help
+ MEI Support for PXP Services on Intel platforms.
+
+	  Enables the ME FW services required for PXP support through
+	  Intel's i915 display driver.
diff --git a/drivers/misc/mei/pxp/Makefile b/drivers/misc/mei/pxp/Makefile
new file mode 100644
index 000000000000..0329950d5794
--- /dev/null
+++ b/drivers/misc/mei/pxp/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2020, Intel Corporation. All rights reserved.
+#
+# Makefile - PXP client driver for Intel MEI Bus Driver.
+
+obj-$(CONFIG_INTEL_MEI_PXP) += mei_pxp.o
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
new file mode 100644
index 000000000000..f7380d387bab
--- /dev/null
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2020 - 2021 Intel Corporation
+ */
+
+/**
+ * DOC: MEI_PXP Client Driver
+ *
+ * The mei_pxp driver acts as a translation layer between PXP
+ * protocol implementer (I915) and ME FW by translating PXP
+ * negotiation messages to ME FW command payloads and vice versa.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/component.h>
+#include <drm/drm_connector.h>
+#include <drm/i915_component.h>
+#include <drm/i915_pxp_tee_interface.h>
+
+#include "mei_pxp.h"
+
+/**
+ * mei_pxp_send_message() - Sends a PXP message to ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @message: a message buffer to send
+ * @size: size of the message
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_pxp_send_message(struct device *dev, const void *message, size_t size)
+{
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !message)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+	/* temporarily drop the const qualifier until the API is fixed */
+ byte = mei_cldev_send(cldev, (u8 *)message, size);
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_pxp_receive_message() - Receives a PXP message from ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @buffer: a message buffer to contain the received message
+ * @size: size of the buffer
+ * Return: bytes received on success, <0 on failure
+ */
+static int
+mei_pxp_receive_message(struct device *dev, void *buffer, size_t size)
+{
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !buffer)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ byte = mei_cldev_recv(cldev, buffer, size);
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ return byte;
+}
+
+static const struct i915_pxp_component_ops mei_pxp_ops = {
+ .owner = THIS_MODULE,
+ .send = mei_pxp_send_message,
+ .recv = mei_pxp_receive_message,
+};
+
+static int mei_component_master_bind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
+ int ret;
+
+ comp_master->ops = &mei_pxp_ops;
+ comp_master->tee_dev = dev;
+ ret = component_bind_all(dev, comp_master);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mei_component_master_unbind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
+
+ component_unbind_all(dev, comp_master);
+}
+
+static const struct component_master_ops mei_component_master_ops = {
+ .bind = mei_component_master_bind,
+ .unbind = mei_component_master_unbind,
+};
+
+/**
+ * mei_pxp_component_match - compare function for matching mei pxp.
+ *
+ * The function checks if the driver is i915, the subcomponent is PXP
+ * and the grandparent of the pxp device and the parent of the i915
+ * device are the same PCH device.
+ *
+ * @dev: master device
+ * @subcomponent: subcomponent to match (I915_COMPONENT_PXP)
+ * @data: compare data (mei pxp device)
+ *
+ * Return:
+ * * 1 - if components match
+ * * 0 - otherwise
+ */
+static int mei_pxp_component_match(struct device *dev, int subcomponent,
+ void *data)
+{
+ struct device *base = data;
+
+ if (strcmp(dev->driver->name, "i915") ||
+ subcomponent != I915_COMPONENT_PXP)
+ return 0;
+
+ base = base->parent;
+ if (!base)
+ return 0;
+
+ base = base->parent;
+ dev = dev->parent;
+
+ return (base && dev && dev == base);
+}
+
+static int mei_pxp_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct i915_pxp_component *comp_master;
+ struct component_match *master_match;
+ int ret;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret);
+ goto enable_err_exit;
+ }
+
+ comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL);
+ if (!comp_master) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ master_match = NULL;
+ component_match_add_typed(&cldev->dev, &master_match,
+ mei_pxp_component_match, &cldev->dev);
+ if (IS_ERR_OR_NULL(master_match)) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ mei_cldev_set_drvdata(cldev, comp_master);
+ ret = component_master_add_with_match(&cldev->dev,
+ &mei_component_master_ops,
+ master_match);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "Master comp add failed %d\n", ret);
+ goto err_exit;
+ }
+
+ return 0;
+
+err_exit:
+ mei_cldev_set_drvdata(cldev, NULL);
+ kfree(comp_master);
+ mei_cldev_disable(cldev);
+enable_err_exit:
+ return ret;
+}
+
+static void mei_pxp_remove(struct mei_cl_device *cldev)
+{
+ struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
+ int ret;
+
+ component_master_del(&cldev->dev, &mei_component_master_ops);
+ kfree(comp_master);
+ mei_cldev_set_drvdata(cldev, NULL);
+
+ ret = mei_cldev_disable(cldev);
+ if (ret)
+ dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
+}
+
+/* fbf6fcf1-96cf-4e2e-a6a6-1bab8cbe36b1 : PAVP GUID */
+#define MEI_GUID_PXP GUID_INIT(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
+ 0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
+
+static struct mei_cl_device_id mei_pxp_tbl[] = {
+ { .uuid = MEI_GUID_PXP, .version = MEI_CL_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_pxp_tbl);
+
+static struct mei_cl_driver mei_pxp_driver = {
+ .id_table = mei_pxp_tbl,
+ .name = KBUILD_MODNAME,
+ .probe = mei_pxp_probe,
+ .remove = mei_pxp_remove,
+};
+
+module_mei_cl_driver(mei_pxp_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MEI PXP");
diff --git a/drivers/misc/mei/pxp/mei_pxp.h b/drivers/misc/mei/pxp/mei_pxp.h
new file mode 100644
index 000000000000..e7b15373fefd
--- /dev/null
+++ b/drivers/misc/mei/pxp/mei_pxp.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Authors:
+ * Vitaly Lubart <vitaly.lubart@intel.com>
+ */
+
+#ifndef __MEI_PXP_H__
+#define __MEI_PXP_H__
+
+/* me_pxp_status: Enumeration of all PXP Status Codes */
+enum me_pxp_status {
+ ME_PXP_STATUS_SUCCESS = 0x0000,
+};
+
+#endif /* __MEI_PXP_H__ */
diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
index be4016084979..eb97167c03fb 100644
--- a/drivers/misc/pvpanic/pvpanic-mmio.c
+++ b/drivers/misc/pvpanic/pvpanic-mmio.c
@@ -24,8 +24,7 @@ MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
MODULE_DESCRIPTION("pvpanic-mmio device driver");
MODULE_LICENSE("GPL");
-static ssize_t capability_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
@@ -33,14 +32,14 @@ static ssize_t capability_show(struct device *dev,
}
static DEVICE_ATTR_RO(capability);
-static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
return sysfs_emit(buf, "%x\n", pi->events);
}
-static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+static ssize_t events_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
@@ -100,7 +99,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
pi->base = base;
pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
- /* initlize capability by RDPT */
+ /* initialize capability by RDPT */
pi->capability &= ioread8(base);
pi->events = pi->capability;
diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
index 741116b3d995..07eddb5ea30f 100644
--- a/drivers/misc/pvpanic/pvpanic-pci.c
+++ b/drivers/misc/pvpanic/pvpanic-pci.c
@@ -19,16 +19,10 @@
#define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011
MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
-MODULE_DESCRIPTION("pvpanic device driver ");
+MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");
-static const struct pci_device_id pvpanic_pci_id_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_PVPANIC)},
- {}
-};
-
-static ssize_t capability_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
@@ -36,14 +30,14 @@ static ssize_t capability_show(struct device *dev,
}
static DEVICE_ATTR_RO(capability);
-static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
return sysfs_emit(buf, "%x\n", pi->events);
}
-static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+static ssize_t events_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
@@ -70,8 +64,7 @@ static struct attribute *pvpanic_pci_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(pvpanic_pci_dev);
-static int pvpanic_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct pvpanic_instance *pi;
void __iomem *base;
@@ -99,6 +92,12 @@ static int pvpanic_pci_probe(struct pci_dev *pdev,
return devm_pvpanic_probe(&pdev->dev, pi);
}
+static const struct pci_device_id pvpanic_pci_id_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_PVPANIC)},
+ {}
+};
+MODULE_DEVICE_TABLE(pci, pvpanic_pci_id_tbl);
+
static struct pci_driver pvpanic_pci_driver = {
.name = "pvpanic-pci",
.id_table = pvpanic_pci_id_tbl,
@@ -107,7 +106,4 @@ static struct pci_driver pvpanic_pci_driver = {
.dev_groups = pvpanic_pci_dev_groups,
},
};
-
-MODULE_DEVICE_TABLE(pci, pvpanic_pci_id_tbl);
-
module_pci_driver(pvpanic_pci_driver);
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
index bb7aa6368538..4b8f1c7d726d 100644
--- a/drivers/misc/pvpanic/pvpanic.c
+++ b/drivers/misc/pvpanic/pvpanic.c
@@ -23,7 +23,7 @@
#include "pvpanic.h"
MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
-MODULE_DESCRIPTION("pvpanic device driver ");
+MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");
static struct list_head pvpanic_list;
@@ -43,8 +43,7 @@ pvpanic_send_event(unsigned int event)
}
static int
-pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
- void *unused)
+pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, void *unused)
{
unsigned int event = PVPANIC_PANICKED;
@@ -58,7 +57,7 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
static struct notifier_block pvpanic_panic_nb = {
.notifier_call = pvpanic_panic_notify,
- .priority = 1, /* let this called before broken drm_fb_helper */
+	.priority = 1, /* let this be called before broken drm_fb_helper() */
};
static void pvpanic_remove(void *param)
@@ -96,18 +95,15 @@ static int pvpanic_init(void)
INIT_LIST_HEAD(&pvpanic_list);
spin_lock_init(&pvpanic_lock);
- atomic_notifier_chain_register(&panic_notifier_list,
- &pvpanic_panic_nb);
+ atomic_notifier_chain_register(&panic_notifier_list, &pvpanic_panic_nb);
return 0;
}
+module_init(pvpanic_init);
static void pvpanic_exit(void)
{
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &pvpanic_panic_nb);
+ atomic_notifier_chain_unregister(&panic_notifier_list, &pvpanic_panic_nb);
}
-
-module_init(pvpanic_init);
module_exit(pvpanic_exit);
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 2508f83bdc3f..dab7b92db790 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -514,6 +514,7 @@ static const struct net_device_ops xpnet_netdev_ops = {
static int __init
xpnet_init(void)
{
+ u8 addr[ETH_ALEN];
int result;
if (!is_uv_system())
@@ -545,15 +546,17 @@ xpnet_init(void)
xpnet_device->min_mtu = XPNET_MIN_MTU;
xpnet_device->max_mtu = XPNET_MAX_MTU;
+ memset(addr, 0, sizeof(addr));
/*
* Multicast assumes the LSB of the first octet is set for multicast
* MAC addresses. We chose the first octet of the MAC to be unlikely
* to collide with any vendor's officially issued MAC.
*/
- xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */
+ addr[0] = 0x02; /* locally administered, no OUI */
- xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
- xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);
+ addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
+ addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);
+ eth_hw_addr_set(xpnet_device, addr);
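This follows the tree-wide move toward a const netdev->dev_addr: the address is staged in a local buffer and published once through eth_hw_addr_set() rather than being written into the netdev in place.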
/*
* ether_setup() sets this to a multicast device. We are
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 228f2eb1d476..017c2f7d6287 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -311,7 +311,7 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
int pci_dev_busy = 0;
int rc;
- rc = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
if (rc)
return rc;
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 52656fc87e99..a3098fea3bf7 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -176,8 +176,7 @@ struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets,
{
struct tifm_adapter *fm;
- fm = kzalloc(sizeof(struct tifm_adapter)
- + sizeof(struct tifm_dev*) * num_sockets, GFP_KERNEL);
+ fm = kzalloc(struct_size(fm, sockets, num_sockets), GFP_KERNEL);
if (fm) {
fm->dev.class = &tifm_adapter_class;
fm->dev.parent = dev;
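struct_size() folds the trailing sockets[] flexible array into the allocation size with overflow checking; a hedged sketch of the open-coded arithmetic it replaces:

	/* minus the saturation to SIZE_MAX that struct_size() applies
	 * if the multiply or the add overflows: */
	size_t sz = sizeof(struct tifm_adapter) +
		    num_sockets * sizeof(struct tifm_dev *);
	fm = kzalloc(sz, GFP_KERNEL);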
@@ -293,14 +292,15 @@ EXPORT_SYMBOL(tifm_has_ms_pif);
int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
int direction)
{
- return pci_map_sg(to_pci_dev(sock->dev.parent), sg, nents, direction);
+ return dma_map_sg(&to_pci_dev(sock->dev.parent)->dev, sg, nents,
+ direction);
}
EXPORT_SYMBOL(tifm_map_sg);
void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
int direction)
{
- pci_unmap_sg(to_pci_dev(sock->dev.parent), sg, nents, direction);
+ dma_unmap_sg(&to_pci_dev(sock->dev.parent)->dev, sg, nents, direction);
}
EXPORT_SYMBOL(tifm_unmap_sg);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 431af5e8be2f..90e1bcd03b46 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -258,7 +258,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
mq = &md->queue;
/* Dispatch locking to the block layer */
- req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
count = PTR_ERR(req);
goto out_put;
@@ -266,7 +266,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
blk_execute_rq(NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
- blk_put_request(req);
+ blk_mq_free_request(req);
if (!ret) {
pr_info("%s: Locking boot partition ro until next power on\n",
@@ -646,7 +646,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
* Dispatch the ioctl() into the block request queue.
*/
mq = &md->queue;
- req = blk_get_request(mq->queue,
+ req = blk_mq_alloc_request(mq->queue,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -660,7 +660,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
blk_execute_rq(NULL, req, 0);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
- blk_put_request(req);
+ blk_mq_free_request(req);
cmd_done:
kfree(idata->buf);
@@ -716,7 +716,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
* Dispatch the ioctl()s into the block request queue.
*/
mq = &md->queue;
- req = blk_get_request(mq->queue,
+ req = blk_mq_alloc_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -733,7 +733,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
for (i = 0; i < num_of_cmds && !err; i++)
err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
- blk_put_request(req);
+ blk_mq_free_request(req);
cmd_err:
for (i = 0; i < num_of_cmds; i++) {
@@ -2442,9 +2442,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
/* used in ->open, must be set before add_disk: */
if (area_type == MMC_BLK_DATA_AREA_MAIN)
dev_set_drvdata(&card->dev, md);
- device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
+ ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
+ if (ret)
+ goto err_cleanup_queue;
return md;
+ err_cleanup_queue:
+ blk_cleanup_queue(md->disk->queue);
+ blk_mq_free_tag_set(&md->queue.tag_set);
err_kfree:
kfree(md);
out:
@@ -2730,7 +2735,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
int ret;
/* Ask the block layer about the card status */
- req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
@@ -2740,7 +2745,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
*val = ret;
ret = 0;
}
- blk_put_request(req);
+ blk_mq_free_request(req);
return ret;
}
@@ -2766,7 +2771,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
return -ENOMEM;
/* Ask the block layer for the EXT CSD */
- req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_free;
@@ -2775,7 +2780,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
blk_execute_rq(NULL, req, 0);
err = req_to_mmc_queue_req(req)->drv_op_result;
- blk_put_request(req);
+ blk_mq_free_request(req);
if (err) {
pr_err("FAILED %d\n", err);
goto out_free;
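The block.c conversion above is mechanical: blk_get_request()/blk_put_request() became blk_mq_alloc_request()/blk_mq_free_request() with unchanged semantics. A hedged sketch of the resulting alloc/dispatch/free shape, with the mmc-specific request setup elided:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static int example_issue_drv_op(struct request_queue *q)
{
	struct request *req;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* ... fill driver-private request data here, then dispatch ... */
	blk_execute_rq(NULL, req, 0);

	blk_mq_free_request(req);
	return 0;
}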
diff --git a/drivers/mmc/core/crypto.c b/drivers/mmc/core/crypto.c
index 67557808cada..fec4fbf16a5b 100644
--- a/drivers/mmc/core/crypto.c
+++ b/drivers/mmc/core/crypto.c
@@ -16,13 +16,13 @@ void mmc_crypto_set_initial_state(struct mmc_host *host)
{
/* Reset might clear all keys, so reprogram all the keys. */
if (host->caps2 & MMC_CAP2_CRYPTO)
- blk_ksm_reprogram_all_keys(&host->ksm);
+ blk_crypto_reprogram_all_keys(&host->crypto_profile);
}
void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
{
if (host->caps2 & MMC_CAP2_CRYPTO)
- blk_ksm_register(&host->ksm, q);
+ blk_crypto_register(&host->crypto_profile, q);
}
EXPORT_SYMBOL_GPL(mmc_crypto_setup_queue);
@@ -30,12 +30,15 @@ void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq)
{
struct request *req = mmc_queue_req_to_req(mqrq);
struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct blk_crypto_keyslot *keyslot;
if (!req->crypt_ctx)
return;
mrq->crypto_ctx = req->crypt_ctx;
- if (req->crypt_keyslot)
- mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot);
+
+ keyslot = req->crypt_keyslot;
+ if (keyslot)
+ mrq->crypto_key_slot = blk_crypto_keyslot_index(keyslot);
}
EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req);
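The keyslot-manager API used above was renamed wholesale to blk-crypto-profile; the container_of() pattern survives with new names. A sketch under the assumption that the host structure embeds the profile directly, as mmc_host does:

#include <linux/kernel.h>
#include <linux/blk-crypto-profile.h>

struct example_host {
	/* was: struct blk_keyslot_manager ksm */
	struct blk_crypto_profile crypto_profile;
};

static struct example_host *
host_from_profile(struct blk_crypto_profile *profile)
{
	return container_of(profile, struct example_host, crypto_profile);
}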
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 29e58ffae379..b1c1716dacf0 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1224,6 +1224,14 @@ static int mmc_select_hs400(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
mmc_set_bus_speed(card);
+ if (host->ops->execute_hs400_tuning) {
+ mmc_retune_disable(host);
+ err = host->ops->execute_hs400_tuning(host, card);
+ mmc_retune_enable(host);
+ if (err)
+ goto out_err;
+ }
+
if (host->ops->hs400_complete)
host->ops->hs400_complete(host);
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index ae25ffc2e870..e5e94567a9a9 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -38,7 +38,6 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_can_ext_csd(struct mmc_card *card);
-int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4646b7a03db6..c9db24e16af1 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 05e907451df9..dd2a4b6ab6ad 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -39,24 +39,24 @@ static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
int mmc_gpio_alloc(struct mmc_host *host)
{
- struct mmc_gpio *ctx = devm_kzalloc(host->parent,
- sizeof(*ctx), GFP_KERNEL);
-
- if (ctx) {
- ctx->cd_debounce_delay_ms = 200;
- ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL,
- "%s cd", dev_name(host->parent));
- if (!ctx->cd_label)
- return -ENOMEM;
- ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL,
- "%s ro", dev_name(host->parent));
- if (!ctx->ro_label)
- return -ENOMEM;
- host->slot.handler_priv = ctx;
- host->slot.cd_irq = -EINVAL;
- }
+ const char *devname = dev_name(host->parent);
+ struct mmc_gpio *ctx;
+
+ ctx = devm_kzalloc(host->parent, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->cd_debounce_delay_ms = 200;
+ ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s cd", devname);
+ if (!ctx->cd_label)
+ return -ENOMEM;
+ ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s ro", devname);
+ if (!ctx->ro_label)
+ return -ENOMEM;
+ host->slot.handler_priv = ctx;
+ host->slot.cd_irq = -EINVAL;
- return ctx ? 0 : -ENOMEM;
+ return 0;
}
int mmc_gpio_get_ro(struct mmc_host *host)
@@ -178,6 +178,10 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
if (IS_ERR(desc))
return PTR_ERR(desc);
+ /* Update default label if no con_id provided */
+ if (!con_id)
+ gpiod_set_consumer_name(desc, ctx->cd_label);
+
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
@@ -226,6 +230,10 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
if (IS_ERR(desc))
return PTR_ERR(desc);
+ /* Update default label if no con_id provided */
+ if (!con_id)
+ gpiod_set_consumer_name(desc, ctx->ro_label);
+
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 95b3511b0560..5af8494c31b5 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -315,15 +315,17 @@ config MMC_SDHCI_TEGRA
If unsure, say N.
config MMC_SDHCI_S3C
- tristate "SDHCI support on Samsung S3C SoC"
+ tristate "SDHCI support on Samsung S3C/S5P/Exynos SoC"
depends on MMC_SDHCI
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
This selects the Secure Digital Host Controller Interface (SDHCI)
often referred to as the HSMMC block in some of the Samsung S3C
- range of SoC.
+ (S3C2416, S3C2443, S3C6410), S5Pv210 and Exynos (Exynos4210,
+ Exynos4412) SoCs.
- If you have a controller with this interface, say Y or M here.
+ If you have a controller with this interface (therefore you build for
+ such a Samsung SoC), say Y or M here.
If unsure, say N.
@@ -506,7 +508,7 @@ config MMC_OMAP_HS
config MMC_WBSD
tristate "Winbond W83L51xD SD/MMC Card Interface support"
- depends on ISA_DMA_API
+ depends on ISA_DMA_API && !M68K
help
This selects the Winbond(R) W83L51xD Secure digital and
Multimedia card Interface.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 14004cc09aaa..ea36d379bd3c 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o \
sdhci-pci-dwc-mshc.o sdhci-pci-gli.o
-obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index 38559a956330..b0d30c35c390 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -282,6 +282,9 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+
mmc->cqe_on = true;
if (cq_host->ops->enable)
@@ -899,8 +902,8 @@ static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
spin_unlock_irqrestore(&cq_host->lock, flags);
if (timed_out) {
- pr_err("%s: cqhci: timeout for tag %d\n",
- mmc_hostname(mmc), tag);
+ pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
+ mmc_hostname(mmc), tag, cq_host->qcnt);
cqhci_dumpregs(cq_host);
}
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c
index 6419cfbb4ab7..d5f4b6972f63 100644
--- a/drivers/mmc/host/cqhci-crypto.c
+++ b/drivers/mmc/host/cqhci-crypto.c
@@ -6,7 +6,7 @@
*/
#include <linux/blk-crypto.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#include <linux/mmc/host.h>
#include "cqhci-crypto.h"
@@ -23,9 +23,10 @@ static const struct cqhci_crypto_alg_entry {
};
static inline struct cqhci_host *
-cqhci_host_from_ksm(struct blk_keyslot_manager *ksm)
+cqhci_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
- struct mmc_host *mmc = container_of(ksm, struct mmc_host, ksm);
+ struct mmc_host *mmc =
+ container_of(profile, struct mmc_host, crypto_profile);
return mmc->cqe_private;
}
@@ -57,12 +58,12 @@ static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
return 0;
}
-static int cqhci_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
+static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm);
+ struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile);
const union cqhci_crypto_cap_entry *ccap_array =
cq_host->crypto_cap_array;
const struct cqhci_crypto_alg_entry *alg =
@@ -115,11 +116,11 @@ static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
return cqhci_crypto_program_key(cq_host, &cfg, slot);
}
-static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
+static int cqhci_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm);
+ struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile);
return cqhci_crypto_clear_keyslot(cq_host, slot);
}
@@ -132,7 +133,7 @@ static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
* "enabled" when these are called, i.e. CQHCI_ENABLE might not be set in the
* CQHCI_CFG register. But the hardware allows that.
*/
-static const struct blk_ksm_ll_ops cqhci_ksm_ops = {
+static const struct blk_crypto_ll_ops cqhci_crypto_ops = {
.keyslot_program = cqhci_crypto_keyslot_program,
.keyslot_evict = cqhci_crypto_keyslot_evict,
};
@@ -157,8 +158,8 @@ cqhci_find_blk_crypto_mode(union cqhci_crypto_cap_entry cap)
*
* If the driver previously set MMC_CAP2_CRYPTO and the CQE declares
* CQHCI_CAP_CS, initialize the crypto support. This involves reading the
- * crypto capability registers, initializing the keyslot manager, clearing all
- * keyslots, and enabling 128-bit task descriptors.
+ * crypto capability registers, initializing the blk_crypto_profile, clearing
+ * all keyslots, and enabling 128-bit task descriptors.
*
* Return: 0 if crypto was initialized or isn't supported; whether
* MMC_CAP2_CRYPTO remains set indicates which one of those cases it is.
@@ -168,7 +169,7 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
struct device *dev = mmc_dev(mmc);
- struct blk_keyslot_manager *ksm = &mmc->ksm;
+ struct blk_crypto_profile *profile = &mmc->crypto_profile;
unsigned int num_keyslots;
unsigned int cap_idx;
enum blk_crypto_mode_num blk_mode_num;
@@ -199,15 +200,15 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
*/
num_keyslots = cq_host->crypto_capabilities.config_count + 1;
- err = devm_blk_ksm_init(dev, ksm, num_keyslots);
+ err = devm_blk_crypto_profile_init(dev, profile, num_keyslots);
if (err)
goto out;
- ksm->ksm_ll_ops = cqhci_ksm_ops;
- ksm->dev = dev;
+ profile->ll_ops = cqhci_crypto_ops;
+ profile->dev = dev;
/* Unfortunately, CQHCI crypto only supports 32 DUN bits. */
- ksm->max_dun_bytes_supported = 4;
+ profile->max_dun_bytes_supported = 4;
/*
* Cache all the crypto capabilities and advertise the supported crypto
@@ -223,7 +224,7 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
cq_host->crypto_cap_array[cap_idx]);
if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
continue;
- ksm->crypto_modes_supported[blk_mode_num] |=
+ profile->modes_supported[blk_mode_num] |=
cq_host->crypto_cap_array[cap_idx].sdus_mask * 512;
}
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 0c75810812a0..c2dd29ef45c6 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -442,14 +442,14 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
return sample;
}
-static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
+static s8 dw_mci_exynos_get_best_clksmpl(u8 candidates)
{
const u8 iter = 8;
u8 __c;
s8 i, loc = -1;
for (i = 0; i < iter; i++) {
- __c = ror8(candiates, i);
+ __c = ror8(candidates, i);
if ((__c & 0xc7) == 0xc7) {
loc = i;
goto out;
@@ -457,13 +457,25 @@ static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
}
for (i = 0; i < iter; i++) {
- __c = ror8(candiates, i);
+ __c = ror8(candidates, i);
if ((__c & 0x83) == 0x83) {
loc = i;
goto out;
}
}
+ /*
+ * If there is no candidate value, then it needs to return -EIO.
+ * If there are candidate values but the best clock sample value is
+ * not found, then use the first candidate clock sample value.
+ */
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candidates, i);
+ if ((__c & 0x1) == 0x1) {
+ loc = i;
+ goto out;
+ }
+ }
out:
return loc;
}
@@ -473,7 +485,7 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
struct dw_mci *host = slot->host;
struct dw_mci_exynos_priv_data *priv = host->priv;
struct mmc_host *mmc = slot->mmc;
- u8 start_smpl, smpl, candiates = 0;
+ u8 start_smpl, smpl, candidates = 0;
s8 found;
int ret = 0;
@@ -484,16 +496,18 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
smpl = dw_mci_exynos_move_next_clksmpl(host);
if (!mmc_send_tuning(mmc, opcode, NULL))
- candiates |= (1 << smpl);
+ candidates |= (1 << smpl);
} while (start_smpl != smpl);
- found = dw_mci_exynos_get_best_clksmpl(candiates);
+ found = dw_mci_exynos_get_best_clksmpl(candidates);
if (found >= 0) {
dw_mci_exynos_set_clksmpl(host, found);
priv->tuned_sample = found;
} else {
ret = -EIO;
+ dev_warn(&mmc->class_dev,
+ "There is no candidates value about clksmpl!\n");
}
return ret;
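The tuning hunks above search a bitmask of working clock-sample positions for progressively weaker neighborhood patterns: 0xc7 first, then 0x83, then (the new fallback) any single set bit. A small userspace model of that search, runnable as plain C:

#include <stdint.h>
#include <stdio.h>

static uint8_t ror8(uint8_t v, unsigned int n)
{
	n &= 7;
	return n ? (uint8_t)((v >> n) | (v << (8 - n))) : v;
}

static int best_clksmpl(uint8_t candidates)
{
	static const uint8_t masks[] = { 0xc7, 0x83, 0x01 };
	unsigned int m, i;

	/* try each mask in turn over all eight rotations */
	for (m = 0; m < 3; m++)
		for (i = 0; i < 8; i++)
			if ((ror8(candidates, i) & masks[m]) == masks[m])
				return i;
	return -1;	/* no candidate at all -> caller returns -EIO */
}

int main(void)
{
	printf("%d\n", best_clksmpl(0x1c));	/* matches 0x83 at rotation 3 */
	return 0;
}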
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 380f9aa56eb2..d977f34f6b55 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1611,37 +1611,32 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
usleep_range(200, 300);
}
-static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
+static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
{
- struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+ u32 clk_en_a_old;
+ u32 clk_en_a;
/*
* Low power mode will stop the card clock when idle. According to the
* description of the CLKENA register we should disable low power mode
* for SDIO cards if we need SDIO interrupts to work.
*/
- if (mmc->caps & MMC_CAP_SDIO_IRQ) {
- const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
- u32 clk_en_a_old;
- u32 clk_en_a;
- clk_en_a_old = mci_readl(host, CLKENA);
-
- if (card->type == MMC_TYPE_SDIO ||
- card->type == MMC_TYPE_SD_COMBO) {
- set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
- clk_en_a = clk_en_a_old & ~clken_low_pwr;
- } else {
- clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
- clk_en_a = clk_en_a_old | clken_low_pwr;
- }
+ clk_en_a_old = mci_readl(host, CLKENA);
+ if (prepare) {
+ set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old & ~clken_low_pwr;
+ } else {
+ clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old | clken_low_pwr;
+ }
- if (clk_en_a != clk_en_a_old) {
- mci_writel(host, CLKENA, clk_en_a);
- mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
- SDMMC_CMD_PRV_DAT_WAIT, 0);
- }
+ if (clk_en_a != clk_en_a_old) {
+ mci_writel(host, CLKENA, clk_en_a);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
+ 0);
}
}
@@ -1669,6 +1664,7 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ dw_mci_prepare_sdio_irq(slot, enb);
__dw_mci_enable_sdio_irq(slot, enb);
/* Avoid runtime suspending the device when SDIO IRQ is enabled */
@@ -1790,7 +1786,6 @@ static const struct mmc_host_ops dw_mci_ops = {
.execute_tuning = dw_mci_execute_tuning,
.card_busy = dw_mci_card_busy,
.start_signal_voltage_switch = dw_mci_switch_voltage,
- .init_card = dw_mci_init_card,
.prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
};
@@ -2086,7 +2081,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
- if (err != -ETIMEDOUT) {
+ if (err != -ETIMEDOUT &&
+ host->dir_status == DW_MCI_RECV_STATUS) {
state = STATE_SENDING_DATA;
continue;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 3765e2f4ad98..c9cacd4d5b22 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1394,6 +1394,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
} else if (host->variant->busy_timeout && busy_resp &&
status & MCI_DATATIMEOUT) {
cmd->error = -ETIMEDOUT;
+ /*
+ * This will wake up mmci_irq_thread() which will issue
+ * a hardware reset of the MMCI block.
+ */
host->irq_action = IRQ_WAKE_THREAD;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 6c9d38132f74..16d1c7a43d33 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -566,37 +566,37 @@ static int moxart_probe(struct platform_device *pdev)
if (!mmc) {
dev_err(dev, "mmc_alloc_host failed\n");
ret = -ENOMEM;
- goto out;
+ goto out_mmc;
}
ret = of_address_to_resource(node, 0, &res_mmc);
if (ret) {
dev_err(dev, "of_address_to_resource failed\n");
- goto out;
+ goto out_mmc;
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
dev_err(dev, "irq_of_parse_and_map failed\n");
ret = -EINVAL;
- goto out;
+ goto out_mmc;
}
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- goto out;
+ goto out_mmc;
}
reg_mmc = devm_ioremap_resource(dev, &res_mmc);
if (IS_ERR(reg_mmc)) {
ret = PTR_ERR(reg_mmc);
- goto out;
+ goto out_mmc;
}
ret = mmc_of_parse(mmc);
if (ret)
- goto out;
+ goto out_mmc;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -621,6 +621,14 @@ static int moxart_probe(struct platform_device *pdev)
ret = -EPROBE_DEFER;
goto out;
}
+ if (!IS_ERR(host->dma_chan_tx)) {
+ dma_release_channel(host->dma_chan_tx);
+ host->dma_chan_tx = NULL;
+ }
+ if (!IS_ERR(host->dma_chan_rx)) {
+ dma_release_channel(host->dma_chan_rx);
+ host->dma_chan_rx = NULL;
+ }
dev_dbg(dev, "PIO mode transfer enabled\n");
host->have_dma = false;
} else {
@@ -675,6 +683,11 @@ static int moxart_probe(struct platform_device *pdev)
return 0;
out:
+ if (!IS_ERR_OR_NULL(host->dma_chan_tx))
+ dma_release_channel(host->dma_chan_tx);
+ if (!IS_ERR_OR_NULL(host->dma_chan_rx))
+ dma_release_channel(host->dma_chan_rx);
+out_mmc:
if (mmc)
mmc_free_host(mmc);
return ret;
@@ -687,9 +700,9 @@ static int moxart_remove(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, NULL);
- if (!IS_ERR(host->dma_chan_tx))
+ if (!IS_ERR_OR_NULL(host->dma_chan_tx))
dma_release_channel(host->dma_chan_tx);
- if (!IS_ERR(host->dma_chan_rx))
+ if (!IS_ERR_OR_NULL(host->dma_chan_rx))
dma_release_channel(host->dma_chan_rx);
mmc_remove_host(mmc);
mmc_free_host(mmc);
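The moxart changes above exist because a DMA channel pointer can hold an ERR_PTR (the request failed) or NULL (already released), so teardown must guard with IS_ERR_OR_NULL() rather than IS_ERR(). A minimal sketch of the guarded release:

#include <linux/dmaengine.h>
#include <linux/err.h>

static void example_release_channels(struct dma_chan *tx, struct dma_chan *rx)
{
	/* release only channels that were actually acquired */
	if (!IS_ERR_OR_NULL(tx))
		dma_release_channel(tx);
	if (!IS_ERR_OR_NULL(rx))
		dma_release_channel(rx);
}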
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4dfc246c5f95..943940b44e83 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/of_address.h>
@@ -258,6 +259,7 @@
#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */
+#define PAD_DS_TUNE_DLY_SEL (0x1 << 0) /* RW */
#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
@@ -301,6 +303,11 @@
#define PAD_CMD_RD_RXDLY_SEL (0x1 << 11) /* RW */
#define PAD_CMD_TX_DLY (0x1f << 12) /* RW */
+/* EMMC50_PAD_DS_TUNE mask */
+#define PAD_DS_DLY_SEL (0x1 << 16) /* RW */
+#define PAD_DS_DLY1 (0x1f << 10) /* RW */
+#define PAD_DS_DLY3 (0x1f << 0) /* RW */
+
#define REQ_CMD_EIO (0x1 << 0)
#define REQ_CMD_TMO (0x1 << 1)
#define REQ_DAT_ERR (0x1 << 2)
@@ -448,11 +455,13 @@ struct msdc_host {
bool vqmmc_enabled;
u32 latch_ck;
u32 hs400_ds_delay;
+ u32 hs400_ds_dly3;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
bool hs400_cmd_resp_sel_rising;
/* cmd response sample selection for HS400 */
bool hs400_mode; /* current eMMC will run at hs400 mode */
+ bool hs400_tuning; /* hs400 mode online tuning */
bool internal_cd; /* Use internal card-detect logic */
bool cqhci; /* support eMMC hw cmdq */
struct msdc_save_para save_para; /* used when gate HCLK */
@@ -961,7 +970,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
}
static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
- struct mmc_request *mrq, struct mmc_command *cmd)
+ struct mmc_command *cmd)
{
u32 resp;
@@ -997,7 +1006,7 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
* stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
*/
u32 opcode = cmd->opcode;
- u32 resp = msdc_cmd_find_resp(host, mrq, cmd);
+ u32 resp = msdc_cmd_find_resp(host, cmd);
u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);
host->cmd_rsp = resp;
@@ -1043,8 +1052,8 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
return rawcmd;
}
-static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq,
- struct mmc_command *cmd, struct mmc_data *data)
+static void msdc_start_data(struct msdc_host *host, struct mmc_command *cmd,
+ struct mmc_data *data)
{
bool read;
@@ -1112,8 +1121,7 @@ static void msdc_recheck_sdio_irq(struct msdc_host *host)
}
}
-static void msdc_track_cmd_data(struct msdc_host *host,
- struct mmc_command *cmd, struct mmc_data *data)
+static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd)
{
if (host->error)
dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
@@ -1134,7 +1142,7 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
- msdc_track_cmd_data(host, mrq->cmd, mrq->data);
+ msdc_track_cmd_data(host, mrq->cmd);
if (mrq->data)
msdc_unprepare_data(host, mrq->data);
if (host->error)
@@ -1190,7 +1198,8 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
if (events & MSDC_INT_CMDTMO ||
(cmd->opcode != MMC_SEND_TUNING_BLOCK &&
- cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200))
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
+ !host->hs400_tuning))
/*
* should not clear fifo/interrupt as the tune data
* may have already come when cmd19/cmd21 gets response
@@ -1287,7 +1296,8 @@ static void msdc_cmd_next(struct msdc_host *host,
if ((cmd->error &&
!(cmd->error == -EILSEQ &&
(cmd->opcode == MMC_SEND_TUNING_BLOCK ||
- cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) ||
+ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200 ||
+ host->hs400_tuning))) ||
(mrq->sbc && mrq->sbc->error))
msdc_request_done(host, mrq);
else if (cmd == mrq->sbc)
@@ -1295,7 +1305,7 @@ static void msdc_cmd_next(struct msdc_host *host,
else if (!cmd->data)
msdc_request_done(host, mrq);
else
- msdc_start_data(host, mrq, cmd, cmd->data);
+ msdc_start_data(host, cmd, cmd->data);
}
static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -2251,6 +2261,67 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
return 0;
}
+static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ struct msdc_delay_phase dly1_delay;
+ u32 val, result_dly1 = 0;
+ u8 *ext_csd;
+ int i, ret;
+
+ if (host->top_base) {
+ sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY_SEL);
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY3, host->hs400_ds_dly3);
+ } else {
+ sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
+ }
+
+ host->hs400_tuning = true;
+ for (i = 0; i < PAD_DELAY_MAX; i++) {
+ if (host->top_base)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY1, i);
+ else
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY1, i);
+ ret = mmc_get_ext_csd(card, &ext_csd);
+ if (!ret)
+ result_dly1 |= (1 << i);
+ }
+ host->hs400_tuning = false;
+
+ dly1_delay = get_best_delay(host, result_dly1);
+ if (dly1_delay.maxlen == 0) {
+ dev_err(host->dev, "Failed to get DLY1 delay!\n");
+ goto fail;
+ }
+ if (host->top_base)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY1, dly1_delay.final_phase);
+ else
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY1, dly1_delay.final_phase);
+
+ if (host->top_base)
+ val = readl(host->top_base + EMMC50_PAD_DS_TUNE);
+ else
+ val = readl(host->base + PAD_DS_TUNE);
+
+ dev_info(host->dev, "Fianl PAD_DS_TUNE: 0x%x\n", val);
+
+ return 0;
+
+fail:
+ dev_err(host->dev, "Failed to tuning DS pin delay!\n");
+ return -EIO;
+}
+
static void msdc_hw_reset(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
@@ -2330,6 +2401,7 @@ static void msdc_cqe_enable(struct mmc_host *mmc)
static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
{
struct msdc_host *host = mmc_priv(mmc);
+ unsigned int val = 0;
/* disable cmdq irq */
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ);
@@ -2339,6 +2411,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
+ if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
+ !(val & MSDC_DMA_CFG_STS), 1, 3000)))
+ return;
msdc_reset_hw(host);
}
}
@@ -2377,6 +2452,7 @@ static const struct mmc_host_ops mt_msdc_ops = {
.card_busy = msdc_card_busy,
.execute_tuning = msdc_execute_tuning,
.prepare_hs400_tuning = msdc_prepare_hs400_tuning,
+ .execute_hs400_tuning = msdc_execute_hs400_tuning,
.hw_reset = msdc_hw_reset,
};
@@ -2396,6 +2472,9 @@ static void msdc_of_property_parse(struct platform_device *pdev,
of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
&host->hs400_ds_delay);
+ of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-ds-dly3",
+ &host->hs400_ds_dly3);
+
of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay",
&host->hs200_cmd_int_delay);
@@ -2577,6 +2656,25 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->dma_mask = DMA_BIT_MASK(32);
mmc_dev(mmc)->dma_mask = &host->dma_mask;
+ host->timeout_clks = 3 * 1048576;
+ host->dma.gpd = dma_alloc_coherent(&pdev->dev,
+ 2 * sizeof(struct mt_gpdma_desc),
+ &host->dma.gpd_addr, GFP_KERNEL);
+ host->dma.bd = dma_alloc_coherent(&pdev->dev,
+ MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+ &host->dma.bd_addr, GFP_KERNEL);
+ if (!host->dma.gpd || !host->dma.bd) {
+ ret = -ENOMEM;
+ goto release_mem;
+ }
+ msdc_init_gpd_bd(host, &host->dma);
+ INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
+ spin_lock_init(&host->lock);
+
+ platform_set_drvdata(pdev, mmc);
+ msdc_ungate_clock(host);
+ msdc_init_hw(host);
+
if (mmc->caps2 & MMC_CAP2_CQE) {
host->cq_host = devm_kzalloc(mmc->parent,
sizeof(*host->cq_host),
@@ -2597,25 +2695,6 @@ static int msdc_drv_probe(struct platform_device *pdev)
mmc->max_seg_size = 64 * 1024;
}
- host->timeout_clks = 3 * 1048576;
- host->dma.gpd = dma_alloc_coherent(&pdev->dev,
- 2 * sizeof(struct mt_gpdma_desc),
- &host->dma.gpd_addr, GFP_KERNEL);
- host->dma.bd = dma_alloc_coherent(&pdev->dev,
- MAX_BD_NUM * sizeof(struct mt_bdma_desc),
- &host->dma.bd_addr, GFP_KERNEL);
- if (!host->dma.gpd || !host->dma.bd) {
- ret = -ENOMEM;
- goto release_mem;
- }
- msdc_init_gpd_bd(host, &host->dma);
- INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
- spin_lock_init(&host->lock);
-
- platform_set_drvdata(pdev, mmc);
- msdc_ungate_clock(host);
- msdc_init_hw(host);
-
ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
IRQF_TRIGGER_NONE, pdev->name, host);
if (ret)
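The msdc_cqe_disable() hunk above replaces an immediate hardware reset with a bounded poll waiting for the DMA stop to take effect. A sketch of the readl_poll_timeout() idiom, with BIT(0) standing in for the real MSDC_DMA_CFG_STS mask:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

static int example_wait_dma_stopped(void __iomem *dma_cfg)
{
	u32 val;

	/* poll every 1 us, up to 3 ms, for the DMA-active bit to drop;
	 * returns 0 on success or -ETIMEDOUT */
	return readl_poll_timeout(dma_cfg, val, !(val & BIT(0)), 1, 3000);
}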
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 947581de7860..8c3655d3be96 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -552,6 +552,11 @@ static const struct of_device_id mxs_mmc_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
+static void mxs_mmc_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
static int mxs_mmc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -591,6 +596,11 @@ static int mxs_mmc_probe(struct platform_device *pdev)
"Failed to enable vmmc regulator: %d\n", ret);
goto out_mmc_free;
}
+
+ ret = devm_add_action_or_reset(&pdev->dev, mxs_mmc_regulator_disable,
+ reg_vmmc);
+ if (ret)
+ goto out_mmc_free;
}
ssp->clk = devm_clk_get(&pdev->dev, NULL);
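The mxs-mmc hunk above uses devm_add_action_or_reset() to tie the regulator's lifetime to the device, so no explicit disable is needed on any later error path. A minimal sketch of the pattern:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void example_regulator_disable(void *data)
{
	regulator_disable(data);
}

static int example_enable_vmmc(struct device *dev, struct regulator *vmmc)
{
	int ret = regulator_enable(vmmc);

	if (ret)
		return ret;
	/* runs the callback immediately if registration itself fails */
	return devm_add_action_or_reset(dev, example_regulator_disable, vmmc);
}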
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 2f8038d69f67..9dafcbf969d9 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -702,11 +702,6 @@ static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
#else
-static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
-{
- return 0;
-}
-
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}
@@ -1515,7 +1510,7 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
* REVISIT: should be moved to sdio core and made more
* general e.g. by expanding the DT bindings of child nodes
* to provide a mechanism to provide this information:
- * Documentation/devicetree/bindings/mmc/mmc-card.txt
+ * Documentation/devicetree/bindings/mmc/mmc-card.yaml
*/
np = of_get_compatible_child(np, "ti,wl1251");
@@ -2086,6 +2081,7 @@ static int omap_hsmmc_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM
static int omap_hsmmc_runtime_suspend(struct device *dev)
{
struct omap_hsmmc_host *host;
@@ -2153,11 +2149,11 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
spin_unlock_irqrestore(&host->irq_lock, flags);
return 0;
}
+#endif
static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
- .runtime_suspend = omap_hsmmc_runtime_suspend,
- .runtime_resume = omap_hsmmc_runtime_resume,
+ SET_RUNTIME_PM_OPS(omap_hsmmc_runtime_suspend, omap_hsmmc_runtime_resume, NULL)
};
static struct platform_driver omap_hsmmc_driver = {
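The omap_hsmmc conversion above moves the runtime callbacks behind SET_RUNTIME_PM_OPS(), which compiles them out when CONFIG_PM is disabled; hence the matching #ifdef CONFIG_PM guard around the functions. An illustrative dev_pm_ops layout with stub callbacks:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_PM
static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev) { return 0; }
#endif

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
};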
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 8fe65f172a61..f1ef0d28b0dd 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -362,23 +362,11 @@ static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
- struct sdhci_host *host = mmc_priv(mmc);
- unsigned long flags;
- int ret = 0;
if (!gpio_cd)
return 0;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
-
- ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
-out:
- spin_unlock_irqrestore(&host->lock, flags);
-
- return ret;
+ return sdhci_get_cd_nogpio(mmc);
}
static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index f18d169bc8ff..afaf33707d46 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -196,6 +196,9 @@
*/
#define ESDHC_FLAG_BROKEN_AUTO_CMD23 BIT(16)
+/* ERR004536 is not applicable for the IP */
+#define ESDHC_FLAG_SKIP_ERR004536 BIT(17)
+
enum wp_types {
ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
@@ -289,6 +292,13 @@ static const struct esdhc_soc_data usdhc_imx7d_data = {
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
+static struct esdhc_soc_data usdhc_s32g2_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ | ESDHC_FLAG_SKIP_ERR004536,
+};
+
static struct esdhc_soc_data usdhc_imx7ulp_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
@@ -347,6 +357,7 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
{ .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, },
+ { .compatible = "nxp,s32g2-usdhc", .data = &usdhc_s32g2_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -1187,6 +1198,7 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 ctrl;
+ int ret;
/* Reset the tuning circuit */
if (esdhc_is_usdhc(imx_data)) {
@@ -1199,7 +1211,22 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ ctrl &= ~ESDHC_MIX_CTRL_EXE_TUNE;
writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ /* Make sure ESDHC_MIX_CTRL_EXE_TUNE is cleared */
+ ret = readl_poll_timeout(host->ioaddr + SDHCI_AUTO_CMD_STATUS,
+ ctrl, !(ctrl & ESDHC_MIX_CTRL_EXE_TUNE), 1, 50);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "Warning! clear execute tuning bit failed\n");
+ /*
+ * SDHCI_INT_DATA_AVAIL is a W1C bit; setting it clears the uSDHC
+ * IP's internal execute_tuning_with_clr_buf flag, which finally
+ * ensures that the normal data transfer logic is correct.
+ */
+ ctrl = readl(host->ioaddr + SDHCI_INT_STATUS);
+ ctrl |= SDHCI_INT_DATA_AVAIL;
+ writel(ctrl, host->ioaddr + SDHCI_INT_STATUS);
}
}
}
@@ -1359,8 +1386,10 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
* TO1.1, it's harmless for MX6SL
*/
- writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
- host->ioaddr + 0x6c);
+ if (!(imx_data->socdata->flags & ESDHC_FLAG_SKIP_ERR004536)) {
+ writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
+ host->ioaddr + 0x6c);
+ }
/* disable DLL_CTRL delay line settings */
writel(0x0, host->ioaddr + ESDHC_DLL_CTRL);
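The esdhc_reset_tuning() hunk above depends on SDHCI_INT_DATA_AVAIL being a write-1-to-clear (W1C) bit. A generic sketch of W1C handling, not tied to the uSDHC registers:

#include <linux/io.h>
#include <linux/types.h>

static void example_clear_w1c(void __iomem *status_reg, u32 bit)
{
	u32 val = readl(status_reg);

	/* write-1-to-clear: OR in the bit and write the value back */
	writel(val | bit, status_reg);
}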
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 737e2bfdedc2..6a2e5a468424 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -191,6 +191,13 @@ static const struct sdhci_arasan_soc_ctl_map intel_lgm_sdxc_soc_ctl_map = {
.hiword_update = false,
};
+static const struct sdhci_arasan_soc_ctl_map thunderbay_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0x0, .width = 8, .shift = 14 },
+ .clockmultiplier = { .reg = 0x4, .width = 8, .shift = 14 },
+ .support64b = { .reg = 0x4, .width = 1, .shift = 24 },
+ .hiword_update = false,
+};
+
static const struct sdhci_arasan_soc_ctl_map intel_keembay_soc_ctl_map = {
.baseclkfreq = { .reg = 0x0, .width = 8, .shift = 14 },
.clockmultiplier = { .reg = 0x4, .width = 8, .shift = 14 },
@@ -456,6 +463,15 @@ static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
+static const struct sdhci_pltfm_data sdhci_arasan_thunderbay_pdata = {
+ .ops = &sdhci_arasan_cqe_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC |
+ SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -1132,6 +1148,12 @@ static struct sdhci_arasan_of_data sdhci_arasan_generic_data = {
.clk_ops = &arasan_clk_ops,
};
+static const struct sdhci_arasan_of_data sdhci_arasan_thunderbay_data = {
+ .soc_ctl_map = &thunderbay_soc_ctl_map,
+ .pdata = &sdhci_arasan_thunderbay_pdata,
+ .clk_ops = &arasan_clk_ops,
+};
+
static const struct sdhci_pltfm_data sdhci_keembay_emmc_pdata = {
.ops = &sdhci_arasan_cqe_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
@@ -1265,6 +1287,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "intel,keembay-sdhci-5.1-sdio",
.data = &intel_keembay_sdio_data,
},
+ {
+ .compatible = "intel,thunderbay-sdhci-5.1",
+ .data = &sdhci_arasan_thunderbay_data,
+ },
/* Generic compatible below here */
{
.compatible = "arasan,sdhci-8.9a",
@@ -1626,7 +1652,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") ||
- of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) {
+ of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio") ||
+ of_device_is_compatible(np, "intel,thunderbay-sdhci-5.1")) {
sdhci_arasan_update_clockmultiplier(host, 0x0);
sdhci_arasan_update_support64b(host, 0x0);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 8f4d1f003f65..64e27c2821f9 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sys_soc.h>
@@ -21,7 +23,14 @@
#include "sdhci-pltfm.h"
-#define SDHCI_OMAP_CON 0x12c
+/*
+ * Note that the register offsets used here are from omap_regs
+ * base which is 0x100 for omap4 and later, and 0 for omap3 and
+ * earlier.
+ */
+#define SDHCI_OMAP_SYSCONFIG 0x10
+
+#define SDHCI_OMAP_CON 0x2c
#define CON_DW8 BIT(5)
#define CON_DMA_MASTER BIT(20)
#define CON_DDR BIT(19)
@@ -31,20 +40,20 @@
#define CON_INIT BIT(1)
#define CON_OD BIT(0)
-#define SDHCI_OMAP_DLL 0x0134
+#define SDHCI_OMAP_DLL 0x34
#define DLL_SWT BIT(20)
#define DLL_FORCE_SR_C_SHIFT 13
#define DLL_FORCE_SR_C_MASK (0x7f << DLL_FORCE_SR_C_SHIFT)
#define DLL_FORCE_VALUE BIT(12)
#define DLL_CALIB BIT(1)
-#define SDHCI_OMAP_CMD 0x20c
+#define SDHCI_OMAP_CMD 0x10c
-#define SDHCI_OMAP_PSTATE 0x0224
+#define SDHCI_OMAP_PSTATE 0x124
#define PSTATE_DLEV_DAT0 BIT(20)
#define PSTATE_DATI BIT(1)
-#define SDHCI_OMAP_HCTL 0x228
+#define SDHCI_OMAP_HCTL 0x128
#define HCTL_SDBP BIT(8)
#define HCTL_SDVS_SHIFT 9
#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT)
@@ -52,26 +61,28 @@
#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT)
#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT)
-#define SDHCI_OMAP_SYSCTL 0x22c
+#define SDHCI_OMAP_SYSCTL 0x12c
#define SYSCTL_CEN BIT(2)
#define SYSCTL_CLKD_SHIFT 6
#define SYSCTL_CLKD_MASK 0x3ff
-#define SDHCI_OMAP_STAT 0x230
+#define SDHCI_OMAP_STAT 0x130
-#define SDHCI_OMAP_IE 0x234
+#define SDHCI_OMAP_IE 0x134
#define INT_CC_EN BIT(0)
-#define SDHCI_OMAP_AC12 0x23c
+#define SDHCI_OMAP_ISE 0x138
+
+#define SDHCI_OMAP_AC12 0x13c
#define AC12_V1V8_SIGEN BIT(19)
#define AC12_SCLK_SEL BIT(23)
-#define SDHCI_OMAP_CAPA 0x240
+#define SDHCI_OMAP_CAPA 0x140
#define CAPA_VS33 BIT(24)
#define CAPA_VS30 BIT(25)
#define CAPA_VS18 BIT(26)
-#define SDHCI_OMAP_CAPA2 0x0244
+#define SDHCI_OMAP_CAPA2 0x144
#define CAPA2_TSDR50 BIT(13)
#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */
@@ -89,7 +100,8 @@
#define SDHCI_OMAP_SPECIAL_RESET BIT(1)
struct sdhci_omap_data {
- u32 offset;
+ int omap_offset; /* Offset for omap regs from base */
+ u32 offset; /* Offset for SDHCI regs from base */
u8 flags;
};
@@ -107,12 +119,19 @@ struct sdhci_omap_host {
struct pinctrl *pinctrl;
struct pinctrl_state **pinctrl_state;
+ int wakeirq;
bool is_tuning;
+
+ /* Offset for omap specific registers from base */
+ int omap_offset;
+
/* Omap specific context save */
u32 con;
u32 hctl;
u32 sysctl;
u32 capa;
+ u32 ie;
+ u32 ise;
};
static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host);
@@ -121,13 +140,13 @@ static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host);
static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host,
unsigned int offset)
{
- return readl(host->base + offset);
+ return readl(host->base + host->omap_offset + offset);
}
static inline void sdhci_omap_writel(struct sdhci_omap_host *host,
unsigned int offset, u32 data)
{
- writel(data, host->base + offset);
+ writel(data, host->base + host->omap_offset + offset);
}
static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
@@ -172,7 +191,7 @@ static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
}
static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
- unsigned int iov)
+ unsigned int iov_pbias)
{
int ret;
struct sdhci_host *host = omap_host->host;
@@ -183,14 +202,15 @@ static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
return ret;
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov);
- if (ret) {
+ /* Pick the right voltage to allow 3.0V for 3.3V nominal PBIAS */
+ ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
+ if (ret < 0) {
dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n");
return ret;
}
}
- ret = sdhci_omap_set_pbias(omap_host, true, iov);
+ ret = sdhci_omap_set_pbias(omap_host, true, iov_pbias);
if (ret)
return ret;
@@ -200,16 +220,28 @@ static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
unsigned char signal_voltage)
{
- u32 reg;
+ u32 reg, capa;
ktime_t timeout;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
reg &= ~HCTL_SDVS_MASK;
- if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- reg |= HCTL_SDVS_33;
- else
+ switch (signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (capa & CAPA_VS33)
+ reg |= HCTL_SDVS_33;
+ else if (capa & CAPA_VS30)
+ reg |= HCTL_SDVS_30;
+ else
+ dev_warn(omap_host->dev, "misconfigured CAPA: %08x\n",
+ capa);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ default:
reg |= HCTL_SDVS_18;
+ break;
+ }
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
@@ -527,28 +559,32 @@ static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc,
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
- if (!(reg & CAPA_VS33))
+ if (!(reg & (CAPA_VS30 | CAPA_VS33)))
return -EOPNOTSUPP;
+ if (reg & CAPA_VS30)
+ iov = IOV_3V0;
+ else
+ iov = IOV_3V3;
+
sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg &= ~AC12_V1V8_SIGEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
- iov = IOV_3V3;
} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
if (!(reg & CAPA_VS18))
return -EOPNOTSUPP;
+ iov = IOV_1V8;
+
sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg |= AC12_V1V8_SIGEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
-
- iov = IOV_1V8;
} else {
return -EOPNOTSUPP;
}
@@ -682,7 +718,24 @@ static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode,
{
struct mmc_host *mmc = host->mmc;
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
+/*
+ * MMCHS_HL_HWINFO has the MADMA_EN bit set if the controller instance
+ * is connected to L3 interconnect and is bus master capable. Note that
+ * the MMCHS_HL_HWINFO register is in the module registers before the
+ * omap registers and sdhci registers. The offset can vary for omap
+ * registers depending on the SoC. Do not use sdhci_omap_readl() here.
+ */
+static bool sdhci_omap_has_adma(struct sdhci_omap_host *omap_host, int offset)
+{
+ /* MMCHS_HL_HWINFO register is only available on omap4 and later */
+ if (offset < 0x200)
+ return false;
+
+ return readl(omap_host->base + 4) & 1;
}
static int sdhci_omap_enable_dma(struct sdhci_host *host)
@@ -792,6 +845,11 @@ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
unsigned long limit = MMC_TIMEOUT_US;
unsigned long i = 0;
+ u32 sysc;
+
+ /* Save target module sysconfig configured by SoC PM layer */
+ if (mask & SDHCI_RESET_ALL)
+ sysc = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCONFIG);
/* Don't reset data lines during tuning operation */
if (omap_host->is_tuning)
@@ -811,10 +869,15 @@ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
dev_err(mmc_dev(host->mmc),
"Timeout waiting on controller reset in %s\n",
__func__);
- return;
+
+ goto restore_sysc;
}
sdhci_reset(host, mask);
+
+restore_sysc:
+ if (mask & SDHCI_RESET_ALL)
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCONFIG, sysc);
}
#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
@@ -877,34 +940,73 @@ static struct sdhci_ops sdhci_omap_ops = {
.set_timeout = sdhci_omap_set_timeout,
};
-static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
+static unsigned int sdhci_omap_regulator_get_caps(struct device *dev,
+ const char *name)
{
- u32 reg;
- int ret = 0;
+ struct regulator *reg;
+ unsigned int caps = 0;
+
+ reg = regulator_get(dev, name);
+ if (IS_ERR(reg))
+ return ~0U;
+
+ if (regulator_is_supported_voltage(reg, 1700000, 1950000))
+ caps |= SDHCI_CAN_VDD_180;
+ if (regulator_is_supported_voltage(reg, 2700000, 3150000))
+ caps |= SDHCI_CAN_VDD_300;
+ if (regulator_is_supported_voltage(reg, 3150000, 3600000))
+ caps |= SDHCI_CAN_VDD_330;
+
+ regulator_put(reg);
+
+ return caps;
+}
+
+static int sdhci_omap_set_capabilities(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
struct device *dev = omap_host->dev;
- struct regulator *vqmmc;
+ const u32 mask = SDHCI_CAN_VDD_180 | SDHCI_CAN_VDD_300 | SDHCI_CAN_VDD_330;
+ unsigned int pbias, vqmmc, caps = 0;
+ u32 reg;
- vqmmc = regulator_get(dev, "vqmmc");
- if (IS_ERR(vqmmc)) {
- ret = PTR_ERR(vqmmc);
- goto reg_put;
- }
+ pbias = sdhci_omap_regulator_get_caps(dev, "pbias");
+ vqmmc = sdhci_omap_regulator_get_caps(dev, "vqmmc");
+ caps = pbias & vqmmc;
+
+ if (pbias != ~0U && vqmmc == ~0U)
+ dev_warn(dev, "vqmmc regulator missing for pbias\n");
+ else if (caps == ~0U)
+ return 0;
+
+ /*
+ * Quirk handling to allow 3.0V vqmmc with a valid 3.3V PBIAS. This is
+ * needed for 3.0V ldo9_reg on omap5 at least.
+ */
+ if (pbias != ~0U && (pbias & SDHCI_CAN_VDD_330) &&
+ (vqmmc & SDHCI_CAN_VDD_300))
+ caps |= SDHCI_CAN_VDD_330;
/* voltage capabilities might be set by boot loader, clear it */
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33);
- if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3))
- reg |= CAPA_VS33;
- if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8))
+ if (caps & SDHCI_CAN_VDD_180)
reg |= CAPA_VS18;
+ if (caps & SDHCI_CAN_VDD_300)
+ reg |= CAPA_VS30;
+
+ if (caps & SDHCI_CAN_VDD_330)
+ reg |= CAPA_VS33;
+
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg);
-reg_put:
- regulator_put(vqmmc);
+ host->caps &= ~mask;
+ host->caps |= caps;
- return ret;
+ return 0;
}
static const struct sdhci_pltfm_data sdhci_omap_pdata = {
@@ -920,26 +1022,56 @@ static const struct sdhci_pltfm_data sdhci_omap_pdata = {
.ops = &sdhci_omap_ops,
};
+static const struct sdhci_omap_data omap2430_data = {
+ .omap_offset = 0,
+ .offset = 0x100,
+};
+
+static const struct sdhci_omap_data omap3_data = {
+ .omap_offset = 0,
+ .offset = 0x100,
+};
+
+static const struct sdhci_omap_data omap4_data = {
+ .omap_offset = 0x100,
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
+static const struct sdhci_omap_data omap5_data = {
+ .omap_offset = 0x100,
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
static const struct sdhci_omap_data k2g_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
};
static const struct sdhci_omap_data am335_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data am437_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data dra7_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_REQUIRE_IODELAY,
};
static const struct of_device_id omap_sdhci_match[] = {
+ { .compatible = "ti,omap2430-sdhci", .data = &omap2430_data },
+ { .compatible = "ti,omap3-sdhci", .data = &omap3_data },
+ { .compatible = "ti,omap4-sdhci", .data = &omap4_data },
+ { .compatible = "ti,omap5-sdhci", .data = &omap5_data },
{ .compatible = "ti,dra7-sdhci", .data = &dra7_data },
{ .compatible = "ti,k2g-sdhci", .data = &k2g_data },
{ .compatible = "ti,am335-sdhci", .data = &am335_data },
@@ -1122,6 +1254,8 @@ static int sdhci_omap_probe(struct platform_device *pdev)
omap_host->power_mode = MMC_POWER_UNDEFINED;
omap_host->timing = MMC_TIMING_LEGACY;
omap_host->flags = data->flags;
+ omap_host->omap_offset = data->omap_offset;
+ omap_host->con = -EINVAL; /* Prevent invalid restore on first resume */
host->ioaddr += offset;
host->mapbase = regs->start + offset;
@@ -1172,6 +1306,8 @@ static int sdhci_omap_probe(struct platform_device *pdev)
* SYSCONFIG register of omap devices. The callback will be invoked
* as part of pm_runtime_get_sync.
*/
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret) {
@@ -1179,10 +1315,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
goto err_rpm_disable;
}
- ret = sdhci_omap_set_capabilities(omap_host);
+ ret = sdhci_omap_set_capabilities(host);
if (ret) {
dev_err(dev, "failed to set system capabilities\n");
- goto err_put_sync;
+ goto err_rpm_put;
}
host->mmc_host_ops.start_signal_voltage_switch =
@@ -1192,16 +1328,28 @@ static int sdhci_omap_probe(struct platform_device *pdev)
host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
- /* Switch to external DMA only if there is the "dmas" property */
- if (of_find_property(dev->of_node, "dmas", NULL))
+ /*
+ * Switch to external DMA only if there is the "dmas" property and
+ * ADMA is not available on the controller instance.
+ */
+ if (device_property_present(dev, "dmas") &&
+ !sdhci_omap_has_adma(omap_host, offset))
sdhci_switch_external_dma(host, true);
+ if (device_property_read_bool(dev, "ti,non-removable")) {
+ dev_warn_once(dev, "using old ti,non-removable property\n");
+ mmc->caps |= MMC_CAP_NONREMOVABLE;
+ }
+
/* An R1B response is required to properly manage HW busy detection. */
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+ /* Allow card power off and runtime PM for eMMC/SD card devices */
+ mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM;
+
ret = sdhci_setup_host(host);
if (ret)
- goto err_put_sync;
+ goto err_rpm_put;
ret = sdhci_omap_config_iodelay_pinctrl_state(omap_host);
if (ret)
@@ -1211,15 +1359,38 @@ static int sdhci_omap_probe(struct platform_device *pdev)
if (ret)
goto err_cleanup_host;
+ /*
+ * SDIO devices can use the dat1 pin as a wake-up interrupt. Some
+ * devices, like wl1xxx, use an out-of-band GPIO interrupt instead.
+ */
+ omap_host->wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+ if (omap_host->wakeirq == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_cleanup_host;
+ }
+ if (omap_host->wakeirq > 0) {
+ device_init_wakeup(dev, true);
+ ret = dev_pm_set_dedicated_wake_irq(dev, omap_host->wakeirq);
+ if (ret) {
+ device_init_wakeup(dev, false);
+ goto err_cleanup_host;
+ }
+ host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
err_cleanup_host:
sdhci_cleanup_host(host);
-err_put_sync:
- pm_runtime_put_sync(dev);
-
+err_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
err_rpm_disable:
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
err_pltfm_free:
@@ -1232,64 +1403,81 @@ static int sdhci_omap_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sdhci_host *host = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(dev);
sdhci_remove_host(host, true);
+ device_init_wakeup(dev, false);
+ dev_pm_clear_wake_irq(dev);
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
+ /* Ensure device gets disabled despite userspace sysfs config */
+ pm_runtime_force_suspend(dev);
sdhci_pltfm_free(pdev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static void sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
+
+#ifdef CONFIG_PM
+static void __maybe_unused sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
{
omap_host->con = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
omap_host->hctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+ omap_host->sysctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
omap_host->capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ omap_host->ie = sdhci_omap_readl(omap_host, SDHCI_OMAP_IE);
+ omap_host->ise = sdhci_omap_readl(omap_host, SDHCI_OMAP_ISE);
}
-static void sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
+/* Order matters here: HCTL must be restored in two phases */
+static void __maybe_unused sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
{
- sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, omap_host->capa);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, omap_host->sysctl);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_IE, omap_host->ie);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_ISE, omap_host->ise);
}
-static int __maybe_unused sdhci_omap_suspend(struct device *dev)
+static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- sdhci_suspend_host(host);
+ sdhci_runtime_suspend_host(host);
sdhci_omap_context_save(omap_host);
pinctrl_pm_select_idle_state(dev);
- pm_runtime_force_suspend(dev);
-
return 0;
}
-static int __maybe_unused sdhci_omap_resume(struct device *dev)
+static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- pm_runtime_force_resume(dev);
-
pinctrl_pm_select_default_state(dev);
- sdhci_omap_context_restore(omap_host);
+ if (omap_host->con != -EINVAL)
+ sdhci_omap_context_restore(omap_host);
- sdhci_resume_host(host);
+ sdhci_runtime_resume_host(host, 0);
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(sdhci_omap_dev_pm_ops, sdhci_omap_suspend,
- sdhci_omap_resume);
+
+static const struct dev_pm_ops sdhci_omap_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(sdhci_omap_runtime_suspend,
+ sdhci_omap_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
static struct platform_driver sdhci_omap_driver = {
.probe = sdhci_omap_probe,
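
The sdhci-omap conversion above follows the common runtime-PM idiom for platform drivers: enable autosuspend before sdhci_setup_host(), drop the usage count with pm_runtime_put_autosuspend() once probe succeeds, and route system sleep through pm_runtime_force_suspend()/pm_runtime_force_resume(). A minimal sketch of that idiom, using a hypothetical "foo" driver rather than anything from this patch:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/* Let the device autosuspend 100ms after its last use. */
	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto err_rpm_disable;

	/* ... hardware init while the device is known to be powered ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;

err_rpm_disable:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
	return ret;
}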
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index be19785227fe..6f9877546830 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -17,8 +17,6 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -26,11 +24,13 @@
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
-#include <linux/mmc/slot-gpio.h>
-#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
+
#ifdef CONFIG_X86
#include <asm/iosf_mbi.h>
#endif
@@ -345,73 +345,6 @@ static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
-#ifdef CONFIG_PM
-
-static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id)
-{
- struct sdhci_pci_slot *slot = dev_id;
- struct sdhci_host *host = slot->host;
-
- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
- return IRQ_HANDLED;
-}
-
-static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
-{
- int err, irq, gpio = slot->cd_gpio;
-
- slot->cd_gpio = -EINVAL;
- slot->cd_irq = -EINVAL;
-
- if (!gpio_is_valid(gpio))
- return;
-
- err = devm_gpio_request(&slot->chip->pdev->dev, gpio, "sd_cd");
- if (err < 0)
- goto out;
-
- err = gpio_direction_input(gpio);
- if (err < 0)
- goto out_free;
-
- irq = gpio_to_irq(gpio);
- if (irq < 0)
- goto out_free;
-
- err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING, "sd_cd", slot);
- if (err)
- goto out_free;
-
- slot->cd_gpio = gpio;
- slot->cd_irq = irq;
-
- return;
-
-out_free:
- devm_gpio_free(&slot->chip->pdev->dev, gpio);
-out:
- dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
-}
-
-static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
-{
- if (slot->cd_irq >= 0)
- free_irq(slot->cd_irq, slot);
-}
-
-#else
-
-static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
-{
-}
-
-static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
-{
-}
-
-#endif
-
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
@@ -619,23 +552,16 @@ static int intel_select_drive_strength(struct mmc_card *card,
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
- struct sdhci_host *host = mmc_priv(mmc);
- unsigned long flags;
- int ret = 0;
if (!gpio_cd)
return 0;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
-
- ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
-out:
- spin_unlock_irqrestore(&host->lock, flags);
+ return sdhci_get_cd_nogpio(mmc);
+}
- return ret;
+static int mrfld_get_cd(struct mmc_host *mmc)
+{
+ return sdhci_get_cd_nogpio(mmc);
}
#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
@@ -1341,6 +1267,14 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
MMC_CAP_1_8V_DDR;
break;
case INTEL_MRFLD_SD:
+ slot->cd_idx = 0;
+ slot->cd_override_level = true;
+ /*
+ * There are two PCB designs of SD card slot with the opposite
+ * card detection sense. Quirk this out by ignoring GPIO state
+ * completely in the custom ->get_cd() callback.
+ */
+ slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
@@ -1981,21 +1915,6 @@ int sdhci_pci_enable_dma(struct sdhci_host *host)
return 0;
}
-static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
-{
- struct sdhci_pci_slot *slot = sdhci_priv(host);
- int rst_n_gpio = slot->rst_n_gpio;
-
- if (!gpio_is_valid(rst_n_gpio))
- return;
- gpio_set_value_cansleep(rst_n_gpio, 0);
- /* For eMMC, minimum is 1us but give it 10us for good measure */
- udelay(10);
- gpio_set_value_cansleep(rst_n_gpio, 1);
- /* For eMMC, minimum is 200us but give it 300us for good measure */
- usleep_range(300, 1000);
-}
-
static void sdhci_pci_hw_reset(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -2126,26 +2045,8 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
slot->chip = chip;
slot->host = host;
- slot->rst_n_gpio = -EINVAL;
- slot->cd_gpio = -EINVAL;
slot->cd_idx = -1;
- /* Retrieve platform data if there is any */
- if (*sdhci_pci_get_data)
- slot->data = sdhci_pci_get_data(pdev, slotno);
-
- if (slot->data) {
- if (slot->data->setup) {
- ret = slot->data->setup(slot->data);
- if (ret) {
- dev_err(&pdev->dev, "platform setup failed\n");
- goto free;
- }
- }
- slot->rst_n_gpio = slot->data->rst_n_gpio;
- slot->cd_gpio = slot->data->cd_gpio;
- }
-
host->hw_name = "PCI";
host->ops = chip->fixes && chip->fixes->ops ?
chip->fixes->ops :
@@ -2169,17 +2070,6 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
goto cleanup;
}
- if (gpio_is_valid(slot->rst_n_gpio)) {
- if (!devm_gpio_request(&pdev->dev, slot->rst_n_gpio, "eMMC_reset")) {
- gpio_direction_output(slot->rst_n_gpio, 1);
- slot->host->mmc->caps |= MMC_CAP_HW_RESET;
- slot->hw_reset = sdhci_pci_gpio_hw_reset;
- } else {
- dev_warn(&pdev->dev, "failed to request rst_n_gpio\n");
- slot->rst_n_gpio = -EINVAL;
- }
- }
-
host->mmc->pm_caps = MMC_PM_KEEP_POWER;
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
@@ -2214,15 +2104,11 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
if (ret)
goto remove;
- sdhci_pci_add_own_cd(slot);
-
/*
* Check if the chip needs a separate GPIO for card detect to wake up
* from runtime suspend. If it is not there, don't allow runtime PM.
- * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
*/
- if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
- !gpio_is_valid(slot->cd_gpio) && slot->cd_idx < 0)
+ if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
chip->allow_runtime_pm = false;
return slot;
@@ -2232,10 +2118,6 @@ remove:
chip->fixes->remove_slot(slot, 0);
cleanup:
- if (slot->data && slot->data->cleanup)
- slot->data->cleanup(slot->data);
-
-free:
sdhci_free_host(host);
return ERR_PTR(ret);
@@ -2246,8 +2128,6 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
int dead;
u32 scratch;
- sdhci_pci_remove_own_cd(slot);
-
dead = 0;
scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
@@ -2258,9 +2138,6 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
if (slot->chip->fixes && slot->chip->fixes->remove_slot)
slot->chip->fixes->remove_slot(slot, dead);
- if (slot->data && slot->data->cleanup)
- slot->data->cleanup(slot->data);
-
sdhci_free_host(slot->host);
}
diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c
deleted file mode 100644
index 18638fb363d8..000000000000
--- a/drivers/mmc/host/sdhci-pci-data.c
+++ /dev/null
@@ -1,6 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
-#include <linux/mmc/sdhci-pci-data.h>
-
-struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno);
-EXPORT_SYMBOL_GPL(sdhci_pci_get_data);
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 51d55a87aebe..f045c1ee4667 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -489,7 +489,7 @@ static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip,
ret = pci_find_capability(chip->pdev, PCI_CAP_ID_MSI);
if (!ret) {
- pr_info("%s: unsupport msi, use INTx irq\n",
+ pr_info("%s: unsupported MSI, use INTx irq\n",
mmc_hostname(host->mmc));
return;
}
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 8f90c4163bb5..5e3193278ff9 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -156,11 +156,6 @@ struct sdhci_pci_fixes {
struct sdhci_pci_slot {
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
- struct sdhci_pci_data *data;
-
- int rst_n_gpio;
- int cd_gpio;
- int cd_irq;
int cd_idx;
bool cd_override_level;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 862f033d235d..9085f3932443 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -791,4 +791,3 @@ module_platform_driver(sdhci_s3c_driver);
MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c-sdhci");
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 11e375579cfb..f33e9349e4e6 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -39,6 +40,9 @@
#define SDHCI_SPRD_BIT_POSRD_DLY_INV BIT(21)
#define SDHCI_SPRD_BIT_NEGRD_DLY_INV BIT(29)
+#define SDHCI_SPRD_REG_32_DLL_STS0 0x210
+#define SDHCI_SPRD_DLL_LOCKED BIT(18)
+
#define SDHCI_SPRD_REG_32_BUSY_POSI 0x250
#define SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN BIT(25)
#define SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN BIT(24)
@@ -256,6 +260,15 @@ static void sdhci_sprd_enable_phy_dll(struct sdhci_host *host)
sdhci_writel(host, tmp, SDHCI_SPRD_REG_32_DLL_CFG);
/* wait 1ms */
usleep_range(1000, 1250);
+
+ if (read_poll_timeout(sdhci_readl, tmp, (tmp & SDHCI_SPRD_DLL_LOCKED),
+ 2000, USEC_PER_SEC, false, host, SDHCI_SPRD_REG_32_DLL_STS0)) {
+ pr_err("%s: DLL locked fail!\n", mmc_hostname(host->mmc));
+ pr_info("%s: DLL_STS0 : 0x%x, DLL_CFG : 0x%x\n",
+ mmc_hostname(host->mmc),
+ sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_STS0),
+ sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_CFG));
+ }
}
static void sdhci_sprd_set_clock(struct sdhci_host *host, unsigned int clock)
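
read_poll_timeout() from <linux/iopoll.h>, used in the DLL-lock wait above, repeatedly evaluates val = op(args...) until cond becomes true or timeout_us expires, sleeping sleep_us between reads; it returns 0 on success or -ETIMEDOUT. A sketch of the pattern with a hypothetical register accessor and status bit (only the polling shape is taken from the patch):

#include <linux/iopoll.h>

u32 my_readl(void __iomem *base, u32 reg);	/* hypothetical accessor */

static int my_wait_ready(void __iomem *base)
{
	u32 val;

	/* Poll every 2ms, give up after one second (-ETIMEDOUT). */
	return read_poll_timeout(my_readl, val, val & MY_STATUS_READY,
				 2000, USEC_PER_SEC, false,
				 base, MY_STATUS_REG);
}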
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8eefa7d5fe85..269c86569402 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -930,7 +930,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
struct mmc_data *data;
unsigned target_timeout, current_timeout;
- *too_big = true;
+ *too_big = false;
/*
* If the host controller provides us with an incorrect timeout
@@ -941,7 +941,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
return host->max_timeout_count;
- /* Unspecified command, asume max */
+ /* Unspecified command, assume max */
if (cmd == NULL)
return host->max_timeout_count;
@@ -968,17 +968,14 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
while (current_timeout < target_timeout) {
count++;
current_timeout <<= 1;
- if (count > host->max_timeout_count)
+ if (count > host->max_timeout_count) {
+ if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
+ DBG("Too large timeout 0x%x requested for CMD%d!\n",
+ count, cmd->opcode);
+ count = host->max_timeout_count;
+ *too_big = true;
break;
- }
-
- if (count > host->max_timeout_count) {
- if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
- DBG("Too large timeout 0x%x requested for CMD%d!\n",
- count, cmd->opcode);
- count = host->max_timeout_count;
- } else {
- *too_big = false;
+ }
}
return count;
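
In effect the loop finds the smallest count for which the controller's base timeout, doubled count times, covers target_timeout (roughly count = ceil(log2(target/base))); with a base of 1 unit and a target of 100 units, for example, it exits at count = 7 because 2^7 = 128 >= 100. The rewrite also initializes *too_big to false on entry and sets it only on the clamping path, instead of resetting it on every loop iteration.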
@@ -2042,6 +2039,12 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
break;
case MMC_VDD_32_33:
case MMC_VDD_33_34:
+ /*
+ * 3.4 ~ 3.6V are valid only for those platforms where it's
+ * known that the voltage range is supported by hardware.
+ */
+ case MMC_VDD_34_35:
+ case MMC_VDD_35_36:
pwr = SDHCI_POWER_330;
break;
default:
@@ -2422,6 +2425,25 @@ static int sdhci_get_cd(struct mmc_host *mmc)
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
+int sdhci_get_cd_nogpio(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
+
static int sdhci_check_ro(struct sdhci_host *host)
{
unsigned long flags;
@@ -3232,7 +3254,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
-ETIMEDOUT :
-EILSEQ;
- if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+ if (sdhci_auto_cmd23(host, mrq)) {
mrq->sbc->error = err;
__sdhci_finish_mrq(host, mrq);
return;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e8d04e42a5af..bb883553d3b4 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -750,7 +750,6 @@ static inline void *sdhci_priv(struct sdhci_host *host)
return host->private;
}
-void sdhci_card_detect(struct sdhci_host *host);
void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
const u32 *caps, const u32 *caps1);
int sdhci_setup_host(struct sdhci_host *host);
@@ -775,6 +774,7 @@ void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
unsigned short vdd);
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
+int sdhci_get_cd_nogpio(struct mmc_host *mmc);
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 7dfc26f48c18..e2affa52ef46 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -195,6 +195,10 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
host->sdcard_irq_mask = host->sdcard_irq_mask_all;
+ if (host->native_hotplug)
+ tmio_mmc_enable_mmc_irqs(host,
+ TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
+
tmio_mmc_set_bus_width(host, host->mmc->ios.bus_width);
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
@@ -956,8 +960,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_OFF:
tmio_mmc_power_off(host);
/* For R-Car Gen2+, we need to reset SDHI specific SCC */
- if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2) {
host->reset(host);
+
+ if (host->native_hotplug)
+ tmio_mmc_enable_mmc_irqs(host,
+ TMIO_STAT_CARD_REMOVE |
+ TMIO_STAT_CARD_INSERT);
+ }
+
host->set_clock(host, 0);
break;
case MMC_POWER_UP:
@@ -1185,10 +1196,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
_host->set_clock(_host, 0);
tmio_mmc_reset(_host);
- if (_host->native_hotplug)
- tmio_mmc_enable_mmc_irqs(_host,
- TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
-
spin_lock_init(&_host->lock);
mutex_init(&_host->ios_lock);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 4950d10d3a19..97beece62fec 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -576,7 +576,7 @@ static void check_vub300_port_status(struct vub300_mmc_host *vub300)
GET_SYSTEM_PORT_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->system_port_status,
- sizeof(vub300->system_port_status), HZ);
+ sizeof(vub300->system_port_status), 1000);
if (sizeof(vub300->system_port_status) == retval)
new_system_port_status(vub300);
}
@@ -1241,7 +1241,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
SET_INTERRUPT_PSEUDOCODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0x0000, 0x0000,
- xfer_buffer, xfer_length, HZ);
+ xfer_buffer, xfer_length, 1000);
kfree(xfer_buffer);
if (retval < 0)
goto copy_error_message;
@@ -1284,7 +1284,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
SET_TRANSFER_PSEUDOCODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0x0000, 0x0000,
- xfer_buffer, xfer_length, HZ);
+ xfer_buffer, xfer_length, 1000);
kfree(xfer_buffer);
if (retval < 0)
goto copy_error_message;
@@ -1991,7 +1991,7 @@ static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_CLOCK_SPEED,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x00, 0x00, buf, buf_array_size, HZ);
+ 0x00, 0x00, buf, buf_array_size, 1000);
if (retval != 8) {
dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
" %dkHz failed with retval=%d\n", kHzClock, retval);
@@ -2013,14 +2013,14 @@ static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_SD_POWER,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x0000, 0x0000, NULL, 0, HZ);
+ 0x0000, 0x0000, NULL, 0, 1000);
/* must wait for the VUB300 u-proc to boot up */
msleep(600);
} else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_SD_POWER,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x0001, 0x0000, NULL, 0, HZ);
+ 0x0001, 0x0000, NULL, 0, 1000);
msleep(600);
vub300->card_powered = 1;
} else if (ios->power_mode == MMC_POWER_ON) {
@@ -2275,14 +2275,14 @@ static int vub300_probe(struct usb_interface *interface,
GET_HC_INF0,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->hc_info,
- sizeof(vub300->hc_info), HZ);
+ sizeof(vub300->hc_info), 1000);
if (retval < 0)
goto error5;
retval =
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_ROM_WAIT_STATES,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
+ firmware_rom_wait_states, 0x0000, NULL, 0, 1000);
if (retval < 0)
goto error5;
dev_info(&vub300->udev->dev,
@@ -2297,7 +2297,7 @@ static int vub300_probe(struct usb_interface *interface,
GET_SYSTEM_PORT_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->system_port_status,
- sizeof(vub300->system_port_status), HZ);
+ sizeof(vub300->system_port_status), 1000);
if (retval < 0) {
goto error4;
} else if (sizeof(vub300->system_port_status) == retval) {
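
The vub300 changes above all replace HZ with a literal 1000: the final argument of usb_control_msg() is a timeout in milliseconds, not jiffies, so passing HZ only meant one second on kernels built with CONFIG_HZ=1000 (with HZ=250 it silently shrank the timeout to 250ms). A sketch of the corrected call shape, with placeholder request and buffer names:

retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), MY_REQUEST,
			 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			 0x0000, 0x0000, buf, len, 1000 /* ms, not jiffies */);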
diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c
index 2640c5b326a4..acabb7715b42 100644
--- a/drivers/most/most_usb.c
+++ b/drivers/most/most_usb.c
@@ -149,7 +149,8 @@ static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
DRCI_READ_REQ, req_type,
0x0000,
- reg, dma_buf, sizeof(*dma_buf), 5 * HZ);
+ reg, dma_buf, sizeof(*dma_buf),
+ USB_CTRL_GET_TIMEOUT);
*buf = le16_to_cpu(*dma_buf);
kfree(dma_buf);
@@ -176,7 +177,7 @@ static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
reg,
NULL,
0,
- 5 * HZ);
+ USB_CTRL_SET_TIMEOUT);
}
static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index b8ae1ec14e17..4eaba6f4ec68 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -384,7 +384,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (new->readonly)
set_disk_ro(gd, 1);
- device_add_disk(&new->mtd->dev, gd, NULL);
+ ret = device_add_disk(&new->mtd->dev, gd, NULL);
+ if (ret)
+ goto out_cleanup_disk;
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
@@ -393,6 +395,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
return 0;
+out_cleanup_disk:
+ blk_cleanup_disk(new->disk);
out_free_tag_set:
blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 38b6aa849c63..5ff001140ef4 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/major.h>
#include <linux/backing-dev.h>
+#include <linux/blkdev.h>
#include <linux/fs_context.h>
#include "mtdcore.h"
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 1fb22388e7e0..22f4709768d1 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "mux-core: " fmt
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -116,6 +117,7 @@ struct mux_chip *mux_chip_alloc(struct device *dev,
sema_init(&mux->lock, 1);
mux->cached_state = MUX_CACHE_UNKNOWN;
mux->idle_state = MUX_IDLE_AS_IS;
+ mux->last_change = ktime_get();
}
device_initialize(&mux_chip->dev);
@@ -129,6 +131,8 @@ static int mux_control_set(struct mux_control *mux, int state)
int ret = mux->chip->ops->set(mux, state);
mux->cached_state = ret < 0 ? MUX_CACHE_UNKNOWN : state;
+ if (ret >= 0)
+ mux->last_change = ktime_get();
return ret;
}
@@ -314,10 +318,25 @@ static int __mux_control_select(struct mux_control *mux, int state)
return ret;
}
+static void mux_control_delay(struct mux_control *mux, unsigned int delay_us)
+{
+ ktime_t delayend;
+ s64 remaining;
+
+ if (!delay_us)
+ return;
+
+ delayend = ktime_add_us(mux->last_change, delay_us);
+ remaining = ktime_us_delta(delayend, ktime_get());
+ if (remaining > 0)
+ fsleep(remaining);
+}
+
/**
- * mux_control_select() - Select the given multiplexer state.
+ * mux_control_select_delay() - Select the given multiplexer state.
* @mux: The mux-control to request a change of state from.
* @state: The new requested state.
+ * @delay_us: The time to delay (in microseconds) if the mux state is changed.
*
* On successfully selecting the mux-control state, it will be locked until
* there is a call to mux_control_deselect(). If the mux-control is already
@@ -331,7 +350,8 @@ static int __mux_control_select(struct mux_control *mux, int state)
* Return: 0 when the mux-control state has the requested state or a negative
* errno on error.
*/
-int mux_control_select(struct mux_control *mux, unsigned int state)
+int mux_control_select_delay(struct mux_control *mux, unsigned int state,
+ unsigned int delay_us)
{
int ret;
@@ -340,18 +360,21 @@ int mux_control_select(struct mux_control *mux, unsigned int state)
return ret;
ret = __mux_control_select(mux, state);
+ if (ret >= 0)
+ mux_control_delay(mux, delay_us);
if (ret < 0)
up(&mux->lock);
return ret;
}
-EXPORT_SYMBOL_GPL(mux_control_select);
+EXPORT_SYMBOL_GPL(mux_control_select_delay);
/**
- * mux_control_try_select() - Try to select the given multiplexer state.
+ * mux_control_try_select_delay() - Try to select the given multiplexer state.
* @mux: The mux-control to request a change of state from.
* @state: The new requested state.
+ * @delay_us: The time to delay (in microseconds) if the mux state is changed.
*
* On successfully selecting the mux-control state, it will be locked until
* mux_control_deselect() called.
@@ -363,7 +386,8 @@ EXPORT_SYMBOL_GPL(mux_control_select);
* Return: 0 when the mux-control state has the requested state or a negative
* errno on error. Specifically -EBUSY if the mux-control is contended.
*/
-int mux_control_try_select(struct mux_control *mux, unsigned int state)
+int mux_control_try_select_delay(struct mux_control *mux, unsigned int state,
+ unsigned int delay_us)
{
int ret;
@@ -371,13 +395,15 @@ int mux_control_try_select(struct mux_control *mux, unsigned int state)
return -EBUSY;
ret = __mux_control_select(mux, state);
+ if (ret >= 0)
+ mux_control_delay(mux, delay_us);
if (ret < 0)
up(&mux->lock);
return ret;
}
-EXPORT_SYMBOL_GPL(mux_control_try_select);
+EXPORT_SYMBOL_GPL(mux_control_try_select_delay);
/**
* mux_control_deselect() - Deselect the previously selected multiplexer state.
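
For consumers, the new *_delay variants make the settle time part of the selection call; presumably the old mux_control_select() remains available as a zero-delay wrapper in the consumer header. A hypothetical consumer sketch, assuming a mux that needs 5000us to settle after switching:

int ret = mux_control_select_delay(mux, 1, 5000 /* us */);

if (ret < 0)
	return ret;
/* The mux is locked and guaranteed settled; access the peripheral. */
mux_control_deselect(mux);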
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f37b1c56f7c4..034dbd487c33 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -150,7 +150,7 @@ config NET_FC
config IFB
tristate "Intermediate Functional Block support"
- depends on NET_CLS_ACT
+ depends on NET_ACT_MIRRED || NFT_FWD_NETDEV
select NET_REDIRECT
help
This is an intermediate driver that allows sharing of
@@ -291,6 +291,22 @@ config GTP
To compile this driver as a module, choose M here: the module
will be called gtp.
+config AMT
+ tristate "Automatic Multicast Tunneling (AMT)"
+ depends on INET && IP_MULTICAST
+ select NET_UDP_TUNNEL
+ help
+ This allows one to create AMT (Automatic Multicast Tunneling)
+ virtual interfaces that provide multicast tunneling.
+ There are two roles, Gateway and Relay.
+ The Gateway encapsulates IGMP/MLD traffic from listeners and sends it
+ to the Relay, and decapsulates multicast traffic from the Relay for
+ its listeners.
+ The Relay encapsulates multicast traffic from sources toward Gateways
+ and decapsulates IGMP/MLD traffic arriving from Gateways.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amt.
+
config MACSEC
tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
select CRYPTO
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 739838623cf6..50b23e71065f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_WIREGUARD) += wireguard/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
+obj-$(CONFIG_AMT) += amt.o
obj-$(CONFIG_MACVLAN) += macvlan.o
obj-$(CONFIG_MACVTAP) += macvtap.o
obj-$(CONFIG_MII) += mii.o
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
new file mode 100644
index 000000000000..60a7053a9cf7
--- /dev/null
+++ b/drivers/net/amt.c
@@ -0,0 +1,3296 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com> */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <linux/jhash.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/igmp.h>
+#include <linux/workqueue.h>
+#include <net/net_namespace.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/icmp.h>
+#include <net/mld.h>
+#include <net/amt.h>
+#include <uapi/linux/amt.h>
+#include <linux/security.h>
+#include <net/gro_cells.h>
+#include <net/ipv6.h>
+#include <net/if_inet6.h>
+#include <net/ndisc.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+#include <net/inet_common.h>
+#include <net/ip6_checksum.h>
+
+static struct workqueue_struct *amt_wq;
+
+static HLIST_HEAD(source_gc_list);
+/* Lock for source_gc_list */
+static spinlock_t source_gc_lock;
+static struct delayed_work source_gc_wq;
+static char *status_str[] = {
+ "AMT_STATUS_INIT",
+ "AMT_STATUS_SENT_DISCOVERY",
+ "AMT_STATUS_RECEIVED_DISCOVERY",
+ "AMT_STATUS_SENT_ADVERTISEMENT",
+ "AMT_STATUS_RECEIVED_ADVERTISEMENT",
+ "AMT_STATUS_SENT_REQUEST",
+ "AMT_STATUS_RECEIVED_REQUEST",
+ "AMT_STATUS_SENT_QUERY",
+ "AMT_STATUS_RECEIVED_QUERY",
+ "AMT_STATUS_SENT_UPDATE",
+ "AMT_STATUS_RECEIVED_UPDATE",
+};
+
+static char *type_str[] = {
+ "AMT_MSG_DISCOVERY",
+ "AMT_MSG_ADVERTISEMENT",
+ "AMT_MSG_REQUEST",
+ "AMT_MSG_MEMBERSHIP_QUERY",
+ "AMT_MSG_MEMBERSHIP_UPDATE",
+ "AMT_MSG_MULTICAST_DATA",
+ "AMT_MSG_TEARDOWM",
+};
+
+static char *action_str[] = {
+ "AMT_ACT_GMI",
+ "AMT_ACT_GMI_ZERO",
+ "AMT_ACT_GT",
+ "AMT_ACT_STATUS_FWD_NEW",
+ "AMT_ACT_STATUS_D_FWD_NEW",
+ "AMT_ACT_STATUS_NONE_NEW",
+};
+
+static struct igmpv3_grec igmpv3_zero_grec;
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } }
+static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT;
+static struct mld2_grec mldv2_zero_grec;
+#endif
+
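+/* Per-skb AMT state is stashed in skb->cb directly after qdisc_skb_cb;
+ * the BUILD_BUG_ON below proves both structures fit in the 48-byte cb.
+ */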
+static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) >
+ sizeof_field(struct sk_buff, cb));
+
+ return (struct amt_skb_cb *)((void *)skb->cb +
+ sizeof(struct qdisc_skb_cb));
+}
+
+static void __amt_source_gc_work(void)
+{
+ struct amt_source_node *snode;
+ struct hlist_head gc_list;
+ struct hlist_node *t;
+
+ spin_lock_bh(&source_gc_lock);
+ hlist_move_list(&source_gc_list, &gc_list);
+ spin_unlock_bh(&source_gc_lock);
+
+ hlist_for_each_entry_safe(snode, t, &gc_list, node) {
+ hlist_del_rcu(&snode->node);
+ kfree_rcu(snode, rcu);
+ }
+}
+
+static void amt_source_gc_work(struct work_struct *work)
+{
+ __amt_source_gc_work();
+
+ spin_lock_bh(&source_gc_lock);
+ mod_delayed_work(amt_wq, &source_gc_wq,
+ msecs_to_jiffies(AMT_GC_INTERVAL));
+ spin_unlock_bh(&source_gc_lock);
+}
+
+static bool amt_addr_equal(union amt_addr *a, union amt_addr *b)
+{
+ return !memcmp(a, b, sizeof(union amt_addr));
+}
+
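+/* Bucket a source address: jhash with the per-device random seed, then
+ * reciprocal_scale() to map the 32-bit hash onto [0, hash_buckets).
+ */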
+static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src)
+{
+ u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);
+
+ return reciprocal_scale(hash, tunnel->amt->hash_buckets);
+}
+
+static bool amt_status_filter(struct amt_source_node *snode,
+ enum amt_filter filter)
+{
+ bool rc = false;
+
+ switch (filter) {
+ case AMT_FILTER_FWD:
+ if (snode->status == AMT_SOURCE_STATUS_FWD &&
+ snode->flags == AMT_SOURCE_OLD)
+ rc = true;
+ break;
+ case AMT_FILTER_D_FWD:
+ if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
+ snode->flags == AMT_SOURCE_OLD)
+ rc = true;
+ break;
+ case AMT_FILTER_FWD_NEW:
+ if (snode->status == AMT_SOURCE_STATUS_FWD &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ case AMT_FILTER_D_FWD_NEW:
+ if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ case AMT_FILTER_ALL:
+ rc = true;
+ break;
+ case AMT_FILTER_NONE_NEW:
+ if (snode->status == AMT_SOURCE_STATUS_NONE &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ case AMT_FILTER_BOTH:
+ if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
+ snode->status == AMT_SOURCE_STATUS_FWD) &&
+ snode->flags == AMT_SOURCE_OLD)
+ rc = true;
+ break;
+ case AMT_FILTER_BOTH_NEW:
+ if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
+ snode->status == AMT_SOURCE_STATUS_FWD) &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return rc;
+}
+
+static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ enum amt_filter filter,
+ union amt_addr *src)
+{
+ u32 hash = amt_source_hash(tunnel, src);
+ struct amt_source_node *snode;
+
+ hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node)
+ if (amt_status_filter(snode, filter) &&
+ amt_addr_equal(&snode->source_addr, src))
+ return snode;
+
+ return NULL;
+}
+
+static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group)
+{
+ u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed);
+
+ return reciprocal_scale(hash, tunnel->amt->hash_buckets);
+}
+
+static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel,
+ union amt_addr *group,
+ union amt_addr *host,
+ bool v6)
+{
+ u32 hash = amt_group_hash(tunnel, group);
+ struct amt_group_node *gnode;
+
+ hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) {
+ if (amt_addr_equal(&gnode->group_addr, group) &&
+ amt_addr_equal(&gnode->host_addr, host) &&
+ gnode->v6 == v6)
+ return gnode;
+ }
+
+ return NULL;
+}
+
+static void amt_destroy_source(struct amt_source_node *snode)
+{
+ struct amt_group_node *gnode = snode->gnode;
+ struct amt_tunnel_list *tunnel;
+
+ tunnel = gnode->tunnel_list;
+
+ if (!gnode->v6) {
+ netdev_dbg(snode->gnode->amt->dev,
+ "Delete source %pI4 from %pI4\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ netdev_dbg(snode->gnode->amt->dev,
+ "Delete source %pI6 from %pI6\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6);
+#endif
+ }
+
+ cancel_delayed_work(&snode->source_timer);
+ hlist_del_init_rcu(&snode->node);
+ tunnel->nr_sources--;
+ gnode->nr_sources--;
+ spin_lock_bh(&source_gc_lock);
+ hlist_add_head_rcu(&snode->node, &source_gc_list);
+ spin_unlock_bh(&source_gc_lock);
+}
+
+static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode)
+{
+ struct amt_source_node *snode;
+ struct hlist_node *t;
+ int i;
+
+ if (cancel_delayed_work(&gnode->group_timer))
+ dev_put(amt->dev);
+ hlist_del_rcu(&gnode->node);
+ gnode->tunnel_list->nr_groups--;
+
+ if (!gnode->v6)
+ netdev_dbg(amt->dev, "Leave group %pI4\n",
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(amt->dev, "Leave group %pI6\n",
+ &gnode->group_addr.ip6);
+#endif
+ for (i = 0; i < amt->hash_buckets; i++)
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node)
+ amt_destroy_source(snode);
+
+ /* tunnel->lock was acquired outside of amt_del_group(),
+ * but rcu_read_lock() was acquired too, so it's safe.
+ */
+ kfree_rcu(gnode, rcu);
+}
+
+/* If a source timer expires with a router filter-mode for the group of
+ * INCLUDE, the router concludes that traffic from this particular
+ * source is no longer desired on the attached network, and deletes the
+ * associated source record.
+ */
+static void amt_source_work(struct work_struct *work)
+{
+ struct amt_source_node *snode = container_of(to_delayed_work(work),
+ struct amt_source_node,
+ source_timer);
+ struct amt_group_node *gnode = snode->gnode;
+ struct amt_dev *amt = gnode->amt;
+ struct amt_tunnel_list *tunnel;
+
+ tunnel = gnode->tunnel_list;
+ spin_lock_bh(&tunnel->lock);
+ rcu_read_lock();
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+ amt_destroy_source(snode);
+ if (!gnode->nr_sources)
+ amt_del_group(amt, gnode);
+ } else {
+ /* When a router filter-mode for a group is EXCLUDE,
+ * source records are only deleted when the group timer expires
+ */
+ snode->status = AMT_SOURCE_STATUS_D_FWD;
+ }
+ rcu_read_unlock();
+ spin_unlock_bh(&tunnel->lock);
+}
+
+static void amt_act_src(struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ struct amt_source_node *snode,
+ enum amt_act act)
+{
+ struct amt_dev *amt = tunnel->amt;
+
+ switch (act) {
+ case AMT_ACT_GMI:
+ mod_delayed_work(amt_wq, &snode->source_timer,
+ msecs_to_jiffies(amt_gmi(amt)));
+ break;
+ case AMT_ACT_GMI_ZERO:
+ cancel_delayed_work(&snode->source_timer);
+ break;
+ case AMT_ACT_GT:
+ mod_delayed_work(amt_wq, &snode->source_timer,
+ gnode->group_timer.timer.expires);
+ break;
+ case AMT_ACT_STATUS_FWD_NEW:
+ snode->status = AMT_SOURCE_STATUS_FWD;
+ snode->flags = AMT_SOURCE_NEW;
+ break;
+ case AMT_ACT_STATUS_D_FWD_NEW:
+ snode->status = AMT_SOURCE_STATUS_D_FWD;
+ snode->flags = AMT_SOURCE_NEW;
+ break;
+ case AMT_ACT_STATUS_NONE_NEW:
+ cancel_delayed_work(&snode->source_timer);
+ snode->status = AMT_SOURCE_STATUS_NONE;
+ snode->flags = AMT_SOURCE_NEW;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (!gnode->v6)
+ netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4,
+ action_str[act]);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6,
+ action_str[act]);
+#endif
+}
+
+static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode,
+ union amt_addr *src)
+{
+ struct amt_source_node *snode;
+
+ snode = kzalloc(sizeof(*snode), GFP_ATOMIC);
+ if (!snode)
+ return NULL;
+
+ memcpy(&snode->source_addr, src, sizeof(union amt_addr));
+ snode->gnode = gnode;
+ snode->status = AMT_SOURCE_STATUS_NONE;
+ snode->flags = AMT_SOURCE_NEW;
+ INIT_HLIST_NODE(&snode->node);
+ INIT_DELAYED_WORK(&snode->source_timer, amt_source_work);
+
+ return snode;
+}
+
+/* RFC 3810 - 7.2.2. Definition of Filter Timers
+ *
+ * Router Mode Filter Timer Actions/Comments
+ * ----------- ----------------- ----------------
+ *
+ * INCLUDE Not Used All listeners in
+ * INCLUDE mode.
+ *
+ * EXCLUDE Timer > 0 At least one listener
+ * in EXCLUDE mode.
+ *
+ * EXCLUDE Timer == 0 No more listeners in
+ * EXCLUDE mode for the
+ * multicast address.
+ * If the Requested List
+ * is empty, delete
+ * Multicast Address
+ * Record. If not, switch
+ * to INCLUDE filter mode;
+ * the sources in the
+ * Requested List are
+ * moved to the Include
+ * List, and the Exclude
+ * List is deleted.
+ */
+static void amt_group_work(struct work_struct *work)
+{
+ struct amt_group_node *gnode = container_of(to_delayed_work(work),
+ struct amt_group_node,
+ group_timer);
+ struct amt_tunnel_list *tunnel = gnode->tunnel_list;
+ struct amt_dev *amt = gnode->amt;
+ struct amt_source_node *snode;
+ bool delete_group = true;
+ struct hlist_node *t;
+ int i, buckets;
+
+ buckets = amt->hash_buckets;
+
+ spin_lock_bh(&tunnel->lock);
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+ /* Not Used */
+ spin_unlock_bh(&tunnel->lock);
+ goto out;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < buckets; i++) {
+ hlist_for_each_entry_safe(snode, t,
+ &gnode->sources[i], node) {
+ if (!delayed_work_pending(&snode->source_timer) ||
+ snode->status == AMT_SOURCE_STATUS_D_FWD) {
+ amt_destroy_source(snode);
+ } else {
+ delete_group = false;
+ snode->status = AMT_SOURCE_STATUS_FWD;
+ }
+ }
+ }
+ if (delete_group)
+ amt_del_group(amt, gnode);
+ else
+ gnode->filter_mode = MCAST_INCLUDE;
+ rcu_read_unlock();
+ spin_unlock_bh(&tunnel->lock);
+out:
+ dev_put(amt->dev);
+}
+
+/* Non-existent group is created as INCLUDE {empty}:
+ *
+ * RFC 3376 - 5.1. Action on Change of Interface State
+ *
+ * If no interface state existed for that multicast address before
+ * the change (i.e., the change consisted of creating a new
+ * per-interface record), or if no state exists after the change
+ * (i.e., the change consisted of deleting a per-interface record),
+ * then the "non-existent" state is considered to have a filter mode
+ * of INCLUDE and an empty source list.
+ */
+static struct amt_group_node *amt_add_group(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ union amt_addr *group,
+ union amt_addr *host,
+ bool v6)
+{
+ struct amt_group_node *gnode;
+ u32 hash;
+ int i;
+
+ if (tunnel->nr_groups >= amt->max_groups)
+ return ERR_PTR(-ENOSPC);
+
+ gnode = kzalloc(sizeof(*gnode) +
+ (sizeof(struct hlist_head) * amt->hash_buckets),
+ GFP_ATOMIC);
+ if (unlikely(!gnode))
+ return ERR_PTR(-ENOMEM);
+
+ gnode->amt = amt;
+ gnode->group_addr = *group;
+ gnode->host_addr = *host;
+ gnode->v6 = v6;
+ gnode->tunnel_list = tunnel;
+ gnode->filter_mode = MCAST_INCLUDE;
+ INIT_HLIST_NODE(&gnode->node);
+ INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work);
+ for (i = 0; i < amt->hash_buckets; i++)
+ INIT_HLIST_HEAD(&gnode->sources[i]);
+
+ hash = amt_group_hash(tunnel, group);
+ hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]);
+ tunnel->nr_groups++;
+
+ if (!gnode->v6)
+ netdev_dbg(amt->dev, "Join group %pI4\n",
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(amt->dev, "Join group %pI6\n",
+ &gnode->group_addr.ip6);
+#endif
+
+ return gnode;
+}
+
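+/* Build an IGMPv3 general query (dst 224.0.0.1, TTL 1, Router Alert
+ * option); the relay transmits it through amt_dev_xmit(), which wraps
+ * it in an AMT Membership Query for the target tunnel.
+ */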
+static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
+{
+ u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 };
+ int hlen = LL_RESERVED_SPACE(amt->dev);
+ int tlen = amt->dev->needed_tailroom;
+ struct igmpv3_query *ihv3;
+ void *csum_start = NULL;
+ __sum16 *csum = NULL;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ unsigned int len;
+ int offset;
+
+ len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, hlen);
+ skb_push(skb, sizeof(*eth));
+ skb->protocol = htons(ETH_P_IP);
+ skb_reset_mac_header(skb);
+ skb->priority = TC_PRIO_CONTROL;
+ skb_put(skb, sizeof(*iph));
+ skb_put_data(skb, ra, sizeof(ra));
+ skb_put(skb, sizeof(*ihv3));
+ skb_pull(skb, sizeof(*eth));
+ skb_reset_network_header(skb);
+
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2;
+ iph->tos = AMT_TOS;
+ iph->tot_len = htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3));
+ iph->frag_off = htons(IP_DF);
+ iph->ttl = 1;
+ iph->id = 0;
+ iph->protocol = IPPROTO_IGMP;
+ iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
+ iph->saddr = htonl(INADDR_ANY);
+ ip_send_check(iph);
+
+ eth = eth_hdr(skb);
+ ether_addr_copy(eth->h_source, amt->dev->dev_addr);
+ ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest);
+ eth->h_proto = htons(ETH_P_IP);
+
+ ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+ skb_reset_transport_header(skb);
+ ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
+ ihv3->code = 1;
+ ihv3->group = 0;
+ ihv3->qqic = amt->qi;
+ ihv3->nsrcs = 0;
+ ihv3->resv = 0;
+ ihv3->suppress = false;
+ ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ ihv3->csum = 0;
+ csum = &ihv3->csum;
+ csum_start = (void *)ihv3;
+ *csum = ip_compute_csum(csum_start, sizeof(*ihv3));
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS);
+
+ return skb;
+}
+
+static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+ bool validate)
+{
+ if (validate && amt->status >= status)
+ return;
+ netdev_dbg(amt->dev, "Update GW status %s -> %s",
+ status_str[amt->status], status_str[status]);
+ amt->status = status;
+}
+
+static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
+ enum amt_status status,
+ bool validate)
+{
+ if (validate && tunnel->status >= status)
+ return;
+ netdev_dbg(tunnel->amt->dev,
+ "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s",
+ &tunnel->ip4, ntohs(tunnel->source_port),
+ status_str[tunnel->status], status_str[status]);
+ tunnel->status = status;
+}
+
+static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+ bool validate)
+{
+ spin_lock_bh(&amt->lock);
+ __amt_update_gw_status(amt, status, validate);
+ spin_unlock_bh(&amt->lock);
+}
+
+static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
+ enum amt_status status, bool validate)
+{
+ spin_lock_bh(&tunnel->lock);
+ __amt_update_relay_status(tunnel, status, validate);
+ spin_unlock_bh(&tunnel->lock);
+}
+
+static void amt_send_discovery(struct amt_dev *amt)
+{
+ struct amt_header_discovery *amtd;
+ int hlen, tlen, offset;
+ struct socket *sock;
+ struct udphdr *udph;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ u32 len;
+ int err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(amt->sock);
+ if (!sock)
+ goto out;
+
+ if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
+ goto out;
+
+ rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
+ amt->discovery_ip, amt->local_ip,
+ amt->gw_port, amt->relay_port,
+ IPPROTO_UDP, 0,
+ amt->stream_dev->ifindex);
+ if (IS_ERR(rt)) {
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ hlen = LL_RESERVED_SPACE(amt->dev);
+ tlen = amt->dev->needed_tailroom;
+ len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb) {
+ ip_rt_put(rt);
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_dst_set(skb, &rt->dst);
+
+ len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
+ amtd->version = 0;
+ amtd->type = AMT_MSG_DISCOVERY;
+ amtd->reserved = 0;
+ amtd->nonce = amt->nonce;
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+ udph->source = amt->gw_port;
+ udph->dest = amt->relay_port;
+ udph->len = htons(sizeof(*udph) + sizeof(*amtd));
+ udph->check = 0;
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip,
+ sizeof(*udph) + sizeof(*amtd),
+ IPPROTO_UDP, skb->csum);
+
+ skb_push(skb, sizeof(*iph));
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr)) >> 2;
+ iph->tos = AMT_TOS;
+ iph->frag_off = 0;
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
+ iph->daddr = amt->discovery_ip;
+ iph->saddr = amt->local_ip;
+ iph->protocol = IPPROTO_UDP;
+ iph->tot_len = htons(len);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(amt->net, skb, NULL);
+ ip_send_check(iph);
+ err = ip_local_out(amt->net, sock->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ amt->dev->stats.tx_errors++;
+
+ spin_lock_bh(&amt->lock);
+ __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
+ spin_unlock_bh(&amt->lock);
+out:
+ rcu_read_unlock();
+}
+
+static void amt_send_request(struct amt_dev *amt, bool v6)
+{
+ struct amt_header_request *amtrh;
+ int hlen, tlen, offset;
+ struct socket *sock;
+ struct udphdr *udph;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ u32 len;
+ int err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(amt->sock);
+ if (!sock)
+ goto out;
+
+ if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
+ goto out;
+
+ rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
+ amt->remote_ip, amt->local_ip,
+ amt->gw_port, amt->relay_port,
+ IPPROTO_UDP, 0,
+ amt->stream_dev->ifindex);
+ if (IS_ERR(rt)) {
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ hlen = LL_RESERVED_SPACE(amt->dev);
+ tlen = amt->dev->needed_tailroom;
+ len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb) {
+ ip_rt_put(rt);
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_dst_set(skb, &rt->dst);
+
+ len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
+ amtrh->version = 0;
+ amtrh->type = AMT_MSG_REQUEST;
+ amtrh->reserved1 = 0;
+ amtrh->p = v6;
+ amtrh->reserved2 = 0;
+ amtrh->nonce = amt->nonce;
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+ udph->source = amt->gw_port;
+ udph->dest = amt->relay_port;
+ udph->len = htons(sizeof(*amtrh) + sizeof(*udph));
+ udph->check = 0;
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip,
+ sizeof(*udph) + sizeof(*amtrh),
+ IPPROTO_UDP, skb->csum);
+
+ skb_push(skb, sizeof(*iph));
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr)) >> 2;
+ iph->tos = AMT_TOS;
+ iph->frag_off = 0;
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
+ iph->daddr = amt->remote_ip;
+ iph->saddr = amt->local_ip;
+ iph->protocol = IPPROTO_UDP;
+ iph->tot_len = htons(len);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(amt->net, skb, NULL);
+ ip_send_check(iph);
+ err = ip_local_out(amt->net, sock->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ amt->dev->stats.tx_errors++;
+
+out:
+ rcu_read_unlock();
+}
+
+static void amt_send_igmp_gq(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel)
+{
+ struct sk_buff *skb;
+
+ skb = amt_build_igmp_gq(amt);
+ if (!skb)
+ return;
+
+ amt_skb_cb(skb)->tunnel = tunnel;
+ dev_queue_xmit(skb);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt)
+{
+ u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
+ 2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 };
+ int hlen = LL_RESERVED_SPACE(amt->dev);
+ int tlen = amt->dev->needed_tailroom;
+ struct mld2_query *mld2q;
+ void *csum_start = NULL;
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ u32 len;
+
+ len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, hlen);
+ skb_push(skb, sizeof(*eth));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->priority = TC_PRIO_CONTROL;
+ skb->protocol = htons(ETH_P_IPV6);
+ skb_put_zero(skb, sizeof(*ip6h));
+ skb_put_data(skb, ra, sizeof(ra));
+ skb_put_zero(skb, sizeof(*mld2q));
+ skb_pull(skb, sizeof(*eth));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6h->payload_len = htons(sizeof(ra) + sizeof(*mld2q));
+ ip6h->nexthdr = NEXTHDR_HOP;
+ ip6h->hop_limit = 1;
+ ip6h->daddr = mld2_all_node;
+ ip6_flow_hdr(ip6h, 0, 0);
+
+ if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0,
+ &ip6h->saddr)) {
+ amt->dev->stats.tx_errors++;
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ eth->h_proto = htons(ETH_P_IPV6);
+ ether_addr_copy(eth->h_source, amt->dev->dev_addr);
+ ipv6_eth_mc_map(&mld2_all_node, eth->h_dest);
+
+ skb_pull(skb, sizeof(*ip6h) + sizeof(ra));
+ skb_reset_transport_header(skb);
+ mld2q = (struct mld2_query *)icmp6_hdr(skb);
+ mld2q->mld2q_mrc = htons(1);
+ mld2q->mld2q_type = ICMPV6_MGM_QUERY;
+ mld2q->mld2q_code = 0;
+ mld2q->mld2q_cksum = 0;
+ mld2q->mld2q_resv1 = 0;
+ mld2q->mld2q_resv2 = 0;
+ mld2q->mld2q_suppress = 0;
+ mld2q->mld2q_qrv = amt->qrv;
+ mld2q->mld2q_nsrcs = 0;
+ mld2q->mld2q_qqic = amt->qi;
+ csum_start = (void *)mld2q;
+ mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ sizeof(*mld2q),
+ IPPROTO_ICMPV6,
+ csum_partial(csum_start,
+ sizeof(*mld2q), 0));
+
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra));
+ return skb;
+}
+
+static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
+{
+ struct sk_buff *skb;
+
+ skb = amt_build_mld_gq(amt);
+ if (!skb)
+ return;
+
+ amt_skb_cb(skb)->tunnel = tunnel;
+ dev_queue_xmit(skb);
+}
+#else
+static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
+{
+}
+#endif
+
+static void amt_secret_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ secret_wq);
+
+ spin_lock_bh(&amt->lock);
+ get_random_bytes(&amt->key, sizeof(siphash_key_t));
+ spin_unlock_bh(&amt->lock);
+ mod_delayed_work(amt_wq, &amt->secret_wq,
+ msecs_to_jiffies(AMT_SECRET_TIMEOUT));
+}
+
+static void amt_discovery_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ discovery_wq);
+
+ spin_lock_bh(&amt->lock);
+ if (amt->status > AMT_STATUS_SENT_DISCOVERY)
+ goto out;
+ get_random_bytes(&amt->nonce, sizeof(__be32));
+ spin_unlock_bh(&amt->lock);
+
+ amt_send_discovery(amt);
+ spin_lock_bh(&amt->lock);
+out:
+ mod_delayed_work(amt_wq, &amt->discovery_wq,
+ msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
+ spin_unlock_bh(&amt->lock);
+}
+
+static void amt_req_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ req_wq);
+ u32 exp;
+
+ spin_lock_bh(&amt->lock);
+ if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
+ goto out;
+
+ if (amt->req_cnt++ > AMT_MAX_REQ_COUNT) {
+ netdev_dbg(amt->dev, "Gateway is not ready");
+ amt->qi = AMT_INIT_REQ_TIMEOUT;
+ amt->ready4 = false;
+ amt->ready6 = false;
+ amt->remote_ip = 0;
+ __amt_update_gw_status(amt, AMT_STATUS_INIT, false);
+ amt->req_cnt = 0;
+ }
+ spin_unlock_bh(&amt->lock);
+
+ amt_send_request(amt, false);
+ amt_send_request(amt, true);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
+ spin_lock_bh(&amt->lock);
+out:
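+ /* Exponential backoff: retry after min(2^req_cnt, AMT_MAX_REQ_TIMEOUT)
+ * seconds.
+ */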
+ exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
+ mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
+ spin_unlock_bh(&amt->lock);
+}
+
+static bool amt_send_membership_update(struct amt_dev *amt,
+ struct sk_buff *skb,
+ bool v6)
+{
+ struct amt_header_membership_update *amtmu;
+ struct socket *sock;
+ struct iphdr *iph;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ int err;
+
+ sock = rcu_dereference_bh(amt->sock);
+ if (!sock)
+ return true;
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) +
+ sizeof(*iph) + sizeof(struct udphdr));
+ if (err)
+ return true;
+
+ skb_reset_inner_headers(skb);
+ memset(&fl4, 0, sizeof(struct flowi4));
+ fl4.flowi4_oif = amt->stream_dev->ifindex;
+ fl4.daddr = amt->remote_ip;
+ fl4.saddr = amt->local_ip;
+ fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(amt->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip);
+ return true;
+ }
+
+ amtmu = skb_push(skb, sizeof(*amtmu));
+ amtmu->version = 0;
+ amtmu->type = AMT_MSG_MEMBERSHIP_UPDATE;
+ amtmu->reserved = 0;
+ amtmu->nonce = amt->nonce;
+ amtmu->response_mac = amt->mac;
+
+ if (!v6)
+ skb_set_inner_protocol(skb, htons(ETH_P_IP));
+ else
+ skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
+ udp_tunnel_xmit_skb(rt, sock->sk, skb,
+ fl4.saddr,
+ fl4.daddr,
+ AMT_TOS,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ amt->gw_port,
+ amt->relay_port,
+ false,
+ false);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
+ return false;
+}
+
+static void amt_send_multicast_data(struct amt_dev *amt,
+ const struct sk_buff *oskb,
+ struct amt_tunnel_list *tunnel,
+ bool v6)
+{
+ struct amt_header_mcast_data *amtmd;
+ struct socket *sock;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct flowi4 fl4;
+ struct rtable *rt;
+
+ sock = rcu_dereference_bh(amt->sock);
+ if (!sock)
+ return;
+
+ skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) +
+ sizeof(struct udphdr), 0, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb_reset_inner_headers(skb);
+ memset(&fl4, 0, sizeof(struct flowi4));
+ fl4.flowi4_oif = amt->stream_dev->ifindex;
+ fl4.daddr = tunnel->ip4;
+ fl4.saddr = amt->local_ip;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(amt->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
+ kfree_skb(skb);
+ return;
+ }
+
+ amtmd = skb_push(skb, sizeof(*amtmd));
+ amtmd->version = 0;
+ amtmd->reserved = 0;
+ amtmd->type = AMT_MSG_MULTICAST_DATA;
+
+ if (!v6)
+ skb_set_inner_protocol(skb, htons(ETH_P_IP));
+ else
+ skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
+ udp_tunnel_xmit_skb(rt, sock->sk, skb,
+ fl4.saddr,
+ fl4.daddr,
+ AMT_TOS,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ amt->relay_port,
+ tunnel->source_port,
+ false,
+ false);
+}
+
+static bool amt_send_membership_query(struct amt_dev *amt,
+ struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel,
+ bool v6)
+{
+ struct amt_header_membership_query *amtmq;
+ struct socket *sock;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ int err;
+
+ sock = rcu_dereference_bh(amt->sock);
+ if (!sock)
+ return true;
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) +
+ sizeof(struct iphdr) + sizeof(struct udphdr));
+ if (err)
+ return true;
+
+ skb_reset_inner_headers(skb);
+ memset(&fl4, 0, sizeof(struct flowi4));
+ fl4.flowi4_oif = amt->stream_dev->ifindex;
+ fl4.daddr = tunnel->ip4;
+ fl4.saddr = amt->local_ip;
+ fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(amt->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
+ return true;
+ }
+
+ amtmq = skb_push(skb, sizeof(*amtmq));
+ amtmq->version = 0;
+ amtmq->type = AMT_MSG_MEMBERSHIP_QUERY;
+ amtmq->reserved = 0;
+ amtmq->l = 0;
+ amtmq->g = 0;
+ amtmq->nonce = tunnel->nonce;
+ amtmq->response_mac = tunnel->mac;
+
+ if (!v6)
+ skb_set_inner_protocol(skb, htons(ETH_P_IP));
+ else
+ skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
+ udp_tunnel_xmit_skb(rt, sock->sk, skb,
+ fl4.saddr,
+ fl4.daddr,
+ AMT_TOS,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ amt->relay_port,
+ tunnel->source_port,
+ false,
+ false);
+ amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
+ return false;
+}
+
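+/* ndo_start_xmit: a gateway forwards locally generated IGMP/MLD reports
+ * to the relay as Membership Updates; a relay bounces queries back over
+ * the originating tunnel and replicates multicast data to every tunnel
+ * whose group table matches the destination.
+ */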
+static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ struct amt_tunnel_list *tunnel;
+ struct amt_group_node *gnode;
+ union amt_addr group = {0,};
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ip6h;
+ struct mld_msg *mld;
+#endif
+ bool report = false;
+ struct igmphdr *ih;
+ bool query = false;
+ struct iphdr *iph;
+ bool data = false;
+ bool v6 = false;
+ u32 hash;
+
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (!ipv4_is_multicast(iph->daddr))
+ goto free;
+
+ if (!ip_mc_check_igmp(skb)) {
+ ih = igmp_hdr(skb);
+ switch (ih->type) {
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ case IGMP_HOST_MEMBERSHIP_REPORT:
+ report = true;
+ break;
+ case IGMP_HOST_MEMBERSHIP_QUERY:
+ query = true;
+ break;
+ default:
+ goto free;
+ }
+ } else {
+ data = true;
+ }
+ v6 = false;
+ group.ip4 = iph->daddr;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ goto free;
+
+ if (!ipv6_mc_check_mld(skb)) {
+ mld = (struct mld_msg *)skb_transport_header(skb);
+ switch (mld->mld_type) {
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MLD2_REPORT:
+ report = true;
+ break;
+ case ICMPV6_MGM_QUERY:
+ query = true;
+ break;
+ default:
+ goto free;
+ }
+ } else {
+ data = true;
+ }
+ v6 = true;
+ group.ip6 = ip6h->daddr;
+#endif
+ } else {
+ dev->stats.tx_errors++;
+ goto free;
+ }
+
+ if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
+ goto free;
+
+ skb_pull(skb, sizeof(struct ethhdr));
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ /* Gateway only passes IGMP/MLD packets */
+ if (!report)
+ goto free;
+ if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
+ goto free;
+ if (amt_send_membership_update(amt, skb, v6))
+ goto free;
+ goto unlock;
+ } else if (amt->mode == AMT_MODE_RELAY) {
+ if (query) {
+ tunnel = amt_skb_cb(skb)->tunnel;
+ if (!tunnel) {
+ WARN_ON(1);
+ goto free;
+ }
+
+ /* Do not forward unexpected query */
+ if (amt_send_membership_query(amt, skb, tunnel, v6))
+ goto free;
+ goto unlock;
+ }
+
+ if (!data)
+ goto free;
+ list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
+ hash = amt_group_hash(tunnel, &group);
+ hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash],
+ node) {
+ if (!v6) {
+ if (gnode->group_addr.ip4 == iph->daddr)
+ goto found;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (ipv6_addr_equal(&gnode->group_addr.ip6,
+ &ip6h->daddr))
+ goto found;
+#endif
+ }
+ }
+ continue;
+found:
+ amt_send_multicast_data(amt, skb, tunnel, v6);
+ }
+ }
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+free:
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+unlock:
+ return NETDEV_TX_OK;
+}
+
+static int amt_parse_type(struct sk_buff *skb)
+{
+ struct amt_header *amth;
+
+ if (!pskb_may_pull(skb, sizeof(struct udphdr) +
+ sizeof(struct amt_header)))
+ return -1;
+
+ amth = (struct amt_header *)(udp_hdr(skb) + 1);
+
+ if (amth->version != 0)
+ return -1;
+
+ if (amth->type >= __AMT_MSG_MAX || !amth->type)
+ return -1;
+ return amth->type;
+}
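+
+/* For reference (RFC 7450): every AMT message starts with a common header
+ * whose first octet is a 4-bit version (always 0) followed by a 4-bit
+ * message type; struct amt_header in include/net/amt.h is assumed to
+ * expose exactly those two fields, which is all amt_parse_type() checks
+ * before callers dispatch on the returned type.
+ */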
+
+static void amt_clear_groups(struct amt_tunnel_list *tunnel)
+{
+ struct amt_dev *amt = tunnel->amt;
+ struct amt_group_node *gnode;
+ struct hlist_node *t;
+ int i;
+
+ spin_lock_bh(&tunnel->lock);
+ rcu_read_lock();
+ for (i = 0; i < amt->hash_buckets; i++)
+ hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
+ amt_del_group(amt, gnode);
+ rcu_read_unlock();
+ spin_unlock_bh(&tunnel->lock);
+}
+
+static void amt_tunnel_expire(struct work_struct *work)
+{
+ struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work),
+ struct amt_tunnel_list,
+ gc_wq);
+ struct amt_dev *amt = tunnel->amt;
+
+ spin_lock_bh(&amt->lock);
+ rcu_read_lock();
+ list_del_rcu(&tunnel->list);
+ amt->nr_tunnels--;
+ amt_clear_groups(tunnel);
+ rcu_read_unlock();
+ spin_unlock_bh(&amt->lock);
+ kfree_rcu(tunnel, rcu);
+}
+
+static void amt_cleanup_srcs(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode)
+{
+ struct amt_source_node *snode;
+ struct hlist_node *t;
+ int i;
+
+ /* Delete old sources */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) {
+ if (snode->flags == AMT_SOURCE_OLD)
+ amt_destroy_source(snode);
+ }
+ }
+
+ /* switch from new to old */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) {
+ snode->flags = AMT_SOURCE_OLD;
+ if (!gnode->v6)
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as OLD %pI4 from %pI4\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as OLD %pI6 from %pI6\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6);
+#endif
+ }
+ }
+}
+
+static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode, void *grec,
+ bool v6)
+{
+ struct igmpv3_grec *igmp_grec;
+ struct amt_source_node *snode;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct mld2_grec *mld_grec;
+#endif
+ union amt_addr src = {0,};
+ u16 nsrcs;
+ u32 hash;
+ int i;
+
+ if (!v6) {
+ igmp_grec = (struct igmpv3_grec *)grec;
+ nsrcs = ntohs(igmp_grec->grec_nsrcs);
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ mld_grec = (struct mld2_grec *)grec;
+ nsrcs = ntohs(mld_grec->grec_nsrcs);
+#else
+ return;
+#endif
+ }
+ for (i = 0; i < nsrcs; i++) {
+ if (tunnel->nr_sources >= amt->max_sources)
+ return;
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src))
+ continue;
+
+ snode = amt_alloc_snode(gnode, &src);
+ if (snode) {
+ hash = amt_source_hash(tunnel, &snode->source_addr);
+ hlist_add_head_rcu(&snode->node, &gnode->sources[hash]);
+ tunnel->nr_sources++;
+ gnode->nr_sources++;
+
+ if (!gnode->v6)
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as NEW %pI4 from %pI4\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as NEW %pI6 from %pI6\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6);
+#endif
+ }
+ }
+}
+
+/* Router State   Report Rec'd  New Router State
+ * ------------   ------------  ----------------
+ * EXCLUDE (X,Y)  IS_IN (A)     EXCLUDE (X+A,Y-A)
+ *
+ *  -----------+-----------+-----------+
+ *             |    OLD    |    NEW    |
+ *  -----------+-----------+-----------+
+ *   FWD       |     X     |    X+A    |
+ *  -----------+-----------+-----------+
+ *   D_FWD     |     Y     |    Y-A    |
+ *  -----------+-----------+-----------+
+ *   NONE      |           |     A     |
+ *  -----------+-----------+-----------+
+ *
+ * a) Received sources are NONE/NEW
+ * b) All NONE will be deleted by amt_cleanup_srcs().
+ * c) All OLD will be deleted by amt_cleanup_srcs().
+ * d) After delete, NEW source will be switched to OLD.
+ */
+static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec,
+ enum amt_ops ops,
+ enum amt_filter filter,
+ enum amt_act act,
+ bool v6)
+{
+ struct amt_dev *amt = tunnel->amt;
+ struct amt_source_node *snode;
+ struct igmpv3_grec *igmp_grec;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct mld2_grec *mld_grec;
+#endif
+ union amt_addr src = {0,};
+ struct hlist_node *t;
+ u16 nsrcs;
+ int i, j;
+
+ if (!v6) {
+ igmp_grec = (struct igmpv3_grec *)grec;
+ nsrcs = ntohs(igmp_grec->grec_nsrcs);
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ mld_grec = (struct mld2_grec *)grec;
+ nsrcs = ntohs(mld_grec->grec_nsrcs);
+#else
+ return;
+#endif
+ }
+
+ memset(&src, 0, sizeof(union amt_addr));
+ switch (ops) {
+ case AMT_OPS_INT:
+ /* A*B */
+ for (i = 0; i < nsrcs; i++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ snode = amt_lookup_src(tunnel, gnode, filter, &src);
+ if (!snode)
+ continue;
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ break;
+ case AMT_OPS_UNI:
+ /* A+B */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
+ node) {
+ if (amt_status_filter(snode, filter))
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ }
+ for (i = 0; i < nsrcs; i++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ snode = amt_lookup_src(tunnel, gnode, filter, &src);
+ if (!snode)
+ continue;
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ break;
+ case AMT_OPS_SUB:
+ /* A-B */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
+ node) {
+ if (!amt_status_filter(snode, filter))
+ continue;
+ for (j = 0; j < nsrcs; j++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[j];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6,
+ &mld_grec->grec_src[j],
+ sizeof(struct in6_addr));
+#endif
+ if (amt_addr_equal(&snode->source_addr,
+ &src))
+ goto out_sub;
+ }
+ amt_act_src(tunnel, gnode, snode, act);
+ continue;
+out_sub:;
+ }
+ }
+ break;
+ case AMT_OPS_SUB_REV:
+ /* B-A */
+ for (i = 0; i < nsrcs; i++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL,
+ &src);
+ if (!snode) {
+ snode = amt_lookup_src(tunnel, gnode,
+ filter, &src);
+ if (snode)
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ }
+ break;
+ default:
+ netdev_dbg(amt->dev, "Invalid type\n");
+ return;
+ }
+}
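+
+/* Worked example for the ops above (illustrative): with existing source
+ * entries A = {s1, s2, s3} and report sources B = {s2, s4}:
+ *
+ *   AMT_OPS_INT      acts on A*B = {s2}
+ *   AMT_OPS_UNI      acts on A+B (every entry that exists and matches)
+ *   AMT_OPS_SUB      acts on A-B = {s1, s3}
+ *   AMT_OPS_SUB_REV  acts on B-A = {s4}
+ *
+ * An entry is acted on only if it also passes amt_status_filter() for
+ * the given filter; B-A therefore relies on the NEW entries that
+ * amt_add_srcs() created for the report beforehand.
+ */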
+
+static void amt_mcast_is_in_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)        (B)=GMI
+ */
+ /* Update IS_IN (B) as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update INCLUDE (A) as NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* (B)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ } else {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * EXCLUDE (X,Y)  IS_IN (A)     EXCLUDE (X+A,Y-A)    (A)=GMI
+ */
+ /* Update (A) in (X, Y) as NONE/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_BOTH,
+ AMT_ACT_STATUS_NONE_NEW,
+ v6);
+ /* Update FWD/OLD as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update IS_IN (A) as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update EXCLUDE (, Y-A) as D_FWD_NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ }
+}
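+
+/* Concrete instance of the EXCLUDE/IS_IN row above (illustrative): from
+ * router state EXCLUDE (X={a}, Y={b,c}), a report IS_IN (A={c,d}) moves
+ * the state to EXCLUDE (X+A={a,c,d}, Y-A={b}) and sets the timers of
+ * {c,d} to GMI; the four amt_lookup_act_srcs() calls in the else branch
+ * encode exactly those FWD/D_FWD moves.
+ */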
+
+static void amt_mcast_is_ex_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A)    (B-A)=0
+ *                                                   Delete (A-B)
+ *                                                   Group Timer=GMI
+ */
+ /* EXCLUDE(A*B, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE(, B-A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (B-A)=0 */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_ACT_GMI_ZERO,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ gnode->filter_mode = MCAST_EXCLUDE;
+ /* Deleting (A-B) is handled by amt_cleanup_srcs(). */
+ } else {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A)    (A-X-Y)=GMI
+ *                                                   Delete (X-A)
+ *                                                   Delete (Y-A)
+ *                                                   Group Timer=GMI
+ */
+ /* EXCLUDE (A-Y, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y*A ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (A-X-Y)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_BOTH_NEW,
+ AMT_ACT_GMI,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ /* Deleting (X-A) and (Y-A) is handled by amt_cleanup_srcs(). */
+ }
+}
+
+static void amt_mcast_to_in_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)        (B)=GMI
+ *                                                   Send Q(G,A-B)
+ */
+ /* Update TO_IN (B) sources as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update INCLUDE (A) sources as NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* (B)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ } else {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A)    (A)=GMI
+ *                                                   Send Q(G,X-A)
+ *                                                   Send Q(G)
+ */
+ /* Update TO_IN (A) sources as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update EXCLUDE(X,) sources as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y-A)
+ * (A) sources are already switched to FWD_NEW,
+ * so D_FWD/OLD -> D_FWD/NEW is okay.
+ */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (A)=GMI
+ * Only FWD_NEW will have (A) sources.
+ */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ }
+}
+
+static void amt_mcast_to_ex_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A)    (B-A)=0
+ *                                                   Delete (A-B)
+ *                                                   Send Q(G,A*B)
+ *                                                   Group Timer=GMI
+ */
+ /* EXCLUDE (A*B, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, B-A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (B-A)=0 */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_ACT_GMI_ZERO,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ gnode->filter_mode = MCAST_EXCLUDE;
+ /* Deleting (A-B) is handled by amt_cleanup_srcs(). */
+ } else {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A)    (A-X-Y)=Group Timer
+ *                                                   Delete (X-A)
+ *                                                   Delete (Y-A)
+ *                                                   Send Q(G,A-Y)
+ *                                                   Group Timer=GMI
+ */
+ /* Update (A-X-Y) as NONE/OLD */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_BOTH,
+ AMT_ACT_GT,
+ v6);
+ /* EXCLUDE (A-Y, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y*A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ /* Deleting (X-A) and (Y-A) is handled by amt_cleanup_srcs(). */
+ }
+}
+
+static void amt_mcast_allow_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)        (B)=GMI
+ */
+ /* INCLUDE (A+B) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* (B)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ } else {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A)    (A)=GMI
+ */
+ /* EXCLUDE (X+A, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y-A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (A)=GMI
+ * All (A) sources are now in FWD/NEW status.
+ */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ }
+}
+
+static void amt_mcast_block_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * INCLUDE (A)    BLOCK (B)     INCLUDE (A)          Send Q(G,A*B)
+ */
+ /* INCLUDE (A) */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ } else {
+/* Router State   Report Rec'd  New Router State     Actions
+ * ------------   ------------  ----------------     -------
+ * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y)  (A-X-Y)=Group Timer
+ *                                                   Send Q(G,A-Y)
+ */
+ /* (A-X-Y)=Group Timer */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_BOTH,
+ AMT_ACT_GT,
+ v6);
+ /* EXCLUDE (X, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (X+(A-Y) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ }
+}
+
+/* RFC 3376
+ * 7.3.2. In the Presence of Older Version Group Members
+ *
+ * When Group Compatibility Mode is IGMPv2, a router internally
+ * translates the following IGMPv2 messages for that group to their
+ * IGMPv3 equivalents:
+ *
+ * IGMPv2 Message    IGMPv3 Equivalent
+ * --------------    -----------------
+ * Report            IS_EX( {} )
+ * Leave             TO_IN( {} )
+ */
+static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmphdr *ih = igmp_hdr(skb);
+ struct iphdr *iph = ip_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip4 = ih->group;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip4 = iph->saddr;
+
+ gnode = amt_lookup_group(tunnel, &group, &host, false);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host, false);
+ if (!IS_ERR(gnode)) {
+ gnode->filter_mode = MCAST_EXCLUDE;
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ }
+ }
+}
+
+/* RFC 3376
+ * 7.3.2. In the Presence of Older Version Group Members
+ *
+ * When Group Compatibility Mode is IGMPv2, a router internally
+ * translates the following IGMPv2 messages for that group to their
+ * IGMPv3 equivalents:
+ *
+ * IGMPv2 Message    IGMPv3 Equivalent
+ * --------------    -----------------
+ * Report            IS_EX( {} )
+ * Leave             TO_IN( {} )
+ */
+static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmphdr *ih = igmp_hdr(skb);
+ struct iphdr *iph = ip_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip4 = ih->group;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip4 = iph->saddr;
+
+ gnode = amt_lookup_group(tunnel, &group, &host, false);
+ if (gnode)
+ amt_del_group(amt, gnode);
+}
+
+static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb);
+ int len = skb_transport_offset(skb) + sizeof(*ihrv3);
+ void *zero_grec = (void *)&igmpv3_zero_grec;
+ struct iphdr *iph = ip_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+ struct igmpv3_grec *grec;
+ u16 nsrcs;
+ int i;
+
+ for (i = 0; i < ntohs(ihrv3->ngrec); i++) {
+ len += sizeof(*grec);
+ if (!ip_mc_may_pull(skb, len))
+ break;
+
+ grec = (void *)(skb->data + len - sizeof(*grec));
+ nsrcs = ntohs(grec->grec_nsrcs);
+
+ len += nsrcs * sizeof(__be32);
+ if (!ip_mc_may_pull(skb, len))
+ break;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip4 = grec->grec_mca;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip4 = iph->saddr;
+ gnode = amt_lookup_group(tunnel, &group, &host, false);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host,
+ false);
+ if (IS_ERR(gnode))
+ continue;
+ }
+
+ amt_add_srcs(amt, tunnel, gnode, grec, false);
+ switch (grec->grec_type) {
+ case IGMPV3_MODE_IS_INCLUDE:
+ amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_MODE_IS_EXCLUDE:
+ amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_CHANGE_TO_INCLUDE:
+ amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_CHANGE_TO_EXCLUDE:
+ amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_ALLOW_NEW_SOURCES:
+ amt_mcast_allow_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_BLOCK_OLD_SOURCES:
+ amt_mcast_block_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ default:
+ break;
+ }
+ amt_cleanup_srcs(amt, tunnel, gnode);
+ }
+}
+
+/* caller must hold tunnel->lock */
+static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmphdr *ih = igmp_hdr(skb);
+
+ switch (ih->type) {
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ amt_igmpv3_report_handler(amt, skb, tunnel);
+ break;
+ case IGMPV2_HOST_MEMBERSHIP_REPORT:
+ amt_igmpv2_report_handler(amt, skb, tunnel);
+ break;
+ case IGMP_HOST_LEAVE_MESSAGE:
+ amt_igmpv2_leave_handler(amt, skb, tunnel);
+ break;
+ default:
+ break;
+ }
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* RFC 3810
+ * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
+ *
+ * When Multicast Address Compatibility Mode is MLDv2, a router acts
+ * using the MLDv2 protocol for that multicast address. When Multicast
+ * Address Compatibility Mode is MLDv1, a router internally translates
+ * the following MLDv1 messages for that multicast address to their
+ * MLDv2 equivalents:
+ *
+ * MLDv1 Message     MLDv2 Equivalent
+ * -------------     ----------------
+ * Report            IS_EX( {} )
+ * Done              TO_IN( {} )
+ */
+static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
+ memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
+
+ gnode = amt_lookup_group(tunnel, &group, &host, true);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host, true);
+ if (!IS_ERR(gnode)) {
+ gnode->filter_mode = MCAST_EXCLUDE;
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ }
+ }
+}
+
+/* RFC 3810
+ * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
+ *
+ * When Multicast Address Compatibility Mode is MLDv2, a router acts
+ * using the MLDv2 protocol for that multicast address. When Multicast
+ * Address Compatibility Mode is MLDv1, a router internally translates
+ * the following MLDv1 messages for that multicast address to their
+ * MLDv2 equivalents:
+ *
+ * MLDv1 Message     MLDv2 Equivalent
+ * -------------     ----------------
+ * Report            IS_EX( {} )
+ * Done              TO_IN( {} )
+ */
+static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
+ memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
+
+ gnode = amt_lookup_group(tunnel, &group, &host, true);
+ if (gnode)
+ amt_del_group(amt, gnode);
+}
+
+static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb);
+ int len = skb_transport_offset(skb) + sizeof(*mld2r);
+ void *zero_grec = (void *)&mldv2_zero_grec;
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+ struct mld2_grec *grec;
+ u16 nsrcs;
+ int i;
+
+ for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) {
+ len += sizeof(*grec);
+ if (!ipv6_mc_may_pull(skb, len))
+ break;
+
+ grec = (void *)(skb->data + len - sizeof(*grec));
+ nsrcs = ntohs(grec->grec_nsrcs);
+
+ len += nsrcs * sizeof(struct in6_addr);
+ if (!ipv6_mc_may_pull(skb, len))
+ break;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip6 = grec->grec_mca;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip6 = ip6h->saddr;
+ gnode = amt_lookup_group(tunnel, &group, &host, true);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host,
+ true);
+ if (IS_ERR(gnode))
+ continue;
+ }
+
+ amt_add_srcs(amt, tunnel, gnode, grec, true);
+ switch (grec->grec_type) {
+ case MLD2_MODE_IS_INCLUDE:
+ amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_MODE_IS_EXCLUDE:
+ amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_CHANGE_TO_INCLUDE:
+ amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_CHANGE_TO_EXCLUDE:
+ amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_ALLOW_NEW_SOURCES:
+ amt_mcast_allow_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_BLOCK_OLD_SOURCES:
+ amt_mcast_block_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ default:
+ break;
+ }
+ amt_cleanup_srcs(amt, tunnel, gnode);
+ }
+}
+
+/* caller must hold tunnel->lock */
+static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
+
+ switch (mld->mld_type) {
+ case ICMPV6_MGM_REPORT:
+ amt_mldv1_report_handler(amt, skb, tunnel);
+ break;
+ case ICMPV6_MLD2_REPORT:
+ amt_mldv2_report_handler(amt, skb, tunnel);
+ break;
+ case ICMPV6_MGM_REDUCTION:
+ amt_mldv1_leave_handler(amt, skb, tunnel);
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_advertisement *amta;
+ int hdr_size;
+
+ hdr_size = sizeof(*amta) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+ amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1);
+ if (!amta->ip4)
+ return true;
+
+ if (amta->reserved || amta->version)
+ return true;
+
+ if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) ||
+ ipv4_is_zeronet(amta->ip4))
+ return true;
+
+ amt->remote_ip = amta->ip4;
+ netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
+ mod_delayed_work(amt_wq, &amt->req_wq, 0);
+
+ amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true);
+ return false;
+}
+
+static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_mcast_data *amtmd;
+ int hdr_size, len, err;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+
+ amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
+ if (amtmd->reserved || amtmd->version)
+ return true;
+
+ hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
+ if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
+ return true;
+ skb_reset_network_header(skb);
+ skb_push(skb, sizeof(*eth));
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ eth = eth_hdr(skb);
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (!ipv4_is_multicast(iph->daddr))
+ return true;
+ skb->protocol = htons(ETH_P_IP);
+ eth->h_proto = htons(ETH_P_IP);
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ struct ipv6hdr *ip6h;
+
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ return true;
+ skb->protocol = htons(ETH_P_IPV6);
+ eth->h_proto = htons(ETH_P_IPV6);
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+#endif
+ } else {
+ return true;
+ }
+
+ skb->pkt_type = PACKET_MULTICAST;
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ err = gro_cells_receive(&amt->gro_cells, skb);
+ if (likely(err == NET_RX_SUCCESS))
+ dev_sw_netstats_rx_add(amt->dev, len);
+ else
+ amt->dev->stats.rx_dropped++;
+
+ return false;
+}
+
+static bool amt_membership_query_handler(struct amt_dev *amt,
+ struct sk_buff *skb)
+{
+ struct amt_header_membership_query *amtmq;
+ struct igmpv3_query *ihv3;
+ struct ethhdr *eth, *oeth;
+ struct iphdr *iph;
+ int hdr_size, len;
+
+ hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+ amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1);
+ if (amtmq->reserved || amtmq->version)
+ return true;
+
+ hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth);
+ if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
+ return true;
+ oeth = eth_hdr(skb);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ skb_reset_network_header(skb);
+ eth = eth_hdr(skb);
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (!ipv4_is_multicast(iph->daddr))
+ return true;
+ if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
+ sizeof(*ihv3)))
+ return true;
+
+ ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+ skb_reset_transport_header(skb);
+ skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+ spin_lock_bh(&amt->lock);
+ amt->ready4 = true;
+ amt->mac = amtmq->response_mac;
+ amt->req_cnt = 0;
+ amt->qi = ihv3->qqic;
+ spin_unlock_bh(&amt->lock);
+ skb->protocol = htons(ETH_P_IP);
+ eth->h_proto = htons(ETH_P_IP);
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct mld2_query *mld2q;
+
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ return true;
+ if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
+ sizeof(*mld2q)))
+ return true;
+
+ mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
+ skb_reset_transport_header(skb);
+ skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
+ spin_lock_bh(&amt->lock);
+ amt->ready6 = true;
+ amt->mac = amtmq->response_mac;
+ amt->req_cnt = 0;
+ amt->qi = mld2q->mld2q_qqic;
+ spin_unlock_bh(&amt->lock);
+ skb->protocol = htons(ETH_P_IPV6);
+ eth->h_proto = htons(ETH_P_IPV6);
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+#endif
+ } else {
+ return true;
+ }
+
+ ether_addr_copy(eth->h_source, oeth->h_source);
+ skb->pkt_type = PACKET_MULTICAST;
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
+ dev_sw_netstats_rx_add(amt->dev, len);
+ } else {
+ amt->dev->stats.rx_dropped++;
+ }
+
+ return false;
+}
+
+static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_membership_update *amtmu;
+ struct amt_tunnel_list *tunnel;
+ struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ int len;
+
+ iph = ip_hdr(skb);
+ udph = udp_hdr(skb);
+
+ if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol,
+ false, false))
+ return true;
+
+ amtmu = (struct amt_header_membership_update *)skb->data;
+ if (amtmu->reserved || amtmu->version)
+ return true;
+
+ skb_pull(skb, sizeof(*amtmu));
+ skb_reset_network_header(skb);
+
+ list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
+ if (tunnel->ip4 == iph->saddr) {
+ if (amtmu->nonce == tunnel->nonce &&
+ amtmu->response_mac == tunnel->mac) {
+ mod_delayed_work(amt_wq, &tunnel->gc_wq,
+ msecs_to_jiffies(amt_gmi(amt))
+ * 3);
+ goto report;
+ } else {
+ netdev_dbg(amt->dev, "Invalid MAC\n");
+ return true;
+ }
+ }
+ }
+
+ return false;
+
+report:
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (ip_mc_check_igmp(skb)) {
+ netdev_dbg(amt->dev, "Invalid IGMP\n");
+ return true;
+ }
+
+ spin_lock_bh(&tunnel->lock);
+ amt_igmp_report_handler(amt, skb, tunnel);
+ spin_unlock_bh(&tunnel->lock);
+
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = htons(ETH_P_IP);
+ eth->h_proto = htons(ETH_P_IP);
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ if (ipv6_mc_check_mld(skb)) {
+ netdev_dbg(amt->dev, "Invalid MLD\n");
+ return true;
+ }
+
+ spin_lock_bh(&tunnel->lock);
+ amt_mld_report_handler(amt, skb, tunnel);
+ spin_unlock_bh(&tunnel->lock);
+
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = htons(ETH_P_IPV6);
+ eth->h_proto = htons(ETH_P_IPV6);
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+#endif
+ } else {
+ netdev_dbg(amt->dev, "Unsupported Protocol\n");
+ return true;
+ }
+
+ skb_pull(skb, sizeof(struct ethhdr));
+ skb->pkt_type = PACKET_MULTICAST;
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
+ true);
+ dev_sw_netstats_rx_add(amt->dev, len);
+ } else {
+ amt->dev->stats.rx_dropped++;
+ }
+
+ return false;
+}
+
+static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
+ __be32 daddr, __be16 dport)
+{
+ struct amt_header_advertisement *amta;
+ int hlen, tlen, offset;
+ struct socket *sock;
+ struct udphdr *udph;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ u32 len;
+ int err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(amt->sock);
+ if (!sock)
+ goto out;
+
+ if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
+ goto out;
+
+ rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
+ daddr, amt->local_ip,
+ dport, amt->relay_port,
+ IPPROTO_UDP, 0,
+ amt->stream_dev->ifindex);
+ if (IS_ERR(rt)) {
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ hlen = LL_RESERVED_SPACE(amt->dev);
+ tlen = amt->dev->needed_tailroom;
+ len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb) {
+ ip_rt_put(rt);
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_dst_set(skb, &rt->dst);
+
+ len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
+ amta->version = 0;
+ amta->type = AMT_MSG_ADVERTISEMENT;
+ amta->reserved = 0;
+ amta->nonce = nonce;
+ amta->ip4 = amt->local_ip;
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+ udph->source = amt->relay_port;
+ udph->dest = dport;
+ udph->len = htons(sizeof(*amta) + sizeof(*udph));
+ udph->check = 0;
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
+ sizeof(*udph) + sizeof(*amta),
+ IPPROTO_UDP, skb->csum);
+
+ skb_push(skb, sizeof(*iph));
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr)) >> 2;
+ iph->tos = AMT_TOS;
+ iph->frag_off = 0;
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
+ iph->daddr = daddr;
+ iph->saddr = amt->local_ip;
+ iph->protocol = IPPROTO_UDP;
+ iph->tot_len = htons(len);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(amt->net, skb, NULL);
+ ip_send_check(iph);
+ err = ip_local_out(amt->net, sock->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ amt->dev->stats.tx_errors++;
+
+out:
+ rcu_read_unlock();
+}
+
+static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_discovery *amtd;
+ struct udphdr *udph;
+ struct iphdr *iph;
+
+ if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd)))
+ return true;
+
+ iph = ip_hdr(skb);
+ udph = udp_hdr(skb);
+ amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1);
+
+ if (amtd->reserved || amtd->version)
+ return true;
+
+ amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);
+
+ return false;
+}
+
+static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_request *amtrh;
+ struct amt_tunnel_list *tunnel;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ u64 mac;
+ int i;
+
+ if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh)))
+ return true;
+
+ iph = ip_hdr(skb);
+ udph = udp_hdr(skb);
+ amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1);
+
+ if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version)
+ return true;
+
+ list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
+ if (tunnel->ip4 == iph->saddr)
+ goto send;
+
+ if (amt->nr_tunnels >= amt->max_tunnels) {
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ return true;
+ }
+
+ tunnel = kzalloc(sizeof(*tunnel) +
+ (sizeof(struct hlist_head) * amt->hash_buckets),
+ GFP_ATOMIC);
+ if (!tunnel)
+ return true;
+
+ tunnel->source_port = udph->source;
+ tunnel->ip4 = iph->saddr;
+
+ tunnel->amt = amt;
+ spin_lock_init(&tunnel->lock);
+ for (i = 0; i < amt->hash_buckets; i++)
+ INIT_HLIST_HEAD(&tunnel->groups[i]);
+
+ INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
+
+ spin_lock_bh(&amt->lock);
+ list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
+ tunnel->key = amt->key;
+ amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
+ amt->nr_tunnels++;
+ mod_delayed_work(amt_wq, &tunnel->gc_wq,
+ msecs_to_jiffies(amt_gmi(amt)));
+ spin_unlock_bh(&amt->lock);
+
+send:
+ tunnel->nonce = amtrh->nonce;
+ mac = siphash_3u32((__force u32)tunnel->ip4,
+ (__force u32)tunnel->source_port,
+ (__force u32)tunnel->nonce,
+ &tunnel->key);
+ tunnel->mac = mac >> 16;
+
+ if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
+ return true;
+
+ if (!amtrh->p)
+ amt_send_igmp_gq(amt, tunnel);
+ else
+ amt_send_mld_gq(amt, tunnel);
+
+ return false;
+}
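+
+/* MAC note (illustrative): siphash_3u32() yields 64 bits and "mac >> 16"
+ * drops the low 16 of them, leaving the 48-bit Response MAC that
+ * RFC 7450 carries in Membership Query messages; a later Membership
+ * Update is accepted by amt_update_handler() only if the gateway echoes
+ * this nonce/MAC pair back.
+ */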
+
+static int amt_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct amt_dev *amt;
+ struct iphdr *iph;
+ int type;
+ bool err;
+
+ rcu_read_lock_bh();
+ amt = rcu_dereference_sk_user_data(sk);
+ if (!amt) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ skb->dev = amt->dev;
+ type = amt_parse_type(skb);
+ if (type == -1) {
+ err = true;
+ goto drop;
+ }
+ iph = ip_hdr(skb);
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ switch (type) {
+ case AMT_MSG_ADVERTISEMENT:
+ if (iph->saddr != amt->discovery_ip) {
+ netdev_dbg(amt->dev, "Invalid Relay IP\n");
+ err = true;
+ goto drop;
+ }
+ err = amt_advertisement_handler(amt, skb);
+ break;
+ case AMT_MSG_MULTICAST_DATA:
+ if (iph->saddr != amt->remote_ip) {
+ netdev_dbg(amt->dev, "Invalid Relay IP\n");
+ err = true;
+ goto drop;
+ }
+ err = amt_multicast_data_handler(amt, skb);
+ if (err)
+ goto drop;
+ else
+ goto out;
+ case AMT_MSG_MEMBERSHIP_QUERY:
+ if (iph->saddr != amt->remote_ip) {
+ netdev_dbg(amt->dev, "Invalid Relay IP\n");
+ err = true;
+ goto drop;
+ }
+ err = amt_membership_query_handler(amt, skb);
+ if (err)
+ goto drop;
+ else
+ goto out;
+ default:
+ err = true;
+ netdev_dbg(amt->dev, "Invalid type of Gateway\n");
+ break;
+ }
+ } else {
+ switch (type) {
+ case AMT_MSG_DISCOVERY:
+ err = amt_discovery_handler(amt, skb);
+ break;
+ case AMT_MSG_REQUEST:
+ err = amt_request_handler(amt, skb);
+ break;
+ case AMT_MSG_MEMBERSHIP_UPDATE:
+ err = amt_update_handler(amt, skb);
+ if (err)
+ goto drop;
+ else
+ goto out;
+ default:
+ err = true;
+ netdev_dbg(amt->dev, "Invalid type of relay\n");
+ break;
+ }
+ }
+drop:
+ if (err) {
+ amt->dev->stats.rx_dropped++;
+ kfree_skb(skb);
+ } else {
+ consume_skb(skb);
+ }
+out:
+ rcu_read_unlock_bh();
+ return 0;
+}
+
+static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ struct amt_dev *amt;
+ int type;
+
+ rcu_read_lock_bh();
+ amt = rcu_dereference_sk_user_data(sk);
+ if (!amt)
+ goto drop;
+
+ if (amt->mode != AMT_MODE_GATEWAY)
+ goto drop;
+
+ type = amt_parse_type(skb);
+ if (type == -1)
+ goto drop;
+
+ netdev_dbg(amt->dev, "Received IGMP Unreachable of %s\n",
+ type_str[type]);
+ switch (type) {
+ case AMT_MSG_DISCOVERY:
+ break;
+ case AMT_MSG_REQUEST:
+ case AMT_MSG_MEMBERSHIP_UPDATE:
+ if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
+ mod_delayed_work(amt_wq, &amt->req_wq, 0);
+ break;
+ default:
+ goto drop;
+ }
+ rcu_read_unlock_bh();
+ return 0;
+drop:
+ if (amt)
+ amt->dev->stats.rx_dropped++;
+ rcu_read_unlock_bh();
+ return 0;
+}
+
+static struct socket *amt_create_sock(struct net *net, __be16 port)
+{
+ struct udp_port_cfg udp_conf;
+ struct socket *sock;
+ int err;
+
+ memset(&udp_conf, 0, sizeof(udp_conf));
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+
+ udp_conf.local_udp_port = port;
+
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return sock;
+}
+
+static int amt_socket_create(struct amt_dev *amt)
+{
+ struct udp_tunnel_sock_cfg tunnel_cfg;
+ struct socket *sock;
+
+ sock = amt_create_sock(amt->net, amt->relay_port);
+ if (IS_ERR(sock))
+ return PTR_ERR(sock);
+
+ /* Mark socket as an encapsulation socket */
+ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
+ tunnel_cfg.sk_user_data = amt;
+ tunnel_cfg.encap_type = 1;
+ tunnel_cfg.encap_rcv = amt_rcv;
+ tunnel_cfg.encap_err_lookup = amt_err_lookup;
+ tunnel_cfg.encap_destroy = NULL;
+ setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);
+
+ rcu_assign_pointer(amt->sock, sock);
+ return 0;
+}
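+
+/* After this setup every UDP datagram received on relay_port is handed
+ * to amt_rcv() through the encap_rcv hook, and ICMP errors for the flow
+ * reach amt_err_lookup(); a non-zero encap_type simply marks the socket
+ * as an encapsulation socket for the UDP stack.
+ */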
+
+static int amt_dev_open(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ int err;
+
+ amt->ready4 = false;
+ amt->ready6 = false;
+
+ err = amt_socket_create(amt);
+ if (err)
+ return err;
+
+ amt->req_cnt = 0;
+ amt->remote_ip = 0;
+ get_random_bytes(&amt->key, sizeof(siphash_key_t));
+
+ amt->status = AMT_STATUS_INIT;
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
+ mod_delayed_work(amt_wq, &amt->req_wq, 0);
+ } else if (amt->mode == AMT_MODE_RELAY) {
+ mod_delayed_work(amt_wq, &amt->secret_wq,
+ msecs_to_jiffies(AMT_SECRET_TIMEOUT));
+ }
+ return err;
+}
+
+static int amt_dev_stop(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ struct amt_tunnel_list *tunnel, *tmp;
+ struct socket *sock;
+
+ cancel_delayed_work_sync(&amt->req_wq);
+ cancel_delayed_work_sync(&amt->discovery_wq);
+ cancel_delayed_work_sync(&amt->secret_wq);
+
+ /* shutdown */
+ sock = rtnl_dereference(amt->sock);
+ RCU_INIT_POINTER(amt->sock, NULL);
+ synchronize_net();
+ if (sock)
+ udp_tunnel_sock_release(sock);
+
+ amt->ready4 = false;
+ amt->ready6 = false;
+ amt->req_cnt = 0;
+ amt->remote_ip = 0;
+
+ list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
+ list_del_rcu(&tunnel->list);
+ amt->nr_tunnels--;
+ cancel_delayed_work_sync(&tunnel->gc_wq);
+ amt_clear_groups(tunnel);
+ kfree_rcu(tunnel, rcu);
+ }
+
+ return 0;
+}
+
+static const struct device_type amt_type = {
+ .name = "amt",
+};
+
+static int amt_dev_init(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ int err;
+
+ amt->dev = dev;
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ err = gro_cells_init(&amt->gro_cells, dev);
+ if (err) {
+ free_percpu(dev->tstats);
+ return err;
+ }
+
+ return 0;
+}
+
+static void amt_dev_uninit(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+
+ gro_cells_destroy(&amt->gro_cells);
+ free_percpu(dev->tstats);
+}
+
+static const struct net_device_ops amt_netdev_ops = {
+ .ndo_init = amt_dev_init,
+ .ndo_uninit = amt_dev_uninit,
+ .ndo_open = amt_dev_open,
+ .ndo_stop = amt_dev_stop,
+ .ndo_start_xmit = amt_dev_xmit,
+ .ndo_get_stats64 = dev_get_tstats64,
+};
+
+static void amt_link_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &amt_netdev_ops;
+ dev->needs_free_netdev = true;
+ SET_NETDEV_DEVTYPE(dev, &amt_type);
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ eth_hw_addr_random(dev);
+ eth_zero_addr(dev->broadcast);
+ ether_setup(dev);
+}
+
+static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = {
+ [IFLA_AMT_MODE] = { .type = NLA_U32 },
+ [IFLA_AMT_RELAY_PORT] = { .type = NLA_U16 },
+ [IFLA_AMT_GATEWAY_PORT] = { .type = NLA_U16 },
+ [IFLA_AMT_LINK] = { .type = NLA_U32 },
+ [IFLA_AMT_LOCAL_IP] = { .len = sizeof_field(struct iphdr, daddr) },
+ [IFLA_AMT_REMOTE_IP] = { .len = sizeof_field(struct iphdr, daddr) },
+ [IFLA_AMT_DISCOVERY_IP] = { .len = sizeof_field(struct iphdr, daddr) },
+ [IFLA_AMT_MAX_TUNNELS] = { .type = NLA_U32 },
+};
+
+static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ if (!data)
+ return -EINVAL;
+
+ if (!data[IFLA_AMT_LINK]) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK],
+ "Link attribute is required");
+ return -EINVAL;
+ }
+
+ if (!data[IFLA_AMT_MODE]) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
+ "Mode attribute is required");
+ return -EINVAL;
+ }
+
+ if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
+ "Mode attribute is not valid");
+ return -EINVAL;
+ }
+
+ if (!data[IFLA_AMT_LOCAL_IP]) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP],
+ "Local attribute is required");
+ return -EINVAL;
+ }
+
+ if (!data[IFLA_AMT_DISCOVERY_IP] &&
+ nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP],
+ "Discovery attribute is required");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int amt_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ int err = -EINVAL;
+
+ amt->net = net;
+ amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
+
+ if (data[IFLA_AMT_MAX_TUNNELS] &&
+ nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]))
+ amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
+ else
+ amt->max_tunnels = AMT_MAX_TUNNELS;
+
+ spin_lock_init(&amt->lock);
+ amt->max_groups = AMT_MAX_GROUP;
+ amt->max_sources = AMT_MAX_SOURCE;
+ amt->hash_buckets = AMT_HSIZE;
+ amt->nr_tunnels = 0;
+ get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
+ amt->stream_dev = dev_get_by_index(net,
+ nla_get_u32(data[IFLA_AMT_LINK]));
+ if (!amt->stream_dev) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
+ "Can't find stream device");
+ return -ENODEV;
+ }
+
+ if (amt->stream_dev->type != ARPHRD_ETHER) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
+ "Invalid stream device type");
+ goto err;
+ }
+
+ amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
+ if (ipv4_is_loopback(amt->local_ip) ||
+ ipv4_is_zeronet(amt->local_ip) ||
+ ipv4_is_multicast(amt->local_ip)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP],
+ "Invalid Local address");
+ goto err;
+ }
+
+ if (data[IFLA_AMT_RELAY_PORT])
+ amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]);
+ else
+ amt->relay_port = htons(IANA_AMT_UDP_PORT);
+
+ if (data[IFLA_AMT_GATEWAY_PORT])
+ amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]);
+ else
+ amt->gw_port = htons(IANA_AMT_UDP_PORT);
+
+ if (!amt->relay_port) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
+ "relay port must not be 0");
+ goto err;
+ }
+ if (amt->mode == AMT_MODE_RELAY) {
+ amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ amt->qri = 10;
+ dev->needed_headroom = amt->stream_dev->needed_headroom +
+ AMT_RELAY_HLEN;
+ dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
+ dev->max_mtu = dev->mtu;
+ dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN;
+ } else {
+ if (!data[IFLA_AMT_DISCOVERY_IP]) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
+ "discovery must be set in gateway mode");
+ goto err;
+ }
+ if (!amt->gw_port) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
+ "gateway port must not be 0");
+ goto err;
+ }
+ amt->remote_ip = 0;
+ amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
+ if (ipv4_is_loopback(amt->discovery_ip) ||
+ ipv4_is_zeronet(amt->discovery_ip) ||
+ ipv4_is_multicast(amt->discovery_ip)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
+ "discovery must be unicast");
+ goto err;
+ }
+
+ dev->needed_headroom = amt->stream_dev->needed_headroom +
+ AMT_GW_HLEN;
+ dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
+ dev->max_mtu = dev->mtu;
+ dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN;
+ }
+ amt->qi = AMT_INIT_QUERY_INTERVAL;
+
+ err = register_netdevice(dev);
+ if (err < 0) {
+ netdev_dbg(dev, "failed to register new netdev %d\n", err);
+ goto err;
+ }
+
+ err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
+ if (err < 0) {
+ unregister_netdevice(dev);
+ goto err;
+ }
+
+ INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
+ INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
+ INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
+ INIT_LIST_HEAD(&amt->tunnel_list);
+
+ return 0;
+err:
+ dev_put(amt->stream_dev);
+ return err;
+}
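+
+/* Example configuration (illustrative; assumes the matching iproute2
+ * support for the "amt" link type):
+ *
+ *   # relay side, tunneling over eth0
+ *   ip link add amtr type amt mode relay local 172.17.0.1 dev eth0
+ *
+ *   # gateway side, pointing discovery at the relay
+ *   ip link add amtg type amt mode gateway local 172.17.0.2 \
+ *      discovery 172.17.0.1 dev eth0
+ */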
+
+static void amt_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+
+ unregister_netdevice_queue(dev, head);
+ netdev_upper_dev_unlink(amt->stream_dev, dev);
+ dev_put(amt->stream_dev);
+}
+
+static size_t amt_get_size(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */
+ nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */
+ nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */
+ nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
+ nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MAX_TUNNELS */
+ nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
+ nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
+ nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
+}
+
+static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+
+ if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
+ goto nla_put_failure;
+ if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
+ goto nla_put_failure;
+ if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
+ goto nla_put_failure;
+ if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
+ goto nla_put_failure;
+ if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
+ goto nla_put_failure;
+ if (amt->remote_ip)
+ if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops amt_link_ops __read_mostly = {
+ .kind = "amt",
+ .maxtype = IFLA_AMT_MAX,
+ .policy = amt_policy,
+ .priv_size = sizeof(struct amt_dev),
+ .setup = amt_link_setup,
+ .validate = amt_validate,
+ .newlink = amt_newlink,
+ .dellink = amt_dellink,
+ .get_size = amt_get_size,
+ .fill_info = amt_fill_info,
+};
+
+static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
+{
+ struct net_device *upper_dev;
+ struct amt_dev *amt;
+
+ for_each_netdev(dev_net(dev), upper_dev) {
+ if (netif_is_amt(upper_dev)) {
+ amt = netdev_priv(upper_dev);
+ if (amt->stream_dev == dev)
+ return upper_dev;
+ }
+ }
+
+ return NULL;
+}
+
+static int amt_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *upper_dev;
+ struct amt_dev *amt;
+ LIST_HEAD(list);
+ int new_mtu;
+
+ upper_dev = amt_lookup_upper_dev(dev);
+ if (!upper_dev)
+ return NOTIFY_DONE;
+ amt = netdev_priv(upper_dev);
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ amt_dellink(amt->dev, &list);
+ unregister_netdevice_many(&list);
+ break;
+ case NETDEV_CHANGEMTU:
+ if (amt->mode == AMT_MODE_RELAY)
+ new_mtu = dev->mtu - AMT_RELAY_HLEN;
+ else
+ new_mtu = dev->mtu - AMT_GW_HLEN;
+
+ dev_set_mtu(amt->dev, new_mtu);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block amt_notifier_block __read_mostly = {
+ .notifier_call = amt_device_event,
+};
+
+static int __init amt_init(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&amt_notifier_block);
+ if (err < 0)
+ goto err;
+
+ err = rtnl_link_register(&amt_link_ops);
+ if (err < 0)
+ goto unregister_notifier;
+
+ amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
+ if (!amt_wq) {
+ err = -ENOMEM;
+ goto rtnl_unregister;
+ }
+
+ spin_lock_init(&source_gc_lock);
+ spin_lock_bh(&source_gc_lock);
+ INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
+ mod_delayed_work(amt_wq, &source_gc_wq,
+ msecs_to_jiffies(AMT_GC_INTERVAL));
+ spin_unlock_bh(&source_gc_lock);
+
+ return 0;
+
+rtnl_unregister:
+ rtnl_link_unregister(&amt_link_ops);
+unregister_notifier:
+ unregister_netdevice_notifier(&amt_notifier_block);
+err:
+ pr_err("error loading AMT module loaded\n");
+ return err;
+}
+late_initcall(amt_init);
+
+static void __exit amt_fini(void)
+{
+ rtnl_link_unregister(&amt_link_ops);
+ unregister_netdevice_notifier(&amt_notifier_block);
+ flush_delayed_work(&source_gc_wq);
+ __amt_source_gc_work();
+ destroy_workqueue(amt_wq);
+}
+module_exit(amt_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
+MODULE_ALIAS_RTNL_LINK("amt");
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index f0695d68c47e..97f254bdbb16 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -945,8 +945,8 @@ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
dev->broadcast[0] = 0xFF;
/* Set hardware address. */
- dev->dev_addr[0] = aa->s_node;
dev->addr_len = 1;
+ dev_addr_set(dev, &aa->s_node);
return 0;
case SIOCGIFADDR:
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 1f8925e75b3f..388d7b3bd4c2 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -846,9 +846,8 @@ static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
set_30 (dev,ltflags);
dev->broadcast[0] = 0xFF;
- dev->dev_addr[0] = aa->s_node;
-
dev->addr_len=1;
+ dev_addr_set(dev, &aa->s_node);
return 0;
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 12d085405bd0..8c3ccc7c83cd 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -207,7 +207,8 @@ static int __init arcrimi_found(struct net_device *dev)
}
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION);
+ arcnet_set_addr(dev, arcnet_readb(lp->mem_start,
+ COM9026_REG_R_STATION));
arc_printk(D_NORMAL, dev, "ARCnet RIM I: station %02Xh found at IRQ %d, ShMem %lXh (%ld*%d bytes)\n",
dev->dev_addr[0],
@@ -324,7 +325,7 @@ static int __init arc_rimi_init(void)
return -ENOMEM;
if (node && node != 0xff)
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->mem_start = io;
dev->irq = irq;
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index 5d4a4c7efbbf..19e996a829c9 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -364,6 +364,11 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
struct net_device *dev);
void arcnet_timeout(struct net_device *dev, unsigned int txqueue);
+static inline void arcnet_set_addr(struct net_device *dev, u8 addr)
+{
+ dev_addr_set(dev, &addr);
+}
+
/* I/O equivalents */
#ifdef CONFIG_SA1100_CT6001
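These conversions route hardware-address writes through dev_addr_set() instead of writing dev->dev_addr directly, and the arcnet_set_addr() inline above adapts that to ARCnet's one-byte station IDs. A usage sketch (the station value 0x42 is an arbitrary example, not from the patch):

	u8 station = 0x42;		/* example value only */
	arcnet_set_addr(dev, station);	/* wraps dev_addr_set(dev, &station) */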
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index be618e4b9ed5..293a621e654c 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -151,7 +151,7 @@ static int __init com20020_init(void)
return -ENOMEM;
if (node && node != 0xff)
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->netdev_ops = &com20020_netdev_ops;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 3c8f665c1558..6382e1937cca 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -194,7 +194,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
dev->base_addr = ioaddr;
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->sysfs_groups[0] = &com20020_state_group;
dev->irq = pdev->irq;
lp->card_name = "PCI COM20020";
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 78043a9c5981..06e1651b594b 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -157,7 +157,7 @@ static int com20020_set_hwaddr(struct net_device *dev, void *addr)
struct arcnet_local *lp = netdev_priv(dev);
struct sockaddr *hwaddr = addr;
- memcpy(dev->dev_addr, hwaddr->sa_data, 1);
+ dev_addr_set(dev, hwaddr->sa_data);
com20020_set_subaddress(lp, ioaddr, SUB_NODE);
arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG);
@@ -220,7 +220,7 @@ int com20020_found(struct net_device *dev, int shared)
/* FIXME: do this some other way! */
if (!dev->dev_addr[0])
- dev->dev_addr[0] = arcnet_inb(ioaddr, 8);
+ arcnet_set_addr(dev, arcnet_inb(ioaddr, 8));
com20020_set_subaddress(lp, ioaddr, SUB_SETUP1);
arcnet_outb(lp->setup, ioaddr, COM20020_REG_W_XREG);
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index b88a109b3b15..24150c933fcb 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -133,7 +133,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
lp->hw.owner = THIS_MODULE;
/* fill in our module parameters as defaults */
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
p_dev->resource[0]->end = 16;
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 3856b447d38e..37b47749fc8b 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -252,7 +252,7 @@ static int __init com90io_found(struct net_device *dev)
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = get_buffer_byte(dev, 1);
+ arcnet_set_addr(dev, get_buffer_byte(dev, 1));
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index d8dfb9ea0de8..f49dae194284 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -531,7 +531,8 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem,
}
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION);
+ arcnet_set_addr(dev, arcnet_readb(lp->mem_start,
+ COM9026_REG_R_STATION));
dev->base_addr = ioaddr;
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 54e321a695ce..edffc3489a12 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -577,11 +577,8 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
return -EINVAL;
}
- if (data[IFLA_BAREUDP_PORT])
- conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
-
- if (data[IFLA_BAREUDP_ETHERTYPE])
- conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
+ conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
+ conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
if (data[IFLA_BAREUDP_SRCPORT_MIN])
conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7d3752cbf761..2ec8e015c7b3 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -50,7 +50,7 @@ struct arp_pkt {
#pragma pack()
/* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[],
bool strict_match);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
@@ -353,7 +353,8 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
*
* Caller must hold RTNL
*/
-static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
+static void rlb_teach_disabled_mac_on_primary(struct bonding *bond,
+ const u8 addr[])
{
struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
@@ -904,7 +905,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
/*********************** tlb/rlb shared functions *********************/
-static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
+static void alb_send_lp_vid(struct slave *slave, const u8 mac_addr[],
__be16 vlan_proto, u16 vid)
{
struct learning_pkt pkt;
@@ -940,7 +941,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
struct alb_walk_data {
struct bonding *bond;
struct slave *slave;
- u8 *mac_addr;
+ const u8 *mac_addr;
bool strict_match;
};
@@ -949,9 +950,9 @@ static int alb_upper_dev_walk(struct net_device *upper,
{
struct alb_walk_data *data = (struct alb_walk_data *)priv->data;
bool strict_match = data->strict_match;
+ const u8 *mac_addr = data->mac_addr;
struct bonding *bond = data->bond;
struct slave *slave = data->slave;
- u8 *mac_addr = data->mac_addr;
struct bond_vlan_tag *tags;
if (is_vlan_dev(upper) &&
@@ -982,7 +983,7 @@ static int alb_upper_dev_walk(struct net_device *upper,
return 0;
}
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[],
bool strict_match)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
@@ -1006,14 +1007,14 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
rcu_read_unlock();
}
-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[],
+static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[],
unsigned int len)
{
struct net_device *dev = slave->dev;
struct sockaddr_storage ss;
if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
- memcpy(dev->dev_addr, addr, len);
+ __dev_addr_set(dev, addr, len);
return 0;
}
@@ -1242,8 +1243,7 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
res = dev_set_mac_address(slave->dev, addr, NULL);
/* restore net_device's hw address */
- bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr,
- slave->dev->addr_len);
+ dev_addr_set(slave->dev, tmp_addr);
if (res)
goto unwind;
@@ -1263,8 +1263,7 @@ unwind:
rollback_slave->dev->addr_len);
dev_set_mac_address(rollback_slave->dev,
(struct sockaddr *)&ss, NULL);
- bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr,
- rollback_slave->dev->addr_len);
+ dev_addr_set(rollback_slave->dev, tmp_addr);
}
return res;
@@ -1727,8 +1726,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
NULL);
- bond_hw_addr_copy(new_slave->dev->dev_addr, tmp_addr,
- new_slave->dev->addr_len);
+ dev_addr_set(new_slave->dev, tmp_addr);
}
/* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1761,7 +1759,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
if (res)
return res;
- bond_hw_addr_copy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
+ dev_addr_set(bond_dev, ss->__data);
/* If there is no curr_active_slave there is nothing else to do.
* Otherwise we'll need to pass the new address to it and handle
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 77dc79a7f574..ff8da720a33a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -923,7 +923,7 @@ static int bond_set_dev_addr(struct net_device *bond_dev,
if (err)
return err;
- memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
+ __dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
bond_dev->addr_assign_type = NET_ADDR_STOLEN;
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
return 0;
@@ -4414,7 +4414,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
}
/* success */
- memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
+ dev_addr_set(bond_dev, ss->__data);
return 0;
unwind:
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b9e9842fed94..c48b77167fab 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -811,8 +811,8 @@ int bond_create_sysfs(struct bond_net *bn)
*/
if (ret == -EEXIST) {
/* Is someone being kinky and naming a device bonding_master? */
- if (__dev_get_by_name(bn->net,
- class_attr_bonding_masters.attr.name))
+ if (netdev_name_in_use(bn->net,
+ class_attr_bonding_masters.attr.name))
pr_err("network device named %s already exists in sysfs\n",
class_attr_bonding_masters.attr.name);
ret = 0;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index b06af90a9964..3aea32c9b108 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1170,9 +1170,9 @@ static ssize_t mb0_id_show(struct device *dev,
struct at91_priv *priv = netdev_priv(to_net_dev(dev));
if (priv->mb0_id & CAN_EFF_FLAG)
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
+ return sysfs_emit(buf, "0x%08x\n", priv->mb0_id);
else
- return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
+ return sysfs_emit(buf, "0x%03x\n", priv->mb0_id);
}
static ssize_t mb0_id_store(struct device *dev,
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index f49170eadd54..0509625c3082 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -175,27 +175,29 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
return 0;
}
-void can_calc_tdco(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- const struct can_bittiming *dbt = &priv->data_bittiming;
- struct can_tdc *tdc = &priv->tdc;
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported)
- if (!tdc_const)
+{
+ if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
return;
+ *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
* one or two.
*/
if (dbt->brp == 1 || dbt->brp == 2) {
- /* Reuse "normal" sample point and convert it to time quanta */
- u32 sample_point_in_tq = can_bit_time(dbt) * dbt->sample_point / 1000;
-
- tdc->tdco = min(sample_point_in_tq, tdc_const->tdco_max);
- } else {
- tdc->tdco = 0;
+ /* Sample point in clock periods */
+ u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ if (sample_point_in_tc < tdc_const->tdco_min)
+ return;
+ tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
+ *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
}
}
#endif /* CONFIG_CAN_CALC_BITTIMING */
@@ -209,7 +211,7 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc)
{
struct can_priv *priv = netdev_priv(dev);
- int tseg1, alltseg;
+ unsigned int tseg1, alltseg;
u64 brp64;
tseg1 = bt->prop_seg + bt->phase_seg1;
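Worked example of the new TDCO computation (numbers invented for illustration): with CAN_SYNC_SEG = 1, prop_seg = 6, phase_seg1 = 7 and brp = 1, the sample point sits at (1 + 6 + 7) * 1 = 14 clock periods; if 14 >= tdc_const->tdco_min, the result is tdc->tdco = min(14, tdc_const->tdco_max) and CAN_CTRLMODE_TDC_AUTO is set. With brp = 4 the branch is skipped entirely, since TDC only applies for a data-phase BRP of one or two.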
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 80425636049d..95cca4e5251f 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
#include <linux/can/dev.h>
@@ -19,6 +20,19 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
[IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
+ [IFLA_CAN_TDC] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
+ [IFLA_CAN_TDC_TDCV_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCV_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCO_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCO_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCF_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCF_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCV] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCO] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -30,6 +44,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
* - nominal/arbitration bittiming
* - data bittiming
* - control mode with CAN_CTRLMODE_FD set
+ * - TDC parameters are coherent (details below)
*/
if (!data)
@@ -37,8 +52,43 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+
+ /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
+ if (tdc_flags == CAN_CTRLMODE_TDC_MASK)
+ return -EOPNOTSUPP;
+ /* If one of the CAN_CTRLMODE_TDC_* flags is set then
+ * TDC must be set and vice-versa
+ */
+ if (!!tdc_flags != !!data[IFLA_CAN_TDC])
+ return -EOPNOTSUPP;
+ /* If providing TDC parameters, at least TDCO is
+ * needed. TDCV is needed if and only if
+ * CAN_CTRLMODE_TDC_MANUAL is set
+ */
+ if (data[IFLA_CAN_TDC]) {
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
+ data[IFLA_CAN_TDC],
+ can_tdc_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ if (tdc_flags & CAN_CTRLMODE_TDC_AUTO)
+ return -EOPNOTSUPP;
+ } else {
+ if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL)
+ return -EOPNOTSUPP;
+ }
+
+ if (!tb_tdc[IFLA_CAN_TDC_TDCO])
+ return -EOPNOTSUPP;
+ }
}
if (is_can_fd) {
@@ -46,7 +96,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
return -EOPNOTSUPP;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
+ if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) {
if (!is_can_fd)
return -EOPNOTSUPP;
}
@@ -54,11 +104,60 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+ struct can_tdc tdc = { 0 };
+ const struct can_tdc_const *tdc_const = priv->tdc_const;
+ int err;
+
+ if (!tdc_const || !can_tdc_is_enabled(priv))
+ return -EOPNOTSUPP;
+
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
+ can_tdc_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ u32 tdcv = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCV]);
+
+ if (tdcv < tdc_const->tdcv_min || tdcv > tdc_const->tdcv_max)
+ return -EINVAL;
+
+ tdc.tdcv = tdcv;
+ }
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCO]) {
+ u32 tdco = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCO]);
+
+ if (tdco < tdc_const->tdco_min || tdco > tdc_const->tdco_max)
+ return -EINVAL;
+
+ tdc.tdco = tdco;
+ }
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCF]) {
+ u32 tdcf = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCF]);
+
+ if (tdcf < tdc_const->tdcf_min || tdcf > tdc_const->tdcf_max)
+ return -EINVAL;
+
+ tdc.tdcf = tdcf;
+ }
+
+ priv->tdc = tdc;
+
+ return 0;
+}
+
static int can_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
+ u32 tdc_mask = 0;
int err;
/* We need synchronization with dev->stop() */
@@ -138,7 +237,16 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
dev->mtu = CAN_MTU;
memset(&priv->data_bittiming, 0,
sizeof(priv->data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ memset(&priv->tdc, 0, sizeof(priv->tdc));
}
+
+ tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
+ /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
+ * exclusive: make sure to turn the other one off
+ */
+ if (tdc_mask)
+ priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
}
if (data[IFLA_CAN_RESTART_MS]) {
@@ -187,9 +295,26 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return -EINVAL;
}
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+ memset(&priv->tdc, 0, sizeof(priv->tdc));
+ if (data[IFLA_CAN_TDC]) {
+ /* TDC parameters are provided: use them */
+ err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
+ extack);
+ if (err) {
+ priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ return err;
+ }
+ } else if (!tdc_mask) {
+ /* Neither TDC parameters nor TDC flags are
+ * provided: do calculation
+ */
+ can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
+ &priv->ctrlmode, priv->ctrlmode_supported);
+ } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
+ * turned off. TDC is disabled: do nothing
+ */
- can_calc_tdco(dev);
+ memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
if (priv->do_set_data_bittiming) {
/* Finally, set the bit-timing registers */
@@ -226,6 +351,38 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return 0;
}
+static size_t can_tdc_get_size(const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ size_t size;
+
+ if (!priv->tdc_const)
+ return 0;
+
+ size = nla_total_size(0); /* nest IFLA_CAN_TDC */
+ if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) {
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */
+ }
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
+ if (priv->tdc_const->tdcf_max) {
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
+ }
+
+ if (can_tdc_is_enabled(priv)) {
+ if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
+ priv->do_get_auto_tdcv)
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
+ if (priv->tdc_const->tdcf_max)
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
+ }
+
+ return size;
+}
+
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -257,10 +414,64 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
+ size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
return size;
}
+static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct nlattr *nest;
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_tdc *tdc = &priv->tdc;
+ const struct can_tdc_const *tdc_const = priv->tdc_const;
+
+ if (!tdc_const)
+ return 0;
+
+ nest = nla_nest_start(skb, IFLA_CAN_TDC);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL &&
+ (nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) ||
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max)))
+ goto err_cancel;
+ if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MIN, tdc_const->tdco_min) ||
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MAX, tdc_const->tdco_max))
+ goto err_cancel;
+ if (tdc_const->tdcf_max &&
+ (nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MIN, tdc_const->tdcf_min) ||
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
+ goto err_cancel;
+
+ if (can_tdc_is_enabled(priv)) {
+ u32 tdcv;
+ int err = -EINVAL;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ tdcv = tdc->tdcv;
+ err = 0;
+ } else if (priv->do_get_auto_tdcv) {
+ err = priv->do_get_auto_tdcv(dev, &tdcv);
+ }
+ if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
+ goto err_cancel;
+ if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO, tdc->tdco))
+ goto err_cancel;
+ if (tdc_const->tdcf_max &&
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCF, tdc->tdcf))
+ goto err_cancel;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -318,7 +529,9 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
- &priv->bitrate_max))
+ &priv->bitrate_max)) ||
+
+ (can_tdc_fill_info(skb, dev))
)
return -EMSGSIZE;
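can_tdc_changelink() above follows the standard nested-attribute recipe: parse the nest against a policy, then read individual attributes. A self-contained sketch of the same recipe (the wrapper function is hypothetical; the attribute and policy names come from the hunks above):

static int parse_tdc_nest(const struct nlattr *nla,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_CAN_TDC_MAX + 1];
	int err;

	/* split the nest into tb[] and type-check it per can_tdc_policy */
	err = nla_parse_nested(tb, IFLA_CAN_TDC_MAX, nla,
			       can_tdc_policy, extack);
	if (err)
		return err;

	if (tb[IFLA_CAN_TDC_TDCO])	/* each attribute is optional */
		pr_info("tdco=%u\n", nla_get_u32(tb[IFLA_CAN_TDC_TDCO]));

	return 0;
}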
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 7734229aa078..12b60ad95b02 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -290,31 +290,33 @@ struct flexcan_regs {
u32 dbg1; /* 0x58 */
u32 dbg2; /* 0x5c */
u32 _reserved3[8]; /* 0x60 */
- u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
- /* FIFO-mode:
- * MB
- * 0x080...0x08f 0 RX message buffer
- * 0x090...0x0df 1-5 reserved
- * 0x0e0...0x0ff 6-7 8 entry ID table
- * (mx25, mx28, mx35, mx53)
- * 0x0e0...0x2df 6-7..37 8..128 entry ID table
- * size conf'ed via ctrl2::RFFN
- * (mx6, vf610)
- */
- u32 _reserved4[256]; /* 0x480 */
- u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
- u32 _reserved5[24]; /* 0x980 */
- u32 gfwr_mx6; /* 0x9e0 - MX6 */
- u32 _reserved6[39]; /* 0x9e4 */
- u32 _rxfir[6]; /* 0xa80 */
- u32 _reserved8[2]; /* 0xa98 */
- u32 _rxmgmask; /* 0xaa0 */
- u32 _rxfgmask; /* 0xaa4 */
- u32 _rx14mask; /* 0xaa8 */
- u32 _rx15mask; /* 0xaac */
- u32 tx_smb[4]; /* 0xab0 */
- u32 rx_smb0[4]; /* 0xac0 */
- u32 rx_smb1[4]; /* 0xad0 */
+ struct_group(init,
+ u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
+ /* FIFO-mode:
+ * MB
+ * 0x080...0x08f 0 RX message buffer
+ * 0x090...0x0df 1-5 reserved
+ * 0x0e0...0x0ff 6-7 8 entry ID table
+ * (mx25, mx28, mx35, mx53)
+ * 0x0e0...0x2df 6-7..37 8..128 entry ID table
+ * size conf'ed via ctrl2::RFFN
+ * (mx6, vf610)
+ */
+ u32 _reserved4[256]; /* 0x480 */
+ u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
+ u32 _reserved5[24]; /* 0x980 */
+ u32 gfwr_mx6; /* 0x9e0 - MX6 */
+ u32 _reserved6[39]; /* 0x9e4 */
+ u32 _rxfir[6]; /* 0xa80 */
+ u32 _reserved8[2]; /* 0xa98 */
+ u32 _rxmgmask; /* 0xaa0 */
+ u32 _rxfgmask; /* 0xaa4 */
+ u32 _rx14mask; /* 0xaa8 */
+ u32 _rx15mask; /* 0xaac */
+ u32 tx_smb[4]; /* 0xab0 */
+ u32 rx_smb0[4]; /* 0xac0 */
+ u32 rx_smb1[4]; /* 0xad0 */
+ );
u32 mecr; /* 0xae0 */
u32 erriar; /* 0xae4 */
u32 erridpr; /* 0xae8 */
@@ -328,9 +330,11 @@ struct flexcan_regs {
u32 fdcbt; /* 0xc04 - Not affected by Soft Reset */
u32 fdcrc; /* 0xc08 */
u32 _reserved9[199]; /* 0xc0c */
- u32 tx_smb_fd[18]; /* 0xf28 */
- u32 rx_smb0_fd[18]; /* 0xf70 */
- u32 rx_smb1_fd[18]; /* 0xfb8 */
+ struct_group(init_fd,
+ u32 tx_smb_fd[18]; /* 0xf28 */
+ u32 rx_smb0_fd[18]; /* 0xf70 */
+ u32 rx_smb1_fd[18]; /* 0xfb8 */
+ );
};
static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
@@ -1400,14 +1404,10 @@ static void flexcan_ram_init(struct net_device *dev)
reg_ctrl2 |= FLEXCAN_CTRL2_WRMFRZ;
priv->write(reg_ctrl2, &regs->ctrl2);
- memset_io(&regs->mb[0][0], 0,
- offsetof(struct flexcan_regs, rx_smb1[3]) -
- offsetof(struct flexcan_regs, mb[0][0]) + 0x4);
+ memset_io(&regs->init, 0, sizeof(regs->init));
if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
- memset_io(&regs->tx_smb_fd[0], 0,
- offsetof(struct flexcan_regs, rx_smb1_fd[17]) -
- offsetof(struct flexcan_regs, tx_smb_fd[0]) + 0x4);
+ memset_io(&regs->init_fd, 0, sizeof(regs->init_fd));
reg_ctrl2 &= ~FLEXCAN_CTRL2_WRMFRZ;
priv->write(reg_ctrl2, &regs->ctrl2);
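struct_group(), used twice in the flexcan_regs layout above, wraps a run of members in a tagged anonymous struct so the whole span can be addressed as one object; memset_io(&regs->init, 0, sizeof(regs->init)) then replaces the fragile offsetof() arithmetic of the removed lines. A minimal sketch (struct and member names hypothetical):

#include <linux/stddef.h>	/* struct_group() */

struct regs {
	u32 ctrl;
	struct_group(wipe,	/* members cleared as a unit */
		u32 a;
		u32 b[4];
	);
	u32 status;
};

/* sizeof(r->wipe) covers a and b exactly; the compiler, not
 * hand-written offsetof() math, computes the bounds.
 */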
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index c68ad56628bd..32006dbf5abd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1831,7 +1831,7 @@ static ssize_t termination_show(struct device *dev,
return -ETIMEDOUT;
}
- return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled);
+ return sysfs_emit(buf, "%u\n", mod->termination_enabled);
}
static ssize_t termination_store(struct device *dev,
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 308d4f2fff00..eee47bad0592 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -32,8 +32,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
{
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+ void __iomem *src = priv->mram_base + offset;
- ioread32_rep(priv->mram_base + offset, val, val_count);
+ while (val_count--) {
+ *(unsigned int *)val = ioread32(src);
+ val += 4;
+ src += 4;
+ }
return 0;
}
@@ -51,8 +56,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
const void *val, size_t val_count)
{
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+ void __iomem *dst = priv->mram_base + offset;
- iowrite32_rep(priv->base + offset, val, val_count);
+ while (val_count--) {
+ iowrite32(*(unsigned int *)val, dst);
+ val += 4;
+ dst += 4;
+ }
return 0;
}
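Two fixes hide in the m_can hunks above: the *_rep MMIO helpers transfer count words to or from a single register address (they exist for FIFO ports), while the Message RAM is a flat window that needs an advancing address, hence the explicit loops; and the removed write path also targeted priv->base where priv->mram_base was intended. The distinction, as a sketch:

	/* FIFO-style, what iowrite32_rep() does: same address each time */
	for (i = 0; i < n; i++)
		iowrite32(buf[i], fifo_reg);

	/* Memory window, what the new loops do: the address advances */
	for (i = 0; i < n; i++)
		iowrite32(buf[i], mem_base + 4 * i);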
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 35892c1efef0..de4ddf79ba9b 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -293,10 +293,8 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
return -EINVAL;
base = of_iomap(np, 0);
- if (!base) {
- dev_err(&ofdev->dev, "couldn't ioremap\n");
- return err;
- }
+ if (!base)
+ return dev_err_probe(&ofdev->dev, err, "couldn't ioremap\n");
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
index 56320a7f828b..c66762ef631b 100644
--- a/drivers/net/can/rcar/Kconfig
+++ b/drivers/net/can/rcar/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
tristate "Renesas R-Car and RZ/G CAN controller"
- depends on ARCH_RENESAS || ARM || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
Say Y here if you want to use CAN controller found on Renesas R-Car
or RZ/G SoCs.
@@ -11,7 +11,7 @@ config CAN_RCAR
config CAN_RCAR_CANFD
tristate "Renesas R-Car CAN FD controller"
- depends on ARCH_RENESAS || ARM || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
Say Y here if you want to use CAN FD controller found on
Renesas R-Car SoCs. The driver puts the controller in CAN FD only
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 00e4533c8bdd..8999ec9455ec 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
struct rcar_can_priv *priv = netdev_priv(ndev);
u16 ctlr;
- if (netif_running(ndev)) {
- netif_stop_queue(ndev);
- netif_device_detach(ndev);
- }
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+
ctlr = readw(&priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_CANM_HALT;
writew(ctlr, &priv->regs->ctlr);
@@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
u16 ctlr;
int err;
+ if (!netif_running(ndev))
+ return 0;
+
err = clk_enable(priv->clk);
if (err) {
netdev_err(ndev, "clk_enable() failed, error %d\n", err);
@@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- if (netif_running(ndev)) {
- netif_device_attach(ndev);
- netif_start_queue(ndev);
- }
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+
return 0;
}
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6db90dc4bc9d..84f34020aafb 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -752,16 +752,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
struct net_device *prev_dev = chan->prev_dev;
dev_info(&pdev->dev, "removing device %s\n", dev->name);
+ /* do that only for first channel */
+ if (!prev_dev && chan->pciec_card)
+ peak_pciec_remove(chan->pciec_card);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
dev = prev_dev;
- if (!dev) {
- /* do that only for first channel */
- if (chan->pciec_card)
- peak_pciec_remove(chan->pciec_card);
+ if (!dev)
break;
- }
priv = netdev_priv(dev);
chan = priv->priv;
}
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.h b/drivers/net/can/usb/etas_es58x/es581_4.h
index 4bc60a6df697..667ecb77168c 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.h
+++ b/drivers/net/can/usb/etas_es58x/es581_4.h
@@ -192,7 +192,7 @@ struct es581_4_urb_cmd {
struct es581_4_rx_cmd_ret rx_cmd_ret;
__le64 timestamp;
u8 rx_cmd_ret_u8;
- u8 raw_msg[0];
+ DECLARE_FLEX_ARRAY(u8, raw_msg);
} __packed;
__le16 reserved_for_crc16_do_not_use;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index af042aa55f59..4f0cae29f4d8 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -428,7 +428,7 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
&priv->can.data_bittiming);
- if (priv->can.tdc.tdco) {
+ if (can_tdc_is_enabled(&priv->can)) {
tx_conf_msg.tdc_enabled = 1;
tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco);
tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf);
@@ -505,8 +505,11 @@ static const struct can_bittiming_const es58x_fd_data_bittiming_const = {
* Register" from Microchip.
*/
static const struct can_tdc_const es58x_tdc_const = {
+ .tdcv_min = 0,
.tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
.tdco_max = 127,
+ .tdcf_min = 0,
.tdcf_max = 127
};
@@ -523,7 +526,7 @@ const struct es58x_parameters es58x_fd_param = {
.clock = {.freq = 80 * CAN_MHZ},
.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC,
+ CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO,
.tx_start_of_frame = 0xCEFA, /* FACE in little endian */
.rx_start_of_frame = 0xFECA, /* CAFE in little endian */
.tx_urb_cmd_max_len = ES58X_FD_TX_URB_CMD_MAX_LEN,
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h
index a191891b8777..c4b19a6a33ae 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h
@@ -219,7 +219,7 @@ struct es58x_fd_urb_cmd {
struct es58x_fd_tx_ack_msg tx_ack_msg;
__le64 timestamp;
__le32 rx_cmd_ret_le32;
- u8 raw_msg[0];
+ DECLARE_FLEX_ARRAY(u8, raw_msg);
} __packed;
__le16 reserved_for_crc16_do_not_use;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 5e892bef46b0..1b400de00f51 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -352,7 +352,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
} else { /* echo_id == hf->echo_id */
if (hf->echo_id >= GS_MAX_TX_URBS) {
netdev_err(netdev,
- "Unexpected out of range echo id %d\n",
+ "Unexpected out of range echo id %u\n",
hf->echo_id);
goto resubmit_urb;
}
@@ -365,7 +365,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
/* bad devices send bad echo_ids. */
if (!txc) {
netdev_err(netdev,
- "Unexpected unused echo id %d\n",
+ "Unexpected unused echo id %u\n",
hf->echo_id);
goto resubmit_urb;
}
@@ -458,7 +458,7 @@ static void gs_usb_xmit_callback(struct urb *urb)
struct net_device *netdev = dev->netdev;
if (urb->status)
- netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id);
+ netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
usb_free_coherent(urb->dev,
urb->transfer_buffer_length,
@@ -501,7 +501,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
idx = txc->echo_id;
if (idx >= GS_MAX_TX_URBS) {
- netdev_err(netdev, "Invalid tx context %d\n", idx);
+ netdev_err(netdev, "Invalid tx context %u\n", idx);
goto badidx;
}
@@ -964,11 +964,11 @@ static int gs_usb_probe(struct usb_interface *intf,
}
icount = dconf->icount + 1;
- dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
+ dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
if (icount > GS_MAX_INTF) {
dev_err(&intf->dev,
- "Driver cannot handle more that %d CAN interfaces\n",
+ "Driver cannot handle more that %u CAN interfaces\n",
GS_MAX_INTF);
kfree(dconf);
return -EINVAL;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index e8f43ed90b72..6107fef9f4a0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -205,6 +205,19 @@ int peak_usb_netif_rx(struct sk_buff *skb,
return netif_rx(skb);
}
+/* post received skb with native 64-bit hw timestamp */
+int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high)
+{
+ struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+ u64 ns_ts;
+
+ ns_ts = (u64)ts_high << 32 | ts_low;
+ ns_ts *= NSEC_PER_USEC;
+ hwts->hwtstamp = ns_to_ktime(ns_ts);
+
+ return netif_rx(skb);
+}
+
/*
* callback for bulk Rx urb
*/
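peak_usb_netif_rx_64() above rebuilds a 64-bit microsecond count from the split halves and scales it to nanoseconds: ns = ((u64)ts_high << 32 | ts_low) * NSEC_PER_USEC. Worked example with invented values: ts_high = 1 and ts_low = 2 give 4294967298 us, i.e. 4294967298000 ns handed to ns_to_ktime().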
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index b00a4811bf61..daa19f57e742 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -143,6 +143,7 @@ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
int peak_usb_netif_rx(struct sk_buff *skb,
struct peak_time_ref *time_ref, u32 ts_low);
+int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index b11eabad575b..6bd12549f101 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -515,7 +515,8 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+ peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low),
+ le32_to_cpu(rm->ts_high));
return 0;
}
@@ -551,11 +552,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
} else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
new_state = CAN_STATE_ERROR_WARNING;
} else {
- /* no error bit (so, no error skb, back to active state) */
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ /* back to (or still in) ERROR_ACTIVE state */
+ new_state = CAN_STATE_ERROR_ACTIVE;
pdev->bec.txerr = 0;
pdev->bec.rxerr = 0;
- return 0;
}
/* state hasn't changed */
@@ -568,8 +568,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
/* allocate an skb to store the error frame */
skb = alloc_can_err_skb(netdev, &cf);
- if (skb)
- can_change_state(netdev, cf, tx_state, rx_state);
+ can_change_state(netdev, cf, tx_state, rx_state);
/* things must be done even in case of OOM */
if (new_state == CAN_STATE_BUS_OFF)
@@ -581,7 +580,8 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->len;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+ peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low),
+ le32_to_cpu(sm->ts_high));
return 0;
}
@@ -631,7 +631,8 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low));
+ peak_usb_netif_rx_64(skb, le32_to_cpu(ov->ts_low),
+ le32_to_cpu(ov->ts_high));
netdev->stats.rx_over_errors++;
netdev->stats.rx_errors++;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3b883e607d8b..e2b15d29d15e 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -516,8 +516,7 @@ static int xcan_chip_start(struct net_device *ndev)
* @ndev: Pointer to net_device structure
* @mode: Tells the mode of the driver
*
- * This check the drivers state and calls the
- * the corresponding modes to set.
+ * This checks the driver state and sets the corresponding mode.
*
* Return: 0 on success and failure value on error
*/
@@ -982,7 +981,7 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
* @isr: interrupt status register value
*
* This is the CAN error interrupt and it will
- * check the the type of error and forward the error
+ * check the type of error and forward the error
* frame to upper layers.
*/
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
@@ -1844,11 +1843,9 @@ err:
static int xcan_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct xcan_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
pm_runtime_disable(&pdev->dev);
- netif_napi_del(&priv->napi);
free_candev(ndev);
return 0;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index a5f1aa911fe2..7b1457a6e327 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -70,6 +70,7 @@ config NET_DSA_QCA8K
config NET_DSA_REALTEK_SMI
tristate "Realtek SMI Ethernet switch family support"
select NET_DSA_TAG_RTL4_A
+ select NET_DSA_TAG_RTL8_4
select FIXED_PHY
select IRQ_DOMAIN
select REALTEK_PHY
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index f3598c040994..8da1569a34e6 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o
+realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 604f54112665..af4761968733 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1222,7 +1222,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
return;
/* Enable flow control on BCM5301x's CPU port */
- if (is5301x(dev) && port == dev->cpu_port)
+ if (is5301x(dev) && dsa_is_cpu_port(ds, port))
tx_pause = rx_pause = true;
if (phydev->pause) {
@@ -1291,12 +1291,6 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
return;
}
}
- } else if (is5301x(dev)) {
- if (port != dev->cpu_port) {
- b53_force_port_config(dev, dev->cpu_port, 2000,
- DUPLEX_FULL, true, true);
- b53_force_link(dev, dev->cpu_port, 1);
- }
}
/* Re-negotiate EEE if it was enabled already */
@@ -1349,10 +1343,8 @@ void b53_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
phylink_helper_basex_speed(state);
}
@@ -1550,7 +1542,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_vlan_del);
-/* Address Resolution Logic routines */
+/* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */
static int b53_arl_op_wait(struct b53_device *dev)
{
unsigned int timeout = 10;
@@ -1715,6 +1707,7 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct b53_device *priv = ds->priv;
+ int ret;
/* 5325 and 5365 require some more massaging, but could
* be supported eventually
@@ -1722,7 +1715,11 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP;
- return b53_arl_op(priv, 0, port, addr, vid, true);
+ mutex_lock(&priv->arl_mutex);
+ ret = b53_arl_op(priv, 0, port, addr, vid, true);
+ mutex_unlock(&priv->arl_mutex);
+
+ return ret;
}
EXPORT_SYMBOL(b53_fdb_add);
@@ -1730,8 +1727,13 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct b53_device *priv = ds->priv;
+ int ret;
+
+ mutex_lock(&priv->arl_mutex);
+ ret = b53_arl_op(priv, 0, port, addr, vid, false);
+ mutex_unlock(&priv->arl_mutex);
- return b53_arl_op(priv, 0, port, addr, vid, false);
+ return ret;
}
EXPORT_SYMBOL(b53_fdb_del);
@@ -1788,6 +1790,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
int ret;
u8 reg;
+ mutex_lock(&priv->arl_mutex);
+
/* Start search operation */
reg = ARL_SRCH_STDN;
b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
@@ -1795,18 +1799,18 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
do {
ret = b53_arl_search_wait(priv);
if (ret)
- return ret;
+ break;
b53_arl_search_rd(priv, 0, &results[0]);
ret = b53_fdb_copy(port, &results[0], cb, data);
if (ret)
- return ret;
+ break;
if (priv->num_arl_bins > 2) {
b53_arl_search_rd(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
- return ret;
+ break;
if (!results[0].is_valid && !results[1].is_valid)
break;
@@ -1814,6 +1818,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
} while (count++ < b53_max_arl_entries(priv) / 2);
+ mutex_unlock(&priv->arl_mutex);
+
return 0;
}
EXPORT_SYMBOL(b53_fdb_dump);
@@ -1822,6 +1828,7 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb)
{
struct b53_device *priv = ds->priv;
+ int ret;
/* 5325 and 5365 require some more massaging, but could
* be supported eventually
@@ -1829,7 +1836,11 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP;
- return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
+ mutex_lock(&priv->arl_mutex);
+ ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
+ mutex_unlock(&priv->arl_mutex);
+
+ return ret;
}
EXPORT_SYMBOL(b53_mdb_add);
@@ -1839,7 +1850,9 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
struct b53_device *priv = ds->priv;
int ret;
+ mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
+ mutex_unlock(&priv->arl_mutex);
if (ret)
dev_err(ds->dev, "failed to delete MDB entry\n");
@@ -2302,33 +2315,30 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5325_DEVICE_ID,
.dev_name = "BCM5325",
.vlans = 16,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x3f,
.arl_bins = 2,
.arl_buckets = 1024,
.imp_port = 5,
- .cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
{
.chip_id = BCM5365_DEVICE_ID,
.dev_name = "BCM5365",
.vlans = 256,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x3f,
.arl_bins = 2,
.arl_buckets = 1024,
.imp_port = 5,
- .cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
{
.chip_id = BCM5389_DEVICE_ID,
.dev_name = "BCM5389",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2338,11 +2348,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5395_DEVICE_ID,
.dev_name = "BCM5395",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2352,11 +2361,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5397_DEVICE_ID,
.dev_name = "BCM5397",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2366,11 +2374,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5398_DEVICE_ID,
.dev_name = "BCM5398",
.vlans = 4096,
- .enabled_ports = 0x7f,
+ .enabled_ports = 0x17f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2380,12 +2387,11 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53115_DEVICE_ID,
.dev_name = "BCM53115",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.vta_regs = B53_VTA_REGS,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
@@ -2394,11 +2400,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53125_DEVICE_ID,
.dev_name = "BCM53125",
.vlans = 4096,
- .enabled_ports = 0xff,
+ .enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2412,7 +2417,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2426,7 +2430,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
@@ -2436,11 +2439,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53010_DEVICE_ID,
.dev_name = "BCM53010",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2454,7 +2456,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2468,7 +2469,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2478,11 +2478,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53018_DEVICE_ID,
.dev_name = "BCM53018",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2492,11 +2491,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53019_DEVICE_ID,
.dev_name = "BCM53019",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2510,7 +2508,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2524,7 +2521,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2539,7 +2535,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 256,
.imp_port = 8,
- .cpu_port = 8, /* TODO: ports 4, 5, 8 */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2553,7 +2548,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2567,7 +2561,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 256,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2593,7 +2586,6 @@ static int b53_switch_init(struct b53_device *dev)
dev->vta_regs[2] = chip->vta_regs[2];
dev->jumbo_pm_reg = chip->jumbo_pm_reg;
dev->imp_port = chip->imp_port;
- dev->cpu_port = chip->cpu_port;
dev->num_vlans = chip->vlans;
dev->num_arl_bins = chip->arl_bins;
dev->num_arl_buckets = chip->arl_buckets;
@@ -2625,16 +2617,8 @@ static int b53_switch_init(struct b53_device *dev)
break;
#endif
}
- } else if (dev->chip_id == BCM53115_DEVICE_ID) {
- u64 strap_value;
-
- b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
- /* use second IMP port if GMII is enabled */
- if (strap_value & SV_GMII_CTRL_115)
- dev->cpu_port = 5;
}
- dev->enabled_ports |= BIT(dev->cpu_port);
dev->num_ports = fls(dev->enabled_ports);
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
@@ -2705,6 +2689,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->arl_mutex);
return dev;
}
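Every ARL caller above brackets the multi-register search/read/write sequence with the new arl_mutex, since a second CPU issuing an ARL command mid-sequence would corrupt the results. The repeated lock/op/unlock could equally live in one wrapper; a sketch (the _locked function name is hypothetical):

static int b53_arl_op_locked(struct b53_device *priv, int op, int port,
			     const unsigned char *addr, u16 vid, bool valid)
{
	int ret;

	mutex_lock(&priv->arl_mutex);	/* one ARL transaction at a time */
	ret = b53_arl_op(priv, op, port, addr, vid, valid);
	mutex_unlock(&priv->arl_mutex);

	return ret;
}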
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 959a52d41f0a..579da74ada64 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -107,6 +107,7 @@ struct b53_device {
struct mutex reg_mutex;
struct mutex stats_mutex;
+ struct mutex arl_mutex;
const struct b53_io_ops *ops;
/* chip specific data */
@@ -124,7 +125,6 @@ struct b53_device {
/* used ports mask */
u16 enabled_ports;
unsigned int imp_port;
- unsigned int cpu_port;
/* connect specific data */
u8 current_page;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 7578a5c38df5..13aa43b5cffd 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -667,7 +667,9 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
if (priv->int_phy_mask & BIT(port))
return priv->hw_params.gphy_rev;
else
- return 0;
+ return PHY_BRCM_AUTO_PWRDWN_ENABLE |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_IDDQ_SUSPEND;
}
static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
@@ -683,7 +685,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
state->interface != PHY_INTERFACE_MODE_GMII &&
state->interface != PHY_INTERFACE_MODE_INTERNAL &&
state->interface != PHY_INTERFACE_MODE_MOCA) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
if (port != core_readl(priv, CORE_IMP0_PRT_ID))
dev_err(ds->dev,
"Unsupported interface: %d for port %d\n",
@@ -711,10 +713,8 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 354655f9ed00..4e0b53d94b52 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -1403,10 +1403,8 @@ static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
else
phylink_set(mask, 1000baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static int
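The linkmode_*() helpers adopted across these phylink validate callbacks are thin wrappers over the bitmap API with the __ETHTOOL_LINK_MODE_MASK_NBITS length baked in, roughly:

static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
				const unsigned long *b)
{
	bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static inline void linkmode_zero(unsigned long *addr)
{
	bitmap_zero(addr, __ETHTOOL_LINK_MODE_MASK_NBITS);
}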
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 3ff4b7e177f3..7056d98d8177 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -230,7 +230,7 @@
#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
#define GSWIP_TABLE_VLAN_MAPPING 0x02
@@ -276,6 +276,7 @@ struct gswip_priv {
int num_gphy_fw;
struct gswip_gphy_fw *gphy_fw;
u32 port_vlan_filter;
+ struct mutex pce_table_lock;
};
struct gswip_pce_table_entry {
@@ -523,10 +524,14 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
+ mutex_lock(&priv->pce_table_lock);
+
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
- if (err)
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
return err;
+ }
gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
@@ -536,8 +541,10 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
- if (err)
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
return err;
+ }
for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
@@ -553,6 +560,8 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
+ mutex_unlock(&priv->pce_table_lock);
+
return 0;
}
@@ -565,10 +574,14 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv,
u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
+ mutex_lock(&priv->pce_table_lock);
+
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
- if (err)
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
return err;
+ }
gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
@@ -600,8 +613,12 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv,
crtl |= GSWIP_PCE_TBL_CTRL_BAS;
gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
- return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+
+ mutex_unlock(&priv->pce_table_lock);
+
+ return err;
}
/* Add the LAN port into a bridge with the CPU port by
@@ -1447,10 +1464,8 @@ static void gswip_phylink_set_capab(unsigned long *supported,
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
@@ -1478,7 +1493,7 @@ static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
goto unsupported;
break;
default:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
return;
}
@@ -1488,7 +1503,7 @@ static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
return;
unsupported:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
phy_modes(state->interface), port);
}
@@ -1518,7 +1533,7 @@ static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,
goto unsupported;
break;
default:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
return;
}
@@ -1528,7 +1543,7 @@ static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,
return;
unsupported:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
phy_modes(state->interface), port);
}
@@ -2106,6 +2121,7 @@ static int gswip_probe(struct platform_device *pdev)
priv->ds->priv = priv;
priv->ds->ops = priv->hw_info->ops;
priv->dev = dev;
+ mutex_init(&priv->pce_table_lock);
version = gswip_switch_r(priv, GSWIP_VERSION);
np = dev->of_node;
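The hunks above serialize all PCE table reads and writes behind the new pce_table_lock, unlocking explicitly on every early error return. A minimal sketch of the same discipline with a single unlock site, using a hypothetical helper name (kernel code often centralizes the unlock behind a goto label):

	/* Sketch only: the pce_table_lock discipline with one unlock site. */
	static int gswip_pce_table_entry_read_sketch(struct gswip_priv *priv,
						     struct gswip_pce_table_entry *tbl)
	{
		int err;

		mutex_lock(&priv->pce_table_lock);

		/* Wait for any in-flight table operation to finish */
		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
					     GSWIP_PCE_TBL_CTRL_BAS);
		if (err)
			goto out;

		/* ... program the address/opmode and read the entry back ... */

	out:
		mutex_unlock(&priv->pce_table_lock);
		return err;
	}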
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index c5142f86a3c7..43fc3087aeb3 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1542,15 +1542,13 @@ static void ksz8_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
return;
unsupported:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",
phy_modes(state->interface), port);
}
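The bitmap_*() to linkmode_*() conversions in this and the following hunks are behavior-preserving: the linkmode helpers in <linux/linkmode.h> are thin inline wrappers that hard-code the __ETHTOOL_LINK_MODE_MASK_NBITS length, roughly:

	static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
					const unsigned long *b)
	{
		bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
	}

	static inline void linkmode_zero(unsigned long *dst)
	{
		bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
	}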
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 094737e5084a..9890672a206d 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1035,9 +1035,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
- if (!dsa_is_user_port(ds, port))
- return 0;
-
mutex_lock(&priv->reg_mutex);
/* Allow the user port gets connected to the cpu port and also
@@ -1060,9 +1057,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- if (!dsa_is_user_port(ds, port))
- return;
-
mutex_lock(&priv->reg_mutex);
/* Clear up all port matrix which could be restored in the next
@@ -3211,7 +3205,7 @@ mt7530_probe(struct mdio_device *mdiodev)
return -ENOMEM;
priv->ds->dev = &mdiodev->dev;
- priv->ds->num_ports = DSA_MAX_PORTS;
+ priv->ds->num_ports = MT7530_NUM_PORTS;
/* Use medatek,mcm property to distinguish hardware type that would
* casues a little bit differences on power-on sequence.
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8dadcae93c9b..14c678a9e41b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -674,9 +674,8 @@ static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
if (chip->info->ops->phylink_validate)
chip->info->ops->phylink_validate(chip, port, mask, state);
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
/* We can only operate at 2500BaseX or 1000BaseX. If requested
* to advertise both, only report advertising at 2500BaseX.
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 341236dcbdb4..83808e7dbdda 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -958,8 +958,10 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
switch_node = dev->of_node;
ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node)
+ ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
if (!ports_node) {
- dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
return -ENODEV;
}
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 11b42fd812e4..45c5ec7a83ea 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -943,7 +943,7 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != ocelot_port->phy_mode) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -965,10 +965,8 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
phylink_set(mask, 2500baseX_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index de1d34a1f1e4..92eae63150ea 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -999,7 +999,7 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != ocelot_port->phy_mode) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -1018,10 +1018,8 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
phylink_set(mask, 2500baseX_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port,
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index a6bfb6abc51a..da0d7e68643a 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -522,7 +522,7 @@ static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
goto unsupported;
break;
default:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
return;
}
@@ -536,15 +536,13 @@ static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
return;
unsupported:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
state->interface, port);
}
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index a984f06f6f04..ea7f12778922 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -889,62 +889,183 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
}
static int
-qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv)
+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
{
- struct device_node *port_dn;
- phy_interface_t mode;
- struct dsa_port *dp;
- u32 val;
+ u32 mask = 0;
+ int ret = 0;
- /* CPU port is already checked */
- dp = dsa_to_port(priv->ds, 0);
+ /* SoC specific settings for ipq8064.
+ * If more devices require this, consider adding
+ * a dedicated binding.
+ */
+ if (of_machine_is_compatible("qcom,ipq8064"))
+ mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
+
+ /* SoC specific settings for ipq8065 */
+ if (of_machine_is_compatible("qcom,ipq8065"))
+ mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
+
+ if (mask) {
+ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
+ QCA8K_MAC_PWR_RGMII0_1_8V |
+ QCA8K_MAC_PWR_RGMII1_1_8V,
+ mask);
+ }
+
+ return ret;
+}
- port_dn = dp->dn;
+static int qca8k_find_cpu_port(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = ds->priv;
- /* Check if port 0 is set to the correct type */
- of_get_phy_mode(port_dn, &mode);
- if (mode != PHY_INTERFACE_MODE_RGMII_ID &&
- mode != PHY_INTERFACE_MODE_RGMII_RXID &&
- mode != PHY_INTERFACE_MODE_RGMII_TXID) {
+ /* Find the connected CPU port. Valid ports are 0 or 6 */
+ if (dsa_is_cpu_port(ds, 0))
return 0;
+
+ dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
+
+ if (dsa_is_cpu_port(ds, 6))
+ return 6;
+
+ return -EINVAL;
+}
+
+static int
+qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
+{
+ struct device_node *node = priv->dev->of_node;
+ const struct qca8k_match_data *data;
+ u32 val = 0;
+ int ret;
+
+ /* The QCA8327 requires the correct package mode to be set.
+ * Its bigger brother, the QCA8328, has the 176 pin layout.
+ * This should be applied by default, but we set it here just
+ * to make sure.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ data = of_device_get_match_data(priv->dev);
+
+ /* Set the correct 148 pin package mode for the QCA8327 */
+ if (data->reduced_package)
+ val |= QCA8327_PWS_PACKAGE148_EN;
+
+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
+ val);
+ if (ret)
+ return ret;
}
- switch (mode) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val))
- val = 2;
- else
- /* Switch regs accept value in ns, convert ps to ns */
- val = val / 1000;
+ if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
+ val |= QCA8K_PWS_POWER_ON_SEL;
- if (val > QCA8K_MAX_DELAY) {
- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
- val = 3;
+ if (of_property_read_bool(node, "qca,led-open-drain")) {
+ if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
+ dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
+ return -EINVAL;
}
- priv->rgmii_rx_delay = val;
- /* Stop here if we need to check only for rx delay */
- if (mode != PHY_INTERFACE_MODE_RGMII_ID)
- break;
+ val |= QCA8K_PWS_LED_OPEN_EN_CSR;
+ }
- fallthrough;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val))
- val = 1;
- else
- /* Switch regs accept value in ns, convert ps to ns */
- val = val / 1000;
+ return qca8k_rmw(priv, QCA8K_REG_PWS,
+ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
+ val);
+}
- if (val > QCA8K_MAX_DELAY) {
- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
- val = 3;
- }
+static int
+qca8k_parse_port_config(struct qca8k_priv *priv)
+{
+ int port, cpu_port_index = -1, ret;
+ struct device_node *port_dn;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ u32 delay;
- priv->rgmii_tx_delay = val;
- break;
- default:
- return 0;
+ /* We have 2 CPU ports. Check them */
+ for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) {
+ /* Skip all ports other than the possible CPU ports 0 and 6 */
+ if (port != 0 && port != 6)
+ continue;
+
+ dp = dsa_to_port(priv->ds, port);
+ port_dn = dp->dn;
+ cpu_port_index++;
+
+ if (!of_device_is_available(port_dn))
+ continue;
+
+ ret = of_get_phy_mode(port_dn, &mode);
+ if (ret)
+ continue;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_SGMII:
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ delay = 1;
+
+ if (delay > QCA8K_MAX_DELAY) {
+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
+
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ delay = 2;
+
+ if (delay > QCA8K_MAX_DELAY) {
+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
+
+ /* Skip sgmii parsing for rgmii* mode */
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ break;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
+ priv->ports_config.sgmii_tx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
+ priv->ports_config.sgmii_rx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
+ priv->ports_config.sgmii_enable_pll = true;
+
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
+ priv->ports_config.sgmii_enable_pll = false;
+ }
+
+ if (priv->switch_revision < 2)
+ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
+ }
+
+ break;
+ default:
+ continue;
+ }
}
return 0;
@@ -954,15 +1075,20 @@ static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int ret, i;
+ int cpu_port, ret, i;
u32 mask;
- /* Make sure that port 0 is the cpu port */
- if (!dsa_is_cpu_port(ds, 0)) {
- dev_err(priv->dev, "port 0 is not the CPU port");
- return -EINVAL;
+ cpu_port = qca8k_find_cpu_port(ds);
+ if (cpu_port < 0) {
+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
+ return cpu_port;
}
+ /* Parse CPU port config to be later used in phylink mac_config */
+ ret = qca8k_parse_port_config(priv);
+ if (ret)
+ return ret;
+
mutex_init(&priv->reg_mutex);
/* Start by setting up the register mapping */
@@ -975,7 +1101,11 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
- ret = qca8k_setup_of_rgmii_delay(priv);
+ ret = qca8k_setup_of_pws_reg(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mac_pwr_sel(priv);
if (ret)
return ret;
@@ -992,41 +1122,49 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
dev_warn(priv->dev, "mib init failed");
- /* Enable QCA header mode on the cpu port */
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
- }
-
- /* Disable forwarding by default on all ports */
+ /* Initial setup of all ports */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER, 0);
if (ret)
return ret;
- }
- /* Disable MAC by default on all ports */
- for (i = 1; i < QCA8K_NUM_PORTS; i++)
- qca8k_port_set_status(priv, i, 0);
+ /* Enable QCA header mode on all cpu ports */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
+ }
+
+ /* Disable MAC by default on all user ports */
+ if (dsa_is_user_port(ds, i))
+ qca8k_port_set_status(priv, i, 0);
+ }
- /* Forward all unknown frames to CPU port for Linux processing */
+ /* Forward all unknown frames to the CPU port for Linux processing.
+ * Notice that in a multi-cpu config only one port should be set
+ * for igmp, unknown, multicast and broadcast packets
+ */
ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
if (ret)
return ret;
- /* Setup connection between CPU port & user ports */
+ /* Setup connection between CPU port & user ports
+ * Configure specific switch configuration for ports
+ */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
/* CPU port gets connected to all user ports of the switch */
if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
if (ret)
return ret;
@@ -1038,7 +1176,7 @@ qca8k_setup(struct dsa_switch *ds)
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER,
- BIT(QCA8K_CPU_PORT));
+ BIT(cpu_port));
if (ret)
return ret;
@@ -1063,16 +1201,14 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
}
- }
- /* The port 5 of the qca8337 have some problem in flood condition. The
- * original legacy driver had some specific buffer and priority settings
- * for the different port suggested by the QCA switch team. Add this
- * missing settings to improve switch stability under load condition.
- * This problem is limited to qca8337 and other qca8k switch are not affected.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337) {
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Port 5 of the qca8337 has some problems in flood conditions. The
+ * original legacy driver had some specific buffer and priority settings
+ * for the different ports suggested by the QCA switch team. Add these
+ * missing settings to improve switch stability under load conditions.
+ * This problem is limited to the qca8337; other qca8k switches are not
+ * affected.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337) {
switch (i) {
/* The 2 CPU port and port 5 requires some different
* priority than any other ports.
@@ -1108,6 +1244,12 @@ qca8k_setup(struct dsa_switch *ds)
QCA8K_PORT_HOL_CTRL1_WRED_EN,
mask);
}
+
+ /* Set the initial MTU for every port.
+ * We only have a global MTU setting, so track
+ * every port and set the max across all ports.
+ */
+ priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
}
/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
@@ -1121,8 +1263,6 @@ qca8k_setup(struct dsa_switch *ds)
}
/* Setup our port MTUs to match power on defaults */
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
if (ret)
dev_warn(priv->dev, "failed setting MTU settings");
@@ -1137,12 +1277,53 @@ qca8k_setup(struct dsa_switch *ds)
}
static void
+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
+ u32 reg)
+{
+ u32 delay, val = 0;
+ int ret;
+
+ /* The delay can be declared in 3 different ways:
+ * the phy-mode set to rgmii with the standard internal-delay
+ * bindings defined, or the rgmii-id or rgmii-txid/rxid phy-mode
+ * set. The parse logic sets a delay different from 0 only when
+ * one of these 3 ways is used; in every other case the delay is
+ * not enabled. With ID or TX/RXID the delay is enabled and set
+ * to the default and recommended value.
+ */
+ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
+ }
+
+ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
+ }
+
+ /* Set RGMII delay based on the selected values */
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
+ val);
+ if (ret)
+ dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
+ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
+}
+
+static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct qca8k_priv *priv = ds->priv;
+ int cpu_port_index, ret;
u32 reg, val;
- int ret;
switch (port) {
case 0: /* 1st CPU port */
@@ -1154,6 +1335,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return;
reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
break;
case 1:
case 2:
@@ -1172,6 +1354,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return;
reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
break;
default:
dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
@@ -1186,23 +1369,18 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
- /* RGMII mode means no delay so don't enable the delay */
- qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
- break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
- /* RGMII_ID needs internal delay. This is enabled through
- * PORT5_PAD_CTRL for all ports, rather than individual port
- * registers
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
+
+ /* Configure rgmii delay */
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
+ /* The QCA8337 requires the rgmii rx delay to be set on all
+ * ports. This is enabled through PORT5_PAD_CTRL for all ports,
+ * rather than through the individual port registers.
+ */
- qca8k_write(priv, reg,
- QCA8K_PORT_PAD_RGMII_EN |
- QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) |
- QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) |
- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
- /* QCA8337 requires to set rgmii rx delay */
if (priv->switch_id == QCA8K_ID_QCA8337)
qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
@@ -1227,8 +1405,11 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
if (ret)
return;
- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
- QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD;
+ val |= QCA8K_SGMII_EN_SD;
+
+ if (priv->ports_config.sgmii_enable_pll)
+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+ QCA8K_SGMII_EN_TX;
if (dsa_is_cpu_port(ds, port)) {
/* CPU port, we're talking to the CPU MAC, be a PHY */
@@ -1243,6 +1424,35 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
}
qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+ /* For qca8327/qca8328/qca8334/qca8338 the sgmii line is unique
+ * and the falling edge is configured by writing to the PORT0 PAD reg
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327 ||
+ priv->switch_id == QCA8K_ID_QCA8337)
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+ val = 0;
+
+ /* SGMII Clock phase configuration */
+ if (priv->ports_config.sgmii_rx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+ if (priv->ports_config.sgmii_tx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+ if (val)
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
+
+ /* The original code reports port instability, as SGMII also
+ * requires the delay to be set. Apply the advised values here, or
+ * take them from the DT.
+ */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
@@ -1522,10 +1732,15 @@ static int
qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int port_mask = BIT(QCA8K_CPU_PORT);
+ int port_mask, cpu_port;
int i, ret;
- for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Add this port to the portvlan mask of the other ports
@@ -1551,9 +1766,13 @@ static void
qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int i;
+ int cpu_port, i;
- for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Remove this port to the portvlan mask of the other ports
@@ -1568,7 +1787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
* this port
*/
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT));
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}
static int
@@ -1939,7 +2158,12 @@ static int qca8k_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
-static const struct qca8k_match_data qca832x = {
+static const struct qca8k_match_data qca8327 = {
+ .id = QCA8K_ID_QCA8327,
+ .reduced_package = true,
+};
+
+static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
};
@@ -1948,7 +2172,8 @@ static const struct qca8k_match_data qca833x = {
};
static const struct of_device_id qca8k_of_match[] = {
- { .compatible = "qca,qca8327", .data = &qca832x },
+ { .compatible = "qca,qca8327", .data = &qca8327 },
+ { .compatible = "qca,qca8328", .data = &qca8328 },
{ .compatible = "qca,qca8334", .data = &qca833x },
{ .compatible = "qca,qca8337", .data = &qca833x },
{ /* sentinel */ },
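For reference, the *-internal-delay-ps handling introduced above applies one rule throughout: DT values are given in picoseconds, the pad registers take whole nanoseconds capped at QCA8K_MAX_DELAY, and the rgmii-id/txid/rxid modes fall back to the recommended defaults when no property is present. A condensed sketch of that rule (hypothetical helper, not part of the driver):

	/* Sketch: resolve one RGMII delay in ns from DT, with an rgmii-id default. */
	static u32 qca8k_resolve_delay_sketch(struct device_node *port_dn,
					      const char *prop, u32 id_default)
	{
		u32 delay = 0;

		if (!of_property_read_u32(port_dn, prop, &delay))
			delay /= 1000;		/* DT is in ps, the registers take ns */
		else
			delay = id_default;	/* e.g. 1 ns tx / 2 ns rx for rgmii-id */

		return min_t(u32, delay, QCA8K_MAX_DELAY);	/* limited to 3 ns */
	}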
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index ed3b05ad6745..e10571a398c9 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -13,6 +13,7 @@
#include <linux/gpio.h>
#define QCA8K_NUM_PORTS 7
+#define QCA8K_NUM_CPU_PORTS 2
#define QCA8K_MAX_MTU 9000
#define PHY_ID_QCA8327 0x004dd034
@@ -24,8 +25,6 @@
#define QCA8K_NUM_FDB_RECORDS 2048
-#define QCA8K_CPU_PORT 0
-
#define QCA8K_PORT_VID_DEF 1
/* Global control registers */
@@ -35,16 +34,26 @@
#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
#define QCA8K_REG_PORT0_PAD_CTRL 0x004
+#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
+#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
#define QCA8K_REG_PORT5_PAD_CTRL 0x008
#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
#define QCA8K_MAX_DELAY 3
#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
#define QCA8K_REG_PWS 0x010
+#define QCA8K_PWS_POWER_ON_SEL BIT(31)
+/* This reg is only valid for QCA832x and toggles the package
+ * type from 176 pin (by default) to the 148 pin used on the QCA8327
+ */
+#define QCA8327_PWS_PACKAGE148_EN BIT(30)
+#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
#define QCA8K_REG_MODULE_EN 0x030
#define QCA8K_MODULE_EN_MIB BIT(0)
@@ -100,6 +109,11 @@
#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
+/* MAC_PWR_SEL registers */
+#define QCA8K_REG_MAC_PWR_SEL 0x0e4
+#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
+#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
+
/* EEE control registers */
#define QCA8K_REG_EEE_CTRL 0x100
#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
@@ -248,14 +262,27 @@ struct ar8xxx_port_status {
struct qca8k_match_data {
u8 id;
+ bool reduced_package;
+};
+
+enum {
+ QCA8K_CPU_PORT0,
+ QCA8K_CPU_PORT6,
+};
+
+struct qca8k_ports_config {
+ bool sgmii_rx_clk_falling_edge;
+ bool sgmii_tx_clk_falling_edge;
+ bool sgmii_enable_pll;
+ u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+ u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
};
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
- u8 rgmii_tx_delay;
- u8 rgmii_rx_delay;
bool legacy_phy_port_mapping;
+ struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index 2fcfd917b876..c66ebd0ee217 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -501,6 +501,10 @@ static const struct of_device_id realtek_smi_of_match[] = {
.compatible = "realtek,rtl8366s",
.data = NULL,
},
+ {
+ .compatible = "realtek,rtl8365mb",
+ .data = &rtl8365mb_variant,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
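With the new entry, a "realtek,rtl8365mb" compatible resolves to rtl8365mb_variant through the usual OF match plumbing; the SMI core presumably picks the variant up along these lines (a sketch of the standard pattern, not the literal core code):

	const struct realtek_smi_variant *var;

	var = of_device_get_match_data(dev);
	if (!var)
		return -EINVAL;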
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
index fcf465f7f922..5bfa53e2480a 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek-smi-core.h
@@ -129,9 +129,6 @@ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
int rtl8366_reset_vlan(struct realtek_smi *smi);
-int rtl8366_init_vlan(struct realtek_smi *smi);
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack);
@@ -143,5 +140,6 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
extern const struct realtek_smi_variant rtl8366rb_variant;
+extern const struct realtek_smi_variant rtl8365mb_variant;
#endif /* _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c
new file mode 100644
index 000000000000..baaae97283c5
--- /dev/null
+++ b/drivers/net/dsa/rtl8365mb.c
@@ -0,0 +1,1982 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI subdriver for the Realtek RTL8365MB-VC ethernet switch.
+ *
+ * Copyright (C) 2021 Alvin Šipraga <alsi@bang-olufsen.dk>
+ * Copyright (C) 2021 Michael Rasmussen <mir@bang-olufsen.dk>
+ *
+ * The RTL8365MB-VC is a 4+1 port 10/100/1000M switch controller. It includes 4
+ * integrated PHYs for the user facing ports, and an extension interface which
+ * can be connected to the CPU - or another PHY - via either MII, RMII, or
+ * RGMII. The switch is configured via the Realtek Simple Management Interface
+ * (SMI), which uses the MDIO/MDC lines.
+ *
+ * Below is a simplified block diagram of the chip and its relevant interfaces.
+ *
+ * .-----------------------------------.
+ * | |
+ * UTP <---------------> Giga PHY <-> PCS <-> P0 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P1 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P2 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P3 GMAC |
+ * | |
+ * CPU/PHY <-MII/RMII/RGMII---> Extension <---> Extension |
+ * | interface 1 GMAC 1 |
+ * | |
+ * SMI driver/ <-MDC/SCL---> Management ~~~~~~~~~~~~~~ |
+ * EEPROM <-MDIO/SDA--> interface ~REALTEK ~~~~~ |
+ * | ~RTL8365MB ~~~ |
+ * | ~GXXXC TAIWAN~ |
+ * GPIO <--------------> Reset ~~~~~~~~~~~~~~ |
+ * | |
+ * Interrupt <----------> Link UP/DOWN events |
+ * controller | |
+ * '-----------------------------------'
+ *
+ * The driver uses DSA to integrate the 4 user and 1 extension ports into the
+ * kernel. Netdevices are created for the user ports, as are PHY devices for
+ * their integrated PHYs. The device tree firmware should also specify the link
+ * partner of the extension port - either via a fixed-link or other phy-handle.
+ * See the device tree bindings for more detailed information. Note that the
+ * driver has only been tested with a fixed-link, but in principle it should not
+ * matter.
+ *
+ * NOTE: Currently, only the RGMII interface is implemented in this driver.
+ *
+ * The interrupt line is asserted on link UP/DOWN events. The driver creates a
+ * custom irqchip to handle this interrupt and demultiplex the events by reading
+ * the status registers via SMI. Interrupts are then propagated to the relevant
+ * PHY device.
+ *
+ * The EEPROM contains initial register values which the chip will read over I2C
+ * upon hardware reset. It is also possible to omit the EEPROM. In both cases,
+ * the driver will manually reprogram some registers using jam tables to reach
+ * an initial state defined by the vendor driver.
+ *
+ * This Linux driver is written based on an OS-agnostic vendor driver from
+ * Realtek. The reference GPL-licensed sources can be found in the OpenWrt
+ * source tree under the name rtl8367c. The vendor driver claims to support a
+ * number of similar switch controllers from Realtek, but the only hardware we
+ * have is the RTL8365MB-VC. Moreover, there does not seem to be any chip under
+ * the name RTL8367C. Although one wishes that the 'C' stood for some kind of
+ * common hardware revision, there exist examples of chips with the suffix -VC
+ * which are explicitly not supported by the rtl8367c driver and which instead
+ * require the rtl8367d vendor driver. With all this uncertainty, the driver has
+ * been modestly named rtl8365mb. Future implementors may wish to rename things
+ * accordingly.
+ *
+ * In the same family of chips, some carry up to 8 user ports and up to 2
+ * extension ports. Where possible this driver tries to make things generic, but
+ * more work must be done to support these configurations. According to
+ * documentation from Realtek, the family should include the following chips:
+ *
+ * - RTL8363NB
+ * - RTL8363NB-VB
+ * - RTL8363SC
+ * - RTL8363SC-VB
+ * - RTL8364NB
+ * - RTL8364NB-VB
+ * - RTL8365MB-VC
+ * - RTL8366SC
+ * - RTL8367RB-VB
+ * - RTL8367SB
+ * - RTL8367S
+ * - RTL8370MB
+ * - RTL8310SR
+ *
+ * Some of the register logic for these additional chips has been skipped over
+ * while implementing this driver. It is therefore not possible to assume that
+ * things will work out-of-the-box for other chips, and a careful review of the
+ * vendor driver may be needed to expand support. The RTL8365MB-VC seems to be
+ * one of the simpler chips.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/mutex.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/if_bridge.h>
+
+#include "realtek-smi-core.h"
+
+/* Chip-specific data and limits */
+#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
+#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
+#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
+
+/* Family-specific data and limits */
+#define RTL8365MB_NUM_PHYREGS 32
+#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
+#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
+
+/* Chip identification registers */
+#define RTL8365MB_CHIP_ID_REG 0x1300
+
+#define RTL8365MB_CHIP_VER_REG 0x1301
+
+#define RTL8365MB_MAGIC_REG 0x13C2
+#define RTL8365MB_MAGIC_VALUE 0x0249
+
+/* Chip reset register */
+#define RTL8365MB_CHIP_RESET_REG 0x1322
+#define RTL8365MB_CHIP_RESET_SW_MASK 0x0002
+#define RTL8365MB_CHIP_RESET_HW_MASK 0x0001
+
+/* Interrupt polarity register */
+#define RTL8365MB_INTR_POLARITY_REG 0x1100
+#define RTL8365MB_INTR_POLARITY_MASK 0x0001
+#define RTL8365MB_INTR_POLARITY_HIGH 0
+#define RTL8365MB_INTR_POLARITY_LOW 1
+
+/* Interrupt control/status register - enable/check specific interrupt types */
+#define RTL8365MB_INTR_CTRL_REG 0x1101
+#define RTL8365MB_INTR_STATUS_REG 0x1102
+#define RTL8365MB_INTR_SLIENT_START_2_MASK 0x1000
+#define RTL8365MB_INTR_SLIENT_START_MASK 0x0800
+#define RTL8365MB_INTR_ACL_ACTION_MASK 0x0200
+#define RTL8365MB_INTR_CABLE_DIAG_FIN_MASK 0x0100
+#define RTL8365MB_INTR_INTERRUPT_8051_MASK 0x0080
+#define RTL8365MB_INTR_LOOP_DETECTION_MASK 0x0040
+#define RTL8365MB_INTR_GREEN_TIMER_MASK 0x0020
+#define RTL8365MB_INTR_SPECIAL_CONGEST_MASK 0x0010
+#define RTL8365MB_INTR_SPEED_CHANGE_MASK 0x0008
+#define RTL8365MB_INTR_LEARN_OVER_MASK 0x0004
+#define RTL8365MB_INTR_METER_EXCEEDED_MASK 0x0002
+#define RTL8365MB_INTR_LINK_CHANGE_MASK 0x0001
+#define RTL8365MB_INTR_ALL_MASK \
+ (RTL8365MB_INTR_SLIENT_START_2_MASK | \
+ RTL8365MB_INTR_SLIENT_START_MASK | \
+ RTL8365MB_INTR_ACL_ACTION_MASK | \
+ RTL8365MB_INTR_CABLE_DIAG_FIN_MASK | \
+ RTL8365MB_INTR_INTERRUPT_8051_MASK | \
+ RTL8365MB_INTR_LOOP_DETECTION_MASK | \
+ RTL8365MB_INTR_GREEN_TIMER_MASK | \
+ RTL8365MB_INTR_SPECIAL_CONGEST_MASK | \
+ RTL8365MB_INTR_SPEED_CHANGE_MASK | \
+ RTL8365MB_INTR_LEARN_OVER_MASK | \
+ RTL8365MB_INTR_METER_EXCEEDED_MASK | \
+ RTL8365MB_INTR_LINK_CHANGE_MASK)
+
+/* Per-port interrupt type status registers */
+#define RTL8365MB_PORT_LINKDOWN_IND_REG 0x1106
+#define RTL8365MB_PORT_LINKDOWN_IND_MASK 0x07FF
+
+#define RTL8365MB_PORT_LINKUP_IND_REG 0x1107
+#define RTL8365MB_PORT_LINKUP_IND_MASK 0x07FF
+
+/* PHY indirect access registers */
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_REG 0x1F00
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK 0x0002
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ 0
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE 1
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK 0x0001
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE 1
+#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
+#define RTL8365MB_PHY_BASE 0x2000
+#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
+#define RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG 0x1F04
+
+/* PHY OCP address prefix register */
+#define RTL8365MB_GPHY_OCP_MSB_0_REG 0x1D15
+#define RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK 0x0FC0
+#define RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK 0xFC00
+
+/* The PHY OCP addresses of PHY registers 0~31 start here */
+#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
+
+/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
+#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
+#define RTL8365MB_EXT_PORT_MODE_RGMII 1
+#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
+#define RTL8365MB_EXT_PORT_MODE_MII_PHY 3
+#define RTL8365MB_EXT_PORT_MODE_TMII_MAC 4
+#define RTL8365MB_EXT_PORT_MODE_TMII_PHY 5
+#define RTL8365MB_EXT_PORT_MODE_GMII 6
+#define RTL8365MB_EXT_PORT_MODE_RMII_MAC 7
+#define RTL8365MB_EXT_PORT_MODE_RMII_PHY 8
+#define RTL8365MB_EXT_PORT_MODE_SGMII 9
+#define RTL8365MB_EXT_PORT_MODE_HSGMII 10
+#define RTL8365MB_EXT_PORT_MODE_1000X_100FX 11
+#define RTL8365MB_EXT_PORT_MODE_1000X 12
+#define RTL8365MB_EXT_PORT_MODE_100FX 13
+
+/* EXT port interface mode configuration registers 0~1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
+ (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
+ ((_extport) >> 1) * (0x13C3 - 0x1305))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
+ (0xF << (((_extport) % 2)))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
+ (((_extport) % 2) * 4)
+
+/* EXT port RGMII TX/RX delay configuration registers 1~2 */
+#define RTL8365MB_EXT_RGMXF_REG1 0x1307
+#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
+#define RTL8365MB_EXT_RGMXF_REG(_extport) \
+ (RTL8365MB_EXT_RGMXF_REG1 + \
+ (((_extport) >> 1) * (0x13C5 - 0x1307)))
+#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
+#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
+
+/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
+#define RTL8365MB_PORT_SPEED_10M 0
+#define RTL8365MB_PORT_SPEED_100M 1
+#define RTL8365MB_PORT_SPEED_1000M 2
+
+/* EXT port force configuration registers 0~2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
+ (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
+ ((_extport) & 0x1) + \
+ ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK 0x0020
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK 0x0010
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK 0x0004
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK 0x0003
+
+/* CPU port mask register - controls which ports are treated as CPU ports */
+#define RTL8365MB_CPU_PORT_MASK_REG 0x1219
+#define RTL8365MB_CPU_PORT_MASK_MASK 0x07FF
+
+/* CPU control register */
+#define RTL8365MB_CPU_CTRL_REG 0x121A
+#define RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK 0x0400
+#define RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK 0x0200
+#define RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK 0x0080
+#define RTL8365MB_CPU_CTRL_TAG_POSITION_MASK 0x0040
+#define RTL8365MB_CPU_CTRL_TRAP_PORT_MASK 0x0038
+#define RTL8365MB_CPU_CTRL_INSERTMODE_MASK 0x0006
+#define RTL8365MB_CPU_CTRL_EN_MASK 0x0001
+
+/* Maximum packet length register */
+#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C
+#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF
+
+/* Port learning limit registers */
+#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20
+#define RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(_physport) \
+ (RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE + (_physport))
+
+/* Port isolation (forwarding mask) registers */
+#define RTL8365MB_PORT_ISOLATION_REG_BASE 0x08A2
+#define RTL8365MB_PORT_ISOLATION_REG(_physport) \
+ (RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
+#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
+
+/* MSTP port state registers - indexed by tree instance (MSTI) */
+#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
+#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
+ (RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
+#define RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(_physport) ((_physport) << 1)
+#define RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(_physport) \
+ (0x3 << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET((_physport)))
+
+/* MIB counter value registers */
+#define RTL8365MB_MIB_COUNTER_BASE 0x1000
+#define RTL8365MB_MIB_COUNTER_REG(_x) (RTL8365MB_MIB_COUNTER_BASE + (_x))
+
+/* MIB counter address register */
+#define RTL8365MB_MIB_ADDRESS_REG 0x1004
+#define RTL8365MB_MIB_ADDRESS_PORT_OFFSET 0x007C
+#define RTL8365MB_MIB_ADDRESS(_p, _x) \
+ (((RTL8365MB_MIB_ADDRESS_PORT_OFFSET) * (_p) + (_x)) >> 2)
+
+#define RTL8365MB_MIB_CTRL0_REG 0x1005
+#define RTL8365MB_MIB_CTRL0_RESET_MASK 0x0002
+#define RTL8365MB_MIB_CTRL0_BUSY_MASK 0x0001
+
+/* The DSA callback .get_stats64 runs in atomic context, so we are not allowed
+ * to block. On the other hand, accessing MIB counters absolutely requires us to
+ * block. The solution is thus to schedule work which polls the MIB counters
+ * asynchronously and updates some private data, which the callback can then
+ * fetch atomically. Three seconds should be a good enough polling interval.
+ */
+#define RTL8365MB_STATS_INTERVAL_JIFFIES (3 * HZ)
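+/* Concretely (see the struct rtl8365mb_port kerneldoc below): mib_work runs
+ * rtl8365mb_stats_poll() every RTL8365MB_STATS_INTERVAL_JIFFIES, refreshing
+ * the cached rtnl_link_stats64 under stats_lock from the blocking MIB read
+ * path, while rtl8365mb_get_stats64() only takes stats_lock to copy the
+ * cache out, so it never needs to block.
+ */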
+
+enum rtl8365mb_mib_counter_index {
+ RTL8365MB_MIB_ifInOctets,
+ RTL8365MB_MIB_dot3StatsFCSErrors,
+ RTL8365MB_MIB_dot3StatsSymbolErrors,
+ RTL8365MB_MIB_dot3InPauseFrames,
+ RTL8365MB_MIB_dot3ControlInUnknownOpcodes,
+ RTL8365MB_MIB_etherStatsFragments,
+ RTL8365MB_MIB_etherStatsJabbers,
+ RTL8365MB_MIB_ifInUcastPkts,
+ RTL8365MB_MIB_etherStatsDropEvents,
+ RTL8365MB_MIB_ifInMulticastPkts,
+ RTL8365MB_MIB_ifInBroadcastPkts,
+ RTL8365MB_MIB_inMldChecksumError,
+ RTL8365MB_MIB_inIgmpChecksumError,
+ RTL8365MB_MIB_inMldSpecificQuery,
+ RTL8365MB_MIB_inMldGeneralQuery,
+ RTL8365MB_MIB_inIgmpSpecificQuery,
+ RTL8365MB_MIB_inIgmpGeneralQuery,
+ RTL8365MB_MIB_inMldLeaves,
+ RTL8365MB_MIB_inIgmpLeaves,
+ RTL8365MB_MIB_etherStatsOctets,
+ RTL8365MB_MIB_etherStatsUnderSizePkts,
+ RTL8365MB_MIB_etherOversizeStats,
+ RTL8365MB_MIB_etherStatsPkts64Octets,
+ RTL8365MB_MIB_etherStatsPkts65to127Octets,
+ RTL8365MB_MIB_etherStatsPkts128to255Octets,
+ RTL8365MB_MIB_etherStatsPkts256to511Octets,
+ RTL8365MB_MIB_etherStatsPkts512to1023Octets,
+ RTL8365MB_MIB_etherStatsPkts1024to1518Octets,
+ RTL8365MB_MIB_ifOutOctets,
+ RTL8365MB_MIB_dot3StatsSingleCollisionFrames,
+ RTL8365MB_MIB_dot3StatsMultipleCollisionFrames,
+ RTL8365MB_MIB_dot3StatsDeferredTransmissions,
+ RTL8365MB_MIB_dot3StatsLateCollisions,
+ RTL8365MB_MIB_etherStatsCollisions,
+ RTL8365MB_MIB_dot3StatsExcessiveCollisions,
+ RTL8365MB_MIB_dot3OutPauseFrames,
+ RTL8365MB_MIB_ifOutDiscards,
+ RTL8365MB_MIB_dot1dTpPortInDiscards,
+ RTL8365MB_MIB_ifOutUcastPkts,
+ RTL8365MB_MIB_ifOutMulticastPkts,
+ RTL8365MB_MIB_ifOutBroadcastPkts,
+ RTL8365MB_MIB_outOampduPkts,
+ RTL8365MB_MIB_inOampduPkts,
+ RTL8365MB_MIB_inIgmpJoinsSuccess,
+ RTL8365MB_MIB_inIgmpJoinsFail,
+ RTL8365MB_MIB_inMldJoinsSuccess,
+ RTL8365MB_MIB_inMldJoinsFail,
+ RTL8365MB_MIB_inReportSuppressionDrop,
+ RTL8365MB_MIB_inLeaveSuppressionDrop,
+ RTL8365MB_MIB_outIgmpReports,
+ RTL8365MB_MIB_outIgmpLeaves,
+ RTL8365MB_MIB_outIgmpGeneralQuery,
+ RTL8365MB_MIB_outIgmpSpecificQuery,
+ RTL8365MB_MIB_outMldReports,
+ RTL8365MB_MIB_outMldLeaves,
+ RTL8365MB_MIB_outMldGeneralQuery,
+ RTL8365MB_MIB_outMldSpecificQuery,
+ RTL8365MB_MIB_inKnownMulticastPkts,
+ RTL8365MB_MIB_END,
+};
+
+struct rtl8365mb_mib_counter {
+ u32 offset;
+ u32 length;
+ const char *name;
+};
+
+#define RTL8365MB_MAKE_MIB_COUNTER(_offset, _length, _name) \
+ [RTL8365MB_MIB_ ## _name] = { _offset, _length, #_name }
+
+static struct rtl8365mb_mib_counter rtl8365mb_mib_counters[] = {
+ RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(4, 2, dot3StatsFCSErrors),
+ RTL8365MB_MAKE_MIB_COUNTER(6, 2, dot3StatsSymbolErrors),
+ RTL8365MB_MAKE_MIB_COUNTER(8, 2, dot3InPauseFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(10, 2, dot3ControlInUnknownOpcodes),
+ RTL8365MB_MAKE_MIB_COUNTER(12, 2, etherStatsFragments),
+ RTL8365MB_MAKE_MIB_COUNTER(14, 2, etherStatsJabbers),
+ RTL8365MB_MAKE_MIB_COUNTER(16, 2, ifInUcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(18, 2, etherStatsDropEvents),
+ RTL8365MB_MAKE_MIB_COUNTER(20, 2, ifInMulticastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(22, 2, ifInBroadcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(24, 2, inMldChecksumError),
+ RTL8365MB_MAKE_MIB_COUNTER(26, 2, inIgmpChecksumError),
+ RTL8365MB_MAKE_MIB_COUNTER(28, 2, inMldSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(30, 2, inMldGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(32, 2, inIgmpSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(34, 2, inIgmpGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(36, 2, inMldLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(38, 2, inIgmpLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(40, 4, etherStatsOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(44, 2, etherStatsUnderSizePkts),
+ RTL8365MB_MAKE_MIB_COUNTER(46, 2, etherOversizeStats),
+ RTL8365MB_MAKE_MIB_COUNTER(48, 2, etherStatsPkts64Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(50, 2, etherStatsPkts65to127Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(52, 2, etherStatsPkts128to255Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(54, 2, etherStatsPkts256to511Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(56, 2, etherStatsPkts512to1023Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(58, 2, etherStatsPkts1024to1518Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(60, 4, ifOutOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(64, 2, dot3StatsSingleCollisionFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(66, 2, dot3StatsMultipleCollisionFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(68, 2, dot3StatsDeferredTransmissions),
+ RTL8365MB_MAKE_MIB_COUNTER(70, 2, dot3StatsLateCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(72, 2, etherStatsCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(74, 2, dot3StatsExcessiveCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(76, 2, dot3OutPauseFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(78, 2, ifOutDiscards),
+ RTL8365MB_MAKE_MIB_COUNTER(80, 2, dot1dTpPortInDiscards),
+ RTL8365MB_MAKE_MIB_COUNTER(82, 2, ifOutUcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(84, 2, ifOutMulticastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(86, 2, ifOutBroadcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(88, 2, outOampduPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(90, 2, inOampduPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(92, 4, inIgmpJoinsSuccess),
+ RTL8365MB_MAKE_MIB_COUNTER(96, 2, inIgmpJoinsFail),
+ RTL8365MB_MAKE_MIB_COUNTER(98, 2, inMldJoinsSuccess),
+ RTL8365MB_MAKE_MIB_COUNTER(100, 2, inMldJoinsFail),
+ RTL8365MB_MAKE_MIB_COUNTER(102, 2, inReportSuppressionDrop),
+ RTL8365MB_MAKE_MIB_COUNTER(104, 2, inLeaveSuppressionDrop),
+ RTL8365MB_MAKE_MIB_COUNTER(106, 2, outIgmpReports),
+ RTL8365MB_MAKE_MIB_COUNTER(108, 2, outIgmpLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(110, 2, outIgmpGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(112, 2, outIgmpSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(114, 2, outMldReports),
+ RTL8365MB_MAKE_MIB_COUNTER(116, 2, outMldLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(118, 2, outMldGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(120, 2, outMldSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(122, 2, inKnownMulticastPkts),
+};
+
+static_assert(ARRAY_SIZE(rtl8365mb_mib_counters) == RTL8365MB_MIB_END);
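+/* For illustration, RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets) expands to
+ *   [RTL8365MB_MIB_ifInOctets] = { 0, 4, "ifInOctets" },
+ * a designated initializer keyed by the enum above; the static_assert just
+ * above keeps the table and the enum from drifting apart.
+ */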
+
+struct rtl8365mb_jam_tbl_entry {
+ u16 reg;
+ u16 val;
+};
+
+/* Lifted from the vendor driver sources */
+static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_8365mb_vc[] = {
+ { 0x13EB, 0x15BB }, { 0x1303, 0x06D6 }, { 0x1304, 0x0700 },
+ { 0x13E2, 0x003F }, { 0x13F9, 0x0090 }, { 0x121E, 0x03CA },
+ { 0x1233, 0x0352 }, { 0x1237, 0x00A0 }, { 0x123A, 0x0030 },
+ { 0x1239, 0x0084 }, { 0x0301, 0x1000 }, { 0x1349, 0x001F },
+ { 0x18E0, 0x4004 }, { 0x122B, 0x241C }, { 0x1305, 0xC000 },
+ { 0x13F0, 0x0000 },
+};
+
+static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
+ { 0x1200, 0x7FCB }, { 0x0884, 0x0003 }, { 0x06EB, 0x0001 },
+ { 0x03Fa, 0x0007 }, { 0x08C8, 0x00C0 }, { 0x0A30, 0x020E },
+ { 0x0800, 0x0000 }, { 0x0802, 0x0000 }, { 0x09DA, 0x0013 },
+ { 0x1D32, 0x0002 },
+};
+
+enum rtl8365mb_stp_state {
+ RTL8365MB_STP_STATE_DISABLED = 0,
+ RTL8365MB_STP_STATE_BLOCKING = 1,
+ RTL8365MB_STP_STATE_LEARNING = 2,
+ RTL8365MB_STP_STATE_FORWARDING = 3,
+};
+
+enum rtl8365mb_cpu_insert {
+ RTL8365MB_CPU_INSERT_TO_ALL = 0,
+ RTL8365MB_CPU_INSERT_TO_TRAPPING = 1,
+ RTL8365MB_CPU_INSERT_TO_NONE = 2,
+};
+
+enum rtl8365mb_cpu_position {
+ RTL8365MB_CPU_POS_AFTER_SA = 0,
+ RTL8365MB_CPU_POS_BEFORE_CRC = 1,
+};
+
+enum rtl8365mb_cpu_format {
+ RTL8365MB_CPU_FORMAT_8BYTES = 0,
+ RTL8365MB_CPU_FORMAT_4BYTES = 1,
+};
+
+enum rtl8365mb_cpu_rxlen {
+ RTL8365MB_CPU_RXLEN_72BYTES = 0,
+ RTL8365MB_CPU_RXLEN_64BYTES = 1,
+};
+
+/**
+ * struct rtl8365mb_cpu - CPU port configuration
+ * @enable: enable/disable hardware insertion of CPU tag in switch->CPU frames
+ * @mask: port mask of ports that should parse CPU tags
+ * @trap_port: forward trapped frames to this port
+ * @insert: CPU tag insertion mode in switch->CPU frames
+ * @position: position of CPU tag in frame
+ * @rx_length: minimum CPU RX length
+ * @format: CPU tag format
+ *
+ * Represents the CPU tagging and CPU port configuration of the switch. These
+ * settings are configurable at runtime.
+ */
+struct rtl8365mb_cpu {
+ bool enable;
+ u32 mask;
+ u32 trap_port;
+ enum rtl8365mb_cpu_insert insert;
+ enum rtl8365mb_cpu_position position;
+ enum rtl8365mb_cpu_rxlen rx_length;
+ enum rtl8365mb_cpu_format format;
+};
+
+/**
+ * struct rtl8365mb_port - private per-port data
+ * @smi: pointer to parent realtek_smi data
+ * @index: DSA port index, same as dsa_port::index
+ * @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
+ * access via rtl8365mb_get_stats64
+ * @stats_lock: protect the stats structure during read/update
+ * @mib_work: delayed work for polling MIB counters
+ */
+struct rtl8365mb_port {
+ struct realtek_smi *smi;
+ unsigned int index;
+ struct rtnl_link_stats64 stats;
+ spinlock_t stats_lock;
+ struct delayed_work mib_work;
+};
+
+/**
+ * struct rtl8365mb - private chip-specific driver data
+ * @smi: pointer to parent realtek_smi data
+ * @irq: registered IRQ or zero
+ * @chip_id: chip identifier
+ * @chip_ver: chip silicon revision
+ * @port_mask: mask of all ports
+ * @learn_limit_max: maximum number of L2 addresses the chip can learn
+ * @cpu: CPU tagging and CPU port configuration for this chip
+ * @mib_lock: prevent concurrent reads of MIB counters
+ * @ports: per-port data
+ * @jam_table: chip-specific initialization jam table
+ * @jam_size: size of the chip's jam table
+ *
+ * Private data for this driver.
+ */
+struct rtl8365mb {
+ struct realtek_smi *smi;
+ int irq;
+ u32 chip_id;
+ u32 chip_ver;
+ u32 port_mask;
+ u32 learn_limit_max;
+ struct rtl8365mb_cpu cpu;
+ struct mutex mib_lock;
+ struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
+ const struct rtl8365mb_jam_tbl_entry *jam_table;
+ size_t jam_size;
+};
+
+static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(smi->map,
+ RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
+ val, !val, 10, 100);
+}
+
+static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+ u32 ocp_addr)
+{
+ u32 val;
+ int ret;
+
+ /* Set OCP prefix */
+ val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
+ FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
+ if (ret)
+ return ret;
+
+ /* Set PHY register address */
+ val = RTL8365MB_PHY_BASE;
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK, phy);
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK,
+ ocp_addr >> 1);
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
+ ocp_addr >> 6);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
+ u32 ocp_addr, u16 *data)
+{
+ u32 val;
+ int ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ if (ret)
+ return ret;
+
+ /* Execute read operation */
+ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ /* Get PHY register data */
+ ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
+ &val);
+ if (ret)
+ return ret;
+
+ *data = val & 0xFFFF;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+ u32 ocp_addr, u16 data)
+{
+ u32 val;
+ int ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ if (ret)
+ return ret;
+
+ /* Set PHY register data */
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
+ data);
+ if (ret)
+ return ret;
+
+ /* Execute write operation */
+ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+{
+ u32 ocp_addr;
+ u16 val;
+ int ret;
+
+ if (regnum > RTL8365MB_PHYREGMAX)
+ return -EINVAL;
+
+ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
+
+ ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
+ regnum, ocp_addr, ret);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
+ phy, regnum, ocp_addr, val);
+
+ return val;
+}
+
+static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+ u16 val)
+{
+ u32 ocp_addr;
+ int ret;
+
+ if (regnum > RTL8365MB_PHYREGMAX)
+ return -EINVAL;
+
+ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
+
+ ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
+ regnum, ocp_addr, ret);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
+ phy, regnum, ocp_addr, val);
+
+ return 0;
+}
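/* Mapping sketch for the two wrappers above: standard MII registers
 * live in OCP space at RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + 2 * regnum
 * (16-bit registers at a 2-byte stride). Assuming the conventional
 * 0xa400 base (an assumption, the definition is not in this hunk),
 * MII register 1 (BMSR) maps to OCP address 0xa402, which the OCP
 * helpers then split across the indirect access registers.
 */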
+
+static enum dsa_tag_protocol
+rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_RTL8_4;
+}
+
+static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
+ phy_interface_t interface)
+{
+ struct device_node *dn;
+ struct dsa_port *dp;
+ int tx_delay = 0;
+ int rx_delay = 0;
+ int ext_port;
+ u32 val;
+ int ret;
+
+ if (port == smi->cpu_port) {
+ ext_port = 1;
+ } else {
+ dev_err(smi->dev, "only one EXT port is currently supported\n");
+ return -EINVAL;
+ }
+
+ dp = dsa_to_port(smi->ds, port);
+ dn = dp->dn;
+
+ /* Set the RGMII TX/RX delay
+ *
+ * The Realtek vendor driver indicates the following possible
+ * configuration settings:
+ *
+ * TX delay:
+ * 0 = no delay, 1 = 2 ns delay
+ * RX delay:
+ * 0 = no delay, 7 = maximum delay
+ * No units are specified, but there are a total of 8 steps.
+ *
+ * The vendor driver also states that this must be configured *before*
+ * forcing the external interface into a particular mode, which is done
+ * in the rtl8365mb_phylink_mac_link_{up,down} functions.
+ *
+ * Only configure an RGMII TX (resp. RX) delay if the
+ * tx-internal-delay-ps (resp. rx-internal-delay-ps) OF property is
+ * specified. We ignore the detail of the RGMII interface mode
+ * (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
+ * property.
+ *
+ * For the RX delay, we assume that a register value of 4 corresponds to
+ * 2 ns. But this is just an educated guess, so ignore all other values
+ * to avoid too much confusion.
+ */
+ if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
+ val = val / 1000; /* convert to ns */
+
+ if (val == 0 || val == 2)
+ tx_delay = val / 2;
+ else
+ dev_warn(smi->dev,
+ "EXT port TX delay must be 0 or 2 ns\n");
+ }
+
+ if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
+ val = val / 1000; /* convert to ns */
+
+ if (val == 0 || val == 2)
+ rx_delay = val * 2;
+ else
+ dev_warn(smi->dev,
+				 "EXT port RX delay must be 0 or 2 ns\n");
+ }
+
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
+ RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
+ RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
+ FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
+ FIELD_PREP(RTL8365MB_EXT_RGMXF_RXDELAY_MASK, rx_delay));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
+ RTL8365MB_EXT_PORT_MODE_RGMII
+ << RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
+ ext_port));
+ if (ret)
+ return ret;
+
+ return 0;
+}
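/* Worked example for the conversion above (property values assumed,
 * not from a real board): with
 *
 *     tx-internal-delay-ps = <2000>;
 *     rx-internal-delay-ps = <2000>;
 *
 * in the EXT port's OF node, the code computes
 *
 *     tx_delay = (2000 / 1000) / 2 = 1   (the "2 ns" TX setting)
 *     rx_delay = (2000 / 1000) * 2 = 4   (step 4, the educated 2 ns guess)
 *
 * Values other than 0 or 2000 ps only trigger the warning and leave
 * the delay at 0.
 */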
+
+static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
+ bool link, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ u32 r_tx_pause;
+ u32 r_rx_pause;
+ u32 r_duplex;
+ u32 r_speed;
+ u32 r_link;
+ int ext_port;
+ int val;
+ int ret;
+
+ if (port == smi->cpu_port) {
+ ext_port = 1;
+ } else {
+ dev_err(smi->dev, "only one EXT port is currently supported\n");
+ return -EINVAL;
+ }
+
+ if (link) {
+ /* Force the link up with the desired configuration */
+ r_link = 1;
+ r_rx_pause = rx_pause ? 1 : 0;
+ r_tx_pause = tx_pause ? 1 : 0;
+
+ if (speed == SPEED_1000) {
+ r_speed = RTL8365MB_PORT_SPEED_1000M;
+ } else if (speed == SPEED_100) {
+ r_speed = RTL8365MB_PORT_SPEED_100M;
+ } else if (speed == SPEED_10) {
+ r_speed = RTL8365MB_PORT_SPEED_10M;
+ } else {
+ dev_err(smi->dev, "unsupported port speed %s\n",
+ phy_speed_to_str(speed));
+ return -EINVAL;
+ }
+
+ if (duplex == DUPLEX_FULL) {
+ r_duplex = 1;
+ } else if (duplex == DUPLEX_HALF) {
+ r_duplex = 0;
+ } else {
+ dev_err(smi->dev, "unsupported duplex %s\n",
+ phy_duplex_to_str(duplex));
+ return -EINVAL;
+ }
+ } else {
+ /* Force the link down and reset any programmed configuration */
+ r_link = 0;
+ r_tx_pause = 0;
+ r_rx_pause = 0;
+ r_speed = 0;
+ r_duplex = 0;
+ }
+
+ val = FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK, 1) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK,
+ r_tx_pause) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK,
+ r_rx_pause) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK, r_link) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
+ r_duplex) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
+ ret = regmap_write(smi->map,
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ if (dsa_is_user_port(ds, port) &&
+ (interface == PHY_INTERFACE_MODE_NA ||
+ interface == PHY_INTERFACE_MODE_INTERNAL))
+ /* Internal PHY */
+ return true;
+ else if (dsa_is_cpu_port(ds, port) &&
+ phy_interface_mode_is_rgmii(interface))
+ /* Extension MAC */
+ return true;
+
+ return false;
+}
+
+static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct realtek_smi *smi = ds->priv;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
+
+ /* include/linux/phylink.h says:
+ * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
+ * expects the MAC driver to return all supported link modes.
+ */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
+ dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
+ phy_modes(state->interface), port);
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+
+static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
+ dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
+ phy_modes(state->interface), port);
+ return;
+ }
+
+ if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
+ dev_err(smi->dev,
+ "port %d supports only conventional PHY or fixed-link\n",
+ port);
+ return;
+ }
+
+ if (phy_interface_mode_is_rgmii(state->interface)) {
+ ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to configure RGMII mode on port %d: %d\n",
+ port, ret);
+ return;
+ }
+
+ /* TODO: Implement MII and RMII modes, which the RTL8365MB-VC also
+ * supports
+ */
+}
+
+static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+ int ret;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+ cancel_delayed_work_sync(&p->mib_work);
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
+ false, false);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to reset forced mode on port %d: %d\n",
+ port, ret);
+
+ return;
+ }
+}
+
+static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+ int ret;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+ schedule_delayed_work(&p->mib_work, 0);
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
+ duplex, tx_pause,
+ rx_pause);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to force mode on port %d: %d\n", port,
+ ret);
+
+ return;
+ }
+}
+
+static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct realtek_smi *smi = ds->priv;
+ enum rtl8365mb_stp_state val;
+ int msti = 0;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ val = RTL8365MB_STP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = RTL8365MB_STP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ val = RTL8365MB_STP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = RTL8365MB_STP_STATE_FORWARDING;
+ break;
+ default:
+ dev_err(smi->dev, "invalid STP state: %u\n", state);
+ return;
+ }
+
+ regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
+ RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
+ val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
+}
+
+static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
+ bool enable)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+
+ /* Enable/disable learning by limiting the number of L2 addresses the
+ * port can learn. Realtek documentation states that a limit of zero
+ * disables learning. When enabling learning, set it to the chip's
+ * maximum.
+ */
+ return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
+ enable ? mb->learn_limit_max : 0);
+}
+
+static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
+ u32 mask)
+{
+ return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
+}
+
+static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
+ u32 offset, u32 length, u64 *mibvalue)
+{
+ u64 tmpvalue = 0;
+ u32 val;
+ int ret;
+ int i;
+
+ /* The MIB address is an SRAM address. We request a particular address
+ * and then poll the control register before reading the value from some
+ * counter registers.
+ */
+ ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
+ RTL8365MB_MIB_ADDRESS(port, offset));
+ if (ret)
+ return ret;
+
+ /* Poll for completion */
+ ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
+ !(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
+ 10, 100);
+ if (ret)
+ return ret;
+
+ /* Presumably this indicates a MIB counter read failure */
+ if (val & RTL8365MB_MIB_CTRL0_RESET_MASK)
+ return -EIO;
+
+ /* There are four MIB counter registers each holding a 16 bit word of a
+ * MIB counter. Depending on the offset, we should read from the upper
+ * two or lower two registers. In case the MIB counter is 4 words, we
+ * read from all four registers.
+ */
+ if (length == 4)
+ offset = 3;
+ else
+ offset = (offset + 1) % 4;
+
+ /* Read the MIB counter 16 bits at a time */
+ for (i = 0; i < length; i++) {
+ ret = regmap_read(smi->map,
+ RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
+ if (ret)
+ return ret;
+
+ tmpvalue = ((tmpvalue) << 16) | (val & 0xFFFF);
+ }
+
+ /* Only commit the result if no error occurred */
+ *mibvalue = tmpvalue;
+
+ return 0;
+}
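/* Example of the register selection above: a 2-word counter at offset
 * 0 gets offset = (0 + 1) % 4 = 1, so the loop reads counter registers
 * 1 then 0 (high word first). A 4-word counter forces offset = 3 and
 * reads registers 3, 2, 1, 0, shifting 16 bits into the 64-bit
 * accumulator on each iteration.
 */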
+
+static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ mib->length, &data[i]);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to read port %d counters: %d\n", port,
+ ret);
+ break;
+ }
+ }
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN);
+ }
+}
+
+static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return RTL8365MB_MIB_END;
+}
+
+static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_mib_counter *mib;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
+
+ mutex_lock(&mb->mib_lock);
+ rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ &phy_stats->SymbolErrorDuringCarrier);
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ u64 cnt[RTL8365MB_MIB_END] = {
+ [RTL8365MB_MIB_ifOutOctets] = 1,
+ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
+ [RTL8365MB_MIB_dot3OutPauseFrames] = 1,
+ [RTL8365MB_MIB_ifOutDiscards] = 1,
+ [RTL8365MB_MIB_ifInOctets] = 1,
+ [RTL8365MB_MIB_ifInUcastPkts] = 1,
+ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
+ [RTL8365MB_MIB_dot3InPauseFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsSingleCollisionFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsMultipleCollisionFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
+ [RTL8365MB_MIB_dot3StatsDeferredTransmissions] = 1,
+ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
+ [RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
+
+ };
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ /* Only fetch required MIB counters (marked = 1 above) */
+ if (!cnt[i])
+ continue;
+
+ ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ mib->length, &cnt[i]);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&mb->mib_lock);
+
+ /* The RTL8365MB-VC exposes MIB objects, which we have to translate into
+ * IEEE 802.3 Managed Objects. This is not always completely faithful,
+	 * but we try our best. See RFC 3635 for a detailed treatment of the
+ * subject.
+ */
+
+ mac_stats->FramesTransmittedOK = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts] +
+ cnt[RTL8365MB_MIB_dot3OutPauseFrames] -
+ cnt[RTL8365MB_MIB_ifOutDiscards];
+ mac_stats->SingleCollisionFrames =
+ cnt[RTL8365MB_MIB_dot3StatsSingleCollisionFrames];
+ mac_stats->MultipleCollisionFrames =
+ cnt[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames];
+ mac_stats->FramesReceivedOK = cnt[RTL8365MB_MIB_ifInUcastPkts] +
+ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts] +
+ cnt[RTL8365MB_MIB_dot3InPauseFrames];
+ mac_stats->FrameCheckSequenceErrors =
+ cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
+ mac_stats->OctetsTransmittedOK = cnt[RTL8365MB_MIB_ifOutOctets] -
+ 18 * mac_stats->FramesTransmittedOK;
+ mac_stats->FramesWithDeferredXmissions =
+ cnt[RTL8365MB_MIB_dot3StatsDeferredTransmissions];
+ mac_stats->LateCollisions = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
+ mac_stats->FramesAbortedDueToXSColls =
+ cnt[RTL8365MB_MIB_dot3StatsExcessiveCollisions];
+ mac_stats->OctetsReceivedOK = cnt[RTL8365MB_MIB_ifInOctets] -
+ 18 * mac_stats->FramesReceivedOK;
+ mac_stats->MulticastFramesXmittedOK =
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts];
+ mac_stats->BroadcastFramesXmittedOK =
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
+ mac_stats->MulticastFramesReceivedOK =
+ cnt[RTL8365MB_MIB_ifInMulticastPkts];
+ mac_stats->BroadcastFramesReceivedOK =
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts];
+}
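/* A note on the 18-byte correction above: IEEE 802.3's
 * aOctetsTransmittedOK counts only data and padding octets, while the
 * chip's ifOutOctets counts whole frames. Per frame the difference is
 *
 *     6 (DA) + 6 (SA) + 2 (EtherType/length) + 4 (FCS) = 18 bytes,
 *
 * hence OctetsTransmittedOK = ifOutOctets - 18 * FramesTransmittedOK,
 * and symmetrically for the receive direction.
 */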
+
+static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_mib_counter *mib;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
+
+ mutex_lock(&mb->mib_lock);
+ rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ &ctrl_stats->UnsupportedOpcodesReceived);
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
+{
+ u64 cnt[RTL8365MB_MIB_END] = {
+ [RTL8365MB_MIB_ifOutOctets] = 1,
+ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutDiscards] = 1,
+ [RTL8365MB_MIB_ifInOctets] = 1,
+ [RTL8365MB_MIB_ifInUcastPkts] = 1,
+ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
+ [RTL8365MB_MIB_etherStatsDropEvents] = 1,
+ [RTL8365MB_MIB_etherStatsCollisions] = 1,
+ [RTL8365MB_MIB_etherStatsFragments] = 1,
+ [RTL8365MB_MIB_etherStatsJabbers] = 1,
+ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
+ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
+ };
+ struct rtl8365mb *mb = smi->chip_data;
+ struct rtnl_link_stats64 *stats;
+ int ret;
+ int i;
+
+ stats = &mb->ports[port].stats;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *c = &rtl8365mb_mib_counters[i];
+
+ /* Only fetch required MIB counters (marked = 1 above) */
+ if (!cnt[i])
+ continue;
+
+ ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
+ c->length, &cnt[i]);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&mb->mib_lock);
+
+ /* Don't update statistics if there was an error reading the counters */
+ if (ret)
+ return;
+
+ spin_lock(&mb->ports[port].stats_lock);
+
+ stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] +
+ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts] -
+ cnt[RTL8365MB_MIB_ifOutDiscards];
+
+ stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
+
+ /* if{In,Out}Octets includes FCS - remove it */
+ stats->rx_bytes = cnt[RTL8365MB_MIB_ifInOctets] - 4 * stats->rx_packets;
+ stats->tx_bytes =
+ cnt[RTL8365MB_MIB_ifOutOctets] - 4 * stats->tx_packets;
+
+ stats->rx_dropped = cnt[RTL8365MB_MIB_etherStatsDropEvents];
+ stats->tx_dropped = cnt[RTL8365MB_MIB_ifOutDiscards];
+
+ stats->multicast = cnt[RTL8365MB_MIB_ifInMulticastPkts];
+ stats->collisions = cnt[RTL8365MB_MIB_etherStatsCollisions];
+
+ stats->rx_length_errors = cnt[RTL8365MB_MIB_etherStatsFragments] +
+ cnt[RTL8365MB_MIB_etherStatsJabbers];
+ stats->rx_crc_errors = cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors;
+
+ stats->tx_aborted_errors = cnt[RTL8365MB_MIB_ifOutDiscards];
+ stats->tx_window_errors = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_window_errors;
+
+ spin_unlock(&mb->ports[port].stats_lock);
+}
+
+static void rtl8365mb_stats_poll(struct work_struct *work)
+{
+ struct rtl8365mb_port *p = container_of(to_delayed_work(work),
+ struct rtl8365mb_port,
+ mib_work);
+ struct realtek_smi *smi = p->smi;
+
+ rtl8365mb_stats_update(smi, p->index);
+
+ schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
+}
+
+static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(s, &p->stats, sizeof(*s));
+ spin_unlock(&p->stats_lock);
+}
+
+static void rtl8365mb_stats_setup(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int i;
+
+ /* Per-chip global mutex to protect MIB counter access, since doing
+ * so requires accessing a series of registers in a particular order.
+ */
+ mutex_init(&mb->mib_lock);
+
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ /* Per-port spinlock to protect the stats64 data */
+ spin_lock_init(&p->stats_lock);
+
+ /* This work polls the MIB counters and keeps the stats64 data
+ * up-to-date.
+ */
+ INIT_DELAYED_WORK(&p->mib_work, rtl8365mb_stats_poll);
+ }
+}
+
+static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int i;
+
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ cancel_delayed_work_sync(&p->mib_work);
+ }
+}
+
+static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
+ u32 *val)
+{
+ int ret;
+
+ ret = regmap_read(smi->map, reg, val);
+ if (ret)
+ return ret;
+
+ return regmap_write(smi->map, reg, *val);
+}
+
+static irqreturn_t rtl8365mb_irq(int irq, void *data)
+{
+ struct realtek_smi *smi = data;
+ unsigned long line_changes = 0;
+ struct rtl8365mb *mb;
+ u32 stat;
+ int line;
+ int ret;
+
+ mb = smi->chip_data;
+
+ ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
+ &stat);
+ if (ret)
+ goto out_error;
+
+ if (stat & RTL8365MB_INTR_LINK_CHANGE_MASK) {
+ u32 linkdown_ind;
+ u32 linkup_ind;
+ u32 val;
+
+ ret = rtl8365mb_get_and_clear_status_reg(
+ smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
+ if (ret)
+ goto out_error;
+
+ linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
+
+ ret = rtl8365mb_get_and_clear_status_reg(
+ smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
+ if (ret)
+ goto out_error;
+
+ linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
+
+ line_changes = (linkup_ind | linkdown_ind) & mb->port_mask;
+ }
+
+ if (!line_changes)
+ goto out_none;
+
+ for_each_set_bit(line, &line_changes, smi->num_ports) {
+ int child_irq = irq_find_mapping(smi->irqdomain, line);
+
+ handle_nested_irq(child_irq);
+ }
+
+ return IRQ_HANDLED;
+
+out_error:
+ dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
+
+out_none:
+ return IRQ_NONE;
+}
+
+static struct irq_chip rtl8365mb_irq_chip = {
+ .name = "rtl8365mb",
+ /* The hardware doesn't support masking IRQs on a per-port basis */
+};
+
+static int rtl8365mb_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &rtl8365mb_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void rtl8365mb_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
+ .map = rtl8365mb_irq_map,
+ .unmap = rtl8365mb_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
+{
+ return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
+ RTL8365MB_INTR_LINK_CHANGE_MASK,
+ FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
+ enable ? 1 : 0));
+}
+
+static int rtl8365mb_irq_enable(struct realtek_smi *smi)
+{
+ return rtl8365mb_set_irq_enable(smi, true);
+}
+
+static int rtl8365mb_irq_disable(struct realtek_smi *smi)
+{
+ return rtl8365mb_set_irq_enable(smi, false);
+}
+
+static int rtl8365mb_irq_setup(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ struct device_node *intc;
+ u32 irq_trig;
+ int virq;
+ int irq;
+ u32 val;
+ int ret;
+ int i;
+
+ intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ if (!intc) {
+ dev_err(smi->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ /* rtl8365mb IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(smi->dev, "failed to get parent irq: %d\n",
+ irq);
+ ret = irq ? irq : -EINVAL;
+ goto out_put_node;
+ }
+
+ smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
+ &rtl8365mb_irqdomain_ops, smi);
+ if (!smi->irqdomain) {
+ dev_err(smi->dev, "failed to add irq domain\n");
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_create_mapping(smi->irqdomain, i);
+ if (!virq) {
+ dev_err(smi->dev,
+ "failed to create irq domain mapping\n");
+ ret = -EINVAL;
+ goto out_remove_irqdomain;
+ }
+
+ irq_set_parent(virq, irq);
+ }
+
+ /* Configure chip interrupt signal polarity */
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_HIGH:
+ val = RTL8365MB_INTR_POLARITY_HIGH;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
+ val = RTL8365MB_INTR_POLARITY_LOW;
+ break;
+ default:
+ dev_err(smi->dev, "unsupported irq trigger type %u\n",
+ irq_trig);
+ ret = -EINVAL;
+ goto out_remove_irqdomain;
+ }
+
+ ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
+ RTL8365MB_INTR_POLARITY_MASK,
+ FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
+ if (ret)
+ goto out_remove_irqdomain;
+
+ /* Disable the interrupt in case the chip has it enabled on reset */
+ ret = rtl8365mb_irq_disable(smi);
+ if (ret)
+ goto out_remove_irqdomain;
+
+ /* Clear the interrupt status register */
+ ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
+ RTL8365MB_INTR_ALL_MASK);
+ if (ret)
+ goto out_remove_irqdomain;
+
+ ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
+ "rtl8365mb", smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to request irq: %d\n", ret);
+ goto out_remove_irqdomain;
+ }
+
+ /* Store the irq so that we know to free it during teardown */
+ mb->irq = irq;
+
+ ret = rtl8365mb_irq_enable(smi);
+ if (ret)
+ goto out_free_irq;
+
+ of_node_put(intc);
+
+ return 0;
+
+out_free_irq:
+ free_irq(mb->irq, smi);
+ mb->irq = 0;
+
+out_remove_irqdomain:
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_find_mapping(smi->irqdomain, i);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(smi->irqdomain);
+ smi->irqdomain = NULL;
+
+out_put_node:
+ of_node_put(intc);
+
+ return ret;
+}
+
+static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int virq;
+ int i;
+
+ if (mb->irq) {
+ free_irq(mb->irq, smi);
+ mb->irq = 0;
+ }
+
+ if (smi->irqdomain) {
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_find_mapping(smi->irqdomain, i);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(smi->irqdomain);
+ smi->irqdomain = NULL;
+ }
+}
+
+static int rtl8365mb_cpu_config(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb_cpu *cpu = &mb->cpu;
+ u32 val;
+ int ret;
+
+ ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
+ RTL8365MB_CPU_PORT_MASK_MASK,
+ FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
+ cpu->mask));
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
+ cpu->trap_port >> 3);
+ ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
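/* Sketch of the trap port encoding above, assuming the base TRAP_PORT
 * field is 3 bits wide (which is what the EXT companion field
 * suggests): trap_port = 6 encodes as base 6 with EXT = 6 >> 3 = 0,
 * while a hypothetical port 9 would encode as base 9 & 7 = 1 with
 * EXT = 1.
 */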
+
+static int rtl8365mb_switch_init(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int ret;
+ int i;
+
+ /* Do any chip-specific init jam before getting to the common stuff */
+ if (mb->jam_table) {
+ for (i = 0; i < mb->jam_size; i++) {
+ ret = regmap_write(smi->map, mb->jam_table[i].reg,
+ mb->jam_table[i].val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Common init jam */
+ for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
+ ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
+ rtl8365mb_init_jam_common[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtl8365mb_reset_chip(struct realtek_smi *smi)
+{
+ u32 val;
+
+ realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
+ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
+ 1));
+
+ /* Realtek documentation says the chip needs 1 second to reset. Sleep
+ * for 100 ms before accessing any registers to prevent ACK timeouts.
+ */
+ msleep(100);
+ return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
+ !(val & RTL8365MB_CHIP_RESET_HW_MASK),
+ 20000, 1e6);
+}
+
+static int rtl8365mb_setup(struct dsa_switch *ds)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ ret = rtl8365mb_reset_chip(smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to reset chip: %d\n", ret);
+ goto out_error;
+ }
+
+ /* Configure switch to vendor-defined initial state */
+ ret = rtl8365mb_switch_init(smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
+ goto out_error;
+ }
+
+ /* Set up cascading IRQs */
+ ret = rtl8365mb_irq_setup(smi);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ else if (ret)
+ dev_info(smi->dev, "no interrupt support\n");
+
+ /* Configure CPU tagging */
+ ret = rtl8365mb_cpu_config(smi);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Configure ports */
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ /* Set up per-port private data */
+ p->smi = smi;
+ p->index = i;
+
+ /* Forward only to the CPU */
+ ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Disable learning */
+ ret = rtl8365mb_port_set_learning(smi, i, false);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Set the initial STP state of all ports to DISABLED, otherwise
+ * ports will still forward frames to the CPU despite being
+ * administratively down by default.
+ */
+ rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
+ }
+
+ /* Set maximum packet length to 1536 bytes */
+ ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
+ RTL8365MB_CFG0_MAX_LEN_MASK,
+ FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
+ if (ret)
+ goto out_teardown_irq;
+
+ ret = realtek_smi_setup_mdio(smi);
+ if (ret) {
+ dev_err(smi->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
+ }
+
+ /* Start statistics counter polling */
+ rtl8365mb_stats_setup(smi);
+
+ return 0;
+
+out_teardown_irq:
+ rtl8365mb_irq_teardown(smi);
+
+out_error:
+ return ret;
+}
+
+static void rtl8365mb_teardown(struct dsa_switch *ds)
+{
+ struct realtek_smi *smi = ds->priv;
+
+ rtl8365mb_stats_teardown(smi);
+ rtl8365mb_irq_teardown(smi);
+}
+
+static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
+{
+ int ret;
+
+ /* For some reason we have to write a magic value to an arbitrary
+ * register whenever accessing the chip ID/version registers.
+ */
+ ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver);
+ if (ret)
+ return ret;
+
+ /* Reset magic register */
+ ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_detect(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ u32 chip_id;
+ u32 chip_ver;
+ int ret;
+
+ ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
+ if (ret) {
+ dev_err(smi->dev, "failed to read chip id and version: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (chip_id) {
+ case RTL8365MB_CHIP_ID_8365MB_VC:
+ dev_info(smi->dev,
+ "found an RTL8365MB-VC switch (ver=0x%04x)\n",
+ chip_ver);
+
+ smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
+ smi->num_ports = smi->cpu_port + 1;
+
+ mb->smi = smi;
+ mb->chip_id = chip_id;
+ mb->chip_ver = chip_ver;
+ mb->port_mask = BIT(smi->num_ports) - 1;
+ mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
+ mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
+ mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
+
+ mb->cpu.enable = 1;
+ mb->cpu.mask = BIT(smi->cpu_port);
+ mb->cpu.trap_port = smi->cpu_port;
+ mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
+ mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
+ mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
+ mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+
+ break;
+ default:
+ dev_err(smi->dev,
+ "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
+ chip_id, chip_ver);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops rtl8365mb_switch_ops = {
+ .get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .setup = rtl8365mb_setup,
+ .teardown = rtl8365mb_teardown,
+ .phylink_validate = rtl8365mb_phylink_validate,
+ .phylink_mac_config = rtl8365mb_phylink_mac_config,
+ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .port_stp_state_set = rtl8365mb_port_stp_state_set,
+ .get_strings = rtl8365mb_get_strings,
+ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
+ .get_sset_count = rtl8365mb_get_sset_count,
+ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
+ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
+ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
+ .get_stats64 = rtl8365mb_get_stats64,
+};
+
+static const struct realtek_smi_ops rtl8365mb_smi_ops = {
+ .detect = rtl8365mb_detect,
+ .phy_read = rtl8365mb_phy_read,
+ .phy_write = rtl8365mb_phy_write,
+};
+
+const struct realtek_smi_variant rtl8365mb_variant = {
+ .ds_ops = &rtl8365mb_switch_ops,
+ .ops = &rtl8365mb_smi_ops,
+ .clk_delay = 10,
+ .cmd_read = 0xb9,
+ .cmd_write = 0xb8,
+ .chip_data_sz = sizeof(struct rtl8365mb),
+};
+EXPORT_SYMBOL_GPL(rtl8365mb_variant);
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 75897a369096..bdb8d8d34880 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -292,89 +292,6 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)
}
EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
-int rtl8366_init_vlan(struct realtek_smi *smi)
-{
- int port;
- int ret;
-
- ret = rtl8366_reset_vlan(smi);
- if (ret)
- return ret;
-
- /* Loop over the available ports, for each port, associate
- * it with the VLAN (port+1)
- */
- for (port = 0; port < smi->num_ports; port++) {
- u32 mask;
-
- if (port == smi->cpu_port)
- /* For the CPU port, make all ports members of this
- * VLAN.
- */
- mask = GENMASK((int)smi->num_ports - 1, 0);
- else
- /* For all other ports, enable itself plus the
- * CPU port.
- */
- mask = BIT(port) | BIT(smi->cpu_port);
-
- /* For each port, set the port as member of VLAN (port+1)
- * and untagged, except for the CPU port: the CPU port (5) is
- * member of VLAN 6 and so are ALL the other ports as well.
- * Use filter 0 (no filter).
- */
- dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n",
- (port + 1), port, mask);
- ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0);
- if (ret)
- return ret;
-
- dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n",
- (port + 1), port, (port + 1));
- ret = rtl8366_set_pvid(smi, port, (port + 1));
- if (ret)
- return ret;
- }
-
- return rtl8366_enable_vlan(smi, true);
-}
-EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
-
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct realtek_smi *smi = ds->priv;
- struct rtl8366_vlan_4k vlan4k;
- int ret;
-
- /* Use VLAN nr port + 1 since VLAN0 is not valid */
- if (!smi->ops->is_vlan_valid(smi, port + 1))
- return -EINVAL;
-
- dev_info(smi->dev, "%s filtering on port %d\n",
- vlan_filtering ? "enable" : "disable",
- port);
-
- /* TODO:
- * The hardware support filter ID (FID) 0..7, I have no clue how to
- * support this in the driver when the callback only says on/off.
- */
- ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k);
- if (ret)
- return ret;
-
- /* Just set the filter to FID 1 for now then */
- ret = rtl8366_set_vlan(smi, port + 1,
- vlan4k.member,
- vlan4k.untag,
- 1);
- if (ret)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
-
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
@@ -401,12 +318,9 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
return ret;
}
- dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
- vlan->vid, port, untagged ? "untagged" : "tagged",
- pvid ? " PVID" : "no PVID");
-
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
- dev_err(smi->dev, "port is DSA or CPU port\n");
+ dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
+ pvid ? "PVID" : "no PVID");
member |= BIT(port);
@@ -439,7 +353,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
struct realtek_smi *smi = ds->priv;
int ret, i;
- dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port);
+ dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
for (i = 0; i < smi->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
@@ -457,7 +371,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
* anymore then clear the whole member
* config so it can be reused.
*/
- if (!vlanmc.member && vlanmc.untag) {
+ if (!vlanmc.member) {
vlanmc.vid = 0;
vlanmc.priority = 0;
vlanmc.fid = 0;
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index a89093bc6c6a..03deacd83e61 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -14,6 +14,7 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
@@ -42,9 +43,12 @@
/* Port Enable Control register */
#define RTL8366RB_PECR 0x0001
-/* Switch Security Control registers */
-#define RTL8366RB_SSCR0 0x0002
-#define RTL8366RB_SSCR1 0x0003
+/* Switch per-port learning disablement register */
+#define RTL8366RB_PORT_LEARNDIS_CTRL 0x0002
+
+/* Security control, actually aging register */
+#define RTL8366RB_SECURITY_CTRL 0x0003
+
#define RTL8366RB_SSCR2 0x0004
#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
@@ -106,6 +110,18 @@
#define RTL8366RB_POWER_SAVING_REG 0x0021
+/* Spanning tree status (STP) control, two bits per port per FID */
+#define RTL8366RB_STP_STATE_BASE 0x0050 /* 0x0050..0x0057 */
+#define RTL8366RB_STP_STATE_DISABLED 0x0
+#define RTL8366RB_STP_STATE_BLOCKING 0x1
+#define RTL8366RB_STP_STATE_LEARNING 0x2
+#define RTL8366RB_STP_STATE_FORWARDING 0x3
+#define RTL8366RB_STP_MASK GENMASK(1, 0)
+#define RTL8366RB_STP_STATE(port, state) \
+ ((state) << ((port) * 2))
+#define RTL8366RB_STP_STATE_MASK(port) \
+ RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
+
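/* Packing example: each STP_STATE register holds 2 bits per port, so
 * for port 3 entering FORWARDING (0x3) the macros above yield
 *
 *     RTL8366RB_STP_STATE_MASK(3) = 0x3 << 6 = 0xc0 (bits 7:6)
 *     RTL8366RB_STP_STATE(3, 0x3) = 0xc0
 *
 * which regmap_update_bits() applies to the register for each FID.
 */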
/* CPU port control reg */
#define RTL8368RB_CPU_CTRL_REG 0x0061
#define RTL8368RB_CPU_PORTS_MSK 0x00FF
@@ -143,6 +159,21 @@
#define RTL8366RB_PHY_NO_OFFSET 9
#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
+/* VLAN Ingress Control Register 1, one bit per port.
+ * bit 0 .. 5 will make the switch drop ingress frames without a
+ * VID, such as untagged or priority-tagged frames, for the
+ * respective port.
+ * bit 6 .. 11 will make the switch drop ingress frames carrying
+ * a C-tag with VID != 0 for the respective port.
+ */
+#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E
+#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6))
+
+/* VLAN Ingress Control Register 2, one bit per port.
+ * bit 0 .. 5 will make the switch drop all ingress frames with a
+ * VLAN classification whose member set does not include the
+ * respective port.
+ */
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
/* LED control registers */
@@ -215,6 +246,7 @@
#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
+#define RTL8366RB_NUM_FIDS 8
#define RTL8366RB_FIDMAX 7
#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
@@ -300,6 +332,13 @@
#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
+/* Port isolation registers */
+#define RTL8366RB_PORT_ISO_BASE 0x0F08
+#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum))
+#define RTL8366RB_PORT_ISO_EN BIT(0)
+#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1)
+#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1)
+
/* bits 0..5 enable force when cleared */
#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
@@ -314,9 +353,11 @@
/**
* struct rtl8366rb - RTL8366RB-specific data
* @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
*/
struct rtl8366rb {
unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
};
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
@@ -835,6 +876,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
+	/* Isolate all user ports so they can only send packets to themselves and the CPU port */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
+ RTL8366RB_PORT_ISO_EN);
+ if (ret)
+ return ret;
+ }
+ /* CPU port can send packets to all ports */
+ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
+ RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
+ RTL8366RB_PORT_ISO_EN);
+ if (ret)
+ return ret;
+
/* Set up the "green ethernet" feature */
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
@@ -888,13 +944,14 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
/* layer 2 size, see rtl8366rb_change_mtu() */
rb->max_mtu[i] = 1532;
- /* Enable learning for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
+ /* Disable learning for all ports */
+ ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ RTL8366RB_PORT_ALL);
if (ret)
return ret;
/* Enable auto ageing for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0);
+ ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
if (ret)
return ret;
@@ -911,11 +968,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Discard VLAN tagged packets if the port is not a member of
- * the VLAN with which the packets is associated.
- */
+ /* Accept all packets by default, we enable filtering on-demand */
+ ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ 0);
+ if (ret)
+ return ret;
ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
- RTL8366RB_PORT_ALL);
+ 0);
if (ret)
return ret;
@@ -963,7 +1022,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
}
- ret = rtl8366_init_vlan(smi);
+ ret = rtl8366_reset_vlan(smi);
if (ret)
return ret;
@@ -977,8 +1036,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return -ENODEV;
}
- ds->configure_vlan_while_not_filtering = false;
-
return 0;
}
@@ -1127,6 +1184,190 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
rb8366rb_set_port_led(smi, port, false);
}
+static int
+rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct realtek_smi *smi = ds->priv;
+ unsigned int port_bitmap = 0;
+ int ret, i;
+
+ /* Loop over all other ports than the current one */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ /* Current port handled last */
+ if (i == port)
+ continue;
+ /* Not on this bridge */
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ /* Join this port to each other port on the bridge */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)));
+ if (ret)
+ dev_err(smi->dev, "failed to join port %d\n", port);
+
+ port_bitmap |= BIT(i);
+ }
+
+ /* Set the bits for the ports we can access */
+ return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap));
+}
+
+static void
+rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct realtek_smi *smi = ds->priv;
+ unsigned int port_bitmap = 0;
+ int ret, i;
+
+ /* Loop over all other ports than this one */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ /* Current port handled last */
+ if (i == port)
+ continue;
+ /* Not on this bridge */
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ /* Remove this port from any other port on the bridge */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
+ if (ret)
+ dev_err(smi->dev, "failed to leave port %d\n", port);
+
+ port_bitmap |= BIT(i);
+ }
+
+ /* Clear the bits for the ports we can not access, leave ourselves */
+ regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
+}
+
+/**
+ * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
+ * @smi: SMI state container
+ * @port: the port to drop untagged and C-tagged frames on
+ * @drop: whether to drop or pass untagged and C-tagged frames
+ */
+static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
+{
+ return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
+ drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
+}
+
+static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8366rb *rb;
+ int ret;
+
+ rb = smi->chip_data;
+
+ dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
+ vlan_filtering ? "enable" : "disable");
+
+ /* If the port is not in the member set, the frame will be dropped */
+ ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ BIT(port), vlan_filtering ? BIT(port) : 0);
+ if (ret)
+ return ret;
+
+ /* If VLAN filtering is enabled and PVID is also enabled, we must
+ * not drop any untagged or C-tagged frames. If we turn off VLAN
+ * filtering on a port, we need to accept any frames.
+ */
+ if (vlan_filtering)
+ ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
+ else
+ ret = rtl8366rb_drop_untagged(smi, port, false);
+
+ return ret;
+}
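/* Summary of the decision above, as a small truth table:
 *
 *     vlan_filtering  pvid_enabled  ->  drop untagged/C-tagged?
 *          false          any            no  (accept everything)
 *          true           true           no  (PVID classifies them)
 *          true           false          yes (nothing classifies them)
 *
 * i.e. drop = vlan_filtering && !rb->pvid_enabled[port].
 */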
+
+static int
+rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ /* We support enabling/disabling learning */
+ if (flags.mask & ~(BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ BIT(port),
+ (flags.val & BR_LEARNING) ? 0 : BIT(port));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct realtek_smi *smi = ds->priv;
+ u32 val;
+ int i;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ val = RTL8366RB_STP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = RTL8366RB_STP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ val = RTL8366RB_STP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = RTL8366RB_STP_STATE_FORWARDING;
+ break;
+ default:
+ dev_err(smi->dev, "unknown bridge state requested\n");
+ return;
+ }
+
+ /* Set the same status for the port on all the FIDs */
+ for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
+ regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
+ RTL8366RB_STP_STATE_MASK(port),
+ RTL8366RB_STP_STATE(port, val));
+ }
+}
+
+static void
+rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct realtek_smi *smi = ds->priv;
+
+ /* This will age out any learned L2 entries */
+ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ BIT(port), BIT(port));
+ /* Restore the normal state of things */
+ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ BIT(port), 0);
+}
+
static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct realtek_smi *smi = ds->priv;
@@ -1338,24 +1579,44 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
{
+ struct rtl8366rb *rb;
+ bool pvid_enabled;
+ int ret;
+
+ rb = smi->chip_data;
+ pvid_enabled = !!index;
+
if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
- return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
RTL8366RB_PORT_VLAN_CTRL_MASK <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
(index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
+ if (ret)
+ return ret;
+
+ rb->pvid_enabled[port] = pvid_enabled;
+
+ /* If VLAN filtering is enabled and PVID is also enabled, we must
+ * not drop any untagged or C-tagged frames. Make sure to update the
+ * filtering setting.
+ */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
+ ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
+
+ return ret;
}
static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
{
- unsigned int max = RTL8366RB_NUM_VLANS;
+ unsigned int max = RTL8366RB_NUM_VLANS - 1;
if (smi->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
- if (vlan == 0 || vlan > max)
+ if (vlan > max)
return false;
return true;
@@ -1510,11 +1771,17 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
- .port_vlan_filtering = rtl8366_vlan_filtering,
+ .port_bridge_join = rtl8366rb_port_bridge_join,
+ .port_bridge_leave = rtl8366rb_port_bridge_leave,
+ .port_vlan_filtering = rtl8366rb_vlan_filtering,
.port_vlan_add = rtl8366_vlan_add,
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
.port_disable = rtl8366rb_port_disable,
+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+ .port_bridge_flags = rtl8366rb_port_bridge_flags,
+ .port_stp_state_set = rtl8366rb_port_stp_state_set,
+ .port_fast_age = rtl8366rb_port_fast_age,
.port_change_mtu = rtl8366rb_change_mtu,
.port_max_mtu = rtl8366rb_max_mtu,
};
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 5e5d24e7c02b..21dba16af097 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -20,6 +20,27 @@
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
#define SJA1105_NUM_L2_POLICERS SJA1110_MAX_L2_POLICING_COUNT
+/* Calculated assuming 1 Gbps, where the clock is 125 MHz (8 ns period).
+ * To avoid floating point operations, we multiply the degrees by 10
+ * to get a "phase" with 1 decimal point of precision.
+ */
+#define SJA1105_RGMII_DELAY_PS_TO_PHASE(ps) \
+ (((ps) * 360) / 800)
+#define SJA1105_RGMII_DELAY_PHASE_TO_PS(phase) \
+ ((800 * (phase)) / 360)
+#define SJA1105_RGMII_DELAY_PHASE_TO_HW(phase) \
+ (((phase) - 738) / 9)
+#define SJA1105_RGMII_DELAY_PS_TO_HW(ps) \
+ SJA1105_RGMII_DELAY_PHASE_TO_HW(SJA1105_RGMII_DELAY_PS_TO_PHASE(ps))
+
+/* Valid range in degrees is a value between 73.8 and 101.7
+ * in 0.9 degree increments
+ */
+#define SJA1105_RGMII_DELAY_MIN_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(738)
+#define SJA1105_RGMII_DELAY_MAX_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(1017)
+
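/* Worked example for the macros above: a 2000 ps delay gives
 *
 *     phase = 2000 * 360 / 800 = 900   (90.0 degrees, x10 fixed point)
 *     hw    = (900 - 738) / 9  = 18    (UM11040: 73.8 + 18 * 0.9 = 90.0)
 *
 * and the valid window works out to 1640..2260 ps (73.8..101.7 degrees).
 */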
typedef enum {
SPI_READ = 0,
SPI_WRITE = 1,
@@ -222,16 +243,14 @@ struct sja1105_flow_block {
struct sja1105_private {
struct sja1105_static_config static_config;
- bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS];
- bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS];
+ int rgmii_rx_delay_ps[SJA1105_MAX_NUM_PORTS];
+ int rgmii_tx_delay_ps[SJA1105_MAX_NUM_PORTS];
phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
bool fixed_link[SJA1105_MAX_NUM_PORTS];
- bool vlan_aware;
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
const struct sja1105_info *info;
size_t max_xfer_len;
- struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
@@ -242,6 +261,8 @@ struct sja1105_private {
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ /* Serializes access to the dynamic config interface */
+ struct mutex dynamic_config_lock;
struct devlink_region **regions;
struct sja1105_cbs_entry *cbs;
struct mii_bus *mdio_base_t1;
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 5bbf1707f2af..e3699f76f6d7 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -498,17 +498,6 @@ sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
}
-/* Valid range in degrees is an integer between 73.8 and 101.7 */
-static u64 sja1105_rgmii_delay(u64 phase)
-{
- /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
- * To avoid floating point operations we'll multiply by 10
- * and get 1 decimal point precision.
- */
- phase *= 10;
- return (phase - 738) / 9;
-}
-
/* The RGMII delay setup procedure is 2-step and gets called upon each
* .phylink_mac_config. Both are strategic.
* The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
@@ -521,13 +510,15 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int rc;
- if (priv->rgmii_rx_delay[port])
- pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
- if (priv->rgmii_tx_delay[port])
- pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+ if (rx_delay)
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
+ if (tx_delay)
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
/* Stage 1: Turn the RGMII delay lines off. */
pad_mii_id.rxc_bypass = 1;
@@ -542,11 +533,11 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
return rc;
/* Stage 2: Turn the RGMII delay lines on. */
- if (priv->rgmii_rx_delay[port]) {
+ if (rx_delay) {
pad_mii_id.rxc_bypass = 0;
pad_mii_id.rxc_pd = 0;
}
- if (priv->rgmii_tx_delay[port]) {
+ if (tx_delay) {
pad_mii_id.txc_bypass = 0;
pad_mii_id.txc_pd = 0;
}
@@ -561,20 +552,22 @@ int sja1110_setup_rgmii_delay(const void *ctx, int port)
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
pad_mii_id.rxc_pd = 1;
pad_mii_id.txc_pd = 1;
- if (priv->rgmii_rx_delay[port]) {
- pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
+ if (rx_delay) {
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
/* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
pad_mii_id.rxc_bypass = 1;
pad_mii_id.rxc_pd = 0;
}
- if (priv->rgmii_tx_delay[port]) {
- pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+ if (tx_delay) {
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
pad_mii_id.txc_bypass = 1;
pad_mii_id.txc_pd = 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index f2049f52833c..7729d3f8b7f5 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -1170,6 +1170,56 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
},
};
+#define SJA1105_DYNAMIC_CONFIG_SLEEP_US 10
+#define SJA1105_DYNAMIC_CONFIG_TIMEOUT_US 100000
+
+static int
+sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+ struct sja1105_dyn_cmd *cmd,
+ const struct sja1105_dynamic_table_ops *ops)
+{
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+ int rc;
+
+ /* We don't _need_ to read the full entry, just the command area which
+ * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
+ * buffer that contains the full entry too. Additionally, our API
+	 * doesn't really know how many bytes into the buffer the command
+	 * area really begins. So just read back the whole entry.
+ */
+ rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc)
+ return rc;
+
+ /* Unpack the command structure, and return it to the caller in case it
+ * needs to perform further checks on it (VALIDENT).
+ */
+ memset(cmd, 0, sizeof(*cmd));
+ ops->cmd_packing(packed_buf, cmd, UNPACK);
+
+ /* Hardware hasn't cleared VALID => still working on it */
+ return cmd->valid ? -EAGAIN : 0;
+}
+
+/* Poll the dynamic config entry's control area until the hardware has
+ * cleared the VALID bit, which means we have confirmation that it has
+ * finished processing the command.
+ */
+static int
+sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
+ struct sja1105_dyn_cmd *cmd,
+ const struct sja1105_dynamic_table_ops *ops)
+{
+ int rc;
+
+ return read_poll_timeout(sja1105_dynamic_config_poll_valid,
+ rc, rc != -EAGAIN,
+ SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+ SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+ false, priv, cmd, ops);
+}
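read_poll_timeout() comes from <linux/iopoll.h>: it re-invokes the polling function every SJA1105_DYNAMIC_CONFIG_SLEEP_US microseconds until the condition (rc != -EAGAIN) holds or SJA1105_DYNAMIC_CONFIG_TIMEOUT_US elapses, returning 0 once the condition is met and -ETIMEDOUT otherwise. Unlike the fixed three-retry loop it replaces further down, the bound is expressed in time rather than iterations; and because both the triggering SPI write and this poll now run under the new priv->dynamic_config_lock, concurrent callers can no longer interleave commands in the single shared command area. A rough behavioral sketch of the expansion (illustrative only; the real macro tracks the deadline with ktime and re-checks once after it expires):

	/* Sketch of the loop read_poll_timeout() expands to here. */
	static int wait_complete_sketch(struct sja1105_private *priv,
					struct sja1105_dyn_cmd *cmd,
					const struct sja1105_dynamic_table_ops *ops)
	{
		unsigned long budget_us = SJA1105_DYNAMIC_CONFIG_TIMEOUT_US;
		int rc;

		for (;;) {
			rc = sja1105_dynamic_config_poll_valid(priv, cmd, ops);
			if (rc != -EAGAIN)	/* condition met: done, or SPI error */
				return 0;	/* the macro reports only "met in time" */
			if (!budget_us)		/* VALID never cleared within budget */
				return -ETIMEDOUT;
			usleep_range(SJA1105_DYNAMIC_CONFIG_SLEEP_US,
				     SJA1105_DYNAMIC_CONFIG_SLEEP_US + 1);
			budget_us -= min_t(unsigned long, budget_us,
					   SJA1105_DYNAMIC_CONFIG_SLEEP_US);
		}
	}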
+
/* Provides read access to the settings through the dynamic interface
* of the switch.
* @blk_idx is used as key to select from the sja1105_dynamic_table_ops.
@@ -1196,7 +1246,6 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
struct sja1105_dyn_cmd cmd = {0};
/* SPI payload buffer */
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
- int retries = 3;
int rc;
if (blk_idx >= BLK_IDX_MAX_DYN)
@@ -1234,33 +1283,21 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
+ mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0)
+ if (rc < 0) {
+ mutex_unlock(&priv->dynamic_config_lock);
return rc;
+ }
- /* Loop until we have confirmation that hardware has finished
- * processing the command and has cleared the VALID field
- */
- do {
- memset(packed_buf, 0, ops->packed_size);
-
- /* Retrieve the read operation's result */
- rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
- ops->packed_size);
- if (rc < 0)
- return rc;
-
- cmd = (struct sja1105_dyn_cmd) {0};
- ops->cmd_packing(packed_buf, &cmd, UNPACK);
-
- if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
- return -ENOENT;
- cpu_relax();
- } while (cmd.valid && --retries);
+ rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
+ mutex_unlock(&priv->dynamic_config_lock);
+ if (rc < 0)
+ return rc;
- if (cmd.valid)
- return -ETIMEDOUT;
+ if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+ return -ENOENT;
/* Don't dereference possibly NULL pointer - maybe caller
* only wanted to see whether the entry existed or not.
@@ -1316,8 +1353,16 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
+ mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
+ if (rc < 0) {
+ mutex_unlock(&priv->dynamic_config_lock);
+ return rc;
+ }
+
+ rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
+ mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
return rc;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 924c3f129992..c343effe2e96 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -27,15 +27,29 @@
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
-static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
- unsigned int startup_delay)
+/* Configure the optional reset pin and bring up switch */
+static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len,
+ unsigned int startup_delay)
{
+ struct gpio_desc *gpio;
+
+ gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (!gpio)
+ return 0;
+
gpiod_set_value_cansleep(gpio, 1);
/* Wait for minimum reset pulse length */
msleep(pulse_len);
gpiod_set_value_cansleep(gpio, 0);
/* Wait until chip is ready after reset */
msleep(startup_delay);
+
+ gpiod_put(gpio);
+
+ return 0;
}
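Compared with the removed devm_gpiod_get() call in probe (visible further down), gpiod_get_optional() lets the driver tell the three outcomes apart instead of logging every failure as "not defined": a missing "reset-gpios" property yields NULL and is treated as success, while real errors, including -EPROBE_DEFER, are now propagated. A minimal sketch of the calling convention, assuming only the documented gpiod consumer API:

	#include <linux/gpio/consumer.h>

	/* Sketch: the three outcomes of an optional GPIO request; the
	 * con_id "reset" matches a "reset-gpios" device tree property.
	 */
	static int optional_reset_sketch(struct device *dev)
	{
		struct gpio_desc *gpio;

		gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
		if (IS_ERR(gpio))	/* real failure, e.g. -EPROBE_DEFER */
			return PTR_ERR(gpio);
		if (!gpio)		/* property absent: nothing to do */
			return 0;

		/* ... pulse the line as above ... */

		gpiod_put(gpio);	/* not devm-managed, so release explicitly */
		return 0;
	}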
static void
@@ -1095,27 +1109,78 @@ static int sja1105_static_config_load(struct sja1105_private *priv)
return sja1105_static_config_upload(priv);
}
-static int sja1105_parse_rgmii_delays(struct sja1105_private *priv)
+/* This is the "new way" for a MAC driver to configure its RGMII delay lines,
+ * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
+ * properties. It has the advantage of working with fixed links and with PHYs
+ * that apply RGMII delays too, and the MAC driver need not perform any
+ * special checks.
+ *
+ * Previously we were acting upon the "phy-mode" property when we were
+ * operating in fixed-link, basically acting as a PHY, but with a reversed
+ * interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should
+ * behave as if it is connected to a PHY which has applied RGMII delays in the
+ * TX direction. So if anything, RX delays should have been added by the MAC,
+ * but we were adding TX delays.
+ *
+ * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
+ * back to the legacy behavior and apply delays on fixed-link ports based on
+ * the reverse interpretation of the phy-mode. This is a deviation from the
+ * expected default behavior which is to simply apply no delays. To achieve
+ * that behavior with the new bindings, it is mandatory to specify
+ * "{rx,tx}-internal-delay-ps" with a value of 0.
+ */
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port,
+ struct device_node *port_dn)
{
- struct dsa_switch *ds = priv->ds;
- int port;
+ phy_interface_t phy_mode = priv->phy_mode[port];
+ struct device *dev = &priv->spidev->dev;
+ int rx_delay = -1, tx_delay = -1;
- for (port = 0; port < ds->num_ports; port++) {
- if (!priv->fixed_link[port])
- continue;
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return 0;
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID ||
- priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_rx_delay[port] = true;
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID ||
- priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_tx_delay[port] = true;
+ if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) {
+ dev_warn(dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"",
+ port);
- if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) &&
- !priv->info->setup_rgmii_delay)
- return -EINVAL;
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
+ }
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) {
+ dev_err(dev, "Chip cannot apply RGMII delays\n");
+ return -EINVAL;
}
+
+ if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (rx_delay > SJA1105_RGMII_DELAY_MAX_PS) ||
+ (tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) {
+ dev_err(dev,
+ "port %d RGMII delay values out of range, must be between %d and %d ps\n",
+ port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS);
+ return -ERANGE;
+ }
+
+ priv->rgmii_rx_delay_ps[port] = rx_delay;
+ priv->rgmii_tx_delay_ps[port] = tx_delay;
+
return 0;
}
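Concretely, the new bindings are consumed per port from the ports node. A hypothetical device tree fragment (port number and values illustrative) requesting the usual 2 ns delay in both directions; writing 0 instead is what opts a fixed-link port out of the legacy phy-mode fallback described in the comment above:

	/* Hypothetical fragment; delay values are in picoseconds. */
	port@1 {
		reg = <1>;
		phy-mode = "rgmii";
		rx-internal-delay-ps = <2000>;
		tx-internal-delay-ps = <2000>;
	};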
@@ -1166,6 +1231,12 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
}
priv->phy_mode[index] = phy_mode;
+
+ err = sja1105_parse_rgmii_delays(priv, index, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
}
return 0;
@@ -1360,7 +1431,7 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
*/
if (state->interface != PHY_INTERFACE_MODE_NA &&
sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -1380,9 +1451,8 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 2500baseX_Full);
}
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
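These conversions swap open-coded bitmap operations for the linkmode helpers, which hard-code the __ETHTOOL_LINK_MODE_MASK_NBITS length so callers cannot pass a mismatched size. A sketch of their likely shape (names prefixed to mark them as illustrations; the real helpers live in <linux/linkmode.h>):

	#include <linux/ethtool.h>
	#include <linux/bitmap.h>

	/* Sketch: linkmode helpers as size-fixed bitmap wrappers. */
	static inline void sketch_linkmode_zero(unsigned long *dst)
	{
		bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
	}

	static inline void sketch_linkmode_and(unsigned long *dst,
					       const unsigned long *a,
					       const unsigned long *b)
	{
		bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
	}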
static int
@@ -1766,6 +1836,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int i;
@@ -1802,7 +1873,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!priv->vlan_aware)
+ if (!dsa_port_is_vlan_filtering(dp))
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
@@ -2295,11 +2366,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
tpid2 = ETH_P_SJA1105;
}
- if (priv->vlan_aware == enabled)
- return 0;
-
- priv->vlan_aware = enabled;
-
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
@@ -2332,7 +2398,7 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
*/
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !priv->vlan_aware;
+ l2_lookup_params->shared_learn = !enabled;
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
@@ -2965,7 +3031,6 @@ static int sja1105_setup_ports(struct sja1105_private *priv)
continue;
dp->priv = sp;
- sp->dp = dp;
sp->data = tagger_data;
slave = dp->slave;
kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
@@ -3229,17 +3294,14 @@ static int sja1105_probe(struct spi_device *spi)
return -EINVAL;
}
+ rc = sja1105_hw_reset(dev, 1, 1);
+ if (rc)
+ return rc;
+
priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- /* Configure the optional reset pin and bring up switch */
- priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(priv->reset_gpio))
- dev_dbg(dev, "reset-gpios not defined, ignoring\n");
- else
- sja1105_hw_reset(priv->reset_gpio, 1, 1);
-
/* Populate our driver private structure (priv) based on
* the device tree node that was probed (spi)
*/
@@ -3303,6 +3365,7 @@ static int sja1105_probe(struct spi_device *spi)
priv->ds = ds;
mutex_init(&priv->ptp_data.lock);
+ mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
rc = sja1105_parse_dt(priv);
@@ -3311,15 +3374,6 @@ static int sja1105_probe(struct spi_device *spi)
return rc;
}
- /* Error out early if internal delays are required through DT
- * and we can't apply them.
- */
- rc = sja1105_parse_rgmii_delays(priv);
- if (rc < 0) {
- dev_err(ds->dev, "RGMII delay not supported\n");
- return rc;
- }
-
if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index 6802f4057cc0..f5dca6a9b0f9 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -394,7 +394,8 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
vl_lookup[k].vlanid = rule->key.vl.vid;
vl_lookup[k].vlanprior = rule->key.vl.pcp;
} else {
- u16 vid = dsa_8021q_rx_vid(priv->ds, port);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ u16 vid = dsa_tag_8021q_rx_vid(dp);
vl_lookup[k].vlanid = vid;
vl_lookup[k].vlanprior = 0;
@@ -494,13 +495,15 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,
bool append)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
int rc;
- if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on DMAC");
return -EOPNOTSUPP;
- } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
@@ -568,6 +571,8 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
u32 num_entries, struct action_gate_entry *entries)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
int ipv = -1;
int i, rc;
s32 rem;
@@ -592,11 +597,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
return -ERANGE;
}
- if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on DMAC");
return -EOPNOTSUPP;
- } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 469420941054..910fcb3b252b 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -456,7 +456,7 @@ static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 1000baseT_Full);
break;
default:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
return;
}
@@ -467,10 +467,8 @@ static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index d01cf1073d49..127a677d1f39 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -31,7 +31,7 @@ static int xrs700x_mdio_reg_read(void *context, unsigned int reg,
uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
- ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
@@ -39,13 +39,13 @@ static int xrs700x_mdio_reg_read(void *context, unsigned int reg,
uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_READ);
- ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
}
- ret = mdiobus_read(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD);
+ ret = mdiodev_read(mdiodev, XRS_MDIO_IBD);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_read returned %d\n", ret);
return ret;
@@ -64,7 +64,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
u16 uval;
int ret;
- ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD, (u16)val);
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBD, (u16)val);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
@@ -72,7 +72,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
- ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
@@ -80,7 +80,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_WRITE);
- ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
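Each mdiobus_write(mdiodev->bus, mdiodev->addr, ...) pair collapses into an mdiodev_*() convenience helper that picks the bus and address out of struct mdio_device itself. A sketch of their shape, assuming the <linux/mdio.h> definitions:

	#include <linux/mdio.h>

	/* Sketch: the mdiodev helpers just forward bus + address. */
	static inline int sketch_mdiodev_read(struct mdio_device *mdiodev,
					      u32 regnum)
	{
		return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
	}

	static inline int sketch_mdiodev_write(struct mdio_device *mdiodev,
					       u32 regnum, u16 val)
	{
		return mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
	}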
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 87c906e744fb..846fa3af4504 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -270,7 +270,7 @@ static void el3_dev_fill(struct net_device *dev, __be16 *phys_addr, int ioaddr,
{
struct el3_private *lp = netdev_priv(dev);
- memcpy(dev->dev_addr, phys_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, (u8 *)phys_addr);
dev->base_addr = ioaddr;
dev->irq = irq;
dev->if_port = if_port;
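This is the first of a long run of mechanical conversions in this section: drivers stop writing netdev->dev_addr byte by byte or via memcpy()/ether_addr_copy() and call eth_hw_addr_set() instead, so the core can eventually make dev_addr const and track address changes in one place. A behavioral sketch of the helper, assuming the <linux/etherdevice.h> semantics (the real implementation routes through a core setter rather than touching dev_addr directly):

	#include <linux/etherdevice.h>

	/* Sketch: what callers observe; always exactly ETH_ALEN bytes. */
	static void eth_hw_addr_set_sketch(struct net_device *dev, const u8 *addr)
	{
		memcpy(dev->dev_addr, addr, ETH_ALEN);
	}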
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 6f0ea2facea9..1d124b0f65e7 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -567,6 +567,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
{
struct corkscrew_private *vp = netdev_priv(dev);
unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ __be16 addr[ETH_ALEN / 2];
int i;
int irq;
@@ -619,7 +620,6 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
/* Read the station address from the EEPROM. */
EL3WINDOW(0);
for (i = 0; i < 0x18; i++) {
- __be16 *phys_addr = (__be16 *) dev->dev_addr;
int timer;
outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd);
/* Pause for at least 162 us for the read to take place. */
@@ -631,8 +631,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
eeprom[i] = inw(ioaddr + Wn0EepromData);
checksum ^= eeprom[i];
if (i < 3)
- phys_addr[i] = htons(eeprom[i]);
+ addr[i] = htons(eeprom[i]);
}
+ eth_hw_addr_set(dev, (u8 *)addr);
checksum = (checksum ^ (checksum >> 8)) & 0xff;
if (checksum != 0x00)
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index dd4d3c48b98d..dc3b7c960611 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -305,15 +305,13 @@ static int tc574_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct el3_private *lp = netdev_priv(dev);
int ret, i, j;
+ __be16 addr[ETH_ALEN / 2];
unsigned int ioaddr;
- __be16 *phys_addr;
char *cardname;
__u32 config;
u8 *buf;
size_t len;
- phys_addr = (__be16 *)dev->dev_addr;
-
dev_dbg(&link->dev, "3c574_config()\n");
link->io_lines = 16;
@@ -347,19 +345,20 @@ static int tc574_config(struct pcmcia_device *link)
len = pcmcia_get_tuple(link, 0x88, &buf);
if (buf && len >= 6) {
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i * 2]));
+ addr[i] = htons(le16_to_cpu(buf[i * 2]));
kfree(buf);
} else {
kfree(buf); /* 0 < len < 6 */
EL3WINDOW(0);
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
- if (phys_addr[0] == htons(0x6060)) {
+ addr[i] = htons(read_eeprom(ioaddr, i + 10));
+ if (addr[0] == htons(0x6060)) {
pr_notice("IO port conflict at 0x%03lx-0x%03lx\n",
dev->base_addr, dev->base_addr+15);
goto failed;
}
}
+ eth_hw_addr_set(dev, (u8 *)addr);
if (link->prod_id[1])
cardname = link->prod_id[1];
else
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 09816e84314d..4673bc1604e7 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -237,8 +237,8 @@ static void tc589_detach(struct pcmcia_device *link)
static int tc589_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- __be16 *phys_addr;
int ret, i, j, multi = 0, fifo;
+ __be16 addr[ETH_ALEN / 2];
unsigned int ioaddr;
static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
u8 *buf;
@@ -246,7 +246,6 @@ static int tc589_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c589_config\n");
- phys_addr = (__be16 *)dev->dev_addr;
/* Is this a 3c562? */
if (link->manf_id != MANFID_3COM)
dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
@@ -285,18 +284,19 @@ static int tc589_config(struct pcmcia_device *link)
len = pcmcia_get_tuple(link, 0x88, &buf);
if (buf && len >= 6) {
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
+ addr[i] = htons(le16_to_cpu(buf[i*2]));
kfree(buf);
} else {
kfree(buf); /* 0 < len < 6 */
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i));
- if (phys_addr[0] == htons(0x6060)) {
+ addr[i] = htons(read_eeprom(ioaddr, i));
+ if (addr[0] == htons(0x6060)) {
dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
dev->base_addr, dev->base_addr+15);
goto failed;
}
}
+ eth_hw_addr_set(dev, (u8 *)addr);
/* The address and resource configuration registers aren't loaded from
* the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 7b0ae9efc004..ccf07667aa5e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1091,6 +1091,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
struct vortex_private *vp;
int option;
unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ __be16 addr[ETH_ALEN / 2];
int i, step;
struct net_device *dev;
static int printed_version;
@@ -1284,7 +1285,8 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
for (i = 0; i < 3; i++)
- ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
+ addr[i] = htons(eeprom[i + 10]);
+ eth_hw_addr_set(dev, (u8 *)addr);
if (print_info)
pr_cont(" %pM", dev->dev_addr);
/* Unfortunately an all zero eeprom passes the checksum and this
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index da1ae37a9d73..991ad953aa79 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -320,8 +320,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
if (i) return i;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
pr_cont(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 6c6bdd5913ec..1f8acbba5b6b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -716,7 +716,7 @@ static int ax_init_dev(struct net_device *dev)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
- memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
+ eth_hw_addr_set(dev, SA_prom);
}
#ifdef CONFIG_AX88796_93CX6
@@ -733,7 +733,7 @@ static int ax_init_dev(struct net_device *dev)
(__le16 __force *)mac_addr,
sizeof(mac_addr) >> 1);
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, mac_addr);
}
#endif
if (ax->plat->wordlength == 2) {
@@ -748,16 +748,18 @@ static int ax_init_dev(struct net_device *dev)
/* load the mac-address from the device */
if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+ u8 addr[ETH_ALEN];
+
ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ei_local->mem + E8390_CMD); /* 0x61 */
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] =
- ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
+ addr[i] = ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
+ eth_hw_addr_set(dev, addr);
}
if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
ax->plat->mac_addr)
- memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, ax->plat->mac_addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 3c370e686ec3..3aef959fc25b 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -187,6 +187,7 @@ static int get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
int i, j;
/* This is based on drivers/net/ethernet/8390/ne.c */
@@ -220,9 +221,11 @@ static int get_prom(struct pcmcia_device *link)
for (i = 0; i < 6; i += 2) {
j = inw(ioaddr + AXNET_DATAPORT);
- dev->dev_addr[i] = j & 0xff;
- dev->dev_addr[i+1] = j >> 8;
+ addr[i] = j & 0xff;
+ addr[i+1] = j >> 8;
}
+ eth_hw_addr_set(dev, addr);
+
return 1;
} /* get_prom */
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 4ad8031ab669..e320cccba61a 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -374,8 +374,7 @@ static int mcf8390_init(struct net_device *dev)
if (ret)
return ret;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 9afc712f5948..0a9118b8be0c 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -500,9 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
dev->base_addr = ioaddr;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = SA_prom[i];
- }
+ eth_hw_addr_set(dev, SA_prom);
pr_cont("%pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index d6715008e04d..6a0a2039600a 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -390,7 +390,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
dev->ethtool_ops = &ne2k_pci_ethtool_ops;
NS8390_init(dev, 0);
- memcpy(dev->dev_addr, SA_prom, dev->addr_len);
+ eth_hw_addr_set(dev, SA_prom);
i = register_netdev(dev);
if (i)
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 96ad72abd373..0f07fe03da98 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -278,6 +278,7 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
u_char __iomem *base, *virt;
+ u8 addr[ETH_ALEN];
int i, j;
/* Allocate a small memory window */
@@ -302,7 +303,8 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
(readb(base+2) == hw_info[i].a1) &&
(readb(base+4) == hw_info[i].a2)) {
for (j = 0; j < 6; j++)
- dev->dev_addr[j] = readb(base + (j<<1));
+ addr[j] = readb(base + (j<<1));
+ eth_hw_addr_set(dev, addr);
break;
}
}
@@ -324,6 +326,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
u_char prom[32];
int i, j;
@@ -362,7 +365,8 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
}
if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) {
for (j = 0; j < 6; j++)
- dev->dev_addr[j] = prom[j<<1];
+ addr[j] = prom[j<<1];
+ eth_hw_addr_set(dev, addr);
return (i < NR_INFO) ? hw_info+i : &default_info;
}
return NULL;
@@ -377,6 +381,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
static struct hw_info *get_dl10019(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ u8 addr[ETH_ALEN];
int i;
u_char sum;
@@ -385,7 +390,8 @@ static struct hw_info *get_dl10019(struct pcmcia_device *link)
if (sum != 0xff)
return NULL;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ eth_hw_addr_set(dev, addr);
i = inb(dev->base_addr + 0x1f);
return ((i == 0x91)||(i == 0x99)) ? &dl10022_info : &dl10019_info;
}
@@ -400,6 +406,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
int i, j;
/* Not much of a test, but the alternatives are messy */
@@ -413,9 +420,10 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
for (i = 0; i < 6; i += 2) {
j = inw(ioaddr + PCNET_DATAPORT);
- dev->dev_addr[i] = j & 0xff;
- dev->dev_addr[i+1] = j >> 8;
+ addr[i] = j & 0xff;
+ addr[i+1] = j >> 8;
}
+ eth_hw_addr_set(dev, addr);
return NULL;
}
@@ -430,6 +438,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
static struct hw_info *get_hwired(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ u8 addr[ETH_ALEN];
int i;
for (i = 0; i < 6; i++)
@@ -438,7 +447,8 @@ static struct hw_info *get_hwired(struct pcmcia_device *link)
return NULL;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = hw_addr[i];
+ addr[i] = hw_addr[i];
+ eth_hw_addr_set(dev, addr);
return &default_info;
} /* get_hwired */
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index fbbd7f22c142..bd89ca8a92df 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -104,8 +104,8 @@ STNIC_WRITE (int reg, byte val)
static int __init stnic_probe(void)
{
struct net_device *dev;
- int i, err;
struct ei_device *ei_local;
+ int err;
/* If we are not running on a SolutionEngine, give up now */
if (! MACH_SE)
@@ -119,8 +119,7 @@ static int __init stnic_probe(void)
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_get_node_addr (stnic_eadr);
#endif
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = stnic_eadr[i];
+ eth_hw_addr_set(dev, stnic_eadr);
/* Set the base address to point to the NIC, not the "real" base! */
dev->base_addr = 0x1000;
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 35a500a21521..e8b4fe813a08 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -364,8 +364,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
if (i)
return i;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 412ae3e43ffb..4601b38f532a 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -33,6 +33,7 @@ source "drivers/net/ethernet/apm/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
+source "drivers/net/ethernet/asix/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index aaa5078cd7d1..fdd8c6c17451 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_NET_XGENE) += apm/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
+obj-$(CONFIG_NET_VENDOR_ASIX) += asix/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index c4ecf4fcadf8..1cfdd01b4c2e 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -342,7 +342,7 @@ static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv)
static void owl_emac_set_hw_mac_addr(struct net_device *netdev)
{
struct owl_emac_priv *priv = netdev_priv(netdev);
- u8 *mac_addr = netdev->dev_addr;
+ const u8 *mac_addr = netdev->dev_addr;
u32 addr_high, addr_low;
addr_high = mac_addr[0] << 8 | mac_addr[1];
@@ -1173,7 +1173,7 @@ static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, skaddr->sa_data);
owl_emac_set_hw_mac_addr(netdev);
return owl_emac_setup_frame_xmit(netdev_priv(netdev));
@@ -1385,7 +1385,7 @@ static void owl_emac_get_mac_addr(struct net_device *netdev)
struct device *dev = netdev->dev.parent;
int ret;
- ret = eth_platform_get_mac_address(dev, netdev->dev_addr);
+ ret = platform_get_ethdev_address(dev, netdev);
if (!ret && is_valid_ether_addr(netdev->dev_addr))
return;
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index e0f6cc910bd2..c6982f7caf9b 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -641,6 +641,7 @@ static int starfire_init_one(struct pci_dev *pdev,
struct netdev_private *np;
int i, irq, chip_idx = ent->driver_data;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
long ioaddr;
void __iomem *base;
int drv_flags, io_size;
@@ -696,7 +697,8 @@ static int starfire_init_one(struct pci_dev *pdev,
/* Serial EEPROM reads are hidden by the hardware. */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
+ addr[i] = readb(base + EEPROMCtrl + 20 - i);
+ eth_hw_addr_set(dev, addr);
#if ! defined(final_version) /* Dump the EEPROM contents during development. */
if (debug > 4)
@@ -955,7 +957,7 @@ static int netdev_open(struct net_device *dev)
writew(0, ioaddr + PerfFilterTable + 4);
writew(0, ioaddr + PerfFilterTable + 8);
for (i = 1; i < 16; i++) {
- __be16 *eaddrs = (__be16 *)dev->dev_addr;
+ const __be16 *eaddrs = (const __be16 *)dev->dev_addr;
void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
@@ -1787,14 +1789,14 @@ static void set_rx_mode(struct net_device *dev)
} else if (netdev_mc_count(dev) <= 14) {
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
- __be16 *eaddrs;
+ const __be16 *eaddrs;
netdev_for_each_mc_addr(ha, dev) {
eaddrs = (__be16 *) ha->addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
}
- eaddrs = (__be16 *)dev->dev_addr;
+ eaddrs = (const __be16 *)dev->dev_addr;
i = netdev_mc_count(dev) + 2;
while (i++ < 16) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
@@ -1805,7 +1807,7 @@ static void set_rx_mode(struct net_device *dev)
} else {
/* Must use a multicast hash table. */
void __iomem *filter_addr;
- __be16 *eaddrs;
+ const __be16 *eaddrs;
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
@@ -1819,7 +1821,7 @@ static void set_rx_mode(struct net_device *dev)
}
/* Clear the perfect filter list, skip first two entries. */
filter_addr = ioaddr + PerfFilterTable + 2 * 16;
- eaddrs = (__be16 *)dev->dev_addr;
+ eaddrs = (const __be16 *)dev->dev_addr;
for (i = 2; i < 16; i++) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index c560ad06f0be..447dc64a17e5 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1025,7 +1025,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
dev->dev_addr[4] << 8 | dev->dev_addr[5]);
@@ -1346,6 +1346,7 @@ static int greth_of_probe(struct platform_device *ofdev)
int i;
int err;
int tmp;
+ u8 addr[ETH_ALEN];
unsigned long timeout;
dev = alloc_etherdev(sizeof(struct greth_private));
@@ -1449,8 +1450,6 @@ static int greth_of_probe(struct platform_device *ofdev)
break;
}
if (i == 6) {
- u8 addr[ETH_ALEN];
-
err = of_get_mac_address(ofdev->dev.of_node, addr);
if (!err) {
for (i = 0; i < 6; i++)
@@ -1464,7 +1463,8 @@ static int greth_of_probe(struct platform_device *ofdev)
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = macaddr[i];
+ addr[i] = macaddr[i];
+ eth_hw_addr_set(dev, addr);
macaddr[5]++;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 920633161174..f4edc616388c 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3863,7 +3863,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
et131x_init_send(adapter);
et131x_hwaddr_init(adapter);
- ether_addr_copy(netdev->dev_addr, adapter->addr);
+ eth_hw_addr_set(netdev, adapter->addr);
/* Init the device with the new settings */
et131x_adapter_setup(adapter);
@@ -3966,7 +3966,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
- ether_addr_copy(netdev->dev_addr, adapter->addr);
+ eth_hw_addr_set(netdev, adapter->addr);
rc = -ENOMEM;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 696517eae77f..1fc9a1cd3ef8 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1008,7 +1008,7 @@ static void slic_set_link_autoneg(struct slic_device *sdev)
static void slic_set_mac_address(struct slic_device *sdev)
{
- u8 *addr = sdev->netdev->dev_addr;
+ const u8 *addr = sdev->netdev->dev_addr;
u32 val;
val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24;
@@ -1660,7 +1660,7 @@ static int slic_read_eeprom(struct slic_device *sdev)
goto free_eeprom;
}
/* set mac address */
- ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]);
+ eth_hw_addr_set(sdev->netdev, mac[devfn]);
free_eeprom:
dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 037baea1c738..800ee022388f 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -356,7 +356,7 @@ static int emac_set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 | dev->
dev_addr[2], db->membase + EMAC_MAC_A1_REG);
@@ -852,7 +852,7 @@ static int emac_probe(struct platform_device *pdev)
}
/* Read MAC-address from DT */
- ret = of_get_mac_address(np, ndev->dev_addr);
+ ret = of_get_ethdev_address(np, ndev);
if (ret) {
/* if the MAC address is invalid get a random one */
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 9dc12b13061f..732da15a3827 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -869,6 +869,7 @@ static int ace_init(struct net_device *dev)
int board_idx, ecode = 0;
short i;
unsigned char cache_size;
+ u8 addr[ETH_ALEN];
ap = netdev_priv(dev);
regs = ap->regs;
@@ -988,12 +989,13 @@ static int ace_init(struct net_device *dev)
writel(mac1, &regs->MacAddrHi);
writel(mac2, &regs->MacAddrLo);
- dev->dev_addr[0] = (mac1 >> 8) & 0xff;
- dev->dev_addr[1] = mac1 & 0xff;
- dev->dev_addr[2] = (mac2 >> 24) & 0xff;
- dev->dev_addr[3] = (mac2 >> 16) & 0xff;
- dev->dev_addr[4] = (mac2 >> 8) & 0xff;
- dev->dev_addr[5] = mac2 & 0xff;
+ addr[0] = (mac1 >> 8) & 0xff;
+ addr[1] = mac1 & 0xff;
+ addr[2] = (mac2 >> 24) & 0xff;
+ addr[3] = (mac2 >> 16) & 0xff;
+ addr[4] = (mac2 >> 8) & 0xff;
+ addr[5] = mac2 & 0xff;
+ eth_hw_addr_set(dev, addr);
printk("MAC: %pM\n", dev->dev_addr);
@@ -2712,15 +2714,15 @@ static int ace_set_mac_addr(struct net_device *dev, void *p)
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct sockaddr *addr=p;
- u8 *da;
+ const u8 *da;
struct cmd cmd;
if(netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
- da = (u8 *)dev->dev_addr;
+ da = (const u8 *)dev->dev_addr;
writel(da[0] << 8 | da[1], &regs->MacAddrHi);
writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1c00d719e5d7..d75d95a97dd9 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -849,7 +849,7 @@ static int init_phy(struct net_device *dev)
return 0;
}
-static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
+static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
u32 msb;
u32 lsb;
@@ -1524,7 +1524,7 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
/* get default MAC address from device tree */
- ret = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr);
+ ret = of_get_ethdev_address(pdev->dev.of_node, ndev);
if (ret)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0e43000614ab..7d5d885d85d5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4073,7 +4073,7 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter,
ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
} else {
ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
- ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ eth_hw_addr_set(netdev, adapter->mac_addr);
}
/* Set offload features */
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 4786f0504691..899c8a2a34b6 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -168,7 +168,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
+ depends on (OF_ADDRESS || ACPI || PCI) && HAS_IOMEM
depends on X86 || ARM64 || COMPILE_TEST
depends on PTP_1588_CLOCK_OPTIONAL
select BITREVERSE
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 92e4246dc359..9421afb950f7 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1500,7 +1500,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
int i;
struct sockaddr *addr = p;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */
for (i = 0; i < ETH_ALEN; i++)
@@ -1743,6 +1743,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
unsigned long reg_addr, reg_len;
struct amd8111e_priv *lp;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
err = pci_enable_device(pdev);
if (err) {
@@ -1809,7 +1810,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Initializing MAC address */
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = readb(lp->mmio + PADR + i);
+ addr[i] = readb(lp->mmio + PADR + i);
+ eth_hw_addr_set(dev, addr);
/* Setting user defined parameters */
lp->ext_phy_option = speed_duplex[card_idx];
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 9d2f49fd945e..9c7d9690d00c 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -582,7 +582,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
switch( lp->cardtype ) {
case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
- memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
case NEW_RIEBL:
lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
@@ -1123,7 +1123,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
return -EIO;
}
- memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
+ eth_hw_addr_set(dev, saddr->sa_data);
for( i = 0; i < 6; i++ )
MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 9c1636222b99..c6f003975621 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1178,7 +1178,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy1_search_mac0 = 1;
} else {
if (is_valid_ether_addr(pd->mac)) {
- memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac);
} else {
/* Set a random MAC since no valid one was provided by platform_data. */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 4019cab87505..30ee5329bd7c 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -529,7 +529,8 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
mace_init
Resets the MACE chip.
---------------------------------------------------------------------------- */
-static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
+static int mace_init(mace_private *lp, unsigned int ioaddr,
+ const char *enet_addr)
{
int i;
int ct = 0;
@@ -635,7 +636,7 @@ static int nmclan_config(struct pcmcia_device *link)
kfree(buf);
goto failed;
}
- memcpy(dev->dev_addr, buf, ETH_ALEN);
+ eth_hw_addr_set(dev, buf);
kfree(buf);
/* Verify configuration by reading the MACE ID. */
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 70d76fdb9f56..f5c50ff377ff 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1595,6 +1595,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
struct net_device *dev;
const struct pcnet32_access *a = NULL;
u8 promaddr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
int ret = -ENODEV;
/* reset the chip */
@@ -1760,9 +1761,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
unsigned int val;
val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
/* There may be endianness issues here. */
- dev->dev_addr[2 * i] = val & 0x0ff;
- dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+ addr[2 * i] = val & 0x0ff;
+ addr[2 * i + 1] = (val >> 8) & 0x0ff;
}
+ eth_hw_addr_set(dev, addr);
/* read PROM address and compare with CSR address */
for (i = 0; i < ETH_ALEN; i++)
@@ -1775,13 +1777,16 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
pr_cont(" warning: CSR address invalid,\n");
pr_info(" using instead PROM address of");
}
- memcpy(dev->dev_addr, promaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, promaddr);
}
}
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->dev_addr))
- eth_zero_addr(dev->dev_addr);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ static const u8 zero_addr[ETH_ALEN] = {};
+
+ eth_hw_addr_set(dev, zero_addr);
+ }
if (pcnet32_debug & NETIF_MSG_PROBE) {
pr_cont(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 4a845bc071b2..007bd7787291 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -305,7 +305,6 @@ static int __init lance_probe( struct net_device *dev)
unsigned long ioaddr;
struct lance_private *lp;
- int i;
static int did_version;
volatile unsigned short *ioaddr_probe;
unsigned short tmp1, tmp2;
@@ -373,8 +372,7 @@ static int __init lance_probe( struct net_device *dev)
dev->irq);
/* copy in the ethernet address from the prom */
- for(i = 0; i < 6 ; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* tell the card it's ether address, bytes swapped */
MEM->init.hwaddr[0] = dev->dev_addr[1];
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ddece276ae23..22d609563af8 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1301,7 +1301,6 @@ static int sparc_lance_probe_one(struct platform_device *op,
struct device_node *dp = op->dev.of_node;
struct lance_private *lp;
struct net_device *dev;
- int i;
dev = alloc_etherdev(sizeof(struct lance_private) + 8);
if (!dev)
@@ -1315,8 +1314,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
* will copy the address in the device structure to the lance
* initialization block.
*/
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* Get the IO region */
lp->lregs = of_ioremap(&op->resource[0], 0,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index b2cd3bdba9f8..533b8519ec35 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1331,6 +1331,10 @@
#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
#endif
+#ifndef MDIO_VEND2_PMA_MISC_CTRL0
+#define MDIO_VEND2_PMA_MISC_CTRL0 0x8090
+#endif
+
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
@@ -1389,6 +1393,10 @@
#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+#define XGBE_PMA_PLL_CTRL_MASK BIT(15)
+#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15)
+#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index d5fd49dd25f3..3936543a74d8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1080,7 +1080,7 @@ static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
return 0;
}
-static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr)
{
unsigned int mac_addr_hi, mac_addr_lo;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 17a585adfb49..30d24d19f40d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1912,10 +1912,8 @@ static int xgbe_close(struct net_device *netdev)
clk_disable_unprepare(pdata->ptpclk);
clk_disable_unprepare(pdata->sysclk);
- flush_workqueue(pdata->an_workqueue);
destroy_workqueue(pdata->an_workqueue);
- flush_workqueue(pdata->dev_workqueue);
destroy_workqueue(pdata->dev_workqueue);
set_bit(XGBE_DOWN, &pdata->dev_state);
@@ -2016,7 +2014,7 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, saddr->sa_data);
hw_if->set_mac_address(pdata, netdev->dev_addr);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index bafc51c34e0b..94879cf8b420 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -369,9 +369,8 @@ static int xgbe_set_link_ksettings(struct net_device *netdev,
__ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising,
__ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported);
- bitmap_and(advertising,
- cmd->link_modes.advertising, lks->link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(advertising, cmd->link_modes.advertising,
+ lks->link_modes.supported);
if ((cmd->base.autoneg == AUTONEG_ENABLE) &&
bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
@@ -384,8 +383,7 @@ static int xgbe_set_link_ksettings(struct net_device *netdev,
pdata->phy.autoneg = cmd->base.autoneg;
pdata->phy.speed = speed;
pdata->phy.duplex = cmd->base.duplex;
- bitmap_copy(lks->link_modes.advertising, advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_copy(lks->link_modes.advertising, advertising);
if (cmd->base.autoneg == AUTONEG_ENABLE)
XGBE_SET_ADV(lks, Autoneg);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index a218dc6f2edd..0e8698928e4d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -267,7 +267,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
- memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->mac_addr);
/* Initialize ECC timestamps */
pdata->tx_sec_period = jiffies;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 18e48b3bc402..213769054391 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1977,12 +1977,26 @@ static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
}
}
+static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)
+{
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0,
+ XGBE_PMA_PLL_CTRL_MASK,
+ enable ? XGBE_PMA_PLL_CTRL_ENABLE
+ : XGBE_PMA_PLL_CTRL_DISABLE);
+
+ /* Wait for command to complete */
+ usleep_range(100, 200);
+}
+
static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
unsigned int cmd, unsigned int sub_cmd)
{
unsigned int s0 = 0;
unsigned int wait;
+ /* Disable PLL re-initialization during FW command processing */
+ xgbe_phy_pll_ctrl(pdata, false);
+
/* Log if a previous command did not complete */
if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
netif_dbg(pdata, link, pdata->netdev,
@@ -2003,7 +2017,7 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
- return;
+ goto reenable_pll;
usleep_range(1000, 2000);
}
@@ -2013,6 +2027,10 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
/* Reset on error */
xgbe_phy_rx_reset(pdata);
+
+reenable_pll:
+ /* Enable PLL re-initialization */
+ xgbe_phy_pll_ctrl(pdata, true);
}
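The ratechange path now brackets every firmware command with a PLL re-initialization disable/enable pair, and the single reenable_pll label restores the PLL control on both the success and the timeout/reset paths. The new register constants follow the driver's mask/value convention; the XMDIO_WRITE_BITS() call above amounts to this read-modify-write (the read/write helpers below are illustrative stand-ins for the driver's XMDIO accessors):

	/* Sketch of the masked update, with hypothetical accessors. */
	static void pll_ctrl_sketch(struct xgbe_prv_data *pdata, bool enable)
	{
		u16 val = sketch_xmdio_read(pdata, MDIO_MMD_PMAPMD,
					    MDIO_VEND2_PMA_MISC_CTRL0);

		val &= ~XGBE_PMA_PLL_CTRL_MASK;			/* clear bit 15 */
		val |= enable ? XGBE_PMA_PLL_CTRL_ENABLE	/* set bit 15 */
			      : XGBE_PMA_PLL_CTRL_DISABLE;	/* or leave it 0 */
		sketch_xmdio_write(pdata, MDIO_MMD_PMAPMD,
				   MDIO_VEND2_PMA_MISC_CTRL0, val);
	}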
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 3305979a9f7c..607a2c90513b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -729,7 +729,7 @@ struct xgbe_ext_stats {
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
- int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+ int (*set_mac_address)(struct xgbe_prv_data *, const u8 *addr);
int (*config_rx_mode)(struct xgbe_prv_data *);
int (*enable_rx_csum)(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c
index 2da979e4fad1..6423e22e05b2 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mac.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.c
@@ -65,7 +65,7 @@ void xge_mac_set_speed(struct xge_pdata *pdata)
void xge_mac_set_station_addr(struct xge_pdata *pdata)
{
- u8 *dev_addr = pdata->ndev->dev_addr;
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 80399c8980bd..d022b6db9e06 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -36,7 +36,7 @@ static int xge_get_resources(struct xge_pdata *pdata)
return -ENOMEM;
}
- if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ if (device_get_ethdev_address(dev, ndev))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 5f657879134e..e641dbbea1e2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -378,8 +378,8 @@ u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = pdata->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 5f1fc6582d74..220dc42af31a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1731,7 +1731,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
xgene_get_port_id_acpi(dev, pdata);
#endif
- if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ if (device_get_ethdev_address(dev, ndev))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f482ced2cadd..72b5e8eb0ec7 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -165,8 +165,8 @@ static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
{
+ const u8 *dev_addr = p->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = p->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 304b5d43f236..86607b79c09f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -207,8 +207,8 @@ static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = pdata->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a989d2df59ad..9a650d1c1bdd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -308,7 +308,7 @@ bmac_init_registers(struct net_device *dev)
{
struct bmac_data *bp = netdev_priv(dev);
volatile unsigned short regValue;
- unsigned short *pWord16;
+ const unsigned short *pWord16;
int i;
/* XXDEBUG(("bmac: enter init_registers\n")); */
@@ -371,7 +371,7 @@ bmac_init_registers(struct net_device *dev)
bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
- pWord16 = (unsigned short *)dev->dev_addr;
+ pWord16 = (const unsigned short *)dev->dev_addr;
bmwrite(dev, MADD0, *pWord16++);
bmwrite(dev, MADD1, *pWord16++);
bmwrite(dev, MADD2, *pWord16);
@@ -521,19 +521,16 @@ static int bmac_resume(struct macio_dev *mdev)
static int bmac_set_address(struct net_device *dev, void *addr)
{
struct bmac_data *bp = netdev_priv(dev);
- unsigned char *p = addr;
- unsigned short *pWord16;
+ const unsigned short *pWord16;
unsigned long flags;
- int i;
XXDEBUG(("bmac: enter set_address\n"));
spin_lock_irqsave(&bp->lock, flags);
- for (i = 0; i < 6; ++i) {
- dev->dev_addr[i] = p[i];
- }
+ eth_hw_addr_set(dev, addr);
+
/* load up the hardware address */
- pWord16 = (unsigned short *)dev->dev_addr;
+ pWord16 = (const unsigned short *)dev->dev_addr;
bmwrite(dev, MADD0, *pWord16++);
bmwrite(dev, MADD1, *pWord16++);
bmwrite(dev, MADD2, *pWord16);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index bed481816ea3..062a300a566a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -217,7 +217,7 @@ struct aq_hw_ops {
int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
- int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_set_mac_address)(struct aq_hw_s *self, const u8 *mac_addr);
int (*hw_soft_reset)(struct aq_hw_s *self);
@@ -226,7 +226,7 @@ struct aq_hw_ops {
int (*hw_reset)(struct aq_hw_s *self);
- int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_init)(struct aq_hw_s *self, const u8 *mac_addr);
int (*hw_start)(struct aq_hw_s *self);
@@ -373,7 +373,7 @@ struct aq_fw_ops {
int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable);
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac);
+ const u8 *mac);
int (*send_fw_request)(struct aq_hw_s *self,
const struct hw_fw_request_iface *fw_req,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 4a6dfac857ca..02058fe79f52 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -35,7 +35,7 @@ static int aq_apply_macsec_cfg(struct aq_nic_s *nic);
static int aq_apply_secy_cfg(struct aq_nic_s *nic,
const struct macsec_secy *secy);
-static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac)
+static void aq_ether_addr_to_mac(u32 mac[2], const unsigned char *emac)
{
u32 tmp[2] = { 0 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 6c049864dac0..1acf544afeb4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -300,6 +300,7 @@ static bool aq_nic_is_valid_ether_addr(const u8 *addr)
int aq_nic_ndev_register(struct aq_nic_s *self)
{
+ u8 addr[ETH_ALEN];
int err = 0;
if (!self->ndev) {
@@ -316,12 +317,13 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
#endif
mutex_lock(&self->fwreq_mutex);
- err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
- self->ndev->dev_addr);
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
mutex_unlock(&self->fwreq_mutex);
if (err)
goto err_exit;
+ eth_hw_addr_set(self->ndev, addr);
+
if (!is_valid_ether_addr(self->ndev->dev_addr) ||
!aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
netdev_warn(self->ndev, "MAC is invalid, will use random.");
@@ -332,7 +334,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
{
static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
- ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
+ eth_hw_addr_set(self->ndev, mac_addr_permanent);
}
#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 611875ef2cd1..4625ccb79499 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -322,7 +322,7 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -348,7 +348,7 @@ err_exit:
return err;
}
-static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 9f1b15077e7d..d875ce3ec759 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -533,7 +533,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -558,7 +558,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index d8db972113ec..5298846dd9f7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -58,7 +58,7 @@ int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self);
-int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr);
int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc);
int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 404cbf60d3f2..fc0e66006644 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -944,7 +944,7 @@ u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
}
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
- u8 *mac)
+ const u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
@@ -987,7 +987,7 @@ err_exit:
}
static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac)
+ const u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index ee0c22d04935..eac631c45c56 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -358,7 +358,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
return 0;
}
-static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac)
+static int aq_fw2x_set_wol(struct aq_hw_s *self, const u8 *mac)
{
struct hw_atl_utils_fw_rpc *rpc = NULL;
struct offload_info *info = NULL;
@@ -404,7 +404,7 @@ err_exit:
}
static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac)
+ const u8 *mac)
{
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index 92f64048bf69..c98708bb044c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -516,7 +516,7 @@ static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl2_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 92a79c4ffa2c..0a67612af228 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -26,7 +26,7 @@ config ARC_EMAC_CORE
config ARC_EMAC
tristate "ARC EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET
+ depends on OF_IRQ
depends on ARC || COMPILE_TEST
help
On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
@@ -36,7 +36,7 @@ config ARC_EMAC
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && REGULATOR
+ depends on OF_IRQ && REGULATOR
depends on ARCH_ROCKCHIP || COMPILE_TEST
help
Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 38c288ec9059..c642c3d3e600 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -773,7 +773,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
arc_emac_set_address_internal(ndev);
@@ -941,7 +941,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
}
/* Get MAC address from device tree */
- err = of_get_mac_address(dev->of_node, ndev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, ndev);
if (err)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 54cdafdd067d..9acf589b1178 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -151,10 +151,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
data->reset_gpio = devm_gpiod_get_optional(priv->dev, "phy-reset",
GPIOD_OUT_LOW);
if (IS_ERR(data->reset_gpio)) {
- error = PTR_ERR(data->reset_gpio);
- dev_err(priv->dev, "Failed to request gpio: %d\n", error);
mdiobus_free(bus);
- return error;
+ return dev_err_probe(priv->dev, PTR_ERR(data->reset_gpio),
+ "Failed to request gpio\n");
}
of_property_read_u32(np, "phy-reset-duration", &data->msec);
@@ -166,9 +165,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
error = of_mdiobus_register(bus, priv->dev->of_node);
if (error) {
- dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name);
mdiobus_free(bus);
- return error;
+ return dev_err_probe(priv->dev, error,
+ "cannot register MDIO bus %s\n", bus->name);
}
return 0;
diff --git a/drivers/net/ethernet/asix/Kconfig b/drivers/net/ethernet/asix/Kconfig
new file mode 100644
index 000000000000..eed02453314c
--- /dev/null
+++ b/drivers/net/ethernet/asix/Kconfig
@@ -0,0 +1,35 @@
+#
+# Asix network device configuration
+#
+
+config NET_VENDOR_ASIX
+ bool "Asix devices"
+ default y
+ help
+ If you have a network (Ethernet, non-USB, not NE2000 compatible)
+ interface based on a chip from ASIX, say Y.
+
+if NET_VENDOR_ASIX
+
+config SPI_AX88796C
+ tristate "Asix AX88796C-SPI support"
+ select PHYLIB
+ depends on SPI
+ depends on GPIOLIB
+ help
+ Say Y here if you intend to use an ASIX AX88796C attached in SPI mode.
+
+config SPI_AX88796C_COMPRESSION
+ bool "SPI transfer compression"
+ default n
+ depends on SPI_AX88796C
+ help
+ Say Y here to enable SPI transfer compression. It saves up
+ to 24 dummy cycles during each transfer, which may noticeably
+ speed up short transfers. This sets the default value that is
+ inherited by network interfaces during probe. It can be
+ changed at run time via the spi-compression ethtool tunable.
+
+ If unsure say N.
+
+endif # NET_VENDOR_ASIX
diff --git a/drivers/net/ethernet/asix/Makefile b/drivers/net/ethernet/asix/Makefile
new file mode 100644
index 000000000000..0bfbbb042634
--- /dev/null
+++ b/drivers/net/ethernet/asix/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Asix network device drivers.
+#
+
+obj-$(CONFIG_SPI_AX88796C) += ax88796c.o
+ax88796c-y := ax88796c_main.o ax88796c_ioctl.o ax88796c_spi.o
diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.c b/drivers/net/ethernet/asix/ax88796c_ioctl.c
new file mode 100644
index 000000000000..916ae380a004
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_ioctl.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#include "ax88796c_main.h"
+#include "ax88796c_ioctl.h"
+
+static const char ax88796c_priv_flag_names[][ETH_GSTRING_LEN] = {
+ "SPICompression",
+};
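+
+/* The flag is exposed through ethtool private flags, e.g.
+ * "ethtool --set-priv-flags <iface> SPICompression on".
+ * Changing it while the interface is running returns -EBUSY
+ * (see ax88796c_set_priv_flags()).
+ */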
+
+static void
+ax88796c_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ /* Inherit standard device info */
+ strncpy(info->driver, DRV_NAME, sizeof(info->driver));
+}
+
+static u32 ax88796c_get_msglevel(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ return ax_local->msg_enable;
+}
+
+static void ax88796c_set_msglevel(struct net_device *ndev, u32 level)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ ax_local->msg_enable = level;
+}
+
+static void
+ax88796c_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ pause->tx_pause = !!(ax_local->flowctrl & AX_FC_TX);
+ pause->rx_pause = !!(ax_local->flowctrl & AX_FC_RX);
+ pause->autoneg = (ax_local->flowctrl & AX_FC_ANEG) ?
+ AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
+}
+
+static int
+ax88796c_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int fc;
+
+ /* The following logic comes from phylink_ethtool_set_pauseparam() */
+ fc = pause->tx_pause ? AX_FC_TX : 0;
+ fc |= pause->rx_pause ? AX_FC_RX : 0;
+ fc |= pause->autoneg ? AX_FC_ANEG : 0;
+
+ ax_local->flowctrl = fc;
+
+ if (pause->autoneg) {
+ phy_set_asym_pause(ax_local->phydev, pause->tx_pause,
+ pause->rx_pause);
+ } else {
+ int maccr = 0;
+
+ phy_set_asym_pause(ax_local->phydev, 0, 0);
+ maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0;
+ maccr |= (ax_local->flowctrl & AX_FC_TX) ? MACCR_TXFC_ENABLE : 0;
+
+ mutex_lock(&ax_local->spi_lock);
+
+ maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) &
+ ~(MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE);
+ AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+ }
+
+ return 0;
+}
+
+static int ax88796c_get_regs_len(struct net_device *ndev)
+{
+ return AX88796C_REGDUMP_LEN + AX88796C_PHY_REGDUMP_LEN;
+}
+
+static void
+ax88796c_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *_p)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int offset, i;
+ u16 *p = _p;
+
+ memset(p, 0, ax88796c_get_regs_len(ndev));
+
+ mutex_lock(&ax_local->spi_lock);
+
+ for (offset = 0; offset < AX88796C_REGDUMP_LEN; offset += 2) {
+ if (!test_bit(offset / 2, ax88796c_no_regs_mask))
+ *p = AX_READ(&ax_local->ax_spi, offset);
+ p++;
+ }
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ for (i = 0; i < AX88796C_PHY_REGDUMP_LEN / 2; i++) {
+ *p = phy_read(ax_local->phydev, i);
+ p++;
+ }
+}
+
+static void
+ax88796c_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, ax88796c_priv_flag_names,
+ sizeof(ax88796c_priv_flag_names));
+ break;
+ }
+}
+
+static int
+ax88796c_get_sset_count(struct net_device *ndev, int stringset)
+{
+ int ret = 0;
+
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ ret = ARRAY_SIZE(ax88796c_priv_flag_names);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ax88796c_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ if (flags & ~AX_PRIV_FLAGS_MASK)
+ return -EOPNOTSUPP;
+
+ if ((ax_local->priv_flags ^ flags) & AX_CAP_COMP)
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ ax_local->priv_flags = flags;
+
+ return 0;
+}
+
+static u32 ax88796c_get_priv_flags(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ return ax_local->priv_flags;
+}
+
+int ax88796c_mdio_read(struct mii_bus *mdiobus, int phy_id, int loc)
+{
+ struct ax88796c_device *ax_local = mdiobus->priv;
+ int ret;
+
+ mutex_lock(&ax_local->spi_lock);
+ AX_WRITE(&ax_local->ax_spi, MDIOCR_RADDR(loc)
+ | MDIOCR_FADDR(phy_id) | MDIOCR_READ, P2_MDIOCR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ ((ret & MDIOCR_VALID) != 0),
+ 0, jiffies_to_usecs(HZ / 100), false,
+ &ax_local->ax_spi, P2_MDIOCR);
+ if (!ret)
+ ret = AX_READ(&ax_local->ax_spi, P2_MDIODR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ return ret;
+}
+
+int
+ax88796c_mdio_write(struct mii_bus *mdiobus, int phy_id, int loc, u16 val)
+{
+ struct ax88796c_device *ax_local = mdiobus->priv;
+ int ret;
+
+ mutex_lock(&ax_local->spi_lock);
+ AX_WRITE(&ax_local->ax_spi, val, P2_MDIODR);
+
+ AX_WRITE(&ax_local->ax_spi,
+ MDIOCR_RADDR(loc) | MDIOCR_FADDR(phy_id)
+ | MDIOCR_WRITE, P2_MDIOCR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ ((ret & MDIOCR_VALID) != 0), 0,
+ jiffies_to_usecs(HZ / 100), false,
+ &ax_local->ax_spi, P2_MDIOCR);
+ mutex_unlock(&ax_local->spi_lock);
+
+ return ret;
+}
+
+const struct ethtool_ops ax88796c_ethtool_ops = {
+ .get_drvinfo = ax88796c_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ax88796c_get_msglevel,
+ .set_msglevel = ax88796c_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_pauseparam = ax88796c_get_pauseparam,
+ .set_pauseparam = ax88796c_set_pauseparam,
+ .get_regs_len = ax88796c_get_regs_len,
+ .get_regs = ax88796c_get_regs,
+ .get_strings = ax88796c_get_strings,
+ .get_sset_count = ax88796c_get_sset_count,
+ .get_priv_flags = ax88796c_get_priv_flags,
+ .set_priv_flags = ax88796c_set_priv_flags,
+};
+
+int ax88796c_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.h b/drivers/net/ethernet/asix/ax88796c_ioctl.h
new file mode 100644
index 000000000000..34d2a7dcc5ef
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_ioctl.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_IOCTL_H
+#define _AX88796C_IOCTL_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "ax88796c_main.h"
+
+extern const struct ethtool_ops ax88796c_ethtool_ops;
+
+bool ax88796c_check_power(const struct ax88796c_device *ax_local);
+bool ax88796c_check_power_and_wake(struct ax88796c_device *ax_local);
+void ax88796c_set_power_saving(struct ax88796c_device *ax_local, u8 ps_level);
+int ax88796c_mdio_read(struct mii_bus *mdiobus, int phy_id, int loc);
+int ax88796c_mdio_write(struct mii_bus *mdiobus, int phy_id, int loc, u16 val);
+int ax88796c_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+#endif
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
new file mode 100644
index 000000000000..4b0c5a09fd57
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include "ax88796c_main.h"
+#include "ax88796c_ioctl.h"
+
+#include <linux/bitmap.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/lockdep.h>
+#include <linux/mdio.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+
+static int comp = IS_ENABLED(CONFIG_SPI_AX88796C_COMPRESSION);
+static int msg_enable = NETIF_MSG_PROBE |
+ NETIF_MSG_LINK |
+ NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR;
+
+static const char *no_regs_list = "80018001,e1918001,8001a001,fc0d0000";
+unsigned long ax88796c_no_regs_mask[AX88796C_REGDUMP_LEN / (sizeof(unsigned long) * 8)];
+
+module_param(msg_enable, int, 0444);
+MODULE_PARM_DESC(msg_enable, "Message mask (see linux/netdevice.h for bitmap)");
+
+static int ax88796c_soft_reset(struct ax88796c_device *ax_local)
+{
+ u16 temp;
+ int ret;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, PSR_RESET, P0_PSR);
+ AX_WRITE(&ax_local->ax_spi, PSR_RESET_CLR, P0_PSR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ (ret & PSR_DEV_READY),
+ 0, jiffies_to_usecs(160 * HZ / 1000), false,
+ &ax_local->ax_spi, P0_PSR);
+ if (ret)
+ return ret;
+
+ temp = AX_READ(&ax_local->ax_spi, P4_SPICR);
+ if (ax_local->priv_flags & AX_CAP_COMP) {
+ AX_WRITE(&ax_local->ax_spi,
+ (temp | SPICR_RCEN | SPICR_QCEN), P4_SPICR);
+ ax_local->ax_spi.comp = 1;
+ } else {
+ AX_WRITE(&ax_local->ax_spi,
+ (temp & ~(SPICR_RCEN | SPICR_QCEN)), P4_SPICR);
+ ax_local->ax_spi.comp = 0;
+ }
+
+ return 0;
+}
+
+static int ax88796c_reload_eeprom(struct ax88796c_device *ax_local)
+{
+ int ret;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, EECR_RELOAD, P3_EECR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ (ret & PSR_DEV_READY),
+ 0, jiffies_to_usecs(2 * HZ / 1000), false,
+ &ax_local->ax_spi, P0_PSR);
+ if (ret) {
+ dev_err(&ax_local->spi->dev,
+ "timeout waiting for reload eeprom\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ax88796c_set_hw_multicast(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int mc_count = netdev_mc_count(ndev);
+ u16 rx_ctl = RXCR_AB;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ memset(ax_local->multi_filter, 0, AX_MCAST_FILTER_SIZE);
+
+ if (ndev->flags & IFF_PROMISC) {
+ rx_ctl |= RXCR_PRO;
+
+ } else if (ndev->flags & IFF_ALLMULTI || mc_count > AX_MAX_MCAST) {
+ rx_ctl |= RXCR_AMALL;
+
+ } else if (mc_count == 0) {
+ /* just broadcast and directed */
+ } else {
+ u32 crc_bits;
+ int i;
+ struct netdev_hw_addr *ha;
+
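+ /* Hash each address with ether_crc(): the top three CRC bits
+ * select the filter byte, the next three select the bit
+ * within it.
+ */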
+ netdev_for_each_mc_addr(ha, ndev) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr);
+ ax_local->multi_filter[crc_bits >> 29] |=
+ (1 << ((crc_bits >> 26) & 7));
+ }
+
+ for (i = 0; i < 4; i++) {
+ AX_WRITE(&ax_local->ax_spi,
+ ((ax_local->multi_filter[i * 2 + 1] << 8) |
+ ax_local->multi_filter[i * 2]), P3_MFAR(i));
+ }
+ }
+
+ AX_WRITE(&ax_local->ax_spi, rx_ctl, P2_RXCR);
+}
+
+static void ax88796c_set_mac_addr(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[4] << 8) |
+ (u16)ndev->dev_addr[5]), P3_MACASR0);
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[2] << 8) |
+ (u16)ndev->dev_addr[3]), P3_MACASR1);
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[0] << 8) |
+ (u16)ndev->dev_addr[1]), P3_MACASR2);
+}
+
+static void ax88796c_load_mac_addr(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u16 temp;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ /* Try the device tree first */
+ if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) &&
+ is_valid_ether_addr(ndev->dev_addr)) {
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev,
+ "MAC address read from device tree\n");
+ return;
+ }
+
+ /* Read the MAC address from AX88796C */
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
+ ndev->dev_addr[5] = (u8)temp;
+ ndev->dev_addr[4] = (u8)(temp >> 8);
+
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
+ ndev->dev_addr[3] = (u8)temp;
+ ndev->dev_addr[2] = (u8)(temp >> 8);
+
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
+ ndev->dev_addr[1] = (u8)temp;
+ ndev->dev_addr[0] = (u8)(temp >> 8);
+
+ if (is_valid_ether_addr(ndev->dev_addr)) {
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev,
+ "MAC address read from ASIX chip\n");
+ return;
+ }
+
+ /* Use random address if none found */
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev, "Use random MAC address\n");
+ eth_hw_addr_random(ndev);
+}
+
+static void ax88796c_proc_tx_hdr(struct tx_pkt_info *info, u8 ip_summed)
+{
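+ /* Each *_bar value below is the bitwise complement of its
+ * counterpart, presumably so the chip can sanity-check the
+ * headers.
+ */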
+ u16 pkt_len_bar = (~info->pkt_len & TX_HDR_SOP_PKTLENBAR);
+
+ /* Prepare SOP header */
+ info->sop.flags_len = info->pkt_len |
+ ((ip_summed == CHECKSUM_NONE) ||
+ (ip_summed == CHECKSUM_UNNECESSARY) ? TX_HDR_SOP_DICF : 0);
+
+ info->sop.seq_lenbar = ((info->seq_num << 11) & TX_HDR_SOP_SEQNUM)
+ | pkt_len_bar;
+ cpu_to_be16s(&info->sop.flags_len);
+ cpu_to_be16s(&info->sop.seq_lenbar);
+
+ /* Prepare Segment header */
+ info->seg.flags_seqnum_seglen = TX_HDR_SEG_FS | TX_HDR_SEG_LS
+ | info->pkt_len;
+
+ info->seg.eo_so_seglenbar = pkt_len_bar;
+
+ cpu_to_be16s(&info->seg.flags_seqnum_seglen);
+ cpu_to_be16s(&info->seg.eo_so_seglenbar);
+
+ /* Prepare EOP header */
+ info->eop.seq_len = ((info->seq_num << 11) &
+ TX_HDR_EOP_SEQNUM) | info->pkt_len;
+ info->eop.seqbar_lenbar = ((~info->seq_num << 11) &
+ TX_HDR_EOP_SEQNUMBAR) | pkt_len_bar;
+
+ cpu_to_be16s(&info->eop.seq_len);
+ cpu_to_be16s(&info->eop.seqbar_lenbar);
+}
+
+static int
+ax88796c_check_free_pages(struct ax88796c_device *ax_local, u8 need_pages)
+{
+ u8 free_pages;
+ u16 tmp;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ free_pages = AX_READ(&ax_local->ax_spi, P0_TFBFCR) & TX_FREEBUF_MASK;
+ if (free_pages < need_pages) {
+ /* Not enough room: ask the chip to interrupt once the
+ * required number of pages is free.
+ */
+ tmp = AX_READ(&ax_local->ax_spi, P0_TFBFCR)
+ & TFBFCR_SCHE_FREE_PAGE;
+ AX_WRITE(&ax_local->ax_spi, tmp | TFBFCR_TX_PAGE_SET |
+ TFBFCR_SET_FREE_PAGE(need_pages),
+ P0_TFBFCR);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *
+ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u8 spi_len = ax_local->ax_spi.comp ? 1 : 4;
+ struct sk_buff *skb;
+ struct tx_pkt_info info;
+ struct skb_data *entry;
+ u16 pkt_len;
+ u8 padlen, seq_num;
+ u8 need_pages;
+ int headroom;
+ int tailroom;
+
+ if (skb_queue_empty(q))
+ return NULL;
+
+ skb = skb_peek(q);
+ pkt_len = skb->len;
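+ /* TX buffer pages are 128 bytes; account for the 8-byte
+ * SOP/SEG header overhead when sizing the request.
+ */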
+ need_pages = (pkt_len + TX_OVERHEAD + 127) >> 7;
+ if (ax88796c_check_free_pages(ax_local, need_pages) != 0)
+ return NULL;
+
+ headroom = skb_headroom(skb);
+ tailroom = skb_tailroom(skb);
+ padlen = round_up(pkt_len, 4) - pkt_len;
+ seq_num = ++ax_local->seq_num & 0x1F;
+
+ info.pkt_len = pkt_len;
+
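+ /* Re-allocate the skb if it is cloned or lacks headroom for
+ * the SPI and SOP/SEG headers, or tailroom for the pad bytes
+ * and the EOP header.
+ */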
+ if (skb_cloned(skb) ||
+ (headroom < (TX_OVERHEAD + spi_len)) ||
+ (tailroom < (padlen + TX_EOP_SIZE))) {
+ size_t h = max((TX_OVERHEAD + spi_len) - headroom, 0);
+ size_t t = max((padlen + TX_EOP_SIZE) - tailroom, 0);
+
+ if (pskb_expand_head(skb, h, t, GFP_KERNEL))
+ return NULL;
+ }
+
+ info.seq_num = seq_num;
+ ax88796c_proc_tx_hdr(&info, skb->ip_summed);
+
+ /* SOP and SEG header */
+ memcpy(skb_push(skb, TX_OVERHEAD), &info.sop, TX_OVERHEAD);
+
+ /* Write SPI TXQ header */
+ memcpy(skb_push(skb, spi_len), ax88796c_tx_cmd_buf, spi_len);
+
+ /* Make 32-bit alignment */
+ skb_put(skb, padlen);
+
+ /* EOP header */
+ memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE);
+
+ skb_unlink(skb, q);
+
+ entry = (struct skb_data *)skb->cb;
+ memset(entry, 0, sizeof(*entry));
+ entry->len = pkt_len;
+
+ if (netif_msg_pktdata(ax_local)) {
+ char pfx[IFNAMSIZ + 7];
+
+ snprintf(pfx, sizeof(pfx), "%s: ", ndev->name);
+
+ netdev_info(ndev, "TX packet len %d, total len %d, seq %d\n",
+ pkt_len, skb->len, seq_num);
+
+ netdev_info(ndev, " SPI Header:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, 4, 0);
+
+ netdev_info(ndev, " TX SOP:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + 4, TX_OVERHEAD, 0);
+
+ netdev_info(ndev, " TX packet:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + 4 + TX_OVERHEAD,
+ skb->len - TX_EOP_SIZE - 4 - TX_OVERHEAD, 0);
+
+ netdev_info(ndev, " TX EOP:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + skb->len - 4, 4, 0);
+ }
+
+ return skb;
+}
+
+static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
+{
+ struct ax88796c_pcpu_stats *stats;
+ struct sk_buff *tx_skb;
+ struct skb_data *entry;
+ unsigned long flags;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ stats = this_cpu_ptr(ax_local->stats);
+ tx_skb = ax88796c_tx_fixup(ax_local->ndev, &ax_local->tx_wait_q);
+
+ if (!tx_skb) {
+ this_cpu_inc(ax_local->stats->tx_dropped);
+ return 0;
+ }
+ entry = (struct skb_data *)tx_skb->cb;
+
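+ /* Start the TX bridge for one packet, then stream the frame
+ * into the TX queue over SPI.
+ */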
+ AX_WRITE(&ax_local->ax_spi,
+ (TSNR_TXB_START | TSNR_PKT_CNT(1)), P0_TSNR);
+
+ axspi_write_txq(&ax_local->ax_spi, tx_skb->data, tx_skb->len);
+
+ if (((AX_READ(&ax_local->ax_spi, P0_TSNR) & TXNR_TXB_IDLE) == 0) ||
+ ((ISR_TXERR & AX_READ(&ax_local->ax_spi, P0_ISR)) != 0)) {
+ /* Ack tx error int */
+ AX_WRITE(&ax_local->ax_spi, ISR_TXERR, P0_ISR);
+
+ this_cpu_inc(ax_local->stats->tx_dropped);
+
+ if (net_ratelimit())
+ netif_err(ax_local, tx_err, ax_local->ndev,
+ "TX FIFO error, re-initialize the TX bridge\n");
+
+ /* Reinitialize the TX bridge */
+ AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT |
+ AX_READ(&ax_local->ax_spi, P0_TSNR), P0_TSNR);
+ ax_local->seq_num = 0;
+ } else {
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, entry->len);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ }
+
+ entry->state = tx_done;
+ dev_kfree_skb(tx_skb);
+
+ return 1;
+}
+
+static int
+ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ skb_queue_tail(&ax_local->tx_wait_q, skb);
+ if (skb_queue_len(&ax_local->tx_wait_q) > TX_QUEUE_HIGH_WATER)
+ netif_stop_queue(ndev);
+
+ set_bit(EVENT_TX, &ax_local->flags);
+ schedule_work(&ax_local->ax_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void
+ax88796c_skb_return(struct ax88796c_device *ax_local,
+ struct sk_buff *skb, struct rx_header *rxhdr)
+{
+ struct net_device *ndev = ax_local->ndev;
+ struct ax88796c_pcpu_stats *stats;
+ unsigned long flags;
+ int status;
+
+ stats = this_cpu_ptr(ax_local->stats);
+
+ do {
+ if (!(ndev->features & NETIF_F_RXCSUM))
+ break;
+
+ /* checksum error bit is set */
+ if ((rxhdr->flags & RX_HDR3_L3_ERR) ||
+ (rxhdr->flags & RX_HDR3_L4_ERR))
+ break;
+
+ /* Other L4 types may set more than one of these bits. */
+ if ((rxhdr->flags & RX_HDR3_L4_TYPE_TCP) ||
+ (rxhdr->flags & RX_HDR3_L4_TYPE_UDP))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } while (0);
+
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, skb->len);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, ax_local->ndev);
+
+ netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
+ skb->len + sizeof(struct ethhdr), skb->protocol);
+
+ status = netif_rx_ni(skb);
+ if (status != NET_RX_SUCCESS && net_ratelimit())
+ netif_info(ax_local, rx_err, ndev,
+ "netif_rx status %d\n", status);
+}
+
+static void
+ax88796c_rx_fixup(struct ax88796c_device *ax_local, struct sk_buff *rx_skb)
+{
+ struct rx_header *rxhdr = (struct rx_header *)rx_skb->data;
+ struct net_device *ndev = ax_local->ndev;
+ u16 len;
+
+ be16_to_cpus(&rxhdr->flags_len);
+ be16_to_cpus(&rxhdr->seq_lenbar);
+ be16_to_cpus(&rxhdr->flags);
+
+ if ((rxhdr->flags_len & RX_HDR1_PKT_LEN) !=
+ (~rxhdr->seq_lenbar & 0x7FF)) {
+ netif_err(ax_local, rx_err, ndev, "Header error\n");
+
+ this_cpu_inc(ax_local->stats->rx_frame_errors);
+ kfree_skb(rx_skb);
+ return;
+ }
+
+ if ((rxhdr->flags_len & RX_HDR1_MII_ERR) ||
+ (rxhdr->flags_len & RX_HDR1_CRC_ERR)) {
+ netif_err(ax_local, rx_err, ndev, "CRC or MII error\n");
+
+ this_cpu_inc(ax_local->stats->rx_crc_errors);
+ kfree_skb(rx_skb);
+ return;
+ }
+
+ len = rxhdr->flags_len & RX_HDR1_PKT_LEN;
+ if (netif_msg_pktdata(ax_local)) {
+ char pfx[IFNAMSIZ + 7];
+
+ snprintf(pfx, sizeof(pfx), "%s: ", ndev->name);
+ netdev_info(ndev, "RX data, total len %d, packet len %d\n",
+ rx_skb->len, len);
+
+ netdev_info(ndev, " Dump RX packet header:");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ rx_skb->data, sizeof(*rxhdr), 0);
+
+ netdev_info(ndev, " Dump RX packet:");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ rx_skb->data + sizeof(*rxhdr), len, 0);
+ }
+
+ skb_pull(rx_skb, sizeof(*rxhdr));
+ pskb_trim(rx_skb, len);
+
+ ax88796c_skb_return(ax_local, rx_skb, rxhdr);
+}
+
+static int ax88796c_receive(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ struct skb_data *entry;
+ u16 w_count, pkt_len;
+ struct sk_buff *skb;
+ u8 pkt_cnt;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ /* check rx packet and total word count */
+ AX_WRITE(&ax_local->ax_spi, AX_READ(&ax_local->ax_spi, P0_RTWCR)
+ | RTWCR_RX_LATCH, P0_RTWCR);
+
+ pkt_cnt = AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_PKT_MASK;
+ if (!pkt_cnt)
+ return 0;
+
+ pkt_len = AX_READ(&ax_local->ax_spi, P0_RCPHR) & 0x7FF;
+
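+ /* The RX bridge transfers 16-bit words: the packet plus its
+ * 6-byte header, rounded up to a 32-bit boundary.
+ */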
+ w_count = round_up(pkt_len + 6, 4) >> 1;
+
+ skb = netdev_alloc_skb(ndev, w_count * 2);
+ if (!skb) {
+ AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_DISCARD, P0_RXBCR1);
+ this_cpu_inc(ax_local->stats->rx_dropped);
+ return 0;
+ }
+ entry = (struct skb_data *)skb->cb;
+
+ AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_START | w_count, P0_RXBCR1);
+
+ axspi_read_rxq(&ax_local->ax_spi,
+ skb_put(skb, w_count * 2), skb->len);
+
+ /* Check if rx bridge is idle */
+ if ((AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_RXB_IDLE) == 0) {
+ if (net_ratelimit())
+ netif_err(ax_local, rx_err, ndev,
+ "Rx Bridge is not idle\n");
+ AX_WRITE(&ax_local->ax_spi, RXBCR2_RXB_REINIT, P0_RXBCR2);
+
+ entry->state = rx_err;
+ } else {
+ entry->state = rx_done;
+ }
+
+ AX_WRITE(&ax_local->ax_spi, ISR_RXPKT, P0_ISR);
+
+ ax88796c_rx_fixup(ax_local, skb);
+
+ return 1;
+}
+
+static int ax88796c_process_isr(struct ax88796c_device *ax_local)
+{
+ struct net_device *ndev = ax_local->ndev;
+ int todo = 0;
+ u16 isr;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ isr = AX_READ(&ax_local->ax_spi, P0_ISR);
+ AX_WRITE(&ax_local->ax_spi, isr, P0_ISR);
+
+ netif_dbg(ax_local, intr, ndev, " ISR 0x%04x\n", isr);
+
+ if (isr & ISR_TXERR) {
+ netif_dbg(ax_local, intr, ndev, " TXERR interrupt\n");
+ AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT, P0_TSNR);
+ ax_local->seq_num = 0x1f;
+ }
+
+ if (isr & ISR_TXPAGES) {
+ netif_dbg(ax_local, intr, ndev, " TXPAGES interrupt\n");
+ set_bit(EVENT_TX, &ax_local->flags);
+ }
+
+ if (isr & ISR_LINK) {
+ netif_dbg(ax_local, intr, ndev, " Link change interrupt\n");
+ phy_mac_interrupt(ax_local->ndev->phydev);
+ }
+
+ if (isr & ISR_RXPKT) {
+ netif_dbg(ax_local, intr, ndev, " RX interrupt\n");
+ todo = ax88796c_receive(ax_local->ndev);
+ }
+
+ return todo;
+}
+
+static irqreturn_t ax88796c_interrupt(int irq, void *dev_instance)
+{
+ struct ax88796c_device *ax_local;
+ struct net_device *ndev;
+
+ ndev = dev_instance;
+ if (!ndev) {
+ pr_err("irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+ ax_local = to_ax88796c_device(ndev);
+
+ disable_irq_nosync(irq);
+
+ netif_dbg(ax_local, intr, ndev, "Interrupt occurred\n");
+
+ set_bit(EVENT_INTR, &ax_local->flags);
+ schedule_work(&ax_local->ax_work);
+
+ return IRQ_HANDLED;
+}
+
+static void ax88796c_work(struct work_struct *work)
+{
+ struct ax88796c_device *ax_local =
+ container_of(work, struct ax88796c_device, ax_work);
+
+ mutex_lock(&ax_local->spi_lock);
+
+ if (test_bit(EVENT_SET_MULTI, &ax_local->flags)) {
+ ax88796c_set_hw_multicast(ax_local->ndev);
+ clear_bit(EVENT_SET_MULTI, &ax_local->flags);
+ }
+
+ if (test_bit(EVENT_INTR, &ax_local->flags)) {
+ AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);
+
+ while (ax88796c_process_isr(ax_local))
+ /* nothing */;
+
+ clear_bit(EVENT_INTR, &ax_local->flags);
+
+ AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);
+
+ enable_irq(ax_local->ndev->irq);
+ }
+
+ if (test_bit(EVENT_TX, &ax_local->flags)) {
+ while (skb_queue_len(&ax_local->tx_wait_q)) {
+ if (!ax88796c_hard_xmit(ax_local))
+ break;
+ }
+
+ clear_bit(EVENT_TX, &ax_local->flags);
+
+ if (netif_queue_stopped(ax_local->ndev) &&
+ (skb_queue_len(&ax_local->tx_wait_q) < TX_QUEUE_LOW_WATER))
+ netif_wake_queue(ax_local->ndev);
+ }
+
+ mutex_unlock(&ax_local->spi_lock);
+}
+
+static void ax88796c_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u32 rx_frame_errors = 0, rx_crc_errors = 0;
+ u32 rx_dropped = 0, tx_dropped = 0;
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct ax88796c_pcpu_stats *s;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+
+ s = per_cpu_ptr(ax_local->stats, cpu);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&s->syncp);
+ rx_packets = u64_stats_read(&s->rx_packets);
+ rx_bytes = u64_stats_read(&s->rx_bytes);
+ tx_packets = u64_stats_read(&s->tx_packets);
+ tx_bytes = u64_stats_read(&s->tx_bytes);
+ } while (u64_stats_fetch_retry_irq(&s->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+
+ rx_dropped += s->rx_dropped;
+ tx_dropped += s->tx_dropped;
+ rx_frame_errors += s->rx_frame_errors;
+ rx_crc_errors += s->rx_crc_errors;
+ }
+
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
+ stats->rx_frame_errors = rx_frame_errors;
+ stats->rx_crc_errors = rx_crc_errors;
+}
+
+static void ax88796c_set_mac(struct ax88796c_device *ax_local)
+{
+ u16 maccr;
+
+ maccr = (ax_local->link) ? MACCR_RXEN : 0;
+
+ switch (ax_local->speed) {
+ case SPEED_100:
+ maccr |= MACCR_SPEED_100;
+ break;
+ case SPEED_10:
+ case SPEED_UNKNOWN:
+ break;
+ default:
+ return;
+ }
+
+ switch (ax_local->duplex) {
+ case DUPLEX_FULL:
+ maccr |= MACCR_DUPLEX_FULL;
+ break;
+ case DUPLEX_HALF:
+ case DUPLEX_UNKNOWN:
+ break;
+ default:
+ return;
+ }
+
+ if (ax_local->flowctrl & AX_FC_ANEG &&
+ ax_local->phydev->autoneg) {
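+ /* Use the negotiated pause bits: RX flow control follows
+ * pause, TX is enabled when exactly one of pause and
+ * asym_pause is set.
+ */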
+ maccr |= ax_local->pause ? MACCR_RXFC_ENABLE : 0;
+ maccr |= !ax_local->pause != !ax_local->asym_pause ?
+ MACCR_TXFC_ENABLE : 0;
+ } else {
+ maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0;
+ maccr |= (ax_local->flowctrl & AX_FC_TX) ? MACCR_TXFC_ENABLE : 0;
+ }
+
+ mutex_lock(&ax_local->spi_lock);
+
+ maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) &
+ ~(MACCR_DUPLEX_FULL | MACCR_SPEED_100 |
+ MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE);
+ AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+}
+
+static void ax88796c_handle_link_change(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ bool update = false;
+
+ if (phydev->link && (ax_local->speed != phydev->speed ||
+ ax_local->duplex != phydev->duplex ||
+ ax_local->pause != phydev->pause ||
+ ax_local->asym_pause != phydev->asym_pause)) {
+ ax_local->speed = phydev->speed;
+ ax_local->duplex = phydev->duplex;
+ ax_local->pause = phydev->pause;
+ ax_local->asym_pause = phydev->asym_pause;
+ update = true;
+ }
+
+ if (phydev->link != ax_local->link) {
+ if (!phydev->link) {
+ ax_local->speed = SPEED_UNKNOWN;
+ ax_local->duplex = DUPLEX_UNKNOWN;
+ }
+
+ ax_local->link = phydev->link;
+ update = true;
+ }
+
+ if (update)
+ ax88796c_set_mac(ax_local);
+
+ if (net_ratelimit())
+ phy_print_status(ndev->phydev);
+}
+
+static void ax88796c_set_csums(struct ax88796c_device *ax_local)
+{
+ struct net_device *ndev = ax_local->ndev;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ if (ndev->features & NETIF_F_RXCSUM) {
+ AX_WRITE(&ax_local->ax_spi, COERCR0_DEFAULT, P4_COERCR0);
+ AX_WRITE(&ax_local->ax_spi, COERCR1_DEFAULT, P4_COERCR1);
+ } else {
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR0);
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR1);
+ }
+
+ if (ndev->features & NETIF_F_HW_CSUM) {
+ AX_WRITE(&ax_local->ax_spi, COETCR0_DEFAULT, P4_COETCR0);
+ AX_WRITE(&ax_local->ax_spi, COETCR1_TXPPPE, P4_COETCR1);
+ } else {
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR0);
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR1);
+ }
+}
+
+static int
+ax88796c_open(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ unsigned long irq_flag = 0;
+ int fc = AX_FC_NONE;
+ int ret;
+ u16 t;
+
+ ret = request_irq(ndev->irq, ax88796c_interrupt,
+ irq_flag, ndev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "unable to get IRQ %d (errno=%d).\n",
+ ndev->irq, ret);
+ return ret;
+ }
+
+ mutex_lock(&ax_local->spi_lock);
+
+ ret = ax88796c_soft_reset(ax_local);
+ if (ret < 0) {
+ free_irq(ndev->irq, ndev);
+ mutex_unlock(&ax_local->spi_lock);
+ return ret;
+ }
+ ax_local->seq_num = 0x1f;
+
+ ax88796c_set_mac_addr(ndev);
+ ax88796c_set_csums(ax_local);
+
+ /* Disable packet stuffing */
+ t = AX_READ(&ax_local->ax_spi, P1_RXBSPCR);
+ t &= ~RXBSPCR_STUF_ENABLE;
+ AX_WRITE(&ax_local->ax_spi, t, P1_RXBSPCR);
+
+ /* Enable RX packet process */
+ AX_WRITE(&ax_local->ax_spi, RPPER_RXEN, P1_RPPER);
+
+ t = AX_READ(&ax_local->ax_spi, P0_FER);
+ t |= FER_RXEN | FER_TXEN | FER_BSWAP | FER_IRQ_PULL;
+ AX_WRITE(&ax_local->ax_spi, t, P0_FER);
+
+ /* Setup LED mode */
+ AX_WRITE(&ax_local->ax_spi,
+ (LCR_LED0_EN | LCR_LED0_DUPLEX | LCR_LED1_EN |
+ LCR_LED1_100MODE), P2_LCR0);
+ AX_WRITE(&ax_local->ax_spi,
+ (AX_READ(&ax_local->ax_spi, P2_LCR1) & LCR_LED2_MASK) |
+ LCR_LED2_EN | LCR_LED2_LINK, P2_LCR1);
+
+ /* Disable PHY auto-polling */
+ AX_WRITE(&ax_local->ax_spi, PCR_PHYID(AX88796C_PHY_ID), P2_PCR);
+
+ /* Enable MAC interrupts */
+ AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ /* Setup flow-control configuration */
+ phy_support_asym_pause(ax_local->phydev);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ax_local->phydev->advertising))
+ fc |= AX_FC_ANEG;
+
+ fc |= linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) ? AX_FC_RX : 0;
+ fc |= (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) !=
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ax_local->phydev->advertising)) ? AX_FC_TX : 0;
+ ax_local->flowctrl = fc;
+
+ phy_start(ax_local->ndev->phydev);
+
+ netif_start_queue(ndev);
+
+ spi_message_init(&ax_local->ax_spi.rx_msg);
+
+ return 0;
+}
+
+static int
+ax88796c_close(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ phy_stop(ndev->phydev);
+
+ /* We lock the mutex early not only to protect the device
+ * against concurrent access, but also to avoid waking up the
+ * queue in ax88796c_work(). phy_stop() must be called first
+ * because it takes the mutex to access SPI.
+ */
+ mutex_lock(&ax_local->spi_lock);
+
+ netif_stop_queue(ndev);
+
+ /* No more work can be scheduled now. Turn any pending work,
+ * including work already waiting for the mutex, into a no-op.
+ */
+ netif_dbg(ax_local, ifdown, ndev, "clearing bits\n");
+ clear_bit(EVENT_SET_MULTI, &ax_local->flags);
+ clear_bit(EVENT_INTR, &ax_local->flags);
+ clear_bit(EVENT_TX, &ax_local->flags);
+
+ /* Disable MAC interrupts */
+ AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);
+ __skb_queue_purge(&ax_local->tx_wait_q);
+ ax88796c_soft_reset(ax_local);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ cancel_work_sync(&ax_local->ax_work);
+
+ free_irq(ndev->irq, ndev);
+
+ return 0;
+}
+
+static int
+ax88796c_set_features(struct net_device *ndev, netdev_features_t features)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ netdev_features_t changed = features ^ ndev->features;
+
+ if (!(changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM)))
+ return 0;
+
+ ndev->features = features;
+
+ if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM))
+ ax88796c_set_csums(ax_local);
+
+ return 0;
+}
+
+static const struct net_device_ops ax88796c_netdev_ops = {
+ .ndo_open = ax88796c_open,
+ .ndo_stop = ax88796c_close,
+ .ndo_start_xmit = ax88796c_start_xmit,
+ .ndo_get_stats64 = ax88796c_get_stats64,
+ .ndo_do_ioctl = ax88796c_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_features = ax88796c_set_features,
+};
+
+static int ax88796c_hard_reset(struct ax88796c_device *ax_local)
+{
+ struct device *dev = &ax_local->spi->dev;
+ struct gpio_desc *reset_gpio;
+
+ /* Request the reset GPIO */
+ reset_gpio = gpiod_get(dev, "reset", 0);
+ if (IS_ERR(reset_gpio)) {
+ dev_err(dev, "Could not get 'reset' GPIO: %ld", PTR_ERR(reset_gpio));
+ return PTR_ERR(reset_gpio);
+ }
+
+ /* Assert reset for 100 ms, then release and give the chip time to settle */
+ gpiod_direction_output(reset_gpio, 1);
+ msleep(100);
+ gpiod_direction_output(reset_gpio, 0);
+ gpiod_put(reset_gpio);
+ msleep(20);
+
+ return 0;
+}
+
+static int ax88796c_probe(struct spi_device *spi)
+{
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ struct ax88796c_device *ax_local;
+ struct net_device *ndev;
+ u16 temp;
+ int ret;
+
+ ndev = devm_alloc_etherdev(&spi->dev, sizeof(*ax_local));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ ax_local = to_ax88796c_device(ndev);
+
+ dev_set_drvdata(&spi->dev, ax_local);
+ ax_local->spi = spi;
+ ax_local->ax_spi.spi = spi;
+
+ ax_local->stats =
+ devm_netdev_alloc_pcpu_stats(&spi->dev,
+ struct ax88796c_pcpu_stats);
+ if (!ax_local->stats)
+ return -ENOMEM;
+
+ ax_local->ndev = ndev;
+ ax_local->priv_flags |= comp ? AX_CAP_COMP : 0;
+ ax_local->msg_enable = msg_enable;
+ mutex_init(&ax_local->spi_lock);
+
+ ax_local->mdiobus = devm_mdiobus_alloc(&spi->dev);
+ if (!ax_local->mdiobus)
+ return -ENOMEM;
+
+ ax_local->mdiobus->priv = ax_local;
+ ax_local->mdiobus->read = ax88796c_mdio_read;
+ ax_local->mdiobus->write = ax88796c_mdio_write;
+ ax_local->mdiobus->name = "ax88796c-mdiobus";
+ ax_local->mdiobus->phy_mask = (u32)~BIT(AX88796C_PHY_ID);
+ ax_local->mdiobus->parent = &spi->dev;
+
+ snprintf(ax_local->mdiobus->id, MII_BUS_ID_SIZE,
+ "ax88796c-%s.%u", dev_name(&spi->dev), spi->chip_select);
+
+ ret = devm_mdiobus_register(&spi->dev, ax_local->mdiobus);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Could not register MDIO bus\n");
+ return ret;
+ }
+
+ if (netif_msg_probe(ax_local)) {
+ dev_info(&spi->dev, "AX88796C-SPI Configuration:\n");
+ dev_info(&spi->dev, " Compression : %s\n",
+ ax_local->priv_flags & AX_CAP_COMP ? "ON" : "OFF");
+ }
+
+ ndev->irq = spi->irq;
+ ndev->netdev_ops = &ax88796c_netdev_ops;
+ ndev->ethtool_ops = &ax88796c_ethtool_ops;
+ ndev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->needed_headroom = TX_OVERHEAD;
+ ndev->needed_tailroom = TX_EOP_SIZE;
+
+ mutex_lock(&ax_local->spi_lock);
+
+ /* ax88796c gpio reset */
+ ax88796c_hard_reset(ax_local);
+
+ /* Reset AX88796C */
+ ret = ax88796c_soft_reset(ax_local);
+ if (ret < 0) {
+ ret = -ENODEV;
+ mutex_unlock(&ax_local->spi_lock);
+ goto err;
+ }
+ /* Check board revision */
+ temp = AX_READ(&ax_local->ax_spi, P2_CRIR);
+ if ((temp & 0xF) != 0x0) {
+ dev_err(&spi->dev, "spi read failed: %d\n", temp);
+ ret = -ENODEV;
+ mutex_unlock(&ax_local->spi_lock);
+ goto err;
+ }
+
+ /* Reload EEPROM */
+ ax88796c_reload_eeprom(ax_local);
+
+ ax88796c_load_mac_addr(ndev);
+
+ if (netif_msg_probe(ax_local))
+ dev_info(&spi->dev,
+ "irq %d, MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+ ndev->irq,
+ ndev->dev_addr[0], ndev->dev_addr[1],
+ ndev->dev_addr[2], ndev->dev_addr[3],
+ ndev->dev_addr[4], ndev->dev_addr[5]);
+
+ /* Disable power saving */
+ AX_WRITE(&ax_local->ax_spi, (AX_READ(&ax_local->ax_spi, P0_PSCR)
+ & PSCR_PS_MASK) | PSCR_PS_D0, P0_PSCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ INIT_WORK(&ax_local->ax_work, ax88796c_work);
+
+ skb_queue_head_init(&ax_local->tx_wait_q);
+
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
+ ax_local->mdiobus->id, AX88796C_PHY_ID);
+ ax_local->phydev = phy_connect(ax_local->ndev, phy_id,
+ ax88796c_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(ax_local->phydev)) {
+ ret = PTR_ERR(ax_local->phydev);
+ goto err;
+ }
+ ax_local->phydev->irq = PHY_POLL;
+
+ ret = devm_register_netdev(&spi->dev, ndev);
+ if (ret) {
+ dev_err(&spi->dev, "failed to register a network device\n");
+ goto err_phy_dis;
+ }
+
+ netif_info(ax_local, probe, ndev, "%s %s registered\n",
+ dev_driver_string(&spi->dev),
+ dev_name(&spi->dev));
+ phy_attached_info(ax_local->phydev);
+
+ return 0;
+
+err_phy_dis:
+ phy_disconnect(ax_local->phydev);
+err:
+ return ret;
+}
+
+static int ax88796c_remove(struct spi_device *spi)
+{
+ struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
+ struct net_device *ndev = ax_local->ndev;
+
+ phy_disconnect(ndev->phydev);
+
+ netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
+ dev_driver_string(&spi->dev),
+ dev_name(&spi->dev));
+
+ return 0;
+}
+
+static const struct of_device_id ax88796c_dt_ids[] = {
+ { .compatible = "asix,ax88796c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ax88796c_dt_ids);
+
+static const struct spi_device_id asix_id[] = {
+ { "ax88796c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, asix_id);
+
+static struct spi_driver ax88796c_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(ax88796c_dt_ids),
+ },
+ .probe = ax88796c_probe,
+ .remove = ax88796c_remove,
+ .id_table = asix_id,
+};
+
+static __init int ax88796c_spi_init(void)
+{
+ int ret;
+
+ bitmap_zero(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
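+ /* no_regs_list is 35 characters long; registers flagged in
+ * the resulting mask are skipped by ethtool register dumps.
+ */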
+ ret = bitmap_parse(no_regs_list, 35,
+ ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ if (ret) {
+ bitmap_fill(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ pr_err("Invalid bitmap description, masking all registers\n");
+ }
+
+ return spi_register_driver(&ax88796c_spi_driver);
+}
+
+static __exit void ax88796c_spi_exit(void)
+{
+ spi_unregister_driver(&ax88796c_spi_driver);
+}
+
+module_init(ax88796c_spi_init);
+module_exit(ax88796c_spi_exit);
+
+MODULE_AUTHOR("Łukasz Stelmach <l.stelmach@samsung.com>");
+MODULE_DESCRIPTION("ASIX AX88796C SPI Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h
new file mode 100644
index 000000000000..80263c3cef75
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_main.h
@@ -0,0 +1,568 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_MAIN_H
+#define _AX88796C_MAIN_H
+
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+
+#include "ax88796c_spi.h"
+
+/* These identify the driver base version and may not be removed. */
+#define DRV_NAME "ax88796c"
+#define ADP_NAME "ASIX AX88796C SPI Ethernet Adapter"
+
+#define TX_QUEUE_HIGH_WATER 45 /* Tx queue high water mark */
+#define TX_QUEUE_LOW_WATER 20 /* Tx queue low water mark */
+
+#define AX88796C_REGDUMP_LEN 256
+#define AX88796C_PHY_REGDUMP_LEN 14
+#define AX88796C_PHY_ID 0x10
+
+#define TX_OVERHEAD 8
+#define TX_EOP_SIZE 4
+
+#define AX_MCAST_FILTER_SIZE 8
+#define AX_MAX_MCAST 64
+#define AX_MAX_CLK 80000000
+#define TX_HDR_SOP_DICF 0x8000
+#define TX_HDR_SOP_CPHI 0x4000
+#define TX_HDR_SOP_INT 0x2000
+#define TX_HDR_SOP_MDEQ 0x1000
+#define TX_HDR_SOP_PKTLEN 0x07FF
+#define TX_HDR_SOP_SEQNUM 0xF800
+#define TX_HDR_SOP_PKTLENBAR 0x07FF
+
+#define TX_HDR_SEG_FS 0x8000
+#define TX_HDR_SEG_LS 0x4000
+#define TX_HDR_SEG_SEGNUM 0x3800
+#define TX_HDR_SEG_SEGLEN 0x0700
+#define TX_HDR_SEG_EOFST 0xC000
+#define TX_HDR_SEG_SOFST 0x3800
+#define TX_HDR_SEG_SEGLENBAR 0x07FF
+
+#define TX_HDR_EOP_SEQNUM 0xF800
+#define TX_HDR_EOP_PKTLEN 0x07FF
+#define TX_HDR_EOP_SEQNUMBAR 0xF800
+#define TX_HDR_EOP_PKTLENBAR 0x07FF
+
+/* Rx header fields mask */
+#define RX_HDR1_MCBC 0x8000
+#define RX_HDR1_STUFF_PKT 0x4000
+#define RX_HDR1_MII_ERR 0x2000
+#define RX_HDR1_CRC_ERR 0x1000
+#define RX_HDR1_PKT_LEN 0x07FF
+
+#define RX_HDR2_SEQ_NUM 0xF800
+#define RX_HDR2_PKT_LEN_BAR 0x7FFF
+
+#define RX_HDR3_PE 0x8000
+#define RX_HDR3_L3_TYPE_IPV4V6 0x6000
+#define RX_HDR3_L3_TYPE_IP 0x4000
+#define RX_HDR3_L3_TYPE_IPV6 0x2000
+#define RX_HDR3_L4_TYPE_ICMPV6 0x1400
+#define RX_HDR3_L4_TYPE_TCP 0x1000
+#define RX_HDR3_L4_TYPE_IGMP 0x0c00
+#define RX_HDR3_L4_TYPE_ICMP 0x0800
+#define RX_HDR3_L4_TYPE_UDP 0x0400
+#define RX_HDR3_L3_ERR 0x0200
+#define RX_HDR3_L4_ERR 0x0100
+#define RX_HDR3_PRIORITY(x) ((x) << 4)
+#define RX_HDR3_STRIP 0x0008
+#define RX_HDR3_VLAN_ID 0x0007
+
+struct ax88796c_pcpu_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_dropped;
+ u32 tx_dropped;
+ u32 rx_frame_errors;
+ u32 rx_crc_errors;
+};
+
+struct ax88796c_device {
+ struct spi_device *spi;
+ struct net_device *ndev;
+ struct ax88796c_pcpu_stats __percpu *stats;
+
+ struct work_struct ax_work;
+
+ struct mutex spi_lock; /* device access */
+
+ struct sk_buff_head tx_wait_q;
+
+ struct axspi_data ax_spi;
+
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
+
+ int msg_enable;
+
+ u16 seq_num;
+
+ u8 multi_filter[AX_MCAST_FILTER_SIZE];
+
+ int link;
+ int speed;
+ int duplex;
+ int pause;
+ int asym_pause;
+ int flowctrl;
+ #define AX_FC_NONE 0
+ #define AX_FC_RX BIT(0)
+ #define AX_FC_TX BIT(1)
+ #define AX_FC_ANEG BIT(2)
+
+ u32 priv_flags;
+ #define AX_CAP_COMP BIT(0)
+ #define AX_PRIV_FLAGS_MASK (AX_CAP_COMP)
+
+ unsigned long flags;
+ #define EVENT_INTR BIT(0)
+ #define EVENT_TX BIT(1)
+ #define EVENT_SET_MULTI BIT(2)
+
+};
+
+#define to_ax88796c_device(ndev) ((struct ax88796c_device *)netdev_priv(ndev))
+
+enum skb_state {
+ illegal = 0,
+ tx_done,
+ rx_done,
+ rx_err,
+};
+
+struct skb_data {
+ enum skb_state state;
+ size_t len;
+};
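struct skb_data is sized to fit in the skb control buffer, which is presumably where the driver keeps it, as is common for this pattern (the cast below is illustrative, not taken from the patch):

	struct skb_data *entry = (struct skb_data *)skb->cb;

	entry->state = tx_done;
	entry->len = skb->len;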
+
+/* A88796C register definition */
+ /* Definition of PAGE0 */
+#define P0_PSR (0x00)
+ #define PSR_DEV_READY BIT(7)
+ #define PSR_RESET (0 << 15)
+ #define PSR_RESET_CLR BIT(15)
+#define P0_BOR (0x02)
+#define P0_FER (0x04)
+ #define FER_IPALM BIT(0)
+ #define FER_DCRC BIT(1)
+ #define FER_RH3M BIT(2)
+ #define FER_HEADERSWAP BIT(7)
+ #define FER_WSWAP BIT(8)
+ #define FER_BSWAP BIT(9)
+ #define FER_INTHI BIT(10)
+ #define FER_INTLO (0 << 10)
+ #define FER_IRQ_PULL BIT(11)
+ #define FER_RXEN BIT(14)
+ #define FER_TXEN BIT(15)
+#define P0_ISR (0x06)
+ #define ISR_RXPKT BIT(0)
+ #define ISR_MDQ BIT(4)
+ #define ISR_TXT BIT(5)
+ #define ISR_TXPAGES BIT(6)
+ #define ISR_TXERR BIT(8)
+ #define ISR_LINK BIT(9)
+#define P0_IMR (0x08)
+ #define IMR_RXPKT BIT(0)
+ #define IMR_MDQ BIT(4)
+ #define IMR_TXT BIT(5)
+ #define IMR_TXPAGES BIT(6)
+ #define IMR_TXERR BIT(8)
+ #define IMR_LINK BIT(9)
+ #define IMR_MASKALL (0xFFFF)
+ #define IMR_DEFAULT (IMR_TXERR)
+#define P0_WFCR (0x0A)
+ #define WFCR_PMEIND BIT(0) /* PME indication */
+ #define WFCR_PMETYPE BIT(1) /* PME I/O type */
+ #define WFCR_PMEPOL BIT(2) /* PME polarity */
+ #define WFCR_PMERST BIT(3) /* Reset PME */
+ #define WFCR_SLEEP BIT(4) /* Enable sleep mode */
+ #define WFCR_WAKEUP BIT(5) /* Enable wakeup mode */
+ #define WFCR_WAITEVENT BIT(6) /* Reserved */
+ #define WFCR_CLRWAKE BIT(7) /* Clear wakeup */
+ #define WFCR_LINKCH BIT(8) /* Enable link change */
+ #define WFCR_MAGICP BIT(9) /* Enable magic packet */
+ #define WFCR_WAKEF BIT(10) /* Enable wakeup frame */
+ #define WFCR_PMEEN BIT(11) /* Enable PME pin */
+ #define WFCR_LINKCHS BIT(12) /* Link change status */
+ #define WFCR_MAGICPS BIT(13) /* Magic packet status */
+ #define WFCR_WAKEFS BIT(14) /* Wakeup frame status */
+ #define WFCR_PMES BIT(15) /* PME pin status */
+#define P0_PSCR (0x0C)
+ #define PSCR_PS_MASK (0xFFF0)
+ #define PSCR_PS_D0 (0)
+ #define PSCR_PS_D1 BIT(0)
+ #define PSCR_PS_D2 BIT(1)
+ #define PSCR_FPS BIT(3) /* Enable fiber mode PS */
+ #define PSCR_SWPS BIT(4) /* Enable software */
+ /* PS control */
+ #define PSCR_WOLPS BIT(5) /* Enable WOL PS */
+ #define PSCR_SWWOL BIT(6) /* Enable software select */
+ /* WOL PS */
+ #define PSCR_PHYOSC BIT(7) /* Internal PHY OSC control */
+ #define PSCR_FOFEF BIT(8) /* Force PHY generate FEF */
+ #define PSCR_FOF BIT(9) /* Force PHY in fiber mode */
+ #define PSCR_PHYPD BIT(10) /* PHY power down. */
+ /* Active high */
+ #define PSCR_PHYRST BIT(11) /* PHY reset signal. */
+ /* Active low */
+ #define PSCR_PHYCSIL BIT(12) /* PHY cable energy detect */
+ #define PSCR_PHYCOFF BIT(13) /* PHY cable off */
+ #define PSCR_PHYLINK BIT(14) /* PHY link status */
+ #define PSCR_EEPOK BIT(15) /* EEPROM load complete */
+#define P0_MACCR (0x0E)
+ #define MACCR_RXEN BIT(0) /* Enable RX */
+ #define MACCR_DUPLEX_FULL BIT(1) /* 1: Full, 0: Half */
+ #define MACCR_SPEED_100 BIT(2) /* 1: 100Mbps, 0: 10Mbps */
+ #define MACCR_RXFC_ENABLE BIT(3)
+ #define MACCR_RXFC_MASK 0xFFF7
+ #define MACCR_TXFC_ENABLE BIT(4)
+ #define MACCR_TXFC_MASK 0xFFEF
+ #define MACCR_PSI BIT(6) /* Software Cable-Off */
+ /* Power Saving Interrupt */
+ #define MACCR_PF BIT(7)
+ #define MACCR_PMM_BITS 8
+ #define MACCR_PMM_MASK (0x1F00)
+ #define MACCR_PMM_RESET BIT(8)
+ #define MACCR_PMM_WAIT (2 << 8)
+ #define MACCR_PMM_READY (3 << 8)
+ #define MACCR_PMM_D1 (4 << 8)
+ #define MACCR_PMM_D2 (5 << 8)
+ #define MACCR_PMM_WAKE (7 << 8)
+ #define MACCR_PMM_D1_WAKE (8 << 8)
+ #define MACCR_PMM_D2_WAKE (9 << 8)
+ #define MACCR_PMM_SLEEP (10 << 8)
+ #define MACCR_PMM_PHY_RESET (11 << 8)
+ #define MACCR_PMM_SOFT_D1 (16 << 8)
+ #define MACCR_PMM_SOFT_D2 (17 << 8)
+#define P0_TFBFCR (0x10)
+ #define TFBFCR_SCHE_FREE_PAGE 0xE07F
+ #define TFBFCR_FREE_PAGE_BITS 0x07
+ #define TFBFCR_FREE_PAGE_LATCH BIT(6)
+ #define TFBFCR_SET_FREE_PAGE(x) (((x) & 0x3F) << TFBFCR_FREE_PAGE_BITS)
+ #define TFBFCR_TX_PAGE_SET BIT(13)
+ #define TFBFCR_MANU_ENTX BIT(15)
+ #define TX_FREEBUF_MASK 0x003F
+ #define TX_DPTSTART 0x4000
+
+#define P0_TSNR (0x12)
+ #define TXNR_TXB_ERR BIT(5)
+ #define TXNR_TXB_IDLE BIT(6)
+ #define TSNR_PKT_CNT(x) (((x) & 0x3F) << 8)
+ #define TXNR_TXB_REINIT BIT(14)
+ #define TSNR_TXB_START BIT(15)
+#define P0_RTDPR (0x14)
+#define P0_RXBCR1 (0x16)
+ #define RXBCR1_RXB_DISCARD BIT(14)
+ #define RXBCR1_RXB_START BIT(15)
+#define P0_RXBCR2 (0x18)
+ #define RXBCR2_PKT_MASK (0xFF)
+ #define RXBCR2_RXPC_MASK (0x7F)
+ #define RXBCR2_RXB_READY BIT(13)
+ #define RXBCR2_RXB_IDLE BIT(14)
+ #define RXBCR2_RXB_REINIT BIT(15)
+#define P0_RTWCR (0x1A)
+ #define RTWCR_RXWC_MASK (0x3FFF)
+ #define RTWCR_RX_LATCH BIT(15)
+#define P0_RCPHR (0x1C)
+
+ /* Definition of PAGE1 */
+#define P1_RPPER (0x22)
+ #define RPPER_RXEN BIT(0)
+#define P1_MRCR (0x28)
+#define P1_MDR (0x2A)
+#define P1_RMPR (0x2C)
+#define P1_TMPR (0x2E)
+#define P1_RXBSPCR (0x30)
+ #define RXBSPCR_STUF_WORD_CNT(x) (((x) & 0x7000) >> 12)
+ #define RXBSPCR_STUF_ENABLE BIT(15)
+#define P1_MCR (0x32)
+ #define MCR_SBP BIT(8)
+ #define MCR_SM BIT(9)
+ #define MCR_CRCENLAN BIT(11)
+ #define MCR_STP BIT(12)
+ /* Definition of PAGE2 */
+#define P2_CIR (0x42)
+#define P2_PCR (0x44)
+ #define PCR_POLL_EN BIT(0)
+ #define PCR_POLL_FLOWCTRL BIT(1)
+ #define PCR_POLL_BMCR BIT(2)
+ #define PCR_PHYID(x) ((x) << 8)
+#define P2_PHYSR (0x46)
+#define P2_MDIODR (0x48)
+#define P2_MDIOCR (0x4A)
+ #define MDIOCR_RADDR(x) ((x) & 0x1F)
+ #define MDIOCR_FADDR(x) (((x) & 0x1F) << 8)
+ #define MDIOCR_VALID BIT(13)
+ #define MDIOCR_READ BIT(14)
+ #define MDIOCR_WRITE BIT(15)
+#define P2_LCR0 (0x4C)
+ #define LCR_LED0_EN BIT(0)
+ #define LCR_LED0_100MODE BIT(1)
+ #define LCR_LED0_DUPLEX BIT(2)
+ #define LCR_LED0_LINK BIT(3)
+ #define LCR_LED0_ACT BIT(4)
+ #define LCR_LED0_COL BIT(5)
+ #define LCR_LED0_10MODE BIT(6)
+ #define LCR_LED0_DUPCOL BIT(7)
+ #define LCR_LED1_EN BIT(8)
+ #define LCR_LED1_100MODE BIT(9)
+ #define LCR_LED1_DUPLEX BIT(10)
+ #define LCR_LED1_LINK BIT(11)
+ #define LCR_LED1_ACT BIT(12)
+ #define LCR_LED1_COL BIT(13)
+ #define LCR_LED1_10MODE BIT(14)
+ #define LCR_LED1_DUPCOL BIT(15)
+#define P2_LCR1 (0x4E)
+ #define LCR_LED2_MASK (0xFF00)
+ #define LCR_LED2_EN BIT(0)
+ #define LCR_LED2_100MODE BIT(1)
+ #define LCR_LED2_DUPLEX BIT(2)
+ #define LCR_LED2_LINK BIT(3)
+ #define LCR_LED2_ACT BIT(4)
+ #define LCR_LED2_COL BIT(5)
+ #define LCR_LED2_10MODE BIT(6)
+ #define LCR_LED2_DUPCOL BIT(7)
+#define P2_IPGCR (0x50)
+#define P2_CRIR (0x52)
+#define P2_FLHWCR (0x54)
+#define P2_RXCR (0x56)
+ #define RXCR_PRO BIT(0)
+ #define RXCR_AMALL BIT(1)
+ #define RXCR_SEP BIT(2)
+ #define RXCR_AB BIT(3)
+ #define RXCR_AM BIT(4)
+ #define RXCR_AP BIT(5)
+ #define RXCR_ARP BIT(6)
+#define P2_JLCR (0x58)
+#define P2_MPLR (0x5C)
+
+ /* Definition of PAGE3 */
+#define P3_MACASR0 (0x62)
+ #define P3_MACASR(x) (P3_MACASR0 + 2 * (x))
+ #define MACASR_LOWBYTE_MASK 0x00FF
+ #define MACASR_HIGH_BITS 0x08
+#define P3_MACASR1 (0x64)
+#define P3_MACASR2 (0x66)
+#define P3_MFAR01 (0x68)
+#define P3_MFAR_BASE (0x68)
+ #define P3_MFAR(x) (P3_MFAR_BASE + 2 * (x))
+
+#define P3_MFAR23 (0x6A)
+#define P3_MFAR45 (0x6C)
+#define P3_MFAR67 (0x6E)
+#define P3_VID0FR (0x70)
+#define P3_VID1FR (0x72)
+#define P3_EECSR (0x74)
+#define P3_EEDR (0x76)
+#define P3_EECR (0x78)
+ #define EECR_ADDR_MASK (0x00FF)
+ #define EECR_READ_ACT BIT(8)
+ #define EECR_WRITE_ACT BIT(9)
+ #define EECR_WRITE_DISABLE BIT(10)
+ #define EECR_WRITE_ENABLE BIT(11)
+ #define EECR_EE_READY BIT(13)
+ #define EECR_RELOAD BIT(14)
+ #define EECR_RESET BIT(15)
+#define P3_TPCR (0x7A)
+ #define TPCR_PATT_MASK (0xFF)
+ #define TPCR_RAND_PKT_EN BIT(14)
+ #define TPCR_FIXED_PKT_EN BIT(15)
+#define P3_TPLR (0x7C)
+ /* Definition of PAGE4 */
+#define P4_SPICR (0x8A)
+ #define SPICR_RCEN BIT(0)
+ #define SPICR_QCEN BIT(1)
+ #define SPICR_RBRE BIT(3)
+ #define SPICR_PMM BIT(4)
+ #define SPICR_LOOPBACK BIT(8)
+ #define SPICR_CORE_RES_CLR BIT(10)
+ #define SPICR_SPI_RES_CLR BIT(11)
+#define P4_SPIISMR (0x8C)
+
+#define P4_COERCR0 (0x92)
+ #define COERCR0_RXIPCE BIT(0)
+ #define COERCR0_RXIPVE BIT(1)
+ #define COERCR0_RXV6PE BIT(2)
+ #define COERCR0_RXTCPE BIT(3)
+ #define COERCR0_RXUDPE BIT(4)
+ #define COERCR0_RXICMP BIT(5)
+ #define COERCR0_RXIGMP BIT(6)
+ #define COERCR0_RXICV6 BIT(7)
+
+ #define COERCR0_RXTCPV6 BIT(8)
+ #define COERCR0_RXUDPV6 BIT(9)
+ #define COERCR0_RXICMV6 BIT(10)
+ #define COERCR0_RXIGMV6 BIT(11)
+ #define COERCR0_RXICV6V6 BIT(12)
+
+ #define COERCR0_DEFAULT (COERCR0_RXIPCE | COERCR0_RXV6PE | \
+ COERCR0_RXTCPE | COERCR0_RXUDPE | \
+ COERCR0_RXTCPV6 | COERCR0_RXUDPV6)
+#define P4_COERCR1 (0x94)
+ #define COERCR1_IPCEDP BIT(0)
+ #define COERCR1_IPVEDP BIT(1)
+ #define COERCR1_V6VEDP BIT(2)
+ #define COERCR1_TCPEDP BIT(3)
+ #define COERCR1_UDPEDP BIT(4)
+ #define COERCR1_ICMPDP BIT(5)
+ #define COERCR1_IGMPDP BIT(6)
+ #define COERCR1_ICV6DP BIT(7)
+ #define COERCR1_RX64TE BIT(8)
+ #define COERCR1_RXPPPE BIT(9)
+ #define COERCR1_TCP6DP BIT(10)
+ #define COERCR1_UDP6DP BIT(11)
+ #define COERCR1_IC6DP BIT(12)
+ #define COERCR1_IG6DP BIT(13)
+ #define COERCR1_ICV66DP BIT(14)
+ #define COERCR1_RPCE BIT(15)
+
+ #define COERCR1_DEFAULT (COERCR1_RXPPPE)
+
+#define P4_COETCR0 (0x96)
+ #define COETCR0_TXIP BIT(0)
+ #define COETCR0_TXTCP BIT(1)
+ #define COETCR0_TXUDP BIT(2)
+ #define COETCR0_TXICMP BIT(3)
+ #define COETCR0_TXIGMP BIT(4)
+ #define COETCR0_TXICV6 BIT(5)
+ #define COETCR0_TXTCPV6 BIT(8)
+ #define COETCR0_TXUDPV6 BIT(9)
+ #define COETCR0_TXICMV6 BIT(10)
+ #define COETCR0_TXIGMV6 BIT(11)
+ #define COETCR0_TXICV6V6 BIT(12)
+
+ #define COETCR0_DEFAULT (COETCR0_TXIP | COETCR0_TXTCP | \
+ COETCR0_TXUDP | COETCR0_TXTCPV6 | \
+ COETCR0_TXUDPV6)
+#define P4_COETCR1 (0x98)
+ #define COETCR1_TX64TE BIT(0)
+ #define COETCR1_TXPPPE BIT(1)
+
+#define P4_COECEDR (0x9A)
+#define P4_L2CECR (0x9C)
+
+ /* Definition of PAGE5 */
+#define P5_WFTR (0xA2)
+ #define WFTR_2MS (0x01)
+ #define WFTR_4MS (0x02)
+ #define WFTR_8MS (0x03)
+ #define WFTR_16MS (0x04)
+ #define WFTR_32MS (0x05)
+ #define WFTR_64MS (0x06)
+ #define WFTR_128MS (0x07)
+ #define WFTR_256MS (0x08)
+ #define WFTR_512MS (0x09)
+ #define WFTR_1024MS (0x0A)
+ #define WFTR_2048MS (0x0B)
+ #define WFTR_4096MS (0x0C)
+ #define WFTR_8192MS (0x0D)
+ #define WFTR_16384MS (0x0E)
+ #define WFTR_32768MS (0x0F)
+#define P5_WFCCR (0xA4)
+#define P5_WFCR03 (0xA6)
+ #define WFCR03_F0_EN BIT(0)
+ #define WFCR03_F1_EN BIT(4)
+ #define WFCR03_F2_EN BIT(8)
+ #define WFCR03_F3_EN BIT(12)
+#define P5_WFCR47 (0xA8)
+ #define WFCR47_F4_EN BIT(0)
+ #define WFCR47_F5_EN BIT(4)
+ #define WFCR47_F6_EN BIT(8)
+ #define WFCR47_F7_EN BIT(12)
+#define P5_WF0BMR0 (0xAA)
+#define P5_WF0BMR1 (0xAC)
+#define P5_WF0CR (0xAE)
+#define P5_WF0OBR (0xB0)
+#define P5_WF1BMR0 (0xB2)
+#define P5_WF1BMR1 (0xB4)
+#define P5_WF1CR (0xB6)
+#define P5_WF1OBR (0xB8)
+#define P5_WF2BMR0 (0xBA)
+#define P5_WF2BMR1 (0xBC)
+
+ /* Definition of PAGE6 */
+#define P6_WF2CR (0xC2)
+#define P6_WF2OBR (0xC4)
+#define P6_WF3BMR0 (0xC6)
+#define P6_WF3BMR1 (0xC8)
+#define P6_WF3CR (0xCA)
+#define P6_WF3OBR (0xCC)
+#define P6_WF4BMR0 (0xCE)
+#define P6_WF4BMR1 (0xD0)
+#define P6_WF4CR (0xD2)
+#define P6_WF4OBR (0xD4)
+#define P6_WF5BMR0 (0xD6)
+#define P6_WF5BMR1 (0xD8)
+#define P6_WF5CR (0xDA)
+#define P6_WF5OBR (0xDC)
+
+/* Definition of PAGE7 */
+#define P7_WF6BMR0 (0xE2)
+#define P7_WF6BMR1 (0xE4)
+#define P7_WF6CR (0xE6)
+#define P7_WF6OBR (0xE8)
+#define P7_WF7BMR0 (0xEA)
+#define P7_WF7BMR1 (0xEC)
+#define P7_WF7CR (0xEE)
+#define P7_WF7OBR (0xF0)
+#define P7_WFR01 (0xF2)
+#define P7_WFR23 (0xF4)
+#define P7_WFR45 (0xF6)
+#define P7_WFR67 (0xF8)
+#define P7_WFPC0 (0xFA)
+#define P7_WFPC1 (0xFC)
+
+/* Tx headers structure */
+struct tx_sop_header {
+ /* bit 15-11: flags, bit 10-0: packet length */
+ u16 flags_len;
+ /* bit 15-11: sequence number, bit 10-0: packet length bar */
+ u16 seq_lenbar;
+};
+
+struct tx_segment_header {
+ /* bit 15-14: flags, bit 13-11: segment number */
+ /* bit 10-0: segment length */
+ u16 flags_seqnum_seglen;
+ /* bit 15-14: end offset, bit 13-11: start offset */
+ /* bit 10-0: segment length bar */
+ u16 eo_so_seglenbar;
+};
+
+struct tx_eop_header {
+ /* bit 15-11: sequence number, bit 10-0: packet length */
+ u16 seq_len;
+ /* bit 15-11: sequence number bar, bit 10-0: packet length bar */
+ u16 seqbar_lenbar;
+};
+
+struct tx_pkt_info {
+ struct tx_sop_header sop;
+ struct tx_segment_header seg;
+ struct tx_eop_header eop;
+ u16 pkt_len;
+ u16 seq_num;
+};
+
+/* Rx headers structure */
+struct rx_header {
+ u16 flags_len;
+ u16 seq_lenbar;
+ u16 flags;
+};
+
+extern unsigned long ax88796c_no_regs_mask[];
+
+#endif /* #ifndef _AX88796C_MAIN_H */
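Reading the TX_HDR_*/RX_HDR* masks earlier in the file together with these structures: each 16-bit header word packs flags in the high bits and an 11-bit length in the low bits, and each *BAR field carries the bit-inverted copy of its counterpart, presumably so the MAC can sanity-check headers. A sketch of filling a SOP header under that assumption (ax88796c_fill_sop is illustrative, not code from the patch):

	static void ax88796c_fill_sop(struct tx_sop_header *sop,
				      u16 pkt_len, u16 seq_num)
	{
		sop->flags_len = pkt_len & TX_HDR_SOP_PKTLEN;
		sop->seq_lenbar = ((seq_num << 11) & TX_HDR_SOP_SEQNUM) |
				  (~pkt_len & TX_HDR_SOP_PKTLENBAR);
	}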
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c
new file mode 100644
index 000000000000..94df4f96d2be
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_spi.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include <linux/string.h>
+#include <linux/spi/spi.h>
+
+#include "ax88796c_spi.h"
+
+const u8 ax88796c_rx_cmd_buf[5] = {AX_SPICMD_READ_RXQ, 0xFF, 0xFF, 0xFF, 0xFF};
+const u8 ax88796c_tx_cmd_buf[4] = {AX_SPICMD_WRITE_TXQ, 0xFF, 0xFF, 0xFF};
+
+/* driver bus management functions */
+int axspi_wakeup(struct axspi_data *ax_spi)
+{
+ int ret;
+
+ ax_spi->cmd_buf[0] = AX_SPICMD_EXIT_PWD; /* OP */
+ ret = spi_write(ax_spi->spi, ax_spi->cmd_buf, 1);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ return ret;
+}
+
+int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status)
+{
+ int ret;
+
+ /* OP */
+ ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS;
+ ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ else
+ le16_to_cpus(&status->isr);
+
+ return ret;
+}
+
+int axspi_read_rxq(struct axspi_data *ax_spi, void *data, int len)
+{
+ struct spi_transfer *xfer = ax_spi->spi_rx_xfer;
+ int ret;
+
+ memcpy(ax_spi->cmd_buf, ax88796c_rx_cmd_buf, 5);
+
+ xfer->tx_buf = ax_spi->cmd_buf;
+ xfer->rx_buf = NULL;
+ xfer->len = ax_spi->comp ? 2 : 5;
+ xfer->bits_per_word = 8;
+ spi_message_add_tail(xfer, &ax_spi->rx_msg);
+
+ xfer++;
+ xfer->rx_buf = data;
+ xfer->tx_buf = NULL;
+ xfer->len = len;
+ xfer->bits_per_word = 8;
+ spi_message_add_tail(xfer, &ax_spi->rx_msg);
+ ret = spi_sync(ax_spi->spi, &ax_spi->rx_msg);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+
+ return ret;
+}
+
+int axspi_write_txq(const struct axspi_data *ax_spi, void *data, int len)
+{
+ return spi_write(ax_spi->spi, data, len);
+}
+
+u16 axspi_read_reg(struct axspi_data *ax_spi, u8 reg)
+{
+ int ret;
+ int len = ax_spi->comp ? 3 : 4;
+
+ ax_spi->cmd_buf[0] = AX_SPICMD_READ_REG; /* OP code: read register */
+ ax_spi->cmd_buf[1] = reg; /* register address */
+ ax_spi->cmd_buf[2] = 0xFF; /* dummy cycle */
+ ax_spi->cmd_buf[3] = 0xFF; /* dummy cycle */
+ ret = spi_write_then_read(ax_spi->spi,
+ ax_spi->cmd_buf, len,
+ ax_spi->rx_buf, 2);
+ if (ret) {
+ dev_err(&ax_spi->spi->dev,
+ "%s() failed: ret = %d\n", __func__, ret);
+ return 0xFFFF;
+ }
+
+ le16_to_cpus((u16 *)ax_spi->rx_buf);
+
+ return *(u16 *)ax_spi->rx_buf;
+}
+
+int axspi_write_reg(struct axspi_data *ax_spi, u8 reg, u16 value)
+{
+ int ret;
+
+ memset(ax_spi->cmd_buf, 0, sizeof(ax_spi->cmd_buf));
+ ax_spi->cmd_buf[0] = AX_SPICMD_WRITE_REG; /* OP code: write register */
+ ax_spi->cmd_buf[1] = reg; /* register address */
+ ax_spi->cmd_buf[2] = value;
+ ax_spi->cmd_buf[3] = value >> 8;
+
+ ret = spi_write(ax_spi->spi, ax_spi->cmd_buf, 4);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ return ret;
+}
+
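As a usage sketch of axspi_read_reg() above (register names from ax88796c_main.h; ax88796c_wait_ready is illustrative, not part of the patch), a caller would poll the page-0 power status register until the chip reports ready:

	static int ax88796c_wait_ready(struct axspi_data *ax_spi)
	{
		int i;

		for (i = 0; i < 10; i++) {
			if (axspi_read_reg(ax_spi, P0_PSR) & PSR_DEV_READY)
				return 0;
			usleep_range(1000, 2000);
		}
		return -ETIMEDOUT;
	}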
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.h b/drivers/net/ethernet/asix/ax88796c_spi.h
new file mode 100644
index 000000000000..5bcf91f603fb
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_spi.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_SPI_H
+#define _AX88796C_SPI_H
+
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/* Definition of SPI command */
+#define AX_SPICMD_WRITE_TXQ 0x02
+#define AX_SPICMD_READ_REG 0x03
+#define AX_SPICMD_READ_STATUS 0x05
+#define AX_SPICMD_READ_RXQ 0x0B
+#define AX_SPICMD_BIDIR_WRQ 0xB2
+#define AX_SPICMD_WRITE_REG 0xD8
+#define AX_SPICMD_EXIT_PWD 0xAB
+
+extern const u8 ax88796c_rx_cmd_buf[];
+extern const u8 ax88796c_tx_cmd_buf[];
+
+struct axspi_data {
+ struct spi_device *spi;
+ struct spi_message rx_msg;
+ struct spi_transfer spi_rx_xfer[2];
+ u8 cmd_buf[6];
+ u8 rx_buf[6];
+ u8 comp;
+};
+
+struct spi_status {
+ u16 isr;
+ u8 status;
+# define AX_STATUS_READY 0x80
+};
+
+int axspi_read_rxq(struct axspi_data *ax_spi, void *data, int len);
+int axspi_write_txq(const struct axspi_data *ax_spi, void *data, int len);
+u16 axspi_read_reg(struct axspi_data *ax_spi, u8 reg);
+int axspi_write_reg(struct axspi_data *ax_spi, u8 reg, u16 value);
+int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status);
+int axspi_wakeup(struct axspi_data *ax_spi);
+
+static inline u16 AX_READ(struct axspi_data *ax_spi, u8 offset)
+{
+ return axspi_read_reg(ax_spi, offset);
+}
+
+static inline int AX_WRITE(struct axspi_data *ax_spi, u16 value, u8 offset)
+{
+ return axspi_write_reg(ax_spi, offset, value);
+}
+
+static inline int AX_READ_STATUS(struct axspi_data *ax_spi,
+ struct spi_status *status)
+{
+ return axspi_read_status(ax_spi, status);
+}
+
+static inline int AX_WAKEUP(struct axspi_data *ax_spi)
+{
+ return axspi_wakeup(ax_spi);
+}
+#endif /* _AX88796C_SPI_H */
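The AX_READ()/AX_WRITE() wrappers let the MAC-level code do plain read-modify-write sequences without touching the SPI transport directly, e.g. (illustrative, with register names from ax88796c_main.h):

	u16 maccr = AX_READ(ax_spi, P0_MACCR);

	AX_WRITE(ax_spi, maccr | MACCR_RXEN, P0_MACCR);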
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 02ae98aabf91..88d2ab748399 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1082,14 +1082,12 @@ static void ag71xx_mac_validate(struct phylink_config *config,
phylink_set(mask, 1000baseX_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
return;
unsupported:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
}
static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
@@ -1968,10 +1966,10 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc->ctrl = 0;
ag->stop_desc->next = (u32)ag->stop_desc_dma;
- err = of_get_mac_address(np, ndev->dev_addr);
+ err = of_get_ethdev_address(np, ndev);
if (err) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
}
err = of_get_phy_mode(np, &ag->phy_if_mode);
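The two ag71xx conversions above are mechanical: the linkmode_*() helpers from <linux/linkmode.h> wrap the same bitmap operations with the link-mode bitmap length baked in, roughly:

	static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
					const unsigned long *b)
	{
		bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
	}

	static inline void linkmode_zero(unsigned long *dst)
	{
		bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
	}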
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4ea157efca86..4ad3fc72e74e 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -607,7 +607,7 @@ static int alx_set_mac_address(struct net_device *netdev, void *data)
if (netdev->addr_assign_type & NET_ADDR_RANDOM)
netdev->addr_assign_type ^= NET_ADDR_RANDOM;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
alx_set_macaddr(hw, hw->mac_addr);
@@ -1832,7 +1832,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
- memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, hw->mac_addr);
memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
hw->mdio.prtad = 0;
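eth_hw_addr_set(), used in this and many of the following hunks, is the write accessor for netdev->dev_addr introduced so that the field can eventually become const; at this point in the series it is essentially (see include/linux/etherdevice.h):

	static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
	{
		memcpy(dev->dev_addr, addr, ETH_ALEN);
	}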
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 3b51b172b317..da595242bc13 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -482,7 +482,7 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
@@ -1847,7 +1847,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
buffer_info->skb = NULL;
buffer_info->length = 0;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
- netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
+ netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed");
break;
}
buffer_info->dma = mapping;
@@ -2662,10 +2662,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable device (incl. PCI PM wakeup and hotplug setup) */
err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
/*
* The atl1c chip can DMA to 64-bit addresses, but it uses a single
@@ -2769,7 +2767,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* got a random MAC address, set NET_ADDR_RANDOM to netdev */
netdev->addr_assign_type = NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (netif_msg_probe(adapter))
dev_dbg(&pdev->dev, "mac address : %pM\n",
adapter->hw.mac_addr);
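dev_err_probe() lets the error path above collapse to one line because it both logs and passes the error through; a rough expansion of the new call (the exact message formatting in drivers/base/core.c differs slightly):

	err = pci_enable_device_mem(pdev);
	if (err) {
		if (err == -EPROBE_DEFER)	/* deferral is logged quietly */
			dev_dbg(&pdev->dev, "cannot enable PCI device\n");
		else
			dev_err(&pdev->dev, "cannot enable PCI device\n");
		return err;
	}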
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 753973ac922e..56e5f440e666 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -374,7 +374,7 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl1e_hw_set_mac_addr(&adapter->hw);
@@ -2297,10 +2297,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err = 0;
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
/*
* The atl1e chip can DMA to 64-bit addresses, but it uses a single
@@ -2392,7 +2390,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 68f6c0bbd945..b4c9e805e981 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3027,7 +3027,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* mark random mac */
netdev->addr_assign_type = NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index b69298ddb647..bbc4d7b08a49 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -931,7 +931,7 @@ static int atl2_set_mac(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl2_set_mac_addr(&adapter->hw);
@@ -1405,7 +1405,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* copy the MAC address out of the EEPROM */
atl2_read_mac_addr(&adapter->hw);
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
goto err_eeprom;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 0941d07d0833..e8cfbf4ff1b5 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -69,7 +69,7 @@ static int atlx_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atlx_set_mac_addr(&adapter->hw);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index fa784953c601..969591bbc066 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -218,7 +218,8 @@ static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index
data[1] = (val >> 0) & 0xFF;
}
-static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
+static inline void __b44_cam_write(struct b44 *bp,
+ const unsigned char *data, int index)
{
u32 val;
@@ -1200,7 +1201,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
&bp->rx_ring_dma, gfp);
if (!bp->rx_ring) {
- /* Allocation may have failed due to pci_alloc_consistent
+ /* Allocation may have failed due to dma_alloc_coherent
insisting on use of GFP_DMA, which is more restrictive
than necessary... */
struct dma_desc *rx_ring;
@@ -1383,7 +1384,7 @@ static int b44_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&bp->lock);
@@ -1507,7 +1508,8 @@ static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
}
}
-static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
+static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
+ int offset)
{
int magicsync = 6;
int k, j, len = offset;
@@ -2171,7 +2173,7 @@ static int b44_get_invariants(struct b44 *bp)
* valid PHY address. */
bp->phy_addr &= 0x1F;
- memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, addr);
if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
pr_err("Invalid MAC address found in EEPROM\n");
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 02a569500234..7cc5213c575a 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -170,7 +170,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
goto err_free_buf_descs;
}
- ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL);
+ ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL);
if (!ring->slots)
goto err_free_buf_descs;
@@ -715,7 +715,7 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
return err;
SET_NETDEV_DEV(netdev, &pdev->dev);
- err = of_get_mac_address(dev->of_node, netdev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, netdev);
if (err)
eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
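The kzalloc-to-kcalloc change above is an overflow guard, not just style: kcalloc() returns NULL when n * size would overflow instead of allocating a short buffer. Its definition is simply (include/linux/slab.h):

	static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
	{
		return kmalloc_array(n, size, flags | __GFP_ZERO);
	}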
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d56886300ecf..a568994a03a6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -670,7 +670,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
u32 val;
priv = netdev_priv(dev);
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
/* use perfect match register 0 to store my mac address */
val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
@@ -1762,7 +1762,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
pd = dev_get_platdata(&pdev->dev);
if (pd) {
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac_addr);
priv->has_phy = pd->has_phy;
priv->phy_id = pd->phy_id;
priv->has_phy_interrupt = pd->has_phy_interrupt;
@@ -2665,7 +2665,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
pd = dev_get_platdata(&pdev->dev);
if (pd) {
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac_addr);
memcpy(priv->used_ports, pd->used_ports,
sizeof(pd->used_ports));
priv->num_ports = pd->num_ports;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 7fa1b695400d..40933bf5a710 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1818,7 +1818,7 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
- unsigned char *addr)
+ const unsigned char *addr)
{
u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
addr[3];
@@ -1850,7 +1850,7 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/* interface is disabled, changes to MAC will be reflected on next
* open call
@@ -2555,7 +2555,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
}
/* Initialize netdevice members */
- ret = of_get_mac_address(dn, dev->dev_addr);
+ ret = of_get_ethdev_address(dn, dev);
if (ret) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 6ce80cbcb48e..086739e4f40a 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -10,6 +10,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
+#include <linux/of_mdio.h>
#include "bgmac.h"
static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
@@ -211,6 +212,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
{
struct bcma_device *core = bgmac->bcma.core;
struct mii_bus *mii_bus;
+ struct device_node *np;
int err;
mii_bus = mdiobus_alloc();
@@ -229,7 +231,9 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
mii_bus->parent = &core->dev;
mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
- err = mdiobus_register(mii_bus);
+ np = of_get_child_by_name(core->dev.of_node, "mdio");
+
+ err = of_mdiobus_register(mii_bus, np);
if (err) {
dev_err(&core->dev, "Registration of mii bus failed\n");
goto err_free_bus;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 9513cfb5ba58..e6f48786949c 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "bgmac.h"
@@ -86,17 +87,28 @@ static int bcma_phy_connect(struct bgmac *bgmac)
struct phy_device *phy_dev;
char bus_id[MII_BUS_ID_SIZE + 3];
+ /* DT info should be the most accurate */
+ phy_dev = of_phy_get_and_connect(bgmac->net_dev, bgmac->dev->of_node,
+ bgmac_adjust_link);
+ if (phy_dev)
+ return 0;
+
/* Connect to the PHY */
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
- bgmac->phyaddr);
- phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connection failed\n");
- return PTR_ERR(phy_dev);
+ if (bgmac->mii_bus && bgmac->phyaddr != BGMAC_PHY_NOREGS) {
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
+ bgmac->phyaddr);
+ phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ dev_err(bgmac->dev, "PHY connection failed\n");
+ return PTR_ERR(phy_dev);
+ }
+
+ return 0;
}
- return 0;
+ /* Assume a fixed link to the switch port */
+ return bgmac_phy_connect_direct(bgmac);
}
static const struct bcma_device_id bgmac_bcma_tbl[] = {
@@ -128,7 +140,7 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
- err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
+ err = of_get_ethdev_address(bgmac->dev->of_node, bgmac->net_dev);
if (err == -EPROBE_DEFER)
return err;
@@ -150,7 +162,7 @@ static int bgmac_probe(struct bcma_device *core)
err = -ENOTSUPP;
goto err;
}
- ether_addr_copy(bgmac->net_dev->dev_addr, mac);
+ eth_hw_addr_set(bgmac->net_dev, mac);
}
/* On BCM4706 we need common core to access PHY */
@@ -297,10 +309,7 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = bcma_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32;
- if (bgmac->mii_bus)
- bgmac->phy_connect = bcma_phy_connect;
- else
- bgmac->phy_connect = bgmac_phy_connect_direct;
+ bgmac->phy_connect = bcma_phy_connect;
err = bgmac_enet_probe(bgmac);
if (err)
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index df8ff839cc62..c6412c523637 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -191,7 +191,7 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->dev = &pdev->dev;
bgmac->dma_dev = &pdev->dev;
- ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);
+ ret = of_get_ethdev_address(np, bgmac->net_dev);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index fe4d99abd548..7b525c65bacb 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -768,7 +768,7 @@ static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
udelay(2);
}
-static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
+static void bgmac_write_mac_address(struct bgmac *bgmac, const u8 *addr)
{
u32 tmp;
@@ -1241,7 +1241,7 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
if (ret < 0)
return ret;
- ether_addr_copy(net_dev->dev_addr, sa->sa_data);
+ eth_hw_addr_set(net_dev, sa->sa_data);
bgmac_write_mac_address(bgmac, net_dev->dev_addr);
eth_commit_mac_addr_change(net_dev, addr);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8c83973adca5..babc955ba64e 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2704,7 +2704,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
}
static void
-bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
+bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
{
u32 val;
@@ -7910,7 +7910,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev))
bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
@@ -8574,7 +8574,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (is_kdump_kernel())
bnx2_wait_dma_complete(bp);
- memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, bp->mac_addr);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e789430f407c..2b06d78baa08 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1994,7 +1994,7 @@ int bnx2x_idle_chk(struct bnx2x *bp);
* operation has been successfully scheduled and a negative - if a requested
* operations has failed.
*/
-int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index b5d954cb409a..e8e8c2d593c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4336,7 +4336,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev))
rc = bnx2x_set_eth_mac(bp, true);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ae87296ae1ff..aec666e97683 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8417,7 +8417,7 @@ alloc_mem_err:
* Init service functions
*/
-int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags)
{
@@ -9146,7 +9146,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u8 *mac_addr = bp->dev->dev_addr;
+ const u8 *mac_addr = bp->dev->dev_addr;
struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
@@ -11790,7 +11790,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
* as the SAN mac was copied from the primary MAC.
*/
if (IS_MF_FCOE_AFEX(bp))
- memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, fip_mac);
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -11823,9 +11823,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
+ u8 addr[ETH_ALEN] = {};
/* Zero primary MAC configuration */
- eth_zero_addr(bp->dev->dev_addr);
+ eth_hw_addr_set(bp->dev, addr);
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -11834,8 +11835,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
- (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
+ }
if (CNIC_SUPPORT(bp))
bnx2x_get_cnic_mac_hwinfo(bp);
@@ -11843,7 +11846,8 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
/* in SF read MACs from port configuration */
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
if (CNIC_SUPPORT(bp))
bnx2x_get_cnic_mac_hwinfo(bp);
@@ -12291,7 +12295,9 @@ static int bnx2x_init_bp(struct bnx2x *bp)
if (rc)
return rc;
} else {
- eth_zero_addr(bp->dev->dev_addr);
+ static const u8 zero_addr[ETH_ALEN] = {};
+
+ eth_hw_addr_set(bp->dev, zero_addr);
}
bnx2x_set_modes_bitmap(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fbf735fca31..74a8931ce1d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -3058,7 +3058,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
!ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
/* update new mac to net device */
- memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bulletin->mac);
}
if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 966d5722c5e2..8c2cf5519787 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -508,7 +508,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool is_leading);
-int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid,
+ bool set);
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index ea0e9394f898..c9129b9ba446 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -384,9 +384,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
sizeof(bp->fw_ver));
if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
- memcpy(bp->dev->dev_addr,
- bp->acquire_resp.resc.current_mac_addr,
- ETH_ALEN);
+ eth_hw_addr_set(bp->dev,
+ bp->acquire_resp.resc.current_mac_addr);
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
@@ -722,7 +721,7 @@ out:
}
/* request pf to add a mac for the vf */
-int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set)
{
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
@@ -767,7 +766,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
"vfpf SET MAC failed. Check bulletin board for new posts\n");
/* copy mac from bulletin to device */
- memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bulletin.mac);
/* check if bulletin board was updated */
if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index c6ef7ec2c115..2bc2b707d6ee 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
+bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_coredump.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 62f84cc91e4d..c04ea83188e2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -49,8 +49,6 @@
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
@@ -85,55 +83,7 @@ MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
#define BNXT_TX_PUSH_THRESH 164
-enum board_idx {
- BCM57301,
- BCM57302,
- BCM57304,
- BCM57417_NPAR,
- BCM58700,
- BCM57311,
- BCM57312,
- BCM57402,
- BCM57404,
- BCM57406,
- BCM57402_NPAR,
- BCM57407,
- BCM57412,
- BCM57414,
- BCM57416,
- BCM57417,
- BCM57412_NPAR,
- BCM57314,
- BCM57417_SFP,
- BCM57416_SFP,
- BCM57404_NPAR,
- BCM57406_NPAR,
- BCM57407_SFP,
- BCM57407_NPAR,
- BCM57414_NPAR,
- BCM57416_NPAR,
- BCM57452,
- BCM57454,
- BCM5745x_NPAR,
- BCM57508,
- BCM57504,
- BCM57502,
- BCM57508_NPAR,
- BCM57504_NPAR,
- BCM57502_NPAR,
- BCM58802,
- BCM58804,
- BCM58808,
- NETXTREME_E_VF,
- NETXTREME_C_VF,
- NETXTREME_S_VF,
- NETXTREME_C_VF_HV,
- NETXTREME_E_VF_HV,
- NETXTREME_E_P5_VF,
- NETXTREME_E_P5_VF_HV,
-};
-
-/* indexed by enum above */
+/* indexed by enum board_idx */
static const struct {
char *name;
} board_info[] = {
@@ -2172,7 +2122,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
- char *fatal_str = "non-fatal";
+ char *type_str = "Solicited";
if (!bp->fw_health)
goto async_event_process_exit;
@@ -2184,13 +2134,21 @@ static int bnxt_async_event_process(struct bnxt *bp,
bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
if (!bp->fw_reset_max_dsecs)
bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
- if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
- fatal_str = "fatal";
+ if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
+ set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
+ } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+ type_str = "Fatal";
+ bp->fw_health->fatalities++;
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
+ EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
+ type_str = "Non-fatal";
+ bp->fw_health->survivals++;
+ set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
}
netif_warn(bp, hw, bp->dev,
- "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
- fatal_str, data1, data2,
+ "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
+ type_str, data1, data2,
bp->fw_reset_min_dsecs * 100,
bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
@@ -2198,17 +2156,18 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
+ char *status_desc = "healthy";
+ u32 status;
if (!fw_health)
goto async_event_process_exit;
if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
fw_health->enabled = false;
- netif_info(bp, drv, bp->dev,
- "Error recovery info: error recovery[0]\n");
+ netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
break;
}
- fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+ fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
@@ -2218,10 +2177,13 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (status != BNXT_FW_STATUS_HEALTHY)
+ status_desc = "unhealthy";
netif_info(bp, drv, bp->dev,
- "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
- fw_health->master, fw_health->last_fw_reset_cnt,
- bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
+ "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
+ fw_health->primary ? "primary" : "backup", status,
+ status_desc, fw_health->last_fw_reset_cnt);
if (!fw_health->enabled) {
/* Make sure tmr_counter is set and visible to
* bnxt_health_check() before setting enabled to true.
@@ -4651,7 +4613,7 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
return rc;
}
-static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
+int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
struct hwrm_func_drv_unrgtr_input *req;
int rc;
@@ -4869,7 +4831,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
#endif
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
- u8 *mac_addr)
+ const u8 *mac_addr)
{
struct hwrm_cfa_l2_filter_alloc_output *resp;
struct hwrm_cfa_l2_filter_alloc_input *req;
@@ -6366,7 +6328,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (rx_rings != bp->rx_nr_rings) {
netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
rx_rings, bp->rx_nr_rings);
- if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
+ if (netif_is_rxfh_configured(bp->dev) &&
(bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
bnxt_get_max_rss_ring(bp) >= rx_rings)) {
@@ -7192,7 +7154,7 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
ctx_pg->nr_pages = 0;
}
-static void bnxt_free_ctx_mem(struct bnxt *bp)
+void bnxt_free_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
int i;
@@ -7518,12 +7480,18 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
flags_ext = le32_to_cpu(resp->flags_ext);
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
+ bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -7579,6 +7547,32 @@ hwrm_func_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
+{
+ struct hwrm_dbg_qcaps_output *resp;
+ struct hwrm_dbg_qcaps_input *req;
+ int rc;
+
+ bp->fw_dbg_cap = 0;
+ if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
+ if (rc)
+ return;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto hwrm_dbg_qcaps_exit;
+
+ bp->fw_dbg_cap = le32_to_cpu(resp->flags);
+
+hwrm_dbg_qcaps_exit:
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
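The new bnxt_hwrm_dbg_qcaps() above follows the fixed lifecycle of the bnxt HWRM request API: init allocates the request and response buffers, hold keeps the response valid after the send, and drop releases both. An annotated restatement of the pattern (the comments are editorial, not from the patch):

	rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);	/* allocate req and resp */
	if (rc)
		return;
	resp = hwrm_req_hold(bp, req);		/* pin resp past the send */
	rc = hwrm_req_send(bp, req);		/* issue request, wait for reply */
	if (!rc)
		bp->fw_dbg_cap = le32_to_cpu(resp->flags);
	hwrm_req_drop(bp, req);			/* release req and resp */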
static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
@@ -7588,6 +7582,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
rc = __bnxt_hwrm_func_qcaps(bp);
if (rc)
return rc;
+
+ bnxt_hwrm_dbg_qcaps(bp);
+
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
@@ -7642,6 +7639,7 @@ static int __bnxt_alloc_fw_health(struct bnxt *bp)
if (!bp->fw_health)
return -ENOMEM;
+ mutex_init(&bp->fw_health->lock);
return 0;
}
@@ -7688,12 +7686,16 @@ static void bnxt_inv_fw_health_reg(struct bnxt *bp)
struct bnxt_fw_health *fw_health = bp->fw_health;
u32 reg_type;
- if (!fw_health || !fw_health->status_reliable)
+ if (!fw_health)
return;
reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
fw_health->status_reliable = false;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
+ fw_health->resets_reliable = false;
}
static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
@@ -7750,6 +7752,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
int i;
bp->fw_health->status_reliable = false;
+ bp->fw_health->resets_reliable = false;
/* Only pre-map the monitoring GRC registers using window 3 */
for (i = 0; i < 4; i++) {
u32 reg = fw_health->regs[i];
@@ -7763,6 +7766,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
}
bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
if (reg_base == 0xffffffff)
return 0;
@@ -8208,6 +8212,10 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
if (!rc) {
bp->fw_rx_stats_ext_size =
le16_to_cpu(resp_qs->rx_stat_size) / 8;
+ if (BNXT_FW_MAJ(bp) < 220 &&
+ bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
+ bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
+
bp->fw_tx_stats_ext_size = tx_stat_size ?
le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
} else {
@@ -9246,7 +9254,7 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
}
}
-static void bnxt_report_link(struct bnxt *bp)
+void bnxt_report_link(struct bnxt *bp)
{
if (bp->link_info.link_up) {
const char *signal = "";
@@ -9691,8 +9699,6 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_req_send(bp, req);
}
-static int bnxt_fw_init_one(struct bnxt *bp);
-
static int bnxt_fw_reset_via_optee(struct bnxt *bp)
{
#ifdef CONFIG_TEE_BNXT_FW
@@ -9739,6 +9745,33 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int rc;
+
+ if (!BNXT_NEW_RM(bp))
+ return 0; /* no resource reservations required */
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "resc_qcaps failed\n");
+
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_stat_ctxs = 0;
+ hw_resc->resv_irqs = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ if (!fw_reset) {
+ bp->tx_nr_rings = 0;
+ bp->rx_nr_rings = 0;
+ }
+
+ return rc;
+}
+
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp;
@@ -9822,25 +9855,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
return rc;
}
}
- if (BNXT_NEW_RM(bp)) {
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-
- rc = bnxt_hwrm_func_resc_qcaps(bp, true);
- if (rc)
- netdev_err(bp->dev, "resc_qcaps failed\n");
-
- hw_resc->resv_cp_rings = 0;
- hw_resc->resv_stat_ctxs = 0;
- hw_resc->resv_irqs = 0;
- hw_resc->resv_tx_rings = 0;
- hw_resc->resv_rx_rings = 0;
- hw_resc->resv_hw_ring_grps = 0;
- hw_resc->resv_vnics = 0;
- if (!fw_reset) {
- bp->tx_nr_rings = 0;
- bp->rx_nr_rings = 0;
- }
- }
+ rc = bnxt_cancel_reservations(bp, fw_reset);
}
return rc;
}
@@ -10318,7 +10333,7 @@ void bnxt_half_close_nic(struct bnxt *bp)
bnxt_free_mem(bp, false);
}
-static void bnxt_reenable_sriov(struct bnxt *bp)
+void bnxt_reenable_sriov(struct bnxt *bp)
{
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -11295,14 +11310,18 @@ static void bnxt_fw_health_check(struct bnxt *bp)
}
val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
- if (val == fw_health->last_fw_heartbeat)
+ if (val == fw_health->last_fw_heartbeat) {
+ fw_health->arrests++;
goto fw_reset;
+ }
fw_health->last_fw_heartbeat = val;
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- if (val != fw_health->last_fw_reset_cnt)
+ if (val != fw_health->last_fw_reset_cnt) {
+ fw_health->discoveries++;
goto fw_reset;
+ }
fw_health->tmr_counter = fw_health->tmr_multiplier;
return;
@@ -11508,7 +11527,7 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
}
bnxt_fw_reset_close(bp);
wait_dsecs = fw_health->master_func_wait_dsecs;
- if (fw_health->master) {
+ if (fw_health->primary) {
if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
wait_dsecs = 0;
bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
@@ -11772,13 +11791,17 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
bnxt_rx_ring_reset(bp);
- if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
- bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
+ if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
+ test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
+ bnxt_devlink_health_fw_report(bp);
+ else
+ bnxt_fw_reset(bp);
+ }
if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
if (!is_bnxt_fw_ok(bp))
- bnxt_devlink_health_report(bp,
- BNXT_FW_EXCEPTION_SP_EVENT);
+ bnxt_devlink_health_fw_report(bp);
}
smp_mb__before_atomic();
@@ -11989,7 +12012,7 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
-static int bnxt_fw_init_one(struct bnxt *bp)
+int bnxt_fw_init_one(struct bnxt *bp)
{
int rc;
@@ -12051,6 +12074,27 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
}
}
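+/* Ask firmware whether a hot reset is currently permitted.  Default to
+ * true when the capability or the query is unavailable and let
+ * firmware enforce the final decision.
+ */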
+bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ bool result = true; /* firmware will enforce if unknown */
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return result;
+
+ if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
+ return result;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ if (!hwrm_req_send(bp, req))
+ result = !!(le16_to_cpu(resp->flags) &
+ FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
+ hwrm_req_drop(bp, req);
+ return result;
+}
+
static void bnxt_reset_all(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -12093,7 +12137,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
bnxt_ulp_start(bp, rc);
- bnxt_dl_health_status_update(bp, false);
+ bnxt_dl_health_fw_status_update(bp, false);
}
bp->fw_reset_state = 0;
dev_close(bp->dev);
@@ -12159,7 +12203,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
- if (!bp->fw_health->master) {
+ if (!bp->fw_health->primary) {
u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
@@ -12192,6 +12236,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
}
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
+ if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
+ !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
+ bnxt_dl_remote_reload(bp);
if (pci_enable_device(bp->pdev)) {
netdev_err(bp->dev, "Cannot re-enable PCI device\n");
rc = -ENODEV;
@@ -12241,8 +12289,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
bnxt_ptp_reapply_pps(bp);
- bnxt_dl_health_recovery_done(bp);
- bnxt_dl_health_status_update(bp, true);
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
+ bnxt_dl_health_fw_recovery_done(bp);
+ bnxt_dl_health_fw_status_update(bp, true);
+ }
rtnl_unlock();
break;
}
@@ -12369,7 +12420,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (rc)
return rc;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -13103,7 +13154,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
int rc = 0;
if (BNXT_PF(bp)) {
- memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -13111,7 +13162,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
if (is_valid_ether_addr(vf->mac_addr)) {
/* overwrite netdev dev_addr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, vf->mac_addr);
/* Older PF driver or firmware may not approve this
* correctly.
*/
@@ -13186,6 +13237,15 @@ static int bnxt_map_db_bar(struct bnxt *bp)
return 0;
}
+void bnxt_print_device_info(struct bnxt *bp)
+{
+ netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
+ board_info[bp->board_idx].name,
+ (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
+
+ pcie_print_link_status(bp->pdev);
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -13209,10 +13269,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
+ bp->board_idx = ent->driver_data;
bp->msg_enable = BNXT_DEF_MSG_ENABLE;
bnxt_set_max_func_irqs(bp, max_irqs);
- if (bnxt_vf_pciid(ent->driver_data))
+ if (bnxt_vf_pciid(bp->board_idx))
bp->flags |= BNXT_FLAG_VF;
if (pdev->msix_cap)
@@ -13370,7 +13431,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
bnxt_inv_fw_health_reg(bp);
- bnxt_dl_register(bp);
+ rc = bnxt_dl_register(bp);
+ if (rc)
+ goto init_err_dl;
rc = register_netdev(dev);
if (rc)
@@ -13380,16 +13443,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
devlink_port_type_eth_set(&bp->dl_port, bp->dev);
bnxt_dl_fw_reporters_create(bp);
- netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
- board_info[ent->driver_data].name,
- (long)pci_resource_start(pdev, 0), dev->dev_addr);
- pcie_print_link_status(pdev);
+ bnxt_print_device_info(bp);
pci_save_state(pdev);
return 0;
init_err_cleanup:
bnxt_dl_unregister(bp);
+init_err_dl:
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 19fe6478e9b4..d0d5da9b78f8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -489,6 +489,15 @@ struct rx_tpa_end_cmp_ext {
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION)
+
+#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK)
+
#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
!!((data1) & \
ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
@@ -1514,6 +1523,21 @@ struct bnxt_ctx_mem_info {
struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
};
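+/* Health severity reported through the devlink "fw" health reporter,
+ * ordered from least to most severe.
+ */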
+enum bnxt_health_severity {
+ SEVERITY_NORMAL = 0,
+ SEVERITY_WARNING,
+ SEVERITY_RECOVERABLE,
+ SEVERITY_FATAL,
+};
+
+enum bnxt_health_remedy {
+ REMEDY_DEVLINK_RECOVER,
+ REMEDY_POWER_CYCLE_DEVICE,
+ REMEDY_POWER_CYCLE_HOST,
+ REMEDY_FW_UPDATE,
+ REMEDY_HW_REPLACE,
+};
+
struct bnxt_fw_health {
u32 flags;
u32 polling_dsecs;
@@ -1531,9 +1555,9 @@ struct bnxt_fw_health {
u32 last_fw_heartbeat;
u32 last_fw_reset_cnt;
u8 enabled:1;
- u8 master:1;
- u8 fatal:1;
+ u8 primary:1;
u8 status_reliable:1;
+ u8 resets_reliable:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
@@ -1543,12 +1567,15 @@ struct bnxt_fw_health {
u32 echo_req_data1;
u32 echo_req_data2;
struct devlink_health_reporter *fw_reporter;
- struct devlink_health_reporter *fw_reset_reporter;
- struct devlink_health_reporter *fw_fatal_reporter;
-};
-
-struct bnxt_fw_reporter_ctx {
- unsigned long sp_event;
+ /* Protects severity and remedy */
+ struct mutex lock;
+ enum bnxt_health_severity severity;
+ enum bnxt_health_remedy remedy;
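+ /* Reporter counters: stalled heartbeats (arrests), unexpected
+ * firmware reset count changes (discoveries), recovery outcomes
+ * (survivals, fatalities) and errors found by diagnose (diagnoses).
+ */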
+ u32 arrests;
+ u32 discoveries;
+ u32 survivals;
+ u32 fatalities;
+ u32 diagnoses;
};
#define BNXT_FW_HEALTH_REG_TYPE_MASK 3
@@ -1586,6 +1613,54 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_RETRY 5
#define BNXT_FW_IF_RETRY 10
+enum board_idx {
+ BCM57301,
+ BCM57302,
+ BCM57304,
+ BCM57417_NPAR,
+ BCM58700,
+ BCM57311,
+ BCM57312,
+ BCM57402,
+ BCM57404,
+ BCM57406,
+ BCM57402_NPAR,
+ BCM57407,
+ BCM57412,
+ BCM57414,
+ BCM57416,
+ BCM57417,
+ BCM57412_NPAR,
+ BCM57314,
+ BCM57417_SFP,
+ BCM57416_SFP,
+ BCM57404_NPAR,
+ BCM57406_NPAR,
+ BCM57407_SFP,
+ BCM57407_NPAR,
+ BCM57414_NPAR,
+ BCM57416_NPAR,
+ BCM57452,
+ BCM57454,
+ BCM5745x_NPAR,
+ BCM57508,
+ BCM57504,
+ BCM57502,
+ BCM57508_NPAR,
+ BCM57504_NPAR,
+ BCM57502_NPAR,
+ BCM58802,
+ BCM58804,
+ BCM58808,
+ NETXTREME_E_VF,
+ NETXTREME_C_VF,
+ NETXTREME_S_VF,
+ NETXTREME_C_VF_HV,
+ NETXTREME_E_VF_HV,
+ NETXTREME_E_P5_VF,
+ NETXTREME_E_P5_VF_HV,
+};
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1840,6 +1915,10 @@ struct bnxt {
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_STATE_NAPI_DISABLED 9
+#define BNXT_STATE_FW_ACTIVATE 11
+#define BNXT_STATE_RECOVER 12
+#define BNXT_STATE_FW_NON_FATAL_COND 13
+#define BNXT_STATE_FW_ACTIVATE_RESET 14
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
@@ -1879,8 +1958,13 @@ struct bnxt {
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
+ #define BNXT_FW_CAP_LIVEPATCH 0x08000000
#define BNXT_FW_CAP_PTP_PPS 0x10000000
+ #define BNXT_FW_CAP_HOT_RESET_IF 0x20000000
#define BNXT_FW_CAP_RING_MONITOR 0x40000000
+ #define BNXT_FW_CAP_DBG_QCAPS 0x80000000
+
+ u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -2049,6 +2133,7 @@ struct bnxt {
struct list_head tc_indr_block_list;
struct dentry *debugfs_pdev;
struct device *hwmon_dev;
+ enum board_idx board_idx;
};
#define BNXT_NUM_RX_RING_STATS 8
@@ -2090,6 +2175,9 @@ struct bnxt {
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
+#define BNXT_RX_STATS_EXT_NUM_LEGACY \
+ BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)
+
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
@@ -2181,11 +2269,13 @@ void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
+int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
+void bnxt_free_ctx_mem(struct bnxt *bp);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
@@ -2194,9 +2284,11 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
+void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
@@ -2205,6 +2297,7 @@ int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
+void bnxt_reenable_sriov(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
@@ -2212,6 +2305,8 @@ void bnxt_fw_exception(struct bnxt *bp);
void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
+int bnxt_fw_init_one(struct bnxt *bp);
+bool bnxt_hwrm_reset_permitted(struct bnxt *bp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
@@ -2219,5 +2314,5 @@ int bnxt_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
void bnxt_dim_work(struct work_struct *work);
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
-
+void bnxt_print_device_info(struct bnxt *bp);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
new file mode 100644
index 000000000000..d3cb2f21946d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -0,0 +1,444 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2021 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_coredump.h"
+
+static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ struct bnxt_hwrm_dbg_dma_info *info)
+{
+ struct hwrm_dbg_cmn_input *cmn_req = msg;
+ __le16 *seq_ptr = msg + info->seq_off;
+ struct hwrm_dbg_cmn_output *cmn_resp;
+ u16 seq = 0, len, segs_off;
+ dma_addr_t dma_handle;
+ void *dma_buf, *resp;
+ int rc, off = 0;
+
+ dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
+ if (!dma_buf) {
+ hwrm_req_drop(bp, msg);
+ return -ENOMEM;
+ }
+
+ hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
+ cmn_resp = hwrm_req_hold(bp, msg);
+ resp = cmn_resp;
+
+ segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ total_segments);
+ cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
+ cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
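+ /* Firmware fills the DMA buffer one chunk per request; bump the
+ * sequence number and copy each chunk out until the response no
+ * longer carries the MORE flag.
+ */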
+ while (1) {
+ *seq_ptr = cpu_to_le16(seq);
+ rc = hwrm_req_send(bp, msg);
+ if (rc)
+ break;
+
+ len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
+ if (!seq &&
+ cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ info->segs = le16_to_cpu(*((__le16 *)(resp +
+ segs_off)));
+ if (!info->segs) {
+ rc = -EIO;
+ break;
+ }
+
+ info->dest_buf_size = info->segs *
+ sizeof(struct coredump_segment_record);
+ info->dest_buf = kmalloc(info->dest_buf_size,
+ GFP_KERNEL);
+ if (!info->dest_buf) {
+ rc = -ENOMEM;
+ break;
+ }
+ }
+
+ if (info->dest_buf) {
+ if ((info->seg_start + off + len) <=
+ BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+ memcpy(info->dest_buf + off, dma_buf, len);
+ } else {
+ rc = -ENOBUFS;
+ break;
+ }
+ }
+
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
+ if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ break;
+
+ seq++;
+ off += len;
+ }
+ hwrm_req_drop(bp, msg);
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
+ struct bnxt_coredump *coredump)
+{
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ struct hwrm_dbg_coredump_list_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
+ if (rc)
+ return rc;
+
+ info.dma_len = COREDUMP_LIST_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ data_len);
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
+ if (!rc) {
+ coredump->data = info.dest_buf;
+ coredump->data_size = info.dest_buf_size;
+ coredump->total_segs = info.segs;
+ }
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
+ u16 segment_id)
+{
+ struct hwrm_dbg_coredump_initiate_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
+ if (rc)
+ return rc;
+
+ hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
+ req->component_id = cpu_to_le16(component_id);
+ req->segment_id = cpu_to_le16(segment_id);
+
+ return hwrm_req_send(bp, req);
+}
+
+static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
+ u16 segment_id, u32 *seg_len,
+ void *buf, u32 buf_len, u32 offset)
+{
+ struct hwrm_dbg_coredump_retrieve_input *req;
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
+ if (rc)
+ return rc;
+
+ req->component_id = cpu_to_le16(component_id);
+ req->segment_id = cpu_to_le16(segment_id);
+
+ info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
+ seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
+ data_len);
+ if (buf) {
+ info.dest_buf = buf + offset;
+ info.buf_len = buf_len;
+ info.seg_start = offset;
+ }
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
+ if (!rc)
+ *seg_len = info.dest_buf_size;
+
+ return rc;
+}
+
+static void
+bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec, u32 seg_len,
+ int status, u32 duration, u32 instance)
+{
+ memset(seg_hdr, 0, sizeof(*seg_hdr));
+ memcpy(seg_hdr->signature, "sEgM", 4);
+ if (seg_rec) {
+ seg_hdr->component_id = (__force __le32)seg_rec->component_id;
+ seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
+ seg_hdr->low_version = seg_rec->version_low;
+ seg_hdr->high_version = seg_rec->version_hi;
+ seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
+ } else {
+ /* The hwrm_ver_get response segment uses component id 2
+ * and segment id 0.
+ */
+ seg_hdr->component_id = cpu_to_le32(2);
+ seg_hdr->segment_id = 0;
+ }
+ seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
+ seg_hdr->length = cpu_to_le32(seg_len);
+ seg_hdr->status = cpu_to_le32(status);
+ seg_hdr->duration = cpu_to_le32(duration);
+ seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
+ seg_hdr->instance = cpu_to_le32(instance);
+}
+
+static void bnxt_fill_cmdline(struct bnxt_coredump_record *record)
+{
+ struct mm_struct *mm = current->mm;
+ int i, len, last = 0;
+
+ if (mm) {
+ len = min_t(int, mm->arg_end - mm->arg_start,
+ sizeof(record->commandline) - 1);
+ if (len && !copy_from_user(record->commandline,
+ (char __user *)mm->arg_start, len)) {
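+ /* argv entries are NUL separated; join them with spaces
+ * and terminate after the last non-empty argument.
+ */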
+ for (i = 0; i < len; i++) {
+ if (record->commandline[i])
+ last = i;
+ else
+ record->commandline[i] = ' ';
+ }
+ record->commandline[last + 1] = 0;
+ return;
+ }
+ }
+
+ strscpy(record->commandline, current->comm, TASK_COMM_LEN);
+}
+
+static void
+bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
+ time64_t start, s16 start_utc, u16 total_segs,
+ int status)
+{
+ time64_t end = ktime_get_real_seconds();
+ u32 os_ver_major = 0, os_ver_minor = 0;
+ struct tm tm;
+
+ time64_to_tm(start, 0, &tm);
+ memset(record, 0, sizeof(*record));
+ memcpy(record->signature, "cOrE", 4);
+ record->flags = 0;
+ record->low_version = 0;
+ record->high_version = 1;
+ record->asic_state = 0;
+ strscpy(record->system_name, utsname()->nodename,
+ sizeof(record->system_name));
+ record->year = cpu_to_le16(tm.tm_year + 1900);
+ record->month = cpu_to_le16(tm.tm_mon + 1);
+ record->day = cpu_to_le16(tm.tm_mday);
+ record->hour = cpu_to_le16(tm.tm_hour);
+ record->minute = cpu_to_le16(tm.tm_min);
+ record->second = cpu_to_le16(tm.tm_sec);
+ record->utc_bias = cpu_to_le16(start_utc);
+ bnxt_fill_cmdline(record);
+ record->total_segments = cpu_to_le32(total_segs);
+
+ if (sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor) != 2)
+ netdev_warn(bp->dev, "Unknown OS release in coredump\n");
+ record->os_ver_major = cpu_to_le32(os_ver_major);
+ record->os_ver_minor = cpu_to_le32(os_ver_minor);
+
+ strscpy(record->os_name, utsname()->sysname, sizeof(record->os_name));
+ time64_to_tm(end, 0, &tm);
+ record->end_year = cpu_to_le16(tm.tm_year + 1900);
+ record->end_month = cpu_to_le16(tm.tm_mon + 1);
+ record->end_day = cpu_to_le16(tm.tm_mday);
+ record->end_hour = cpu_to_le16(tm.tm_hour);
+ record->end_minute = cpu_to_le16(tm.tm_min);
+ record->end_second = cpu_to_le16(tm.tm_sec);
+ record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
+ record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
+ bp->ver_resp.chip_rev << 8 |
+ bp->ver_resp.chip_metal);
+ record->asic_id2 = 0;
+ record->coredump_status = cpu_to_le32(status);
+ record->ioctl_low_version = 0;
+ record->ioctl_high_version = 0;
+}
+
+static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+{
+ u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+ u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
+ struct coredump_segment_record *seg_record = NULL;
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_coredump coredump = {NULL};
+ time64_t start_time;
+ u16 start_utc;
+ int rc = 0, i;
+
+ if (buf)
+ buf_len = *dump_len;
+
+ start_time = ktime_get_real_seconds();
+ start_utc = sys_tz.tz_minuteswest * 60;
+ seg_hdr_len = sizeof(seg_hdr);
+
+ /* First segment should be hwrm_ver_get response */
+ *dump_len = seg_hdr_len + ver_get_resp_len;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
+ 0, 0, 0);
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len;
+ memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
+ offset += ver_get_resp_len;
+ }
+
+ rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
+ if (rc) {
+ netdev_err(bp->dev, "Failed to get coredump segment list\n");
+ goto err;
+ }
+
+ *dump_len += seg_hdr_len * coredump.total_segs;
+
+ seg_record = (struct coredump_segment_record *)coredump.data;
+ seg_record_len = sizeof(*seg_record);
+
+ for (i = 0; i < coredump.total_segs; i++) {
+ u16 comp_id = le16_to_cpu(seg_record->component_id);
+ u16 seg_id = le16_to_cpu(seg_record->segment_id);
+ u32 duration = 0, seg_len = 0;
+ unsigned long start, end;
+
+ if (buf && ((offset + seg_hdr_len) >
+ BNXT_COREDUMP_BUF_LEN(buf_len))) {
+ rc = -ENOBUFS;
+ goto err;
+ }
+
+ start = jiffies;
+
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ if (rc) {
+ netdev_err(bp->dev,
+ "Failed to initiate coredump for seg = %d\n",
+ seg_id);
+ goto next_seg;
+ }
+
+ /* Write segment data into the buffer */
+ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
+ &seg_len, buf, buf_len,
+ offset + seg_hdr_len);
+ if (rc && rc == -ENOBUFS)
+ goto err;
+ else if (rc)
+ netdev_err(bp->dev,
+ "Failed to retrieve coredump for seg = %d\n",
+ seg_id);
+
+next_seg:
+ end = jiffies;
+ duration = jiffies_to_msecs(end - start);
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
+ rc, duration, 0);
+
+ if (buf) {
+ /* Write segment header into the buffer */
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len + seg_len;
+ }
+
+ *dump_len += seg_len;
+ seg_record =
+ (struct coredump_segment_record *)((u8 *)seg_record +
+ seg_record_len);
+ }
+
+err:
+ if (buf)
+ bnxt_fill_coredump_record(bp, buf + offset, start_time,
+ start_utc, coredump.total_segs + 1,
+ rc);
+ kfree(coredump.data);
+ *dump_len += sizeof(struct bnxt_coredump_record);
+ if (rc == -ENOBUFS)
+ netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
+ return rc;
+}
+
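+/* Crash dumps are retrieved through the OP-TEE firmware service; live
+ * dumps are collected from the running firmware over HWRM.
+ */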
+int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
+{
+ if (dump_type == BNXT_DUMP_CRASH) {
+#ifdef CONFIG_TEE_BNXT_FW
+ return tee_bnxt_copy_coredump(buf, 0, *dump_len);
+#else
+ return -EOPNOTSUPP;
+#endif
+ } else {
+ return __bnxt_get_coredump(bp, buf, dump_len);
+ }
+}
+
+static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
+{
+ struct hwrm_dbg_qcfg_output *resp;
+ struct hwrm_dbg_qcfg_input *req;
+ int rc, hdr_len = 0;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
+ return -EOPNOTSUPP;
+
+ if (dump_type == BNXT_DUMP_CRASH &&
+ !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
+ return -EOPNOTSUPP;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ if (dump_type == BNXT_DUMP_CRASH)
+ req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto get_dump_len_exit;
+
+ if (dump_type == BNXT_DUMP_CRASH) {
+ *dump_len = le32_to_cpu(resp->crashdump_size);
+ } else {
+ /* The driver adds the coredump header, the HWRM_VER_GET
+ * response segment and the trailing coredump record on
+ * top of the firmware coredump.
+ */
+ hdr_len = sizeof(struct bnxt_coredump_segment_hdr) +
+ sizeof(struct hwrm_ver_get_output) +
+ sizeof(struct bnxt_coredump_record);
+ *dump_len = le32_to_cpu(resp->coredump_size) + hdr_len;
+ }
+ if (*dump_len <= hdr_len)
+ rc = -EINVAL;
+
+get_dump_len_exit:
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
+{
+ u32 len = 0;
+
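+ /* When firmware cannot report the length, fall back to a fixed
+ * crash dump size, or size a live dump with a dry run (NULL
+ * buffer).
+ */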
+ if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
+ if (dump_type == BNXT_DUMP_CRASH)
+ len = BNXT_CRASH_DUMP_LEN;
+ else
+ __bnxt_get_coredump(bp, NULL, &len);
+ }
+ return len;
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index 09c22f8fe399..b1a1b2fffb19 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -10,6 +10,10 @@
#ifndef BNXT_COREDUMP_H
#define BNXT_COREDUMP_H
+#include <linux/utsname.h>
+#include <linux/time.h>
+#include <linux/rtc.h>
+
struct bnxt_coredump_segment_hdr {
__u8 signature[4];
__le32 component_id;
@@ -63,4 +67,51 @@ struct bnxt_coredump_record {
__u8 ioctl_high_version;
__le16 rsvd3[313];
};
+
+#define BNXT_CRASH_DUMP_LEN (8 << 20)
+
+#define COREDUMP_LIST_BUF_LEN 2048
+#define COREDUMP_RETRIEVE_BUF_LEN 4096
+
+struct bnxt_coredump {
+ void *data;
+ int data_size;
+ u16 total_segs;
+};
+
+#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
+
+struct bnxt_hwrm_dbg_dma_info {
+ void *dest_buf;
+ int dest_buf_size;
+ u16 dma_len;
+ u16 seq_off;
+ u16 data_len_off;
+ u16 segs;
+ u32 seg_start;
+ u32 buf_len;
+};
+
+struct hwrm_dbg_cmn_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+};
+
+struct hwrm_dbg_cmn_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define HWRM_DBG_CMN_FLAGS_MORE 1
+};
+
+int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
+u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 228a5db7e143..217ff597cdf2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -159,10 +159,10 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
}
data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
- for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
+ for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw.cfg)) {
int tc;
- memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
+ memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg));
if (i == 0)
cos2bw.queue_id = resp->queue_id0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 6eed231de565..716742522161 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -23,13 +23,15 @@ struct bnxt_dcb {
struct bnxt_cos2bw_cfg {
u8 pad[3];
- u8 queue_id;
- __le32 min_bw;
- __le32 max_bw;
+ struct_group_attr(cfg, __packed,
+ u8 queue_id;
+ __le32 min_bw;
+ __le32 max_bw;
#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- u8 tsa;
- u8 pri_lvl;
- u8 bw_weight;
+ u8 tsa;
+ u8 pri_lvl;
+ u8 bw_weight;
+ );
u8 unused;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 9576547df4ab..ce790e9b45c3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -16,6 +16,18 @@
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_ethtool.h"
+#include "bnxt_ulp.h"
+#include "bnxt_ptp.h"
+#include "bnxt_coredump.h"
+
+static void __bnxt_fw_recover(struct bnxt *bp)
+{
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
+ test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
+ bnxt_fw_reset(bp);
+ else
+ bnxt_fw_exception(bp);
+}
static int
bnxt_dl_flash_update(struct devlink *dl,
@@ -40,146 +52,208 @@ bnxt_dl_flash_update(struct devlink *dl,
return rc;
}
-static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
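+/* Tell firmware whether a hot reset initiated by a driver on another
+ * function of this device (a "remote" driver) should be honoured.
+ */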
+static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return -EOPNOTSUPP;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT);
+ if (remote_reset)
+ req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS);
+
+ return hwrm_req_send(bp, req);
+}
+
+static char *bnxt_health_severity_str(enum bnxt_health_severity severity)
+{
+ switch (severity) {
+ case SEVERITY_NORMAL: return "normal";
+ case SEVERITY_WARNING: return "warning";
+ case SEVERITY_RECOVERABLE: return "recoverable";
+ case SEVERITY_FATAL: return "fatal";
+ default: return "unknown";
+ }
+}
+
+static char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy)
+{
+ switch (remedy) {
+ case REMEDY_DEVLINK_RECOVER: return "devlink recover";
+ case REMEDY_POWER_CYCLE_DEVICE: return "device power cycle";
+ case REMEDY_POWER_CYCLE_HOST: return "host power cycle";
+ case REMEDY_FW_UPDATE: return "update firmware";
+ case REMEDY_HW_REPLACE: return "replace hardware";
+ default: return "unknown";
+ }
+}
+
+static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- u32 val;
+ struct bnxt_fw_health *h = bp->fw_health;
+ u32 fw_status, fw_resets;
int rc;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
- return 0;
+ return devlink_fmsg_string_pair_put(fmsg, "Status", "recovering");
- val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (!h->status_reliable)
+ return devlink_fmsg_string_pair_put(fmsg, "Status", "unknown");
- if (BNXT_FW_IS_BOOTING(val)) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Description",
- "Not yet completed initialization");
+ mutex_lock(&h->lock);
+ fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (BNXT_FW_IS_BOOTING(fw_status)) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Status", "initializing");
if (rc)
- return rc;
- } else if (BNXT_FW_IS_ERR(val)) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Description",
- "Encountered fatal error and cannot recover");
+ goto unlock;
+ } else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) {
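+ /* Firmware error discovered by diagnose itself (no prior
+ * async notification): record it as fatal and raise a
+ * devlink health report.
+ */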
+ if (!h->severity) {
+ h->severity = SEVERITY_FATAL;
+ h->remedy = REMEDY_POWER_CYCLE_DEVICE;
+ h->diagnoses++;
+ devlink_health_report(h->fw_reporter,
+ "FW error diagnosed", h);
+ }
+ rc = devlink_fmsg_string_pair_put(fmsg, "Status", "error");
if (rc)
- return rc;
+ goto unlock;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status);
+ if (rc)
+ goto unlock;
+ } else {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
+ if (rc)
+ goto unlock;
}
- if (val >> 16) {
- rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
+ rc = devlink_fmsg_string_pair_put(fmsg, "Severity",
+ bnxt_health_severity_str(h->severity));
+ if (rc)
+ goto unlock;
+
+ if (h->severity) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Remedy",
+ bnxt_health_remedy_str(h->remedy));
if (rc)
- return rc;
+ goto unlock;
+ if (h->remedy == REMEDY_DEVLINK_RECOVER) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Impact",
+ "traffic+ntuple_cfg");
+ if (rc)
+ goto unlock;
+ }
}
- val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
- if (rc)
+unlock:
+ mutex_unlock(&h->lock);
+ if (rc || !h->resets_reliable)
return rc;
- return 0;
+ fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities);
+ if (rc)
+ return rc;
+ return devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses);
}
-static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
- .name = "fw",
- .diagnose = bnxt_fw_reporter_diagnose,
-};
-
-static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx,
- struct netlink_ext_ack *extack)
+static int bnxt_fw_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ u32 dump_len;
+ void *data;
+ int rc;
- if (!priv_ctx)
+ /* TODO: no firmware dump support in devlink_health_report() context */
+ if (priv_ctx)
return -EOPNOTSUPP;
- bnxt_fw_reset(bp);
- return -EINPROGRESS;
-}
+ dump_len = bnxt_get_coredump_length(bp, BNXT_DUMP_LIVE);
+ if (!dump_len)
+ return -EIO;
-static const
-struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
- .name = "fw_reset",
- .recover = bnxt_fw_reset_recover,
-};
+ data = vmalloc(dump_len);
+ if (!data)
+ return -ENOMEM;
-static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx,
- struct netlink_ext_ack *extack)
+ rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len);
+ if (!rc) {
+ rc = devlink_fmsg_pair_nest_start(fmsg, "core");
+ if (rc)
+ goto exit;
+ rc = devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len);
+ if (rc)
+ goto exit;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "size", dump_len);
+ if (rc)
+ goto exit;
+ rc = devlink_fmsg_pair_nest_end(fmsg);
+ }
+
+exit:
+ vfree(data);
+ return rc;
+}
+
+static int bnxt_fw_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
- unsigned long event;
- if (!priv_ctx)
- return -EOPNOTSUPP;
+ if (bp->fw_health->severity == SEVERITY_FATAL)
+ return -ENODEV;
- bp->fw_health->fatal = true;
- event = fw_reporter_ctx->sp_event;
- if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
- bnxt_fw_reset(bp);
- else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
- bnxt_fw_exception(bp);
+ set_bit(BNXT_STATE_RECOVER, &bp->state);
+ __bnxt_fw_recover(bp);
return -EINPROGRESS;
}
-static const
-struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
- .name = "fw_fatal",
- .recover = bnxt_fw_fatal_recover,
+static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = bnxt_fw_diagnose,
+ .dump = bnxt_fw_dump,
+ .recover = bnxt_fw_recover,
};
void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
struct bnxt_fw_health *health = bp->fw_health;
- if (!bp->dl || !health)
+ if (!health || health->fw_reporter)
return;
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
- goto err_recovery;
-
- health->fw_reset_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_reset_reporter_ops,
+ health->fw_reporter =
+ devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
0, bp);
- if (IS_ERR(health->fw_reset_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reset_reporter));
- health->fw_reset_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
- }
-
-err_recovery:
- if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
- return;
-
- if (!health->fw_reporter) {
- health->fw_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reporter));
- health->fw_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
- return;
- }
- }
-
- if (health->fw_fatal_reporter)
- return;
-
- health->fw_fatal_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_fatal_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_fatal_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
- PTR_ERR(health->fw_fatal_reporter));
- health->fw_fatal_reporter = NULL;
+ if (IS_ERR(health->fw_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_reporter));
+ health->fw_reporter = NULL;
bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}
}
@@ -188,15 +262,9 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
{
struct bnxt_fw_health *health = bp->fw_health;
- if (!bp->dl || !health)
+ if (!health)
return;
- if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
- health->fw_reset_reporter) {
- devlink_health_reporter_destroy(health->fw_reset_reporter);
- health->fw_reset_reporter = NULL;
- }
-
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
return;
@@ -204,82 +272,319 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
devlink_health_reporter_destroy(health->fw_reporter);
health->fw_reporter = NULL;
}
-
- if (health->fw_fatal_reporter) {
- devlink_health_reporter_destroy(health->fw_fatal_reporter);
- health->fw_fatal_reporter = NULL;
- }
}
-void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
+void bnxt_devlink_health_fw_report(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
- struct bnxt_fw_reporter_ctx fw_reporter_ctx;
-
- fw_reporter_ctx.sp_event = event;
- switch (event) {
- case BNXT_FW_RESET_NOTIFY_SP_EVENT:
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
- if (!fw_health->fw_fatal_reporter)
- return;
-
- devlink_health_report(fw_health->fw_fatal_reporter,
- "FW fatal async event received",
- &fw_reporter_ctx);
- return;
- }
- if (!fw_health->fw_reset_reporter)
- return;
+ int rc;
- devlink_health_report(fw_health->fw_reset_reporter,
- "FW non-fatal reset event received",
- &fw_reporter_ctx);
+ if (!fw_health)
return;
- case BNXT_FW_EXCEPTION_SP_EVENT:
- if (!fw_health->fw_fatal_reporter)
- return;
-
- devlink_health_report(fw_health->fw_fatal_reporter,
- "FW fatal error reported",
- &fw_reporter_ctx);
+ if (!fw_health->fw_reporter) {
+ __bnxt_fw_recover(bp);
return;
}
+
+ mutex_lock(&fw_health->lock);
+ fw_health->severity = SEVERITY_RECOVERABLE;
+ fw_health->remedy = REMEDY_DEVLINK_RECOVER;
+ mutex_unlock(&fw_health->lock);
+ rc = devlink_health_report(fw_health->fw_reporter, "FW error reported",
+ fw_health);
+ if (rc == -ECANCELED)
+ __bnxt_fw_recover(bp);
}
-void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
+void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy)
{
- struct bnxt_fw_health *health = bp->fw_health;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
u8 state;
- if (healthy)
+ mutex_lock(&fw_health->lock);
+ if (healthy) {
+ fw_health->severity = SEVERITY_NORMAL;
state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
- else
+ } else {
+ fw_health->severity = SEVERITY_FATAL;
+ fw_health->remedy = REMEDY_POWER_CYCLE_DEVICE;
state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
-
- if (health->fatal)
- devlink_health_reporter_state_update(health->fw_fatal_reporter,
- state);
- else
- devlink_health_reporter_state_update(health->fw_reset_reporter,
- state);
-
- health->fatal = false;
+ }
+ mutex_unlock(&fw_health->lock);
+ devlink_health_reporter_state_update(fw_health->fw_reporter, state);
}
-void bnxt_dl_health_recovery_done(struct bnxt *bp)
+void bnxt_dl_health_fw_recovery_done(struct bnxt *bp)
{
- struct bnxt_fw_health *hlth = bp->fw_health;
+ struct bnxt_dl *dl = devlink_priv(bp->dl);
- if (hlth->fatal)
- devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter);
- else
- devlink_health_reporter_recovery_done(hlth->fw_reset_reporter);
+ devlink_health_reporter_recovery_done(bp->fw_health->fw_reporter);
+ bnxt_hwrm_remote_dev_reset_set(bp, dl->remote_reset);
}
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct netlink_ext_ack *extack);
+static void
+bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
+ struct hwrm_fw_livepatch_output *resp)
+{
+ int err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ switch (err) {
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE:
+ netdev_err(bp->dev, "Illegal live patch opcode");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid opcode");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch operation not supported");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch not found");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Live patch deactivation failed. Firmware not patched.");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch not authenticated");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER:
+ NL_SET_ERR_MSG_MOD(extack, "Incompatible live patch");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch has invalid size");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
+ break;
+ default:
+ netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
+ break;
+ }
+}
+
+static int
+bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ struct hwrm_fw_livepatch_query_output *query_resp;
+ struct hwrm_fw_livepatch_query_input *query_req;
+ struct hwrm_fw_livepatch_output *patch_resp;
+ struct hwrm_fw_livepatch_input *patch_req;
+ u32 installed = 0;
+ u16 flags;
+ u8 target;
+ int rc;
+
+ if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support live patch");
+ return -EOPNOTSUPP;
+ }
+
+ rc = hwrm_req_init(bp, query_req, HWRM_FW_LIVEPATCH_QUERY);
+ if (rc)
+ return rc;
+ query_resp = hwrm_req_hold(bp, query_req);
+
+ rc = hwrm_req_init(bp, patch_req, HWRM_FW_LIVEPATCH);
+ if (rc) {
+ hwrm_req_drop(bp, query_req);
+ return rc;
+ }
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+ patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
+ patch_resp = hwrm_req_hold(bp, patch_req);
+
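+ /* Try every firmware target; skip targets with no patch
+ * installed or whose installed patch is already active.
+ */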
+ for (target = 1; target <= FW_LIVEPATCH_REQ_FW_TARGET_LAST; target++) {
+ query_req->fw_target = target;
+ rc = hwrm_req_send(bp, query_req);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query packages");
+ break;
+ }
+
+ flags = le16_to_cpu(query_resp->status_flags);
+ if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
+ continue;
+ if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
+ !strncmp(query_resp->active_ver, query_resp->install_ver,
+ sizeof(query_resp->active_ver)))
+ continue;
+
+ patch_req->fw_target = target;
+ rc = hwrm_req_send(bp, patch_req);
+ if (rc) {
+ bnxt_dl_livepatch_report_err(bp, extack, patch_resp);
+ break;
+ }
+ installed++;
+ }
+
+ if (!rc && !installed) {
+ NL_SET_ERR_MSG_MOD(extack, "No live patches found");
+ rc = -ENOENT;
+ }
+ hwrm_req_drop(bp, query_req);
+ hwrm_req_drop(bp, patch_req);
+ return rc;
+}
+
+static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc = 0;
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ if (BNXT_PF(bp) && bp->pf.active_vfs) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "reload is unsupported when VFs are allocated\n");
+ return -EOPNOTSUPP;
+ }
+ rtnl_lock();
+ if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+ bnxt_ulp_stop(bp);
+ if (netif_running(bp->dev)) {
+ rc = bnxt_close_nic(bp, true, true);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to close");
+ dev_close(bp->dev);
+ rtnl_unlock();
+ break;
+ }
+ }
+ bnxt_vf_reps_free(bp);
+ rc = bnxt_hwrm_func_drv_unrgtr(bp);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
+ if (netif_running(bp->dev))
+ dev_close(bp->dev);
+ rtnl_unlock();
+ break;
+ }
+ bnxt_cancel_reservations(bp, false);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
+ break;
+ }
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ return bnxt_dl_livepatch_activate(bp, extack);
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET) {
+ NL_SET_ERR_MSG_MOD(extack, "Device not capable, requires reboot");
+ return -EOPNOTSUPP;
+ }
+ if (!bnxt_hwrm_reset_permitted(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Reset denied by firmware, it may be inhibited by remote driver");
+ return -EPERM;
+ }
+ rtnl_lock();
+ if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+ if (netif_running(bp->dev))
+ set_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ rc = bnxt_hwrm_firmware_reset(bp->dev,
+ FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
+ FW_RESET_REQ_FLAGS_RESET_GRACEFUL |
+ FW_RESET_REQ_FLAGS_FW_ACTIVATION);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ rtnl_unlock();
+ }
+ break;
+ }
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc = 0;
+
+ *actions_performed = 0;
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ bnxt_fw_init_one(bp);
+ bnxt_vf_reps_alloc(bp);
+ if (netif_running(bp->dev))
+ rc = bnxt_open_nic(bp, true, true);
+ bnxt_ulp_start(bp, rc);
+ if (!rc) {
+ bnxt_reenable_sriov(bp);
+ bnxt_ptp_reapply_pps(bp);
+ }
+ break;
+ }
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
+ unsigned long start = jiffies;
+ unsigned long timeout = start + BNXT_DFLT_FW_RST_MAX_DSECS * HZ / 10;
+
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ break;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ timeout = start + bp->fw_health->normal_func_wait_dsecs * HZ / 10;
+ if (!netif_running(bp->dev))
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device is closed, not waiting for reset notice that will never come");
+ rtnl_unlock();
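+ /* The fw reset task clears BNXT_STATE_FW_ACTIVATE once
+ * activation completes; poll for that with RTNL dropped so
+ * the reset task can make progress.
+ */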
+ while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
+ if (time_after(jiffies, timeout)) {
+ NL_SET_ERR_MSG_MOD(extack, "Activation incomplete");
+ rc = -ETIMEDOUT;
+ break;
+ }
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ NL_SET_ERR_MSG_MOD(extack, "Activation aborted");
+ rc = -ENODEV;
+ break;
+ }
+ msleep(50);
+ }
+ rtnl_lock();
+ if (!rc)
+ *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!rc) {
+ bnxt_print_device_info(bp);
+ if (netif_running(bp->dev)) {
+ mutex_lock(&bp->link_lock);
+ bnxt_report_link(bp);
+ mutex_unlock(&bp->link_lock);
+ }
+ *actions_performed |= BIT(action);
+ } else if (netif_running(bp->dev)) {
+ dev_close(bp->dev);
+ }
+ rtnl_unlock();
+ return rc;
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -287,6 +592,11 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
.info_get = bnxt_dl_info_get,
.flash_update = bnxt_dl_flash_update,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
+ .reload_down = bnxt_dl_reload_down,
+ .reload_up = bnxt_dl_reload_up,
};
static const struct devlink_ops bnxt_vf_dl_ops;
@@ -430,6 +740,57 @@ static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
return 0;
}
+#define BNXT_FW_SRT_PATCH "fw.srt.patch"
+#define BNXT_FW_CRT_PATCH "fw.crt.patch"
+
+static int bnxt_dl_livepatch_info_put(struct bnxt *bp,
+ struct devlink_info_req *req,
+ const char *key)
+{
+ struct hwrm_fw_livepatch_query_input *query;
+ struct hwrm_fw_livepatch_query_output *resp;
+ u16 flags;
+ int rc;
+
+ if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH)
+ return 0;
+
+ rc = hwrm_req_init(bp, query, HWRM_FW_LIVEPATCH_QUERY);
+ if (rc)
+ return rc;
+
+ if (!strcmp(key, BNXT_FW_SRT_PATCH))
+ query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW;
+ else if (!strcmp(key, BNXT_FW_CRT_PATCH))
+ query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW;
+ else
+ goto exit;
+
+ resp = hwrm_req_hold(bp, query);
+ rc = hwrm_req_send(bp, query);
+ if (rc)
+ goto exit;
+
+ flags = le16_to_cpu(resp->status_flags);
+ if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) {
+ resp->active_ver[sizeof(resp->active_ver) - 1] = '\0';
+ rc = devlink_info_version_running_put(req, key, resp->active_ver);
+ if (rc)
+ goto exit;
+ }
+
+ if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) {
+ resp->install_ver[sizeof(resp->install_ver) - 1] = '\0';
+ rc = devlink_info_version_stored_put(req, key, resp->install_ver);
+ if (rc)
+ goto exit;
+ }
+
+exit:
+ hwrm_req_drop(bp, query);
+ return rc;
+}
+
#define HWRM_FW_VER_STR_LEN 16
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
@@ -554,8 +915,13 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info);
if (rc ||
- !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
+ !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) {
+ if (!bnxt_get_pkginfo(bp->dev, buf, sizeof(buf)))
+ return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
return 0;
+ }
buf[0] = 0;
strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
@@ -583,8 +949,16 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
- return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
- DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ return rc;
+
+ rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
+ if (rc)
+ return rc;
+ return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
}
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
@@ -712,6 +1086,32 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
return 0;
}
+static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = bnxt_dl_get_remote_reset(dl);
+ return 0;
+}
+
+static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc;
+
+ rc = bnxt_hwrm_remote_dev_reset_set(bp, ctx->val.vbool);
+ if (rc)
+ return rc;
+
+ bnxt_dl_set_remote_reset(dl, ctx->val.vbool);
+ return rc;
+}
+
static const struct devlink_param bnxt_dl_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
@@ -734,53 +1134,49 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
NULL),
-};
-
-static const struct devlink_param bnxt_dl_port_params[] = {
+ /* Keep REMOTE_DEV_RESET last; it is excluded when firmware
+ * lacks hot-reset-if support.
+ */
+ DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ bnxt_remote_dev_reset_get,
+ bnxt_remote_dev_reset_set, NULL),
};
static int bnxt_dl_params_register(struct bnxt *bp)
{
+ int num_params = ARRAY_SIZE(bnxt_dl_params);
int rc;
if (bp->hwrm_spec_code < 0x10600)
return 0;
- rc = devlink_params_register(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- if (rc) {
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ num_params--;
+
+ rc = devlink_params_register(bp->dl, bnxt_dl_params, num_params);
+ if (rc)
netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
rc);
- return rc;
- }
- rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
- if (rc) {
- netdev_err(bp->dev, "devlink_port_params_register failed\n");
- devlink_params_unregister(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- return rc;
- }
- devlink_params_publish(bp->dl);
-
- return 0;
+ return rc;
}
static void bnxt_dl_params_unregister(struct bnxt *bp)
{
+ int num_params = ARRAY_SIZE(bnxt_dl_params);
+
if (bp->hwrm_spec_code < 0x10600)
return;
- devlink_params_unregister(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ num_params--;
+
+ devlink_params_unregister(bp->dl, bnxt_dl_params, num_params);
}
int bnxt_dl_register(struct bnxt *bp)
{
const struct devlink_ops *devlink_ops;
struct devlink_port_attrs attrs = {};
+ struct bnxt_dl *bp_dl;
struct devlink *dl;
int rc;
@@ -795,21 +1191,18 @@ int bnxt_dl_register(struct bnxt *bp)
return -ENOMEM;
}
- bnxt_link_bp_to_dl(bp, dl);
+ bp->dl = dl;
+ bp_dl = devlink_priv(dl);
+ bp_dl->bp = bp;
+ bnxt_dl_set_remote_reset(dl, true);
/* Add switchdev eswitch mode setting, if SRIOV supported */
if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
bp->hwrm_spec_code > 0x10803)
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- rc = devlink_register(dl);
- if (rc) {
- netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc);
- goto err_dl_free;
- }
-
if (!BNXT_PF(bp))
- return 0;
+ goto out;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = bp->pf.port_id;
@@ -819,21 +1212,20 @@ int bnxt_dl_register(struct bnxt *bp)
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed\n");
- goto err_dl_unreg;
+ goto err_dl_free;
}
rc = bnxt_dl_params_register(bp);
if (rc)
goto err_dl_port_unreg;
+out:
+ devlink_register(dl);
return 0;
err_dl_port_unreg:
devlink_port_unregister(&bp->dl_port);
-err_dl_unreg:
- devlink_unregister(dl);
err_dl_free:
- bnxt_link_bp_to_dl(bp, NULL);
devlink_free(dl);
return rc;
}
@@ -842,13 +1234,10 @@ void bnxt_dl_unregister(struct bnxt *bp)
{
struct devlink *dl = bp->dl;
- if (!dl)
- return;
-
+ devlink_unregister(dl);
if (BNXT_PF(bp)) {
bnxt_dl_params_unregister(bp);
devlink_port_unregister(&bp->dl_port);
}
- devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index d889f240da2b..a715458abc30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -13,6 +13,7 @@
/* Struct to hold housekeeping info needed by devlink interface */
struct bnxt_dl {
struct bnxt *bp; /* back ptr to the controlling dev */
+ bool remote_reset;
};
static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
@@ -20,17 +21,21 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
return ((struct bnxt_dl *)devlink_priv(dl))->bp;
}
-/* To clear devlink pointer from bp, pass NULL dl */
-static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
+static inline void bnxt_dl_remote_reload(struct bnxt *bp)
{
- bp->dl = dl;
+ devlink_remote_reload_actions_performed(bp->dl, 0,
+ BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+}
- /* add a back pointer in dl to bp */
- if (dl) {
- struct bnxt_dl *bp_dl = devlink_priv(dl);
+static inline bool bnxt_dl_get_remote_reset(struct devlink *dl)
+{
+ return ((struct bnxt_dl *)devlink_priv(dl))->remote_reset;
+}
- bp_dl->bp = bp;
- }
+static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
+{
+ ((struct bnxt_dl *)devlink_priv(dl))->remote_reset = value;
}
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
@@ -66,9 +71,9 @@ enum bnxt_dl_version_type {
BNXT_VERSION_STORED,
};
-void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
-void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
-void bnxt_dl_health_recovery_done(struct bnxt *bp);
+void bnxt_devlink_health_fw_report(struct bnxt *bp);
+void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
+void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
int bnxt_dl_register(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7260910e75fb..8188d55722e4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -427,6 +427,8 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
+ BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
+ BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
};
static const struct {
@@ -909,7 +911,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
- (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
+ netif_is_rxfh_configured(dev)) {
netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
return -EINVAL;
}
@@ -2180,13 +2182,18 @@ static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
return rc;
}
-static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
- u8 self_reset, u8 flags)
+int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_fw_reset_input *req;
int rc;
+ if (!bnxt_hwrm_reset_permitted(bp)) {
+ netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
+ return -EPERM;
+ }
+
rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
if (rc)
return rc;
@@ -2825,39 +2832,56 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
return retval;
}
-static void bnxt_get_pkgver(struct net_device *dev)
+int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
struct bnxt *bp = netdev_priv(dev);
u16 index = 0;
char *pkgver;
u32 pkglen;
u8 *pkgbuf;
- int len;
+ int rc;
- if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
- BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
- &index, NULL, &pkglen) != 0)
- return;
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &pkglen);
+ if (rc)
+ return rc;
pkgbuf = kzalloc(pkglen, GFP_KERNEL);
if (!pkgbuf) {
dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
pkglen);
- return;
+ return -ENOMEM;
}
- if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
+ rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
+ if (rc)
goto err;
pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
pkglen);
- if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
+ if (pkgver && *pkgver != 0 && isdigit(*pkgver))
+ strscpy(ver, pkgver, size);
+ else
+ rc = -ENOENT;
+
+err:
+ kfree(pkgbuf);
+
+ return rc;
+}
+
+static void bnxt_get_pkgver(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ char buf[FW_VER_STR_LEN];
+ int len;
+
+ if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
len = strlen(bp->fw_ver_str);
snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
- "/pkg %s", pkgver);
+ "/pkg %s", buf);
}
-err:
- kfree(pkgbuf);
}
static int bnxt_get_eeprom(struct net_device *dev,
@@ -3609,337 +3633,6 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return 0;
}
-static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
- struct bnxt_hwrm_dbg_dma_info *info)
-{
- struct hwrm_dbg_cmn_input *cmn_req = msg;
- __le16 *seq_ptr = msg + info->seq_off;
- struct hwrm_dbg_cmn_output *cmn_resp;
- u16 seq = 0, len, segs_off;
- dma_addr_t dma_handle;
- void *dma_buf, *resp;
- int rc, off = 0;
-
- dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
- if (!dma_buf) {
- hwrm_req_drop(bp, msg);
- return -ENOMEM;
- }
-
- hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
- cmn_resp = hwrm_req_hold(bp, msg);
- resp = cmn_resp;
-
- segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
- total_segments);
- cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
- cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
- while (1) {
- *seq_ptr = cpu_to_le16(seq);
- rc = hwrm_req_send(bp, msg);
- if (rc)
- break;
-
- len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
- if (!seq &&
- cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
- info->segs = le16_to_cpu(*((__le16 *)(resp +
- segs_off)));
- if (!info->segs) {
- rc = -EIO;
- break;
- }
-
- info->dest_buf_size = info->segs *
- sizeof(struct coredump_segment_record);
- info->dest_buf = kmalloc(info->dest_buf_size,
- GFP_KERNEL);
- if (!info->dest_buf) {
- rc = -ENOMEM;
- break;
- }
- }
-
- if (info->dest_buf) {
- if ((info->seg_start + off + len) <=
- BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
- memcpy(info->dest_buf + off, dma_buf, len);
- } else {
- rc = -ENOBUFS;
- break;
- }
- }
-
- if (cmn_req->req_type ==
- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
- info->dest_buf_size += len;
-
- if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
- break;
-
- seq++;
- off += len;
- }
- hwrm_req_drop(bp, msg);
- return rc;
-}
-
-static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
- struct bnxt_coredump *coredump)
-{
- struct bnxt_hwrm_dbg_dma_info info = {NULL};
- struct hwrm_dbg_coredump_list_input *req;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
- if (rc)
- return rc;
-
- info.dma_len = COREDUMP_LIST_BUF_LEN;
- info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
- info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
- data_len);
-
- rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
- if (!rc) {
- coredump->data = info.dest_buf;
- coredump->data_size = info.dest_buf_size;
- coredump->total_segs = info.segs;
- }
- return rc;
-}
-
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
- u16 segment_id)
-{
- struct hwrm_dbg_coredump_initiate_input *req;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
- if (rc)
- return rc;
-
- hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
- req->component_id = cpu_to_le16(component_id);
- req->segment_id = cpu_to_le16(segment_id);
-
- return hwrm_req_send(bp, req);
-}
-
-static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
- u16 segment_id, u32 *seg_len,
- void *buf, u32 buf_len, u32 offset)
-{
- struct hwrm_dbg_coredump_retrieve_input *req;
- struct bnxt_hwrm_dbg_dma_info info = {NULL};
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
- if (rc)
- return rc;
-
- req->component_id = cpu_to_le16(component_id);
- req->segment_id = cpu_to_le16(segment_id);
-
- info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
- info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
- seq_no);
- info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
- data_len);
- if (buf) {
- info.dest_buf = buf + offset;
- info.buf_len = buf_len;
- info.seg_start = offset;
- }
-
- rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
- if (!rc)
- *seg_len = info.dest_buf_size;
-
- return rc;
-}
-
-static void
-bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
- struct bnxt_coredump_segment_hdr *seg_hdr,
- struct coredump_segment_record *seg_rec, u32 seg_len,
- int status, u32 duration, u32 instance)
-{
- memset(seg_hdr, 0, sizeof(*seg_hdr));
- memcpy(seg_hdr->signature, "sEgM", 4);
- if (seg_rec) {
- seg_hdr->component_id = (__force __le32)seg_rec->component_id;
- seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
- seg_hdr->low_version = seg_rec->version_low;
- seg_hdr->high_version = seg_rec->version_hi;
- } else {
- /* For hwrm_ver_get response Component id = 2
- * and Segment id = 0
- */
- seg_hdr->component_id = cpu_to_le32(2);
- seg_hdr->segment_id = 0;
- }
- seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
- seg_hdr->length = cpu_to_le32(seg_len);
- seg_hdr->status = cpu_to_le32(status);
- seg_hdr->duration = cpu_to_le32(duration);
- seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
- seg_hdr->instance = cpu_to_le32(instance);
-}
-
-static void
-bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
- time64_t start, s16 start_utc, u16 total_segs,
- int status)
-{
- time64_t end = ktime_get_real_seconds();
- u32 os_ver_major = 0, os_ver_minor = 0;
- struct tm tm;
-
- time64_to_tm(start, 0, &tm);
- memset(record, 0, sizeof(*record));
- memcpy(record->signature, "cOrE", 4);
- record->flags = 0;
- record->low_version = 0;
- record->high_version = 1;
- record->asic_state = 0;
- strlcpy(record->system_name, utsname()->nodename,
- sizeof(record->system_name));
- record->year = cpu_to_le16(tm.tm_year + 1900);
- record->month = cpu_to_le16(tm.tm_mon + 1);
- record->day = cpu_to_le16(tm.tm_mday);
- record->hour = cpu_to_le16(tm.tm_hour);
- record->minute = cpu_to_le16(tm.tm_min);
- record->second = cpu_to_le16(tm.tm_sec);
- record->utc_bias = cpu_to_le16(start_utc);
- strcpy(record->commandline, "ethtool -w");
- record->total_segments = cpu_to_le32(total_segs);
-
- sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
- record->os_ver_major = cpu_to_le32(os_ver_major);
- record->os_ver_minor = cpu_to_le32(os_ver_minor);
-
- strlcpy(record->os_name, utsname()->sysname, 32);
- time64_to_tm(end, 0, &tm);
- record->end_year = cpu_to_le16(tm.tm_year + 1900);
- record->end_month = cpu_to_le16(tm.tm_mon + 1);
- record->end_day = cpu_to_le16(tm.tm_mday);
- record->end_hour = cpu_to_le16(tm.tm_hour);
- record->end_minute = cpu_to_le16(tm.tm_min);
- record->end_second = cpu_to_le16(tm.tm_sec);
- record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
- record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
- bp->ver_resp.chip_rev << 8 |
- bp->ver_resp.chip_metal);
- record->asic_id2 = 0;
- record->coredump_status = cpu_to_le32(status);
- record->ioctl_low_version = 0;
- record->ioctl_high_version = 0;
-}
-
-static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
-{
- u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
- u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
- struct coredump_segment_record *seg_record = NULL;
- struct bnxt_coredump_segment_hdr seg_hdr;
- struct bnxt_coredump coredump = {NULL};
- time64_t start_time;
- u16 start_utc;
- int rc = 0, i;
-
- if (buf)
- buf_len = *dump_len;
-
- start_time = ktime_get_real_seconds();
- start_utc = sys_tz.tz_minuteswest * 60;
- seg_hdr_len = sizeof(seg_hdr);
-
- /* First segment should be hwrm_ver_get response */
- *dump_len = seg_hdr_len + ver_get_resp_len;
- if (buf) {
- bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
- 0, 0, 0);
- memcpy(buf + offset, &seg_hdr, seg_hdr_len);
- offset += seg_hdr_len;
- memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
- offset += ver_get_resp_len;
- }
-
- rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
- if (rc) {
- netdev_err(bp->dev, "Failed to get coredump segment list\n");
- goto err;
- }
-
- *dump_len += seg_hdr_len * coredump.total_segs;
-
- seg_record = (struct coredump_segment_record *)coredump.data;
- seg_record_len = sizeof(*seg_record);
-
- for (i = 0; i < coredump.total_segs; i++) {
- u16 comp_id = le16_to_cpu(seg_record->component_id);
- u16 seg_id = le16_to_cpu(seg_record->segment_id);
- u32 duration = 0, seg_len = 0;
- unsigned long start, end;
-
- if (buf && ((offset + seg_hdr_len) >
- BNXT_COREDUMP_BUF_LEN(buf_len))) {
- rc = -ENOBUFS;
- goto err;
- }
-
- start = jiffies;
-
- rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
- if (rc) {
- netdev_err(bp->dev,
- "Failed to initiate coredump for seg = %d\n",
- seg_record->segment_id);
- goto next_seg;
- }
-
- /* Write segment data into the buffer */
- rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
- &seg_len, buf, buf_len,
- offset + seg_hdr_len);
- if (rc && rc == -ENOBUFS)
- goto err;
- else if (rc)
- netdev_err(bp->dev,
- "Failed to retrieve coredump for seg = %d\n",
- seg_record->segment_id);
-
-next_seg:
- end = jiffies;
- duration = jiffies_to_msecs(end - start);
- bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
- rc, duration, 0);
-
- if (buf) {
- /* Write segment header into the buffer */
- memcpy(buf + offset, &seg_hdr, seg_hdr_len);
- offset += seg_hdr_len + seg_len;
- }
-
- *dump_len += seg_len;
- seg_record =
- (struct coredump_segment_record *)((u8 *)seg_record +
- seg_record_len);
- }
-
-err:
- if (buf)
- bnxt_fill_coredump_record(bp, buf + offset, start_time,
- start_utc, coredump.total_segs + 1,
- rc);
- kfree(coredump.data);
- *dump_len += sizeof(struct bnxt_coredump_record);
- if (rc == -ENOBUFS)
- netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
- return rc;
-}
-
static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3971,10 +3664,7 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
bp->ver_resp.hwrm_fw_rsvd_8b;
dump->flag = bp->dump_flag;
- if (bp->dump_flag == BNXT_DUMP_CRASH)
- dump->len = BNXT_CRASH_DUMP_LEN;
- else
- bnxt_get_coredump(bp, NULL, &dump->len);
+ dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
return 0;
}
@@ -3989,15 +3679,7 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
memset(buf, 0, dump->len);
dump->flag = bp->dump_flag;
- if (dump->flag == BNXT_DUMP_CRASH) {
-#ifdef CONFIG_TEE_BNXT_FW
- return tee_bnxt_copy_coredump(buf, 0, dump->len);
-#endif
- } else {
- return bnxt_get_coredump(bp, buf, &dump->len);
- }
-
- return 0;
+ return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
}
static int bnxt_get_ts_info(struct net_device *dev,
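
With the coredump helpers moved out of the ethtool code, bnxt_get_dump_flag() and bnxt_get_dump_data() reduce to the generic two-step ethtool dump contract: the first call sizes the buffer, the second fills it. Roughly what a caller such as the ethtool core does (a sketch with error handling trimmed to the essentials; example_fetch_dump() is not a real kernel function):

static int example_fetch_dump(struct net_device *dev, void **bufp)
{
	struct ethtool_dump dump = { .cmd = ETHTOOL_GET_DUMP_FLAG };
	void *buf;
	int rc;

	rc = dev->ethtool_ops->get_dump_flag(dev, &dump);	/* sets dump.len */
	if (rc)
		return rc;

	buf = vzalloc(dump.len);
	if (!buf)
		return -ENOMEM;

	rc = dev->ethtool_ops->get_dump_data(dev, &dump, buf);
	if (rc) {
		vfree(buf);
		return rc;
	}
	*bufp = buf;
	return 0;
}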
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 0a57cb6a4a4b..6aa44840f13a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -22,49 +22,6 @@ struct bnxt_led_cfg {
u8 rsvd;
};
-#define COREDUMP_LIST_BUF_LEN 2048
-#define COREDUMP_RETRIEVE_BUF_LEN 4096
-
-struct bnxt_coredump {
- void *data;
- int data_size;
- u16 total_segs;
-};
-
-#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
-
-struct bnxt_hwrm_dbg_dma_info {
- void *dest_buf;
- int dest_buf_size;
- u16 dma_len;
- u16 seq_off;
- u16 data_len_off;
- u16 segs;
- u32 seg_start;
- u32 buf_len;
-};
-
-struct hwrm_dbg_cmn_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
-};
-
-struct hwrm_dbg_cmn_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define HWRM_DBG_CMN_FLAGS_MORE 1
-};
-
-#define BNXT_CRASH_DUMP_LEN (8 << 20)
-
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
@@ -94,8 +51,11 @@ u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
+int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags);
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
u32 install_type);
+int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
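
bnxt_hwrm_firmware_reset() and bnxt_get_pkginfo() lose their static qualifier here so that code outside bnxt_ethtool.c (presumably the devlink side) can reuse them. A hypothetical caller of the package-version helper, for illustration only:

static void example_log_pkg_version(struct bnxt *bp)
{
	char ver[FW_VER_STR_LEN];

	if (!bnxt_get_pkginfo(bp->dev, ver, sizeof(ver)))
		netdev_info(bp->dev, "NVM package version %s\n", ver);
}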
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 94d07a9f7034..ea86c54247c7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -532,8 +532,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 52
-#define HWRM_VERSION_STR "1.10.2.52"
+#define HWRM_VERSION_RSVD 63
+#define HWRM_VERSION_STR "1.10.2.63"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1587,6 +1587,8 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1956,6 +1958,18 @@ struct hwrm_func_cfg_output {
u8 valid;
};
+/* hwrm_func_cfg_cmd_err (size:64b/8B) */
+struct hwrm_func_cfg_cmd_err {
+ u8 code;
+ #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL
+ #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT
+ u8 unused_0[7];
+};
+
/* hwrm_func_qstats_input (size:192b/24B) */
struct hwrm_func_qstats_input {
__le16 req_type;
@@ -3601,7 +3615,15 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -4040,7 +4062,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:3648b/456B) */
+/* rx_port_stats_ext (size:3776b/472B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -4099,6 +4121,8 @@ struct rx_port_stats_ext {
__le64 rx_discard_packets_cos5;
__le64 rx_discard_packets_cos6;
__le64 rx_discard_packets_cos7;
+ __le64 rx_fec_corrected_blocks;
+ __le64 rx_fec_uncorrectable_blocks;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -4372,7 +4396,10 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
- u8 unused_0[3];
+ __le16 flags2;
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ u8 unused_0[1];
u8 valid;
};
@@ -6076,6 +6103,11 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
#define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -6206,7 +6238,15 @@ struct hwrm_vnic_rss_cfg_input {
__le64 ring_grp_tbl_addr;
__le64 hash_key_tbl_addr;
__le16 rss_ctx_idx;
- u8 unused_1[6];
+ u8 flags;
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ u8 rss_hash_function;
+ #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_LAST VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM
+ u8 unused_1[4];
};
/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
@@ -6331,7 +6371,24 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
#define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
- u8 unused_0;
+ u8 cmpl_coal_cnt;
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
__le16 flags;
#define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
__le64 page_tbl_addr;
@@ -7099,6 +7156,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
__le32 enables;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
@@ -7234,6 +7292,7 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
__le32 flags;
#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
__le64 ntuple_filter_id;
__le32 new_dst_id;
__le32 new_mirror_vnic_id;
@@ -7834,11 +7893,11 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
u8 unused_0[3];
u8 valid;
};
-/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8414,6 +8473,86 @@ struct hwrm_fw_get_structured_data_cmd_err {
u8 unused_0[7];
};
+/* hwrm_fw_livepatch_query_input (size:192b/24B) */
+struct hwrm_fw_livepatch_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 fw_target;
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_output (size:640b/80B) */
+struct hwrm_fw_livepatch_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ char install_ver[32];
+ char active_ver[32];
+ __le16 status_flags;
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_input (size:256b/32B) */
+struct hwrm_fw_livepatch_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 opcode;
+ #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
+ #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
+ #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
+ u8 fw_target;
+ #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
+ u8 loadtype;
+ #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
+ u8 flags;
+ __le32 patch_len;
+ __le64 host_addr;
+};
+
+/* hwrm_fw_livepatch_output (size:128b/16B) */
+struct hwrm_fw_livepatch_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
+struct hwrm_fw_livepatch_cmd_err {
+ u8 code;
+ #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
+ u8 unused_0[7];
+};
+
/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
struct hwrm_exec_fwd_resp_input {
__le16 req_type;
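
The new hwrm_fw_livepatch_query response carries fixed-width little-endian fields, so a consumer byte-swaps before testing the status bits. A hypothetical helper (not part of the patch):

static bool example_livepatch_active(struct hwrm_fw_livepatch_query_output *resp)
{
	u16 flags = le16_to_cpu(resp->status_flags);

	return !!(flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE);
}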
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index f0aa480799ca..8388be119f9a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -11,9 +11,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
-#include <linux/timecounter.h>
#include <linux/timekeeping.h>
#include <linux/ptp_classify.h>
#include "bnxt_hsi.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index fa5f05708e6d..7c528e1f8713 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -10,6 +10,9 @@
#ifndef BNXT_PTP_H
#define BNXT_PTP_H
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
#define BNXT_PTP_GRC_WIN 6
#define BNXT_PTP_GRC_WIN_BASE 0x6000
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 70d8ca3039dc..1d177fed44a6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1151,7 +1151,7 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
}
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
struct hwrm_func_vf_cfg_input *req;
int rc = 0;
@@ -1217,7 +1217,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
/* overwrite netdev dev_addr with admin VF MAC */
if (is_valid_ether_addr(bp->vf.mac_addr))
- memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
update_vf_mac_exit:
hwrm_req_drop(bp, req);
if (inform_pf)
@@ -1246,7 +1246,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
{
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 995535e4c11b..9a4bacba477b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -41,5 +41,5 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
-int bnxt_approve_mac(struct bnxt *, u8 *, bool);
+int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 6b4d2556a6df..54d59f681b86 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -11,8 +11,7 @@
#define BNXT_ULP_H
#define BNXT_ROCE_ULP 0
-#define BNXT_OTHER_ULP 1
-#define BNXT_MAX_ULP 2
+#define BNXT_MAX_ULP 1
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 9401936b74fa..8eb28e088582 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -475,7 +475,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->features |= pf_dev->features;
bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
dev->perm_addr);
- ether_addr_copy(dev->dev_addr, dev->perm_addr);
+ eth_hw_addr_set(dev, dev->perm_addr);
/* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
dev->max_mtu = max_mtu;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 23c7595d2a1d..226f4403cfed 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -935,6 +935,48 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
return 0;
}
+static void bcmgenet_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bcmgenet_priv *priv;
+ u32 umac_cmd;
+
+ priv = netdev_priv(dev);
+
+ epause->autoneg = priv->autoneg_pause;
+
+ if (netif_carrier_ok(dev)) {
+ /* report active state when link is up */
+ umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
+ epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
+ epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
+ } else {
+ /* otherwise report stored settings */
+ epause->tx_pause = priv->tx_pause;
+ epause->rx_pause = priv->rx_pause;
+ }
+}
+
+static int bcmgenet_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ if (!phy_validate_pause(dev->phydev, epause))
+ return -EINVAL;
+
+ priv->autoneg_pause = !!epause->autoneg;
+ priv->tx_pause = !!epause->tx_pause;
+ priv->rx_pause = !!epause->rx_pause;
+
+ bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
+
+ return 0;
+}
+
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
BCMGENET_STAT_NETDEV = -1,
@@ -1587,6 +1629,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = bcmgenet_get_rxnfc,
.set_rxnfc = bcmgenet_set_rxnfc,
+ .get_pauseparam = bcmgenet_get_pauseparam,
+ .set_pauseparam = bcmgenet_set_pauseparam,
};
/* Power down the unimac, based on mode. */
@@ -1609,7 +1653,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
/* Power down LED */
if (priv->hw_params->flags & GENET_HAS_EXT) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- if (GENET_IS_V5(priv))
+ if (GENET_IS_V5(priv) && !priv->ephy_16nm)
reg |= EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1646,7 +1690,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
- if (GENET_IS_V5(priv)) {
+ if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -3222,7 +3266,7 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
- unsigned char *addr)
+ const unsigned char *addr)
{
bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
@@ -3364,6 +3408,8 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq1;
}
+ bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
+
bcmgenet_netif_start(dev);
netif_tx_start_all_queues(dev);
@@ -3408,11 +3454,6 @@ static void bcmgenet_netif_stop(struct net_device *dev)
*/
cancel_work_sync(&priv->bcmgenet_irq_work);
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
-
/* tx reclaim */
bcmgenet_tx_reclaim_all(dev);
bcmgenet_fini_dma(priv);
@@ -3519,7 +3560,7 @@ static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
#define MAX_MDF_FILTER 17
static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
- unsigned char *addr,
+ const unsigned char *addr,
int *i)
{
bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
@@ -3592,7 +3633,7 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -3869,6 +3910,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_plat_data {
enum bcmgenet_version version;
u32 dma_max_burst_length;
+ bool ephy_16nm;
};
static const struct bcmgenet_plat_data v1_plat_data = {
@@ -3901,6 +3943,12 @@ static const struct bcmgenet_plat_data bcm2711_plat_data = {
.dma_max_burst_length = 0x08,
};
+static const struct bcmgenet_plat_data bcm7712_plat_data = {
+ .version = GENET_V5,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .ephy_16nm = true,
+};
+
static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
@@ -3908,6 +3956,7 @@ static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
+ { .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3950,6 +3999,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
+ /* Set default pause parameters */
+ priv->autoneg_pause = 1;
+ priv->tx_pause = 1;
+ priv->rx_pause = 1;
+
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
dev->watchdog_timeo = 2 * HZ;
@@ -3983,6 +4037,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
+ priv->ephy_16nm = pdata->ephy_16nm;
} else {
priv->version = pd->genet_version;
priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
@@ -4036,11 +4091,15 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
if (pd && !IS_ERR_OR_NULL(pd->mac_address))
- ether_addr_copy(dev->dev_addr, pd->mac_address);
+ eth_hw_addr_set(dev, pd->mac_address);
else
- if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
- if (has_acpi_companion(&pdev->dev))
- bcmgenet_get_hw_addr(priv, dev->dev_addr);
+ if (device_get_ethdev_address(&pdev->dev, dev))
+ if (has_acpi_companion(&pdev->dev)) {
+ u8 addr[ETH_ALEN];
+
+ bcmgenet_get_hw_addr(priv, addr);
+ eth_hw_addr_set(dev, addr);
+ }
if (!is_valid_ether_addr(dev->dev_addr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
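
The new bcmgenet pause support plugs into the standard ethtool_ops pause hooks, so a request like 'ethtool -A eth0 autoneg on rx on tx on' ends up in bcmgenet_set_pauseparam(). Sketched from the core's side (illustrative; the ops signatures are the real API):

static int example_enable_pause(struct net_device *dev)
{
	struct ethtool_pauseparam epause = {
		.autoneg  = 1,
		.rx_pause = 1,
		.tx_pause = 1,
	};

	return dev->ethtool_ops->set_pauseparam(dev, &epause);
}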
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0a6d91b0f0aa..946f6e283c4e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -329,6 +329,7 @@ struct bcmgenet_mib_counters {
#define EXT_CFG_IDDQ_BIAS (1 << 0)
#define EXT_CFG_PWR_DOWN (1 << 1)
#define EXT_CK25_DIS (1 << 4)
+#define EXT_CFG_IDDQ_GLOBAL_PWR (1 << 3)
#define EXT_GPHY_RESET (1 << 5)
/* DMA rings size */
@@ -594,6 +595,9 @@ struct bcmgenet_priv {
/* other misc variables */
struct bcmgenet_hw_params *hw_params;
+ unsigned autoneg_pause:1;
+ unsigned tx_pause:1;
+ unsigned rx_pause:1;
/* MDIO bus variables */
wait_queue_head_t wq;
@@ -606,13 +610,10 @@ struct bcmgenet_priv {
bool clk_eee_enabled;
/* PHY device variables */
- int old_link;
- int old_speed;
- int old_duplex;
- int old_pause;
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
+ bool ephy_16nm;
/* Interrupt variables */
struct work_struct bcmgenet_irq_work;
@@ -690,6 +691,7 @@ int bcmgenet_mii_init(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 89d16c587bb7..5f259641437a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -25,92 +25,80 @@
#include "bcmgenet.h"
-/* setup netdev link state when PHY link status change and
- * update UMAC and RGMII block when link up
- */
-void bcmgenet_mii_setup(struct net_device *dev)
+static void bcmgenet_mac_config(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
- bool status_changed = false;
- if (priv->old_link != phydev->link) {
- status_changed = true;
- priv->old_link = phydev->link;
- }
+ /* speed */
+ if (phydev->speed == SPEED_1000)
+ cmd_bits = CMD_SPEED_1000;
+ else if (phydev->speed == SPEED_100)
+ cmd_bits = CMD_SPEED_100;
+ else
+ cmd_bits = CMD_SPEED_10;
+ cmd_bits <<= CMD_SPEED_SHIFT;
- if (phydev->link) {
- /* check speed/duplex/pause changes */
- if (priv->old_speed != phydev->speed) {
- status_changed = true;
- priv->old_speed = phydev->speed;
- }
+ /* duplex */
+ if (phydev->duplex != DUPLEX_FULL) {
+ cmd_bits |= CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+ } else {
+ /* pause capability defaults to Symmetric */
+ if (priv->autoneg_pause) {
+ bool tx_pause = false, rx_pause = false;
- if (priv->old_duplex != phydev->duplex) {
- status_changed = true;
- priv->old_duplex = phydev->duplex;
- }
+ if (phydev->autoneg)
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
- if (priv->old_pause != phydev->pause) {
- status_changed = true;
- priv->old_pause = phydev->pause;
+ if (!tx_pause)
+ cmd_bits |= CMD_TX_PAUSE_IGNORE;
+ if (!rx_pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE;
}
- /* done if nothing has changed */
- if (!status_changed)
- return;
-
- /* speed */
- if (phydev->speed == SPEED_1000)
- cmd_bits = CMD_SPEED_1000;
- else if (phydev->speed == SPEED_100)
- cmd_bits = CMD_SPEED_100;
- else
- cmd_bits = CMD_SPEED_10;
- cmd_bits <<= CMD_SPEED_SHIFT;
-
- /* duplex */
- if (phydev->duplex != DUPLEX_FULL)
- cmd_bits |= CMD_HD_EN;
-
- /* pause capability */
- if (!phydev->pause)
- cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
-
- /*
- * Program UMAC and RGMII block based on established
- * link speed, duplex, and pause. The speed set in
- * umac->cmd tell RGMII block which clock to use for
- * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps).
- * Receive clock is provided by the PHY.
- */
- reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg &= ~OOB_DISABLE;
- reg |= RGMII_LINK;
- bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ /* Manual override */
+ if (!priv->rx_pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE;
+ if (!priv->tx_pause)
+ cmd_bits |= CMD_TX_PAUSE_IGNORE;
+ }
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
- CMD_HD_EN |
- CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
- reg |= cmd_bits;
- if (reg & CMD_SW_RESET) {
- reg &= ~CMD_SW_RESET;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- udelay(2);
- reg |= CMD_TX_EN | CMD_RX_EN;
- }
+ /* Program UMAC and RGMII block based on established
+ * link speed, duplex, and pause. The speed set in
+ * umac->cmd tells the RGMII block which clock to use for
+ * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps).
+ * Receive clock is provided by the PHY.
+ */
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
+ reg |= RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ if (reg & CMD_SW_RESET) {
+ reg &= ~CMD_SW_RESET;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- } else {
- /* done if nothing has changed */
- if (!status_changed)
- return;
-
- /* needed for MoCA fixed PHY to reflect correct link status */
- netif_carrier_off(dev);
+ udelay(2);
+ reg |= CMD_TX_EN | CMD_RX_EN;
}
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+}
+/* setup netdev link state when the PHY link status changes and
+ * update UMAC and RGMII block when link up
+ */
+void bcmgenet_mii_setup(struct net_device *dev)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (phydev->link)
+ bcmgenet_mac_config(dev);
phy_print_status(phydev);
}
@@ -130,20 +118,36 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
return 0;
}
+void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising, rx);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising,
+ rx | tx);
+ phy_start_aneg(phydev);
+
+ mutex_lock(&phydev->lock);
+ if (phydev->link)
+ bcmgenet_mac_config(dev);
+ mutex_unlock(&phydev->lock);
+}
+
void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (GENET_IS_V4(priv)) {
+ if (GENET_IS_V4(priv) || priv->ephy_16nm) {
reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
if (enable) {
reg &= ~EXT_CK25_DIS;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
- reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
+ EXT_CFG_IDDQ_GLOBAL_PWR);
reg |= EXT_GPHY_RESET;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
@@ -151,7 +155,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
reg &= ~EXT_GPHY_RESET;
} else {
reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
- EXT_GPHY_RESET;
+ EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
reg |= EXT_CK25_DIS;
@@ -286,23 +290,53 @@ int bcmgenet_mii_probe(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct device_node *dn = kdev->of_node;
+ phy_interface_t phy_iface = priv->phy_interface;
struct phy_device *phydev;
- u32 phy_flags = 0;
+ u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_IDDQ_SUSPEND;
int ret;
/* Communicate the integrated PHY revision */
if (priv->internal_phy)
phy_flags = priv->gphy_rev;
- /* Initialize link state variables that bcmgenet_mii_setup() uses */
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
+ /* This is an ugly quirk but we have not been correctly interpreting
+ * the phy_interface values and we have done that across different
+ * drivers, so at least we are consistent in our mistakes.
+ *
+ * When the Generic PHY driver is in use either the PHY has been
+ * strapped or programmed correctly by the boot loader so we should
+ * stick to our incorrect interpretation since we have validated it.
+ *
+ * Now when a dedicated PHY driver is in use, we need to reverse the
+ * meaning of the phy_interface_mode values to something that the PHY
+ * driver will interpret and act on such that we have two mistakes
+ * canceling themselves so to speak. We only do this for the two
+ * modes that GENET driver officially supports on Broadcom STB chips:
+ * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. Other
+ * modes are not *officially* supported with the boot loader and the
+ * scripted environment generating Device Tree blobs for those
+ * platforms.
+ *
+ * Note that internal PHY, MoCA and fixed-link configurations are not
+ * affected because they use different phy_interface_t values or the
+ * Generic PHY driver.
+ */
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ default:
+ break;
+ }
if (dn) {
phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
- phy_flags, priv->phy_interface);
+ phy_flags, phy_iface);
if (!phydev) {
pr_err("could not attach to PHY\n");
return -ENODEV;
@@ -332,7 +366,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
phydev->dev_flags = phy_flags;
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
- priv->phy_interface);
+ phy_iface);
if (ret) {
pr_err("could not attach to PHY\n");
return -ENODEV;
@@ -350,8 +384,6 @@ int bcmgenet_mii_probe(struct net_device *dev)
return ret;
}
- linkmode_copy(phydev->advertising, phydev->supported);
-
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
* that prevents the signaling of link UP interrupts when
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5e0e0e70d801..b1328c5524b5 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3942,7 +3942,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
}
/* tp->lock is held. */
-static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
+ int index)
{
u32 addr_high, addr_low;
@@ -5746,7 +5747,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
udelay(40);
- current_link_up = false;
tp->link_config.rmt_adv = 0;
mac_status = tr32(MAC_STATUS);
@@ -9366,7 +9366,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev))
return 0;
@@ -10273,8 +10273,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
- if (tg3_flag(tp, TSO_CAPABLE) &&
- tg3_asic_rev(tp) == ASIC_REV_5705) {
+ if (tg3_flag(tp, TSO_CAPABLE)) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -11213,12 +11212,8 @@ static void tg3_reset_task(struct work_struct *work)
}
tg3_netif_start(tp);
-
tg3_full_unlock(tp);
-
- if (!err)
- tg3_phy_start(tp);
-
+ tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
rtnl_unlock();
@@ -16915,19 +16910,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
return err;
}
-static int tg3_get_device_address(struct tg3 *tp)
+static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
{
- struct net_device *dev = tp->dev;
u32 hi, lo, mac_offset;
int addr_ok = 0;
int err;
- if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
+ if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
return 0;
if (tg3_flag(tp, IS_SSB_CORE)) {
- err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
- if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
+ err = ssb_gige_get_macaddr(tp->pdev, addr);
+ if (!err && is_valid_ether_addr(addr))
return 0;
}
@@ -16951,41 +16945,41 @@ static int tg3_get_device_address(struct tg3 *tp)
/* First try to get it from MAC address mailbox. */
tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
if ((hi >> 16) == 0x484b) {
- dev->dev_addr[0] = (hi >> 8) & 0xff;
- dev->dev_addr[1] = (hi >> 0) & 0xff;
+ addr[0] = (hi >> 8) & 0xff;
+ addr[1] = (hi >> 0) & 0xff;
tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
- dev->dev_addr[2] = (lo >> 24) & 0xff;
- dev->dev_addr[3] = (lo >> 16) & 0xff;
- dev->dev_addr[4] = (lo >> 8) & 0xff;
- dev->dev_addr[5] = (lo >> 0) & 0xff;
+ addr[2] = (lo >> 24) & 0xff;
+ addr[3] = (lo >> 16) & 0xff;
+ addr[4] = (lo >> 8) & 0xff;
+ addr[5] = (lo >> 0) & 0xff;
/* Some old bootcode may report a 0 MAC address in SRAM */
- addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
+ addr_ok = is_valid_ether_addr(addr);
}
if (!addr_ok) {
/* Next, try NVRAM. */
if (!tg3_flag(tp, NO_NVRAM) &&
!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
!tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
- memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
- memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
+ memcpy(&addr[0], ((char *)&hi) + 2, 2);
+ memcpy(&addr[2], (char *)&lo, sizeof(lo));
}
/* Finally just fetch it out of the MAC control regs. */
else {
hi = tr32(MAC_ADDR_0_HIGH);
lo = tr32(MAC_ADDR_0_LOW);
- dev->dev_addr[5] = lo & 0xff;
- dev->dev_addr[4] = (lo >> 8) & 0xff;
- dev->dev_addr[3] = (lo >> 16) & 0xff;
- dev->dev_addr[2] = (lo >> 24) & 0xff;
- dev->dev_addr[1] = hi & 0xff;
- dev->dev_addr[0] = (hi >> 8) & 0xff;
+ addr[5] = lo & 0xff;
+ addr[4] = (lo >> 8) & 0xff;
+ addr[3] = (lo >> 16) & 0xff;
+ addr[2] = (lo >> 24) & 0xff;
+ addr[1] = hi & 0xff;
+ addr[0] = (hi >> 8) & 0xff;
}
}
- if (!is_valid_ether_addr(&dev->dev_addr[0]))
+ if (!is_valid_ether_addr(addr))
return -EINVAL;
return 0;
}
@@ -17561,6 +17555,7 @@ static int tg3_init_one(struct pci_dev *pdev,
char str[40];
u64 dma_mask, persist_dma_mask;
netdev_features_t features = 0;
+ u8 addr[ETH_ALEN] __aligned(2);
err = pci_enable_device(pdev);
if (err) {
@@ -17783,12 +17778,13 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->rx_pending = 63;
}
- err = tg3_get_device_address(tp);
+ err = tg3_get_device_address(tp, addr);
if (err) {
dev_err(&pdev->dev,
"Could not obtain valid ethernet address, aborting\n");
goto err_out_apeunmap;
}
+ eth_hw_addr_set(dev, addr);
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
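
The tg3 conversion above shows the pattern this whole series applies: since drivers may no longer write dev->dev_addr directly, the address is assembled in a properly aligned local buffer and committed once via eth_hw_addr_set(). A condensed sketch (example_read_mac() stands in for any device-specific fetch):

static int example_assign_mac(struct net_device *dev)
{
	u8 addr[ETH_ALEN] __aligned(2);
	int err;

	err = example_read_mac(addr);		/* illustrative */
	if (err)
		return err;
	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr);		/* the one sanctioned writer */
	return 0;
}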
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ba47777d9cff..bbdc829c3524 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -875,7 +875,7 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
if (is_zero_ether_addr(netdev->dev_addr))
- ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
+ eth_hw_addr_set(netdev, bnad->perm_addr);
}
/* Control Path Handlers */
@@ -3249,7 +3249,7 @@ bnad_set_mac_address(struct net_device *netdev, void *addr)
err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
if (!err)
- ether_addr_copy(netdev->dev_addr, sa->sa_data);
+ eth_hw_addr_set(netdev, sa->sa_data);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3515,7 +3515,6 @@ static void
bnad_uninit(struct bnad *bnad)
{
if (bnad->work_q) {
- flush_workqueue(bnad->work_q);
destroy_workqueue(bnad->work_q);
bnad->work_q = NULL;
}
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d8d87213697c..5620b97b3482 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -243,9 +243,11 @@
#define MACB_NCR_TPF_SIZE 1
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
-#define MACB_SRTSM_OFFSET 15
-#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
+#define MACB_SRTSM_OFFSET 15 /* Store Receive Timestamp to Memory */
+#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
#define MACB_OSSMODE_SIZE 1
+#define MACB_MIIONRGMII_OFFSET 28 /* MII Usage on RGMII Interface */
+#define MACB_MIIONRGMII_SIZE 1
/* Bitfields in NCFGR */
#define MACB_SPD_OFFSET 0 /* Speed */
@@ -713,6 +715,7 @@
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
+#define MACB_CAPS_MIIONRGMII 0x00000200
#define MACB_CAPS_CLK_HW_CHG 0x04000000
#define MACB_CAPS_MACB_IS_EMAC 0x08000000
#define MACB_CAPS_FIFO_MODE 0x10000000
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d13fb1d31821..ffce528aa00e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -313,7 +313,7 @@ static void macb_get_hwaddr(struct macb *bp)
addr[5] = (top >> 8) & 0xff;
if (is_valid_ether_addr(addr)) {
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ eth_hw_addr_set(bp->dev, addr);
return;
}
}
@@ -522,21 +522,21 @@ static void macb_validate(struct phylink_config *config,
state->interface != PHY_INTERFACE_MODE_SGMII &&
state->interface != PHY_INTERFACE_MODE_10GBASER &&
!phy_interface_mode_is_rgmii(state->interface)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
if (!macb_is_gem(bp) &&
(state->interface == PHY_INTERFACE_MODE_GMII ||
phy_interface_mode_is_rgmii(state->interface))) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
!(bp->caps & MACB_CAPS_HIGH_SPEED &&
bp->caps & MACB_CAPS_PCS)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -547,13 +547,8 @@ static void macb_validate(struct phylink_config *config,
if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
(state->interface == PHY_INTERFACE_MODE_NA ||
state->interface == PHY_INTERFACE_MODE_10GBASER)) {
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseER_Full);
+ phylink_set_10g_modes(mask);
phylink_set(mask, 10000baseKR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseT_Full);
if (state->interface != PHY_INTERFACE_MODE_NA)
goto out;
}
@@ -575,9 +570,8 @@ static void macb_validate(struct phylink_config *config,
phylink_set(mask, 1000baseT_Half);
}
out:
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
@@ -684,6 +678,9 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
ctrl |= GEM_BIT(PCSSEL);
ncr |= GEM_BIT(ENABLE_HS_MAC);
+ } else if (bp->caps & MACB_CAPS_MIIONRGMII &&
+ bp->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ncr |= MACB_BIT(MIIONRGMII);
}
}
@@ -900,6 +897,17 @@ static int macb_mdiobus_register(struct macb *bp)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
+ /* If we have a child named mdio, probe it instead of looking for PHYs
+ * directly under the MAC node
+ */
+ child = of_get_child_by_name(np, "mdio");
+ if (child) {
+ int ret = of_mdiobus_register(bp->mii_bus, child);
+
+ of_node_put(child);
+ return ret;
+ }
+
if (of_phy_is_fixed_link(np))
return mdiobus_register(bp->mii_bus);
@@ -4594,7 +4602,8 @@ static const struct macb_config zynq_config = {
};
static const struct macb_config sama7g5_gem_config = {
- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_MIIONRGMII,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4602,7 +4611,8 @@ static const struct macb_config sama7g5_gem_config = {
};
static const struct macb_config sama7g5_emac_config = {
- .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN,
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4774,7 +4784,7 @@ static int macb_probe(struct platform_device *pdev)
if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
bp->rx_intr_mask |= MACB_BIT(RXUBR);
- err = of_get_mac_address(np, bp->dev->dev_addr);
+ err = of_get_ethdev_address(np, bp->dev);
if (err == -EPROBE_DEFER)
goto err_out_free_netdev;
else if (err)
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index c2e1f163bb14..095c5a2144a7 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -38,7 +38,8 @@ static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
return NULL;
}
-static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
unsigned long flags;
@@ -46,7 +47,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
u32 secl, sech;
spin_lock_irqsave(&bp->tsu_clk_lock, flags);
+ ptp_read_system_prets(sts);
first = gem_readl(bp, TN);
+ ptp_read_system_postts(sts);
secl = gem_readl(bp, TSL);
sech = gem_readl(bp, TSH);
second = gem_readl(bp, TN);
@@ -56,7 +59,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
/* if so, use later read & re-read seconds
* (assume all done within 1s)
*/
+ ptp_read_system_prets(sts);
ts->tv_nsec = gem_readl(bp, TN);
+ ptp_read_system_postts(sts);
secl = gem_readl(bp, TSL);
sech = gem_readl(bp, TSH);
} else {
@@ -161,7 +166,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
if (delta > TSU_NSEC_MAX_VAL) {
- gem_tsu_get_time(&bp->ptp_clock_info, &now);
+ gem_tsu_get_time(&bp->ptp_clock_info, &now, NULL);
now = timespec64_add(now, then);
gem_tsu_set_time(&bp->ptp_clock_info,
@@ -192,7 +197,7 @@ static const struct ptp_clock_info gem_ptp_caps_template = {
.pps = 1,
.adjfine = gem_ptp_adjfine,
.adjtime = gem_ptp_adjtime,
- .gettime64 = gem_tsu_get_time,
+ .gettimex64 = gem_tsu_get_time,
.settime64 = gem_tsu_set_time,
.enable = gem_ptp_enable,
};
@@ -251,7 +256,7 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
* The timestamp only contains lower few bits of seconds,
* so add value from 1588 timer
*/
- gem_tsu_get_time(&bp->ptp_clock_info, &tsu);
+ gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL);
/* If the top bit is set in the timestamp,
* but not in 1588 timer, it has rolled over,
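A sketch of the gettimex64 pattern these hunks adopt: bracket only the device read that latches the time with system-clock snapshots. demo_read_ns()/demo_read_sec() stand in for the real register reads; both ptp_read_system_*() helpers accept a NULL sts, which is why the driver's internal callers can simply pass NULL.
#include <linux/ptp_clock_kernel.h>
static u32 demo_read_ns(void)  { return 0; }	/* hypothetical HW read */
static u64 demo_read_sec(void) { return 0; }	/* hypothetical HW read */
static int demo_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
			   struct ptp_system_timestamp *sts)
{
	ptp_read_system_prets(sts);	/* system clock just before */
	ts->tv_nsec = demo_read_ns();	/* the read being bracketed */
	ptp_read_system_postts(sts);	/* system clock just after */
	ts->tv_sec = demo_read_sec();
	return 0;
}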
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index b6a066404f4b..457cb7121000 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -607,7 +607,7 @@ static inline void xgmac_mac_disable(void __iomem *ioaddr)
writel(value, ioaddr + XGMAC_CONTROL);
}
-static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void xgmac_set_mac_addr(void __iomem *ioaddr, const unsigned char *addr,
int num)
{
u32 data;
@@ -1479,7 +1479,7 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
@@ -1693,6 +1693,7 @@ static int xgmac_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev = NULL;
struct xgmac_priv *priv = NULL;
+ u8 addr[ETH_ALEN];
u32 uid;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1785,7 +1786,8 @@ static int xgmac_probe(struct platform_device *pdev)
ndev->max_mtu = XGMAC_MAX_MTU;
/* Get the MAC address */
- xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+ xgmac_get_mac_addr(priv->base, addr, 0);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr))
netdev_warn(ndev, "MAC address %pM not valid",
ndev->dev_addr);
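The same pattern repeats across this series: with netdev->dev_addr headed toward const, a driver fills a local buffer and installs it in one call instead of writing dev->dev_addr byte by byte. A minimal sketch (the register layout and demo_read_hw_mac are hypothetical):
#include <linux/etherdevice.h>
#include <linux/io.h>
static void demo_read_hw_mac(struct net_device *ndev, void __iomem *base)
{
	u8 addr[ETH_ALEN];
	int i;
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = readb(base + i);
	eth_hw_addr_set(ndev, addr);	/* the one sanctioned writer */
}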
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 2a0d64e5797c..73cb03266549 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -411,7 +411,7 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
if (!ether_addr_equal(netdev->dev_addr, mac)) {
macaddr_changed = true;
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
}
@@ -490,7 +490,6 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev)
wq = &lio->rxq_status_wq[q_no];
if (wq->wq) {
cancel_delayed_work_sync(&wq->wk.work);
- flush_workqueue(wq->wq);
destroy_workqueue(wq->wq);
wq->wq = NULL;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 2907e13b9df6..12eee2bc7f5c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -892,12 +892,11 @@ liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *
bus = pdev->bus->number;
device = PCI_SLOT(pdev->devfn);
function = PCI_FUNC(pdev->devfn);
- oct_dev->watchdog_task = kthread_create(
- liquidio_watchdog, oct_dev,
- "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
- if (!IS_ERR(oct_dev->watchdog_task)) {
- wake_up_process(oct_dev->watchdog_task);
- } else {
+ oct_dev->watchdog_task = kthread_run(liquidio_watchdog,
+ oct_dev,
+ "liowd/%02hhx:%02hhx.%hhx",
+ bus, device, function);
+ if (IS_ERR(oct_dev->watchdog_task)) {
oct_dev->watchdog_task = NULL;
dev_err(&oct_dev->pci_dev->dev,
"failed to create kernel_thread\n");
@@ -1279,6 +1278,14 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
struct lio *lio;
dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+ device_lock(&oct->pci_dev->dev);
+ if (oct->devlink) {
+ devlink_unregister(oct->devlink);
+ devlink_free(oct->devlink);
+ oct->devlink = NULL;
+ }
+ device_unlock(&oct->pci_dev->dev);
+
if (!oct->ifcount) {
dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
return 1;
@@ -1300,12 +1307,6 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
for (i = 0; i < oct->ifcount; i++)
liquidio_destroy_nic_device(oct, i);
- if (oct->devlink) {
- devlink_unregister(oct->devlink);
- devlink_free(oct->devlink);
- oct->devlink = NULL;
- }
-
dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
return 0;
}
@@ -2022,7 +2023,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return -EIO;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
return 0;
@@ -3632,7 +3633,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Copy MAC Address to OS network device structure */
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
/* By default all interfaces on a single Octeon uses the same
* tx and rx queues
@@ -3749,10 +3750,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
}
}
+ device_lock(&octeon_dev->pci_dev->dev);
devlink = devlink_alloc(&liquidio_devlink_ops,
sizeof(struct lio_devlink_priv),
&octeon_dev->pci_dev->dev);
if (!devlink) {
+ device_unlock(&octeon_dev->pci_dev->dev);
dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
goto setup_nic_dev_free;
}
@@ -3760,15 +3763,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio_devlink = devlink_priv(devlink);
lio_devlink->oct = octeon_dev;
- if (devlink_register(devlink)) {
- devlink_free(devlink);
- dev_err(&octeon_dev->pci_dev->dev,
- "devlink registration failed\n");
- goto setup_nic_dev_free;
- }
-
octeon_dev->devlink = devlink;
octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ devlink_register(devlink);
+ device_unlock(&octeon_dev->pci_dev->dev);
return 0;
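A sketch of the reordered devlink lifecycle these hunks move to: allocate and fill the private data first, make devlink_register() the last setup step (it returns void in this tree and cannot fail), and unregister first on teardown, mirroring the setup order. demo_priv, demo_devlink_ops and demo_devlink_setup are illustrative.
#include <net/devlink.h>
struct demo_priv { int placeholder; };
static const struct devlink_ops demo_devlink_ops;
static struct devlink *demo_devlink_setup(struct device *dev)
{
	struct devlink *dl;
	dl = devlink_alloc(&demo_devlink_ops, sizeof(struct demo_priv), dev);
	if (!dl)
		return NULL;
	/* ... initialize devlink_priv(dl) here ... */
	devlink_register(dl);	/* last step, after the netdevs exist */
	return dl;
}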
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index f6396ac64006..c607756b731f 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1168,7 +1168,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return -EPERM;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
return 0;
@@ -2148,7 +2148,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
/* Copy MAC Address to OS network device structure */
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
if (liquidio_setup_io_queues(octeon_dev, i,
lio->linfo.num_txpciq,
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 30463a6d1f8c..4e39d712e121 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1501,7 +1501,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
- result = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ result = of_get_ethdev_address(pdev->dev.of_node, netdev);
if (result)
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 691e1475d55e..f2f1ce81fd9c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1193,7 +1193,7 @@ static int nic_register_interrupts(struct nicpf *nic)
dev_err(&nic->pdev->dev,
"Request for #%d msix vectors failed, returned %d\n",
nic->num_vec, ret);
- return 1;
+ return ret;
}
/* Register mailbox interrupt handler */
@@ -1311,9 +1311,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err) {
- dev_err(dev, "Failed to enable PCI device\n");
pci_set_drvdata(pdev, NULL);
- return err;
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
err = pci_request_regions(pdev, DRV_NAME);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index d1667b759522..bb45d5df2856 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -221,8 +221,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
nic->node = mbx.nic_cfg.node_id;
if (!nic->set_mac_pending)
- ether_addr_copy(nic->netdev->dev_addr,
- mbx.nic_cfg.mac_addr);
+ eth_hw_addr_set(nic->netdev, mbx.nic_cfg.mac_addr);
nic->sqs_mode = mbx.nic_cfg.sqs_mode;
nic->loopback_supported = mbx.nic_cfg.loopback_supported;
nic->link_up = false;
@@ -1224,7 +1223,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
if (ret < 0) {
netdev_err(nic->netdev,
"Req for #%d msix vectors failed\n", nic->num_vec);
- return 1;
+ return ret;
}
sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
@@ -1243,7 +1242,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
if (!nicvf_check_pf_ready(nic)) {
nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
nicvf_unregister_interrupts(nic);
- return 1;
+ return -EIO;
}
return 0;
@@ -1612,7 +1611,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
if (nic->pdev->msix_enabled) {
if (nicvf_hw_set_mac_addr(nic, netdev))
@@ -2119,10 +2118,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
err = pci_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index c36fed9c3d73..574a32f23f96 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1387,10 +1387,10 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
u8 *dst)
{
u8 mac[ETH_ALEN];
- u8 *addr;
+ int ret;
- addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN);
- if (!addr) {
+ ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac);
+ if (ret) {
dev_err(dev, "MAC address invalid: %pM\n", mac);
return -EINVAL;
}
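fwnode_get_mac_address() now returns an errno instead of a pointer and no longer takes a length (ETH_ALEN is implied), so callers test the return code rather than comparing against NULL. A sketch under that assumption (demo_fw_mac is hypothetical):
#include <linux/etherdevice.h>
#include <linux/property.h>
static int demo_fw_mac(struct fwnode_handle *fwnode, u8 mac[ETH_ALEN])
{
	int ret = fwnode_get_mac_address(fwnode, mac);
	return ret ? -EINVAL : 0;	/* mac[] is valid only when ret == 0 */
}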
@@ -1597,9 +1597,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pcim_enable_device(pdev);
if (err) {
- dev_err(dev, "Failed to enable PCI device\n");
pci_set_drvdata(pdev, NULL);
- return err;
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
err = pci_request_regions(pdev, DRV_NAME);
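dev_err_probe() collapses the dev_err()-plus-return pair these hunks keep removing: it logs at error level for real failures, stays quiet for -EPROBE_DEFER (the message is saved for the devices_deferred debugfs report instead), and returns the error it was given. A minimal sketch (demo_enable is hypothetical):
#include <linux/device.h>
#include <linux/pci.h>
static int demo_enable(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "Failed to enable PCI device\n");
	return 0;
}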
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d246eee4b6d5..609820e214a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -853,7 +853,7 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
if (!mac->ops->macaddress_set)
return -EOPNOTSUPP;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
mac->ops->macaddress_set(mac, dev->dev_addr);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index dfa77491a910..5913eaf442b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -117,7 +117,7 @@ struct cmac_ops {
const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
- int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
+ int (*macaddress_set)(struct cmac *, const u8 mac_addr[6]);
};
typedef struct _cmac_instance cmac_instance;
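A sketch of the tree-wide constification these hunks apply: any hook that only programs (never edits) a MAC takes "const u8 *", so a const netdev->dev_addr can be passed without casts. demo_mac and the ops layout are illustrative, not the driver's real structures.
#include <linux/types.h>
struct demo_mac;
struct demo_mac_ops {
	int (*macaddress_get)(struct demo_mac *mac, u8 mac_addr[6]);
	int (*macaddress_set)(struct demo_mac *mac, const u8 mac_addr[6]);
};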
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index c27908e66f5e..0bb37e4680c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -496,7 +496,7 @@ static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
return 0;
}
-static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
+static int pm3393_macaddress_set(struct cmac *cmac, const u8 ma[6])
{
u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 310add28fcf5..007c591b8bf5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -1140,7 +1140,7 @@ int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi)
adapter->port[i].dev->name);
goto error;
}
- memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
+ eth_hw_addr_set(adapter->port[i].dev, hw_addr);
init_link_config(&adapter->port[i].link_config, bi);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
index 873c1c7b4ca0..2ad3efb550c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
@@ -379,7 +379,7 @@ static int mac_intr_clear(struct cmac *mac)
}
/* Expect MAC address to be in network byte order. */
-static int mac_set_address(struct cmac* mac, u8 addr[6])
+static int mac_set_address(struct cmac* mac, const u8 addr[6])
{
u32 val;
int port = mac->instance->index;
@@ -591,7 +591,7 @@ static void port_stats_update(struct cmac *mac)
} hw_stats[] = {
#define HW_STAT(reg, stat_name) \
- { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+ { reg, offsetof(struct cmac_statistics, stat_name) / sizeof(u64) }
/* Rx stats */
HW_STAT(RxUnicast, RxUnicastFramesOK),
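The HW_STAT change above is worth a note: the old form computed a member index by NULL-pointer arithmetic, which is undefined behaviour, while offsetof() divided by the element size yields the same index legitimately. A sketch with an illustrative demo_stats:
#include <linux/stddef.h>
#include <linux/types.h>
struct demo_stats { u64 rx_ok, rx_err, tx_ok; };
#define DEMO_STAT_IDX(member) \
	(offsetof(struct demo_stats, member) / sizeof(u64))
/* DEMO_STAT_IDX(tx_ok) == 2: the u64 slot of tx_ok within the struct */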
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index b706f2fbe4f4..a309016f7f8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -710,7 +710,7 @@ int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
-int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 38e47703f9ab..9cf9e33664e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2586,7 +2586,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
if (offload_running(adapter))
write_smt_entry(adapter, pi->port_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 7ff31d1026fb..53feac8da503 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
@@ -3758,8 +3759,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
- memcpy(adapter->port[i]->dev_addr, hw_addr,
- ETH_ALEN);
+ eth_hw_addr_set(adapter->port[i], hw_addr);
init_link_config(&p->link_config, p->phy.caps);
p->phy.ops->power_down(&p->phy, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
index 3af19a550372..1bdc6cad1e49 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
@@ -240,7 +240,7 @@ static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
}
/* Set one of the station's unicast MAC addresses. */
-int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6])
{
if (idx >= mac->nucast)
return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ecea3cdd30b3..5657ac8cfca0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1545,7 +1545,7 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
u8 hw_addr[])
{
- ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+ eth_hw_addr_set(adapter->port[port_idx], hw_addr);
ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d9cda4ab303..dde1cf51d0ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3468,7 +3468,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (ret < 0)
return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 64144b6171d7..e7b4e3ed056c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -9706,7 +9706,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
if (ret)
return ret;
- memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(adap->port[i], addr);
j++;
}
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index f55105a4112f..03cb1410d6fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -40,6 +40,7 @@
#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__
+#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -507,7 +508,7 @@ static inline const char *port_name(struct adapter *adapter, int pidx)
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
u8 hw_addr[])
{
- memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
+ eth_hw_addr_set(adapter->port[pidx], hw_addr);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 49b76fd47daa..64479c464b4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1218,7 +1218,7 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
if (ret < 0)
return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -2902,10 +2902,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Initialize generic PCI device state.
*/
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
/*
* Reserve PCI resources for the device. If we can't get them some
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index bcad69c48074..4af5561cbfc5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
* created only after 3 way handshake is done.
*/
sock_orphan(child);
- percpu_counter_inc((child)->sk_prot->orphan_count);
+ INC_ORPHAN_COUNT(child);
chtls_release_resources(child);
chtls_conn_done(child);
} else {
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index b1161bdeda4d..f61ca657601c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -95,7 +95,7 @@ struct deferred_skb_cb {
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
-#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
+#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)
/* TLS SKB */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
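sk_prot->orphan_count changed in this series from a struct percpu_counter to a bare per-CPU integer, so the increment becomes this_cpu_inc() on the dereferenced per-CPU pointer. A sketch of the same shape (demo_orphans and demo_inc_orphans are illustrative):
#include <linux/percpu.h>
static DEFINE_PER_CPU(unsigned int, demo_orphans);
static void demo_inc_orphans(unsigned int __percpu *counter)
{
	this_cpu_inc(*counter);	/* same shape as INC_ORPHAN_COUNT */
}
/* usage: demo_inc_orphans(&demo_orphans); */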
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index d0c4c8b7a15a..4a97aa8e1387 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1227,7 +1227,7 @@ static int set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n",
dev->name, dev->dev_addr);
@@ -1314,6 +1314,7 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
int tmp;
unsigned rev_type = 0;
int eeprom_buff[CHKSUM_LEN];
+ u8 addr[ETH_ALEN];
int retval;
/* Initialize the device structure. */
@@ -1387,9 +1388,10 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
for (i = 0; i < ETH_ALEN / 2; i++) {
unsigned int Addr;
Addr = readreg(dev, PP_IA + i * 2);
- dev->dev_addr[i * 2] = Addr & 0xFF;
- dev->dev_addr[i * 2 + 1] = Addr >> 8;
+ addr[i * 2] = Addr & 0xFF;
+ addr[i * 2 + 1] = Addr >> 8;
}
+ eth_hw_addr_set(dev, addr);
/* Load the Adapter Configuration.
* Note: Barring any more specific information from some
@@ -1464,9 +1466,10 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
/* eeprom_buff has 32-bit ints, so we can't just memcpy it */
/* store the initial memory base address */
for (i = 0; i < ETH_ALEN / 2; i++) {
- dev->dev_addr[i * 2] = eeprom_buff[i];
- dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8;
+ addr[i * 2] = eeprom_buff[i];
+ addr[i * 2 + 1] = eeprom_buff[i] >> 8;
}
+ eth_hw_addr_set(dev, addr);
cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n",
dev->name, lp->adapter_cnf);
}
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 072fac5f5d24..21ba6e893072 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -746,7 +746,7 @@ static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
if (dev == NULL)
return NULL;
- memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, data->dev_addr);
dev->ethtool_ops = &ep93xx_ethtool_ops;
dev->netdev_ops = &ep93xx_netdev_ops;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 6324e80960c3..84251b85fc93 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -541,7 +541,7 @@ static int set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, saddr->sa_data);
netdev_info(dev, "Setting MAC address to %pM\n", dev->dev_addr);
/* set the Ethernet address */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 12ffc14fbecd..6ded4d9fa32a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -139,7 +139,7 @@ static void enic_get_drvinfo(struct net_device *netdev,
int err;
err = enic_dev_fw_info(enic, &fw_info);
- /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
+ /* return only when dma_alloc_coherent fails in vnic_dev_fw_info
* For other failures, like devcmd failure, we return previously
* recorded info.
*/
@@ -270,7 +270,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
int err;
err = enic_dev_stats_dump(enic, &vstats);
- /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+ /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
* For other failures, like devcmd failure, we return previously
* recorded stats.
*/
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index d0a8f7106958..aacf141986d5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -882,7 +882,7 @@ static void enic_get_stats(struct net_device *netdev,
int err;
err = enic_dev_stats_dump(enic, &stats);
- /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+ /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
* For other failures, like devcmd failure, we return previously
* recorded stats.
*/
@@ -985,7 +985,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
return -EADDRNOTAVAIL;
}
- memcpy(netdev->dev_addr, addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr);
return 0;
}
@@ -1098,6 +1098,7 @@ static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
static int enic_set_vf_port(struct net_device *netdev, int vf,
struct nlattr *port[])
{
+ static const u8 zero_addr[ETH_ALEN] = {};
struct enic *enic = netdev_priv(netdev);
struct enic_port_profile prev_pp;
struct enic_port_profile *pp;
@@ -1162,7 +1163,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
} else {
memset(pp, 0, sizeof(*pp));
if (vf == PORT_SELF_VF)
- eth_zero_addr(netdev->dev_addr);
+ eth_hw_addr_set(netdev, zero_addr);
}
} else {
/* Set flag to indicate that the port assoc/disassoc
@@ -1174,7 +1175,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (pp->request == PORT_REQUEST_DISASSOCIATE) {
eth_zero_addr(pp->mac_addr);
if (vf == PORT_SELF_VF)
- eth_zero_addr(netdev->dev_addr);
+ eth_hw_addr_set(netdev, zero_addr);
}
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index e6a83198c3dd..80f46dbd5117 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -73,9 +73,9 @@ static int enic_set_port_profile(struct enic *enic, int vf)
struct vic_provinfo *vp;
const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
const __be16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
+ const u8 *client_mac;
char uuid_str[38];
char client_mac_str[18];
- u8 *client_mac;
int err;
ENIC_PP_BY_INDEX(enic, vf, pp, &err);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 6e745ca4c433..941f175fb911 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1889,7 +1889,7 @@ static int gmac_set_mac_address(struct net_device *netdev, void *addr)
{
struct sockaddr *sa = addr;
- memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, sa->sa_data);
gmac_write_mac_address(netdev);
return 0;
@@ -2467,13 +2467,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
DEFAULT_NAPI_WEIGHT);
if (is_valid_ether_addr((void *)port->mac_addr)) {
- memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)port->mac_addr);
} else {
dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
port->mac_addr[0], port->mac_addr[1],
port->mac_addr[2]);
dev_info(dev, "using a random ethernet address\n");
- eth_random_addr(netdev->dev_addr);
+ eth_hw_addr_random(netdev);
}
gmac_write_mac_address(netdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index e842de6f6635..0985ab216566 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1425,6 +1425,7 @@ dm9000_probe(struct platform_device *pdev)
enum of_gpio_flags flags;
struct regulator *power;
bool inv_mac_addr = false;
+ u8 addr[ETH_ALEN];
power = devm_regulator_get(dev, "vcc");
if (IS_ERR(power)) {
@@ -1666,11 +1667,12 @@ dm9000_probe(struct platform_device *pdev)
/* try reading the node address from the attached EEPROM */
for (i = 0; i < 6; i += 2)
- dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
+ dm9000_read_eeprom(db, i / 2, addr + i);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
mac_src = "platform data";
- memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, pdata->dev_addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -1678,7 +1680,8 @@ dm9000_probe(struct platform_device *pdev)
mac_src = "chip";
for (i = 0; i < 6; i++)
- ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
+ addr[i] = ior(db, i + DM9000_PAR);
+ eth_hw_addr_set(ndev, addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 117c26fa5909..d51b3d24a0c8 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -666,8 +666,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
struct de_private *de = netdev_priv(dev);
u16 hash_table[32];
struct netdev_hw_addr *ha;
+ const u16 *eaddrs;
int i;
- u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
__set_bit_le(255, hash_table); /* Broadcast entry */
@@ -685,7 +685,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
setup_frm = &de->setup_frame[13*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -695,7 +695,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
struct netdev_hw_addr *ha;
- u16 *eaddrs;
+ const u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
@@ -710,7 +710,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
setup_frm = &de->setup_frame[15*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1713,6 +1713,7 @@ static const struct ethtool_ops de_ethtool_ops = {
static void de21040_get_mac_address(struct de_private *de)
{
+ u8 addr[ETH_ALEN];
unsigned i;
dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
@@ -1724,12 +1725,13 @@ static void de21040_get_mac_address(struct de_private *de)
value = dr32(ROMCmd);
rmb();
} while (value < 0 && --boguscnt > 0);
- de->dev->dev_addr[i] = value;
+ addr[i] = value;
udelay(1);
if (boguscnt <= 0)
pr_warn("timeout reading 21040 MAC address byte %u\n",
i);
}
+ eth_hw_addr_set(de->dev, addr);
}
static void de21040_get_media_info(struct de_private *de)
@@ -1821,8 +1823,7 @@ static void de21041_get_srom_info(struct de_private *de)
#endif
/* store MAC address */
- for (i = 0; i < 6; i ++)
- de->dev->dev_addr[i] = ee_data[i + sa_offset];
+ eth_hw_addr_set(de->dev, &ee_data[sa_offset]);
/* get offset of controller 0 info leaf. ignore 2nd byte. */
ofs = ee_data[SROMC0InfoLeaf];
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 36ab4cbf2ad0..13121c4dcfe6 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -4031,6 +4031,7 @@ get_hw_addr(struct net_device *dev)
int broken, i, k, tmp, status = 0;
u_short j,chksum;
struct de4x5_private *lp = netdev_priv(dev);
+ u8 addr[ETH_ALEN];
broken = de4x5_bad_srom(lp);
@@ -4042,28 +4043,30 @@ get_hw_addr(struct net_device *dev)
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
k += (u_char) tmp;
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
while ((tmp = inl(DE4X5_APROM)) < 0);
k += (u_short) (tmp << 8);
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
} else if (!broken) {
- dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
} else if ((broken == SMC) || (broken == ACCTON)) {
- dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
- dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ addr[i] = *((u_char *)&lp->srom + i); i++;
+ addr[i] = *((u_char *)&lp->srom + i); i++;
}
} else {
k += (u_char) (tmp = inb(EISA_APROM));
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
}
if (k > 0xffff) k-=0xffff;
}
if (k == 0xffff) k=0;
+ eth_hw_addr_set(dev, addr);
+
if (lp->bus == PCI) {
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4095,8 +4098,9 @@ get_hw_addr(struct net_device *dev)
int x = dev->dev_addr[i];
x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
- dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
+ addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
}
+ eth_hw_addr_set(dev, addr);
}
#endif /* CONFIG_PPC_PMAC */
@@ -4158,12 +4162,9 @@ test_bad_enet(struct net_device *dev, int status)
if ((tmp == 0) || (tmp == 0x5fa)) {
if ((lp->chipset == last.chipset) &&
(lp->bus_num == last.bus) && (lp->bus_num > 0)) {
- for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
- for (i=ETH_ALEN-1; i>2; --i) {
- dev->dev_addr[i] += 1;
- if (dev->dev_addr[i] != 0) break;
- }
- for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ eth_addr_inc(last.addr);
+ eth_hw_addr_set(dev, last.addr);
+
if (!an_exception(lp)) {
dev->irq = last.irq;
}
@@ -5391,9 +5392,7 @@ de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data
if (netif_queue_stopped(dev))
return -EBUSY;
netif_stop_queue(dev);
- for (i=0; i<ETH_ALEN; i++) {
- dev->dev_addr[i] = tmp.addr[i];
- }
+ eth_hw_addr_set(dev, tmp.addr);
build_setup_frame(dev, PHYS_ADDR_ONLY);
/* Set up the descriptor and give ownership to the card */
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
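eth_addr_inc() treats the 6-byte MAC as one 48-bit big-endian integer and adds one, replacing de4x5's open-coded ripple-carry loop. Note a small semantic widening: the old loop only carried through the three NIC-specific bytes, while eth_addr_inc() carries through all six. A sketch (demo_next_mac is hypothetical):
#include <linux/etherdevice.h>
static void demo_next_mac(u8 addr[ETH_ALEN])
{
	eth_addr_inc(addr);	/* e.g. ...:00:ff -> ...:01:00 */
}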
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index c763b692e164..83f1727d1423 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -476,8 +476,7 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Set Node address */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = db->srom[20 + i];
+ eth_hw_addr_set(dev, &db->srom[20]);
err = register_netdev (dev);
if (err)
@@ -1436,9 +1435,9 @@ static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
static void dm9132_id_table(struct net_device *dev)
{
+ const u16 *addrptr = (const u16 *)dev->dev_addr;
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr + 0xc0;
- u16 *addrptr = (u16 *)dev->dev_addr;
struct netdev_hw_addr *ha;
u16 i, hash_table[4];
@@ -1477,7 +1476,7 @@ static void send_filter_frame(struct net_device *dev)
struct dmfe_board_info *db = netdev_priv(dev);
struct netdev_hw_addr *ha;
struct tx_desc *txptr;
- u16 * addrptr;
+ const u16 * addrptr;
u32 * suptr;
int i;
@@ -1487,7 +1486,7 @@ static void send_filter_frame(struct net_device *dev)
suptr = (u32 *) txptr->tx_buf_ptr;
/* Node address */
- addrptr = (u16 *) dev->dev_addr;
+ addrptr = (const u16 *) dev->dev_addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fcedd733bacb..79df5a72877b 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -339,7 +339,7 @@ static void tulip_up(struct net_device *dev)
}
} else {
/* This is set_rx_mode(), but without starting the transmitter. */
- u16 *eaddrs = (u16 *)dev->dev_addr;
+ const u16 *eaddrs = (const u16 *)dev->dev_addr;
u16 *setup_frm = &tp->setup_frame[15*6];
dma_addr_t mapping;
@@ -1001,8 +1001,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
struct tulip_private *tp = netdev_priv(dev);
u16 hash_table[32];
struct netdev_hw_addr *ha;
+ const u16 *eaddrs;
int i;
- u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
__set_bit_le(255, hash_table); /* Broadcast entry */
@@ -1019,7 +1019,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
setup_frm = &tp->setup_frame[13*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1029,7 +1029,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
struct netdev_hw_addr *ha;
- u16 *eaddrs;
+ const u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
@@ -1044,7 +1044,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
setup_frm = &tp->setup_frame[15*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1305,6 +1305,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
const char *chip_name = tulip_tbl[chip_idx].chip_name;
unsigned int eeprom_missing = 0;
+ u8 addr[ETH_ALEN] __aligned(2);
unsigned int force_csr0 = 0;
board_idx++;
@@ -1506,13 +1507,15 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
do {
value = ioread32(ioaddr + CSR9);
} while (value < 0 && --boguscnt > 0);
- put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
+ put_unaligned_le16(value, ((__le16 *)addr) + i);
sum += value & 0xffff;
}
+ eth_hw_addr_set(dev, addr);
} else if (chip_idx == COMET) {
/* No need to read the EEPROM. */
- put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
- put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
+ put_unaligned_le32(ioread32(ioaddr + 0xA4), addr);
+ put_unaligned_le16(ioread32(ioaddr + 0xA8), addr + 4);
+ eth_hw_addr_set(dev, addr);
for (i = 0; i < 6; i ++)
sum += dev->dev_addr[i];
} else {
@@ -1575,20 +1578,23 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
for (i = 0; i < 6; i ++) {
- dev->dev_addr[i] = ee_data[i + sa_offset];
+ addr[i] = ee_data[i + sa_offset];
sum += ee_data[i + sa_offset];
}
+ eth_hw_addr_set(dev, addr);
}
/* Lite-On boards have the address byte-swapped. */
if ((dev->dev_addr[0] == 0xA0 ||
dev->dev_addr[0] == 0xC0 ||
dev->dev_addr[0] == 0x02) &&
- dev->dev_addr[1] == 0x00)
+ dev->dev_addr[1] == 0x00) {
for (i = 0; i < 6; i+=2) {
- char tmp = dev->dev_addr[i];
- dev->dev_addr[i] = dev->dev_addr[i+1];
- dev->dev_addr[i+1] = tmp;
+ addr[i] = dev->dev_addr[i+1];
+ addr[i+1] = dev->dev_addr[i];
}
+ eth_hw_addr_set(dev, addr);
+ }
+
/* On the Zynx 315 Etherarray and other multiport boards only the
first Tulip has an EEPROM.
On Sparc systems the mac address is held in the OBP property
@@ -1599,17 +1605,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sum == 0 || sum == 6*0xff) {
#if defined(CONFIG_SPARC)
struct device_node *dp = pci_device_to_OF_node(pdev);
- const unsigned char *addr;
+ const unsigned char *addr2;
int len;
#endif
eeprom_missing = 1;
for (i = 0; i < 5; i++)
- dev->dev_addr[i] = last_phys_addr[i];
- dev->dev_addr[i] = last_phys_addr[i] + 1;
+ addr[i] = last_phys_addr[i];
+ addr[i] = last_phys_addr[i] + 1;
+ eth_hw_addr_set(dev, addr);
#if defined(CONFIG_SPARC)
- addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == ETH_ALEN)
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ addr2 = of_get_property(dp, "local-mac-address", &len);
+ if (addr2 && len == ETH_ALEN)
+ eth_hw_addr_set(dev, addr2);
#endif
#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
if (last_irq)
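The tulip hunk declares its scratch MAC buffer "u8 addr[ETH_ALEN] __aligned(2)". The likely rationale, stated here as an assumption: MAC helpers such as ether_addr_copy() are documented to require 16-bit alignment, and the buffer is also accessed through __le16 casts, so __aligned(2) keeps those accesses well-defined. A sketch (demo_install is hypothetical):
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
static void demo_install(struct net_device *dev)
{
	u8 addr[ETH_ALEN] __aligned(2) = { 0x02, 0, 0, 0, 0, 0x01 };
	eth_hw_addr_set(dev, addr);
}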
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index d67ef7d02d6b..77d9058431e3 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -272,6 +272,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
struct uli526x_board_info *db; /* board information structure */
struct net_device *dev;
void __iomem *ioaddr;
+ u8 addr[ETH_ALEN];
int i, err;
ULI526X_DBUG(0, "uli526x_init_one()", 0);
@@ -379,7 +380,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
uw32(DCR13, 0x1b0); //Select ID Table access port
//Read MAC address from CR14
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ur32(DCR14);
+ addr[i] = ur32(DCR14);
//Read end
uw32(DCR13, 0); //Clear CR13
uw32(DCR0, 0); //Clear CR0
@@ -388,8 +389,10 @@ static int uli526x_init_one(struct pci_dev *pdev,
else /*Exist SROM*/
{
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = db->srom[20 + i];
+ addr[i] = db->srom[20 + i];
}
+ eth_hw_addr_set(dev, addr);
+
err = register_netdev (dev);
if (err)
goto err_out_unmap;
@@ -1343,7 +1346,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
void __iomem *ioaddr = db->ioaddr;
struct netdev_hw_addr *ha;
struct tx_desc *txptr;
- u16 * addrptr;
+ const u16 * addrptr;
u32 * suptr;
int i;
@@ -1353,7 +1356,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
suptr = (u32 *) txptr->tx_buf_ptr;
/* Node address */
- addrptr = (u16 *) dev->dev_addr;
+ addrptr = (const u16 *) dev->dev_addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 85b99099c6b9..86b1d23eba83 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -355,6 +355,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
int irq;
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ __le16 addr[ETH_ALEN / 2];
void __iomem *ioaddr;
i = pcim_enable_device(pdev);
@@ -382,7 +383,8 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_netdev;
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
+ addr[i] = cpu_to_le16(eeprom_read(ioaddr, i));
+ eth_hw_addr_set(dev, (u8 *)addr);
/* Reset the chip to erase previous misconfiguration.
No hold time required! */
@@ -877,7 +879,7 @@ static void init_registers(struct net_device *dev)
8000 16 longwords 0200 2 longwords 2000 32 longwords
C000 32 longwords 0400 4 longwords */
-#if defined (__i386__) && !defined(MODULE)
+#if defined (__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
/* When not a module we can work around broken '486 PCI boards. */
if (boot_cpu_data.x86 <= 4) {
i |= 0x4800;
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index a8de79355578..8759f9f76b62 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -1015,12 +1015,14 @@ static void read_mac_address(struct xircom_private *card)
xw32(CSR10, i + 3);
data_count = xr32(CSR9);
if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
+ u8 addr[ETH_ALEN];
int j;
for (j = 0; j < 6; j++) {
xw32(CSR10, i + j + 4);
- card->dev->dev_addr[j] = xr32(CSR9) & 0xff;
+ addr[j] = xr32(CSR9) & 0xff;
}
+ eth_hw_addr_set(card->dev, addr);
break;
} else if (link == 0) {
break;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 202ecb132053..a301f7e6a440 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -349,8 +349,7 @@ parse_eeprom (struct net_device *dev)
}
/* Set MAC address */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = psrom->mac_addr[i];
+ eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
np->led_mode = psrom->led_mode;
@@ -567,7 +566,7 @@ static void rio_hw_init(struct net_device *dev)
*/
for (i = 0; i < 3; i++)
dw16(StationAddr0 + 2 * i,
- cpu_to_le16(((u16 *)dev->dev_addr)[i]));
+ cpu_to_le16(((const u16 *)dev->dev_addr)[i]));
set_multicast (dev);
if (np->coalesce) {
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index c36d186dffed..c710dc17be90 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -508,6 +508,7 @@ static int sundance_probe1(struct pci_dev *pdev,
int bar = 1;
#endif
int phy, phy_end, phy_idx = 0;
+ __le16 addr[ETH_ALEN / 2];
if (pci_enable_device(pdev))
return -EIO;
@@ -528,8 +529,9 @@ static int sundance_probe1(struct pci_dev *pdev,
goto err_out_res;
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] =
+ addr[i] =
cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+ eth_hw_addr_set(dev, (u8 *)addr);
np = netdev_priv(dev);
np->ndev = dev;
@@ -1611,7 +1613,7 @@ static int sundance_set_mac_addr(struct net_device *dev, void *data)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
__set_mac_addr(dev);
return 0;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 6c51cf991dad..92462ed87bc4 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -60,11 +60,11 @@ static void __dnet_set_hwaddr(struct dnet *bp)
{
u16 tmp;
- tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
+ tmp = be16_to_cpup((const __be16 *)bp->dev->dev_addr);
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
- tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
+ tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 2));
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
- tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
+ tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 4));
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}
@@ -93,7 +93,7 @@ static void dnet_get_hwaddr(struct dnet *bp)
*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
if (is_valid_ether_addr(addr))
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ eth_hw_addr_set(bp->dev, addr);
}
static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index b2d4fb3feb74..46e3a05e9582 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -479,6 +479,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct net_device *net_dev;
struct ec_bhf_priv *priv;
void __iomem *dma_io;
+ u8 addr[ETH_ALEN];
void __iomem *io;
int err = 0;
@@ -539,7 +540,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err < 0)
goto err_free_net_dev;
- memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+ memcpy_fromio(addr, priv->mii_io + MII_MAC_ADDR, ETH_ALEN);
+ eth_hw_addr_set(net_dev, addr);
err = register_netdev(net_dev);
if (err < 0)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 649c5c429bd7..528eb0f223b1 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1080,7 +1080,7 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
+int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index c30d6d6f0f3a..db1f3b908582 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2385,7 +2385,7 @@ int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
bool permanent, u32 if_handle, u32 pmac_id);
-int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
+int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, u32 if_id,
u32 *pmac_id, u32 domain);
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
u32 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 361c1c87c183..d51f24c9e1b8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -272,7 +272,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
-static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
+static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac)
{
int i;
@@ -369,7 +369,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* Remember currently programmed MAC */
ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
err:
@@ -4599,7 +4599,7 @@ static int be_mac_setup(struct be_adapter *adapter)
if (status)
return status;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(adapter->netdev, mac);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
/* Initial MAC for BE3 VFs is already programmed by PF */
@@ -4621,7 +4621,6 @@ static void be_destroy_err_recovery_workq(void)
if (!be_err_recovery_workq)
return;
- flush_workqueue(be_err_recovery_workq);
destroy_workqueue(be_err_recovery_workq);
be_err_recovery_workq = NULL;
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ed1ed48e7483..b1c8ffea6ad2 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -707,20 +707,16 @@ static int ethoc_mdio_probe(struct net_device *dev)
else
phy = phy_find_first(priv->mdio);
- if (!phy) {
- dev_err(&dev->dev, "no PHY found\n");
- return -ENXIO;
- }
+ if (!phy)
+ return dev_err_probe(&dev->dev, -ENXIO, "no PHY found\n");
priv->old_duplex = -1;
priv->old_link = -1;
err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
PHY_INTERFACE_MODE_GMII);
- if (err) {
- dev_err(&dev->dev, "could not attach to PHY\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&dev->dev, err, "could not attach to PHY\n");
phy_set_max_speed(phy, SPEED_100);
@@ -806,8 +802,8 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ethoc_do_set_mac_address(struct net_device *dev)
{
+ const unsigned char *mac = dev->dev_addr;
struct ethoc *priv = netdev_priv(dev);
- unsigned char *mac = dev->dev_addr;
ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
(mac[4] << 8) | (mac[5] << 0));
@@ -820,7 +816,7 @@ static int ethoc_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
ethoc_do_set_mac_address(dev);
return 0;
}
@@ -1148,18 +1144,22 @@ static int ethoc_probe(struct platform_device *pdev)
/* Allow the platform setup code to pass in a MAC address. */
if (pdata) {
- ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
+ eth_hw_addr_set(netdev, pdata->hwaddr);
priv->phy_id = pdata->phy_id;
} else {
- of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ of_get_ethdev_address(pdev->dev.of_node, netdev);
priv->phy_id = -1;
}
/* Check that the given MAC address is valid. If it isn't, read the
* current MAC from the controller.
*/
- if (!is_valid_ether_addr(netdev->dev_addr))
- ethoc_get_mac_address(netdev, netdev->dev_addr);
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ u8 addr[ETH_ALEN];
+
+ ethoc_get_mac_address(netdev, addr);
+ eth_hw_addr_set(netdev, addr);
+ }
/* Check the MAC again for validity, if it still isn't choose and
* program a random one.
diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig
index 38aa824efb25..9241b9b1c7a3 100644
--- a/drivers/net/ethernet/ezchip/Kconfig
+++ b/drivers/net/ethernet/ezchip/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_EZCHIP
config EZCHIP_NPS_MANAGEMENT_ENET
tristate "EZchip NPS management enet support"
- depends on OF_IRQ && OF_NET
+ depends on OF_IRQ
depends on HAS_IOMEM
help
Simple LAN device for debug or management purposes.
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f9a288a6ec8c..323340826dab 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -421,7 +421,7 @@ static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
res = eth_mac_addr(ndev, p);
if (!res) {
- ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(ndev, addr->sa_data);
nps_enet_set_hw_mac_address(ndev);
}
@@ -601,7 +601,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);
/* set kernel MAC address to dev */
- err = of_get_mac_address(dev->of_node, ndev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, ndev);
if (err)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index ff76e401a014..97c5d70de76e 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -182,13 +182,10 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
u8 mac[ETH_ALEN];
unsigned int m;
unsigned int l;
- void *addr;
- addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
- if (addr) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ if (!device_get_ethdev_address(priv->dev, priv->netdev)) {
dev_info(priv->dev, "Read MAC address %pM from device tree\n",
- mac);
+ priv->netdev->dev_addr);
return;
}
@@ -203,7 +200,7 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
mac[5] = l & 0xff;
if (is_valid_ether_addr(mac)) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ eth_hw_addr_set(priv->netdev, mac);
dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
} else {
eth_hw_addr_random(priv->netdev);
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 25c91b3c5fd3..b3939a5f7b03 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -482,6 +482,7 @@ static int fealnx_init_one(struct pci_dev *pdev,
struct net_device *dev;
void *ring_space;
dma_addr_t ring_dma;
+ u8 addr[ETH_ALEN];
#ifdef USE_IO_OPS
int bar = 0;
#else
@@ -525,7 +526,8 @@ static int fealnx_init_one(struct pci_dev *pdev,
/* read ethernet id */
for (i = 0; i < 6; ++i)
- dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);
+ addr[i] = ioread8(ioaddr + PAR0 + i);
+ eth_hw_addr_set(dev, addr);
/* Reset the chip to erase previous misconfiguration. */
iowrite32(0x00000001, ioaddr + BCR);
@@ -827,7 +829,7 @@ static int netdev_open(struct net_device *dev)
return -EAGAIN;
for (i = 0; i < 3; i++)
- iowrite16(((unsigned short*)dev->dev_addr)[i],
+ iowrite16(((const unsigned short *)dev->dev_addr)[i],
ioaddr + PAR0 + i*2);
init_ring(dev);
@@ -857,7 +859,7 @@ static int netdev_open(struct net_device *dev)
np->bcrvalue |= 0x04; /* big-endian */
#endif
-#if defined(__i386__) && !defined(MODULE)
+#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
if (boot_cpu_data.x86 <= 4)
np->crvalue = 0xa00;
else
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 685d2d8a3b36..6b2927d863e2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -268,11 +268,11 @@ static int dpaa_netdev_init(struct net_device *net_dev,
if (is_valid_ether_addr(mac_addr)) {
memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else {
eth_hw_addr_random(net_dev);
err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
- (enet_addr_t *)net_dev->dev_addr);
+ (const enet_addr_t *)net_dev->dev_addr);
if (err) {
dev_err(dev, "Failed to set random MAC address\n");
return -EINVAL;
@@ -452,7 +452,7 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
mac_dev = priv->mac_dev;
err = mac_dev->change_addr(mac_dev->fman_mac,
- (enet_addr_t *)net_dev->dev_addr);
+ (const enet_addr_t *)net_dev->dev_addr);
if (err < 0) {
netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
err);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
index 605a39f892b9..7fefe1574b6a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -189,12 +189,11 @@ static const struct devlink_ops dpaa2_eth_devlink_ops = {
.trap_group_action_set = dpaa2_eth_dl_trap_group_action_set,
};
-int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_devlink_priv *dl_priv;
- int err;
priv->devlink =
devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev);
@@ -204,25 +203,23 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
}
dl_priv = devlink_priv(priv->devlink);
dl_priv->dpaa2_priv = priv;
-
- err = devlink_register(priv->devlink);
- if (err) {
- dev_err(dev, "devlink_register() = %d\n", err);
- goto devlink_free;
- }
-
return 0;
+}
-devlink_free:
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv)
+{
devlink_free(priv->devlink);
+}
- return err;
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+{
+ devlink_register(priv->devlink);
}
void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv)
{
devlink_unregister(priv->devlink);
- devlink_free(priv->devlink);
}
int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
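The split above tracks this cycle's devlink core rework, where devlink_register() became void: drivers allocate the devlink instance early, publish it only once the device is fully set up, and unregister it first on removal. A sketch of the resulting probe/remove ordering using the helpers defined above (error handling and unrelated steps elided):

static int example_probe(struct dpaa2_eth_priv *priv)
{
	int err;

	err = dpaa2_eth_dl_alloc(priv);		/* devlink_alloc() only */
	if (err)
		return err;

	/* ... add ports and traps, register the netdev ... */

	dpaa2_eth_dl_register(priv);		/* last step: instance goes live */
	return 0;
}

static void example_remove(struct dpaa2_eth_priv *priv)
{
	dpaa2_eth_dl_unregister(priv);		/* first step: instance goes away */

	/* ... tear down the netdev, traps and ports ... */

	dpaa2_eth_dl_free(priv);		/* devlink_free() only */
}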
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 7065c71ed7b8..714e961e7a77 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -533,6 +533,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
list_add_tail(&skb->list, ch->rx_list);
@@ -641,6 +642,7 @@ static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
fq->stats.frames += cleaned;
ch->stats.frames += cleaned;
+ ch->stats.frames_per_cdan += cleaned;
/* A dequeue operation only pulls frames from a single queue
* into the store. Return the frame queue as an out param.
@@ -1264,7 +1266,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch __always_unused,
+ struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
struct dpaa2_eth_fq *fq)
{
@@ -1279,6 +1281,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
percpu_extras->tx_conf_frames++;
percpu_extras->tx_conf_bytes += fd_len;
+ ch->stats.bytes_per_cdan += fd_len;
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
@@ -1601,6 +1604,12 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
}
} while (store_cleaned);
+ /* Update NET DIM with the values for this CDAN */
+ dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
+ ch->stats.bytes_per_cdan);
+ ch->stats.frames_per_cdan = 0;
+ ch->stats.bytes_per_cdan = 0;
+
/* We didn't consume the entire budget, so finish napi and
* re-enable data availability notifications
*/
@@ -4013,7 +4022,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
return err;
}
}
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else if (is_zero_ether_addr(dpni_mac_addr)) {
/* No MAC address configured, fill in net_dev->dev_addr
* with a random one
@@ -4038,7 +4047,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
/* NET_ADDR_PERM is default, all we have to do is
* fill in the device addr.
*/
- memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, dpni_mac_addr);
}
return 0;
@@ -4431,7 +4440,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_connect_mac;
- err = dpaa2_eth_dl_register(priv);
+ err = dpaa2_eth_dl_alloc(priv);
if (err)
goto err_dl_register;
@@ -4453,6 +4462,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
dpaa2_dbg_add(priv);
#endif
+ dpaa2_eth_dl_register(priv);
dev_info(dev, "Probed interface %s\n", net_dev->name);
return 0;
@@ -4461,7 +4471,7 @@ err_netdev_reg:
err_dl_port_add:
dpaa2_eth_dl_traps_unregister(priv);
err_dl_trap_register:
- dpaa2_eth_dl_unregister(priv);
+ dpaa2_eth_dl_free(priv);
err_dl_register:
dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
@@ -4508,6 +4518,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
+ dpaa2_eth_dl_unregister(priv);
+
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
@@ -4519,7 +4531,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
- dpaa2_eth_dl_unregister(priv);
+ dpaa2_eth_dl_free(priv);
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
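A note on the per-CDAN counters introduced above: a CDAN is the channel's data availability notification, and dpaa2_io_update_net_dim() feeds each notification's frame/byte totals into the kernel's dynamic interrupt moderation (DIM) engine, which the adaptive-coalescing ethtool knob added later in this series builds on. The poll loop then zeroes both counters; a sketch of that reporting step (helper name illustrative):

static void example_report_dim(struct dpaa2_eth_channel *ch)
{
	/* hand this CDAN's totals to the DIM algorithm ... */
	dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
				ch->stats.bytes_per_cdan);

	/* ... and restart the accounting for the next CDAN */
	ch->stats.frames_per_cdan = 0;
	ch->stats.bytes_per_cdan = 0;
}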
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index cdb623d5f2c1..2085844227fe 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -384,6 +384,8 @@ struct dpaa2_eth_ch_stats {
__u64 xdp_redirect;
/* Must be last, does not show up in ethtool stats */
__u64 frames;
+ __u64 frames_per_cdan;
+ __u64 bytes_per_cdan;
};
/* Maximum number of queues associated with a DPNI */
@@ -725,7 +727,10 @@ void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
-int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv);
int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 2da5f881f630..adb8ce5306ee 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -820,7 +820,63 @@ static int dpaa2_eth_set_tunable(struct net_device *net_dev,
return err;
}
+static int dpaa2_eth_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio = priv->channel[0]->dpio;
+
+ dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs);
+ ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ return 0;
+}
+
+static int dpaa2_eth_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio;
+ int prev_adaptive;
+ u32 prev_rx_usecs;
+ int i, j, err;
+
+ /* Keep track of the previous value, just in case we fail */
+ dpio = priv->channel[0]->dpio;
+ dpaa2_io_get_irq_coalescing(dpio, &prev_rx_usecs);
+ prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ /* Setup new value for rx coalescing */
+ for (i = 0; i < priv->num_channels; i++) {
+ dpio = priv->channel[i]->dpio;
+
+ dpaa2_io_set_adaptive_coalescing(dpio,
+ ic->use_adaptive_rx_coalesce);
+ err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs);
+ if (err)
+ goto restore_rx_usecs;
+ }
+
+ return 0;
+
+restore_rx_usecs:
+ for (j = 0; j < i; j++) {
+ dpio = priv->channel[j]->dpio;
+
+ dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs);
+ dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive);
+ }
+
+ return err;
+}
+
const struct ethtool_ops dpaa2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = dpaa2_eth_get_drvinfo,
.nway_reset = dpaa2_eth_nway_reset,
.get_link = ethtool_op_get_link,
@@ -836,4 +892,6 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_ts_info = dpaa2_eth_get_ts_info,
.get_tunable = dpaa2_eth_get_tunable,
.set_tunable = dpaa2_eth_set_tunable,
+ .get_coalesce = dpaa2_eth_get_coalesce,
+ .set_coalesce = dpaa2_eth_set_coalesce,
};
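The new hooks are reached from user space through the standard coalescing interfaces; ethtool -C <iface> rx-usecs <N> adaptive-rx on|off is the usual front end, and the ethtool core rejects any field not listed in supported_coalesce_params before the driver is called. As a self-contained illustration, a userspace sketch using the legacy SIOCETHTOOL ioctl (the ethtool netlink path lands in the same driver callbacks; the interface name "eth0" is an assumption):

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (char *)&ec;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {	/* -> get_coalesce() */
		ec.cmd = ETHTOOL_SCOALESCE;
		ec.rx_coalesce_usecs = 64;
		ec.use_adaptive_rx_coalesce = 1;
		if (ioctl(fd, SIOCETHTOOL, &ifr))	/* -> set_coalesce() */
			perror("ETHTOOL_SCOALESCE");
	}
	close(fd);
	return 0;
}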
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index ae6d382d8735..ef8f0a055024 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -139,7 +139,7 @@ static void dpaa2_mac_validate(struct phylink_config *config,
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_USXGMII:
- phylink_set(mask, 10000baseT_Full);
+ phylink_set_10g_modes(mask);
if (state->interface == PHY_INTERFACE_MODE_10GBASER)
break;
phylink_set(mask, 5000baseT_Full);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 175f15c46842..d039457928b0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -980,7 +980,7 @@ static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
/* First check if firmware has any address configured by bootloader */
if (!is_zero_ether_addr(mac_addr)) {
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else {
/* No MAC address configured, fill in net_dev->dev_addr
* with a random one
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 042327b9981f..504e12554079 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -7,7 +7,9 @@
#include <linux/udp.h>
#include <linux/vmalloc.h>
#include <linux/ptp_classify.h>
+#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
+#include <net/tso.h>
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
@@ -314,12 +316,261 @@ dma_err:
return 0;
}
+static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
+{
+ union enetc_tx_bd txbd_tmp;
+ u8 flags = 0, e_flags = 0;
+ dma_addr_t addr;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ if (skb_vlan_tag_present(skb))
+ flags |= ENETC_TXBD_FLAGS_EX;
+
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.buf_len = cpu_to_le16(hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
+ txbd_tmp.flags = flags;
+
+ /* For the TSO header we do not set the dma address since we do not
+ * want it unmapped when we do cleanup. We still set len so that we
+ * count the bytes sent.
+ */
+ tx_swbd->len = hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Add extension BD for VLAN */
+ if (flags & ENETC_TXBD_FLAGS_EX) {
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Setup the VLAN fields */
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = 0; /* < C-TAG */
+ e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ *txbd = txbd_tmp;
+ }
+}
+
+static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, char *data,
+ int size, bool last_bd)
+{
+ union enetc_tx_bd txbd_tmp;
+ dma_addr_t addr;
+ u8 flags = 0;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+
+ addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -ENOMEM;
+ }
+
+ if (last_bd) {
+ flags |= ENETC_TXBD_FLAGS_F;
+ tx_swbd->is_eof = 1;
+ }
+
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.buf_len = cpu_to_le16(size);
+ txbd_tmp.flags = flags;
+
+ tx_swbd->dma = addr;
+ tx_swbd->len = size;
+ tx_swbd->dir = DMA_TO_DEVICE;
+
+ *txbd = txbd_tmp;
+
+ return 0;
+}
+
+static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
+ char *hdr, int hdr_len, int *l4_hdr_len)
+{
+ char *l4_hdr = hdr + skb_transport_offset(skb);
+ int mac_hdr_len = skb_network_offset(skb);
+
+ if (tso->tlen != sizeof(struct udphdr)) {
+ struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
+
+ tcph->check = 0;
+ } else {
+ struct udphdr *udph = (struct udphdr *)(l4_hdr);
+
+ udph->check = 0;
+ }
+
+ /* Compute the IP checksum. This is necessary since tso_build_hdr()
+ * already incremented the IP ID field.
+ */
+ if (!tso->ipv6) {
+ struct iphdr *iph = (void *)(hdr + mac_hdr_len);
+
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
+ /* Compute the checksum over the L4 header. */
+ *l4_hdr_len = hdr_len - skb_transport_offset(skb);
+ return csum_partial(l4_hdr, *l4_hdr_len, 0);
+}
+
+static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
+ struct sk_buff *skb, char *hdr, int len,
+ __wsum sum)
+{
+ char *l4_hdr = hdr + skb_transport_offset(skb);
+ __sum16 csum_final;
+
+ /* Complete the L4 checksum by appending the pseudo-header to the
+ * already computed checksum.
+ */
+ if (!tso->ipv6)
+ csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ len, ip_hdr(skb)->protocol, sum);
+ else
+ csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ len, ipv6_hdr(skb)->nexthdr, sum);
+
+ if (tso->tlen != sizeof(struct udphdr)) {
+ struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
+
+ tcph->check = csum_final;
+ } else {
+ struct udphdr *udph = (struct udphdr *)(l4_hdr);
+
+ udph->check = csum_final;
+ }
+}
+
+static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ int hdr_len, total_len, data_len;
+ struct enetc_tx_swbd *tx_swbd;
+ union enetc_tx_bd *txbd;
+ struct tso_t tso;
+ __wsum csum, csum2;
+ int count = 0, pos;
+ int err, i, bd_data_num;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ total_len = skb->len - hdr_len;
+ i = tx_ring->next_to_use;
+
+ while (total_len > 0) {
+ char *hdr;
+
+ /* Get the BD */
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->tx_swbd[i];
+ prefetchw(txbd);
+
+ /* Determine the length of this packet */
+ data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_len;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);
+
+ /* compute the csum over the L4 header */
+ csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
+ enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ bd_data_num = 0;
+ count++;
+
+ while (data_len > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_len);
+
+ /* Advance the index in the BDR */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->tx_swbd[i];
+ prefetchw(txbd);
+
+ /* Compute the checksum over this segment of data and
+ * add it to the csum already computed (over the L4
+ * header and possible other data segments).
+ */
+ csum2 = csum_partial(tso.data, size, 0);
+ csum = csum_block_add(csum, csum2, pos);
+ pos += size;
+
+ err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
+ tso.data, size,
+ size == data_len);
+ if (err)
+ goto err_map_data;
+
+ data_len -= size;
+ count++;
+ bd_data_num++;
+ tso_build_data(skb, &tso, size);
+
+ if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ goto err_chained_bd;
+ }
+
+ enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);
+
+ if (total_len == 0)
+ tx_swbd->skb = skb;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ }
+
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+err_map_data:
+ dev_err(tx_ring->dev, "DMA map error");
+
+err_chained_bd:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (count--);
+
+ return 0;
+}
+
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count;
+ int count, err;
/* Queue one-step Sync packet if already locked */
if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
@@ -332,19 +583,35 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
- if (unlikely(skb_linearize(skb)))
- goto drop_packet_err;
+ if (skb_is_gso(skb)) {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ } else {
+ if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_linearize(skb)))
+ goto drop_packet_err;
+
+ count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ err = skb_checksum_help(skb);
+ if (err)
+ goto drop_packet_err;
+ }
+ enetc_lock_mdio();
+ count = enetc_map_tx_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
if (unlikely(!count))
goto drop_packet_err;
@@ -546,10 +813,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
bool is_eof = tx_swbd->is_eof;
if (unlikely(tx_swbd->check_wb)) {
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- union enetc_tx_bd *txbd;
-
- txbd = ENETC_TXBD(*tx_ring, i);
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
if (txbd->flags & ENETC_TXBD_FLAGS_W &&
tx_swbd->do_twostep_tstamp) {
@@ -567,8 +831,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (xdp_frame) {
xdp_return_frame(xdp_frame);
} else if (skb) {
- if (unlikely(tx_swbd->skb->cb[0] &
- ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
/* Start work to release the lock for the next one-step
* timestamping packet, and send one skb from the
* tx_skbs queue if any is pending.
@@ -1493,15 +1756,32 @@ static int enetc_alloc_txbdr(struct enetc_bdr *txr)
return -ENOMEM;
err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
- if (err) {
- vfree(txr->tx_swbd);
- return err;
+ if (err)
+ goto err_alloc_bdr;
+
+ txr->tso_headers = dma_alloc_coherent(txr->dev,
+ txr->bd_count * TSO_HEADER_SIZE,
+ &txr->tso_headers_dma,
+ GFP_KERNEL);
+ if (!txr->tso_headers) {
+ err = -ENOMEM;
+ goto err_alloc_tso;
}
txr->next_to_clean = 0;
txr->next_to_use = 0;
return 0;
+
+err_alloc_tso:
+ dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
+ txr->bd_base, txr->bd_dma_base);
+ txr->bd_base = NULL;
+err_alloc_bdr:
+ vfree(txr->tx_swbd);
+ txr->tx_swbd = NULL;
+
+ return err;
}
static void enetc_free_txbdr(struct enetc_bdr *txr)
@@ -1513,6 +1793,10 @@ static void enetc_free_txbdr(struct enetc_bdr *txr)
size = txr->bd_count * sizeof(union enetc_tx_bd);
+ dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
+ txr->tso_headers, txr->tso_headers_dma);
+ txr->tso_headers = NULL;
+
dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
txr->bd_base = NULL;
@@ -2607,10 +2891,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
pcie_flr(pdev);
err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "device enable failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "device enable failed\n");
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
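Because this TSO path builds every segment's headers in software, the L4 checksum is also finished in software: the header sum is computed once, each payload chunk is accumulated at its running byte offset (offset parity matters in one's-complement arithmetic, which is what csum_block_add()'s offset argument handles), and the pseudo-header is folded in last, as csum_tcpudp_magic()/csum_ipv6_magic() do above. A stand-alone userspace demo of that arithmetic with made-up values (not kernel code):

#include <stdint.h>
#include <stdio.h>

/* One's-complement accumulation that tracks byte-offset parity, the
 * same role csum_block_add()'s offset argument plays in the kernel. */
static uint32_t csum_accumulate(uint32_t sum, const uint8_t *data,
				size_t len, size_t offset)
{
	for (size_t i = 0; i < len; i++) {
		if (((offset + i) & 1) == 0)	/* high byte of a 16-bit word */
			sum += (uint32_t)data[i] << 8;
		else				/* low byte */
			sum += data[i];
	}
	return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* TCP header with its checksum field left at zero, plus two
	 * payload "segments" standing in for tso.data chunks. */
	uint8_t tcp_hdr[20] = { 0x30, 0x39, 0x00, 0x50 }; /* ports 12345 -> 80 */
	uint8_t seg1[4] = { 'a', 'b', 'c', 'd' };
	uint8_t seg2[3] = { 'e', 'f', 'g' };
	uint32_t sum = 0;
	size_t pos = 0;

	sum = csum_accumulate(sum, tcp_hdr, sizeof(tcp_hdr), pos);
	pos += sizeof(tcp_hdr);
	sum = csum_accumulate(sum, seg1, sizeof(seg1), pos);
	pos += sizeof(seg1);
	sum = csum_accumulate(sum, seg2, sizeof(seg2), pos);
	pos += sizeof(seg2);

	/* IPv4 pseudo-header: saddr, daddr, zero, proto (6 = TCP), L4 len */
	uint8_t pseudo[12] = { 192, 0, 2, 1, 192, 0, 2, 2, 0, 6,
			       (uint8_t)(pos >> 8), (uint8_t)pos };
	sum = csum_accumulate(sum, pseudo, sizeof(pseudo), 0);

	printf("tcp checksum = 0x%04x\n", csum_fold(sum));
	return 0;
}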
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 08b283347d9c..fb39e406b7fc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -112,6 +112,10 @@ struct enetc_bdr {
dma_addr_t bd_dma_base;
u8 tsd_enable; /* Time specific departure */
bool ext_en; /* enable h/w descriptor extensions */
+
+ /* DMA buffer for TSO headers */
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
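One detail of the new tso_headers region worth spelling out: it contains one TSO_HEADER_SIZE slot per BD, indexed by BD index, which is why enetc_map_tx_tso_hdr() leaves tx_swbd->dma unset and headers never need a per-packet unmap. A sketch of the addressing (helper names illustrative):

static inline dma_addr_t example_tso_hdr_dma(struct enetc_bdr *tx_ring, int i)
{
	/* fixed slot per BD index inside the coherent allocation */
	return tx_ring->tso_headers_dma + i * TSO_HEADER_SIZE;
}

static inline char *example_tso_hdr_cpu(struct enetc_bdr *tx_ring, int i)
{
	return tx_ring->tso_headers + i * TSO_HEADER_SIZE;
}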
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 9690e36e9e85..910b9f722504 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -157,7 +157,7 @@ static const struct {
{ ENETC_PM0_TFRM, "MAC tx frames" },
{ ENETC_PM0_TFCS, "MAC tx fcs errors" },
{ ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
- { ENETC_PM0_TERR, "MAC tx frames" },
+ { ENETC_PM0_TERR, "MAC tx frame errors" },
{ ENETC_PM0_TUCA, "MAC tx unicast frames" },
{ ENETC_PM0_TMCA, "MAC tx multicast frames" },
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 0f5f081a5baf..1514e6a4a3ff 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -635,10 +635,14 @@ struct enetc_cmd_rfse {
#define ENETC_RFSE_EN BIT(15)
#define ENETC_RFSE_MODE_BD 2
-static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr)
+static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw,
+ struct net_device *ndev)
{
+ u8 addr[ETH_ALEN] __aligned(4);
+
*(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
*(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
+ eth_hw_addr_set(ndev, addr);
}
#define ENETC_SI_INT_IDX 0
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 4c977dfc44f0..0e87c7043b77 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -40,7 +40,7 @@ static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, saddr->sa_data);
enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
return 0;
@@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)
static void enetc_configure_port_mac(struct enetc_hw *hw)
{
+ int tc;
+
enetc_port_wr(hw, ENETC_PM0_MAXFRM,
ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
- enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ for (tc = 0; tc < 8; tc++)
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
@@ -759,10 +762,14 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
@@ -779,7 +786,7 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
}
/* pick up primary MAC address from SI */
- enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+ enetc_load_primary_mac_addr(&si->hw, ndev);
}
static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
@@ -803,10 +810,8 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
err = of_mdiobus_register(bus, np);
- if (err) {
- dev_err(dev, "cannot register MDIO bus\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
pf->mdio = bus;
@@ -937,7 +942,7 @@ static void enetc_pl_mac_validate(struct phylink_config *config,
state->interface != PHY_INTERFACE_MODE_2500BASEX &&
state->interface != PHY_INTERFACE_MODE_USXGMII &&
!phy_interface_mode_is_rgmii(state->interface)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -960,10 +965,8 @@ static void enetc_pl_mac_validate(struct phylink_config *config,
phylink_set(mask, 2500baseX_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void enetc_pl_mac_config(struct phylink_config *config,
@@ -1215,10 +1218,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
ERR_PTR(err));
err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
- if (err) {
- dev_err(&pdev->dev, "PCI probing failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
si = pci_get_drvdata(pdev);
if (!si->hw.port || !si->hw.global) {
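The dev_err_probe() conversions here and in enetc.c, enetc_ptp.c and enetc_vf.c are shorthand with one behavioral nicety: for -EPROBE_DEFER the message is logged at debug level and recorded as the deferral reason rather than flooding the log. The idiom, shown as a before/after sketch:

/* before */
if (err) {
	dev_err(dev, "cannot register MDIO bus\n");
	return err;
}

/* after: logs (or records an -EPROBE_DEFER reason) and returns err */
if (err)
	return dev_err_probe(dev, err, "cannot register MDIO bus\n");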
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index bc594892507a..36b4f51dd297 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -39,10 +39,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
}
err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "device enable failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "device enable failed\n");
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 4577226d3c6a..0536d2c76fbc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -486,14 +486,16 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
data_size = sizeof(struct streamid_data);
si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!si_data)
+ return -ENOMEM;
cbd.length = cpu_to_le16(data_size);
dma = dma_map_single(&priv->si->pdev->dev, si_data,
data_size, DMA_FROM_DEVICE);
if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(si_data);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out;
}
cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
@@ -512,12 +514,10 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- return -EINVAL;
+ goto out;
- if (!enable) {
- kfree(si_data);
- return 0;
- }
+ if (!enable)
+ goto out;
/* Enable the entry overwrite again in case the space was flushed by hardware */
memset(&cbd, 0, sizeof(cbd));
@@ -560,6 +560,10 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
}
err = enetc_send_cmd(priv->si, &cbd);
+out:
+ if (!dma_mapping_error(&priv->si->pdev->dev, dma))
+ dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
+
kfree(si_data);
return err;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 1a9d1e8b772c..17924305afa2 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -122,16 +122,20 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
/* pick up primary MAC address from SI */
- enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+ enetc_load_primary_mac_addr(&si->hw, ndev);
}
static int enetc_vf_probe(struct pci_dev *pdev,
@@ -143,10 +147,8 @@ static int enetc_vf_probe(struct pci_dev *pdev,
int err;
err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
- if (err) {
- dev_err(&pdev->dev, "PCI probing failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
si = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ec87b370bba1..bc418b910999 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1768,11 +1768,8 @@ static int fec_get_mac(struct net_device *ndev)
return 0;
}
- memcpy(ndev->dev_addr, iap, ETH_ALEN);
-
/* Adjust MAC if using macaddr */
- if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+ eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
return 0;
}
@@ -3326,7 +3323,7 @@ fec_set_mac_address(struct net_device *ndev, void *p)
if (addr) {
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
}
/* Add netif status check here to avoid system hang in the following case:
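eth_hw_addr_gen() is a subtle behavioral change here, not just a cleanup: it treats the base MAC as a 64-bit integer and adds the offset with carry, whereas the old open-coded version bumped only the last byte and could wrap within it. A sketch of the equivalent logic using the <linux/etherdevice.h> helpers (function name illustrative):

static void example_gen(struct net_device *ndev, const u8 *base,
			unsigned int id)
{
	u8 addr[ETH_ALEN];

	/* 64-bit add, so a dev_id offset carries into higher MAC bytes */
	u64_to_ether_addr(ether_addr_to_u64(base) + id, addr);
	eth_hw_addr_set(ndev, addr);
}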
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 73ff359a15f1..bbbde9f701c2 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -112,7 +112,7 @@ static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sock = addr;
- memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, sock->sa_data);
mpc52xx_fec_set_paddr(dev, sock->sa_data);
return 0;
@@ -890,7 +890,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
*
* First try to read MAC address from DT
*/
- rv = of_get_mac_address(np, ndev->dev_addr);
+ rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index bce3c9398887..1950a8936bc0 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -366,7 +366,7 @@ static void set_dflts(struct dtsec_cfg *cfg)
cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
}
-static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+static void set_mac_address(struct dtsec_regs __iomem *regs, const u8 *adr)
{
u32 tmp;
@@ -516,7 +516,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
if (addr) {
MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
- set_mac_address(regs, (u8 *)eth_addr);
+ set_mac_address(regs, (const u8 *)eth_addr);
}
/* HASH */
@@ -1022,7 +1022,7 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
return 0;
}
-int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
+int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
enum comm_mode mode = COMM_MODE_NONE;
@@ -1041,7 +1041,7 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
* Station address has to be swapped (big endian to little endian
*/
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
- set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
+ set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
graceful_start(dtsec, mode);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 5149d96ec2c1..68512c3bd6e5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -37,7 +37,7 @@
struct fman_mac *dtsec_config(struct fman_mac_params *params);
int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
-int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr);
+int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr);
int dtsec_adjust_link(struct fman_mac *dtsec,
u16 speed);
int dtsec_restart_autoneg(struct fman_mac *dtsec);
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 62f42921933d..2216b7f51d26 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -354,7 +354,7 @@ struct fman_mac {
bool allmulti_enabled;
};
-static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
+static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr,
u8 paddr_num)
{
u32 tmp0, tmp1;
@@ -897,12 +897,12 @@ int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
return 0;
}
-int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr)
+int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr)
{
if (!is_init_done(memac->memac_drv_param))
return -EINVAL;
- add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0);
+ add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0);
return 0;
}
@@ -1058,7 +1058,7 @@ int memac_init(struct fman_mac *memac)
/* MAC Address */
if (memac->addr != 0) {
MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
- add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+ add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0);
}
fixed_link = memac_drv_param->fixed_link;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index b2c671ec0ce7..3820f7a22983 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -40,7 +40,7 @@
struct fman_mac *memac_config(struct fman_mac_params *params);
int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
-int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr);
+int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr);
int memac_adjust_link(struct fman_mac *memac, u16 speed);
int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 41946b16f6c7..311c1906e044 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -221,7 +221,7 @@ struct fman_mac {
bool allmulti_enabled;
};
-static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
+static void set_mac_address(struct tgec_regs __iomem *regs, const u8 *adr)
{
u32 tmp0, tmp1;
@@ -514,13 +514,13 @@ int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
return 0;
}
-int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr)
+int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr)
{
if (!is_init_done(tgec->cfg))
return -EINVAL;
tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
- set_mac_address(tgec->regs, (u8 *)(*p_enet_addr));
+ set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr));
return 0;
}
@@ -704,7 +704,7 @@ int tgec_init(struct fman_mac *tgec)
if (tgec->addr) {
MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
- set_mac_address(tgec->regs, (u8 *)eth_addr);
+ set_mac_address(tgec->regs, (const u8 *)eth_addr);
}
/* interrupts */
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index 3bfd1062b386..b28b20b26148 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -37,7 +37,7 @@
struct fman_mac *tgec_config(struct fman_mac_params *params);
int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
-int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr);
+int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr);
int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 824a81a9f350..daa285a9b8b2 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -66,7 +66,7 @@ struct mac_device {
int (*stop)(struct mac_device *mac_dev);
void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
- int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
+ int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 2db6e38a772e..bacf25318f87 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1005,7 +1005,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
spin_lock_init(&fep->lock);
spin_lock_init(&fep->tx_lock);
- of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
+ of_get_ethdev_address(ofdev->dev.of_node, ndev);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index af6ad94bf24a..acab58fd3db3 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -753,7 +753,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
if (stash_len || stash_idx)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
- err = of_get_mac_address(np, dev->dev_addr);
+ err = of_get_ethdev_address(np, dev);
if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 3eb288d10b0c..823221c912ab 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3205,7 +3205,7 @@ static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/*
* If device is not running, we will set mac addr register
@@ -3731,7 +3731,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
goto err_free_netdev;
}
- of_get_mac_address(np, dev->dev_addr);
+ of_get_ethdev_address(np, dev);
ugeth->ug_info = ug_info;
ugeth->dev = device;
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 62c0bed82ced..b0d733e9a7c6 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -334,6 +334,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
u8 *buf;
size_t len;
u_char buggybuf[32];
+ u8 addr[ETH_ALEN];
dev_dbg(&link->dev, "fmvj18x_config\n");
@@ -468,8 +469,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
goto failed;
}
/* Read MACID from CIS */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = buf[i + 5];
+ eth_hw_addr_set(dev, &buf[5]);
kfree(buf);
} else {
if (pcmcia_get_mac_from_cis(link, dev))
@@ -490,7 +490,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
case UNGERMANN:
/* Read MACID from register */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ eth_hw_addr_set(dev, addr);
card_name = "Access/CARD";
break;
case XXX10304:
@@ -499,16 +500,15 @@ static int fmvj18x_config(struct pcmcia_device *link)
pr_notice("unable to read hardware net address\n");
goto failed;
}
- for (i = 0 ; i < 6; i++) {
- dev->dev_addr[i] = buggybuf[i];
- }
+ eth_hw_addr_set(dev, buggybuf);
card_name = "FMV-J182";
break;
case MBH10302:
default:
/* Read MACID from register */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + MAC_ID + i);
+ addr[i] = inb(ioaddr + MAC_ID + i);
+ eth_hw_addr_set(dev, addr);
card_name = "FMV-J181";
break;
}
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 92dc18a4bcc4..b719f72281c4 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -30,7 +30,7 @@
#define GVE_MIN_MSIX 3
/* Numbers of gve tx/rx stats in stats report. */
-#define GVE_TX_STATS_REPORT_NUM 5
+#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2
/* Interval to schedule a stats report update, 20000ms. */
@@ -142,6 +142,19 @@ struct gve_index_list {
s16 tail;
};
+/* A single received packet split across multiple buffers may be
+ * reconstructed using the information in this structure.
+ */
+struct gve_rx_ctx {
+ /* head and tail of skb chain for the current packet or NULL if none */
+ struct sk_buff *skb_head;
+ struct sk_buff *skb_tail;
+ u16 total_expected_size;
+ u8 expected_frag_cnt;
+ u8 curr_frag_cnt;
+ u8 reuse_frags;
+};
+
/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
struct gve_priv *gve;
@@ -153,6 +166,7 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */
u32 db_threshold;
+ u16 packet_buffer_size;
};
/* DQO fields. */
@@ -200,15 +214,16 @@ struct gve_rx_ring {
u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
+ u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
+ u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
+ u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
dma_addr_t q_resources_bus; /* dma address for the queue resources */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
- /* head and tail of skb chain for the current packet or NULL if none */
- struct sk_buff *skb_head;
- struct sk_buff *skb_tail;
+ struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
};
/* A TX desc ring entry */
@@ -224,11 +239,6 @@ struct gve_tx_iovec {
u32 iov_padding; /* padding associated with this segment */
};
-struct gve_tx_dma_buf {
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
-};
-
/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
* ring entry but only used for a pkt_desc not a seg_desc
*/
@@ -236,7 +246,10 @@ struct gve_tx_buffer_state {
struct sk_buff *skb; /* skb for this pkt */
union {
struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
- struct gve_tx_dma_buf buf;
+ struct {
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ };
};
};
@@ -280,7 +293,8 @@ struct gve_tx_pending_packet_dqo {
* All others correspond to `skb`'s frags and should be unmapped with
* `dma_unmap_page`.
*/
- struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
+ DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
+ DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
u16 num_bufs;
/* Linked list index to next element in the list, or -1 if none */
@@ -342,8 +356,8 @@ struct gve_tx_ring {
union {
/* GQI fields */
struct {
- /* NIC tail pointer */
- __be32 last_nic_done;
+ /* Spinlock for when cleanup in progress */
+ spinlock_t clean_lock;
};
/* DQO fields. */
@@ -414,7 +428,9 @@ struct gve_tx_ring {
u32 q_num ____cacheline_aligned; /* queue idx */
u32 stop_queue; /* count of queue stops */
u32 wake_queue; /* count of queue wakes */
+ u32 queue_timeout; /* count of queue timeouts */
u32 ntfy_id; /* notification block index */
+ u32 last_kick_msec; /* Last time the queue was kicked */
dma_addr_t bus; /* dma address of the descr ring */
dma_addr_t q_resources_bus; /* dma address of the queue resources */
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
@@ -822,15 +838,15 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
- struct gve_tx_ring *tx);
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+ struct gve_tx_ring *tx);
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
-bool gve_rx_poll(struct gve_notify_block *block, int budget);
+int gve_rx_poll(struct gve_notify_block *block, int budget);
+bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
- netdev_features_t feat);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
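Moving skb_head/skb_tail into the new gve_rx_ctx gathers all the state needed to reassemble a packet flagged GVE_RXF_PKT_CONT: the skb chain being built, expected versus current fragment counts, the running size, and the frag-reuse decision. An illustrative helper (hypothetical name) showing the idle state the context returns to between packets:

static void example_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
	ctx->skb_head = NULL;		/* no packet being reassembled */
	ctx->skb_tail = NULL;
	ctx->total_expected_size = 0;
	ctx->expected_frag_cnt = 0;
	ctx->curr_frag_cnt = 0;
	ctx->reuse_frags = 0;
}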
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index f089d33dd48e..83ae56c310d3 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -38,7 +38,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option *option,
struct gve_device_option_gqi_rda **dev_op_gqi_rda,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
- struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+ struct gve_device_option_dqo_rda **dev_op_dqo_rda,
+ struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -111,6 +112,24 @@ void gve_parse_device_option(struct gve_priv *priv,
}
*dev_op_dqo_rda = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_JUMBO_FRAMES:
+ if (option_length < sizeof(**dev_op_jumbo_frames) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Jumbo Frames",
+ (int)sizeof(**dev_op_jumbo_frames),
+ GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_jumbo_frames)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Jumbo Frames");
+ }
+ *dev_op_jumbo_frames = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -126,7 +145,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_descriptor *descriptor,
struct gve_device_option_gqi_rda **dev_op_gqi_rda,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
- struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+ struct gve_device_option_dqo_rda **dev_op_dqo_rda,
+ struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -146,7 +166,7 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
- dev_op_dqo_rda);
+ dev_op_dqo_rda, dev_op_jumbo_frames);
dev_opt = next_opt;
}
@@ -530,6 +550,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be64(rx->data.data_bus),
cmd.create_rx_queue.index = cpu_to_be32(queue_index);
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
cmd.create_rx_queue.rx_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
@@ -660,12 +681,31 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv,
return 0;
}
+static void gve_enable_supported_features(struct gve_priv *priv,
+ u32 supported_features_mask,
+ const struct gve_device_option_jumbo_frames
+ *dev_op_jumbo_frames)
+{
+ /* Before control reaches this point, the page-size-capped max MTU from
+ * the gve_device_descriptor field has already been stored in
+ * priv->dev->max_mtu. We overwrite it with the true max MTU below.
+ */
+ if (dev_op_jumbo_frames &&
+ (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
+ dev_info(&priv->pdev->dev,
+ "JUMBO FRAMES device option enabled.\n");
+ priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
+ }
+}
+
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
struct gve_device_descriptor *descriptor;
+ u32 supported_features_mask = 0;
union gve_adminq_command cmd;
dma_addr_t descriptor_bus;
int err = 0;
@@ -689,7 +729,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
goto free_device_descriptor;
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
- &dev_op_gqi_qpl, &dev_op_dqo_rda);
+ &dev_op_gqi_qpl, &dev_op_dqo_rda,
+ &dev_op_jumbo_frames);
if (err)
goto free_device_descriptor;
@@ -704,12 +745,19 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->queue_format = GVE_DQO_RDA_FORMAT;
dev_info(&priv->pdev->dev,
"Driver is running with DQO RDA queue format.\n");
+ supported_features_mask =
+ be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
} else if (dev_op_gqi_rda) {
priv->queue_format = GVE_GQI_RDA_FORMAT;
dev_info(&priv->pdev->dev,
"Driver is running with GQI RDA queue format.\n");
+ supported_features_mask =
+ be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
} else {
priv->queue_format = GVE_GQI_QPL_FORMAT;
+ if (dev_op_gqi_qpl)
+ supported_features_mask =
+ be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
dev_info(&priv->pdev->dev,
"Driver is running with GQI QPL queue format.\n");
}
@@ -733,7 +781,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
}
priv->dev->max_mtu = mtu;
priv->num_event_counters = be16_to_cpu(descriptor->counters);
- ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
+ eth_hw_addr_set(priv->dev, descriptor->mac);
mac = descriptor->mac;
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
@@ -746,6 +794,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
}
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
+ gve_enable_supported_features(priv, supported_features_mask,
+ dev_op_jumbo_frames);
+
free_device_descriptor:
dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
descriptor_bus);
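The jumbo-frames plumbing illustrates the negotiation model used for device options: the TLV must appear in the device descriptor and the active queue format's supported_features_mask must carry the matching bit; only then is priv->dev->max_mtu raised above the page-size-capped default. A sketch of the two-sided check, mirroring gve_enable_supported_features():

static bool example_jumbo_negotiated(const struct gve_device_option_jumbo_frames *opt,
				     u32 supported_features_mask)
{
	/* both the option TLV and the feature bit must be present */
	return opt && (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK);
}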
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 47c3d8f313fc..83c0b40cd2d9 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -108,6 +108,14 @@ struct gve_device_option_dqo_rda {
static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
+struct gve_device_option_jumbo_frames {
+ __be32 supported_features_mask;
+ __be16 max_mtu;
+ u8 padding[2];
+};
+
+static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -121,6 +129,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_GQI_RDA = 0x2,
GVE_DEV_OPT_ID_GQI_QPL = 0x3,
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+ GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
};
enum gve_dev_opt_req_feat_mask {
@@ -128,6 +137,11 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
+};
+
+enum gve_sup_feature_mask {
+ GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -270,6 +284,7 @@ enum gve_stat_names {
TX_LAST_COMPLETION_PROCESSED = 5,
RX_NEXT_EXPECTED_SEQUENCE = 6,
RX_BUFFERS_POSTED = 7,
+ TX_TIMEOUT_CNT = 8,
// stats from NIC
RX_QUEUE_DROP_CNT = 65,
RX_NO_BUFFERS_POSTED = 66,
diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h
index 05ae6300e984..4d225a18d8ce 100644
--- a/drivers/net/ethernet/google/gve/gve_desc.h
+++ b/drivers/net/ethernet/google/gve/gve_desc.h
@@ -90,12 +90,13 @@ union gve_rx_data_slot {
/* GVE Receive Packet Descriptor Flags */
#define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x)))
-#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
-#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
-#define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */
-#define GVE_RXF_TCP GVE_RXFLG(6) /* TCP Packet */
-#define GVE_RXF_UDP GVE_RXFLG(7) /* UDP Packet */
-#define GVE_RXF_ERR GVE_RXFLG(8) /* Packet Error Detected */
+#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
+#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
+#define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */
+#define GVE_RXF_TCP GVE_RXFLG(6) /* TCP Packet */
+#define GVE_RXF_UDP GVE_RXFLG(7) /* UDP Packet */
+#define GVE_RXF_ERR GVE_RXFLG(8) /* Packet Error Detected */
+#define GVE_RXF_PKT_CONT GVE_RXFLG(10) /* Multi Fragment RX packet */
/* GVE IRQ */
#define GVE_IRQ_ACK BIT(31)
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 716e6240305d..c8df47a97fa4 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -43,6 +43,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
+ "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
@@ -265,6 +266,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
+ data[i++] = rx->rx_cont_packet_cnt;
+ data[i++] = rx->rx_frag_flip_cnt;
+ data[i++] = rx->rx_frag_copy_cnt;
/* rx dropped packets */
data[i++] = tmp_rx_skb_alloc_fail +
tmp_rx_buf_alloc_fail +
@@ -330,8 +334,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
data[i++] = tx->stop_queue;
- data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
- tx));
+ data[i++] = gve_tx_load_event_counter(priv, tx);
data[i++] = tx->dma_mapping_error;
/* stats from NIC */
if (skip_nic_stats) {
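These per-ring counters follow the u64_stats seqcount discipline: the datapath brackets each update with begin/end, and readers such as this ethtool path retry until they fetch a consistent snapshot. A condensed sketch of the pairing rule (assumes <linux/u64_stats_sync.h>; the fields stand in for any of the ring stats):

	/* writer, e.g. the rx path */
	u64_stats_update_begin(&rx->statss);
	rx->rpackets++;
	u64_stats_update_end(&rx->statss);

	/* reader, e.g. ethtool */
	do {
		start = u64_stats_fetch_begin(&rx->statss);
		packets = rx->rpackets;
	} while (u64_stats_fetch_retry(&rx->statss, start));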
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index bf8a4a7c43f7..6b02ef432eda 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -24,6 +24,9 @@
#define GVE_VERSION "1.0.0"
#define GVE_VERSION_PREFIX "GVE-"
+// Minimum amount of time between queue kicks in msec (10 seconds)
+#define MIN_TX_TIMEOUT_GAP (1000 * 10)
+
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
@@ -192,34 +195,40 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
__be32 __iomem *irq_doorbell;
bool reschedule = false;
struct gve_priv *priv;
+ int work_done = 0;
block = container_of(napi, struct gve_notify_block, napi);
priv = block->priv;
if (block->tx)
reschedule |= gve_tx_poll(block, budget);
- if (block->rx)
- reschedule |= gve_rx_poll(block, budget);
+ if (block->rx) {
+ work_done = gve_rx_poll(block, budget);
+ reschedule |= work_done == budget;
+ }
if (reschedule)
return budget;
- napi_complete(napi);
- irq_doorbell = gve_irq_doorbell(priv, block);
- iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
+ /* Complete processing - don't unmask irq if busy polling is enabled */
+ if (likely(napi_complete_done(napi, work_done))) {
+ irq_doorbell = gve_irq_doorbell(priv, block);
+ iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
- /* Double check we have no extra work.
- * Ensure unmask synchronizes with checking for work.
- */
- mb();
- if (block->tx)
- reschedule |= gve_tx_poll(block, -1);
- if (block->rx)
- reschedule |= gve_rx_poll(block, -1);
- if (reschedule && napi_reschedule(napi))
- iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+ /* Ensure the IRQ ACK is visible before we check for pending work.
+ * If the queue had issued updates, they will be visible here.
+ */
+ mb();
- return 0;
+ if (block->tx)
+ reschedule |= gve_tx_clean_pending(priv, block->tx);
+ if (block->rx)
+ reschedule |= gve_rx_work_pending(block->rx);
+
+ if (reschedule && napi_reschedule(napi))
+ iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+ }
+ return work_done;
}
static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
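Stripped of gve specifics, the reworked poll above follows the generic NAPI contract, roughly the skeleton below (the example_* helpers are assumptions for illustration):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = example_clean_rx(napi, budget);	/* assumed helper */

		/* Consuming the full budget means "more work, poll me again". */
		if (work_done == budget)
			return budget;

		/* Under busy polling napi_complete_done() returns false and the
		 * device IRQ must stay masked.
		 */
		if (napi_complete_done(napi, work_done))
			example_unmask_irq(napi);	/* assumed helper */

		return work_done;
	}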
@@ -279,7 +288,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
int i, j;
int err;
- priv->msix_vectors = kvzalloc(num_vecs_requested *
+ priv->msix_vectors = kvcalloc(num_vecs_requested,
sizeof(*priv->msix_vectors), GFP_KERNEL);
if (!priv->msix_vectors)
return -ENOMEM;
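The kvzalloc(n * size) -> kvcalloc(n, size) conversions in this and the following hunks are not cosmetic: the two-argument form rejects a product that would overflow instead of silently wrapping and returning an undersized buffer. A standalone userspace illustration of the wrap being guarded against:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		size_t n = SIZE_MAX / 2 + 1;	/* pathological element count */
		size_t size = 2;		/* n * size wraps to 0 */

		printf("n * size wraps to %zu\n", n * size);

		/* calloc(), like kvcalloc(), checks the product and fails. */
		if (!calloc(n, size))
			puts("calloc refused the overflowing request");
		return 0;
	}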
@@ -640,7 +649,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
int err;
/* Setup tx rings */
- priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
+ priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
GFP_KERNEL);
if (!priv->tx)
return -ENOMEM;
@@ -653,7 +662,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
goto free_tx;
/* Setup rx rings */
- priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
+ priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
GFP_KERNEL);
if (!priv->rx) {
err = -ENOMEM;
@@ -776,12 +785,11 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
qpl->id = id;
qpl->num_entries = 0;
- qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
+ qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->pages)
return -ENOMEM;
- qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
- GFP_KERNEL);
+ qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->page_buses)
return -ENOMEM;
@@ -840,7 +848,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
if (priv->queue_format == GVE_GQI_RDA_FORMAT)
return 0;
- priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
+ priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
if (!priv->qpls)
return -ENOMEM;
@@ -859,7 +867,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
sizeof(unsigned long) * BITS_PER_BYTE;
- priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
+ priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
sizeof(unsigned long), GFP_KERNEL);
if (!priv->qpl_cfg.qpl_id_map) {
err = -ENOMEM;
@@ -1116,9 +1124,47 @@ static void gve_turnup(struct gve_priv *priv)
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
- struct gve_priv *priv = netdev_priv(dev);
+ struct gve_notify_block *block;
+ struct gve_tx_ring *tx = NULL;
+ struct gve_priv *priv;
+ u32 last_nic_done;
+ u32 current_time;
+ u32 ntfy_idx;
+ netdev_info(dev, "Timeout on tx queue %d", txqueue);
+ priv = netdev_priv(dev);
+ if (txqueue > priv->tx_cfg.num_queues)
+ goto reset;
+
+ ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
+ if (ntfy_idx > priv->num_ntfy_blks)
+ goto reset;
+
+ block = &priv->ntfy_blocks[ntfy_idx];
+ tx = block->tx;
+
+ current_time = jiffies_to_msecs(jiffies);
+ if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+ goto reset;
+
+ /* Check to see if there are missed completions, which will allow us to
+ * kick the queue.
+ */
+ last_nic_done = gve_tx_load_event_counter(priv, tx);
+ if (last_nic_done - tx->done) {
+ netdev_info(dev, "Kicking queue %d", txqueue);
+ iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
+ napi_schedule(&block->napi);
+ tx->last_kick_msec = current_time;
+ goto out;
+ } // Else reset.
+
+reset:
gve_schedule_reset(priv);
+
+out:
+ if (tx)
+ tx->queue_timeout++;
priv->tx_timeo_cnt++;
}
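One caveat: the gap check above compares raw u32 millisecond counters, which misbehaves once the counter wraps. A wrap-safe variant would stay in jiffies and use the time_after() family; in the sketch below, last_kick_jiffies is a hypothetical field standing in for last_kick_msec:

	/* Hypothetical wrap-safe form of the rate limit (sketch only). */
	if (time_before(jiffies, tx->last_kick_jiffies +
				 msecs_to_jiffies(MIN_TX_TIMEOUT_GAP)))
		goto reset;
	/* ... and on a successful kick: */
	tx->last_kick_jiffies = jiffies;	/* hypothetical field */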
@@ -1247,6 +1293,11 @@ void gve_handle_report_stats(struct gve_priv *priv)
.value = cpu_to_be64(last_completion),
.queue_id = cpu_to_be32(idx),
};
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
+ .value = cpu_to_be64(priv->tx[idx].queue_timeout),
+ .queue_id = cpu_to_be32(idx),
+ };
}
}
/* rx stats */
@@ -1320,14 +1371,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
"Could not get device information: err=%d\n", err);
goto err;
}
- if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) {
- priv->dev->max_mtu = PAGE_SIZE;
- err = gve_adminq_set_mtu(priv, priv->dev->mtu);
- if (err) {
- dev_err(&priv->pdev->dev, "Could not set mtu");
- goto err;
- }
- }
priv->dev->mtu = priv->dev->max_mtu;
num_ntfy = pci_msix_vec_count(priv->pdev);
if (num_ntfy <= 0) {
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 94941d4e4744..c8500babbd1d 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -16,19 +16,23 @@ static void gve_rx_free_buffer(struct device *dev,
dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
GVE_DATA_SLOT_ADDR_PAGE_MASK);
+ page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}
static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
{
- if (rx->data.raw_addressing) {
- u32 slots = rx->mask + 1;
- int i;
+ u32 slots = rx->mask + 1;
+ int i;
+ if (rx->data.raw_addressing) {
for (i = 0; i < slots; i++)
gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
&rx->data.data_ring[i]);
} else {
+ for (i = 0; i < slots; i++)
+ page_ref_sub(rx->data.page_info[i].page,
+ rx->data.page_info[i].pagecnt_bias - 1);
gve_unassign_qpl(priv, rx->data.qpl->id);
rx->data.qpl = NULL;
}
@@ -69,6 +73,9 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
page_info->page_offset = 0;
page_info->page_address = page_address(page);
*slot_addr = cpu_to_be64(addr);
+ /* The page already has 1 ref */
+ page_ref_add(page, INT_MAX - 1);
+ page_info->pagecnt_bias = INT_MAX;
}
static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
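The INT_MAX seeding above implements the pagecnt_bias reuse trick: rather than bumping the atomic page refcount for every packet, the driver hands references to the stack by decrementing a local bias, and the page is recyclable exactly when page_count() equals the bias again. A toy userspace model of the arithmetic:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		int page_count = 1;		/* fresh page: one reference */
		int bias;

		page_count += INT_MAX - 1;	/* page_ref_add(page, INT_MAX - 1) */
		bias = INT_MAX;			/* page_info->pagecnt_bias */

		printf("reusable? %d\n", page_count == bias);	/* 1: no skb holds it */

		bias--;		/* gve_dec_pagecnt_bias(): one frag handed to an skb */
		printf("reusable? %d\n", page_count == bias);	/* 0: skb owns a ref */

		page_count--;	/* stack freed the frag: put_page() */
		printf("reusable? %d\n", page_count == bias);	/* 1 again */
		return 0;
	}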
@@ -136,6 +143,16 @@ alloc_err:
return err;
}
+static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+ ctx->curr_frag_cnt = 0;
+ ctx->total_expected_size = 0;
+ ctx->expected_frag_cnt = 0;
+ ctx->skb_head = NULL;
+ ctx->skb_tail = NULL;
+ ctx->reuse_frags = false;
+}
+
static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
struct gve_rx_ring *rx = &priv->rx[idx];
@@ -202,6 +219,12 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->cnt = 0;
rx->db_threshold = priv->rx_desc_cnt / 2;
rx->desc.seqno = 1;
+
+ /* Allocating half-page buffers allows page-flipping, which is faster
+ * than copying or allocating new pages.
+ */
+ rx->packet_buffer_size = PAGE_SIZE / 2;
+ gve_rx_ctx_clear(&rx->ctx);
gve_rx_add_to_block(priv, idx);
return 0;
@@ -268,18 +291,28 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
return PKT_HASH_TYPE_L2;
}
+static u16 gve_rx_ctx_padding(struct gve_rx_ctx *ctx)
+{
+ return (ctx->curr_frag_cnt == 0) ? GVE_RX_PAD : 0;
+}
+
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
- u16 len)
+ u16 packet_buffer_size, u16 len,
+ struct gve_rx_ctx *ctx)
{
- struct sk_buff *skb = napi_get_frags(napi);
+ u32 offset = page_info->page_offset + gve_rx_ctx_padding(ctx);
+ struct sk_buff *skb;
- if (unlikely(!skb))
+ if (!ctx->skb_head)
+ ctx->skb_head = napi_get_frags(napi);
+
+ if (unlikely(!ctx->skb_head))
return NULL;
- skb_add_rx_frag(skb, 0, page_info->page,
- page_info->page_offset +
- GVE_RX_PAD, len, PAGE_SIZE / 2);
+ skb = ctx->skb_head;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
+ offset, len, packet_buffer_size);
return skb;
}
@@ -293,23 +326,18 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl
*(slot_addr) ^= offset;
}
-static bool gve_rx_can_flip_buffers(struct net_device *netdev)
-{
- return PAGE_SIZE == 4096
- ? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false;
-}
-
-static int gve_rx_can_recycle_buffer(struct page *page)
+static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
- int pagecount = page_count(page);
+ int pagecount = page_count(page_info->page);
/* This page is not being used by any SKBs - reuse */
- if (pagecount == 1)
+ if (pagecount == page_info->pagecnt_bias)
return 1;
/* This page is still being used by an SKB - we can't reuse */
- else if (pagecount >= 2)
+ else if (pagecount > page_info->pagecnt_bias)
return 0;
- WARN(pagecount < 1, "Pagecount should never be < 1");
+ WARN(pagecount < page_info->pagecnt_bias,
+ "Pagecount should never be less than the bias.");
return -1;
}
@@ -317,19 +345,19 @@ static struct sk_buff *
gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
struct gve_rx_slot_page_info *page_info, u16 len,
struct napi_struct *napi,
- union gve_rx_data_slot *data_slot)
+ union gve_rx_data_slot *data_slot,
+ u16 packet_buffer_size, struct gve_rx_ctx *ctx)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = gve_rx_add_frags(napi, page_info, packet_buffer_size, len, ctx);
- skb = gve_rx_add_frags(napi, page_info, len);
if (!skb)
return NULL;
- /* Optimistically stop the kernel from freeing the page by increasing
- * the page bias. We will check the refcount in refill to determine if
- * we need to alloc a new page.
+ /* Optimistically stop the kernel from freeing the page.
+ * We will check again in refill to determine if we need to alloc a
+ * new page.
*/
- get_page(page_info->page);
+ gve_dec_pagecnt_bias(page_info);
return skb;
}
@@ -340,6 +368,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
u16 len, struct napi_struct *napi,
union gve_rx_data_slot *data_slot)
{
+ struct gve_rx_ctx *ctx = &rx->ctx;
struct sk_buff *skb;
/* if raw_addressing mode is not enabled, gvnic can only receive into
@@ -347,116 +376,259 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* choice is to copy the data out of it so that we can return it to the
* device.
*/
- if (page_info->can_flip) {
- skb = gve_rx_add_frags(napi, page_info, len);
+ if (ctx->reuse_frags) {
+ skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
/* Make sure that the page isn't freed. */
- get_page(page_info->page);
+ gve_dec_pagecnt_bias(page_info);
gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
}
} else {
- skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD);
+ const u16 padding = gve_rx_ctx_padding(ctx);
+
+ skb = gve_rx_copy(netdev, napi, page_info, len, padding, ctx);
if (skb) {
u64_stats_update_begin(&rx->statss);
- rx->rx_copied_pkt++;
+ rx->rx_frag_copy_cnt++;
u64_stats_update_end(&rx->statss);
}
}
return skb;
}
-static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
- netdev_features_t feat, u32 idx)
+#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
+static u16 gve_rx_get_fragment_size(struct gve_rx_ctx *ctx, struct gve_rx_desc *desc)
{
+ return be16_to_cpu(desc->len) - gve_rx_ctx_padding(ctx);
+}
+
+static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx)
+{
+ bool qpl_mode = !rx->data.raw_addressing, packet_size_error = false;
+ bool buffer_error = false, desc_error = false, seqno_error = false;
struct gve_rx_slot_page_info *page_info;
struct gve_priv *priv = rx->gve;
- struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
- struct net_device *dev = priv->dev;
- union gve_rx_data_slot *data_slot;
- struct sk_buff *skb = NULL;
- dma_addr_t page_bus;
- u16 len;
+ u32 idx = rx->cnt & rx->mask;
+ bool reuse_frags, can_flip;
+ struct gve_rx_desc *desc;
+ u16 packet_size = 0;
+ u16 n_frags = 0;
+ int recycle;
+
+ /* In QPL mode, we only flip buffers when all buffers containing the packet
+ * can be flipped. RDA can_flip decisions will be made later, per frag.
+ */
+ can_flip = qpl_mode;
+ reuse_frags = can_flip;
+ do {
+ u16 frag_size;
+
+ n_frags++;
+ desc = &rx->desc.desc_ring[idx];
+ desc_error = unlikely(desc->flags_seq & GVE_RXF_ERR) || desc_error;
+ if (GVE_SEQNO(desc->flags_seq) != rx->desc.seqno) {
+ seqno_error = true;
+ netdev_warn(priv->dev,
+ "RX seqno error: want=%d, got=%d, dropping packet and scheduling reset.",
+ rx->desc.seqno, GVE_SEQNO(desc->flags_seq));
+ }
+ frag_size = be16_to_cpu(desc->len);
+ packet_size += frag_size;
+ if (frag_size > rx->packet_buffer_size) {
+ packet_size_error = true;
+ netdev_warn(priv->dev,
+ "RX fragment error: packet_buffer_size=%d, frag_size=%d, dropping packet.",
+ rx->packet_buffer_size, be16_to_cpu(desc->len));
+ }
+ page_info = &rx->data.page_info[idx];
+ if (can_flip) {
+ recycle = gve_rx_can_recycle_buffer(page_info);
+ reuse_frags = reuse_frags && recycle > 0;
+ buffer_error = buffer_error || unlikely(recycle < 0);
+ }
+ idx = (idx + 1) & rx->mask;
+ rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
+ } while (GVE_PKTCONT_BIT_IS_SET(desc->flags_seq));
+
+ prefetch(rx->desc.desc_ring + idx);
+
+ ctx->curr_frag_cnt = 0;
+ ctx->total_expected_size = packet_size - GVE_RX_PAD;
+ ctx->expected_frag_cnt = n_frags;
+ ctx->skb_head = NULL;
+ ctx->reuse_frags = reuse_frags;
- /* drop this packet */
- if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
+ if (ctx->expected_frag_cnt > 1) {
u64_stats_update_begin(&rx->statss);
- rx->rx_desc_err_dropped_pkt++;
+ rx->rx_cont_packet_cnt++;
u64_stats_update_end(&rx->statss);
+ }
+ if (ctx->total_expected_size > priv->rx_copybreak && !ctx->reuse_frags && qpl_mode) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ u64_stats_update_end(&rx->statss);
+ }
+
+ if (unlikely(buffer_error || seqno_error || packet_size_error)) {
+ gve_schedule_reset(priv);
return false;
}
- len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
- page_info = &rx->data.page_info[idx];
+ if (unlikely(desc_error)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_desc_err_dropped_pkt++;
+ u64_stats_update_end(&rx->statss);
+ return false;
+ }
+ return true;
+}
- data_slot = &rx->data.data_ring[idx];
- page_bus = (rx->data.raw_addressing) ?
- be64_to_cpu(data_slot->addr) & GVE_DATA_SLOT_ADDR_PAGE_MASK :
- rx->data.qpl->page_buses[idx];
- dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
- PAGE_SIZE, DMA_FROM_DEVICE);
+static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_slot_page_info *page_info, struct napi_struct *napi,
+ u16 len, union gve_rx_data_slot *data_slot)
+{
+ struct net_device *netdev = priv->dev;
+ struct gve_rx_ctx *ctx = &rx->ctx;
+ struct sk_buff *skb = NULL;
- if (len <= priv->rx_copybreak) {
+ if (len <= priv->rx_copybreak && ctx->expected_frag_cnt == 1) {
/* Just copy small packets */
- skb = gve_rx_copy(dev, napi, page_info, len, GVE_RX_PAD);
- u64_stats_update_begin(&rx->statss);
- rx->rx_copied_pkt++;
- rx->rx_copybreak_pkt++;
- u64_stats_update_end(&rx->statss);
+ skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD, ctx);
+ if (skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ rx->rx_frag_copy_cnt++;
+ rx->rx_copybreak_pkt++;
+ u64_stats_update_end(&rx->statss);
+ }
} else {
- u8 can_flip = gve_rx_can_flip_buffers(dev);
- int recycle = 0;
+ if (rx->data.raw_addressing) {
+ int recycle = gve_rx_can_recycle_buffer(page_info);
- if (can_flip) {
- recycle = gve_rx_can_recycle_buffer(page_info->page);
- if (recycle < 0) {
- if (!rx->data.raw_addressing)
- gve_schedule_reset(priv);
- return false;
+ if (unlikely(recycle < 0)) {
+ gve_schedule_reset(priv);
+ return NULL;
}
- }
-
- page_info->can_flip = can_flip && recycle;
- if (rx->data.raw_addressing) {
- skb = gve_rx_raw_addressing(&priv->pdev->dev, dev,
+ page_info->can_flip = recycle;
+ if (page_info->can_flip) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_flip_cnt++;
+ u64_stats_update_end(&rx->statss);
+ }
+ skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
page_info, len, napi,
- data_slot);
+ data_slot,
+ rx->packet_buffer_size, ctx);
} else {
- skb = gve_rx_qpl(&priv->pdev->dev, dev, rx,
+ if (ctx->reuse_frags) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_flip_cnt++;
+ u64_stats_update_end(&rx->statss);
+ }
+ skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
}
}
+ return skb;
+}
- if (!skb) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_skb_alloc_fail++;
- u64_stats_update_end(&rx->statss);
- return false;
+static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
+ u64 *packet_size_bytes, u32 *work_done)
+{
+ struct gve_rx_slot_page_info *page_info;
+ struct gve_rx_ctx *ctx = &rx->ctx;
+ union gve_rx_data_slot *data_slot;
+ struct gve_priv *priv = rx->gve;
+ struct gve_rx_desc *first_desc;
+ struct sk_buff *skb = NULL;
+ struct gve_rx_desc *desc;
+ struct napi_struct *napi;
+ dma_addr_t page_bus;
+ u32 work_cnt = 0;
+ void *va;
+ u32 idx;
+ u16 len;
+
+ idx = rx->cnt & rx->mask;
+ first_desc = &rx->desc.desc_ring[idx];
+ desc = first_desc;
+ napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+
+ if (unlikely(!gve_rx_ctx_init(ctx, rx)))
+ goto skb_alloc_fail;
+
+ while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
+ /* Prefetch two packet buffers ahead; we will need them soon. */
+ page_info = &rx->data.page_info[(idx + 2) & rx->mask];
+ va = page_info->page_address + page_info->page_offset;
+
+ prefetch(page_info->page); /* Kernel page struct. */
+ prefetch(va); /* Packet header. */
+ prefetch(va + 64); /* Next cacheline too. */
+
+ len = gve_rx_get_fragment_size(ctx, desc);
+
+ page_info = &rx->data.page_info[idx];
+ data_slot = &rx->data.data_ring[idx];
+ page_bus = rx->data.raw_addressing ?
+ be64_to_cpu(data_slot->addr) - page_info->page_offset :
+ rx->data.qpl->page_buses[idx];
+ dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot);
+ if (!skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
+ goto skb_alloc_fail;
+ }
+
+ ctx->curr_frag_cnt++;
+ rx->cnt++;
+ idx = rx->cnt & rx->mask;
+ work_cnt++;
+ desc = &rx->desc.desc_ring[idx];
}
if (likely(feat & NETIF_F_RXCSUM)) {
/* NIC passes up the partial sum */
- if (rx_desc->csum)
+ if (first_desc->csum)
skb->ip_summed = CHECKSUM_COMPLETE;
else
skb->ip_summed = CHECKSUM_NONE;
- skb->csum = csum_unfold(rx_desc->csum);
+ skb->csum = csum_unfold(first_desc->csum);
}
/* parse flags & pass relevant info up */
if (likely(feat & NETIF_F_RXHASH) &&
- gve_needs_rss(rx_desc->flags_seq))
- skb_set_hash(skb, be32_to_cpu(rx_desc->rss_hash),
- gve_rss_type(rx_desc->flags_seq));
+ gve_needs_rss(first_desc->flags_seq))
+ skb_set_hash(skb, be32_to_cpu(first_desc->rss_hash),
+ gve_rss_type(first_desc->flags_seq));
+ *packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
+ *work_done = work_cnt;
if (skb_is_nonlinear(skb))
napi_gro_frags(napi);
else
napi_gro_receive(napi, skb);
+
+ gve_rx_ctx_clear(ctx);
return true;
+
+skb_alloc_fail:
+ if (napi->skb)
+ napi_free_frags(napi);
+ *packet_size_bytes = 0;
+ *work_done = ctx->expected_frag_cnt;
+ while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
+ rx->cnt++;
+ ctx->curr_frag_cnt++;
+ }
+ gve_rx_ctx_clear(ctx);
+ return false;
}
-static bool gve_rx_work_pending(struct gve_rx_ring *rx)
+bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
struct gve_rx_desc *desc;
__be16 flags_seq;
@@ -499,7 +671,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
* owns half the page, it is impossible to tell which half. Either
* the whole page is free or it needs to be replaced.
*/
- int recycle = gve_rx_can_recycle_buffer(page_info->page);
+ int recycle = gve_rx_can_recycle_buffer(page_info);
if (recycle < 0) {
if (!rx->data.raw_addressing)
@@ -511,11 +683,15 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
union gve_rx_data_slot *data_slot =
&rx->data.data_ring[idx];
struct device *dev = &priv->pdev->dev;
-
gve_rx_free_buffer(dev, page_info, data_slot);
page_info->page = NULL;
- if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot))
+ if (gve_rx_alloc_buffer(priv, dev, page_info,
+ data_slot)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
break;
+ }
}
}
fill_cnt++;
@@ -524,19 +700,20 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
return true;
}
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
- netdev_features_t feat)
+static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
+ netdev_features_t feat)
{
+ u32 work_done = 0, total_packet_cnt = 0, ok_packet_cnt = 0;
struct gve_priv *priv = rx->gve;
- u32 work_done = 0, packets = 0;
+ u32 idx = rx->cnt & rx->mask;
struct gve_rx_desc *desc;
- u32 cnt = rx->cnt;
- u32 idx = cnt & rx->mask;
u64 bytes = 0;
- desc = rx->desc.desc_ring + idx;
+ desc = &rx->desc.desc_ring[idx];
while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
work_done < budget) {
+ u64 packet_size_bytes = 0;
+ u32 work_cnt = 0;
bool dropped;
netif_info(priv, rx_status, priv->dev,
@@ -546,56 +723,57 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
"[%d] seqno=%d rx->desc.seqno=%d\n",
rx->q_num, GVE_SEQNO(desc->flags_seq),
rx->desc.seqno);
- dropped = !gve_rx(rx, desc, feat, idx);
+
+ dropped = !gve_rx(rx, feat, &packet_size_bytes, &work_cnt);
if (!dropped) {
- bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
- packets++;
+ bytes += packet_size_bytes;
+ ok_packet_cnt++;
}
- cnt++;
- idx = cnt & rx->mask;
- desc = rx->desc.desc_ring + idx;
- rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
- work_done++;
+ total_packet_cnt++;
+ idx = rx->cnt & rx->mask;
+ desc = &rx->desc.desc_ring[idx];
+ work_done += work_cnt;
}
- if (!work_done && rx->fill_cnt - cnt > rx->db_threshold)
- return false;
+ if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
+ return 0;
- u64_stats_update_begin(&rx->statss);
- rx->rpackets += packets;
- rx->rbytes += bytes;
- u64_stats_update_end(&rx->statss);
- rx->cnt = cnt;
+ if (work_done) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rpackets += ok_packet_cnt;
+ rx->rbytes += bytes;
+ u64_stats_update_end(&rx->statss);
+ }
/* restock ring slots */
if (!rx->data.raw_addressing) {
/* In QPL mode buffs are refilled as the desc are processed */
rx->fill_cnt += work_done;
- } else if (rx->fill_cnt - cnt <= rx->db_threshold) {
+ } else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
/* In raw addressing mode buffs are only refilled if the avail
* falls below a threshold.
*/
if (!gve_rx_refill_buffers(priv, rx))
- return false;
+ return 0;
/* If we were not able to completely refill buffers, we'll want
* to schedule this queue for work again to refill buffers.
*/
- if (rx->fill_cnt - cnt <= rx->db_threshold) {
+ if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
gve_rx_write_doorbell(priv, rx);
- return true;
+ return budget;
}
}
gve_rx_write_doorbell(priv, rx);
- return gve_rx_work_pending(rx);
+ return total_packet_cnt;
}
-bool gve_rx_poll(struct gve_notify_block *block, int budget)
+int gve_rx_poll(struct gve_notify_block *block, int budget)
{
struct gve_rx_ring *rx = block->rx;
netdev_features_t feat;
- bool repoll = false;
+ int work_done = 0;
feat = block->napi.dev->features;
@@ -604,8 +782,7 @@ bool gve_rx_poll(struct gve_notify_block *block, int budget)
budget = INT_MAX;
if (budget > 0)
- repoll |= gve_clean_rx_done(rx, budget, feat);
- else
- repoll |= gve_rx_work_pending(rx);
- return repoll;
+ work_done = gve_clean_rx_done(rx, budget, feat);
+
+ return work_done;
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8500621b2cd4..beb8bb079023 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -240,8 +240,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
rx->dqo.bufq.mask = buffer_queue_slots - 1;
rx->dqo.complq.num_free_slots = completion_queue_slots;
rx->dqo.complq.mask = completion_queue_slots - 1;
- rx->skb_head = NULL;
- rx->skb_tail = NULL;
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
@@ -467,12 +467,12 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
static void gve_rx_free_skb(struct gve_rx_ring *rx)
{
- if (!rx->skb_head)
+ if (!rx->ctx.skb_head)
return;
- dev_kfree_skb_any(rx->skb_head);
- rx->skb_head = NULL;
- rx->skb_tail = NULL;
+ dev_kfree_skb_any(rx->ctx.skb_head);
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
}
/* Chains multi skbs for single rx packet.
@@ -483,7 +483,7 @@ static int gve_rx_append_frags(struct napi_struct *napi,
u16 buf_len, struct gve_rx_ring *rx,
struct gve_priv *priv)
{
- int num_frags = skb_shinfo(rx->skb_tail)->nr_frags;
+ int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;
if (unlikely(num_frags == MAX_SKB_FRAGS)) {
struct sk_buff *skb;
@@ -492,17 +492,17 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (!skb)
return -1;
- skb_shinfo(rx->skb_tail)->frag_list = skb;
- rx->skb_tail = skb;
+ skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
+ rx->ctx.skb_tail = skb;
num_frags = 0;
}
- if (rx->skb_tail != rx->skb_head) {
- rx->skb_head->len += buf_len;
- rx->skb_head->data_len += buf_len;
- rx->skb_head->truesize += priv->data_buffer_size_dqo;
+ if (rx->ctx.skb_tail != rx->ctx.skb_head) {
+ rx->ctx.skb_head->len += buf_len;
+ rx->ctx.skb_head->data_len += buf_len;
+ rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
}
- skb_add_rx_frag(rx->skb_tail, num_frags,
+ skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
buf_state->page_info.page,
buf_state->page_info.page_offset,
buf_len, priv->data_buffer_size_dqo);
@@ -556,7 +556,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
buf_len, DMA_FROM_DEVICE);
/* Append to current skb if one exists. */
- if (rx->skb_head) {
+ if (rx->ctx.skb_head) {
if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
priv)) != 0) {
goto error;
@@ -567,11 +567,11 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
if (eop && buf_len <= priv->rx_copybreak) {
- rx->skb_head = gve_rx_copy(priv->dev, napi,
- &buf_state->page_info, buf_len, 0);
- if (unlikely(!rx->skb_head))
+ rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
+ &buf_state->page_info, buf_len, 0, NULL);
+ if (unlikely(!rx->ctx.skb_head))
goto error;
- rx->skb_tail = rx->skb_head;
+ rx->ctx.skb_tail = rx->ctx.skb_head;
u64_stats_update_begin(&rx->statss);
rx->rx_copied_pkt++;
@@ -583,12 +583,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- rx->skb_head = napi_get_frags(napi);
- if (unlikely(!rx->skb_head))
+ rx->ctx.skb_head = napi_get_frags(napi);
+ if (unlikely(!rx->ctx.skb_head))
goto error;
- rx->skb_tail = rx->skb_head;
+ rx->ctx.skb_tail = rx->ctx.skb_head;
- skb_add_rx_frag(rx->skb_head, 0, buf_state->page_info.page,
+ skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
buf_state->page_info.page_offset, buf_len,
priv->data_buffer_size_dqo);
gve_dec_pagecnt_bias(&buf_state->page_info);
@@ -635,27 +635,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
rx->gve->ptype_lut_dqo->ptypes[desc->packet_type];
int err;
- skb_record_rx_queue(rx->skb_head, rx->q_num);
+ skb_record_rx_queue(rx->ctx.skb_head, rx->q_num);
if (feat & NETIF_F_RXHASH)
- gve_rx_skb_hash(rx->skb_head, desc, ptype);
+ gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype);
if (feat & NETIF_F_RXCSUM)
- gve_rx_skb_csum(rx->skb_head, desc, ptype);
+ gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
/* RSC packets must set gso_size otherwise the TCP stack will complain
* that packets are larger than MTU.
*/
if (desc->rsc) {
- err = gve_rx_complete_rsc(rx->skb_head, desc, ptype);
+ err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype);
if (err < 0)
return err;
}
- if (skb_headlen(rx->skb_head) == 0)
+ if (skb_headlen(rx->ctx.skb_head) == 0)
napi_gro_frags(napi);
else
- napi_gro_receive(napi, rx->skb_head);
+ napi_gro_receive(napi, rx->ctx.skb_head);
return 0;
}
@@ -717,18 +717,18 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* Free running counter of completed descriptors */
rx->cnt++;
- if (!rx->skb_head)
+ if (!rx->ctx.skb_head)
continue;
if (!compl_desc->end_of_packet)
continue;
work_done++;
- pkt_bytes = rx->skb_head->len;
+ pkt_bytes = rx->ctx.skb_head->len;
/* The ethernet header (first ETH_HLEN bytes) is snipped off
* by eth_type_trans.
*/
- if (skb_headlen(rx->skb_head))
+ if (skb_headlen(rx->ctx.skb_head))
pkt_bytes += ETH_HLEN;
/* gve_rx_complete_skb() will consume skb if successful */
@@ -741,8 +741,8 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
}
bytes += pkt_bytes;
- rx->skb_head = NULL;
- rx->skb_tail = NULL;
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
}
gve_rx_post_buffers_dqo(rx);
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 665ac795a1ad..a9cb241fedf4 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -144,7 +144,7 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
gve_tx_remove_from_block(priv, idx);
slots = tx->mask + 1;
- gve_clean_tx_done(priv, tx, tx->req, false);
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
netdev_tx_reset_queue(tx->netdev_txq);
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -176,6 +176,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
/* Make sure everything is zeroed to start */
memset(tx, 0, sizeof(*tx));
+ spin_lock_init(&tx->clean_lock);
tx->q_num = idx;
tx->mask = slots - 1;
@@ -303,15 +304,15 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
if (info->skb) {
- dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma),
- dma_unmap_len(&info->buf, len),
+ dma_unmap_single(dev, dma_unmap_addr(info, dma),
+ dma_unmap_len(info, len),
DMA_TO_DEVICE);
- dma_unmap_len_set(&info->buf, len, 0);
+ dma_unmap_len_set(info, len, 0);
} else {
- dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma),
- dma_unmap_len(&info->buf, len),
+ dma_unmap_page(dev, dma_unmap_addr(info, dma),
+ dma_unmap_len(info, len),
DMA_TO_DEVICE);
- dma_unmap_len_set(&info->buf, len, 0);
+ dma_unmap_len_set(info, len, 0);
}
}
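dma_unmap_addr()/dma_unmap_len() are member accessors rather than functions, which is what lets this series fold the fields from the separate gve_tx_dma_buf into the owning state struct (and, in the DQO hunks below, index them as arrays). A sketch of the declaration side with an illustrative struct (assumes <linux/dma-mapping.h>):

	struct example_buf_state {
		struct sk_buff *skb;
		DEFINE_DMA_UNMAP_ADDR(dma);	/* expands to nothing when the */
		DEFINE_DMA_UNMAP_LEN(len);	/* arch needs no unmap state   */
	};

	/* usage mirrors the hunks above */
	dma_unmap_addr_set(state, dma, mapping);
	dma_unmap_len_set(state, len, length);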
@@ -328,10 +329,16 @@ static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}
+static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);
+
/* Stops the queue if the skb cannot be transmitted. */
-static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
+static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct sk_buff *skb)
{
int bytes_required = 0;
+ u32 nic_done;
+ u32 to_do;
+ int ret;
if (!tx->raw_addressing)
bytes_required = gve_skb_fifo_bytes_required(tx, skb);
@@ -339,29 +346,28 @@ static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
if (likely(gve_can_tx(tx, bytes_required)))
return 0;
- /* No space, so stop the queue */
- tx->stop_queue++;
- netif_tx_stop_queue(tx->netdev_txq);
- smp_mb(); /* sync with restarting queue in gve_clean_tx_done() */
-
- /* Now check for resources again, in case gve_clean_tx_done() freed
- * resources after we checked and we stopped the queue after
- * gve_clean_tx_done() checked.
- *
- * gve_maybe_stop_tx() gve_clean_tx_done()
- * nsegs/can_alloc test failed
- * gve_tx_free_fifo()
- * if (tx queue stopped)
- * netif_tx_queue_wake()
- * netif_tx_stop_queue()
- * Need to check again for space here!
- */
- if (likely(!gve_can_tx(tx, bytes_required)))
- return -EBUSY;
+ ret = -EBUSY;
+ spin_lock(&tx->clean_lock);
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = nic_done - tx->done;
- netif_tx_start_queue(tx->netdev_txq);
- tx->wake_queue++;
- return 0;
+ /* Only try to clean if there is hope for TX */
+ if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
+ if (to_do > 0) {
+ to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
+ gve_clean_tx_done(priv, tx, to_do, false);
+ }
+ if (likely(gve_can_tx(tx, bytes_required)))
+ ret = 0;
+ }
+ if (ret) {
+ /* No space, so stop the queue */
+ tx->stop_queue++;
+ netif_tx_stop_queue(tx->netdev_txq);
+ }
+ spin_unlock(&tx->clean_lock);
+
+ return ret;
}
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
@@ -491,7 +497,6 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
struct gve_tx_buffer_state *info;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
- struct gve_tx_dma_buf *buf;
u64 addr;
u32 len;
int i;
@@ -515,9 +520,8 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dma_mapping_error++;
goto drop;
}
- buf = &info->buf;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
+ dma_unmap_len_set(info, len, len);
+ dma_unmap_addr_set(info, dma, addr);
payload_nfrags = shinfo->nr_frags;
if (hlen < len) {
@@ -549,10 +553,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dma_mapping_error++;
goto unmap_drop;
}
- buf = &tx->info[idx].buf;
tx->info[idx].skb = NULL;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
+ dma_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], dma, addr);
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
}
@@ -579,7 +582,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
"skb queue index out of range");
tx = &priv->tx[skb_get_queue_mapping(skb)];
- if (unlikely(gve_maybe_stop_tx(tx, skb))) {
+ if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
/* We need to ring the txq doorbell -- we have stopped the Tx
* queue for want of resources, but prior calls to gve_tx()
* may have added descriptors without ringing the doorbell.
@@ -675,19 +678,19 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
return pkts;
}
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
- struct gve_tx_ring *tx)
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+ struct gve_tx_ring *tx)
{
- u32 counter_index = be32_to_cpu((tx->q_resources->counter_index));
+ u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
+ __be32 counter = READ_ONCE(priv->counter_array[counter_index]);
- return READ_ONCE(priv->counter_array[counter_index]);
+ return be32_to_cpu(counter);
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
struct gve_tx_ring *tx = block->tx;
- bool repoll = false;
u32 nic_done;
u32 to_do;
@@ -695,17 +698,23 @@ bool gve_tx_poll(struct gve_notify_block *block, int budget)
if (budget == 0)
budget = INT_MAX;
+ /* The TX path may try to clean completed packets in order to xmit;
+ * to avoid a cleaning conflict, take the same spinlock. It yields
+ * better xmit/clean concurrency than netif's lock.
+ */
+ spin_lock(&tx->clean_lock);
/* Find out how much work there is to be done */
- tx->last_nic_done = gve_tx_load_event_counter(priv, tx);
- nic_done = be32_to_cpu(tx->last_nic_done);
- if (budget > 0) {
- /* Do as much work as we have that the budget will
- * allow
- */
- to_do = min_t(u32, (nic_done - tx->done), budget);
- gve_clean_tx_done(priv, tx, to_do, true);
- }
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = min_t(u32, (nic_done - tx->done), budget);
+ gve_clean_tx_done(priv, tx, to_do, true);
+ spin_unlock(&tx->clean_lock);
/* If we still have work we want to repoll */
- repoll |= (nic_done != tx->done);
- return repoll;
+ return nic_done != tx->done;
+}
+
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
+{
+ u32 nic_done = gve_tx_load_event_counter(priv, tx);
+
+ return nic_done != tx->done;
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 05ddb6a75c38..ec394d991668 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -85,18 +85,16 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
int j;
for (j = 0; j < cur_state->num_bufs; j++) {
- struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
-
if (j == 0) {
dma_unmap_single(tx->dev,
- dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
- DMA_TO_DEVICE);
+ dma_unmap_addr(cur_state, dma[j]),
+ dma_unmap_len(cur_state, len[j]),
+ DMA_TO_DEVICE);
} else {
dma_unmap_page(tx->dev,
- dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
- DMA_TO_DEVICE);
+ dma_unmap_addr(cur_state, dma[j]),
+ dma_unmap_len(cur_state, len[j]),
+ DMA_TO_DEVICE);
}
}
if (cur_state->skb) {
@@ -457,15 +455,15 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
const bool is_gso = skb_is_gso(skb);
u32 desc_idx = tx->dqo_tx.tail;
- struct gve_tx_pending_packet_dqo *pending_packet;
+ struct gve_tx_pending_packet_dqo *pkt;
struct gve_tx_metadata_dqo metadata;
s16 completion_tag;
int i;
- pending_packet = gve_alloc_pending_packet(tx);
- pending_packet->skb = skb;
- pending_packet->num_bufs = 0;
- completion_tag = pending_packet - tx->dqo.pending_packets;
+ pkt = gve_alloc_pending_packet(tx);
+ pkt->skb = skb;
+ pkt->num_bufs = 0;
+ completion_tag = pkt - tx->dqo.pending_packets;
gve_extract_tx_metadata_dqo(skb, &metadata);
if (is_gso) {
@@ -493,8 +491,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
/* Map the linear portion of skb */
{
- struct gve_tx_dma_buf *buf =
- &pending_packet->bufs[pending_packet->num_bufs];
u32 len = skb_headlen(skb);
dma_addr_t addr;
@@ -502,9 +498,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
if (unlikely(dma_mapping_error(tx->dev, addr)))
goto err;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
- ++pending_packet->num_bufs;
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ ++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
completion_tag,
@@ -512,8 +508,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
}
for (i = 0; i < shinfo->nr_frags; i++) {
- struct gve_tx_dma_buf *buf =
- &pending_packet->bufs[pending_packet->num_bufs];
const skb_frag_t *frag = &shinfo->frags[i];
bool is_eop = i == (shinfo->nr_frags - 1);
u32 len = skb_frag_size(frag);
@@ -523,9 +517,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
if (unlikely(dma_mapping_error(tx->dev, addr)))
goto err;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
- ++pending_packet->num_bufs;
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ ++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
completion_tag, is_eop, is_gso);
@@ -552,22 +546,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
return 0;
err:
- for (i = 0; i < pending_packet->num_bufs; i++) {
- struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
-
+ for (i = 0; i < pkt->num_bufs; i++) {
if (i == 0) {
- dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
+ dma_unmap_single(tx->dev,
+ dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
DMA_TO_DEVICE);
} else {
- dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ dma_unmap_page(tx->dev,
+ dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE);
}
}
- pending_packet->skb = NULL;
- pending_packet->num_bufs = 0;
- gve_free_pending_packet(tx, pending_packet);
+ pkt->skb = NULL;
+ pkt->num_bufs = 0;
+ gve_free_pending_packet(tx, pkt);
return -1;
}
@@ -725,12 +720,12 @@ static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
static void remove_from_list(struct gve_tx_ring *tx,
struct gve_index_list *list,
- struct gve_tx_pending_packet_dqo *pending_packet)
+ struct gve_tx_pending_packet_dqo *pkt)
{
s16 prev_index, next_index;
- prev_index = pending_packet->prev;
- next_index = pending_packet->next;
+ prev_index = pkt->prev;
+ next_index = pkt->next;
if (prev_index == -1) {
/* Node is head */
@@ -747,21 +742,18 @@ static void remove_from_list(struct gve_tx_ring *tx,
}
static void gve_unmap_packet(struct device *dev,
- struct gve_tx_pending_packet_dqo *pending_packet)
+ struct gve_tx_pending_packet_dqo *pkt)
{
- struct gve_tx_dma_buf *buf;
int i;
/* SKB linear portion is guaranteed to be mapped */
- buf = &pending_packet->bufs[0];
- dma_unmap_single(dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
- for (i = 1; i < pending_packet->num_bufs; i++) {
- buf = &pending_packet->bufs[i];
- dma_unmap_page(dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+ dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+ for (i = 1; i < pkt->num_bufs; i++) {
+ dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
}
- pending_packet->num_bufs = 0;
+ pkt->num_bufs = 0;
}
/* Completion types and expected behavior:
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 93f3dcbeeea9..88ca49cbc1e2 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -18,12 +18,16 @@ void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
+ unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
+ num_online_cpus());
int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
struct gve_tx_ring *tx = &priv->tx[queue_idx];
block->tx = tx;
tx->ntfy_id = ntfy_idx;
+ netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
+ queue_idx);
}
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
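netif_set_xps_queue() above pins each TX queue's transmitting CPUs to the CPU that services its notify block, keeping xmit and completion processing cache-local; with half the notify blocks owned by TX, queues wrap around the active CPUs. A toy model of the spread (constants are illustrative):

	#include <stdio.h>

	int main(void)
	{
		int num_ntfy_blks = 16, num_online_cpus = 4;
		int active = num_ntfy_blks / 2 < num_online_cpus ?
			     num_ntfy_blks / 2 : num_online_cpus;

		for (int q = 0; q < num_ntfy_blks / 2; q++)
			printf("txq %d -> cpu %d\n", q, q % active);
		return 0;
	}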
@@ -46,20 +50,31 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len,
- u16 pad)
+ u16 padding, struct gve_rx_ctx *ctx)
{
- struct sk_buff *skb = napi_alloc_skb(napi, len);
- void *va = page_info->page_address + pad +
- page_info->page_offset;
-
- if (unlikely(!skb))
- return NULL;
-
+ void *va = page_info->page_address + padding + page_info->page_offset;
+ int skb_linear_offset = 0;
+ bool set_protocol = false;
+ struct sk_buff *skb;
+
+ if (ctx) {
+ if (!ctx->skb_head)
+ ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);
+
+ if (unlikely(!ctx->skb_head))
+ return NULL;
+ skb = ctx->skb_head;
+ skb_linear_offset = skb->len;
+ set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
+ } else {
+ skb = napi_alloc_skb(napi, len);
+ set_protocol = true;
+ }
__skb_put(skb, len);
+ skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);
- skb_copy_to_linear_data(skb, va, len);
-
- skb->protocol = eth_type_trans(skb, dev);
+ if (set_protocol)
+ skb->protocol = eth_type_trans(skb, dev);
return skb;
}
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
index 79595940b351..6d98e69fd3b8 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.h
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -19,7 +19,7 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len,
- u16 pad);
+ u16 pad, struct gve_rx_ctx *ctx);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 37b605fed32c..c84ef494bd60 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -998,7 +998,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
hip04_config_fifo(priv);
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
hip04_update_mac_address(ndev);
ret = hip04_alloc_ring(ndev, d);
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 22bf914f2dbd..a6c18b6527f9 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -427,7 +427,7 @@ static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
}
static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
- unsigned char *mac)
+ const unsigned char *mac)
{
u32 reg;
@@ -555,7 +555,7 @@ static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(skaddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, skaddr->sa_data);
dev->addr_assign_type &= ~NET_ADDR_RANDOM;
hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
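This is one instance of a tree-wide conversion: netdev->dev_addr is on its way to becoming const, so drivers must write it through helpers such as eth_hw_addr_set(), eth_hw_addr_random(), and of_get_ethdev_address() instead of memcpy() or direct stores. The before/after shape, as a sketch (assumes <linux/etherdevice.h>):

	static int example_set_mac(struct net_device *dev, void *p)
	{
		struct sockaddr *addr = p;

		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;

		/* was: memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); */
		eth_hw_addr_set(dev, addr->sa_data);
		return 0;
	}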
@@ -841,7 +841,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
(unsigned long)phy->phy_id,
phy_modes(phy->interface));
- ret = of_get_mac_address(node, ndev->dev_addr);
+ ret = of_get_ethdev_address(node, ndev);
if (ret) {
eth_hw_addr_random(ndev);
dev_warn(dev, "using random MAC address %pM\n",
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index c1aae0fca5e9..d7e62eca050f 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -429,7 +429,7 @@ static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
- unsigned char *mac = dev->dev_addr;
+ const unsigned char *mac = dev->dev_addr;
u32 val;
val = mac[1] | (mac[0] << 8);
@@ -1219,7 +1219,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
goto out_phy_node;
}
- ret = of_get_mac_address(node, ndev->dev_addr);
+ ret = of_get_ethdev_address(node, ndev);
if (ret) {
eth_hw_addr_random(ndev);
netdev_warn(ndev, "using random MAC address %pM\n",
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 2b7db1c22321..d72657444ef3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -499,7 +499,7 @@ struct hnae_ae_ops {
u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
int (*get_mac_addr)(struct hnae_handle *handle, void **p);
- int (*set_mac_addr)(struct hnae_handle *handle, void *p);
+ int (*set_mac_addr)(struct hnae_handle *handle, const void *p);
int (*add_uc_addr)(struct hnae_handle *handle,
const unsigned char *addr);
int (*rm_uc_addr)(struct hnae_handle *handle,
@@ -558,7 +558,7 @@ struct hnae_handle {
enum hnae_media_type media_type;
struct list_head node; /* list to hnae_ae_dev->handle_list */
struct hnae_buf_ops *bops; /* operation for the buffer */
- struct hnae_queue **qs; /* array base of all queues */
+ struct hnae_queue *qs[]; /* flexible array of all queues */
};
#define ring_to_dev(ring) ((ring)->q->dev->dev)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 75e4ec569da8..bc3e406f0139 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -81,8 +81,8 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
- vf_cb = kzalloc(sizeof(*vf_cb) +
- qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
+ vf_cb = kzalloc(struct_size(vf_cb, ae_handle.qs, qnum_per_vf),
+ GFP_KERNEL);
if (unlikely(!vf_cb)) {
dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
ae_handle = ERR_PTR(-ENOMEM);
@@ -108,7 +108,6 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
goto vf_id_err;
}
- ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
for (i = 0; i < qnum_per_vf; i++) {
ae_handle->qs[i] = &ring_pair_cb->q;
ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
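The qs[] change replaces pointer arithmetic past the end of the struct with a C99 flexible array member, and struct_size() computes the co-allocation with overflow checking. A standalone userspace model (names illustrative; the kernel's struct_size() additionally saturates on overflow):

	#include <stdio.h>
	#include <stdlib.h>

	struct handle {
		int nqueues;
		void *qs[];	/* flexible array member: must be last */
	};

	int main(void)
	{
		int n = 8;
		/* struct_size(h, qs, n) computes this, overflow-checked */
		struct handle *h = calloc(1, sizeof(*h) + n * sizeof(h->qs[0]));

		if (!h)
			return 1;
		h->nqueues = n;
		h->qs[n - 1] = NULL;	/* in bounds: storage co-allocated */
		printf("allocated %zu bytes\n", sizeof(*h) + n * sizeof(h->qs[0]));
		free(h);
		return 0;
	}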
@@ -207,7 +206,7 @@ static void hns_ae_fini_queue(struct hnae_queue *q)
hns_rcb_reset_ring_hw(q);
}
-static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
+static int hns_ae_set_mac_address(struct hnae_handle *handle, const void *p)
{
int ret;
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index f387a859a201..8f391e2adcc0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -450,7 +450,7 @@ static void hns_gmac_update_stats(void *mac_drv)
+= dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
}
-static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
+static void hns_gmac_set_mac_addr(void *mac_drv, const char *mac_addr)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index f41379de2186..7edf8569514c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -240,7 +240,7 @@ int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
*@addr:mac address
*/
int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
- u32 vmid, char *addr)
+ u32 vmid, const char *addr)
{
int ret;
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 8943ffab4418..e3bb05959ba9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -348,7 +348,7 @@ struct mac_driver {
/*disable mac when disable nic or dsaf*/
void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode);
/* config mac address*/
- void (*set_mac_addr)(void *mac_drv, char *mac_addr);
+ void (*set_mac_addr)(void *mac_drv, const char *mac_addr);
/*adjust mac mode of port,include speed and duplex*/
int (*adjust_link)(void *mac_drv, enum mac_speed speed,
u32 full_duplex);
@@ -425,7 +425,8 @@ int hns_mac_init(struct dsaf_device *dsaf_dev);
void mac_adjust_link(struct net_device *net_dev);
bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
-int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid,
+ const char *addr);
int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
u32 port_num, char *addr, bool enable);
int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index cba04bfa0b3f..5526a10caac5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -210,7 +210,7 @@ struct hnae_vf_cb {
u8 port_index;
struct hns_mac_cb *mac_cb;
struct dsaf_device *dsaf_dev;
- struct hnae_handle ae_handle; /* must be the last number */
+ struct hnae_handle ae_handle; /* must be the last member */
};
struct dsaf_int_xge_src {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 401fef5f1d07..fc26ffaae620 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -255,7 +255,7 @@ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin);
}
-static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr)
+static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, const char *mac_addr)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 343c605c4be8..22a463e15678 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1194,7 +1194,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
return ret;
}
- memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, mac_addr->sa_data);
return 0;
}
@@ -1212,7 +1212,7 @@ static void hns_init_mac_addr(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
+ if (device_get_ethdev_address(priv->dev, ndev)) {
eth_hw_addr_random(ndev);
dev_warn(priv->dev, "No valid mac, use random mac %pM",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index eef1b2764d34..67b0bf310daa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
static LIST_HEAD(hnae3_client_list);
static LIST_HEAD(hnae3_ae_dev_list);
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+ const struct pci_device_id *pci_id;
+ struct hnae3_ae_dev *ae_dev;
+
+ if (!ae_algo)
+ return;
+
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
+ pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!pci_id)
+ continue;
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ pci_disable_sriov(ae_dev->pdev);
+ }
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
/* We are keeping things simple and using a single lock for all the
 * lists. This is non-critical code, so other updates, if they happen
 * in parallel, can wait.
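The new prepare helper walks the initialized ae_dev list, matches devices against the algo's PCI ID table, and disables SR-IOV before the algo itself is unregistered. A hedged sketch of the intended caller-side ordering (the caller is an assumption, not shown in this hunk):

/* Teardown order this helper enables (sketch):
 *
 *	hnae3_unregister_ae_algo_prepare(ae_algo);	// disable SR-IOV VFs
 *	hnae3_unregister_ae_algo(ae_algo);		// then detach the algo
 *
 * Disabling SR-IOV first prevents VFs from outliving the PF's ae_algo
 * binding during module unload.
 */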
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 8ba21d6dc220..3f7a9a4c59d5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -95,6 +95,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -151,6 +152,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps)
+#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -294,6 +298,8 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_MAC_TNL_STATUS,
HNAE3_DBG_CMD_SERV_INFO,
HNAE3_DBG_CMD_UMV_INFO,
+ HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ HNAE3_DBG_CMD_COAL_INFO,
HNAE3_DBG_CMD_UNKNOWN,
};
@@ -341,6 +347,9 @@ struct hnae3_dev_specs {
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
u16 max_frm_size;
u16 max_qset_num;
+ u16 umv_size;
+ u16 mc_mac_size;
+ u32 mac_stats_num;
};
struct hnae3_client_ops {
@@ -588,7 +597,7 @@ struct hnae3_ae_ops {
u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
- int (*set_mac_addr)(struct hnae3_handle *handle, void *p,
+ int (*set_mac_addr)(struct hnae3_handle *handle, const void *p,
bool is_first);
int (*do_ioctl)(struct hnae3_handle *handle,
struct ifreq *ifr, int cmd);
@@ -853,6 +862,7 @@ struct hnae3_handle {
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 2b66c59f5eaf..67364ab63a1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -137,7 +137,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@@ -256,7 +256,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@@ -298,7 +298,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{
@@ -336,6 +336,20 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.buf_len = HNS3_DBG_READ_LEN,
.init = hns3_dbg_common_file_init,
},
+ {
+ .name = "page_pool_info",
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "coalesce_info",
+ .cmd = HNAE3_DBG_CMD_COAL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
};
static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
@@ -384,6 +398,26 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}
};
+static const struct hns3_dbg_item coal_info_items[] = {
+ { "VEC_ID", 2 },
+ { "ALGO_STATE", 2 },
+ { "PROFILE_ID", 2 },
+ { "CQE_MODE", 2 },
+ { "TUNE_STATE", 2 },
+ { "STEPS_LEFT", 2 },
+ { "STEPS_RIGHT", 2 },
+ { "TIRED", 2 },
+ { "SW_GL", 2 },
+ { "SW_QL", 2 },
+ { "HW_GL", 2 },
+ { "HW_QL", 2 },
+};
+
+static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
+static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
+static const char * const
+dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
+
static void hns3_dbg_fill_content(char *content, u16 len,
const struct hns3_dbg_item *items,
const char **result, u16 size)
@@ -405,6 +439,94 @@ static void hns3_dbg_fill_content(char *content, u16 len,
*pos++ = '\0';
}
+static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
+ char **result, int i, bool is_tx)
+{
+ unsigned int gl_offset, ql_offset;
+ struct hns3_enet_coalesce *coal;
+ unsigned int reg_val;
+ unsigned int j = 0;
+ struct dim *dim;
+ bool ql_enable;
+
+ if (is_tx) {
+ coal = &tqp_vector->tx_group.coal;
+ dim = &tqp_vector->tx_group.dim;
+ gl_offset = HNS3_VECTOR_GL1_OFFSET;
+ ql_offset = HNS3_VECTOR_TX_QL_OFFSET;
+ ql_enable = tqp_vector->tx_group.coal.ql_enable;
+ } else {
+ coal = &tqp_vector->rx_group.coal;
+ dim = &tqp_vector->rx_group.dim;
+ gl_offset = HNS3_VECTOR_GL0_OFFSET;
+ ql_offset = HNS3_VECTOR_RX_QL_OFFSET;
+ ql_enable = tqp_vector->rx_group.coal.ql_enable;
+ }
+
+ sprintf(result[j++], "%d", i);
+ sprintf(result[j++], "%s", dim_state_str[dim->state]);
+ sprintf(result[j++], "%u", dim->profile_ix);
+ sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
+ sprintf(result[j++], "%s",
+ dim_tune_stat_str[dim->tune_state]);
+ sprintf(result[j++], "%u", dim->steps_left);
+ sprintf(result[j++], "%u", dim->steps_right);
+ sprintf(result[j++], "%u", dim->tired);
+ sprintf(result[j++], "%u", coal->int_gl);
+ sprintf(result[j++], "%u", coal->int_ql);
+ reg_val = readl(tqp_vector->mask_addr + gl_offset) &
+ HNS3_VECTOR_GL_MASK;
+ sprintf(result[j++], "%u", reg_val);
+ if (ql_enable) {
+ reg_val = readl(tqp_vector->mask_addr + ql_offset) &
+ HNS3_VECTOR_QL_MASK;
+ sprintf(result[j++], "%u", reg_val);
+ } else {
+ sprintf(result[j++], "NA");
+ }
+}
+
+static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len,
+ int *pos, bool is_tx)
+{
+ char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(coal_info_items)];
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(coal_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%s interrupt coalesce info:\n",
+ is_tx ? "tx" : "rx");
+ hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
+ NULL, ARRAY_SIZE(coal_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ hns3_get_coal_info(tqp_vector, result, i, is_tx);
+ hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
+ (const char **)result,
+ ARRAY_SIZE(coal_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+}
+
+static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len)
+{
+ int pos = 0;
+
+ hns3_dump_coal_info(h, buf, len, &pos, true);
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ hns3_dump_coal_info(h, buf, len, &pos, false);
+
+ return 0;
+}
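For reference, a sketch of the output the coalesce dump is expected to produce, with illustrative values only (not captured from hardware):

/* Illustrative shape of the coalesce_info dump (values invented):
 *
 * tx interrupt coalesce info:
 * VEC_ID  ALGO_STATE  PROFILE_ID  CQE_MODE  TUNE_STATE  ...  HW_GL  HW_QL
 * 0       START       2           EQE       ON_TOP           20     NA
 *
 * HW_QL prints "NA" when ql_enable is false for the vector, since the
 * QL register is only meaningful with quantity-limit coalescing on.
 */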
+
static const struct hns3_dbg_item tx_spare_info_items[] = {
{ "QUEUE_ID", 2 },
{ "COPYBREAK", 2 },
@@ -462,7 +584,7 @@ static const struct hns3_dbg_item rx_queue_info_items[] = {
{ "TAIL", 2 },
{ "HEAD", 2 },
{ "FBDNUM", 2 },
- { "PKTNUM", 2 },
+ { "PKTNUM", 5 },
{ "COPYBREAK", 2 },
{ "RING_EN", 2 },
{ "RX_RING_EN", 2 },
@@ -565,7 +687,7 @@ static const struct hns3_dbg_item tx_queue_info_items[] = {
{ "HEAD", 2 },
{ "FBDNUM", 2 },
{ "OFFSET", 2 },
- { "PKTNUM", 2 },
+ { "PKTNUM", 5 },
{ "RING_EN", 2 },
{ "TX_RING_EN", 2 },
{ "BASE_ADDR", 10 },
@@ -790,13 +912,13 @@ static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
}
static const struct hns3_dbg_item tx_bd_info_items[] = {
- { "BD_IDX", 5 },
- { "ADDRESS", 2 },
+ { "BD_IDX", 2 },
+ { "ADDRESS", 13 },
{ "VLAN_TAG", 2 },
{ "SIZE", 2 },
{ "T_CS_VLAN_TSO", 2 },
{ "OT_VLAN_TAG", 3 },
- { "TV", 2 },
+ { "TV", 5 },
{ "OLT_VLAN_LEN", 2 },
{ "PAYLEN_OL4CS", 2 },
{ "BD_FE_SC_VLD", 2 },
@@ -924,6 +1046,12 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
dev_specs->max_tm_rate);
*pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
dev_specs->max_qset_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
+ dev_specs->umv_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
+ dev_specs->mc_mac_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
+ dev_specs->mac_stats_num);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
@@ -937,6 +1065,69 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
return 0;
}
+static const struct hns3_dbg_item page_pool_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "ALLOCATE_CNT", 2 },
+ { "FREE_CNT", 6 },
+ { "POOL_SIZE(PAGE_NUM)", 2 },
+ { "ORDER", 2 },
+ { "NUMA_ID", 2 },
+ { "MAX_LEN", 2 },
+};
+
+static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
+ char **result, u32 index)
+{
+ u32 j = 0;
+
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt);
+ sprintf(result[j++], "%u",
+ atomic_read(&ring->page_pool->pages_state_release_cnt));
+ sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
+ sprintf(result[j++], "%u", ring->page_pool->p.order);
+ sprintf(result[j++], "%d", ring->page_pool->p.nid);
+ sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+}
+
+static int
+hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ int pos = 0;
+ u32 i;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
+ NULL, ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
+ hns3_dump_page_pool_info(ring, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ page_pool_info_items,
+ (const char **)result,
+ ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
{
u32 i;
@@ -978,6 +1169,14 @@ static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dbg_dump = hns3_dbg_tx_queue_info,
},
+ {
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dbg_dump = hns3_dbg_page_pool_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_COAL_INFO,
+ .dbg_dump = hns3_dbg_coal_info,
+ },
};
static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 468b8f07bf47..a2b993d62822 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1847,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
static int hns3_skb_linearize(struct hns3_enet_ring *ring,
struct sk_buff *skb,
- u8 max_non_tso_bd_num,
unsigned int bd_num)
{
/* 'bd_num == UINT_MAX' means the skb's fraglist has a
@@ -1864,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
* will not help.
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
- (!skb_is_gso(skb) && skb->len >
- HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+ (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.hw_limitation++;
u64_stats_update_end(&ring->syncp);
@@ -1900,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
goto out;
}
- if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
- bd_num))
+ if (hns3_skb_linearize(ring, skb, bd_num))
return -ENOMEM;
bd_num = hns3_tx_bd_count(skb->len);
@@ -2287,7 +2284,7 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return ret;
}
- ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
+ eth_hw_addr_set(netdev, mac_addr->sa_data);
return 0;
}
@@ -3258,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc[i].addr = 0;
+ ring->desc_cb[i].refill = 0;
}
static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
@@ -3336,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
+ ring->desc_cb[i].refill = 1;
return 0;
}
@@ -3365,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
+ ring->desc_cb[i].refill = 1;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
@@ -3373,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
ring->desc_cb[i].reuse_flag = 0;
+ ring->desc_cb[i].refill = 1;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
@@ -3479,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
int ntc = ring->next_to_clean;
int ntu = ring->next_to_use;
+ if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+ return ring->desc_num;
+
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
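With the refill flag, next_to_clean == next_to_use is no longer ambiguous. A worked example of the case the old arithmetic got wrong:

/* Worked example (desc_num = 512, ntc == ntu == 7):
 *   desc_cb[7].refill == 1 -> slot 7 holds a fresh buffer, the ring is
 *                             fully stocked: 0 descriptors unused;
 *   desc_cb[7].refill == 0 -> slot 7 was consumed and never refilled:
 *                             all 512 descriptors are unused.
 * The old expression ((ntc >= ntu) ? 0 : desc_num) + ntc - ntu yields 0
 * in both cases; the added check returns desc_num in the second.
 */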
-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
int cleand_count)
{
struct hns3_desc_cb *desc_cb;
@@ -3507,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
ret);
- break;
+
+ writel(i, ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG);
+ return true;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -3520,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
}
writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+ return false;
}
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
@@ -3824,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
{
ring->desc[ring->next_to_clean].rx.bd_base_info &=
cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+ ring->desc_cb[ring->next_to_clean].refill = 0;
ring->next_to_clean += 1;
if (unlikely(ring->next_to_clean == ring->desc_num))
@@ -4170,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
+ bool failure = false;
int recv_pkts = 0;
int err;
@@ -4178,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
while (recv_pkts < budget) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns3_nic_alloc_rx_buffers(ring, unused_count);
- unused_count = hns3_desc_unused(ring) -
- ring->pending_buf;
+ failure = failure ||
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
+ unused_count = 0;
}
/* Poll one pkt */
@@ -4199,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
}
out:
- /* Make all data has been write before submit */
- if (unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring, unused_count);
-
- return recv_pkts;
+ return failure ? budget : recv_pkts;
}
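Returning the full budget after an allocation failure keeps the queue in NAPI polling mode, so the refill is retried on the next poll rather than stalling with interrupts re-enabled and no buffers posted. A sketch of the NAPI contract this relies on (simplified caller, not the driver's exact code):

/* Caller-side NAPI pattern (sketch):
 *	work_done = hns3_clean_rx_ring(ring, budget, ...);
 *	if (work_done < budget)
 *		napi_complete_done(napi, work_done);	// re-enable IRQ
 *	// work_done == budget: stay in polling, get scheduled again
 * Reporting 'budget' on failure therefore buys the allocator another
 * attempt without waiting for a new interrupt.
 */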
static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
@@ -4933,7 +4940,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
- ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+ eth_hw_addr_set(netdev, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
} else {
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 6162d9f88e37..1715c98d906d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -186,17 +186,16 @@ enum hns3_nic_state {
#define HNS3_MAX_BD_SIZE 65535
#define HNS3_MAX_TSO_BD_NUM 63U
-#define HNS3_MAX_TSO_SIZE \
- (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
-
-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
- (HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
+#define HNS3_MAX_TSO_SIZE 1048576U
+#define HNS3_MAX_NON_TSO_SIZE 9728U
+#define HNS3_VECTOR_GL_MASK GENMASK(11, 0)
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
#define HNS3_VECTOR_GL2_OFFSET 0x300
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
+#define HNS3_VECTOR_QL_MASK GENMASK(9, 0)
#define HNS3_VECTOR_TX_QL_OFFSET 0xe00
#define HNS3_VECTOR_RX_QL_OFFSET 0xf00
@@ -332,6 +331,7 @@ struct hns3_desc_cb {
u32 length; /* length of the buffer */
u16 reuse_flag;
+ u16 refill;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
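The computed TSO and non-TSO maxima become fixed constants. The arithmetic behind them, hedged where the derivation is inferred rather than stated in the patch:

/* Old TSO bound:     HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM
 *                    = 65535 * 63 ~= 4 MB, well above what the
 *                    hardware actually accepts per TSO packet.
 * New TSO bound:     1048576U (1 MB), presumably the real HW limit.
 * New non-TSO bound: 9728U, which appears to match the driver's
 *                    maximum frame size (jumbo MTU plus headers) and
 *                    no longer scales with the BD count.
 */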
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 9c2eeaa82294..c327df9dbac4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -482,6 +482,7 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
if (hnae3_dev_phy_imp_supported(hdev))
hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_MAC_STATS_EXT_EN_B, 1);
req->compat = cpu_to_le32(compat);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 33244472e0d0..c38b57fc6c6a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1150,6 +1150,7 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
#define HCLGE_LINK_EVENT_REPORT_EN_B 0
#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
#define HCLGE_PHY_IMP_EN_B 2
+#define HCLGE_MAC_STATS_EXT_EN_B 3
struct hclge_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];
@@ -1188,7 +1189,10 @@ struct hclge_dev_specs_1_cmd {
__le16 max_frm_size;
__le16 max_qset_num;
__le16 max_int_gl;
- u8 rsv1[18];
+ u8 rsv0[2];
+ __le16 umv_size;
+ __le16 mc_mac_size;
+ u8 rsv1[12];
};
/* mac speed type defined in firmware command */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 307c9e830510..91cb578f56b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
*changed = true;
break;
case IEEE_8021QAZ_TSA_ETS:
+ /* The hardware will switch to sp mode if the bandwidth is
+ * 0, so the ets bandwidth limit must be greater than 0.
+ */
+ if (!ets->tc_tx_bw[i]) {
+ dev_err(&hdev->pdev->dev,
+ "tc%u ets bw cannot be 0\n", i);
+ return -EINVAL;
+ }
+
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
HCLGE_SCH_MODE_DWRR)
*changed = true;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 32f62cd2dd99..4e0a8c2f7c05 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -391,7 +391,7 @@ static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u16 qset_id, qset_num;
int ret;
@@ -408,12 +408,12 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%04u %#x %#x %#x %#x\n",
- qset_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2, bitmap->bit3);
+ qset_id, req.bit0, req.bit1, req.bit2,
+ req.bit3);
}
return 0;
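The debugfs DCB dumps stop casting &desc.data[1] (a __le32 in the command descriptor) to a bitfield struct in place and instead copy the low byte through an explicit endian conversion. The pattern, with one assumption flagged:

/* Old: bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
 *      reads bitfields straight out of a little-endian word: no
 *      byte-swap, and sparse rightly complains about the cast.
 * New: req.bitmap = (u8)le32_to_cpu(desc.data[1]);
 *      explicit LE->CPU conversion, then req.bit0..bit3 are read from
 *      the local copy. hclge_dbg_bitmap_cmd is presumably a union of a
 *      u8 and four 1-bit fields, so assigning the low byte populates
 *      bit0..bit3 portably.
 */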
@@ -422,7 +422,7 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pri_id, pri_num;
int ret;
@@ -439,12 +439,11 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
- pri_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2);
+ pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@@ -453,7 +452,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
@@ -466,12 +465,11 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
- pg_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2);
+ pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@@ -511,7 +509,7 @@ static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 port_id = 0;
int ret;
@@ -521,12 +519,12 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
- bitmap->bit0);
+ req.bit0);
*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
- bitmap->bit1);
+ req.bit1);
return 0;
}
@@ -1992,6 +1990,9 @@ static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
}
mutex_unlock(&hdev->vport_lock);
+ pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
+ hdev->used_mc_mac_num);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index e4aad695abcc..4c441e6a5082 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -109,7 +109,6 @@ int hclge_devlink_init(struct hclge_dev *hdev)
struct pci_dev *pdev = hdev->pdev;
struct hclge_devlink_priv *priv;
struct devlink *devlink;
- int ret;
devlink = devlink_alloc(&hclge_devlink_ops,
sizeof(struct hclge_devlink_priv), &pdev->dev);
@@ -120,28 +119,15 @@ int hclge_devlink_init(struct hclge_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- ret = devlink_register(devlink);
- if (ret) {
- dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
- ret);
- goto out_reg_fail;
- }
-
- devlink_reload_enable(devlink);
-
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
-
-out_reg_fail:
- devlink_free(devlink);
- return ret;
}
void hclge_devlink_uninit(struct hclge_dev *hdev)
{
struct devlink *devlink = hdev->devlink;
- devlink_reload_disable(devlink);
-
devlink_unregister(devlink);
devlink_free(devlink);
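This converts to the devlink lifecycle in which devlink_register() returns void and reload support is declared via devlink_set_features(), so the error-unwinding label disappears. A sketch of the resulting init/uninit shape:

/* init:   devlink_alloc(...);
 *         priv->hdev = hdev;                       // fill private data
 *         devlink_set_features(devlink, DEVLINK_F_RELOAD);
 *         devlink_register(devlink);               // void, last step
 *
 * uninit: devlink_unregister(devlink);             // first step
 *         devlink_free(devlink);
 */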
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index bb9b026ae88e..20e628c2bd44 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1243,6 +1243,9 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
.module_id = MODULE_MASTER,
.msg = "MODULE_MASTER"
}, {
+ .module_id = MODULE_HIMAC,
+ .msg = "MODULE_HIMAC"
+ }, {
.module_id = MODULE_ROCEE_TOP,
.msg = "MODULE_ROCEE_TOP"
}, {
@@ -1316,12 +1319,21 @@ static const struct hclge_hw_type_id hclge_hw_type_id_st[] = {
.type_id = GLB_ERROR,
.msg = "glb_error"
}, {
+ .type_id = LINK_ERROR,
+ .msg = "link_error"
+ }, {
+ .type_id = PTP_ERROR,
+ .msg = "ptp_error"
+ }, {
.type_id = ROCEE_NORMAL_ERR,
.msg = "rocee_normal_error"
}, {
.type_id = ROCEE_OVF_ERR,
.msg = "rocee_ovf_error"
- }
+ }, {
+ .type_id = ROCEE_BUS_ERR,
+ .msg = "rocee_bus_error"
+ },
};
static void hclge_log_error(struct device *dev, char *reg,
@@ -1560,8 +1572,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure TM QCN hw errors */
hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
- if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
+ if (en) {
+ desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+ }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 07987fb8332e..86be6fb32990 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -50,6 +50,8 @@
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29
+#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
#define HCLGE_NCSI_ERR_INT_EN 0x3
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
@@ -136,6 +138,7 @@ enum hclge_mod_name_list {
MODULE_RCB_TX = 12,
MODULE_TXDMA = 13,
MODULE_MASTER = 14,
+ MODULE_HIMAC = 15,
/* add new MODULE NAME for NIC here in order */
MODULE_ROCEE_TOP = 40,
MODULE_ROCEE_TIMER = 41,
@@ -164,9 +167,12 @@ enum hclge_err_type_list {
ETS_ERROR = 10,
NCSI_ERROR = 11,
GLB_ERROR = 12,
+ LINK_ERROR = 13,
+ PTP_ERROR = 14,
/* add new ERROR TYPE for NIC here in order */
ROCEE_NORMAL_ERR = 40,
ROCEE_OVF_ERR = 41,
+ ROCEE_BUS_ERR = 42,
/* add new ERROR TYPE for ROCEE here in order */
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index f5b8d1fee0f1..2e41aa2d1df8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -156,174 +156,210 @@ static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
- {"mac_tx_mac_pause_num",
+ {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
- {"mac_rx_mac_pause_num",
+ {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
- {"mac_tx_control_pkt_num",
+ {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
+ {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
+ {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
- {"mac_rx_control_pkt_num",
+ {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
- {"mac_tx_pfc_pkt_num",
+ {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
- {"mac_tx_pfc_pri0_pkt_num",
+ {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
- {"mac_tx_pfc_pri1_pkt_num",
+ {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
- {"mac_tx_pfc_pri2_pkt_num",
+ {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
- {"mac_tx_pfc_pri3_pkt_num",
+ {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
- {"mac_tx_pfc_pri4_pkt_num",
+ {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
- {"mac_tx_pfc_pri5_pkt_num",
+ {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
- {"mac_tx_pfc_pri6_pkt_num",
+ {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
- {"mac_tx_pfc_pri7_pkt_num",
+ {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
- {"mac_rx_pfc_pkt_num",
+ {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
+ {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
+ {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
+ {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
+ {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
+ {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
+ {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
+ {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
+ {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
- {"mac_rx_pfc_pri0_pkt_num",
+ {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
- {"mac_rx_pfc_pri1_pkt_num",
+ {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
- {"mac_rx_pfc_pri2_pkt_num",
+ {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
- {"mac_rx_pfc_pri3_pkt_num",
+ {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
- {"mac_rx_pfc_pri4_pkt_num",
+ {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
- {"mac_rx_pfc_pri5_pkt_num",
+ {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
- {"mac_rx_pfc_pri6_pkt_num",
+ {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
- {"mac_rx_pfc_pri7_pkt_num",
+ {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
- {"mac_tx_total_pkt_num",
+ {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
+ {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
+ {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
+ {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
+ {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
+ {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
+ {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
+ {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
+ {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
- {"mac_tx_total_oct_num",
+ {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
- {"mac_tx_good_pkt_num",
+ {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
- {"mac_tx_bad_pkt_num",
+ {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
- {"mac_tx_good_oct_num",
+ {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
- {"mac_tx_bad_oct_num",
+ {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
- {"mac_tx_uni_pkt_num",
+ {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
- {"mac_tx_multi_pkt_num",
+ {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
- {"mac_tx_broad_pkt_num",
+ {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
- {"mac_tx_undersize_pkt_num",
+ {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_oversize_pkt_num",
+ {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
- {"mac_tx_64_oct_pkt_num",
+ {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
- {"mac_tx_65_127_oct_pkt_num",
+ {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
- {"mac_tx_128_255_oct_pkt_num",
+ {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
- {"mac_tx_256_511_oct_pkt_num",
+ {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
- {"mac_tx_512_1023_oct_pkt_num",
+ {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
- {"mac_tx_1024_1518_oct_pkt_num",
+ {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_2047_oct_pkt_num",
+ {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
- {"mac_tx_2048_4095_oct_pkt_num",
+ {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
- {"mac_tx_4096_8191_oct_pkt_num",
+ {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
- {"mac_tx_8192_9216_oct_pkt_num",
+ {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
- {"mac_tx_9217_12287_oct_pkt_num",
+ {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
- {"mac_tx_12288_16383_oct_pkt_num",
+ {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
- {"mac_tx_1519_max_good_pkt_num",
+ {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
- {"mac_tx_1519_max_bad_pkt_num",
+ {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
- {"mac_rx_total_pkt_num",
+ {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
- {"mac_rx_total_oct_num",
+ {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
- {"mac_rx_good_pkt_num",
+ {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
- {"mac_rx_bad_pkt_num",
+ {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
- {"mac_rx_good_oct_num",
+ {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
- {"mac_rx_bad_oct_num",
+ {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
- {"mac_rx_uni_pkt_num",
+ {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
- {"mac_rx_multi_pkt_num",
+ {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
- {"mac_rx_broad_pkt_num",
+ {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
- {"mac_rx_undersize_pkt_num",
+ {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_oversize_pkt_num",
+ {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
- {"mac_rx_64_oct_pkt_num",
+ {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
- {"mac_rx_65_127_oct_pkt_num",
+ {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
- {"mac_rx_128_255_oct_pkt_num",
+ {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
- {"mac_rx_256_511_oct_pkt_num",
+ {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
- {"mac_rx_512_1023_oct_pkt_num",
+ {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
- {"mac_rx_1024_1518_oct_pkt_num",
+ {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_2047_oct_pkt_num",
+ {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
- {"mac_rx_2048_4095_oct_pkt_num",
+ {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
- {"mac_rx_4096_8191_oct_pkt_num",
+ {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
- {"mac_rx_8192_9216_oct_pkt_num",
+ {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
- {"mac_rx_9217_12287_oct_pkt_num",
+ {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
- {"mac_rx_12288_16383_oct_pkt_num",
+ {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
- {"mac_rx_1519_max_good_pkt_num",
+ {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
- {"mac_rx_1519_max_bad_pkt_num",
+ {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
- {"mac_tx_fragment_pkt_num",
+ {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
- {"mac_tx_undermin_pkt_num",
+ {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
- {"mac_tx_jabber_pkt_num",
+ {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
- {"mac_tx_err_all_pkt_num",
+ {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
- {"mac_tx_from_app_good_pkt_num",
+ {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
- {"mac_tx_from_app_bad_pkt_num",
+ {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
- {"mac_rx_fragment_pkt_num",
+ {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
- {"mac_rx_undermin_pkt_num",
+ {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
- {"mac_rx_jabber_pkt_num",
+ {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
- {"mac_rx_fcs_err_pkt_num",
+ {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
- {"mac_rx_send_app_good_pkt_num",
+ {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
- {"mac_rx_send_app_bad_pkt_num",
+ {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
@@ -451,8 +487,9 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
__le64 *desc_data;
- int i, k, n;
+ u32 data_size;
int ret;
+ u32 i;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
@@ -463,33 +500,37 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
return ret;
}
- for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
- /* for special opcode 0032, only the first desc has the head */
- if (unlikely(i == 0)) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RD_OTHER_STATS_NUM;
- }
+ /* The first desc has a 64-bit header, so the data size needs to be reduced by 1 */
+ data_size = sizeof(desc) / (sizeof(u64)) - 1;
- for (k = 0; k < n; k++) {
- *data += le64_to_cpu(*desc_data);
- data++;
- desc_data++;
- }
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is contiguous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
}
return 0;
}
-static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
+static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
+#define HCLGE_REG_NUM_PER_DESC 4
+
+ u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc *desc;
__le64 *desc_data;
- u16 i, k, n;
+ u32 data_size;
+ u32 desc_num;
int ret;
+ u32 i;
+
+ /* The first desc has a 64-bit header, so it must be accounted for */
+ desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
* so GFP_ATOMIC is more suitable here
@@ -505,21 +546,16 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
return ret;
}
- for (i = 0; i < desc_num; i++) {
- /* for special opcode 0034, only the first desc has the head */
- if (i == 0) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RD_OTHER_STATS_NUM;
- }
+ data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
- for (k = 0; k < n; k++) {
- *data += le64_to_cpu(*desc_data);
- data++;
- desc_data++;
- }
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is contiguous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
}
kfree(desc);
@@ -527,42 +563,37 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
return 0;
}
-static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
+static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
struct hclge_desc desc;
- __le32 *desc_data;
- u32 reg_num;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query mac statistic reg number, ret = %d\n",
+ ret);
return ret;
+ }
- desc_data = (__le32 *)(&desc.data[0]);
- reg_num = le32_to_cpu(*desc_data);
-
- *desc_num = 1 + ((reg_num - 3) >> 2) +
- (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
+ *reg_num = le32_to_cpu(desc.data[0]);
+ if (*reg_num == 0) {
+ dev_err(&hdev->pdev->dev,
+ "mac statistic reg number is invalid!\n");
+ return -ENODATA;
+ }
return 0;
}
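The descriptor-count arithmetic moves out of the query helper and into hclge_mac_update_stats_complete(). A worked check that the two formulas agree, using an illustrative register count:

/* Example with reg_num = 83 (illustrative value):
 *   old: desc_num = 1 + ((83 - 3) >> 2) + (((83 - 3) & 0x3) ? 1 : 0)
 *                 = 1 + 20 + 0 = 21
 *   new: desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1
 *                 = 83 / 4 + 1 = 21
 * The "+1" keeps room for the 64-bit header that only desc[0] carries,
 * and the query helper now just validates and reports reg_num.
 */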
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
- u32 desc_num;
- int ret;
-
- ret = hclge_mac_query_reg_num(hdev, &desc_num);
/* The firmware supports the new statistics acquisition method */
- if (!ret)
- ret = hclge_mac_update_stats_complete(hdev, desc_num);
- else if (ret == -EOPNOTSUPP)
- ret = hclge_mac_update_stats_defective(hdev);
+ if (hdev->ae_dev->dev_specs.mac_stats_num)
+ return hclge_mac_update_stats_complete(hdev);
else
- dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
-
- return ret;
+ return hclge_mac_update_stats_defective(hdev);
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
@@ -670,20 +701,39 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
return buff;
}
-static u64 *hclge_comm_get_stats(const void *comm_stats,
+static int hclge_comm_get_count(struct hclge_dev *hdev,
+ const struct hclge_comm_stats_str strs[],
+ u32 size)
+{
+ int count = 0;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
+ count++;
+
+ return count;
+}
+
+static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
const struct hclge_comm_stats_str strs[],
int size, u64 *data)
{
u64 *buf = data;
u32 i;
- for (i = 0; i < size; i++)
- buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
+ for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
+ *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
+ buf++;
+ }
- return buf + size;
+ return buf;
}
-static u8 *hclge_comm_get_strings(u32 stringset,
+static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
const struct hclge_comm_stats_str strs[],
int size, u8 *data)
{
@@ -694,6 +744,9 @@ static u8 *hclge_comm_get_strings(u32 stringset,
return buff;
for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
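Each stats string now records the minimum firmware statistics count (V1 or V2) needed to expose it, and the count, strings, and values paths all apply the same filter. A sketch of why that keeps ethtool output consistent, assuming the V2 threshold is numerically larger than V1:

/* With dev_specs.mac_stats_num equal to the V1 register count, every
 * V2-only entry (stats_num == HCLGE_MAC_STATS_MAX_NUM_V2) fails the
 * 'strs[i].stats_num <= mac_stats_num' test and is skipped identically
 * by hclge_comm_get_count(), hclge_comm_get_strings() and
 * hclge_comm_get_stats(), so ethtool -S sees matching counts, names
 * and values on both firmware generations.
 */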
@@ -785,7 +838,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
} else if (stringset == ETH_SS_STATS) {
- count = ARRAY_SIZE(g_mac_stats_string) +
+ count = hclge_comm_get_count(hdev, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string)) +
hclge_tqps_get_sset_count(handle, stringset);
}
@@ -795,12 +849,14 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u8 *data)
{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
u8 *p = (char *)data;
int size;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+ p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
size, p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
@@ -834,7 +890,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
+ p = hclge_comm_get_stats(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
@@ -1037,96 +1093,100 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
-static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_sr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
}
-static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_lr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
- mac->supported);
+ link_mode);
}
-static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_cr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
}
-static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_kr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
@@ -1170,9 +1230,9 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
mac->supported);
- hclge_convert_setting_sr(mac, speed_ability);
- hclge_convert_setting_lr(mac, speed_ability);
- hclge_convert_setting_cr(mac, speed_ability);
+ hclge_convert_setting_sr(speed_ability, mac->supported);
+ hclge_convert_setting_lr(speed_ability, mac->supported);
+ hclge_convert_setting_cr(speed_ability, mac->supported);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
@@ -1188,7 +1248,7 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
{
struct hclge_mac *mac = &hdev->hw.mac;
- hclge_convert_setting_kr(mac, speed_ability);
+ hclge_convert_setting_kr(speed_ability, mac->supported);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
@@ -1342,8 +1402,6 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
- if (!cfg->umv_space)
- cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
HCLGE_CFG_PF_RSS_SIZE_M,
@@ -1419,6 +1477,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
+ ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1440,6 +1499,8 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
+ ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
+ ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1460,6 +1521,21 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
+ if (!dev_specs->umv_size)
+ dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+}
+
+static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
+{
+ u32 reg_num = 0;
+ int ret;
+
+ ret = hclge_mac_query_reg_num(hdev, &reg_num);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
+ return 0;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
@@ -1468,6 +1544,10 @@ static int hclge_query_dev_specs(struct hclge_dev *hdev)
int ret;
int i;
+ ret = hclge_query_mac_stats_num(hdev);
+ if (ret)
+ return ret;
+
/* set default specifications as devices lower than version V3 do not
* support querying specifications from firmware.
*/
@@ -1549,7 +1629,10 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
- hdev->wanted_umv_size = cfg.umv_space;
+ if (cfg.umv_space)
+ hdev->wanted_umv_size = cfg.umv_space;
+ else
+ hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
hdev->gro_en = true;
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
@@ -2847,33 +2930,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task,
- delay_time);
+ mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
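These scheduling hunks pair with the WQ_UNBOUND change to alloc_workqueue() in hclge_init() near the end of this file: once the workqueue is unbound, pinning each work item to cpumask_first(&hdev->affinity_mask) with mod_delayed_work_on() gains nothing, so the plain form suffices and the affinity-notifier plumbing removed further down becomes unnecessary. A minimal self-contained sketch of the resulting pattern; the names here are illustrative, not the driver's:

	#include <linux/workqueue.h>

	static struct workqueue_struct *wq;
	static struct delayed_work dwork;

	static void dwork_fn(struct work_struct *work)
	{
		/* periodic service work would run here */
	}

	static int example_init(void)
	{
		wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "example");
		if (!wq)
			return -ENOMEM;
		INIT_DELAYED_WORK(&dwork, dwork_fn);
		/* old: mod_delayed_work_on(cpu, wq, &dwork, 0) pinned a CPU */
		mod_delayed_work(wq, &dwork, 0);	/* any eligible CPU */
		return 0;
	}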
@@ -2968,6 +3047,82 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
+static void hclge_update_speed_advertising(struct hclge_mac *mac)
+{
+ u32 speed_ability;
+
+ if (hclge_get_speed_bit(mac->speed, &speed_ability))
+ return;
+
+ switch (mac->module_type) {
+ case HNAE3_MODULE_TYPE_FIBRE_LR:
+ hclge_convert_setting_lr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_FIBRE_SR:
+ case HNAE3_MODULE_TYPE_AOC:
+ hclge_convert_setting_sr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_CR:
+ hclge_convert_setting_cr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_KR:
+ hclge_convert_setting_kr(speed_ability, mac->advertising);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_update_fec_advertising(struct hclge_mac *mac)
+{
+ if (mac->fec_mode & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->advertising);
+ else
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->advertising);
+}
+
+static void hclge_update_pause_advertising(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ bool rx_en, tx_en;
+
+ switch (hdev->fc_mode_last_time) {
+ case HCLGE_FC_RX_PAUSE:
+ rx_en = true;
+ tx_en = false;
+ break;
+ case HCLGE_FC_TX_PAUSE:
+ rx_en = false;
+ tx_en = true;
+ break;
+ case HCLGE_FC_FULL:
+ rx_en = true;
+ tx_en = true;
+ break;
+ default:
+ rx_en = false;
+ tx_en = false;
+ break;
+ }
+
+ linkmode_set_pause(mac->advertising, tx_en, rx_en);
+}
+
+static void hclge_update_advertising(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ linkmode_zero(mac->advertising);
+ hclge_update_speed_advertising(mac);
+ hclge_update_fec_advertising(mac);
+ hclge_update_pause_advertising(hdev);
+}
+
static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
@@ -2990,7 +3145,7 @@ static void hclge_update_port_capability(struct hclge_dev *hdev,
} else {
linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
mac->supported);
- linkmode_zero(mac->advertising);
+ hclge_update_advertising(hdev);
}
}
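A note on hclge_update_pause_advertising() above: linkmode_set_pause() folds the (tx_en, rx_en) pair into the two ethtool pause link-mode bits. Assuming the usual IEEE 802.3 convention (Pause tracks rx, Asym_Pause tracks rx XOR tx), the mapping works out as below; this is a sketch, not the driver's code:

	/* assumed mapping:
	 *
	 *   rx_en tx_en | Pause Asym_Pause
	 *   ------------+-----------------
	 *     0     0   |   0       0     no pause advertised
	 *     0     1   |   0       1     tx-only
	 *     1     0   |   1       1     rx (accepts asymmetric)
	 *     1     1   |   1       0     symmetric
	 */
	linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, adv, rx_en);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, adv, rx_en ^ tx_en);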
@@ -3491,33 +3646,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
-static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
- affinity_notify);
-
- cpumask_copy(&hdev->affinity_mask, mask);
-}
-
-static void hclge_irq_affinity_release(struct kref *ref)
-{
-}
-
static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
{
irq_set_affinity_hint(hdev->misc_vector.vector_irq,
&hdev->affinity_mask);
-
- hdev->affinity_notify.notify = hclge_irq_affinity_notify;
- hdev->affinity_notify.release = hclge_irq_affinity_release;
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
- &hdev->affinity_notify);
}
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
{
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
}
@@ -8498,6 +8634,9 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ if (hdev->ae_dev->dev_specs.mc_mac_size)
+ set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
+
return 0;
}
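Hedged summary of the accounting this enables: used_mc_mac_num (added to struct hclge_dev in the header hunk below) counts distinct multicast table entries across the PF and its VFs, the capability bit is set only when firmware reports a nonzero mc_mac_size, and hclge_add_mc_addr_common() further down refuses brand-new entries once that limit is reached. Condensed, with hypothetical condition names:

	/* condensed from hclge_add_mc_addr_common() below */
	if (entry_is_new && mc_mng_supported &&
	    hdev->used_mc_mac_num >= hdev->ae_dev->dev_specs.mc_mac_size)
		return -ENOSPC;	/* table full: refuse, do not count */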
@@ -8515,6 +8654,8 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev)
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_alloc_vport + 1);
mutex_unlock(&hdev->vport_lock);
+
+ hdev->used_mc_mac_num = 0;
}
static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
@@ -8769,6 +8910,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
+ bool is_new_addr = false;
int status;
/* mac addr check */
@@ -8782,6 +8924,13 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (status) {
+ if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
+ hdev->used_mc_mac_num >=
+ hdev->ae_dev->dev_specs.mc_mac_size)
+ goto err_no_space;
+
+ is_new_addr = true;
+
/* This mac addr does not exist, add a new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data));
memset(desc[1].data, 0, sizeof(desc[0].data));
@@ -8791,12 +8940,18 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
if (status)
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- /* if already overflow, not to print each time */
- if (status == -ENOSPC &&
- !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
- dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ if (status == -ENOSPC)
+ goto err_no_space;
+ else if (!status && is_new_addr)
+ hdev->used_mc_mac_num++;
return status;
+
+err_no_space:
+ /* if already in overflow state, do not print each time */
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ return -ENOSPC;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
@@ -8833,12 +8988,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
if (status)
return status;
- if (hclge_is_all_function_id_zero(desc))
+ if (hclge_is_all_function_id_zero(desc)) {
/* All the vfid is zero, so need to delete this entry */
status = hclge_remove_mac_vlan_tbl(vport, &req);
- else
+ if (!status)
+ hdev->used_mc_mac_num--;
+ } else {
/* Not all the vfid is zero, update the vfid */
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
+ }
} else if (status == -ENOENT) {
status = 0;
}
@@ -9414,7 +9572,7 @@ int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
return 0;
}
-static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
const unsigned char *new_addr = (const unsigned char *)p;
@@ -13052,7 +13210,7 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
@@ -13065,6 +13223,7 @@ static int hclge_init(void)
static void hclge_exit(void)
{
+ hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index de6afbcbfbac..9e1eede599ec 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -403,8 +403,13 @@ struct hclge_tm_info {
u8 pfc_en; /* PFC enabled or not for user priority */
};
+/* maximum number of MAC statistics for each version */
+#define HCLGE_MAC_STATS_MAX_NUM_V1 84
+#define HCLGE_MAC_STATS_MAX_NUM_V2 105
+
struct hclge_comm_stats_str {
char desc[ETH_GSTRING_LEN];
+ u32 stats_num;
unsigned long offset;
};
@@ -412,6 +417,7 @@ struct hclge_comm_stats_str {
struct hclge_mac_stats {
u64 mac_tx_mac_pause_num;
u64 mac_rx_mac_pause_num;
+ u64 rsv0;
u64 mac_tx_pfc_pri0_pkt_num;
u64 mac_tx_pfc_pri1_pkt_num;
u64 mac_tx_pfc_pri2_pkt_num;
@@ -448,7 +454,7 @@ struct hclge_mac_stats {
u64 mac_tx_1519_2047_oct_pkt_num;
u64 mac_tx_2048_4095_oct_pkt_num;
u64 mac_tx_4096_8191_oct_pkt_num;
- u64 rsv0;
+ u64 rsv1;
u64 mac_tx_8192_9216_oct_pkt_num;
u64 mac_tx_9217_12287_oct_pkt_num;
u64 mac_tx_12288_16383_oct_pkt_num;
@@ -475,7 +481,7 @@ struct hclge_mac_stats {
u64 mac_rx_1519_2047_oct_pkt_num;
u64 mac_rx_2048_4095_oct_pkt_num;
u64 mac_rx_4096_8191_oct_pkt_num;
- u64 rsv1;
+ u64 rsv2;
u64 mac_rx_8192_9216_oct_pkt_num;
u64 mac_rx_9217_12287_oct_pkt_num;
u64 mac_rx_12288_16383_oct_pkt_num;
@@ -498,6 +504,28 @@ struct hclge_mac_stats {
u64 mac_rx_pfc_pause_pkt_num;
u64 mac_tx_ctrl_pkt_num;
u64 mac_rx_ctrl_pkt_num;
+
+ /* duration of pfc */
+ u64 mac_tx_pfc_pri0_xoff_time;
+ u64 mac_tx_pfc_pri1_xoff_time;
+ u64 mac_tx_pfc_pri2_xoff_time;
+ u64 mac_tx_pfc_pri3_xoff_time;
+ u64 mac_tx_pfc_pri4_xoff_time;
+ u64 mac_tx_pfc_pri5_xoff_time;
+ u64 mac_tx_pfc_pri6_xoff_time;
+ u64 mac_tx_pfc_pri7_xoff_time;
+ u64 mac_rx_pfc_pri0_xoff_time;
+ u64 mac_rx_pfc_pri1_xoff_time;
+ u64 mac_rx_pfc_pri2_xoff_time;
+ u64 mac_rx_pfc_pri3_xoff_time;
+ u64 mac_rx_pfc_pri4_xoff_time;
+ u64 mac_rx_pfc_pri5_xoff_time;
+ u64 mac_rx_pfc_pri6_xoff_time;
+ u64 mac_rx_pfc_pri7_xoff_time;
+
+ /* duration of pause */
+ u64 mac_tx_pause_xoff_time;
+ u64 mac_rx_pause_xoff_time;
};
#define HCLGE_STATS_TIMER_INTERVAL 300UL
@@ -938,13 +966,14 @@ struct hclge_dev {
u16 priv_umv_size;
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
+ /* multicast mac address number used by PF and its VFs */
+ u16 used_mc_mac_num;
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
/* affinity mask and notify for misc interrupt */
cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp;
struct devlink *devlink;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index f314dbd3ce11..95074e91a846 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -752,6 +752,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+ for (; k < HNAE3_MAX_TC; k++)
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
}
}
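The added loop matters when num_tc shrinks between reconfigurations: without it, pg_info entries past the new num_tc would retain their old BW_PERCENT weights. Sketch of the invariant being established:

	/* active TCs share bandwidth equally; inactive TCs carry weight 0 */
	for (k = 0; k < num_tc; k++)
		tc_dwrr[k] = BW_PERCENT;
	for (; k < HNAE3_MAX_TC; k++)
		tc_dwrr[k] = 0;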
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
index f478770299c6..fdc19868b818 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -110,7 +110,6 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
struct pci_dev *pdev = hdev->pdev;
struct hclgevf_devlink_priv *priv;
struct devlink *devlink;
- int ret;
devlink =
devlink_alloc(&hclgevf_devlink_ops,
@@ -122,28 +121,15 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- ret = devlink_register(devlink);
- if (ret) {
- dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
- ret);
- goto out_reg_fail;
- }
-
- devlink_reload_enable(devlink);
-
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
-
-out_reg_fail:
- devlink_free(devlink);
- return ret;
}
void hclgevf_devlink_uninit(struct hclgevf_dev *hdev)
{
struct devlink *devlink = hdev->devlink;
- devlink_reload_disable(devlink);
-
devlink_unregister(devlink);
devlink_free(devlink);
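This and the hinic hunks below track the devlink core rework in which devlink_register() returns void and is expected to be the last step of initialization, with reload capability declared beforehand via devlink_set_features(). A hedged sketch of the resulting ordering, assuming the post-v5.15 API (struct my_priv is a placeholder):

	devlink = devlink_alloc(&ops, sizeof(struct my_priv), dev);
	if (!devlink)
		return -ENOMEM;
	/* ... populate priv, ports, health reporters ... */
	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink);	/* void: publishing cannot fail */

	/* teardown mirrors init */
	devlink_unregister(devlink);
	devlink_free(devlink);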
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 5fdac8685f95..645b2c0011e6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1349,7 +1349,7 @@ static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
-static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -2232,6 +2232,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
&hdev->state))
mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
@@ -2273,9 +2274,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
hdev->reset_attempts = 0;
hdev->last_reset_time = jiffies;
- while ((hdev->reset_type =
- hclgevf_get_reset_level(hdev, &hdev->reset_pending))
- != HNAE3_NONE_RESET)
+ hdev->reset_type =
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
hclgevf_reset(hdev);
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
@@ -3449,6 +3450,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_init_rxd_adv_layout(hdev);
+ set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
+
hdev->last_reset_time = jiffies;
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
@@ -3899,7 +3902,7 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
- hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
+ hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
if (!hclgevf_wq) {
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 883130a9b48f..28288d7e3303 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -146,6 +146,7 @@ enum hclgevf_states {
HCLGEVF_STATE_REMOVING,
HCLGEVF_STATE_NIC_REGISTERED,
HCLGEVF_STATE_ROCE_REGISTERED,
+ HCLGEVF_STATE_SERVICE_INITED,
/* task states */
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 6e11ee339f12..60ae8bfc5f69 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -303,11 +303,11 @@ void hinic_devlink_free(struct devlink *devlink)
devlink_free(devlink);
}
-int hinic_devlink_register(struct hinic_devlink_priv *priv)
+void hinic_devlink_register(struct hinic_devlink_priv *priv)
{
struct devlink *devlink = priv_to_devlink(priv);
- return devlink_register(devlink);
+ devlink_register(devlink);
}
void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
index 9e315011015c..46760d607b9b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
@@ -110,7 +110,7 @@ struct host_image_st {
struct devlink *hinic_devlink_alloc(struct device *dev);
void hinic_devlink_free(struct devlink *devlink);
-int hinic_devlink_register(struct hinic_devlink_priv *priv);
+void hinic_devlink_register(struct hinic_devlink_priv *priv);
void hinic_devlink_unregister(struct hinic_devlink_priv *priv);
int hinic_health_reporters_create(struct hinic_devlink_priv *priv);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index b431c300ef1b..a85667078b72 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -322,12 +322,10 @@ static int hinic_get_link_ksettings(struct net_device *netdev,
}
}
- bitmap_copy(link_ksettings->link_modes.supported,
- (unsigned long *)&settings.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_copy(link_ksettings->link_modes.advertising,
- (unsigned long *)&settings.advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_copy(link_ksettings->link_modes.supported,
+ (unsigned long *)&settings.supported);
+ linkmode_copy(link_ksettings->link_modes.advertising,
+ (unsigned long *)&settings.advertising);
return 0;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 56b6b04e209b..657a15447bd0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -754,17 +754,9 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
return err;
}
- err = hinic_devlink_register(hwdev->devlink_dev);
- if (err) {
- dev_err(&hwif->pdev->dev, "Failed to register devlink\n");
- hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
- return err;
- }
-
err = hinic_func_to_func_init(hwdev);
if (err) {
dev_err(&hwif->pdev->dev, "Failed to init mailbox\n");
- hinic_devlink_unregister(hwdev->devlink_dev);
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
return err;
}
@@ -787,7 +779,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
}
hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
-
+ hinic_devlink_register(hwdev->devlink_dev);
return 0;
}
@@ -799,6 +791,7 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
{
struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+ hinic_devlink_unregister(hwdev->devlink_dev);
hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
if (!HINIC_IS_VF(hwdev->hwif)) {
@@ -816,8 +809,6 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
hinic_func_to_func_free(hwdev);
- hinic_devlink_unregister(hwdev->devlink_dev);
-
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index ae707e305684..f9a766b8ac43 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -656,7 +656,7 @@ static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
err = change_mac_addr(netdev, new_mac);
if (!err)
- memcpy(netdev->dev_addr, new_mac, ETH_ALEN);
+ eth_hw_addr_set(netdev, new_mac);
return err;
}
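An aside on the helper that recurs through the remaining conversions: eth_hw_addr_set() from <linux/etherdevice.h> copies ETH_ALEN bytes into netdev->dev_addr through a central write point, in preparation for dev_addr becoming const. Its approximate shape (a sketch, not the exact upstream source):

	static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
	{
		__dev_addr_set(dev, addr, ETH_ALEN);	/* single write path */
	}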
@@ -1181,6 +1181,7 @@ static int nic_dev_init(struct pci_dev *pdev)
struct net_device *netdev;
struct hinic_hwdev *hwdev;
struct devlink *devlink;
+ u8 addr[ETH_ALEN];
int err, num_qps;
devlink = hinic_devlink_alloc(&pdev->dev);
@@ -1259,11 +1260,12 @@ static int nic_dev_init(struct pci_dev *pdev)
pci_set_drvdata(pdev, netdev);
- err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
+ err = hinic_port_get_mac(nic_dev, addr);
if (err) {
dev_err(&pdev->dev, "Failed to get mac address\n");
goto err_get_mac;
}
+ eth_hw_addr_set(netdev, addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
@@ -1379,10 +1381,8 @@ static int hinic_probe(struct pci_dev *pdev,
{
int err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n");
err = pci_request_regions(pdev, HINIC_DRV_NAME);
if (err) {
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 0696f723228a..3909c6a0af89 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -339,14 +339,13 @@ static const struct net_device_ops sun3_82586_netdev_ops = {
static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
{
- int i, size, retval;
+ int size, retval;
if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME))
return -EBUSY;
/* copy in the ethernet address from the prom */
- for(i = 0; i < 6 ; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr);
@@ -461,7 +460,7 @@ static int init586(struct net_device *dev)
ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST);
ias_cmd->cmd_link = 0xffff;
- memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
+ memcpy((char *)&ias_cmd->iaddr,(const char *) dev->dev_addr,ETH_ALEN);
p->scb->cbl_offset = make16(ias_cmd);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index d5df131b183c..bad94e4d50f4 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1741,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
goto out_free;
}
- memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, mac_addr->sa_data);
/* Deregister old MAC in pHYP */
if (port->state == EHEA_PORT_UP) {
@@ -2986,7 +2986,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
SET_NETDEV_DEV(dev, port_dev);
/* initialize net_device structure */
- memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, (u8 *)&port->mac_addr);
dev->netdev_ops = &ehea_netdev_ops;
ehea_set_ethtool_ops(dev);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 664a91af662d..6b3fc8823c54 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1013,7 +1013,7 @@ static int emac_set_mac_address(struct net_device *ndev, void *sa)
mutex_lock(&dev->link_lock);
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
emac_rx_disable(dev);
emac_tx_disable(dev);
@@ -2848,7 +2848,6 @@ static int emac_init_phy(struct emac_instance *dev)
static int emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
- const void *p;
int err;
/* Read config from device-tree */
@@ -2976,13 +2975,12 @@ static int emac_init_config(struct emac_instance *dev)
}
/* Read MAC-address */
- p = of_get_property(np, "local-mac-address", NULL);
- if (p == NULL) {
- printk(KERN_ERR "%pOF: Can't find local-mac-address property\n",
- np);
- return -ENXIO;
+ err = of_get_ethdev_address(np, dev->ndev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n");
+ return err;
}
- memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3d9b4f99d357..45ba40cf4d07 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -483,17 +483,6 @@ retry:
return rc;
}
-static u64 ibmveth_encode_mac_addr(u8 *mac)
-{
- int i;
- u64 encoded = 0;
-
- for (i = 0; i < ETH_ALEN; i++)
- encoded = (encoded << 8) | mac[i];
-
- return encoded;
-}
-
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
@@ -553,7 +542,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
+ mac_address = ether_addr_to_u64(netdev->dev_addr);
rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
adapter->rx_queue.queue_len;
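The removed ibmveth_encode_mac_addr() is byte-for-byte equivalent to ether_addr_to_u64() from <linux/etherdevice.h>: both pack the six address bytes big-endian into the low 48 bits of a u64, with addr[0] landing in bits 47:40. Quick sanity sketch:

	u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xab, 0xcd, 0xef };
	u64 v = ether_addr_to_u64(mac);	/* v == 0x020000abcdefULL */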
@@ -605,17 +594,13 @@ static int ibmveth_open(struct net_device *netdev)
}
rc = -ENOMEM;
- adapter->bounce_buffer =
- kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
- if (!adapter->bounce_buffer)
- goto out_free_irq;
- adapter->bounce_buffer_dma =
- dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
- netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- netdev_err(netdev, "unable to map bounce buffer\n");
- goto out_free_bounce_buffer;
+ adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
+ netdev->mtu + IBMVETH_BUFF_OH,
+ &adapter->bounce_buffer_dma, GFP_KERNEL);
+ if (!adapter->bounce_buffer) {
+ netdev_err(netdev, "unable to alloc bounce buffer\n");
+ goto out_free_irq;
}
netdev_dbg(netdev, "initial replenish cycle\n");
@@ -627,8 +612,6 @@ static int ibmveth_open(struct net_device *netdev)
return 0;
-out_free_bounce_buffer:
- kfree(adapter->bounce_buffer);
out_free_irq:
free_irq(netdev->irq, netdev);
out_free_buffer_pools:
@@ -702,10 +685,9 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
- dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- DMA_BIDIRECTIONAL);
- kfree(adapter->bounce_buffer);
+ dma_free_coherent(&adapter->vdev->dev,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ adapter->bounce_buffer, adapter->bounce_buffer_dma);
netdev_dbg(netdev, "close complete\n");
@@ -1483,7 +1465,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
u64 mcast_addr;
- mcast_addr = ibmveth_encode_mac_addr(ha->addr);
+ mcast_addr = ether_addr_to_u64(ha->addr);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1613,14 +1595,14 @@ static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+ mac_address = ether_addr_to_u64(addr->sa_data);
rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
if (rc) {
netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
return rc;
}
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -1727,7 +1709,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
- memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
+ eth_hw_addr_set(netdev, mac_addr_p);
if (firmware_has_feature(FW_FEATURE_CMO))
memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6aa6ff89a765..3cca51735421 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -108,6 +108,8 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *tx_scrq);
+static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -214,22 +216,77 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
return -ETIMEDOUT;
}
+/**
+ * reuse_ltb() - Check if a long term buffer can be reused
+ * @ltb: The long term buffer to be checked
+ * @size: The size of the long term buffer.
+ *
+ * An LTB can be reused unless its size has changed.
+ *
+ * Return: true if the LTB can be reused, false otherwise.
+ */
+static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
+{
+ return (ltb->buff && ltb->size == size);
+}
+
+/**
+ * alloc_long_term_buff() - Allocate a long term buffer (LTB)
+ *
+ * @adapter: ibmvnic adapter associated to the LTB
+ * @ltb: container object for the LTB
+ * @size: size of the LTB
+ *
+ * Allocate an LTB of the specified size and notify VIOS.
+ *
+ * If the given @ltb already has the correct size, reuse it. Otherwise, if
+ * it is non-NULL, free it, then allocate a new one of the correct size.
+ * Notify the VIOS either way since we may now be working with a new VIOS.
+ *
+ * Allocating larger chunks of memory during resets, especially during LPM
+ * or in low-memory situations, can cause resets to fail/timeout and the
+ * LPAR to lose connectivity. So hold onto the LTB even if we fail to
+ * communicate with the VIOS and reuse it on the next open. Free the LTB
+ * when the adapter is closed.
+ *
+ * Return: 0 if we were able to allocate the LTB and notify the VIOS,
+ * and a negative value otherwise.
+ */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
struct device *dev = &adapter->vdev->dev;
int rc;
- ltb->size = size;
- ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
- GFP_KERNEL);
+ if (!reuse_ltb(ltb, size)) {
+ dev_dbg(dev,
+ "LTB size changed from 0x%llx to 0x%x, reallocating\n",
+ ltb->size, size);
+ free_long_term_buff(adapter, ltb);
+ }
- if (!ltb->buff) {
- dev_err(dev, "Couldn't alloc long term buffer\n");
- return -ENOMEM;
+ if (ltb->buff) {
+ dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
+ ltb->map_id, ltb->size);
+ } else {
+ ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
+ GFP_KERNEL);
+ if (!ltb->buff) {
+ dev_err(dev, "Couldn't alloc long term buffer\n");
+ return -ENOMEM;
+ }
+ ltb->size = size;
+
+ ltb->map_id = find_first_zero_bit(adapter->map_ids,
+ MAX_MAP_ID);
+ bitmap_set(adapter->map_ids, ltb->map_id, 1);
+
+ dev_dbg(dev,
+ "Allocated new LTB [map %d, size 0x%llx]\n",
+ ltb->map_id, ltb->size);
}
- ltb->map_id = adapter->map_id;
- adapter->map_id++;
+
+ /* Ensure ltb is zeroed - especially when reusing it. */
+ memset(ltb->buff, 0, ltb->size);
mutex_lock(&adapter->fw_lock);
adapter->fw_done_rc = 0;
@@ -243,24 +300,20 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
if (rc) {
- dev_err(dev,
- "Long term map request aborted or timed out,rc = %d\n",
+ dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
rc);
goto out;
}
if (adapter->fw_done_rc) {
- dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
+ dev_err(dev, "Couldn't map LTB, rc = %d\n",
adapter->fw_done_rc);
rc = -1;
goto out;
}
rc = 0;
out:
- if (rc) {
- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- ltb->buff = NULL;
- }
+ /* don't free LTB on communication error - see function header */
mutex_unlock(&adapter->fw_lock);
return rc;
}
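The map_id handling above replaces a monotonically increasing u8 counter with a bitmap allocator (see the MAX_MAP_ID/map_ids declarations in ibmvnic.h near the end of this patch), so IDs released by free_long_term_buff() are recycled rather than eventually wrapping. Minimal sketch of the pattern; the exhaustion check is added here for illustration and is not in the hunk above:

	#include <linux/bitmap.h>

	DECLARE_BITMAP(map_ids, MAX_MAP_ID);

	int id = find_first_zero_bit(map_ids, MAX_MAP_ID);
	if (id >= MAX_MAP_ID)
		return -ENOSPC;		/* all IDs in use */
	bitmap_set(map_ids, id, 1);	/* claim */
	/* ... use id ... */
	bitmap_clear(map_ids, id, 1);	/* release on free */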
@@ -281,48 +334,15 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
adapter->reset_reason != VNIC_RESET_MOBILITY &&
adapter->reset_reason != VNIC_RESET_TIMEOUT)
send_request_unmap(adapter, ltb->map_id);
+
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+
ltb->buff = NULL;
+ /* mark this map_id free */
+ bitmap_clear(adapter->map_ids, ltb->map_id, 1);
ltb->map_id = 0;
}
-static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
- struct ibmvnic_long_term_buff *ltb)
-{
- struct device *dev = &adapter->vdev->dev;
- int rc;
-
- memset(ltb->buff, 0, ltb->size);
-
- mutex_lock(&adapter->fw_lock);
- adapter->fw_done_rc = 0;
-
- reinit_completion(&adapter->fw_done);
- rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
- if (rc) {
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
- if (rc) {
- dev_info(dev,
- "Reset failed, long term map request timed out or aborted\n");
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- if (adapter->fw_done_rc) {
- dev_info(dev,
- "Reset failed, attempting to free and reallocate buffer\n");
- free_long_term_buff(adapter, ltb);
- mutex_unlock(&adapter->fw_lock);
- return alloc_long_term_buff(adapter, ltb, ltb->size);
- }
- mutex_unlock(&adapter->fw_lock);
- return 0;
-}
-
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
@@ -363,31 +383,41 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
* be 0.
*/
for (i = ind_bufp->index; i < count; ++i) {
- skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+ index = pool->free_map[pool->next_free];
+
+ /* We may be reusing the skb from earlier resets. Allocate
+ * only if necessary. But since the LTB may have changed
+ * during reset (see init_rx_pools()), update LTB below
+ * even if reusing skb.
+ */
+ skb = pool->rx_buff[index].skb;
if (!skb) {
- dev_err(dev, "Couldn't replenish rx buff\n");
- adapter->replenish_no_mem++;
- break;
+ skb = netdev_alloc_skb(adapter->netdev,
+ pool->buff_size);
+ if (!skb) {
+ dev_err(dev, "Couldn't replenish rx buff\n");
+ adapter->replenish_no_mem++;
+ break;
+ }
}
- index = pool->free_map[pool->next_free];
-
- if (pool->rx_buff[index].skb)
- dev_err(dev, "Inconsistent free_map!\n");
+ pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+ pool->next_free = (pool->next_free + 1) % pool->size;
/* Copy the skb to the long term mapped DMA buffer */
offset = index * pool->buff_size;
dst = pool->long_term_buff.buff + offset;
memset(dst, 0, pool->buff_size);
dma_addr = pool->long_term_buff.addr + offset;
- pool->rx_buff[index].data = dst;
- pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+ /* add the skb to an rx_buff in the pool */
+ pool->rx_buff[index].data = dst;
pool->rx_buff[index].dma = dma_addr;
pool->rx_buff[index].skb = skb;
pool->rx_buff[index].pool_index = pool->index;
pool->rx_buff[index].size = pool->buff_size;
+ /* queue the rx_buff for the next send_subcrq_indirect */
sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
memset(sub_crq, 0, sizeof(*sub_crq));
sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
@@ -405,7 +435,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
shift = 8;
#endif
sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
- pool->next_free = (pool->next_free + 1) % pool->size;
+
+ /* if send_subcrq_indirect queue is full, flush to VIOS */
if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
i == count - 1) {
lpar_rc =
@@ -523,53 +554,12 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
return 0;
}
-static int reset_rx_pools(struct ibmvnic_adapter *adapter)
-{
- struct ibmvnic_rx_pool *rx_pool;
- u64 buff_size;
- int rx_scrqs;
- int i, j, rc;
-
- if (!adapter->rx_pool)
- return -1;
-
- buff_size = adapter->cur_rx_buf_sz;
- rx_scrqs = adapter->num_active_rx_pools;
- for (i = 0; i < rx_scrqs; i++) {
- rx_pool = &adapter->rx_pool[i];
-
- netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
-
- if (rx_pool->buff_size != buff_size) {
- free_long_term_buff(adapter, &rx_pool->long_term_buff);
- rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
- rc = alloc_long_term_buff(adapter,
- &rx_pool->long_term_buff,
- rx_pool->size *
- rx_pool->buff_size);
- } else {
- rc = reset_long_term_buff(adapter,
- &rx_pool->long_term_buff);
- }
-
- if (rc)
- return rc;
-
- for (j = 0; j < rx_pool->size; j++)
- rx_pool->free_map[j] = j;
-
- memset(rx_pool->rx_buff, 0,
- rx_pool->size * sizeof(struct ibmvnic_rx_buff));
-
- atomic_set(&rx_pool->available, 0);
- rx_pool->next_alloc = 0;
- rx_pool->next_free = 0;
- rx_pool->active = 1;
- }
-
- return 0;
-}
-
+/**
+ * release_rx_pools() - Release any rx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
@@ -584,6 +574,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
kfree(rx_pool->free_map);
+
free_long_term_buff(adapter, &rx_pool->long_term_buff);
if (!rx_pool->rx_buff)
@@ -602,21 +593,91 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->rx_pool);
adapter->rx_pool = NULL;
adapter->num_active_rx_pools = 0;
+ adapter->prev_rx_pool_size = 0;
}
+/**
+ * reuse_rx_pools() - Check if the existing rx pools can be reused.
+ * @adapter: ibmvnic adapter
+ *
+ * Check if the existing rx pools in the adapter can be reused. The
+ * pools can be reused if the pool parameters (number of pools,
+ * number of buffers in the pool and size of each buffer) have not
+ * changed.
+ *
+ * NOTE: This assumes that all pools have the same number of buffers
+ * which is the case currently. If that changes, we must fix this.
+ *
+ * Return: true if the rx pools can be reused, false otherwise.
+ */
+static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ u64 old_num_pools, new_num_pools;
+ u64 old_pool_size, new_pool_size;
+ u64 old_buff_size, new_buff_size;
+
+ if (!adapter->rx_pool)
+ return false;
+
+ old_num_pools = adapter->num_active_rx_pools;
+ new_num_pools = adapter->req_rx_queues;
+
+ old_pool_size = adapter->prev_rx_pool_size;
+ new_pool_size = adapter->req_rx_add_entries_per_subcrq;
+
+ old_buff_size = adapter->prev_rx_buf_sz;
+ new_buff_size = adapter->cur_rx_buf_sz;
+
+ /* Require buff size to be exactly the same for now */
+ if (old_buff_size != new_buff_size)
+ return false;
+
+ if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
+ return true;
+
+ if (old_num_pools < adapter->min_rx_queues ||
+ old_num_pools > adapter->max_rx_queues ||
+ old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
+ old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+ return false;
+
+ return true;
+}
+
+/**
+ * init_rx_pools(): Initialize the set of receiver pools in the adapter.
+ * @netdev: net device associated with the vnic interface
+ *
+ * Initialize the set of receiver pools in the ibmvnic adapter associated
+ * with the net_device @netdev. If possible, reuse the existing rx pools.
+ * Otherwise free any existing pools and allocate a new set of pools
+ * before initializing them.
+ *
+ * Return: 0 on success and negative value on error.
+ */
static int init_rx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_rx_pool *rx_pool;
- int rxadd_subcrqs;
+ u64 num_pools;
+ u64 pool_size; /* # of buffers in one pool */
u64 buff_size;
int i, j;
- rxadd_subcrqs = adapter->num_active_rx_scrqs;
+ pool_size = adapter->req_rx_add_entries_per_subcrq;
+ num_pools = adapter->req_rx_queues;
buff_size = adapter->cur_rx_buf_sz;
- adapter->rx_pool = kcalloc(rxadd_subcrqs,
+ if (reuse_rx_pools(adapter)) {
+ dev_dbg(dev, "Reusing rx pools\n");
+ goto update_ltb;
+ }
+
+ /* Allocate/populate the pools. */
+ release_rx_pools(adapter);
+
+ adapter->rx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_rx_pool),
GFP_KERNEL);
if (!adapter->rx_pool) {
@@ -624,26 +685,27 @@ static int init_rx_pools(struct net_device *netdev)
return -1;
}
- adapter->num_active_rx_pools = rxadd_subcrqs;
+ /* Set num_active_rx_pools early. If we fail below after partial
+ * allocation, release_rx_pools() will know how many to look for.
+ */
+ adapter->num_active_rx_pools = num_pools;
- for (i = 0; i < rxadd_subcrqs; i++) {
+ for (i = 0; i < num_pools; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev,
"Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
- i, adapter->req_rx_add_entries_per_subcrq,
- buff_size);
+ i, pool_size, buff_size);
- rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
+ rx_pool->size = pool_size;
rx_pool->index = i;
rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
- rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
GFP_KERNEL);
if (!rx_pool->free_map) {
- release_rx_pools(adapter);
- return -1;
+ dev_err(dev, "Couldn't alloc free_map %d\n", i);
+ goto out_release;
}
rx_pool->rx_buff = kcalloc(rx_pool->size,
@@ -651,69 +713,58 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->rx_buff) {
dev_err(dev, "Couldn't alloc rx buffers\n");
- release_rx_pools(adapter);
- return -1;
+ goto out_release;
}
-
- if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size)) {
- release_rx_pools(adapter);
- return -1;
- }
-
- for (j = 0; j < rx_pool->size; ++j)
- rx_pool->free_map[j] = j;
-
- atomic_set(&rx_pool->available, 0);
- rx_pool->next_alloc = 0;
- rx_pool->next_free = 0;
}
- return 0;
-}
+ adapter->prev_rx_pool_size = pool_size;
+ adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
-static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_tx_pool *tx_pool)
-{
- int rc, i;
-
- rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
- if (rc)
- return rc;
-
- memset(tx_pool->tx_buff, 0,
- tx_pool->num_buffers *
- sizeof(struct ibmvnic_tx_buff));
+update_ltb:
+ for (i = 0; i < num_pools; i++) {
+ rx_pool = &adapter->rx_pool[i];
+ dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
+ i, rx_pool->size, rx_pool->buff_size);
- for (i = 0; i < tx_pool->num_buffers; i++)
- tx_pool->free_map[i] = i;
+ if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+ rx_pool->size * rx_pool->buff_size))
+ goto out;
- tx_pool->consumer_index = 0;
- tx_pool->producer_index = 0;
+ for (j = 0; j < rx_pool->size; ++j) {
+ struct ibmvnic_rx_buff *rx_buff;
- return 0;
-}
-
-static int reset_tx_pools(struct ibmvnic_adapter *adapter)
-{
- int tx_scrqs;
- int i, rc;
+ rx_pool->free_map[j] = j;
- if (!adapter->tx_pool)
- return -1;
+ /* NOTE: Don't clear rx_buff->skb here - will leak
+ * memory! replenish_rx_pool() will reuse skbs or
+ * allocate as necessary.
+ */
+ rx_buff = &rx_pool->rx_buff[j];
+ rx_buff->dma = 0;
+ rx_buff->data = 0;
+ rx_buff->size = 0;
+ rx_buff->pool_index = 0;
+ }
- tx_scrqs = adapter->num_active_tx_pools;
- for (i = 0; i < tx_scrqs; i++) {
- ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
- rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
- if (rc)
- return rc;
- rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
- if (rc)
- return rc;
+ /* Mark pool "empty" so replenish_rx_pools() will
+ * update the LTB info for each buffer
+ */
+ atomic_set(&rx_pool->available, 0);
+ rx_pool->next_alloc = 0;
+ rx_pool->next_free = 0;
+ /* replenish_rx_pool() may have called deactivate_rx_pools()
+ * on failover. Ensure pool is active now.
+ */
+ rx_pool->active = 1;
}
-
return 0;
+out_release:
+ release_rx_pools(adapter);
+out:
+ /* We failed to allocate one or more LTBs or map them on the VIOS.
+ * Hold onto the pools and any LTBs that we did allocate/map.
+ */
+ return -1;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
@@ -735,10 +786,19 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
free_long_term_buff(adapter, &tx_pool->long_term_buff);
}
+/**
+ * release_tx_pools() - Release any tx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
int i;
+ /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
+ * both NULL or both non-NULL. So we only need to check one.
+ */
if (!adapter->tx_pool)
return;
@@ -752,84 +812,218 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->tso_pool);
adapter->tso_pool = NULL;
adapter->num_active_tx_pools = 0;
+ adapter->prev_tx_pool_size = 0;
}
static int init_one_tx_pool(struct net_device *netdev,
struct ibmvnic_tx_pool *tx_pool,
- int num_entries, int buf_size)
+ int pool_size, int buf_size)
{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int i;
- tx_pool->tx_buff = kcalloc(num_entries,
+ tx_pool->tx_buff = kcalloc(pool_size,
sizeof(struct ibmvnic_tx_buff),
GFP_KERNEL);
if (!tx_pool->tx_buff)
return -1;
- if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- num_entries * buf_size))
- return -1;
-
- tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
- if (!tx_pool->free_map)
+ tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
+ if (!tx_pool->free_map) {
+ kfree(tx_pool->tx_buff);
+ tx_pool->tx_buff = NULL;
return -1;
+ }
- for (i = 0; i < num_entries; i++)
+ for (i = 0; i < pool_size; i++)
tx_pool->free_map[i] = i;
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
- tx_pool->num_buffers = num_entries;
+ tx_pool->num_buffers = pool_size;
tx_pool->buf_size = buf_size;
return 0;
}
+/**
+ * reuse_tx_pools() - Check if the existing tx pools can be reused.
+ * @adapter: ibmvnic adapter
+ *
+ * Check if the existing tx pools in the adapter can be reused. The
+ * pools can be reused if the pool parameters (number of pools,
+ * number of buffers in the pool and mtu) have not changed.
+ *
+ * NOTE: This assumes that all pools have the same number of buffers
+ * which is the case currently. If that changes, we must fix this.
+ *
+ * Return: true if the tx pools can be reused, false otherwise.
+ */
+static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
+{
+ u64 old_num_pools, new_num_pools;
+ u64 old_pool_size, new_pool_size;
+ u64 old_mtu, new_mtu;
+
+ if (!adapter->tx_pool)
+ return false;
+
+ old_num_pools = adapter->num_active_tx_pools;
+ new_num_pools = adapter->num_active_tx_scrqs;
+ old_pool_size = adapter->prev_tx_pool_size;
+ new_pool_size = adapter->req_tx_entries_per_subcrq;
+ old_mtu = adapter->prev_mtu;
+ new_mtu = adapter->req_mtu;
+
+ /* Require MTU to be exactly the same to reuse pools for now */
+ if (old_mtu != new_mtu)
+ return false;
+
+ if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
+ return true;
+
+ if (old_num_pools < adapter->min_tx_queues ||
+ old_num_pools > adapter->max_tx_queues ||
+ old_pool_size < adapter->min_tx_entries_per_subcrq ||
+ old_pool_size > adapter->max_tx_entries_per_subcrq)
+ return false;
+
+ return true;
+}
+
+/**
+ * init_tx_pools(): Initialize the set of transmit pools in the adapter.
+ * @netdev: net device associated with the vnic interface
+ *
+ * Initialize the set of transmit pools in the ibmvnic adapter associated
+ * with the net_device @netdev. If possible, reuse the existing tx pools.
+ * Otherwise free any existing pools and allocate a new set of pools
+ * before initializing them.
+ *
+ * Return: 0 on success and negative value on error.
+ */
static int init_tx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int tx_subcrqs;
+ struct device *dev = &adapter->vdev->dev;
+ int num_pools;
+ u64 pool_size; /* # of buffers in pool */
u64 buff_size;
- int i, rc;
+ int i, j, rc;
+
+ num_pools = adapter->req_tx_queues;
+
+ /* We must notify the VIOS about the LTB on all resets - but we only
+ * need to alloc/populate pools if either the number of buffers or
+ * size of each buffer in the pool has changed.
+ */
+ if (reuse_tx_pools(adapter)) {
+ netdev_dbg(netdev, "Reusing tx pools\n");
+ goto update_ltb;
+ }
+
+ /* Allocate/populate the pools. */
+ release_tx_pools(adapter);
+
+ pool_size = adapter->req_tx_entries_per_subcrq;
+ num_pools = adapter->num_active_tx_scrqs;
- tx_subcrqs = adapter->num_active_tx_scrqs;
- adapter->tx_pool = kcalloc(tx_subcrqs,
+ adapter->tx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
return -1;
- adapter->tso_pool = kcalloc(tx_subcrqs,
+ adapter->tso_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+ /* To simplify release_tx_pools() ensure that ->tx_pool and
+ * ->tso_pool are either both NULL or both non-NULL.
+ */
if (!adapter->tso_pool) {
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
return -1;
}
- adapter->num_active_tx_pools = tx_subcrqs;
+ /* Set num_active_tx_pools early. If we fail below after partial
+ * allocation, release_tx_pools() will know how many to look for.
+ */
+ adapter->num_active_tx_pools = num_pools;
+
+ buff_size = adapter->req_mtu + VLAN_HLEN;
+ buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
+
+ for (i = 0; i < num_pools; i++) {
+ dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
+ i, adapter->req_tx_entries_per_subcrq, buff_size);
- for (i = 0; i < tx_subcrqs; i++) {
- buff_size = adapter->req_mtu + VLAN_HLEN;
- buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
- adapter->req_tx_entries_per_subcrq,
- buff_size);
- if (rc) {
- release_tx_pools(adapter);
- return rc;
- }
+ pool_size, buff_size);
+ if (rc)
+ goto out_release;
rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
IBMVNIC_TSO_BUFS,
IBMVNIC_TSO_BUF_SZ);
- if (rc) {
- release_tx_pools(adapter);
- return rc;
- }
+ if (rc)
+ goto out_release;
+ }
+
+ adapter->prev_tx_pool_size = pool_size;
+ adapter->prev_mtu = adapter->req_mtu;
+
+update_ltb:
+ /* NOTE: All tx_pools have the same number of buffers (which is
+ * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
+ * buffers (see the calls to init_one_tx_pool() for these).
+ * For consistency, we use tx_pool->num_buffers and
+ * tso_pool->num_buffers below.
+ */
+ rc = -1;
+ for (i = 0; i < num_pools; i++) {
+ struct ibmvnic_tx_pool *tso_pool;
+ struct ibmvnic_tx_pool *tx_pool;
+ u32 ltb_size;
+
+ tx_pool = &adapter->tx_pool[i];
+ ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
+ if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
+ ltb_size))
+ goto out;
+
+ dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
+ i, tx_pool->long_term_buff.buff,
+ tx_pool->num_buffers, tx_pool->buf_size);
+
+ tx_pool->consumer_index = 0;
+ tx_pool->producer_index = 0;
+
+ for (j = 0; j < tx_pool->num_buffers; j++)
+ tx_pool->free_map[j] = j;
+
+ tso_pool = &adapter->tso_pool[i];
+ ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
+ if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
+ ltb_size))
+ goto out;
+
+ dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
+ i, tso_pool->long_term_buff.buff,
+ tso_pool->num_buffers, tso_pool->buf_size);
+
+ tso_pool->consumer_index = 0;
+ tso_pool->producer_index = 0;
+
+ for (j = 0; j < tso_pool->num_buffers; j++)
+ tso_pool->free_map[j] = j;
}
return 0;
+out_release:
+ release_tx_pools(adapter);
+out:
+ /* We failed to allocate one or more LTBs or map them on the VIOS.
+ * Hold onto the pools and any LTBs that we did allocate/map.
+ */
+ return rc;
}
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
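Condensed, hedged view of the control flow init_rx_pools() and init_tx_pools() now share; the helper names below are illustrative, not the driver's:

	/* pseudocode sketch of the reuse-or-reallocate lifecycle */
	if (!reuse_pools(adapter)) {		/* geometry changed? */
		release_pools(adapter);		/* drop buffers, LTBs, map IDs */
		alloc_pools(adapter);		/* allocate at the new geometry */
		record_prev_geometry(adapter);	/* basis for the next check */
	}
	/* always re-map LTBs with the VIOS - after LPM it may be a new one */
	update_ltbs_and_reset_indices(adapter);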
@@ -1020,9 +1214,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
{
release_vpd_data(adapter);
- release_tx_pools(adapter);
- release_rx_pools(adapter);
-
release_napi(adapter);
release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
@@ -1198,8 +1389,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
return rc;
}
- adapter->map_id = 1;
-
rc = init_napi(adapter);
if (rc)
return rc;
@@ -1296,6 +1485,8 @@ static int ibmvnic_open(struct net_device *netdev)
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
goto out;
}
}
@@ -1424,9 +1615,6 @@ static void ibmvnic_cleanup(struct net_device *netdev)
ibmvnic_napi_disable(adapter);
ibmvnic_disable_irqs(adapter);
-
- clean_rx_pools(adapter);
- clean_tx_pools(adapter);
}
static int __ibmvnic_close(struct net_device *netdev)
@@ -1460,6 +1648,8 @@ static int ibmvnic_close(struct net_device *netdev)
rc = __ibmvnic_close(netdev);
ibmvnic_cleanup(netdev);
+ clean_rx_pools(adapter);
+ clean_tx_pools(adapter);
return rc;
}
@@ -1724,8 +1914,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ind_bufp = &tx_scrq->ind_buf;
if (test_bit(0, &adapter->resetting)) {
- if (!netif_subqueue_stopped(netdev, skb))
- netif_stop_subqueue(netdev, queue_num);
dev_kfree_skb_any(skb);
tx_send_failed++;
@@ -2036,9 +2224,9 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
static int do_reset(struct ibmvnic_adapter *adapter,
struct ibmvnic_rwi *rwi, u32 reset_state)
{
+ struct net_device *netdev = adapter->netdev;
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
- struct net_device *netdev = adapter->netdev;
int rc;
netdev_dbg(adapter->netdev,
@@ -2188,8 +2376,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
!adapter->rx_pool ||
!adapter->tso_pool ||
!adapter->tx_pool) {
- release_rx_pools(adapter);
- release_tx_pools(adapter);
release_napi(adapter);
release_vpd_data(adapter);
@@ -2198,16 +2384,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
} else {
- rc = reset_tx_pools(adapter);
+ rc = init_tx_pools(netdev);
if (rc) {
- netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
+ netdev_dbg(netdev,
+ "init tx pools failed (%d)\n",
rc);
goto out;
}
- rc = reset_rx_pools(adapter);
+ rc = init_rx_pools(netdev);
if (rc) {
- netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
+ netdev_dbg(netdev,
+ "init rx pools failed (%d)\n",
rc);
goto out;
}
@@ -2567,7 +2755,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
if (adapter->state == VNIC_PROBING) {
netdev_warn(netdev, "Adapter reset during probe\n");
- adapter->init_done_rc = EAGAIN;
+ adapter->init_done_rc = -EAGAIN;
ret = EAGAIN;
goto err;
}
@@ -4576,8 +4764,7 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
/* crq->change_mac_addr.mac_addr is the requested one
* crq->change_mac_addr_rsp.mac_addr is the returned valid one.
*/
- ether_addr_copy(netdev->dev_addr,
- &crq->change_mac_addr_rsp.mac_addr[0]);
+ eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
ether_addr_copy(adapter->mac_addr,
&crq->change_mac_addr_rsp.mac_addr[0]);
out:
@@ -4778,9 +4965,10 @@ static void handle_query_map_rsp(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
return;
}
- netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
- crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
- crq->query_map_rsp.free_pages);
+ netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
+ crq->query_map_rsp.page_size,
+ __be32_to_cpu(crq->query_map_rsp.tot_pages),
+ __be32_to_cpu(crq->query_map_rsp.free_pages));
}
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
@@ -5069,11 +5257,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
*/
adapter->login_pending = false;
- if (!completion_done(&adapter->init_done)) {
- complete(&adapter->init_done);
- adapter->init_done_rc = -EIO;
- }
-
if (adapter->state == VNIC_DOWN)
rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
else
@@ -5094,6 +5277,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
rc);
adapter->failover_pending = false;
}
+
+ if (!completion_done(&adapter->init_done)) {
+ complete(&adapter->init_done);
+ if (!adapter->init_done_rc)
+ adapter->init_done_rc = -EAGAIN;
+ }
+
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
@@ -5414,6 +5604,9 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
crq->cur = 0;
spin_lock_init(&crq->lock);
+ /* process any CRQs that were queued before we enabled interrupts */
+ tasklet_schedule(&adapter->tasklet);
+
return retrc;
req_irq_failed:
@@ -5527,9 +5720,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
adapter->login_pending = false;
+ memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
+ /* map_ids start at 1, so ensure map_id 0 is always "in-use" */
+ bitmap_set(adapter->map_ids, 0, 1);
ether_addr_copy(adapter->mac_addr, mac_addr_p);
- ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ eth_hw_addr_set(netdev, adapter->mac_addr);
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmvnic_netdev_ops;
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
@@ -5547,6 +5743,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_completion(&adapter->reset_done);
init_completion(&adapter->stats_done);
clear_bit(0, &adapter->resetting);
+ adapter->prev_rx_buf_sz = 0;
+ adapter->prev_mtu = 0;
init_success = false;
do {
@@ -5558,7 +5756,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
rc = ibmvnic_reset_init(adapter, false);
- } while (rc == EAGAIN);
+ } while (rc == -EAGAIN);
/* We are ignoring the error from ibmvnic_reset_init() assuming that the
* partner is not ready. CRQ is not active. When the partner becomes
@@ -5647,6 +5845,8 @@ static void ibmvnic_remove(struct vio_dev *dev)
unregister_netdevice(netdev);
release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 22df602323bc..b8e42f67d897 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -827,7 +827,7 @@ struct ibmvnic_rx_buff {
struct ibmvnic_rx_pool {
struct ibmvnic_rx_buff *rx_buff;
- int size;
+ int size; /* # of buffers in the pool */
int index;
int buff_size;
atomic_t available;
@@ -967,6 +967,7 @@ struct ibmvnic_adapter {
u64 min_mtu;
u64 max_mtu;
u64 req_mtu;
+ u64 prev_mtu;
u64 max_multicast_filters;
u64 vlan_header_insertion;
u64 rx_vlan_header_insertion;
@@ -979,13 +980,18 @@ struct ibmvnic_adapter {
u64 opt_tx_entries_per_subcrq;
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
- u8 map_id;
+#define MAX_MAP_ID 255
+ DECLARE_BITMAP(map_ids, MAX_MAP_ID);
u32 num_active_rx_scrqs;
u32 num_active_rx_pools;
u32 num_active_rx_napi;
u32 num_active_tx_scrqs;
u32 num_active_tx_pools;
+
+ u32 prev_rx_pool_size;
+ u32 prev_tx_pool_size;
u32 cur_rx_buf_sz;
+ u32 prev_rx_buf_sz;
struct tasklet_struct tasklet;
enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index ed8ea63bb172..0b274d8fa45b 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -313,6 +313,20 @@ config ICE
To compile this driver as a module, choose M here. The module
will be called ice.
+config ICE_SWITCHDEV
+ bool "Switchdev Support"
+ default y
+ depends on ICE && NET_SWITCHDEV
+ help
+ Switchdev support provides internal SR-IOV packet steering and switching.
+
+ To enable it on a running kernel, use the devlink tool:
+ # devlink dev eswitch set pci/0000:XX:XX.X mode switchdev
+
+ Say Y here if you want to use Switchdev in the driver.
+
+ If unsure, say N.
+
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 09ae1939e6db..5039a2536951 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2259,7 +2259,7 @@ static int e100_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
e100_exec_cb(nic, NULL, e100_setup_iaaddr);
return 0;
@@ -2921,7 +2921,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e100_phy_init(nic);
- memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)nic->eeprom);
if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!eeprom_bad_csum_allow) {
netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index bed4f040face..669060a2e6aa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1103,7 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e_err(probe, "EEPROM Read Error\n");
}
/* don't block initialization here due to bad MAC address */
- memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr))
e_err(probe, "Invalid MAC Address\n");
@@ -2209,7 +2209,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
if (hw->mac_type == e1000_82542_rev2_0)
e1000_enter_82542_rst(adapter);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
e1000_rar_set(hw, hw->mac_addr, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 5b2143f4b1f8..c3def0ee7788 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -21,6 +21,7 @@
#include <linux/ptp_classify.h>
#include <linux/mii.h>
#include <linux/mdio.h>
+#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include "hw.h"
@@ -113,7 +114,8 @@ enum e1000_boards {
board_pch2lan,
board_pch_lpt,
board_pch_spt,
- board_pch_cnp
+ board_pch_cnp,
+ board_pch_tgp
};
struct e1000_ps_page {
@@ -499,6 +501,7 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
+extern const struct e1000_info e1000_pch_tgp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 60c582a16821..5e4fc9b4e2ad 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -4813,7 +4813,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- u32 ctrl_ext, txdctl, snoop;
+ u32 ctrl_ext, txdctl, snoop, fflt_dbg;
s32 ret_val;
u16 i;
@@ -4872,6 +4872,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
snoop = (u32)~(PCIE_NO_SNOOP_ALL);
e1000e_set_pcie_no_snoop(hw, snoop);
+ /* Enable workaround for packet loss issue on TGP PCH:
+ * do not gate the DMA clock from the modPHY block.
+ */
+ if (mac->type >= e1000_pch_tgp) {
+ fflt_dbg = er32(FFLT_DBG);
+ fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
+ ew32(FFLT_DBG, fflt_dbg);
+ }
+
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
@@ -5992,3 +6001,23 @@ const struct e1000_info e1000_pch_cnp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_tgp_info = {
+ .mac = e1000_pch_tgp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index d6a092e5ee74..2504b11c3169 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -289,6 +289,9 @@
/* Proprietary Latency Tolerance Reporting PCI Capability */
#define E1000_PCI_LTR_CAP_LPT 0xA8
+/* Don't gate wake DMA clock */
+#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK 0x1000
+
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 900b3ab998bd..44e2dc8328a2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_lpt] = &e1000_pch_lpt_info,
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
+ [board_pch_tgp] = &e1000_pch_tgp_info,
};
struct e1000_reg_info {
@@ -2549,7 +2550,6 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (adapter->link_speed != SPEED_1000) {
- current_itr = 0;
new_itr = 4000;
goto set_itr_now;
}
@@ -4786,7 +4786,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
@@ -7589,7 +7589,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"NVM Read Error while reading MAC address\n");
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
@@ -7896,28 +7896,28 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2fb52bd6fc0e..2cca9e84e31e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -990,7 +990,7 @@ static int fm10k_set_mac(struct net_device *dev, void *p)
}
if (!err) {
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
ether_addr_copy(hw->mac.addr, addr->sa_data);
dev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index adfa2768f024..b473cb7d7c57 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -300,7 +300,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface)
if (is_valid_ether_addr(hw->mac.perm_addr)) {
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
- ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
+ eth_hw_addr_set(netdev, hw->mac.perm_addr);
netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
@@ -2045,7 +2045,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
- ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ eth_hw_addr_set(netdev, hw->mac.addr);
ether_addr_copy(netdev->perm_addr, hw->mac.addr);
if (!is_valid_ether_addr(netdev->perm_addr)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 39fb3d57c057..3d528fba754b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -435,7 +435,7 @@ static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch)
return !!ch->fwd;
}
-static inline u8 *i40e_channel_mac(struct i40e_channel *ch)
+static inline const u8 *i40e_channel_mac(struct i40e_channel *ch)
{
if (i40e_is_channel_macvlan(ch))
return ch->fwd->netdev->dev_addr;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e04b540cedc8..ba862131b9bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1587,7 +1587,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
i40e_add_mac_filter(vsi, netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -13425,7 +13425,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
i40e_add_mac_filter(vsi, broadcast);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- ether_addr_copy(netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(netdev, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index e7e778ca074c..ea06e957393e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -193,42 +193,40 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
- struct xdp_buff **bi, *xdp;
+ struct xdp_buff **xdp;
+ u32 nb_buffs, i;
dma_addr_t dma;
- bool ok = true;
rx_desc = I40E_RX_DESC(rx_ring, ntu);
- bi = i40e_rx_bi(rx_ring, ntu);
- do {
- xdp = xsk_buff_alloc(rx_ring->xsk_pool);
- if (!xdp) {
- ok = false;
- goto no_buffers;
- }
- *bi = xdp;
- dma = xsk_buff_xdp_get_dma(xdp);
+ xdp = i40e_rx_bi(rx_ring, ntu);
+
+ nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+ nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+ if (!nb_buffs)
+ return false;
+
+ i = nb_buffs;
+ while (i--) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->read.hdr_addr = 0;
rx_desc++;
- bi++;
- ntu++;
-
- if (unlikely(ntu == rx_ring->count)) {
- rx_desc = I40E_RX_DESC(rx_ring, 0);
- bi = i40e_rx_bi(rx_ring, 0);
- ntu = 0;
- }
- } while (--count);
+ xdp++;
+ }
-no_buffers:
- if (rx_ring->next_to_use != ntu) {
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.qword1.status_error_len = 0;
- i40e_release_rx_desc(rx_ring, ntu);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ xdp = i40e_rx_bi(rx_ring, 0);
+ ntu = 0;
}
- return ok;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+ i40e_release_rx_desc(rx_ring, ntu);
+
+ return count == nb_buffs;
}
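The rewrite above leans on xsk_buff_alloc_batch(), which fills a caller-supplied array of xdp_buff pointers and returns how many buffers it actually allocated; that is why the request is capped at the ring wrap point and success is reported only when the full count was satisfied. A hedged sketch of the contract (write_rx_descriptor is a hypothetical per-buffer step):

    u32 got = xsk_buff_alloc_batch(pool, xdp_array, max);
    /* got is 0..max; entries past got are not touched */
    while (got--)
            write_rx_descriptor(xdp_array[got]);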
/**
@@ -365,7 +363,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
break;
bi = *i40e_rx_bi(rx_ring, next_to_clean);
- bi->data_end = bi->data + size;
+ xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, bi);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 68c80f04113c..e6e7c1da47fb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -177,6 +177,7 @@ enum iavf_state_t {
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__IAVF_INIT_SW, /* got resources, setting up structs */
+ __IAVF_INIT_FAILED, /* init failed, restarting procedure */
__IAVF_RESETTING, /* in reset */
__IAVF_COMM_FAILED, /* communication with PF failed */
/* Below here, watchdog is running */
@@ -225,7 +226,6 @@ struct iavf_adapter {
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work client_task;
- struct delayed_work init_task;
wait_queue_head_t down_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
@@ -312,6 +312,7 @@ struct iavf_adapter {
struct iavf_hw hw; /* defined in iavf_type.h */
enum iavf_state_t state;
+ enum iavf_state_t last_state;
unsigned long crit_section;
struct delayed_work watchdog_task;
@@ -393,6 +394,51 @@ struct iavf_device {
extern char iavf_driver_name[];
extern struct workqueue_struct *iavf_wq;
+static inline const char *iavf_state_str(enum iavf_state_t state)
+{
+ switch (state) {
+ case __IAVF_STARTUP:
+ return "__IAVF_STARTUP";
+ case __IAVF_REMOVE:
+ return "__IAVF_REMOVE";
+ case __IAVF_INIT_VERSION_CHECK:
+ return "__IAVF_INIT_VERSION_CHECK";
+ case __IAVF_INIT_GET_RESOURCES:
+ return "__IAVF_INIT_GET_RESOURCES";
+ case __IAVF_INIT_SW:
+ return "__IAVF_INIT_SW";
+ case __IAVF_INIT_FAILED:
+ return "__IAVF_INIT_FAILED";
+ case __IAVF_RESETTING:
+ return "__IAVF_RESETTING";
+ case __IAVF_COMM_FAILED:
+ return "__IAVF_COMM_FAILED";
+ case __IAVF_DOWN:
+ return "__IAVF_DOWN";
+ case __IAVF_DOWN_PENDING:
+ return "__IAVF_DOWN_PENDING";
+ case __IAVF_TESTING:
+ return "__IAVF_TESTING";
+ case __IAVF_RUNNING:
+ return "__IAVF_RUNNING";
+ default:
+ return "__IAVF_UNKNOWN_STATE";
+ }
+}
+
+static inline void iavf_change_state(struct iavf_adapter *adapter,
+ enum iavf_state_t state)
+{
+ if (adapter->state != state) {
+ adapter->last_state = adapter->state;
+ adapter->state = state;
+ }
+ dev_dbg(&adapter->pdev->dev,
+ "state transition from:%s to:%s\n",
+ iavf_state_str(adapter->last_state),
+ iavf_state_str(adapter->state));
+}
+
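Because the helper records last_state on every real transition, the reworked watchdog later in this diff can bounce a failed init step back to wherever it came from. Sketch of the retry shape:

    iavf_change_state(adapter, __IAVF_INIT_FAILED);
    /* ... on the next watchdog pass ... */
    iavf_change_state(adapter, adapter->last_state);  /* resume failed step */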
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index cada4e0e40b4..847d67e32a54 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -14,7 +14,7 @@
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
-static int iavf_init_get_resources(struct iavf_adapter *adapter);
+static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);
char iavf_driver_name[] = "iavf";
@@ -52,6 +52,15 @@ static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;
/**
+ * iavf_pdev_to_adapter - go from pci_dev to adapter
+ * @pdev: pci_dev pointer
+ */
+static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
+{
+ return netdev_priv(pci_get_drvdata(pdev));
+}
+
+/**
* iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to fill out
@@ -960,7 +969,7 @@ static void iavf_configure(struct iavf_adapter *adapter)
**/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
- adapter->state = __IAVF_RUNNING;
+ iavf_change_state(adapter, __IAVF_RUNNING);
clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
iavf_napi_enable_all(adapter);
@@ -1688,9 +1697,9 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
*
* Function processes the __IAVF_STARTUP driver state.
* On success the state is changed to __IAVF_INIT_VERSION_CHECK;
- * when fails it returns -EAGAIN
+ * on failure the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_startup(struct iavf_adapter *adapter)
+static void iavf_startup(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
@@ -1729,9 +1738,10 @@ static int iavf_startup(struct iavf_adapter *adapter)
iavf_shutdown_adminq(hw);
goto err;
}
- adapter->state = __IAVF_INIT_VERSION_CHECK;
+ iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
+ return;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1740,9 +1750,9 @@ err:
*
* Function processes the __IAVF_INIT_VERSION_CHECK driver state.
* On success the state is changed to __IAVF_INIT_GET_RESOURCES;
- * when fails it returns -EAGAIN
+ * on failure the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_init_version_check(struct iavf_adapter *adapter)
+static void iavf_init_version_check(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
@@ -1753,7 +1763,7 @@ static int iavf_init_version_check(struct iavf_adapter *adapter)
if (!iavf_asq_done(hw)) {
dev_err(&pdev->dev, "Admin queue command never completed\n");
iavf_shutdown_adminq(hw);
- adapter->state = __IAVF_STARTUP;
+ iavf_change_state(adapter, __IAVF_STARTUP);
goto err;
}
@@ -1776,10 +1786,10 @@ static int iavf_init_version_check(struct iavf_adapter *adapter)
err);
goto err;
}
- adapter->state = __IAVF_INIT_GET_RESOURCES;
-
+ iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
+ return;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1789,9 +1799,9 @@ err:
* Function processes the __IAVF_INIT_GET_RESOURCES driver state and
* finishes the driver initialization procedure.
* On success the state is changed to __IAVF_DOWN;
- * when fails it returns -EAGAIN
+ * on failure the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_init_get_resources(struct iavf_adapter *adapter)
+static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -1819,7 +1829,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
*/
iavf_shutdown_adminq(hw);
dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
- return 0;
+ return;
}
if (err) {
dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
@@ -1847,7 +1857,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
eth_hw_addr_random(netdev);
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
@@ -1893,7 +1903,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
rtnl_unlock();
@@ -1911,7 +1921,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
else
iavf_init_rss(adapter);
- return err;
+ return;
err_mem:
iavf_free_rss(adapter);
err_register:
@@ -1922,7 +1932,7 @@ err_alloc:
kfree(adapter->vf_res);
adapter->vf_res = NULL;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1941,9 +1951,50 @@ static void iavf_watchdog_task(struct work_struct *work)
goto restart_watchdog;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
- adapter->state = __IAVF_COMM_FAILED;
+ iavf_change_state(adapter, __IAVF_COMM_FAILED);
+
+ if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
+ adapter->state != __IAVF_RESETTING) {
+ iavf_change_state(adapter, __IAVF_RESETTING);
+ adapter->aq_required = 0;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ }
switch (adapter->state) {
+ case __IAVF_STARTUP:
+ iavf_startup(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ return;
+ case __IAVF_INIT_VERSION_CHECK:
+ iavf_init_version_check(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ return;
+ case __IAVF_INIT_GET_RESOURCES:
+ iavf_init_get_resources(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_FAILED:
+ if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to communicate with PF; waiting before retry\n");
+ adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
+ iavf_shutdown_adminq(hw);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq,
+ &adapter->watchdog_task, (5 * HZ));
+ return;
+ }
+ /* Try again from the failed step */
+ iavf_change_state(adapter, adapter->last_state);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
+ return;
case __IAVF_COMM_FAILED:
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -1952,23 +2003,19 @@ static void iavf_watchdog_task(struct work_struct *work)
/* A chance for redemption! */
dev_err(&adapter->pdev->dev,
"Hardware came out of reset. Attempting reinit.\n");
- adapter->state = __IAVF_STARTUP;
- adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- queue_delayed_work(iavf_wq, &adapter->init_task, 10);
- mutex_unlock(&adapter->crit_lock);
- /* Don't reschedule the watchdog, since we've restarted
- * the init task. When init_task contacts the PF and
+ /* When init task contacts the PF and
* gets everything set up again, it'll restart the
* watchdog for us. Down, boy. Sit. Stay. Woof.
*/
- return;
+ iavf_change_state(adapter, __IAVF_STARTUP);
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
queue_delayed_work(iavf_wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
- goto watchdog_done;
+ return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
@@ -1991,38 +2038,40 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->state == __IAVF_RUNNING)
iavf_request_stats(adapter);
}
+ if (adapter->state == __IAVF_RUNNING)
+ iavf_detect_recover_hung(&adapter->vsi);
break;
case __IAVF_REMOVE:
mutex_unlock(&adapter->crit_lock);
return;
default:
- goto restart_watchdog;
+ return;
}
- /* check for hw reset */
+ /* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
+ iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
queue_work(iavf_wq, &adapter->reset_task);
- goto watchdog_done;
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq,
+ &adapter->watchdog_task, HZ * 2);
+ return;
}
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
-watchdog_done:
- if (adapter->state == __IAVF_RUNNING ||
- adapter->state == __IAVF_COMM_FAILED)
- iavf_detect_recover_hung(&adapter->vsi);
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
+ queue_work(iavf_wq, &adapter->adminq_task);
if (adapter->aq_required)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
else
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
- queue_work(iavf_wq, &adapter->adminq_task);
}
static void iavf_disable_vf(struct iavf_adapter *adapter)
@@ -2081,7 +2130,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
adapter->netdev->flags &= ~IFF_UP;
mutex_unlock(&adapter->crit_lock);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
@@ -2191,7 +2240,7 @@ continue_reset:
}
iavf_irq_disable(adapter);
- adapter->state = __IAVF_RESETTING;
+ iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
/* free the Tx/Rx rings and descriptors, might be better to just
@@ -2291,11 +2340,14 @@ continue_reset:
iavf_configure(adapter);
+ /* iavf_up_complete() will switch the device back
+ * to __IAVF_RUNNING
+ */
iavf_up_complete(adapter);
iavf_irq_enable(adapter, true);
} else {
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
mutex_unlock(&adapter->client_lock);
@@ -2305,6 +2357,8 @@ continue_reset:
reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
+ if (running)
+ iavf_change_state(adapter, __IAVF_RUNNING);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
iavf_close(netdev);
}
@@ -3226,6 +3280,13 @@ static int iavf_open(struct net_device *netdev)
goto err_unlock;
}
+ if (adapter->state == __IAVF_RUNNING &&
+ !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
+ dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
+ err = 0;
+ goto err_unlock;
+ }
+
/* allocate transmit descriptors */
err = iavf_setup_all_tx_resources(adapter);
if (err)
@@ -3297,7 +3358,7 @@ static int iavf_close(struct net_device *netdev)
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
iavf_down(adapter);
- adapter->state = __IAVF_DOWN_PENDING;
+ iavf_change_state(adapter, __IAVF_DOWN_PENDING);
iavf_free_traffic_irqs(adapter);
mutex_unlock(&adapter->crit_lock);
@@ -3631,71 +3692,13 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
/**
- * iavf_init_task - worker thread to perform delayed initialization
- * @work: pointer to work_struct containing our data
- *
- * This task completes the work that was begun in probe. Due to the nature
- * of VF-PF communications, we may need to wait tens of milliseconds to get
- * responses back from the PF. Rather than busy-wait in probe and bog down the
- * whole system, we'll do it in a task so we can sleep.
- * This task only runs during driver init. Once we've established
- * communications with the PF driver and set up our netdev, the watchdog
- * takes over.
- **/
-static void iavf_init_task(struct work_struct *work)
-{
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- init_task.work);
- struct iavf_hw *hw = &adapter->hw;
-
- if (iavf_lock_timeout(&adapter->crit_lock, 5000)) {
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
- return;
- }
- switch (adapter->state) {
- case __IAVF_STARTUP:
- if (iavf_startup(adapter) < 0)
- goto init_failed;
- break;
- case __IAVF_INIT_VERSION_CHECK:
- if (iavf_init_version_check(adapter) < 0)
- goto init_failed;
- break;
- case __IAVF_INIT_GET_RESOURCES:
- if (iavf_init_get_resources(adapter) < 0)
- goto init_failed;
- goto out;
- default:
- goto init_failed;
- }
-
- queue_delayed_work(iavf_wq, &adapter->init_task,
- msecs_to_jiffies(30));
- goto out;
-init_failed:
- if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
- dev_err(&adapter->pdev->dev,
- "Failed to communicate with PF; waiting before retry\n");
- adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
- iavf_shutdown_adminq(hw);
- adapter->state = __IAVF_STARTUP;
- queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
- goto out;
- }
- queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
-out:
- mutex_unlock(&adapter->crit_lock);
-}
-
-/**
* iavf_shutdown - Shutdown the device in preparation for a reboot
* @pdev: pci device structure
**/
static void iavf_shutdown(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
+ struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
@@ -3705,7 +3708,7 @@ static void iavf_shutdown(struct pci_dev *pdev)
if (iavf_lock_timeout(&adapter->crit_lock, 5000))
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
/* Prevent the watchdog from running. */
- adapter->state = __IAVF_REMOVE;
+ iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
mutex_unlock(&adapter->crit_lock);
@@ -3778,7 +3781,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
- adapter->state = __IAVF_STARTUP;
+ iavf_change_state(adapter, __IAVF_STARTUP);
/* Call save state here because it relies on the adapter struct. */
pci_save_state(pdev);
@@ -3822,8 +3825,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
- INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
- queue_delayed_work(iavf_wq, &adapter->init_task,
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
@@ -3880,10 +3882,11 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
static int __maybe_unused iavf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter;
u32 err;
+ adapter = iavf_pdev_to_adapter(pdev);
+
pci_set_master(pdev);
rtnl_lock();
@@ -3902,7 +3905,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
queue_work(iavf_wq, &adapter->reset_task);
- netif_device_attach(netdev);
+ netif_device_attach(adapter->netdev);
return err;
}
@@ -3918,8 +3921,9 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
**/
static void iavf_remove(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
+ enum iavf_state_t prev_state = adapter->last_state;
+ struct net_device *netdev = adapter->netdev;
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_adv_rss *rss, *rsstmp;
@@ -3929,8 +3933,8 @@ static void iavf_remove(struct pci_dev *pdev)
int err;
/* Indicate we are in remove and not to run reset_task */
mutex_lock(&adapter->remove_lock);
- cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_delayed_work_sync(&adapter->client_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
@@ -3954,13 +3958,25 @@ static void iavf_remove(struct pci_dev *pdev)
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
/* Shut down all the garbage mashers on the detention level */
- adapter->state = __IAVF_REMOVE;
+ iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
iavf_misc_irq_disable(adapter);
iavf_free_misc_irq(adapter);
+
+ /* In case we enter iavf_remove from an erroneous state, free the
+ * traffic IRQs here so that calling iavf_reset_interrupt_capability()
+ * does not cause a kernel crash.
+ */
+ if ((adapter->last_state == __IAVF_RESETTING &&
+ prev_state != __IAVF_DOWN) ||
+ (adapter->last_state == __IAVF_RUNNING &&
+ !(netdev->flags & IFF_UP)))
+ iavf_free_traffic_irqs(adapter);
+
iavf_reset_interrupt_capability(adapter);
iavf_free_q_vectors(adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 3c735968e1b8..8c3f0f77cb57 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1685,7 +1685,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!v_retval)
iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
@@ -1716,7 +1716,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
/* refresh current mac address if changed */
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
@@ -1735,7 +1735,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
if (adapter->state == __IAVF_DOWN_PENDING) {
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
break;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 4f538cdf42c1..c36faa7d1471 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -26,10 +26,13 @@ ice-y := ice_main.o \
ice_devlink.o \
ice_fw_update.o \
ice_lag.o \
- ice_ethtool.o
+ ice_ethtool.o \
+ ice_repr.o \
+ ice_tc_lib.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
+ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 3c4f08d20414..bf4ecd9a517c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,10 +34,15 @@
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
@@ -55,6 +60,7 @@
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
+#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_virtchnl_pf.h"
@@ -63,6 +69,8 @@
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
+#include "ice_repr.h"
+#include "ice_eswitch.h"
#include "ice_lag.h"
#define ICE_BAR0 0
@@ -84,6 +92,7 @@
#define ICE_FDIR_MSIX 2
#define ICE_RDMA_NUM_AEQ_MSIX 4
#define ICE_MIN_RDMA_MSIX 2
+#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -101,6 +110,10 @@
#define ICE_INVAL_VFID 256
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
+
+#define ICE_CHNL_START_TC 1
+#define ICE_CHNL_MAX_TC 16
+
#define ICE_MAX_RESET_WAIT 20
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -118,14 +131,24 @@
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
+/* Minimum BW limit is 500 Kbps for any scheduler node */
+#define ICE_MIN_BW_LIMIT 500
+/* User can specify BW in either Kbit/Mbit/Gbit and OS converts it in bytes.
+ * use it to convert user specified BW limit into Kbps
+ */
+#define ICE_BW_KBPS_DIVISOR 125
+
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
-/* Macros for each Tx/Rx ring in a VSI */
+/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
+#define ice_for_each_xdp_txq(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)
+
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
@@ -139,6 +162,9 @@
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+#define ice_for_each_chnl_tc(i) \
+ for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
+
#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
@@ -158,6 +184,29 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
+enum ice_feature {
+ ICE_F_DSCP,
+ ICE_F_SMA_CTRL,
+ ICE_F_MAX
+};
+
+DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+
+struct ice_channel {
+ struct list_head list;
+ u8 type;
+ u16 sw_id;
+ u16 base_q;
+ u16 num_rxq;
+ u16 num_txq;
+ u16 vsi_num;
+ u8 ena_tc;
+ struct ice_aqc_vsi_props info;
+ u64 max_tx_rate;
+ u64 min_tx_rate;
+ struct ice_vsi *ch_vsi;
+};
+
struct ice_txq_meta {
u32 q_teid; /* Tx-scheduler element identifier */
u16 q_id; /* Entry in VSI's txq_map bitmap */
@@ -175,7 +224,7 @@ struct ice_tc_info {
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
- u8 ena_tc; /* Tx map */
+ u16 ena_tc; /* Tx map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
@@ -266,8 +315,8 @@ struct ice_vsi {
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
- struct ice_ring **rx_rings; /* Rx ring array */
- struct ice_ring **tx_rings; /* Tx ring array */
+ struct ice_rx_ring **rx_rings; /* Rx ring array */
+ struct ice_tx_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -306,10 +355,6 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
- /* devlink port data */
- struct devlink_port devlink_port;
- bool devlink_port_registered;
-
u16 max_frame;
u16 rx_buf_len;
@@ -344,11 +389,42 @@ struct ice_vsi {
u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
- struct ice_ring **xdp_rings; /* XDP ring array */
+ struct ice_tx_ring **xdp_rings; /* XDP ring array */
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct net_device **target_netdevs;
+
+ struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
+
+ /* Channel Specific Fields */
+ struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
+ u16 cnt_q_avail;
+ u16 next_base_q; /* next queue to be used for channel setup */
+ struct list_head ch_list;
+ u16 num_chnl_rxq;
+ u16 num_chnl_txq;
+ u16 ch_rss_size;
+ u16 num_chnl_fltr;
+ /* store away RSS size info before configuring ADQ channels so that,
+ * after tc-qdisc delete, the RSS settings can be restored to what
+ * they were before
+ */
+ u16 orig_rss_size;
+ /* this keeps track of all enabled TCs with and without DCB,
+ * inclusive of ADQ; vsi->mqprio_qopt keeps track of queue
+ * information
+ */
+ u8 all_numtc;
+ u16 all_enatc;
+
+ /* store away TC info, to be used for rebuild logic */
+ u8 old_numtc;
+ u16 old_ena_tc;
+
+ struct ice_channel *ch;
+
/* setup back reference, to which aggregator node this VSI
* corresponds to
*/
@@ -377,6 +453,8 @@ struct ice_q_vector {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
+ struct ice_channel *ch;
+
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
@@ -395,11 +473,14 @@ enum ice_pf_flags {
ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
+ ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
+ ICE_FLAG_CLS_FLOWER,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_MOD_POWER_UNSUPPORTED,
+ ICE_FLAG_PHY_FW_LOAD_FAILED,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
@@ -408,6 +489,12 @@ enum ice_pf_flags {
ICE_PF_FLAGS_NBITS /* must be last */
};
+struct ice_switchdev_info {
+ struct ice_vsi *control_vsi;
+ struct ice_vsi *uplink_vsi;
+ bool is_running;
+};
+
struct ice_agg_node {
u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE 64
@@ -421,6 +508,9 @@ struct ice_pf {
struct devlink_region *nvm_region;
struct devlink_region *devcaps_region;
+ /* devlink port data */
+ struct devlink_port devlink_port;
+
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
struct ice_res_tracker *irq_tracker;
@@ -434,6 +524,7 @@ struct ice_pf {
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
+ u16 eswitch_mode; /* current mode of eswitch */
/* Virtchnl/SR-IOV config info */
struct ice_vf *vf;
u16 num_alloc_vfs; /* actual number of VFs allocated */
@@ -443,6 +534,7 @@ struct ice_pf {
/* used to ratelimit the MDD event logging */
unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+ DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
@@ -495,12 +587,19 @@ struct ice_pf {
struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
+ /* count of tc_flower filters specific to a channel (i.e., where the
+ * filter action is "hw_tc <tc_num>")
+ */
+ u16 num_dmac_chnl_fltrs;
+ struct hlist_head tc_flower_fltr_list;
__le64 nvm_phy_type_lo; /* NVM PHY type low */
__le64 nvm_phy_type_hi; /* NVM PHY type high */
struct ice_link_default_override_tlv link_dflt_override;
struct ice_lag *lag; /* Link Aggregation information */
+ struct ice_switchdev_info switchdev;
+
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
@@ -512,9 +611,28 @@ struct ice_pf {
struct ice_netdev_priv {
struct ice_vsi *vsi;
+ struct ice_repr *repr;
+ /* indirect block callbacks on registered higher level devices
+ * (e.g. tunnel devices)
+ *
+ * tc_indr_block_cb_priv_list is used to look up indirect callback
+ * private data
+ */
+ struct list_head tc_indr_block_priv_list;
};
/**
+ * ice_vector_ch_enabled
+ * @qv: pointer to q_vector, can be NULL
+ *
+ * Returns true if the vector is channel-enabled, false otherwise
+ */
+static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
+{
+ return !!qv->ch; /* Enable it to run with TC */
+}
+
+/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
* @hw: pointer to HW struct
* @vsi: pointer to VSI struct, can be NULL
@@ -556,25 +674,42 @@ static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
return !!vsi->xdp_prog;
}
-static inline void ice_set_ring_xdp(struct ice_ring *ring)
+static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
ring->flags |= ICE_TX_FLAGS_RING_XDP;
}
/**
* ice_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: ring to use
+ * @ring: Rx ring to use
*
* Returns a pointer to xdp_umem structure if there is a buffer pool present,
* NULL otherwise.
*/
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
- if (ice_ring_is_xdp(ring))
- qid -= vsi->num_xdp_txq;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xsk_get_pool_from_qid(vsi->netdev, qid);
+}
+
+/**
+ * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: Tx ring to use
+ *
+ * Returns a pointer to the xsk_buff_pool structure if a buffer pool is present,
+ * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ */
+static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ u16 qid;
+
+ qid = ring->q_index - vsi->num_xdp_txq;
if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
return NULL;
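A short worked example of the q_index arithmetic above (assuming the driver's convention that XDP Tx rings are appended after the regular Tx rings): with 8 regular Tx queues and num_xdp_txq = 8, the XDP ring serving queue 3 has q_index 11, and 11 - 8 = 3 recovers the AF_XDP queue id that is then checked against af_xdp_zc_qps and passed to xsk_get_pool_from_qid().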
@@ -597,6 +732,19 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
}
/**
+ * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
+ * @np: private netdev structure
+ */
+static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
+{
+ /* In case of port representor return source port VSI. */
+ if (np->repr)
+ return np->repr->src_vsi;
+ else
+ return np->vsi;
+}
+
+/**
* ice_get_ctrl_vsi - Get the control VSI
* @pf: PF instance
*/
@@ -610,6 +758,18 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
}
/**
+ * ice_is_switchdev_running - check if switchdev is configured
+ * @pf: pointer to PF structure
+ *
+ * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
+ * and switchdev is configured, false otherwise.
+ */
+static inline bool ice_is_switchdev_running(struct ice_pf *pf)
+{
+ return pf->switchdev.is_running;
+}
+
+/**
* ice_set_sriov_cap - enable SRIOV in PF flags
* @pf: PF struct
*/
@@ -633,11 +793,37 @@ static inline void ice_clear_sriov_cap(struct ice_pf *pf)
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
+/**
+ * ice_is_adq_active - any active ADQs
+ * @pf: pointer to PF
+ *
+ * Returns true if any ADQ is configured, which is determined from the VSI
+ * type (expected to be VSI_PF), numtc, and the TC_MQPRIO flag; returns
+ * false otherwise
+ */
+static inline bool ice_is_adq_active(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return false;
+
+ /* is ADQ configured */
+ if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ return true;
+
+ return false;
+}
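A hedged usage sketch (call-site shape, not taken from this diff): features that cannot coexist with ADQ can gate themselves on the helper:

    if (ice_is_adq_active(pf))
            return -EOPNOTSUPP;   /* hypothetical guard in a conflicting path */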
+
bool netif_is_ice(struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
+int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
+void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
@@ -648,6 +834,7 @@ int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 21b4c7cd6f05..4eef3488d86f 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -233,6 +233,7 @@ struct ice_aqc_get_sw_cfg_resp_elem {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_RECIPE 0x05
#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
@@ -241,6 +242,7 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
+#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
@@ -474,6 +476,53 @@ struct ice_aqc_vsi_props {
#define ICE_MAX_NUM_RECIPES 64
+/* Add/Get Recipe (indirect 0x0290/0x0292) */
+struct ice_aqc_add_get_recipe {
+ __le16 num_sub_recipes; /* Input in Add cmd, Output in Get cmd */
+ __le16 return_index; /* Input, used for Get cmd only */
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_recipe_content {
+ u8 rid;
+#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7)
+#define ICE_AQ_SW_ID_LKUP_IDX 0
+ u8 lkup_indx[5];
+#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7)
+#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF
+ __le16 mask[5];
+ u8 result_indx;
+#define ICE_AQ_RECIPE_RESULT_DATA_S 0
+#define ICE_AQ_RECIPE_RESULT_DATA_M (0x3F << ICE_AQ_RECIPE_RESULT_DATA_S)
+#define ICE_AQ_RECIPE_RESULT_EN BIT(7)
+ u8 rsvd0[3];
+ u8 act_ctrl_join_priority;
+ u8 act_ctrl_fwd_priority;
+ u8 act_ctrl;
+#define ICE_AQ_RECIPE_ACT_INV_ACT BIT(2)
+ u8 rsvd1;
+ __le32 dflt_act;
+};
+
+struct ice_aqc_recipe_data_elem {
+ u8 recipe_indx;
+ u8 resp_bits;
+ u8 rsvd0[2];
+ u8 recipe_bitmap[8];
+ u8 rsvd1[4];
+ struct ice_aqc_recipe_content content;
+ u8 rsvd2[20];
+};
+
+/* Set/Get Recipes to Profile Association (direct 0x0291/0x0293) */
+struct ice_aqc_recipe_to_profile {
+ __le16 profile_id;
+ u8 rsvd[6];
+ DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES);
+};
+
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
struct ice_aqc_sw_rules {
@@ -671,6 +720,16 @@ struct ice_aqc_sw_rules_elem {
} __packed pdata;
};
+/* Query PFC Mode (direct 0x0302)
+ * Set PFC Mode (direct 0x0303)
+ */
+struct ice_aqc_set_query_pfc_mode {
+ u8 pfc_mode;
+/* For Query Command response, reserved in all other cases */
+#define ICE_AQC_PFC_VLAN_BASED_PFC 1
+#define ICE_AQC_PFC_DSCP_BASED_PFC 2
+ u8 rsvd[15];
+};
/* Get Default Topology (indirect 0x0400) */
struct ice_aqc_get_topo {
u8 port_num;
@@ -1126,6 +1185,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
u8 link_cfg_err;
#define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
#define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
@@ -1209,6 +1269,7 @@ struct ice_aqc_set_event_mask {
#define ICE_AQ_LINK_EVENT_AN_COMPLETED BIT(7)
#define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
#define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
u8 reserved1[6];
};
@@ -1220,7 +1281,7 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
-struct ice_aqc_link_topo_addr {
+struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0)
@@ -1246,6 +1307,10 @@ struct ice_aqc_link_topo_addr {
#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4
#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5
u8 index;
+};
+
+struct ice_aqc_link_topo_addr {
+ struct ice_aqc_link_topo_params topo_params;
__le16 handle;
#define ICE_AQC_LINK_TOPO_HANDLE_S 0
#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
@@ -1268,6 +1333,7 @@ struct ice_aqc_link_topo_addr {
struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
u8 rsvd[9];
};
@@ -1281,6 +1347,16 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ice_aqc_gpio {
+ __le16 gpio_ctrl_handle;
+#define ICE_AQC_GPIO_HANDLE_S 0
+#define ICE_AQC_GPIO_HANDLE_M (0x3FF << ICE_AQC_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
/* Read/Write SFF EEPROM command (indirect 0x06EE) */
struct ice_aqc_sff_eeprom {
u8 lport_num;
@@ -1922,10 +1998,13 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
+ struct ice_aqc_add_get_recipe add_get_recipe;
+ struct ice_aqc_recipe_to_profile recipe_to_profile;
struct ice_aqc_get_topo get_topo;
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
@@ -1936,6 +2015,7 @@ struct ice_aq_desc {
struct ice_aqc_nvm_pkg_data pkg_data;
struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
struct ice_aqc_pf_vf_msg virt;
+ struct ice_aqc_set_query_pfc_mode set_query_pfc_mode;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
struct ice_aqc_lldp_stop lldp_stop;
@@ -2033,6 +2113,12 @@ enum ice_adminq_opc {
ice_aqc_opc_update_vsi = 0x0211,
ice_aqc_opc_free_vsi = 0x0213,
+ /* recipe commands */
+ ice_aqc_opc_add_recipe = 0x0290,
+ ice_aqc_opc_recipe_to_profile = 0x0291,
+ ice_aqc_opc_get_recipe = 0x0292,
+ ice_aqc_opc_get_recipe_to_profile = 0x0293,
+
/* switch rules population commands */
ice_aqc_opc_add_sw_rules = 0x02A0,
ice_aqc_opc_update_sw_rules = 0x02A1,
@@ -2040,6 +2126,10 @@ enum ice_adminq_opc {
ice_aqc_opc_clear_pf_cfg = 0x02A4,
+ /* DCB commands */
+ ice_aqc_opc_query_pfc_mode = 0x0302,
+ ice_aqc_opc_set_pfc_mode = 0x0303,
+
/* transmit scheduler commands */
ice_aqc_opc_get_dflt_topo = 0x0400,
ice_aqc_opc_add_sched_elems = 0x0401,
@@ -2064,6 +2154,8 @@ enum ice_adminq_opc {
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_set_gpio = 0x06EC,
+ ice_aqc_opc_get_gpio = 0x06ED,
ice_aqc_opc_sff_eeprom = 0x06EE,
/* NVM commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 88d98c9e5f91..5daade32ea62 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -513,7 +513,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
if (!vsi || vsi->type != ICE_VSI_PF)
return;
- arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST,
+ arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
GFP_KERNEL);
if (!arfs_fltr_list)
return;
@@ -614,7 +614,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
return -EINVAL;
base_idx = vsi->base_vector;
- for (i = 0; i < vsi->num_q_vectors; i++)
+ ice_for_each_q_vector(vsi, i)
if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
pf->msix_entries[base_idx + i].vector)) {
ice_free_cpu_rx_rmap(vsi);
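One note on the kzalloc() to kcalloc() conversion above: kcalloc() checks the count * size multiplication for overflow and fails the allocation instead of silently returning a short buffer. The general pattern, as a sketch (`arr` and `n` are placeholders):

/* Sketch: kcalloc() guards n * sizeof(*arr) against overflow, which an
 * open-coded kzalloc(sizeof(*arr) * n, ...) does not. */
arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
if (!arr)
	return -ENOMEM;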
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c36057efc7ae..fa6cd63cbf1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -115,6 +115,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
q_vector->tx.itr_mode = ITR_DYNAMIC;
q_vector->rx.itr_mode = ITR_DYNAMIC;
+ q_vector->tx.type = ICE_TX_CONTAINER;
+ q_vector->rx.type = ICE_RX_CONTAINER;
if (vsi->type == ICE_VSI_VF)
goto out;
@@ -146,7 +148,8 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
struct ice_q_vector *q_vector;
struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
struct device *dev;
dev = ice_pf_to_dev(pf);
@@ -156,10 +159,10 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_ring(ring, q_vector->tx)
- ring->q_vector = NULL;
- ice_for_each_ring(ring, q_vector->rx)
- ring->q_vector = NULL;
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ tx_ring->q_vector = NULL;
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ rx_ring->q_vector = NULL;
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
@@ -201,15 +204,18 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
}
/**
- * ice_calc_q_handle - calculate the queue handle
+ * ice_calc_txq_handle - calculate the queue handle
* @vsi: VSI that ring belongs to
* @ring: ring to get the absolute queue index
* @tc: traffic class number
*/
-static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
+ if (ring->ch)
+ return ring->q_index - ring->ch->base_q;
+
 /* The idea behind the calculation is that we subtract the queue count of
 * the TC that the ring belongs to from the ring's absolute queue index,
 * and as a result we get the queue's index within that TC.
@@ -218,13 +224,37 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
}
/**
+ * ice_eswitch_calc_txq_handle - calculate Tx queue handle on a switchdev VSI
+ * @ring: pointer to the ring whose unique index is needed
+ *
+ * Because the switchdev VSI serves many netdevs, the ring->q_index values of
+ * its Tx rings can repeat, while hardware ring setup requires a unique
+ * q_index. Calculate it here by finding the index of this ring in
+ * vsi->tx_rings.
+ *
+ * Returns ICE_INVAL_Q_INDEX when the index is not found. This should never
+ * happen, since the VSI is taken from ring->vsi, so the ring has to be
+ * present in that VSI.
+ */
+static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ int i;
+
+ ice_for_each_txq(vsi, i) {
+ if (vsi->tx_rings[i] == ring)
+ return i;
+ }
+
+ return ICE_INVAL_Q_INDEX;
+}
+
+/**
* ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
* @ring: The Tx ring to configure
*
* This enables/disables XPS for a given Tx descriptor ring
* based on the TCs enabled for the VSI that ring belongs to.
*/
-static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
+static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
if (!ring->q_vector || !ring->netdev)
return;
@@ -246,7 +276,7 @@ static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
* Configure the Tx descriptor ring in TLAN context.
*/
static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
struct ice_vsi *vsi = ring->vsi;
struct ice_hw *hw = &vsi->back->hw;
@@ -258,7 +288,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* Transmit Queue Length */
tlan_ctx->qlen = ring->count;
- ice_set_cgd_num(tlan_ctx, ring);
+ ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
/* PF number */
tlan_ctx->pf_num = hw->pf_id;
@@ -273,19 +303,28 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
case ICE_VSI_LB:
case ICE_VSI_CTRL:
case ICE_VSI_PF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ if (ring->ch)
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ else
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ break;
default:
return;
}
/* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+ if (ring->ch)
+ tlan_ctx->src_vsi = ring->ch->vsi_num;
+ else
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
/* Restrict Tx timestamps to the PF VSI */
switch (vsi->type) {
@@ -312,7 +351,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*
* Returns the offset value for ring into the data buffer.
*/
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
if (ice_ring_uses_build_skb(rx_ring))
return ICE_SKB_PAD;
@@ -328,7 +367,7 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
*
* Configure the Rx descriptor ring in RLAN context.
*/
-static int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
struct ice_vsi *vsi = ring->vsi;
@@ -439,7 +478,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_ring *ring)
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u16 num_bufs = ICE_DESC_UNUSED(ring);
@@ -660,16 +699,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
- q_vector->tx.ring = NULL;
+ q_vector->tx.tx_ring = NULL;
q_vector->tx.itr_idx = ICE_TX_ITR;
q_base = vsi->num_txq - tx_rings_rem;
for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
- struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
+ tx_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = tx_ring;
}
tx_rings_rem -= tx_rings_per_v;
@@ -677,16 +716,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
- q_vector->rx.ring = NULL;
+ q_vector->rx.rx_ring = NULL;
q_vector->rx.itr_idx = ICE_RX_ITR;
q_base = vsi->num_rxq - rx_rings_rem;
for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
- struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
+ rx_ring->next = q_vector->rx.rx_ring;
+ q_vector->rx.rx_ring = rx_ring;
}
rx_rings_rem -= rx_rings_per_v;
}
@@ -711,12 +750,13 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @qg_buf: queue group buffer
*/
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
u8 buf_len = struct_size(qg_buf, txqs, 1);
struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_aqc_add_txqs_perq *txq;
+ struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
@@ -746,10 +786,23 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
/* Add unique software queue handle of the Tx queue per
* TC into the VSI Tx ring
*/
- ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+ if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ ring->q_handle = ice_eswitch_calc_txq_handle(ring);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
- 1, qg_buf, buf_len, NULL);
+ if (ring->q_handle == ICE_INVAL_Q_INDEX)
+ return -ENODEV;
+ } else {
+ ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
+ }
+
+ if (ch)
+ status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
+ ring->q_handle, 1, qg_buf, buf_len,
+ NULL);
+ else
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ ring->q_handle, 1, qg_buf, buf_len,
+ NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
ice_stat_str(status));
@@ -870,7 +923,7 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
*/
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
+ u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
struct ice_pf *pf = vsi->back;
@@ -927,9 +980,10 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* are needed for stopping Tx queue
*/
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
+ struct ice_channel *ch = ring->ch;
u8 tc;
if (IS_ENABLED(CONFIG_DCB))
@@ -940,6 +994,11 @@ ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
txq_meta->q_id = ring->reg_idx;
txq_meta->q_teid = ring->txq_teid;
txq_meta->q_handle = ring->q_handle;
- txq_meta->vsi_idx = vsi->idx;
- txq_meta->tc = tc;
+ if (ch) {
+ txq_meta->vsi_idx = ch->ch_vsi->idx;
+ txq_meta->tc = 0;
+ } else {
+ txq_meta->vsi_idx = vsi->idx;
+ txq_meta->tc = tc;
+ }
}
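The ice_for_each_tx_ring()/ice_for_each_rx_ring() iterators used in this file are defined elsewhere in the series; given how ice_vsi_map_rings_to_vectors() above pushes each ring onto the head of q_vector->tx.tx_ring via ring->next, they presumably walk that singly-linked list:

/* Assumed shape of the new iterators (a sketch, not taken from this
 * hunk): traverse the per-vector ring lists built in
 * ice_vsi_map_rings_to_vectors(). */
#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)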
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 20e1c29aa68a..b67dca417acb 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,7 @@
#include "ice.h"
-int ice_vsi_cfg_rxq(struct ice_ring *ring);
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -15,7 +15,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
@@ -25,9 +25,9 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
+ u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2fb81e359cdf..b3066d0fea8b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E810C_BACKPLANE:
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_BACKPLANE:
+ case ICE_DEV_ID_E810_XXV_QSFP:
case ICE_DEV_ID_E810_XXV_SFP:
hw->mac_type = ICE_MAC_E810;
break;
@@ -70,6 +72,27 @@ bool ice_is_e810(struct ice_hw *hw)
}
/**
+ * ice_is_e810t - check if the device is E810T based
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if the device is E810T based, false if not.
+ */
+bool ice_is_e810t(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
+ hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ return true;
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -240,11 +263,13 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
- ICE_AQC_LINK_TOPO_NODE_CTX_S);
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
+ ICE_AQC_LINK_TOPO_NODE_CTX_S);
/* set node type */
- cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
+ cmd->addr.topo_params.node_type_ctx |=
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
@@ -568,6 +593,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
return ICE_ERR_NO_MEMORY;
INIT_LIST_HEAD(&sw->vsi_list_map_head);
+ sw->prof_res_bm_init = 0;
status = ice_init_def_sw_recp(hw);
if (status) {
@@ -594,17 +620,42 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
list_del(&v_pos_map->list_entry);
devm_kfree(ice_hw_to_dev(hw), v_pos_map);
}
- recps = hw->switch_info->recp_list;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
- struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+ recps = sw->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
recps[i].root_rid = i;
- mutex_destroy(&recps[i].filt_rule_lock);
- list_for_each_entry_safe(lst_itr, tmp_entry,
- &recps[i].filt_rules, list_entry) {
- list_del(&lst_itr->list_entry);
- devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ list_for_each_entry_safe(rg_entry, tmprg_entry,
+ &recps[i].rg_list, l_entry) {
+ list_del(&rg_entry->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), rg_entry);
+ }
+
+ if (recps[i].adv_rule) {
+ struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
+ struct ice_adv_fltr_mgmt_list_entry *lst_itr;
+
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+ } else {
+ struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
}
+ if (recps[i].root_buf)
+ devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
}
ice_rm_all_sw_replay_rule_info(hw);
devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
@@ -4767,6 +4818,64 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
}
/**
+ * ice_aq_set_gpio
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO number of the GPIO that needs to be set
+ * @value: SW-provided IO value to set in the LSB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
+ */
+int
+ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_gpio *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+ cmd->gpio_val = value ? 1 : 0;
+
+ return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+}
+
+/**
+ * ice_aq_get_gpio
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO number of the GPIO whose value is to be read
+ * @value: IO value read
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
+ * the topology
+ */
+int
+ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_gpio *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ *value = !!cmd->gpio_val;
+ return 0;
+}
+
+/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
*
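A hedged usage sketch for the two GPIO helpers added above; the controller handle and pin index are illustrative (on E810-T parts the handle would be discovered from the link topology, e.g. the PCA9575 node):

/* Sketch: set a pin via 0x06EC and read it back via 0x06ED. `handle`
 * and the pin index 2 are hypothetical values. */
static int ice_sketch_gpio_roundtrip(struct ice_hw *hw, u16 handle)
{
	bool value;
	int err;

	err = ice_aq_set_gpio(hw, handle, 2, true, NULL);
	if (err)
		return err;

	err = ice_aq_get_gpio(hw, handle, 2, &value, NULL);
	if (err)
		return err;

	return value ? 0 : -EIO;	/* expect the pin to read back high */
}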
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index fb16070f02e2..65c1b3244264 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -183,6 +183,7 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
+bool ice_is_e810t(struct ice_hw *hw);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -192,6 +193,12 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
u32 *value, struct ice_sq_cd *cd);
+int
+ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
+ struct ice_sq_cd *cd);
+int
+ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 849fcf605479..241427cd9bc0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h"
+#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_dcb.h"
@@ -736,6 +737,45 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
}
/**
+ * ice_aq_set_pfc_mode - Set PFC mode
+ * @hw: pointer to the HW struct
+ * @pfc_mode: value of PFC mode to set
+ * @cd: pointer to command details structure or NULL
+ *
+ * This AQ call configures the PFC mode to DSCP-based PFC mode or
+ * VLAN-based PFC (0x0303)
+ */
+int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_query_pfc_mode *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
+ return -EINVAL;
+
+ cmd = &desc.params.set_query_pfc_mode;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
+
+ cmd->pfc_mode = pfc_mode;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
+ * disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
+ * been executed, check if cmd->pfc_mode is what was requested. If not,
+ * return an error.
+ */
+ if (cmd->pfc_mode != pfc_mode)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
* ice_cee_to_dcb_cfg
* @cee_cfg: pointer to CEE configuration struct
* @pi: port information structure
@@ -1207,7 +1247,140 @@ ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
}
/**
- * ice_add_dcb_tlv - Add all IEEE TLVs
+ * ice_add_dscp_up_tlv - Prepare DSCP to UP TLV
+ * @tlv: location to build the TLV data
+ * @dcbcfg: location of data to convert to TLV
+ */
+static void
+ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+ int i;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_UP_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_DSCP2UP);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* bytes 0 - 63 - IPv4 DSCP2UP LUT */
+ for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
+ /* IPv4 mapping */
+ buf[i] = dcbcfg->dscp_map[i];
+ /* IPv6 mapping */
+ buf[i + ICE_DSCP_IPV6_OFFSET] = dcbcfg->dscp_map[i];
+ }
+
+ /* byte 64 - IPv4 untagged traffic */
+ buf[i] = 0;
+
+ /* byte 144 - IPv6 untagged traffic */
+ buf[i + ICE_DSCP_IPV6_OFFSET] = 0;
+}
+
+#define ICE_BYTES_PER_TC 8
+/**
+ * ice_add_dscp_enf_tlv - Prepare DSCP Enforcement TLV
+ * @tlv: location to build the TLV data
+ */
+static void
+ice_add_dscp_enf_tlv(struct ice_lldp_org_tlv *tlv)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_ENF_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_ENFORCE);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Allow all DSCP values to be valid for all TC's (IPv4 and IPv6) */
+ memset(buf, 0, 2 * (ICE_MAX_TRAFFIC_CLASS * ICE_BYTES_PER_TC));
+}
+
+/**
+ * ice_add_dscp_tc_bw_tlv - Prepare DSCP BW for TC TLV
+ * @tlv: location to build the TLV data
+ * @dcbcfg: location of the data to convert to TLV
+ */
+static void
+ice_add_dscp_tc_bw_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u8 offset = 0;
+ u16 typelen;
+ int i;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_TC_BW_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_TCBW);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First octet after subtype
+ * ----------------------------
+ * | RSV | CBS | RSV | Max TCs |
+ * | 1b | 1b | 3b | 3b |
+ * ----------------------------
+ */
+ etscfg = &dcbcfg->etscfg;
+ buf[0] = etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;
+
+ /* bytes 1 - 4 reserved */
+ offset = 5;
+
+ /* TC BW table
+ * bytes 0 - 7 for TC 0 - 7
+ *
+ * TSA Assignment table
+ * bytes 8 - 15 for TC 0 - 7
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ buf[offset] = etscfg->tcbwtable[i];
+ buf[offset + ICE_MAX_TRAFFIC_CLASS] = etscfg->tsatable[i];
+ offset++;
+ }
+}
+
+/**
+ * ice_add_dscp_pfc_tlv - Prepare DSCP PFC TLV
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store which holds the PFC CFG data
+ */
+static void
+ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_PFC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ buf[0] = dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena & 0xF;
+}
+
+/**
+ * ice_add_dcb_tlv - Add all IEEE or DSCP TLVs
* @tlv: Fill TLV data in IEEE format
* @dcbcfg: Local store which holds the DCB Config
* @tlvid: Type of IEEE TLV
@@ -1218,21 +1391,41 @@ static void
ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
u16 tlvid)
{
- switch (tlvid) {
- case ICE_IEEE_TLV_ID_ETS_CFG:
- ice_add_ieee_ets_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_ETS_REC:
- ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_PFC_CFG:
- ice_add_ieee_pfc_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_APP_PRI:
- ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
- break;
- default:
- break;
+ if (dcbcfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ switch (tlvid) {
+ case ICE_IEEE_TLV_ID_ETS_CFG:
+ ice_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_ETS_REC:
+ ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_PFC_CFG:
+ ice_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_APP_PRI:
+ ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* pfc_mode == ICE_QOS_MODE_DSCP */
+ switch (tlvid) {
+ case ICE_TLV_ID_DSCP_UP:
+ ice_add_dscp_up_tlv(tlv, dcbcfg);
+ break;
+ case ICE_TLV_ID_DSCP_ENF:
+ ice_add_dscp_enf_tlv(tlv);
+ break;
+ case ICE_TLV_ID_DSCP_TC_BW:
+ ice_add_dscp_tc_bw_tlv(tlv, dcbcfg);
+ break;
+ case ICE_TLV_ID_DSCP_TO_PFC:
+ ice_add_dscp_pfc_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
}
}
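For reference, the typelen/ouisubtype packing used by the DSCP TLV builders above follows the usual IEEE 802.1AB layout, a 7-bit TLV type over a 9-bit length and a 24-bit OUI over an 8-bit subtype; under that assumption (ICE_LLDP_TLV_TYPE_S == 9, ICE_LLDP_TLV_OUI_S == 8) the DSCP-to-UP header works out as:

/* Worked sketch of the DSCP2UP TLV header, assuming the standard
 * 802.1AB field widths; ICE_TLV_TYPE_ORG is the org-specific type 127. */
u16 typelen = (127 << 9) | ICE_DSCP_UP_TLV_LEN;		/* 0xFE94 */
u32 ouisubtype = ((u32)ICE_DSCP_OUI << 8) | ICE_DSCP_SUBTYPE_DSCP2UP;
						/* 0xFFFFFF41 */
tlv->typelen = htons(typelen);
tlv->ouisubtype = htonl(ouisubtype);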
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index d7e5e6178a21..9b6f87a889a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -22,6 +22,14 @@
#define ICE_CEE_DCBX_OUI 0x001B21
#define ICE_CEE_DCBX_TYPE 2
+
+#define ICE_DSCP_OUI 0xFFFFFF
+#define ICE_DSCP_SUBTYPE_DSCP2UP 0x41
+#define ICE_DSCP_SUBTYPE_ENFORCE 0x42
+#define ICE_DSCP_SUBTYPE_TCBW 0x43
+#define ICE_DSCP_SUBTYPE_PFC 0x44
+#define ICE_DSCP_IPV6_OFFSET 80
+
#define ICE_CEE_SUBTYPE_PG_CFG 2
#define ICE_CEE_SUBTYPE_PFC_CFG 3
#define ICE_CEE_SUBTYPE_APP_PRI 4
@@ -78,11 +86,20 @@
#define ICE_IEEE_TLV_ID_APP_PRI 6
#define ICE_TLV_ID_END_OF_LLDPPDU 7
#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
+#define ICE_TLV_ID_DSCP_UP 3
+#define ICE_TLV_ID_DSCP_ENF 4
+#define ICE_TLV_ID_DSCP_TC_BW 5
+#define ICE_TLV_ID_DSCP_TO_PFC 6
#define ICE_IEEE_ETS_TLV_LEN 25
#define ICE_IEEE_PFC_TLV_LEN 6
#define ICE_IEEE_APP_TLV_LEN 11
+#define ICE_DSCP_UP_TLV_LEN 148
+#define ICE_DSCP_ENF_TLV_LEN 132
+#define ICE_DSCP_TC_BW_TLV_LEN 25
+#define ICE_DSCP_PFC_TLV_LEN 6
+
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
__be16 typelen;
@@ -120,6 +137,7 @@ struct ice_cee_app_prio {
u8 prio_map;
} __packed;
+int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 926cf748c5ec..a72e18320a22 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -5,52 +5,10 @@
#include "ice_dcb_nl.h"
/**
- * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
- * @vsi: the VSI being configured
- * @ena_tc: TC map to be enabled
- */
-void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
-{
- struct net_device *netdev = vsi->netdev;
- struct ice_pf *pf = vsi->back;
- struct ice_dcbx_cfg *dcbcfg;
- u8 netdev_tc;
- int i;
-
- if (!netdev)
- return;
-
- if (!ena_tc) {
- netdev_reset_tc(netdev);
- return;
- }
-
- if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
- return;
-
- dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
-
- ice_for_each_traffic_class(i)
- if (vsi->tc_cfg.ena_tc & BIT(i))
- netdev_set_tc_queue(netdev,
- vsi->tc_cfg.tc_info[i].netdev_tc,
- vsi->tc_cfg.tc_info[i].qcount_tx,
- vsi->tc_cfg.tc_info[i].qoffset);
-
- for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
- u8 ets_tc = dcbcfg->etscfg.prio_table[i];
-
- /* Get the mapped netdev TC# for the UP */
- netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
- netdev_set_prio_tc_map(netdev, i, netdev_tc);
- }
-}
-
-/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
* @dcbcfg: DCB config to evaluate for enabled TCs
*/
-u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
+static u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
{
u8 i, num_tc, ena_tc = 1;
@@ -179,6 +137,67 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_get_first_droptc - return the number of the first drop TC
+ * @vsi: VSI used to find the first drop TC
+ *
+ * This function returns the value of first_droptc. When DCB is enabled, the
+ * first drop TC is derived from the enabled_tc and PFC enabled bits;
+ * otherwise it returns 0, since without DCB there is only one TC (TC0).
+ */
+static u8 ice_get_first_droptc(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ u8 num_tc, ena_tc_map, pfc_ena_map;
+ u8 i;
+
+ num_tc = ice_dcb_get_num_tc(cfg);
+
+ /* get bitmap of enabled TCs */
+ ena_tc_map = ice_dcb_get_ena_tc(cfg);
+
+ /* get bitmap of PFC enabled TCs */
+ pfc_ena_map = cfg->pfc.pfcena;
+
+ /* get first TC that is not PFC enabled */
+ for (i = 0; i < num_tc; i++) {
+ if ((ena_tc_map & BIT(i)) && (!(pfc_ena_map & BIT(i)))) {
+ dev_dbg(dev, "first drop tc = %d\n", i);
+ return i;
+ }
+ }
+
+ dev_dbg(dev, "first drop tc = 0\n");
+ return 0;
+}
+
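A quick worked example of the selection loop above, written out as a comment:

/* e.g. ena_tc_map = 0x7 (TCs 0-2 enabled) and pfc_ena_map = 0x3 (PFC on
 * TCs 0 and 1): TC 0 and TC 1 fail the !PFC test, the loop stops at
 * i = 2, so the first drop TC is TC 2. */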
+/**
+ * ice_vsi_set_dcb_tc_cfg - Set VSI's TC based on DCB configuration
+ * @vsi: pointer to the VSI instance
+ */
+void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
+ vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+ break;
+ case ICE_VSI_CHNL:
+ vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
+ vsi->tc_cfg.numtc = 1;
+ break;
+ case ICE_VSI_CTRL:
+ case ICE_VSI_LB:
+ default:
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+ }
+}
+
+/**
* ice_dcb_get_tc - Get the TC associated with the queue
* @vsi: ptr to the VSI
* @queue_index: queue number associated with VSI
@@ -194,17 +213,18 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
*/
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
{
- struct ice_ring *tx_ring, *rx_ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
u16 qoffset, qcount;
int i, n;
if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
/* Reset the TC information */
- for (i = 0; i < vsi->num_txq; i++) {
+ ice_for_each_txq(vsi, i) {
tx_ring = vsi->tx_rings[i];
tx_ring->dcb_tc = 0;
}
- for (i = 0; i < vsi->num_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
rx_ring = vsi->rx_rings[i];
rx_ring->dcb_tc = 0;
}
@@ -217,11 +237,68 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
qoffset = vsi->tc_cfg.tc_info[n].qoffset;
qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
- for (i = qoffset; i < (qoffset + qcount); i++) {
- tx_ring = vsi->tx_rings[i];
- rx_ring = vsi->rx_rings[i];
- tx_ring->dcb_tc = n;
- rx_ring->dcb_tc = n;
+ for (i = qoffset; i < (qoffset + qcount); i++)
+ vsi->tx_rings[i]->dcb_tc = n;
+
+ qcount = vsi->tc_cfg.tc_info[n].qcount_rx;
+ for (i = qoffset; i < (qoffset + qcount); i++)
+ vsi->rx_rings[i]->dcb_tc = n;
+ }
+ /* applicable only if "all_enatc" is set, which will be set from
+ * setup_tc method as part of configuring channels
+ */
+ if (vsi->all_enatc) {
+ u8 first_droptc = ice_get_first_droptc(vsi);
+
+ /* When DCB is configured, TC for ADQ queues (which are really
+ * PF queues) should be the first drop TC of the main VSI
+ */
+ ice_for_each_chnl_tc(n) {
+ if (!(vsi->all_enatc & BIT(n)))
+ break;
+
+ qoffset = vsi->mqprio_qopt.qopt.offset[n];
+ qcount = vsi->mqprio_qopt.qopt.count[n];
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ vsi->tx_rings[i]->dcb_tc = first_droptc;
+ vsi->rx_rings[i]->dcb_tc = first_droptc;
+ }
+ }
+ }
+}
+
+/**
+ * ice_dcb_ena_dis_vsi - enable/disable certain VSIs for DCB config/reconfig
+ * @pf: pointer to the PF instance
+ * @ena: true to enable VSIs, false to disable
+ * @locked: true if caller holds RTNL lock, false otherwise
+ *
+ * Before a new DCB configuration can be applied, VSIs of type PF, SWITCHDEV
+ * and CHNL need to be brought down. Following completion of DCB configuration
+ * the VSIs that were downed need to be brought up again. This helper function
+ * does both.
+ */
+static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
+{
+ int i;
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+
+ if (!vsi)
+ continue;
+
+ switch (vsi->type) {
+ case ICE_VSI_CHNL:
+ case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_PF:
+ if (ena)
+ ice_ena_vsi(vsi, locked);
+ else
+ ice_dis_vsi(vsi, locked);
+ break;
+ default:
+ continue;
}
}
}
@@ -330,7 +407,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
*/
if (!locked)
rtnl_lock();
- ice_dis_vsi(pf_vsi, true);
+
+ /* disable VSIs affected by DCB changes */
+ ice_dcb_ena_dis_vsi(pf, false, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
@@ -358,7 +437,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
ice_pf_dcb_recfg(pf);
out:
- ice_ena_vsi(pf_vsi, true);
+ /* enable previously downed VSIs */
+ ice_dcb_ena_dis_vsi(pf, true, true);
if (!locked)
rtnl_unlock();
free_cfg:
@@ -544,7 +624,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
* @ets_willing: configure ETS willing
* @locked: was this function called with RTNL held
*/
-static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
+int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *dcbcfg;
@@ -673,6 +753,8 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
tc_map = ICE_DFLT_TRAFFIC_CLASS;
ice_dcb_noncontig_cfg(pf);
}
+ } else if (vsi->type == ICE_VSI_CHNL) {
+ tc_map = BIT(ice_get_first_droptc(vsi));
} else {
tc_map = ICE_DFLT_TRAFFIC_CLASS;
}
@@ -683,6 +765,12 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
vsi->idx);
continue;
}
+ /* no need to proceed with remaining cfg if it is CHNL
+ * or switchdev VSI
+ */
+ if (vsi->type == ICE_VSI_CHNL ||
+ vsi->type == ICE_VSI_SWITCHDEV_CTRL)
+ continue;
ice_vsi_map_rings_to_vectors(vsi);
if (vsi->type == ICE_VSI_PF)
@@ -726,6 +814,11 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+ err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC,
+ NULL);
+ if (err)
+ dev_info(dev, "Failed to set VLAN PFC mode\n");
+
err = ice_dcb_sw_dflt_cfg(pf, true, locked);
if (err) {
dev_err(dev, "Failed to set local DCB config %d\n",
@@ -814,7 +907,7 @@ void ice_update_dcb_stats(struct ice_pf *pf)
* tag will already be configured with the correct ID and priority bits
*/
void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
@@ -851,7 +944,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_dcbx_cfg tmp_dcbx_cfg;
bool need_reconfig = false;
struct ice_port_info *pi;
- struct ice_vsi *pf_vsi;
u8 mib_type;
int ret;
@@ -927,14 +1019,9 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
- pf_vsi = ice_get_main_vsi(pf);
- if (!pf_vsi) {
- dev_dbg(dev, "PF VSI doesn't exist\n");
- goto out;
- }
-
rtnl_lock();
- ice_dis_vsi(pf_vsi, true);
+ /* disable VSIs affected by DCB changes */
+ ice_dcb_ena_dis_vsi(pf, false, true);
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -945,7 +1032,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
- ice_ena_vsi(pf_vsi, true);
+ /* enable previously downed VSIs */
+ ice_dcb_ena_dis_vsi(pf, true, true);
unlock_rtnl:
rtnl_unlock();
out:
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 261b6e2ed7bc..4c421c842a13 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -15,7 +15,7 @@
#define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */
void ice_dcb_rebuild(struct ice_pf *pf);
-u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
+int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi);
bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue);
@@ -28,13 +28,11 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
-void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
-
/**
* ice_find_q_in_range
* @low: start of queue range for a TC i.e. offset of TC
@@ -49,9 +47,9 @@ static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
}
static inline void
-ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
+ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc)
{
- tlan_ctx->cgd_num = ring->dcb_tc;
+ tlan_ctx->cgd_num = dcb_tc;
}
static inline bool ice_is_dcb_active(struct ice_pf *pf)
@@ -59,9 +57,21 @@ static inline bool ice_is_dcb_active(struct ice_pf *pf)
return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) ||
test_bit(ICE_FLAG_DCB_ENA, pf->flags));
}
+
+static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
+{
+ return pf->hw.port_info->qos_cfg.local_dcbx_cfg.pfc_mode;
+}
+
#else
static inline void ice_dcb_rebuild(struct ice_pf *pf) { }
+static inline void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
+{
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+}
+
static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
return ICE_DFLT_TRAFFIC_CLASS;
@@ -95,7 +105,7 @@ ice_pf_dcb_cfg(struct ice_pf __always_unused *pf,
}
static inline int
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring __always_unused *tx_ring,
struct ice_tx_buf __always_unused *first)
{
return 0;
@@ -113,12 +123,16 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
return false;
}
+static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
+{
+ return 0;
+}
+
static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { }
-static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { }
+static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 4180f1f35fb8..7fdeb411b6df 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -64,7 +64,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcbx_cfg *new_cfg;
int bwcfg = 0, bwrec = 0;
- int err, i, max_tc = 0;
+ int err, i;
if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -80,13 +80,14 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
bwcfg += ets->tc_tx_bw[i];
new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i];
- new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
- if (ets->prio_tc[i] > max_tc)
- max_tc = ets->prio_tc[i];
+ if (new_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ /* in DSCP mode, the UP->TC mapping cannot change */
+ new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
+ new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
+ }
new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
bwrec += ets->tc_reco_bw[i];
new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i];
- new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
}
if (ice_dcb_bwchk(pf, new_cfg)) {
@@ -94,12 +95,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
goto ets_out;
}
- max_tc = pf->hw.func_caps.common_cap.maxtc;
-
- new_cfg->etscfg.maxtcs = max_tc;
-
- if (!bwcfg)
- new_cfg->etscfg.tcbwtable[0] = 100;
+ new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
if (!bwrec)
new_cfg->etsrec.tcbwtable[0] = 100;
@@ -173,10 +169,13 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
pf->dcbx_cap = mode;
qos_cfg = &pf->hw.port_info->qos_cfg;
- if (mode & DCB_CAP_DCBX_VER_CEE)
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+ return ICE_DCB_NO_HW_CHG;
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
- else
+ } else {
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+ }
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
return ICE_DCB_HW_CHG_RST;
@@ -683,6 +682,8 @@ ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
return false;
}
+#define ICE_BYTES_PER_DSCP_VAL 8
+
/**
* ice_dcbnl_setapp - set local IEEE App config
* @netdev: relevant netdev struct
@@ -693,42 +694,117 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcb_app_priority_table new_app;
struct ice_dcbx_cfg *old_cfg, *new_cfg;
+ u8 max_tc;
int ret;
- if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
- !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ /* ONLY DSCP APP TLVs have operational significance */
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
return -EINVAL;
- mutex_lock(&pf->tc_mutex);
+ /* only allow APP TLVs in SW Mode */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ netdev_err(netdev, "can't do DSCP QoS when FW DCB agent active\n");
+ return -EINVAL;
+ }
- new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
- old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ if (!ice_is_feature_supported(pf, ICE_F_DSCP))
+ return -EOPNOTSUPP;
- if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
- ret = -EINVAL;
- goto setapp_out;
+ if (app->protocol >= ICE_DSCP_NUM_VAL) {
+ netdev_err(netdev, "DSCP value 0x%04X out of range\n",
+ app->protocol);
+ return -EINVAL;
+ }
+
+ max_tc = pf->hw.func_caps.common_cap.maxtc;
+ if (app->priority >= max_tc) {
+ netdev_err(netdev, "TC %d out of range, max TC %d\n",
+ app->priority, max_tc);
+ return -EINVAL;
}
+ /* grab TC mutex */
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+
ret = dcb_ieee_setapp(netdev, app);
if (ret)
goto setapp_out;
+ if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) {
+ netdev_err(netdev, "DSCP value 0x%04X already user mapped\n",
+ app->protocol);
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ netdev_err(netdev, "Failed to delete re-mapping TLV\n");
+ ret = -EINVAL;
+ goto setapp_out;
+ }
+
new_app.selector = app->selector;
new_app.prot_id = app->protocol;
new_app.priority = app->priority;
- if (ice_dcbnl_find_app(old_cfg, &new_app)) {
- ret = 0;
- goto setapp_out;
- }
+ /* If port is not in DSCP mode, need to set */
+ if (old_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ int i, j;
+
+ /* set DSCP mode */
+ ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC,
+ NULL);
+ if (ret) {
+ netdev_err(netdev, "Failed to set DSCP PFC mode %d\n",
+ ret);
+ goto setapp_out;
+ }
+ netdev_info(netdev, "Switched QoS to L3 DSCP mode\n");
+
+ new_cfg->pfc_mode = ICE_QOS_MODE_DSCP;
+
+ /* set default DSCP QoS values */
+ new_cfg->etscfg.willing = 0;
+ new_cfg->pfc.pfccap = max_tc;
+ new_cfg->pfc.willing = 0;
+
+ for (i = 0; i < max_tc; i++)
+ for (j = 0; j < ICE_BYTES_PER_DSCP_VAL; j++) {
+ int dscp, offset;
+
+ dscp = (i * max_tc) + j;
+ offset = max_tc * ICE_BYTES_PER_DSCP_VAL;
+
+ new_cfg->dscp_map[dscp] = i;
+ /* if fewer than 8 TCs are supported */
+ if (max_tc < ICE_MAX_TRAFFIC_CLASS)
+ new_cfg->dscp_map[dscp + offset] = i;
+ }
+
+ new_cfg->etscfg.tcbwtable[0] = 100;
+ new_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+ new_cfg->etscfg.prio_table[0] = 0;
+
+ for (i = 1; i < max_tc; i++) {
+ new_cfg->etscfg.tcbwtable[i] = 0;
+ new_cfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ new_cfg->etscfg.prio_table[i] = i;
+ }
+ } /* end of switching to DSCP mode */
+
+ /* apply new mapping for this DSCP value */
+ new_cfg->dscp_map[app->protocol] = app->priority;
new_cfg->app[new_cfg->numapps++] = new_app;
+
ret = ice_pf_dcb_cfg(pf, new_cfg, true);
/* return of zero indicates new cfg applied */
if (ret == ICE_DCB_HW_CHG_RST)
ice_dcbnl_devreset(netdev);
- if (ret == ICE_DCB_NO_HW_CHG)
- ret = ICE_DCB_HW_CHG_RST;
+ else
+ ret = ICE_DCB_NO_HW_CHG;
setapp_out:
mutex_unlock(&pf->tc_mutex);
@@ -749,22 +825,21 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
unsigned int i, j;
int ret = 0;
- if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n");
return -EINVAL;
+ }
mutex_lock(&pf->tc_mutex);
old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- if (old_cfg->numapps <= 1)
- goto delapp_out;
-
ret = dcb_ieee_delapp(netdev, app);
if (ret)
goto delapp_out;
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
- for (i = 1; i < new_cfg->numapps; i++) {
+ for (i = 0; i < new_cfg->numapps; i++) {
if (app->selector == new_cfg->app[i].selector &&
app->protocol == new_cfg->app[i].prot_id &&
app->priority == new_cfg->app[i].priority) {
@@ -784,17 +859,58 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
new_cfg->numapps--;
for (j = i; j < new_cfg->numapps; j++) {
- new_cfg->app[i].selector = old_cfg->app[j + 1].selector;
- new_cfg->app[i].prot_id = old_cfg->app[j + 1].prot_id;
- new_cfg->app[i].priority = old_cfg->app[j + 1].priority;
+ new_cfg->app[j].selector = old_cfg->app[j + 1].selector;
+ new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id;
+ new_cfg->app[j].priority = old_cfg->app[j + 1].priority;
}
- ret = ice_pf_dcb_cfg(pf, new_cfg, true);
- /* return of zero indicates new cfg applied */
+ /* if not a DSCP APP TLV or DSCP is not supported, we are done */
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ !ice_is_feature_supported(pf, ICE_F_DSCP)) {
+ ret = ICE_DCB_HW_CHG;
+ goto delapp_out;
+ }
+
+ /* if DSCP TLV, then need to address change in mapping */
+ clear_bit(app->protocol, new_cfg->dscp_mapped);
+ /* remap this DSCP value to default value */
+ new_cfg->dscp_map[app->protocol] = app->protocol %
+ ICE_BYTES_PER_DSCP_VAL;
+
+ /* if the last DSCP mapping just got deleted, need to switch
+ * to L2 VLAN QoS mode
+ */
+ if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
+ new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
+ ret = ice_aq_set_pfc_mode(&pf->hw,
+ ICE_AQC_PFC_VLAN_BASED_PFC,
+ NULL);
+ if (ret) {
+ netdev_info(netdev, "Failed to set VLAN PFC mode %d\n",
+ ret);
+ goto delapp_out;
+ }
+ netdev_info(netdev, "Switched QoS to L2 VLAN mode\n");
+
+ new_cfg->pfc_mode = ICE_QOS_MODE_VLAN;
+
+ ret = ice_dcb_sw_dflt_cfg(pf, true, true);
+ } else {
+ ret = ice_pf_dcb_cfg(pf, new_cfg, true);
+ }
+
+ /* return of ICE_DCB_HW_CHG_RST indicates new cfg applied
+ * and reset needs to be performed
+ */
if (ret == ICE_DCB_HW_CHG_RST)
ice_dcbnl_devreset(netdev);
+
+ /* if the change was not significant enough to actually call
+ * the reconfiguration flow, we still need to tell the caller that
+ * their request was successfully handled
+ */
if (ret == ICE_DCB_NO_HW_CHG)
- ret = ICE_DCB_HW_CHG_RST;
+ ret = ICE_DCB_HW_CHG;
delapp_out:
mutex_unlock(&pf->tc_mutex);
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 9d8194671f6a..61dd2f18dee8 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -21,6 +21,12 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+#define ICE_SUBDEV_ID_E810T 0x000E
+#define ICE_SUBDEV_ID_E810T2 0x000F
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */
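These IDs only take effect once listed in the driver's pci_device_id table (in ice_main.c, outside this hunk); the new entries would presumably look like:

/* Sketch: assumed ice_pci_tbl additions for the new E810-XXV parts. */
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },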
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 14afce82ef63..b9bd9f9472f6 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -4,6 +4,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
+#include "ice_eswitch.h"
#include "ice_fw_update.h"
/* context for devlink info version reporting */
@@ -22,7 +23,7 @@ struct ice_info_ctx {
*
* If a version does not exist, for example when attempting to get the
* inactive version of flash when there is no pending update, the function
- * should leave the buffer in the ctx structure empty and return 0.
+ * should leave the buffer in the ctx structure empty.
*/
static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
@@ -35,7 +36,7 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}
-static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
enum ice_status status;
@@ -45,148 +46,127 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
/* We failed to locate the PBA, so just skip this entry */
dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
ice_stat_str(status));
-
- return 0;
}
-static int ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
- hw->fw_patch);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}
-static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
+ hw->api_min_ver, hw->api_patch);
}
-static int ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
-
- return 0;
}
-static int ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_orom_info *orom = &pf->hw.flash.orom;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
}
-static int
-ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_orom_info *orom = &ctx->pending_orom;
if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
orom->major, orom->build, orom->patch);
-
- return 0;
}
-static int ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
-
- return 0;
}
-static int
-ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &ctx->pending_nvm;
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
- snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
+ nvm->major, nvm->minor);
}
-static int ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
-
- return 0;
}
-static int
-ice_info_pending_eetrack(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &ctx->pending_nvm;
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
-
- return 0;
}
-static int ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
-
- return 0;
}
-static int ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
- pkg->draft);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
+ pkg->major, pkg->minor, pkg->update, pkg->draft);
}
-static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
-
- return 0;
}
-static int ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
/* The netlist version fields are BCD formatted */
- snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
- netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
- netlist->cust_ver);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
}
-static int ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
-
- return 0;
}
-static int
-ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &ctx->pending_netlist;
@@ -194,21 +174,18 @@ ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
netlist->major, netlist->minor,
- netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
- netlist->cust_ver);
-
- return 0;
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
}
-static int
-ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &ctx->pending_netlist;
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
-
- return 0;
}
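
/* A minimal sketch (not part of this patch) of the getter contract after the
 * conversion to void: write the version string into ctx->buf, or leave the
 * buffer empty so ice_devlink_info_get() tries the fallback and then skips
 * the field entirely. "example_version_available" is a hypothetical helper.
 *
 *	static void ice_info_example_ver(struct ice_pf *pf,
 *					 struct ice_info_ctx *ctx)
 *	{
 *		if (example_version_available(pf))
 *			snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", 1, 0);
 *		// an empty ctx->buf means "use fallback / do not report"
 *	}
 */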
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
@@ -238,8 +215,8 @@ enum ice_version_type {
static const struct ice_devlink_version {
enum ice_version_type type;
const char *key;
- int (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
- int (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
+ void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
+ void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
@@ -351,24 +328,15 @@ static int ice_devlink_info_get(struct devlink *devlink,
memset(ctx->buf, 0, sizeof(ctx->buf));
- err = ice_devlink_versions[i].getter(pf, ctx);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
- goto out_free_ctx;
- }
+ ice_devlink_versions[i].getter(pf, ctx);
/* If the default getter doesn't report a version, use the
* fallback function. This is primarily useful in the case of
* "stored" versions that want to report the same value as the
* running version in the normal case of no pending update.
*/
- if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) {
- err = ice_devlink_versions[i].fallback(pf, ctx);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
- goto out_free_ctx;
- }
- }
+ if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
+ ice_devlink_versions[i].fallback(pf, ctx);
/* Do not report missing versions */
if (ctx->buf[0] == '\0')
@@ -456,6 +424,8 @@ ice_devlink_flash_update(struct devlink *devlink,
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .eswitch_mode_get = ice_eswitch_mode_get,
+ .eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
};
@@ -482,10 +452,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
return NULL;
/* Add an action to teardown the devlink when unwinding the driver */
- if (devm_add_action(dev, ice_devlink_free, devlink)) {
- devlink_free(devlink);
+ if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
return NULL;
- }
return devlink_priv(devlink);
}
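
/* Why the explicit devlink_free() can be dropped: devm_add_action_or_reset()
 * runs the teardown action itself when registration fails. Roughly (per
 * include/linux/device.h):
 *
 *	static inline int devm_add_action_or_reset(struct device *dev,
 *						   void (*action)(void *),
 *						   void *data)
 *	{
 *		int ret = devm_add_action(dev, action, data);
 *
 *		if (ret)
 *			action(data);	// here: ice_devlink_free(devlink)
 *		return ret;
 *	}
 */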
@@ -498,15 +466,58 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
*
- * Return: zero on success or an error code on failure.
+ * Register the devlink instance associated with this PF.
*/
-int ice_devlink_register(struct ice_pf *pf)
+void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- struct device *dev = ice_pf_to_dev(pf);
+
+ devlink_register(devlink);
+}
+
+/**
+ * ice_devlink_unregister - Unregister devlink resources for this PF.
+ * @pf: the PF structure to cleanup
+ *
+ * Releases resources used by devlink and cleans up associated memory.
+ */
+void ice_devlink_unregister(struct ice_pf *pf)
+{
+ devlink_unregister(priv_to_devlink(pf));
+}
+
+/**
+ * ice_devlink_create_pf_port - Create a devlink port for this PF
+ * @pf: the PF to create a devlink port for
+ *
+ * Create and register a devlink_port for this PF.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_pf_port(struct ice_pf *pf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
int err;
- err = devlink_register(devlink);
+ dev = ice_pf_to_dev(pf);
+
+ devlink_port = &pf->devlink_port;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EIO;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pf->hw.bus.func;
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ err = devlink_port_register(devlink, devlink_port, vsi->idx);
if (err) {
- dev_err(dev, "devlink registration failed: %d\n", err);
+ dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
+ pf->hw.pf_id, err);
return err;
}
@@ -514,71 +525,75 @@ int ice_devlink_register(struct ice_pf *pf)
}
/**
- * ice_devlink_unregister - Unregister devlink resources for this PF.
- * @pf: the PF structure to cleanup
+ * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
*
- * Releases resources used by devlink and cleans up associated memory.
+ * Unregisters the devlink_port structure associated with this PF.
*/
-void ice_devlink_unregister(struct ice_pf *pf)
+void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
- devlink_unregister(priv_to_devlink(pf));
+ struct devlink_port *devlink_port;
+
+ devlink_port = &pf->devlink_port;
+
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
}
/**
- * ice_devlink_create_port - Create a devlink port for this VSI
- * @vsi: the VSI to create a port for
+ * ice_devlink_create_vf_port - Create a devlink port for this VF
+ * @vf: the VF to create a port for
*
- * Create and register a devlink_port for this VSI.
+ * Create and register a devlink_port for this VF.
*
* Return: zero on success or an error code on failure.
*/
-int ice_devlink_create_port(struct ice_vsi *vsi)
+int ice_devlink_create_vf_port(struct ice_vf *vf)
{
struct devlink_port_attrs attrs = {};
- struct ice_port_info *pi;
+ struct devlink_port *devlink_port;
struct devlink *devlink;
+ struct ice_vsi *vsi;
struct device *dev;
struct ice_pf *pf;
int err;
- /* Currently we only create devlink_port instances for PF VSIs */
- if (vsi->type != ICE_VSI_PF)
- return -EINVAL;
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ vsi = ice_get_vf_vsi(vf);
+ devlink_port = &vf->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = pf->hw.bus.func;
+ attrs.pci_vf.vf = vf->vf_id;
- pf = vsi->back;
+ devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
- dev = ice_pf_to_dev(pf);
- pi = pf->hw.port_info;
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pi->lport;
- devlink_port_attrs_set(&vsi->devlink_port, &attrs);
- err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
+ err = devlink_port_register(devlink, devlink_port, vsi->idx);
if (err) {
- dev_err(dev, "devlink_port_register failed: %d\n", err);
+ dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
+ vf->vf_id, err);
return err;
}
- vsi->devlink_port_registered = true;
-
return 0;
}
/**
- * ice_devlink_destroy_port - Destroy the devlink_port for this VSI
- * @vsi: the VSI to cleanup
+ * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
+ * @vf: the VF to cleanup
*
- * Unregisters the devlink_port structure associated with this VSI.
+ * Unregisters the devlink_port structure associated with this VF.
*/
-void ice_devlink_destroy_port(struct ice_vsi *vsi)
+void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
- if (!vsi->devlink_port_registered)
- return;
+ struct devlink_port *devlink_port;
- devlink_port_type_clear(&vsi->devlink_port);
- devlink_port_unregister(&vsi->devlink_port);
+ devlink_port = &vf->devlink_port;
- vsi->devlink_port_registered = false;
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
}
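+
+/* With the split helpers the PF now exposes one PHYSICAL-flavour devlink
+ * port for itself and one PCI_VF-flavour port per VF; both can be listed
+ * from user space with the standard iproute2 command:
+ *
+ *	devlink port show
+ */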
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index e07e74426bde..b7f9551e4fc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -6,10 +6,12 @@
struct ice_pf *ice_allocate_pf(struct device *dev);
-int ice_devlink_register(struct ice_pf *pf);
+void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
-int ice_devlink_create_port(struct ice_vsi *vsi);
-void ice_devlink_destroy_port(struct ice_vsi *vsi);
+int ice_devlink_create_pf_port(struct ice_pf *pf);
+void ice_devlink_destroy_pf_port(struct ice_pf *pf);
+int ice_devlink_create_vf_port(struct ice_vf *vf);
+void ice_devlink_destroy_vf_port(struct ice_vf *vf);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
new file mode 100644
index 000000000000..d1d7389b0bff
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_eswitch.h"
+#include "ice_fltr.h"
+#include "ice_repr.h"
+#include "ice_devlink.h"
+#include "ice_tc_lib.h"
+
+/**
+ * ice_eswitch_setup_env - configure switchdev HW filters
+ * @pf: pointer to PF struct
+ *
+ * This function adds the HW filter configuration specific to switchdev
+ * mode.
+ */
+static int ice_eswitch_setup_env(struct ice_pf *pf)
+{
+ struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
+ struct net_device *uplink_netdev = uplink_vsi->netdev;
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_port_info *pi = pf->hw.port_info;
+ bool rule_added = false;
+
+ ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
+
+ ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+
+ netif_addr_lock_bh(uplink_netdev);
+ __dev_uc_unsync(uplink_netdev, NULL);
+ __dev_mc_unsync(uplink_netdev, NULL);
+ netif_addr_unlock_bh(uplink_netdev);
+
+ if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
+ goto err_def_rx;
+
+ if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
+ if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
+ goto err_def_rx;
+ rule_added = true;
+ }
+
+ if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
+ goto err_def_tx;
+
+ if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
+ goto err_override_uplink;
+
+ if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
+ goto err_override_control;
+
+ if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
+ ICE_FLTR_TX,
+ ICE_SINGLE_ACT_LB_ENABLE))
+ goto err_update_action;
+
+ return 0;
+
+err_update_action:
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
+err_override_control:
+ ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+err_override_uplink:
+ ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
+err_def_tx:
+ if (rule_added)
+ ice_clear_dflt_vsi(uplink_vsi->vsw);
+err_def_rx:
+ ice_fltr_add_mac_and_broadcast(uplink_vsi,
+ uplink_vsi->port_info->mac.perm_addr,
+ ICE_FWD_TO_VSI);
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
+ * @pf: pointer to PF struct
+ *
+ * In switchdev mode the numbers of allocated Tx and Rx rings are equal.
+ *
+ * This function fills the q_vector structures associated with each port
+ * representor and moves each ring pair to the representor's netdev. Every
+ * port representor gets one dedicated Tx/Rx ring pair, so the number of
+ * ring pairs equals the number of VFs.
+ */
+static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = pf->switchdev.control_vsi;
+ int q_id;
+
+ ice_for_each_txq(vsi, q_id) {
+ struct ice_repr *repr = pf->vf[q_id].repr;
+ struct ice_q_vector *q_vector = repr->q_vector;
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+
+ q_vector->vsi = vsi;
+ q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
+
+ q_vector->num_ring_tx = 1;
+ q_vector->tx.tx_ring = tx_ring;
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = NULL;
+ tx_ring->netdev = repr->netdev;
+ /* In switchdev mode, from OS stack perspective, there is only
+ * one queue for given netdev, so it needs to be indexed as 0.
+ */
+ tx_ring->q_index = 0;
+
+ q_vector->num_ring_rx = 1;
+ q_vector->rx.rx_ring = rx_ring;
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = NULL;
+ rx_ring->netdev = repr->netdev;
+ }
+}
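+
+/* The mapping established above, sketched for VF index i:
+ *
+ *	ctrl_vsi->tx_rings[i] --\
+ *	                         pf->vf[i].repr->q_vector
+ *	ctrl_vsi->rx_rings[i] --/
+ *
+ * i.e. one dedicated vector per representor, with both rings reporting the
+ * representor's netdev and queue index 0.
+ */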
+
+/**
+ * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
+ * @pf: pointer to PF struct
+ */
+static int ice_eswitch_setup_reprs(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ int max_vsi_num = 0;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!vf->repr->dst) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ goto err;
+ }
+
+ if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ metadata_dst_free(vf->repr->dst);
+ goto err;
+ }
+
+ if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ metadata_dst_free(vf->repr->dst);
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ goto err;
+ }
+
+ if (max_vsi_num < vsi->vsi_num)
+ max_vsi_num = vsi->vsi_num;
+
+ netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+
+ netif_keep_dst(vf->repr->netdev);
+ }
+
+ kfree(ctrl_vsi->target_netdevs);
+
+ ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
+ sizeof(*ctrl_vsi->target_netdevs),
+ GFP_KERNEL);
+ if (!ctrl_vsi->target_netdevs)
+ goto err;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_repr *repr = pf->vf[i].repr;
+ struct ice_vsi *vsi = repr->src_vsi;
+ struct metadata_dst *dst;
+
+ ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;
+
+ dst = repr->dst;
+ dst->u.port_info.port_id = vsi->vsi_num;
+ dst->u.port_info.lower_dev = repr->netdev;
+ ice_repr_set_traffic_vsi(repr, ctrl_vsi);
+ }
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_release_reprs - clear port representor VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+ int i;
+
+ kfree(ctrl_vsi->target_netdevs);
+ ice_for_each_vf(pf, i) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+
+ netif_napi_del(&vf->repr->q_vector->napi);
+ }
+}
+
+/**
+ * ice_eswitch_update_repr - reconfigure VF port representor
+ * @vsi: VF VSI for which port representor is configured
+ */
+void ice_eswitch_update_repr(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_repr *repr;
+ struct ice_vf *vf;
+ int ret;
+
+ if (!ice_is_switchdev_running(pf))
+ return;
+
+ vf = &pf->vf[vsi->vf_id];
+ repr = vf->repr;
+ repr->src_vsi = vsi;
+ repr->dst->u.port_info.port_id = vsi->vsi_num;
+
+ ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+ if (ret) {
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
+ }
+}
+
+/**
+ * ice_eswitch_port_start_xmit - callback for packets transmit
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ice_netdev_priv *np;
+ struct ice_repr *repr;
+ struct ice_vsi *vsi;
+
+ np = netdev_priv(netdev);
+ vsi = np->vsi;
+
+ if (ice_is_reset_in_progress(vsi->back->state))
+ return NETDEV_TX_BUSY;
+
+ repr = ice_netdev_to_repr(netdev);
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *)repr->dst);
+ skb_dst_set(skb, (struct dst_entry *)repr->dst);
+ skb->queue_mapping = repr->vf->vf_id;
+
+ return ice_start_xmit(skb, netdev);
+}
+
+/**
+ * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
+ * @skb: pointer to send buffer
+ * @off: pointer to offload struct
+ */
+void
+ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off)
+{
+ struct metadata_dst *dst = skb_metadata_dst(skb);
+ u64 cd_cmd, dst_vsi;
+
+ if (!dst) {
+ cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
+ off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
+ } else {
+ cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
+ dst_vsi = ((u64)dst->u.port_info.port_id <<
+ ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
+ off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
+ }
+}
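+
+/* Schematically, the context-descriptor quad word built above is:
+ *
+ *	cd_qw1 = DTYPE_CTX | (SWTCH_{UPLINK,VSI} << CMD_S)
+ *	         | ((port_id << VSI_S) & VSI_M)	// VSI part only with a dst
+ *
+ * so frames without a metadata dst go to the uplink, and frames with one go
+ * to the VSI recorded in dst->u.port_info.port_id.
+ */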
+
+/**
+ * ice_eswitch_release_env - clear switchdev HW filters
+ * @pf: pointer to PF struct
+ *
+ * This function removes the HW filter configuration specific to switchdev
+ * mode and restores the default legacy mode settings.
+ */
+static void ice_eswitch_release_env(struct ice_pf *pf)
+{
+ struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
+ ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+ ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
+ ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_fltr_add_mac_and_broadcast(uplink_vsi,
+ uplink_vsi->port_info->mac.perm_addr,
+ ICE_FWD_TO_VSI);
+}
+
+/**
+ * ice_eswitch_vsi_setup - configure switchdev control VSI
+ * @pf: pointer to PF structure
+ * @pi: pointer to port_info structure
+ */
+static struct ice_vsi *
+ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
+}
+
+/**
+ * ice_eswitch_napi_del - remove NAPI handle for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_del(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ netif_napi_del(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_napi_enable - enable NAPI for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_enable(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ napi_enable(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_napi_disable - disable NAPI for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_disable(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ napi_disable(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_set_rxdid - configure rxdid on all Rx queues of the VSI
+ * @vsi: VSI to setup rxdid on
+ * @rxdid: flex descriptor id
+ */
+static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_rx_ring *ring = vsi->rx_rings[i];
+ u16 pf_q = vsi->rxq_map[ring->q_index];
+
+ ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
+ }
+}
+
+/**
+ * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
+ * @pf: pointer to PF structure
+ */
+static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi;
+
+ pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
+ if (!pf->switchdev.control_vsi)
+ return -ENODEV;
+
+ ctrl_vsi = pf->switchdev.control_vsi;
+ pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
+ if (!pf->switchdev.uplink_vsi)
+ goto err_vsi;
+
+ if (ice_eswitch_setup_env(pf))
+ goto err_vsi;
+
+ if (ice_repr_add_for_all_vfs(pf))
+ goto err_repr_add;
+
+ if (ice_eswitch_setup_reprs(pf))
+ goto err_setup_reprs;
+
+ ice_eswitch_remap_rings_to_vectors(pf);
+
+ if (ice_vsi_open(ctrl_vsi))
+ goto err_setup_reprs;
+
+ ice_eswitch_napi_enable(pf);
+
+ ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
+
+ return 0;
+
+err_setup_reprs:
+ ice_repr_rem_from_all_vfs(pf);
+err_repr_add:
+ ice_eswitch_release_env(pf);
+err_vsi:
+ ice_vsi_release(ctrl_vsi);
+ return -ENODEV;
+}
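+
+/* The error labels above unwind in exact reverse order of setup: a failed
+ * representor setup removes the representors, then the switchdev
+ * environment is released, and the control VSI is freed last.
+ * ice_eswitch_disable_switchdev() below mirrors the same order at runtime.
+ */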
+
+/**
+ * ice_eswitch_disable_switchdev - disable switchdev resources
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+
+ ice_eswitch_napi_disable(pf);
+ ice_eswitch_release_env(pf);
+ ice_eswitch_release_reprs(pf, ctrl_vsi);
+ ice_vsi_release(ctrl_vsi);
+ ice_repr_rem_from_all_vfs(pf);
+}
+
+/**
+ * ice_eswitch_mode_set - set new eswitch mode
+ * @devlink: pointer to devlink structure
+ * @mode: eswitch mode to switch to
+ * @extack: pointer to extack structure
+ */
+int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (pf->eswitch_mode == mode)
+ return 0;
+
+ if (pf->num_alloc_vfs) {
+ dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
+ NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
+ return -EOPNOTSUPP;
+ }
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
+ pf->hw.pf_id);
+ NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
+ pf->hw.pf_id);
+ NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
+ return -EINVAL;
+ }
+
+ pf->eswitch_mode = mode;
+ return 0;
+}
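+
+/* Example of exercising this callback from user space with iproute2's
+ * devlink tool (the PCI address is illustrative):
+ *
+ *	devlink dev eswitch set pci/0000:03:00.0 mode switchdev
+ *	devlink dev eswitch show pci/0000:03:00.0
+ *
+ * As checked above, the request is rejected while any VFs exist.
+ */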
+
+/**
+ * ice_eswitch_get_target_netdev - return port representor netdev
+ * @rx_ring: pointer to Rx ring
+ * @rx_desc: pointer to Rx descriptor
+ *
+ * When the ring belongs to the switchdev control VSI, this function returns
+ * the netdev of the appropriate port representor. Otherwise, the regular
+ * netdev associated with the Rx ring is returned.
+ */
+struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ struct ice_32b_rx_flex_desc_nic_2 *desc;
+ struct ice_vsi *vsi = rx_ring->vsi;
+ struct ice_vsi *control_vsi;
+ u16 target_vsi_id;
+
+ control_vsi = vsi->back->switchdev.control_vsi;
+ if (vsi != control_vsi)
+ return rx_ring->netdev;
+
+ desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
+ target_vsi_id = le16_to_cpu(desc->src_vsi);
+
+ return vsi->target_netdevs[target_vsi_id];
+}
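+
+/* A caller on the Rx path might use this as, schematically:
+ *
+ *	skb->protocol = eth_type_trans(skb,
+ *			ice_eswitch_get_target_netdev(rx_ring, rx_desc));
+ *
+ * so traffic received on the control VSI is attributed to the right
+ * representor netdev (sketch only; the actual call site is elsewhere).
+ */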
+
+/**
+ * ice_eswitch_mode_get - get current eswitch mode
+ * @devlink: pointer to devlink structure
+ * @mode: output parameter for current eswitch mode
+ */
+int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ *mode = pf->eswitch_mode;
+ return 0;
+}
+
+/**
+ * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
+ * @pf: pointer to PF structure
+ *
+ * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
+ * false otherwise.
+ */
+bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
+{
+ return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
+}
+
+/**
+ * ice_eswitch_release - cleanup eswitch
+ * @pf: pointer to PF structure
+ */
+void ice_eswitch_release(struct ice_pf *pf)
+{
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ return;
+
+ ice_eswitch_disable_switchdev(pf);
+ pf->switchdev.is_running = false;
+}
+
+/**
+ * ice_eswitch_configure - configure eswitch
+ * @pf: pointer to PF structure
+ */
+int ice_eswitch_configure(struct ice_pf *pf)
+{
+ int status;
+
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
+ return 0;
+
+ status = ice_eswitch_enable_switchdev(pf);
+ if (status)
+ return status;
+
+ pf->switchdev.is_running = true;
+ return 0;
+}
+
+/**
+ * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
+{
+ struct ice_repr *repr;
+ int i;
+
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
+
+ ice_for_each_vf(pf, i) {
+ repr = pf->vf[i].repr;
+ if (repr)
+ ice_repr_start_tx_queues(repr);
+ }
+}
+
+/**
+ * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
+ * @pf: pointer to PF structure
+ */
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
+{
+ struct ice_repr *repr;
+ int i;
+
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
+
+ ice_for_each_vf(pf, i) {
+ repr = pf->vf[i].repr;
+ if (repr)
+ ice_repr_stop_tx_queues(repr);
+ }
+}
+
+/**
+ * ice_eswitch_rebuild - rebuild eswitch
+ * @pf: pointer to PF structure
+ */
+int ice_eswitch_rebuild(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ int status;
+
+ ice_eswitch_napi_disable(pf);
+ ice_eswitch_napi_del(pf);
+
+ status = ice_eswitch_setup_env(pf);
+ if (status)
+ return status;
+
+ status = ice_eswitch_setup_reprs(pf);
+ if (status)
+ return status;
+
+ ice_eswitch_remap_rings_to_vectors(pf);
+
+ ice_replay_tc_fltrs(pf);
+
+ status = ice_vsi_open(ctrl_vsi);
+ if (status)
+ return status;
+
+ ice_eswitch_napi_enable(pf);
+ ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
+ ice_eswitch_start_all_tx_queues(pf);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
new file mode 100644
index 000000000000..364cd2a79c37
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_ESWITCH_H_
+#define _ICE_ESWITCH_H_
+
+#include <net/devlink.h>
+
+#ifdef CONFIG_ICE_SWITCHDEV
+void ice_eswitch_release(struct ice_pf *pf);
+int ice_eswitch_configure(struct ice_pf *pf);
+int ice_eswitch_rebuild(struct ice_pf *pf);
+
+int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
+bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
+
+void ice_eswitch_update_repr(struct ice_vsi *vsi);
+
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
+
+struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc);
+
+void ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off);
+netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+#else /* CONFIG_ICE_SWITCHDEV */
+static inline void ice_eswitch_release(struct ice_pf *pf) { }
+
+static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
+
+static inline void
+ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off) { }
+
+static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
+
+static inline int ice_eswitch_configure(struct ice_pf *pf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_eswitch_rebuild(struct ice_pf *pf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ return DEVLINK_ESWITCH_MODE_LEGACY;
+}
+
+static inline int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ return rx_ring->netdev;
+}
+
+static inline netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ return NETDEV_TX_BUSY;
+}
+#endif /* CONFIG_ICE_SWITCHDEV */
+#endif /* _ICE_ESWITCH_H_ */
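+
+/* Note on the CONFIG_ICE_SWITCHDEV=n stubs above: they keep callers free of
+ * #ifdefs. configure/rebuild/mode_set report -EOPNOTSUPP, the mode getter
+ * always reports legacy mode, the target-netdev helper falls back to the
+ * ring's own netdev, and the xmit stub refuses traffic with NETDEV_TX_BUSY.
+ */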
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index c451cf401e63..572519e402f4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -170,10 +170,9 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
static void
-ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct ice_orom_info *orom;
@@ -190,9 +189,19 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
nvm->eetrack, orom->major, orom->build, orom->patch);
+}
+
+static void
+ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ __ice_get_drvinfo(netdev, drvinfo, np->vsi);
strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
@@ -584,7 +593,7 @@ static bool ice_lbtest_check_frame(u8 *frame)
*
* Function sends loopback packets on a test Tx ring.
*/
-static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
+static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
{
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
@@ -637,7 +646,7 @@ static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
* Function receives loopback packets and verify their correctness.
* Returns number of received valid frames.
*/
-static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
+static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
struct ice_rx_buf *rx_buf;
int valid_frames, i;
@@ -676,9 +685,10 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
struct ice_pf *pf = orig_vsi->back;
- struct ice_ring *tx_ring, *rx_ring;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
struct device *dev;
u8 *tx_frame;
int i;
@@ -866,10 +876,10 @@ skip_ol_tests:
netdev_info(netdev, "testing finished\n");
}
-static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void
+__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
unsigned int i;
u8 *p = data;
@@ -879,6 +889,9 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
ethtool_sprintf(&p,
ice_gstrings_vsi_stats[i].stat_string);
+ if (ice_is_port_repr_netdev(netdev))
+ return;
+
ice_for_each_alloc_txq(vsi, i) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -917,6 +930,13 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
+static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ __ice_get_strings(netdev, stringset, data, np->vsi);
+}
+
static int
ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
@@ -1215,6 +1235,13 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
enum ice_status status;
bool dcbx_agent_status;
+ if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
+ clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+ dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n");
+ ret = -EOPNOTSUPP;
+ goto ethtool_exit;
+ }
+
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
@@ -1312,13 +1339,13 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
}
static void
-ice_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats __always_unused *stats, u64 *data)
+__ice_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats, u64 *data,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
unsigned int j;
int i = 0;
char *p;
@@ -1332,14 +1359,17 @@ ice_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ if (ice_is_port_repr_netdev(netdev))
+ return;
+
/* populate per queue stats */
rcu_read_lock();
ice_for_each_alloc_txq(vsi, j) {
- ring = READ_ONCE(vsi->tx_rings[j]);
- if (ring) {
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ tx_ring = READ_ONCE(vsi->tx_rings[j]);
+ if (tx_ring) {
+ data[i++] = tx_ring->stats.pkts;
+ data[i++] = tx_ring->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1347,10 +1377,10 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
ice_for_each_alloc_rxq(vsi, j) {
- ring = READ_ONCE(vsi->rx_rings[j]);
- if (ring) {
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ rx_ring = READ_ONCE(vsi->rx_rings[j]);
+ if (rx_ring) {
+ data[i++] = rx_ring->stats.pkts;
+ data[i++] = rx_ring->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1379,6 +1409,15 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
}
+static void
+ice_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats, u64 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ __ice_get_ethtool_stats(netdev, stats, data, np->vsi);
+}
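+
+/* The __ice_get_drvinfo/__ice_get_strings/__ice_get_ethtool_stats helpers
+ * take the VSI explicitly so that the VF port representor ethtool ops added
+ * further down can reuse them with repr->src_vsi instead of np->vsi.
+ */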
+
#define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \
ICE_PHY_TYPE_LOW_100M_SGMII)
@@ -2667,9 +2706,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
- struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_ring *xdp_rings = NULL;
+ struct ice_tx_ring *xdp_rings = NULL;
+ struct ice_tx_ring *tx_rings = NULL;
+ struct ice_rx_ring *rx_rings = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
@@ -2718,12 +2758,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- for (i = 0; i < vsi->alloc_txq; i++)
+ ice_for_each_alloc_txq(vsi, i)
vsi->tx_rings[i]->count = new_tx_cnt;
- for (i = 0; i < vsi->alloc_rxq; i++)
+ ice_for_each_alloc_rxq(vsi, i)
vsi->rx_rings[i]->count = new_rx_cnt;
if (ice_is_xdp_ena_vsi(vsi))
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
vsi->xdp_rings[i]->count = new_tx_cnt;
vsi->num_tx_desc = (u16)new_tx_cnt;
vsi->num_rx_desc = (u16)new_rx_cnt;
@@ -2772,7 +2812,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
goto free_tx;
}
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
@@ -2866,7 +2906,7 @@ process_link:
}
if (xdp_rings) {
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
ice_free_tx_ring(vsi->xdp_rings[i]);
*vsi->xdp_rings[i] = xdp_rings[i];
}
@@ -3155,6 +3195,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -EIO;
}
+ if (ice_is_adq_active(pf)) {
+ netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n");
+ return -EOPNOTSUPP;
+ }
+
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
@@ -3255,7 +3300,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.ring && q_vector->tx.ring)
+ if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
combined++;
}
@@ -3365,6 +3410,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
return -EINVAL;
+ if (ice_is_adq_active(pf)) {
+ netdev_err(dev, "Cannot set channels with ADQ configured.\n");
+ return -EOPNOTSUPP;
+ }
+
if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
return -EOPNOTSUPP;
@@ -3466,15 +3516,9 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-enum ice_container_type {
- ICE_RX_CONTAINER,
- ICE_TX_CONTAINER,
-};
-
/**
* ice_get_rc_coalesce - get ITR values for specific ring container
* @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, Rx or Tx
* @rc: ring container that the ITR values will come from
*
* Query the device for ice_ring_container specific ITR values. This is
@@ -3484,24 +3528,23 @@ enum ice_container_type {
* Returns 0 on success, negative otherwise.
*/
static int
-ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
- struct ice_ring_container *rc)
+ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
{
- if (!rc->ring)
+ if (!rc->rx_ring)
return -EINVAL;
- switch (c_type) {
+ switch (rc->type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
ec->rx_coalesce_usecs = rc->itr_setting;
- ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
+ ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl;
break;
case ICE_TX_CONTAINER:
ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc);
ec->tx_coalesce_usecs = rc->itr_setting;
break;
default:
- dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type);
return -EINVAL;
}
@@ -3522,18 +3565,18 @@ static int
ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
- if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
- if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
- if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else {
@@ -3585,7 +3628,6 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, Rx or Tx
* @ec: ethtool structure from user to update ITR settings
* @rc: ring container that the ITR values will come from
* @vsi: VSI associated to the ring container
@@ -3597,19 +3639,22 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
* Returns 0 on success, negative otherwise.
*/
static int
-ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
+ice_set_rc_coalesce(struct ethtool_coalesce *ec,
struct ice_ring_container *rc, struct ice_vsi *vsi)
{
- const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx";
+ const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
u32 use_adaptive_coalesce, coalesce_usecs;
struct ice_pf *pf = vsi->back;
u16 itr_setting;
- if (!rc->ring)
+ if (!rc->rx_ring)
return -EINVAL;
- switch (c_type) {
+ switch (rc->type) {
case ICE_RX_CONTAINER:
+ {
+ struct ice_q_vector *q_vector = rc->rx_ring->q_vector;
+
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
(ec->rx_coalesce_usecs_high &&
ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
@@ -3618,22 +3663,20 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
ICE_MAX_INTRL);
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl &&
+ if (ec->rx_coalesce_usecs_high != q_vector->intrl &&
(ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n",
c_type_str);
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
- rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
- ice_write_intrl(rc->ring->q_vector,
- ec->rx_coalesce_usecs_high);
- }
+ if (ec->rx_coalesce_usecs_high != q_vector->intrl)
+ q_vector->intrl = ec->rx_coalesce_usecs_high;
use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
coalesce_usecs = ec->rx_coalesce_usecs;
break;
+ }
case ICE_TX_CONTAINER:
use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
coalesce_usecs = ec->tx_coalesce_usecs;
@@ -3641,7 +3684,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
default:
dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
- c_type);
+ rc->type);
return -EINVAL;
}
@@ -3690,22 +3733,22 @@ static int
ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
- if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
- if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
@@ -3778,6 +3821,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
if (ice_set_q_coalesce(vsi, ec, v_idx))
return -EINVAL;
+
+ ice_set_q_vector_intrl(vsi->q_vectors[v_idx]);
}
goto set_complete;
}
@@ -3785,6 +3830,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
if (ice_set_q_coalesce(vsi, ec, q_num))
return -EINVAL;
+ ice_set_q_vector_intrl(vsi->q_vectors[q_num]);
+
set_complete:
return 0;
}
@@ -3804,6 +3851,54 @@ ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
return __ice_set_coalesce(netdev, ec, q_num);
}
+static void
+ice_repr_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ if (ice_check_vf_ready_for_cfg(repr->vf))
+ return;
+
+ __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
+}
+
+static void
+ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ /* for port representors only ETH_SS_STATS is supported */
+ if (ice_check_vf_ready_for_cfg(repr->vf) ||
+ stringset != ETH_SS_STATS)
+ return;
+
+ __ice_get_strings(netdev, stringset, data, repr->src_vsi);
+}
+
+static void
+ice_repr_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ if (ice_check_vf_ready_for_cfg(repr->vf))
+ return;
+
+ __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
+}
+
+static int ice_repr_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ICE_VSI_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
#define ICE_I2C_EEPROM_DEV_ADDR 0xA0
#define ICE_I2C_EEPROM_DEV_ADDR2 0xA2
#define ICE_MODULE_TYPE_SFP 0x03
@@ -4055,6 +4150,23 @@ void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
}
+static const struct ethtool_ops ice_ethtool_repr_ops = {
+ .get_drvinfo = ice_repr_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ice_repr_get_strings,
+ .get_ethtool_stats = ice_repr_get_ethtool_stats,
+ .get_sset_count = ice_repr_get_sset_count,
+};
+
+/**
+ * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops
+ * @netdev: network interface device structure
+ */
+void ice_set_ethtool_repr_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ice_ethtool_repr_ops;
+}
+
/**
* ice_set_ethtool_ops - setup netdev ethtool ops
* @netdev: network interface device structure
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 16de603b280c..38960bcc384c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -706,7 +706,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
if (!seg)
return -ENOMEM;
- tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
@@ -1068,7 +1068,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
if (!seg)
return -ENOMEM;
- tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 59ef68f072c0..cbd8424631e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -952,7 +952,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
if (frag)
- loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
+ loc[20] = ICE_FDIR_IPV4_PKT_FLAG_MF;
break;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index d2d40e18ae8a..da4163856f4c 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -48,7 +48,7 @@
* requests that the packet not be fragmented. MF indicates that a packet has
* been fragmented.
*/
-#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20
+#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20
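+
+/* In the IPv4 flags byte (packet offset 20 here, i.e. a 14-byte Ethernet
+ * header plus the flags/fragment field at IP header offset 6) bit 0x40 is
+ * DF and bit 0x20 is MF, so 0x20 was always the More Fragments bit; the
+ * rename makes the macro match what the training packet actually sets.
+ */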
enum ice_fltr_prgm_desc_dest {
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 06ac9badee77..23cfcceb1536 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -735,7 +735,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
*
* This function will request ownership of the change lock.
*/
-static enum ice_status
+enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
@@ -748,7 +748,7 @@ ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
*
* This function will release the change lock using the proper Admin Command.
*/
-static void ice_release_change_lock(struct ice_hw *hw)
+void ice_release_change_lock(struct ice_hw *hw)
{
ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}
@@ -1330,6 +1330,86 @@ fw_ddp_compat_free_alloc:
}
/**
+ * ice_sw_fv_handler - callback for enumerating switch field vector entries
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as being of type
+ * ice_sw_fv_section and enumerates its offset field; "offset" is an index
+ * into the field vector table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section = section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= le16_to_cpu(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table we need to add the relative index to the base_offset
+ * field of this section.
+ */
+ *offset = le16_to_cpu(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index of the profiles in use
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function determines the max profile index among the profiles
+ * in use and stores it in struct ice_switch_info *switch_info in the HW
+ * struct for later use.
+ */
+static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* In an unused profile, prot_id is set to 0xff and off is set
+ * to 0x1ff for all the field vector words.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return 0;
+}
+
+/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
@@ -1408,6 +1488,7 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
*/
ice_init_pkg_regs(hw);
ice_fill_blk_tbls(hw);
+ ice_get_prof_index_max(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
status);
@@ -1485,6 +1566,195 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
}
/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ */
+static enum ice_prof_type
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+{
+ u16 i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+ /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+ fv->ew[i].off == ICE_VNI_OFFSET)
+ return ICE_PROF_TUN_UDP;
+
+ /* GRE tunnel will have GRE protocol */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+ return ICE_PROF_TUN_GRE;
+ }
+
+ return ICE_PROF_NON_TUN;
+}
+
+/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ unsigned long *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ if (req_profs == ICE_PROF_ALL) {
+ bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
+ return;
+ }
+
+ memset(&state, 0, sizeof(state));
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+ ice_seg = hw->seg;
+ do {
+ enum ice_prof_type prof_type;
+ u32 offset;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ /* Determine field vector type */
+ prof_type = ice_get_sw_prof_type(hw, fv);
+
+ if (req_profs & prof_type)
+ set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list - build a list of switch FVs matching given protocols
+ * @hw: pointer to the HW structure
+ * @prot_ids: field vector to search for with a given protocol ID
+ * @ids_cnt: lookup/protocol count
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from the switch block that contain
+ * a given protocol ID and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information.
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ids_cnt || !hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If field vector is not in the bitmap list, then skip this
+ * profile.
+ */
+ if (!test_bit((u16)offset, bm))
+ continue;
+
+ for (i = 0; i < ids_cnt; i++) {
+ int j;
+
+ /* This code assumes that if a switch field vector line
+ * has a matching protocol, then this line will contain
+ * the entries necessary to represent every field in
+ * that protocol header.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id == prot_ids[i])
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == ids_cnt) {
+ fvl = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*fvl), GFP_KERNEL);
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ list_add(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (list_empty(fv_list))
+ return ICE_ERR_CFG;
+ return 0;
+
+err:
+ list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
+ list_del(&fvl->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvl);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ bitmap_zero(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices, these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ set_bit(i, hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
* ice_pkg_buf_free
* @hw: pointer to the HW structure
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
@@ -1668,7 +1938,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid &&
hw->tnl.tbl[i].type == type &&
- idx--)
+ idx-- == 0)
return i;
WARN_ON_ONCE(1);
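
/* Note on the "idx-- == 0" fix above: the old "idx--" test was true for any
 * non-zero idx, so the loop returned the first matching entry regardless of
 * idx and could never match idx == 0. The new test counts down, skipping
 * idx matching entries and returning the (idx + 1)-th one, zero-based.
 */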
@@ -1828,7 +2098,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
u16 index;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
- index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
+ index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
if (status) {
@@ -1863,6 +2133,35 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
return 0;
}
+/**
+ * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
+ * @hw: pointer to the hardware structure
+ * @blk: hardware block
+ * @prof: profile ID
+ * @fv_idx: field vector word index
+ * @prot: variable to receive the protocol ID
+ * @off: variable to receive the protocol offset
+ */
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off)
+{
+ struct ice_fv_word *fv_ext;
+
+ if (prof >= hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (fv_idx >= hw->blk[blk].es.fvw)
+ return ICE_ERR_PARAM;
+
+ fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
+
+ *prot = fv_ext[fv_idx].prot_id;
+ *off = fv_ext[fv_idx].off;
+
+ return 0;
+}
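
A hypothetical usage sketch (prof_id and word_idx are illustrative locals): ice_find_prot_off() translates a (profile, field vector word) pair into the protocol ID/offset pair programmed there; the outputs are only meaningful on success.

	u8 prot;
	u16 off;

	if (!ice_find_prot_off(hw, ICE_BLK_SW, prof_id, word_idx, &prot, &off))
		dev_dbg(ice_hw_to_dev(hw), "prof %u word %u -> prot %u off %u\n",
			prof_id, word_idx, prot, off);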
+
/* PTG Management */
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 8a58e79729b9..344c2637facd 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,6 +18,20 @@
#define ICE_PKG_CNT 4
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_change_lock(struct ice_hw *hw);
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off);
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
+ unsigned long *bm);
+void
+ice_init_prof_result_bm(struct ice_hw *hw);
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ unsigned long *bm, struct list_head *fv_list);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 7d8b517a63c9..0f572a36d021 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -13,6 +13,8 @@ struct ice_fv_word {
u8 resvrd;
} __packed;
+#define ICE_MAX_NUM_PROFILES 256
+
#define ICE_MAX_FV_WORDS 48
struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
@@ -279,6 +281,12 @@ struct ice_sw_fv_section {
struct ice_fv fv[];
};
+struct ice_sw_fv_list_entry {
+ struct list_head list_entry;
+ u32 profile_id;
+ struct ice_fv *fv_ptr;
+};
+
/* The BOOST TCAM stores the match packet header in reverse order, meaning
* the fields are reversed; in addition, this means that the normally big endian
* fields of the packet are now little endian.
@@ -365,6 +373,7 @@ struct ice_pkg_enum {
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
+ TNL_GRETAP,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@@ -603,4 +612,12 @@ struct ice_chs_chg {
};
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
+
+enum ice_prof_type {
+ ICE_PROF_NON_TUN = 0x1,
+ ICE_PROF_TUN_UDP = 0x2,
+ ICE_PROF_TUN_GRE = 0x4,
+ ICE_PROF_TUN_ALL = 0x6,
+ ICE_PROF_ALL = 0xFF,
+};
#endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 2418d4fff037..c2e78eaf4ccb 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -395,3 +395,83 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_remove_eth_list);
}
+
+/**
+ * ice_fltr_update_rule_flags - update lan_en/lb_en flags
+ * @hw: pointer to HW structure
+ * @rule_id: ID of the rule being updated
+ * @recipe_id: recipe ID of the rule
+ * @act: current action field
+ * @type: Rx or Tx
+ * @src: source VSI
+ * @new_flags: combinations of lb_en and lan_en
+ */
+static enum ice_status
+ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
+ u32 act, u16 type, u16 src, u32 new_flags)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status err;
+ u32 flags_mask;
+
+ s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
+ act &= ~flags_mask;
+ act |= (flags_mask & new_flags);
+
+ s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id);
+ s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
+ s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+
+ if (type & ICE_FLTR_RX) {
+ s_rule->pdata.lkup_tx_rx.src =
+ cpu_to_le16(hw->port_info->lport);
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+
+ } else {
+ s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src);
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ }
+
+ err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
+ ice_aqc_opc_update_sw_rules, NULL);
+
+ kfree(s_rule);
+ return err;
+}
+
+/**
+ * ice_fltr_build_action - build action for rule
+ * @vsi_id: ID of the VSI used to build the action
+ */
+static u32 ice_fltr_build_action(u16 vsi_id)
+{
+ return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) |
+ ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
+}
+
+/**
+ * ice_fltr_update_flags_dflt_rule - update flags on default rule
+ * @vsi: pointer to VSI
+ * @rule_id: ID of the rule
+ * @direction: Tx or Rx
+ * @new_flags: flags to update
+ *
+ * Function updates flags on the default rule, which uses recipe ID
+ * ICE_SW_LKUP_DFLT.
+ *
+ * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
+ * ICE_SINGLE_ACT_LAN_ENABLE.
+ */
+enum ice_status
+ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
+ u32 new_flags)
+{
+ u32 action = ice_fltr_build_action(vsi->vsi_num);
+ struct ice_hw *hw = &vsi->back->hw;
+
+ return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action,
+ direction, vsi->vsi_num, new_flags);
+}
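
A hedged usage sketch (rule_id is assumed to be known from rule creation): turning LAN forwarding back on for a default Rx rule while leaving loopback disabled amounts to passing only the LAN_ENABLE bit in new_flags.

	enum ice_status status;

	status = ice_fltr_update_flags_dflt_rule(vsi, rule_id, ICE_FLTR_RX,
						 ICE_SINGLE_ACT_LAN_ENABLE);
	if (status)
		dev_err(ice_pf_to_dev(vsi->back),
			"failed to update default rule flags\n");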
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 361cb4da9b43..8eec4febead1 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -36,4 +36,7 @@ enum ice_status
ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
void ice_fltr_remove_all(struct ice_vsi *vsi);
+enum ice_status
+ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
+ u32 new_flags);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 76021d977b60..a49082485642 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -182,6 +182,7 @@
#define GLINT_DYN_CTL_INTERVAL_S 5
#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5)
#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24)
+#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 37c18c66b5c7..e375ac849aec 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -100,9 +100,9 @@ static void ice_display_lag_info(struct ice_lag *lag)
*/
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
- struct net_device *event_netdev, *netdev_tmp;
struct netdev_notifier_bonding_info *info;
struct netdev_bonding_info *bonding_info;
+ struct net_device *event_netdev;
const char *lag_netdev_name;
event_netdev = netdev_notifier_info_to_dev(ptr);
@@ -123,19 +123,6 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
goto lag_out;
}
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
- if (!netif_is_ice(netdev_tmp))
- continue;
-
- if (netdev_tmp && netdev_tmp != lag->netdev &&
- lag->peer_netdev != netdev_tmp) {
- dev_hold(netdev_tmp);
- lag->peer_netdev = netdev_tmp;
- }
- }
- rcu_read_unlock();
-
if (bonding_info->slave.state)
ice_lag_set_backup(lag);
else
@@ -319,6 +306,9 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
case NETDEV_BONDING_INFO:
ice_lag_info_event(lag, ptr);
break;
+ case NETDEV_UNREGISTER:
+ ice_lag_unlink(lag, ptr);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 80736e0ec0dc..d981dc6f2323 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -301,6 +301,46 @@ struct ice_32b_rx_flex_desc_nic {
} flex_ts;
};
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile ID 6
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Source VSI
+ * Flex-field 4: reserved, VLAN ID taken from L2Tag
+ */
+struct ice_32b_rx_flex_desc_nic_2 {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le16 flow_id;
+ __le16 src_vsi;
+ union {
+ struct {
+ __le16 rsvd;
+ __le16 flow_id_ipv6;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -529,6 +569,9 @@ struct ice_tx_ctx_desc {
#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_QW1_VSI_S 50
+#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
+
enum ice_tx_ctx_desc_cmd_bits {
ICE_TX_CTX_DESC_TSO = 0x01,
ICE_TX_CTX_DESC_TSYN = 0x02,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index dde9802c6c72..40562600a8cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -22,8 +22,12 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_VF";
case ICE_VSI_CTRL:
return "ICE_VSI_CTRL";
+ case ICE_VSI_CHNL:
+ return "ICE_VSI_CHNL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
+ case ICE_VSI_SWITCHDEV_CTRL:
+ return "ICE_VSI_SWITCHDEV_CTRL";
default:
return "unknown";
}
@@ -44,12 +48,12 @@ static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
int ret = 0;
u16 i;
- for (i = 0; i < vsi->num_rxq; i++)
+ ice_for_each_rxq(vsi, i)
ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
ice_flush(&vsi->back->hw);
- for (i = 0; i < vsi->num_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
if (ret)
break;
@@ -71,6 +75,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
struct device *dev;
dev = ice_pf_to_dev(pf);
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
/* allocate memory for both Tx and Rx ring pointers */
vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
@@ -132,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -200,6 +207,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ /* The number of queues for the ctrl VSI is equal to the number of VFs.
+ * Each ring is associated with the corresponding VF_PR netdev.
+ */
+ vsi->alloc_txq = pf->num_alloc_vfs;
+ vsi->alloc_rxq = pf->num_alloc_vfs;
+ vsi->num_q_vectors = 1;
+ break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
if (vf->num_req_qs)
@@ -218,6 +233,10 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = 1;
vsi->num_q_vectors = 1;
break;
+ case ICE_VSI_CHNL:
+ vsi->alloc_txq = 0;
+ vsi->alloc_rxq = 0;
+ break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
@@ -263,7 +282,7 @@ static int ice_get_free_slot(void *array, int size, int curr)
* ice_vsi_delete - delete a VSI from the switch
* @vsi: pointer to VSI being removed
*/
-static void ice_vsi_delete(struct ice_vsi *vsi)
+void ice_vsi_delete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
@@ -334,7 +353,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
*
* Returns 0 on success, negative on failure
*/
-static int ice_vsi_clear(struct ice_vsi *vsi)
+int ice_vsi_clear(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
@@ -379,12 +398,12 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- if (!q_vector->tx.ring)
+ if (!q_vector->tx.tx_ring)
return IRQ_HANDLED;
#define FDIR_RX_DESC_CLEAN_BUDGET 64
- ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
- ice_clean_ctrl_tx_irq(q_vector->tx.ring);
+ ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
return IRQ_HANDLED;
}
@@ -398,7 +417,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- if (!q_vector->tx.ring && !q_vector->rx.ring)
+ if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED;
q_vector->total_events++;
@@ -408,16 +427,33 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+ struct ice_pf *pf = q_vector->vsi->back;
+ int i;
+
+ if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
+ return IRQ_HANDLED;
+
+ ice_for_each_vf(pf, i)
+ napi_schedule(&pf->vf[i].repr->q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
/**
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @vsi_type: type of VSI
+ * @ch: ptr to channel
* @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
+ struct ice_channel *ch, u16 vf_id)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -444,10 +480,17 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
- else
+ else if (vsi_type != ICE_VSI_CHNL)
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
switch (vsi->type) {
+ case ICE_VSI_SWITCHDEV_CTRL:
+ if (ice_vsi_alloc_arrays(vsi))
+ goto err_rings;
+
+ /* Setup eswitch MSIX irq handler for VSI */
+ vsi->irq_handler = ice_eswitch_msix_clean_rings;
+ break;
case ICE_VSI_PF:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -466,6 +509,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
break;
+ case ICE_VSI_CHNL:
+ if (!ch)
+ goto err_rings;
+ vsi->num_rxq = ch->num_rxq;
+ vsi->num_txq = ch->num_txq;
+ vsi->next_base_q = ch->base_q;
+ break;
case ICE_VSI_LB:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -582,6 +632,9 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
};
int ret;
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
+
ret = __ice_vsi_get_qs(&tx_qs_cfg);
if (ret)
return ret;
@@ -606,12 +659,12 @@ static void ice_vsi_put_qs(struct ice_vsi *vsi)
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_alloc_txq(vsi, i) {
clear_bit(vsi->txq_map[i], pf->avail_txqs);
vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
}
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_alloc_rxq(vsi, i) {
clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
}
@@ -700,12 +753,23 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
cap = &pf->hw.func_caps.common_cap;
switch (vsi->type) {
+ case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
vsi->rss_table_size = (u16)cap->rss_table_size;
+ if (vsi->type == ICE_VSI_CHNL)
+ vsi->rss_size = min_t(u16, vsi->num_rxq,
+ BIT(cap->rss_table_entry_width));
+ else
+ vsi->rss_size = min_t(u16, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vsi->rss_size = min_t(u16, num_online_cpus(),
BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
@@ -775,21 +839,13 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
- bool ena_tc0 = false;
u8 netdev_tc = 0;
int i;
- /* at least TC0 should be enabled by default */
- if (vsi->tc_cfg.numtc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(0)))
- ena_tc0 = true;
- } else {
- ena_tc0 = true;
- }
-
- if (ena_tc0) {
- vsi->tc_cfg.numtc++;
- vsi->tc_cfg.ena_tc |= 1;
+ if (!vsi->tc_cfg.numtc) {
+ /* at least TC0 should be enabled by default */
+ vsi->tc_cfg.numtc = 1;
+ vsi->tc_cfg.ena_tc = 1;
}
num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
@@ -931,6 +987,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
switch (vsi->type) {
+ case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
@@ -953,6 +1010,28 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}
+static void
+ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 qcount, qmap;
+ u8 offset = 0;
+ int pow;
+
+ qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+
+ pow = order_base_2(qcount);
+ qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
+ ICE_AQ_VSI_TC_Q_NUM_M);
+
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
+ ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
+}
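
A worked example of the qmap encoding computed above (values illustrative): with offset = 0 and qcount = 8, order_base_2(8) = 3, so tc_mapping[0] advertises a contiguous block of 2^3 queues starting at the absolute base queue placed in q_mapping[0].

	u16 qcount = 8;
	int pow = order_base_2(qcount);	/* = 3 */
	u16 qmap = ((0 << ICE_AQ_VSI_TC_Q_OFFSET_S) & ICE_AQ_VSI_TC_Q_OFFSET_M) |
		   ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);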
+
/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
@@ -980,6 +1059,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_CHNL:
+ ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
+ break;
case ICE_VSI_VF:
ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */
@@ -990,6 +1073,21 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
goto out;
}
+ /* Handle VLAN pruning for the channel VSI if the main VSI has
+ * VLAN pruning enabled
+ */
+ if (vsi->type == ICE_VSI_CHNL) {
+ struct ice_vsi *main_vsi;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
+ ctxt->info.sw_flags2 |=
+ ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ else
+ ctxt->info.sw_flags2 &=
+ ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ }
+
ice_set_dflt_vsi_ctx(ctxt);
if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
ice_set_fd_vsi_ctx(ctxt, vsi);
@@ -1010,13 +1108,17 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
}
ctxt->info.sw_id = vsi->port_info->sw_id;
- ice_vsi_setup_q_map(vsi, ctxt);
- if (!init_vsi) /* means VSI being updated */
- /* must to indicate which section of VSI context are
- * being modified
- */
- ctxt->info.valid_sections |=
- cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ if (vsi->type == ICE_VSI_CHNL) {
+ ice_chnl_vsi_setup_q_map(vsi, ctxt);
+ } else {
+ ice_vsi_setup_q_map(vsi, ctxt);
+ if (!init_vsi) /* means VSI being updated */
+ /* must indicate which sections of the VSI context are
+ * being modified
+ */
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ }
/* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
* respectively
@@ -1195,6 +1297,8 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
/* SRIOV doesn't grab irq_tracker entries for each VSI */
if (vsi->type == ICE_VSI_VF)
return 0;
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
if (vsi->base_vector) {
dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
@@ -1249,14 +1353,14 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
struct ice_q_vector *q_vector = vsi->q_vectors[i];
if (q_vector) {
- q_vector->tx.ring = NULL;
- q_vector->rx.ring = NULL;
+ q_vector->tx.tx_ring = NULL;
+ q_vector->rx.rx_ring = NULL;
}
}
}
if (vsi->tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_alloc_txq(vsi, i) {
if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu);
WRITE_ONCE(vsi->tx_rings[i], NULL);
@@ -1264,7 +1368,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
}
}
if (vsi->rx_rings) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_alloc_rxq(vsi, i) {
if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu);
WRITE_ONCE(vsi->rx_rings[i], NULL);
@@ -1285,8 +1389,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */
- for (i = 0; i < vsi->alloc_txq; i++) {
- struct ice_ring *ring;
+ ice_for_each_alloc_txq(vsi, i) {
+ struct ice_tx_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1296,7 +1400,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->q_index = i;
ring->reg_idx = vsi->txq_map[i];
- ring->ring_active = false;
ring->vsi = vsi;
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
@@ -1305,8 +1408,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
}
/* Allocate Rx rings */
- for (i = 0; i < vsi->alloc_rxq; i++) {
- struct ice_ring *ring;
+ ice_for_each_alloc_rxq(vsi, i) {
+ struct ice_rx_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1315,7 +1418,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->q_index = i;
ring->reg_idx = vsi->rxq_map[i];
- ring->ring_active = false;
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = dev;
@@ -1363,7 +1465,7 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
* ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
* @vsi: VSI to be configured
*/
-static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
+int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct device *dev;
@@ -1371,7 +1473,25 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
int err;
dev = ice_pf_to_dev(pf);
- vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
+ if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
+ (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
+ } else {
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
+
+ /* If orig_rss_size is valid and less than the determined
+ * main VSI rss_size, update the main VSI rss_size to be
+ * orig_rss_size so that when the tc-qdisc is deleted, the
+ * main VSI RSS table gets programmed back to its original
+ * value (whatever it was prior to setup-tc for the ADQ config)
+ */
+ if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
+ vsi->orig_rss_size <= vsi->num_rxq) {
+ vsi->rss_size = vsi->orig_rss_size;
+ /* now orig_rss_size is used, reset it to zero */
+ vsi->orig_rss_size = 0;
+ }
+ }
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -1710,7 +1830,7 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
}
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx)
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
int err;
@@ -1766,7 +1886,7 @@ setup_rings:
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
u16 q_idx = 0;
@@ -1817,8 +1937,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- for (i = 0; i < vsi->num_xdp_txq; i++)
- vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_xdp_txq(vsi, i)
+ vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
return ret;
}
@@ -1853,6 +1973,24 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
}
+static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
+{
+ switch (rc->type) {
+ case ICE_RX_CONTAINER:
+ if (rc->rx_ring)
+ return rc->rx_ring->q_vector;
+ break;
+ case ICE_TX_CONTAINER:
+ if (rc->tx_ring)
+ return rc->tx_ring->q_vector;
+ break;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
/**
* __ice_write_itr - write throttle rate to register
* @q_vector: pointer to interrupt data structure
@@ -1877,15 +2015,39 @@ void ice_write_itr(struct ice_ring_container *rc, u16 itr)
{
struct ice_q_vector *q_vector;
- if (!rc->ring)
+ q_vector = ice_pull_qvec_from_rc(rc);
+ if (!q_vector)
return;
- q_vector = rc->ring->q_vector;
-
__ice_write_itr(q_vector, rc, itr);
}
/**
+ * ice_set_q_vector_intrl - set up interrupt rate limiting
+ * @q_vector: the vector to be configured
+ *
+ * Interrupt rate limiting is local to the vector, not per-queue, so we must
+ * detect if either ring container has dynamic moderation enabled to decide
+ * what to set the interrupt rate limit to via INTRL settings. In the case that
+ * dynamic moderation is disabled on both, write the value with the cached
+ * setting to make sure INTRL register matches the user visible value.
+ */
+void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
+{
+ if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
+ /* when dynamic moderation is enabled, cap each vector at
+ * 4 us, i.e. no more than 250,000 ints/sec. This keeps
+ * latency low while staying under 500,000 interrupts per
+ * second, which eases CPU load relative to the lowest
+ * latency setting.
+ */
+ ice_write_intrl(q_vector, 4);
+ } else {
+ ice_write_intrl(q_vector, q_vector->intrl);
+ }
+}
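
For reference, the 4 us cap in the comment above works out to 1,000,000 us/s / 4 us = 250,000 interrupts per second per vector, comfortably below the 500,000/s level it calls out.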
+
+/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
*
@@ -1899,7 +2061,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
u16 txq = 0, rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
u16 reg_idx = q_vector->reg_idx;
@@ -2057,7 +2219,7 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
*/
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
+ u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
{
u16 q_idx;
@@ -2122,11 +2284,10 @@ bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
- * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
*
* returns 0 if VSI is updated, negative otherwise
*/
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
{
struct ice_vsi_ctx *ctxt;
struct ice_pf *pf;
@@ -2154,9 +2315,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
else
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- if (!vlan_promisc)
- ctxt->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
@@ -2179,10 +2338,14 @@ err_out:
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
- struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+ return;
+ }
- vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
- vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+ /* set VSI TC information based on DCB config */
+ ice_vsi_set_dcb_tc_cfg(vsi);
}
/**
@@ -2295,8 +2458,10 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2393,6 +2558,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
* @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
+ * @ch: ptr to channel
*
* This allocates the sw VSI structure and its queue resources.
*
@@ -2401,7 +2567,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id)
+ enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -2409,10 +2575,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
+ if (vsi_type == ICE_VSI_CHNL)
+ vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID);
+ else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id);
else
- vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2429,10 +2597,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_alloc_fd_res(vsi);
- if (ice_vsi_get_qs(vsi)) {
- dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
- vsi->idx);
- goto unroll_vsi_alloc;
+ if (vsi_type != ICE_VSI_CHNL) {
+ if (ice_vsi_get_qs(vsi)) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto unroll_vsi_alloc;
+ }
}
/* set RSS capabilities */
@@ -2448,6 +2618,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2490,6 +2661,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
ice_init_arfs(vsi);
break;
+ case ICE_VSI_CHNL:
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ break;
case ICE_VSI_VF:
/* VF driver will take care of creating netdev for this type and
* map queues to vectors through Virtchnl, PF driver only
@@ -2528,9 +2705,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
/* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->alloc_txq;
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i)))
+ continue;
+
+ if (vsi->type == ICE_VSI_CHNL) {
+ if (!vsi->alloc_txq && vsi->num_txq)
+ max_txqs[i] = vsi->num_txq;
+ else
+ max_txqs[i] = pf->num_lan_tx;
+ } else {
+ max_txqs[i] = vsi->alloc_txq;
+ }
+ }
+ dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
@@ -2591,7 +2780,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
u32 rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
ice_write_intrl(q_vector, 0);
@@ -2757,7 +2946,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
} else {
ice_vsi_close(vsi);
}
- } else if (vsi->type == ICE_VSI_CTRL) {
+ } else if (vsi->type == ICE_VSI_CTRL ||
+ vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
ice_vsi_close(vsi);
}
}
@@ -2841,6 +3031,7 @@ void ice_napi_del(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
+ enum ice_status err;
struct ice_pf *pf;
if (!vsi->back)
@@ -2859,7 +3050,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
- ice_devlink_destroy_port(vsi);
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
@@ -2912,6 +3104,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, err);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
@@ -3036,7 +3232,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
}
vsi->q_vectors[i]->intrl = coalesce[i].intrl;
- ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl);
+ ice_set_q_vector_intrl(vsi->q_vectors[i]);
}
/* the number of queue vectors increased so write whatever is in
@@ -3054,7 +3250,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[0].intrl;
- ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl);
+ ice_set_q_vector_intrl(vsi->q_vectors[i]);
}
}
@@ -3092,6 +3288,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (ret)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, ret);
ice_vsi_free_q_vectors(vsi);
/* SR-IOV determines needed MSIX resources all at once instead of per
@@ -3135,6 +3335,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
switch (vtype) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -3154,7 +3355,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_map_rings_to_vectors(vsi);
if (ice_is_xdp_ena_vsi(vsi)) {
- vsi->num_xdp_txq = vsi->alloc_rxq;
+ ret = ice_vsi_determine_xdp_res(vsi);
+ if (ret)
+ goto err_vectors;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
if (ret)
goto err_vectors;
@@ -3182,20 +3385,42 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vectors;
break;
+ case ICE_VSI_CHNL:
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ break;
default:
break;
}
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++) {
- max_txqs[i] = vsi->alloc_txq;
+ /* configure VSI nodes based on number of queues and TC's.
+ * ADQ creates VSIs for each TC/Channel but doesn't
+ * allocate queues; instead it reconfigures the PF queues
+ * as per the TC command. So max_txqs should point to the
+ * PF Tx queues.
+ */
+ if (vtype == ICE_VSI_CHNL)
+ max_txqs[i] = pf->num_lan_tx;
+ else
+ max_txqs[i] = vsi->alloc_txq;
if (ice_is_xdp_ena_vsi(vsi))
max_txqs[i] += vsi->num_xdp_txq;
}
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ /* If MQPRIO is set, we are on the channel code path, so
+ * use a single TC for the main VSI
+ */
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
+ else
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
+
if (status) {
dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
vsi->vsi_num, ice_stat_str(status));
@@ -3267,7 +3492,6 @@ int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
return 0;
}
-#ifdef CONFIG_DCB
/**
* ice_vsi_update_q_map - update our copy of the VSI info with new queue map
* @vsi: VSI being configured
@@ -3283,6 +3507,146 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
}
/**
+ * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @ena_tc: TC map to be enabled
+ */
+void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf = vsi->back;
+ int numtc = vsi->tc_cfg.numtc;
+ struct ice_dcbx_cfg *dcbcfg;
+ u8 netdev_tc;
+ int i;
+
+ if (!netdev)
+ return;
+
+ /* CHNL VSI doesn't have its own netdev, hence no netdev_tc */
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+
+ if (!ena_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
+ numtc = vsi->all_numtc;
+
+ if (netdev_set_num_tc(netdev, numtc))
+ return;
+
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+ /* setup TC queue map for CHNL TCs */
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ break;
+ if (!vsi->mqprio_qopt.qopt.count[i])
+ break;
+ netdev_set_tc_queue(netdev, i,
+ vsi->mqprio_qopt.qopt.count[i],
+ vsi->mqprio_qopt.qopt.offset[i]);
+ }
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ return;
+
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ u8 ets_tc = dcbcfg->etscfg.prio_table[i];
+
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
+
+/**
+ * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
+ * @vsi: the VSI being configured
+ * @ctxt: VSI context structure
+ * @ena_tc: number of traffic classes to enable
+ *
+ * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
+ */
+static void
+ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
+ u8 ena_tc)
+{
+ u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
+ u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
+ int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+ u8 netdev_tc = 0;
+ int i;
+
+ vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
+
+ pow = order_base_2(tc0_qcount);
+ qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
+
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
+ /* TC is not enabled */
+ vsi->tc_cfg.tc_info[i].qoffset = 0;
+ vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+ vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+ vsi->tc_cfg.tc_info[i].netdev_tc = 0;
+ ctxt->info.tc_mapping[i] = 0;
+ continue;
+ }
+
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ vsi->tc_cfg.tc_info[i].qoffset = offset;
+ vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+ vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
+ vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
+ }
+
+ if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ continue;
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ }
+ }
+
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_txq = offset + qcount_tx;
+ vsi->num_rxq = offset + qcount_rx;
+
+ /* Setup queue TC[0].qmap for given VSI context */
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
+ ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
+
+ /* Find queue count available for channel VSIs and starting offset
+ * for channel VSIs
+ */
+ if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
+ vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
+ vsi->next_base_q = tc0_qcount;
+ }
+ dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
+ dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
+ dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
+ vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
+}
+
+/**
* ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
* @vsi: VSI to be configured
* @ena_tc: TC bitmap
@@ -3300,6 +3664,9 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
u8 num_tc = 0;
dev = ice_pf_to_dev(pf);
+ if (vsi->tc_cfg.ena_tc == ena_tc &&
+ vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
+ return ret;
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
@@ -3307,6 +3674,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
num_tc++;
/* populate max_txqs per TC */
max_txqs[i] = vsi->alloc_txq;
+ /* For a CHNL VSI, alloc_txq/alloc_rxq are zero, so use
+ * num_txq as max_txqs instead
+ */
+ if (vsi->type == ICE_VSI_CHNL &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ max_txqs[i] = vsi->num_txq;
}
vsi->tc_cfg.ena_tc = ena_tc;
@@ -3319,7 +3692,11 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
ctx->vf_num = 0;
ctx->info = vsi->info;
- ice_vsi_setup_q_map(vsi, ctx);
+ if (vsi->type == ICE_VSI_PF &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
+ else
+ ice_vsi_setup_q_map(vsi, ctx);
/* must to indicate which section of VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
@@ -3330,8 +3707,13 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
goto out;
}
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
+ if (vsi->type == ICE_VSI_PF &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1,
+ max_txqs);
+ else
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
if (status) {
dev_err(dev, "VSI %d failed TC config, error %s\n",
@@ -3347,20 +3729,19 @@ out:
kfree(ctx);
return ret;
}
-#endif /* CONFIG_DCB */
/**
* ice_update_ring_stats - Update ring statistics
- * @ring: ring to update
+ * @stats: stats to be updated
* @pkts: number of processed packets
* @bytes: number of processed bytes
*
* This function assumes that caller has acquired a u64_stats_sync lock.
*/
-static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
+static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
{
- ring->stats.bytes += bytes;
- ring->stats.pkts += pkts;
+ stats->bytes += bytes;
+ stats->pkts += pkts;
}
/**
@@ -3369,10 +3750,10 @@ static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
* @pkts: number of processed packets
* @bytes: number of processed bytes
*/
-void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
+void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&tx_ring->syncp);
- ice_update_ring_stats(tx_ring, pkts, bytes);
+ ice_update_ring_stats(&tx_ring->stats, pkts, bytes);
u64_stats_update_end(&tx_ring->syncp);
}
@@ -3382,10 +3763,10 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
* @pkts: number of processed packets
* @bytes: number of processed bytes
*/
-void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
+void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&rx_ring->syncp);
- ice_update_ring_stats(rx_ring, pkts, bytes);
+ ice_update_ring_stats(&rx_ring->stats, pkts, bytes);
u64_stats_update_end(&rx_ring->syncp);
}
@@ -3538,6 +3919,180 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
}
/**
+ * ice_get_link_speed_mbps - get link speed in Mbps
+ * @vsi: the VSI whose link speed is being queried
+ *
+ * Return the current VSI link speed, or 0 if the speed is unknown.
+ */
+int ice_get_link_speed_mbps(struct ice_vsi *vsi)
+{
+ switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ return SPEED_100000;
+ case ICE_AQ_LINK_SPEED_50GB:
+ return SPEED_50000;
+ case ICE_AQ_LINK_SPEED_40GB:
+ return SPEED_40000;
+ case ICE_AQ_LINK_SPEED_25GB:
+ return SPEED_25000;
+ case ICE_AQ_LINK_SPEED_20GB:
+ return SPEED_20000;
+ case ICE_AQ_LINK_SPEED_10GB:
+ return SPEED_10000;
+ case ICE_AQ_LINK_SPEED_5GB:
+ return SPEED_5000;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ return SPEED_2500;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ return SPEED_1000;
+ case ICE_AQ_LINK_SPEED_100MB:
+ return SPEED_100;
+ case ICE_AQ_LINK_SPEED_10MB:
+ return SPEED_10;
+ case ICE_AQ_LINK_SPEED_UNKNOWN:
+ default:
+ return 0;
+ }
+}
+
+/**
+ * ice_get_link_speed_kbps - get link speed in Kbps
+ * @vsi: the VSI whose link speed is being queried
+ *
+ * Return the current VSI link speed, or 0 if the speed is unknown.
+ */
+int ice_get_link_speed_kbps(struct ice_vsi *vsi)
+{
+ int speed_mbps;
+
+ speed_mbps = ice_get_link_speed_mbps(vsi);
+
+ return speed_mbps * 1000;
+}
+
+/**
+ * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
+ * @vsi: VSI to be configured
+ * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
+ *
+ * If min_tx_rate is 0, the minimum BW limit profile is cleared; otherwise a
+ * non-zero value will force a minimum BW limit for the VSI on TC 0.
+ */
+int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ int speed;
+
+ dev = ice_pf_to_dev(pf);
+ if (!vsi->port_info) {
+ dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
+ vsi->idx, vsi->type);
+ return -EINVAL;
+ }
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (min_tx_rate > (u64)speed) {
+ dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
+ speed);
+ return -EINVAL;
+ }
+
+ /* Configure min BW for VSI limit */
+ if (min_tx_rate) {
+ status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
+ ICE_MIN_BW, min_tx_rate);
+ if (status) {
+ dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type),
+ vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type));
+ } else {
+ status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
+ vsi->idx, 0,
+ ICE_MIN_BW);
+ if (status) {
+ dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
+ *
+ * If max_tx_rate is 0, the maximum BW limit profile is cleared; otherwise a
+ * non-zero value will force a maximum BW limit for the VSI on TC 0.
+ */
+int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ int speed;
+
+ dev = ice_pf_to_dev(pf);
+ if (!vsi->port_info) {
+ dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
+ vsi->idx, vsi->type);
+ return -EINVAL;
+ }
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (max_tx_rate > (u64)speed) {
+ dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
+ speed);
+ return -EINVAL;
+ }
+
+ /* Configure max BW for VSI limit */
+ if (max_tx_rate) {
+ status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
+ ICE_MAX_BW, max_tx_rate);
+ if (status) {
+ dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type),
+ vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
+ } else {
+ status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
+ vsi->idx, 0,
+ ICE_MAX_BW);
+ if (status) {
+ dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ }
+
+ return 0;
+}
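
A hypothetical usage sketch of the two shapers (rates in Kbps, values illustrative): a VSI can be clamped between 10 Mbps and 100 Mbps, and passing 0 later removes the corresponding profile.

	if (!ice_set_min_bw_limit(vsi, 10000) &&
	    !ice_set_max_bw_limit(vsi, 100000)) {
		/* ... traffic runs with the shaper applied ... */
		ice_set_min_bw_limit(vsi, 0);	/* 0 clears the profile */
		ice_set_max_bw_limit(vsi, 0);
	}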
+
+/**
* ice_set_link - turn on/off physical link
* @vsi: VSI to modify physical link on
* @ena: turn on/off physical link
@@ -3573,3 +4128,126 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
return 0;
}
+
+/**
+ * ice_is_feature_supported
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to be checked
+ *
+ * returns true if feature is supported, false otherwise
+ */
+bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return false;
+
+ return test_bit(f, pf->features);
+}
+
+/**
+ * ice_set_feature_support
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to set
+ */
+static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return;
+
+ set_bit(f, pf->features);
+}
+
+/**
+ * ice_clear_feature_support
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to clear
+ */
+void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return;
+
+ clear_bit(f, pf->features);
+}
+
+/**
+ * ice_init_feature_support
+ * @pf: pointer to the struct ice_pf instance
+ *
+ * called during init to set up supported features
+ */
+void ice_init_feature_support(struct ice_pf *pf)
+{
+ switch (pf->hw.device_id) {
+ case ICE_DEV_ID_E810C_BACKPLANE:
+ case ICE_DEV_ID_E810C_QSFP:
+ case ICE_DEV_ID_E810C_SFP:
+ ice_set_feature_support(pf, ICE_F_DSCP);
+ if (ice_is_e810t(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_SMA_CTRL);
+ break;
+ default:
+ break;
+ }
+}
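
A minimal sketch of how callers are expected to consume the feature bits (the guarded path is hypothetical):

	if (ice_is_feature_supported(pf, ICE_F_DSCP))
		/* take the DSCP-based QoS path */;
	else
		dev_dbg(ice_pf_to_dev(pf), "DSCP not supported on this device\n");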
+
+/**
+ * ice_vsi_update_security - update security block in VSI
+ * @vsi: pointer to VSI structure
+ * @fill: function pointer to fill ctx
+ */
+int
+ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
+{
+ struct ice_vsi_ctx ctx = { 0 };
+
+ ctx.info = vsi->info;
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ fill(&ctx);
+
+ if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
+ return -ENODEV;
+
+ vsi->info = ctx.info;
+ return 0;
+}
+
+/**
+ * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+}
+
+/**
+ * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
+ ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+}
+
+/**
+ * ice_vsi_ctx_set_allow_override - allow destination override on VSI
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+}
+
+/**
+ * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+}
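
A hedged usage sketch of the fill-callback pattern above: one update helper serves all four security tweaks, e.g. enabling anti-spoof on a VSI.

	if (ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof))
		dev_err(ice_pf_to_dev(vsi->back),
			"failed to enable anti-spoof on VSI %d\n",
			vsi->vsi_num);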
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index d5a28bf0fc2c..6c803407b67d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -14,7 +14,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
@@ -45,19 +45,24 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
-#ifdef CONFIG_DCB
+void ice_vsi_delete(struct ice_vsi *vsi);
+int ice_vsi_clear(struct ice_vsi *vsi);
+
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
-#endif /* CONFIG_DCB */
+
+int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);
+
+void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id);
+ enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
void ice_napi_del(struct ice_vsi *vsi);
@@ -93,9 +98,9 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
-void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
-void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
@@ -103,6 +108,7 @@ int ice_status_to_errno(enum ice_status err);
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
+void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
@@ -116,4 +122,22 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_clear_dflt_vsi(struct ice_sw *sw);
+int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
+int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
+int ice_get_link_speed_kbps(struct ice_vsi *vsi);
+int ice_get_link_speed_mbps(struct ice_vsi *vsi);
+int
+ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *));
+
+void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
+
+bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
+void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
+void ice_init_feature_support(struct ice_pf *pf);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0d6c143f6653..f099797f35e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -19,6 +19,8 @@
*/
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
+#include "ice_eswitch.h"
+#include "ice_tc_lib.h"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -42,16 +44,26 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static DEFINE_IDA(ice_aux_ida);
+DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+EXPORT_SYMBOL(ice_xdp_locking_key);
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
-static int ice_vsi_open(struct ice_vsi *vsi);
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
static void ice_vsi_release_all(struct ice_pf *pf);
+static int ice_rebuild_channels(struct ice_pf *pf);
+static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
+
+static int
+ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
+ void *cb_priv, enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+
bool netif_is_ice(struct net_device *dev)
{
return dev && (dev->netdev_ops == &ice_netdev_ops);
@@ -61,7 +73,7 @@ bool netif_is_ice(struct net_device *dev)
* ice_get_tx_pending - returns number of Tx descriptors not processed
* @ring: the ring of descriptors
*/
-static u16 ice_get_tx_pending(struct ice_ring *ring)
+static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
u16 head, tail;
@@ -100,10 +112,15 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
hw = &vsi->back->hw;
- for (i = 0; i < vsi->num_txq; i++) {
- struct ice_ring *tx_ring = vsi->tx_rings[i];
+ ice_for_each_txq(vsi, i) {
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
+
+ if (!tx_ring)
+ continue;
+ if (ice_ring_ch_enabled(tx_ring))
+ continue;
- if (tx_ring && tx_ring->desc) {
+ if (tx_ring->desc) {
/* If packet counter has not changed the queue is
* likely stalled, so force an interrupt for this
* queue.
@@ -379,7 +396,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
~IFF_PROMISC;
goto out_promisc;
}
- ice_cfg_vlan_pruning(vsi, false, false);
+ ice_cfg_vlan_pruning(vsi, false);
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -393,7 +410,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto out_promisc;
}
if (vsi->num_vlan > 1)
- ice_cfg_vlan_pruning(vsi, true, false);
+ ice_cfg_vlan_pruning(vsi, true);
}
}
}
@@ -455,17 +472,21 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
}
/**
- * ice_prepare_for_reset - prep for the core to reset
+ * ice_prepare_for_reset - prep for reset
* @pf: board private structure
+ * @reset_type: reset type requested
*
* Inform or close all dependent features in prep for reset.
*/
static void
-ice_prepare_for_reset(struct ice_pf *pf)
+ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_hw *hw = &pf->hw;
+ struct ice_vsi *vsi;
unsigned int i;
+ dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
+
/* already prepared for reset */
if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;
@@ -480,6 +501,38 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]);
+ /* release ADQ specific HW and SW resources */
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ goto skip;
+
+	/* to be on the safe side, reset orig_rss_size so that the normal
+	 * flow of deciding rss_size can take precedence
+	 */
+ vsi->orig_rss_size = 0;
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ if (reset_type == ICE_RESET_PFR) {
+ vsi->old_ena_tc = vsi->all_enatc;
+ vsi->old_numtc = vsi->all_numtc;
+ } else {
+ ice_remove_q_channels(vsi, true);
+
+			/* channel rebuild is not supported for other reset
+			 * types, so reset the related info
+			 */
+ vsi->old_ena_tc = 0;
+ vsi->all_enatc = 0;
+ vsi->old_numtc = 0;
+ vsi->all_numtc = 0;
+ vsi->req_txq = 0;
+ vsi->req_rxq = 0;
+ clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
+ }
+ }
+skip:
+
/* clear SW filtering DB */
ice_clear_hw_tbls(hw);
/* disable the VSIs and their queues that are not already DOWN */
@@ -499,8 +552,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
/**
* ice_do_reset - Initiate one of many types of resets
* @pf: board private structure
- * @reset_type: reset type requested
- * before this function was called.
+ * @reset_type: reset type requested before this function was called.
*/
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
@@ -509,7 +561,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, reset_type);
/* trigger the reset */
if (ice_reset(hw, reset_type)) {
@@ -567,7 +619,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* return if no valid reset type requested */
if (reset_type == ICE_RESET_INVAL)
return;
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, reset_type);
/* make sure we are ready to rebuild */
if (ice_check_reset(&pf->hw)) {
@@ -624,7 +676,10 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
break;
case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
- netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
+ netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
+ else
+ netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
break;
default:
break;
@@ -882,6 +937,29 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
}
/**
+ * ice_check_phy_fw_load - check if PHY FW load failed
+ * @pf: pointer to PF struct
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * check if external PHY FW load failed and print an error message if it did
+ */
+static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
+{
+ if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
+ clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
+ return;
+ }
+
+ if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
+ return;
+
+ if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
+ dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
+ set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
+ }
+}
+
+/**
* ice_check_module_power
* @pf: pointer to PF struct
* @link_cfg_err: bitmap from the link info structure
@@ -914,6 +992,20 @@ static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
}
/**
+ * ice_check_link_cfg_err - check if link configuration failed
+ * @pf: pointer to the PF struct
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Print an error if any link configuration failure is indicated by the
+ * link_cfg_err bitmap taken from the link info structure
+ */
+static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
+{
+ ice_check_module_power(pf, link_cfg_err);
+ ice_check_phy_fw_load(pf, link_cfg_err);
+}
+
+/**
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
@@ -948,7 +1040,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
pi->lport, ice_stat_str(status),
ice_aq_str(pi->hw->adminq.sq_last_status));
- ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+ ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
/* Check if the link state is up after updating link info, and treat
* this event as an UP event since the link is actually UP now.
@@ -1026,7 +1118,8 @@ static int ice_init_link_events(struct ice_port_info *pi)
u16 mask;
mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
- ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
+ ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
+ ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
@@ -1965,7 +2058,8 @@ static int ice_configure_phy(struct ice_vsi *vsi)
ice_print_topo_conflict(vsi);
- if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
+ if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
+ phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
return -EPERM;
if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
@@ -2096,7 +2190,7 @@ static void ice_check_media_subtask(struct ice_pf *pf)
if (err)
return;
- ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+ ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
@@ -2302,14 +2396,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
irq_num = pf->msix_entries[base + vector].vector;
- if (q_vector->tx.ring && q_vector->rx.ring) {
+ if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
tx_int_idx++;
- } else if (q_vector->rx.ring) {
+ } else if (q_vector->rx.rx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "rx", rx_int_idx++);
- } else if (q_vector->tx.ring) {
+ } else if (q_vector->tx.tx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "tx", tx_int_idx++);
} else {
@@ -2367,11 +2461,12 @@ free_q_irqs:
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
- int i;
+ struct ice_tx_desc *tx_desc;
+ int i, j;
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
u16 xdp_q_idx = vsi->alloc_txq + i;
- struct ice_ring *xdp_ring;
+ struct ice_tx_ring *xdp_ring;
xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
@@ -2380,16 +2475,29 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->q_index = xdp_q_idx;
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
- xdp_ring->ring_active = false;
xdp_ring->vsi = vsi;
xdp_ring->netdev = NULL;
+ xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_rs = ICE_TX_THRESH - 1;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
+ xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ spin_lock_init(&xdp_ring->tx_lock);
+ for (j = 0; j < xdp_ring->count; j++) {
+ tx_desc = ICE_TX_DESC(xdp_ring, j);
+ tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
+ }
+ }
+
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key))
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ else
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
}
return 0;
@@ -2455,6 +2563,10 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (__ice_vsi_get_qs(&xdp_qs_cfg))
goto err_map_xdp;
+ if (static_key_enabled(&ice_xdp_locking_key))
+ netdev_warn(vsi->netdev,
+ "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
+
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
@@ -2468,11 +2580,11 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
q_base = vsi->num_xdp_txq - xdp_rings_rem;
for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = xdp_ring;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
}
xdp_rings_rem -= xdp_rings_per_v;
}
@@ -2501,7 +2613,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
return 0;
clear_xdp_rings:
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
@@ -2509,7 +2621,7 @@ clear_xdp_rings:
err_map_xdp:
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
}
@@ -2542,25 +2654,25 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_ring *ring;
+ struct ice_tx_ring *ring;
- ice_for_each_ring(ring, q_vector->tx)
+ ice_for_each_tx_ring(ring, q_vector->tx)
if (!ring->tx_buf || !ice_ring_is_xdp(ring))
break;
/* restore the value of last node prior to XDP setup */
- q_vector->tx.ring = ring;
+ q_vector->tx.tx_ring = ring;
}
free_qmap:
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
}
mutex_unlock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
if (vsi->xdp_rings[i]->desc)
ice_free_tx_ring(vsi->xdp_rings[i]);
@@ -2571,6 +2683,9 @@ free_qmap:
devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
vsi->xdp_rings = NULL;
+ if (static_key_enabled(&ice_xdp_locking_key))
+ static_branch_dec(&ice_xdp_locking_key);
+
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
return 0;
@@ -2598,7 +2713,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
int i;
ice_for_each_rxq(vsi, i) {
- struct ice_ring *rx_ring = vsi->rx_rings[i];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
if (rx_ring->xsk_pool)
napi_schedule(&rx_ring->q_vector->napi);
@@ -2606,6 +2721,29 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
+ * @vsi: VSI to determine the count of XDP Tx qs for
+ *
+ * Returns 0 if the available Tx queue count is at least half of the CPU
+ * count, -ENOMEM otherwise
+ */
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
+{
+ u16 avail = ice_get_avail_txq_count(vsi->back);
+ u16 cpus = num_possible_cpus();
+
+ if (avail < cpus / 2)
+ return -ENOMEM;
+
+ vsi->num_xdp_txq = min_t(u16, avail, cpus);
+
+ if (vsi->num_xdp_txq < cpus)
+ static_branch_inc(&ice_xdp_locking_key);
+
+ return 0;
+}
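Review note: a worked example of the sizing policy above, under assumed counts. With 16 possible CPUs but only 12 available Tx queues, 12 >= 16/2 so the request succeeds, and because fewer XDP rings than CPUs were carved out, the ice_xdp_locking_key static branch turns on and Rx queues share XDP rings via the i % num_xdp_txq mapping seen in ice_xdp_alloc_setup_rings() above.

	/* sketch only, not driver code: assumed counts */
	u16 cpus = 16, avail = 12;
	u16 num_xdp_txq = min_t(u16, avail, cpus);	/* = 12 */
	bool shared = num_xdp_txq < cpus;		/* true: static key on */
	u16 rxq = 13;					/* example Rx queue */
	u16 xdp_ring_idx = rxq % num_xdp_txq;		/* = 1; ring is shared,
							 * so tx_lock is taken
							 */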
+
+/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @vsi: VSI to setup XDP for
* @prog: XDP program
@@ -2634,10 +2772,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
if (!ice_is_xdp_ena_vsi(vsi) && prog) {
- vsi->num_xdp_txq = vsi->alloc_rxq;
- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
- if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
+ if (xdp_ring_err) {
+ NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ } else {
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ }
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
@@ -3103,6 +3245,9 @@ static void ice_set_netdev_features(struct net_device *netdev)
/* enable features */
netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_HW_TC;
+
/* encap and VLAN devices inherit default, csumo and tso features */
netdev->hw_enc_features |= dflt_features | csumo_features |
tso_features;
@@ -3139,7 +3284,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF) {
SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
- ether_addr_copy(netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(netdev, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
}
@@ -3182,7 +3327,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+}
+
+static struct ice_vsi *
+ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+ struct ice_channel *ch)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
}
/**
@@ -3196,7 +3348,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
}
/**
@@ -3210,7 +3362,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
}
/**
@@ -3235,7 +3387,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/* Enable VLAN pruning when a VLAN other than 0 is added */
if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
- ret = ice_cfg_vlan_pruning(vsi, true, false);
+ ret = ice_cfg_vlan_pruning(vsi, true);
if (ret)
return ret;
}
@@ -3279,13 +3431,70 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
/* Disable pruning when VLAN 0 is the only VLAN rule */
if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
- ret = ice_cfg_vlan_pruning(vsi, false, false);
+ ret = ice_cfg_vlan_pruning(vsi, false);
set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
return ret;
}
/**
+ * ice_rep_indr_tc_block_unbind
+ * @cb_priv: indirection block private data
+ */
+static void ice_rep_indr_tc_block_unbind(void *cb_priv)
+{
+ struct ice_indr_block_priv *indr_priv = cb_priv;
+
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+}
+
+/**
+ * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
+ * @vsi: VSI struct which has the netdev
+ */
+static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
+{
+ struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
+
+ flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
+ ice_rep_indr_tc_block_unbind);
+}
+
+/**
+ * ice_tc_indir_block_remove - clean indirect TC block notifications
+ * @pf: PF structure
+ */
+static void ice_tc_indir_block_remove(struct ice_pf *pf)
+{
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+
+ if (!pf_vsi)
+ return;
+
+ ice_tc_indir_block_unregister(pf_vsi);
+}
+
+/**
+ * ice_tc_indir_block_register - Register TC indirect block notifications
+ * @vsi: VSI struct which has the netdev
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_tc_indir_block_register(struct ice_vsi *vsi)
+{
+ struct ice_netdev_priv *np;
+
+ if (!vsi || !vsi->netdev)
+ return -EINVAL;
+
+ np = netdev_priv(vsi->netdev);
+
+ INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
+ return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
+}
+
+/**
* ice_setup_pf_sw - Setup the HW switch on startup or after reset
* @pf: board private structure
*
@@ -3293,6 +3502,7 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
*/
static int ice_setup_pf_sw(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi;
int status = 0;
@@ -3303,6 +3513,9 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
if (!vsi)
return -ENOMEM;
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
status = ice_cfg_netdev(vsi);
if (status) {
status = -ENODEV;
@@ -3311,6 +3524,13 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
/* netdev has to be configured before setting frame size */
ice_vsi_cfg_frame_size(vsi);
+ /* init indirect block notifications */
+ status = ice_tc_indir_block_register(vsi);
+ if (status) {
+ dev_err(dev, "Failed to register netdev notifier\n");
+ goto unroll_cfg_netdev;
+ }
+
/* Setup DCB netlink interface */
ice_dcbnl_setup(vsi);
@@ -3322,7 +3542,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
status = ice_set_cpu_rx_rmap(vsi);
if (status) {
- dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
+ dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
vsi->vsi_num, status);
status = -EINVAL;
goto unroll_napi_add;
@@ -3335,8 +3555,9 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
free_cpu_rx_map:
ice_free_cpu_rx_rmap(vsi);
-
unroll_napi_add:
+ ice_tc_indir_block_unregister(vsi);
+unroll_cfg_netdev:
if (vsi) {
ice_napi_del(vsi);
if (vsi->netdev) {
@@ -3538,6 +3759,13 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= needed;
}
+ /* reserve for switchdev */
+ needed = ICE_ESWITCH_MSIX;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ v_budget += needed;
+ v_left -= needed;
+
/* total used for non-traffic vectors */
v_other = v_budget;
@@ -4170,11 +4398,11 @@ static int ice_register_netdev(struct ice_pf *pf)
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
- err = ice_devlink_create_port(vsi);
+ err = ice_devlink_create_pf_port(pf);
if (err)
goto err_devlink_create;
- devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
+ devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
return 0;
err_devlink_create:
@@ -4224,6 +4452,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (!pf)
return -ENOMEM;
+ /* initialize Auxiliary index to invalid value */
+ pf->aux_idx = -1;
+
/* set up for high or low DMA */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
@@ -4258,12 +4489,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
- err = ice_devlink_register(pf);
- if (err) {
- dev_err(dev, "ice_devlink_register failed: %d\n", err);
- goto err_exit_unroll;
- }
-
#ifndef CONFIG_DYNAMIC_DEBUG
if (debug < -1)
hw->debug_mask = debug;
@@ -4276,6 +4501,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_exit_unroll;
}
+ ice_init_feature_support(pf);
+
ice_request_fw(pf);
/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
@@ -4411,7 +4638,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_init_link_dflt_override(pf->hw.port_info);
- ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
+ ice_check_link_cfg_err(pf,
+ pf->hw.port_info->phy.link_info.link_cfg_err);
/* if media available, initialize PHY settings */
if (pf->hw.port_info->phy.link_info.link_info &
@@ -4497,6 +4725,7 @@ probe_done:
dev_warn(dev, "RDMA is not supported on this device\n");
}
+ ice_devlink_register(pf);
return 0;
err_init_aux_unroll:
@@ -4520,7 +4749,6 @@ err_init_pf_unroll:
ice_devlink_destroy_regions(pf);
ice_deinit_hw(hw);
err_exit_unroll:
- ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
return err;
@@ -4597,15 +4825,15 @@ static void ice_remove(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);
int i;
- if (!pf)
- return;
-
+ ice_devlink_unregister(pf);
for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
if (!ice_is_reset_in_progress(pf->state))
break;
msleep(100);
}
+ ice_tc_indir_block_remove(pf);
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -4615,7 +4843,8 @@ static void ice_remove(struct pci_dev *pdev)
ice_aq_cancel_waiting_tasks(pf);
ice_unplug_aux_dev(pf);
- ida_free(&ice_aux_ida, pf->aux_idx);
+ if (pf->aux_idx >= 0)
+ ida_free(&ice_aux_ida, pf->aux_idx);
set_bit(ICE_DOWN, pf->state);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
@@ -4636,7 +4865,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_deinit_pf(pf);
ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
- ice_devlink_unregister(pf);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
@@ -4898,7 +5126,7 @@ ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
set_bit(ICE_PFR_REQ, pf->state);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, ICE_RESET_PFR);
}
}
@@ -4990,7 +5218,7 @@ static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
set_bit(ICE_PFR_REQ, pf->state);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, ICE_RESET_PFR);
}
}
}
@@ -5016,6 +5244,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
@@ -5144,10 +5374,16 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
+ if (ice_chnl_dmac_fltr_cnt(pf)) {
+ netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
+ mac);
+ return -EAGAIN;
+ }
+
netif_addr_lock_bh(netdev);
ether_addr_copy(old_mac, netdev->dev_addr);
/* change the netdev's MAC address */
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ eth_hw_addr_set(netdev, mac);
netif_addr_unlock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
@@ -5175,7 +5411,7 @@ err_update_filters:
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
netif_addr_lock_bh(netdev);
- ether_addr_copy(netdev->dev_addr, old_mac);
+ eth_hw_addr_set(netdev, old_mac);
netif_addr_unlock_bh(netdev);
return err;
}
@@ -5380,10 +5616,10 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
!(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, true, false);
+ ret = ice_cfg_vlan_pruning(vsi, true);
else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, false, false);
+ ret = ice_cfg_vlan_pruning(vsi, false);
if ((features & NETIF_F_NTUPLE) &&
!(netdev->features & NETIF_F_NTUPLE)) {
@@ -5395,6 +5631,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
ice_clear_arfs(vsi);
}
+ /* don't turn off hw_tc_offload when ADQ is already enabled */
+ if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
+ dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
+ return -EACCES;
+ }
+
+ if ((features & NETIF_F_HW_TC) &&
+ !(netdev->features & NETIF_F_HW_TC))
+ set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ else
+ clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+
return ret;
}
@@ -5444,77 +5692,59 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
}
/* THEORY OF MODERATION:
- * The below code creates custom DIM profiles for use by this driver, because
- * the ice driver hardware works differently than the hardware that DIMLIB was
+ * The ice driver hardware works differently than the hardware that DIMLIB was
* originally made for. ice hardware doesn't have packet count limits that
* can trigger an interrupt, but it *does* have interrupt rate limit support,
- * and this code adds that capability to be used by the driver when it's using
- * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
- * for how to "respond" to traffic and interrupts, so this driver uses a
- * slightly different set of moderation parameters to get best performance.
+ * which is hard-coded to a limit of 250,000 ints/second.
+ * If not using dynamic moderation, the INTRL value can be modified
+ * by ethtool rx-usecs-high.
*/
struct ice_dim {
/* the throttle rate for interrupts, basically worst case delay before
* an initial interrupt fires, value is stored in microseconds.
*/
u16 itr;
- /* the rate limit for interrupts, which can cap a delay from a small
- * ITR at a certain amount of interrupts per second. f.e. a 2us ITR
- * could yield as much as 500,000 interrupts per second, but with a
- * 10us rate limit, it limits to 100,000 interrupts per second. Value
- * is stored in microseconds.
- */
- u16 intrl;
};
/* Make a different profile for Rx that doesn't allow quite so aggressive
- * moderation at the high end (it maxes out at 128us or about 8k interrupts a
- * second. The INTRL/rate parameters here are only useful to cap small ITR
- * values, which is why for larger ITR's - like 128, which can only generate
- * 8k interrupts per second, there is no point to rate limit and the values
- * are set to zero. The rate limit values do affect latency, and so must
- * be reasonably small so to not impact latency sensitive tests.
+ * moderation at the high end (it maxes out at 126us or about 8k interrupts a
+ * second).
*/
static const struct ice_dim rx_profile[] = {
- {2, 10},
- {8, 16},
- {32, 0},
- {96, 0},
- {128, 0}
+ {2}, /* 500,000 ints/s, capped at 250K by INTRL */
+ {8}, /* 125,000 ints/s */
+ {16}, /* 62,500 ints/s */
+ {62}, /* 16,129 ints/s */
+ {126} /* 7,936 ints/s */
};
/* The transmit profile, which has the same sorts of values
* as the previous struct
*/
static const struct ice_dim tx_profile[] = {
- {2, 10},
- {8, 16},
- {64, 0},
- {128, 0},
- {256, 0}
+ {2}, /* 500,000 ints/s, capped at 250K by INTRL */
+ {8}, /* 125,000 ints/s */
+	{40},	/* 25,000 ints/s */
+ {128}, /* 7,812 ints/s */
+ {256} /* 3,906 ints/s */
};
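Review note: the ints/s annotations in both tables follow from the ITR period alone: the hardware raises at most one interrupt per ITR window, so the rate is roughly USEC_PER_SEC / itr. A small sanity-check helper, illustrative only:

	/* interrupt rate implied by an ITR value in microseconds,
	 * e.g. 8 -> 125,000 ints/s and 126 -> ~7,936 ints/s
	 */
	static inline u32 itr_to_ints_per_sec(u16 itr_us)
	{
		return itr_us ? USEC_PER_SEC / itr_us : 0;
	}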
static void ice_tx_dim_work(struct work_struct *work)
{
struct ice_ring_container *rc;
- struct ice_q_vector *q_vector;
struct dim *dim;
- u16 itr, intrl;
+ u16 itr;
dim = container_of(work, struct dim, work);
- rc = container_of(dim, struct ice_ring_container, dim);
- q_vector = container_of(rc, struct ice_q_vector, tx);
+ rc = (struct ice_ring_container *)dim->priv;
- if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
- dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
+ WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
/* look up the values in our local table */
itr = tx_profile[dim->profile_ix].itr;
- intrl = tx_profile[dim->profile_ix].intrl;
- ice_trace(tx_dim_work, q_vector, dim);
+ ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
ice_write_itr(rc, itr);
- ice_write_intrl(q_vector, intrl);
dim->state = DIM_START_MEASURE;
}
@@ -5522,28 +5752,65 @@ static void ice_tx_dim_work(struct work_struct *work)
static void ice_rx_dim_work(struct work_struct *work)
{
struct ice_ring_container *rc;
- struct ice_q_vector *q_vector;
struct dim *dim;
- u16 itr, intrl;
+ u16 itr;
dim = container_of(work, struct dim, work);
- rc = container_of(dim, struct ice_ring_container, dim);
- q_vector = container_of(rc, struct ice_q_vector, rx);
+ rc = (struct ice_ring_container *)dim->priv;
- if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
- dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
+ WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
/* look up the values in our local table */
itr = rx_profile[dim->profile_ix].itr;
- intrl = rx_profile[dim->profile_ix].intrl;
- ice_trace(rx_dim_work, q_vector, dim);
+ ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
ice_write_itr(rc, itr);
- ice_write_intrl(q_vector, intrl);
dim->state = DIM_START_MEASURE;
}
+#define ICE_DIM_DEFAULT_PROFILE_IX 1
+
+/**
+ * ice_init_moderation - set up interrupt moderation
+ * @q_vector: the vector containing rings to be configured
+ *
+ * Set up interrupt moderation registers, with the intent to do the right thing
+ * when called from reset or from probe, whether or not dynamic moderation is
+ * enabled. Take special care to write all the registers both when dynamic
+ * moderation is enabled and when it is not, to make sure hardware is in a
+ * known state.
+ */
+static void ice_init_moderation(struct ice_q_vector *q_vector)
+{
+ struct ice_ring_container *rc;
+ bool tx_dynamic, rx_dynamic;
+
+ rc = &q_vector->tx;
+ INIT_WORK(&rc->dim.work, ice_tx_dim_work);
+ rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
+ rc->dim.priv = rc;
+ tx_dynamic = ITR_IS_DYNAMIC(rc);
+
+ /* set the initial TX ITR to match the above */
+ ice_write_itr(rc, tx_dynamic ?
+ tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
+
+ rc = &q_vector->rx;
+ INIT_WORK(&rc->dim.work, ice_rx_dim_work);
+ rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
+ rc->dim.priv = rc;
+ rx_dynamic = ITR_IS_DYNAMIC(rc);
+
+ /* set the initial RX ITR to match the above */
+ ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
+ rc->itr_setting);
+
+ ice_set_q_vector_intrl(q_vector);
+}
+
/**
* ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
* @vsi: the VSI being configured
@@ -5558,13 +5825,9 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
- q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ ice_init_moderation(q_vector);
- INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
- q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
napi_enable(&q_vector->napi);
}
}
@@ -5624,7 +5887,8 @@ int ice_up(struct ice_vsi *vsi)
/**
* ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
- * @ring: Tx or Rx ring to read stats from
+ * @syncp: pointer to u64_stats_sync
+ * @stats: stats that pkts and bytes count will be taken from
* @pkts: packets stats counter
* @bytes: bytes stats counter
*
@@ -5632,19 +5896,16 @@ int ice_up(struct ice_vsi *vsi)
* that needs to be performed to read u64 values in 32 bit machine.
*/
static void
-ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
+ u64 *pkts, u64 *bytes)
{
unsigned int start;
- *pkts = 0;
- *bytes = 0;
- if (!ring)
- return;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- *pkts = ring->stats.pkts;
- *bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ start = u64_stats_fetch_begin_irq(syncp);
+ *pkts = stats.pkts;
+ *bytes = stats.bytes;
+ } while (u64_stats_fetch_retry_irq(syncp, start));
}
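Review note: for context, the writer side this helper pairs with (a sketch of the existing pattern in ice_update_tx_ring_stats()/ice_update_rx_ring_stats(), not new code); updates happen under u64_stats_update_begin()/end() so the fetch/retry loop above never observes torn 64-bit counters on 32-bit builds. Here pkts and bytes stand in for the caller's accumulated totals.

	u64_stats_update_begin(&ring->syncp);
	ring->stats.pkts += pkts;	/* assumed local totals */
	ring->stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);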
/**
@@ -5654,18 +5915,19 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
* @count: number of rings
*/
static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
u16 count)
{
struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
u16 i;
for (i = 0; i < count; i++) {
- struct ice_ring *ring;
- u64 pkts, bytes;
+ struct ice_tx_ring *ring;
+ u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+		if (!ring)
+			continue;
+		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
vsi->tx_restart += ring->tx_stats.restart_q;
@@ -5704,9 +5966,9 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
- struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);
+ struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
@@ -5970,7 +6232,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
napi_disable(&q_vector->napi);
cancel_work_sync(&q_vector->tx.dim.work);
@@ -5989,9 +6251,11 @@ int ice_down(struct ice_vsi *vsi)
/* Caller of this function is expected to set the
* vsi->state ICE_DOWN bit
*/
- if (vsi->netdev) {
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
+ } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ ice_eswitch_stop_all_tx_queues(vsi->back);
}
ice_vsi_dis_irq(vsi);
@@ -6053,12 +6317,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
}
ice_for_each_txq(vsi, i) {
- struct ice_ring *ring = vsi->tx_rings[i];
+ struct ice_tx_ring *ring = vsi->tx_rings[i];
if (!ring)
return -EINVAL;
- ring->netdev = vsi->netdev;
+ if (vsi->netdev)
+ ring->netdev = vsi->netdev;
err = ice_setup_tx_ring(ring);
if (err)
break;
@@ -6084,12 +6349,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
ice_for_each_rxq(vsi, i) {
- struct ice_ring *ring = vsi->rx_rings[i];
+ struct ice_rx_ring *ring = vsi->rx_rings[i];
if (!ring)
return -EINVAL;
- ring->netdev = vsi->netdev;
+ if (vsi->netdev)
+ ring->netdev = vsi->netdev;
err = ice_setup_rx_ring(ring);
if (err)
break;
@@ -6162,7 +6428,7 @@ err_setup_tx:
*
* Returns 0 on success, negative value on error
*/
-static int ice_vsi_open(struct ice_vsi *vsi)
+int ice_vsi_open(struct ice_vsi *vsi)
{
char int_name[ICE_INT_NAME_STR_LEN];
struct ice_pf *pf = vsi->back;
@@ -6187,14 +6453,16 @@ static int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
- if (err)
- goto err_set_qs;
+ if (vsi->type == ICE_VSI_PF) {
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
+ if (err)
+ goto err_set_qs;
- err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
- if (err)
- goto err_set_qs;
+ err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
+ if (err)
+ goto err_set_qs;
+ }
err = ice_up_complete(vsi);
if (err)
@@ -6229,6 +6497,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
if (!pf->vsi[i])
continue;
+ if (pf->vsi[i]->type == ICE_VSI_CHNL)
+ continue;
+
err = ice_vsi_release(pf->vsi[i]);
if (err)
dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
@@ -6433,6 +6704,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
+ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
+ if (err) {
+ dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
+ goto err_vsi_rebuild;
+ }
+
+ if (reset_type == ICE_RESET_PFR) {
+ err = ice_rebuild_channels(pf);
+ if (err) {
+ dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
+ err);
+ goto err_vsi_rebuild;
+ }
+ }
+
/* If Flow Director is active */
if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
@@ -6979,7 +7265,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_ring *tx_ring = NULL;
+ struct ice_tx_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u32 i;
@@ -6997,7 +7283,7 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_txq; i++)
+ ice_for_each_txq(vsi, i)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
if (txqueue == vsi->tx_rings[i]->q_index) {
tx_ring = vsi->tx_rings[i];
@@ -7054,6 +7340,1050 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
+ * ice_setup_tc_cls_flower - flower classifier offloads
+ * @np: net device to configure
+ * @filter_dev: device on which filter is added
+ * @cls_flower: offload data
+ */
+static int
+ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
+ struct net_device *filter_dev,
+ struct flow_cls_offload *cls_flower)
+{
+ struct ice_vsi *vsi = np->vsi;
+
+ if (cls_flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return ice_del_cls_flower(vsi, cls_flower);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * ice_setup_tc_block_cb - callback handler registered for TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ */
+static int
+ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct ice_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_validate_mqprio_qopt - Validate TCF input parameters
+ * @vsi: Pointer to VSI
+ * @mqprio_qopt: input parameters for mqprio queue configuration
+ *
+ * This function validates MQPRIO params, such as qcount (power of 2 wherever
+ * needed), and makes sure the user doesn't specify a qcount or BW rate limit
+ * for more TCs than "num_tc"
+ */
+static int
+ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u64 sum_max_rate = 0, sum_min_rate = 0;
+ int non_power_of_2_qcount = 0;
+ struct ice_pf *pf = vsi->back;
+ int max_rss_q_cnt = 0;
+ struct device *dev;
+ int i, speed;
+ u8 num_tc;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ if (mqprio_qopt->qopt.offset[0] != 0 ||
+ mqprio_qopt->qopt.num_tc < 1 ||
+ mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+ vsi->ch_rss_size = 0;
+ num_tc = mqprio_qopt->qopt.num_tc;
+
+ for (i = 0; num_tc; i++) {
+ int qcount = mqprio_qopt->qopt.count[i];
+ u64 max_rate, min_rate, rem;
+
+ if (!qcount)
+ return -EINVAL;
+
+ if (is_power_of_2(qcount)) {
+ if (non_power_of_2_qcount &&
+ qcount > non_power_of_2_qcount) {
+ dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
+ qcount, non_power_of_2_qcount);
+ return -EINVAL;
+ }
+ if (qcount > max_rss_q_cnt)
+ max_rss_q_cnt = qcount;
+ } else {
+ if (non_power_of_2_qcount &&
+ qcount != non_power_of_2_qcount) {
+ dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
+ qcount, non_power_of_2_qcount);
+ return -EINVAL;
+ }
+ if (qcount < max_rss_q_cnt) {
+ dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
+ qcount, max_rss_q_cnt);
+ return -EINVAL;
+ }
+ max_rss_q_cnt = qcount;
+ non_power_of_2_qcount = qcount;
+ }
+
+		/* TC command takes input in Kbps/Mbps/Gbps or Kbit/Mbit/Gbit etc. but
+ * converts the bandwidth rate limit into Bytes/s when
+ * passing it down to the driver. So convert input bandwidth
+ * from Bytes/s to Kbps
+ */
+ max_rate = mqprio_qopt->max_rate[i];
+ max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
+ sum_max_rate += max_rate;
+
+ /* min_rate is minimum guaranteed rate and it can't be zero */
+ min_rate = mqprio_qopt->min_rate[i];
+ min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
+ sum_min_rate += min_rate;
+
+ if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
+ dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
+ min_rate, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
+ if (rem) {
+ dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
+ i, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
+ if (rem) {
+ dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
+ i, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ /* min_rate can't be more than max_rate, except when max_rate
+ * is zero (implies max_rate sought is max line rate). In such
+ * a case min_rate can be more than max.
+ */
+ if (max_rate && min_rate > max_rate) {
+ dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
+ min_rate, max_rate);
+ return -EINVAL;
+ }
+
+ if (i >= mqprio_qopt->qopt.num_tc - 1)
+ break;
+ if (mqprio_qopt->qopt.offset[i + 1] !=
+ (mqprio_qopt->qopt.offset[i] + qcount))
+ return -EINVAL;
+ }
+ if (vsi->num_rxq <
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+ if (vsi->num_txq <
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (sum_max_rate && sum_max_rate > (u64)speed) {
+ dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
+ sum_max_rate, speed);
+ return -EINVAL;
+ }
+ if (sum_min_rate && sum_min_rate > (u64)speed) {
+ dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
+ sum_min_rate, speed);
+ return -EINVAL;
+ }
+
+ /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
+ vsi->ch_rss_size = max_rss_q_cnt;
+
+ return 0;
+}
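Review note: a worked example of the Bytes/s -> Kbps conversion above, assuming ICE_BW_KBPS_DIVISOR is 125 (1 Kbit = 125 bytes): a "max_rate 1Gbit" request reaches the driver from the tc layer as 125,000,000 Bytes/s.

	u64 bytes_per_sec = 125000000ULL;	/* 1 Gbit/s from tc */
	u64 kbps = div_u64(bytes_per_sec, 125);	/* = 1,000,000 Kbps */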
+
+/**
+ * ice_add_channel - add a channel by adding VSI
+ * @pf: ptr to PF device
+ * @sw_id: underlying HW switching element ID
+ * @ch: ptr to channel structure
+ *
+ * Add a channel (VSI) using add_vsi and queue_map
+ */
+static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi;
+
+ if (ch->type != ICE_VSI_CHNL) {
+ dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
+ return -EINVAL;
+ }
+
+ vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
+ if (!vsi || vsi->type != ICE_VSI_CHNL) {
+ dev_err(dev, "create chnl VSI failure\n");
+ return -EINVAL;
+ }
+
+ ch->sw_id = sw_id;
+ ch->vsi_num = vsi->vsi_num;
+ ch->info.mapping_flags = vsi->info.mapping_flags;
+ ch->ch_vsi = vsi;
+ /* set the back pointer of channel for newly created VSI */
+ vsi->ch = ch;
+
+ memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
+ sizeof(vsi->info.q_mapping));
+ memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+
+ return 0;
+}
+
+/**
+ * ice_chnl_cfg_res
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Configure channel specific resources such as rings and vectors.
+ */
+static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ int i;
+
+ for (i = 0; i < ch->num_txq; i++) {
+ struct ice_q_vector *tx_q_vector, *rx_q_vector;
+ struct ice_ring_container *rc;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+
+ tx_ring = vsi->tx_rings[ch->base_q + i];
+ rx_ring = vsi->rx_rings[ch->base_q + i];
+ if (!tx_ring || !rx_ring)
+ continue;
+
+ /* setup ring being channel enabled */
+ tx_ring->ch = ch;
+ rx_ring->ch = ch;
+
+ /* following code block sets up vector specific attributes */
+ tx_q_vector = tx_ring->q_vector;
+ rx_q_vector = rx_ring->q_vector;
+ if (!tx_q_vector && !rx_q_vector)
+ continue;
+
+ if (tx_q_vector) {
+ tx_q_vector->ch = ch;
+ /* setup Tx and Rx ITR setting if DIM is off */
+ rc = &tx_q_vector->tx;
+ if (!ITR_IS_DYNAMIC(rc))
+ ice_write_itr(rc, rc->itr_setting);
+ }
+ if (rx_q_vector) {
+ rx_q_vector->ch = ch;
+ /* setup Tx and Rx ITR setting if DIM is off */
+ rc = &rx_q_vector->rx;
+ if (!ITR_IS_DYNAMIC(rc))
+ ice_write_itr(rc, rc->itr_setting);
+ }
+ }
+
+	/* it is safe to assume that, if the channel has a non-zero num_txq or
+	 * num_rxq, then the GLINT_ITR register has been written to perform an
+	 * in-context update; hence perform a flush
+	 */
+ if (ch->num_txq || ch->num_rxq)
+ ice_flush(&vsi->back->hw);
+}
+
+/**
+ * ice_cfg_chnl_all_res - configure channel resources
+ * @vsi: ptr to main VSI
+ * @ch: ptr to channel structure
+ *
+ * This function configures channel specific resources such as flow-director
+ * counter index, and other resources such as queues, vectors, ITR settings
+ */
+static void
+ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ /* configure channel (aka ADQ) resources such as queues, vectors,
+ * ITR settings for channel specific vectors and anything else
+ */
+ ice_chnl_cfg_res(vsi, ch);
+}
+
+/**
+ * ice_setup_hw_channel - setup new channel
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ * @sw_id: underlying HW switching element ID
+ * @type: type of channel to be created (VMDq2/VF)
+ *
+ * Set up a new channel (VSI) based on the specified type (VMDq2/VF)
+ * and configure Tx rings accordingly
+ */
+static int
+ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
+ struct ice_channel *ch, u16 sw_id, u8 type)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret;
+
+ ch->base_q = vsi->next_base_q;
+ ch->type = type;
+
+ ret = ice_add_channel(pf, sw_id, ch);
+ if (ret) {
+ dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
+ return ret;
+ }
+
+ /* configure/setup ADQ specific resources */
+ ice_cfg_chnl_all_res(vsi, ch);
+
+ /* make sure to update the next_base_q so that subsequent channel's
+ * (aka ADQ) VSI queue map is correct
+ */
+ vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
+ dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
+ ch->num_rxq);
+
+ return 0;
+}
+
+/**
+ * ice_setup_channel - setup new channel using uplink element
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Set up a new channel (VSI) based on the specified type (VMDq2/VF)
+ * and the uplink switching element
+ */
+static bool
+ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
+ struct ice_channel *ch)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ u16 sw_id;
+ int ret;
+
+ if (vsi->type != ICE_VSI_PF) {
+ dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
+ return false;
+ }
+
+ sw_id = pf->first_sw->sw_id;
+
+ /* create channel (VSI) */
+ ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
+ if (ret) {
+ dev_err(dev, "failed to setup hw_channel\n");
+ return false;
+ }
+	dev_dbg(dev, "successfully created channel\n");
+
+ return ch->ch_vsi ? true : false;
+}
+
+/**
+ * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
+ * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
+ */
+static int
+ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
+{
+ int err;
+
+ err = ice_set_min_bw_limit(vsi, min_tx_rate);
+ if (err)
+ return err;
+
+ return ice_set_max_bw_limit(vsi, max_tx_rate);
+}
+
+/**
+ * ice_create_q_channel - function to create channel
+ * @vsi: VSI to be configured
+ * @ch: ptr to channel (it contains channel specific params)
+ *
+ * This function creates a channel (VSI) using the num_queues specified by the
+ * user and reconfigures RSS if needed.
+ */
+static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+
+ if (!ch)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+ if (!ch->num_txq || !ch->num_rxq) {
+ dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
+ return -EINVAL;
+ }
+
+ if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
+ dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
+ vsi->cnt_q_avail, ch->num_txq);
+ return -EINVAL;
+ }
+
+ if (!ice_setup_channel(pf, vsi, ch)) {
+ dev_info(dev, "Failed to setup channel\n");
+ return -EINVAL;
+ }
+ /* configure BW rate limit */
+ if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
+ int ret;
+
+ ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
+ ch->min_tx_rate);
+ if (ret)
+ dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->ch_vsi->vsi_num);
+ else
+ dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->ch_vsi->vsi_num);
+ }
+
+ vsi->cnt_q_avail -= ch->num_txq;
+
+ return 0;
+}
+
+/**
+ * ice_rem_all_chnl_fltrs - removes all channel filters
+ * @pf: ptr to PF; TC-flower based filters are tracked at the PF level
+ *
+ * Remove all advanced switch filters only if they are channel specific
+ * tc-flower based filters
+ */
+static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct hlist_node *node;
+
+ /* to remove all channel filters, iterate an ordered list of filters */
+ hlist_for_each_entry_safe(fltr, node,
+ &pf->tc_flower_fltr_list,
+ tc_flower_node) {
+ struct ice_rule_query_data rule;
+ int status;
+
+ /* for now process only channel specific filters */
+ if (!ice_is_chnl_fltr(fltr))
+ continue;
+
+ rule.rid = fltr->rid;
+ rule.rule_id = fltr->rule_id;
+ rule.vsi_handle = fltr->dest_id;
+ status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
+ if (status) {
+ if (status == -ENOENT)
+ dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
+ rule.rule_id);
+ else
+ dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
+ status);
+ } else if (fltr->dest_vsi) {
+ /* update advanced switch filter count */
+ if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
+ u32 flags = fltr->flags;
+
+ fltr->dest_vsi->num_chnl_fltr--;
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC))
+ pf->num_dmac_chnl_fltrs--;
+ }
+ }
+
+ hlist_del(&fltr->tc_flower_node);
+ kfree(fltr);
+ }
+}
+
+/**
+ * ice_remove_q_channels - Remove queue channels for the TCs
+ * @vsi: VSI to be configured
+ * @rem_fltr: delete advanced switch filter or not
+ *
+ * Remove queue channels for the TCs
+ */
+static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
+{
+ struct ice_channel *ch, *ch_tmp;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+	/* remove all tc-flower based filters if they are channel filters only */
+ if (rem_fltr)
+ ice_rem_all_chnl_fltrs(pf);
+
+ /* perform cleanup for channels if they exist */
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ struct ice_vsi *ch_vsi;
+
+ list_del(&ch->list);
+ ch_vsi = ch->ch_vsi;
+ if (!ch_vsi) {
+ kfree(ch);
+ continue;
+ }
+
+ /* Reset queue contexts */
+ for (i = 0; i < ch->num_rxq; i++) {
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+
+ tx_ring = vsi->tx_rings[ch->base_q + i];
+ rx_ring = vsi->rx_rings[ch->base_q + i];
+ if (tx_ring) {
+ tx_ring->ch = NULL;
+ if (tx_ring->q_vector)
+ tx_ring->q_vector->ch = NULL;
+ }
+ if (rx_ring) {
+ rx_ring->ch = NULL;
+ if (rx_ring->q_vector)
+ rx_ring->q_vector->ch = NULL;
+ }
+ }
+
+ /* clear the VSI from scheduler tree */
+ ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
+
+ /* Delete VSI from FW */
+ ice_vsi_delete(ch->ch_vsi);
+
+ /* Delete VSI from PF and HW VSI arrays */
+ ice_vsi_clear(ch->ch_vsi);
+
+ /* free the channel */
+ kfree(ch);
+ }
+
+ /* clear the channel VSI map which is stored in main VSI */
+ ice_for_each_chnl_tc(i)
+ vsi->tc_map_vsi[i] = NULL;
+
+ /* reset main VSI's all TC information */
+ vsi->all_enatc = 0;
+ vsi->all_numtc = 0;
+}
+
+/**
+ * ice_rebuild_channels - rebuild channels
+ * @pf: ptr to PF
+ *
+ * Recreate channel VSIs and replay filters
+ */
+static int ice_rebuild_channels(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *main_vsi;
+ bool rem_adv_fltr = true;
+ struct ice_channel *ch;
+ struct ice_vsi *vsi;
+ int tc_idx = 1;
+ int i, err;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (!main_vsi)
+ return 0;
+
+ if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
+ main_vsi->old_numtc == 1)
+ return 0; /* nothing to be done */
+
+ /* reconfigure main VSI based on old value of TC and cached values
+ * for MQPRIO opts
+ */
+ err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
+ if (err) {
+ dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
+ main_vsi->old_ena_tc, main_vsi->vsi_num);
+ return err;
+ }
+
+ /* rebuild ADQ VSIs */
+ ice_for_each_vsi(pf, i) {
+ enum ice_vsi_type type;
+
+ vsi = pf->vsi[i];
+ if (!vsi || vsi->type != ICE_VSI_CHNL)
+ continue;
+
+ type = vsi->type;
+
+ /* rebuild ADQ VSI */
+ err = ice_vsi_rebuild(vsi, true);
+ if (err) {
+ dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
+ ice_vsi_type_str(type), vsi->idx, err);
+ goto cleanup;
+ }
+
+ /* Re-map HW VSI number, using VSI handle that has been
+ * previously validated in ice_replay_vsi() call above
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+
+ /* replay filters for the VSI */
+ err = ice_replay_vsi(&pf->hw, vsi->idx);
+ if (err) {
+ dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
+ ice_vsi_type_str(type), err, vsi->idx);
+ rem_adv_fltr = false;
+ goto cleanup;
+ }
+ dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
+ ice_vsi_type_str(type), vsi->idx);
+
+ /* store ADQ VSI at correct TC index in main VSI's
+ * map of TC to VSI
+ */
+ main_vsi->tc_map_vsi[tc_idx++] = vsi;
+ }
+
+	/* ADQ VSI(s) have been rebuilt successfully, so set up the
+	 * channels for the main VSI's Tx and Rx rings
+ */
+ list_for_each_entry(ch, &main_vsi->ch_list, list) {
+ struct ice_vsi *ch_vsi;
+
+ ch_vsi = ch->ch_vsi;
+ if (!ch_vsi)
+ continue;
+
+ /* reconfig channel resources */
+ ice_cfg_chnl_all_res(main_vsi, ch);
+
+ /* replay BW rate limit if it is non-zero */
+ if (!ch->max_tx_rate && !ch->min_tx_rate)
+ continue;
+
+ err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
+ ch->min_tx_rate);
+ if (err)
+ dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
+ err, ch->max_tx_rate, ch->min_tx_rate,
+ ch_vsi->vsi_num);
+ else
+ dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->min_tx_rate,
+ ch_vsi->vsi_num);
+ }
+
+ /* reconfig RSS for main VSI */
+ if (main_vsi->ch_rss_size)
+ ice_vsi_cfg_rss_lut_key(main_vsi);
+
+ return 0;
+
+cleanup:
+ ice_remove_q_channels(main_vsi, rem_adv_fltr);
+ return err;
+}
+
+/**
+ * ice_create_q_channels - Add queue channels for the given TCs
+ * @vsi: VSI to be configured
+ *
+ * Configures queue channel mapping to the given TCs
+ */
+static int ice_create_q_channels(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_channel *ch;
+ int ret = 0, i;
+
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ continue;
+
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ INIT_LIST_HEAD(&ch->list);
+ ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
+ ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
+ ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
+ ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
+ ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
+
+ /* convert to Kbits/s */
+ if (ch->max_tx_rate)
+ ch->max_tx_rate = div_u64(ch->max_tx_rate,
+ ICE_BW_KBPS_DIVISOR);
+ if (ch->min_tx_rate)
+ ch->min_tx_rate = div_u64(ch->min_tx_rate,
+ ICE_BW_KBPS_DIVISOR);
+
+ ret = ice_create_q_channel(vsi, ch);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "failed creating channel TC:%d\n", i);
+ kfree(ch);
+ goto err_free;
+ }
+ list_add_tail(&ch->list, &vsi->ch_list);
+ vsi->tc_map_vsi[i] = ch->ch_vsi;
+ dev_dbg(ice_pf_to_dev(pf),
+ "successfully created channel: VSI %pK\n", ch->ch_vsi);
+ }
+ return 0;
+
+err_free:
+ ice_remove_q_channels(vsi, false);
+
+ return ret;
+}
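For reference, mqprio hands rates to the driver in bytes per second, while the scheduler's BW-limit API takes Kbps. A standalone sketch of the conversion above, assuming ICE_BW_KBPS_DIVISOR is defined as 125 elsewhere in the driver headers ((x * 8) / 1000 == x / 125):

#include <stdint.h>
#include <stdio.h>

#define ICE_BW_KBPS_DIVISOR 125	/* bytes/s -> Kbit/s */

int main(void)
{
	uint64_t max_rate_bps = 125000000ULL;	/* 1 Gbit/s expressed in bytes/s */
	uint64_t max_tx_rate = max_rate_bps / ICE_BW_KBPS_DIVISOR;

	printf("max_tx_rate = %llu Kbps\n",
	       (unsigned long long)max_tx_rate);	/* prints 1000000 */
	return 0;
}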
+
+/**
+ * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @type_data: TC offload data
+ */
+static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 mode, ena_tc_qdisc = 0;
+ int cur_txq, cur_rxq;
+ u8 hw = 0, num_tcf;
+ struct device *dev;
+ int ret, i;
+
+ dev = ice_pf_to_dev(pf);
+ num_tcf = mqprio_qopt->qopt.num_tc;
+ hw = mqprio_qopt->qopt.hw;
+ mode = mqprio_qopt->mode;
+ if (!hw) {
+ clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ vsi->ch_rss_size = 0;
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+ goto config_tcf;
+ }
+
+ /* Generate queue region map for number of TCF requested */
+ for (i = 0; i < num_tcf; i++)
+ ena_tc_qdisc |= BIT(i);
+
+ switch (mode) {
+ case TC_MQPRIO_MODE_CHANNEL:
+
+ ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
+ if (ret) {
+ netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
+ ret);
+ return ret;
+ }
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+ set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ /* don't assume the state of hw_tc_offload during driver load;
+ * set the TC flower filter flag only if hw_tc_offload is
+ * already ON
+ */
+ if (vsi->netdev->features & NETIF_F_HW_TC)
+ set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+config_tcf:
+
+ /* Requesting same TCF configuration as already enabled */
+ if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
+ mode != TC_MQPRIO_MODE_CHANNEL)
+ return 0;
+
+ /* Pause VSI queues */
+ ice_dis_vsi(vsi, true);
+
+ if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ ice_remove_q_channels(vsi, true);
+
+ vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
+ num_online_cpus());
+ vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
+ num_online_cpus());
+ } else {
+ /* logic to rebuild VSI, similar to ethtool -L */
+ u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
+
+ for (i = 0; i < num_tcf; i++) {
+ if (!(ena_tc_qdisc & BIT(i)))
+ continue;
+
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ }
+ vsi->req_txq = offset + qcount_tx;
+ vsi->req_rxq = offset + qcount_rx;
+
+ /* store away the original rss_size info, so that it can be reused
+ * from ice_vsi_rebuild during the tc-qdisc delete stage to
+ * determine what the rss_size for the main VSI should be
+ */
+ vsi->orig_rss_size = vsi->rss_size;
+ }
+
+ /* save current values of Tx and Rx queues before calling VSI rebuild
+ * for fallback option
+ */
+ cur_txq = vsi->num_txq;
+ cur_rxq = vsi->num_rxq;
+
+ /* proceed with rebuilding the main VSI using the correct number of queues */
+ ret = ice_vsi_rebuild(vsi, false);
+ if (ret) {
+ /* fallback to current number of queues */
+ dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
+ vsi->req_txq = cur_txq;
+ vsi->req_rxq = cur_rxq;
+ clear_bit(ICE_RESET_FAILED, pf->state);
+ if (ice_vsi_rebuild(vsi, false)) {
+ dev_err(dev, "Rebuild of main VSI failed again\n");
+ return ret;
+ }
+ }
+
+ vsi->all_numtc = num_tcf;
+ vsi->all_enatc = ena_tc_qdisc;
+ ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
+ if (ret) {
+ netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
+ vsi->vsi_num);
+ goto exit;
+ }
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
+
+ /* set TC0 rate limit if specified */
+ if (max_tx_rate || min_tx_rate) {
+ /* convert to Kbits/s */
+ if (max_tx_rate)
+ max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
+ if (min_tx_rate)
+ min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
+
+ ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
+ if (!ret) {
+ dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
+ max_tx_rate, min_tx_rate, vsi->vsi_num);
+ } else {
+ dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
+ max_tx_rate, min_tx_rate, vsi->vsi_num);
+ goto exit;
+ }
+ }
+ ret = ice_create_q_channels(vsi);
+ if (ret) {
+ netdev_err(netdev, "failed configuring queue channels\n");
+ goto exit;
+ } else {
+ netdev_dbg(netdev, "successfully configured channels\n");
+ }
+ }
+
+ if (vsi->ch_rss_size)
+ ice_vsi_cfg_rss_lut_key(vsi);
+
+exit:
+ /* on error, reset all_numtc and all_enatc */
+ if (ret) {
+ vsi->all_numtc = 0;
+ vsi->all_enatc = 0;
+ }
+ /* resume VSI */
+ ice_ena_vsi(vsi, true);
+
+ return ret;
+}
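A standalone sketch of the queue-count math used in the rebuild path above: the loop keeps the offset and count of the last enabled TC, and since mqprio offsets increase monotonically, offset + count of that TC is the total number of queues needed. The values below are hypothetical:

#include <stdio.h>

int main(void)
{
	/* hypothetical mqprio config: TC0 = 4@0, TC1 = 4@4, TC2 = 8@8 */
	int offset[] = { 0, 4, 8 };
	int count[] = { 4, 4, 8 };
	int num_tcf = 3, i, off = 0, qcount = 0;

	for (i = 0; i < num_tcf; i++) {
		off = offset[i];	/* last enabled TC wins */
		qcount = count[i];
	}
	printf("req_txq = req_rxq = %d\n", off + qcount);	/* 16 */
	return 0;
}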
+
+static LIST_HEAD(ice_block_cb_list);
+
+static int
+ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ int err;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &ice_block_cb_list,
+ ice_setup_tc_block_cb,
+ np, np, true);
+ case TC_SETUP_QDISC_MQPRIO:
+ /* setup traffic classifier for receive side */
+ mutex_lock(&pf->tc_mutex);
+ err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
+ mutex_unlock(&pf->tc_mutex);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return -EOPNOTSUPP;
+}
+
+static struct ice_indr_block_priv *
+ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
+ struct net_device *netdev)
+{
+ struct ice_indr_block_priv *cb_priv;
+
+ list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
+ if (!cb_priv->netdev)
+ return NULL;
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+ }
+ return NULL;
+}
+
+static int
+ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
+ void *indr_priv)
+{
+ struct ice_indr_block_priv *priv = indr_priv;
+ struct ice_netdev_priv *np = priv->np;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, priv->netdev,
+ (struct flow_cls_offload *)
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
+ struct ice_netdev_priv *np,
+ struct flow_block_offload *f, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ struct ice_indr_block_priv *indr_priv;
+ struct flow_block_cb *block_cb;
+
+ if (!ice_is_tunnel_supported(netdev) &&
+ !(is_vlan_dev(netdev) &&
+ vlan_dev_real_dev(netdev) == np->vsi->netdev))
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ indr_priv = ice_indr_block_priv_lookup(np, netdev);
+ if (indr_priv)
+ return -EEXIST;
+
+ indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
+ if (!indr_priv)
+ return -ENOMEM;
+
+ indr_priv->netdev = netdev;
+ indr_priv->np = np;
+ list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
+
+ block_cb =
+ flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
+ indr_priv, indr_priv,
+ ice_rep_indr_tc_block_unbind,
+ f, netdev, sch, data, np,
+ cleanup);
+
+ if (IS_ERR(block_cb)) {
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+ return PTR_ERR(block_cb);
+ }
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ indr_priv = ice_indr_block_priv_lookup(np, netdev);
+ if (!indr_priv)
+ return -ENOENT;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ ice_indr_setup_block_cb,
+ indr_priv);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_indr_block_cb_remove(block_cb, f);
+
+ list_del(&block_cb->driver_list);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
+ void *cb_priv, enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
+ data, cleanup);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -7111,7 +8441,7 @@ int ice_open_internal(struct net_device *netdev)
return -EIO;
}
- ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+ ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
/* Set PHY if there is media, otherwise, turn off PHY */
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
@@ -7239,6 +8569,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_open = ice_open,
.ndo_stop = ice_stop,
.ndo_start_xmit = ice_start_xmit,
+ .ndo_select_queue = ice_select_queue,
.ndo_features_check = ice_features_check,
.ndo_set_rx_mode = ice_set_rx_mode,
.ndo_set_mac_address = ice_set_mac_address,
@@ -7254,8 +8585,10 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_set_vf_vlan = ice_set_vf_port_vlan,
.ndo_set_vf_link_state = ice_set_vf_link_state,
.ndo_get_vf_stats = ice_get_vf_stats,
+ .ndo_set_vf_rate = ice_set_vf_bw,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
+ .ndo_setup_tc = ice_setup_tc,
.ndo_set_features = ice_set_features,
.ndo_bridge_getlink = ice_bridge_getlink,
.ndo_bridge_setlink = ice_bridge_setlink,
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 199aa5b71540..dc1b0e9e6df5 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -3,6 +3,56 @@
#ifndef _ICE_PROTOCOL_TYPE_H_
#define _ICE_PROTOCOL_TYPE_H_
+#define ICE_IPV6_ADDR_LENGTH 16
+
+/* Each recipe can match up to 5 different fields. Fields to match can be meta-
+ * data, values extracted from packet headers, or results from other recipes.
+ * One of the 5 fields is reserved for matching the switch ID. So, up to 4
+ * recipes can provide intermediate results to another one through chaining,
+ * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
+ */
+#define ICE_NUM_WORDS_RECIPE 4
+
+/* Max recipes that can be chained */
+#define ICE_MAX_CHAIN_RECIPE 5
+
+/* One of the allowed 5 words is reserved for the switch ID, so a recipe can
+ * hold at most 4 words. Up to 5 such recipes can be chained together, so the
+ * maximum number of words that can be programmed for a lookup is 5 * 4.
+ */
+#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
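As a quick sanity check of the arithmetic above, a standalone C11 assertion (not driver code):

_Static_assert((5 - 1) * 5 == 20,
	       "4 usable words per recipe x 5 chained recipes = 20 lookup words");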
+
+/* Field vector index corresponding to chaining */
+#define ICE_CHAIN_FV_INDEX_START 47
+
+enum ice_protocol_type {
+ ICE_MAC_OFOS = 0,
+ ICE_MAC_IL,
+ ICE_ETYPE_OL,
+ ICE_VLAN_OFOS,
+ ICE_IPV4_OFOS,
+ ICE_IPV4_IL,
+ ICE_IPV6_OFOS,
+ ICE_IPV6_IL,
+ ICE_TCP_IL,
+ ICE_UDP_OF,
+ ICE_UDP_ILOS,
+ ICE_VXLAN,
+ ICE_GENEVE,
+ ICE_NVGRE,
+ ICE_VXLAN_GPE,
+ ICE_SCTP_IL,
+ ICE_PROTOCOL_LAST
+};
+
+enum ice_sw_tunnel_type {
+ ICE_NON_TUN = 0,
+ ICE_SW_TUN_VXLAN,
+ ICE_SW_TUN_GENEVE,
+ ICE_SW_TUN_NVGRE,
+ ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
+};
+
/* Decoders for ice_prot_id:
* - F: First
* - I: Inner
@@ -35,4 +85,158 @@ enum ice_prot_id {
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
+
+#define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */
+
+#define ICE_MAC_OFOS_HW 1
+#define ICE_MAC_IL_HW 4
+#define ICE_ETYPE_OL_HW 9
+#define ICE_VLAN_OF_HW 16
+#define ICE_VLAN_OL_HW 17
+#define ICE_IPV4_OFOS_HW 32
+#define ICE_IPV4_IL_HW 33
+#define ICE_IPV6_OFOS_HW 40
+#define ICE_IPV6_IL_HW 41
+#define ICE_TCP_IL_HW 49
+#define ICE_UDP_ILOS_HW 53
+#define ICE_GRE_OF_HW 64
+
+#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
+#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+
+#define ICE_MDID_SIZE 2
+#define ICE_TUN_FLAG_MDID 21
+#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
+#define ICE_TUN_FLAG_MASK 0xFF
+
+#define ICE_TUN_FLAG_FV_IND 2
+
+/* Mapping of software defined protocol ID to hardware defined protocol ID */
+struct ice_protocol_entry {
+ enum ice_protocol_type type;
+ u8 protocol_id;
+};
+
+struct ice_ether_hdr {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+};
+
+struct ice_ethtype_hdr {
+ __be16 ethtype_id;
+};
+
+struct ice_ether_vlan_hdr {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 vlan_id;
+};
+
+struct ice_vlan_hdr {
+ __be16 type;
+ __be16 vlan;
+};
+
+struct ice_ipv4_hdr {
+ u8 version;
+ u8 tos;
+ __be16 total_length;
+ __be16 id;
+ __be16 frag_off;
+ u8 time_to_live;
+ u8 protocol;
+ __be16 check;
+ __be32 src_addr;
+ __be32 dst_addr;
+};
+
+struct ice_ipv6_hdr {
+ __be32 be_ver_tc_flow;
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 hop_limit;
+ u8 src_addr[ICE_IPV6_ADDR_LENGTH];
+ u8 dst_addr[ICE_IPV6_ADDR_LENGTH];
+};
+
+struct ice_sctp_hdr {
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 verification_tag;
+ __be32 check;
+};
+
+struct ice_l4_hdr {
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 len;
+ __be16 check;
+};
+
+struct ice_udp_tnl_hdr {
+ __be16 field;
+ __be16 proto_type;
+ __be32 vni; /* only use lower 24-bits */
+};
+
+struct ice_nvgre_hdr {
+ __be16 flags;
+ __be16 protocol;
+ __be32 tni_flow;
+};
+
+union ice_prot_hdr {
+ struct ice_ether_hdr eth_hdr;
+ struct ice_ethtype_hdr ethertype;
+ struct ice_vlan_hdr vlan_hdr;
+ struct ice_ipv4_hdr ipv4_hdr;
+ struct ice_ipv6_hdr ipv6_hdr;
+ struct ice_l4_hdr l4_hdr;
+ struct ice_sctp_hdr sctp_hdr;
+ struct ice_udp_tnl_hdr tnl_hdr;
+ struct ice_nvgre_hdr nvgre_hdr;
+};
+
+/* This is a mapping table entry that maps every word within a given protocol
+ * structure to the real byte offset as per the specification of that
+ * protocol header.
+ * e.g. the dst address in the ethernet header is 3 words at byte offsets
+ * 0, 2, and 4 in the actual packet header, and the src address is at
+ * offsets 6, 8, and 10
+ */
+struct ice_prot_ext_tbl_entry {
+ enum ice_protocol_type prot_type;
+ /* Byte offset into header of given protocol type */
+ u8 offs[sizeof(union ice_prot_hdr)];
+};
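To make the mapping concrete, a hypothetical entry (not part of this patch) for the outer MAC header, assuming only the definitions above: the 14-byte ethernet header is 7 words, and word k of the software structure sits at byte offset 2 * k in the packet.

static const struct ice_prot_ext_tbl_entry ice_prot_ext_mac_example = {
	.prot_type = ICE_MAC_OFOS,
	.offs = { 0, 2, 4, 6, 8, 10, 12 },	/* one byte offset per word */
};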
+
+/* Extractions to be looked up for a given recipe */
+struct ice_prot_lkup_ext {
+ u16 prot_type;
+ u8 n_val_words;
+ /* create a buffer to hold max words per recipe */
+ u16 field_off[ICE_MAX_CHAIN_WORDS];
+ u16 field_mask[ICE_MAX_CHAIN_WORDS];
+
+ struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
+
+ /* Indicate field offsets that have field vector indices assigned */
+ DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS);
+};
+
+struct ice_pref_recipe_group {
+ u8 n_val_pairs; /* Number of valid pairs */
+ struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
+ u16 mask[ICE_NUM_WORDS_RECIPE];
+};
+
+struct ice_recp_grp_entry {
+ struct list_head l_entry;
+
+#define ICE_INVAL_CHAIN_IND 0xFF
+ u16 rid;
+ u8 chain_idx;
+ u16 fv_idx[ICE_NUM_WORDS_RECIPE];
+ u16 fv_mask[ICE_NUM_WORDS_RECIPE];
+ struct ice_pref_recipe_group r_group;
+};
#endif /* _ICE_PROTOCOL_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 80380aed8882..bf7247c6f58e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -6,6 +6,252 @@
#define E810_OUT_PROP_DELAY_NS 1
+static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
+ /* name idx func chan */
+ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
+ { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
+ { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
+ { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
+ { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
+};
+
+/**
+ * ice_get_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Read the configuration of the SMA control logic and put it into the
+ * ptp_pin_desc structure
+ */
+static int
+ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
+{
+ u8 data, i;
+ int status;
+
+ /* Read initial pin state */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* initialize with defaults */
+ for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
+ snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
+ "%s", ice_pin_desc_e810t[i].name);
+ ptp_pins[i].index = ice_pin_desc_e810t[i].index;
+ ptp_pins[i].func = ice_pin_desc_e810t[i].func;
+ ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
+ }
+
+ /* Parse SMA1/UFL1 */
+ switch (data & ICE_SMA1_MASK_E810T) {
+ case ICE_SMA1_MASK_E810T:
+ default:
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_DIR_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_PEROUT;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_TX_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case 0:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_PEROUT;
+ break;
+ }
+
+ /* Parse SMA2/UFL2 */
+ switch (data & ICE_SMA2_MASK_E810T) {
+ case ICE_SMA2_MASK_E810T:
+ default:
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_EXTTS;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ case ICE_SMA2_DIR_EN_E810T:
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ }
+
+ return 0;
+}
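A standalone sketch of the SMA1/U.FL1 decode above. The two mask bits mirror ICE_SMA1_DIR_EN_E810T (bit 4) and ICE_SMA1_TX_EN_E810T (bit 5) from ice_ptp_hw.h later in this patch; the strings are only illustrative:

#include <stdio.h>

#define SMA1_DIR_EN	(1 << 4)
#define SMA1_TX_EN	(1 << 5)
#define SMA1_MASK	(SMA1_DIR_EN | SMA1_TX_EN)

int main(void)
{
	unsigned char data = SMA1_TX_EN;	/* as read from the expander */

	switch (data & SMA1_MASK) {
	case SMA1_MASK:
		puts("SMA1: disabled, U.FL1: disabled");
		break;
	case SMA1_DIR_EN:
		puts("SMA1: periodic output (TX), U.FL1: disabled");
		break;
	case SMA1_TX_EN:
		puts("SMA1: external timestamp (RX), U.FL1: disabled");
		break;
	case 0:
		puts("SMA1: external timestamp (RX), U.FL1: periodic output (TX)");
		break;
	}
	return 0;
}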
+
+/**
+ * ice_ptp_set_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Set the configuration of the SMA control logic based on the configuration
+ * in the ptp_pins parameter
+ */
+static int
+ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
+ const struct ptp_pin_desc *ptp_pins)
+{
+ int status;
+ u8 data;
+
+ /* SMA1 and UFL1 cannot be set to TX at the same time */
+ if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT)
+ return -EINVAL;
+
+ /* SMA2 and UFL2 cannot be set to RX at the same time */
+ if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS)
+ return -EINVAL;
+
+ /* Read initial pin state value */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* Set the right state based on the desired configuration */
+ data &= ~ICE_SMA1_MASK_E810T;
+ if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
+ data |= ICE_SMA1_MASK_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX");
+ data |= ICE_SMA1_TX_EN_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ /* U.FL 1 TX will always enable SMA 1 RX */
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 TX");
+ data |= ICE_SMA1_DIR_EN_E810T;
+ }
+
+ data &= ~ICE_SMA2_MASK_E810T;
+ if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
+ data |= ICE_SMA2_MASK_E810T;
+ } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 RX");
+ data |= (ICE_SMA2_TX_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "UFL2 RX");
+ data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX");
+ data |= (ICE_SMA2_DIR_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
+ data |= ICE_SMA2_DIR_EN_E810T;
+ }
+
+ return ice_write_sma_ctrl_e810t(hw, data);
+}
+
+/**
+ * ice_ptp_set_sma_e810t
+ * @info: the driver's PTP info structure
+ * @pin: pin index in kernel structure
+ * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
+ *
+ * Set the configuration of a single SMA pin
+ */
+static int
+ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func)
+{
+ struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (pin < SMA1 || func > PTP_PF_PEROUT)
+ return -EOPNOTSUPP;
+
+ err = ice_get_sma_config_e810t(hw, ptp_pins);
+ if (err)
+ return err;
+
+ /* Disable the same function on the other pin sharing the channel */
+ if (pin == SMA1 && ptp_pins[UFL1].func == func)
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ if (pin == UFL1 && ptp_pins[SMA1].func == func)
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+
+ if (pin == SMA2 && ptp_pins[UFL2].func == func)
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ if (pin == UFL2 && ptp_pins[SMA2].func == func)
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+
+ /* Set up new pin function in the temp table */
+ ptp_pins[pin].func = func;
+
+ return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
+}
+
+/**
+ * ice_verify_pin_e810t
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Verify if the pin supports the requested pin function and check pin
+ * consistency. Reconfigure the SMA logic attached to the given pin to
+ * enable its desired functionality.
+ */
+static int
+ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ /* Don't allow channel reassignment */
+ if (chan != ice_pin_desc_e810t[pin].chan)
+ return -EOPNOTSUPP;
+
+ /* Check if functions are properly assigned */
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ if (pin == UFL1)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PEROUT:
+ if (pin == UFL2 || pin == GNSS)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+
+ return ice_ptp_set_sma_e810t(info, pin, func);
+}
+
/**
* ice_set_tx_tstamp - Enable or disable Tx timestamping
* @pf: The PF pointer to search in
@@ -735,17 +981,34 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_perout_channel clk_cfg = {0};
+ bool sma_pres = false;
unsigned int chan;
u32 gpio_pin;
int err;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ sma_pres = true;
+
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
chan = rq->perout.index;
- if (chan == PPS_CLK_GEN_CHAN)
+ if (sma_pres) {
+ if (chan == ice_pin_desc_e810t[SMA1].chan)
+ clk_cfg.gpio_pin = GPIO_20;
+ else if (chan == ice_pin_desc_e810t[SMA2].chan)
+ clk_cfg.gpio_pin = GPIO_22;
+ else
+ return -1;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ clk_cfg.gpio_pin = GPIO_20;
+ else
+ clk_cfg.gpio_pin = GPIO_22;
+ } else if (chan == PPS_CLK_GEN_CHAN) {
clk_cfg.gpio_pin = PPS_PIN_INDEX;
- else
+ } else {
clk_cfg.gpio_pin = chan;
+ }
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
rq->perout.period.nsec);
@@ -757,7 +1020,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
break;
case PTP_CLK_REQ_EXTTS:
chan = rq->extts.index;
- gpio_pin = chan;
+ if (sma_pres) {
+ if (chan < ice_pin_desc_e810t[SMA2].chan)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else {
+ gpio_pin = chan;
+ }
err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
rq->extts.flags);
@@ -1012,7 +1287,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
* The timestamp is in ns, so we must convert the result first.
*/
void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
u32 ts_high;
@@ -1038,13 +1313,93 @@ ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
}
/**
+ * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Disable the OS access to the SMA pins. Called to clear out the OS
+ * indications of pin support when we fail to set up the E810-T SMA control
+ * register.
+ */
+static void
+ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
+
+ info->enable = NULL;
+ info->verify = NULL;
+ info->n_pins = 0;
+ info->n_ext_ts = 0;
+ info->n_per_out = 0;
+}
+
+/**
+ * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Finish setting up the SMA pins by allocating pin_config, and setting it up
+ * according to the current status of the SMA. On failure, disable all of the
+ * extended SMA pin support.
+ */
+static void
+ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Allocate memory for kernel pins interface */
+ info->pin_config = devm_kcalloc(dev, info->n_pins,
+ sizeof(*info->pin_config), GFP_KERNEL);
+ if (!info->pin_config) {
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+ return;
+ }
+
+ /* Read current SMA status */
+ err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
+ if (err)
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+}
+
+/**
+ * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
+ * @info: PTP clock capabilities
+ */
+static void
+ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ /* Check if SMA controller is in the netlist */
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
+ !ice_is_pca9575_present(&pf->hw))
+ ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
+
+ if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
+ info->n_per_out = N_PER_OUT_E810T_NO_SMA;
+ return;
+ }
+
+ info->n_per_out = N_PER_OUT_E810T;
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+}
+
+/**
* ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @info: PTP clock capabilities
*/
static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
{
- info->n_per_out = E810_N_PER_OUT;
- info->n_ext_ts = E810_N_EXT_TS;
+ info->n_per_out = N_PER_OUT_E810;
+ info->n_ext_ts = N_EXT_TS_E810;
}
/**
@@ -1062,7 +1417,10 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(info);
+ if (ice_is_e810t(&pf->hw))
+ ice_ptp_setup_pins_e810t(pf, info);
+ else
+ ice_ptp_setup_pins_e810(info);
}
/**
@@ -1571,6 +1929,9 @@ err_kworker:
*/
void ice_ptp_release(struct ice_pf *pf)
{
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
/* Disable timestamping for both Tx and Rx */
ice_ptp_cfg_timestamp(pf, false);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index e1c787bd5b96..f71ad317d6c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -9,12 +9,21 @@
#include "ice_ptp_hw.h"
-enum ice_ptp_pin {
+enum ice_ptp_pin_e810 {
GPIO_20 = 0,
GPIO_21,
GPIO_22,
GPIO_23,
- NUM_ICE_PTP_PIN
+ NUM_PTP_PIN_E810
+};
+
+enum ice_ptp_pin_e810t {
+ GNSS = 0,
+ SMA1,
+ UFL1,
+ SMA2,
+ UFL2,
+ NUM_PTP_PINS_E810T
};
struct ice_perout_channel {
@@ -155,8 +164,11 @@ struct ice_ptp {
#define PPS_CLK_SRC_CHAN 2
#define PPS_PIN_INDEX 5
#define TIME_SYNC_PIN_INDEX 4
-#define E810_N_EXT_TS 3
-#define E810_N_PER_OUT 4
+#define N_EXT_TS_E810 3
+#define N_PER_OUT_E810 4
+#define N_PER_OUT_E810T 3
+#define N_PER_OUT_E810T_NO_SMA 2
+#define N_EXT_TS_E810_NO_SMA 2
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
struct ice_pf;
@@ -168,7 +180,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_process_ts(struct ice_pf *pf);
void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
@@ -196,7 +208,7 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
static inline void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 3eca0e4eab0b..29f947c0cd2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -649,3 +649,154 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
return ice_clear_phy_tstamp_e810(hw, block, idx);
}
+
+/* E810T SMA functions
+ *
+ * The following functions operate specifically on E810T hardware and are used
+ * to access the extended GPIOs available.
+ */
+
+/**
+ * ice_get_pca9575_handle
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the value is cached in the hw structure and subsequent calls
+ * return the cached value.
+ */
+static int
+ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+ /* If handle was not detected read it from the netlist */
+ cmd = &desc.params.get_link_topo;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ /* Set node type to GPIO controller */
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -EOPNOTSUPP;
+
+ cmd->addr.topo_params.index = idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (status)
+ return -EOPNOTSUPP;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -EOPNOTSUPP;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_sma_ctrl_e810t
+ * @hw: pointer to the hw struct
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
+ * PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ *data = 0;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ &pin, NULL);
+ if (status)
+ break;
+ *data |= (u8)(!pin) << i;
+ }
+
+ return status;
+}
+
+/**
+ * ice_write_sma_ctrl_e810t
+ * @hw: pointer to the hw struct
+ * @data: data to be written to the GPIO controller
+ *
+ * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
+ * of the PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ pin = !(data & (1 << i));
+ status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ pin, NULL);
+ if (status)
+ break;
+ }
+
+ return status;
+}
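The read and write helpers above treat the expander pins as active low: a 1 bit in data is written as a low pin, and a low pin reads back as a 1 bit. A standalone sketch verifying that the two conversions round-trip on bits 3-7:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	unsigned char data = 0xA8;	/* arbitrary value within bits 3-7 */
	unsigned char readback = 0;
	int i;

	for (i = 3; i <= 7; i++) {
		bool pin = !(data & (1 << i));		/* write conversion */
		readback |= (unsigned char)(!pin) << i;	/* read conversion */
	}
	assert(readback == data);	/* active-low encode/decode is the identity */
	return 0;
}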
+
+/**
+ * ice_is_pca9575_present
+ * @hw: pointer to the hw struct
+ *
+ * Check if the SW IO expander is present in the netlist
+ */
+bool ice_is_pca9575_present(struct ice_hw *hw)
+{
+ u16 handle = 0;
+ int status;
+
+ if (!ice_is_e810t(hw))
+ return false;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+
+ return !status && handle;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 55a414e87018..b2984b5c22c1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -30,6 +30,9 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
+int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
+int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
+bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
@@ -76,4 +79,23 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw);
#define LOW_TX_MEMORY_BANK_START 0x03090000
#define HIGH_TX_MEMORY_BANK_START 0x03090004
+/* E810T SMA controller pin control */
+#define ICE_SMA1_DIR_EN_E810T BIT(4)
+#define ICE_SMA1_TX_EN_E810T BIT(5)
+#define ICE_SMA2_UFL2_RX_DIS_E810T BIT(3)
+#define ICE_SMA2_DIR_EN_E810T BIT(6)
+#define ICE_SMA2_TX_EN_E810T BIT(7)
+
+#define ICE_SMA1_MASK_E810T (ICE_SMA1_DIR_EN_E810T | \
+ ICE_SMA1_TX_EN_E810T)
+#define ICE_SMA2_MASK_E810T (ICE_SMA2_UFL2_RX_DIS_E810T | \
+ ICE_SMA2_DIR_EN_E810T | \
+ ICE_SMA2_TX_EN_E810T)
+#define ICE_ALL_SMA_MASK_E810T (ICE_SMA1_MASK_E810T | \
+ ICE_SMA2_MASK_E810T)
+
+#define ICE_SMA_MIN_BIT_E810T 3
+#define ICE_SMA_MAX_BIT_E810T 7
+#define ICE_PCA9575_P1_OFFSET 8
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
new file mode 100644
index 000000000000..af8e6ef5f571
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_eswitch.h"
+#include "ice_devlink.h"
+#include "ice_virtchnl_pf.h"
+#include "ice_tc_lib.h"
+
+/**
+ * ice_repr_get_sw_port_id - get port ID associated with representor
+ * @repr: pointer to port representor
+ */
+static int ice_repr_get_sw_port_id(struct ice_repr *repr)
+{
+ return repr->vf->pf->hw.port_info->lport;
+}
+
+/**
+ * ice_repr_get_phys_port_name - get phys port name
+ * @netdev: pointer to port representor netdev
+ * @buf: buffer to write the port name into
+ * @len: max length of buf
+ */
+static int
+ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_repr *repr = np->repr;
+ int res;
+
+ /* Devlink port is registered and devlink core is taking care of name formatting. */
+ if (repr->vf->devlink_port.devlink)
+ return -EOPNOTSUPP;
+
+ res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
+ repr->vf->vf_id);
+ if (res <= 0)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+/**
+ * ice_repr_get_stats64 - get VF stats for VFPR use
+ * @netdev: pointer to port representor netdev
+ * @stats: pointer to struct where stats can be stored
+ */
+static void
+ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_eth_stats *eth_stats;
+ struct ice_vsi *vsi;
+
+ if (ice_is_vf_disabled(np->repr->vf))
+ return;
+ vsi = np->repr->src_vsi;
+
+ ice_update_vsi_stats(vsi);
+ eth_stats = &vsi->eth_stats;
+
+ stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
+ eth_stats->tx_multicast;
+ stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
+ eth_stats->rx_multicast;
+ stats->tx_bytes = eth_stats->tx_bytes;
+ stats->rx_bytes = eth_stats->rx_bytes;
+ stats->multicast = eth_stats->rx_multicast;
+ stats->tx_errors = eth_stats->tx_errors;
+ stats->tx_dropped = eth_stats->tx_discards;
+ stats->rx_dropped = eth_stats->rx_discards;
+}
+
+/**
+ * ice_netdev_to_repr - Get port representor for given netdevice
+ * @netdev: pointer to port representor netdev
+ */
+struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ return np->repr;
+}
+
+/**
+ * ice_repr_open - Enable port representor's network interface
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a port representor's network
+ * interface is made active by the system (IFF_UP). Corresponding
+ * VF is notified about link status change.
+ *
+ * Returns 0 on success
+ */
+static int ice_repr_open(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_vf *vf;
+
+ vf = repr->vf;
+ vf->link_forced = true;
+ vf->link_up = true;
+ ice_vc_notify_vf_link_state(vf);
+
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+}
+
+/**
+ * ice_repr_stop - Disable port representor's network interface
+ * @netdev: network interface device structure
+ *
+ * The stop entry point is called when a port representor's network
+ * interface is de-activated by the system. Corresponding
+ * VF is notified about link status change.
+ *
+ * Returns 0 on success
+ */
+static int ice_repr_stop(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_vf *vf;
+
+ vf = repr->vf;
+ vf->link_forced = true;
+ vf->link_up = false;
+ ice_vc_notify_vf_link_state(vf);
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
+static struct devlink_port *
+ice_repr_get_devlink_port(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ return &repr->vf->devlink_port;
+}
+
+static int
+ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
+ struct flow_cls_offload *flower)
+{
+ switch (flower->command) {
+ case FLOW_CLS_REPLACE:
+ return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
+ case FLOW_CLS_DESTROY:
+ return ice_del_cls_flower(repr->src_vsi, flower);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
+ struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_repr_setup_tc_cls_flower(np->repr, flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(ice_repr_block_cb_list);
+
+static int
+ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple((struct flow_block_offload *)
+ type_data,
+ &ice_repr_block_cb_list,
+ ice_repr_setup_tc_block_cb,
+ np, np, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops ice_repr_netdev_ops = {
+ .ndo_get_phys_port_name = ice_repr_get_phys_port_name,
+ .ndo_get_stats64 = ice_repr_get_stats64,
+ .ndo_open = ice_repr_open,
+ .ndo_stop = ice_repr_stop,
+ .ndo_start_xmit = ice_eswitch_port_start_xmit,
+ .ndo_get_devlink_port = ice_repr_get_devlink_port,
+ .ndo_setup_tc = ice_repr_setup_tc,
+};
+
+/**
+ * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
+ * @netdev: pointer to netdev
+ */
+bool ice_is_port_repr_netdev(struct net_device *netdev)
+{
+ return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
+}
+
+/**
+ * ice_repr_reg_netdev - register port representor netdev
+ * @netdev: pointer to port representor netdev
+ */
+static int
+ice_repr_reg_netdev(struct net_device *netdev)
+{
+ eth_hw_addr_random(netdev);
+ netdev->netdev_ops = &ice_repr_netdev_ops;
+ ice_set_ethtool_repr_ops(netdev);
+
+ netdev->hw_features |= NETIF_F_HW_TC;
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return register_netdev(netdev);
+}
+
+/**
+ * ice_repr_add - add representor for VF
+ * @vf: pointer to VF structure
+ */
+static int ice_repr_add(struct ice_vf *vf)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_netdev_priv *np;
+ struct ice_repr *repr;
+ int err;
+
+ repr = kzalloc(sizeof(*repr), GFP_KERNEL);
+ if (!repr)
+ return -ENOMEM;
+
+ repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
+ if (!repr->netdev) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ repr->src_vsi = ice_get_vf_vsi(vf);
+ repr->vf = vf;
+ vf->repr = repr;
+ np = netdev_priv(repr->netdev);
+ np->repr = repr;
+
+ q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
+ if (!q_vector) {
+ err = -ENOMEM;
+ goto err_alloc_q_vector;
+ }
+ repr->q_vector = q_vector;
+
+ err = ice_devlink_create_vf_port(vf);
+ if (err)
+ goto err_devlink;
+
+ repr->netdev->min_mtu = ETH_MIN_MTU;
+ repr->netdev->max_mtu = ICE_MAX_MTU;
+
+ err = ice_repr_reg_netdev(repr->netdev);
+ if (err)
+ goto err_netdev;
+
+ devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
+
+ return 0;
+
+err_netdev:
+ ice_devlink_destroy_vf_port(vf);
+err_devlink:
+ kfree(repr->q_vector);
+ vf->repr->q_vector = NULL;
+err_alloc_q_vector:
+ free_netdev(repr->netdev);
+ repr->netdev = NULL;
+err_alloc:
+ kfree(repr);
+ vf->repr = NULL;
+ return err;
+}
+
+/**
+ * ice_repr_rem - remove representor from VF
+ * @vf: pointer to VF structure
+ */
+static void ice_repr_rem(struct ice_vf *vf)
+{
+ ice_devlink_destroy_vf_port(vf);
+ kfree(vf->repr->q_vector);
+ vf->repr->q_vector = NULL;
+ unregister_netdev(vf->repr->netdev);
+ free_netdev(vf->repr->netdev);
+ vf->repr->netdev = NULL;
+ kfree(vf->repr);
+ vf->repr = NULL;
+}
+
+/**
+ * ice_repr_add_for_all_vfs - add port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+int ice_repr_add_for_all_vfs(struct ice_pf *pf)
+{
+ int err;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ err = ice_repr_add(vf);
+ if (err)
+ goto err;
+
+ ice_vc_change_ops_to_repr(&vf->vc_ops);
+ }
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_repr_rem(vf);
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+ }
+
+ return err;
+}
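The error path above uses the common partial-unwind idiom: on a failure at index i, tear down the representors 0..i-1 in reverse order. A generic standalone sketch of the pattern, with hypothetical init_one/fini_one helpers standing in for ice_repr_add/ice_repr_rem:

#include <stdio.h>

static int init_one(int i)
{
	return (i == 3) ? -1 : 0;	/* simulate a failure at index 3 */
}

static void fini_one(int i)
{
	printf("undo %d\n", i);
}

int main(void)
{
	int err = 0, i, n = 5;

	for (i = 0; i < n; i++) {
		err = init_one(i);
		if (err)
			break;
	}
	if (err)
		for (i = i - 1; i >= 0; i--)	/* unwind 2, 1, 0 */
			fini_one(i);
	return err ? 1 : 0;
}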
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_repr_rem(vf);
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+ }
+}
+
+/**
+ * ice_repr_start_tx_queues - start Tx queues of port representor
+ * @repr: pointer to repr structure
+ */
+void ice_repr_start_tx_queues(struct ice_repr *repr)
+{
+ netif_carrier_on(repr->netdev);
+ netif_tx_start_all_queues(repr->netdev);
+}
+
+/**
+ * ice_repr_stop_tx_queues - stop Tx queues of port representor
+ * @repr: pointer to repr structure
+ */
+void ice_repr_stop_tx_queues(struct ice_repr *repr)
+{
+ netif_carrier_off(repr->netdev);
+ netif_tx_stop_all_queues(repr->netdev);
+}
+
+/**
+ * ice_repr_set_traffic_vsi - set traffic VSI for port representor
+ * @repr: repr on which VSI will be set
+ * @vsi: pointer to VSI that will be used by port representor to pass traffic
+ */
+void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
+{
+ struct ice_netdev_priv *np = netdev_priv(repr->netdev);
+
+ np->vsi = vsi;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
new file mode 100644
index 000000000000..806de22933c6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_REPR_H_
+#define _ICE_REPR_H_
+
+#include <net/dst_metadata.h>
+#include "ice.h"
+
+struct ice_repr {
+ struct ice_vsi *src_vsi;
+ struct ice_vf *vf;
+ struct ice_q_vector *q_vector;
+ struct net_device *netdev;
+ struct metadata_dst *dst;
+};
+
+int ice_repr_add_for_all_vfs(struct ice_pf *pf);
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf);
+
+void ice_repr_start_tx_queues(struct ice_repr *repr);
+void ice_repr_stop_tx_queues(struct ice_repr *repr);
+
+void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
+
+struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
+bool ice_is_port_repr_netdev(struct net_device *netdev);
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 9f07b6641705..ce3c7bded4cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -2071,6 +2071,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
}
/**
+ * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its RDMA children nodes from the
+ * scheduler tree for all TCs.
+ */
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+ return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
+}
+
+/**
* ice_get_agg_info - get the aggregator ID
* @hw: pointer to the hardware structure
* @agg_id: aggregator ID
@@ -2999,6 +3012,43 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
}
/**
+ * ice_sched_save_vsi_bw - save VSI node's BW information
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save BW information of VSI type node for post replay use.
+ */
+static int
+ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return -EINVAL;
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return -EINVAL;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
* ice_sched_calc_wakeup - calculate RL profile wakeup parameter
* @hw: pointer to the HW struct
* @bw: bandwidth in Kbps
@@ -3771,6 +3821,153 @@ ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
}
/**
+ * ice_sched_get_node_by_id_type - get node from ID type
+ * @pi: port information structure
+ * @id: identifier
+ * @agg_type: type of aggregator
+ * @tc: traffic class
+ *
+ * This function returns the node identified by the given ID and aggregator
+ * type, for the given traffic class (TC). This function needs to be called
+ * with the scheduler lock held.
+ */
+static struct ice_sched_node *
+ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc)
+{
+ struct ice_sched_node *node = NULL;
+
+ switch (agg_type) {
+ case ICE_AGG_TYPE_VSI: {
+ struct ice_vsi_ctx *vsi_ctx;
+ u16 vsi_handle = (u16)id;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ break;
+ /* Get sched_vsi_info */
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ break;
+ node = vsi_ctx->sched.vsi_node[tc];
+ break;
+ }
+
+ case ICE_AGG_TYPE_AGG: {
+ struct ice_sched_node *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (tc_node)
+ node = ice_sched_get_agg_node(pi, tc_node, id);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return node;
+}
+
+/**
+ * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
+ * @pi: port information structure
+ * @id: ID (software VSI handle or AGG ID)
+ * @agg_type: aggregator type (VSI or AGG type node)
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function sets the BW limit of a VSI or aggregator scheduling node
+ * for the given TC, using the bandwidth passed in as bw.
+ */
+static enum ice_status
+ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *node;
+
+ if (!pi)
+ return status;
+
+ if (rl_type == ICE_UNKNOWN_BW)
+ return status;
+
+ mutex_lock(&pi->sched_lock);
+ node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
+ if (!node) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
+ goto exit_set_node_bw_lmt_per_tc;
+ }
+ if (bw == ICE_SCHED_DFLT_BW)
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
+
+exit_set_node_bw_lmt_per_tc:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures BW limit of VSI scheduling node based on TC
+ * information.
+ */
+enum ice_status
+ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ int status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ ICE_AGG_TYPE_VSI,
+ tc, rl_type, bw);
+ if (!status) {
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
+ mutex_unlock(&pi->sched_lock);
+ }
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: traffic class
+ * @rl_type: min or max
+ *
+ * This function configures default BW limit of VSI scheduling node based on TC
+ * information.
+ */
+enum ice_status
+ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type)
+{
+ int status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ ICE_AGG_TYPE_VSI,
+ tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ if (!status) {
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ mutex_unlock(&pi->sched_lock);
+ }
+ return status;
+}
+
+/**
* ice_cfg_rl_burst_size - Set burst size value
* @hw: pointer to the HW struct
* @bytes: burst size in bytes
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 9beef8f0ec76..6bddcbecaf5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -58,6 +58,8 @@ struct ice_sched_agg_info {
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u32 agg_id;
enum ice_agg_type agg_type;
+ /* bw_t_info saves aggregator BW information */
+ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
/* save aggregator TC bitmap */
DECLARE_BITMAP(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
@@ -89,6 +91,7 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
/* Tx scheduler rate limiter functions */
enum ice_status
@@ -103,6 +106,12 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_status
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
+enum ice_status
+ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type);
enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 3b6c1420aa7b..793f4a9fc2cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -8,6 +8,7 @@
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF
+#define ICE_IPV6_ETHER_ID 0x86DD
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
* struct to configure any switch filter rules.
@@ -29,6 +30,476 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
+struct ice_dummy_pkt_offsets {
+ enum ice_protocol_type type;
+ u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
+};
+
+static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_IPV4_IL, 56 },
+ { ICE_TCP_IL, 76 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_gre_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
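A standalone sketch of how such an offsets table is consumed: walk the entries until the ICE_PROTOCOL_LAST sentinel, using each offset to locate a header inside the dummy packet. The enum and names below are illustrative stand-ins, not driver definitions:

#include <stdio.h>

enum proto { MAC_OFOS, ETYPE_OL, IPV4_OFOS, NVGRE, PROTOCOL_LAST };

struct pkt_offset {
	enum proto type;
	unsigned short offset;	/* PROTOCOL_LAST marks end of list */
};

static const struct pkt_offset offsets[] = {
	{ MAC_OFOS, 0 }, { ETYPE_OL, 12 }, { IPV4_OFOS, 14 },
	{ NVGRE, 34 }, { PROTOCOL_LAST, 0 },
};

int main(void)
{
	const struct pkt_offset *p;

	for (p = offsets; p->type != PROTOCOL_LAST; p++)
		printf("protocol %d starts at byte %u\n", p->type, p->offset);
	return 0;
}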
+
+static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_IPV4_IL, 56 },
+ { ICE_UDP_ILOS, 76 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_gre_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
+ 0x00, 0x08, 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_IPV4_IL, 64 },
+ { ICE_TCP_IL, 84 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_udp_tun_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x46, 0x00, 0x00,
+
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_IPV4_IL, 64 },
+ { ICE_UDP_ILOS, 84 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_udp_tun_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x3a, 0x00, 0x00,
+
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
+ 0x00, 0x08, 0x00, 0x00,
+};
+
+/* offset info for MAC + IPv4 + UDP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy packet for MAC + IPv4 + UDP */
+static const u8 dummy_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV4_OFOS, 18 },
+ { ICE_UDP_ILOS, 38 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv4:UDP dummy packet */
+static const u8 dummy_vlan_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + IPv4 + TCP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_TCP_IL, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy packet for MAC + IPv4 + TCP */
+static const u8 dummy_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV4_OFOS, 18 },
+ { ICE_TCP_IL, 38 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv4:TCP dummy packet */
+static const u8 dummy_vlan_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_TCP_IL, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_tcp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* C-tag (802.1Q): IPv6 + TCP */
+static const struct ice_dummy_pkt_offsets
+dummy_vlan_tcp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV6_OFOS, 18 },
+ { ICE_TCP_IL, 58 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv6 + TCP dummy packet */
+static const u8 dummy_vlan_tcp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* IPv6 + UDP */
+static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_ILOS, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* IPv6 + UDP dummy packet */
+static const u8 dummy_udp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
+ 0x00, 0x10, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* C-tag (802.1Q): IPv6 + UDP */
+static const struct ice_dummy_pkt_offsets
+dummy_vlan_udp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV6_OFOS, 18 },
+ { ICE_UDP_ILOS, 58 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv6 + UDP dummy packet */
+static const u8 dummy_vlan_udp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
+ 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
@@ -42,6 +513,14 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
+/* this is a recipe-to-profile association bitmap */
+static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
+ ICE_MAX_NUM_PROFILES);
+
+/* this is a profile-to-recipe association bitmap */
+static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
+ ICE_MAX_NUM_RECIPES);
+
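These two arrays are inverse views of one association: marking recipe r for
profile p in one implies marking p for r in the other, which is exactly what
ice_get_recp_to_prof_map below maintains. A hedged userspace sketch of the
pattern, shrunk to 64-bit masks so a plain integer can stand in for
DECLARE_BITMAP:

#include <stdint.h>
#include <stdio.h>

static uint64_t recipe_to_profile[64];	/* bit p set: recipe uses profile p */
static uint64_t profile_to_recipe[64];	/* bit r set: profile maps recipe r */

static void associate(unsigned int recipe, unsigned int profile)
{
	/* keep the two views consistent by always updating both */
	recipe_to_profile[recipe] |= 1ULL << profile;
	profile_to_recipe[profile] |= 1ULL << recipe;
}

int main(void)
{
	associate(3, 10);
	associate(3, 11);
	printf("recipe 3 -> profiles %#llx\n",
	       (unsigned long long)recipe_to_profile[3]);
	printf("profile 10 -> recipes %#llx\n",
	       (unsigned long long)profile_to_recipe[10]);
	return 0;
}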
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
@@ -59,10 +538,11 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
if (!recps)
return ICE_ERR_NO_MEMORY;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
recps[i].root_rid = i;
INIT_LIST_HEAD(&recps[i].filt_rules);
INIT_LIST_HEAD(&recps[i].filt_replay_rules);
+ INIT_LIST_HEAD(&recps[i].rg_list);
mutex_init(&recps[i].filt_rule_lock);
}
@@ -518,7 +998,7 @@ ice_aq_alloc_free_vsi_list_exit:
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
-static enum ice_status
+enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
@@ -543,6 +1023,360 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
return status;
}
+/**
+ * ice_aq_add_recipe - add switch recipe
+ * @hw: pointer to the HW struct
+ * @s_recipe_list: pointer to switch rule population list
+ * @num_recipes: number of switch recipes in the list
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add(0x0290)
+ */
+static enum ice_status
+ice_aq_add_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 num_recipes, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_get_recipe *cmd;
+ struct ice_aq_desc desc;
+ u16 buf_size;
+
+ cmd = &desc.params.add_get_recipe;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
+
+ cmd->num_sub_recipes = cpu_to_le16(num_recipes);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ buf_size = num_recipes * sizeof(*s_recipe_list);
+
+ return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
+}
+
+/**
+ * ice_aq_get_recipe - get switch recipe
+ * @hw: pointer to the HW struct
+ * @s_recipe_list: pointer to switch rule population list
+ * @num_recipes: pointer to the number of recipes (input and output)
+ * @recipe_root: root recipe number of recipe(s) to retrieve
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get(0x0292)
+ *
+ * On input, *num_recipes should equal the number of entries in s_recipe_list.
+ * On output, *num_recipes will equal the number of entries returned in
+ * s_recipe_list.
+ *
+ * The caller must supply enough space in s_recipe_list to hold all possible
+ * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
+ */
+static enum ice_status
+ice_aq_get_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_get_recipe *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 buf_size;
+
+ if (*num_recipes != ICE_MAX_NUM_RECIPES)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.add_get_recipe;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
+
+ cmd->return_index = cpu_to_le16(recipe_root);
+ cmd->num_sub_recipes = 0;
+
+ buf_size = *num_recipes * sizeof(*s_recipe_list);
+
+ status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
+ *num_recipes = le16_to_cpu(cmd->num_sub_recipes);
+
+ return status;
+}
+
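The in/out *num_recipes contract above is easy to misuse: the caller must
size the buffer for the worst case, pass ICE_MAX_NUM_RECIPES on input, and
read back how many entries were actually filled. A self-contained toy with
the same contract (the names here are illustrative, not driver APIs):

#include <stdio.h>

#define MAX_ENTRIES 64

/* *num must equal MAX_ENTRIES on input and is overwritten with the
 * number of entries actually filled in
 */
static int get_entries(int *buf, unsigned int *num)
{
	unsigned int i, filled = 3;	/* pretend the query returned 3 */

	if (*num != MAX_ENTRIES)
		return -1;		/* caller sized the buffer wrong */
	for (i = 0; i < filled; i++)
		buf[i] = (int)i;
	*num = filled;
	return 0;
}

int main(void)
{
	int buf[MAX_ENTRIES];
	unsigned int cnt = MAX_ENTRIES;

	if (!get_entries(buf, &cnt))
		printf("got %u entries, first=%d\n", cnt, buf[0]);
	return 0;
}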
+/**
+ * ice_aq_map_recipe_to_profile - Map recipe to packet profile
+ * @hw: pointer to the HW struct
+ * @profile_id: package profile ID to associate the recipe with
+ * @r_bitmap: recipe bitmap to associate with the profile
+ * @cd: pointer to command details structure or NULL
+ * Recipe to profile association (0x0291)
+ */
+static enum ice_status
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_recipe_to_profile *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.recipe_to_profile;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
+ cmd->profile_id = cpu_to_le16(profile_id);
+ /* Set the recipe ID bit in the bitmask to let the device know which
+ * profile we are associating the recipe to
+ */
+ memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_get_recipe_to_profile - Get recipes mapped to a profile
+ * @hw: pointer to the HW struct
+ * @profile_id: package profile ID whose recipe associations are queried
+ * @r_bitmap: recipe bitmap filled in and returned in the response
+ * @cd: pointer to command details structure or NULL
+ * Get recipe to profile association (0x0293)
+ */
+static enum ice_status
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_recipe_to_profile *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.recipe_to_profile;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
+ cmd->profile_id = cpu_to_le16(profile_id);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status)
+ memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
+
+ return status;
+}
+
+/**
+ * ice_alloc_recipe - add recipe resource
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID returned as response to AQ call
+ */
+static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = struct_size(sw_buf, elem, 1);
+ sw_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+
+ sw_buf->num_elems = cpu_to_le16(1);
+ sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
+ ICE_AQC_RES_TYPE_S) |
+ ICE_AQC_RES_TYPE_FLAG_SHARED);
+ status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (!status)
+ *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
+ kfree(sw_buf);
+
+ return status;
+}
+
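ice_alloc_recipe sizes its buffer with struct_size(), which computes the
allocation for a structure ending in a flexible array member. A standalone
sketch of the same sizing pattern; the plain arithmetic below is what the
kernel macro does, minus its integer-overflow checks:

#include <stdio.h>
#include <stdlib.h>

struct elem { unsigned short flags; unsigned short resp; };

struct res_buf {
	unsigned short num_elems;
	struct elem elem[];	/* flexible array member */
};

int main(void)
{
	size_t n = 1;
	/* equivalent of struct_size(buf, elem, n), without overflow checks */
	struct res_buf *buf = calloc(1, sizeof(*buf) + n * sizeof(buf->elem[0]));

	if (!buf)
		return 1;
	buf->num_elems = (unsigned short)n;
	printf("allocated room for %u element(s)\n", buf->num_elems);
	free(buf);
	return 0;
}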
+/**
+ * ice_get_recp_to_prof_map - updates recipe to profile mapping
+ * @hw: pointer to hardware structure
+ *
+ * This function is used to populate the recipe_to_profile matrix, where the
+ * index of this array is the recipe ID and each element is the bitmap of
+ * profiles that the recipe is mapped to.
+ */
+static void ice_get_recp_to_prof_map(struct ice_hw *hw)
+{
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u16 i;
+
+ for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
+ u16 j;
+
+ bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
+ bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
+ if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
+ continue;
+ bitmap_copy(profile_to_recipe[i], r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
+ set_bit(i, recipe_to_profile[j]);
+ }
+}
+
+/**
+ * ice_collect_result_idx - copy result index values
+ * @buf: buffer that contains the result index
+ * @recp: the recipe struct to copy data into
+ */
+static void
+ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
+ struct ice_sw_recipe *recp)
+{
+ if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
+ set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
+ recp->res_idxs);
+}
+
+/**
+ * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
+ * @hw: pointer to hardware structure
+ * @recps: struct that we need to populate
+ * @rid: recipe ID that we are populating
+ * @refresh_required: true if we should get recipe to profile mapping from FW
+ *
+ * This function is used to populate all the necessary entries into our
+ * bookkeeping so that we have a current list of all the recipes that are
+ * programmed in the firmware.
+ */
+static enum ice_status
+ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
+ bool *refresh_required)
+{
+ DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *tmp;
+ u16 num_recps = ICE_MAX_NUM_RECIPES;
+ struct ice_prot_lkup_ext *lkup_exts;
+ enum ice_status status;
+ u8 fv_word_idx = 0;
+ u16 sub_recps;
+
+ bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
+
+ /* we need a buffer big enough to accommodate all the recipes */
+ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ tmp[0].recipe_indx = rid;
+ status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
+ /* non-zero status meaning recipe doesn't exist */
+ if (status)
+ goto err_unroll;
+
+ /* Get recipe to profile map so that we can get the fv from lkups that
+ * we read for a recipe from FW. Since we want to minimize the number of
+ * times we make this FW call, just make one call and cache the copy
+ * until a new recipe is added. This operation is only required the
+ * first time to get the changes from FW. Then to search existing
+ * entries we don't need to update the cache again until another recipe
+ * gets added.
+ */
+ if (*refresh_required) {
+ ice_get_recp_to_prof_map(hw);
+ *refresh_required = false;
+ }
+
+ /* Start populating all the entries for recps[rid] based on lkups from
+ * firmware. Note that we are only creating the root recipe in our
+ * database.
+ */
+ lkup_exts = &recps[rid].lkup_exts;
+
+ for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
+ struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
+ struct ice_recp_grp_entry *rg_entry;
+ u8 i, prof, idx, prot = 0;
+ bool is_root;
+ u16 off = 0;
+
+ rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
+ GFP_KERNEL);
+ if (!rg_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+
+ idx = root_bufs.recipe_indx;
+ is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
+
+ /* Mark all result indices in this chain */
+ if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
+ set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
+ result_bm);
+
+ /* get the first profile that is associated with rid */
+ prof = find_first_bit(recipe_to_profile[idx],
+ ICE_MAX_NUM_PROFILES);
+ for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
+ u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
+
+ rg_entry->fv_idx[i] = lkup_indx;
+ rg_entry->fv_mask[i] =
+ le16_to_cpu(root_bufs.content.mask[i + 1]);
+
+			/* If the recipe is a chained recipe then each of its
+			 * child recipes will have a result index. To fill
+			 * fv_words we should not use those result indices; we
+			 * only need the protocol IDs and offsets. So skip
+			 * every fv_idx that stores a result index, and also
+			 * skip any fv_idx that is ICE_AQ_RECIPE_LKUP_IGNORE
+			 * or 0, since those are not valid offset values.
+			 */
+ if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
+ rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
+ rg_entry->fv_idx[i] == 0)
+ continue;
+
+ ice_find_prot_off(hw, ICE_BLK_SW, prof,
+ rg_entry->fv_idx[i], &prot, &off);
+ lkup_exts->fv_words[fv_word_idx].prot_id = prot;
+ lkup_exts->fv_words[fv_word_idx].off = off;
+ lkup_exts->field_mask[fv_word_idx] =
+ rg_entry->fv_mask[i];
+ fv_word_idx++;
+ }
+ /* populate rg_list with the data from the child entry of this
+ * recipe
+ */
+ list_add(&rg_entry->l_entry, &recps[rid].rg_list);
+
+ /* Propagate some data to the recipe database */
+ recps[idx].is_root = !!is_root;
+ recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
+ if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
+ recps[idx].chain_idx = root_bufs.content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN;
+ set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
+ } else {
+ recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
+ }
+
+ if (!is_root)
+ continue;
+
+ /* Only do the following for root recipes entries */
+ memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
+ sizeof(recps[idx].r_bitmap));
+ recps[idx].root_rid = root_bufs.content.rid &
+ ~ICE_AQ_RECIPE_ID_IS_ROOT;
+ recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ }
+
+ /* Complete initialization of the root recipe entry */
+ lkup_exts->n_val_words = fv_word_idx;
+ recps[rid].big_recp = (num_recps > 1);
+ recps[rid].n_grp_count = (u8)num_recps;
+ recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
+ recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
+ GFP_KERNEL);
+ if (!recps[rid].root_buf) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+
+ /* Copy result indexes */
+ bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
+ recps[rid].recp_created = true;
+
+err_unroll:
+ kfree(tmp);
+ return status;
+}
+
/* ice_init_port_info - Initialize port_info with switch configuration data
* @pi: pointer to port_info
* @vsi_port_num: VSI number or port number
@@ -1627,6 +2461,125 @@ exit:
}
/**
+ * ice_mac_fltr_exist - does this MAC filter exist for given VSI
+ * @hw: pointer to the hardware structure
+ * @mac: MAC address to be checked (for MAC filter)
+ * @vsi_handle: check MAC filter for this VSI
+ */
+bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
+{
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 hw_vsi_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return false;
+
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ sw = hw->switch_info;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+ if (!rule_head)
+ return false;
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_for_each_entry(entry, rule_head, list_entry) {
+ struct ice_fltr_info *f_info = &entry->fltr_info;
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
+
+ if (is_zero_ether_addr(mac_addr))
+ continue;
+
+ if (f_info->flag != ICE_FLTR_TX ||
+ f_info->src_id != ICE_SRC_ID_VSI ||
+ f_info->lkup_type != ICE_SW_LKUP_MAC ||
+ f_info->fltr_act != ICE_FWD_TO_VSI ||
+ hw_vsi_id != f_info->fwd_id.hw_vsi_id)
+ continue;
+
+ if (ether_addr_equal(mac, mac_addr)) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+ mutex_unlock(rule_lock);
+ return false;
+}
+
+/**
+ * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
+ * @hw: pointer to the hardware structure
+ * @vlan_id: VLAN ID
+ * @vsi_handle: check VLAN filter for this VSI
+ */
+bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
+{
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 hw_vsi_id;
+
+ if (vlan_id > ICE_MAX_VLAN_ID)
+ return false;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return false;
+
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ sw = hw->switch_info;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
+ if (!rule_head)
+ return false;
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_for_each_entry(entry, rule_head, list_entry) {
+ struct ice_fltr_info *f_info = &entry->fltr_info;
+ u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
+ struct ice_vsi_list_map_info *map_info;
+
+ if (entry_vlan_id > ICE_MAX_VLAN_ID)
+ continue;
+
+ if (f_info->flag != ICE_FLTR_TX ||
+ f_info->src_id != ICE_SRC_ID_VSI ||
+ f_info->lkup_type != ICE_SW_LKUP_VLAN)
+ continue;
+
+		/* Only allowed filter actions are FWD_TO_VSI/_VSI_LIST */
+ if (f_info->fltr_act != ICE_FWD_TO_VSI &&
+ f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
+ continue;
+
+ if (f_info->fltr_act == ICE_FWD_TO_VSI) {
+ if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
+ continue;
+ } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
+			/* If filter_action is FWD_TO_VSI_LIST, make sure
+			 * that the VSI being checked is part of the VSI list
+			 */
+ if (entry->vsi_count == 1 &&
+ entry->vsi_list_info) {
+ map_info = entry->vsi_list_info;
+ if (!test_bit(vsi_handle, map_info->vsi_map))
+ continue;
+ }
+ }
+
+ if (vlan_id == entry_vlan_id) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+ mutex_unlock(rule_lock);
+
+ return false;
+}
+
+/**
* ice_add_mac - Add a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
@@ -2037,6 +2990,27 @@ ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
}
/**
+ * ice_rem_adv_rule_info - remove advanced rule information
+ * @hw: pointer to the hardware structure
+ * @rule_head: pointer to the switch list structure that we want to delete
+ */
+static void
+ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
+{
+ struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
+ struct ice_adv_fltr_mgmt_list_entry *lst_itr;
+
+ if (list_empty(rule_head))
+ return;
+
+ list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+}
+
+/**
* ice_cfg_dflt_vsi - change state of VSI to set/clear default
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to set as default
@@ -2773,6 +3747,1621 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
return status;
}
+/* This mapping table maps every word within a given protocol structure to
+ * the real byte offset as per the specification of that protocol header.
+ * For example, the dst address in the Ethernet header is 3 words, at byte
+ * offsets 0, 2, and 4 in the actual packet header, and the src address is
+ * at offsets 6, 8, and 10.
+ * IMPORTANT: Every structure that is part of the "ice_prot_hdr" union should
+ * have a matching entry describing its fields. This needs to be updated if a
+ * new structure is added to that union.
+ */
+static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
+ { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
+ { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
+ { ICE_ETYPE_OL, { 0 } },
+ { ICE_VLAN_OFOS, { 2, 0 } },
+ { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
+ { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
+ { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
+ 26, 28, 30, 32, 34, 36, 38 } },
+ { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
+ 26, 28, 30, 32, 34, 36, 38 } },
+ { ICE_TCP_IL, { 0, 2 } },
+ { ICE_UDP_OF, { 0, 2 } },
+ { ICE_UDP_ILOS, { 0, 2 } },
+ { ICE_VXLAN, { 8, 10, 12, 14 } },
+ { ICE_GENEVE, { 8, 10, 12, 14 } },
+ { ICE_NVGRE, { 0, 2, 4, 6 } },
+};
+
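Each row above translates a word index within a protocol header into the
byte offset of that word; ice_fill_valid_words further down uses it to turn
every non-zero 16-bit mask word into a (protocol, offset) pair. A toy
standalone lookup with hypothetical values:

#include <stdio.h>

struct prot_ext_entry {
	int prot;			/* hypothetical protocol ID */
	unsigned char offs[10];		/* byte offset of each 16-bit word */
};

/* a MAC-like header: seven 16-bit words at consecutive even offsets */
static const struct prot_ext_entry demo_ext = {
	.prot = 1,
	.offs = { 0, 2, 4, 6, 8, 10, 12 },
};

int main(void)
{
	unsigned int word = 3;	/* fourth 16-bit word of the header */

	printf("prot %d word %u lives at byte offset %u\n",
	       demo_ext.prot, word, demo_ext.offs[word]);
	return 0;
}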
+static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
+ { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
+ { ICE_MAC_IL, ICE_MAC_IL_HW },
+ { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
+ { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
+ { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
+ { ICE_IPV4_IL, ICE_IPV4_IL_HW },
+ { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
+ { ICE_IPV6_IL, ICE_IPV6_IL_HW },
+ { ICE_TCP_IL, ICE_TCP_IL_HW },
+ { ICE_UDP_OF, ICE_UDP_OF_HW },
+ { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
+ { ICE_VXLAN, ICE_UDP_OF_HW },
+ { ICE_GENEVE, ICE_UDP_OF_HW },
+ { ICE_NVGRE, ICE_GRE_OF_HW },
+};
+
+/**
+ * ice_find_recp - find a recipe
+ * @hw: pointer to the hardware structure
+ * @lkup_exts: extension sequence to match
+ *
+ * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
+ */
+static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
+{
+ bool refresh_required = true;
+ struct ice_sw_recipe *recp;
+ u8 i;
+
+ /* Walk through existing recipes to find a match */
+ recp = hw->switch_info->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+		/* If no recipe was created for this ID in SW bookkeeping,
+		 * check if FW has an entry for this recipe. If the FW has an
+		 * entry, update our SW bookkeeping with it and continue with
+		 * the matching.
+		 */
+ if (!recp[i].recp_created)
+ if (ice_get_recp_frm_fw(hw,
+ hw->switch_info->recp_list, i,
+ &refresh_required))
+ continue;
+
+ /* Skip inverse action recipes */
+ if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_INV_ACT)
+ continue;
+
+ /* if number of words we are looking for match */
+ if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
+ struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
+ struct ice_fv_word *be = lkup_exts->fv_words;
+ u16 *cr = recp[i].lkup_exts.field_mask;
+ u16 *de = lkup_exts->field_mask;
+ bool found = true;
+ u8 pe, qr;
+
+ /* ar, cr, and qr are related to the recipe words, while
+ * be, de, and pe are related to the lookup words
+ */
+ for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
+ for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
+ qr++) {
+ if (ar[qr].off == be[pe].off &&
+ ar[qr].prot_id == be[pe].prot_id &&
+ cr[qr] == de[pe])
+ /* Found the "pe"th word in the
+ * given recipe
+ */
+ break;
+ }
+				/* After walking through all the words in the
+				 * "i"th recipe, if the "pe"th word was not
+				 * found then this recipe is not what we are
+				 * looking for. So break out of this loop and
+				 * try the next recipe.
+				 */
+ if (qr >= recp[i].lkup_exts.n_val_words) {
+ found = false;
+ break;
+ }
+ }
+			/* If "found" was never set to false for the "i"th
+			 * recipe, then we have found our match.
+			 */
+ if (found)
+ return i; /* Return the recipe ID */
+ }
+ }
+ return ICE_MAX_NUM_RECIPES;
+}
+
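The comparison inside ice_find_recp is order-insensitive: every lookup word
must find an identical (protocol, offset, mask) triple somewhere in the
recipe's word list, with equal counts checked up front. The same test,
reduced to a compact standalone form:

#include <stdbool.h>
#include <stdio.h>

struct word { int prot, off, mask; };

static bool words_match(const struct word *a, int na,
			const struct word *b, int nb)
{
	int i, j;

	if (na != nb)	/* counts must match before comparing contents */
		return false;
	for (i = 0; i < na; i++) {		/* each lookup word ... */
		for (j = 0; j < nb; j++)	/* ... must appear in recipe */
			if (a[i].prot == b[j].prot && a[i].off == b[j].off &&
			    a[i].mask == b[j].mask)
				break;
		if (j == nb)
			return false;	/* a[i] found nowhere in b */
	}
	return true;
}

int main(void)
{
	struct word lkup[] = { { 1, 0, 0xffff }, { 2, 4, 0x00ff } };
	struct word recp[] = { { 2, 4, 0x00ff }, { 1, 0, 0xffff } };

	printf("match: %d\n", words_match(lkup, 2, recp, 2));	/* -> 1 */
	return 0;
}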
+/**
+ * ice_prot_type_to_id - get protocol ID from protocol type
+ * @type: protocol type
+ * @id: pointer to variable that will receive the ID
+ *
+ * Returns true if found, false otherwise
+ */
+static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
+{
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
+ if (ice_prot_id_tbl[i].type == type) {
+ *id = ice_prot_id_tbl[i].protocol_id;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ice_fill_valid_words - count valid words
+ * @rule: advanced rule with lookup information
+ * @lkup_exts: byte offset extractions of the words that are valid
+ *
+ * calculate valid words in a lookup rule using mask value
+ */
+static u8
+ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
+ struct ice_prot_lkup_ext *lkup_exts)
+{
+ u8 j, word, prot_id, ret_val;
+
+ if (!ice_prot_type_to_id(rule->type, &prot_id))
+ return 0;
+
+ word = lkup_exts->n_val_words;
+
+ for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
+ if (((u16 *)&rule->m_u)[j] &&
+ rule->type < ARRAY_SIZE(ice_prot_ext)) {
+ /* No more space to accommodate */
+ if (word >= ICE_MAX_CHAIN_WORDS)
+ return 0;
+ lkup_exts->fv_words[word].off =
+ ice_prot_ext[rule->type].offs[j];
+ lkup_exts->fv_words[word].prot_id =
+ ice_prot_id_tbl[rule->type].protocol_id;
+ lkup_exts->field_mask[word] =
+ be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
+ word++;
+ }
+
+ ret_val = word - lkup_exts->n_val_words;
+ lkup_exts->n_val_words = word;
+
+ return ret_val;
+}
+
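ice_fill_valid_words treats the match-mask union as an array of 16-bit words
and records one lookup word for every non-zero mask word. A self-contained
sketch of that scan, with a plain array standing in for the driver's union:

#include <stdint.h>
#include <stdio.h>

static const uint8_t word_off[5] = { 0, 2, 4, 6, 8 };	/* per-word offsets */

int main(void)
{
	uint16_t mask[5] = { 0, 0xffff, 0, 0x00ff, 0 };	/* toy header mask */
	unsigned int i, n_valid = 0;

	for (i = 0; i < 5; i++) {
		if (!mask[i])
			continue;	/* word is not part of the match */
		printf("valid word %u: byte offset %u, mask 0x%04x\n",
		       n_valid, word_off[i], mask[i]);
		n_valid++;
	}
	printf("%u valid word(s)\n", n_valid);
	return 0;
}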
+/**
+ * ice_create_first_fit_recp_def - Create a recipe grouping
+ * @hw: pointer to the hardware structure
+ * @lkup_exts: an array of protocol header extractions
+ * @rg_list: pointer to a list that stores new recipe groups
+ * @recp_cnt: pointer to a variable that stores returned number of recipe groups
+ *
+ * Using first fit algorithm, take all the words that are still not done
+ * and start grouping them in 4-word groups. Each group makes up one
+ * recipe.
+ */
+static enum ice_status
+ice_create_first_fit_recp_def(struct ice_hw *hw,
+ struct ice_prot_lkup_ext *lkup_exts,
+ struct list_head *rg_list,
+ u8 *recp_cnt)
+{
+ struct ice_pref_recipe_group *grp = NULL;
+ u8 j;
+
+ *recp_cnt = 0;
+
+	/* Walk through every word in the rule and check whether it is done.
+	 * Any word that is not yet done needs to become part of a new recipe.
+	 */
+ for (j = 0; j < lkup_exts->n_val_words; j++)
+ if (!test_bit(j, lkup_exts->done)) {
+ if (!grp ||
+ grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
+ struct ice_recp_grp_entry *entry;
+
+ entry = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry)
+ return ICE_ERR_NO_MEMORY;
+ list_add(&entry->l_entry, rg_list);
+ grp = &entry->r_group;
+ (*recp_cnt)++;
+ }
+
+ grp->pairs[grp->n_val_pairs].prot_id =
+ lkup_exts->fv_words[j].prot_id;
+ grp->pairs[grp->n_val_pairs].off =
+ lkup_exts->fv_words[j].off;
+ grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
+ grp->n_val_pairs++;
+ }
+
+ return 0;
+}
+
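The grouping above is strictly sequential first fit: not-done words are
appended to the current group until it holds ICE_NUM_WORDS_RECIPE entries,
at which point a new group is opened. The same packing in a few lines of
standalone C (a group size of 4 is assumed, matching ICE_NUM_WORDS_RECIPE):

#include <stdio.h>

#define WORDS_PER_GROUP 4

int main(void)
{
	int n_words = 9;	/* e.g. nine words still marked not-done */
	int i, n_groups = 0, fill = WORDS_PER_GROUP;

	for (i = 0; i < n_words; i++) {
		if (fill == WORDS_PER_GROUP) {	/* current group is full */
			n_groups++;		/* open a new group */
			fill = 0;
		}
		printf("word %d -> group %d slot %d\n", i, n_groups - 1, fill);
		fill++;
	}
	printf("%d word(s) packed into %d group(s)\n", n_words, n_groups);
	return 0;
}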
+/**
+ * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
+ * @hw: pointer to the hardware structure
+ * @fv_list: field vector with the extraction sequence information
+ * @rg_list: recipe groupings with protocol-offset pairs
+ *
+ * Helper function to fill in the field vector indices for protocol-offset
+ * pairs. These indexes are then ultimately programmed into a recipe.
+ */
+static enum ice_status
+ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
+ struct list_head *rg_list)
+{
+ struct ice_sw_fv_list_entry *fv;
+ struct ice_recp_grp_entry *rg;
+ struct ice_fv_word *fv_ext;
+
+ if (list_empty(fv_list))
+ return 0;
+
+ fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
+ list_entry);
+ fv_ext = fv->fv_ptr->ew;
+
+ list_for_each_entry(rg, rg_list, l_entry) {
+ u8 i;
+
+ for (i = 0; i < rg->r_group.n_val_pairs; i++) {
+ struct ice_fv_word *pr;
+ bool found = false;
+ u16 mask;
+ u8 j;
+
+ pr = &rg->r_group.pairs[i];
+ mask = rg->r_group.mask[i];
+
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv_ext[j].prot_id == pr->prot_id &&
+ fv_ext[j].off == pr->off) {
+ found = true;
+
+ /* Store index of field vector */
+ rg->fv_idx[i] = j;
+ rg->fv_mask[i] = mask;
+ break;
+ }
+
+ /* Protocol/offset could not be found, caller gave an
+ * invalid pair
+ */
+ if (!found)
+ return ICE_ERR_PARAM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_find_free_recp_res_idx - find free result indexes for recipe
+ * @hw: pointer to hardware structure
+ * @profiles: bitmap of profiles that will be associated with the new recipe
+ * @free_idx: pointer to variable to receive the free index bitmap
+ *
+ * The algorithm used here is:
+ * 1. When creating a new recipe, create a set P which contains all
+ * Profiles that will be associated with our new recipe
+ *
+ * 2. For each Profile p in set P:
+ * a. Add all recipes associated with Profile p into set R
+ *    b. Optional: PossibleIndexes &= profile[p].possibleIndexes
+ * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
+ * i. Or just assume they all have the same possible indexes:
+ * 44, 45, 46, 47
+ * i.e., PossibleIndexes = 0x0000F00000000000
+ *
+ * 3. For each Recipe r in set R:
+ * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
+ * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
+ *
+ * FreeIndexes will contain the bits indicating the indexes free for use,
+ * then the code needs to update the recipe[r].used_result_idx_bits to
+ * indicate which indexes were selected for use by this recipe.
+ */
+static u16
+ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
+ unsigned long *free_idx)
+{
+ DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
+ DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
+ DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
+ u16 bit;
+
+ bitmap_zero(possible_idx, ICE_MAX_FV_WORDS);
+ bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
+ bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
+ bitmap_zero(free_idx, ICE_MAX_FV_WORDS);
+
+ bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+
+ /* For each profile we are going to associate the recipe with, add the
+ * recipes that are associated with that profile. This will give us
+ * the set of recipes that our recipe may collide with. Also, determine
+ * what possible result indexes are usable given this set of profiles.
+ */
+ for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
+ bitmap_or(recipes, recipes, profile_to_recipe[bit],
+ ICE_MAX_NUM_RECIPES);
+ bitmap_and(possible_idx, possible_idx,
+ hw->switch_info->prof_res_bm[bit],
+ ICE_MAX_FV_WORDS);
+ }
+
+ /* For each recipe that our new recipe may collide with, determine
+ * which indexes have been used.
+ */
+ for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
+ bitmap_or(used_idx, used_idx,
+ hw->switch_info->recp_list[bit].res_idxs,
+ ICE_MAX_FV_WORDS);
+
+ bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
+
+ /* return number of free indexes */
+ return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
+}
+
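The set algebra above reduces to three bitmap operations: AND the
per-profile possible-index masks together, OR the per-recipe used-index
masks together, then XOR used against possible to leave only the free
indexes (valid because used is a subset of possible). A standalone 64-bit
sketch with hypothetical mask values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* per-profile "possible result index" masks (hypothetical) */
	uint64_t prof_possible[2] = { 0x0000f0f000000000ULL,
				      0x0000f03000000000ULL };
	/* per-recipe "already used index" masks (hypothetical) */
	uint64_t recp_used[2] = { 0x0000100000000000ULL,
				  0x0000002000000000ULL };
	uint64_t possible = ~0ULL, used = 0, free_idx;
	int i;

	for (i = 0; i < 2; i++)
		possible &= prof_possible[i];	/* usable by *all* profiles */
	for (i = 0; i < 2; i++)
		used |= recp_used[i];	/* used by *any* colliding recipe */

	free_idx = used ^ possible;	/* assumes used is within possible */
	printf("possible=%#llx used=%#llx free=%#llx\n",
	       (unsigned long long)possible, (unsigned long long)used,
	       (unsigned long long)free_idx);
	return 0;
}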
+/**
+ * ice_add_sw_recipe - function to call AQ calls to create switch recipe
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @profiles: bitmap of profiles that will be associated.
+ */
+static enum ice_status
+ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ unsigned long *profiles)
+{
+ DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *tmp;
+ struct ice_aqc_recipe_data_elem *buf;
+ struct ice_recp_grp_entry *entry;
+ enum ice_status status;
+ u16 free_res_idx;
+ u16 recipe_count;
+ u8 chain_idx;
+ u8 recps = 0;
+
+	/* When more than one recipe is required, another recipe is needed to
+	 * chain them together. Matching a tunnel metadata ID takes up one of
+	 * the match fields in the chaining recipe, reducing the number of
+	 * chained recipes by one.
+	 */
+ /* check number of free result indices */
+ bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
+ free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
+
+ ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
+ free_res_idx, rm->n_grp_count);
+
+ if (rm->n_grp_count > 1) {
+ if (rm->n_grp_count > free_res_idx)
+ return ICE_ERR_MAX_LIMIT;
+
+ rm->n_grp_count++;
+ }
+
+ if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
+ return ICE_ERR_MAX_LIMIT;
+
+ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
+ GFP_KERNEL);
+ if (!buf) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_mem;
+ }
+
+ bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
+ recipe_count = ICE_MAX_NUM_RECIPES;
+ status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
+ NULL);
+ if (status || recipe_count == 0)
+ goto err_unroll;
+
+ /* Allocate the recipe resources, and configure them according to the
+ * match fields from protocol headers and extracted field vectors.
+ */
+ chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ u8 i;
+
+ status = ice_alloc_recipe(hw, &entry->rid);
+ if (status)
+ goto err_unroll;
+
+ /* Clear the result index of the located recipe, as this will be
+ * updated, if needed, later in the recipe creation process.
+ */
+ tmp[0].content.result_indx = 0;
+
+ buf[recps] = tmp[0];
+ buf[recps].recipe_indx = (u8)entry->rid;
+ /* if the recipe is a non-root recipe RID should be programmed
+ * as 0 for the rules to be applied correctly.
+ */
+ buf[recps].content.rid = 0;
+ memset(&buf[recps].content.lkup_indx, 0,
+ sizeof(buf[recps].content.lkup_indx));
+
+ /* All recipes use look-up index 0 to match switch ID. */
+ buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ buf[recps].content.mask[0] =
+ cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
+ * to be 0
+ */
+ for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
+ buf[recps].content.lkup_indx[i] = 0x80;
+ buf[recps].content.mask[i] = 0;
+ }
+
+ for (i = 0; i < entry->r_group.n_val_pairs; i++) {
+ buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
+ buf[recps].content.mask[i + 1] =
+ cpu_to_le16(entry->fv_mask[i]);
+ }
+
+ if (rm->n_grp_count > 1) {
+			/* Check that there really is a valid result index
+			 * that can be used.
+			 */
+ if (chain_idx >= ICE_MAX_FV_WORDS) {
+ ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
+ status = ICE_ERR_MAX_LIMIT;
+ goto err_unroll;
+ }
+
+ entry->chain_idx = chain_idx;
+ buf[recps].content.result_indx =
+ ICE_AQ_RECIPE_RESULT_EN |
+ ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
+ ICE_AQ_RECIPE_RESULT_DATA_M);
+ clear_bit(chain_idx, result_idx_bm);
+ chain_idx = find_first_bit(result_idx_bm,
+ ICE_MAX_FV_WORDS);
+ }
+
+ /* fill recipe dependencies */
+ bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ set_bit(buf[recps].recipe_indx,
+ (unsigned long *)buf[recps].recipe_bitmap);
+ buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ recps++;
+ }
+
+ if (rm->n_grp_count == 1) {
+ rm->root_rid = buf[0].recipe_indx;
+ set_bit(buf[0].recipe_indx, rm->r_bitmap);
+ buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
+ if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
+ memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
+ sizeof(buf[0].recipe_bitmap));
+ } else {
+ status = ICE_ERR_BAD_PTR;
+ goto err_unroll;
+ }
+		/* Applicable only for ROOT_RECIPE: set the fwd_priority for
+		 * the recipe being created, if specified by the user. Usually
+		 * an advanced switch filter that results in a new extraction
+		 * sequence ends up creating a new recipe of type ROOT, and
+		 * recipes are associated with profiles. A switch rule that
+		 * refers to the newly created recipe needs to have either a
+		 * 'fwd' or a 'join' priority, otherwise switch rule evaluation
+		 * will not happen correctly. In other words, if the switch
+		 * rule is to be evaluated on a priority basis, the recipe
+		 * needs a priority; otherwise it will be evaluated last.
+		 */
+ buf[0].content.act_ctrl_fwd_priority = rm->priority;
+ } else {
+ struct ice_recp_grp_entry *last_chain_entry;
+ u16 rid, i;
+
+ /* Allocate the last recipe that will chain the outcomes of the
+ * other recipes together
+ */
+ status = ice_alloc_recipe(hw, &rid);
+ if (status)
+ goto err_unroll;
+
+ buf[recps].recipe_indx = (u8)rid;
+ buf[recps].content.rid = (u8)rid;
+ buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
+		/* the newly created entry should also be part of rg_list to
+		 * make sure we have a complete recipe
+		 */
+ last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*last_chain_entry),
+ GFP_KERNEL);
+ if (!last_chain_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+ last_chain_entry->rid = rid;
+ memset(&buf[recps].content.lkup_indx, 0,
+ sizeof(buf[recps].content.lkup_indx));
+ /* All recipes use look-up index 0 to match switch ID. */
+ buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ buf[recps].content.mask[0] =
+ cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
+ buf[recps].content.lkup_indx[i] =
+ ICE_AQ_RECIPE_LKUP_IGNORE;
+ buf[recps].content.mask[i] = 0;
+ }
+
+ i = 1;
+ /* update r_bitmap with the recp that is used for chaining */
+ set_bit(rid, rm->r_bitmap);
+		/* this is the recipe that chains all the other recipes, so it
+		 * should not itself have a chaining ID
+		 */
+ last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ last_chain_entry->fv_idx[i] = entry->chain_idx;
+ buf[recps].content.lkup_indx[i] = entry->chain_idx;
+ buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
+ set_bit(entry->rid, rm->r_bitmap);
+ }
+ list_add(&last_chain_entry->l_entry, &rm->rg_list);
+ if (sizeof(buf[recps].recipe_bitmap) >=
+ sizeof(rm->r_bitmap)) {
+ memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
+ sizeof(buf[recps].recipe_bitmap));
+ } else {
+ status = ICE_ERR_BAD_PTR;
+ goto err_unroll;
+ }
+ buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+
+ recps++;
+ rm->root_rid = (u8)rid;
+ }
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ goto err_unroll;
+
+ status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
+ ice_release_change_lock(hw);
+ if (status)
+ goto err_unroll;
+
+	/* Add every recipe that was just created to the recipe
+	 * bookkeeping list
+	 */
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ struct ice_switch_info *sw = hw->switch_info;
+ bool is_root, idx_found = false;
+ struct ice_sw_recipe *recp;
+ u16 idx, buf_idx = 0;
+
+ /* find buffer index for copying some data */
+ for (idx = 0; idx < rm->n_grp_count; idx++)
+ if (buf[idx].recipe_indx == entry->rid) {
+ buf_idx = idx;
+ idx_found = true;
+ }
+
+ if (!idx_found) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto err_unroll;
+ }
+
+ recp = &sw->recp_list[entry->rid];
+ is_root = (rm->root_rid == entry->rid);
+ recp->is_root = is_root;
+
+ recp->root_rid = entry->rid;
+ recp->big_recp = (is_root && rm->n_grp_count > 1);
+
+ memcpy(&recp->ext_words, entry->r_group.pairs,
+ entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
+
+ memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
+ sizeof(recp->r_bitmap));
+
+ /* Copy non-result fv index values and masks to recipe. This
+ * call will also update the result recipe bitmask.
+ */
+ ice_collect_result_idx(&buf[buf_idx], recp);
+
+ /* for non-root recipes, also copy to the root, this allows
+ * easier matching of a complete chained recipe
+ */
+ if (!is_root)
+ ice_collect_result_idx(&buf[buf_idx],
+ &sw->recp_list[rm->root_rid]);
+
+ recp->n_ext_words = entry->r_group.n_val_pairs;
+ recp->chain_idx = entry->chain_idx;
+ recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
+ recp->n_grp_count = rm->n_grp_count;
+ recp->tun_type = rm->tun_type;
+ recp->recp_created = true;
+ }
+ rm->root_buf = buf;
+ kfree(tmp);
+ return status;
+
+err_unroll:
+err_mem:
+ kfree(tmp);
+ devm_kfree(ice_hw_to_dev(hw), buf);
+ return status;
+}
+
+/**
+ * ice_create_recipe_group - creates recipe group
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @lkup_exts: lookup elements
+ */
+static enum ice_status
+ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ struct ice_prot_lkup_ext *lkup_exts)
+{
+ enum ice_status status;
+ u8 recp_count = 0;
+
+ rm->n_grp_count = 0;
+
+ /* Create recipes for words that are marked not done by packing them
+ * as best fit.
+ */
+ status = ice_create_first_fit_recp_def(hw, lkup_exts,
+ &rm->rg_list, &recp_count);
+ if (!status) {
+ rm->n_grp_count += recp_count;
+ rm->n_ext_words = lkup_exts->n_val_words;
+ memcpy(&rm->ext_words, lkup_exts->fv_words,
+ sizeof(rm->ext_words));
+ memcpy(rm->word_masks, lkup_exts->field_mask,
+ sizeof(rm->word_masks));
+ }
+
+ return status;
+}
+
+/**
+ * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
+ * @hw: pointer to hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: pointer to a list that holds the returned field vectors
+ */
+static enum ice_status
+ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ enum ice_status status;
+ u8 *prot_ids;
+ u16 i;
+
+ prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
+ if (!prot_ids)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < lkups_cnt; i++)
+ if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
+ status = ICE_ERR_CFG;
+ goto free_mem;
+ }
+
+ /* Find field vectors that include all specified protocol types */
+ status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
+
+free_mem:
+ kfree(prot_ids);
+ return status;
+}
+
+/**
+ * ice_tun_type_match_word - determine if tun type needs a match mask
+ * @tun_type: tunnel type
+ * @mask: mask to be used for the tunnel
+ */
+static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
+{
+ switch (tun_type) {
+ case ICE_SW_TUN_GENEVE:
+ case ICE_SW_TUN_VXLAN:
+ case ICE_SW_TUN_NVGRE:
+ *mask = ICE_TUN_FLAG_MASK;
+ return true;
+
+ default:
+ *mask = 0;
+ return false;
+ }
+}
+
+/**
+ * ice_add_special_words - Add words that are not protocols, such as metadata
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @lkup_exts: lookup word structure
+ */
+static enum ice_status
+ice_add_special_words(struct ice_adv_rule_info *rinfo,
+ struct ice_prot_lkup_ext *lkup_exts)
+{
+ u16 mask;
+
+ /* If this is a tunneled packet, then add recipe index to match the
+ * tunnel bit in the packet metadata flags.
+ */
+ if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
+ if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
+ u8 word = lkup_exts->n_val_words++;
+
+ lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
+ lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
+ lkup_exts->field_mask[word] = mask;
+ } else {
+ return ICE_ERR_MAX_LIMIT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
+ * @hw: pointer to hardware structure
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+static void
+ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
+ unsigned long *bm)
+{
+ enum ice_prof_type prof_type;
+
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+
+ switch (rinfo->tun_type) {
+ case ICE_NON_TUN:
+ prof_type = ICE_PROF_NON_TUN;
+ break;
+ case ICE_ALL_TUNNELS:
+ prof_type = ICE_PROF_TUN_ALL;
+ break;
+ case ICE_SW_TUN_GENEVE:
+ case ICE_SW_TUN_VXLAN:
+ prof_type = ICE_PROF_TUN_UDP;
+ break;
+ case ICE_SW_TUN_NVGRE:
+ prof_type = ICE_PROF_TUN_GRE;
+ break;
+ default:
+ prof_type = ICE_PROF_ALL;
+ break;
+ }
+
+ ice_get_sw_fv_bitmap(hw, prof_type, bm);
+}
+
+/**
+ * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
+ * @hw: pointer to hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @rid: return the recipe ID of the recipe created
+ */
+static enum ice_status
+ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
+{
+ DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
+ DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
+ struct ice_prot_lkup_ext *lkup_exts;
+ struct ice_recp_grp_entry *r_entry;
+ struct ice_sw_fv_list_entry *fvit;
+ struct ice_recp_grp_entry *r_tmp;
+ struct ice_sw_fv_list_entry *tmp;
+ enum ice_status status = 0;
+ struct ice_sw_recipe *rm;
+ u8 i;
+
+ if (!lkups_cnt)
+ return ICE_ERR_PARAM;
+
+ lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
+ if (!lkup_exts)
+ return ICE_ERR_NO_MEMORY;
+
+	/* Determine the number of words to be matched and check whether it
+	 * exceeds a recipe's restrictions
+	 */
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 count;
+
+ if (lkups[i].type >= ICE_PROTOCOL_LAST) {
+ status = ICE_ERR_CFG;
+ goto err_free_lkup_exts;
+ }
+
+ count = ice_fill_valid_words(&lkups[i], lkup_exts);
+ if (!count) {
+ status = ICE_ERR_CFG;
+ goto err_free_lkup_exts;
+ }
+ }
+
+ rm = kzalloc(sizeof(*rm), GFP_KERNEL);
+ if (!rm) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_free_lkup_exts;
+ }
+
+ /* Get field vectors that contain fields extracted from all the protocol
+ * headers being programmed.
+ */
+ INIT_LIST_HEAD(&rm->fv_list);
+ INIT_LIST_HEAD(&rm->rg_list);
+
+ /* Get bitmap of field vectors (profiles) that are compatible with the
+ * rule request; only these will be searched in the subsequent call to
+ * ice_get_fv.
+ */
+ ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
+
+ status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ if (status)
+ goto err_unroll;
+
+ /* Create any special protocol/offset pairs, such as looking at tunnel
+ * bits by extracting metadata
+ */
+ status = ice_add_special_words(rinfo, lkup_exts);
+ if (status)
+ goto err_free_lkup_exts;
+
+ /* Group match words into recipes using preferred recipe grouping
+ * criteria.
+ */
+ status = ice_create_recipe_group(hw, rm, lkup_exts);
+ if (status)
+ goto err_unroll;
+
+ /* set the recipe priority if specified */
+ rm->priority = (u8)rinfo->priority;
+
+ /* Find offsets from the field vector. Pick the first one for all the
+ * recipes.
+ */
+ status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
+ if (status)
+ goto err_unroll;
+
+ /* get bitmap of all profiles the recipe will be associated with */
+ bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
+ list_for_each_entry(fvit, &rm->fv_list, list_entry) {
+ ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
+ set_bit((u16)fvit->profile_id, profiles);
+ }
+
+ /* Look for a recipe which matches our requested fv / mask list */
+ *rid = ice_find_recp(hw, lkup_exts);
+ if (*rid < ICE_MAX_NUM_RECIPES)
+		/* Success: found a recipe that matches the existing criteria */
+ goto err_unroll;
+
+	/* The recipe we need does not exist; add a recipe */
+ status = ice_add_sw_recipe(hw, rm, profiles);
+ if (status)
+ goto err_unroll;
+
+ /* Associate all the recipes created with all the profiles in the
+ * common field vector.
+ */
+ list_for_each_entry(fvit, &rm->fv_list, list_entry) {
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u16 j;
+
+ status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
+ (u8 *)r_bitmap, NULL);
+ if (status)
+ goto err_unroll;
+
+ bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ goto err_unroll;
+
+ status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
+ (u8 *)r_bitmap,
+ NULL);
+ ice_release_change_lock(hw);
+
+ if (status)
+ goto err_unroll;
+
+ /* Update profile to recipe bitmap array */
+ bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+
+ /* Update recipe to profile bitmap array */
+ for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
+ set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
+ }
+
+ *rid = rm->root_rid;
+ memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
+ sizeof(*lkup_exts));
+err_unroll:
+ list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
+ list_del(&r_entry->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r_entry);
+ }
+
+ list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
+ list_del(&fvit->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvit);
+ }
+
+ if (rm->root_buf)
+ devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
+
+ kfree(rm);
+
+err_free_lkup_exts:
+ kfree(lkup_exts);
+
+ return status;
+}
+
+/**
+ * ice_find_dummy_packet - find dummy packet
+ *
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @tun_type: tunnel type
+ * @pkt: dummy packet to fill according to filter match criteria
+ * @pkt_len: packet length of dummy packet
+ * @offsets: pointer to receive the offsets info for the dummy packet
+ */
+static void
+ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ enum ice_sw_tunnel_type tun_type,
+ const u8 **pkt, u16 *pkt_len,
+ const struct ice_dummy_pkt_offsets **offsets)
+{
+ bool tcp = false, udp = false, ipv6 = false, vlan = false;
+ u16 i;
+
+ for (i = 0; i < lkups_cnt; i++) {
+ if (lkups[i].type == ICE_UDP_ILOS)
+ udp = true;
+ else if (lkups[i].type == ICE_TCP_IL)
+ tcp = true;
+ else if (lkups[i].type == ICE_IPV6_OFOS)
+ ipv6 = true;
+ else if (lkups[i].type == ICE_VLAN_OFOS)
+ vlan = true;
+ else if (lkups[i].type == ICE_ETYPE_OL &&
+ lkups[i].h_u.ethertype.ethtype_id ==
+ cpu_to_be16(ICE_IPV6_ETHER_ID) &&
+ lkups[i].m_u.ethertype.ethtype_id ==
+ cpu_to_be16(0xFFFF))
+ ipv6 = true;
+ }
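+	/* The tunnel type takes precedence below; otherwise the UDP, IPv6
+	 * and VLAN flags gathered above select the closest dummy packet
+	 * template.
+	 */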
+
+ if (tun_type == ICE_SW_TUN_NVGRE) {
+ if (tcp) {
+ *pkt = dummy_gre_tcp_packet;
+ *pkt_len = sizeof(dummy_gre_tcp_packet);
+ *offsets = dummy_gre_tcp_packet_offsets;
+ return;
+ }
+
+ *pkt = dummy_gre_udp_packet;
+ *pkt_len = sizeof(dummy_gre_udp_packet);
+ *offsets = dummy_gre_udp_packet_offsets;
+ return;
+ }
+
+ if (tun_type == ICE_SW_TUN_VXLAN ||
+ tun_type == ICE_SW_TUN_GENEVE) {
+ if (tcp) {
+ *pkt = dummy_udp_tun_tcp_packet;
+ *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
+ *offsets = dummy_udp_tun_tcp_packet_offsets;
+ return;
+ }
+
+ *pkt = dummy_udp_tun_udp_packet;
+ *pkt_len = sizeof(dummy_udp_tun_udp_packet);
+ *offsets = dummy_udp_tun_udp_packet_offsets;
+ return;
+ }
+
+ if (udp && !ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_udp_packet;
+ *pkt_len = sizeof(dummy_vlan_udp_packet);
+ *offsets = dummy_vlan_udp_packet_offsets;
+ return;
+ }
+ *pkt = dummy_udp_packet;
+ *pkt_len = sizeof(dummy_udp_packet);
+ *offsets = dummy_udp_packet_offsets;
+ return;
+ } else if (udp && ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_udp_ipv6_packet;
+ *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
+ *offsets = dummy_vlan_udp_ipv6_packet_offsets;
+ return;
+ }
+ *pkt = dummy_udp_ipv6_packet;
+ *pkt_len = sizeof(dummy_udp_ipv6_packet);
+ *offsets = dummy_udp_ipv6_packet_offsets;
+ return;
+	} else if (ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_tcp_ipv6_packet;
+ *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
+ *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
+ return;
+ }
+ *pkt = dummy_tcp_ipv6_packet;
+ *pkt_len = sizeof(dummy_tcp_ipv6_packet);
+ *offsets = dummy_tcp_ipv6_packet_offsets;
+ return;
+ }
+
+ if (vlan) {
+ *pkt = dummy_vlan_tcp_packet;
+ *pkt_len = sizeof(dummy_vlan_tcp_packet);
+ *offsets = dummy_vlan_tcp_packet_offsets;
+ } else {
+ *pkt = dummy_tcp_packet;
+ *pkt_len = sizeof(dummy_tcp_packet);
+ *offsets = dummy_tcp_packet_offsets;
+ }
+}
+
+/**
+ * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
+ *
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @s_rule: stores rule information from the match criteria
+ * @dummy_pkt: dummy packet to fill according to filter match criteria
+ * @pkt_len: packet length of dummy packet
+ * @offsets: offset info for the dummy packet
+ */
+static enum ice_status
+ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ struct ice_aqc_sw_rules_elem *s_rule,
+ const u8 *dummy_pkt, u16 pkt_len,
+ const struct ice_dummy_pkt_offsets *offsets)
+{
+ u8 *pkt;
+ u16 i;
+
+ /* Start with a packet with a pre-defined/dummy content. Then, fill
+ * in the header values to be looked up or matched.
+ */
+ pkt = s_rule->pdata.lkup_tx_rx.hdr;
+
+ memcpy(pkt, dummy_pkt, pkt_len);
+
+ for (i = 0; i < lkups_cnt; i++) {
+ enum ice_protocol_type type;
+ u16 offset = 0, len = 0, j;
+ bool found = false;
+
+		/* find the start of this layer; it should be found since this
+		 * was already checked when searching for the dummy packet
+ */
+ type = lkups[i].type;
+ for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
+ if (type == offsets[j].type) {
+ offset = offsets[j].offset;
+ found = true;
+ break;
+ }
+ }
+ /* this should never happen in a correct calling sequence */
+ if (!found)
+ return ICE_ERR_PARAM;
+
+ switch (lkups[i].type) {
+ case ICE_MAC_OFOS:
+ case ICE_MAC_IL:
+ len = sizeof(struct ice_ether_hdr);
+ break;
+ case ICE_ETYPE_OL:
+ len = sizeof(struct ice_ethtype_hdr);
+ break;
+ case ICE_VLAN_OFOS:
+ len = sizeof(struct ice_vlan_hdr);
+ break;
+ case ICE_IPV4_OFOS:
+ case ICE_IPV4_IL:
+ len = sizeof(struct ice_ipv4_hdr);
+ break;
+ case ICE_IPV6_OFOS:
+ case ICE_IPV6_IL:
+ len = sizeof(struct ice_ipv6_hdr);
+ break;
+ case ICE_TCP_IL:
+ case ICE_UDP_OF:
+ case ICE_UDP_ILOS:
+ len = sizeof(struct ice_l4_hdr);
+ break;
+ case ICE_SCTP_IL:
+ len = sizeof(struct ice_sctp_hdr);
+ break;
+ case ICE_NVGRE:
+ len = sizeof(struct ice_nvgre_hdr);
+ break;
+ case ICE_VXLAN:
+ case ICE_GENEVE:
+ len = sizeof(struct ice_udp_tnl_hdr);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ /* the length should be a word multiple */
+ if (len % ICE_BYTES_PER_WORD)
+ return ICE_ERR_CFG;
+
+ /* We have the offset to the header start, the length, the
+ * caller's header values and mask. Use this information to
+ * copy the data into the dummy packet appropriately based on
+ * the mask. Note that we need to only write the bits as
+ * indicated by the mask to make sure we don't improperly write
+ * over any significant packet data.
+ */
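+		/* e.g. a packet word of 0xAAAA with mask 0x00FF and header
+		 * value 0x1234 becomes (0xAAAA & ~0x00FF) | (0x1234 & 0x00FF),
+		 * i.e. 0xAA34.
+		 */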
+ for (j = 0; j < len / sizeof(u16); j++)
+ if (((u16 *)&lkups[i].m_u)[j])
+ ((u16 *)(pkt + offset))[j] =
+ (((u16 *)(pkt + offset))[j] &
+ ~((u16 *)&lkups[i].m_u)[j]) |
+ (((u16 *)&lkups[i].h_u)[j] &
+ ((u16 *)&lkups[i].m_u)[j]);
+ }
+
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
+
+ return 0;
+}
+
+/**
+ * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
+ * @hw: pointer to the hardware structure
+ * @tun_type: tunnel type
+ * @pkt: dummy packet to fill in
+ * @offsets: offset info for the dummy packet
+ */
+static enum ice_status
+ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+ u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
+{
+ u16 open_port, i;
+
+ switch (tun_type) {
+ case ICE_SW_TUN_VXLAN:
+ case ICE_SW_TUN_GENEVE:
+ if (!ice_get_open_tunnel_port(hw, &open_port))
+ return ICE_ERR_CFG;
+ break;
+
+ default:
+ /* Nothing needs to be done for this tunnel type */
+ return 0;
+ }
+
+ /* Find the outer UDP protocol header and insert the port number */
+ for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ if (offsets[i].type == ICE_UDP_OF) {
+ struct ice_l4_hdr *hdr;
+ u16 offset;
+
+ offset = offsets[i].offset;
+ hdr = (struct ice_l4_hdr *)&pkt[offset];
+ hdr->dst_port = cpu_to_be16(open_port);
+
+ return 0;
+ }
+ }
+
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_find_adv_rule_entry - Search a rule entry
+ * @hw: pointer to the hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @recp_id: recipe ID for which we are finding the rule
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ *
+ * Helper function to search for a given advanced rule entry
+ * Returns pointer to entry storing the rule if found
+ */
+static struct ice_adv_fltr_mgmt_list_entry *
+ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, u16 recp_id,
+ struct ice_adv_rule_info *rinfo)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct ice_switch_info *sw = hw->switch_info;
+ int i;
+
+ list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
+ list_entry) {
+ bool lkups_matched = true;
+
+ if (lkups_cnt != list_itr->lkups_cnt)
+ continue;
+ for (i = 0; i < list_itr->lkups_cnt; i++)
+ if (memcmp(&list_itr->lkups[i], &lkups[i],
+ sizeof(*lkups))) {
+ lkups_matched = false;
+ break;
+ }
+ if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
+ rinfo->tun_type == list_itr->rule_info.tun_type &&
+ lkups_matched)
+ return list_itr;
+ }
+ return NULL;
+}
+
+/**
+ * ice_adv_add_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @m_entry: pointer to current adv filter management list entry
+ * @cur_fltr: filter information from the book keeping entry
+ * @new_fltr: filter information with the new VSI to be added
+ *
+ * Call AQ command to add or update previously created VSI list with new VSI.
+ *
+ * Helper function to do bookkeeping associated with adding filter information.
+ * The bookkeeping algorithm is described below:
+ * When a VSI needs to subscribe to a given advanced filter
+ * if only one VSI has been added till now
+ * Allocate a new VSI list and add two VSIs
+ * to this list using switch rule command
+ * Update the previously created switch rule with the
+ * newly created VSI list ID
+ * if a VSI list was previously created
+ * Add the new VSI to the previously created VSI list set
+ * using the update switch rule command
+ */
+static enum ice_status
+ice_adv_add_update_vsi_list(struct ice_hw *hw,
+ struct ice_adv_fltr_mgmt_list_entry *m_entry,
+ struct ice_adv_rule_info *cur_fltr,
+ struct ice_adv_rule_info *new_fltr)
+{
+ enum ice_status status;
+ u16 vsi_list_id = 0;
+
+ if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
+ cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
+ return ICE_ERR_NOT_IMPL;
+
+ if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
+ (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
+ return ICE_ERR_NOT_IMPL;
+
+ if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
+ /* Only one entry existed in the mapping and it was not already
+ * a part of a VSI list. So, create a VSI list with the old and
+ * new VSIs.
+ */
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
+
+ /* A rule already exists with the new VSI being added */
+ if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
+ new_fltr->sw_act.fwd_id.hw_vsi_id)
+ return ICE_ERR_ALREADY_EXISTS;
+
+ vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
+ vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
+ &vsi_list_id,
+ ICE_SW_LKUP_LAST);
+ if (status)
+ return status;
+
+ memset(&tmp_fltr, 0, sizeof(tmp_fltr));
+ tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
+ tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
+
+ /* Update the previous switch rule of "forward to VSI" to
+ * "fwd to VSI list"
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status)
+ return status;
+
+ cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
+ cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
+ m_entry->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+ vsi_list_id);
+ } else {
+ u16 vsi_handle = new_fltr->sw_act.vsi_handle;
+
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_CFG;
+
+ /* A rule already exists with the new VSI being added */
+ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
+ return 0;
+
+ /* Update the previously created VSI list set with
+ * the new VSI ID passed in
+ */
+ vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
+
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
+ vsi_list_id, false,
+ ice_aqc_opc_update_sw_rules,
+ ICE_SW_LKUP_LAST);
+ /* update VSI list mapping info with new VSI ID */
+ if (!status)
+ set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
+ }
+ if (!status)
+ m_entry->vsi_count++;
+ return status;
+}
+
+/**
+ * ice_add_adv_rule - helper function to create an advanced switch rule
+ * @hw: pointer to the hardware structure
+ * @lkups: information on the words that need to be looked up. All words
+ *	together make one recipe
+ * @lkups_cnt: num of entries in the lkups array
+ * @rinfo: other information related to the rule that needs to be programmed
+ * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
+ *		 ignored in case of error.
+ *
+ * This function can program only 1 rule at a time. The lkups is used to
+ * describe all the words that form the "lookup" portion of the recipe.
+ * These words can span multiple protocols. Callers to this function need to
+ * pass in a list of protocol headers with lookup information along with masks
+ * that determine which words are valid from the given protocol header.
+ * rinfo describes other information related to this rule such as forwarding
+ * IDs, priority of this rule, etc.
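+ *
+ * A minimal caller-side sketch (illustrative only; 'mac' and 'vsi' are
+ * hypothetical placeholders, not values mandated by this API):
+ *
+ *	struct ice_adv_lkup_elem lkup = {};
+ *	struct ice_adv_rule_info rinfo = {};
+ *	struct ice_rule_query_data added;
+ *
+ *	lkup.type = ICE_MAC_OFOS;
+ *	ether_addr_copy(lkup.h_u.eth_hdr.dst_addr, mac);
+ *	eth_broadcast_addr(lkup.m_u.eth_hdr.dst_addr);
+ *	rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ *	rinfo.sw_act.vsi_handle = vsi->idx;
+ *	rinfo.rx = true;
+ *	status = ice_add_adv_rule(hw, &lkup, 1, &rinfo, &added);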
+ */
+enum ice_status
+ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
+ struct ice_rule_query_data *added_entry)
+{
+ struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
+ u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
+ const struct ice_dummy_pkt_offsets *pkt_offsets;
+ struct ice_aqc_sw_rules_elem *s_rule = NULL;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ enum ice_status status;
+ const u8 *pkt = NULL;
+ u16 word_cnt;
+ u32 act = 0;
+ u8 q_rgn;
+
+ /* Initialize profile to result index bitmap */
+ if (!hw->switch_info->prof_res_bm_init) {
+ hw->switch_info->prof_res_bm_init = 1;
+ ice_init_prof_result_bm(hw);
+ }
+
+ if (!lkups_cnt)
+ return ICE_ERR_PARAM;
+
+ /* get # of words we need to match */
+ word_cnt = 0;
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 j, *ptr;
+
+ ptr = (u16 *)&lkups[i].m_u;
+ for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
+ if (ptr[j] != 0)
+ word_cnt++;
+ }
+
+ if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
+ return ICE_ERR_PARAM;
+
+ /* make sure that we can locate a dummy packet */
+ ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
+ &pkt_offsets);
+ if (!pkt) {
+ status = ICE_ERR_PARAM;
+ goto err_ice_add_adv_rule;
+ }
+
+ if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
+ return ICE_ERR_CFG;
+
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ rinfo->sw_act.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, vsi_handle);
+ if (rinfo->sw_act.flag & ICE_FLTR_TX)
+ rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
+ if (status)
+ return status;
+ m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
+ if (m_entry) {
+		/* The rule already exists; add the new VSI to it. If the
+		 * rule is not yet using a VSI list, create one holding the
+		 * existing and new VSI IDs and repoint the rule at it;
+		 * otherwise just add the new VSI to the existing list.
+		 * Either way vsi_count is incremented.
+		 */
+ status = ice_adv_add_update_vsi_list(hw, m_entry,
+ &m_entry->rule_info,
+ rinfo);
+ if (added_entry) {
+ added_entry->rid = rid;
+ added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
+ added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
+ }
+ return status;
+ }
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ if (!rinfo->flags_info.act_valid) {
+ act |= ICE_SINGLE_ACT_LAN_ENABLE;
+ act |= ICE_SINGLE_ACT_LB_ENABLE;
+ } else {
+ act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_LB_ENABLE);
+ }
+
+ switch (rinfo->sw_act.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
+ ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ case ICE_FWD_TO_Q:
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ break;
+ case ICE_FWD_TO_QGRP:
+ q_rgn = rinfo->sw_act.qgrp_size > 0 ?
+ (u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
+ ICE_SINGLE_ACT_Q_REGION_M;
+ break;
+ case ICE_DROP_PACKET:
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ default:
+ status = ICE_ERR_CFG;
+ goto err_ice_add_adv_rule;
+ }
+
+ /* set the rule LOOKUP type based on caller specified 'Rx'
+ * instead of hardcoding it to be either LOOKUP_TX/RX
+ *
+ * for 'Rx' set the source to be the port number
+ * for 'Tx' set the source to be the source HW VSI number (determined
+ * by caller)
+ */
+ if (rinfo->rx) {
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->pdata.lkup_tx_rx.src =
+ cpu_to_le16(hw->port_info->lport);
+ } else {
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
+ }
+
+ s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
+ s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+
+ status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
+ pkt_len, pkt_offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+
+ if (rinfo->tun_type != ICE_NON_TUN) {
+ status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
+ s_rule->pdata.lkup_tx_rx.hdr,
+ pkt_offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+ }
+
+ status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
+ rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
+ NULL);
+ if (status)
+ goto err_ice_add_adv_rule;
+ adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(struct ice_adv_fltr_mgmt_list_entry),
+ GFP_KERNEL);
+ if (!adv_fltr) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_adv_rule;
+ }
+
+ adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
+ lkups_cnt * sizeof(*lkups), GFP_KERNEL);
+ if (!adv_fltr->lkups) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_adv_rule;
+ }
+
+ adv_fltr->lkups_cnt = lkups_cnt;
+ adv_fltr->rule_info = *rinfo;
+ adv_fltr->rule_info.fltr_rule_id =
+ le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ sw = hw->switch_info;
+ sw->recp_list[rid].adv_rule = true;
+ rule_head = &sw->recp_list[rid].filt_rules;
+
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ adv_fltr->vsi_count = 1;
+
+ /* Add rule entry to book keeping list */
+ list_add(&adv_fltr->list_entry, rule_head);
+ if (added_entry) {
+ added_entry->rid = rid;
+ added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
+ added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
+ }
+err_ice_add_adv_rule:
+ if (status && adv_fltr) {
+ devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), adv_fltr);
+ }
+
+ kfree(s_rule);
+
+ return status;
+}
+
/**
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
@@ -2831,6 +5420,236 @@ end:
}
/**
+ * ice_adv_rem_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle of the VSI to remove
+ * @fm_list: filter management entry for which the VSI list management needs to
+ * be done
+ */
+static enum ice_status
+ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_adv_fltr_mgmt_list_entry *fm_list)
+{
+ struct ice_vsi_list_map_info *vsi_list_info;
+ enum ice_sw_lkup_type lkup_type;
+ enum ice_status status;
+ u16 vsi_list_id;
+
+ if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
+ fm_list->vsi_count == 0)
+ return ICE_ERR_PARAM;
+
+ /* A rule with the VSI being removed does not exist */
+ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ lkup_type = ICE_SW_LKUP_LAST;
+ vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ fm_list->vsi_count--;
+ clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
+ vsi_list_info = fm_list->vsi_list_info;
+ if (fm_list->vsi_count == 1) {
+ struct ice_fltr_info tmp_fltr;
+ u16 rem_vsi_handle;
+
+ rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (!ice_is_vsi_valid(hw, rem_vsi_handle))
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* Make sure VSI list is empty before removing it below */
+ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
+ vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ memset(&tmp_fltr, 0, sizeof(tmp_fltr));
+ tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
+ tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
+ fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
+
+		/* Update the previous switch rule of "MAC fwd to VSI list"
+		 * to "MAC forward to VSI", as only one VSI remains in the list
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ tmp_fltr.fwd_id.hw_vsi_id, status);
+ return status;
+ }
+ fm_list->vsi_list_info->ref_cnt--;
+
+ /* Remove the VSI list since it is no longer used */
+ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
+ vsi_list_id, status);
+ return status;
+ }
+
+ list_del(&vsi_list_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
+ fm_list->vsi_list_info = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_adv_rule - removes existing advanced switch rule
+ * @hw: pointer to the hardware structure
+ * @lkups: information on the words that needs to be looked up. All words
+ * together makes one recipe
+ * @lkups_cnt: num of entries in the lkups array
+ * @rinfo: Pointer to the rule information for the rule to be removed
+ *
+ * This function can be used to remove 1 rule at a time. The lkups is
+ * used to describe all the words that form the "lookup" portion of the
+ * rule. These words can span multiple protocols. Callers to this function
+ * need to pass in a list of protocol headers with lookup information along
+ * with masks that determine which words are valid from the given protocol
+ * header. rinfo describes other information related to this rule such as
+ * forwarding IDs, priority of this rule, etc.
+ */
+static enum ice_status
+ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_elem;
+ struct ice_prot_lkup_ext lkup_exts;
+ enum ice_status status = 0;
+ bool remove_rule = false;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 i, rid, vsi_handle;
+
+ memset(&lkup_exts, 0, sizeof(lkup_exts));
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 count;
+
+ if (lkups[i].type >= ICE_PROTOCOL_LAST)
+ return ICE_ERR_CFG;
+
+ count = ice_fill_valid_words(&lkups[i], &lkup_exts);
+ if (!count)
+ return ICE_ERR_CFG;
+ }
+
+ /* Create any special protocol/offset pairs, such as looking at tunnel
+ * bits by extracting metadata
+ */
+ status = ice_add_special_words(rinfo, &lkup_exts);
+ if (status)
+ return status;
+
+ rid = ice_find_recp(hw, &lkup_exts);
+	/* If we did not find a recipe that matches the existing criteria */
+ if (rid == ICE_MAX_NUM_RECIPES)
+ return ICE_ERR_PARAM;
+
+ rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
+ list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
+ /* the rule is already removed */
+ if (!list_elem)
+ return 0;
+ mutex_lock(rule_lock);
+ if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
+ remove_rule = true;
+ } else if (list_elem->vsi_count > 1) {
+ remove_rule = false;
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ } else {
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ if (status) {
+ mutex_unlock(rule_lock);
+ return status;
+ }
+ if (list_elem->vsi_count == 0)
+ remove_rule = true;
+ }
+ mutex_unlock(rule_lock);
+ if (remove_rule) {
+ struct ice_aqc_sw_rules_elem *s_rule;
+ u16 rule_buf_sz;
+
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ s_rule->pdata.lkup_tx_rx.act = 0;
+ s_rule->pdata.lkup_tx_rx.index =
+ cpu_to_le16(list_elem->rule_info.fltr_rule_id);
+ s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
+ rule_buf_sz, 1,
+ ice_aqc_opc_remove_sw_rules, NULL);
+ if (!status || status == ICE_ERR_DOES_NOT_EXIST) {
+ struct ice_switch_info *sw = hw->switch_info;
+
+ mutex_lock(rule_lock);
+ list_del(&list_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
+ devm_kfree(ice_hw_to_dev(hw), list_elem);
+ mutex_unlock(rule_lock);
+ if (list_empty(&sw->recp_list[rid].filt_rules))
+ sw->recp_list[rid].adv_rule = false;
+ }
+ kfree(s_rule);
+ }
+ return status;
+}
+
+/**
+ * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
+ * @hw: pointer to the hardware structure
+ * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
+ *
+ * This function is used to remove 1 rule at a time. The removal is based on
+ * the remove_entry parameter. This function will remove the rule for a given
+ * vsi_handle with a given rule_id, both of which are passed in remove_entry.
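+ *
+ * Typical pairing (illustrative; 'fltr' is a hypothetical bookkeeping
+ * struct holding the IDs saved when the rule was added):
+ *
+ *	struct ice_rule_query_data entry = {
+ *		.rid = fltr->rid,
+ *		.rule_id = fltr->rule_id,
+ *		.vsi_handle = fltr->dest_id,
+ *	};
+ *
+ *	status = ice_rem_adv_rule_by_id(hw, &entry);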
+ */
+enum ice_status
+ice_rem_adv_rule_by_id(struct ice_hw *hw,
+ struct ice_rule_query_data *remove_entry)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct list_head *list_head;
+ struct ice_adv_rule_info rinfo;
+ struct ice_switch_info *sw;
+
+ sw = hw->switch_info;
+ if (!sw->recp_list[remove_entry->rid].recp_created)
+ return ICE_ERR_PARAM;
+ list_head = &sw->recp_list[remove_entry->rid].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (list_itr->rule_info.fltr_rule_id ==
+ remove_entry->rule_id) {
+ rinfo = list_itr->rule_info;
+ rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
+ return ice_rem_adv_rule(hw, list_itr->lkups,
+ list_itr->lkups_cnt, &rinfo);
+ }
+ }
+ /* either list is empty or unable to find rule */
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
* ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
@@ -2868,12 +5687,15 @@ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
if (!sw)
return;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
struct list_head *l_head;
l_head = &sw->recp_list[i].filt_replay_rules;
- ice_rem_sw_rule_info(hw, l_head);
+ if (!sw->recp_list[i].adv_rule)
+ ice_rem_sw_rule_info(hw, l_head);
+ else
+ ice_rem_adv_rule_info(hw, l_head);
}
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index c5db8d56133f..d8a38906f16f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,6 +14,9 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
+
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
@@ -122,30 +125,124 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
+struct ice_adv_lkup_elem {
+ enum ice_protocol_type type;
+ union ice_prot_hdr h_u; /* Header values */
+ union ice_prot_hdr m_u; /* Mask of header values to match */
+};
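+/* Example (hypothetical values): exact-match on destination IPv4 10.0.0.1
+ * by pairing a header value with an all-ones mask:
+ *
+ *	struct ice_adv_lkup_elem lkup = { .type = ICE_IPV4_OFOS };
+ *
+ *	lkup.h_u.ipv4_hdr.dst_addr = cpu_to_be32(0x0a000001);
+ *	lkup.m_u.ipv4_hdr.dst_addr = cpu_to_be32(0xffffffff);
+ */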
+
+struct ice_sw_act_ctrl {
+ /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
+ u16 src;
+ u16 flag;
+ enum ice_sw_fwd_act_type fltr_act;
+ /* Depending on filter action */
+ union {
+ /* This is a queue ID in case of ICE_FWD_TO_Q and starting
+ * queue ID in case of ICE_FWD_TO_QGRP.
+ */
+ u16 q_id:11;
+ u16 vsi_id:10;
+ u16 hw_vsi_id:10;
+ u16 vsi_list_id:10;
+ } fwd_id;
+ /* software VSI handle */
+ u16 vsi_handle;
+ u8 qgrp_size;
+};
+
+struct ice_rule_query_data {
+ /* Recipe ID for which the requested rule was added */
+ u16 rid;
+ /* Rule ID that was added or is supposed to be removed */
+ u16 rule_id;
+ /* vsi_handle for which Rule was added or is supposed to be removed */
+ u16 vsi_handle;
+};
+
+/* This structure allows passing info about lb_en and lan_en
+ * flags to ice_add_adv_rule. Values in act are used
+ * only if act_valid is set to true, otherwise default
+ * values are used.
+ */
+struct ice_adv_rule_flags_info {
+ u32 act;
+ u8 act_valid; /* indicate if flags in act are valid */
+};
+
+struct ice_adv_rule_info {
+ enum ice_sw_tunnel_type tun_type;
+ struct ice_sw_act_ctrl sw_act;
+ u32 priority;
+ u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
+ u16 fltr_rule_id;
+ struct ice_adv_rule_flags_info flags_info;
+};
+
+/* A collection of one or more four-word recipes */
struct ice_sw_recipe {
- struct list_head l_entry;
+ /* For a chained recipe the root recipe is what should be used for
+ * programming rules
+ */
+ u8 is_root;
+ u8 root_rid;
+ u8 recp_created;
- /* To protect modification of filt_rule list
- * defined below
+ /* Number of extraction words */
+ u8 n_ext_words;
+ /* Protocol ID and Offset pair (extraction word) to describe the
+ * recipe
*/
- struct mutex filt_rule_lock;
+ struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
+ u16 word_masks[ICE_MAX_CHAIN_WORDS];
- /* List of type ice_fltr_mgmt_list_entry */
+	/* if this recipe is a collection of other recipes */
+ u8 big_recp;
+
+	/* if this recipe is part of another bigger recipe, the chain index
+ * corresponding to this recipe
+ */
+ u8 chain_idx;
+
+	/* if this recipe is a collection of other recipes, the count of those
+	 * recipes and the recipe IDs of those recipes
+ */
+ u8 n_grp_count;
+
+	/* Bit map specifying the IDs associated with this group of recipes */
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+
+ enum ice_sw_tunnel_type tun_type;
+
+ /* List of type ice_fltr_mgmt_list_entry or adv_rule */
+ u8 adv_rule;
struct list_head filt_rules;
struct list_head filt_replay_rules;
- /* linked list of type recipe_list_entry */
- struct list_head rg_list;
- /* linked list of type ice_sw_fv_list_entry*/
+ struct mutex filt_rule_lock; /* protect filter rule structure */
+
+ /* Profiles this recipe should be associated with */
struct list_head fv_list;
- struct ice_aqc_recipe_data_elem *r_buf;
- u8 recp_count;
- u8 root_rid;
- u8 num_profs;
- u8 *prof_ids;
- /* recipe bitmap: what all recipes makes this recipe */
- DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ /* Profiles this recipe is associated with */
+ u8 num_profs, *prof_ids;
+
+ /* Bit map for possible result indexes */
+ DECLARE_BITMAP(res_idxs, ICE_MAX_FV_WORDS);
+
+	/* This allows the user to specify the recipe priority.
+	 * For now, this becomes 'fwd_priority' when the recipe
+	 * is created; recipes can usually have 'fwd' and 'join'
+ * priority.
+ */
+ u8 priority;
+
+ struct list_head rg_list;
+
+ /* AQ buffer associated with this recipe */
+ struct ice_aqc_recipe_data_elem *root_buf;
+ /* This struct saves the fv_words for a given lookup */
+ struct ice_prot_lkup_ext lkup_exts;
};
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
@@ -183,6 +280,16 @@ struct ice_fltr_mgmt_list_entry {
u8 counter_index;
};
+struct ice_adv_fltr_mgmt_list_entry {
+ struct list_head list_entry;
+
+ struct ice_adv_lkup_elem *lkups;
+ struct ice_adv_rule_info rule_info;
+ u16 lkups_cnt;
+ struct ice_vsi_list_map_info *vsi_list_info;
+ u16 vsi_count;
+};
+
enum ice_promisc_flags {
ICE_PROMISC_UCAST_RX = 0x1,
ICE_PROMISC_UCAST_TX = 0x2,
@@ -218,6 +325,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
/* Switch/bridge related commands */
+enum ice_status
+ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
+ struct ice_rule_query_data *added_entry);
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
@@ -227,6 +338,8 @@ enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
+bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
+bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
@@ -245,10 +358,19 @@ enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
+enum ice_status
+ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_rem_adv_rule_by_id(struct ice_hw *hw,
+ struct ice_rule_query_data *remove_entry);
+
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
+enum ice_status
+ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
+ u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
new file mode 100644
index 000000000000..e5d23feb6701
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -0,0 +1,1369 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_tc_lib.h"
+#include "ice_fltr.h"
+#include "ice_lib.h"
+#include "ice_protocol_type.h"
+
+/**
+ * ice_tc_count_lkups - determine lookup count for switch filter
+ * @flags: TC-flower flags
+ * @headers: Pointer to TC flower filter header structure
+ * @fltr: Pointer to outer TC filter structure
+ *
+ * Determine lookup count based on TC flower input for switch filter.
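+ *
+ * The count returned here must stay in sync with the number of elements
+ * ice_tc_fill_rules() fills in; callers compare the two and reject the
+ * filter on mismatch.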
+ */
+static int
+ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int lkups_cnt = 0;
+
+ if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
+ lkups_cnt++;
+
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
+ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
+ lkups_cnt++;
+
+ /* currently inner etype filter isn't supported */
+ if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
+ fltr->tunnel_type == TNL_LAST)
+ lkups_cnt++;
+
+ /* are MAC fields specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
+ lkups_cnt++;
+
+ /* is VLAN specified? */
+ if (flags & ICE_TC_FLWR_FIELD_VLAN)
+ lkups_cnt++;
+
+ /* are IPv[4|6] fields specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
+ lkups_cnt++;
+
+ /* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
+ ICE_TC_FLWR_FIELD_SRC_L4_PORT))
+ lkups_cnt++;
+
+ return lkups_cnt;
+}
+
+static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
+{
+ return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
+}
+
+static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
+{
+ return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
+}
+
+static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
+{
+ return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
+}
+
+static enum ice_protocol_type
+ice_proto_type_from_l4_port(bool inner, u16 ip_proto)
+{
+ if (inner) {
+ switch (ip_proto) {
+ case IPPROTO_UDP:
+ return ICE_UDP_ILOS;
+ }
+ } else {
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ return ICE_TCP_IL;
+ case IPPROTO_UDP:
+ return ICE_UDP_OF;
+ }
+ }
+
+ return 0;
+}
+
+static enum ice_protocol_type
+ice_proto_type_from_tunnel(enum ice_tunnel_type type)
+{
+ switch (type) {
+ case TNL_VXLAN:
+ return ICE_VXLAN;
+ case TNL_GENEVE:
+ return ICE_GENEVE;
+ case TNL_GRETAP:
+ return ICE_NVGRE;
+ default:
+ return 0;
+ }
+}
+
+static enum ice_sw_tunnel_type
+ice_sw_type_from_tunnel(enum ice_tunnel_type type)
+{
+ switch (type) {
+ case TNL_VXLAN:
+ return ICE_SW_TUN_VXLAN;
+ case TNL_GENEVE:
+ return ICE_SW_TUN_GENEVE;
+ case TNL_GRETAP:
+ return ICE_SW_TUN_NVGRE;
+ default:
+ return ICE_NON_TUN;
+ }
+}
+
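+/**
+ * ice_tc_fill_tunnel_outer - fill outer/tunnel header lookup elements
+ * @flags: TC-flower flags
+ * @fltr: Pointer to TC flower filter
+ * @list: list of advanced lookup elements to fill
+ *
+ * Returns the number of lookup elements filled in.
+ */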
+static int
+ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
+ struct ice_adv_lkup_elem *list)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
+ int i = 0;
+
+ if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
+ u32 tenant_id;
+
+ list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
+ switch (fltr->tunnel_type) {
+ case TNL_VXLAN:
+ case TNL_GENEVE:
+ tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
+ list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
+ memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
+ i++;
+ break;
+ case TNL_GRETAP:
+ list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
+ memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4);
+ i++;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
+ list[i].type = ice_proto_type_from_ipv4(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
+ list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
+ list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
+ list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
+ list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
+ }
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
+ list[i].type = ice_proto_type_from_ipv6(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
+ memcpy(&list[i].h_u.ipv6_hdr.src_addr,
+ &hdr->l3_key.src_ipv6_addr,
+ sizeof(hdr->l3_key.src_ipv6_addr));
+ memcpy(&list[i].m_u.ipv6_hdr.src_addr,
+ &hdr->l3_mask.src_ipv6_addr,
+ sizeof(hdr->l3_mask.src_ipv6_addr));
+ }
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
+ memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
+ &hdr->l3_key.dst_ipv6_addr,
+ sizeof(hdr->l3_key.dst_ipv6_addr));
+ memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
+ &hdr->l3_mask.dst_ipv6_addr,
+ sizeof(hdr->l3_mask.dst_ipv6_addr));
+ }
+ i++;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) {
+ list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto);
+ list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
+ list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
+ i++;
+ }
+
+ return i;
+}
+
+/**
+ * ice_tc_fill_rules - fill filter rules based on TC fltr
+ * @hw: pointer to HW structure
+ * @flags: tc flower field flags
+ * @tc_fltr: pointer to TC flower filter
+ * @list: list of advance rule elements
+ * @rule_info: pointer to information about rule
+ * @l4_proto: pointer to information such as L4 proto type
+ *
+ * Fill ice_adv_lkup_elem list based on TC flower flags and
+ * TC flower headers. This list should be used to add
+ * advance filter in hardware.
+ */
+static int
+ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
+ struct ice_tc_flower_fltr *tc_fltr,
+ struct ice_adv_lkup_elem *list,
+ struct ice_adv_rule_info *rule_info,
+ u16 *l4_proto)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
+ bool inner = false;
+ int i = 0;
+
+ rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
+ if (tc_fltr->tunnel_type != TNL_LAST) {
+ i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
+
+ headers = &tc_fltr->inner_headers;
+ inner = true;
+ } else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+ list[i].type = ICE_ETYPE_OL;
+ list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
+ list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_SRC_MAC)) {
+ struct ice_tc_l2_hdr *l2_key, *l2_mask;
+
+ l2_key = &headers->l2_key;
+ l2_mask = &headers->l2_mask;
+
+ list[i].type = ice_proto_type_from_mac(inner);
+ if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
+ ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
+ l2_key->dst_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
+ l2_mask->dst_mac);
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
+ ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
+ l2_key->src_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
+ l2_mask->src_mac);
+ }
+ i++;
+ }
+
+ /* copy VLAN info */
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].type = ICE_VLAN_OFOS;
+ list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ i++;
+ }
+
+ /* copy L3 (IPv[4|6]: src, dest) address */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_SRC_IPV4)) {
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+ list[i].type = ice_proto_type_from_ipv4(inner);
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+ if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
+ list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
+ list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
+ list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
+ list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
+ }
+ i++;
+ } else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_SRC_IPV6)) {
+ struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+ list[i].type = ice_proto_type_from_ipv6(inner);
+ ipv6_hdr = &list[i].h_u.ipv6_hdr;
+ ipv6_mask = &list[i].m_u.ipv6_hdr;
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+
+ if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
+ memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
+ sizeof(l3_key->dst_ipv6_addr));
+ memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
+ sizeof(l3_mask->dst_ipv6_addr));
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
+ memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
+ sizeof(l3_key->src_ipv6_addr));
+ memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
+ sizeof(l3_mask->src_ipv6_addr));
+ }
+ i++;
+ }
+
+ /* copy L4 (src, dest) port */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
+ ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
+ struct ice_tc_l4_hdr *l4_key, *l4_mask;
+
+ list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto);
+ l4_key = &headers->l4_key;
+ l4_mask = &headers->l4_mask;
+
+ if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
+ list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
+ list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
+ list[i].h_u.l4_hdr.src_port = l4_key->src_port;
+ list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
+ }
+ i++;
+ }
+
+ return i;
+}
+
+/**
+ * ice_tc_tun_get_type - get the tunnel type
+ * @tunnel_dev: ptr to tunnel device
+ *
+ * This function detects the appropriate tunnel_type if the specified device
+ * is a tunnel device such as VXLAN/Geneve
+ */
+static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
+{
+ if (netif_is_vxlan(tunnel_dev))
+ return TNL_VXLAN;
+ if (netif_is_geneve(tunnel_dev))
+ return TNL_GENEVE;
+ if (netif_is_gretap(tunnel_dev) ||
+ netif_is_ip6gretap(tunnel_dev))
+ return TNL_GRETAP;
+ return TNL_LAST;
+}
+
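+/**
+ * ice_is_tunnel_supported - check if netdevice is a supported tunnel type
+ * @dev: ptr to network device being checked
+ */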
+bool ice_is_tunnel_supported(struct net_device *dev)
+{
+ return ice_tc_tun_get_type(dev) != TNL_LAST;
+}
+
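+/**
+ * ice_eswitch_tc_parse_action - parse TC flower action for switchdev mode
+ * @fltr: Pointer to TC flower filter
+ * @act: TC flow action entry to parse
+ */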
+static int
+ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ struct ice_repr *repr;
+
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ fltr->action.fltr_act = ICE_DROP_PACKET;
+ break;
+
+ case FLOW_ACTION_REDIRECT:
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+
+ if (ice_is_port_repr_netdev(act->dev)) {
+ repr = ice_netdev_to_repr(act->dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else if (netif_is_ice(act->dev) ||
+ ice_is_tunnel_supported(act->dev)) {
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ break;
+
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
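+/**
+ * ice_eswitch_add_tc_fltr - add a TC flower filter in switchdev mode
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to TC flower filter structure
+ */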
+static int
+ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_rule_query_data rule_added;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_adv_lkup_elem *list;
+ u32 flags = fltr->flags;
+ enum ice_status status;
+ int lkups_cnt;
+ int ret = 0;
+ int i;
+
+ if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
+ return -EOPNOTSUPP;
+ }
+
+ lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
+ if (i != lkups_cnt) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+	/* egress traffic is always redirected to the uplink */
+ if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+ fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
+
+ rule_info.sw_act.fltr_act = fltr->action.fltr_act;
+ if (fltr->action.fltr_act != ICE_DROP_PACKET)
+ rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
+	/* For now, set the priority to the highest; it also becomes the
+	 * priority of the recipe that gets created as a result of the new
+	 * extraction sequence based on the input set.
+	 * Priority '7' is the max value for a switch recipe; a higher
+	 * number gives the rule higher precedence during rule evaluation.
+ */
+ rule_info.priority = 7;
+
+ if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.rx = false;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ rule_info.flags_info.act_valid = true;
+ }
+
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = fltr->cookie;
+
+ status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
+ ret = -EINVAL;
+ goto exit;
+ } else if (status) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* store the output params, which are needed later for removing
+ * advanced switch filter
+ */
+ fltr->rid = rule_added.rid;
+ fltr->rule_id = rule_added.rule_id;
+
+exit:
+ kfree(list);
+ return ret;
+}
+
+/**
+ * ice_add_tc_flower_adv_fltr - add appropriate filter rules
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to TC flower filter structure
+ *
+ * Add appropriate filter rules based on the filter parameters, using the
+ * advanced recipes supported by the OS package.
+ */
+static int
+ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
+ struct ice_tc_flower_fltr *tc_fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
+ struct ice_adv_rule_info rule_info = {0};
+ struct ice_rule_query_data rule_added;
+ struct ice_adv_lkup_elem *list;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 flags = tc_fltr->flags;
+ struct ice_vsi *ch_vsi;
+ struct device *dev;
+ u16 lkups_cnt = 0;
+ u16 l4_proto = 0;
+ int ret = 0;
+ u16 i = 0;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
+ return -EOPNOTSUPP;
+ }
+
+ /* get the channel (aka ADQ VSI) */
+ if (tc_fltr->dest_vsi)
+ ch_vsi = tc_fltr->dest_vsi;
+ else
+ ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];
+
+ lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
+ if (i != lkups_cnt) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
+ if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
+ if (!ch_vsi) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ rule_info.sw_act.vsi_handle = ch_vsi->idx;
+ rule_info.priority = 7;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
+ tc_fltr->action.tc_class,
+ rule_info.sw_act.vsi_handle, lkups_cnt);
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.rx = false;
+ }
+
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = tc_fltr->cookie;
+
+ ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+	if (ret == ICE_ERR_ALREADY_EXISTS) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter because it already exist");
+ ret = -EINVAL;
+ goto exit;
+ } else if (ret) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter due to error");
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* store the output params, which are needed later for removing
+ * advanced switch filter
+ */
+ tc_fltr->rid = rule_added.rid;
+ tc_fltr->rule_id = rule_added.rule_id;
+ if (tc_fltr->action.tc_class > 0 && ch_vsi) {
+		/* For PF ADQ, the VSI type is set as ICE_VSI_CHNL, but
+		 * for a PF ADQ filter it is not yet set in tc_fltr,
+		 * hence store the dest_vsi pointer in tc_fltr
+ */
+ if (ch_vsi->type == ICE_VSI_CHNL)
+ tc_fltr->dest_vsi = ch_vsi;
+ /* keep track of advanced switch filter for
+ * destination VSI (channel VSI)
+ */
+ ch_vsi->num_chnl_fltr++;
+ /* in this case, dest_id is VSI handle (sw handle) */
+ tc_fltr->dest_id = rule_added.vsi_handle;
+
+ /* keeps track of channel filters for PF VSI */
+ if (vsi->type == ICE_VSI_PF &&
+ (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
+ pf->num_dmac_chnl_fltrs++;
+ }
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
+ lkups_cnt, flags,
+ tc_fltr->action.tc_class, rule_added.rid,
+ rule_added.rule_id, rule_added.vsi_handle);
+exit:
+ kfree(list);
+ return ret;
+}
+
+/**
+ * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel IPv4 address
+ */
+static int
+ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
+{
+ if (match->key->dst) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
+ headers->l3_key.dst_ipv4 = match->key->dst;
+ headers->l3_mask.dst_ipv4 = match->mask->dst;
+ }
+ if (match->key->src) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
+ headers->l3_key.src_ipv4 = match->key->src;
+ headers->l3_mask.src_ipv4 = match->mask->src;
+ }
+ return 0;
+}
+
+/**
+ * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel IPv6 address
+ */
+static int
+ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
+{
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+	/* src and dest IPv6 addresses should not be LOOPBACK
+ * (0:0:0:0:0:0:0:1), which can be represented as ::1
+ */
+ if (ipv6_addr_loopback(&match->key->dst) ||
+ ipv6_addr_loopback(&match->key->src)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
+ return -EINVAL;
+ }
+	/* error if both src and dest IPv6 address masks are unspecified */
+ if (ipv6_addr_any(&match->mask->dst) &&
+ ipv6_addr_any(&match->mask->src)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
+ return -EINVAL;
+ }
+ if (!ipv6_addr_any(&match->mask->dst)) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
+ }
+ if (!ipv6_addr_any(&match->mask->src)) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
+ }
+
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+
+ if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_SRC_IPV6)) {
+ memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
+ sizeof(match->key->src.s6_addr));
+ memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
+ sizeof(match->mask->src.s6_addr));
+ }
+ if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_DEST_IPV6)) {
+ memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
+ sizeof(match->key->dst.s6_addr));
+ memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
+ sizeof(match->mask->dst.s6_addr));
+ }
+
+ return 0;
+}
+
+/**
+ * ice_tc_set_port - Parse ports from TC flower filter
+ * @match: Flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel port
+ */
+static int
+ice_tc_set_port(struct flow_match_ports match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
+{
+ if (match.key->dst) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+ headers->l4_key.dst_port = match.key->dst;
+ headers->l4_mask.dst_port = match.mask->dst;
+ }
+ if (match.key->src) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+ headers->l4_key.src_port = match.key->src;
+ headers->l4_mask.src_port = match.mask->src;
+ }
+ return 0;
+}
+
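+/**
+ * ice_get_tunnel_device - find the tunnel device for a flow rule, if any
+ * @dev: ptr to the device the filter was requested on
+ * @rule: flow rule to scan for a redirect to a tunnel device
+ */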
+static struct net_device *
+ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
+{
+ struct flow_action_entry *act;
+ int i;
+
+ if (ice_is_tunnel_supported(dev))
+ return dev;
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id == FLOW_ACTION_REDIRECT &&
+ ice_is_tunnel_supported(act->dev))
+ return act->dev;
+ }
+
+ return NULL;
+}
+
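+/**
+ * ice_parse_tunnel_attr - parse tunnel attributes from TC flower filter
+ * @dev: Pointer to tunnel device
+ * @rule: Pointer to flow rule
+ * @fltr: Pointer to filter structure
+ */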
+static int
+ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
+ struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct flow_match_control enc_control;
+
+ fltr->tunnel_type = ice_tc_tun_get_type(dev);
+ headers->l3_key.ip_proto = IPPROTO_UDP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid enc_keyid;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+
+ if (!enc_keyid.mask->keyid ||
+ enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
+ return -EINVAL;
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
+ fltr->tenant_id = enc_keyid.key->keyid;
+ }
+
+ flow_rule_match_enc_control(rule, &enc_control);
+
+ if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
+ if (ice_tc_set_ipv4(&match, fltr, headers, true))
+ return -EINVAL;
+ } else if (enc_control.key->addr_type ==
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
+ if (ice_tc_set_ipv6(&match, fltr, headers, true))
+ return -EINVAL;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_enc_ip(rule, &match);
+ headers->l3_key.tos = match.key->tos;
+ headers->l3_key.ttl = match.key->ttl;
+ headers->l3_mask.tos = match.mask->tos;
+ headers->l3_mask.ttl = match.mask->ttl;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_enc_ports(rule, &match);
+ if (ice_tc_set_port(match, fltr, headers, true))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_parse_cls_flower - Parse TC flower filters provided by kernel
+ * @filter_dev: Pointer to device on which filter is being added
+ * @vsi: Pointer to the VSI
+ * @f: Pointer to struct flow_cls_offload
+ * @fltr: Pointer to filter structure
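+ *
+ * For instance, "flower ip_proto tcp dst_ip 192.168.1.1 dst_port 80"
+ * parses to ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_DEST_L4_PORT
+ * (plus ICE_TC_FLWR_FIELD_ETH_TYPE_ID for an explicit "protocol ip")
+ * with l3_key.ip_proto == IPPROTO_TCP.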
+ */
+static int
+ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
+ struct flow_cls_offload *f,
+ struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
+ struct flow_dissector *dissector;
+ struct net_device *tunnel_dev;
+
+ dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
+ return -EOPNOTSUPP;
+ }
+
+ tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
+ if (tunnel_dev) {
+ int err;
+
+ filter_dev = tunnel_dev;
+
+ err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
+ return err;
+ }
+
+ /* header pointers should point to the inner headers; outer
+ * headers were already set by ice_parse_tunnel_attr
+ */
+ headers = &fltr->inner_headers;
+ } else if (dissector->used_keys &
+ (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
+ return -EOPNOTSUPP;
+ } else {
+ fltr->tunnel_type = TNL_LAST;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ } else {
+ fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+ }
+
+ headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
+ headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
+ headers->l3_key.ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.key->dst)) {
+ ether_addr_copy(headers->l2_key.dst_mac,
+ match.key->dst);
+ ether_addr_copy(headers->l2_mask.dst_mac,
+ match.mask->dst);
+ fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+ }
+
+ if (!is_zero_ether_addr(match.key->src)) {
+ ether_addr_copy(headers->l2_key.src_mac,
+ match.key->src);
+ ether_addr_copy(headers->l2_mask.src_mac,
+ match.mask->src);
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
+ is_vlan_dev(filter_dev)) {
+ struct flow_dissector_key_vlan mask;
+ struct flow_dissector_key_vlan key;
+ struct flow_match_vlan match;
+
+ if (is_vlan_dev(filter_dev)) {
+ match.key = &key;
+ match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
+ match.key->vlan_priority = 0;
+ match.mask = &mask;
+ memset(match.mask, 0xff, sizeof(*match.mask));
+ match.mask->vlan_priority = 0;
+ } else {
+ flow_rule_match_vlan(rule, &match);
+ }
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
+ return -EINVAL;
+ }
+ }
+
+ headers->vlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
+ if (match.mask->vlan_priority)
+ headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (ice_tc_set_ipv4(&match, fltr, headers, false))
+ return -EINVAL;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if (ice_tc_set_ipv6(&match, fltr, headers, false))
+ return -EINVAL;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ if (ice_tc_set_port(match, fltr, headers, false))
+ return -EINVAL;
+ switch (headers->l3_key.ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_add_switch_fltr - Add TC flower filters
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * Add filter in HW switch block
+ */
+static int
+ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
+ return -EOPNOTSUPP;
+
+ if (ice_is_eswitch_mode_switchdev(vsi->back))
+ return ice_eswitch_add_tc_fltr(vsi, fltr);
+
+ return ice_add_tc_flower_adv_fltr(vsi, fltr);
+}
+
+/**
+ * ice_handle_tclass_action - Support directing to a traffic class
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Support directing traffic to a traffic class
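+ * (e.g. "flower dst_ip 10.0.0.1 skip_sw hw_tc 2" arrives here with
+ * cls_flower->classid set; tc_classid_to_hwtc() maps it to tc 2 and
+ * the filter is forwarded to the channel VSI backing that class).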
+ */
+static int
+ice_handle_tclass_action(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
+ struct ice_vsi *main_vsi;
+
+ if (tc < 0) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
+ return -EINVAL;
+ }
+ if (!tc) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
+ return -EINVAL;
+ }
+
+ if (!(vsi->all_enatc & BIT(tc))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existence destination");
+ return -EINVAL;
+ }
+
+ /* Redirect to a TC class or Queue Group */
+ main_vsi = ice_get_main_vsi(vsi->back);
+ if (!main_vsi || !main_vsi->netdev) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because of invalid netdevice");
+ return -EINVAL;
+ }
+
+ if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
+ (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_SRC_MAC))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
+ return -EOPNOTSUPP;
+ }
+
+ /* For ADQ, the filter must include a dest MAC address, otherwise
+ * unwanted packets with unrelated MAC addresses get delivered to ADQ
+ * VSIs as long as the remaining filter criteria are satisfied, such
+ * as dest IP address and dest/src L4 port. The following code handles
+ * two cases:
+ * 1. For non-tunnel, if the user specified MAC addresses, use them
+ * (meaning this code won't do anything)
+ * 2. For non-tunnel, if the user didn't specify a MAC address, add an
+ * implicit dest MAC equal to the lower netdev's active unicast MAC
+ * address
+ */
+ if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
+ ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
+ main_vsi->netdev->dev_addr);
+ eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
+ fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+ }
+
+ /* validate the specified dest MAC address; make sure it belongs
+ * either to the lower netdev or to one of its MACVLANs. MACVLAN MAC
+ * addresses are added as unicast MAC filters destined to the main
+ * VSI.
+ */
+ if (!ice_mac_fltr_exist(&main_vsi->back->hw,
+ fltr->outer_headers.l2_key.dst_mac,
+ main_vsi->idx)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
+ return -EINVAL;
+ }
+
+ /* Make sure VLAN is already added to main VSI, before allowing ADQ to
+ * add a VLAN based filter such as MAC + VLAN + L4 port.
+ */
+ if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
+ u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
+
+ if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
+ main_vsi->idx)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
+ return -EINVAL;
+ }
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ fltr->action.tc_class = tc;
+
+ return 0;
+}
+
+/**
+ * ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Parse the actions for a TC filter
+ */
+static int
+ice_parse_tc_flower_actions(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+ struct flow_action *flow_action = &rule->action;
+ struct flow_action_entry *act;
+ int i;
+
+ if (cls_flower->classid)
+ return ice_handle_tclass_action(vsi, cls_flower, fltr);
+
+ if (!flow_action_has_entries(flow_action))
+ return -EINVAL;
+
+ flow_action_for_each(i, act, flow_action) {
+ if (ice_is_eswitch_mode_switchdev(vsi->back)) {
+ int err = ice_eswitch_tc_parse_action(fltr, act);
+
+ if (err)
+ return err;
+ continue;
+ }
+ /* Allow only one rule per filter */
+
+ /* Drop action */
+ if (act->id == FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
+ return -EINVAL;
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ }
+ return 0;
+}
+
+/**
+ * ice_del_tc_fltr - deletes a filter from HW table
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * This function deletes a filter from HW table and manages book-keeping
+ */
+static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_rule_query_data rule_rem;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ rule_rem.rid = fltr->rid;
+ rule_rem.rule_id = fltr->rule_id;
+ rule_rem.vsi_handle = fltr->dest_id;
+ err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
+ if (err) {
+ if (err == ICE_ERR_DOES_NOT_EXIST) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
+ return -ENOENT;
+ }
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
+ return -EIO;
+ }
+
+ /* update advanced switch filter count for destination
+ * VSI if filter destination was VSI
+ */
+ if (fltr->dest_vsi) {
+ if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
+ fltr->dest_vsi->num_chnl_fltr--;
+
+ /* keeps track of channel filters for PF VSI */
+ if (vsi->type == ICE_VSI_PF &&
+ (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
+ pf->num_dmac_chnl_fltrs--;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_add_tc_fltr - adds a TC flower filter
+ * @netdev: Pointer to netdev
+ * @vsi: Pointer to VSI
+ * @f: Pointer to flower offload structure
+ * @__fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * This function parses TC-flower input fields, parses action,
+ * and adds a filter.
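+ * On any failure the partially-built filter is freed and *__fltr is
+ * left NULL, so callers only link the node into a list on success.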
+ */
+static int
+ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *f,
+ struct ice_tc_flower_fltr **__fltr)
+{
+ struct ice_tc_flower_fltr *fltr;
+ int err;
+
+ /* by default, set output to be INVALID */
+ *__fltr = NULL;
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ fltr->cookie = f->cookie;
+ fltr->extack = f->common.extack;
+ fltr->src_vsi = vsi;
+ INIT_HLIST_NODE(&fltr->tc_flower_node);
+
+ err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+ if (err < 0)
+ goto err;
+
+ err = ice_parse_tc_flower_actions(vsi, f, fltr);
+ if (err < 0)
+ goto err;
+
+ err = ice_add_switch_fltr(vsi, fltr);
+ if (err < 0)
+ goto err;
+
+ /* return the newly created filter */
+ *__fltr = fltr;
+
+ return 0;
+err:
+ kfree(fltr);
+ return err;
+}
+
+/**
+ * ice_find_tc_flower_fltr - Find the TC flower filter in the list
+ * @pf: Pointer to PF
+ * @cookie: filter specific cookie
+ */
+static struct ice_tc_flower_fltr *
+ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
+{
+ struct ice_tc_flower_fltr *fltr;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+ if (cookie == fltr->cookie)
+ return fltr;
+
+ return NULL;
+}
+
+/**
+ * ice_add_cls_flower - add TC flower filters
+ * @netdev: Pointer to filter device
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to flower offload structure
+ */
+int
+ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower)
+{
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
+ struct net_device *vsi_netdev = vsi->netdev;
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (ice_is_reset_in_progress(pf->state))
+ return -EBUSY;
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ return -EINVAL;
+
+ if (ice_is_port_repr_netdev(netdev))
+ vsi_netdev = netdev;
+
+ if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
+ !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
+ /* Based on TC indirect notifications from the kernel, all ice
+ * devices get an instance of the rule from the higher-level
+ * device. Avoid triggering an explicit error in this case.
+ */
+ if (netdev == vsi_netdev)
+ NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
+ return -EINVAL;
+ }
+
+ /* avoid duplicate entries; if one already exists, return an error */
+ fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
+ if (fltr) {
+ NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
+ return -EEXIST;
+ }
+
+ /* prep and add TC-flower filter in HW */
+ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+ if (err)
+ return err;
+
+ /* add filter into an ordered list */
+ hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
+ return 0;
+}
+
+/**
+ * ice_del_cls_flower - delete TC flower filters
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct flow_cls_offload
+ */
+int
+ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ /* find filter */
+ fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
+ if (!fltr) {
+ if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
+ hlist_empty(&pf->tc_flower_fltr_list))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
+ return -EINVAL;
+ }
+
+ fltr->extack = cls_flower->common.extack;
+ /* delete filter from HW */
+ err = ice_del_tc_fltr(vsi, fltr);
+ if (err)
+ return err;
+
+ /* delete filter from an ordered list */
+ hlist_del(&fltr->tc_flower_node);
+
+ /* free the filter node */
+ kfree(fltr);
+
+ return 0;
+}
+
+/**
+ * ice_replay_tc_fltrs - replay TC filters
+ * @pf: pointer to PF struct
+ */
+void ice_replay_tc_fltrs(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(fltr, node,
+ &pf->tc_flower_fltr_list,
+ tc_flower_node) {
+ fltr->extack = NULL;
+ ice_add_switch_fltr(fltr->src_vsi, fltr);
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
new file mode 100644
index 000000000000..319049477959
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_TC_LIB_H_
+#define _ICE_TC_LIB_H_
+
+#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0)
+#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1)
+#define ICE_TC_FLWR_FIELD_VLAN BIT(2)
+#define ICE_TC_FLWR_FIELD_DEST_IPV4 BIT(3)
+#define ICE_TC_FLWR_FIELD_SRC_IPV4 BIT(4)
+#define ICE_TC_FLWR_FIELD_DEST_IPV6 BIT(5)
+#define ICE_TC_FLWR_FIELD_SRC_IPV6 BIT(6)
+#define ICE_TC_FLWR_FIELD_DEST_L4_PORT BIT(7)
+#define ICE_TC_FLWR_FIELD_SRC_L4_PORT BIT(8)
+#define ICE_TC_FLWR_FIELD_TENANT_ID BIT(9)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 BIT(10)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 BIT(11)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 BIT(12)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 BIT(13)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT BIT(14)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
+#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
+#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
+
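+/* tunnel key IDs (e.g. the VXLAN VNI) must be matched with a full 32-bit mask */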
+#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
+
+struct ice_indr_block_priv {
+ struct net_device *netdev;
+ struct ice_netdev_priv *np;
+ struct list_head list;
+};
+
+struct ice_tc_flower_action {
+ u32 tc_class;
+ enum ice_sw_fwd_act_type fltr_act;
+};
+
+struct ice_tc_vlan_hdr {
+ __be16 vlan_id; /* Only last 12 bits valid */
+ u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+};
+
+struct ice_tc_l2_hdr {
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ __be16 n_proto; /* Ethernet Protocol */
+};
+
+struct ice_tc_l3_hdr {
+ u8 ip_proto; /* IPPROTO value */
+ union {
+ struct {
+ struct in_addr dst_ip;
+ struct in_addr src_ip;
+ } v4;
+ struct {
+ struct in6_addr dst_ip6;
+ struct in6_addr src_ip6;
+ } v6;
+ } ip;
+#define dst_ipv6 ip.v6.dst_ip6.s6_addr32
+#define dst_ipv6_addr ip.v6.dst_ip6.s6_addr
+#define src_ipv6 ip.v6.src_ip6.s6_addr32
+#define src_ipv6_addr ip.v6.src_ip6.s6_addr
+#define dst_ipv4 ip.v4.dst_ip.s_addr
+#define src_ipv4 ip.v4.src_ip.s_addr
+
+ u8 tos;
+ u8 ttl;
+};
+
+struct ice_tc_l4_hdr {
+ __be16 dst_port;
+ __be16 src_port;
+};
+
+struct ice_tc_flower_lyr_2_4_hdrs {
+ /* L2 layer fields with their mask */
+ struct ice_tc_l2_hdr l2_key;
+ struct ice_tc_l2_hdr l2_mask;
+ struct ice_tc_vlan_hdr vlan_hdr;
+ /* L3 (IPv4[6]) layer fields with their mask */
+ struct ice_tc_l3_hdr l3_key;
+ struct ice_tc_l3_hdr l3_mask;
+
+ /* L4 layer fields with their mask */
+ struct ice_tc_l4_hdr l4_key;
+ struct ice_tc_l4_hdr l4_mask;
+};
+
+enum ice_eswitch_fltr_direction {
+ ICE_ESWITCH_FLTR_INGRESS,
+ ICE_ESWITCH_FLTR_EGRESS,
+};
+
+struct ice_tc_flower_fltr {
+ struct hlist_node tc_flower_node;
+
+ /* cookie becomes filter_rule_id if rule is added successfully */
+ unsigned long cookie;
+
+ /* add_adv_rule returns information like recipe ID, rule_id. Store
+ * those values since they are needed to remove advanced rule
+ */
+ u16 rid;
+ u16 rule_id;
+ /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon
+ * destination type
+ */
+ u16 dest_id;
+ /* if dest_id is vsi_idx, then need to store destination VSI ptr */
+ struct ice_vsi *dest_vsi;
+ /* direction of fltr for eswitch use case */
+ enum ice_eswitch_fltr_direction direction;
+
+ /* Parsed TC flower configuration params */
+ struct ice_tc_flower_lyr_2_4_hdrs outer_headers;
+ struct ice_tc_flower_lyr_2_4_hdrs inner_headers;
+ struct ice_vsi *src_vsi;
+ __be32 tenant_id;
+ u32 flags;
+ u8 tunnel_type;
+ struct ice_tc_flower_action action;
+
+ /* cache ptr which is used wherever needed to communicate netlink
+ * messages
+ */
+ struct netlink_ext_ack *extack;
+};
+
+/**
+ * ice_is_chnl_fltr - is this a valid channel filter
+ * @f: Pointer to tc-flower filter
+ *
+ * The criteria to determine whether a given filter is a valid channel
+ * filter is based on its "destination". If the destination is hw_tc
+ * (aka tc_class) and it is non-zero, then it is a valid channel (aka
+ * ADQ) filter
+ */
+static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f)
+{
+ return !!f->action.tc_class;
+}
+
+/**
+ * ice_chnl_dmac_fltr_cnt - DMAC based CHNL filter count
+ * @pf: Pointer to PF
+ */
+static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
+{
+ return pf->num_dmac_chnl_fltrs;
+}
+
+int
+ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower);
+int
+ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
+void ice_replay_tc_fltrs(struct ice_pf *pf);
+bool ice_is_tunnel_supported(struct net_device *dev);
+
+#endif /* _ICE_TC_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index 9bc0b8fdfc77..cf685247c07a 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -64,15 +64,15 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template,
TP_ARGS(q_vector, dim),
TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
__field(struct dim *, dim)
- __string(devname, q_vector->rx.ring->netdev->name)),
+ __string(devname, q_vector->rx.rx_ring->netdev->name)),
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->rx.ring->netdev->name);),
+ __assign_str(devname, q_vector->rx.rx_ring->netdev->name);),
TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
- __entry->q_vector->rx.ring->q_index,
+ __entry->q_vector->rx.rx_ring->q_index,
__entry->dim->state,
__entry->dim->profile_ix,
__entry->dim->tune_state,
@@ -91,15 +91,15 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template,
TP_ARGS(q_vector, dim),
TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
__field(struct dim *, dim)
- __string(devname, q_vector->tx.ring->netdev->name)),
+ __string(devname, q_vector->tx.tx_ring->netdev->name)),
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->tx.ring->netdev->name);),
+ __assign_str(devname, q_vector->tx.tx_ring->netdev->name);),
TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
- __entry->q_vector->tx.ring->q_index,
+ __entry->q_vector->tx.tx_ring->q_index,
__entry->dim->state,
__entry->dim->profile_ix,
__entry->dim->tune_state,
@@ -115,7 +115,7 @@ DEFINE_EVENT(ice_tx_dim_template, ice_tx_dim_work,
/* Events related to a vsi & ring */
DECLARE_EVENT_CLASS(ice_tx_template,
- TP_PROTO(struct ice_ring *ring, struct ice_tx_desc *desc,
+ TP_PROTO(struct ice_tx_ring *ring, struct ice_tx_desc *desc,
struct ice_tx_buf *buf),
TP_ARGS(ring, desc, buf),
@@ -135,7 +135,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \
DEFINE_EVENT(ice_tx_template, name, \
- TP_PROTO(struct ice_ring *ring, \
+ TP_PROTO(struct ice_tx_ring *ring, \
struct ice_tx_desc *desc, \
struct ice_tx_buf *buf), \
TP_ARGS(ring, desc, buf))
@@ -145,7 +145,7 @@ DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap);
DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap_eop);
DECLARE_EVENT_CLASS(ice_rx_template,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc),
TP_ARGS(ring, desc),
@@ -161,12 +161,12 @@ DECLARE_EVENT_CLASS(ice_rx_template,
__entry->ring, __entry->desc)
);
DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc),
TP_ARGS(ring, desc)
);
DECLARE_EVENT_CLASS(ice_rx_indicate_template,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -186,13 +186,13 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
);
DEFINE_EVENT(ice_rx_indicate_template, ice_clean_rx_irq_indicate,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb)
);
DECLARE_EVENT_CLASS(ice_xmit_template,
- TP_PROTO(struct ice_ring *ring, struct sk_buff *skb),
+ TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb),
TP_ARGS(ring, skb),
@@ -210,7 +210,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \
DEFINE_EVENT(ice_xmit_template, name, \
- TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), \
+ TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb), \
TP_ARGS(ring, skb))
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 6ee8e0032d52..bc3ba19dc88f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -6,6 +6,7 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
+#include <net/dsfield.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
@@ -13,6 +14,7 @@
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
+#include "ice_eswitch.h"
#define ICE_RX_HDR_SIZE 256
@@ -32,7 +34,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
struct ice_tx_buf *tx_buf, *first;
struct ice_fltr_desc *f_desc;
struct ice_tx_desc *tx_desc;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
struct device *dev;
dma_addr_t dma;
u32 td_cmd;
@@ -106,7 +108,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
* @tx_buf: the buffer to free
*/
static void
-ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
+ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
@@ -133,7 +135,7 @@ ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
/* tx_buf must be completely set up in the transmit path */
}
-static struct netdev_queue *txring_txq(const struct ice_ring *ring)
+static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->q_index);
}
@@ -142,8 +144,9 @@ static struct netdev_queue *txring_txq(const struct ice_ring *ring)
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
-void ice_clean_tx_ring(struct ice_ring *tx_ring)
+void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
+ u32 size;
u16 i;
if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
@@ -162,8 +165,10 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
tx_skip_free:
memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
/* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ memset(tx_ring->desc, 0, size);
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -181,14 +186,18 @@ tx_skip_free:
*
* Free all transmit software resources
*/
-void ice_free_tx_ring(struct ice_ring *tx_ring)
+void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
+ u32 size;
+
ice_clean_tx_ring(tx_ring);
devm_kfree(tx_ring->dev, tx_ring->tx_buf);
tx_ring->tx_buf = NULL;
if (tx_ring->desc) {
- dmam_free_coherent(tx_ring->dev, tx_ring->size,
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size,
tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -201,7 +210,7 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
+static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
unsigned int total_bytes = 0, total_pkts = 0;
unsigned int budget = ICE_DFLT_IRQ_WORK;
@@ -238,11 +247,8 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;
- if (ice_ring_is_xdp(tx_ring))
- page_frag_free(tx_buf->raw_buf);
- else
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ /* free the skb */
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -298,9 +304,6 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
- if (ice_ring_is_xdp(tx_ring))
- return !!budget;
-
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
total_bytes);
@@ -329,9 +332,10 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
*
* Return 0 on success, negative on error
*/
-int ice_setup_tx_ring(struct ice_ring *tx_ring)
+int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
+ u32 size;
if (!dev)
return -ENOMEM;
@@ -339,19 +343,19 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(tx_ring->tx_buf);
tx_ring->tx_buf =
- devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
+ devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
GFP_KERNEL);
if (!tx_ring->tx_buf)
return -ENOMEM;
/* round up to nearest page */
- tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
- PAGE_SIZE);
- tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
+ tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
- tx_ring->size);
+ size);
goto err;
}
@@ -370,9 +374,10 @@ err:
* ice_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
*/
-void ice_clean_rx_ring(struct ice_ring *rx_ring)
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ u32 size;
u16 i;
/* ring already cleared, nothing to do */
@@ -417,7 +422,9 @@ rx_skip_free:
memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
/* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ memset(rx_ring->desc, 0, size);
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
@@ -430,8 +437,10 @@ rx_skip_free:
*
* Free all receive software resources
*/
-void ice_free_rx_ring(struct ice_ring *rx_ring)
+void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
+ u32 size;
+
ice_clean_rx_ring(rx_ring);
if (rx_ring->vsi->type == ICE_VSI_PF)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
@@ -441,7 +450,9 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
rx_ring->rx_buf = NULL;
if (rx_ring->desc) {
- dmam_free_coherent(rx_ring->dev, rx_ring->size,
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(rx_ring->dev, size,
rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -453,9 +464,10 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
*
* Return 0 on success, negative on error
*/
-int ice_setup_rx_ring(struct ice_ring *rx_ring)
+int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ u32 size;
if (!dev)
return -ENOMEM;
@@ -463,19 +475,19 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf =
- devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
+ devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
/* round up to nearest page */
- rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
- PAGE_SIZE);
- rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
- rx_ring->size);
+ size);
goto err;
}
@@ -499,7 +511,7 @@ err:
}
static unsigned int
-ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
+ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
{
unsigned int truesize;
@@ -519,15 +531,15 @@ ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
+ * @xdp_ring: ring to be used for XDP_TX action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
-ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
- struct ice_ring *xdp_ring;
- int err, result;
+ int err;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -535,11 +547,14 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
return ICE_XDP_PASS;
case XDP_TX:
- xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
- if (result == ICE_XDP_CONSUMED)
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_lock(&xdp_ring->tx_lock);
+ err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
+ if (err == ICE_XDP_CONSUMED)
goto out_failure;
- return result;
+ return err;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
if (err)
@@ -576,7 +591,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct ice_netdev_priv *np = netdev_priv(dev);
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *xdp_ring;
+ struct ice_tx_ring *xdp_ring;
int nxmit = 0, i;
if (test_bit(ICE_VSI_DOWN, vsi->state))
@@ -588,7 +603,14 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
- xdp_ring = vsi->xdp_rings[queue_index];
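+ /* ice_xdp_locking_key is expected to be static-branch-enabled when
+ * there are fewer XDP Tx rings than CPUs; rings are then shared, so
+ * pick one modulo the ring count and serialize on its tx_lock.
+ */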
+ if (static_branch_unlikely(&ice_xdp_locking_key)) {
+ queue_index %= vsi->num_xdp_txq;
+ xdp_ring = vsi->xdp_rings[queue_index];
+ spin_lock(&xdp_ring->tx_lock);
+ } else {
+ xdp_ring = vsi->xdp_rings[queue_index];
+ }
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@@ -602,6 +624,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
+
return nxmit;
}
@@ -614,7 +639,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
* reused.
*/
static bool
-ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
+ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
@@ -665,7 +690,7 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/
-bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -794,7 +819,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
* The function will then update the page offset.
*/
static void
-ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
@@ -820,7 +845,7 @@ ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* Synchronizes page for reuse by the adapter
*/
static void
-ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
+ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
u16 nta = rx_ring->next_to_alloc;
struct ice_rx_buf *new_buf;
@@ -851,7 +876,7 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* for use by the CPU.
*/
static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
+ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
@@ -888,7 +913,7 @@ ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *
-ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
u8 metasize = xdp->data - xdp->data_meta;
@@ -940,7 +965,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* skb correctly.
*/
static struct sk_buff *
-ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
@@ -1000,7 +1025,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* the associated resources.
*/
static void
-ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;
@@ -1036,7 +1061,7 @@ ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* otherwise return true indicating that this is in fact a non-EOP buffer.
*/
static bool
-ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
+ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
@@ -1060,11 +1085,12 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
*
* Returns amount of work completed
*/
-int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int offset = rx_ring->rx_offset;
+ struct ice_tx_ring *xdp_ring = NULL;
unsigned int xdp_res, xdp_xmit = 0;
struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
@@ -1077,6 +1103,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
#endif
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (xdp_prog)
+ xdp_ring = rx_ring->xdp_ring;
+
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -1140,11 +1170,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (!xdp_prog)
goto construct_skb;
- xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
+ xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
if (!xdp_res)
goto construct_skb;
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
@@ -1221,7 +1250,7 @@ construct_skb:
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
if (xdp_prog)
- ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
rx_ring->skb = skb;
ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
@@ -1230,6 +1259,41 @@ construct_skb:
return failure ? budget : (int)total_rx_pkts;
}
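+/**
+ * __ice_update_sample - fill a dim sample covering all rings in a container
+ * @q_vector: vector the ring container belongs to
+ * @rc: ring container (Tx or Rx) whose ring stats are summed
+ * @sample: dim sample to initialize
+ * @is_tx: true when @rc holds Tx rings
+ */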
+static void __ice_update_sample(struct ice_q_vector *q_vector,
+ struct ice_ring_container *rc,
+ struct dim_sample *sample,
+ bool is_tx)
+{
+ u64 packets = 0, bytes = 0;
+
+ if (is_tx) {
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_tx_ring(tx_ring, *rc) {
+ packets += tx_ring->stats.pkts;
+ bytes += tx_ring->stats.bytes;
+ }
+ } else {
+ struct ice_rx_ring *rx_ring;
+
+ ice_for_each_rx_ring(rx_ring, *rc) {
+ packets += rx_ring->stats.pkts;
+ bytes += rx_ring->stats.bytes;
+ }
+ }
+
+ dim_update_sample(q_vector->total_events, packets, bytes, sample);
+ sample->comp_ctr = 0;
+
+ /* if dim settings get stale, like when not updated for 1
+ * second or longer, force it to start again. This addresses the
+ * frequent case of an idle queue being switched to by the
+ * scheduler. The 1,000 here means 1,000 milliseconds.
+ */
+ if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
+ rc->dim.state = DIM_START_MEASURE;
+}
+
/**
* ice_net_dim - Update net DIM algorithm
* @q_vector: the vector associated with the interrupt
@@ -1245,34 +1309,16 @@ static void ice_net_dim(struct ice_q_vector *q_vector)
struct ice_ring_container *rx = &q_vector->rx;
if (ITR_IS_DYNAMIC(tx)) {
- struct dim_sample dim_sample = {};
- u64 packets = 0, bytes = 0;
- struct ice_ring *ring;
-
- ice_for_each_ring(ring, q_vector->tx) {
- packets += ring->stats.pkts;
- bytes += ring->stats.bytes;
- }
-
- dim_update_sample(q_vector->total_events, packets, bytes,
- &dim_sample);
+ struct dim_sample dim_sample;
+ __ice_update_sample(q_vector, tx, &dim_sample, true);
net_dim(&tx->dim, dim_sample);
}
if (ITR_IS_DYNAMIC(rx)) {
- struct dim_sample dim_sample = {};
- u64 packets = 0, bytes = 0;
- struct ice_ring *ring;
-
- ice_for_each_ring(ring, q_vector->rx) {
- packets += ring->stats.pkts;
- bytes += ring->stats.bytes;
- }
-
- dim_update_sample(q_vector->total_events, packets, bytes,
- &dim_sample);
+ struct dim_sample dim_sample;
+ __ice_update_sample(q_vector, rx, &dim_sample, false);
net_dim(&rx->dim, dim_sample);
}
}
@@ -1299,15 +1345,14 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
}
/**
- * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
+ * ice_enable_interrupt - re-enable MSI-X interrupt
* @q_vector: the vector associated with the interrupt to enable
*
- * Update the net_dim() algorithm and re-enable the interrupt associated with
- * this vector.
- *
- * If the VSI is down, the interrupt will not be re-enabled.
+ * If the VSI is down, the interrupt will not be re-enabled. Also,
+ * when enabling the interrupt always reset the wb_on_itr to false
+ * and trigger a software interrupt to clean out internal state.
*/
-static void ice_update_ena_itr(struct ice_q_vector *q_vector)
+static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
struct ice_vsi *vsi = q_vector->vsi;
bool wb_en = q_vector->wb_on_itr;
@@ -1316,25 +1361,25 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
if (test_bit(ICE_DOWN, vsi->state))
return;
- /* When exiting WB_ON_ITR, let ITR resume its normal
- * interrupts-enabled path.
+ /* trigger an ITR delayed software interrupt when exiting busy poll, to
+ * make sure to catch any pending cleanups that might have been missed
+ * due to interrupt state transition. If busy poll or poll isn't
+ * enabled, then don't update ITR, and just enable the interrupt.
*/
- if (wb_en)
+ if (!wb_en) {
+ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ } else {
q_vector->wb_on_itr = false;
- /* This will do nothing if dynamic updates are not enabled. */
- ice_net_dim(q_vector);
-
- /* net_dim() updates ITR out-of-band using a work item */
- itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
- /* trigger an immediate software interrupt when exiting
- * busy poll, to make sure to catch any pending cleanups
- * that might have been missed due to interrupt state
- * transition.
- */
- if (wb_en) {
+ /* do two things here with a single write. Set up the third ITR
+ * index to be used for software interrupt moderation, and then
+ * trigger a software interrupt with a rate limit of 20K on
+ * software interrupts, this will help avoid high interrupt
+ * loads due to frequently polling and exiting polling.
+ */
+ itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_SW_ITR_INDX_M |
+ ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
}
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
@@ -1387,18 +1432,24 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
{
struct ice_q_vector *q_vector =
container_of(napi, struct ice_q_vector, napi);
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
bool clean_complete = true;
- struct ice_ring *ring;
int budget_per_ring;
int work_done = 0;
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- ice_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_pool ?
- ice_clean_tx_irq_zc(ring, budget) :
- ice_clean_tx_irq(ring, budget);
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ bool wd;
+
+ if (tx_ring->xsk_pool)
+ wd = ice_clean_tx_irq_zc(tx_ring, budget);
+ else if (ice_ring_is_xdp(tx_ring))
+ wd = true;
+ else
+ wd = ice_clean_tx_irq(tx_ring, budget);
if (!wd)
clean_complete = false;
@@ -1419,16 +1470,16 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget;
- ice_for_each_ring(ring, q_vector->rx) {
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
int cleaned;
/* A dedicated path for zero-copy allows making a single
* comparison in the irq context instead of many inside the
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
- cleaned = ring->xsk_pool ?
- ice_clean_rx_irq_zc(ring, budget_per_ring) :
- ice_clean_rx_irq(ring, budget_per_ring);
+ cleaned = rx_ring->xsk_pool ?
+ ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+ ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
@@ -1447,10 +1498,12 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done)))
- ice_update_ena_itr(q_vector);
- else
+ if (likely(napi_complete_done(napi, work_done))) {
+ ice_net_dim(q_vector);
+ ice_enable_interrupt(q_vector);
+ } else {
ice_set_wb_on_itr(q_vector);
+ }
return min_t(int, work_done, budget - 1);
}
@@ -1462,7 +1515,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*
* Returns -EBUSY if a stop is needed, else 0
*/
-static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
/* Memory barrier before checking head and tail */
@@ -1485,7 +1538,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
*
* Returns 0 if stop is not needed
*/
-static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -1504,7 +1557,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
* it and the length into the transmit descriptor.
*/
static void
-ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
+ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
struct ice_tx_offload_params *off)
{
u64 td_offset, td_tag, td_cmd;
@@ -1840,7 +1893,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* related to VLAN tagging for the HW, such as VLAN, DCB, etc.
*/
static void
-ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
+ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
@@ -2146,7 +2199,7 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
* @off: Tx offload parameters
*/
static void
-ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
+ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
s8 idx;
@@ -2181,7 +2234,7 @@ ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
* Returns NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t
-ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
struct ice_tx_offload_params offload = { 0 };
struct ice_vsi *vsi = tx_ring->vsi;
@@ -2245,6 +2298,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
+ if (ice_is_switchdev_running(vsi->back))
+ ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
@@ -2282,7 +2337,7 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
tx_ring = vsi->tx_rings[skb->queue_mapping];
@@ -2296,10 +2351,43 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
/**
+ * ice_get_dscp_up - return the UP/TC value for a SKB
+ * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
+ * @skb: SKB to query for info to determine UP/TC
+ *
+ * This function should only be called when the PF is in L3 DSCP PFC mode
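+ * (e.g. an IPv4 packet with TOS 0xb8 yields DSCP 0xb8 >> 2 == 46, so
+ * the returned UP is dcbcfg->dscp_map[46])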
+ */
+static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
+{
+ u8 dscp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return dcbcfg->dscp_map[dscp];
+}
+
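+/**
+ * ice_select_queue - select a Tx queue, setting skb priority in DSCP mode
+ * @netdev: network interface device structure
+ * @skb: buffer to transmit
+ * @sb_dev: subordinate device, if any
+ */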
+u16
+ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *dcbcfg;
+
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
+ skb->priority = ice_get_dscp_up(dcbcfg, skb);
+
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
+/**
* ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
* @tx_ring: tx_ring to clean
*/
-void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
+void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
struct ice_vsi *vsi = tx_ring->vsi;
s16 i = tx_ring->next_to_clean;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 1e46e80f3d6f..c56dd1749903 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -13,6 +13,7 @@
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
+#define ICE_TX_THRESH 32
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
@@ -154,7 +155,7 @@ struct ice_tx_buf {
struct ice_tx_offload_params {
u64 cd_qw1;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
u32 td_cmd;
u32 td_offset;
u32 td_l2tag1;
@@ -164,17 +165,10 @@ struct ice_tx_offload_params {
};
struct ice_rx_buf {
- union {
- struct {
- dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- u16 pagecnt_bias;
- };
- struct {
- struct xdp_buff *xdp;
- };
- };
+ dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
};
struct ice_q_stats {
@@ -258,9 +252,9 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
-struct ice_ring {
+struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
- struct ice_ring *next; /* pointer to next ring in q_vector */
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -268,14 +262,13 @@ struct ice_ring {
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
union {
- struct ice_tx_buf *tx_buf;
struct ice_rx_buf *rx_buf;
+ struct xdp_buff **xdp_buf;
};
/* CL2 - 2nd cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
+ /* CL3 - 3rd cacheline starts here */
u16 q_index; /* Queue number of ring */
- u16 q_handle; /* Queue handle per TC */
-
- u8 ring_active:1; /* is ring online or not */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -284,63 +277,104 @@ struct ice_ring {
u16 next_to_use;
u16 next_to_clean;
u16 next_to_alloc;
+ u16 rx_offset;
+ u16 rx_buf_len;
/* stats structs */
+ struct ice_rxq_stats rx_stats;
struct ice_q_stats stats;
struct u64_stats_sync syncp;
- union {
- struct ice_txq_stats tx_stats;
- struct ice_rxq_stats rx_stats;
- };
struct rcu_head rcu; /* to avoid race on free */
- DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
+ /* CL4 - 4th cacheline starts here */
+ struct ice_channel *ch;
struct bpf_prog *xdp_prog;
+ struct ice_tx_ring *xdp_ring;
struct xsk_buff_pool *xsk_pool;
- u16 rx_offset;
- /* CL3 - 3rd cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
struct sk_buff *skb;
- /* CLX - the below items are only accessed infrequently and should be
- * in their own cache line if possible
- */
-#define ICE_TX_FLAGS_RING_XDP BIT(0)
+ dma_addr_t dma; /* physical address of ring */
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+ u64 cached_phctime;
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 ptp_rx;
u8 flags;
+} ____cacheline_internodealigned_in_smp;
+
+struct ice_tx_ring {
+ /* CL1 - 1st cacheline starts here */
+ struct ice_tx_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ u8 __iomem *tail;
+ struct ice_tx_buf *tx_buf;
+ struct ice_q_vector *q_vector; /* Backreference to associated vector */
+ struct net_device *netdev; /* netdev ring maps to */
+ struct ice_vsi *vsi; /* Backreference to associated VSI */
+ /* CL2 - 2nd cacheline starts here */
dma_addr_t dma; /* physical address of ring */
- unsigned int size; /* length of descriptor ring in bytes */
+ struct xsk_buff_pool *xsk_pool;
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_rs;
+ u16 next_dd;
+ u16 q_handle; /* Queue handle per TC */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 count; /* Number of descriptors */
+ u16 q_index; /* Queue number of ring */
+ /* stats structs */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
+ struct ice_txq_stats tx_stats;
+
+ /* CL3 - 3rd cacheline starts here */
+ struct rcu_head rcu; /* to avoid race on free */
+ DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
+ struct ice_channel *ch;
+ struct ice_ptp_tx *tx_tstamps;
+ spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
- u16 rx_buf_len;
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+ u8 flags;
u8 dcb_tc; /* Traffic class of ring */
- struct ice_ptp_tx *tx_tstamps;
- u64 cached_phctime;
- u8 ptp_rx:1;
- u8 ptp_tx:1;
+ u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;
-static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}
-static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}
-static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}
-static inline bool ice_ring_is_xdp(struct ice_ring *ring)
+static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
+{
+ return !!ring->ch;
+}
+
+static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}
+enum ice_container_type {
+ ICE_RX_CONTAINER,
+ ICE_TX_CONTAINER,
+};
+
struct ice_ring_container {
/* head of linked-list of rings */
- struct ice_ring *ring;
+ union {
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+ };
struct dim dim; /* data for net_dim algorithm */
u16 itr_idx; /* index in the interrupt vector */
/* this matches the maximum number of ITR bits, but in usec
@@ -349,6 +383,7 @@ struct ice_ring_container {
u16 itr_setting:13;
u16 itr_reserved:2;
u16 itr_mode:1;
+ enum ice_container_type type;
};
struct ice_coalesce_stored {
@@ -360,10 +395,13 @@ struct ice_coalesce_stored {
};
/* iterator for handling rings in ring container */
-#define ice_for_each_ring(pos, head) \
- for (pos = (head).ring; pos; pos = pos->next)
+#define ice_for_each_rx_ring(pos, head) \
+ for (pos = (head).rx_ring; pos; pos = pos->next)
+
+#define ice_for_each_tx_ring(pos, head) \
+ for (pos = (head).tx_ring; pos; pos = pos->next)
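As an aside on the container rework above: only one union member is valid at a time, the new enum ice_container_type records which one, and iteration is split into per-type macros. A minimal userspace sketch of that tagged-union pattern under those assumptions (hypothetical names, not the driver's types):

    #include <stdio.h>

    /* hypothetical stand-ins for the driver's ring types */
    struct rx_ring { struct rx_ring *next; int id; };
    struct tx_ring { struct tx_ring *next; int id; };

    enum container_type { RX_CONTAINER, TX_CONTAINER };

    struct ring_container {
        union {            /* only the member matching 'type' is valid */
            struct rx_ring *rx_ring;
            struct tx_ring *tx_ring;
        };
        enum container_type type;
    };

    #define for_each_rx_ring(pos, head) \
        for (pos = (head).rx_ring; pos; pos = pos->next)

    int main(void)
    {
        struct rx_ring r1 = { .next = NULL, .id = 1 };
        struct rx_ring r0 = { .next = &r1, .id = 0 };
        struct ring_container rc = { .rx_ring = &r0, .type = RX_CONTAINER };
        struct rx_ring *pos;

        if (rc.type == RX_CONTAINER) /* check the tag before the union */
            for_each_rx_ring(pos, rc)
                printf("rx ring %d\n", pos->id);
        return 0;
    }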
-static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
+static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring->rx_buf_len > (PAGE_SIZE / 2))
@@ -376,18 +414,21 @@ static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
union ice_32b_rx_flex_desc;
-bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void ice_clean_tx_ring(struct ice_ring *tx_ring);
-void ice_clean_rx_ring(struct ice_ring *rx_ring);
-int ice_setup_tx_ring(struct ice_ring *tx_ring);
-int ice_setup_rx_ring(struct ice_ring *rx_ring);
-void ice_free_tx_ring(struct ice_ring *tx_ring);
-void ice_free_rx_ring(struct ice_ring *rx_ring);
+u16
+ice_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
+int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
+int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
+void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);
-void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);
+int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
+void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
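A side note on the struct split in this header: the CL1/CL2/CL3 comments document which fields are expected to share a cacheline, and ____cacheline_internodealigned_in_smp pins the struct boundary. A minimal, hypothetical userspace sketch of how such layout expectations can be checked with offsetof (assuming 64-byte lines; not driver code):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define CACHELINE 64 /* assumed L1 line size */

    /* hypothetical hot/cold split: per-transmit fields in line 0 */
    struct demo_tx_ring {
        void *desc;
        uint16_t next_to_use;
        uint16_t next_to_clean;
        char pad[CACHELINE - sizeof(void *) - 2 * sizeof(uint16_t)];
        uint32_t teid; /* cold config, pushed to the next line */
    } __attribute__((aligned(CACHELINE)));

    int main(void)
    {
        printf("teid at byte %zu, i.e. cacheline %zu\n",
               offsetof(struct demo_tx_ring, teid),
               offsetof(struct demo_tx_ring, teid) / CACHELINE);
        return 0;
    }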
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 171397dcf00a..1dd7e84f41f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -2,13 +2,15 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_txrx_lib.h"
+#include "ice_eswitch.h"
+#include "ice_lib.h"
/**
* ice_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump
* @val: new head index
*/
-void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
+void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
u16 prev_ntu = rx_ring->next_to_use & ~0x7;
@@ -66,7 +68,7 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
* @rx_ptype: the ptype value from the descriptor
*/
static void
-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 rx_ptype)
{
struct ice_32b_rx_flex_desc_nic *nic_mdid;
@@ -93,7 +95,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
* skb->protocol must be set before this function is called
*/
static void
-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
+ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
struct ice_rx_ptype_decoded decoded;
@@ -178,14 +180,15 @@ checksum_fail:
* other fields within the skb.
*/
void
-ice_process_skb_fields(struct ice_ring *rx_ring,
+ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 ptype)
{
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev
+ (rx_ring, rx_desc));
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
@@ -203,7 +206,7 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
* gro receive functions (with/without VLAN tag)
*/
void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
@@ -212,18 +215,67 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
}
/**
+ * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
+ * @xdp_ring: XDP ring to clean
+ */
+static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
+{
+ unsigned int total_bytes = 0, total_pkts = 0;
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *next_dd_desc;
+ u16 next_dd = xdp_ring->next_dd;
+ struct ice_tx_buf *tx_buf;
+ int i;
+
+ next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
+ if (!(next_dd_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ return;
+
+ for (i = 0; i < ICE_TX_THRESH; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ total_bytes += tx_buf->bytecount;
+ /* normally tx_buf->gso_segs would be read here, but on the
+ * XDP path it is always 1 for us
+ */
+ total_pkts++;
+
+ page_frag_free(tx_buf->raw_buf);
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ tx_buf->raw_buf = NULL;
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+
+ next_dd_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH;
+ if (xdp_ring->next_dd > xdp_ring->count)
+ xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_to_clean = ntc;
+ ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
+}
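
For context on the function above: rather than taking a completion per packet, it polls the DD (descriptor done) bit of the single descriptor at next_dd; once hardware sets it, the whole preceding ICE_TX_THRESH batch is known to be sent. A minimal sketch of the index bookkeeping, assuming as the code does that the ring length is a multiple of the threshold (hypothetical constants):

    #include <stdio.h>

    #define RING_COUNT 512 /* assumed multiple of the threshold */
    #define TX_THRESH   32

    int main(void)
    {
        unsigned int next_dd = TX_THRESH - 1; /* last slot of batch 0 */
        int batch;

        for (batch = 0; batch < 20; batch++) {
            /* hardware sets DD on slot next_dd once the whole batch
             * before it is sent; advance to the last slot of the next
             * batch and wrap at the ring end
             */
            next_dd += TX_THRESH;
            if (next_dd >= RING_COUNT)
                next_dd = TX_THRESH - 1;
            printf("batch %2d -> watch DD at %u\n", batch, next_dd);
        }
        return 0;
    }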
+
+/**
* ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
* @data: packet data pointer
* @size: packet data size
* @xdp_ring: XDP ring for transmission
*/
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
u16 i = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
dma_addr_t dma;
+ if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH)
+ ice_clean_xdp_irq(xdp_ring);
+
if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
xdp_ring->tx_stats.tx_busy++;
return ICE_XDP_CONSUMED;
@@ -244,21 +296,26 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, i);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
size, 0);
- /* Make certain all of the status bits have been updated
- * before next_to_watch is written.
- */
- smp_wmb();
-
i++;
- if (i == xdp_ring->count)
+ if (i == xdp_ring->count) {
i = 0;
-
- tx_buf->next_to_watch = tx_desc;
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs = ICE_TX_THRESH - 1;
+ }
xdp_ring->next_to_use = i;
+ if (i > xdp_ring->next_rs) {
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs += ICE_TX_THRESH;
+ }
+
return ICE_XDP_TX;
}
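
On the submission side rewritten above, the per-packet next_to_watch/smp_wmb() scheme is gone: an RS (report status) bit is requested only on every ICE_TX_THRESH-th descriptor, tracked in next_rs, so ice_clean_xdp_irq can later reap completions in batches. A minimal sketch of that cadence under the same assumptions (hypothetical constants, prints instead of descriptor writes):

    #include <stdio.h>

    #define RING_COUNT 512
    #define TX_THRESH   32

    int main(void)
    {
        unsigned int next_to_use = 0, next_rs = TX_THRESH - 1;
        int pkt;

        for (pkt = 0; pkt < 1000; pkt++) {
            unsigned int i = next_to_use + 1;

            if (i == RING_COUNT) { /* wrapped: close the open batch */
                i = 0;
                printf("RS set on slot %u (wrap)\n", next_rs);
                next_rs = TX_THRESH - 1;
            }
            next_to_use = i;
            if (i > next_rs) { /* passed the marker: close the batch */
                printf("RS set on slot %u\n", next_rs);
                next_rs += TX_THRESH;
            }
        }
        return 0;
    }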
@@ -269,7 +326,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
*
* Returns negative on failure, 0 on success.
*/
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
@@ -281,22 +338,23 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
/**
* ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
- * @rx_ring: Rx ring
+ * @xdp_ring: XDP ring
* @xdp_res: Result of the receive batch
*
 * This function bumps the XDP Tx tail and/or flushes the redirect map;
 * it should be called when a batch of packets has been processed in
 * the napi loop.
*/
-void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
{
if (xdp_res & ICE_XDP_REDIR)
xdp_do_flush_map();
if (xdp_res & ICE_XDP_TX) {
- struct ice_ring *xdp_ring =
- rx_ring->vsi->xdp_rings[rx_ring->q_index];
-
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_lock(&xdp_ring->tx_lock);
ice_xdp_ring_update_tail(xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
}
}
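
The locking added above is the fallback for when XDP Tx rings must be shared between CPUs; the static branch keeps the spinlock off the fast path when every CPU has its own ring. A minimal userspace sketch of the idea, with a plain flag standing in for the kernel's static key (link with -pthread):

    #include <stdio.h>
    #include <stdbool.h>
    #include <pthread.h>

    static bool rings_shared; /* the kernel uses a static branch here */
    static pthread_spinlock_t ring_lock;

    static void ring_update_tail(void)
    {
        if (rings_shared)
            pthread_spin_lock(&ring_lock);
        puts("tail bumped"); /* hardware tail write would go here */
        if (rings_shared)
            pthread_spin_unlock(&ring_lock);
    }

    int main(void)
    {
        pthread_spin_init(&ring_lock, PTHREAD_PROCESS_PRIVATE);
        rings_shared = true; /* e.g. more CPUs than XDP Tx queues */
        ring_update_tail();
        pthread_spin_destroy(&ring_lock);
        return 0;
    }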
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 05ac30752902..11b6c1601986 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -37,7 +37,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
*
* This function updates the XDP Tx ring tail register.
*/
-static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
+static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch.
@@ -46,14 +46,14 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}
-void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
-void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res);
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring);
+void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
-ice_process_skb_fields(struct ice_ring *rx_ring,
+ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 ptype);
void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index d33d1906103c..9e0c2923c62e 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -138,7 +138,9 @@ enum ice_vsi_type {
ICE_VSI_PF = 0,
ICE_VSI_VF = 1,
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
+ ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
+ ICE_VSI_SWITCHDEV_CTRL = 7,
};
struct ice_link_status {
@@ -569,6 +571,8 @@ struct ice_sched_vsi_info {
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
+ /* bw_t_info saves VSI BW information */
+ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
};
/* driver defines the policy */
@@ -604,7 +608,8 @@ struct ice_dcb_app_priority_table {
};
#define ICE_MAX_USER_PRIORITY 8
-#define ICE_DCBX_MAX_APPS 32
+#define ICE_DCBX_MAX_APPS 64
+#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@@ -622,7 +627,14 @@ struct ice_dcbx_cfg {
struct ice_dcb_ets_cfg etscfg;
struct ice_dcb_ets_cfg etsrec;
struct ice_dcb_pfc_cfg pfc;
+#define ICE_QOS_MODE_VLAN 0x0
+#define ICE_QOS_MODE_DSCP 0x1
+ u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
+ /* when DSCP mapping defined by user set its bit to 1 */
+ DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL);
+ /* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
+ u8 dscp_map[ICE_DSCP_NUM_VAL];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
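Regarding the DSCP fields introduced above: dscp_mapped records which of the 64 DSCP values the user has mapped, and dscp_map[] holds the UP/TC for each. A minimal sketch of the resulting lookup, assuming unmapped values fall back to TC 0 (hypothetical helper, not driver code):

    #include <stdio.h>
    #include <stdint.h>

    #define DSCP_NUM_VAL 64

    struct qos_cfg {
        uint64_t dscp_mapped;           /* bit i set: DSCP i is mapped */
        uint8_t dscp_map[DSCP_NUM_VAL]; /* DSCP -> UP/TC */
    };

    /* return the configured TC for a DSCP value, or 0 if unmapped */
    static uint8_t dscp_to_tc(const struct qos_cfg *cfg, uint8_t dscp)
    {
        dscp &= DSCP_NUM_VAL - 1; /* DSCP is a 6-bit field */
        if (!(cfg->dscp_mapped & (1ULL << dscp)))
            return 0;
        return cfg->dscp_map[dscp];
    }

    int main(void)
    {
        struct qos_cfg cfg = { 0 };

        cfg.dscp_mapped |= 1ULL << 46; /* map EF (46)... */
        cfg.dscp_map[46] = 5;          /* ...to traffic class 5 */
        printf("DSCP 46 -> TC %u\n", dscp_to_tc(&cfg, 46));
        printf("DSCP 10 -> TC %u\n", dscp_to_tc(&cfg, 10));
        return 0;
    }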
@@ -668,6 +680,10 @@ struct ice_port_info {
struct ice_switch_info {
struct list_head vsi_list_map_head;
struct ice_sw_recipe *recp_list;
+ u16 prof_res_bm_init;
+ u16 max_used_prof_index;
+
+ DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
/* FW logging configuration */
@@ -903,6 +919,7 @@ struct ice_hw {
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
+ u16 io_expander_handle;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index e93430ab37f1..2ac21484b876 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -5,7 +5,9 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
+#include "ice_dcb_lib.h"
#include "ice_flow.h"
+#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#define FIELD_SELECTOR(proto_hdr_field) \
@@ -251,7 +253,7 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
* ice_get_vf_vsi - get VF's VSI based on the stored index
* @vf: VF used to get VSI
*/
-static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
return vf->pf->vsi[vf->lan_vsi_idx];
}
@@ -412,7 +414,7 @@ static bool ice_is_vf_link_up(struct ice_vf *vf)
*
* send a link status message to a single VF
*/
-static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
struct virtchnl_pf_event pfe = { 0 };
struct ice_hw *hw = &vf->pf->hw;
@@ -620,6 +622,8 @@ void ice_free_vfs(struct ice_pf *pf)
if (!pf->vf)
return;
+ ice_eswitch_release(pf);
+
while (test_and_set_bit(ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
@@ -828,7 +832,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
@@ -855,7 +859,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
ice_vf_ctrl_invalidate_vsi(vf);
@@ -882,6 +886,40 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
}
/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host-configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int err;
+
+ if (vf->min_tx_rate) {
+ err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->min_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ if (vf->max_tx_rate) {
+ err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->max_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
* @vf: VF to add MAC filters for
*
@@ -932,6 +970,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
enum ice_status status;
u8 broadcast[ETH_ALEN];
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ return 0;
+
eth_broadcast_addr(broadcast);
status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
if (status) {
@@ -1414,6 +1455,11 @@ static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
if (ice_vf_rebuild_host_vlan_cfg(vf))
dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
vf->vf_id);
+
+ if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+ dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+ vf->vf_id);
+
/* rebuild aggregator node config for main VF VSI */
ice_vf_rebuild_aggregator_node_cfg(vsi);
}
@@ -1581,6 +1627,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_vf_post_vsi_rebuild(vf);
}
+ if (ice_is_eswitch_mode_switchdev(pf))
+ if (ice_eswitch_rebuild(pf))
+ dev_warn(dev, "eswitch rebuild failed\n");
+
ice_flush(hw);
clear_bit(ICE_VF_DIS, pf->state);
@@ -1593,7 +1643,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
*
* Returns true if the PF or VF is disabled, false otherwise.
*/
-static bool ice_is_vf_disabled(struct ice_vf *vf)
+bool ice_is_vf_disabled(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
@@ -1711,6 +1761,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
}
ice_vf_post_vsi_rebuild(vf);
+ vsi = ice_get_vf_vsi(vf);
+ ice_eswitch_update_repr(vsi);
/* if the VF has been reset allow it to come up again */
if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
@@ -1894,6 +1946,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
*/
ice_vf_ctrl_invalidate_vsi(vf);
ice_vf_fdir_init(vf);
+
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
}
}
@@ -1960,6 +2014,11 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
}
clear_bit(ICE_VF_DIS, pf->state);
+
+ ret = ice_eswitch_configure(pf);
+ if (ret)
+ goto err_unroll_sriov;
+
return 0;
err_unroll_sriov:
@@ -2823,7 +2882,7 @@ static void ice_wait_on_vf_reset(struct ice_vf *vf)
* disabled, and initialized so it can be configured and/or queried by a host
* administrator.
*/
-static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
struct ice_pf *pf;
@@ -3013,7 +3072,10 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
- ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
+ if (rm_promisc)
+ ret = ice_cfg_vlan_pruning(vsi, true);
+ else
+ ret = ice_cfg_vlan_pruning(vsi, false);
if (ret) {
dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3329,7 +3391,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- struct ice_ring *ring = vsi->tx_rings[vf_q_id];
+ struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
struct ice_txq_meta txq_meta = { 0 };
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
@@ -3802,6 +3864,26 @@ static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
}
/**
+ * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to check
+ *
+ * only update cached hardware MAC for legacy VF drivers on delete
+ * because we cannot guarantee order/type of MAC from the VF driver
+ */
+static void
+ice_update_legacy_cached_mac(struct ice_vf *vf,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
+ ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
+ return;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
+}
+
+/**
* ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
* @vf: VF to update
* @vc_ether_addr: structure from VIRTCHNL with MAC to delete
@@ -3822,16 +3904,7 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
*/
eth_zero_addr(vf->dev_lan_addr.addr);
- /* only update cached hardware MAC for legacy VF drivers on delete
- * because we cannot guarantee order/type of MAC from the VF driver
- */
- if (ice_is_vc_addr_legacy(vc_ether_addr) &&
- !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) {
- ether_addr_copy(vf->dev_lan_addr.addr,
- vf->legacy_last_added_umac.addr);
- ether_addr_copy(vf->hw_lan_addr.addr,
- vf->legacy_last_added_umac.addr);
- }
+ ice_update_legacy_cached_mac(vf, vc_ether_addr);
}
/**
@@ -4207,7 +4280,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Enable VLAN pruning when non-zero VLAN is added */
if (!vlan_promisc && vid &&
!ice_vsi_is_vlan_pruning_ena(vsi)) {
- status = ice_cfg_vlan_pruning(vsi, true, false);
+ status = ice_cfg_vlan_pruning(vsi, true);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
@@ -4261,7 +4334,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Disable VLAN pruning when only VLAN 0 is left */
if (vsi->num_vlan == 1 &&
ice_vsi_is_vlan_pruning_ena(vsi))
- ice_cfg_vlan_pruning(vsi, false, false);
+ ice_cfg_vlan_pruning(vsi, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
if (vlan_promisc) {
@@ -4400,6 +4473,168 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
return ice_vsi_manage_vlan_stripping(vsi, false);
}
+static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
+ .get_ver_msg = ice_vc_get_ver_msg,
+ .get_vf_res_msg = ice_vc_get_vf_res_msg,
+ .reset_vf = ice_vc_reset_vf_msg,
+ .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
+ .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
+ .cfg_qs_msg = ice_vc_cfg_qs_msg,
+ .ena_qs_msg = ice_vc_ena_qs_msg,
+ .dis_qs_msg = ice_vc_dis_qs_msg,
+ .request_qs_msg = ice_vc_request_qs_msg,
+ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+ .config_rss_key = ice_vc_config_rss_key,
+ .config_rss_lut = ice_vc_config_rss_lut,
+ .get_stats_msg = ice_vc_get_stats_msg,
+ .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
+ .add_vlan_msg = ice_vc_add_vlan_msg,
+ .remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
+ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+};
+
+void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
+{
+ *ops = ice_vc_vf_dflt_ops;
+}
+
+/**
+ * ice_vc_repr_add_mac
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * When port representors are created, we do not add a MAC rule
+ * to firmware; instead we store it so that the PF can report
+ * the same MAC as the VF.
+ */
+static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ pf = vf->pf;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *mac_addr = al->list[i].addr;
+
+ if (!is_unicast_ether_addr(mac_addr) ||
+ ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
+ continue;
+
+ if (vf->pf_set_mac) {
+ dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto handle_mac_exit;
+ }
+
+ ice_vfhw_mac_add(vf, &al->list[i]);
+ vf->num_mac++;
+ break;
+ }
+
+handle_mac_exit:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_repr_del_mac - response with success for deleting MAC
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * Respond with success so that the normal VF flow is not broken.
+ * For legacy VF drivers, try to update the cached MAC address.
+ */
+static int
+ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+
+ ice_update_legacy_cached_mac(vf, &al->list[0]);
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't enable VLAN stripping in switchdev mode for VF %d\n",
+ vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+ NULL, 0);
+}
+
+static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't disable VLAN stripping in switchdev mode for VF %d\n",
+ vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+ NULL, 0);
+}
+
+static int
+ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't config promiscuous mode in switchdev mode for VF %d\n",
+ vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+ NULL, 0);
+}
+
+void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
+{
+ ops->add_mac_addr_msg = ice_vc_repr_add_mac;
+ ops->del_mac_addr_msg = ice_vc_repr_del_mac;
+ ops->add_vlan_msg = ice_vc_repr_add_vlan;
+ ops->remove_vlan_msg = ice_vc_repr_del_vlan;
+ ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping;
+ ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping;
+ ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode;
+}
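
To summarize the ops plumbing above: each VF carries a vc_ops table initialized from the defaults, and switchdev mode overrides selected handlers via ice_vc_change_ops_to_repr(). A minimal sketch of the override pattern (hypothetical, heavily reduced types):

    #include <stdio.h>

    struct vf;

    struct vc_ops {
        int (*add_vlan)(struct vf *vf);
    };

    static int dflt_add_vlan(struct vf *vf)
    {
        (void)vf;
        puts("VLAN added");
        return 0;
    }

    static int repr_add_vlan(struct vf *vf)
    {
        (void)vf;
        puts("VLAN ignored in switchdev mode");
        return 0;
    }

    struct vf { struct vc_ops ops; };

    static const struct vc_ops dflt_ops = { .add_vlan = dflt_add_vlan };

    int main(void)
    {
        struct vf vf = { .ops = dflt_ops }; /* defaults at VF creation */

        vf.ops.add_vlan(&vf);
        vf.ops.add_vlan = repr_add_vlan; /* switchdev override */
        vf.ops.add_vlan(&vf);
        return 0;
    }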
+
/**
* ice_vc_process_vf_msg - Process request from VF
* @pf: pointer to the PF structure
@@ -4413,6 +4648,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
s16 vf_id = le16_to_cpu(event->desc.retval);
u16 msglen = event->msg_len;
+ struct ice_vc_vf_ops *ops;
u8 *msg = event->msg_buf;
struct ice_vf *vf = NULL;
struct device *dev;
@@ -4436,6 +4672,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
goto error_handler;
}
+ ops = &vf->vc_ops;
+
/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
if (err) {
@@ -4463,75 +4701,75 @@ error_handler:
switch (v_opcode) {
case VIRTCHNL_OP_VERSION:
- err = ice_vc_get_ver_msg(vf, msg);
+ err = ops->get_ver_msg(vf, msg);
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
- err = ice_vc_get_vf_res_msg(vf, msg);
+ err = ops->get_vf_res_msg(vf, msg);
if (ice_vf_init_vlan_stripping(vf))
dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
- ice_vc_reset_vf_msg(vf);
+ ops->reset_vf(vf);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
- err = ice_vc_add_mac_addr_msg(vf, msg);
+ err = ops->add_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_ETH_ADDR:
- err = ice_vc_del_mac_addr_msg(vf, msg);
+ err = ops->del_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- err = ice_vc_cfg_qs_msg(vf, msg);
+ err = ops->cfg_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
- err = ice_vc_ena_qs_msg(vf, msg);
+ err = ops->ena_qs_msg(vf, msg);
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_DISABLE_QUEUES:
- err = ice_vc_dis_qs_msg(vf, msg);
+ err = ops->dis_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
- err = ice_vc_request_qs_msg(vf, msg);
+ err = ops->request_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- err = ice_vc_cfg_irq_map_msg(vf, msg);
+ err = ops->cfg_irq_map_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
- err = ice_vc_config_rss_key(vf, msg);
+ err = ops->config_rss_key(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
- err = ice_vc_config_rss_lut(vf, msg);
+ err = ops->config_rss_lut(vf, msg);
break;
case VIRTCHNL_OP_GET_STATS:
- err = ice_vc_get_stats_msg(vf, msg);
+ err = ops->get_stats_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
+ err = ops->cfg_promiscuous_mode_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_VLAN:
- err = ice_vc_add_vlan_msg(vf, msg);
+ err = ops->add_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_VLAN:
- err = ice_vc_remove_vlan_msg(vf, msg);
+ err = ops->remove_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
- err = ice_vc_ena_vlan_stripping(vf);
+ err = ops->ena_vlan_stripping(vf);
break;
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
- err = ice_vc_dis_vlan_stripping(vf);
+ err = ops->dis_vlan_stripping(vf);
break;
case VIRTCHNL_OP_ADD_FDIR_FILTER:
- err = ice_vc_add_fdir_fltr(vf, msg);
+ err = ops->add_fdir_fltr_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_FDIR_FILTER:
- err = ice_vc_del_fdir_fltr(vf, msg);
+ err = ops->del_fdir_fltr_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_RSS_CFG:
- err = ice_vc_handle_rss_cfg(vf, msg, true);
+ err = ops->handle_rss_cfg_msg(vf, msg, true);
break;
case VIRTCHNL_OP_DEL_RSS_CFG:
- err = ice_vc_handle_rss_cfg(vf, msg, false);
+ err = ops->handle_rss_cfg_msg(vf, msg, false);
break;
case VIRTCHNL_OP_UNKNOWN:
default:
@@ -4588,8 +4826,8 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
else
ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- ivi->max_tx_rate = vf->tx_rate;
- ivi->min_tx_rate = 0;
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
return 0;
}
@@ -4699,6 +4937,11 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
struct ice_vf *vf;
int ret;
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+ return -EOPNOTSUPP;
+ }
+
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
@@ -4763,6 +5006,122 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
}
/**
+ * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
+ * @pf: PF associated with VFs
+ */
+static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
+{
+ int rate = 0, i;
+
+ ice_for_each_vf(pf, i)
+ rate += pf->vf[i].min_tx_rate;
+
+ return rate;
+}
+
+/**
+ * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
+ * @vf: VF trying to configure min_tx_rate
+ * @min_tx_rate: min Tx rate in Mbps
+ *
+ * Check if the min_tx_rate being passed in will cause oversubscription of the
+ * total min_tx_rate based on the current link speed and all other VFs'
+ * configured min_tx_rate.
+ *
+ * Return true if the passed min_tx_rate would cause oversubscription, else
+ * return false
+ */
+static bool
+ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
+{
+ int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
+ int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+
+ /* this VF's previous rate is being overwritten */
+ all_vfs_min_tx_rate -= vf->min_tx_rate;
+
+ if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
+ dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
+ min_tx_rate, vf->vf_id,
+ all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
+ link_speed_mbps);
+ return true;
+ }
+
+ return false;
+}
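
The arithmetic above replaces this VF's previous guarantee before testing the new total against link speed. A minimal sketch with a worked example in the comments (hypothetical numbers):

    #include <stdio.h>
    #include <stdbool.h>

    /* Reject a new per-VF minimum if the guaranteed total would exceed
     * link speed. Example: 10000 Mbps link, all VFs currently guarantee
     * 8000 of which this VF holds 1000 -> requesting 4001 fails
     * (7000 + 4001 > 10000) while 3000 still fits (7000 + 3000).
     */
    static bool min_rate_oversubscribed(int link_mbps, int all_vfs_min,
                                        int cur_min, int new_min)
    {
        all_vfs_min -= cur_min; /* this VF's old guarantee is replaced */
        return all_vfs_min + new_min > link_mbps;
    }

    int main(void)
    {
        printf("%d\n", min_rate_oversubscribed(10000, 8000, 1000, 4001));
        printf("%d\n", min_rate_oversubscribed(10000, 8000, 1000, 3000));
        return 0;
    }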
+
+/**
+ * ice_set_vf_bw - set min/max VF bandwidth
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate in Mbps
+ * @max_tx_rate: Maximum Tx rate in Mbps
+ */
+int
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ return ret;
+
+ vsi = ice_get_vf_vsi(vf);
+
+ /* when max_tx_rate is zero that means no max Tx rate limiting, so only
+ * check if max_tx_rate is non-zero
+ */
+ if (max_tx_rate && min_tx_rate > max_tx_rate) {
+ dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
+ min_tx_rate, max_tx_rate);
+ return -EINVAL;
+ }
+
+ if (min_tx_rate && ice_is_dcb_active(pf)) {
+ dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate))
+ return -EINVAL;
+
+ if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
+ ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ vf->min_tx_rate = min_tx_rate;
+ }
+
+ if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
+ ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ vf->max_tx_rate = max_tx_rate;
+ }
+
+ return 0;
+}
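
For reference, this handler is the kind of callback that iproute2's VF rate settings reach (assuming the series wires it up as ndo_set_vf_rate), so the limits are typically set from the host with something like: ip link set <pf-netdev> vf 0 min_tx_rate 500 max_tx_rate 2000 - both values in Mbps, matching the checks above.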
+
+/**
* ice_get_vf_stats - populate some stats for the VF
* @netdev: the netdev of the PF
* @vf_id: the host OS identifier (0-255)
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 842cb077df86..5ff93a08f54c 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,6 +70,32 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
+struct ice_vf;
+
+struct ice_vc_vf_ops {
+ int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+ void (*reset_vf)(struct ice_vf *vf);
+ int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
+ int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping)(struct ice_vf *vf);
+ int (*dis_vlan_stripping)(struct ice_vf *vf);
+ int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
+ int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+};
+
/* VF information structure */
struct ice_vf {
struct ice_pf *pf;
@@ -99,7 +125,8 @@ struct ice_vf {
* the main LAN VSI for the PF.
*/
u16 lan_vsi_num; /* ID as used by firmware */
- unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
+ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
u64 num_inval_msgs; /* number of continuous invalid msgs */
@@ -111,9 +138,17 @@ struct ice_vf {
struct ice_mdd_vf_events mdd_rx_events;
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
+
+ struct ice_repr *repr;
+
+ struct ice_vc_vf_ops vc_ops;
+
+ /* devlink port data */
+ struct devlink_port devlink_port;
};
#ifdef CONFIG_PCI_IOV
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
@@ -124,6 +159,9 @@ void ice_free_vfs(struct ice_pf *pf);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
+void ice_vc_notify_vf_link_state(struct ice_vf *vf);
+void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops);
+void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
@@ -135,10 +173,18 @@ int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto);
+int
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+
+bool ice_is_vf_disabled(struct ice_vf *vf);
+
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
@@ -164,6 +210,9 @@ static inline
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
+static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { }
+static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { }
static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
@@ -171,6 +220,21 @@ static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ return true;
+}
+
+static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ return NULL;
+}
+
static inline bool
ice_is_malicious_vf(struct ice_pf __always_unused *pf,
struct ice_rq_event_info __always_unused *event,
@@ -245,6 +309,14 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
}
static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
struct ice_q_vector __always_unused *q_vector)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 5a9f61deeb38..ff55cb415b11 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -67,7 +67,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
* @q_vector: queue vector
*/
static void
-ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
struct ice_q_vector *q_vector)
{
struct ice_pf *pf = vsi->back;
@@ -104,16 +104,17 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
u16 reg_idx = q_vector->reg_idx;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
ice_cfg_itr(hw, q_vector);
- ice_for_each_ring(ring, q_vector->tx)
- ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
q_vector->tx.itr_idx);
- ice_for_each_ring(ring, q_vector->rx)
- ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
q_vector->rx.itr_idx);
ice_flush(hw);
@@ -144,8 +145,9 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_txq_meta txq_meta = { };
- struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
int timeout = 50;
int err;
@@ -171,7 +173,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(&txq_meta, 0, sizeof(txq_meta));
ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
@@ -201,8 +203,9 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
u16 size;
int err;
@@ -225,7 +228,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
goto free_buf;
if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(qg_buf, 0, size);
qg_buf->num_txqs = 1;
@@ -233,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
+ xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -360,56 +363,50 @@ xsk_pool_if_up:
*
* Returns true if all allocations were successful, false if any fail.
*/
-bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
- struct ice_rx_buf *rx_buf;
- bool ok = true;
+ struct xdp_buff **xdp;
+ u32 nb_buffs, i;
dma_addr_t dma;
- if (!count)
- return true;
-
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- rx_buf = &rx_ring->rx_buf[ntu];
+ xdp = &rx_ring->xdp_buf[ntu];
- do {
- rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
- if (!rx_buf->xdp) {
- ok = false;
- break;
- }
+ nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+ nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+ if (!nb_buffs)
+ return false;
- dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
+ i = nb_buffs;
+ while (i--) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
- rx_desc->wb.status_error0 = 0;
rx_desc++;
- rx_buf++;
- ntu++;
-
- if (unlikely(ntu == rx_ring->count)) {
- rx_desc = ICE_RX_DESC(rx_ring, 0);
- rx_buf = rx_ring->rx_buf;
- ntu = 0;
- }
- } while (--count);
+ xdp++;
+ }
- if (rx_ring->next_to_use != ntu) {
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.status_error0 = 0;
- ice_release_rx_desc(rx_ring, ntu);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ xdp = rx_ring->xdp_buf;
+ ntu = 0;
}
- return ok;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.status_error0 = 0;
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return count == nb_buffs;
}
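
A note on the rewrite above: buffers are now taken from the pool with one xsk_buff_alloc_batch() call, and the batch is clamped to the space left before the ring end, so a refill that would wrap is simply finished by the next call. A minimal sketch of the clamping (hypothetical constants):

    #include <stdio.h>

    #define RING_COUNT 256

    /* clamp a refill to the contiguous slots before the ring end; the
     * remainder (after the wrap) is left for the next refill call
     */
    static unsigned int clamp_refill(unsigned int ntu, unsigned int count)
    {
        return count < RING_COUNT - ntu ? count : RING_COUNT - ntu;
    }

    int main(void)
    {
        unsigned int got = clamp_refill(240, 64);

        printf("refill %u of 64 at ntu=240, rest next time\n", got);
        return 0;
    }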
/**
* ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
* @rx_ring: Rx ring
*/
-static void ice_bump_ntc(struct ice_ring *rx_ring)
+static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
{
int ntc = rx_ring->next_to_clean + 1;
@@ -421,19 +418,19 @@ static void ice_bump_ntc(struct ice_ring *rx_ring)
/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
- * @rx_buf: zero-copy Rx buffer
+ * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
{
- unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
- unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
- unsigned int datasize_hard = rx_buf->xdp->data_end -
- rx_buf->xdp->data_hard_start;
+ struct xdp_buff *xdp = *xdp_arr;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -441,13 +438,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
- xsk_buff_free(rx_buf->xdp);
- rx_buf->xdp = NULL;
+ xsk_buff_free(xdp);
+ *xdp_arr = NULL;
return skb;
}
@@ -455,22 +452,18 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
* ice_run_xdp_zc - Executes an XDP program in zero-copy path
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
+ * @xdp_ring: ring to be used for XDP_TX action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
-ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
+ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
int err, result = ICE_XDP_PASS;
- struct bpf_prog *xdp_prog;
- struct ice_ring *xdp_ring;
u32 act;
- /* ZC patch is enabled only when XDP program is set,
- * so here it can not be NULL
- */
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-
act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) {
@@ -484,7 +477,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
case XDP_PASS:
break;
case XDP_TX:
- xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
@@ -511,17 +503,25 @@ out_failure:
*
* Returns number of processed packets on success, remaining budget on failure.
*/
-int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
+ /* the ZC path is enabled only when an XDP program is set,
+ * so it cannot be NULL here
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ xdp_ring = rx_ring->xdp_ring;
+
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
- struct ice_rx_buf *rx_buf;
+ struct xdp_buff **xdp;
struct sk_buff *skb;
u16 stat_err_bits;
u16 vlan_tag = 0;
@@ -544,18 +544,18 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- rx_buf->xdp->data_end = rx_buf->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
+ xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
+ xsk_buff_set_size(*xdp, size);
+ xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
- xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
+ xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
if (xdp_res) {
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
else
- xsk_buff_free(rx_buf->xdp);
+ xsk_buff_free(*xdp);
- rx_buf->xdp = NULL;
+ *xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
cleaned_count++;
@@ -565,7 +565,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
}
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, rx_buf);
+ skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
break;
@@ -596,7 +596,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (cleaned_count >= ICE_RX_BUF_WRITE)
failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
- ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
@@ -618,7 +618,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
*
* Returns true if cleanup/transmission is done.
*/
-static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
+static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
{
struct ice_tx_desc *tx_desc = NULL;
bool work_done = true;
@@ -669,7 +669,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
* @tx_buf: Tx buffer to clean
*/
static void
-ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
@@ -684,7 +684,7 @@ ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
*
 * Returns true if cleanup/transmission is done.
*/
-bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
+bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
{
int total_packets = 0, total_bytes = 0;
s16 ntc = xdp_ring->next_to_clean;
@@ -757,7 +757,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_q_vector *q_vector;
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *ring;
+ struct ice_tx_ring *ring;
if (test_bit(ICE_DOWN, vsi->state))
return -ENETDOWN;
@@ -808,17 +808,17 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
* ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
* @rx_ring: ring to be cleaned
*/
-void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
+void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
u16 i;
for (i = 0; i < rx_ring->count; i++) {
- struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+ struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
- if (!rx_buf->xdp)
+ if (!xdp)
continue;
- rx_buf->xdp = NULL;
+ *xdp = NULL;
}
}
@@ -826,7 +826,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
* ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
* @xdp_ring: XDP_Tx ring
*/
-void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
+void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
u32 xsk_frames = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index ea208808623a..4c7bd8e9dfc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -11,13 +11,13 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
-int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
-bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
+bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
-void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
-void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
+void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
+void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
#else
static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
@@ -28,21 +28,21 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
}
static inline int
-ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
+ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
int __always_unused budget)
{
return 0;
}
static inline bool
-ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
+ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring,
int __always_unused budget)
{
return false;
}
static inline bool
-ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
+ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
u16 __always_unused count)
{
return false;
@@ -60,7 +60,7 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) { }
-static inline void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) { }
+static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
+static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 751de06019a0..836be0d3b291 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -577,16 +577,15 @@ static void igb_set_i2c_data(void *data, int state)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- if (state)
- i2cctl |= E1000_I2C_DATA_OUT;
- else
+ if (state) {
+ i2cctl |= E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
+ } else {
+ i2cctl &= ~E1000_I2C_DATA_OE_N;
i2cctl &= ~E1000_I2C_DATA_OUT;
+ }
- i2cctl &= ~E1000_I2C_DATA_OE_N;
- i2cctl |= E1000_I2C_CLK_OE_N;
wr32(E1000_I2CPARAMS, i2cctl);
wrfl();
-
}
/**
@@ -603,8 +602,7 @@ static void igb_set_i2c_clk(void *data, int state)
s32 i2cctl = rd32(E1000_I2CPARAMS);
if (state) {
- i2cctl |= E1000_I2C_CLK_OUT;
- i2cctl &= ~E1000_I2C_CLK_OE_N;
+ i2cctl |= E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N;
} else {
i2cctl &= ~E1000_I2C_CLK_OUT;
i2cctl &= ~E1000_I2C_CLK_OE_N;
@@ -3116,12 +3114,21 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
s32 status = 0;
+ s32 i2cctl;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
return 0;
+ i2cctl = rd32(E1000_I2CPARAMS);
+ i2cctl |= E1000_I2CBB_EN
+ | E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
+ | E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements
* the protocol through toggling of the 4 bits in the register.
@@ -3356,7 +3363,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "NVM Read Error\n");
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
@@ -4988,7 +4995,7 @@ static int igb_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
/* set the correct pool for the new PF MAC address in entry 0 */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d32e72d953c8..74ccd622251a 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1527,8 +1527,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter)
spin_unlock_bh(&hw->mbx_lock);
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
memcpy(netdev->perm_addr, adapter->hw.mac.addr,
netdev->addr_len);
}
@@ -1813,7 +1812,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
@@ -2816,8 +2815,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (is_zero_ether_addr(adapter->hw.mac.addr))
dev_info(&pdev->dev,
"MAC address not assigned by administrator.\n");
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
}
spin_unlock_bh(&hw->mbx_lock);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 84f142f5e472..f068b66b8025 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -40,7 +40,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
ctrl = rd32(IGC_CTRL);
hw_dbg("Issuing a global reset to MAC\n");
- wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);
+ wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);
ret_val = igc_get_auto_rd_done(hw);
if (ret_val) {
@@ -158,11 +158,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
struct igc_phy_info *phy = &hw->phy;
s32 ret_val = 0;
- if (hw->phy.media_type != igc_media_type_copper) {
- phy->type = igc_phy_none;
- goto out;
- }
-
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
phy->reset_delay_us = 100;
@@ -207,6 +202,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_K2:
case IGC_DEV_ID_I226_K:
case IGC_DEV_ID_I225_LMVP:
+ case IGC_DEV_ID_I226_LMVP:
case IGC_DEV_ID_I225_IT:
case IGC_DEV_ID_I226_LM:
case IGC_DEV_ID_I226_V:
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index a4bbee748798..c7fe61509d5b 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -130,7 +130,7 @@
#define IGC_ERR_SWFW_SYNC 13
/* Device Control */
-#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */
+#define IGC_CTRL_RST 0x04000000 /* Global reset */
#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */
#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 4461f8b9a864..587db7483f25 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -22,8 +22,9 @@
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I226_K 0x3102
#define IGC_DEV_ID_I225_LMVP 0x5502
-#define IGC_DEV_ID_I226_K 0x5504
+#define IGC_DEV_ID_I226_LMVP 0x5503
#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I226_LM 0x125B
#define IGC_DEV_ID_I226_V 0x125C
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 0e19b4d02e62..8e448288ee26 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -56,6 +56,7 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
@@ -949,7 +950,7 @@ static int igc_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
/* set the correct pool for the new PF MAC address in entry 0 */
@@ -6377,7 +6378,7 @@ static int igc_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "NVM Read Error\n");
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 0f021909b430..30568e3544cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -773,7 +773,7 @@ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
{
-#if IS_ENABLED(CONFIG_X86_TSC)
+#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML)
return convert_art_ns_to_tsc(tstamp);
#else
return (struct system_counterval_t) { };
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index a430871d1c27..c8d1e815ec6b 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -549,7 +549,7 @@ ixgb_mta_set(struct ixgb_hw *hw,
*****************************************************************************/
void
ixgb_rar_set(struct ixgb_hw *hw,
- u8 *addr,
+ const u8 *addr,
u32 index)
{
u32 rar_low, rar_high;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 6064583095da..70bcff5fb3db 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -740,7 +740,7 @@ bool ixgb_adapter_start(struct ixgb_hw *hw);
void ixgb_check_for_link(struct ixgb_hw *hw);
bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
+void ixgb_rar_set(struct ixgb_hw *hw, const u8 *addr, u32 index);
/* Filters (multicast, vlan, receive) */
void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 1588376d4c67..99d481904ce6 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -362,6 +362,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgb_adapter *adapter;
static int cards_found = 0;
int pci_using_dac;
+ u8 addr[ETH_ALEN];
int i;
int err;
@@ -461,7 +462,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ ixgb_get_ee_mac_addr(&adapter->hw, addr);
+ eth_hw_addr_set(netdev, addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
@@ -1030,7 +1032,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
@@ -2227,6 +2229,7 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev_priv(netdev);
+ u8 addr[ETH_ALEN];
if (pci_enable_device(pdev)) {
netif_err(adapter, probe, adapter->netdev,
@@ -2250,7 +2253,8 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
"After reset, the EEPROM checksum is not valid\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ ixgb_get_ee_mac_addr(&adapter->hw, addr);
+ eth_hw_addr_set(netdev, addr);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
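Helpers that used to fill netdev->dev_addr in place as an output buffer (the probe and slot-reset paths above) now read into a stack buffer and publish it through the setter. The shape of the pattern, with ixgb's real helper and a wrapper function added only for illustration:

#include <linux/etherdevice.h>

static void ixgb_refresh_mac_sketch(struct ixgb_hw *hw,
				    struct net_device *netdev)
{
	u8 addr[ETH_ALEN];

	ixgb_get_ee_mac_addr(hw, addr);	/* EEPROM read into a local buffer */
	eth_hw_addr_set(netdev, addr);	/* single write path for dev_addr */
}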
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a604552fa634..4a69823e6abd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -351,6 +351,7 @@ struct ixgbe_ring {
};
u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
+ spinlock_t tx_lock; /* used in XDP mode */
struct xsk_buff_pool *xsk_pool;
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
u16 rx_buf_len;
@@ -375,11 +376,13 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
-#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_XDP_QS (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 63
+DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
+
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -629,7 +632,7 @@ struct ixgbe_adapter {
/* XDP */
int num_xdp_queues;
- struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];
+ struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS];
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */
/* TX */
@@ -772,6 +775,22 @@ struct ixgbe_adapter {
#endif /* CONFIG_IXGBE_IPSEC */
};
+static inline int ixgbe_determine_xdp_q_idx(int cpu)
+{
+ if (static_key_enabled(&ixgbe_xdp_locking_key))
+ return cpu % IXGBE_MAX_XDP_QS;
+ else
+ return cpu;
+}
+
+static inline
+struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
+{
+ int index = ixgbe_determine_xdp_q_idx(smp_processor_id());
+
+ return adapter->xdp_ring[index];
+}
+
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
switch (adapter->hw.mac.type) {
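The helper pair above is what allows fewer XDP TX rings than CPUs: assuming the in-tree IXGBE_MAX_FDIR_INDICES of 63, IXGBE_MAX_XDP_QS is 64, so on a machine with more CPUs than that the static key is enabled at program attach and several CPUs fold onto one ring. A worked example under that assumption:

/* With ixgbe_xdp_locking_key enabled and IXGBE_MAX_XDP_QS == 64: */
int a = ixgbe_determine_xdp_q_idx(6);	/* -> 6           */
int b = ixgbe_determine_xdp_q_idx(70);	/* -> 70 % 64 = 6 */

/* CPUs 6 and 70 now share xdp_ring[6] and must serialize on its
 * tx_lock; with the key disabled, each CPU keeps a private ring
 * and the lock is never touched.
 */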
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index beda8e0ef7d4..8362822316a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -467,9 +467,8 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
* this function does not support duplex forcing, but can
* limit the advertising of the adapter to the specified speed
*/
- if (!bitmap_subset(cmd->link_modes.advertising,
- cmd->link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS))
+ if (!linkmode_subset(cmd->link_modes.advertising,
+ cmd->link_modes.supported))
return -EINVAL;
/* only allow one speed at a time if no autoneg */
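linkmode_subset() (include/linux/linkmode.h) is simply bitmap_subset() with the __ETHTOOL_LINK_MODE_MASK_NBITS width baked in, which is all the hunk above changes. A small sketch of that helper family:

#include <linux/linkmode.h>

static int demo_check_advertising(const unsigned long *advertising)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);

	/* same test as bitmap_subset(advertising, mask,
	 * __ETHTOOL_LINK_MODE_MASK_NBITS), without the explicit width
	 */
	return linkmode_subset(advertising, mask) ? 0 : -EINVAL;
}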
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 0218f6c9b925..86b11164655e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -299,7 +299,10 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
- return adapter->xdp_prog ? nr_cpu_ids : 0;
+ int queues;
+
+ queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
+ return adapter->xdp_prog ? queues : 0;
}
#define IXGBE_RSS_64Q_MASK 0x3F
@@ -947,6 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->count = adapter->tx_ring_count;
ring->queue_index = xdp_idx;
set_ring_xdp(ring);
+ spin_lock_init(&ring->tx_lock);
/* assign ring to adapter */
WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
@@ -1032,6 +1036,9 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
adapter->q_vector[v_idx] = NULL;
__netif_napi_del(&q_vector->napi);
+ if (static_key_enabled(&ixgbe_xdp_locking_key))
+ static_branch_dec(&ixgbe_xdp_locking_key);
+
/*
* after a call to __netif_napi_del() napi may still be used and
* ixgbe_get_stats64() might access the rings on this vector,
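static_branch_inc()/static_branch_dec() are refcounted rather than boolean, so the decrement above only flips the branch back once the count reaches zero. A standalone sketch of the semantics (include/linux/jump_label.h), with a hypothetical key:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(demo_key);

static void demo_refcounting(void)
{
	static_branch_inc(&demo_key);	/* 0 -> 1: branch patched in live */
	static_branch_inc(&demo_key);	/* 1 -> 2: counter bump only */
	static_branch_dec(&demo_key);	/* 2 -> 1: still enabled */
	static_branch_dec(&demo_key);	/* 1 -> 0: branch patched back out */
}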
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 13c4782b920a..0f9f022260d7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -165,6 +165,9 @@ MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
+DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
+EXPORT_SYMBOL(ixgbe_xdp_locking_key);
+
static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
@@ -2197,6 +2200,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
+ struct ixgbe_ring *ring;
struct xdp_frame *xdpf;
u32 act;
@@ -2215,7 +2219,12 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto out_failure;
- result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ ring = ixgbe_determine_xdp_ring(adapter);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ result = ixgbe_xmit_xdp_ring(ring, xdpf);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
@@ -2422,13 +2431,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp_do_flush_map();
if (xdp_xmit & IXGBE_XDP_TX) {
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
+ ixgbe_xdp_ring_update_tail_locked(ring);
}
u64_stats_update_begin(&rx_ring->syncp);
@@ -6320,7 +6325,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
if (ixgbe_init_rss_key(adapter))
return -ENOMEM;
- adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
+ adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL);
if (!adapter->af_xdp_zc_qps)
return -ENOMEM;
@@ -8536,10 +8541,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif
-int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf)
{
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
u32 len, cmd_type;
@@ -8788,7 +8792,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
ixgbe_mac_set_default_filter(adapter);
@@ -10131,8 +10135,13 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
return -EINVAL;
}
- if (nr_cpu_ids > MAX_XDP_QUEUES)
+ /* If the number of CPUs is much larger than the maximum number of
+ * XDP queues, give up and return -ENOMEM as before.
+ */
+ if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
return -ENOMEM;
+ else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
+ static_branch_inc(&ixgbe_xdp_locking_key);
old_prog = xchg(&adapter->xdp_prog, prog);
need_reset = (!!prog != !!old_prog);
@@ -10199,6 +10208,15 @@ void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
writel(ring->next_to_use, ring->tail);
}
+void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
+{
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ ixgbe_xdp_ring_update_tail(ring);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
+}
+
static int ixgbe_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
@@ -10216,18 +10234,21 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
+ ring = adapter->xdp_prog ? ixgbe_determine_xdp_ring(adapter) : NULL;
if (unlikely(!ring))
return -ENXIO;
if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
return -ENXIO;
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
- err = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ err = ixgbe_xmit_xdp_ring(ring, xdpf);
if (err != IXGBE_XDP_TX)
break;
nxmit++;
@@ -10236,6 +10257,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(flags & XDP_XMIT_FLUSH))
ixgbe_xdp_ring_update_tail(ring);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
+
return nxmit;
}
@@ -10903,7 +10927,7 @@ skip_sriov:
eth_platform_get_mac_address(&adapter->pdev->dev,
adapter->hw.mac.perm_addr);
- memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.perm_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
e_dev_err("invalid MAC address\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 2aeec78029bc..a82533f21d36 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -12,7 +12,7 @@
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
-int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf);
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
@@ -23,6 +23,7 @@ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb);
void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
+void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring);
void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index b1d22e4d5ec9..db2bc58dfcfd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -100,6 +100,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
+ struct ixgbe_ring *ring;
struct xdp_frame *xdpf;
u32 act;
@@ -120,7 +121,12 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto out_failure;
- result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ ring = ixgbe_determine_xdp_ring(adapter);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ result = ixgbe_xmit_xdp_ring(ring, xdpf);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
@@ -334,13 +340,9 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
xdp_do_flush_map();
if (xdp_xmit & IXGBE_XDP_TX) {
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
+ ixgbe_xdp_ring_update_tail_locked(ring);
}
u64_stats_update_begin(&rx_ring->syncp);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index c714e1ecd308..d81811ab4ec4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2540,7 +2540,7 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
}
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
@@ -3054,7 +3054,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
else if (is_zero_ether_addr(adapter->hw.mac.addr))
dev_info(&pdev->dev,
"MAC address not assigned by administrator.\n");
- ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ eth_hw_addr_set(netdev, hw->mac.addr);
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -4231,7 +4231,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
ether_addr_copy(hw->mac.addr, addr->sa_data);
ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 5fc347abab3c..d459f5c8e98f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -66,9 +66,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 timeout = IXGBE_VF_INIT_TIMEOUT;
- s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
u8 *addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 1bdc4f23e1e5..439674fc9765 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -313,7 +313,7 @@ jme_load_macaddr(struct net_device *netdev)
val = jread32(jme, JME_RXUMA_HI);
macaddr[4] = (val >> 0) & 0xFF;
macaddr[5] = (val >> 8) & 0xFF;
- memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
+ eth_hw_addr_set(netdev, macaddr);
spin_unlock_bh(&jme->macaddr_lock);
}
@@ -2254,7 +2254,7 @@ jme_set_macaddr(struct net_device *netdev, void *p)
return -EBUSY;
spin_lock_bh(&jme->macaddr_lock);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
jme_set_unicastaddr(netdev);
spin_unlock_bh(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 3e9f324f1061..df9a8eefa007 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1297,8 +1297,8 @@ static int korina_probe(struct platform_device *pdev)
lp = netdev_priv(dev);
if (mac_addr)
- ether_addr_copy(dev->dev_addr, mac_addr);
- else if (of_get_mac_address(pdev->dev.of_node, dev->dev_addr) < 0)
+ eth_hw_addr_set(dev, mac_addr);
+ else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0)
eth_hw_addr_random(dev);
clk = devm_clk_get_optional(&pdev->dev, "mdioclk");
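of_get_ethdev_address() is the net_device-aware variant of of_get_mac_address(): it stores the device-tree address through eth_hw_addr_set() instead of writing into a caller buffer. The probe-time pattern used here and in the drivers below, sketched:

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>

static void demo_init_mac(struct platform_device *pdev,
			  struct net_device *dev)
{
	/* DT "mac-address"/"local-mac-address" first, random fallback */
	if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0)
		eth_hw_addr_random(dev);
}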
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 62f8c5212182..2258e3f19161 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -96,6 +96,9 @@ struct ltq_etop_priv {
struct ltq_etop_chan ch[MAX_DMA_CHAN];
int tx_free[MAX_DMA_CHAN >> 1];
+ int tx_burst_len;
+ int rx_burst_len;
+
spinlock_t lock;
};
@@ -259,7 +262,7 @@ ltq_etop_hw_init(struct net_device *dev)
/* enable crc generation */
ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
- ltq_dma_init_port(DMA_PORT_ETOP);
+ ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len);
for (i = 0; i < MAX_DMA_CHAN; i++) {
int irq = LTQ_DMA_CH0_INT + i;
@@ -472,8 +475,8 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- /* dma needs to start on a 16 byte aligned address */
- byte_offset = CPHYSADDR(skb->data) % 16;
+ /* DMA needs to start on an address aligned to the burst length */
+ byte_offset = CPHYSADDR(skb->data) % (priv->tx_burst_len * 4);
ch->skb[ch->dma.desc] = skb;
netif_trans_update(dev);
@@ -667,6 +670,18 @@ ltq_etop_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
SET_NETDEV_DEV(dev, &pdev->dev);
+ err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read tx-burst-length property\n");
+ return err;
+ }
+
+ err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read rx-burst-length property\n");
+ return err;
+ }
+
for (i = 0; i < MAX_DMA_CHAN; i++) {
if (IS_TX(i))
netif_napi_add(dev, &priv->ch[i].napi,
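The alignment requirement scales with the burst setting: the engine transfers tx_burst_len 32-bit words per burst, so a transfer must start on a tx_burst_len * 4 byte boundary, with the remainder carried as a byte offset in the descriptor. Worked numbers, assuming a burst length of 8:

unsigned int burst_len = 8;			/* 32-bit words per burst */
unsigned int align = burst_len * 4;		/* 32-byte start alignment */
unsigned long data = 0x1234;			/* example skb->data address */
unsigned long byte_offset = data % align;	/* 0x1234 % 32 = 0x14 */
unsigned long desc_addr = data - byte_offset;	/* 0x1220 */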
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index fb78f17d734f..0da09ea81980 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -14,15 +14,18 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/if_vlan.h>
+
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <xway_dma.h>
/* DMA */
-#define XRX200_DMA_DATA_LEN 0x600
+#define XRX200_DMA_DATA_LEN (SZ_64K - 1)
#define XRX200_DMA_RX 0
#define XRX200_DMA_TX 1
+#define XRX200_DMA_BURST_LEN 8
/* cpu port mac */
#define PMAC_RX_IPG 0x0024
@@ -106,7 +109,8 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
break;
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- XRX200_DMA_DATA_LEN;
+ (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
+ ETH_FCS_LEN);
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
@@ -154,19 +158,20 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
+ int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
struct sk_buff *skb = ch->skb[ch->dma.desc];
dma_addr_t mapping;
int ret = 0;
ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
- XRX200_DMA_DATA_LEN);
+ len);
if (!ch->skb[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}
mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
- XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ch->skb[ch->dma.desc] = skb;
@@ -179,8 +184,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
- LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- XRX200_DMA_DATA_LEN;
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
return ret;
}
@@ -316,8 +320,8 @@ static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
if (unlikely(dma_mapping_error(priv->dev, mapping)))
goto err_drop;
- /* dma needs to start on a 16 byte aligned address */
- byte_offset = mapping % 16;
+ /* DMA needs to start on an address aligned to the burst length */
+ byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);
desc->addr = mapping - byte_offset;
/* Make sure the address is written before we give it to HW */
@@ -340,10 +344,57 @@ err_drop:
return NETDEV_TX_OK;
}
+static int
+xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct xrx200_priv *priv = netdev_priv(net_dev);
+ struct xrx200_chan *ch_rx = &priv->chan_rx;
+ int old_mtu = net_dev->mtu;
+ bool running = false;
+ struct sk_buff *skb;
+ int curr_desc;
+ int ret = 0;
+
+ net_dev->mtu = new_mtu;
+
+ if (new_mtu <= old_mtu)
+ return ret;
+
+ running = netif_running(net_dev);
+ if (running) {
+ napi_disable(&ch_rx->napi);
+ ltq_dma_close(&ch_rx->dma);
+ }
+
+ xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
+ curr_desc = ch_rx->dma.desc;
+
+ for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
+ ch_rx->dma.desc++) {
+ skb = ch_rx->skb[ch_rx->dma.desc];
+ ret = xrx200_alloc_skb(ch_rx);
+ if (ret) {
+ net_dev->mtu = old_mtu;
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+
+ ch_rx->dma.desc = curr_desc;
+ if (running) {
+ napi_enable(&ch_rx->napi);
+ ltq_dma_open(&ch_rx->dma);
+ ltq_dma_enable_irq(&ch_rx->dma);
+ }
+
+ return ret;
+}
+
static const struct net_device_ops xrx200_netdev_ops = {
.ndo_open = xrx200_open,
.ndo_stop = xrx200_close,
.ndo_start_xmit = xrx200_start_xmit,
+ .ndo_change_mtu = xrx200_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -369,7 +420,8 @@ static int xrx200_dma_init(struct xrx200_priv *priv)
int ret = 0;
int i;
- ltq_dma_init_port(DMA_PORT_ETOP);
+ ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
+ XRX200_DMA_BURST_LEN);
ch_rx->dma.nr = XRX200_DMA_RX;
ch_rx->dma.dev = priv->dev;
@@ -453,7 +505,7 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
- net_dev->max_mtu = XRX200_DMA_DATA_LEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
/* load the memory ranges */
priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
@@ -474,7 +526,7 @@ static int xrx200_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- err = of_get_mac_address(np, net_dev->dev_addr);
+ err = of_get_ethdev_address(np, net_dev);
if (err)
eth_hw_addr_random(net_dev);
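The buffer math above replaces the fixed 0x600 allocation: each RX buffer must hold the MTU plus a VLAN Ethernet header and the FCS, and max_mtu backs that overhead out of the 16-bit descriptor length limit. With the kernel's constants (VLAN_ETH_HLEN = 18, ETH_FCS_LEN = 4):

/* per-buffer length at the default MTU of 1500 */
int len = 1500 + VLAN_ETH_HLEN + ETH_FCS_LEN;			/* 1522 */

/* largest MTU the descriptor length field can describe */
int max_mtu = (SZ_64K - 1) - VLAN_ETH_HLEN - ETH_FCS_LEN;	/* 65513 */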
diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig
index 63bf01d28f0c..f99adbf26ab4 100644
--- a/drivers/net/ethernet/litex/Kconfig
+++ b/drivers/net/ethernet/litex/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
config LITEX_LITEETH
tristate "LiteX Ethernet support"
- depends on OF_NET
+ depends on OF
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index a9bdbf0dcfe1..3d9385a4989b 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -266,7 +266,7 @@ static int liteeth_probe(struct platform_device *pdev)
priv->tx_base = buf_base + priv->num_rx_slots * priv->slot_size;
priv->tx_slot = 0;
- err = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ err = of_get_ethdev_address(pdev->dev.of_node, netdev);
if (err)
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 28d5ad296646..bb14fa2241a3 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1770,7 +1770,7 @@ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
addr[5] = mac_l & 0xff;
}
-static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
+static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr)
{
wrlp(mp, MAC_ADDR_HIGH,
(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
@@ -1919,7 +1919,7 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
netif_addr_lock_bh(dev);
mv643xx_eth_program_unicast_filter(dev);
@@ -2925,10 +2925,14 @@ static void set_params(struct mv643xx_eth_private *mp,
struct net_device *dev = mp->dev;
unsigned int tx_ring_size;
- if (is_valid_ether_addr(pd->mac_addr))
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
- else
- uc_addr_get(mp, dev->dev_addr);
+ if (is_valid_ether_addr(pd->mac_addr)) {
+ eth_hw_addr_set(dev, pd->mac_addr);
+ } else {
+ u8 addr[ETH_ALEN];
+
+ uc_addr_get(mp, addr);
+ eth_hw_addr_set(dev, addr);
+ }
mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
if (pd->rx_queue_size)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d460a270601..5a7bdca22a63 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1623,8 +1623,8 @@ static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
}
/* Set mac address */
-static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
- int queue)
+static void mvneta_mac_addr_set(struct mvneta_port *pp,
+ const unsigned char *addr, int queue)
{
unsigned int mac_h;
unsigned int mac_l;
@@ -1914,7 +1914,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
}
/* Handle tx checksum */
-static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
+static u32 mvneta_skb_tx_csum(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
@@ -2595,8 +2595,7 @@ err_drop_frame:
}
static inline void
-mvneta_tso_put_hdr(struct sk_buff *skb,
- struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
{
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
@@ -2604,7 +2603,7 @@ mvneta_tso_put_hdr(struct sk_buff *skb,
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = hdr_len;
- tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+ tx_desc->command = mvneta_skb_tx_csum(skb);
tx_desc->command |= MVNETA_TXD_F_DESC;
tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
txq->txq_put_index * TSO_HEADER_SIZE;
@@ -2681,7 +2680,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
- mvneta_tso_put_hdr(skb, pp, txq);
+ mvneta_tso_put_hdr(skb, txq);
while (data_left > 0) {
int size;
@@ -2799,7 +2798,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
- tx_cmd = mvneta_skb_tx_csum(pp, skb);
+ tx_cmd = mvneta_skb_tx_csum(skb);
tx_desc->data_size = skb_headlen(skb);
@@ -3824,8 +3823,6 @@ static void mvneta_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct mvneta_port *pp = netdev_priv(ndev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
/* We only support QSGMII, SGMII, 802.3z and RGMII modes.
@@ -3833,16 +3830,9 @@ static void mvneta_validate(struct phylink_config *config,
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
*/
- if (phy_interface_mode_is_8023z(state->interface)) {
- if (!phylink_test(state->advertising, Autoneg)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
- }
- } else if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_QSGMII &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ if (phy_interface_mode_is_8023z(state->interface) &&
+ !phylink_test(state->advertising, Autoneg)) {
+ linkmode_zero(supported);
return;
}
@@ -3854,11 +3844,12 @@ static void mvneta_validate(struct phylink_config *config,
phylink_set(mask, Pause);
/* Half-duplex at speeds higher than 100Mbit is unsupported */
- if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+ if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
}
- if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
phylink_set(mask, 2500baseT_Full);
phylink_set(mask, 2500baseX_Full);
}
@@ -3871,15 +3862,8 @@ static void mvneta_validate(struct phylink_config *config,
phylink_set(mask, 100baseT_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void mvneta_mac_pcs_get_state(struct phylink_config *config,
@@ -5182,6 +5166,31 @@ static int mvneta_probe(struct platform_device *pdev)
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
+ phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ pp->phylink_config.supported_interfaces);
+ if (comphy) {
+ /* If a COMPHY is present, we can support any of the serdes
+ * modes and switch between them.
+ */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pp->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* No COMPHY, with only 2500BASE-X mode supported */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pp->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
+ phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ /* No COMPHY, we can switch between 1000BASE-X and SGMII */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pp->phylink_config.supported_interfaces);
+ }
phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
phy_mode, &mvneta_phylink_ops);
@@ -5242,14 +5251,14 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_free_ports;
}
- err = of_get_mac_address(dn, dev->dev_addr);
+ err = of_get_ethdev_address(dn, dev);
if (!err) {
mac_from = "device tree";
} else {
mvneta_get_mac_addr(pp, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
mac_from = "hardware";
- memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, hw_mac_addr);
} else {
mac_from = "random";
eth_hw_addr_random(dev);
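Both this driver and mvpp2 below move interface checking out of validate(): probe fills phylink_config.supported_interfaces once, phylink then never offers validate() a mode outside that bitmap, and the pp->comphy special cases can disappear. A sketch of the pattern (the wrapper function is illustrative):

#include <linux/phy.h>
#include <linux/phylink.h>

static void demo_fill_interfaces(struct phylink_config *config,
				 bool have_comphy)
{
	phy_interface_set_rgmii(config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_QSGMII, config->supported_interfaces);

	if (have_comphy) {
		/* serdes lanes can be reconfigured at runtime, so all
		 * serdes modes are fair game
		 */
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
			  config->supported_interfaces);
	}
}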
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index d5c92e43f89e..587def69a6f7 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6081,9 +6081,9 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
char hw_mac_addr[ETH_ALEN] = {0};
char fw_mac_addr[ETH_ALEN];
- if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
+ if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
*mac_from = "firmware node";
- ether_addr_copy(dev->dev_addr, fw_mac_addr);
+ eth_hw_addr_set(dev, fw_mac_addr);
return;
}
@@ -6091,7 +6091,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
mvpp21_get_mac_address(port, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
*mac_from = "hardware";
- ether_addr_copy(dev->dev_addr, hw_mac_addr);
+ eth_hw_addr_set(dev, hw_mac_addr);
return;
}
}
@@ -6261,32 +6261,13 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- /* Invalid combinations */
- switch (state->interface) {
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_XAUI:
- if (!mvpp2_port_supports_xlg(port))
- goto empty_set;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (!mvpp2_port_supports_rgmii(port))
- goto empty_set;
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* When in 802.3z mode, we must have AN enabled:
- * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
- * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
- */
- if (!phylink_test(state->advertising, Autoneg))
- goto empty_set;
- break;
- default:
- break;
- }
+ /* When in 802.3z mode, we must have AN enabled:
+ * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ */
+ if (phy_interface_mode_is_8023z(state->interface) &&
+ !phylink_test(state->advertising, Autoneg))
+ goto empty_set;
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
@@ -6299,19 +6280,12 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
- case PHY_INTERFACE_MODE_NA:
if (mvpp2_port_supports_xlg(port)) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
+ phylink_set_10g_modes(mask);
phylink_set(mask, 10000baseKR_Full);
}
- if (state->interface != PHY_INTERFACE_MODE_NA)
- break;
- fallthrough;
+ break;
+
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -6323,35 +6297,28 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 100baseT_Full);
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
- if (state->interface != PHY_INTERFACE_MODE_NA)
- break;
- fallthrough;
+ break;
+
case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ break;
+
case PHY_INTERFACE_MODE_2500BASEX:
- if (port->comphy ||
- state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
- if (port->comphy ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
break;
+
default:
goto empty_set;
}
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-
- phylink_helper_basex_speed(state);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
return;
empty_set:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
}
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
@@ -6943,6 +6910,40 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
+ if (mvpp2_port_supports_xlg(port)) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ }
+
+ if (mvpp2_port_supports_rgmii(port))
+ phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
+
+ if (comphy) {
+ /* If a COMPHY is present, we can support any of the
+ * serdes modes and switch between them.
+ */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* No COMPHY, with only 2500BASE-X mode supported */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
+ phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ /* No COMPHY, we can switch between 1000BASE-X and SGMII
+ */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ }
+
phylink = phylink_create(&port->phylink_config, port_fwnode,
phy_mode, &mvpp2_phylink_ops);
if (IS_ERR(phylink)) {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 93575800ca92..75ba57bd1d46 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -2347,7 +2347,7 @@ int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
return err;
/* Set addr in the device */
- ether_addr_copy(dev->dev_addr, da);
+ eth_hw_addr_set(dev, da);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 34a089b71e55..186d00a9ab35 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -838,9 +838,6 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
if (!cgx)
return;
- if (is_dev_rpm(cgx))
- return;
-
if (enable) {
/* Enable inbound PTP timestamping */
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
@@ -1522,7 +1519,6 @@ static int cgx_lmac_exit(struct cgx *cgx)
int i;
if (cgx->cgx_cmd_workq) {
- flush_workqueue(cgx->cgx_cmd_workq);
destroy_workqueue(cgx->cgx_cmd_workq);
cgx->cgx_cmd_workq = NULL;
}
@@ -1545,9 +1541,11 @@ static int cgx_lmac_exit(struct cgx *cgx)
static void cgx_populate_features(struct cgx *cgx)
{
if (is_dev_rpm(cgx))
- cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
+ cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
+ RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
else
- cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+ cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
+ RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}
static struct mac_ops cgx_mac_ops = {
@@ -1571,6 +1569,7 @@ static struct mac_ops cgx_mac_ops = {
.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
.mac_pause_frm_config = cgx_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = cgx_lmac_ptp_config,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index d9bea13f15b8..8931864ee110 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -191,6 +191,7 @@ enum nix_scheduler {
#define NIX_CHAN_SDP_CH_START (0x700ull)
#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
#define NIX_CHAN_SDP_NUM_CHANS 256
+#define NIX_CHAN_CPT_CH_START (0x800ull)
/* The mask is to extract the lower 10 bits of the channel number
* which CPT will pass to X2P.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index c38306b3384a..fc6e7423cbd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -102,6 +102,11 @@ struct mac_ops {
void (*mac_pause_frm_config)(void *cgxd,
int lmac_id,
bool enable);
+
+ /* Enable/Disable Inbound PTP */
+ void (*mac_enadis_ptp_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 154877706a0e..4e79e918a161 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -84,7 +84,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0009)
+#define OTX2_MBOX_VERSION (0x000a)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -154,23 +154,23 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
-M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
-M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
-M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
-M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
-M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
- cgx_set_link_mode_rsp) \
-M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
- cgx_features_info_msg) \
-M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
-M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \
- cgx_mac_addr_add_rsp) \
-M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
msg_rsp) \
-M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
cgx_max_dmac_entries_get_rsp) \
-M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \
-M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
+ cgx_features_info_msg) \
+M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
@@ -186,9 +186,12 @@ M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
cpt_rd_wr_reg_msg) \
+M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
+ cpt_inline_ipsec_cfg_msg, msg_rsp) \
M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
+M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
@@ -229,6 +232,8 @@ M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, msg_rsp) \
M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
msg_req, npc_mcam_read_base_rule_rsp) \
M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
@@ -270,6 +275,10 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \
+ nix_inline_ipsec_cfg, msg_rsp) \
+M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \
+ nix_inline_ipsec_lf_cfg, msg_rsp) \
M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
nix_cn10k_aq_enq_rsp) \
M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
@@ -284,10 +293,14 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
+#define MBOX_UP_CPT_MESSAGES \
+M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
+MBOX_UP_CPT_MESSAGES
#undef M
};
@@ -575,10 +588,13 @@ struct cgx_mac_addr_update_req {
};
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
-#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
-#define RVU_MAC_VERSION BIT_ULL(2)
-#define RVU_MAC_CGX BIT_ULL(3)
-#define RVU_MAC_RPM BIT_ULL(4)
+#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1)
+ /* flow control via HiGig2 messages on the physical link */
+#define RVU_LMAC_FEAT_PTP BIT_ULL(2) /* precision time protocol */
+#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */
+#define RVU_MAC_VERSION BIT_ULL(4)
+#define RVU_MAC_CGX BIT_ULL(5)
+#define RVU_MAC_RPM BIT_ULL(6)
struct cgx_features_info_msg {
struct mbox_msghdr hdr;
@@ -593,6 +609,22 @@ struct rpm_stats_rsp {
u64 tx_stats[RPM_TX_STATS_COUNT];
};
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ u64 mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ u8 dir;
+ u8 pkind; /* valid only when the custom flag is set */
+ u8 var_len_off; /* Offset of custom header length field.
+ * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND
+ */
+ u8 var_len_off_mask; /* Mask for the length within the offset */
+ u8 shift_dir; /* shift direction to get length of the header at var_len_off */
+};
+
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -698,6 +730,8 @@ enum nix_af_status {
NIX_AF_ERR_INVALID_BANDPROF = -426,
NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
+ NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
+ NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
};
/* For NIX RX vtag action */
@@ -1065,6 +1099,40 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
+/* Global NIX inline IPSec configuration */
+struct nix_inline_ipsec_cfg {
+ struct mbox_msghdr hdr;
+ u32 cpt_credit;
+ struct {
+ u8 egrp;
+ u8 opcode;
+ u16 param1;
+ u16 param2;
+ } gen_cfg;
+ struct {
+ u16 cpt_pf_func;
+ u8 cpt_slot;
+ } inst_qsel;
+ u8 enable;
+};
+
+/* Per NIX LF inline IPSec configuration */
+struct nix_inline_ipsec_lf_cfg {
+ struct mbox_msghdr hdr;
+ u64 sa_base_addr;
+ struct {
+ u32 tag_const;
+ u16 lenm1_max;
+ u8 sa_pow2_size;
+ u8 tt;
+ } ipsec_cfg0;
+ struct {
+ u32 sa_idx_max;
+ u8 sa_idx_w;
+ } ipsec_cfg1;
+ u8 enable;
+};
+
struct nix_hw_info {
struct mbox_msghdr hdr;
u16 rsvs16;
@@ -1357,12 +1425,15 @@ struct npc_mcam_get_stats_rsp {
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
+ PTP_OP_GET_TSTMP = 2,
+ PTP_OP_SET_THRESH = 3,
};
struct ptp_req {
struct mbox_msghdr hdr;
u8 op;
s64 scaled_ppm;
+ u64 thresh;
};
struct ptp_rsp {
@@ -1399,7 +1470,9 @@ enum cpt_af_status {
CPT_AF_ERR_LF_INVALID = -903,
CPT_AF_ERR_ACCESS_DENIED = -904,
CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
- CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906
+ CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
+ CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
+ CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
};
/* CPT mbox message formats */
@@ -1420,6 +1493,22 @@ struct cpt_lf_alloc_req_msg {
int blkaddr;
};
+#define CPT_INLINE_INBOUND 0
+#define CPT_INLINE_OUTBOUND 1
+
+/* Mailbox message request format for CPT IPsec
+ * inline inbound and outbound configuration.
+ */
+struct cpt_inline_ipsec_cfg_msg {
+ struct mbox_msghdr hdr;
+ u8 enable;
+ u8 slot;
+ u8 dir;
+ u8 sso_pf_func_ovrd;
+ u16 sso_pf_func; /* inbound path SSO_PF_FUNC */
+ u16 nix_pf_func; /* outbound path NIX_PF_FUNC */
+};
+
/* Mailbox message request and response format for CPT stats. */
struct cpt_sts_req {
struct mbox_msghdr hdr;
@@ -1478,6 +1567,13 @@ struct cpt_rxc_time_cfg_req {
u16 active_limit;
};
+/* Mailbox message request format to request for CPT_INST_S lmtst. */
+struct cpt_inst_lmtst_req {
+ struct mbox_msghdr hdr;
+ u64 inst[8];
+ u64 rsvd;
+};
+
struct sdp_node_info {
/* Node to which this PF belongs */
u8 node_id;
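The M() lists are X-macros, expanded several times with different definitions of M to emit the message-ID enum, handler prototypes, and dispatch tables, so adding a line like CPT_CTX_CACHE_SYNC above is the whole registration. The enum expansion, for instance, reduces to:

/* #define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id, applied
 * over the tables yields one enumerator per entry, e.g.:
 */
enum {
	MBOX_MSG_CPT_INLINE_IPSEC_CFG = 0xA04,
	MBOX_MSG_CPT_CTX_CACHE_SYNC   = 0xA07,
	/* ... */
};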
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 3a819b24accc..77fd39e2c8db 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -8,6 +8,8 @@
#ifndef NPC_H
#define NPC_H
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+
enum NPC_LID_E {
NPC_LID_LA = 0,
NPC_LID_LB,
@@ -25,15 +27,12 @@ enum npc_kpu_la_ltype {
NPC_LT_LA_8023 = 1,
NPC_LT_LA_ETHER,
NPC_LT_LA_IH_NIX_ETHER,
- NPC_LT_LA_IH_8_ETHER,
- NPC_LT_LA_IH_4_ETHER,
- NPC_LT_LA_IH_2_ETHER,
- NPC_LT_LA_HIGIG2_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER = 7,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_LT_LA_CH_LEN_90B_ETHER,
NPC_LT_LA_CPT_HDR,
NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
@@ -148,10 +147,11 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
-#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
enum npc_pkind_type {
NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
NPC_RX_CPT_HDR_PKIND,
@@ -162,6 +162,10 @@ enum npc_pkind_type {
NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
};
+enum npc_interface_type {
+ NPC_INTF_MODE_DEF,
+};
+
/* list of known and supported fields in packet header and
* fields present in key structure.
*/
@@ -549,7 +553,7 @@ struct npc_kpu_profile_fwdata {
#define KPU_SIGN 0x00666f727075706b
#define KPU_NAME_LEN 32
/** Maximum number of custom KPU entries supported by the built-in profile. */
-#define KPU_MAX_CST_ENT 2
+#define KPU_MAX_CST_ENT 6
/* KPU Profile Header */
__le64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */
u8 name[KPU_NAME_LEN]; /* KPU Profile name */
@@ -589,6 +593,8 @@ struct rvu_npc_mcam_rule {
u8 default_rule;
bool enable;
bool vfvlan_cfg;
+ u16 chan;
+ u16 chan_mask;
};
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 588822a0cf21..0fe7ad35e36f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -8,7 +8,7 @@
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
-#define NPC_KPU_PROFILE_VER 0x0000000100060000
+#define NPC_KPU_PROFILE_VER 0x0000000100070000
#define NPC_KPU_VER_MAJ(ver) ((u16)(((ver) >> 32) & 0xFFFF))
#define NPC_KPU_VER_MIN(ver) ((u16)(((ver) >> 16) & 0xFFFF))
#define NPC_KPU_VER_PATCH(ver) ((u16)((ver) & 0xFFFF))
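As a quick check on the macros above, decoding the bumped profile version by hand, with ver = 0x0000000100070000:

    NPC_KPU_VER_MAJ(ver)   = (ver >> 32) & 0xFFFF = 0x0001 -> 1
    NPC_KPU_VER_MIN(ver)   = (ver >> 16) & 0xFFFF = 0x0007 -> 7
    NPC_KPU_VER_PATCH(ver) =  ver        & 0xFFFF = 0x0000 -> 0

so this change moves the built-in KPU profile from 1.6.0 to 1.7.0.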
@@ -176,18 +176,18 @@ enum npc_kpu_parser_state {
NPC_S_KPU1_EXDSA,
NPC_S_KPU1_HIGIG2,
NPC_S_KPU1_IH_NIX_HIGIG2,
- NPC_S_KPU1_CUSTOM_L2_90B,
+ NPC_S_KPU1_CUSTOM_PRE_L2,
NPC_S_KPU1_CPT_HDR,
- NPC_S_KPU1_CUSTOM_L2_24B,
NPC_S_KPU1_VLAN_EXDSA,
NPC_S_KPU2_CTAG,
NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
- NPC_S_KPU2_PREHEADER,
NPC_S_KPU2_EXDSA,
NPC_S_KPU2_NGIO,
+ NPC_S_KPU2_CPT_CTAG,
+ NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
@@ -979,8 +979,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
0,
0, 0, 0, 0,
@@ -996,27 +996,27 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 36, 40, 44, 0, 0,
- NPC_S_KPU1_CUSTOM_L2_24B, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 40, 54, 58, 0, 0,
- NPC_S_KPU1_CPT_HDR, 0, 0,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 40, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 0, 7, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 102, 106, 110, 0, 0,
- NPC_S_KPU1_CUSTOM_L2_90B, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
0,
0, 0, 0, 0,
@@ -1062,6 +1062,10 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1379,33 +1383,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH, 0xff,
- NPC_IH_W | NPC_IH_UTAG,
- NPC_IH_W | NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
- NPC_IH_W,
- NPC_IH_W | NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
- 0x0000,
- NPC_IH_W | NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1711,7 +1688,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_IP,
0xffff,
0x0000,
@@ -1720,7 +1697,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_IP6,
0xffff,
0x0000,
@@ -1729,7 +1706,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_ARP,
0xffff,
0x0000,
@@ -1738,7 +1715,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_RARP,
0xffff,
0x0000,
@@ -1747,7 +1724,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_PTP,
0xffff,
0x0000,
@@ -1756,7 +1733,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_FCOE,
0xffff,
0x0000,
@@ -1765,7 +1742,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_CTAG,
0xffff,
NPC_ETYPE_CTAG,
@@ -1774,7 +1751,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1783,7 +1760,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_SBTAG,
0xffff,
0x0000,
@@ -1792,7 +1769,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_QINQ,
0xffff,
0x0000,
@@ -1801,7 +1778,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_ETAG,
0xffff,
0x0000,
@@ -1810,7 +1787,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1819,7 +1796,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
0x0000,
@@ -1828,7 +1805,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_NSH,
0xffff,
0x0000,
@@ -1837,7 +1814,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1847,150 +1824,24 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
NPC_ETYPE_IP,
0xffff,
0x0000,
0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
0x0000,
0x0000,
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_QINQ,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- 0x0000,
- 0x0000,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- 0x0000,
- 0x0000,
NPC_ETYPE_IP6,
0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
0x0000,
- 0xffff,
0x0000,
0x0000,
- NPC_ETYPE_CTAG,
- 0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
0x0000,
- 0x0000,
- NPC_ETYPE_QINQ,
- 0xffff,
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_PTP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_FCOE,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1999,16 +1850,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_S_KPU1_CPT_HDR, 0xff,
NPC_ETYPE_QINQ,
0xffff,
0x0000,
@@ -2017,51 +1859,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_ETAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_MPLSU,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_MPLSM,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_NSH,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU1_VLAN_EXDSA, 0xff,
NPC_ETYPE_CTAG,
0xffff,
@@ -2084,6 +1881,10 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -2805,114 +2606,6 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_PTP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_FCOE,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_QINQ,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_MPLSU,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_MPLSM,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_NSH,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_EXDSA, 0xff,
NPC_DSA_EDSA,
NPC_DSA_EDSA,
@@ -3066,6 +2759,42 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -3079,6 +2808,10 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -4056,6 +3789,10 @@ static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -4367,6 +4104,10 @@ static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -5362,6 +5103,10 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -6033,6 +5778,10 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -6353,6 +6102,10 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -7096,6 +6849,10 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
@@ -7496,15 +7253,6 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_S_KPU9_GTPU, 0xff,
0x0000,
0x0000,
- NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
- NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU9_GTPU, 0xff,
- 0x0000,
- 0x0000,
NPC_GTP_PT_GTP | NPC_GTP_VER1,
NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
0x0000,
@@ -7569,6 +7317,10 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -7736,6 +7488,10 @@ static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -8047,6 +7803,10 @@ static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
@@ -8304,6 +8064,10 @@ static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -8318,6 +8082,10 @@ static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -8332,6 +8100,10 @@ static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -8535,6 +8307,10 @@ static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -8594,6 +8370,10 @@ static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
static struct npc_kpu_profile_action kpu1_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -8880,30 +8660,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 8, 1,
- NPC_LID_LA, NPC_LT_LA_IH_8_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 4, 1,
- NPC_LID_LA, NPC_LT_LA_IH_4_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 2, 1,
- NPC_LID_LA, NPC_LT_LA_IH_2_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
NPC_ERRLEV_LA, NPC_EC_IH_LENGTH,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
@@ -9192,159 +8948,127 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
- NPC_S_KPU5_IP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 3, 0,
- NPC_S_KPU5_IP6, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_ARP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_RARP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_PTP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_FCOE, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
- NPC_S_KPU2_CTAG2, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_CTAG2, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_CTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 22, 0, 0,
- NPC_S_KPU2_SBTAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_SBTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_QINQ, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 26, 0, 0,
- NPC_S_KPU2_ETAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ NPC_S_KPU2_ETAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 2, 0,
- NPC_S_KPU4_NSH, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_NSH,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 3, 0,
- NPC_S_KPU5_CPT_IP, 56, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
- NPC_S_KPU5_CPT_IP6, 56, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 54, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 54, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
- NPC_S_KPU5_CPT_IP, 60, 1,
+ NPC_S_KPU5_CPT_IP, 14, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
0,
0, 0, 0, 0,
@@ -9352,7 +9076,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 3, 0,
- NPC_S_KPU5_CPT_IP6, 60, 1,
+ NPC_S_KPU5_CPT_IP6, 14, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
0,
0, 0, 0, 0,
@@ -9360,7 +9084,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 58, 1,
+ NPC_S_KPU2_CPT_CTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
@@ -9368,141 +9092,13 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 58, 1,
+ NPC_S_KPU2_CPT_QINQ, 12, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 3, 0,
- NPC_S_KPU5_IP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
- NPC_S_KPU5_IP6, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_ARP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_RARP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_PTP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_FCOE, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 0, 0, 0,
- NPC_S_KPU2_CTAG2, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 22, 0, 0,
- NPC_S_KPU2_SBTAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 26, 0, 0,
- NPC_S_KPU2_ETAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 2, 0,
- NPC_S_KPU4_NSH, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_NSH,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 0, 0, 1, 0,
NPC_S_KPU3_VLAN_EXDSA, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
@@ -9522,6 +9118,10 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
static struct npc_kpu_profile_action kpu2_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -10165,102 +9765,6 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_PTP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_FCOE, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 0, 0, 0,
- NPC_S_KPU3_CTAG_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 20, 0, 0,
- NPC_S_KPU3_STAG_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 0, 0, 0,
- NPC_S_KPU3_QINQ_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU4_MPLS, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU4_MPLS, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 1, 0,
- NPC_S_KPU4_NSH, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 18, 1,
NPC_LID_LB, NPC_LT_LB_EDSA,
NPC_F_LB_L_EDSA,
@@ -10395,6 +9899,38 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10407,6 +9943,10 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
static struct npc_kpu_profile_action kpu3_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -11276,6 +10816,10 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
static struct npc_kpu_profile_action kpu4_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -11553,6 +11097,10 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
static struct npc_kpu_profile_action kpu5_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -12438,6 +11986,10 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
static struct npc_kpu_profile_action kpu6_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13035,6 +12587,10 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
static struct npc_kpu_profile_action kpu7_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13320,6 +12876,10 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
static struct npc_kpu_profile_action kpu8_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -13981,6 +13541,10 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
static struct npc_kpu_profile_action kpu9_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -14335,16 +13899,8 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_G_PDU,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU12_TU_IP, 8, 1,
+ 8, 0, 6, 2, 1,
+ NPC_S_NA, 0, 1,
NPC_LID_LE, NPC_LT_LE_GTPU,
0,
0, 0, 0, 0,
@@ -14402,6 +13958,10 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
static struct npc_kpu_profile_action kpu10_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -14551,6 +14111,10 @@ static struct npc_kpu_profile_action kpu10_action_entries[] = {
static struct npc_kpu_profile_action kpu11_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -14828,6 +14392,10 @@ static struct npc_kpu_profile_action kpu11_action_entries[] = {
static struct npc_kpu_profile_action kpu12_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -15057,6 +14625,10 @@ static struct npc_kpu_profile_action kpu12_action_entries[] = {
static struct npc_kpu_profile_action kpu13_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -15070,6 +14642,10 @@ static struct npc_kpu_profile_action kpu13_action_entries[] = {
static struct npc_kpu_profile_action kpu14_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -15083,6 +14659,10 @@ static struct npc_kpu_profile_action kpu14_action_entries[] = {
static struct npc_kpu_profile_action kpu15_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -15264,6 +14844,10 @@ static struct npc_kpu_profile_action kpu15_action_entries[] = {
static struct npc_kpu_profile_action kpu16_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index 9b8e59f4c206..d6321de3cc17 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -27,54 +27,29 @@
#define PCI_DEVID_CN10K_PTP 0xA09E
#define PCI_PTP_BAR_NO 0
-#define PCI_RST_BAR_NO 0
#define PTP_CLOCK_CFG 0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1)
+#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2)
+#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9)
+#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8)
+#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10)
+#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30)
+#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31)
+
+#define PTP_PPS_HI_INCR 0xF60ULL
+#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_HI 0xF58ULL
+
#define PTP_CLOCK_LO 0xF08ULL
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
-
-#define RST_BOOT 0x1600ULL
-#define RST_MUL_BITS GENMASK_ULL(38, 33)
-#define CLOCK_BASE_RATE 50000000ULL
+#define PTP_TIMESTAMP 0xF20ULL
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
-static u64 get_clock_rate(void)
-{
- u64 cfg, ret = CLOCK_BASE_RATE * 16;
- struct pci_dev *pdev;
- void __iomem *base;
-
- /* To get the input clock frequency with which PTP co-processor
- * block is running the base frequency(50 MHz) needs to be multiplied
- * with multiplier bits present in RST_BOOT register of RESET block.
- * Hence below code gets the multiplier bits from the RESET PCI
- * device present in the system.
- */
- pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVID_OCTEONTX2_RST, NULL);
- if (!pdev)
- goto error;
-
- base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
- if (!base)
- goto error_put_pdev;
-
- cfg = readq(base + RST_BOOT);
- ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
-
- iounmap(base);
-
-error_put_pdev:
- pci_dev_put(pdev);
-
-error:
- return ret;
-}
-
struct ptp *ptp_get(void)
{
struct ptp *ptp = first_ptp_block;
@@ -145,13 +120,74 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk)
return 0;
}
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
+{
+ struct pci_dev *pdev;
+ u64 clock_comp;
+ u64 clock_cfg;
+
+ if (!ptp)
+ return;
+
+ pdev = ptp->pdev;
+
+ if (!sclk) {
+ dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
+ return;
+ }
+
+ /* sclk is in MHz */
+ ptp->clock_rate = sclk * 1000000;
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+
+ if (ext_clk_freq) {
+ ptp->clock_rate = ext_clk_freq;
+ /* Set GPIO as PTP clock source */
+ clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
+ }
+
+ if (extts) {
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
+ /* Set GPIO as timestamping source */
+ clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ /* Set 50% duty cycle for 1Hz output */
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+}
+
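The PTP_CLOCK_COMP value written in ptp_start() above is the nanosecond increment per input-clock tick in 32.32 fixed point; a worked example, assuming a 1000 MHz sclk (the actual rate comes from firmware data):

    /* sclk = 1000 => clock_rate = 1000 * 1000000 = 1e9 Hz */
    clock_comp = ((u64)1000000000ULL << 32) / 1000000000ULL; /* = 1 << 32 */
    /* i.e. exactly 1.0 ns per tick; a 500 MHz clock would give 2 << 32.
     * The PPS increments use the same format: 0x1dcd650000000000 is
     * 500000000 << 32, i.e. 0.5 s per half-period => 1 Hz at 50% duty.
     */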
+static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
+{
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+
+ return 0;
+}
+
+static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
+{
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
static int ptp_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ptp *ptp;
- u64 clock_comp;
- u64 clock_cfg;
int err;
ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
@@ -172,17 +208,6 @@ static int ptp_probe(struct pci_dev *pdev,
ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
- ptp->clock_rate = get_clock_rate();
-
- /* Enable PTP clock */
- clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
- clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
- writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
-
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
- /* Initial compensation value to start the nanosecs counter */
- writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
-
pci_set_drvdata(pdev, ptp);
if (!first_ptp_block)
first_ptp_block = ptp;
@@ -272,6 +297,12 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_GET_CLOCK:
err = ptp_get_clock(rvu->ptp, &rsp->clk);
break;
+ case PTP_OP_GET_TSTMP:
+ err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
+ break;
+ case PTP_OP_SET_THRESH:
+ err = ptp_set_thresh(rvu->ptp, req->thresh);
+ break;
default:
err = -EINVAL;
break;
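For context, a requester on the other end of the AF mailbox would drive the new ops through struct ptp_req; a minimal sketch with field names from mbox.h above, transport omitted:

    struct ptp_req req = { 0 };

    req.op = PTP_OP_SET_THRESH;        /* handled by ptp_set_thresh() */
    req.thresh = next_pps_edge_ns;     /* written to PTP_PPS_THRESH_HI */
    /* ... send via the PF->AF mailbox; rvu_mbox_handler_ptp_op()
     * dispatches on req.op and fills struct ptp_rsp on the GET ops.
     */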
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 76d404b24552..1b81a0493cd3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -20,6 +20,7 @@ struct ptp {
struct ptp *ptp_get(void);
void ptp_put(struct ptp *ptp);
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts);
extern struct pci_driver ptp_driver;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 07b0eafccad8..e695fa0e82a9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -29,6 +29,7 @@ static struct mac_ops rpm_mac_ops = {
.mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -270,3 +271,19 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
return 0;
}
+
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
+ if (enable)
+ cfg |= RPMX_RX_TS_PREPEND;
+ else
+ cfg &= ~RPMX_RX_TS_PREPEND;
+ rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+}
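With the new callback in rpm_mac_ops, callers stay MAC-agnostic; assuming the CGX ops table wires the same hook to its own PTP config routine, an enable path reduces to:

    struct mac_ops *mac_ops = get_mac_ops(cgxd);

    /* Dispatches to rpm_lmac_ptp_config() on CN10K RPM, and to the
     * CGX equivalent on older silicon, without the caller caring.
     */
    mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);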
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index f0b069442dcc..57c8a687b488 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -14,6 +14,8 @@
#define PCI_DEVID_CN10K_RPM 0xA060
/* Registers */
+#define RPMX_CMRX_CFG 0x00
+#define RPMX_RX_TS_PREPEND BIT_ULL(22)
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
@@ -54,4 +56,5 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause);
int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 35836903b7fb..cb56e171ddd4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -854,6 +854,7 @@ static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
block->lfcfg_reg = NIX_PRIV_LFX_CFG;
block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
block->lfreset_reg = NIX_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "NIX%d", blkid);
rvu->nix_blkaddr[blkid] = blkaddr;
return rvu_alloc_bitmap(&block->lf);
@@ -883,6 +884,7 @@ static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
block->lfcfg_reg = CPT_PRIV_LFX_CFG;
block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
block->lfreset_reg = CPT_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "CPT%d", blkid);
return rvu_alloc_bitmap(&block->lf);
}
@@ -940,6 +942,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfcfg_reg = NPA_PRIV_LFX_CFG;
block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
block->lfreset_reg = NPA_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "NPA");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -979,6 +982,7 @@ nix:
block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSO GROUP");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1003,6 +1007,7 @@ ssow:
block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSOWS");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1028,6 +1033,7 @@ tim:
block->lfcfg_reg = TIM_PRIV_LFX_CFG;
block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
block->lfreset_reg = TIM_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "TIM");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1287,6 +1293,60 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
return (val & 0xFFF);
}
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int numlfs, total_lfs = 0, nr_blocks = 0;
+ int i, num_blkaddr[BLK_COUNT] = { 0 };
+ struct rvu_block *block;
+ int blkaddr;
+ u16 start_slot;
+
+ if (!is_blktype_attached(pfvf, blktype))
+ return -ENODEV;
+
+	/* Collect in num_blkaddr[] all the block addresses from which
+	 * LFs are attached to the given pcifunc.
+ */
+ for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
+ block = &rvu->hw->block[blkaddr];
+ if (block->type != blktype)
+ continue;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ continue;
+
+ numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
+ if (numlfs) {
+ total_lfs += numlfs;
+ num_blkaddr[nr_blocks] = blkaddr;
+ nr_blocks++;
+ }
+ }
+
+ if (global_slot >= total_lfs)
+ return -ENODEV;
+
+	/* Based on the given global slot number, retrieve the correct
+	 * block address from the attached blocks, along with the slot
+	 * number within that block.
+ */
+ total_lfs = 0;
+ blkaddr = -ENODEV;
+ for (i = 0; i < nr_blocks; i++) {
+ numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
+ total_lfs += numlfs;
+ if (global_slot < total_lfs) {
+ blkaddr = num_blkaddr[i];
+ start_slot = total_lfs - numlfs;
+ *slot_in_block = global_slot - start_slot;
+ break;
+ }
+ }
+
+ return blkaddr;
+}
+
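A worked example of the translation above, assuming a pcifunc with two CPT LFs attached from CPT0 and three from CPT1 (an assumed layout, purely for illustration):

    global_slot:    0     1     2     3     4
    blkaddr:        CPT0  CPT0  CPT1  CPT1  CPT1
    slot_in_block:  0     1     0     1     2

so rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc, 3, &slot) returns BLKADDR_CPT1 with slot == 1, and any global_slot >= 5 fails with -ENODEV.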
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -2345,7 +2405,6 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw)
int devid;
if (mw->mbox_wq) {
- flush_workqueue(mw->mbox_wq);
destroy_workqueue(mw->mbox_wq);
mw->mbox_wq = NULL;
}
@@ -2473,7 +2532,8 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
rvu_npa_lf_teardown(rvu, pcifunc, lf);
else if ((block->addr == BLKADDR_CPT0) ||
(block->addr == BLKADDR_CPT1))
- rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
+ rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
+ slot);
err = rvu_lf_reset(rvu, block, lf);
if (err) {
@@ -2671,6 +2731,8 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
+ rvu_cpt_unregister_interrupts(rvu);
+
/* Disable the Mbox interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
@@ -2880,6 +2942,11 @@ static int rvu_register_interrupts(struct rvu *rvu)
goto fail;
}
rvu->irq_allocated[offset] = true;
+
+ ret = rvu_cpt_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
return 0;
fail:
@@ -2890,7 +2957,6 @@ fail:
static void rvu_flr_wq_destroy(struct rvu *rvu)
{
if (rvu->flr_wq) {
- flush_workqueue(rvu->flr_wq);
destroy_workqueue(rvu->flr_wq);
rvu->flr_wq = NULL;
}
@@ -3186,6 +3252,10 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&rvu->rswitch.switch_lock);
+ if (rvu->fwdata)
+ ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
+ rvu->fwdata->ptp_ext_tstamp);
+
return 0;
err_dl:
rvu_unregister_dl(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 1d9411232f1d..66e45d733824 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -101,6 +101,7 @@ struct rvu_block {
u64 msixcfg_reg;
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
+ struct rvu *rvu;
};
struct nix_mcast {
@@ -220,6 +221,7 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
+ bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
@@ -237,6 +239,7 @@ struct rvu_pfvf {
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
+ int intf_mode;
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
@@ -394,7 +397,9 @@ struct rvu_fwdata {
u64 mcam_addr;
u64 mcam_sz;
u64 msixtr_base;
-#define FWDATA_RESERVED_MEM 1023
+ u32 ptp_ext_clk_rate;
+ u32 ptp_ext_tstamp;
+#define FWDATA_RESERVED_MEM 1022
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 5
#define CGX_LMACS_MAX 4
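The reserved-area shrink is pure layout arithmetic: the two new u32 fields occupy 8 bytes, exactly one u64 slot, so dropping FWDATA_RESERVED_MEM from 1023 to 1022 keeps the size of struct rvu_fwdata, and hence the firmware ABI, unchanged.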
@@ -656,6 +661,8 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
int rvu_get_num_lbk_chans(void);
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block);
/* RVU HW reg validation */
enum regmap_block {
@@ -794,6 +801,7 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
@@ -805,7 +813,11 @@ bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
int index);
/* CPT APIs */
-int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+int rvu_cpt_register_interrupts(struct rvu *rvu);
+void rvu_cpt_unregister_interrupts(struct rvu *rvu);
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ int slot);
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
@@ -827,4 +839,7 @@ void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir);
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 81e8ea9ee30e..2ca182a4ce82 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -324,7 +324,6 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
if (rvu->cgx_evh_wq) {
- flush_workqueue(rvu->cgx_evh_wq);
destroy_workqueue(rvu->cgx_evh_wq);
rvu->cgx_evh_wq = NULL;
}
@@ -411,7 +410,7 @@ int rvu_cgx_exit(struct rvu *rvu)
* VF's of mapped PF and other PFs are not allowed. This fn() checks
* whether a PFFUNC is permitted to do the config or not.
*/
-static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
@@ -694,7 +693,9 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -711,13 +712,16 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
- cgx_lmac_ptp_config(cgxd, lmac_id, enable);
+ mac_ops = get_mac_ops(cgxd);
+	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
/* If PTP is enabled then inform NPC that packets to be
* parsed by this PF will have their data shifted by 8 bytes
* and if PTP is disabled then no shift is required
*/
if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
return -EINVAL;
+	/* This flag is required to clean up the CGX config if the app gets killed */
+ pfvf->hw_rx_tstamp_en = enable;
return 0;
}
@@ -725,6 +729,9 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+ return -EPERM;
+
return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}
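The hw_rx_tstamp_en flag recorded in rvu_cgx_ptp_rx_cfg() exists so a teardown path can undo the MAC config when the requester dies without disabling PTP itself. A plausible cleanup hook, sketched here as a hypothetical helper (not part of this diff):

    static void rvu_cgx_ptp_rx_cleanup(struct rvu *rvu, u16 pcifunc)
    {
            struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);

            /* Undo timestamp prepend only if this PF/VF turned it on */
            if (pfvf->hw_rx_tstamp_en)
                    rvu_cgx_ptp_rx_cfg(rvu, pcifunc, false);
    }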
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 46a41cfff575..7dbbc115cde4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -334,8 +334,8 @@ int rvu_set_channels_base(struct rvu *rvu)
/* Out of 4096 channels start CPT from 2048 so
* that MSB for CPT channels is always set
*/
- if (cpt_chan_base <= 0x800) {
- hw->cpt_chan_base = 0x800;
+ if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
+ hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
} else {
dev_err(rvu->dev,
"CPT channels could not fit in the range 2048-4095\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 1f90a7403392..45357deecabb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -37,6 +37,236 @@
(_rsp)->free_sts_##etype = free_sts; \
})
+static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg0, reg1, reg2;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ if (!is_rvu_otx2(rvu)) {
+ reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
+ reg0, reg1, reg2);
+ } else {
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx",
+ reg0, reg1);
+ }
+
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
+ if (!is_rvu_otx2(rvu))
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ struct rvu *rvu = block->rvu;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, block);
+ if (ret) {
+ dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
+ return ret;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+ return 0;
+}
+
+static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i;
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[off + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, off + i), block);
+ rvu->irq_allocated[off + i] = false;
+ }
+}
+
+static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return;
+ }
+ block = &hw->block[blkaddr];
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_unregister_interrupts(block, offs);
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_cpt_unregister_interrupts(struct rvu *rvu)
+{
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ char irq_name[16];
+ int i, ret;
+
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, off + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs, ret = 0;
+ char irq_name[16];
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_register_interrupts(block, offs);
+
+ for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, offs + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_cpt_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ return cpt_register_interrupts(rvu, BLKADDR_CPT1);
+}
+
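The registration and teardown paths above rely on the hardware's write-1-to-set / write-1-to-clear enable registers, so individual interrupts can be enabled or disabled without a read-modify-write:

    /* Set-only write: enables FLT interrupt 0, other bits untouched */
    rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(0), 0x1);
    /* Clear-only write: disables it again on the unregister path */
    rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), 0x1);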
static int get_cpt_pf_num(struct rvu *rvu)
{
int i, domain_nr, cpt_pf_num = -1;
@@ -147,9 +377,13 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
- /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
- val = (u64)req->nix_pf_func << 48 |
- (u64)req->sso_pf_func << 32;
+ /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
+ * on reset.
+ */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
+ val |= ((u64)req->nix_pf_func << 48 |
+ (u64)req->sso_pf_func << 32);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
}
@@ -159,7 +393,7 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
{
u16 pcifunc = req->hdr.pcifunc;
- int num_lfs, cptlf, slot;
+ int num_lfs, cptlf, slot, err;
struct rvu_block *block;
block = &rvu->hw->block[blkaddr];
@@ -173,10 +407,15 @@ static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
if (cptlf < 0)
return CPT_AF_ERR_LF_INVALID;
- /* Reset CPT LF group and priority */
- rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), 0x0);
- /* Reset CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
- rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), 0x0);
+ /* Perform teardown */
+ rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);
+
+ /* Reset LF */
+ err = rvu_lf_reset(rvu, block, cptlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+ }
}
return 0;
@@ -197,6 +436,141 @@ int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
return ret;
}
+static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 sso_pf_func = req->sso_pf_func;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(16))) {
+ /* The IPsec inline outbound path is already enabled for this
+ * CPT LF. The HRM states that the inline inbound and outbound
+ * paths must not be enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
+ }
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+
+ nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
+ /* Enable CPT LF for IPsec inline inbound operations */
+ if (req->enable)
+ val |= BIT_ULL(9);
+ else
+ val &= ~BIT_ULL(9);
+
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (sso_pf_func) {
+ /* Set SSO_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)sso_pf_func << 32;
+ val |= (u64)req->nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+ if (req->sso_pf_func_ovrd)
+ /* Set SSO_PF_FUNC_OVRD for inline IPSec */
+ rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
+
+ /* Configure the X2P Link register with the cpt base channel number and
+ * range of channels it should propagate to X2P
+ */
+ if (!is_rvu_otx2(rvu)) {
+ val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
+ val |= rvu->hw->cpt_chan_base;
+
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
+ }
+
+ return 0;
+}
+
+static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 nix_pf_func = req->nix_pf_func;
+ int nix_blkaddr;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(9))) {
+ /* The IPsec inline inbound path is already enabled for this
+ * CPT LF. The HRM states that the inline inbound and outbound
+ * paths must not be enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
+ }
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+
+ /* Enable CPT LF for IPsec inline outbound operations */
+ if (req->enable)
+ val |= BIT_ULL(16);
+ else
+ val &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (nix_pf_func) {
+ /* Set NIX_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
+ nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+ struct cpt_inline_ipsec_cfg_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ switch (req->dir) {
+ case CPT_INLINE_INBOUND:
+ ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ case CPT_INLINE_OUTBOUND:
+ ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ default:
+ return CPT_AF_ERR_PARAM;
+ }
+
+ return ret;
+}
+
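Bits 9 and 16 of CPT_AF_LFX_CTL gate the inline inbound and outbound paths, and the two helpers above reject a request that would set one while the other is live. A runnable sketch of that mutual-exclusion check (the -1 error value stands in for CPT_AF_ERR_INLINE_IPSEC_INB_ENA):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))
#define INB_ENA		BIT_ULL(9)	/* inline inbound enable */
#define OUTB_ENA	BIT_ULL(16)	/* inline outbound enable */

static int enable_inbound(uint64_t *ctl)
{
	if (*ctl & OUTB_ENA)
		return -1;	/* mirrors CPT_AF_ERR_INLINE_IPSEC_INB_ENA */
	*ctl |= INB_ENA;
	return 0;
}

int main(void)
{
	uint64_t ctl = OUTB_ENA;	/* outbound already enabled */

	printf("enable inbound: %d\n", enable_inbound(&ctl));	/* -1 */
	ctl = 0;
	printf("enable inbound: %d\n", enable_inbound(&ctl));	/* 0 */
	return 0;
}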
static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
{
u64 offset = req->reg_offset;
@@ -421,6 +795,58 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
+}
+
+static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
+{
+ struct cpt_rxc_time_cfg_req req;
+ int timeout = 2000;
+ u64 reg;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ /* Set time limit to minimum values, so that rxc entries will be
+ * flushed out quickly.
+ */
+ req.step = 1;
+ req.zombie_thres = 1;
+ req.zombie_limit = 1;
+ req.active_thres = 1;
+ req.active_limit = 1;
+
+ cpt_rxc_time_cfg(rvu, &req, blkaddr);
+
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
+
+ timeout = 2000;
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+}
+
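cpt_rxc_teardown() and the queue-drain helpers below all use the same bounded-poll idiom: re-read a status register until a count field drains or a hard loop counter expires, so a wedged block cannot hang the kernel. A standalone sketch of the pattern, where read_status() is a stand-in for rvu_read64() plus FIELD_GET():

#include <stdio.h>

static unsigned long long read_status(void)
{
	static unsigned long long count = 5;	/* pretend 5 entries drain */

	return count ? count-- : 0;
}

int main(void)
{
	int timeout = 2000;

	while (timeout && read_status())
		timeout--;	/* the kernel code also does udelay(1) here */

	if (!timeout)
		printf("poll hit hard loop counter\n");
	else
		printf("drained with %d iterations left\n", timeout);
	return 0;
}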
#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
@@ -485,14 +911,12 @@ static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
}
-int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
{
- int blkaddr;
u64 reg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc);
- if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
- return -EINVAL;
+ if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
+ cpt_rxc_teardown(rvu, blkaddr);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
@@ -509,3 +933,154 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
return 0;
}
+
+#define CPT_RES_LEN 16
+#define CPT_SE_IE_EGRP 1ULL
+
+static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
+ int nix_blkaddr)
+{
+ int cpt_pf_num = get_cpt_pf_num(rvu);
+ struct cpt_inst_lmtst_req *req;
+ dma_addr_t res_daddr;
+ int timeout = 3000;
+ u8 cpt_idx;
+ u64 *inst;
+ u16 *res;
+ int rc;
+
+ res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(rvu->dev, res_daddr)) {
+ dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
+ rc = -EFAULT;
+ goto res_free;
+ }
+ *res = 0xFFFF;
+
+ /* Send mbox message to CPT PF */
+ req = (struct cpt_inst_lmtst_req *)
+ otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
+ cpt_pf_num, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ rc = -ENOMEM;
+ goto res_daddr_unmap;
+ }
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
+
+ inst = req->inst;
+ /* Prepare CPT_INST_S */
+ inst[0] = 0;
+ inst[1] = res_daddr;
+ /* AF PF FUNC */
+ inst[2] = 0;
+ /* Set QORD */
+ inst[3] = 1;
+ inst[4] = 0;
+ inst[5] = 0;
+ inst[6] = 0;
+ /* Set EGRP */
+ inst[7] = CPT_SE_IE_EGRP << 61;
+
+ /* Subtract 1 from the NIX-CPT credit count to preserve
+ * credit counts.
+ */
+ cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ BIT_ULL(22) - 1);
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ if (rc) {
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ cpt_pf_num);
+ rc = 0;
+ }
+ /* Wait for CPT instruction to be completed */
+ do {
+ mdelay(1);
+ if (*res == 0xFFFF)
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
+
+res_daddr_unmap:
+ dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
+res_free:
+ kfree(res);
+
+ return rc;
+}
+
+#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46)
+#define CTX_CAM_CPTR GENMASK_ULL(45, 0)
+
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
+{
+ int nix_blkaddr, blkaddr;
+ u16 max_ctx_entries, i;
+ int slot = 0, num_lfs;
+ u64 reg, cam_data;
+ int rc;
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (nix_blkaddr < 0)
+ return -EINVAL;
+
+ if (is_rvu_otx2(rvu))
+ return 0;
+
+ blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;
+
+ /* Submit a CPT_INST_S to track when all packets have been
+ * flushed through for the NIX PF FUNC in the inline inbound case.
+ */
+ rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
+ if (rc)
+ return rc;
+
+ /* Wait for rxc entries to be flushed out */
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ max_ctx_entries = (reg >> 48) & 0xFFF;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ blkaddr);
+ if (num_lfs == 0) {
+ dev_warn(rvu->dev, "CPT LF is not configured\n");
+ goto unlock;
+ }
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ for (i = 0; i < max_ctx_entries; i++) {
+ cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
+
+ if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
+ FIELD_GET(CTX_CAM_CPTR, cam_data)) {
+ reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
+ reg);
+ }
+ }
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+unlock:
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
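rvu_cpt_ctx_flush() walks the context CAM and decodes each entry with FIELD_GET() against the CTX_CAM_* masks defined just above it. A userspace replica of that decode; GENMASK_ULL()/FIELD_GET() are re-defined locally only so the example compiles outside the kernel, and the CAM value is made up:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
/* divide by the mask's lowest set bit == shift right by its offset */
#define FIELD_GET(mask, reg) \
	(((reg) & (mask)) / ((mask) & ~((mask) << 1)))

#define CTX_CAM_PF_FUNC	GENMASK_ULL(61, 46)
#define CTX_CAM_CPTR	GENMASK_ULL(45, 0)

int main(void)
{
	/* hypothetical CAM word: pf_func 0x400, cptr 0x1234 */
	uint64_t cam = ((uint64_t)0x400 << 46) | 0x1234;

	printf("pf_func=0x%llx cptr=0x%llx\n",
	       (unsigned long long)FIELD_GET(CTX_CAM_PF_FUNC, cam),
	       (unsigned long long)FIELD_GET(CTX_CAM_CPTR, cam));
	return 0;
}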
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 9338765da048..c7fd466a0efd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -95,7 +95,7 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT5] = "Total frames sent on the interface",
[CGX_STAT6] = "Packets sent with an octet count < 64",
[CGX_STAT7] = "Packets sent with an octet count == 64",
- [CGX_STAT8] = "Packets sent with an octet count of 65–127",
+ [CGX_STAT8] = "Packets sent with an octet count of 65-127",
[CGX_STAT9] = "Packets sent with an octet count of 128-255",
[CGX_STAT10] = "Packets sent with an octet count of 256-511",
[CGX_STAT11] = "Packets sent with an octet count of 512-1023",
@@ -125,7 +125,7 @@ static char *rpm_rx_stats_fields[] = {
"Total frames received on interface",
"Packets received with an octet count < 64",
"Packets received with an octet count == 64",
- "Packets received with an octet count of 65–127",
+ "Packets received with an octet count of 65-127",
"Packets received with an octet count of 128-255",
"Packets received with an octet count of 256-511",
"Packets received with an octet count of 512-1023",
@@ -164,7 +164,7 @@ static char *rpm_tx_stats_fields[] = {
"Packets sent to the multicast DMAC",
"Packets sent to a broadcast DMAC",
"Packets sent with an octet count == 64",
- "Packets sent with an octet count of 65–127",
+ "Packets sent with an octet count of 65-127",
"Packets sent with an octet count of 128-255",
"Packets sent with an octet count of 256-511",
"Packets sent with an octet count of 512-1023",
@@ -226,18 +226,175 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+#define LMT_MAPTBL_ENTRY_SIZE 16
+/* Dump LMTST map table */
+static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rvu *rvu = filp->private_data;
+ u64 lmt_addr, val, tbl_base;
+ int pf, vf, num_vfs, hw_vfs;
+ void __iomem *lmt_map_base;
+ int index = 0, off = 0;
+ int bytes_not_copied;
+ int buf_size = 10240;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOSPC;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\tLmtst Map Table Entries");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\t=======================");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmtline Base (word 0)\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmt Map Entry (word 1)");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
+ pf);
+
+ index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
+ (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
+ val);
+ /* Reading num of VFs per PF */
+ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
+ for (vf = 0; vf < num_vfs; vf++) {
+ index = (pf * rvu->hw->total_vfs * 16) +
+ ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "PF%d:VF%d \t\t", pf, vf);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%llx\t\t", (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\n", val);
+ }
+ }
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+
+ bytes_not_copied = copy_to_user(buffer, buf, off);
+ kfree(buf);
+
+ iounmap(lmt_map_base);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = off;
+ return off;
+}
+
+RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
+
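Each LMTST map table entry is LMT_MAPTBL_ENTRY_SIZE (16) bytes, i.e. two u64 words, and the dump addresses it as pf * total_vfs * 16 for the PF entry, with the PF's VFs in the 16-byte slots that follow and the second word of each entry at +8. A worked sketch of that addressing (the total_vfs value is illustrative):

#include <stdio.h>

#define ENTRY_SZ 16

int main(void)
{
	int total_vfs = 128;
	int pf = 2, vf = 3;
	long pf_index = (long)pf * total_vfs * ENTRY_SZ;
	long vf_index = pf_index + (long)(vf + 1) * ENTRY_SZ;

	printf("PF%d entry at %ld, word1 at %ld\n",
	       pf, pf_index, pf_index + 8);	/* 4096 and 4104 */
	printf("PF%d:VF%d entry at %ld\n", pf, vf, vf_index);	/* 4160 */
	return 0;
}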
+static void get_lf_str_list(struct rvu_block block, int pcifunc,
+ char *lfs)
+{
+ int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
+
+ for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
+ if (lf >= block.lf.max)
+ break;
+
+ if (block.fn_map[lf] != pcifunc)
+ continue;
+
+ if (lf == prev_lf + 1) {
+ prev_lf = lf;
+ seq = 1;
+ continue;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
+ else
+ len += (len ? sprintf(lfs + len, ",%d", lf) :
+ sprintf(lfs + len, "%d", lf));
+
+ prev_lf = lf;
+ seq = 0;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d", prev_lf);
+
+ lfs[len] = '\0';
+}
+
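get_lf_str_list() run-compresses the LFs owned by a pcifunc into "first-last" ranges. A self-contained sketch of the same formatting over a plain array; the output format matches the intent of the kernel helper, not its exact byte-for-byte output:

#include <stdio.h>
#include <string.h>

static void lf_str(const int *lf, int n, char *out)
{
	int i = 0, len = 0;

	while (i < n) {
		int start = lf[i];

		/* absorb the consecutive run starting at 'start' */
		while (i + 1 < n && lf[i + 1] == lf[i] + 1)
			i++;
		len += sprintf(out + len, len ? ",%d" : "%d", start);
		if (lf[i] != start)
			len += sprintf(out + len, "-%d", lf[i]);
		i++;
	}
	out[len] = '\0';
}

int main(void)
{
	int lfs[] = { 0, 1, 2, 5, 7, 8 };
	char buf[64];

	lf_str(lfs, 6, buf);
	printf("%s\n", buf);	/* prints 0-2,5,7-8 */
	return 0;
}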
+static int get_max_column_width(struct rvu *rvu)
+{
+ int index, pf, vf, lf_str_size = 12, buf_size = 256;
+ struct rvu_block block;
+ u16 pcifunc;
+ char *buf;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ for (index = 0; index < BLK_COUNT; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+
+ get_lf_str_list(block, pcifunc, buf);
+ if (lf_str_size <= strlen(buf))
+ lf_str_size = strlen(buf) + 1;
+ }
+ }
+ }
+
+ kfree(buf);
+ return lf_str_size;
+}
+
/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
char __user *buffer,
size_t count, loff_t *ppos)
{
- int index, off = 0, flag = 0, go_back = 0, len = 0;
+ int index, off = 0, flag = 0, len = 0, i = 0;
struct rvu *rvu = filp->private_data;
- int lf, pf, vf, pcifunc;
+ int bytes_not_copied = 0;
struct rvu_block block;
- int bytes_not_copied;
- int lf_str_size = 12;
+ int pf, vf, pcifunc;
int buf_size = 2048;
+ int lf_str_size;
char *lfs;
char *buf;
@@ -249,6 +406,9 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
if (!buf)
return -ENOSPC;
+ /* Get the maximum width of a column */
+ lf_str_size = get_max_column_width(rvu);
+
lfs = kzalloc(lf_str_size, GFP_KERNEL);
if (!lfs) {
kfree(buf);
@@ -262,65 +422,69 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
"%-*s", lf_str_size,
rvu->hw->block[index].name);
}
+
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ off = 0;
+ flag = 0;
pcifunc = pf << 10 | vf;
if (!pcifunc)
continue;
if (vf) {
sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
- go_back = scnprintf(&buf[off],
- buf_size - 1 - off,
- "%-*s", lf_str_size, lfs);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
} else {
sprintf(lfs, "PF%d", pf);
- go_back = scnprintf(&buf[off],
- buf_size - 1 - off,
- "%-*s", lf_str_size, lfs);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
}
- off += go_back;
- for (index = 0; index < BLKTYPE_MAX; index++) {
+ for (index = 0; index < BLK_COUNT; index++) {
block = rvu->hw->block[index];
if (!strlen(block.name))
continue;
len = 0;
lfs[len] = '\0';
- for (lf = 0; lf < block.lf.max; lf++) {
- if (block.fn_map[lf] != pcifunc)
- continue;
+ get_lf_str_list(block, pcifunc, lfs);
+ if (strlen(lfs))
flag = 1;
- len += sprintf(&lfs[len], "%d,", lf);
- }
- if (flag)
- len--;
- lfs[len] = '\0';
off += scnprintf(&buf[off], buf_size - 1 - off,
"%-*s", lf_str_size, lfs);
- if (!strlen(lfs))
- go_back += lf_str_size;
}
- if (!flag)
- off -= go_back;
- else
- flag = 0;
- off--;
- off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ if (flag) {
+ off += scnprintf(&buf[off],
+ buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer +
+ (i * off),
+ buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
+ }
}
}
- bytes_not_copied = copy_to_user(buffer, buf, off);
+out:
kfree(lfs);
kfree(buf);
-
if (bytes_not_copied)
return -EFAULT;
- *ppos = off;
- return off;
+ return *ppos;
}
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
@@ -504,7 +668,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
if (cmd_buf)
ret = -EINVAL;
- if (!strncmp(subtoken, "help", 4) || ret < 0) {
+ if (ret < 0 || !strncmp(subtoken, "help", 4)) {
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
goto qsize_write_done;
}
@@ -1719,6 +1883,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
u16 pcifunc;
char *str;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
if (layer == BAND_PROF_INVAL_LAYER)
continue;
@@ -1768,6 +1936,10 @@ static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
int layer;
char *str;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
seq_puts(m, "\nBandwidth profile resource free count\n");
seq_puts(m, "=====================================\n");
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
@@ -1878,7 +2050,7 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return -ENODEV;
mac_ops = get_mac_ops(cgxd);
-
+ /* There can be no CGX devices at all */
if (!mac_ops)
return 0;
@@ -1956,13 +2128,13 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
if (err)
return err;
- if (is_rvu_otx2(rvu))
- seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
- tx_stat);
- else
- seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
- tx_stat);
- stat++;
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
+ tx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
+ tx_stat);
+ stat++;
}
return err;
@@ -2400,6 +2572,8 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
seq_printf(s, "VF%d", vf);
}
seq_puts(s, "\n");
+ seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
+ seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
}
rvu_dbg_npc_mcam_show_action(s, iter);
@@ -2672,6 +2846,10 @@ void rvu_dbg_init(struct rvu *rvu)
debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
&rvu_dbg_rsrc_status_fops);
+ if (!is_rvu_otx2(rvu))
+ debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_lmtst_map_table_fops);
+
if (!cgx_get_cgxcnt_max())
goto create;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 274d3abe30eb..70bacd38a6d9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1510,13 +1510,6 @@ int rvu_register_dl(struct rvu *rvu)
return -ENOMEM;
}
- err = devlink_register(dl);
- if (err) {
- dev_err(rvu->dev, "devlink register failed with error %d\n", err);
- devlink_free(dl);
- return err;
- }
-
rvu_dl = devlink_priv(dl);
rvu_dl->dl = dl;
rvu_dl->rvu = rvu;
@@ -1537,13 +1530,11 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
- devlink_params_publish(dl);
-
+ devlink_register(dl);
return 0;
err_dl_health:
rvu_health_reporters_destroy(rvu);
- devlink_unregister(dl);
devlink_free(dl);
return err;
}
@@ -1553,12 +1544,9 @@ void rvu_unregister_dl(struct rvu *rvu)
struct rvu_devlink *rvu_dl = rvu->rvu_dl;
struct devlink *dl = rvu_dl->dl;
- if (!dl)
- return;
-
+ devlink_unregister(dl);
devlink_params_unregister(dl, rvu_af_dl_params,
ARRAY_SIZE(rvu_af_dl_params));
rvu_health_reporters_destroy(rvu);
- devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 9ef4e942e31e..d8b1948aaa0a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -28,6 +28,7 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
u32 leaf_prof);
+static const char *nix_get_ctx_name(int ctype);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -1061,10 +1062,68 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
+static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req, u8 ctype)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ int rc, word;
+
+ if (req->ctype != NIX_AQ_CTYPE_CQ)
+ return 0;
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ req->hdr.pcifunc, ctype, req->qidx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
+ __func__, nix_get_ctx_name(ctype), req->qidx,
+ req->hdr.pcifunc);
+ return rc;
+ }
+
+ /* Make a copy of the original context and mask, which are
+ * required for resubmission
+ */
+ memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
+ memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+
+ /* exclude fields which HW can update */
+ aq_req.cq_mask.cq_err = 0;
+ aq_req.cq_mask.wrptr = 0;
+ aq_req.cq_mask.tail = 0;
+ aq_req.cq_mask.head = 0;
+ aq_req.cq_mask.avg_level = 0;
+ aq_req.cq_mask.update_time = 0;
+ aq_req.cq_mask.substream = 0;
+
+ /* The context mask (cq_mask) holds the mask bits of the fields
+ * changed by the AQ WRITE operation, for example:
+ * cq.drop = 0xa;
+ * cq_mask.drop = 0xff;
+ * The logic below ANDs cq with cq_mask so that fields that were
+ * not updated are masked out of both the request and the response
+ * before they are compared.
+ */
+ for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+ word++) {
+ *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ }
+
+ if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+ return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
+
+ return 0;
+}
+
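The verification above compares only the fields the AQ WRITE actually touched: both the request context and the read-back context are ANDed with the write mask word by word before memcmp(). A userspace sketch with dummy two-word "contexts" standing in for struct nix_cq_ctx_s:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CTX_WORDS 2

int main(void)
{
	uint64_t req[CTX_WORDS]  = { 0x000000a0, 0x0 };	/* wrote drop=0xa0 */
	uint64_t mask[CTX_WORDS] = { 0x000000ff, 0x0 };	/* only drop masked */
	uint64_t rsp[CTX_WORDS]  = { 0xdead00a0, 0x123 };/* HW-owned bits differ */
	int w;

	for (w = 0; w < CTX_WORDS; w++) {
		req[w] &= mask[w];
		rsp[w] &= mask[w];
	}
	/* prints "write took effect": masked words match */
	printf("%s\n", memcmp(req, rsp, sizeof(req)) ?
	       "mismatch, retry the write" : "write took effect");
	return 0;
}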
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
struct nix_hw *nix_hw;
+ int err, retries = 5;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
@@ -1075,7 +1134,24 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+retry:
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+
+ /* HW errata: 'AQ modification to CQ could be discarded on heavy
+ * traffic'. As a workaround, perform a CQ context read after each
+ * AQ write. If the read shows that the AQ write did not take
+ * effect, perform the AQ write again.
+ */
+ if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
+ err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
+ if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
+ if (retries--)
+ goto retry;
+ else
+ return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
+ }
+ }
+
+ return err;
}
static const char *nix_get_ctx_name(int ctype)
@@ -2507,6 +2583,9 @@ static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
return;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
vlan = &nix_hw->txvlan;
mutex_lock(&vlan->rsrc_lock);
@@ -4436,10 +4515,17 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
+#define RX_SA_BASE GENMASK_ULL(52, 7)
+
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ u64 sa_base;
+ void *cgxd;
int err;
ctx_req.hdr.pcifunc = pcifunc;
@@ -4476,9 +4562,33 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+ /* reset HW config done for Switch headers */
+ rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
+ (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
+
+ /* Disable the CGX and NPC configuration done for PTP */
+ if (pfvf->hw_rx_tstamp_en) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
+ pfvf->hw_rx_tstamp_en = false;
+ }
+
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
+
+ sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
+ if (FIELD_GET(RX_SA_BASE, sa_base)) {
+ err = rvu_cpt_ctx_flush(rvu, pcifunc);
+ if (err)
+ dev_err(rvu->dev,
+ "CPT ctx flush failed with error: %d\n", err);
+ }
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
@@ -4579,6 +4689,119 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
return 0;
}
+#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
+#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
+#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
+#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
+
+#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
+#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
+#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+
+static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
+ int blkaddr)
+{
+ u8 cpt_idx, cpt_blkaddr;
+ u64 val;
+
+ cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ if (req->enable) {
+ val = 0;
+ /* Enable context prefetching */
+ if (!is_rvu_otx2(rvu))
+ val |= BIT_ULL(51);
+
+ /* Set OPCODE and EGRP */
+ val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+ /* Set CPT queue for inline IPSec */
+ val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
+ val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
+ req->inst_qsel.cpt_pf_func);
+
+ if (!is_rvu_otx2(rvu)) {
+ cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
+ BLKADDR_CPT1;
+ val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
+ }
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ val);
+
+ /* Set CPT credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ req->cpt_credit);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF);
+ }
+}
+
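NIX_AF_RX_IPSEC_GEN_CFG is assembled with FIELD_PREP() against the GENMASK_ULL() field definitions above. A userspace replica of that packing; the macros are re-defined locally so the example compiles outside the kernel, and the egrp/opcode values are illustrative only:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
/* multiply by the mask's lowest set bit == shift left by its offset */
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) * ((mask) & ~((mask) << 1))) & (mask))

#define IPSEC_GEN_CFG_EGRP	GENMASK_ULL(50, 48)
#define IPSEC_GEN_CFG_OPCODE	GENMASK_ULL(47, 32)

int main(void)
{
	uint64_t val = 0;

	val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, 1);	/* engine group 1 */
	val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, 0x23);	/* opcode 0x23 */
	printf("gen_cfg = %#llx\n", (unsigned long long)val);
	/* 0x1002300000000: egrp lands at bit 48, opcode at bit 32 */
	return 0;
}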
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_cfg *req,
+ struct msg_rsp *rsp)
+{
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_lf_cfg *req,
+ struct msg_rsp *rsp)
+{
+ int lf, blkaddr, err;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->enable) {
+ /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+ val = (u64)req->ipsec_cfg0.tt << 44 |
+ (u64)req->ipsec_cfg0.tag_const << 20 |
+ (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+ req->ipsec_cfg0.lenm1_max;
+
+ if (blkaddr == BLKADDR_NIX1)
+ val |= BIT_ULL(46);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+ /* Set SA_IDX_W and SA_IDX_MAX */
+ val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+ req->ipsec_cfg1.sa_idx_max;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+ /* Set SA base address */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ req->sa_base_addr);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ 0x0);
+ }
+
+ return 0;
+}
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 5efb4174e82d..bb6b42bbefa4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -3167,6 +3167,102 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static int
+npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind,
+ u8 var_len_off, u8 var_len_off_mask, u8 shift_dir)
+{
+ struct npc_kpu_action0 *act0;
+ u8 shift_count = 0;
+ int blkaddr;
+ u64 val;
+
+ if (!var_len_off_mask)
+ return -EINVAL;
+
+ if (var_len_off_mask != 0xff) {
+ if (shift_dir)
+ shift_count = __ffs(var_len_off_mask);
+ else
+ shift_count = (8 - __fls(var_len_off_mask));
+ }
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ act0 = (struct npc_kpu_action0 *)&val;
+ act0->var_len_shift = shift_count;
+ act0->var_len_right = shift_dir;
+ act0->var_len_mask = var_len_off_mask;
+ act0->var_len_offset = var_len_off;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+ return 0;
+}
+
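For a partial var_len_off_mask, the shift count above comes from the lowest set bit (__ffs) when shifting right, and from the highest set bit (__fls, subtracted from 8) when shifting left. A runnable sketch using the equivalent compiler built-ins in place of the kernel helpers (the mask must be non-zero, as the function checks):

#include <stdio.h>

#define my_ffs(x) __builtin_ctz(x)		/* index of lowest set bit */
#define my_fls(x) (31 - __builtin_clz(x))	/* index of highest set bit */

int main(void)
{
	unsigned int mask = 0x3c;	/* bits 2..5 set */

	printf("right shift: %d\n", my_ffs(mask));	/* 2 */
	printf("left shift:  %d\n", 8 - my_fls(mask));	/* 8 - 5 = 3 */
	return 0;
}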
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir)
+
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr, nixlf, rc, intf_mode;
+ int pf = rvu_get_pf(pcifunc);
+ u64 rxpkind, txpkind;
+ u8 cgx_id, lmac_id;
+
+ /* use default pkind to disable edsa/higig */
+ rxpkind = rvu_npc_get_pkind(rvu, pf);
+ txpkind = NPC_TX_DEF_PKIND;
+ intf_mode = NPC_INTF_MODE_DEF;
+
+ if (mode & OTX2_PRIV_FLAGS_CUSTOM) {
+ if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) {
+ rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind,
+ var_len_off,
+ var_len_off_mask,
+ shift_dir);
+ if (rc)
+ return rc;
+ }
+ rxpkind = pkind;
+ txpkind = pkind;
+ }
+
+ if (dir & PKIND_RX) {
+ /* rx pkind set req valid only for cgx mapped PFs */
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return 0;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ rxpkind);
+ if (rc)
+ return rc;
+ }
+
+ if (dir & PKIND_TX) {
+ /* Tx pkind set request valid if PCIFUNC has NIXLF attached */
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
+ txpkind);
+ }
+
+ pfvf->intf_mode = intf_mode;
+ return 0;
+}
+
+int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode,
+ req->dir, req->pkind, req->var_len_off,
+ req->var_len_off_mask, req->shift_dir);
+}
+
int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
struct msg_req *req,
struct npc_mcam_read_base_rule_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 51ddc7b81d0b..ff2b21999f36 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1119,6 +1119,9 @@ find_rule:
rule->default_rule = req->default_rule;
rule->owner = owner;
rule->enable = enable;
+ rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ rule->chan &= rule->chan_mask;
if (is_npc_intf_tx(req->intf))
rule->intf = pfvf->nix_tx_intf;
else
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 21f1ed4e222f..22cd751613cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -236,6 +236,8 @@
#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
#define NIX_AF_AQ_CFG (0x0400)
#define NIX_AF_AQ_BASE (0x0410)
@@ -525,6 +527,7 @@
#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull)
#define CPT_AF_CTX_PSH_PC (0x49450ull)
#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
+#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
#define CPT_AF_RXC_TIME (0x50010ull)
#define CPT_AF_RXC_TIME_CFG (0x50018ull)
#define CPT_AF_RXC_DFRG (0x50020ull)
@@ -542,6 +545,7 @@
#define CPT_LF_CTL 0x10
#define CPT_LF_INPROG 0x40
#define CPT_LF_Q_GRP_PTR 0x120
+#define CPT_LF_CTX_FLUSH 0x510
#define NPC_AF_BLK_RST (0x00040)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 77ac96693f04..edc9367b1b95 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -62,6 +62,24 @@ enum rvu_af_int_vec_e {
RVU_AF_INT_VEC_CNT = 0x5,
};
+/* CPT Admin function Interrupt Vector Enumeration */
+enum cpt_af_int_vec_e {
+ CPT_AF_INT_VEC_FLT0 = 0x0,
+ CPT_AF_INT_VEC_FLT1 = 0x1,
+ CPT_AF_INT_VEC_RVU = 0x2,
+ CPT_AF_INT_VEC_RAS = 0x3,
+ CPT_AF_INT_VEC_CNT = 0x4,
+};
+
+enum cpt_10k_af_int_vec_e {
+ CPT_10K_AF_INT_VEC_FLT0 = 0x0,
+ CPT_10K_AF_INT_VEC_FLT1 = 0x1,
+ CPT_10K_AF_INT_VEC_FLT2 = 0x2,
+ CPT_10K_AF_INT_VEC_RVU = 0x3,
+ CPT_10K_AF_INT_VEC_RAS = 0x4,
+ CPT_10K_AF_INT_VEC_CNT = 0x5,
+};
+
/* NPA Admin function Interrupt Vector Enumeration */
enum npa_af_int_vec_e {
NPA_AF_INT_VEC_RVU = 0x0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index b92c267628b8..0048b5946712 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -3,11 +3,11 @@
# Makefile for Marvell's RVU Ethernet device drivers
#
-obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
-obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
+obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
+obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 95f21dfdba48..fd4f083c699e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 78df173e6df2..66da31f30d3e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -188,7 +188,7 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
return PTR_ERR(msghdr);
}
rsp = (struct nix_get_mac_addr_rsp *)msghdr;
- ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
+ eth_hw_addr_set(netdev, rsp->mac_addr);
mutex_unlock(&pfvf->mbox.lock);
return 0;
@@ -203,7 +203,7 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
/* update dmac field in vlan offload rule */
if (netif_running(netdev) &&
pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
@@ -231,7 +231,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- req->maxlen = pfvf->max_frs;
+ req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
@@ -590,7 +590,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
u64 schq, parent;
u64 dwrr_val;
- dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
if (!req)
@@ -603,9 +603,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8)
- | OTX2_MIN_MTU;
-
+ req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
(0x2ULL << 36);
req->num_regs++;
@@ -718,7 +716,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
incr = (u64)qidx << 32;
while (timeout) {
val = otx2_atomic64_add(incr, ptr);
@@ -800,7 +798,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
@@ -835,17 +833,19 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
- err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
- TSO_HEADER_SIZE);
- if (err)
- return err;
+ if (qidx < pfvf->hw.tx_queues) {
+ err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+ TSO_HEADER_SIZE);
+ if (err)
+ return err;
+ }
sq->sqe_base = sq->sqe->base;
sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
if (!sq->sg)
return -ENOMEM;
- if (pfvf->ptp) {
+ if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
if (err)
@@ -871,20 +871,27 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
+ int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
- int err, pool_id;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
+ non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
if (qidx < pfvf->hw.rx_queues) {
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- } else {
+ if (pfvf->xdp_prog)
+ xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ } else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
cq->cqe_cnt = qset->sqe_cnt;
+ } else {
+ cq->cq_type = CQ_XDP;
+ cq->cint_idx = qidx - non_xdp_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
}
cq->cqe_size = pfvf->qset.xqe_size;
@@ -991,7 +998,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
}
/* Initialize TX queues */
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
err = otx2_sq_init(pfvf, qidx, sqb_aura);
@@ -1006,6 +1013,9 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
return err;
}
+ pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf,
+ NIX_LF_CQ_OP_STATUS);
+
/* Initialize work queue for receive buffer refill */
pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
sizeof(struct refill_work), GFP_KERNEL);
@@ -1035,7 +1045,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
/* Set RQ/SQ/CQ counts */
nixlf->rq_cnt = pfvf->hw.rx_queues;
- nixlf->sq_cnt = pfvf->hw.tx_queues;
+ nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
@@ -1073,7 +1083,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
int sqb, qidx;
u64 iova, pa;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
if (!sq->sqb_ptrs)
continue;
@@ -1285,7 +1295,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
stack_pages =
(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
/* Initialize aura context */
err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
@@ -1305,7 +1315,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
goto fail;
/* Allocate pointers and free them to aura/pool */
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
pool = &pfvf->qset.pool[pool_id];
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index a51ecd771d07..61e52812983f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -171,6 +171,8 @@ struct otx2_hw {
struct otx2_rss_info rss_info;
u16 rx_queues;
u16 tx_queues;
+ u16 xdp_queues;
+ u16 tot_tx_queues;
u16 max_queues;
u16 pool_cnt;
u16 rqpool_cnt;
@@ -223,6 +225,7 @@ struct otx2_hw {
#define HW_TSO 0
#define CN10K_MBOX 1
#define CN10K_LMTST 2
+#define CN10K_RPM 3
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@@ -263,6 +266,12 @@ struct otx2_ptp {
struct cyclecounter cycle_counter;
struct timecounter time_counter;
+
+ struct delayed_work extts_work;
+ u64 last_extts;
+ u64 thresh;
+
+ struct ptp_pin_desc extts_config;
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -317,7 +326,7 @@ struct otx2_nic {
struct net_device *netdev;
struct dev_hw_ops *hw_ops;
void *iommu_domain;
- u16 max_frs;
+ u16 tx_max_pktlen;
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
@@ -336,7 +345,9 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
u64 flags;
+ u64 *cq_op_addr;
+ struct bpf_prog *xdp_prog;
struct otx2_qset qset;
struct otx2_hw hw;
struct pci_dev *pdev;
@@ -452,6 +463,7 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
if (!is_dev_otx2(pfvf->pdev)) {
__set_bit(CN10K_MBOX, &hw->cap_flag);
__set_bit(CN10K_LMTST, &hw->cap_flag);
+ __set_bit(CN10K_RPM, &hw->cap_flag);
}
}
@@ -825,6 +837,9 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);
+
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
@@ -845,6 +860,7 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 7ac3ef2fa06a..777a27047c8e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -108,13 +108,6 @@ int otx2_register_dl(struct otx2_nic *pfvf)
return -ENOMEM;
}
- err = devlink_register(dl);
- if (err) {
- dev_err(pfvf->dev, "devlink register failed with error %d\n", err);
- devlink_free(dl);
- return err;
- }
-
otx2_dl = devlink_priv(dl);
otx2_dl->dl = dl;
otx2_dl->pfvf = pfvf;
@@ -128,12 +121,10 @@ int otx2_register_dl(struct otx2_nic *pfvf)
goto err_dl;
}
- devlink_params_publish(dl);
-
+ devlink_register(dl);
return 0;
err_dl:
- devlink_unregister(dl);
devlink_free(dl);
return err;
}
@@ -141,16 +132,10 @@ err_dl:
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
struct otx2_devlink *otx2_dl = pfvf->dl;
- struct devlink *dl;
-
- if (!otx2_dl || !otx2_dl->dl)
- return;
-
- dl = otx2_dl->dl;
+ struct devlink *dl = otx2_dl->dl;
+ devlink_unregister(dl);
devlink_params_unregister(dl, otx2_dl_params,
ARRAY_SIZE(otx2_dl_params));
-
- devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index dbfa3bc39e34..80d4ce61f442 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -121,14 +121,16 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
otx2_get_qset_strings(pfvf, &data, 0);
- for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_rxstat%d: ", stats);
- data += ETH_GSTRING_LEN;
- }
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
- for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_txstat%d: ", stats);
- data += ETH_GSTRING_LEN;
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
}
strcpy(data, "reset_count");
@@ -205,11 +207,15 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
[otx2_drv_stats[stat].index]);
otx2_get_qset_stats(pfvf, stats, &data);
- otx2_update_lmac_stats(pfvf);
- for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
- *(data++) = pfvf->hw.cgx_rx_stats[stat];
- for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
- *(data++) = pfvf->hw.cgx_tx_stats[stat];
+
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ otx2_update_lmac_stats(pfvf);
+ for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_rx_stats[stat];
+ for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_tx_stats[stat];
+ }
+
*(data++) = pfvf->reset_count;
fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
@@ -242,18 +248,19 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
static int otx2_get_sset_count(struct net_device *netdev, int sset)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- int qstats_count;
+ int qstats_count, mac_stats = 0;
if (sset != ETH_SS_STATS)
return -EINVAL;
qstats_count = otx2_n_queue_stats *
(pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
+ mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
otx2_update_lmac_fec_stats(pfvf);
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
- CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
- + 1;
+ mac_stats + OTX2_FEC_STATS_CNT + 1;
}
/* Get no of queues device supports and current queue count */
@@ -1168,9 +1175,8 @@ static int otx2_set_link_ksettings(struct net_device *netdev,
otx2_get_link_ksettings(netdev, &cur_ks);
/* Check requested modes against supported modes by hardware */
- if (!bitmap_subset(cmd->link_modes.advertising,
- cur_ks.link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS))
+ if (!linkmode_subset(cmd->link_modes.advertising,
+ cur_ks.link_modes.supported))
return -EINVAL;
mutex_lock(&mbox->lock);
@@ -1340,6 +1346,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_link_ksettings = otx2vf_get_link_ksettings,
+ .get_ts_info = otx2_get_ts_info,
};
void otx2vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 53df7fff92c4..1e0d0c9c1dac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <net/ip.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -48,9 +50,15 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
+ struct otx2_nic *pf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
int err = 0;
+ if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
if (if_up)
otx2_stop(netdev);
@@ -1180,7 +1188,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
}
/* SQ */
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
@@ -1283,7 +1291,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
/* Free SQB pointers */
otx2_sq_free_sqbs(pf);
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
qmem_free(pf->dev, sq->sqe);
qmem_free(pf->dev, sq->tso_hdrs);
@@ -1304,16 +1312,14 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
* NIX transfers entire data using 6 segments/buffers and writes
* a CQE_RX descriptor with those segment addresses. First segment
* has additional data prepended to packet. Also software omits a
- * headroom of 128 bytes and sizeof(struct skb_shared_info) in
- * each segment. Hence the total size of memory needed
- * to receive a packet with 'mtu' is:
+ * headroom of 128 bytes in each segment. Hence the total size of
+ * memory needed to receive a packet with 'mtu' is:
* frame size = mtu + additional data;
- * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
+ * memory = frame_size + headroom * 6;
* each receive buffer size = memory / 6;
*/
frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
- total_size = frame_size + (OTX2_HEAD_ROOM +
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
+ total_size = frame_size + OTX2_HEAD_ROOM * 6;
rbuf_size = total_size / 6;
return ALIGN(rbuf_size, 2048);
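Worked numbers for the sizing comment above, assuming a 1500-byte MTU: OTX2_HW_TIMESTAMP_LEN is 8 per this patch and the headroom is the 128 bytes the comment names; the 24-byte value for OTX2_ETH_HLEN is an assumption for illustration only.

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int mtu = 1500;
	int frame_size = mtu + 24 + 8;		/* 1532 */
	int total_size = frame_size + 128 * 6;	/* 2300 */
	int rbuf_size  = total_size / 6;	/* 383  */

	printf("rbuf = %d\n", ALIGN_UP(rbuf_size, 2048));	/* 2048 */
	return 0;
}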
@@ -1332,10 +1338,11 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
* so, aura count = pool count.
*/
hw->rqpool_cnt = hw->rx_queues;
- hw->sqpool_cnt = hw->tx_queues;
+ hw->sqpool_cnt = hw->tot_tx_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ /* Maximum hardware supported transmit length */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
@@ -1493,6 +1500,44 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
mutex_unlock(&mbox->lock);
}
+static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+{
+ struct net_device *netdev = pf->netdev;
+ struct nix_rx_mode *req;
+ bool promisc = false;
+
+ if (!(netdev->flags & IFF_UP))
+ return;
+
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ promisc = true;
+ }
+
+ /* Write unicast address to mcam entries or del from mcam */
+ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
+ __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (promisc)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
int otx2_open(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1503,7 +1548,7 @@ int otx2_open(struct net_device *netdev)
netif_carrier_off(netdev);
- pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+ pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
@@ -1523,7 +1568,7 @@ int otx2_open(struct net_device *netdev)
if (!qset->cq)
goto err_free_mem;
- qset->sq = kcalloc(pf->hw.tx_queues,
+ qset->sq = kcalloc(pf->hw.tot_tx_queues,
sizeof(struct otx2_snd_queue), GFP_KERNEL);
if (!qset->sq)
goto err_free_mem;
@@ -1544,11 +1589,20 @@ int otx2_open(struct net_device *netdev)
/* RQ0 & SQ0 are mapped to CINT0 and so on..
* 'cq_ids[0]' points to RQ's CQ and
* 'cq_ids[1]' points to SQ's CQ and
+ * 'cq_ids[2]' points to XDP's CQ.
*/
cq_poll->cq_ids[CQ_RX] =
(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+ if (pf->xdp_prog)
+ cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
+ (qidx + pf->hw.rx_queues +
+ pf->hw.tx_queues) :
+ CINT_INVALID_CQ;
+ else
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+
cq_poll->dev = (void *)pf;
netif_napi_add(netdev, &cq_poll->napi,
otx2_napi_handler, NAPI_POLL_WEIGHT);
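The mapping above implies a fixed CQ index layout. A small sketch of that layout (helper and names are hypothetical, not driver code): RX CQs occupy [0, rx), TX CQs [rx, rx + tx), and XDP CQs [rx + tx, rx + tx + xdp) when an XDP program is attached.

static int cq_id_for(int qidx, int rx, int tx, int xdp, int type)
{
	switch (type) {
	case 0: /* CQ_RX */
		return qidx < rx ? qidx : -1;
	case 1: /* CQ_TX */
		return qidx < tx ? rx + qidx : -1;
	case 2: /* CQ_XDP */
		return qidx < xdp ? rx + tx + qidx : -1;
	}
	return -1;	/* mirrors CINT_INVALID_CQ */
}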
@@ -1646,6 +1700,8 @@ int otx2_open(struct net_device *netdev)
if (err)
goto err_tx_stop_queues;
+ otx2_do_set_rx_mode(pf);
+
return 0;
err_tx_stop_queues:
@@ -1750,7 +1806,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
- (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1791,43 +1847,11 @@ static void otx2_set_rx_mode(struct net_device *netdev)
queue_work(pf->otx2_wq, &pf->rx_mode_work);
}
-static void otx2_do_set_rx_mode(struct work_struct *work)
+static void otx2_rx_mode_wrk_handler(struct work_struct *work)
{
struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
- struct net_device *netdev = pf->netdev;
- struct nix_rx_mode *req;
- bool promisc = false;
-
- if (!(netdev->flags & IFF_UP))
- return;
-
- if ((netdev->flags & IFF_PROMISC) ||
- (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
- promisc = true;
- }
-
- /* Write unicast address to mcam entries or del from mcam */
- if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
- __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
-
- mutex_lock(&pf->mbox.lock);
- req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
- if (!req) {
- mutex_unlock(&pf->mbox.lock);
- return;
- }
-
- req->mode = NIX_RX_MODE_UCAST;
-
- if (promisc)
- req->mode |= NIX_RX_MODE_PROMISC;
- if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
- req->mode |= NIX_RX_MODE_ALLMULTI;
- req->mode |= NIX_RX_MODE_USE_MCE;
-
- otx2_sync_mbox_msg(&pf->mbox);
- mutex_unlock(&pf->mbox.lock);
+ otx2_do_set_rx_mode(pf);
}
static int otx2_set_features(struct net_device *netdev,
@@ -1967,7 +1991,7 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
return 0;
}
-static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -2023,8 +2047,9 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL(otx2_config_hwtstamp);
-static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config *cfg = &pfvf->tstamp;
@@ -2039,6 +2064,7 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_ioctl);
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
{
@@ -2281,6 +2307,111 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
+static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
+ int qidx)
+{
+ struct page *page;
+ u64 dma_addr;
+ int err = 0;
+
+ dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
+ offset_in_page(xdpf->data), xdpf->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pf->dev, dma_addr))
+ return -ENOMEM;
+
+ err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ if (!err) {
+ otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
+ page = virt_to_page(xdpf->data);
+ put_page(page);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int otx2_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = smp_processor_id();
+ struct otx2_snd_queue *sq;
+ int drops = 0, i;
+
+ if (!netif_running(netdev))
+ return -ENETDOWN;
+
+ qidx += pf->hw.tx_queues;
+ sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
+
+ /* Abort xmit if the XDP queue is not initialized */
+ if (unlikely(!sq))
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
+ if (err)
+ drops++;
+ }
+ return n - drops;
+}
+
+static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+{
+ struct net_device *dev = pf->netdev;
+ bool if_up = netif_running(pf->netdev);
+ struct bpf_prog *old_prog;
+
+ if (prog && dev->mtu > MAX_XDP_MTU) {
+ netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (if_up)
+ otx2_stop(pf->netdev);
+
+ old_prog = xchg(&pf->xdp_prog, prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (pf->xdp_prog)
+ bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
+
+ /* The network stack and XDP share the same Rx queues.
+ * Use separate Tx queues for XDP and the network stack.
+ */
+ if (pf->xdp_prog)
+ pf->hw.xdp_queues = pf->hw.rx_queues;
+ else
+ pf->hw.xdp_queues = 0;
+
+ pf->hw.tot_tx_queues += pf->hw.xdp_queues;
+
+ if (if_up)
+ otx2_open(pf->netdev);
+
+ return 0;
+}
+
+static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return otx2_xdp_setup(pf, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
int req_perm)
{
@@ -2348,6 +2479,8 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
+ .ndo_bpf = otx2_xdp,
+ .ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
@@ -2358,7 +2491,7 @@ static int otx2_wq_init(struct otx2_nic *pf)
if (!pf->otx2_wq)
return -ENOMEM;
- INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
INIT_WORK(&pf->reset_task, otx2_reset_task);
return 0;
}
@@ -2489,6 +2622,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->pdev = pdev;
hw->rx_queues = qcount;
hw->tx_queues = qcount;
+ hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
num_vec = pci_msix_vec_count(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index ec9e49985c2c..0ef68fdd1f26 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -27,6 +27,23 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
+static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
+{
+ struct ptp_req *req;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_SET_THRESH;
+ req->thresh = thresh;
+
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
+
static u64 ptp_cc_read(const struct cyclecounter *cc)
{
struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
@@ -55,6 +72,33 @@ static u64 ptp_cc_read(const struct cyclecounter *cc)
return rsp->clk;
}
+static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_TSTMP;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -102,9 +146,73 @@ static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
return 0;
}
+static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
+static void otx2_ptp_extts_check(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ extts_work.work);
+ struct ptp_clock_event event;
+ u64 tstmp, new_thresh;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ tstmp = ptp_tstmp_read(ptp);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ if (tstmp != ptp->last_extts) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
+ ptp_clock_event(ptp->ptp_clock, &event);
+ ptp->last_extts = tstmp;
+
+ new_thresh = tstmp % 500000000;
+ if (ptp->thresh != new_thresh) {
+ mutex_lock(&ptp->nic->mbox.lock);
+ ptp_set_thresh(ptp, new_thresh);
+ mutex_unlock(&ptp->nic->mbox.lock);
+ ptp->thresh = new_thresh;
+ }
+ }
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+}
+
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ int pin;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on)
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+ else
+ cancel_delayed_work_sync(&ptp->extts_work);
+ return 0;
+ default:
+ break;
+ }
return -EOPNOTSUPP;
}
@@ -115,6 +223,11 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
struct ptp_req *req;
int err;
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ pfvf->ptp = NULL;
+ return 0;
+ }
+
mutex_lock(&pfvf->mbox.lock);
/* check if PTP block is available */
req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
@@ -149,20 +262,28 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
ktime_to_ns(ktime_get_real()));
+ snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP");
+ ptp_ptr->extts_config.index = 0;
+ ptp_ptr->extts_config.func = PTP_PF_NONE;
+
ptp_ptr->ptp_info = (struct ptp_clock_info) {
.owner = THIS_MODULE,
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
- .n_ext_ts = 0,
- .n_pins = 0,
+ .n_ext_ts = 1,
+ .n_pins = 1,
.pps = 0,
+ .pin_config = &ptp_ptr->extts_config,
.adjfine = otx2_ptp_adjfine,
.adjtime = otx2_ptp_adjtime,
.gettime64 = otx2_ptp_gettime,
.settime64 = otx2_ptp_settime,
.enable = otx2_ptp_enable,
+ .verify = otx2_ptp_verify_pin,
};
+ INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check);
+
ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
err = ptp_ptr->ptp_clock ?
@@ -176,6 +297,7 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
error:
return err;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_init);
void otx2_ptp_destroy(struct otx2_nic *pfvf)
{
@@ -188,6 +310,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
kfree(ptp);
pfvf->ptp = NULL;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_destroy);
int otx2_ptp_clock_index(struct otx2_nic *pfvf)
{
@@ -196,6 +319,7 @@ int otx2_ptp_clock_index(struct otx2_nic *pfvf)
return ptp_clock_index(pfvf->ptp->ptp_clock);
}
+EXPORT_SYMBOL_GPL(otx2_ptp_clock_index);
int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
{
@@ -206,3 +330,8 @@ int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
return 0;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_tstamp2time);
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION("Marvell RVU NIC PTP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index f42b1d4e0c67..0cc6353254bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -8,6 +8,8 @@
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -17,6 +19,35 @@
#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct bpf_prog *prog,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq);
+
+static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq)
+{
+ u64 incr = (u64)(cq->cq_idx) << 32;
+ u64 status;
+
+ status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
+
+ if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
+ status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
+ dev_err(pfvf->dev, "CQ stopped due to error\n");
+ return -EINVAL;
+ }
+
+ cq->cq_tail = status & 0xFFFFF;
+ cq->cq_head = (status >> 20) & 0xFFFFF;
+ if (cq->cq_tail < cq->cq_head)
+ cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
+ cq->cq_tail;
+ else
+ cq->pend_cqe = cq->cq_tail - cq->cq_head;
+
+ return 0;
+}
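The status decode above packs the CQ tail and head pointers into one 64-bit word. A minimal sketch of the same arithmetic (field layout taken from the code above, not from hardware documentation):

static unsigned int pending_cqes(unsigned long long status,
				 unsigned int cqe_cnt)
{
	unsigned int tail = status & 0xFFFFF;		/* bits 19:0 */
	unsigned int head = (status >> 20) & 0xFFFFF;	/* bits 39:20 */

	/* pending entries, accounting for ring wrap-around */
	return (tail < head) ? (cqe_cnt - head) + tail : tail - head;
}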
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
@@ -73,6 +104,24 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
sg->num_segs = 0;
}
+static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct sg_list *sg;
+ struct page *page;
+ u64 pa;
+
+ sg = &sq->sg[snd_comp->sqe_id];
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
+ sg->size[0], DMA_TO_DEVICE);
+ page = virt_to_page(phys_to_virt(pa));
+ put_page(page);
+}
+
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
struct otx2_snd_queue *sq,
@@ -132,8 +181,9 @@ static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}
-static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len, struct nix_rx_parse_s *parse)
+static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len, struct nix_rx_parse_s *parse,
+ int qidx)
{
struct page *page;
int off = 0;
@@ -154,11 +204,22 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
}
page = virt_to_page(va);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page) + off, len - off, pfvf->rbsize);
+ if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page) + off,
+ len - off, pfvf->rbsize);
+
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
+ pfvf->rbsize, DMA_FROM_DEVICE);
+ return true;
+ }
- otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
- pfvf->rbsize, DMA_FROM_DEVICE);
+ /* If more than MAX_SKB_FRAGS fragments are received,
+ * give the buffer pointers back to hardware for reuse.
+ */
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
+
+ return false;
}
static void otx2_set_rxhash(struct otx2_nic *pfvf,
@@ -285,6 +346,10 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
return;
}
+ if (pfvf->xdp_prog)
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
+ return;
+
skb = napi_get_frags(napi);
if (unlikely(!skb))
return;
@@ -296,9 +361,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
seg_addr = &sg->seg_addr;
seg_size = (void *)sg;
for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
- otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg],
- parse);
- cq->pool_ptrs++;
+ if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
+ seg_size[seg], parse, cq->cq_idx))
+ cq->pool_ptrs++;
}
start += sizeof(*sg);
}
@@ -318,7 +383,14 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
!cqe->sg.seg_addr) {
@@ -334,17 +406,13 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
cqe->sg.seg_addr = 0x00;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (unlikely(!cq->pool_ptrs))
- return 0;
- /* Refill pool with new buffers */
- pfvf->hw_ops->refill_pool_ptrs(pfvf, cq);
-
return processed_cqe;
}
@@ -364,22 +432,36 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
- int tx_pkts = 0, tx_bytes = 0;
+ int tx_pkts = 0, tx_bytes = 0, qidx;
struct nix_cqe_tx_s *cqe;
int processed_cqe = 0;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
if (unlikely(!cqe)) {
if (!processed_cqe)
return 0;
break;
}
- otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
- cqe, budget, &tx_pkts, &tx_bytes);
-
+ if (cq->cq_type == CQ_XDP) {
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
+ cqe);
+ } else {
+ otx2_snd_pkt_handler(pfvf, cq,
+ &pfvf->qset.sq[cq->cint_idx],
+ cqe, budget, &tx_pkts, &tx_bytes);
+ }
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
@@ -402,6 +484,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
+ struct otx2_cq_queue *rx_cq = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
struct otx2_cq_queue *cq;
@@ -412,17 +495,13 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
pfvf = (struct otx2_nic *)cq_poll->dev;
qset = &pfvf->qset;
- for (i = CQS_PER_CINT - 1; i >= 0; i--) {
+ for (i = 0; i < CQS_PER_CINT; i++) {
cq_idx = cq_poll->cq_ids[i];
if (unlikely(cq_idx == CINT_INVALID_CQ))
continue;
cq = &qset->cq[cq_idx];
if (cq->cq_type == CQ_RX) {
- /* If the RQ refill WQ task is running, skip napi
- * scheduler for this queue.
- */
- if (cq->refill_task_sched)
- continue;
+ rx_cq = cq;
workdone += otx2_rx_napi_handler(pfvf, napi,
cq, budget);
} else {
@@ -430,6 +509,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
}
+ if (rx_cq && rx_cq->pool_ptrs)
+ pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -936,10 +1017,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
int processed_cqe = 0;
u64 iova, pa;
- while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
- if (!cqe->sg.subdc)
- continue;
+ if (pfvf->xdp_prog)
+ xdp_rxq_info_unreg(&cq->xdp_rxq);
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
if (cqe->sg.segs > 1) {
otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
continue;
@@ -965,7 +1055,16 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
sq = &pfvf->qset.sq[cq->cint_idx];
- while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
sg = &sq->sg[cqe->comp.sqe_id];
skb = (struct sk_buff *)sg->skb;
if (skb) {
@@ -973,7 +1072,6 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
dev_kfree_skb_any(skb);
sg->skb = (u64)NULL;
}
- processed_cqe++;
}
/* Free CQEs to HW */
@@ -1001,3 +1099,116 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+
+static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ int len, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 *iova = NULL;
+
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 1;
+ sg->seg1_size = len;
+ iova = (void *)sg + sizeof(*sg);
+ *iova = dma_addr;
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ sq->sg[sq->head].dma_addr[0] = dma_addr;
+ sq->sg[sq->head].size[0] = len;
+ sq->sg[sq->head].num_segs = 1;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset, free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ if (free_sqe < sq->sqe_thresh)
+ return false;
+
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+
+ return true;
+}
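The free-descriptor check at the top of the function works per SQB. A sketch of that accounting, assuming *aura_fc_addr counts SQBs currently in use:

static bool sq_has_room(int num_sqbs, int used_sqbs,
			int sqe_per_sqb, int sqe_thresh)
{
	/* free SQEs = unused SQBs times the SQEs each SQB holds */
	int free_sqe = (num_sqbs - used_sqbs) * sqe_per_sqb;

	return free_sqe >= sqe_thresh;
}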
+
+static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct bpf_prog *prog,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq)
+{
+ unsigned char *hard_start, *data;
+ int qidx = cq->cq_idx;
+ struct xdp_buff xdp;
+ struct page *page;
+ u64 iova, pa;
+ u32 act;
+ int err;
+
+ iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ page = virt_to_page(phys_to_virt(pa));
+
+ xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
+
+ data = (unsigned char *)phys_to_virt(pa);
+ hard_start = page_address(page);
+ xdp_prepare_buff(&xdp, hard_start, data - hard_start,
+ cqe->sg.seg_size, false);
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ qidx += pfvf->hw.tx_queues;
+ cq->pool_ptrs++;
+ return otx2_xdp_sq_append_pkt(pfvf, iova,
+ cqe->sg.seg_size, qidx);
+ case XDP_REDIRECT:
+ cq->pool_ptrs++;
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ if (!err)
+ return true;
+ put_page(page);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(pfvf->netdev, prog, act);
+ break;
+ case XDP_DROP:
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ cq->pool_ptrs++;
+ return true;
+ }
+ return false;
+}
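As a counterpart to the verdict dispatch above, a trivial XDP program (kernel BPF C, hypothetical and for illustration only) whose return code exercises the XDP_DROP arm of the switch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
	return XDP_DROP;	/* handled by the XDP_DROP case above */
}

char _license[] SEC("license") = "GPL";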
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 3ff1ad79c001..f1a04cf9210c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -11,6 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <linux/if_vlan.h>
+#include <net/xdp.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -25,6 +26,8 @@
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
+#define MAX_XDP_MTU (1530 - OTX2_ETH_HLEN)
+
/* Rx buffer size should be in multiples of 128bytes */
#define RCV_FRAG_LEN1(x) \
((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
@@ -36,9 +39,7 @@
#define RCV_FRAG_LEN(x) \
((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
-#define DMA_BUFFER_LEN(x) \
- ((x) - OTX2_HEAD_ROOM - \
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DMA_BUFFER_LEN(x) ((x) - OTX2_HEAD_ROOM)
/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
* is equal to this value.
@@ -56,6 +57,9 @@
*/
#define CQ_QCOUNT_DEFAULT 1
+#define CQ_OP_STAT_OP_ERR 63
+#define CQ_OP_STAT_CQ_ERR 46
+
struct queue_stats {
u64 bytes;
u64 pkts;
@@ -96,7 +100,8 @@ struct otx2_snd_queue {
enum cq_type {
CQ_RX,
CQ_TX,
- CQS_PER_CINT = 2, /* RQ + SQ */
+ CQ_XDP,
+ CQS_PER_CINT = 3, /* RQ + SQ + XDP */
};
struct otx2_cq_poll {
@@ -122,9 +127,12 @@ struct otx2_cq_queue {
u16 pool_ptrs;
u32 cqe_cnt;
u32 cq_head;
+ u32 cq_tail;
+ u32 pend_cqe;
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct otx2_qset {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 03b4ec630432..e6cb8cd0787d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -8,9 +8,11 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
#include "otx2_reg.h"
+#include "otx2_ptp.h"
#include "cn10k.h"
#define DRV_NAME "rvu_nicvf"
@@ -277,7 +279,6 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
struct mbox *mbox = &vf->mbox;
if (vf->mbox_wq) {
- flush_workqueue(vf->mbox_wq);
destroy_workqueue(vf->mbox_wq);
vf->mbox_wq = NULL;
}
@@ -500,6 +501,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_do_ioctl = otx2_ioctl,
};
static int otx2_wq_init(struct otx2_nic *vf)
@@ -583,6 +585,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->rx_queues = qcount;
hw->tx_queues = qcount;
hw->max_queues = qcount;
+ hw->tot_tx_queues = qcount;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -640,6 +643,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
+ /* Don't check for errors; proceed without PTP */
+ otx2_ptp_init(vf);
+
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index f18fe664b373..2a4c14c704c0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -53,6 +53,8 @@ struct prestera_port_stats {
u64 good_octets_sent;
};
+#define PRESTERA_AP_PORT_MAX (10)
+
struct prestera_port_caps {
u64 supp_link_modes;
u8 supp_fec;
@@ -69,6 +71,39 @@ struct prestera_lag {
struct prestera_flow_block;
+struct prestera_port_mac_state {
+ u32 mode;
+ u32 speed;
+ bool oper;
+ u8 duplex;
+ u8 fc;
+ u8 fec;
+};
+
+struct prestera_port_phy_state {
+ u64 lmode_bmap;
+ struct {
+ bool pause;
+ bool asym_pause;
+ } remote_fc;
+ u8 mdix;
+};
+
+struct prestera_port_mac_config {
+ u32 mode;
+ u32 speed;
+ bool admin;
+ u8 inband;
+ u8 duplex;
+ u8 fec;
+};
+
+struct prestera_port_phy_config {
+ u32 mode;
+ bool admin;
+ u8 mdix;
+};
+
struct prestera_port {
struct net_device *dev;
struct prestera_switch *sw;
@@ -91,6 +126,10 @@ struct prestera_port {
struct prestera_port_stats stats;
struct delayed_work caching_dw;
} cached_hw_stats;
+ struct prestera_port_mac_config cfg_mac;
+ struct prestera_port_phy_config cfg_phy;
+ struct prestera_port_mac_state state_mac;
+ struct prestera_port_phy_state state_phy;
};
struct prestera_device {
@@ -107,7 +146,7 @@ struct prestera_device {
int (*recv_msg)(struct prestera_device *dev, void *msg, size_t size);
/* called by higher layer to send request to the firmware */
- int (*send_req)(struct prestera_device *dev, void *in_msg,
+ int (*send_req)(struct prestera_device *dev, int qid, void *in_msg,
size_t in_size, void *out_msg, size_t out_size,
unsigned int wait);
};
@@ -129,13 +168,28 @@ enum prestera_rxtx_event_id {
enum prestera_port_event_id {
PRESTERA_PORT_EVENT_UNSPEC,
- PRESTERA_PORT_EVENT_STATE_CHANGED,
+ PRESTERA_PORT_EVENT_MAC_STATE_CHANGED,
};
struct prestera_port_event {
u32 port_id;
union {
- u32 oper_state;
+ struct {
+ u32 mode;
+ u32 speed;
+ u8 oper;
+ u8 duplex;
+ u8 fc;
+ u8 fec;
+ } mac;
+ struct {
+ u64 lmode_bmap;
+ struct {
+ bool pause;
+ bool asym_pause;
+ } remote_fc;
+ u8 mdix;
+ } phy;
} data;
};
@@ -223,11 +277,16 @@ void prestera_device_unregister(struct prestera_device *dev);
struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
u32 dev_id, u32 hw_id);
-int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
- u64 adver_link_modes, u8 adver_fec);
+int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes);
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+int prestera_port_cfg_mac_read(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg);
+
+int prestera_port_cfg_mac_write(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg);
+
struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index 68b442eb6d69..06279cd6da67 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -345,8 +345,6 @@ static struct prestera_trap prestera_trap_items_arr[] = {
},
};
-static void prestera_devlink_traps_fini(struct prestera_switch *sw);
-
static int prestera_drop_counter_get(struct devlink *devlink,
const struct devlink_trap *trap,
u64 *p_drops);
@@ -381,8 +379,6 @@ static int prestera_trap_action_set(struct devlink *devlink,
enum devlink_trap_action action,
struct netlink_ext_ack *extack);
-static int prestera_devlink_traps_register(struct prestera_switch *sw);
-
static const struct devlink_ops prestera_dl_ops = {
.info_get = prestera_dl_info_get,
.trap_init = prestera_trap_init,
@@ -407,38 +403,18 @@ void prestera_devlink_free(struct prestera_switch *sw)
devlink_free(dl);
}
-int prestera_devlink_register(struct prestera_switch *sw)
+void prestera_devlink_register(struct prestera_switch *sw)
{
struct devlink *dl = priv_to_devlink(sw);
- int err;
-
- err = devlink_register(dl);
- if (err) {
- dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err);
- return err;
- }
- err = prestera_devlink_traps_register(sw);
- if (err) {
- devlink_unregister(dl);
- dev_err(sw->dev->dev, "devlink_traps_register failed: %d\n",
- err);
- return err;
- }
-
- return 0;
+ devlink_register(dl);
}
void prestera_devlink_unregister(struct prestera_switch *sw)
{
- struct prestera_trap_data *trap_data = sw->trap_data;
struct devlink *dl = priv_to_devlink(sw);
- prestera_devlink_traps_fini(sw);
devlink_unregister(dl);
-
- kfree(trap_data->trap_items_arr);
- kfree(trap_data);
}
int prestera_devlink_port_register(struct prestera_port *port)
@@ -486,7 +462,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
return &port->dl_port;
}
-static int prestera_devlink_traps_register(struct prestera_switch *sw)
+int prestera_devlink_traps_register(struct prestera_switch *sw)
{
const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr);
const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr);
@@ -625,8 +601,9 @@ static int prestera_drop_counter_get(struct devlink *devlink,
cpu_code_type, p_drops);
}
-static void prestera_devlink_traps_fini(struct prestera_switch *sw)
+void prestera_devlink_traps_unregister(struct prestera_switch *sw)
{
+ struct prestera_trap_data *trap_data = sw->trap_data;
struct devlink *dl = priv_to_devlink(sw);
const struct devlink_trap *trap;
int i;
@@ -638,4 +615,6 @@ static void prestera_devlink_traps_fini(struct prestera_switch *sw)
devlink_trap_groups_unregister(dl, prestera_trap_groups_arr,
ARRAY_SIZE(prestera_trap_groups_arr));
+ kfree(trap_data->trap_items_arr);
+ kfree(trap_data);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
index cc34c3db13a2..b322295bad3a 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -9,7 +9,7 @@
struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev);
void prestera_devlink_free(struct prestera_switch *sw);
-int prestera_devlink_register(struct prestera_switch *sw);
+void prestera_devlink_register(struct prestera_switch *sw);
void prestera_devlink_unregister(struct prestera_switch *sw);
int prestera_devlink_port_register(struct prestera_port *port);
@@ -22,5 +22,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
void prestera_devlink_trap_report(struct prestera_port *port,
struct sk_buff *skb, u8 cpu_code);
+int prestera_devlink_traps_register(struct prestera_switch *sw);
+void prestera_devlink_traps_unregister(struct prestera_switch *sw);
#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 93a5e2baf808..6011454dba71 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -323,7 +323,6 @@ static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd,
{
u32 new_mode = PRESTERA_LINK_MODE_MAX;
u32 type, mode;
- int err;
for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) {
if (port_types[type].eth_type == ecmd->base.port &&
@@ -348,13 +347,8 @@ static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd,
}
}
- if (new_mode < PRESTERA_LINK_MODE_MAX)
- err = prestera_hw_port_link_mode_set(port, new_mode);
- else
- err = -EINVAL;
-
- if (err)
- return err;
+ if (new_mode >= PRESTERA_LINK_MODE_MAX)
+ return -EINVAL;
port->caps.type = type;
port->autoneg = false;
@@ -434,27 +428,33 @@ static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd,
static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd,
struct prestera_port *port)
{
+ struct prestera_port_phy_state *state = &port->state_phy;
bool asym_pause;
bool pause;
u64 bitmap;
int err;
- err = prestera_hw_port_remote_cap_get(port, &bitmap);
- if (!err) {
- prestera_modes_to_eth(ecmd->link_modes.lp_advertising,
- bitmap, 0, PRESTERA_PORT_TYPE_NONE);
+ err = prestera_hw_port_phy_mode_get(port, NULL, &state->lmode_bmap,
+ &state->remote_fc.pause,
+ &state->remote_fc.asym_pause);
+ if (err)
+ netdev_warn(port->dev, "Remote link caps get failed, transceiver type %d\n",
+ port->caps.transceiver);
- if (!bitmap_empty(ecmd->link_modes.lp_advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS)) {
- ethtool_link_ksettings_add_link_mode(ecmd,
- lp_advertising,
- Autoneg);
- }
+ bitmap = state->lmode_bmap;
+
+ prestera_modes_to_eth(ecmd->link_modes.lp_advertising,
+ bitmap, 0, PRESTERA_PORT_TYPE_NONE);
+
+ if (!bitmap_empty(ecmd->link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Autoneg);
}
- err = prestera_hw_port_remote_fc_get(port, &pause, &asym_pause);
- if (err)
- return;
+ pause = state->remote_fc.pause;
+ asym_pause = state->remote_fc.asym_pause;
if (pause)
ethtool_link_ksettings_add_link_mode(ecmd,
@@ -466,30 +466,46 @@ static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd,
Asym_Pause);
}
-static void prestera_port_speed_get(struct ethtool_link_ksettings *ecmd,
- struct prestera_port *port)
+static void prestera_port_link_mode_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
{
+ struct prestera_port_mac_state *state = &port->state_mac;
u32 speed;
+ u8 duplex;
int err;
- err = prestera_hw_port_speed_get(port, &speed);
- ecmd->base.speed = err ? SPEED_UNKNOWN : speed;
+ if (!port->state_mac.oper)
+ return;
+
+ if (state->speed == SPEED_UNKNOWN || state->duplex == DUPLEX_UNKNOWN) {
+ err = prestera_hw_port_mac_mode_get(port, NULL, &speed,
+ &duplex, NULL);
+ if (err) {
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ } else {
+ state->speed = speed;
+ state->duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ }
+
+ ecmd->base.speed = port->state_mac.speed;
+ ecmd->base.duplex = port->state_mac.duplex;
}
-static void prestera_port_duplex_get(struct ethtool_link_ksettings *ecmd,
- struct prestera_port *port)
+static void prestera_port_mdix_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
{
- u8 duplex;
- int err;
+ struct prestera_port_phy_state *state = &port->state_phy;
- err = prestera_hw_port_duplex_get(port, &duplex);
- if (err) {
- ecmd->base.duplex = DUPLEX_UNKNOWN;
- return;
+ if (prestera_hw_port_phy_mode_get(port, &state->mdix, NULL, NULL, NULL)) {
+ netdev_warn(port->dev, "MDIX params get failed");
+ state->mdix = ETH_TP_MDI_INVALID;
}
- ecmd->base.duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ?
- DUPLEX_FULL : DUPLEX_HALF;
+ ecmd->base.eth_tp_mdix = port->state_phy.mdix;
+ ecmd->base.eth_tp_mdix_ctrl = port->cfg_phy.mdix;
}
static int
@@ -501,6 +517,8 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev,
ethtool_link_ksettings_zero_link_mode(ecmd, supported);
ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising);
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
@@ -521,13 +539,8 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev,
prestera_port_supp_types_get(ecmd, port);
- if (netif_carrier_ok(dev)) {
- prestera_port_speed_get(ecmd, port);
- prestera_port_duplex_get(ecmd, port);
- } else {
- ecmd->base.speed = SPEED_UNKNOWN;
- ecmd->base.duplex = DUPLEX_UNKNOWN;
- }
+ if (netif_carrier_ok(dev))
+ prestera_port_link_mode_get(ecmd, port);
ecmd->base.port = prestera_port_type_get(port);
@@ -545,8 +558,7 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev,
if (port->caps.type == PRESTERA_PORT_TYPE_TP &&
port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
- prestera_hw_port_mdix_get(port, &ecmd->base.eth_tp_mdix,
- &ecmd->base.eth_tp_mdix_ctrl);
+ prestera_port_mdix_get(ecmd, port);
return 0;
}
@@ -555,12 +567,17 @@ static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd,
struct prestera_port *port)
{
if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID &&
- port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
- port->caps.type == PRESTERA_PORT_TYPE_TP)
- return prestera_hw_port_mdix_set(port,
- ecmd->base.eth_tp_mdix_ctrl);
-
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP) {
+ port->cfg_phy.mdix = ecmd->base.eth_tp_mdix_ctrl;
+ return prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin,
+ port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
return 0;
}
static int prestera_port_link_mode_set(struct prestera_port *port,
@@ -568,12 +585,15 @@ static int prestera_port_link_mode_set(struct prestera_port *port,
{
u32 new_mode = PRESTERA_LINK_MODE_MAX;
u32 mode;
+ int err;
for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
- if (speed != port_link_modes[mode].speed)
+ if (speed != SPEED_UNKNOWN &&
+ speed != port_link_modes[mode].speed)
continue;
- if (duplex != port_link_modes[mode].duplex)
+ if (duplex != DUPLEX_UNKNOWN &&
+ duplex != port_link_modes[mode].duplex)
continue;
if (!(port_link_modes[mode].pr_mask &
@@ -590,36 +610,31 @@ static int prestera_port_link_mode_set(struct prestera_port *port,
if (new_mode == PRESTERA_LINK_MODE_MAX)
return -EOPNOTSUPP;
- return prestera_hw_port_link_mode_set(port, new_mode);
+ err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin,
+ false, new_mode, 0,
+ port->cfg_phy.mdix);
+ if (err)
+ return err;
+
+ port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
+ port->adver_link_modes = 0;
+ port->cfg_phy.mode = new_mode;
+ port->autoneg = false;
+
+ return 0;
}
static int
prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd,
struct prestera_port *port)
{
- u32 curr_mode;
- u8 duplex;
- u32 speed;
- int err;
-
- err = prestera_hw_port_link_mode_get(port, &curr_mode);
- if (err)
- return err;
- if (curr_mode >= PRESTERA_LINK_MODE_MAX)
- return -EINVAL;
+ u8 duplex = DUPLEX_UNKNOWN;
if (ecmd->base.duplex != DUPLEX_UNKNOWN)
duplex = ecmd->base.duplex == DUPLEX_FULL ?
PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF;
- else
- duplex = port_link_modes[curr_mode].duplex;
- if (ecmd->base.speed != SPEED_UNKNOWN)
- speed = ecmd->base.speed;
- else
- speed = port_link_modes[curr_mode].speed;
-
- return prestera_port_link_mode_set(port, speed, duplex,
+ return prestera_port_link_mode_set(port, ecmd->base.speed, duplex,
port->caps.type);
}
@@ -645,19 +660,12 @@ prestera_ethtool_set_link_ksettings(struct net_device *dev,
prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes,
&adver_fec, port->caps.type);
- err = prestera_port_autoneg_set(port,
- ecmd->base.autoneg == AUTONEG_ENABLE,
- adver_modes, adver_fec);
- if (err)
- return err;
-
- if (ecmd->base.autoneg == AUTONEG_DISABLE) {
+ if (ecmd->base.autoneg == AUTONEG_ENABLE)
+ err = prestera_port_autoneg_set(port, adver_modes);
+ else
err = prestera_port_speed_duplex_set(ecmd, port);
- if (err)
- return err;
- }
- return 0;
+ return err;
}
static int prestera_ethtool_get_fecparam(struct net_device *dev,
@@ -668,7 +676,7 @@ static int prestera_ethtool_get_fecparam(struct net_device *dev,
u32 mode;
int err;
- err = prestera_hw_port_fec_get(port, &active);
+ err = prestera_hw_port_mac_mode_get(port, NULL, NULL, NULL, &active);
if (err)
return err;
@@ -693,18 +701,19 @@ static int prestera_ethtool_set_fecparam(struct net_device *dev,
struct ethtool_fecparam *fecparam)
{
struct prestera_port *port = netdev_priv(dev);
- u8 fec, active;
+ struct prestera_port_mac_config cfg_mac;
u32 mode;
- int err;
+ u8 fec;
if (port->autoneg) {
netdev_err(dev, "FEC set is not allowed while autoneg is on\n");
return -EINVAL;
}
- err = prestera_hw_port_fec_get(port, &active);
- if (err)
- return err;
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ netdev_err(dev, "FEC set is not allowed on non-SFP ports\n");
+ return -EINVAL;
+ }
fec = PRESTERA_PORT_FEC_MAX;
for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
@@ -715,13 +724,19 @@ static int prestera_ethtool_set_fecparam(struct net_device *dev,
}
}
- if (fec == active)
+ prestera_port_cfg_mac_read(port, &cfg_mac);
+
+ if (fec == cfg_mac.fec)
return 0;
- if (fec == PRESTERA_PORT_FEC_MAX)
- return -EOPNOTSUPP;
+ if (fec == PRESTERA_PORT_FEC_MAX) {
+ netdev_err(dev, "Unsupported FEC requested");
+ return -EINVAL;
+ }
+
+ cfg_mac.fec = fec;
- return prestera_hw_port_fec_set(port, fec);
+ return prestera_port_cfg_mac_write(port, &cfg_mac);
}
static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset)
@@ -766,6 +781,28 @@ static int prestera_ethtool_nway_reset(struct net_device *dev)
return -EINVAL;
}
+void prestera_ethtool_port_state_changed(struct prestera_port *port,
+ struct prestera_port_event *evt)
+{
+ struct prestera_port_mac_state *smac = &port->state_mac;
+
+ smac->oper = evt->data.mac.oper;
+
+ if (smac->oper) {
+ smac->mode = evt->data.mac.mode;
+ smac->speed = evt->data.mac.speed;
+ smac->duplex = evt->data.mac.duplex;
+ smac->fc = evt->data.mac.fc;
+ smac->fec = evt->data.mac.fec;
+ } else {
+ smac->mode = PRESTERA_MAC_MODE_MAX;
+ smac->speed = SPEED_UNKNOWN;
+ smac->duplex = DUPLEX_UNKNOWN;
+ smac->fc = 0;
+ smac->fec = 0;
+ }
+}
+
const struct ethtool_ops prestera_ethtool_ops = {
.get_drvinfo = prestera_ethtool_get_drvinfo,
.get_link_ksettings = prestera_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
index 523ef1f592ce..9eb18e99dea6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -6,6 +6,12 @@
#include <linux/ethtool.h>
+struct prestera_port_event;
+struct prestera_port;
+
extern const struct ethtool_ops prestera_ethtool_ops;
+void prestera_ethtool_port_state_changed(struct prestera_port *port,
+ struct prestera_port_event *evt);
+
#endif /* _PRESTERA_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index c1297859e471..41ba17cb2965 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -47,7 +47,6 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531,
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
- PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801,
PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900,
PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE = 0x901,
@@ -76,16 +75,12 @@ enum {
PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
- PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY = 10,
- PRESTERA_CMD_PORT_ATTR_REMOTE_FC = 11,
- PRESTERA_CMD_PORT_ATTR_LINK_MODE = 12,
+ PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12,
PRESTERA_CMD_PORT_ATTR_TYPE = 13,
- PRESTERA_CMD_PORT_ATTR_FEC = 14,
- PRESTERA_CMD_PORT_ATTR_AUTONEG = 15,
- PRESTERA_CMD_PORT_ATTR_DUPLEX = 16,
PRESTERA_CMD_PORT_ATTR_STATS = 17,
- PRESTERA_CMD_PORT_ATTR_MDIX = 18,
- PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART = 19,
+ PRESTERA_CMD_PORT_ATTR_MAC_AUTONEG_RESTART = 18,
+ PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART = 19,
+ PRESTERA_CMD_PORT_ATTR_MAC_MODE = 22,
};
enum {
@@ -169,12 +164,12 @@ struct prestera_fw_event_handler {
};
struct prestera_msg_cmd {
- u32 type;
+ __le32 type;
};
struct prestera_msg_ret {
struct prestera_msg_cmd cmd;
- u32 status;
+ __le32 status;
};
struct prestera_msg_common_req {
@@ -187,102 +182,144 @@ struct prestera_msg_common_resp {
union prestera_msg_switch_param {
u8 mac[ETH_ALEN];
- u32 ageing_timeout_ms;
-};
+ __le32 ageing_timeout_ms;
+} __packed;
struct prestera_msg_switch_attr_req {
struct prestera_msg_cmd cmd;
- u32 attr;
+ __le32 attr;
union prestera_msg_switch_param param;
};
struct prestera_msg_switch_init_resp {
struct prestera_msg_ret ret;
- u32 port_count;
- u32 mtu_max;
+ __le32 port_count;
+ __le32 mtu_max;
u8 switch_id;
u8 lag_max;
u8 lag_member_max;
-};
+ __le32 size_tbl_router_nexthop;
+} __packed __aligned(4);
-struct prestera_msg_port_autoneg_param {
- u64 link_mode;
- u8 enable;
- u8 fec;
-};
+struct prestera_msg_event_port_param {
+ union {
+ struct {
+ u8 oper;
+ __le32 mode;
+ __le32 speed;
+ u8 duplex;
+ u8 fc;
+ u8 fec;
+ } __packed mac;
+ struct {
+ u8 mdix;
+ __le64 lmode_bmap;
+ u8 fc;
+ } __packed phy;
+ } __packed;
+} __packed __aligned(4);
struct prestera_msg_port_cap_param {
- u64 link_mode;
+ __le64 link_mode;
u8 type;
u8 fec;
+ u8 fc;
u8 transceiver;
};
-struct prestera_msg_port_mdix_param {
- u8 status;
- u8 admin_mode;
-};
-
struct prestera_msg_port_flood_param {
u8 type;
u8 enable;
};
union prestera_msg_port_param {
- u8 admin_state;
- u8 oper_state;
- u32 mtu;
- u8 mac[ETH_ALEN];
- u8 accept_frm_type;
- u32 speed;
+ u8 admin_state;
+ u8 oper_state;
+ __le32 mtu;
+ u8 mac[ETH_ALEN];
+ u8 accept_frm_type;
+ __le32 speed;
u8 learning;
u8 flood;
- u32 link_mode;
- u8 type;
- u8 duplex;
- u8 fec;
- u8 fc;
- struct prestera_msg_port_mdix_param mdix;
- struct prestera_msg_port_autoneg_param autoneg;
+ __le32 link_mode;
+ u8 type;
+ u8 duplex;
+ u8 fec;
+ u8 fc;
+
+ union {
+ struct {
+ u8 admin:1;
+ u8 fc;
+ u8 ap_enable;
+ union {
+ struct {
+ __le32 mode;
+ u8 inband:1;
+ __le32 speed;
+ u8 duplex;
+ u8 fec;
+ u8 fec_supp;
+ } __packed reg_mode;
+ struct {
+ __le32 mode;
+ __le32 speed;
+ u8 fec;
+ u8 fec_supp;
+ } __packed ap_modes[PRESTERA_AP_PORT_MAX];
+ } __packed;
+ } __packed mac;
+ struct {
+ u8 admin:1;
+ u8 adv_enable;
+ __le64 modes;
+ __le32 mode;
+ u8 mdix;
+ } __packed phy;
+ } __packed link;
+
struct prestera_msg_port_cap_param cap;
struct prestera_msg_port_flood_param flood_ext;
-};
+ struct prestera_msg_event_port_param link_evt;
+} __packed;
struct prestera_msg_port_attr_req {
struct prestera_msg_cmd cmd;
- u32 attr;
- u32 port;
- u32 dev;
+ __le32 attr;
+ __le32 port;
+ __le32 dev;
union prestera_msg_port_param param;
-};
+} __packed __aligned(4);
+
struct prestera_msg_port_attr_resp {
struct prestera_msg_ret ret;
union prestera_msg_port_param param;
-};
+} __packed __aligned(4);
+
struct prestera_msg_port_stats_resp {
struct prestera_msg_ret ret;
- u64 stats[PRESTERA_PORT_CNT_MAX];
+ __le64 stats[PRESTERA_PORT_CNT_MAX];
};
struct prestera_msg_port_info_req {
struct prestera_msg_cmd cmd;
- u32 port;
+ __le32 port;
};
struct prestera_msg_port_info_resp {
struct prestera_msg_ret ret;
- u32 hw_id;
- u32 dev_id;
- u16 fp_id;
+ __le32 hw_id;
+ __le32 dev_id;
+ __le16 fp_id;
};
struct prestera_msg_vlan_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
- u16 vid;
+ __le32 port;
+ __le32 dev;
+ __le16 vid;
u8 is_member;
u8 is_tagged;
};
@@ -292,113 +329,114 @@ struct prestera_msg_fdb_req {
u8 dest_type;
union {
struct {
- u32 port;
- u32 dev;
+ __le32 port;
+ __le32 dev;
};
- u16 lag_id;
+ __le16 lag_id;
} dest;
u8 mac[ETH_ALEN];
- u16 vid;
+ __le16 vid;
u8 dynamic;
- u32 flush_mode;
-};
+ __le32 flush_mode;
+} __packed __aligned(4);
struct prestera_msg_bridge_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
- u16 bridge;
+ __le32 port;
+ __le32 dev;
+ __le16 bridge;
};
struct prestera_msg_bridge_resp {
struct prestera_msg_ret ret;
- u16 bridge;
+ __le16 bridge;
};
struct prestera_msg_acl_action {
- u32 id;
+ __le32 id;
+ __le32 reserved[5];
};
struct prestera_msg_acl_match {
- u32 type;
+ __le32 type;
union {
struct {
u8 key;
u8 mask;
- } u8;
+ } __packed u8;
struct {
- u16 key;
- u16 mask;
+ __le16 key;
+ __le16 mask;
} u16;
struct {
- u32 key;
- u32 mask;
+ __le32 key;
+ __le32 mask;
} u32;
struct {
- u64 key;
- u64 mask;
+ __le64 key;
+ __le64 mask;
} u64;
struct {
u8 key[ETH_ALEN];
u8 mask[ETH_ALEN];
- } mac;
- } __packed keymask;
+ } __packed mac;
+ } keymask;
};
struct prestera_msg_acl_rule_req {
struct prestera_msg_cmd cmd;
- u32 id;
- u32 priority;
- u16 ruleset_id;
+ __le32 id;
+ __le32 priority;
+ __le16 ruleset_id;
u8 n_actions;
u8 n_matches;
};
struct prestera_msg_acl_rule_resp {
struct prestera_msg_ret ret;
- u32 id;
+ __le32 id;
};
struct prestera_msg_acl_rule_stats_resp {
struct prestera_msg_ret ret;
- u64 packets;
- u64 bytes;
+ __le64 packets;
+ __le64 bytes;
};
struct prestera_msg_acl_ruleset_bind_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
- u16 ruleset_id;
+ __le32 port;
+ __le32 dev;
+ __le16 ruleset_id;
};
struct prestera_msg_acl_ruleset_req {
struct prestera_msg_cmd cmd;
- u16 id;
+ __le16 id;
};
struct prestera_msg_acl_ruleset_resp {
struct prestera_msg_ret ret;
- u16 id;
+ __le16 id;
};
struct prestera_msg_span_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
+ __le32 port;
+ __le32 dev;
u8 id;
-} __packed __aligned(4);
+};
struct prestera_msg_span_resp {
struct prestera_msg_ret ret;
u8 id;
-} __packed __aligned(4);
+};
struct prestera_msg_stp_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
- u16 vid;
+ __le32 port;
+ __le32 dev;
+ __le16 vid;
u8 state;
};
@@ -409,20 +447,14 @@ struct prestera_msg_rxtx_req {
struct prestera_msg_rxtx_resp {
struct prestera_msg_ret ret;
- u32 map_addr;
-};
-
-struct prestera_msg_rxtx_port_req {
- struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
+ __le32 map_addr;
};
struct prestera_msg_lag_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
- u16 lag_id;
+ __le32 port;
+ __le32 dev;
+ __le16 lag_id;
};
struct prestera_msg_cpu_code_counter_req {
@@ -433,22 +465,18 @@ struct prestera_msg_cpu_code_counter_req {
struct mvsw_msg_cpu_code_counter_ret {
struct prestera_msg_ret ret;
- u64 packet_count;
+ __le64 packet_count;
};
struct prestera_msg_event {
- u16 type;
- u16 id;
-};
-
-union prestera_msg_event_port_param {
- u32 oper_state;
+ __le16 type;
+ __le16 id;
};
struct prestera_msg_event_port {
struct prestera_msg_event id;
- u32 port_id;
- union prestera_msg_event_port_param param;
+ __le32 port_id;
+ struct prestera_msg_event_port_param param;
};
union prestera_msg_event_fdb_param {
@@ -459,12 +487,52 @@ struct prestera_msg_event_fdb {
struct prestera_msg_event id;
u8 dest_type;
union {
- u32 port_id;
- u16 lag_id;
+ __le32 port_id;
+ __le16 lag_id;
} dest;
- u32 vid;
+ __le32 vid;
union prestera_msg_event_fdb_param param;
-};
+} __packed __aligned(4);
+
+static inline void prestera_hw_build_tests(void)
+{
+ /* check requests */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_common_req) != 4);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_switch_attr_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_req) != 120);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_bind_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8);
+
+ /* check responses */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_switch_init_resp) != 24);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_resp) != 112);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_stats_resp) != 24);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12);
+
+ /* check events */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_event_fdb) != 20);
+}
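The pattern above pins the wire format at compile time. A minimal illustration with a hypothetical message: BUILD_BUG_ON() fails the build if a field or padding change alters the on-wire size.

struct example_wire_msg {
	__le32 type;
	__le32 status;
} __packed;

static inline void example_build_tests(void)
{
	/* breaks the build if the layout drifts from 8 bytes */
	BUILD_BUG_ON(sizeof(struct example_wire_msg) != 8);
}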
+
+static u8 prestera_hw_mdix_to_eth(u8 mode);
+static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause);
static int __prestera_cmd_ret(struct prestera_switch *sw,
enum prestera_cmd_type_t type,
@@ -475,15 +543,15 @@ static int __prestera_cmd_ret(struct prestera_switch *sw,
struct prestera_device *dev = sw->dev;
int err;
- cmd->type = type;
+ cmd->type = __cpu_to_le32(type);
- err = dev->send_req(dev, cmd, clen, ret, rlen, waitms);
+ err = dev->send_req(dev, 0, cmd, clen, ret, rlen, waitms);
if (err)
return err;
- if (ret->cmd.type != PRESTERA_CMD_TYPE_ACK)
+ if (__le32_to_cpu(ret->cmd.type) != PRESTERA_CMD_TYPE_ACK)
return -EBADE;
- if (ret->status != PRESTERA_CMD_ACK_OK)
+ if (__le32_to_cpu(ret->status) != PRESTERA_CMD_ACK_OK)
return -EINVAL;
return 0;
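The conversion discipline shown here is applied only at the firmware boundary: fields are stored little-endian in the message structs and converted with __cpu_to_le32()/__le32_to_cpu() at the edges, while native byte order is used everywhere else. A sketch with a hypothetical request:

struct demo_req {
	struct prestera_msg_cmd cmd;
	__le32 port;
};

static void demo_req_fill(struct demo_req *req, u32 port)
{
	req->port = __cpu_to_le32(port);	/* CPU -> wire */
}

static u32 demo_req_port(const struct demo_req *req)
{
	return __le32_to_cpu(req->port);	/* wire -> CPU */
}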
@@ -517,13 +585,24 @@ static int prestera_cmd(struct prestera_switch *sw,
static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt)
{
- struct prestera_msg_event_port *hw_evt = msg;
+ struct prestera_msg_event_port *hw_evt;
- if (evt->id != PRESTERA_PORT_EVENT_STATE_CHANGED)
- return -EINVAL;
+ hw_evt = (struct prestera_msg_event_port *)msg;
- evt->port_evt.data.oper_state = hw_evt->param.oper_state;
- evt->port_evt.port_id = hw_evt->port_id;
+ evt->port_evt.port_id = __le32_to_cpu(hw_evt->port_id);
+
+ if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
+ evt->port_evt.data.mac.oper = hw_evt->param.mac.oper;
+ evt->port_evt.data.mac.mode =
+ __le32_to_cpu(hw_evt->param.mac.mode);
+ evt->port_evt.data.mac.speed =
+ __le32_to_cpu(hw_evt->param.mac.speed);
+ evt->port_evt.data.mac.duplex = hw_evt->param.mac.duplex;
+ evt->port_evt.data.mac.fc = hw_evt->param.mac.fc;
+ evt->port_evt.data.mac.fec = hw_evt->param.mac.fec;
+ } else {
+ return -EINVAL;
+ }
return 0;
}
@@ -535,17 +614,17 @@ static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt)
switch (hw_evt->dest_type) {
case PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT:
evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_REG_PORT;
- evt->fdb_evt.dest.port_id = hw_evt->dest.port_id;
+ evt->fdb_evt.dest.port_id = __le32_to_cpu(hw_evt->dest.port_id);
break;
case PRESTERA_HW_FDB_ENTRY_TYPE_LAG:
evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_LAG;
- evt->fdb_evt.dest.lag_id = hw_evt->dest.lag_id;
+ evt->fdb_evt.dest.lag_id = __le16_to_cpu(hw_evt->dest.lag_id);
break;
default:
return -EINVAL;
}
- evt->fdb_evt.vid = hw_evt->vid;
+ evt->fdb_evt.vid = __le32_to_cpu(hw_evt->vid);
ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac);
@@ -597,20 +676,22 @@ static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size
struct prestera_msg_event *msg = buf;
struct prestera_fw_event_handler eh;
struct prestera_event evt;
+ u16 msg_type;
int err;
- if (msg->type >= PRESTERA_EVENT_TYPE_MAX)
+ msg_type = __le16_to_cpu(msg->type);
+ if (msg_type >= PRESTERA_EVENT_TYPE_MAX)
return -EINVAL;
- if (!fw_event_parsers[msg->type].func)
+ if (!fw_event_parsers[msg_type].func)
return -ENOENT;
- err = prestera_find_event_handler(sw, msg->type, &eh);
+ err = prestera_find_event_handler(sw, msg_type, &eh);
if (err)
return err;
- evt.id = msg->id;
+ evt.id = __le16_to_cpu(msg->id);
- err = fw_event_parsers[msg->type].func(buf, &evt);
+ err = fw_event_parsers[msg_type].func(buf, &evt);
if (err)
return err;
@@ -635,11 +716,39 @@ static void prestera_pkt_recv(struct prestera_device *dev)
eh.func(sw, &ev, eh.arg);
}
+static u8 prestera_hw_mdix_to_eth(u8 mode)
+{
+ switch (mode) {
+ case PRESTERA_PORT_TP_MDI:
+ return ETH_TP_MDI;
+ case PRESTERA_PORT_TP_MDIX:
+ return ETH_TP_MDI_X;
+ case PRESTERA_PORT_TP_AUTO:
+ return ETH_TP_MDI_AUTO;
+ default:
+ return ETH_TP_MDI_INVALID;
+ }
+}
+
+static u8 prestera_hw_mdix_from_eth(u8 mode)
+{
+ switch (mode) {
+ case ETH_TP_MDI:
+ return PRESTERA_PORT_TP_MDI;
+ case ETH_TP_MDI_X:
+ return PRESTERA_PORT_TP_MDIX;
+ case ETH_TP_MDI_AUTO:
+ return PRESTERA_PORT_TP_AUTO;
+ default:
+ return PRESTERA_PORT_TP_NA;
+ }
+}
+
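The two helpers above translate between the firmware's MDI/MDI-X encoding and the ethtool one; unknown values fall through to ETH_TP_MDI_INVALID and PRESTERA_PORT_TP_NA respectively. An illustrative round-trip check (not part of the patch) over the three values both encodings share:

static void example_mdix_roundtrip(void)
{
	static const u8 modes[] = {
		ETH_TP_MDI, ETH_TP_MDI_X, ETH_TP_MDI_AUTO,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(modes); i++)
		WARN_ON(prestera_hw_mdix_to_eth(
			prestera_hw_mdix_from_eth(modes[i])) != modes[i]);
}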
int prestera_hw_port_info_get(const struct prestera_port *port,
u32 *dev_id, u32 *hw_id, u16 *fp_id)
{
struct prestera_msg_port_info_req req = {
- .port = port->id,
+ .port = __cpu_to_le32(port->id),
};
struct prestera_msg_port_info_resp resp;
int err;
@@ -649,9 +758,9 @@ int prestera_hw_port_info_get(const struct prestera_port *port,
if (err)
return err;
- *dev_id = resp.dev_id;
- *hw_id = resp.hw_id;
- *fp_id = resp.fp_id;
+ *dev_id = __le32_to_cpu(resp.dev_id);
+ *hw_id = __le32_to_cpu(resp.hw_id);
+ *fp_id = __le16_to_cpu(resp.fp_id);
return 0;
}
@@ -659,7 +768,7 @@ int prestera_hw_port_info_get(const struct prestera_port *port,
int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac)
{
struct prestera_msg_switch_attr_req req = {
- .attr = PRESTERA_CMD_SWITCH_ATTR_MAC,
+ .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_MAC),
};
ether_addr_copy(req.param.mac, mac);
@@ -676,6 +785,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw)
INIT_LIST_HEAD(&sw->event_handlers);
+ prestera_hw_build_tests();
+
err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT,
&req.cmd, sizeof(req),
&resp.ret, sizeof(resp),
@@ -685,9 +796,9 @@ int prestera_hw_switch_init(struct prestera_switch *sw)
sw->dev->recv_msg = prestera_evt_recv;
sw->dev->recv_pkt = prestera_pkt_recv;
- sw->port_count = resp.port_count;
+ sw->port_count = __le32_to_cpu(resp.port_count);
sw->mtu_min = PRESTERA_MIN_MTU;
- sw->mtu_max = resp.mtu_max;
+ sw->mtu_max = __le32_to_cpu(resp.mtu_max);
sw->id = resp.switch_id;
sw->lag_member_max = resp.lag_member_max;
sw->lag_max = resp.lag_max;
@@ -703,9 +814,9 @@ void prestera_hw_switch_fini(struct prestera_switch *sw)
int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms)
{
struct prestera_msg_switch_attr_req req = {
- .attr = PRESTERA_CMD_SWITCH_ATTR_AGEING,
+ .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_AGEING),
.param = {
- .ageing_timeout_ms = ageing_ms,
+ .ageing_timeout_ms = __cpu_to_le32(ageing_ms),
},
};
@@ -713,15 +824,56 @@ int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms)
&req.cmd, sizeof(req));
}
-int prestera_hw_port_state_set(const struct prestera_port *port,
- bool admin_state)
+int prestera_hw_port_mac_mode_get(const struct prestera_port *port,
+ u32 *mode, u32 *speed, u8 *duplex, u8 *fec)
+{
+ struct prestera_msg_port_attr_resp resp;
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id)
+ };
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ if (mode)
+ *mode = __le32_to_cpu(resp.param.link_evt.mac.mode);
+
+ if (speed)
+ *speed = __le32_to_cpu(resp.param.link_evt.mac.speed);
+
+ if (duplex)
+ *duplex = resp.param.link_evt.mac.duplex;
+
+ if (fec)
+ *fec = resp.param.link_evt.mac.fec;
+
+ return err;
+}
+
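prestera_hw_port_mac_mode_get() issues a generic PORT_ATTR_GET request and unpacks the MAC fields from the link_evt union; every out-parameter is optional and may be NULL. A hypothetical caller sketch:

static void example_dump_mac_mode(const struct prestera_port *port)
{
	u32 mode, speed;
	u8 duplex, fec;

	if (!prestera_hw_port_mac_mode_get(port, &mode, &speed,
					   &duplex, &fec))
		pr_info("mac: mode=%u speed=%u duplex=%u fec=%u\n",
			mode, speed, duplex, fec);
}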
+int prestera_hw_port_mac_mode_set(const struct prestera_port *port,
+ bool admin, u32 mode, u8 inband,
+ u32 speed, u8 duplex, u8 fec)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_ADMIN_STATE,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
- .admin_state = admin_state,
+ .link = {
+ .mac = {
+ .admin = admin,
+ .reg_mode.mode = __cpu_to_le32(mode),
+ .reg_mode.inband = inband,
+ .reg_mode.speed = __cpu_to_le32(speed),
+ .reg_mode.duplex = duplex,
+ .reg_mode.fec = fec
+ }
+ }
}
};
@@ -729,14 +881,70 @@ int prestera_hw_port_state_set(const struct prestera_port *port,
&req.cmd, sizeof(req));
}
+int prestera_hw_port_phy_mode_get(const struct prestera_port *port,
+ u8 *mdix, u64 *lmode_bmap,
+ bool *fc_pause, bool *fc_asym)
+{
+ struct prestera_msg_port_attr_resp resp;
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id)
+ };
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ if (mdix)
+ *mdix = prestera_hw_mdix_to_eth(resp.param.link_evt.phy.mdix);
+
+ if (lmode_bmap)
+ *lmode_bmap = __le64_to_cpu(resp.param.link_evt.phy.lmode_bmap);
+
+ if (fc_pause && fc_asym)
+ prestera_hw_remote_fc_to_eth(resp.param.link_evt.phy.fc,
+ fc_pause, fc_asym);
+
+ return err;
+}
+
+int prestera_hw_port_phy_mode_set(const struct prestera_port *port,
+ bool admin, bool adv, u32 mode, u64 modes,
+ u8 mdix)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .link = {
+ .phy = {
+ .admin = admin,
+ .adv_enable = adv ? 1 : 0,
+ .mode = __cpu_to_le32(mode),
+ .modes = __cpu_to_le64(modes),
+ }
+ }
+ }
+ };
+
+ req.param.link.phy.mdix = prestera_hw_mdix_from_eth(mdix);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_MTU,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MTU),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
- .mtu = mtu,
+ .mtu = __cpu_to_le32(mtu),
}
};
@@ -747,9 +955,9 @@ int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu)
int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_MAC,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
ether_addr_copy(req.param.mac, mac);
@@ -762,9 +970,9 @@ int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
.accept_frm_type = type,
}
@@ -778,9 +986,9 @@ int prestera_hw_port_cap_get(const struct prestera_port *port,
struct prestera_port_caps *caps)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_CAPABILITY,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_CAPABILITY),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_attr_resp resp;
int err;
@@ -790,7 +998,7 @@ int prestera_hw_port_cap_get(const struct prestera_port *port,
if (err)
return err;
- caps->supp_link_modes = resp.param.cap.link_mode;
+ caps->supp_link_modes = __le64_to_cpu(resp.param.cap.link_mode);
caps->transceiver = resp.param.cap.transceiver;
caps->supp_fec = resp.param.cap.fec;
caps->type = resp.param.cap.type;
@@ -798,44 +1006,9 @@ int prestera_hw_port_cap_get(const struct prestera_port *port,
return err;
}
-int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
- u64 *link_mode_bitmap)
+static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause)
{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *link_mode_bitmap = resp.param.cap.link_mode;
-
- return 0;
-}
-
-int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
- bool *pause, bool *asym_pause)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_FC,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- switch (resp.param.fc) {
+ switch (fc) {
case PRESTERA_FC_SYMMETRIC:
*pause = true;
*asym_pause = false;
@@ -852,8 +1025,6 @@ int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
*pause = false;
*asym_pause = false;
}
-
- return 0;
}
int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
@@ -867,7 +1038,7 @@ int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
if (err)
return err;
- *ruleset_id = resp.id;
+ *ruleset_id = __le16_to_cpu(resp.id);
return 0;
}
@@ -875,7 +1046,7 @@ int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id)
{
struct prestera_msg_acl_ruleset_req req = {
- .id = ruleset_id,
+ .id = __cpu_to_le16(ruleset_id),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE,
@@ -890,7 +1061,7 @@ static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action,
int i = 0;
list_for_each_entry(a_entry, a_list, list) {
- action[i].id = a_entry->id;
+ action[i].id = __cpu_to_le32(a_entry->id);
switch (a_entry->id) {
case PRESTERA_ACL_RULE_ACTION_ACCEPT:
@@ -916,7 +1087,7 @@ static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
int i = 0;
list_for_each_entry(m_entry, m_list, list) {
- match[i].type = m_entry->type;
+ match[i].type = __cpu_to_le32(m_entry->type);
switch (m_entry->type) {
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE:
@@ -924,8 +1095,10 @@ static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST:
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID:
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID:
- match[i].keymask.u16.key = m_entry->keymask.u16.key;
- match[i].keymask.u16.mask = m_entry->keymask.u16.mask;
+ match[i].keymask.u16.key =
+ __cpu_to_le16(m_entry->keymask.u16.key);
+ match[i].keymask.u16.mask =
+ __cpu_to_le16(m_entry->keymask.u16.mask);
break;
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE:
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE:
@@ -946,12 +1119,16 @@ static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST:
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC:
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST:
- match[i].keymask.u32.key = m_entry->keymask.u32.key;
- match[i].keymask.u32.mask = m_entry->keymask.u32.mask;
+ match[i].keymask.u32.key =
+ __cpu_to_le32(m_entry->keymask.u32.key);
+ match[i].keymask.u32.mask =
+ __cpu_to_le32(m_entry->keymask.u32.mask);
break;
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT:
- match[i].keymask.u64.key = m_entry->keymask.u64.key;
- match[i].keymask.u64.mask = m_entry->keymask.u64.mask;
+ match[i].keymask.u64.key =
+ __cpu_to_le64(m_entry->keymask.u64.key);
+ match[i].keymask.u64.mask =
+ __cpu_to_le64(m_entry->keymask.u64.mask);
break;
default:
return -EINVAL;
@@ -1001,8 +1178,8 @@ int prestera_hw_acl_rule_add(struct prestera_switch *sw,
if (err)
goto free_buff;
- req->ruleset_id = prestera_acl_rule_ruleset_id_get(rule);
- req->priority = prestera_acl_rule_priority_get(rule);
+ req->ruleset_id = __cpu_to_le16(prestera_acl_rule_ruleset_id_get(rule));
+ req->priority = __cpu_to_le32(prestera_acl_rule_priority_get(rule));
req->n_actions = prestera_acl_rule_action_len(rule);
req->n_matches = prestera_acl_rule_match_len(rule);
@@ -1011,7 +1188,7 @@ int prestera_hw_acl_rule_add(struct prestera_switch *sw,
if (err)
goto free_buff;
- *rule_id = resp.id;
+ *rule_id = __le32_to_cpu(resp.id);
free_buff:
kfree(buff);
return err;
@@ -1020,7 +1197,7 @@ free_buff:
int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id)
{
struct prestera_msg_acl_rule_req req = {
- .id = rule_id
+ .id = __cpu_to_le32(rule_id)
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULE_DELETE,
@@ -1032,7 +1209,7 @@ int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
{
struct prestera_msg_acl_rule_stats_resp resp;
struct prestera_msg_acl_rule_req req = {
- .id = rule_id
+ .id = __cpu_to_le32(rule_id)
};
int err;
@@ -1041,8 +1218,8 @@ int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
if (err)
return err;
- *packets = resp.packets;
- *bytes = resp.bytes;
+ *packets = __le64_to_cpu(resp.packets);
+ *bytes = __le64_to_cpu(resp.bytes);
return 0;
}
@@ -1050,9 +1227,9 @@ int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id)
{
struct prestera_msg_acl_ruleset_bind_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .ruleset_id = ruleset_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .ruleset_id = __cpu_to_le16(ruleset_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND,
@@ -1063,9 +1240,9 @@ int prestera_hw_acl_port_unbind(const struct prestera_port *port,
u16 ruleset_id)
{
struct prestera_msg_acl_ruleset_bind_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .ruleset_id = ruleset_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .ruleset_id = __cpu_to_le16(ruleset_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND,
@@ -1076,8 +1253,8 @@ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
{
struct prestera_msg_span_resp resp;
struct prestera_msg_span_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
int err;
@@ -1094,8 +1271,8 @@ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id)
{
struct prestera_msg_span_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.id = span_id,
};
@@ -1106,8 +1283,8 @@ int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id)
int prestera_hw_span_unbind(const struct prestera_port *port)
{
struct prestera_msg_span_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_UNBIND,
@@ -1127,9 +1304,9 @@ int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id)
int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_TYPE,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_TYPE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_attr_resp resp;
int err;
@@ -1144,146 +1321,12 @@ int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
return 0;
}
-int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_FEC,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *fec = resp.param.fec;
-
- return 0;
-}
-
-int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_FEC,
- .port = port->hw_id,
- .dev = port->dev_id,
- .param = {
- .fec = fec,
- }
- };
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
- &req.cmd, sizeof(req));
-}
-
-static u8 prestera_hw_mdix_to_eth(u8 mode)
-{
- switch (mode) {
- case PRESTERA_PORT_TP_MDI:
- return ETH_TP_MDI;
- case PRESTERA_PORT_TP_MDIX:
- return ETH_TP_MDI_X;
- case PRESTERA_PORT_TP_AUTO:
- return ETH_TP_MDI_AUTO;
- default:
- return ETH_TP_MDI_INVALID;
- }
-}
-
-static u8 prestera_hw_mdix_from_eth(u8 mode)
-{
- switch (mode) {
- case ETH_TP_MDI:
- return PRESTERA_PORT_TP_MDI;
- case ETH_TP_MDI_X:
- return PRESTERA_PORT_TP_MDIX;
- case ETH_TP_MDI_AUTO:
- return PRESTERA_PORT_TP_AUTO;
- default:
- return PRESTERA_PORT_TP_NA;
- }
-}
-
-int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
- u8 *admin_mode)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *status = prestera_hw_mdix_to_eth(resp.param.mdix.status);
- *admin_mode = prestera_hw_mdix_to_eth(resp.param.mdix.admin_mode);
-
- return 0;
-}
-
-int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
-
- req.param.mdix.admin_mode = prestera_hw_mdix_from_eth(mode);
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
- &req.cmd, sizeof(req));
-}
-
-int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
- .port = port->hw_id,
- .dev = port->dev_id,
- .param = {
- .link_mode = mode,
- }
- };
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
- &req.cmd, sizeof(req));
-}
-
-int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *mode = resp.param.link_mode;
-
- return 0;
-}
-
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_SPEED,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_SPEED),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_attr_resp resp;
int err;
@@ -1293,73 +1336,33 @@ int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
if (err)
return err;
- *speed = resp.param.speed;
+ *speed = __le32_to_cpu(resp.param.speed);
return 0;
}
-int prestera_hw_port_autoneg_set(const struct prestera_port *port,
- bool autoneg, u64 link_modes, u8 fec)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG,
- .port = port->hw_id,
- .dev = port->dev_id,
- .param = {
- .autoneg = {
- .link_mode = link_modes,
- .enable = autoneg,
- .fec = fec,
- }
- }
- };
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
- &req.cmd, sizeof(req));
-}
-
int prestera_hw_port_autoneg_restart(struct prestera_port *port)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
-int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_DUPLEX,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *duplex = resp.param.duplex;
-
- return 0;
-}
-
int prestera_hw_port_stats_get(const struct prestera_port *port,
struct prestera_port_stats *st)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_STATS,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_STATS),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_stats_resp resp;
- u64 *hw = resp.stats;
+ __le64 *hw = resp.stats;
int err;
err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
@@ -1367,36 +1370,56 @@ int prestera_hw_port_stats_get(const struct prestera_port *port,
if (err)
return err;
- st->good_octets_received = hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT];
- st->bad_octets_received = hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT];
- st->mac_trans_error = hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT];
- st->broadcast_frames_received = hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT];
- st->multicast_frames_received = hw[PRESTERA_PORT_MC_PKTS_RCV_CNT];
- st->frames_64_octets = hw[PRESTERA_PORT_PKTS_64L_CNT];
- st->frames_65_to_127_octets = hw[PRESTERA_PORT_PKTS_65TO127L_CNT];
- st->frames_128_to_255_octets = hw[PRESTERA_PORT_PKTS_128TO255L_CNT];
- st->frames_256_to_511_octets = hw[PRESTERA_PORT_PKTS_256TO511L_CNT];
- st->frames_512_to_1023_octets = hw[PRESTERA_PORT_PKTS_512TO1023L_CNT];
- st->frames_1024_to_max_octets = hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT];
- st->excessive_collision = hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT];
- st->multicast_frames_sent = hw[PRESTERA_PORT_MC_PKTS_SENT_CNT];
- st->broadcast_frames_sent = hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT];
- st->fc_sent = hw[PRESTERA_PORT_FC_SENT_CNT];
- st->fc_received = hw[PRESTERA_PORT_GOOD_FC_RCV_CNT];
- st->buffer_overrun = hw[PRESTERA_PORT_DROP_EVENTS_CNT];
- st->undersize = hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT];
- st->fragments = hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT];
- st->oversize = hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT];
- st->jabber = hw[PRESTERA_PORT_JABBER_PKTS_CNT];
- st->rx_error_frame_received = hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT];
- st->bad_crc = hw[PRESTERA_PORT_BAD_CRC_CNT];
- st->collisions = hw[PRESTERA_PORT_COLLISIONS_CNT];
- st->late_collision = hw[PRESTERA_PORT_LATE_COLLISIONS_CNT];
- st->unicast_frames_received = hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT];
- st->unicast_frames_sent = hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT];
- st->sent_multiple = hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT];
- st->sent_deferred = hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT];
- st->good_octets_sent = hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT];
+ st->good_octets_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT]);
+ st->bad_octets_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT]);
+ st->mac_trans_error =
+ __le64_to_cpu(hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT]);
+ st->broadcast_frames_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT]);
+ st->multicast_frames_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_RCV_CNT]);
+ st->frames_64_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_64L_CNT]);
+ st->frames_65_to_127_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_65TO127L_CNT]);
+ st->frames_128_to_255_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_128TO255L_CNT]);
+ st->frames_256_to_511_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_256TO511L_CNT]);
+ st->frames_512_to_1023_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_512TO1023L_CNT]);
+ st->frames_1024_to_max_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT]);
+ st->excessive_collision =
+ __le64_to_cpu(hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT]);
+ st->multicast_frames_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_SENT_CNT]);
+ st->broadcast_frames_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT]);
+ st->fc_sent = __le64_to_cpu(hw[PRESTERA_PORT_FC_SENT_CNT]);
+ st->fc_received = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_FC_RCV_CNT]);
+ st->buffer_overrun = __le64_to_cpu(hw[PRESTERA_PORT_DROP_EVENTS_CNT]);
+ st->undersize = __le64_to_cpu(hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT]);
+ st->fragments = __le64_to_cpu(hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT]);
+ st->oversize = __le64_to_cpu(hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT]);
+ st->jabber = __le64_to_cpu(hw[PRESTERA_PORT_JABBER_PKTS_CNT]);
+ st->rx_error_frame_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT]);
+ st->bad_crc = __le64_to_cpu(hw[PRESTERA_PORT_BAD_CRC_CNT]);
+ st->collisions = __le64_to_cpu(hw[PRESTERA_PORT_COLLISIONS_CNT]);
+ st->late_collision =
+ __le64_to_cpu(hw[PRESTERA_PORT_LATE_COLLISIONS_CNT]);
+ st->unicast_frames_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT]);
+ st->unicast_frames_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT]);
+ st->sent_multiple =
+ __le64_to_cpu(hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT]);
+ st->sent_deferred =
+ __le64_to_cpu(hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT]);
+ st->good_octets_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT]);
return 0;
}
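Each counter arrives as a __le64 in a flat array indexed by the PRESTERA_PORT_*_CNT enum, so the per-field assignments above are all instances of one conversion. An equivalent bulk helper, sketched under that assumption:

static void example_stats_to_cpu(u64 *dst, const __le64 *src, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dst[i] = __le64_to_cpu(src[i]);
}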
@@ -1404,9 +1427,9 @@ int prestera_hw_port_stats_get(const struct prestera_port *port,
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_LEARNING,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LEARNING),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
.learning = enable,
}
@@ -1419,9 +1442,9 @@ int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
.flood_ext = {
.type = PRESTERA_PORT_FLOOD_TYPE_UC,
@@ -1437,9 +1460,9 @@ static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
.flood_ext = {
.type = PRESTERA_PORT_FLOOD_TYPE_MC,
@@ -1455,9 +1478,9 @@ static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
.flood = flood,
}
@@ -1505,7 +1528,7 @@ err_uc_flood:
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE,
@@ -1515,7 +1538,7 @@ int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE,
@@ -1526,9 +1549,9 @@ int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
bool is_member, bool untagged)
{
struct prestera_msg_vlan_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .vid = vid,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .vid = __cpu_to_le16(vid),
.is_member = is_member,
.is_tagged = !untagged,
};
@@ -1540,9 +1563,9 @@ int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid)
{
struct prestera_msg_vlan_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .vid = vid,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .vid = __cpu_to_le16(vid),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET,
@@ -1552,9 +1575,9 @@ int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid)
int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state)
{
struct prestera_msg_stp_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .vid = vid,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .vid = __cpu_to_le16(vid),
.state = state,
};
@@ -1567,10 +1590,10 @@ int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
{
struct prestera_msg_fdb_req req = {
.dest = {
- .dev = port->dev_id,
- .port = port->hw_id,
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
},
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
.dynamic = dynamic,
};
@@ -1585,10 +1608,10 @@ int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
{
struct prestera_msg_fdb_req req = {
.dest = {
- .dev = port->dev_id,
- .port = port->hw_id,
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
},
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
};
ether_addr_copy(req.mac, mac);
@@ -1603,9 +1626,9 @@ int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id,
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
- .lag_id = lag_id,
+ .lag_id = __cpu_to_le16(lag_id),
},
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
.dynamic = dynamic,
};
@@ -1621,9 +1644,9 @@ int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id,
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
- .lag_id = lag_id,
+ .lag_id = __cpu_to_le16(lag_id),
},
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
};
ether_addr_copy(req.mac, mac);
@@ -1636,10 +1659,10 @@ int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
{
struct prestera_msg_fdb_req req = {
.dest = {
- .dev = port->dev_id,
- .port = port->hw_id,
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
},
- .flush_mode = mode,
+ .flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
@@ -1649,8 +1672,8 @@ int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode)
{
struct prestera_msg_fdb_req req = {
- .vid = vid,
- .flush_mode = mode,
+ .vid = __cpu_to_le16(vid),
+ .flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN,
@@ -1662,11 +1685,11 @@ int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
{
struct prestera_msg_fdb_req req = {
.dest = {
- .dev = port->dev_id,
- .port = port->hw_id,
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
},
- .vid = vid,
- .flush_mode = mode,
+ .vid = __cpu_to_le16(vid),
+ .flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
@@ -1679,9 +1702,9 @@ int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id,
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
- .lag_id = lag_id,
+ .lag_id = __cpu_to_le16(lag_id),
},
- .flush_mode = mode,
+ .flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
@@ -1694,10 +1717,10 @@ int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw,
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
- .lag_id = lag_id,
+ .lag_id = __cpu_to_le16(lag_id),
},
- .vid = vid,
- .flush_mode = mode,
+ .vid = __cpu_to_le16(vid),
+ .flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
@@ -1716,7 +1739,7 @@ int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
if (err)
return err;
- *bridge_id = resp.bridge;
+ *bridge_id = __le16_to_cpu(resp.bridge);
return 0;
}
@@ -1724,7 +1747,7 @@ int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id)
{
struct prestera_msg_bridge_req req = {
- .bridge = bridge_id,
+ .bridge = __cpu_to_le16(bridge_id),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE,
@@ -1734,9 +1757,9 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id)
int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id)
{
struct prestera_msg_bridge_req req = {
- .bridge = bridge_id,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .bridge = __cpu_to_le16(bridge_id),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD,
@@ -1746,9 +1769,9 @@ int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id)
int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
{
struct prestera_msg_bridge_req req = {
- .bridge = bridge_id,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .bridge = __cpu_to_le16(bridge_id),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE,
@@ -1769,28 +1792,17 @@ int prestera_hw_rxtx_init(struct prestera_switch *sw,
if (err)
return err;
- params->map_addr = resp.map_addr;
+ params->map_addr = __le32_to_cpu(resp.map_addr);
return 0;
}
-int prestera_hw_rxtx_port_init(struct prestera_port *port)
-{
- struct prestera_msg_rxtx_port_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- };
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_RXTX_PORT_INIT,
- &req.cmd, sizeof(req));
-}
-
int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id)
{
struct prestera_msg_lag_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .lag_id = lag_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .lag_id = __cpu_to_le16(lag_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD,
@@ -1800,9 +1812,9 @@ int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id)
int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id)
{
struct prestera_msg_lag_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .lag_id = lag_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .lag_id = __cpu_to_le16(lag_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE,
@@ -1813,9 +1825,9 @@ int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id,
bool enable)
{
struct prestera_msg_lag_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .lag_id = lag_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .lag_id = __cpu_to_le16(lag_id),
};
u32 cmd;
@@ -1842,7 +1854,7 @@ prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
if (err)
return err;
- *packet_count = resp.packet_count;
+ *packet_count = __le64_to_cpu(resp.packet_count);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 546d5fd8240d..57a3c2e5b112 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -20,6 +20,23 @@ enum prestera_fdb_flush_mode {
};
enum {
+ PRESTERA_MAC_MODE_INTERNAL,
+ PRESTERA_MAC_MODE_SGMII,
+ PRESTERA_MAC_MODE_1000BASE_X,
+ PRESTERA_MAC_MODE_KR,
+ PRESTERA_MAC_MODE_KR2,
+ PRESTERA_MAC_MODE_KR4,
+ PRESTERA_MAC_MODE_CR,
+ PRESTERA_MAC_MODE_CR2,
+ PRESTERA_MAC_MODE_CR4,
+ PRESTERA_MAC_MODE_SR_LR,
+ PRESTERA_MAC_MODE_SR_LR2,
+ PRESTERA_MAC_MODE_SR_LR4,
+
+ PRESTERA_MAC_MODE_MAX
+};
+
+enum {
PRESTERA_LINK_MODE_10baseT_Half,
PRESTERA_LINK_MODE_10baseT_Full,
PRESTERA_LINK_MODE_100baseT_Half,
@@ -116,32 +133,29 @@ int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac);
/* Port API */
int prestera_hw_port_info_get(const struct prestera_port *port,
u32 *dev_id, u32 *hw_id, u16 *fp_id);
-int prestera_hw_port_state_set(const struct prestera_port *port,
- bool admin_state);
+
+int prestera_hw_port_mac_mode_get(const struct prestera_port *port,
+ u32 *mode, u32 *speed, u8 *duplex, u8 *fec);
+int prestera_hw_port_mac_mode_set(const struct prestera_port *port,
+ bool admin, u32 mode, u8 inband,
+ u32 speed, u8 duplex, u8 fec);
+int prestera_hw_port_phy_mode_get(const struct prestera_port *port,
+ u8 *mdix, u64 *lmode_bmap,
+ bool *fc_pause, bool *fc_asym);
+int prestera_hw_port_phy_mode_set(const struct prestera_port *port,
+ bool admin, bool adv, u32 mode, u64 modes,
+ u8 mdix);
+
int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu);
int prestera_hw_port_mtu_get(const struct prestera_port *port, u32 *mtu);
int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac);
int prestera_hw_port_mac_get(const struct prestera_port *port, char *mac);
int prestera_hw_port_cap_get(const struct prestera_port *port,
struct prestera_port_caps *caps);
-int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
- u64 *link_mode_bitmap);
-int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
- bool *pause, bool *asym_pause);
int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type);
-int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec);
-int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec);
-int prestera_hw_port_autoneg_set(const struct prestera_port *port,
- bool autoneg, u64 link_modes, u8 fec);
int prestera_hw_port_autoneg_restart(struct prestera_port *port);
-int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex);
int prestera_hw_port_stats_get(const struct prestera_port *port,
struct prestera_port_stats *stats);
-int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode);
-int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode);
-int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
- u8 *admin_mode);
-int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode);
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
@@ -206,7 +220,6 @@ void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
/* RX/TX */
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params);
-int prestera_hw_rxtx_port_init(struct prestera_port *port);
/* LAG API */
int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 44c670807fb3..625b40149fac 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -80,27 +80,76 @@ struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
return port;
}
-static int prestera_port_open(struct net_device *dev)
+int prestera_port_cfg_mac_read(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg)
+{
+ *cfg = port->cfg_mac;
+ return 0;
+}
+
+int prestera_port_cfg_mac_write(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg)
{
- struct prestera_port *port = netdev_priv(dev);
int err;
- err = prestera_hw_port_state_set(port, true);
+ err = prestera_hw_port_mac_mode_set(port, cfg->admin,
+ cfg->mode, cfg->inband, cfg->speed,
+ cfg->duplex, cfg->fec);
if (err)
return err;
+ port->cfg_mac = *cfg;
+ return 0;
+}
+
+static int prestera_port_open(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_mac_config cfg_mac;
+ int err = 0;
+
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = true;
+ err = prestera_port_cfg_mac_write(port, &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = true;
+ err = prestera_hw_port_phy_mode_set(port, true, port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
+
netif_start_queue(dev);
- return 0;
+ return err;
}
static int prestera_port_close(struct net_device *dev)
{
struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_mac_config cfg_mac;
+ int err = 0;
netif_stop_queue(dev);
- return prestera_hw_port_state_set(port, false);
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = false;
+ prestera_port_cfg_mac_write(port, &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = false;
+ err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
+
+ return err;
}
static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
@@ -137,7 +186,7 @@ static int prestera_port_set_mac_address(struct net_device *dev, void *p)
if (err)
return err;
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
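Recent kernels make dev->dev_addr const, so drivers can no longer memcpy() into it; the conversions in this series route every write through eth_hw_addr_set() instead. The generic shape of a converted handler (a sketch, not any specific driver's code):

static int example_set_mac(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);	/* was: memcpy() */
	return 0;
}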
@@ -228,46 +277,23 @@ static const struct net_device_ops prestera_netdev_ops = {
.ndo_get_devlink_port = prestera_devlink_get_port,
};
-int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
- u64 adver_link_modes, u8 adver_fec)
+int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes)
{
- bool refresh = false;
- u64 link_modes;
int err;
- u8 fec;
-
- if (port->caps.type != PRESTERA_PORT_TYPE_TP)
- return enable ? -EINVAL : 0;
-
- if (!enable)
- goto set_autoneg;
-
- link_modes = port->caps.supp_link_modes & adver_link_modes;
- fec = port->caps.supp_fec & adver_fec;
-
- if (!link_modes && !fec)
- return -EOPNOTSUPP;
-
- if (link_modes && port->adver_link_modes != link_modes) {
- port->adver_link_modes = link_modes;
- refresh = true;
- }
-
- if (fec && port->adver_fec != fec) {
- port->adver_fec = fec;
- refresh = true;
- }
-set_autoneg:
- if (port->autoneg == enable && !refresh)
+ if (port->autoneg && port->adver_link_modes == link_modes)
return 0;
- err = prestera_hw_port_autoneg_set(port, enable, port->adver_link_modes,
- port->adver_fec);
+ err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin,
+ true, 0, link_modes,
+ port->cfg_phy.mdix);
if (err)
return err;
- port->autoneg = enable;
+ port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
+ port->adver_link_modes = link_modes;
+ port->cfg_phy.mode = 0;
+ port->autoneg = true;
return 0;
}
@@ -288,6 +314,7 @@ static void prestera_port_list_del(struct prestera_port *port)
static int prestera_port_create(struct prestera_switch *sw, u32 id)
{
+ struct prestera_port_mac_config cfg_mac;
struct prestera_port *port;
struct net_device *dev;
int err;
@@ -338,11 +365,14 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_port_init;
}
+ eth_hw_addr_gen(dev, sw->base_mac, port->fp_id);
/* firmware requires that port's MAC address consist of the first
* 5 bytes of the base MAC address
*/
- memcpy(dev->dev_addr, sw->base_mac, dev->addr_len - 1);
- dev->dev_addr[dev->addr_len - 1] = port->fp_id;
+ if (memcmp(dev->dev_addr, sw->base_mac, ETH_ALEN - 1)) {
+ dev_warn(prestera_dev(sw), "Port MAC address wraps for port(%u)\n", id);
+ dev_addr_mod(dev, 0, sw->base_mac, ETH_ALEN - 1);
+ }
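eth_hw_addr_gen() derives the port MAC by treating the 6-byte base MAC as an integer and adding fp_id; the memcmp() above then warns when that addition carried past the last byte, i.e. when the generated address no longer shares the required 5-byte prefix. A rough model of the helper (hedged; include/linux/etherdevice.h is authoritative):

static void example_addr_gen(u8 *addr, const u8 *base, unsigned int id)
{
	u64 u = ether_addr_to_u64(base);	/* MAC as a 48-bit integer */

	u64_to_ether_addr(u + id, addr);
}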
err = prestera_hw_port_mac_set(port, dev->dev_addr);
if (err) {
@@ -356,16 +386,43 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_port_init;
}
- port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
- prestera_port_autoneg_set(port, true, port->caps.supp_link_modes,
- port->caps.supp_fec);
+ port->adver_link_modes = port->caps.supp_link_modes;
+ port->adver_fec = 0;
+ port->autoneg = true;
+
+ /* initialize config mac */
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) {
+ cfg_mac.admin = true;
+ cfg_mac.mode = PRESTERA_MAC_MODE_INTERNAL;
+ } else {
+ cfg_mac.admin = false;
+ cfg_mac.mode = PRESTERA_MAC_MODE_MAX;
+ }
+ cfg_mac.inband = false;
+ cfg_mac.speed = 0;
+ cfg_mac.duplex = DUPLEX_UNKNOWN;
+ cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
- err = prestera_hw_port_state_set(port, false);
+ err = prestera_port_cfg_mac_write(port, &cfg_mac);
if (err) {
- dev_err(prestera_dev(sw), "Failed to set port(%u) down\n", id);
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mac mode\n", id);
goto err_port_init;
}
+ /* initialize config phy (if the port has an integral PHY) */
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) {
+ port->cfg_phy.mdix = ETH_TP_MDI_AUTO;
+ port->cfg_phy.admin = false;
+ err = prestera_hw_port_phy_mode_set(port,
+ port->cfg_phy.admin,
+ false, 0, 0,
+ port->cfg_phy.mdix);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) phy mode\n", id);
+ goto err_port_init;
+ }
+ }
+
err = prestera_rxtx_port_init(port);
if (err)
goto err_port_init;
@@ -446,8 +503,10 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
caching_dw = &port->cached_hw_stats.caching_dw;
- if (evt->id == PRESTERA_PORT_EVENT_STATE_CHANGED) {
- if (evt->port_evt.data.oper_state) {
+ prestera_ethtool_port_state_changed(port, &evt->port_evt);
+
+ if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
+ if (port->state_mac.oper) {
netif_carrier_on(port->dev);
if (!delayed_work_pending(caching_dw))
queue_delayed_work(prestera_wq, caching_dw, 0);
@@ -851,7 +910,7 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_span_init;
- err = prestera_devlink_register(sw);
+ err = prestera_devlink_traps_register(sw);
if (err)
goto err_dl_register;
@@ -863,12 +922,13 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_ports_create;
+ prestera_devlink_register(sw);
return 0;
err_ports_create:
prestera_lag_fini(sw);
err_lag_init:
- prestera_devlink_unregister(sw);
+ prestera_devlink_traps_unregister(sw);
err_dl_register:
prestera_span_fini(sw);
err_span_init:
@@ -888,9 +948,10 @@ err_swdev_register:
static void prestera_switch_fini(struct prestera_switch *sw)
{
+ prestera_devlink_unregister(sw);
prestera_destroy_ports(sw);
prestera_lag_fini(sw);
- prestera_devlink_unregister(sw);
+ prestera_devlink_traps_unregister(sw);
prestera_span_fini(sw);
prestera_acl_fini(sw);
prestera_event_handlers_unregister(sw);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index a250d394da38..5d4d410b07c8 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -14,10 +14,10 @@
#define PRESTERA_MSG_MAX_SIZE 1500
-#define PRESTERA_SUPP_FW_MAJ_VER 3
+#define PRESTERA_SUPP_FW_MAJ_VER 4
#define PRESTERA_SUPP_FW_MIN_VER 0
-#define PRESTERA_PREV_FW_MAJ_VER 2
+#define PRESTERA_PREV_FW_MAJ_VER 4
#define PRESTERA_PREV_FW_MIN_VER 0
#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
@@ -102,23 +102,30 @@ struct prestera_fw_evtq_regs {
u32 len;
};
+#define PRESTERA_CMD_QNUM_MAX 4
+
+struct prestera_fw_cmdq_regs {
+ u32 req_ctl;
+ u32 req_len;
+ u32 rcv_ctl;
+ u32 rcv_len;
+ u32 offs;
+ u32 len;
+};
+
struct prestera_fw_regs {
u32 fw_ready;
- u32 pad;
u32 cmd_offs;
u32 cmd_len;
+ u32 cmd_qnum;
u32 evt_offs;
u32 evt_qnum;
- u32 cmd_req_ctl;
- u32 cmd_req_len;
- u32 cmd_rcv_ctl;
- u32 cmd_rcv_len;
-
u32 fw_status;
u32 rx_status;
- struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX];
+ struct prestera_fw_cmdq_regs cmdq_list[PRESTERA_CMD_QNUM_MAX];
+ struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX];
};
#define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f)
@@ -130,14 +137,22 @@ struct prestera_fw_regs {
#define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs)
#define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len)
+#define PRESTERA_CMD_QNUM_REG PRESTERA_FW_REG_OFFSET(cmd_qnum)
#define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs)
#define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum)
-#define PRESTERA_CMD_REQ_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_req_ctl)
-#define PRESTERA_CMD_REQ_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_req_len)
+#define PRESTERA_CMDQ_REG_OFFSET(q, f) \
+ (PRESTERA_FW_REG_OFFSET(cmdq_list) + \
+ (q) * sizeof(struct prestera_fw_cmdq_regs) + \
+ offsetof(struct prestera_fw_cmdq_regs, f))
+
+#define PRESTERA_CMDQ_REQ_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_ctl)
+#define PRESTERA_CMDQ_REQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_len)
+#define PRESTERA_CMDQ_RCV_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_ctl)
+#define PRESTERA_CMDQ_RCV_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_len)
+#define PRESTERA_CMDQ_OFFS_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, offs)
+#define PRESTERA_CMDQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, len)
-#define PRESTERA_CMD_RCV_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_ctl)
-#define PRESTERA_CMD_RCV_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_len)
#define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status)
#define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status)
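The per-queue registers are laid out as an array of struct prestera_fw_cmdq_regs inside struct prestera_fw_regs, and the PRESTERA_CMDQ_*_REG(q) macros index into it. A worked expansion (illustrative; sizeof(struct prestera_fw_cmdq_regs) is 6 * 4 = 24 bytes):

	PRESTERA_CMDQ_REQ_CTL_REG(1)
		= offsetof(struct prestera_fw_regs, cmdq_list)
		+ 1 * 24	/* skip queue 0 */
		+ offsetof(struct prestera_fw_cmdq_regs, req_ctl)	/* +0 */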
@@ -174,6 +189,13 @@ struct prestera_fw_evtq {
size_t len;
};
+struct prestera_fw_cmdq {
+ /* serialize access to dev->send_req */
+ struct mutex cmd_mtx;
+ u8 __iomem *addr;
+ size_t len;
+};
+
struct prestera_fw {
struct prestera_fw_rev rev_supp;
const struct firmware *bin;
@@ -183,9 +205,10 @@ struct prestera_fw {
u8 __iomem *ldr_ring_buf;
u32 ldr_buf_len;
u32 ldr_wr_idx;
- struct mutex cmd_mtx; /* serialize access to dev->send_req */
size_t cmd_mbox_len;
u8 __iomem *cmd_mbox;
+ struct prestera_fw_cmdq cmd_queue[PRESTERA_CMD_QNUM_MAX];
+ u8 cmd_qnum;
struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX];
u8 evt_qnum;
struct work_struct evt_work;
@@ -324,7 +347,27 @@ static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp,
1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
}
-static int prestera_fw_cmd_send(struct prestera_fw *fw,
+static void prestera_fw_cmdq_lock(struct prestera_fw *fw, u8 qid)
+{
+ mutex_lock(&fw->cmd_queue[qid].cmd_mtx);
+}
+
+static void prestera_fw_cmdq_unlock(struct prestera_fw *fw, u8 qid)
+{
+ mutex_unlock(&fw->cmd_queue[qid].cmd_mtx);
+}
+
+static u32 prestera_fw_cmdq_len(struct prestera_fw *fw, u8 qid)
+{
+ return fw->cmd_queue[qid].len;
+}
+
+static u8 __iomem *prestera_fw_cmdq_buf(struct prestera_fw *fw, u8 qid)
+{
+ return fw->cmd_queue[qid].addr;
+}
+
+static int prestera_fw_cmd_send(struct prestera_fw *fw, int qid,
void *in_msg, size_t in_size,
void *out_msg, size_t out_size,
unsigned int waitms)
@@ -335,30 +378,32 @@ static int prestera_fw_cmd_send(struct prestera_fw *fw,
if (!waitms)
waitms = PRESTERA_FW_CMD_DEFAULT_WAIT_MS;
- if (ALIGN(in_size, 4) > fw->cmd_mbox_len)
+ if (ALIGN(in_size, 4) > prestera_fw_cmdq_len(fw, qid))
return -EMSGSIZE;
/* wait for finish previous reply from FW */
- err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG, 0, 30);
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), 0, 30);
if (err) {
dev_err(fw->dev.dev, "finish reply from FW is timed out\n");
return err;
}
- prestera_fw_write(fw, PRESTERA_CMD_REQ_LEN_REG, in_size);
- memcpy_toio(fw->cmd_mbox, in_msg, in_size);
+ prestera_fw_write(fw, PRESTERA_CMDQ_REQ_LEN_REG(qid), in_size);
+
+ memcpy_toio(prestera_fw_cmdq_buf(fw, qid), in_msg, in_size);
- prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REQ_SENT);
+ prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid),
+ PRESTERA_CMD_F_REQ_SENT);
/* wait for reply from FW */
- err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG,
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid),
PRESTERA_CMD_F_REPL_SENT, waitms);
if (err) {
dev_err(fw->dev.dev, "reply from FW is timed out\n");
goto cmd_exit;
}
- ret_size = prestera_fw_read(fw, PRESTERA_CMD_RCV_LEN_REG);
+ ret_size = prestera_fw_read(fw, PRESTERA_CMDQ_RCV_LEN_REG(qid));
if (ret_size > out_size) {
dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n",
ret_size, out_size);
@@ -366,14 +411,15 @@ static int prestera_fw_cmd_send(struct prestera_fw *fw,
goto cmd_exit;
}
- memcpy_fromio(out_msg, fw->cmd_mbox + in_size, ret_size);
+ memcpy_fromio(out_msg, prestera_fw_cmdq_buf(fw, qid) + in_size, ret_size);
cmd_exit:
- prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REPL_RCVD);
+ prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid),
+ PRESTERA_CMD_F_REPL_RCVD);
return err;
}
-static int prestera_fw_send_req(struct prestera_device *dev,
+static int prestera_fw_send_req(struct prestera_device *dev, int qid,
void *in_msg, size_t in_size, void *out_msg,
size_t out_size, unsigned int waitms)
{
@@ -382,9 +428,10 @@ static int prestera_fw_send_req(struct prestera_device *dev,
fw = container_of(dev, struct prestera_fw, dev);
- mutex_lock(&fw->cmd_mtx);
- ret = prestera_fw_cmd_send(fw, in_msg, in_size, out_msg, out_size, waitms);
- mutex_unlock(&fw->cmd_mtx);
+ prestera_fw_cmdq_lock(fw, qid);
+ ret = prestera_fw_cmd_send(fw, qid, in_msg, in_size, out_msg, out_size,
+ waitms);
+ prestera_fw_cmdq_unlock(fw, qid);
return ret;
}
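With one mutex per command queue, requests on different queues no longer serialize against each other; only callers targeting the same qid contend. A hypothetical usage sketch against the send_req() signature introduced in this patch:

static int example_parallel_reqs(struct prestera_device *dev,
				 void *msg_a, size_t len_a,
				 void *msg_b, size_t len_b,
				 void *out, size_t out_len)
{
	int err;

	err = dev->send_req(dev, 0, msg_a, len_a, out, out_len, 0);
	if (err)
		return err;

	/* may overlap with queue-0 traffic from other threads */
	return dev->send_req(dev, 1, msg_b, len_b, out, out_len, 0);
}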
@@ -414,7 +461,16 @@ static int prestera_fw_init(struct prestera_fw *fw)
fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG);
fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG);
- mutex_init(&fw->cmd_mtx);
+ fw->cmd_qnum = prestera_fw_read(fw, PRESTERA_CMD_QNUM_REG);
+
+ for (qid = 0; qid < fw->cmd_qnum; qid++) {
+ u32 offs = prestera_fw_read(fw, PRESTERA_CMDQ_OFFS_REG(qid));
+ struct prestera_fw_cmdq *cmdq = &fw->cmd_queue[qid];
+
+ cmdq->len = prestera_fw_read(fw, PRESTERA_CMDQ_LEN_REG(qid));
+ cmdq->addr = fw->cmd_mbox + offs;
+ mutex_init(&cmdq->cmd_mtx);
+ }
fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG);
fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index 73d2eba5262f..e452cdeaf703 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -794,14 +794,7 @@ void prestera_rxtx_switch_fini(struct prestera_switch *sw)
int prestera_rxtx_port_init(struct prestera_port *port)
{
- int err;
-
- err = prestera_hw_rxtx_port_init(port);
- if (err)
- return err;
-
port->dev->needed_headroom = PRESTERA_DSA_HLEN;
-
return 0;
}
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index fab53c9b8380..1d607bc6b59e 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -389,7 +389,7 @@ static void inverse_every_nibble(unsigned char *mac_addr)
* Outputs
* return the calculated entry.
*/
-static u32 hash_function(unsigned char *mac_addr_orig)
+static u32 hash_function(const unsigned char *mac_addr_orig)
{
u32 hash_result;
u32 addr0;
@@ -434,7 +434,7 @@ static u32 hash_function(unsigned char *mac_addr_orig)
* -ENOSPC if table full
*/
static int add_del_hash_entry(struct pxa168_eth_private *pep,
- unsigned char *mac_addr,
+ const unsigned char *mac_addr,
u32 rd, u32 skip, int del)
{
struct addr_table_entry *entry, *start;
@@ -521,7 +521,7 @@ static int add_del_hash_entry(struct pxa168_eth_private *pep,
*/
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
unsigned char *oaddr,
- unsigned char *addr)
+ const unsigned char *addr)
{
/* Delete old entry */
if (oaddr)
@@ -607,7 +607,7 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
memcpy(oldMac, dev->dev_addr, ETH_ALEN);
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
mac_h = dev->dev_addr[0] << 24;
mac_h |= dev->dev_addr[1] << 16;
@@ -977,8 +977,7 @@ static int pxa168_init_phy(struct net_device *dev)
cmd.base.phy_address = pep->phy_addr;
cmd.base.speed = pep->phy_speed;
cmd.base.duplex = pep->phy_duplex;
- bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES);
cmd.base.autoneg = AUTONEG_ENABLE;
if (cmd.base.speed != 0)
@@ -1434,11 +1433,15 @@ static int pxa168_eth_probe(struct platform_device *pdev)
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
- err = of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
+ err = of_get_ethdev_address(pdev->dev.of_node, dev);
if (err) {
+ u8 addr[ETH_ALEN];
+
/* try reading the mac address, if set by the bootloader */
- pxa168_eth_get_mac_address(dev, dev->dev_addr);
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ pxa168_eth_get_mac_address(dev, addr);
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(dev, addr);
+ } else {
dev_info(&pdev->dev, "Using random mac address\n");
eth_hw_addr_random(dev);
}
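The recurring conversion in the MAC-address hunks of this series: netdev->dev_addr is becoming const, so drivers must stop writing it directly. The pattern, as a minimal sketch for a device whose MAC is read from MMIO (the function name is illustrative):

	#include <linux/etherdevice.h>

	static void example_read_hw_mac(struct net_device *dev, void __iomem *regs)
	{
		u8 addr[ETH_ALEN];

		memcpy_fromio(addr, regs, ETH_ALEN);	/* read into a local buffer */
		eth_hw_addr_set(dev, addr);		/* sole sanctioned writer of dev_addr */
	}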
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 051dd3fb5b03..0c864e5bf0a6 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3459,7 +3459,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev)) {
memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
@@ -3810,6 +3810,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
{
struct skge_port *skge;
struct net_device *dev = alloc_etherdev(sizeof(*skge));
+ u8 addr[ETH_ALEN];
if (!dev)
return NULL;
@@ -3862,7 +3863,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
}
/* read the mac address */
- memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
return dev;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e9fc74e54b22..28b5b9341145 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3817,7 +3817,7 @@ static int sky2_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
dev->dev_addr, ETH_ALEN);
memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
@@ -4440,86 +4440,6 @@ static const struct ethtool_ops sky2_ethtool_ops = {
static struct dentry *sky2_debug;
-
-/*
- * Read and parse the first part of Vital Product Data
- */
-#define VPD_SIZE 128
-#define VPD_MAGIC 0x82
-
-static const struct vpd_tag {
- char tag[2];
- char *label;
-} vpd_tags[] = {
- { "PN", "Part Number" },
- { "EC", "Engineering Level" },
- { "MN", "Manufacturer" },
- { "SN", "Serial Number" },
- { "YA", "Asset Tag" },
- { "VL", "First Error Log Message" },
- { "VF", "Second Error Log Message" },
- { "VB", "Boot Agent ROM Configuration" },
- { "VE", "EFI UNDI Configuration" },
-};
-
-static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw)
-{
- size_t vpd_size;
- loff_t offs;
- u8 len;
- unsigned char *buf;
- u16 reg2;
-
- reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
- vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
-
- seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev));
- buf = kmalloc(vpd_size, GFP_KERNEL);
- if (!buf) {
- seq_puts(seq, "no memory!\n");
- return;
- }
-
- if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) {
- seq_puts(seq, "VPD read failed\n");
- goto out;
- }
-
- if (buf[0] != VPD_MAGIC) {
- seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]);
- goto out;
- }
- len = buf[1];
- if (len == 0 || len > vpd_size - 4) {
- seq_printf(seq, "Invalid id length: %d\n", len);
- goto out;
- }
-
- seq_printf(seq, "%.*s\n", len, buf + 3);
- offs = len + 3;
-
- while (offs < vpd_size - 4) {
- int i;
-
- if (!memcmp("RW", buf + offs, 2)) /* end marker */
- break;
- len = buf[offs + 2];
- if (offs + len + 3 >= vpd_size)
- break;
-
- for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
- if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) {
- seq_printf(seq, " %s: %.*s\n",
- vpd_tags[i].label, len, buf + offs + 3);
- break;
- }
- }
- offs += len + 3;
- }
-out:
- kfree(buf);
-}
-
static int sky2_debug_show(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
@@ -4529,9 +4449,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
unsigned idx, last;
int sop;
- sky2_show_vpd(seq, hw);
-
- seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n",
+ seq_printf(seq, "IRQ src=%x mask=%x control=%x\n",
sky2_read32(hw, B0_ISRC),
sky2_read32(hw, B0_IMSK),
sky2_read32(hw, B0_Y2_SP_ICR));
@@ -4802,10 +4720,13 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
* 1) from device tree data
* 2) from internal registers set by bootloader
*/
- ret = of_get_mac_address(hw->pdev->dev.of_node, dev->dev_addr);
- if (ret)
- memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
- ETH_ALEN);
+ ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev);
+ if (ret) {
+ u8 addr[ETH_ALEN];
+
+ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ }
/* if the address is invalid, use a random value */
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -4989,7 +4910,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
if (sizeof(dma_addr_t) > sizeof(u32) &&
- !(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
+ !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
using_dac = 1;
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (err < 0) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 398c23cec815..75d67d1b5f6b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2588,7 +2588,7 @@ static int __init mtk_init(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
int ret;
- ret = of_get_mac_address(mac->of_node, dev->dev_addr);
+ ret = of_get_ethdev_address(mac->of_node, dev);
if (ret) {
/* If the mac address is invalid, use random mac address */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 1d5dd2015453..89ca7960b225 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -523,7 +523,7 @@ static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
static void mtk_star_set_mac_addr(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
- u8 *mac_addr = ndev->dev_addr;
+ const u8 *mac_addr = ndev->dev_addr;
unsigned int high, low;
high = mac_addr[0] << 8 | mac_addr[1] << 0;
@@ -1544,7 +1544,7 @@ static int mtk_star_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
+ ret = platform_get_ethdev_address(dev, ndev);
if (ret || !is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 8d751383530b..e10b7b04b894 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2480,7 +2480,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
return 0;
err_thread:
- flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
while (i--) {
@@ -2587,7 +2586,6 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
int i, port;
if (mlx4_is_master(dev)) {
- flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq);
for (i = 0; i < dev->num_slaves; i++) {
for (port = 1; port <= MLX4_MAX_PORTS; port++)
@@ -3009,7 +3007,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
return -EPERM;
}
- s_info->mac = mlx4_mac_to_u64(mac);
+ s_info->mac = ether_addr_to_u64(mac);
mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, s_info->mac);
return 0;
@@ -3195,7 +3193,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
- mlx4_u64_to_mac(mac, s_info->mac);
+ u64_to_ether_addr(s_info->mac, mac);
if (setting && !is_valid_ether_addr(mac)) {
mlx4_info(dev, "Illegal MAC with spoofchk\n");
return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index f7053a74e6a8..4d4f9cf9facb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -314,7 +314,8 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
buf += PAGE_SIZE;
}
} else {
- err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ?
+ err = copy_to_user((void __user *)buf, init_ents,
+ array_size(entries, cqe_size)) ?
-EFAULT : 0;
}
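This hunk, like the kvzalloc()-to-kvcalloc() change in mlx5 cmd.c further below, replaces an open-coded `n * size` multiplication with an overflow-checked helper. A minimal sketch of the semantics (array_size() comes from <linux/overflow.h>):

	#include <linux/overflow.h>

	size_t bytes = array_size(entries, cqe_size);
	/* On multiplication overflow, array_size() returns SIZE_MAX, so the
	 * following copy_to_user() or allocation fails cleanly instead of
	 * operating on a truncated size.
	 */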
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ef518b1040f7..066d79e4ecfc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -39,6 +39,7 @@
#include <linux/in.h>
#include <net/ip.h>
#include <linux/bitmap.h>
+#include <linux/mii.h>
#include "mlx4_en.h"
#include "en_port.h"
@@ -197,6 +198,8 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
/* xdp statistics */
"rx_xdp_drop",
+ "rx_xdp_redirect",
+ "rx_xdp_redirect_fail",
"rx_xdp_tx",
"rx_xdp_tx_full",
@@ -428,6 +431,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
data[index++] = priv->rx_ring[i]->bytes;
data[index++] = priv->rx_ring[i]->dropped;
data[index++] = priv->rx_ring[i]->xdp_drop;
+ data[index++] = priv->rx_ring[i]->xdp_redirect;
+ data[index++] = priv->rx_ring[i]->xdp_redirect_fail;
data[index++] = priv->rx_ring[i]->xdp_tx;
data[index++] = priv->rx_ring[i]->xdp_tx_full;
}
@@ -520,6 +525,10 @@ static void mlx4_en_get_strings(struct net_device *dev,
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_drop", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_redirect", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_redirect_fail", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_tx", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_tx_full", i);
@@ -643,10 +652,8 @@ static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg,
unsigned int i; \
cfg = &ptys2ethtool_map[reg_]; \
cfg->speed = speed_; \
- bitmap_zero(cfg->supported, \
- __ETHTOOL_LINK_MODE_MASK_NBITS); \
- bitmap_zero(cfg->advertised, \
- __ETHTOOL_LINK_MODE_MASK_NBITS); \
+ linkmode_zero(cfg->supported); \
+ linkmode_zero(cfg->advertised); \
for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
__set_bit(modes[i], cfg->supported); \
__set_bit(modes[i], cfg->advertised); \
@@ -702,10 +709,8 @@ static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
int i;
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
if (eth_proto & MLX4_PROT_MASK(i))
- bitmap_or(link_modes, link_modes,
- ptys2ethtool_link_mode(&ptys2ethtool_map[i],
- report),
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_or(link_modes, link_modes,
+ ptys2ethtool_link_mode(&ptys2ethtool_map[i], report));
}
}
@@ -716,11 +721,9 @@ static u32 ethtool2ptys_link_modes(const unsigned long *link_modes,
u32 ptys_modes = 0;
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
- if (bitmap_intersects(
- ptys2ethtool_link_mode(&ptys2ethtool_map[i],
- report),
- link_modes,
- __ETHTOOL_LINK_MODE_MASK_NBITS))
+ ulong *map_mode = ptys2ethtool_link_mode(&ptys2ethtool_map[i],
+ report);
+ if (linkmode_intersects(map_mode, link_modes))
ptys_modes |= 1 << i;
}
return ptys_modes;
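The open-coded bitmap_*() calls over __ETHTOOL_LINK_MODE_MASK_NBITS collapse into the linkmode_*() wrappers, which carry the ethtool mask width implicitly. A minimal usage sketch (the chosen link mode is illustrative):

	#include <linux/linkmode.h>

	static void linkmode_example(void)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(a);
		__ETHTOOL_DECLARE_LINK_MODE_MASK(b);

		linkmode_zero(a);
		linkmode_zero(b);
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, a);
		linkmode_or(b, b, a);			/* b |= a over the full mask */
		WARN_ON(!linkmode_intersects(a, b));	/* they now share a mode */
	}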
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 109472d6b61f..f1259bdb1a29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -237,7 +237,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
- flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
iounmap(mdev->uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8af7f2827322..3f6d5c384637 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -527,18 +527,17 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
return err;
}
-static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
+static void mlx4_en_u64_to_mac(struct net_device *dev, u64 src_mac)
{
- int i;
- for (i = ETH_ALEN - 1; i >= 0; --i) {
- dst_mac[i] = src_mac & 0xff;
- src_mac >>= 8;
- }
- memset(&dst_mac[ETH_ALEN], 0, 2);
+ u8 addr[ETH_ALEN];
+
+ u64_to_ether_addr(src_mac, addr);
+ eth_hw_addr_set(dev, addr);
}
-static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv,
+ const unsigned char *addr,
int qpn, u64 *reg_id)
{
int err;
@@ -559,7 +558,7 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
- unsigned char *mac, int *qpn, u64 *reg_id)
+ const unsigned char *mac, int *qpn, u64 *reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
@@ -611,7 +610,8 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
- unsigned char *mac, int qpn, u64 reg_id)
+ const unsigned char *mac,
+ int qpn, u64 reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
@@ -644,7 +644,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
int index = 0;
int err = 0;
int *qpn = &priv->base_qpn;
- u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = ether_addr_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
priv->dev->dev_addr);
@@ -683,7 +683,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
int qpn = priv->base_qpn;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = ether_addr_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
@@ -701,14 +701,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err = 0;
- u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
+ u64 new_mac_u64 = ether_addr_to_u64(new_mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
- u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
+ u64 prev_mac_u64 = ether_addr_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -797,7 +797,7 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
if (err)
goto out;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, saddr->sa_data);
mlx4_en_update_user_mac(priv, new_mac);
out:
mutex_unlock(&mdev->state_lock);
@@ -1076,7 +1076,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
- mcast_addr = mlx4_mac_to_u64(mclist->addr);
+ mcast_addr = ether_addr_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -1169,7 +1169,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
found = true;
if (!found) {
- mac = mlx4_mac_to_u64(entry->mac);
+ mac = ether_addr_to_u64(entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
priv->base_qpn,
entry->reg_id);
@@ -1212,7 +1212,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
- mac = mlx4_mac_to_u64(ha->addr);
+ mac = ether_addr_to_u64(ha->addr);
memcpy(entry->mac, ha->addr, ETH_ALEN);
err = mlx4_register_mac(mdev->dev, priv->port, mac);
if (err < 0) {
@@ -1348,7 +1348,7 @@ static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_mac_to_u64(entry->mac);
+ mac = ether_addr_to_u64(entry->mac);
en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
@@ -3267,7 +3267,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Set default MAC */
dev->addr_len = ETH_ALEN;
- mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
+ mlx4_en_u64_to_mac(dev, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
priv->port, dev->dev_addr);
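The driver-private mlx4_mac_to_u64()/mlx4_u64_to_mac() pair gives way to the generic <linux/etherdevice.h> helpers used throughout these hunks. A round-trip sketch (the address is illustrative):

	#include <linux/etherdevice.h>

	static void mac_u64_roundtrip(void)
	{
		u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		u8 back[ETH_ALEN];
		u64 v;

		v = ether_addr_to_u64(mac);	/* v == 0x001122334455 */
		u64_to_ether_addr(v, back);	/* back[] == mac[] again */
	}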
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0158b88bea5b..532997eba698 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -244,6 +244,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_complete = 0;
priv->port_stats.rx_alloc_pages = 0;
priv->xdp_stats.rx_xdp_drop = 0;
+ priv->xdp_stats.rx_xdp_redirect = 0;
+ priv->xdp_stats.rx_xdp_redirect_fail = 0;
priv->xdp_stats.rx_xdp_tx = 0;
priv->xdp_stats.rx_xdp_tx_full = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -255,6 +257,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
+ priv->xdp_stats.rx_xdp_redirect += READ_ONCE(ring->xdp_redirect);
+ priv->xdp_stats.rx_xdp_redirect_fail += READ_ONCE(ring->xdp_redirect_fail);
priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7f6d3b82c29b..650e6a1844ae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -669,6 +669,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct bpf_prog *xdp_prog;
int cq_ring = cq->ring;
bool doorbell_pending;
+ bool xdp_redir_flush;
struct mlx4_cqe *cqe;
struct xdp_buff xdp;
int polled = 0;
@@ -682,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
xdp_prog = rcu_dereference_bh(ring->xdp_prog);
xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
+ xdp_redir_flush = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
@@ -790,6 +792,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
switch (act) {
case XDP_PASS:
break;
+ case XDP_REDIRECT:
+ if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) {
+ ring->xdp_redirect++;
+ xdp_redir_flush = true;
+ frags[0].page = NULL;
+ goto next;
+ }
+ ring->xdp_redirect_fail++;
+ trace_xdp_exception(dev, xdp_prog, act);
+ goto xdp_drop_no_cnt;
case XDP_TX:
if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
length, cq_ring,
@@ -897,6 +909,9 @@ next:
break;
}
+ if (xdp_redir_flush)
+ xdp_do_flush();
+
if (likely(polled)) {
if (doorbell_pending) {
priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
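The redirect path above defers xdp_do_flush() to the end of the CQ poll so that frames queued by xdp_do_redirect() are flushed once per NAPI run rather than once per packet. Reduced to its essentials (counter names as in the hunks):

	bool xdp_redir_flush = false;

	/* per received packet: */
	if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) {
		ring->xdp_redirect++;
		xdp_redir_flush = true;		/* flush once, after the loop */
	} else {
		ring->xdp_redirect_fail++;
		trace_xdp_exception(dev, xdp_prog, act);
	}

	/* once, after the poll loop: */
	if (xdp_redir_flush)
		xdp_do_flush();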
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c56b9dba4c71..817f4154b86d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -130,6 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bf_enabled = !!(priv->pflags &
MLX4_EN_PRIV_FLAGS_BLUEFLAME);
}
+ ring->doorbell_address = ring->bf.uar->map + MLX4_SEND_DOORBELL;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring->queue_index = queue_index;
@@ -753,8 +754,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
#else
iowrite32be(
#endif
- (__force u32)ring->doorbell_qpn,
- ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ (__force u32)ring->doorbell_qpn, ring->doorbell_address);
}
static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index dc4ac1a2b6b6..42c96c9d7fb1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -3105,7 +3105,7 @@ void mlx4_replace_zero_macs(struct mlx4_dev *dev)
dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
eth_random_addr(mac_addr);
dev->port_random_macs |= 1 << i;
- dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
+ dev->caps.def_mac[i] = ether_addr_to_u64(mac_addr);
}
}
EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5a6b0fcaf7f8..b187c210d4d6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4015,9 +4015,6 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&dev->persist->interface_state_mutex);
mutex_init(&dev->persist->pci_status_mutex);
- ret = devlink_register(devlink);
- if (ret)
- goto err_persist_free;
ret = devlink_params_register(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
if (ret)
@@ -4027,17 +4024,15 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_params_unregister;
- devlink_params_publish(devlink);
- devlink_reload_enable(devlink);
pci_save_state(pdev);
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
err_params_unregister:
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
- devlink_unregister(devlink);
-err_persist_free:
kfree(dev->persist);
err_devlink_free:
devlink_free(devlink);
@@ -4140,7 +4135,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4176,7 +4171,6 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_pci_disable_device(dev);
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
- devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f1b4ad9c66d2..f1716a83a4d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1046,7 +1046,7 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 6bf558c5ec10..e132ff4c82f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -283,6 +283,7 @@ struct mlx4_en_tx_ring {
struct mlx4_bf bf;
/* Following part should be mostly read */
+ void __iomem *doorbell_address;
__be32 doorbell_qpn;
__be32 mr_key;
u32 size; /* number of TXBBs */
@@ -340,6 +341,8 @@ struct mlx4_en_rx_ring {
unsigned long csum_complete;
unsigned long rx_alloc_pages;
unsigned long xdp_drop;
+ unsigned long xdp_redirect;
+ unsigned long xdp_redirect_fail;
unsigned long xdp_tx;
unsigned long xdp_tx_full;
unsigned long dropped;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 7b51ae8cf759..e9cd4bb6f83d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -42,9 +42,11 @@ struct mlx4_en_port_stats {
struct mlx4_en_xdp_stats {
unsigned long rx_xdp_drop;
+ unsigned long rx_xdp_redirect;
+ unsigned long rx_xdp_redirect_fail;
unsigned long rx_xdp_tx;
unsigned long rx_xdp_tx_full;
-#define NUM_XDP_STATS 3
+#define NUM_XDP_STATS 5
};
struct mlx4_en_phy_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 63032cd6efb1..e63bb9ceb9c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,10 +14,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
- fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
+ fs_counters.o fs_ft_pool.o rl.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
- fw_reset.o qos.o
+ fw_reset.o qos.o lib/tout.o
#
# Netdev basic
@@ -37,7 +37,7 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag/mp.o lag/port_sel.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o \
en/mapping.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
@@ -45,7 +45,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
- en/tc/post_act.o
+ en/tc/post_act.o en/tc/int_port.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index db5dfff585c9..f71ec4d9d68e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -45,6 +45,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
+#include "lib/tout.h"
enum {
CMD_IF_REV = 5,
@@ -225,9 +226,13 @@ static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
- unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
+ struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd);
+ u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
+ unsigned long poll_end;
u8 own;
+ poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);
+
do {
own = READ_ONCE(ent->lay->status_own);
if (!(own & CMD_OWNER_HW)) {
@@ -925,15 +930,18 @@ static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
- struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
- unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+ bool poll_cmd = ent->polling;
struct mlx5_cmd_layout *lay;
+ struct mlx5_core_dev *dev;
+ unsigned long cb_timeout;
struct semaphore *sem;
unsigned long flags;
- bool poll_cmd = ent->polling;
int alloc_ret;
int cmd_mode;
+ dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+
complete(&ent->handling);
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -1073,7 +1081,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
- unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+ unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
struct mlx5_cmd *cmd = &dev->cmd;
int err;
@@ -2058,7 +2066,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL;
}
- cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
+ cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
if (!cmd->stats)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index e8093c4e09d4..a8b84d53dfb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -33,6 +33,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
+#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
/* intf dev list mutex */
@@ -537,6 +538,16 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
return add_drivers(dev);
}
+static bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
+{
+ u64 fsystem_guid, psystem_guid;
+
+ fsystem_guid = mlx5_query_nic_system_image_guid(dev);
+ psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);
+
+ return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
+}
+
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
@@ -556,7 +567,8 @@ static int next_phys_dev(struct device *dev, const void *data)
if (mdev == curr)
return 0;
- if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
+ if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
+ mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
return 0;
return 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index dcf9f27ba2ef..1c98652b244a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -136,6 +136,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct pci_dev *pdev = dev->pdev;
bool sf_dev_allocated;
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
@@ -153,6 +154,10 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (pci_num_vf(pdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
+ }
+
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
mlx5_unload_one(dev);
@@ -449,7 +454,8 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
struct mlx5_core_dev *dev = devlink_priv(devlink);
bool new_state = val.vbool;
- if (new_state && !MLX5_CAP_GEN(dev, roce)) {
+ if (new_state && !MLX5_CAP_GEN(dev, roce) &&
+ !MLX5_CAP_GEN(dev, roce_rw_supported)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
return -EOPNOTSUPP;
}
@@ -625,7 +631,6 @@ static int mlx5_devlink_eth_param_register(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
value);
- devlink_param_publish(devlink, &enable_eth_param);
return 0;
}
@@ -636,7 +641,6 @@ static void mlx5_devlink_eth_param_unregister(struct devlink *devlink)
if (!mlx5_eth_supported(dev))
return;
- devlink_param_unpublish(devlink, &enable_eth_param);
devlink_param_unregister(devlink, &enable_eth_param);
}
@@ -672,7 +676,6 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
value);
- devlink_param_publish(devlink, &enable_rdma_param);
return 0;
}
@@ -681,7 +684,6 @@ static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return;
- devlink_param_unpublish(devlink, &enable_rdma_param);
devlink_param_unregister(devlink, &enable_rdma_param);
}
@@ -706,7 +708,6 @@ static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
value);
- devlink_param_publish(devlink, &enable_rdma_param);
return 0;
}
@@ -717,7 +718,6 @@ static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink)
if (!mlx5_vnet_supported(dev))
return;
- devlink_param_unpublish(devlink, &enable_vnet_param);
devlink_param_unregister(devlink, &enable_vnet_param);
}
@@ -797,18 +797,15 @@ static void mlx5_devlink_traps_unregister(struct devlink *devlink)
int mlx5_devlink_register(struct devlink *devlink)
{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
- err = devlink_register(devlink);
- if (err)
- return err;
-
err = devlink_params_register(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
if (err)
- goto params_reg_err;
+ return err;
+
mlx5_devlink_set_params_init_values(devlink);
- devlink_params_publish(devlink);
err = mlx5_devlink_auxdev_params_register(devlink);
if (err)
@@ -818,6 +815,9 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto traps_reg_err;
+ if (!mlx5_core_is_mp_slave(dev))
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+
return 0;
traps_reg_err:
@@ -825,8 +825,6 @@ traps_reg_err:
auxdev_reg_err:
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
-params_reg_err:
- devlink_unregister(devlink);
return err;
}
@@ -834,8 +832,6 @@ void mlx5_devlink_unregister(struct devlink *devlink)
{
mlx5_devlink_traps_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
- devlink_params_unpublish(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
- devlink_unregister(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 87d65f6b5310..7841ef6c193c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -235,6 +235,9 @@ const char *parse_fs_dst(struct trace_seq *p,
const char *ret = trace_seq_buffer_ptr(p);
switch (dst->type) {
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ trace_seq_printf(p, "uplink\n");
+ break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
trace_seq_printf(p, "vport=%u\n", dst->vport.num);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index f9cf9fb31547..eae9aa9c0811 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -745,7 +745,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY);
MLX5_SET(mtrc_conf, in, log_trace_buffer_size,
ilog2(TRACER_BUFFER_PAGE_NUM));
- MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key);
+ MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey);
err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
MLX5_REG_MTRC_CONF, 0, 1);
@@ -1028,7 +1028,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
err_notifier_unregister:
mlx5_eq_notifier_unregister(dev, &tracer->nb);
- mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
+ mlx5_core_destroy_mkey(dev, tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
err_cancel_work:
@@ -1051,7 +1051,7 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
if (tracer->owner)
mlx5_fw_tracer_ownership_release(tracer);
- mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey);
+ mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey);
mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
}
@@ -1069,7 +1069,6 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
- flush_workqueue(tracer->work_queue);
destroy_workqueue(tracer->work_queue);
kvfree(tracer);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 97252a85d65e..4762b55b0b0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -89,7 +89,7 @@ struct mlx5_fw_tracer {
void *log_buf;
dma_addr_t dma;
u32 size;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
u32 consumer_index;
} buff;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
index ed4fb79b4db7..538adab6878b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -30,7 +30,7 @@ static const char *const mlx5_rsc_sgmt_name[] = {
struct mlx5_rsc_dump {
u32 pdn;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
};
@@ -89,7 +89,7 @@ static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump
return -ENOMEM;
in_seq_num = MLX5_GET(resource_dump, cmd->cmd, seq_num);
- MLX5_SET(resource_dump, cmd->cmd, mkey, rsc_dump->mkey.key);
+ MLX5_SET(resource_dump, cmd->cmd, mkey, rsc_dump->mkey);
MLX5_SET64(resource_dump, cmd->cmd, address, dma);
err = mlx5_core_access_reg(dev, cmd->cmd, sizeof(cmd->cmd), cmd->cmd,
@@ -202,7 +202,7 @@ free_page:
}
static int mlx5_rsc_dump_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- struct mlx5_core_mkey *mkey)
+ u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
@@ -276,7 +276,7 @@ int mlx5_rsc_dump_init(struct mlx5_core_dev *dev)
return err;
destroy_mkey:
- mlx5_core_destroy_mkey(dev, &rsc_dump->mkey);
+ mlx5_core_destroy_mkey(dev, rsc_dump->mkey);
free_pd:
mlx5_core_dealloc_pd(dev, rsc_dump->pdn);
return err;
@@ -287,6 +287,6 @@ void mlx5_rsc_dump_cleanup(struct mlx5_core_dev *dev)
if (IS_ERR_OR_NULL(dev->rsc_dump))
return;
- mlx5_core_destroy_mkey(dev, &dev->rsc_dump->mkey);
+ mlx5_core_destroy_mkey(dev, dev->rsc_dump->mkey);
mlx5_core_dealloc_pd(dev, dev->rsc_dump->pdn);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 03a7a4ce5cd5..f0ac6b0d9653 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -79,6 +79,11 @@ struct page_pool;
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5E_RX_MAX_HEAD (256)
+#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
+#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
+#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
+#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@ -152,6 +157,25 @@ struct page_pool;
#define MLX5E_UMR_WQEBBS \
(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
+#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
+ (sizeof(struct mlx5e_umr_wqe) +\
+ (sizeof(struct mlx5_klm) * (sgl_len)))
+
+#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \
+ (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB))
+
+#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\
+ (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS))
+
+#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\
+ (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
+
+#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
+ ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
+
+#define MLX5E_MAX_KLM_PER_WQE(mdev) \
+ MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
+
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
#define mlx5e_dbg(mlevel, priv, format, ...) \
@@ -217,11 +241,12 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
- struct mlx5_mtt inline_mtts[0];
+ union {
+ struct mlx5_mtt inline_mtts[0];
+ struct mlx5_klm inline_klms[0];
+ };
};
-extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
-
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
MLX5E_PFLAG_TX_CQE_BASED_MODER,
@@ -244,6 +269,21 @@ enum mlx5e_priv_flag {
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
+enum packet_merge {
+ MLX5E_PACKET_MERGE_NONE,
+ MLX5E_PACKET_MERGE_LRO,
+ MLX5E_PACKET_MERGE_SHAMPO,
+};
+
+struct mlx5e_packet_merge_param {
+ enum packet_merge type;
+ u32 timeout;
+ struct {
+ u8 match_criteria_type;
+ u8 alignment_granularity;
+ } shampo;
+};
+
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -253,18 +293,20 @@ struct mlx5e_params {
u16 mode;
u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ struct {
+ struct mlx5e_mqprio_rl *rl;
+ } channel;
} mqprio;
bool rx_cqe_compress_def;
bool tunneled_offload_en;
struct dim_cq_moder rx_cq_moderation;
struct dim_cq_moder tx_cq_moderation;
- bool lro_en;
+ struct mlx5e_packet_merge_param packet_merge;
u8 tx_min_inline_mode;
bool vlan_strip_disable;
bool scatter_fcs_en;
bool rx_dim_enabled;
bool tx_dim_enabled;
- u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
struct mlx5e_xsk *xsk;
@@ -286,7 +328,8 @@ enum {
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
- MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX /* set when mini_cqe_resp_stride_index cap is used */
+ MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
+ MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
};
struct mlx5e_cq {
@@ -577,6 +620,7 @@ typedef struct sk_buff *
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
+typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
@@ -598,6 +642,25 @@ struct mlx5e_rq_frags_info {
u8 wqe_bulk;
};
+struct mlx5e_shampo_hd {
+ u32 mkey;
+ struct mlx5e_dma_info *info;
+ struct page *last_page;
+ u16 hd_per_wq;
+ u16 hd_per_wqe;
+ unsigned long *bitmap;
+ u16 pi;
+ u16 ci;
+ __be32 key;
+ u64 last_addr;
+};
+
+struct mlx5e_hw_gro_data {
+ struct sk_buff *skb;
+ struct flow_keys fk;
+ int second_ip_id;
+};
+
struct mlx5e_rq {
/* data path */
union {
@@ -619,6 +682,7 @@ struct mlx5e_rq {
u8 umr_in_progress;
u8 umr_last_bulk;
u8 umr_completed;
+ struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
struct {
@@ -638,6 +702,8 @@ struct mlx5e_rq {
struct mlx5e_icosq *icosq;
struct mlx5e_priv *priv;
+ struct mlx5e_hw_gro_data *hw_gro_data;
+
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_post_rx_wqes post_wqes;
mlx5e_fp_dealloc_wqe dealloc_wqe;
@@ -665,7 +731,7 @@ struct mlx5e_rq {
u8 wq_type;
u32 rqn;
struct mlx5_core_dev *mdev;
- struct mlx5_core_mkey umr_mkey;
+ u32 umr_mkey;
struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */
@@ -879,11 +945,13 @@ struct mlx5e_priv {
#endif
struct mlx5e_scratchpad scratchpad;
struct mlx5e_htb htb;
+ struct mlx5e_mqprio_rl *mqprio_rl;
};
struct mlx5e_rx_handlers {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe_shampo;
};
extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
@@ -913,11 +981,13 @@ void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
+int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);
@@ -1003,7 +1073,8 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 86e079310ac3..ae52e7f38306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -24,7 +24,7 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
if (mlx5_core_is_pf(priv->mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn);
+ attrs.phys.port_number = mlx5_get_dev_index(priv->mdev);
if (MLX5_ESWITCH_MANAGER(priv->mdev)) {
mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid);
memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 41684a6c44e9..678ffbb48a25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -125,15 +125,15 @@ struct mlx5e_ethtool_steering {
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
-int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
#else
static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
-static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{ return -EOPNOTSUPP; }
-static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_EN_RXNFC */
@@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+int mlx5e_fs_init(struct mlx5e_priv *priv);
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index 018262d0164b..d5b7110a4265 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -32,7 +32,6 @@ void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
#define MLX5E_REPORTER_PER_Q_MAX_LEN 256
-#define MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC 2000
struct mlx5e_err_ctx {
int (*recover)(void *ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3cbb596821e8..f8c29022dbb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -87,7 +87,8 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
mlx5e_rx_get_linear_frag_sz(params, NULL));
- return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
+ return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
+ linear_frag_sz <= PAGE_SIZE;
}
bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
@@ -138,6 +139,27 @@ u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
+u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
+}
+
+u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
+}
+
+u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
+ PAGE_SIZE;
+
+ return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
+}
+
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
@@ -164,19 +186,8 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
mlx5e_rx_is_linear_skb(params, xsk) :
mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
- return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
-}
-
-struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params)
-{
- struct mlx5e_lro_param lro_param;
-
- lro_param = (struct mlx5e_lro_param) {
- .enabled = params->lro_en,
- .timeout = params->lro_timeout,
- };
-
- return lro_param;
+ return is_linear_skb || params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ?
+ mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -453,6 +464,23 @@ static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
+static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
+ u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+ u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ int wqe_size = BIT(log_stride_sz) * num_strides;
+
+ /* +1 covers the case where pkt_per_rsrv doesn't consume the whole
+ * reservation, so a filler CQE is emitted for the rest of it.
+ */
+ return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
+}
+
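To make the CQ sizing concrete, a worked example with assumed numbers (not from the patch): with the default 64KB reservation, an 8KB stride and 32 strides per WQE (wqe_size = 256KB), a 16-WQE ring and sw_mtu = 1500, pkt_per_rsrv = BIT(order_base_2(DIV_ROUND_UP(64K, 1500))) = 64, so

	(256K / 64K) * 16 * (64 + 1) = 4160  ->  order_base_2(4160) = 13,

i.e. the CQ is sized for 8192 entries; the "+1" per reservation is the room for the filler CQE described in the comment above.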
static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
@@ -464,9 +492,12 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
- mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+ log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
+ else
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
log_cq_size = params->log_rq_mtu_frames;
@@ -485,10 +516,11 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
+ bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
MLX5_CAP_GEN(mdev, relaxed_ordering_write);
- return ro && params->lro_en ?
+ return ro && lro_en ?
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}
@@ -520,6 +552,22 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_wqe_stride_size,
log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
+ MLX5_SET(wq, wq, shampo_enable, true);
+ MLX5_SET(wq, wq, log_reservation_size,
+ mlx5e_shampo_get_log_rsrv_size(mdev, params));
+ MLX5_SET(wq, wq,
+ log_max_num_of_packets_per_reservation,
+ mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+ MLX5_SET(wq, wq, log_headers_entry_size,
+ mlx5e_shampo_get_log_hd_entry_size(mdev, params));
+ MLX5_SET(rqc, rqc, reservation_timeout,
+ params->packet_merge.timeout);
+ MLX5_SET(rqc, rqc, shampo_match_criteria_type,
+ params->packet_merge.shampo.match_criteria_type);
+ MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
+ params->packet_merge.shampo.alignment_granularity);
+ }
break;
}
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -620,17 +668,80 @@ static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
return MLX5_GET(wq, wq, log_wq_sz);
}
-static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
+/* This function calculates the maximum number of header entries needed
+ * per WQE. The formula is based on the size of the reservations and on
+ * the restriction that the maximum number of packets per reservation
+ * equals the maximum number of headers per reservation.
+ */
+u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param)
+{
+ int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
+ u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
+ int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+ u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
+ int wqe_size = BIT(log_stride_sz) * num_strides;
+ u32 hd_per_wqe;
+
+ /* Assumption: hd_per_wqe % 8 == 0. */
+ hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
+ mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
+ __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
+ return hd_per_wqe;
+}
+
+/* This function calculates the maximum number of header entries needed
+ * for the WQ. This value is used to allocate the header buffer in HW,
+ * and thus must be a power of two.
+ */
+u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param)
+{
+ void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
+ int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+ u32 hd_per_wqe, hd_per_wq;
+
+ hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
+ hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
+ return hd_per_wq;
+}
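
A quick worked example of the two sizing formulas above, with hypothetical
values (a real device derives them from its caps): 4 KB pages with
log_rsrv_size = 4 give 64 KB reservations; 2048 strides of 128 B give a
256 KB WQE; 4 packets per reservation then yield 16 header entries per WQE,
consistent with the hd_per_wqe % 8 == 0 assumption:

    /* Illustrative numbers only, not taken from the patch. */
    unsigned int resv_size    = (1 << 4) * 4096;   /*  65536 B per rsrv */
    unsigned int wqe_size     = 2048 * 128;        /* 262144 B per WQE  */
    unsigned int pkt_per_resv = 4;
    unsigned int hd_per_wqe   = (wqe_size / resv_size) * pkt_per_resv; /* 16 */
    /* For a WQ of 8 WQEs: hd_per_wq = roundup_pow_of_two(16 * 8) = 128. */
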
+
+static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param)
+{
+ int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
+ void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
+ int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+ u32 wqebbs;
+
+ max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
+ max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
+ max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
+ rest = max_hd_per_wqe % max_klm_per_umr;
+ wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
+ if (rest)
+ wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
+ wqebbs *= wq_size;
+ return wqebbs;
+}
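
In other words, the quotient/remainder split above is a ceiling division in
which the final, partially filled UMR is costed at its actual size: with,
say, max_klm_per_umr = 64 and max_hd_per_wqe = 100 (hypothetical numbers),
the ICOSQ budget per RQ WQE is MLX5E_KLM_UMR_WQEBBS(64) +
MLX5E_KLM_UMR_WQEBBS(36), rather than 2 * MLX5E_KLM_UMR_WQEBBS(64).
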
+
+static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_rq_param *rqp)
{
- switch (params->rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
- order_base_2(MLX5E_UMR_WQEBBS) +
- mlx5e_get_rq_log_wq_sz(rqp->rqc));
- default: /* MLX5_WQ_TYPE_CYCLIC */
+ u32 wqebbs;
+
+ /* MLX5_WQ_TYPE_CYCLIC */
+ if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- }
+
+ wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+ wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
+ return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
@@ -697,7 +808,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
if (err)
return err;
- icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
+ icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 879ad46d754e..433e6967692d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -11,11 +11,6 @@ struct mlx5e_xsk_param {
u16 chunk_size;
};
-struct mlx5e_lro_param {
- bool enabled;
- u32 timeout;
-};
-
struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
@@ -116,6 +111,18 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
+u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param);
+u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param);
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
@@ -125,7 +132,6 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params);
/* Build queue parameters */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 3a86f66d1295..18d542b1c5cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -682,7 +682,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->tstamp = &priv->tstamp;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->stats = &priv->ptp_stats.ch;
c->lag_port = lag_port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index e8a8d78e3e4d..50977f01a050 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -7,6 +7,21 @@
#define BYTES_IN_MBIT 125000
+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+ if (nbytes < BYTES_IN_MBIT) {
+ qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
+ nbytes, BYTES_IN_MBIT);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+ return div_u64(nbytes, BYTES_IN_MBIT);
+}
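
BYTES_IN_MBIT is 125000 because 1 Mbit/s = 10^6 bits/s = 125000 bytes/s.
For example, a requested rate of 125000000 bytes/sec passes the check above
and converts to div_u64(125000000, 125000) = 1000 Mbit/s, while anything
below 125000 bytes/sec is rejected with -EINVAL.
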
+
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{
return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
@@ -238,7 +253,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
if (err)
goto err_free_sq;
err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, node->hw_id, node->qid);
+ &param_sq, sq, 0, node->hw_id,
+ priv->htb.qos_sq_stats[node->qid]);
if (err)
goto err_close_cq;
@@ -979,3 +995,87 @@ int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ce
return err;
}
+
+struct mlx5e_mqprio_rl {
+ struct mlx5_core_dev *mdev;
+ u32 root_id;
+ u32 *leaves_id;
+ u8 num_tc;
+};
+
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_mqprio_rl), GFP_KERNEL);
+}
+
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl)
+{
+ kvfree(rl);
+}
+
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+ u64 max_rate[])
+{
+ int err;
+ int tc;
+
+ if (!mlx5_qos_is_supported(mdev)) {
+ qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or using a supported device.");
+ return -EOPNOTSUPP;
+ }
+ if (num_tc > mlx5e_qos_max_leaf_nodes(mdev))
+ return -EINVAL;
+
+ rl->mdev = mdev;
+ rl->num_tc = num_tc;
+ rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL);
+ if (!rl->leaves_id)
+ return -ENOMEM;
+
+ err = mlx5_qos_create_root_node(mdev, &rl->root_id);
+ if (err)
+ goto err_free_leaves;
+
+ qos_dbg(mdev, "Root created, id %#x\n", rl->root_id);
+
+ for (tc = 0; tc < num_tc; tc++) {
+ u32 max_average_bw;
+
+ max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]);
+ err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw,
+ &rl->leaves_id[tc]);
+ if (err)
+ goto err_destroy_leaves;
+
+ qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n",
+ tc, rl->leaves_id[tc], max_average_bw);
+ }
+ return 0;
+
+err_destroy_leaves:
+ while (--tc >= 0)
+ mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]);
+ mlx5_qos_destroy_node(mdev, rl->root_id);
+err_free_leaves:
+ kvfree(rl->leaves_id);
+ return err;
+}
+
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl)
+{
+ int tc;
+
+ for (tc = 0; tc < rl->num_tc; tc++)
+ mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[tc]);
+ mlx5_qos_destroy_node(rl->mdev, rl->root_id);
+ kvfree(rl->leaves_id);
+}
+
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id)
+{
+ if (tc >= rl->num_tc)
+ return -EINVAL;
+
+ *hw_id = rl->leaves_id[tc];
+ return 0;
+}
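
A hedged sketch of the expected caller flow (the en_main.c side is not part
of this hunk, so the names and error handling below are illustrative only):

    /* Illustrative only: program per-TC ceilings for an mqprio offload. */
    struct mlx5e_mqprio_rl *rl;
    u32 hw_id;
    int tc, err;

    rl = mlx5e_mqprio_rl_alloc();
    if (!rl)
        return -ENOMEM;

    err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
    if (err) {
        mlx5e_mqprio_rl_free(rl);
        return err;
    }

    /* Attach each TC's SQs to its rate-limited scheduling node. */
    for (tc = 0; tc < num_tc; tc++) {
        err = mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
        if (!err)
            mlx5e_sq_attach_hw_id(sq[tc], hw_id); /* hypothetical helper */
    }

    /* On teardown: */
    mlx5e_mqprio_rl_cleanup(rl);
    mlx5e_mqprio_rl_free(rl);
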
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index 757682b7c0e0..b7558907ba20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -12,6 +12,7 @@ struct mlx5e_priv;
struct mlx5e_channels;
struct mlx5e_channel;
+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
@@ -41,4 +42,12 @@ int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
struct netlink_ext_ack *extack);
+/* MQPRIO TX rate limit */
+struct mlx5e_mqprio_rl;
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void);
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl);
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+ u64 max_rate[]);
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl);
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index de03684528bb..fcb0892c08a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -3,6 +3,7 @@
#include <net/dst_metadata.h>
#include <linux/netdevice.h>
+#include <linux/if_macvlan.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
@@ -18,10 +19,13 @@
#include "en/tc_tun.h"
#include "lib/port_tun.h"
#include "en/tc/sample.h"
+#include "en_accel/ipsec_rxtx.h"
+#include "en/tc/int_port.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
struct mlx5e_rep_priv *rpriv;
+ enum flow_block_binder_type binder_type;
struct list_head list;
};
@@ -296,14 +300,16 @@ int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv)
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
- struct net_device *netdev)
+ struct net_device *netdev,
+ enum flow_block_binder_type binder_type)
{
struct mlx5e_rep_indr_block_priv *cb_priv;
list_for_each_entry(cb_priv,
&rpriv->uplink_priv.tc_indr_block_priv_list,
list)
- if (cb_priv->netdev == netdev)
+ if (cb_priv->netdev == netdev &&
+ cb_priv->binder_type == binder_type)
return cb_priv;
return NULL;
@@ -341,9 +347,13 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
void *type_data, void *indr_priv)
{
- unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
+ unsigned long flags = MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+ flags |= (priv->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) ?
+ MLX5_TC_FLAG(EGRESS) :
+ MLX5_TC_FLAG(INGRESS);
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
@@ -409,6 +419,13 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv)
static LIST_HEAD(mlx5e_block_cb_list);
+static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode == MACVLAN_MODE_PASSTHRU;
+}
+
static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
struct mlx5e_rep_priv *rpriv,
@@ -418,14 +435,30 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
void (*cleanup)(struct flow_block_cb *block_cb))
{
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ bool is_ovs_int_port = netif_is_ovs_master(netdev);
struct mlx5e_rep_indr_block_priv *indr_priv;
struct flow_block_cb *block_cb;
if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
- !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
+ !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev) &&
+ !is_ovs_int_port) {
+ if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev))
+ return -EOPNOTSUPP;
+ if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
+ netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
return -EOPNOTSUPP;
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && !is_ovs_int_port)
+ return -EOPNOTSUPP;
+
+ if (is_ovs_int_port && !mlx5e_tc_int_port_supported(esw))
return -EOPNOTSUPP;
f->unlocked_driver_cb = true;
@@ -433,7 +466,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
switch (f->command) {
case FLOW_BLOCK_BIND:
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type);
if (indr_priv)
return -EEXIST;
@@ -443,6 +476,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
indr_priv->netdev = netdev;
indr_priv->rpriv = rpriv;
+ indr_priv->binder_type = f->binder_type;
list_add(&indr_priv->list,
&rpriv->uplink_priv.tc_indr_block_priv_list);
@@ -460,7 +494,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
return 0;
case FLOW_BLOCK_UNBIND:
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type);
if (!indr_priv)
return -ENOENT;
@@ -597,8 +631,8 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
return false;
}
- /* Set tun_dev so we do dev_put() after datapath */
- tc_priv->tun_dev = dev;
+ /* Set fwd_dev so we do dev_put() after datapath */
+ tc_priv->fwd_dev = dev;
skb->dev = dev;
@@ -638,6 +672,12 @@ static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1,
return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
}
+static void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (tc_priv->fwd_dev)
+ dev_put(tc_priv->fwd_dev);
+}
+
static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5_mapped_obj *mapped_obj,
struct mlx5e_tc_update_priv *tc_priv)
@@ -647,25 +687,54 @@ static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *sk
"Failed to restore tunnel info for sampled packet\n");
return;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_skb(skb, mapped_obj);
-#endif /* CONFIG_MLX5_TC_SAMPLE */
mlx5_rep_tc_post_napi_receive(tc_priv);
}
-bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv)
+static bool mlx5e_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_mapped_obj *mapped_obj,
+ struct mlx5e_tc_update_priv *tc_priv,
+ bool *forward_tx,
+ u32 reg_c1)
+{
+ u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ /* Tunnel restore takes precedence over int port restore */
+ if (tunnel_id)
+ return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
+ mapped_obj->int_port_metadata, forward_tx)) {
+ /* Set fwd_dev for future dev_put */
+ tc_priv->fwd_dev = skb->dev;
+
+ return true;
+ }
+
+ return false;
+}
+
+void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
+ struct sk_buff *skb)
{
+ u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
+ struct mlx5e_tc_update_priv tc_priv = {};
struct mlx5_mapped_obj mapped_obj;
struct mlx5_eswitch *esw;
+ bool forward_tx = false;
struct mlx5e_priv *priv;
u32 reg_c0;
int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
- return true;
+ goto forward;
/* If reg_c0 is not equal to the default flow tag then skb->mark
* is not supported and must be reset back to 0.
@@ -679,26 +748,35 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
netdev_dbg(priv->netdev,
"Couldn't find mapped object for reg_c0: %d, err: %d\n",
reg_c0, err);
- return false;
+ goto free_skb;
}
if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
- u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
-
- return mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, tc_priv);
+ if (!mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, &tc_priv) &&
+ !mlx5_ipsec_is_rx_flow(cqe))
+ goto free_skb;
} else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
- mlx5e_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
- return false;
+ mlx5e_restore_skb_sample(priv, skb, &mapped_obj, &tc_priv);
+ goto free_skb;
+ } else if (mapped_obj.type == MLX5_MAPPED_OBJ_INT_PORT_METADATA) {
+ if (!mlx5e_restore_skb_int_port(priv, skb, &mapped_obj, &tc_priv,
+ &forward_tx, reg_c1))
+ goto free_skb;
} else {
netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
- return false;
+ goto free_skb;
}
- return true;
-}
+forward:
+ if (forward_tx)
+ dev_queue_xmit(skb);
+ else
+ napi_gro_receive(rq->cq.napi, skb);
-void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
-{
- if (tc_priv->tun_dev)
- dev_put(tc_priv->tun_dev);
+ mlx5_rep_tc_post_napi_receive(&tc_priv);
+
+ return;
+
+free_skb:
+ dev_kfree_skb_any(skb);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
index d0661578467b..d6c7c81690eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
@@ -36,10 +36,8 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
-bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv);
-void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
+void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
+ struct sk_buff *skb);
#else /* CONFIG_MLX5_CLS_ACT */
@@ -66,13 +64,9 @@ static inline int
mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data) { return -EOPNOTSUPP; }
-struct mlx5e_tc_update_priv;
-static inline bool
-mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv) { return true; }
static inline void
-mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) {}
+mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
+ struct sk_buff *skb) {}
#endif /* CONFIG_MLX5_CLS_ACT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 0eb125316fe2..74086eb556ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -6,6 +6,7 @@
#include "txrx.h"
#include "devlink.h"
#include "ptp.h"
+#include "lib/tout.h"
static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
{
@@ -32,8 +33,10 @@ out:
static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq)
{
- unsigned long exp_time = jiffies +
- msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);
+ struct mlx5_core_dev *dev = icosq->channel->mdev;
+ unsigned long exp_time;
+
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
while (time_before(jiffies, exp_time)) {
if (icosq->cc == icosq->pc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index bb682fd751c9..4f4bc8726ec4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -4,11 +4,14 @@
#include "health.h"
#include "en/ptp.h"
#include "en/devlink.h"
+#include "lib/tout.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
- unsigned long exp_time = jiffies +
- msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);
+ struct mlx5_core_dev *dev = sq->mdev;
+ unsigned long exp_time;
+
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
while (time_before(jiffies, exp_time)) {
if (sq->cc == sq->pc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 625cd49ef96c..c1cdd8c2e37a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -127,7 +127,7 @@ mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
- const struct mlx5e_lro_param *init_lro_param,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner)
{
struct mlx5e_rss_params_traffic_type rss_tt;
@@ -161,7 +161,7 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
rqtn, rss->inner_ft_support);
- mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
rss_tt = mlx5e_rss_get_tt_config(rss, tt);
mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
@@ -198,14 +198,14 @@ static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types
}
static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
- const struct mlx5e_lro_param *init_lro_param,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner)
{
enum mlx5_traffic_types tt, max_tt;
int err;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
if (err)
goto err_destroy_tirs;
}
@@ -297,7 +297,7 @@ int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_lro_param *init_lro_param)
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param)
{
int err;
@@ -305,12 +305,12 @@ int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
if (err)
goto err_out;
- err = mlx5e_rss_create_tirs(rss, init_lro_param, false);
+ err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
if (err)
goto err_destroy_rqt;
if (inner_ft_support) {
- err = mlx5e_rss_create_tirs(rss, init_lro_param, true);
+ err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
if (err)
goto err_destroy_tirs;
}
@@ -372,7 +372,7 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
*/
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
- const struct mlx5e_lro_param *init_lro_param,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn)
{
struct mlx5e_tir *tir;
@@ -381,7 +381,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
if (!tir) { /* TIR doesn't exist, create one */
int err;
- err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
if (err)
return err;
tir = rss_get_tir(rss, tt, inner);
@@ -391,7 +391,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
return 0;
}
-static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
{
int err;
@@ -399,6 +399,7 @@ static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_r
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), err);
+ return err;
}
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
@@ -418,7 +419,8 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
}
-int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param)
+int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
+ struct mlx5e_packet_merge_param *pkt_merge_param)
{
struct mlx5e_tir_builder *builder;
enum mlx5_traffic_types tt;
@@ -428,7 +430,7 @@ int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_p
if (!builder)
return -ENOMEM;
- mlx5e_tir_builder_build_lro(builder, lro_param);
+ mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
@@ -490,6 +492,14 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
{
bool changed_indir = false;
bool changed_hash = false;
+ struct mlx5e_rss *old_rss;
+ int err = 0;
+
+ old_rss = mlx5e_rss_alloc();
+ if (!old_rss)
+ return -ENOMEM;
+
+ *old_rss = *rss;
if (hfunc && *hfunc != rss->hash.hfunc) {
switch (*hfunc) {
@@ -497,7 +507,8 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
case ETH_RSS_HASH_TOP:
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
changed_hash = true;
changed_indir = true;
@@ -520,13 +531,20 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
rss->indir.table[i] = indir[i];
}
- if (changed_indir && rss->enabled)
- mlx5e_rss_apply(rss, rqns, num_rqns);
+ if (changed_indir && rss->enabled) {
+ err = mlx5e_rss_apply(rss, rqns, num_rqns);
+ if (err) {
+ *rss = *old_rss;
+ goto out;
+ }
+ }
if (changed_hash)
mlx5e_rss_update_tirs(rss);
- return 0;
+out:
+ mlx5e_rss_free(old_rss);
+ return err;
}
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d522a10dadf3..c6b216416344 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -17,7 +17,7 @@ struct mlx5e_rss *mlx5e_rss_alloc(void);
void mlx5e_rss_free(struct mlx5e_rss *rss);
int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_lro_param *init_lro_param);
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param);
int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
bool inner_ft_support, u32 drop_rqn);
int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
@@ -30,13 +30,14 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
- const struct mlx5e_lro_param *init_lro_param,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn);
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
-int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param);
+int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
+ struct mlx5e_packet_merge_param *pkt_merge_param);
int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 13056cb9757d..142953847996 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -34,7 +34,7 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
- const struct mlx5e_lro_param *init_lro_param,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
@@ -49,7 +49,7 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
return -ENOMEM;
err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
- init_lro_param);
+ init_pkt_merge_param);
if (err)
goto err_rss_free;
@@ -275,7 +275,7 @@ struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
}
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
- const struct mlx5e_lro_param *init_lro_param)
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_tir_builder *builder;
@@ -306,7 +306,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
inner_ft_support);
- mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
@@ -336,7 +336,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
inner_ft_support);
- mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
@@ -437,7 +437,7 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
enum mlx5e_rx_res_features features, unsigned int max_nch,
- u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
+ u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
int err;
@@ -447,11 +447,11 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
- err = mlx5e_rx_res_rss_init_def(res, init_lro_param, init_nch);
+ err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
if (err)
goto err_out;
- err = mlx5e_rx_res_channels_init(res, init_lro_param);
+ err = mlx5e_rx_res_channels_init(res, init_pkt_merge_param);
if (err)
goto err_rss_destroy;
@@ -645,7 +645,8 @@ int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
return err;
}
-int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param)
+int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
+ struct mlx5e_packet_merge_param *pkt_merge_param)
{
struct mlx5e_tir_builder *builder;
int err, final_err;
@@ -655,7 +656,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param
if (!builder)
return -ENOMEM;
- mlx5e_tir_builder_build_lro(builder, lro_param);
+ mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
@@ -665,7 +666,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param
if (!rss)
continue;
- err = mlx5e_rss_lro_set_param(rss, lro_param);
+ err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param);
if (err)
final_err = final_err ? : err;
}
@@ -673,7 +674,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param
for (ix = 0; ix < res->max_nch; ix++) {
err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
if (err) {
- mlx5_core_warn(res->mdev, "Failed to update LRO state of direct TIR %#x for channel %u: err = %d\n",
+ mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n",
mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
if (!final_err)
final_err = err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 4a15942d79f7..d09f7d174a51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -25,7 +25,7 @@ enum mlx5e_rx_res_features {
struct mlx5e_rx_res *mlx5e_rx_res_alloc(void);
int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
enum mlx5e_rx_res_features features, unsigned int max_nch,
- u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
+ u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
@@ -57,7 +57,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
u8 rx_hash_fields);
-int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param);
+int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
+ struct mlx5e_packet_merge_param *pkt_merge_param);
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch);
int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
new file mode 100644
index 000000000000..ca834bbcb44f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/mlx5/fs.h>
+#include "en/mapping.h"
+#include "en/tc/int_port.h"
+#include "en.h"
+#include "en_rep.h"
+#include "en_tc.h"
+
+struct mlx5e_tc_int_port {
+ enum mlx5e_tc_int_port_type type;
+ int ifindex;
+ u32 match_metadata;
+ u32 mapping;
+ struct list_head list;
+ struct mlx5_flow_handle *rx_rule;
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+};
+
+struct mlx5e_tc_int_port_priv {
+ struct mlx5_core_dev *dev;
+ struct mutex int_ports_lock; /* Protects int ports list */
+ struct list_head int_ports; /* Uses int_ports_lock */
+ u16 num_ports;
+ bool ul_rep_rx_ready; /* Set while the uplink rep rx path is ready; cleared on teardown */
+ struct mapping_ctx *metadata_mapping; /* Metadata for source port rewrite and matching */
+};
+
+bool mlx5e_tc_int_port_supported(const struct mlx5_eswitch *esw)
+{
+ return mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ MLX5_CAP_GEN(esw->dev, reg_c_preserve);
+}
+
+u32 mlx5e_tc_int_port_get_metadata(struct mlx5e_tc_int_port *int_port)
+{
+ return int_port->match_metadata;
+}
+
+int mlx5e_tc_int_port_get_flow_source(struct mlx5e_tc_int_port *int_port)
+{
+ /* For egress forwarding we can have two cases:
+ * either the packet came from a vport and was
+ * redirected to the int port, or it came from the
+ * uplink, went via the internal port and was
+ * hairpinned back to the uplink. In both cases we
+ * set the source to any port.
+ */
+ return int_port->type == MLX5E_TC_INT_PORT_EGRESS ?
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT :
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+}
+
+u32 mlx5e_tc_int_port_get_metadata_for_match(struct mlx5e_tc_int_port *int_port)
+{
+ return int_port->match_metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
+}
+
+static struct mlx5_flow_handle *
+mlx5e_int_port_create_rx_rule(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_int_port *int_port,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_context *flow_context;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5e_tc_int_port_get_metadata_for_match(int_port));
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ /* Overwrite flow tag with the int port metadata mapping
+ * instead of the chain mapping.
+ */
+ flow_context = &spec->flow_context;
+ flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
+ flow_context->flow_tag = int_port->mapping;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
+ &flow_act, dest, 1);
+ if (IS_ERR(flow_rule))
+ mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %ld\n",
+ PTR_ERR(flow_rule));
+
+ kvfree(spec);
+
+ return flow_rule;
+}
+
+static struct mlx5e_tc_int_port *
+mlx5e_int_port_lookup(struct mlx5e_tc_int_port_priv *priv,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type)
+{
+ struct mlx5e_tc_int_port *int_port;
+
+ if (!priv->ul_rep_rx_ready)
+ goto not_found;
+
+ list_for_each_entry(int_port, &priv->int_ports, list)
+ if (int_port->ifindex == ifindex && int_port->type == type) {
+ refcount_inc(&int_port->refcnt);
+ return int_port;
+ }
+
+not_found:
+ return NULL;
+}
+
+static int mlx5e_int_port_metadata_alloc(struct mlx5e_tc_int_port_priv *priv,
+ int ifindex, enum mlx5e_tc_int_port_type type,
+ u32 *id)
+{
+ u32 mapped_key[2] = {type, ifindex};
+ int err;
+
+ err = mapping_add(priv->metadata_mapping, mapped_key, id);
+ if (err)
+ return err;
+
+ /* Fill upper 4 bits of PFNUM with reserved value */
+ *id |= 0xf << ESW_VPORT_BITS;
+
+ return 0;
+}
+
+static void mlx5e_int_port_metadata_free(struct mlx5e_tc_int_port_priv *priv,
+ u32 id)
+{
+ id &= (1 << ESW_VPORT_BITS) - 1;
+ mapping_remove(priv->metadata_mapping, id);
+}
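
A worked example of the metadata encoding (assuming ESW_VPORT_BITS = 12 and
ESW_SOURCE_PORT_METADATA_BITS = 16, as in the eswitch headers; the mapping
id itself is hypothetical):

    /* alloc: mapping_add() returns 0x005; stamp reserved PFNUM 0xf.    */
    u32 match_metadata = 0x005 | (0xf << 12);              /* 0xf005     */
    /* match: mlx5e_tc_int_port_get_metadata_for_match() shifts it up.  */
    u32 reg_c0_value   = match_metadata << (32 - 16);      /* 0xf0050000 */
    /* free: mask the PFNUM bits back off before mapping_remove().      */
    u32 mapping_id     = match_metadata & ((1 << 12) - 1); /* 0x005      */
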
+
+/* Must be called with priv->int_ports_lock held */
+static struct mlx5e_tc_int_port *
+mlx5e_int_port_add(struct mlx5e_tc_int_port_priv *priv,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type)
+{
+ struct mlx5_eswitch *esw = priv->dev->priv.eswitch;
+ struct mlx5_mapped_obj mapped_obj = {};
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_tc_int_port *int_port;
+ struct mlx5_flow_destination dest;
+ struct mapping_ctx *ctx;
+ u32 match_metadata;
+ u32 mapping;
+ int err;
+
+ if (priv->num_ports == MLX5E_TC_MAX_INT_PORT_NUM) {
+ mlx5_core_dbg(priv->dev, "Cannot add a new int port, max supported %d",
+ MLX5E_TC_MAX_INT_PORT_NUM);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ int_port = kzalloc(sizeof(*int_port), GFP_KERNEL);
+ if (!int_port)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_int_port_metadata_alloc(priv, ifindex, type, &match_metadata);
+ if (err) {
+ mlx5_core_warn(esw->dev, "Cannot add a new internal port, metadata allocation failed for ifindex %d",
+ ifindex);
+ goto err_metadata;
+ }
+
+ /* map metadata to reg_c0 object for miss handling */
+ ctx = esw->offloads.reg_c0_obj_pool;
+ mapped_obj.type = MLX5_MAPPED_OBJ_INT_PORT_METADATA;
+ mapped_obj.int_port_metadata = match_metadata;
+ err = mapping_add(ctx, &mapped_obj, &mapping);
+ if (err)
+ goto err_map;
+
+ int_port->type = type;
+ int_port->ifindex = ifindex;
+ int_port->match_metadata = match_metadata;
+ int_port->mapping = mapping;
+
+ /* Create a match on internal vport metadata in vport table */
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = uplink_rpriv->root_ft;
+
+ int_port->rx_rule = mlx5e_int_port_create_rx_rule(esw, int_port, &dest);
+ if (IS_ERR(int_port->rx_rule)) {
+ err = PTR_ERR(int_port->rx_rule);
+ mlx5_core_warn(esw->dev, "Can't add internal port rx rule, err %d", err);
+ goto err_rx_rule;
+ }
+
+ refcount_set(&int_port->refcnt, 1);
+ list_add_rcu(&int_port->list, &priv->int_ports);
+ priv->num_ports++;
+
+ return int_port;
+
+err_rx_rule:
+ mapping_remove(ctx, int_port->mapping);
+
+err_map:
+ mlx5e_int_port_metadata_free(priv, match_metadata);
+
+err_metadata:
+ kfree(int_port);
+
+ return ERR_PTR(err);
+}
+
+/* Must be called with priv->int_ports_lock held */
+static void
+mlx5e_int_port_remove(struct mlx5e_tc_int_port_priv *priv,
+ struct mlx5e_tc_int_port *int_port)
+{
+ struct mlx5_eswitch *esw = priv->dev->priv.eswitch;
+ struct mapping_ctx *ctx;
+
+ ctx = esw->offloads.reg_c0_obj_pool;
+
+ list_del_rcu(&int_port->list);
+
+ /* The following parameters are not used by the
+ * rcu readers of this int_port object, so it is
+ * safe to release them.
+ */
+ if (int_port->rx_rule)
+ mlx5_del_flow_rules(int_port->rx_rule);
+ mapping_remove(ctx, int_port->mapping);
+ mlx5e_int_port_metadata_free(priv, int_port->match_metadata);
+ kfree_rcu(int_port);
+ priv->num_ports--;
+}
+
+/* Must be called with rcu_read_lock held */
+static struct mlx5e_tc_int_port *
+mlx5e_int_port_get_from_metadata(struct mlx5e_tc_int_port_priv *priv,
+ u32 metadata)
+{
+ struct mlx5e_tc_int_port *int_port;
+
+ list_for_each_entry_rcu(int_port, &priv->int_ports, list)
+ if (int_port->match_metadata == metadata)
+ return int_port;
+
+ return NULL;
+}
+
+struct mlx5e_tc_int_port *
+mlx5e_tc_int_port_get(struct mlx5e_tc_int_port_priv *priv,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type)
+{
+ struct mlx5e_tc_int_port *int_port;
+
+ if (!priv)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&priv->int_ports_lock);
+
+ /* Reject request if ul rep not ready */
+ if (!priv->ul_rep_rx_ready) {
+ int_port = ERR_PTR(-EOPNOTSUPP);
+ goto done;
+ }
+
+ int_port = mlx5e_int_port_lookup(priv, ifindex, type);
+ if (int_port)
+ goto done;
+
+ /* Alloc and add new int port to list */
+ int_port = mlx5e_int_port_add(priv, ifindex, type);
+
+done:
+ mutex_unlock(&priv->int_ports_lock);
+
+ return int_port;
+}
+
+void
+mlx5e_tc_int_port_put(struct mlx5e_tc_int_port_priv *priv,
+ struct mlx5e_tc_int_port *int_port)
+{
+ if (!refcount_dec_and_mutex_lock(&int_port->refcnt, &priv->int_ports_lock))
+ return;
+
+ mlx5e_int_port_remove(priv, int_port);
+ mutex_unlock(&priv->int_ports_lock);
+}
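
Callers pair these as a simple refcounted get/put (the en_tc.c user is not
shown in this hunk, so the sketch below is illustrative):

    /* Illustrative only. */
    struct mlx5e_tc_int_port *int_port;

    int_port = mlx5e_tc_int_port_get(priv, ifindex, MLX5E_TC_INT_PORT_INGRESS);
    if (IS_ERR(int_port))
        return PTR_ERR(int_port);

    /* While the flow is offloaded, match on
     * mlx5e_tc_int_port_get_metadata_for_match(int_port).
     */

    mlx5e_tc_int_port_put(priv, int_port); /* last put removes the port */
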
+
+struct mlx5e_tc_int_port_priv *
+mlx5e_tc_int_port_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_int_port_priv *int_port_priv;
+ u64 mapping_id;
+
+ if (!mlx5e_tc_int_port_supported(esw))
+ return NULL;
+
+ int_port_priv = kzalloc(sizeof(*int_port_priv), GFP_KERNEL);
+ if (!int_port_priv)
+ return NULL;
+
+ mapping_id = mlx5_query_nic_system_image_guid(priv->mdev);
+
+ int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_INT_PORT,
+ sizeof(u32) * 2,
+ (1 << ESW_VPORT_BITS) - 1, true);
+ if (IS_ERR(int_port_priv->metadata_mapping)) {
+ mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping of int port offload, err=%ld\n",
+ PTR_ERR(int_port_priv->metadata_mapping));
+ goto err_mapping;
+ }
+
+ int_port_priv->dev = priv->mdev;
+ mutex_init(&int_port_priv->int_ports_lock);
+ INIT_LIST_HEAD(&int_port_priv->int_ports);
+
+ return int_port_priv;
+
+err_mapping:
+ kfree(int_port_priv);
+
+ return NULL;
+}
+
+void
+mlx5e_tc_int_port_cleanup(struct mlx5e_tc_int_port_priv *priv)
+{
+ if (!priv)
+ return;
+
+ mutex_destroy(&priv->int_ports_lock);
+ mapping_destroy(priv->metadata_mapping);
+ kfree(priv);
+}
+
+/* Int port rx rules reside in ul rep rx tables.
+ * It is possible the ul rep will go down while there are
+ * still int port rules in its rx table, so proper cleanup
+ * is required to free those resources.
+ */
+void mlx5e_tc_int_port_init_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_tc_int_port_priv *ppriv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ ppriv = uplink_priv->int_port_priv;
+
+ if (!ppriv)
+ return;
+
+ mutex_lock(&ppriv->int_ports_lock);
+ ppriv->ul_rep_rx_ready = true;
+ mutex_unlock(&ppriv->int_ports_lock);
+}
+
+void mlx5e_tc_int_port_cleanup_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_tc_int_port_priv *ppriv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_tc_int_port *int_port;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ ppriv = uplink_priv->int_port_priv;
+
+ if (!ppriv)
+ return;
+
+ mutex_lock(&ppriv->int_ports_lock);
+
+ ppriv->ul_rep_rx_ready = false;
+
+ list_for_each_entry(int_port, &ppriv->int_ports, list) {
+ if (!IS_ERR_OR_NULL(int_port->rx_rule))
+ mlx5_del_flow_rules(int_port->rx_rule);
+
+ int_port->rx_rule = NULL;
+ }
+
+ mutex_unlock(&ppriv->int_ports_lock);
+}
+
+bool
+mlx5e_tc_int_port_dev_fwd(struct mlx5e_tc_int_port_priv *priv,
+ struct sk_buff *skb, u32 int_vport_metadata,
+ bool *forward_tx)
+{
+ enum mlx5e_tc_int_port_type fwd_type;
+ struct mlx5e_tc_int_port *int_port;
+ struct net_device *dev;
+ int ifindex;
+
+ if (!priv)
+ return false;
+
+ rcu_read_lock();
+ int_port = mlx5e_int_port_get_from_metadata(priv, int_vport_metadata);
+ if (!int_port) {
+ rcu_read_unlock();
+ mlx5_core_dbg(priv->dev, "Unable to find int port with metadata 0x%.8x\n",
+ int_vport_metadata);
+ return false;
+ }
+
+ ifindex = int_port->ifindex;
+ fwd_type = int_port->type;
+ rcu_read_unlock();
+
+ dev = dev_get_by_index(&init_net, ifindex);
+ if (!dev) {
+ mlx5_core_dbg(priv->dev,
+ "Couldn't find internal port device with ifindex: %d\n",
+ ifindex);
+ return false;
+ }
+
+ skb->skb_iif = dev->ifindex;
+ skb->dev = dev;
+
+ if (fwd_type == MLX5E_TC_INT_PORT_INGRESS) {
+ skb->pkt_type = PACKET_HOST;
+ skb_set_redirected(skb, true);
+ *forward_tx = false;
+ } else {
+ skb_reset_network_header(skb);
+ skb_push_rcsum(skb, skb->mac_len);
+ skb_set_redirected(skb, false);
+ *forward_tx = true;
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h
new file mode 100644
index 000000000000..e72c79d308d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_INT_PORT_H__
+#define __MLX5_EN_TC_INT_PORT_H__
+
+#include "en.h"
+
+struct mlx5e_tc_int_port;
+struct mlx5e_tc_int_port_priv;
+
+enum mlx5e_tc_int_port_type {
+ MLX5E_TC_INT_PORT_INGRESS,
+ MLX5E_TC_INT_PORT_EGRESS,
+};
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+bool mlx5e_tc_int_port_supported(const struct mlx5_eswitch *esw);
+
+struct mlx5e_tc_int_port_priv *
+mlx5e_tc_int_port_init(struct mlx5e_priv *priv);
+void
+mlx5e_tc_int_port_cleanup(struct mlx5e_tc_int_port_priv *priv);
+
+void mlx5e_tc_int_port_init_rep_rx(struct mlx5e_priv *priv);
+void mlx5e_tc_int_port_cleanup_rep_rx(struct mlx5e_priv *priv);
+
+bool
+mlx5e_tc_int_port_dev_fwd(struct mlx5e_tc_int_port_priv *priv,
+ struct sk_buff *skb, u32 int_vport_metadata,
+ bool *forward_tx);
+struct mlx5e_tc_int_port *
+mlx5e_tc_int_port_get(struct mlx5e_tc_int_port_priv *priv,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type);
+void
+mlx5e_tc_int_port_put(struct mlx5e_tc_int_port_priv *priv,
+ struct mlx5e_tc_int_port *int_port);
+
+u32 mlx5e_tc_int_port_get_metadata(struct mlx5e_tc_int_port *int_port);
+u32 mlx5e_tc_int_port_get_metadata_for_match(struct mlx5e_tc_int_port *int_port);
+int mlx5e_tc_int_port_get_flow_source(struct mlx5e_tc_int_port *int_port);
+#else /* CONFIG_MLX5_CLS_ACT */
+static inline u32
+mlx5e_tc_int_port_get_metadata_for_match(struct mlx5e_tc_int_port *int_port)
+{
+ return 0;
+}
+
+static inline int
+mlx5e_tc_int_port_get_flow_source(struct mlx5e_tc_int_port *int_port)
+{
+ return 0;
+}
+
+static inline bool mlx5e_tc_int_port_supported(const struct mlx5_eswitch *esw)
+{
+ return false;
+}
+
+static inline void mlx5e_tc_int_port_init_rep_rx(struct mlx5e_priv *priv) {}
+static inline void mlx5e_tc_int_port_cleanup_rep_rx(struct mlx5e_priv *priv) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+#endif /* __MLX5_EN_TC_INT_PORT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index a3e43e898a56..31b4e39be2d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -4,6 +4,7 @@
#include "en_tc.h"
#include "post_act.h"
#include "mlx5_core.h"
+#include "fs_core.h"
struct mlx5e_post_act {
enum mlx5_flow_namespace_type ns_type;
@@ -28,16 +29,14 @@ struct mlx5e_post_act *
mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
enum mlx5_flow_namespace_type ns_type)
{
+ enum fs_flow_table_type table_type = ns_type == MLX5_FLOW_NAMESPACE_FDB ?
+ FS_FT_FDB : FS_FT_NIC_RX;
struct mlx5e_post_act *post_act;
int err;
- if (ns_type == MLX5_FLOW_NAMESPACE_FDB &&
- !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ignore_flow_level)) {
- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
- err = -EOPNOTSUPP;
- goto err_check;
- } else if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
+ if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index 6552ecee3f9b..df6888c4793c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -509,13 +509,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
if (IS_ERR_OR_NULL(tc_psample))
return ERR_PTR(-EOPNOTSUPP);
- /* If slow path flag is set, eg. when the neigh is invalid for encap,
- * don't offload sample action.
- */
- esw = tc_psample->esw;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
- return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
-
sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
if (!sample_flow)
return ERR_PTR(-ENOMEM);
@@ -527,6 +520,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
* Only match the fte id instead of the same match in the
* original flow table.
*/
+ esw = tc_psample->esw;
if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) ||
attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
struct mlx5_flow_table *ft;
@@ -602,7 +596,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
}
sample_flow->pre_attr = pre_attr;
- return sample_flow->post_rule;
+ return sample_flow->pre_rule;
err_pre_offload_rule:
kfree(pre_attr);
@@ -613,7 +607,7 @@ err_sample_restore:
err_obj_id:
sampler_put(tc_psample, sample_flow->sampler);
err_sampler:
- if (!post_act_handle)
+ if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
err_post_rule:
if (post_act_handle)
@@ -628,45 +622,26 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5e_sample_flow *sample_flow;
- struct mlx5_vport_tbl_attr tbl_attr;
struct mlx5_eswitch *esw;
if (IS_ERR_OR_NULL(tc_psample))
return;
- /* If slow path flag is set, sample action is not offloaded.
- * No need to delete sample rule.
- */
- esw = tc_psample->esw;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
- mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
- return;
- }
-
/* The following delete order can't be changed; otherwise,
* we will hit FW syndromes.
*/
+ esw = tc_psample->esw;
sample_flow = attr->sample_attr->sample_flow;
mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);
- if (!sample_flow->post_act_handle)
- mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule,
- sample_flow->post_attr);
sample_restore_put(tc_psample, sample_flow->restore);
mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id);
sampler_put(tc_psample, sample_flow->sampler);
- if (sample_flow->post_act_handle) {
+ if (sample_flow->post_act_handle)
mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
- } else {
- tbl_attr.chain = attr->chain;
- tbl_attr.prio = attr->prio;
- tbl_attr.vport = esw_attr->in_rep->vport;
- tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
- mlx5_esw_vporttbl_put(esw, &tbl_attr);
- kfree(sample_flow->post_attr);
- }
+ else
+ del_post_rule(esw, sample_flow, attr);
kfree(sample_flow->pre_attr);
kfree(sample_flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
index db0146df9b30..9ef8a49d7801 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
@@ -19,6 +19,8 @@ struct mlx5e_sample_attr {
struct mlx5e_sample_flow *sample_flow;
};
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+
void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj);
struct mlx5_flow_handle *
@@ -38,4 +40,29 @@ mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act);
void
mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample);
+#else /* CONFIG_MLX5_TC_SAMPLE */
+
+static inline struct mlx5_flow_handle *
+mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ u32 tunnel_id)
+{ return ERR_PTR(-EOPNOTSUPP); }
+
+static inline void
+mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr) {}
+
+static inline struct mlx5e_tc_psample *
+mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act)
+{ return ERR_PTR(-EOPNOTSUPP); }
+
+static inline void
+mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) {}
+
+static inline void
+mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) {}
+
+#endif /* CONFIG_MLX5_TC_SAMPLE */
#endif /* __MLX5_EN_TC_SAMPLE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 6c949abcd2e1..c1c6e74c79c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -889,7 +889,7 @@ mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
return ERR_PTR(-ENOMEM);
counter->is_shared = false;
- counter->counter = mlx5_fc_create(ct_priv->dev, true);
+ counter->counter = mlx5_fc_create_ex(ct_priv->dev, true);
if (IS_ERR(counter->counter)) {
ct_dbg("Failed to create counter for ct entry");
ret = PTR_ERR(counter->counter);
@@ -2039,25 +2039,36 @@ mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
static int
mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
- struct mlx5e_post_act *post_act,
- const char **err_msg)
+ struct mlx5e_post_act *post_act)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ const char *err_msg = NULL;
+ int err = 0;
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* cannot restore chain ID on HW miss */
- *err_msg = "tc skb extension missing";
- return -EOPNOTSUPP;
+ err_msg = "tc skb extension missing";
+ err = -EOPNOTSUPP;
+ goto out_err;
#endif
if (IS_ERR_OR_NULL(post_act)) {
- *err_msg = "tc ct offload not supported, post action is missing";
- return -EOPNOTSUPP;
+ /* ignore_flow_level isn't supported by default for VFs, so post_act
+ * won't be available either. Skip showing the error message.
+ */
+ if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ err_msg = "post action is missing";
+ err = -EOPNOTSUPP;
+ goto out_err;
}
if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
- return mlx5_tc_ct_init_check_esw_support(esw, err_msg);
- return 0;
+ err = mlx5_tc_ct_init_check_esw_support(esw, &err_msg);
+
+out_err:
+ if (err && err_msg)
+ netdev_dbg(priv->netdev, "tc ct offload not supported, %s\n", err_msg);
+ return err;
}
#define INIT_ERR_PREFIX "tc ct offload init failed"
@@ -2070,16 +2081,13 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
{
struct mlx5_tc_ct_priv *ct_priv;
struct mlx5_core_dev *dev;
- const char *msg;
u64 mapping_id;
int err;
dev = priv->mdev;
- err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act, &msg);
- if (err) {
- mlx5_core_warn(dev, "tc ct offload not supported, %s\n", msg);
+ err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act);
+ if (err)
goto err_support;
- }
ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
if (!ct_priv)
@@ -2127,12 +2135,21 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
ct_priv->post_act = post_act;
mutex_init(&ct_priv->control_lock);
- rhashtable_init(&ct_priv->zone_ht, &zone_params);
- rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
- rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
+ if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
+ goto err_ct_zone_ht;
+ if (rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params))
+ goto err_ct_tuples_ht;
+ if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
+ goto err_ct_tuples_nat_ht;
return ct_priv;
+err_ct_tuples_nat_ht:
+ rhashtable_destroy(&ct_priv->ct_tuples_ht);
+err_ct_tuples_ht:
+ rhashtable_destroy(&ct_priv->zone_ht);
+err_ct_zone_ht:
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
err_ct_nat_tbl:
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
err_ct_tbl:
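rhashtable_init() can fail (allocating the initial bucket table may return -ENOMEM), which is why the hunk above stops ignoring its return value and adds an unwind ladder that releases resources in reverse order of creation; note that falling past err_ct_zone_ht also tears down the ct_nat chains table created earlier. The bare shape of the pattern, as an illustrative sketch:

	if (rhashtable_init(&ht_a, &params_a))
		goto err_a;
	if (rhashtable_init(&ht_b, &params_b))
		goto err_b;
	return 0;

err_b:
	rhashtable_destroy(&ht_a);	/* undo only the steps that succeeded */
err_a:
	return -ENOMEM;			/* sketch; real code propagates the cause */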
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index d1599b7b944b..8f64f2c8895a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -173,4 +173,6 @@ void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow);
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
+struct mlx5e_tc_int_port_priv *
+mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index b4e986818794..a5e450973225 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -10,6 +10,8 @@
#include "en_tc.h"
#include "rep/tc.h"
#include "rep/neigh.h"
+#include "lag/lag.h"
+#include "lag/mp.h"
struct mlx5e_tc_tun_route_attr {
struct net_device *out_dev;
@@ -81,7 +83,8 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
*/
*route_dev = dev;
if (!netdev_port_same_parent_id(priv->netdev, real_dev) ||
- dst_is_lag_dev || is_vlan_dev(*route_dev))
+ dst_is_lag_dev || is_vlan_dev(*route_dev) ||
+ netif_is_ovs_master(*route_dev))
*out_dev = uplink_dev;
else if (mlx5e_eswitch_rep(dev) &&
mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
@@ -118,6 +121,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
+ } else {
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+
+ if (tunnel && tunnel->get_remote_ifindex)
+ attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev);
}
rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
@@ -435,12 +443,15 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_tc_tun_route_attr *attr)
{
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
struct net_device *route_dev;
struct net_device *out_dev;
struct dst_entry *dst;
struct neighbour *n;
int ret;
+ if (tunnel && tunnel->get_remote_ifindex)
+ attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev);
dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
NULL);
if (IS_ERR(dst))
@@ -700,6 +711,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
struct mlx5_flow_attr *flow_attr)
{
struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
+ struct mlx5e_tc_int_port *int_port;
TC_TUN_ROUTE_ATTR_INIT(attr);
u16 vport_num;
int err = 0;
@@ -724,17 +736,25 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
if (err)
return err;
- if (attr.route_dev->netdev_ops != &mlx5e_netdev_ops ||
- !mlx5e_tc_is_vf_tunnel(attr.out_dev, attr.route_dev))
- goto out;
-
- err = mlx5e_tc_query_route_vport(attr.out_dev, attr.route_dev, &vport_num);
- if (err)
- goto out;
+ if (attr.route_dev->netdev_ops == &mlx5e_netdev_ops &&
+ mlx5e_tc_is_vf_tunnel(attr.out_dev, attr.route_dev)) {
+ err = mlx5e_tc_query_route_vport(attr.out_dev, attr.route_dev, &vport_num);
+ if (err)
+ goto out;
- esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
- misc_parameters.vxlan_vni);
- esw_attr->rx_tun_attr->decap_vport = vport_num;
+ esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
+ misc_parameters.vxlan_vni);
+ esw_attr->rx_tun_attr->decap_vport = vport_num;
+ } else if (netif_is_ovs_master(attr.route_dev)) {
+ int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
+ attr.route_dev->ifindex,
+ MLX5E_TC_INT_PORT_INGRESS);
+ if (IS_ERR(int_port)) {
+ err = PTR_ERR(int_port);
+ goto out;
+ }
+ esw_attr->int_port = int_port;
+ }
out:
if (flow_attr->tun_ip_version == 4)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 9350ca05ce65..aa092eaeaec3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -51,6 +51,7 @@ struct mlx5e_tc_tunnel {
void *headers_v);
bool (*encap_info_equal)(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b);
+ int (*get_remote_ifindex)(struct net_device *mirred_dev);
};
extern struct mlx5e_tc_tunnel vxlan_tunnel;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 1c44c6c345f5..660cca73c36c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -13,6 +13,30 @@ enum {
MLX5E_ROUTE_ENTRY_VALID = BIT(0),
};
+static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct mlx5e_encap_entry *e,
+ int out_index)
+{
+ struct net_device *route_dev;
+ int err = 0;
+
+ route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
+
+ if (!route_dev || !netif_is_ovs_master(route_dev))
+ goto out;
+
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
+ MLX5E_TC_INT_PORT_EGRESS,
+ &attr->action, out_index);
+
+out:
+ if (route_dev)
+ dev_put(route_dev);
+
+ return err;
+}
+
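mlx5e_set_int_port_tunnel() above follows the usual dev_get_by_index()/dev_put() discipline: the lookup returns the net_device with a reference held (or NULL), so every exit path, including the non-OVS early-out, must drop that reference. Reduced to its skeleton (illustrative only):

	struct net_device *dev = dev_get_by_index(net, ifindex);

	if (!dev)
		return 0;	/* no such device, nothing to release */
	/* ... use dev ... */
	dev_put(dev);		/* balance the reference taken by the lookup */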
struct mlx5e_route_key {
int ip_version;
union {
@@ -809,6 +833,17 @@ attach_flow:
if (err)
goto out_err;
+ err = mlx5e_set_int_port_tunnel(priv, attr, e, out_index);
+ if (err == -EOPNOTSUPP) {
+ /* If device doesn't support int port offload,
+ * redirect to uplink vport.
+ */
+ mlx5_core_dbg(priv->mdev, "attaching int port as encap dev not supported, using uplink\n");
+ err = 0;
+ } else if (err) {
+ goto out_err;
+ }
+
flow->encaps[out_index].e = e;
list_add(&flow->encaps[out_index].list, &e->flows);
flow->encaps[out_index].index = out_index;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 4267f3a1059e..fd07c4cbfd1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -141,6 +141,14 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return 0;
}
+static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
+{
+ const struct vxlan_dev *vxlan = netdev_priv(mirred_dev);
+ const struct vxlan_rdst *dst = &vxlan->default_dst;
+
+ return dst->remote_ifindex;
+}
+
struct mlx5e_tc_tunnel vxlan_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN,
.match_level = MLX5_MATCH_L4,
@@ -151,4 +159,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
.encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
+ .get_remote_ifindex = mlx5e_tc_tun_get_remote_ifindex,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index de936dc4bc48..da169b816665 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -70,24 +70,30 @@ void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
MLX5_SET(tirc, tirc, tunneled_offload_en, inner_ft_support);
}
-void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
- const struct mlx5e_lro_param *lro_param)
+void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_packet_merge_param *pkt_merge_param)
{
void *tirc = mlx5e_tir_builder_get_tirc(builder);
const unsigned int rough_max_l2_l3_hdr_sz = 256;
if (builder->modify)
- MLX5_SET(modify_tir_in, builder->in, bitmask.lro, 1);
-
- if (!lro_param->enabled)
- return;
-
- MLX5_SET(tirc, tirc, lro_enable_mask,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
- MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout);
+ MLX5_SET(modify_tir_in, builder->in, bitmask.packet_merge, 1);
+
+ switch (pkt_merge_param->type) {
+ case MLX5E_PACKET_MERGE_LRO:
+ MLX5_SET(tirc, tirc, packet_merge_mask,
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO |
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
+ break;
+ case MLX5E_PACKET_MERGE_SHAMPO:
+ MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO);
+ break;
+ default:
+ break;
+ }
}
static int mlx5e_hfunc_to_hw(u8 hfunc)
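For the LRO case above, lro_max_ip_payload_size is programmed in 256-byte units: with the default 64 KB session size (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ), (65536 - 256) >> 8 = 255, i.e. 255 * 256 = 65280 bytes of IP payload, reserving rough_max_l2_l3_hdr_sz for headers. A sketch of how the builder might be driven for a packet-merge change (assumed usage; the driver reaches this through the mlx5e_rx_res_* layer, and mlx5e_tir_modify() is named here only for illustration):

	struct mlx5e_tir_builder *builder = mlx5e_tir_builder_alloc(true); /* modify */

	if (!builder)
		return -ENOMEM;
	mlx5e_tir_builder_build_packet_merge(builder, &params->packet_merge);
	/* ... mlx5e_tir_modify(tir, builder) ... */
	mlx5e_tir_builder_free(builder);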
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
index e45149a78ed9..857a84bcd53a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -18,7 +18,7 @@ struct mlx5e_rss_params_traffic_type {
};
struct mlx5e_tir_builder;
-struct mlx5e_lro_param;
+struct mlx5e_packet_merge_param;
struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify);
void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder);
@@ -27,8 +27,8 @@ void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder);
void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn);
void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
u32 rqtn, bool inner_ft_support);
-void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
- const struct mlx5e_lro_param *lro_param);
+void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_packet_merge_param *pkt_merge_param);
void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const struct mlx5e_rss_params_hash *rss_hash,
const struct mlx5e_rss_params_traffic_type *rss_tt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index d54607a42740..a55b066746cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -137,7 +137,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
t->tstamp = &priv->tstamp;
t->pdev = mlx5_core_dma_dev(priv->mdev);
t->netdev = priv->netdev;
- t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
+ t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
t->stats = &priv->trap_stats.ch;
netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 055c3bc23733..4cdf8e5b24c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -36,6 +36,7 @@ ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_
enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX,
+ MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
#ifdef CONFIG_MLX5_EN_TLS
MLX5E_ICOSQ_WQE_UMR_TLS,
MLX5E_ICOSQ_WQE_SET_PSV_TLS,
@@ -166,6 +167,10 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+struct mlx5e_shampo_umr {
+ u16 len;
+};
+
struct mlx5e_icosq_wqe_info {
u8 wqe_type;
u8 num_wqebbs;
@@ -175,6 +180,7 @@ struct mlx5e_icosq_wqe_info {
struct {
struct mlx5e_rq *rq;
} umr;
+ struct mlx5e_shampo_umr shampo;
#ifdef CONFIG_MLX5_EN_TLS
struct {
struct mlx5e_ktls_offload_context_rx *priv_rx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 33de8f0092a6..fb5397324aa4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
* Pkt: MAC IP ESP IP L4
*
* Transport Mode:
- * SWP: OutL3 InL4
- * InL3
+ * SWP: OutL3 OutL4
* Pkt: MAC IP ESP L4
*
* Tunnel(VXLAN TCP/UDP) over Transport Mode
@@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
return;
if (!xo->inner_ipproto) {
- eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- if (xo->proto == IPPROTO_UDP)
+ switch (xo->proto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ /* IP | ESP | TCP */
+ eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+ switch (xo->inner_ipproto) {
+ case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- return;
- }
-
- /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
- switch (xo->inner_ipproto) {
- case IPPROTO_UDP:
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- fallthrough;
- case IPPROTO_TCP:
- eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- break;
- default:
- break;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset =
+ (skb->csum_start + skb->head - skb->data) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ break;
+ default:
+ break;
+ }
}
- return;
}
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
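All the swp_*_offset fields programmed above are expressed in 2-byte units, hence the recurring "/ 2". A worked example with common header sizes: a 14-byte Ethernet header plus a 20-byte IPv4 header puts the transport header at byte offset 34, so the WQE field carries 34 / 2 = 17.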
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 5120a59361e6..b98db50c3418 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -127,6 +127,25 @@ out_disable:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
+static inline bool
+mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+
+ if (!mlx5e_ipsec_eseg_meta(eseg))
+ return false;
+
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (xo->inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ sq->stats->csum_partial_inner++;
+ }
+
+ return true;
+}
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
@@ -143,6 +162,13 @@ static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false;
static inline netdev_features_t
mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
{ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }
+
+static inline bool
+mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5E_IPSEC_RXTX_H__ */
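The new mlx5e_ipsec_txwqe_build_eseg_csum() helper returns false when the WQE carries no IPsec metadata, letting the TX path fall back to the ordinary checksum setup. A hypothetical call-site sketch (the fallback helper name is assumed for illustration):

	if (!mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg))
		mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg); /* regular path */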
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 84eb7201c142..c0f409c195bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -47,7 +47,7 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- struct mlx5_core_mkey *mkey)
+ u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
@@ -108,7 +108,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
return 0;
err_destroy_mkey:
- mlx5_core_destroy_mkey(mdev, &res->mkey);
+ mlx5_core_destroy_mkey(mdev, res->mkey);
err_dealloc_transport_domain:
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
err_dealloc_pd:
@@ -121,7 +121,7 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
mlx5_free_bfreg(mdev, &res->bfreg);
- mlx5_core_destroy_mkey(mdev, &res->mkey);
+ mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
memset(res, 0, sizeof(*res));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 9d451b8ee467..c2ea5fad48dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -267,9 +267,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
break;
case ETH_SS_TEST:
- for (i = 0; i < mlx5e_self_test_num(priv); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx5e_self_tests[i]);
+ mlx5e_self_test_fill_strings(priv, data);
break;
case ETH_SS_STATS:
@@ -1902,6 +1900,11 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
return -EINVAL;
}
+ if (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
+ netdev_warn(priv->netdev, "Can't set CQE compression with HW-GRO, disable it first.\n");
+ return -EINVAL;
+ }
+
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
if (rx_filter)
@@ -1954,8 +1957,8 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return -EOPNOTSUPP;
if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
return -EINVAL;
- } else if (priv->channels.params.lro_en) {
- netdev_warn(netdev, "Can't set legacy RQ with LRO, disable LRO first\n");
+ } else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+ netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n");
return -EINVAL;
}
@@ -2139,12 +2142,14 @@ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return 0;
}
- return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
+ return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- return mlx5e_ethtool_set_rxnfc(dev, cmd);
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_rxnfc(priv, cmd);
}
static int query_port_status_opcode(struct mlx5_core_dev *mdev, u32 *status_opcode)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index c06b4b938ae7..aeff1d972a46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -71,12 +71,12 @@ struct mlx5e_l2_hash_node {
bool mpfs;
};
-static inline int mlx5e_hash_l2(u8 *addr)
+static inline int mlx5e_hash_l2(const u8 *addr)
{
return addr[5];
}
-static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
{
struct mlx5e_l2_hash_node *hn;
int ix = mlx5e_hash_l2(addr);
@@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
struct mlx5e_flow_table *ft;
int err;
- priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
- if (!priv->fs.vlan)
- return -ENOMEM;
-
ft = &priv->fs.vlan->ft;
ft->num_groups = 0;
@@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
- if (IS_ERR(ft->t)) {
- err = PTR_ERR(ft->t);
- goto err_free_t;
- }
+ if (IS_ERR(ft->t))
+ return PTR_ERR(ft->t);
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) {
@@ -1221,9 +1215,6 @@ err_free_g:
kfree(ft->g);
err_destroy_vlan_table:
mlx5_destroy_flow_table(ft->t);
-err_free_t:
- kvfree(priv->fs.vlan);
- priv->fs.vlan = NULL;
return err;
}
@@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rules(priv);
mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
- kvfree(priv->fs.vlan);
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
@@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
}
+
+int mlx5e_fs_init(struct mlx5e_priv *priv)
+{
+ priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+ if (!priv->fs.vlan)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+{
+ kvfree(priv->fs.vlan);
+ priv->fs.vlan = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 03693fa74a70..ad0d234632a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -411,7 +411,7 @@ static int flow_get_tirn(struct mlx5e_priv *priv,
u32 rss_context, u32 *tirn)
{
if (fs->flow_type & FLOW_RSS) {
- struct mlx5e_lro_param lro_param;
+ struct mlx5e_packet_merge_param pkt_merge_param;
struct mlx5e_rss *rss;
u32 flow_type;
int err;
@@ -426,8 +426,8 @@ static int flow_get_tirn(struct mlx5e_priv *priv,
if (tt < 0)
return -EINVAL;
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
- err = mlx5e_rss_obtain_tirn(rss, tt, &lro_param, false, tirn);
+ pkt_merge_param = priv->channels.params.packet_merge;
+ err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn);
if (err)
return err;
eth_rule->rss = rss;
@@ -937,9 +937,8 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
return 0;
}
-int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (cmd->cmd) {
@@ -960,10 +959,9 @@ int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
-int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (info->cmd) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 09c8b71b186c..65571593ec5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -218,6 +218,45 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
+static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
+{
+ rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
+ GFP_KERNEL, node);
+ if (!rq->mpwqe.shampo)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
+{
+ kvfree(rq->mpwqe.shampo);
+}
+
+static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
+ node);
+ if (!shampo->bitmap)
+ return -ENOMEM;
+
+ shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
+ sizeof(*shampo->info)),
+ GFP_KERNEL, node);
+ if (!shampo->info) {
+ kvfree(shampo->bitmap);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+{
+ kvfree(rq->mpwqe.shampo->bitmap);
+ kvfree(rq->mpwqe.shampo->info);
+}
+
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
@@ -233,10 +272,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
return 0;
}
-static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
- u64 npages, u8 page_shift,
- struct mlx5_core_mkey *umr_mkey,
- dma_addr_t filler_addr)
+static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
+ u64 npages, u8 page_shift, u32 *umr_mkey,
+ dma_addr_t filler_addr)
{
struct mlx5_mtt *mtt;
int inlen;
@@ -284,12 +322,59 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
return err;
}
+static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
+ u64 nentries,
+ u32 *umr_mkey)
+{
+ int inlen;
+ void *mkc;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
+ mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(mkc, mkc, translations_octword_size, nentries);
+ MLX5_SET(mkc, mkc, length64, 1);
+ err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
- return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey,
- rq->wqe_overflow.addr);
+ return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT,
+ &rq->umr_mkey, rq->wqe_overflow.addr);
+}
+
+static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq *rq)
+{
+ u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+
+ if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
+ mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
+ max_klm_size, rq->mpwqe.shampo->hd_per_wq);
+ return -EINVAL;
+ }
+ return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+ &rq->mpwqe.shampo->mkey);
}
static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
@@ -403,6 +488,65 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}
+static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq *rq,
+ u32 *pool_size,
+ int node)
+{
+ void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ int wq_size;
+ int err;
+
+ if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ return 0;
+ err = mlx5e_rq_shampo_hd_alloc(rq, node);
+ if (err)
+ goto out;
+ rq->mpwqe.shampo->hd_per_wq =
+ mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+ err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
+ if (err)
+ goto err_shampo_hd;
+ err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
+ if (err)
+ goto err_shampo_info;
+ rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
+ if (!rq->hw_gro_data) {
+ err = -ENOMEM;
+ goto err_hw_gro_data;
+ }
+ rq->mpwqe.shampo->key =
+ cpu_to_be32(rq->mpwqe.shampo->mkey);
+ rq->mpwqe.shampo->hd_per_wqe =
+ mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+ wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+ *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
+ MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+ return 0;
+
+err_hw_gro_data:
+ mlx5e_rq_shampo_hd_info_free(rq);
+err_shampo_info:
+ mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
+err_shampo_hd:
+ mlx5e_rq_shampo_hd_free(rq);
+out:
+ return err;
+}
+
+static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
+{
+ if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ return;
+
+ kvfree(rq->hw_gro_data);
+ mlx5e_rq_shampo_hd_info_free(rq);
+ mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
+ mlx5e_rq_shampo_hd_free(rq);
+}
+
static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_param *rqp,
@@ -455,11 +599,16 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_drop_page;
- rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
+ rq->mkey_be = cpu_to_be32(rq->umr_mkey);
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
goto err_rq_mkey;
+
+ err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
+ if (err)
+ goto err_free_by_rq_type;
+
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
@@ -487,7 +636,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (err)
goto err_rq_frags;
- rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key);
+ rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
}
if (xsk) {
@@ -512,14 +661,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (IS_ERR(rq->page_pool)) {
err = PTR_ERR(rq->page_pool);
rq->page_pool = NULL;
- goto err_free_by_rq_type;
+ goto err_free_shampo;
}
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
}
if (err)
- goto err_free_by_rq_type;
+ goto err_free_shampo;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -528,8 +677,10 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
u32 byte_count =
rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
u64 dma_offset = mlx5e_get_mpwqe_offset(i);
+ u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
+ 0 : rq->buff.headroom;
- wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
+ wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
wqe->data[0].byte_count = cpu_to_be32(byte_count);
wqe->data[0].lkey = rq->mkey_be;
} else {
@@ -569,12 +720,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
return 0;
+err_free_shampo:
+ mlx5e_rq_free_shampo(rq);
err_free_by_rq_type:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
err_rq_mkey:
- mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+ mlx5_core_destroy_mkey(mdev, rq->umr_mkey);
err_rq_drop_page:
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
@@ -607,8 +760,9 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
- mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
+ mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey);
mlx5e_free_mpwqe_rq_drop_page(rq);
+ mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
kvfree(rq->wqe.frags);
@@ -662,6 +816,12 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ MLX5_SET(wq, wq, log_headers_buffer_entry_num,
+ order_base_2(rq->mpwqe.shampo->hd_per_wq));
+ MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
+ }
+
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
@@ -801,6 +961,15 @@ void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ u16 len;
+
+ len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
+ (rq->mpwqe.shampo->hd_per_wq - 1);
+ mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
+ rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
+ }
+
rq->mpwqe.actual_wq_head = wq->head;
rq->mpwqe.umr_in_progress = 0;
rq->mpwqe.umr_completed = 0;
@@ -826,6 +995,10 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
mlx5_wq_ll_pop(wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
+
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
+ 0, true);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
@@ -845,6 +1018,9 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5_core_dev *mdev = rq->mdev;
int err;
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+ __set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
+
err = mlx5e_alloc_rq(params, xsk, param, node, rq);
if (err)
return err;
@@ -930,9 +1106,10 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+ size_t size;
- xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq);
+ xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
if (!xdpi_fifo->xi)
return -ENOMEM;
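Both fifo allocations in this file now size the buffer with array_size() from <linux/overflow.h>, which saturates to SIZE_MAX when the multiplication overflows, so an oversized element count makes the allocation fail cleanly instead of silently under-allocating:

	size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq); /* SIZE_MAX on overflow */
	xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);   /* then returns NULL */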
@@ -946,10 +1123,11 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ size_t size;
int err;
- sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
+ sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
if (!sq->db.wqe_info)
return -ENOMEM;
@@ -1298,7 +1476,8 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev,
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
@@ -1308,10 +1487,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
if (err)
return err;
- if (qos_queue_group_id)
- sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
- else
- sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+ sq->stats = sq_stats;
csp.tisn = tisn;
csp.tis_lst_sz = 1;
@@ -1705,6 +1881,36 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
mlx5e_close_cq(&c->sq[tc].cq);
}
+static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
+{
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
+ return tc;
+
+ WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
+ return -ENOENT;
+}
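mlx5e_mqprio_txq_to_tc() above inverts the tc_to_txq table; because txq is unsigned, a txq below a TC's offset wraps around on subtraction and safely fails the "< count" test. Worked example: with two TCs of four queues each (offsets 0 and 4), txq 5 gives 5 - 4 = 1 < 4 and maps to TC 1, while the TC 0 probe fails since 5 - 0 = 5 is not below 4.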
+
+static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
+ u32 *hw_id)
+{
+ int tc;
+
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
+ !params->mqprio.channel.rl) {
+ *hw_id = 0;
+ return 0;
+ }
+
+ tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
+ if (tc < 0)
+ return tc;
+
+ return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+}
+
static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@ -1713,9 +1919,16 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
+ u32 qos_queue_group_id;
+
+ err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
+ if (err)
+ goto err_close_sqs;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
+ params, &cparam->txq_sq, &c->sq[tc], tc,
+ qos_queue_group_id,
+ &c->priv->channel_stats[c->ix].sq[tc]);
if (err)
goto err_close_sqs;
}
@@ -1991,7 +2204,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->cpu = cpu;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
@@ -2185,17 +2398,14 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
chs->num = 0;
}
-static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
+static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv)
{
struct mlx5e_rx_res *res = priv->rx_res;
- struct mlx5e_lro_param lro_param;
-
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
- return mlx5e_rx_res_lro_set_param(res, &lro_param);
+ return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge);
}
-static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 mtu)
@@ -2340,6 +2550,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
+ if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+ priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
+ }
return 0;
@@ -2901,15 +3118,18 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
{
params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc;
+ params->mqprio.channel.rl = NULL;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels);
}
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
- struct tc_mqprio_qopt *qopt)
+ struct tc_mqprio_qopt *qopt,
+ struct mlx5e_mqprio_rl *rl)
{
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
params->mqprio.num_tc = qopt->num_tc;
+ params->mqprio.channel.rl = rl;
mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
}
@@ -2969,9 +3189,13 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
netdev_err(netdev, "Min tx rate is not supported\n");
return -EINVAL;
}
+
if (mqprio->max_rate[i]) {
- netdev_err(netdev, "Max tx rate is not supported\n");
- return -EINVAL;
+ int err;
+
+ err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
+ if (err)
+ return err;
}
if (mqprio->qopt.offset[i] != agg_count) {
@@ -2990,11 +3214,22 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
return 0;
}
+static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+{
+ int tc;
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
+ if (mqprio->max_rate[tc])
+ return true;
+ return false;
+}
+
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
+ struct mlx5e_mqprio_rl *rl;
bool nch_changed;
int err;
@@ -3002,13 +3237,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
if (err)
return err;
+ rl = NULL;
+ if (mlx5e_mqprio_rate_limit(mqprio)) {
+ rl = mlx5e_mqprio_rl_alloc();
+ if (!rl)
+ return -ENOMEM;
+ err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
+ mqprio->max_rate);
+ if (err) {
+ mlx5e_mqprio_rl_free(rl);
+ return err;
+ }
+ }
+
new_params = priv->channels.params;
- mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
+ mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx;
- return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ if (err && rl) {
+ mlx5e_mqprio_rl_cleanup(rl);
+ mlx5e_mqprio_rl_free(rl);
+ }
+
+ return err;
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3226,7 +3480,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return -EADDRNOTAVAIL;
netif_addr_lock_bh(netdev);
- ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ eth_hw_addr_set(netdev, saddr->sa_data);
netif_addr_unlock_bh(netdev);
mlx5e_nic_set_rx_mode(priv);
@@ -3270,16 +3524,59 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
}
new_params = *cur_params;
- new_params.lro_en = enable;
- if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
- mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
- reset = false;
+ if (enable)
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO;
+ else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+ else
+ goto out;
+
+ if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO &&
+ new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) {
+ if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
+ reset = false;
+ }
+ }
+
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
+out:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static int set_feature_hw_gro(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_params new_params;
+ bool reset = true;
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->channels.params;
+
+ if (enable) {
+ if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+ netdev_warn(netdev, "Can't set HW-GRO when CQE compress is active\n");
+ err = -EINVAL;
+ goto out;
+ }
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
+ new_params.packet_merge.shampo.match_criteria_type =
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
+ new_params.packet_merge.shampo.alignment_granularity =
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
+ } else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+ } else {
+ goto out;
}
err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_modify_tirs_lro_ctx, NULL, reset);
+ mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
@@ -3458,6 +3755,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
@@ -3518,6 +3816,10 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
features &= ~NETIF_F_LRO;
}
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "Disabling HW-GRO, not supported in legacy RQ\n");
+ features &= ~NETIF_F_GRO_HW;
+ }
}
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
@@ -3606,7 +3908,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
goto out;
}
- if (params->lro_en)
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
reset = false;
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -4063,8 +4365,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
struct net_device *netdev = priv->netdev;
struct mlx5e_params new_params;
- if (priv->channels.params.lro_en) {
- netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+ netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
return -EINVAL;
}
@@ -4321,9 +4623,10 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
/* No XSK params: checking the availability of striding RQ in general. */
if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- params->lro_en = !slow_pci_heuristic(mdev);
+ params->packet_merge.type = slow_pci_heuristic(mdev) ?
+ MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
}
- params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+ params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
@@ -4351,13 +4654,17 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ u8 addr[ETH_ALEN];
- mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
- if (is_zero_ether_addr(netdev->dev_addr) &&
+ mlx5_query_mac_address(priv->mdev, addr);
+ if (is_zero_ether_addr(addr) &&
!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
eth_hw_addr_random(netdev);
mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
+ return;
}
+
+ eth_hw_addr_set(netdev, addr);
}
static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
@@ -4454,6 +4761,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ if (!!MLX5_CAP_GEN(mdev, shampo) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4504,6 +4815,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (fcs_enabled)
netdev->features &= ~NETIF_F_RXALL;
netdev->features &= ~NETIF_F_LRO;
+ netdev->features &= ~NETIF_F_GRO_HW;
netdev->features &= ~NETIF_F_RXFCS;
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
@@ -4578,6 +4890,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
+ err = mlx5e_fs_init(priv);
+ if (err) {
+ mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+ return err;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4595,13 +4913,13 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
+ mlx5e_fs_cleanup(priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
enum mlx5e_rx_res_features features;
- struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
@@ -4619,9 +4937,9 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
if (priv->channels.params.tunneled_offload_en)
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
- priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
@@ -4855,6 +5173,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->htb.qos_sq_stats[i]);
kvfree(priv->htb.qos_sq_stats);
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+
memset(priv, 0, sizeof(*priv));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 0684ac6699b2..e58a9ec42553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -53,6 +53,7 @@
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
+#include "en/tc/int_port.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -793,7 +794,6 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
@@ -808,9 +808,9 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return err;
}
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
- priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
@@ -858,12 +858,22 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
+ int err;
+
mlx5e_create_q_counters(priv);
- return mlx5e_init_rep_rx(priv);
+ err = mlx5e_init_rep_rx(priv);
+ if (err)
+ goto out;
+
+ mlx5e_tc_int_port_init_rep_rx(priv);
+
+out:
+ return err;
}
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
+ mlx5e_tc_int_port_cleanup_rep_rx(priv);
mlx5e_cleanup_rep_rx(priv);
mlx5e_destroy_q_counters(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 48a203a9e7d9..b01dacb6f527 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -58,6 +58,7 @@ struct mlx5e_neigh_update_table {
};
struct mlx5_tc_ct_priv;
+struct mlx5_tc_int_port_priv;
struct mlx5e_rep_bond;
struct mlx5e_tc_tun_encap;
struct mlx5e_post_act;
@@ -98,6 +99,9 @@ struct mlx5_rep_uplink_priv {
/* tc tunneling encapsulation private data */
struct mlx5e_tc_tun_encap *encap;
+
+ /* OVS internal port support */
+ struct mlx5e_tc_int_port_priv *int_port_priv;
};
struct mlx5e_rep_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 29a6586ef28d..96967b0a2441 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -33,9 +33,12 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <linux/bitmap.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
+#include <net/udp.h>
+#include <net/tcp.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
@@ -62,10 +65,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
.handle_rx_cqe = mlx5e_handle_rx_cqe,
.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
};
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
@@ -185,8 +190,9 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
- INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
- mlx5e_handle_rx_cqe, rq, &cqd->title);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
+ rq, &cqd->title);
}
mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
wq->cc = cqcc;
@@ -206,8 +212,9 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
mlx5e_read_title_slot(rq, wq, cc);
mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
mlx5e_decompress_cqe(rq, wq, cc);
- INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
- mlx5e_handle_rx_cqe, rq, &cqd->title);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
+ rq, &cqd->title);
cqd->mini_arr_idx++;
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
@@ -448,13 +455,13 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
struct mlx5e_dma_info *dma_info,
- int offset_from, u32 headlen)
+ int offset_from, int dma_offset, u32 headlen)
{
const void *from = page_address(dma_info->page) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
- dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
+ dma_sync_single_for_cpu(pdev, dma_info->addr + dma_offset, len,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, from, len);
}
@@ -494,6 +501,157 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
mlx5_wq_ll_update_db_record(wq);
}
+/* Return the length of the contiguous run of free (zero) bits in the bitmap
+ * starting at @first, capped at @len. The search may wrap past the end of
+ * the bitmap back to bit 0.
+ */
+static int bitmap_find_window(unsigned long *bitmap, int len,
+ int bitmap_size, int first)
+{
+ int next_one, count;
+
+ next_one = find_next_bit(bitmap, bitmap_size, first);
+ if (next_one == bitmap_size) {
+ if (bitmap_size - first >= len)
+ return len;
+ next_one = find_next_bit(bitmap, bitmap_size, 0);
+ count = next_one + bitmap_size - first;
+ } else {
+ count = next_one - first;
+ }
+
+ return min(len, count);
+}
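A worked example of the wrap-around case: with bitmap_size = 8, only bit 1 set, first = 6 and len = 4, the first find_next_bit() returns 8 (no set bit at or after 6) and 8 - 6 = 2 < len, so the search restarts at bit 0: next_one = 1, count = 1 + 8 - 6 = 3, and the function returns min(4, 3) = 3, i.e. the free slots 6, 7 and 0.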
+
+static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
+ __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs)
+{
+ memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms));
+ umr_wqe->ctrl.opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR);
+ umr_wqe->ctrl.umr_mkey = key;
+ umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
+ | MLX5E_KLM_UMR_DS_CNT(klm_len));
+ umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
+ umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len);
+ umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+}
+
+static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
+ struct mlx5e_icosq *sq,
+ u16 klm_entries, u16 index)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u16 entries, pi, header_offset, wqe_bbs, new_entries;
+ int i, err;
+ u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
+ struct page *page = shampo->last_page;
+ u64 addr = shampo->last_addr;
+ struct mlx5e_dma_info *dma_info;
+ struct mlx5e_umr_wqe *umr_wqe;
+ int headroom;
+
+ headroom = rq->buff.headroom;
+ new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
+ entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT);
+ wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
+ pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
+ umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
+ build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs);
+
+ for (i = 0; i < entries; i++, index++) {
+ dma_info = &shampo->info[index];
+ if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
+ MLX5_UMR_KLM_ALIGNMENT))
+ goto update_klm;
+ header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
+ MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+ if (!(header_offset & (PAGE_SIZE - 1))) {
+ err = mlx5e_page_alloc(rq, dma_info);
+ if (unlikely(err))
+ goto err_unmap;
+ addr = dma_info->addr;
+ page = dma_info->page;
+ } else {
+ dma_info->addr = addr + header_offset;
+ dma_info->page = page;
+ }
+
+update_klm:
+ umr_wqe->inline_klms[i].bcount =
+ cpu_to_be32(MLX5E_RX_MAX_HEAD);
+ umr_wqe->inline_klms[i].key = cpu_to_be32(lkey);
+ umr_wqe->inline_klms[i].va =
+ cpu_to_be64(dma_info->addr + headroom);
+ }
+
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
+ .num_wqebbs = wqe_bbs,
+ .shampo.len = new_entries,
+ };
+
+ shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1);
+ shampo->last_page = page;
+ shampo->last_addr = addr;
+ sq->pc += wqe_bbs;
+ sq->doorbell_cseg = &umr_wqe->ctrl;
+
+ return 0;
+
+err_unmap:
+ while (--i >= 0) {
+ /* step index back with wrap-around; index is unsigned */
+ index = index ? index - 1 : shampo->hd_per_wq - 1;
+ dma_info = &shampo->info[index];
+ if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
+ dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
+ mlx5e_page_release(rq, dma_info, true);
+ }
+ }
+ rq->stats->buff_alloc_err++;
+ return err;
+}
+
+static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u16 klm_entries, num_wqe, index, entries_before;
+ struct mlx5e_icosq *sq = rq->icosq;
+ int i, err, max_klm_entries, len;
+
+ max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
+ klm_entries = bitmap_find_window(shampo->bitmap,
+ shampo->hd_per_wqe,
+ shampo->hd_per_wq, shampo->pi);
+ if (!klm_entries)
+ return 0;
+
+ klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
+ index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT);
+ entries_before = shampo->hd_per_wq - index;
+
+ if (unlikely(entries_before < klm_entries))
+ num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) +
+ DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries);
+ else
+ num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries);
+
+ for (i = 0; i < num_wqe; i++) {
+ len = (klm_entries > max_klm_entries) ? max_klm_entries :
+ klm_entries;
+ if (unlikely(index + len > shampo->hd_per_wq))
+ len = shampo->hd_per_wq - index;
+ err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
+ if (unlikely(err))
+ return err;
+ index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
+ klm_entries -= len;
+ }
+
+ return 0;
+}
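
The WQE count above accounts for the fact that one UMR covers a linear range of KLM entries, so a run that wraps past the end of the header ring is split at the wrap point and each piece is rounded up to whole WQEs on its own. A worked example with illustrative sizes (these numbers are not hardware limits):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Assumed values: ring of 1024 header entries, at most 256 KLM
         * entries per UMR WQE, 300 entries requested at index 900. */
        int hd_per_wq = 1024, max_klm = 256, klm_entries = 300, index = 900;
        int entries_before = hd_per_wq - index; /* 124 entries until wrap */
        int num_wqe;

        if (entries_before < klm_entries)
            num_wqe = DIV_ROUND_UP(entries_before, max_klm) +
                      DIV_ROUND_UP(klm_entries - entries_before, max_klm);
        else
            num_wqe = DIV_ROUND_UP(klm_entries, max_klm);

        printf("num_wqe = %d\n", num_wqe); /* 1 + 1 = 2 */
        return 0;
    }
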
+
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -514,6 +672,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
goto err;
}
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ err = mlx5e_alloc_rx_hd_mpwqe(rq);
+ if (unlikely(err))
+ goto err;
+ }
+
pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
@@ -558,6 +722,44 @@ err:
return err;
}
+/* This function deallocates the SHAMPO header buffer.
+ * close == true specifies that we are in the middle of closing the RQ, so we
+ * walk over all entries and release only those still marked in use in the
+ * bitmap; otherwise we release exactly 'len' entries starting at 'start',
+ * wrapping around the end of the buffer if needed.
+ */
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ int hd_per_wq = shampo->hd_per_wq;
+ struct page *deleted_page = NULL;
+ struct mlx5e_dma_info *hd_info;
+ int i, index = start;
+
+ for (i = 0; i < len; i++, index++) {
+ if (index == hd_per_wq)
+ index = 0;
+
+ if (close && !test_bit(index, shampo->bitmap))
+ continue;
+
+ hd_info = &shampo->info[index];
+ hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
+ if (hd_info->page != deleted_page) {
+ deleted_page = hd_info->page;
+ mlx5e_page_release(rq, hd_info, false);
+ }
+ }
+
+ if (start + len > hd_per_wq) {
+ len -= hd_per_wq - start;
+ bitmap_clear(shampo->bitmap, start, hd_per_wq - start);
+ start = 0;
+ }
+
+ bitmap_clear(shampo->bitmap, start, len);
+}
+
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -629,6 +831,28 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
sq->cc = sqcc;
}
+static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
+ struct mlx5e_icosq *sq)
+{
+ struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
+ struct mlx5e_shampo_hd *shampo;
+ /* assume 1:1 relationship between RQ and icosq */
+ struct mlx5e_rq *rq = &c->rq;
+ int end, from, len = umr.len;
+
+ shampo = rq->mpwqe.shampo;
+ end = shampo->hd_per_wq;
+ from = shampo->ci;
+ if (from + len > shampo->hd_per_wq) {
+ len -= end - from;
+ bitmap_set(shampo->bitmap, from, end - from);
+ from = 0;
+ }
+
+ bitmap_set(shampo->bitmap, from, len);
+ shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1);
+}
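
The wrap handling here mirrors mlx5e_shampo_dealloc_hd() above: a circular range on a power-of-two ring is cut into at most two linear spans before the linear bitmap helper runs. A standalone sketch of just that split:

    #include <stdio.h>

    /* Split [from, from + len) on a ring of 'size' entries into one or
     * two linear spans and print them. */
    static void split_ring_range(int from, int len, int size)
    {
        if (from + len > size) {
            printf("span: [%d, %d)\n", from, size);
            len -= size - from;
            from = 0;
        }
        printf("span: [%d, %d)\n", from, from + len);
    }

    int main(void)
    {
        split_ring_range(900, 300, 1024); /* [900, 1024) and [0, 176) */
        return 0;
    }
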
+
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
@@ -685,6 +909,9 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break;
case MLX5E_ICOSQ_WQE_NOP:
break;
+ case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
+ mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
+ break;
#ifdef CONFIG_MLX5_EN_TLS
case MLX5E_ICOSQ_WQE_UMR_TLS:
break;
@@ -782,8 +1009,8 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
if (tcp_ack) {
tcp->ack = 1;
- tcp->ack_seq = cqe->lro_ack_seq_num;
- tcp->window = cqe->lro_tcp_win;
+ tcp->ack_seq = cqe->lro.ack_seq_num;
+ tcp->window = cqe->lro.tcp_win;
}
}
@@ -809,7 +1036,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
tcp = ip_p + sizeof(struct iphdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
- ipv4->ttl = cqe->lro_min_ttl;
+ ipv4->ttl = cqe->lro.min_ttl;
ipv4->tot_len = cpu_to_be16(tot_len);
ipv4->check = 0;
ipv4->check = ip_fast_csum((unsigned char *)ipv4,
@@ -829,7 +1056,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
tcp = ip_p + sizeof(struct ipv6hdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
- ipv6->hop_limit = cqe->lro_min_ttl;
+ ipv6->hop_limit = cqe->lro.min_ttl;
ipv6->payload_len = cpu_to_be16(payload_len);
mlx5e_lro_update_tcp_hdr(cqe, tcp);
@@ -841,6 +1068,142 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
}
}
+static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
+{
+ struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
+ u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;
+
+ return page_address(last_head->page) + head_offset;
+}
+
+static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
+{
+ int udp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct udphdr *uh;
+
+ uh = (struct udphdr *)(skb->data + udp_off);
+ uh->len = htons(skb->len - udp_off);
+
+ if (uh->check)
+ uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
+ ipv4->daddr, 0);
+
+ skb->csum_start = (unsigned char *)uh - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+}
+
+static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
+{
+ int udp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct udphdr *uh;
+
+ uh = (struct udphdr *)(skb->data + udp_off);
+ uh->len = htons(skb->len - udp_off);
+
+ if (uh->check)
+ uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
+ &ipv6->daddr, 0);
+
+ skb->csum_start = (unsigned char *)uh - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+}
+
+static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ struct tcphdr *skb_tcp_hd)
+{
+ u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ struct tcphdr *last_tcp_hd;
+ void *last_hd_addr;
+
+ last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
+ last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
+ tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
+}
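
Only the FIN and PSH bits of the last merged segment are carried over into the aggregate skb; the remaining flag bits keep the values of the first segment. A toy illustration of the masked OR (byte-sized masks are assumed here for readability; the kernel's TCP_FLAG_* constants are 32-bit flag-word masks):

    #include <stdio.h>
    #include <stdint.h>

    #define FLAG_FIN 0x01u
    #define FLAG_PSH 0x08u
    #define FLAG_ACK 0x10u

    int main(void)
    {
        uint8_t skb_flags  = FLAG_ACK;                       /* merged skb */
        uint8_t last_flags = FLAG_ACK | FLAG_PSH | FLAG_FIN; /* last seg */

        skb_flags |= last_flags & (FLAG_FIN | FLAG_PSH);
        printf("0x%02x\n", skb_flags); /* 0x19: ACK|PSH|FIN */
        return 0;
    }
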
+
+static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
+ struct mlx5_cqe64 *cqe, bool match)
+{
+ int tcp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct tcphdr *tcp;
+
+ tcp = (struct tcphdr *)(skb->data + tcp_off);
+ if (match)
+ mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
+
+ tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
+ ipv4->daddr, 0);
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+ if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+
+ skb->csum_start = (unsigned char *)tcp - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (tcp->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+}
+
+static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
+ struct mlx5_cqe64 *cqe, bool match)
+{
+ int tcp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct tcphdr *tcp;
+
+ tcp = (struct tcphdr *)(skb->data + tcp_off);
+ if (match)
+ mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
+
+ tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
+ &ipv6->daddr, 0);
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+ skb->csum_start = (unsigned char *)tcp - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (tcp->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+}
+
+static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
+{
+ bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ if (is_ipv4) {
+ int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
+ struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
+ __be16 newlen = htons(skb->len - nhoff);
+
+ csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
+ ipv4->tot_len = newlen;
+
+ if (ipv4->protocol == IPPROTO_TCP)
+ mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
+ else
+ mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
+ } else {
+ int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);
+
+ ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));
+
+ if (ipv6->nexthdr == IPPROTO_TCP)
+ mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
+ else
+ mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
+ }
+}
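
csum_replace2() above patches the IPv4 header checksum incrementally per RFC 1624 instead of recomputing it over the whole header when tot_len changes. A minimal userspace model of that update (the kernel operates on big-endian header fields; plain host-order 16-bit values are assumed here):

    #include <stdio.h>
    #include <stdint.h>

    /* RFC 1624, Eqn. 3: HC' = ~(~HC + ~m + m'), with end-around carry. */
    static uint16_t checksum_replace2(uint16_t check, uint16_t old_val,
                                      uint16_t new_val)
    {
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~old_val;
        sum += new_val;
        sum = (sum & 0xffff) + (sum >> 16); /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        /* e.g. tot_len grows from 1500 to 2960 after merging segments;
         * 0xb1e6 is an arbitrary prior header checksum. */
        printf("0x%04x\n", checksum_replace2(0xb1e6, 1500, 2960));
        return 0; /* prints 0xac32 */
    }
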
+
static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
struct sk_buff *skb)
{
@@ -1090,6 +1453,27 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
stats->mcast_packets++;
}
+static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->packets++;
+ stats->gro_packets++;
+ stats->bytes += cqe_bcnt;
+ stats->gro_bytes += cqe_bcnt;
+ if (NAPI_GRO_CB(skb)->count != 1)
+ return;
+ mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ skb_reset_network_header(skb);
+ if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
+ napi_gro_receive(rq->cq.napi, skb);
+ rq->hw_gro_data->skb = NULL;
+ }
+}
+
static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
@@ -1199,7 +1583,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
}
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset,
+ headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1275,7 +1660,6 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5e_tc_update_priv tc_priv = {};
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
struct sk_buff *skb;
@@ -1311,15 +1695,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
- !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
- dev_kfree_skb_any(skb);
- goto free_wqe;
- }
-
- napi_gro_receive(rq->cq.napi, skb);
-
- mlx5_rep_tc_post_napi_receive(&tc_priv);
+ mlx5e_rep_tc_receive(cqe, rq, skb);
free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
@@ -1336,7 +1712,6 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
u32 page_idx = wqe_offset >> PAGE_SHIFT;
- struct mlx5e_tc_update_priv tc_priv = {};
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
@@ -1369,15 +1744,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
- !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
- dev_kfree_skb_any(skb);
- goto mpwrq_cqe_out;
- }
-
- napi_gro_receive(rq->cq.napi, skb);
-
- mlx5_rep_tc_post_napi_receive(&tc_priv);
+ mlx5e_rep_tc_receive(cqe, rq, skb);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
@@ -1395,6 +1762,30 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
};
#endif
+static void
+mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ u32 data_bcnt, u32 data_offset)
+{
+ net_prefetchw(skb->data);
+
+ while (data_bcnt) {
+ u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
+ unsigned int truesize;
+
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ truesize = pg_consumed_bytes;
+ else
+ truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
+
+ mlx5e_add_skb_frag(rq, skb, di, data_offset,
+ pg_consumed_bytes, truesize);
+
+ data_bcnt -= pg_consumed_bytes;
+ data_offset = 0;
+ di++;
+ }
+}
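
The truesize choice above changes memory accounting: the legacy MPWQE path charges each fragment a full stride, while the SHAMPO path charges only the bytes actually consumed (the last fragment is re-aligned later by mlx5e_shampo_align_fragment() before the skb is flushed). A small illustration, assuming a 2 KB stride:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned int pg_consumed_bytes = 1400;
        unsigned int stride = 1u << 11; /* assumed 2 KB stride */

        /* legacy accounting: charge the whole stride */
        printf("aligned truesize: %u\n", ALIGN(pg_consumed_bytes, stride));
        /* SHAMPO accounting: charge what was consumed */
        printf("shampo truesize:  %u\n", pg_consumed_bytes);
        return 0; /* 2048 vs 1400 */
    }
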
+
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
@@ -1420,20 +1811,9 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
frag_offset -= PAGE_SIZE;
}
- while (byte_cnt) {
- u32 pg_consumed_bytes =
- min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
- unsigned int truesize =
- ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
-
- mlx5e_add_skb_frag(rq, skb, di, frag_offset,
- pg_consumed_bytes, truesize);
- byte_cnt -= pg_consumed_bytes;
- frag_offset = 0;
- di++;
- }
+ mlx5e_fill_skb_data(skb, rq, di, byte_cnt, frag_offset);
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1487,6 +1867,181 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return skb;
}
+static void
+mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe, u16 header_index)
+{
+ struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
+ u16 head_offset = head->addr & (PAGE_SIZE - 1);
+ u16 head_size = cqe->shampo.header_size;
+ u16 rx_headroom = rq->buff.headroom;
+ struct sk_buff *skb = NULL;
+ void *hdr, *data;
+ u32 frag_size;
+
+ hdr = page_address(head->page) + head_offset;
+ data = hdr + rx_headroom;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
+
+ if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
+ /* build SKB around header */
+ dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
+ prefetchw(hdr);
+ prefetch(data);
+ skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
+
+ if (unlikely(!skb))
+ return;
+
+ /* queue up for recycling/reuse */
+ page_ref_inc(head->page);
+
+ } else {
+ /* allocate SKB and copy header for large header */
+ rq->stats->gro_large_hds++;
+ skb = napi_alloc_skb(rq->cq.napi,
+ ALIGN(head_size, sizeof(long)));
+ if (unlikely(!skb)) {
+ rq->stats->buff_alloc_err++;
+ return;
+ }
+
+ prefetchw(skb->data);
+ mlx5e_copy_skb_header(rq->pdev, skb, head,
+ head_offset + rx_headroom,
+ rx_headroom, head_size);
+ /* skb linear part was allocated with headlen and aligned to long */
+ skb->tail += head_size;
+ skb->len += head_size;
+ }
+ rq->hw_gro_data->skb = skb;
+ NAPI_GRO_CB(skb)->count = 1;
+ skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size;
+}
+
+static void
+mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
+{
+ skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ unsigned int frag_size = skb_frag_size(last_frag);
+ unsigned int frag_truesize;
+
+ frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
+ skb->truesize += frag_truesize - frag_size;
+}
+
+static void
+mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
+{
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->gro_skbs++;
+ if (likely(skb_shinfo(skb)->nr_frags))
+ mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
+ if (NAPI_GRO_CB(skb)->count > 1)
+ mlx5e_shampo_update_hdr(rq, cqe, match);
+ napi_gro_receive(rq->cq.napi, skb);
+ rq->hw_gro_data->skb = NULL;
+}
+
+static bool
+mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
+{
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE;
+}
+
+static void
+mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u64 addr = shampo->info[header_index].addr;
+
+ if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
+ shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
+ mlx5e_page_release(rq, &shampo->info[header_index], true);
+ }
+ bitmap_clear(shampo->bitmap, header_index, 1);
+}
+
+static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
+ u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
+ u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ struct sk_buff **skb = &rq->hw_gro_data->skb;
+ bool flush = cqe->shampo.flush;
+ bool match = cqe->shampo.match;
+ struct mlx5e_rq_stats *stats = rq->stats;
+ struct mlx5e_rx_wqe_ll *wqe;
+ struct mlx5e_dma_info *di;
+ struct mlx5e_mpw_info *wi;
+ struct mlx5_wq_ll *wq;
+
+ wi = &rq->mpwqe.info[wqe_id];
+ wi->consumed_strides += cstrides;
+
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
+ stats->wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ stats->mpwqe_filler_cqes++;
+ stats->mpwqe_filler_strides += cstrides;
+ goto mpwrq_cqe_out;
+ }
+
+ stats->gro_match_packets += match;
+
+ if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
+ match = false;
+ mlx5e_shampo_flush_skb(rq, cqe, match);
+ }
+
+ if (!*skb) {
+ mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ if (unlikely(!*skb))
+ goto free_hd_entry;
+ } else {
+ NAPI_GRO_CB(*skb)->count++;
+ if (NAPI_GRO_CB(*skb)->count == 2 &&
+ rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
+ void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
+ int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
+ sizeof(struct iphdr);
+ struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);
+
+ rq->hw_gro_data->second_ip_id = ntohs(iph->id);
+ }
+ }
+
+ di = &wi->umr.dma_info[page_idx];
+ mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+
+ mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
+ if (flush)
+ mlx5e_shampo_flush_skb(rq, cqe, match);
+free_hd_entry:
+ mlx5e_free_rx_shampo_hd_entry(rq, header_index);
+mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+ return;
+
+ wq = &rq->mpwqe.wq;
+ wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
+ mlx5e_free_rx_mpwqe(rq, wi, true);
+ mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
+
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
@@ -1579,11 +2134,15 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
mlx5_cqwq_pop(cqwq);
- INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
- mlx5e_handle_rx_cqe, rq, cqe);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
+ rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out:
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
+ mlx5e_shampo_flush_skb(rq, NULL, false);
+
if (rcu_access_pointer(rq->xdp_prog))
mlx5e_xdp_rx_poll_complete(rq);
@@ -1784,15 +2343,24 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
if (mlx5_fpga_is_ipsec_device(mdev)) {
netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
return -EINVAL;
}
- if (!rq->handle_rx_cqe) {
- netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
- return -EINVAL;
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
+ if (!rq->handle_rx_cqe) {
+ netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
+ return -EINVAL;
+ }
+ } else {
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
+ if (!rq->handle_rx_cqe) {
+ netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
+ return -EINVAL;
+ }
}
+
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
rq->wqe.skb_from_cqe = xsk ?
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index ce8ab1f01876..8c9163d2c646 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -35,30 +35,7 @@
#include <net/udp.h>
#include "en.h"
#include "en/port.h"
-
-enum {
- MLX5E_ST_LINK_STATE,
- MLX5E_ST_LINK_SPEED,
- MLX5E_ST_HEALTH_INFO,
-#ifdef CONFIG_INET
- MLX5E_ST_LOOPBACK,
-#endif
- MLX5E_ST_NUM,
-};
-
-const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = {
- "Link Test",
- "Speed Test",
- "Health Test",
-#ifdef CONFIG_INET
- "Loopback Test",
-#endif
-};
-
-int mlx5e_self_test_num(struct mlx5e_priv *priv)
-{
- return ARRAY_SIZE(mlx5e_self_tests);
-}
+#include "eswitch.h"
static int mlx5e_test_health_info(struct mlx5e_priv *priv)
{
@@ -265,6 +242,14 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
mlx5e_refresh_tirs(priv, false, false);
}
+static int mlx5e_cond_loopback(struct mlx5e_priv *priv)
+{
+ if (is_mdev_switchdev_mode(priv->mdev))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
static int mlx5e_test_loopback(struct mlx5e_priv *priv)
{
@@ -313,37 +298,47 @@ out:
}
#endif
-static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = {
- mlx5e_test_link_state,
- mlx5e_test_link_speed,
- mlx5e_test_health_info,
+typedef int (*mlx5e_st_func)(struct mlx5e_priv *);
+
+struct mlx5e_st {
+ char name[ETH_GSTRING_LEN];
+ mlx5e_st_func st_func;
+ mlx5e_st_func cond_func;
+};
+
+static struct mlx5e_st mlx5e_sts[] = {
+ { "Link Test", mlx5e_test_link_state },
+ { "Speed Test", mlx5e_test_link_speed },
+ { "Health Test", mlx5e_test_health_info },
#ifdef CONFIG_INET
- mlx5e_test_loopback,
+ { "Loopback Test", mlx5e_test_loopback, mlx5e_cond_loopback },
#endif
};
+#define MLX5E_ST_NUM ARRAY_SIZE(mlx5e_sts)
+
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf)
{
struct mlx5e_priv *priv = netdev_priv(ndev);
- int i;
-
- memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM);
+ int i, count = 0;
mutex_lock(&priv->state_lock);
netdev_info(ndev, "Self test begin..\n");
for (i = 0; i < MLX5E_ST_NUM; i++) {
- netdev_info(ndev, "\t[%d] %s start..\n",
- i, mlx5e_self_tests[i]);
- buf[i] = mlx5e_st_func[i](priv);
- netdev_info(ndev, "\t[%d] %s end: result(%lld)\n",
- i, mlx5e_self_tests[i], buf[i]);
+ struct mlx5e_st st = mlx5e_sts[i];
+
+ if (st.cond_func && st.cond_func(priv))
+ continue;
+ netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
+ buf[count] = st.st_func(priv);
+ netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
+ count++;
}
mutex_unlock(&priv->state_lock);
- for (i = 0; i < MLX5E_ST_NUM; i++) {
+ for (i = 0; i < count; i++) {
if (buf[i]) {
etest->flags |= ETH_TEST_FL_FAILED;
break;
@@ -352,3 +347,24 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
netdev_info(ndev, "Self test out: status flags(0x%x)\n",
etest->flags);
}
+
+int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data)
+{
+ int i, count = 0;
+
+ for (i = 0; i < MLX5E_ST_NUM; i++) {
+ struct mlx5e_st st = mlx5e_sts[i];
+
+ if (st.cond_func && st.cond_func(priv))
+ continue;
+ if (data)
+ strcpy(data + count * ETH_GSTRING_LEN, st.name);
+ count++;
+ }
+ return count;
+}
+
+int mlx5e_self_test_num(struct mlx5e_priv *priv)
+{
+ return mlx5e_self_test_fill_strings(priv, NULL);
+}
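
The refactor replaces the parallel name and function arrays with a single table whose optional cond_func gates both execution and string reporting, keeping results and names index-aligned when a test is skipped. A userspace model of the dispatch (the tests and skip condition are invented for illustration):

    #include <stdio.h>

    struct st {
        const char *name;
        int (*run)(void);
        int (*cond)(void); /* non-zero return skips the test */
    };

    static int always_pass(void) { return 0; }
    static int skip_me(void)     { return -1; }

    static const struct st sts[] = {
        { "Link Test", always_pass },
        { "Loopback",  always_pass, skip_me },
    };

    int main(void)
    {
        long long buf[2];
        int count = 0;

        for (size_t i = 0; i < sizeof(sts) / sizeof(sts[0]); i++) {
            if (sts[i].cond && sts[i].cond())
                continue; /* skipped: neither run nor counted */
            buf[count] = sts[i].run();
            printf("[%zu] %s: %lld\n", i, sts[i].name, buf[count]);
            count++;
        }
        printf("ran %d of %zu tests\n", count,
               sizeof(sts) / sizeof(sts[0]));
        return 0;
    }
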
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e1dd17019030..2a9bfc3ffa2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -128,6 +128,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@@ -313,6 +318,11 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_bytes += rq_stats->bytes;
s->rx_lro_packets += rq_stats->lro_packets;
s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_gro_packets += rq_stats->gro_packets;
+ s->rx_gro_bytes += rq_stats->gro_bytes;
+ s->rx_gro_skbs += rq_stats->gro_skbs;
+ s->rx_gro_match_packets += rq_stats->gro_match_packets;
+ s->rx_gro_large_hds += rq_stats->gro_large_hds;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
@@ -1760,6 +1770,11 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 139e59f30db0..2c1ed5b81be6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -144,6 +144,11 @@ struct mlx5e_sw_stats {
u64 tx_mpwqe_pkts;
u64 rx_lro_packets;
u64 rx_lro_bytes;
+ u64 rx_gro_packets;
+ u64 rx_gro_bytes;
+ u64 rx_gro_skbs;
+ u64 rx_gro_match_packets;
+ u64 rx_gro_large_hds;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
@@ -322,6 +327,11 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 gro_packets;
+ u64 gro_bytes;
+ u64 gro_skbs;
+ u64 gro_match_packets;
+ u64 gro_large_hds;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ba8164792016..835caa1c7b74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,6 +39,7 @@
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
+#include <linux/if_macvlan.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/psample.h>
@@ -59,7 +60,6 @@
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
-#include "en/tc_priv.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "lib/devcom.h"
@@ -67,6 +67,8 @@
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
+#include "lag/lag.h"
+#include "lag/mp.h"
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -229,6 +231,23 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
return err;
}
+struct mlx5e_tc_int_port_priv *
+mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->int_port_priv;
+ }
+
+ return NULL;
+}
+
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
@@ -246,7 +265,6 @@ get_ct_priv(struct mlx5e_priv *priv)
return priv->fs.tc.ct;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
@@ -263,7 +281,6 @@ get_sample_priv(struct mlx5e_priv *priv)
return NULL;
}
-#endif
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
@@ -1146,11 +1163,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
flow, spec, attr,
mod_hdr_acts);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
} else if (flow_flag_test(flow, SAMPLE)) {
rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
mlx5e_tc_get_flow_tun_id(flow));
-#endif
} else {
rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
@@ -1186,12 +1201,10 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
return;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
if (flow_flag_test(flow, SAMPLE)) {
mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
return;
}
-#endif
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -1388,6 +1401,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int err = 0;
int out_index;
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
+
/* We check chain range only for tc flows.
* For ft flows, we checked attr->chain was originally 0 and set it to
* FDB_FT_CHAIN which is outside tc range.
@@ -1413,6 +1429,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5e_attach_decap_route(priv, flow);
if (err)
goto err_out;
+
+ if (!attr->chain && esw_attr->int_port) {
+ /* If decap route device is internal port, change the
+ * source vport value in reg_c0 back to uplink just in
+ * case the rule performs goto chain > 0. If we have a miss
+ * on chain > 0 we want the metadata regs to hold the
+ * chain id so SW will resume handling of this packet
+ * from the proper chain.
+ */
+ u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
+ esw_attr->in_rep->vport);
+
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
+ MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
+ metadata);
+ if (err)
+ return err;
+ }
}
if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
@@ -1421,8 +1455,31 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
goto err_out;
}
- parse_attr = attr->parse_attr;
- esw_attr = attr->esw_attr;
+ if (netif_is_ovs_master(parse_attr->filter_dev)) {
+ struct mlx5e_tc_int_port *int_port;
+
+ if (attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Internal port rule is only supported on chain 0");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->dest_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Internal port rule offload doesn't support goto action");
+ return -EOPNOTSUPP;
+ }
+
+ int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
+ parse_attr->filter_dev->ifindex,
+ flow_flag_test(flow, EGRESS) ?
+ MLX5E_TC_INT_PORT_EGRESS :
+ MLX5E_TC_INT_PORT_INGRESS);
+ if (IS_ERR(int_port))
+ return PTR_ERR(int_port);
+
+ esw_attr->int_port = int_port;
+ }
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
struct net_device *out_dev;
@@ -1445,7 +1502,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
goto err_out;
if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
vf_tun = true;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
@@ -1553,7 +1611,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
vf_tun = true;
if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
mlx5e_detach_encap(priv, flow, out_index);
@@ -1577,6 +1636,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
+ if (esw_attr->int_port)
+ mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);
+
+ if (esw_attr->dest_int_port)
+ mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);
+
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
@@ -1688,8 +1753,8 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
if (opt->opt_class != htons(U16_MAX) ||
opt->type != U8_MAX) {
- NL_SET_ERR_MSG(extack,
- "Partial match of tunnel options in chain > 0 isn't supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Partial match of tunnel options in chain > 0 isn't supported");
netdev_warn(priv->netdev,
"Partial match of tunnel options in chain > 0 isn't supported");
return -EOPNOTSUPP;
@@ -1896,8 +1961,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
bool needs_mapping, sets_mapping;
int err;
- if (!mlx5e_is_eswitch_flow(flow))
+ if (!mlx5e_is_eswitch_flow(flow)) {
+ NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
return -EOPNOTSUPP;
+ }
needs_mapping = !!flow->attr->chain;
sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
@@ -1905,8 +1972,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
if ((needs_mapping || sets_mapping) &&
!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
- NL_SET_ERR_MSG(extack,
- "Chains on tunnel devices isn't supported without register loopback support");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Chains on tunnel devices isn't supported without register loopback support");
netdev_warn(priv->netdev,
"Chains on tunnel devices isn't supported without register loopback support");
return -EOPNOTSUPP;
@@ -2269,8 +2336,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
addr_type = match.key->addr_type;
/* the HW doesn't support frag first/later */
- if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
return -EOPNOTSUPP;
+ }
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
@@ -2437,8 +2506,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
switch (ip_proto) {
case IPPROTO_ICMP:
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_ICMP))
+ MLX5_FLEX_PROTO_ICMP)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on Flex protocols for ICMP is not supported");
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
match.mask->type);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
@@ -2450,8 +2522,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
break;
case IPPROTO_ICMPV6:
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_ICMPV6))
+ MLX5_FLEX_PROTO_ICMPV6)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on Flex protocols for ICMPV6 is not supported");
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
match.mask->type);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
@@ -2557,15 +2632,19 @@ static int pedit_header_offsets[] = {
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
- struct pedit_headers_action *hdrs)
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
{
u32 *curr_pmask, *curr_pval;
curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
- if (*curr_pmask & mask) /* disallow acting twice on the same location */
+ if (*curr_pmask & mask) { /* disallow acting twice on the same location */
+ NL_SET_ERR_MSG_MOD(extack,
+ "curr_pmask and new mask same. Acting twice on same location");
goto out_err;
+ }
*curr_pmask |= mask;
*curr_pval |= (val & mask);
@@ -2898,7 +2977,7 @@ parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
val = act->mangle.val;
offset = act->mangle.offset;
- err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
+ err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
if (err)
goto out_err;
@@ -2910,16 +2989,17 @@ out_err:
}
static int
-parse_pedit_to_reformat(struct mlx5e_priv *priv,
- const struct flow_action_entry *act,
+parse_pedit_to_reformat(const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
u32 mask, val, offset;
u32 *p;
- if (act->id != FLOW_ACTION_MANGLE)
+ if (act->id != FLOW_ACTION_MANGLE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
return -EOPNOTSUPP;
+ }
if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
@@ -2943,7 +3023,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
- return parse_pedit_to_reformat(priv, act, parse_attr, extack);
+ return parse_pedit_to_reformat(act, parse_attr, extack);
return parse_pedit_to_modify_hdr(priv, act, namespace,
parse_attr, hdrs, extack);
@@ -3025,10 +3105,10 @@ struct ipv6_hoplimit_word {
__u8 hop_limit;
};
-static int is_action_keys_supported(const struct flow_action_entry *act,
- bool ct_flow, bool *modify_ip_header,
- bool *modify_tuple,
- struct netlink_ext_ack *extack)
+static bool
+is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
+ bool *modify_ip_header, bool *modify_tuple,
+ struct netlink_ext_ack *extack)
{
u32 mask, offset;
u8 htype;
@@ -3056,7 +3136,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow && *modify_tuple) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of ipv4 address with action ct");
- return -EOPNOTSUPP;
+ return false;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
struct ipv6_hoplimit_word *hoplimit_word =
@@ -3074,7 +3154,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow && *modify_tuple) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of ipv6 address with action ct");
- return -EOPNOTSUPP;
+ return false;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
@@ -3082,11 +3162,11 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of transport header ports with action ct");
- return -EOPNOTSUPP;
+ return false;
}
}
- return 0;
+ return true;
}
static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
@@ -3133,7 +3213,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
void *headers_v;
u16 ethertype;
u8 ip_proto;
- int i, err;
+ int i;
headers_c = get_match_headers_criteria(actions, spec);
headers_v = get_match_headers_value(actions, spec);
@@ -3151,11 +3231,10 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
act->id != FLOW_ACTION_ADD)
continue;
- err = is_action_keys_supported(act, ct_flow,
- &modify_ip_header,
- &modify_tuple, extack);
- if (err)
- return err;
+ if (!is_action_keys_supported(act, ct_flow,
+ &modify_ip_header,
+ &modify_tuple, extack))
+ return false;
}
if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
@@ -3176,37 +3255,65 @@ out_ok:
return true;
}
-static bool actions_match_supported(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+static bool
+actions_match_supported_fdb(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
- bool ct_flow = false, ct_clear = false;
- u32 actions;
+ struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+ bool ct_flow, ct_clear;
- ct_clear = flow->attr->ct_attr.ct_action &
- TCA_CT_ACT_CLEAR;
+ ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
ct_flow = flow_flag_test(flow, CT) && !ct_clear;
- actions = flow->attr->action;
- if (mlx5e_is_eswitch_flow(flow)) {
- if (flow->attr->esw_attr->split_count && ct_flow &&
- !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
- /* All registers used by ct are cleared when using
- * split rules.
- */
- NL_SET_ERR_MSG_MOD(extack,
- "Can't offload mirroring with action ct");
- return false;
- }
+ if (esw_attr->split_count && ct_flow &&
+ !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
+ /* All registers used by ct are cleared when using
+ * split rules.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
+ return false;
}
- if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- return modify_header_match_supported(priv, &parse_attr->spec,
- flow_action, actions,
- ct_flow, ct_clear,
- extack);
+ if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "current firmware doesn't support split rule for port mirroring");
+ netdev_warn_once(priv->netdev,
+ "current firmware doesn't support split rule for port mirroring\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+actions_match_supported(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ u32 actions = flow->attr->action;
+ bool ct_flow, ct_clear;
+
+ ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
+ ct_flow = flow_flag_test(flow, CT) && !ct_clear;
+
+ if (!(actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
+ actions, ct_flow, ct_clear, extack))
+ return false;
+
+ if (mlx5e_is_eswitch_flow(flow) &&
+ !actions_match_supported_fdb(priv, parse_attr, flow, extack))
+ return false;
return true;
}
@@ -3355,11 +3462,51 @@ static int validate_goto_chain(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_nic_actions(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
+static int
+actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ enum mlx5_flow_namespace_type ns_type;
+ int err;
+
+ if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
+ !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
+ return 0;
+
+ ns_type = get_flow_name_space(flow);
+
+ err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
+ &attr->action, extack);
+ if (err)
+ return err;
+
+ /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
+ if (parse_attr->mod_hdr_acts.num_actions > 0)
+ return 0;
+
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+
+ if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
+ return 0;
+
+ if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+ (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
+ attr->esw_attr->split_count = 0;
+
+ return 0;
+}
+
+static int
+parse_tc_nic_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct pedit_headers_action hdrs[2] = {};
@@ -3368,12 +3515,16 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
u32 action = 0;
int err, i;
- if (!flow_action_has_entries(flow_action))
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
+ }
if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
nic_attr = attr->nic_attr;
nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
@@ -3451,7 +3602,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
@@ -3462,38 +3614,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
flow_flag_set(flow, CT);
break;
default:
- NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "The offload action is not supported in NIC action");
return -EOPNOTSUPP;
}
}
- if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, &action, extack);
- if (err)
- return err;
- /* in case all pedit actions are skipped, remove the MOD_HDR
- * flag.
- */
- if (parse_attr->mod_hdr_acts.num_actions == 0) {
- action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
- }
- }
-
attr->action = action;
- if (attr->dest_chain) {
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
- return -EOPNOTSUPP;
- }
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (attr->dest_chain && parse_attr->mirred_ifindex[0]) {
+ NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
}
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ if (err)
+ return err;
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
@@ -3517,19 +3653,25 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
u8 vlan_idx = attr->total_vlan;
- if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
+ if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
+ NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
return -EOPNOTSUPP;
+ }
switch (act->id) {
case FLOW_ACTION_VLAN_POP:
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH))
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan pop action is not supported");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
} else {
@@ -3545,20 +3687,27 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH))
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported for vlan depth > 1");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
} else {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
(act->vlan.proto != htons(ETH_P_8021Q) ||
- act->vlan.prio))
+ act->vlan.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
return -EINVAL;
}
@@ -3592,7 +3741,8 @@ static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
static int add_vlan_push_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device **out_dev,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
struct net_device *vlan_dev = *out_dev;
struct flow_action_entry vlan_act = {
@@ -3603,7 +3753,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
};
int err;
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
if (err)
return err;
@@ -3614,14 +3764,15 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
return -ENODEV;
if (is_vlan_dev(*out_dev))
- err = add_vlan_push_action(priv, attr, out_dev, action);
+ err = add_vlan_push_action(priv, attr, out_dev, action, extack);
return err;
}
static int add_vlan_pop_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
@@ -3631,7 +3782,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
if (err)
return err;
}
@@ -3732,6 +3883,45 @@ static int verify_uplink_forwarding(struct mlx5e_priv *priv,
return 0;
}
+int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type,
+ u32 *action,
+ int out_index)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5e_tc_int_port_priv *int_port_priv;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_tc_int_port *dest_int_port;
+ int err;
+
+ parse_attr = attr->parse_attr;
+ int_port_priv = mlx5e_get_int_port_priv(priv);
+
+ dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
+ if (IS_ERR(dest_int_port))
+ return PTR_ERR(dest_int_port);
+
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
+ MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
+ mlx5e_tc_int_port_get_metadata(dest_int_port));
+ if (err) {
+ mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
+ return err;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ esw_attr->dest_int_port = dest_int_port;
+ esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
+
+ /* Forward to root fdb for matching against the new source vport */
+ attr->dest_chain = 0;
+
+ return 0;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
@@ -3751,20 +3941,39 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
bool encap = false, decap = false;
u32 action = attr->action;
int err, i, if_count = 0;
+ bool ptype_host = false;
bool mpls_push = false;
- if (!flow_action_has_entries(flow_action))
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
+ }
if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+ break;
+ case FLOW_ACTION_PTYPE:
+ if (act->ptype != PACKET_HOST) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "skbedit ptype is only supported with type host");
+ return -EOPNOTSUPP;
+ }
+
+ ptype_host = true;
+ break;
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -3828,6 +4037,50 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
return -EOPNOTSUPP;
+ case FLOW_ACTION_REDIRECT_INGRESS: {
+ struct net_device *out_dev;
+
+ out_dev = act->dev;
+ if (!out_dev)
+ return -EOPNOTSUPP;
+
+ if (!netif_is_ovs_master(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to ingress is supported only for OVS internal ports");
+ return -EOPNOTSUPP;
+ }
+
+ if (netif_is_ovs_master(parse_attr->filter_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to ingress is not supported from internal port");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ptype_host) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to int port ingress requires ptype=host action");
+ return -EOPNOTSUPP;
+ }
+
+ if (esw_attr->out_count) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to int port ingress is supported only as single destination");
+ return -EOPNOTSUPP;
+ }
+
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
+ MLX5E_TC_INT_PORT_INGRESS,
+ &action, esw_attr->out_count);
+ if (err)
+ return err;
+
+ esw_attr->out_count++;
+
+ break;
+ }
case FLOW_ACTION_REDIRECT:
case FLOW_ACTION_MIRRED: {
struct mlx5e_priv *out_priv;
@@ -3902,18 +4155,21 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (is_vlan_dev(out_dev)) {
err = add_vlan_push_action(priv, attr,
&out_dev,
- &action);
+ &action, extack);
if (err)
return err;
}
if (is_vlan_dev(parse_attr->filter_dev)) {
err = add_vlan_pop_action(priv, attr,
- &action);
+ &action, extack);
if (err)
return err;
}
+ if (netif_is_macvlan(out_dev))
+ out_dev = macvlan_dev_real_dev(out_dev);
+
err = verify_uplink_forwarding(priv, flow, out_dev, extack);
if (err)
return err;
@@ -3935,6 +4191,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
esw_attr->out_count++;
+ } else if (netif_is_ovs_master(out_dev)) {
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr,
+ out_dev->ifindex,
+ MLX5E_TC_INT_PORT_EGRESS,
+ &action,
+ esw_attr->out_count);
+ if (err)
+ return err;
+
+ esw_attr->out_count++;
} else if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
@@ -3955,10 +4221,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_TUNNEL_ENCAP:
info = act->tunnel;
- if (info)
+ if (info) {
encap = true;
- else
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Zero tunnel attributes is not supported");
return -EOPNOTSUPP;
+ }
break;
case FLOW_ACTION_VLAN_PUSH:
@@ -3972,7 +4241,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
act, parse_attr, hdrs,
&action, extack);
} else {
- err = parse_tc_vlan_action(priv, act, esw_attr, &action);
+ err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack);
}
if (err)
return err;
@@ -3998,7 +4267,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
@@ -4025,11 +4295,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
flow_flag_set(flow, SAMPLE);
break;
default:
- NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "The offload action is not supported in FDB action");
return -EOPNOTSUPP;
}
}
+ /* Forward to/from internal port can only have 1 dest */
+ if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
+ esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rules with internal port can have only one destination");
+ return -EOPNOTSUPP;
+ }
+
/* always set IP version for indirect table handling */
attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
@@ -4045,60 +4324,26 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return err;
}
- if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, &action, extack);
- if (err)
- return err;
- /* in case all pedit actions are skipped, remove the MOD_HDR
- * flag. we might have set split_count either by pedit or
- * pop/push. if there is no pop/push either, reset it too.
- */
- if (parse_attr->mod_hdr_acts.num_actions == 0) {
- action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
- if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
- (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
- esw_attr->split_count = 0;
- }
- }
-
attr->action = action;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
- return -EOPNOTSUPP;
- if (attr->dest_chain) {
- if (decap) {
- /* It can be supported if we'll create a mapping for
- * the tunnel device only (without tunnel), and set
- * this tunnel id with this decap flow.
- *
- * On restore (miss), we'll just set this saved tunnel
- * device.
- */
-
- NL_SET_ERR_MSG(extack,
- "Decap with goto isn't supported");
- netdev_warn(priv->netdev,
- "Decap with goto isn't supported");
- return -EOPNOTSUPP;
- }
-
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- }
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ if (err)
+ return err;
- if (!(attr->action &
- (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG_MOD(extack,
- "Rule must have at least one forward/drop action");
+ if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
- }
- if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "current firmware doesn't support split rule for port mirroring");
- netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
+ if (attr->dest_chain && decap) {
+ /* It can be supported if we'll create a mapping for
+ * the tunnel device only (without tunnel), and set
+ * this tunnel id with this decap flow.
+ *
+ * On restore (miss), we'll just set this saved tunnel
+ * device.
+ */
+
+ NL_SET_ERR_MSG(extack, "Decap with goto isn't supported");
+ netdev_warn(priv->netdev, "Decap with goto isn't supported");
return -EOPNOTSUPP;
}
@@ -4733,8 +4978,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -5006,9 +5253,9 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
MLX5_FLOW_NAMESPACE_FDB,
uplink_priv->post_act);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));
+
uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
-#endif
mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
@@ -5022,9 +5269,11 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
}
uplink_priv->tunnel_mapping = mapping;
- /* 0xFFF is reserved for stack devices slow path table mark */
+ /* Two last values are reserved for stack devices slow path table mark
+ * and bridge ingress push mark.
+ */
mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
- sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
+ sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_enc_opts_mapping;
@@ -5052,9 +5301,8 @@ err_ht_init:
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
-#endif
+ mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
netdev_warn(priv->netdev,
"Failed to initialize tc (eswitch), err: %d", err);
@@ -5074,9 +5322,8 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
-#endif
+ mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
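
Editor's note: the en_tc.c hunks above gate the new redirect-to-internal-port path behind a strict sequence of checks. The following is a minimal userspace sketch of that validation order, not part of the patch; all names are hypothetical stand-ins for the driver's state.

#include <stdbool.h>
#include <stdio.h>

struct redirect_req {
	bool out_dev_is_ovs_master;    /* destination is an OVS internal port */
	bool filter_dev_is_ovs_master; /* rule was installed on an internal port */
	bool ptype_host;               /* preceded by a ptype=host action */
	int  out_count;                /* destinations already collected */
};

static int validate_redirect_ingress(const struct redirect_req *req)
{
	if (!req->out_dev_is_ovs_master)
		return -1; /* only OVS internal ports may be ingress targets */
	if (req->filter_dev_is_ovs_master)
		return -1; /* internal port -> internal port is not offloaded */
	if (!req->ptype_host)
		return -1; /* hardware needs the explicit ptype=host marker */
	if (req->out_count)
		return -1; /* must be the single destination of the rule */
	return 0;
}

int main(void)
{
	struct redirect_req ok = { true, false, true, 0 };

	printf("valid: %d\n", validate_redirect_ingress(&ok)); /* prints 0 */
	return 0;
}
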
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 1a4cd882f0fb..fdb222793027 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -38,6 +38,7 @@
#include "eswitch.h"
#include "en/tc_ct.h"
#include "en/tc_tun.h"
+#include "en/tc/int_port.h"
#include "en_rep.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
@@ -56,7 +57,7 @@
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
- struct net_device *tun_dev;
+ struct net_device *fwd_dev;
};
struct mlx5_nic_flow_attr {
@@ -104,6 +105,8 @@ struct mlx5_rx_tun_attr {
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
+#define MLX5E_TC_MAX_INT_PORT_NUM (8)
+
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
struct tunnel_match_key {
@@ -283,6 +286,12 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
u16 *vport);
+int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type,
+ u32 *action,
+ int out_index);
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index c63d78eda606..7fd33b356cc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -38,6 +38,7 @@
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
+#include "en_accel/ipsec_rxtx.h"
#include "en/ptp.h"
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
@@ -213,27 +214,14 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
-/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
- * need to set L3 checksum flag for IPsec
- */
-static void
-ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_wqe_eth_seg *eseg)
-{
- eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
- if (skb->encapsulation) {
- eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
- sq->stats->csum_partial_inner++;
- } else {
- sq->stats->csum_partial++;
- }
-}
-
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
{
+ if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
+ return;
+
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -249,8 +237,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
#endif
- } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
- ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
} else
sq->stats->csum_none++;
}
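
Editor's note: the en_tx.c hunk reorders the checksum decision so the IPsec case is tested first by a helper that returns true when it handled the packet. A minimal sketch of that early-return shape, assuming hypothetical flag values and names:

#include <stdbool.h>
#include <stdio.h>

#define L3_CSUM 0x1
#define L4_CSUM 0x2

static bool ipsec_csum(bool ipsec_meta, unsigned int *flags)
{
	if (!ipsec_meta)
		return false;  /* not ours: fall through to the common path */
	*flags = L3_CSUM;      /* IPsec always needs the L3 checksum flag */
	return true;
}

static unsigned int build_csum_flags(bool ipsec_meta, bool csum_partial)
{
	unsigned int flags = 0;

	if (ipsec_csum(ipsec_meta, &flags))
		return flags;              /* early return mirrors the new code */
	if (csum_partial)
		return L3_CSUM | L4_CSUM;  /* common offload case */
	return 0;                          /* csum_none */
}

int main(void)
{
	printf("%u\n", build_csum_flags(true, false)); /* 1: IPsec path */
	printf("%u\n", build_csum_flags(false, true)); /* 3: partial csum */
	return 0;
}
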
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 605c8ecc3610..792e0d6aa861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -632,6 +632,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
@@ -644,6 +645,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_NUM_ASYNC_EQE,
};
@@ -653,6 +655,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
goto err2;
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = /* TODO: sriov max_vf + */ 1,
.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
@@ -806,8 +809,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
ncomp_eqs = table->num_comp_eqs;
nent = MLX5_COMP_EQ_SIZE;
for (i = 0; i < ncomp_eqs; i++) {
- int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
struct mlx5_eq_param param = {};
+ int vecidx = i;
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
@@ -953,9 +956,7 @@ static int set_rmap(struct mlx5_core_dev *mdev)
goto err_out;
}
- vecidx = MLX5_IRQ_VEC_COMP_BASE;
- for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
- vecidx++) {
+ for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
err = irq_cpu_rmap_add(eq_table->rmap,
pci_irq_vector(mdev->pdev, vecidx));
if (err) {
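
Editor's note: with the control EQs moved to a dedicated MLX5_IRQ_EQ_CTRL index, the completion-vector loops above become zero-based, so vector i maps straight to pci_irq_vector(pdev, i) with no MLX5_IRQ_VEC_COMP_BASE offset. A trivial userspace model with a fake IRQ lookup:

#include <stdio.h>

static int fake_pci_irq_vector(int vecidx) { return 32 + vecidx; }

int main(void)
{
	int num_comp_eqs = 4, vecidx;

	for (vecidx = 0; vecidx < num_comp_eqs; vecidx++)
		printf("comp eq %d -> irq %d\n", vecidx,
		       fake_pci_irq_vector(vecidx));
	return 0;
}
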
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 7e221038df8d..f690f430f40f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -28,7 +28,10 @@
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
@@ -61,6 +64,9 @@ struct mlx5_esw_bridge {
struct mlx5_flow_table *egress_ft;
struct mlx5_flow_group *egress_vlan_fg;
struct mlx5_flow_group *egress_mac_fg;
+ struct mlx5_flow_group *egress_miss_fg;
+ struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
+ struct mlx5_flow_handle *egress_miss_handle;
unsigned long ageing_time;
u32 flags;
};
@@ -86,6 +92,26 @@ mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
SWITCHDEV_FDB_DEL_TO_BRIDGE);
}
+static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
+{
+ return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
+ offsetof(struct vlan_ethhdr, h_vlan_proto);
+}
+
+static struct mlx5_pkt_reformat *
+mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
+{
+ struct mlx5_pkt_reformat_params reformat_params = {};
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
+ reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
+ reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ reformat_params.size = sizeof(struct vlan_hdr);
+ return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
+}
+
static struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
{
@@ -287,43 +313,74 @@ mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_
return fg;
}
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(egress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create bridge egress table miss flow group (err=%ld)\n",
+ PTR_ERR(fg));
+ kvfree(in);
+ return fg;
+}
+
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
struct mlx5_flow_table *ingress_ft, *skip_ft;
+ struct mlx5_eswitch *esw = br_offloads->esw;
int err;
- if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw))
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return -EOPNOTSUPP;
ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(ingress_ft))
return PTR_ERR(ingress_ft);
skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(skip_ft)) {
err = PTR_ERR(skip_ft);
goto err_skip_tbl;
}
- vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft);
+ vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
if (IS_ERR(vlan_fg)) {
err = PTR_ERR(vlan_fg);
goto err_vlan_fg;
}
- filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(br_offloads->esw, ingress_ft);
+ filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft);
if (IS_ERR(filter_fg)) {
err = PTR_ERR(filter_fg);
goto err_filter_fg;
}
- mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft);
+ mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
goto err_mac_fg;
@@ -362,35 +419,82 @@ mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloa
br_offloads->ingress_ft = NULL;
}
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
+ struct mlx5_flow_table *skip_ft,
+ struct mlx5_pkt_reformat *pkt_reformat);
+
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
{
- struct mlx5_flow_group *mac_fg, *vlan_fg;
+ struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg;
+ struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
+ struct mlx5_flow_handle *miss_handle = NULL;
+ struct mlx5_eswitch *esw = br_offloads->esw;
struct mlx5_flow_table *egress_ft;
int err;
egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(egress_ft))
return PTR_ERR(egress_ft);
- vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft);
+ vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
if (IS_ERR(vlan_fg)) {
err = PTR_ERR(vlan_fg);
goto err_vlan_fg;
}
- mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft);
+ mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
goto err_mac_fg;
}
+ if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
+ miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
+ if (IS_ERR(miss_fg)) {
+ esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
+ PTR_ERR(miss_fg));
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+
+ miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
+ if (IS_ERR(miss_pkt_reformat)) {
+ esw_warn(esw->dev,
+ "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
+ PTR_ERR(miss_pkt_reformat));
+ miss_pkt_reformat = NULL;
+ mlx5_destroy_flow_group(miss_fg);
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+
+ miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
+ br_offloads->skip_ft,
+ miss_pkt_reformat);
+ if (IS_ERR(miss_handle)) {
+ esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
+ PTR_ERR(miss_handle));
+ miss_handle = NULL;
+ mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
+ miss_pkt_reformat = NULL;
+ mlx5_destroy_flow_group(miss_fg);
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+ }
+skip_miss_flow:
+
bridge->egress_ft = egress_ft;
bridge->egress_vlan_fg = vlan_fg;
bridge->egress_mac_fg = mac_fg;
+ bridge->egress_miss_fg = miss_fg;
+ bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
+ bridge->egress_miss_handle = miss_handle;
return 0;
err_mac_fg:
@@ -403,6 +507,13 @@ err_vlan_fg:
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
+ if (bridge->egress_miss_handle)
+ mlx5_del_flow_rules(bridge->egress_miss_handle);
+ if (bridge->egress_miss_pkt_reformat)
+ mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
+ bridge->egress_miss_pkt_reformat);
+ if (bridge->egress_miss_fg)
+ mlx5_destroy_flow_group(bridge->egress_miss_fg);
mlx5_destroy_flow_group(bridge->egress_mac_fg);
mlx5_destroy_flow_group(bridge->egress_vlan_fg);
mlx5_destroy_flow_table(bridge->egress_ft);
@@ -443,8 +554,10 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
if (vlan && vlan->pkt_reformat_push) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
flow_act.pkt_reformat = vlan->pkt_reformat_push;
+ flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
} else if (vlan) {
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.cvlan_tag);
@@ -564,6 +677,10 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
if (!rule_spec)
return ERR_PTR(-ENOMEM);
+ if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
+ vport_num == MLX5_VPORT_UPLINK)
+ rule_spec->flow_context.flow_source =
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
@@ -599,6 +716,41 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
return handle;
}
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
+ struct mlx5_flow_table *skip_ft,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ .ft = skip_ft,
+ };
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
+ .flags = FLOW_ACT_NO_APPEND,
+ .pkt_reformat = pkt_reformat,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
+ ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);
+
+ handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
struct mlx5_esw_bridge_offloads *br_offloads)
{
@@ -736,14 +888,20 @@ mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
kvfree(entry);
}
+static void
+mlx5_esw_bridge_fdb_entry_notify_and_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
+ struct mlx5_esw_bridge *bridge)
+{
+ mlx5_esw_bridge_fdb_del_notify(entry);
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+}
+
static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
{
struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
- list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
- mlx5_esw_bridge_fdb_del_notify(entry);
- mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
- }
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
+ mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
static struct mlx5_esw_bridge_vlan *
@@ -798,24 +956,14 @@ mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5
static int
mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
- struct mlx5_pkt_reformat_params reformat_params = {};
struct mlx5_pkt_reformat *pkt_reformat;
- if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) ||
- MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) < sizeof(struct vlan_hdr) ||
- MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) <
- offsetof(struct vlan_ethhdr, h_vlan_proto)) {
+ if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
return -EOPNOTSUPP;
}
- reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
- reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
- reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
- reformat_params.size = sizeof(struct vlan_hdr);
- pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
- &reformat_params,
- MLX5_FLOW_NAMESPACE_FDB);
+ pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(pkt_reformat)) {
esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
PTR_ERR(pkt_reformat));
@@ -833,6 +981,33 @@ mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_
vlan->pkt_reformat_pop = NULL;
}
+static int
+mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_modify_hdr *pkt_mod_hdr;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+ MLX5_SET(set_action_in, action, offset, 8);
+ MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
+ MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);
+
+ pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
+ if (IS_ERR(pkt_mod_hdr))
+ return PTR_ERR(pkt_mod_hdr);
+
+ vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
+ return 0;
+}
+
+static void
+mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
+ vlan->pkt_mod_hdr_push_mark = NULL;
+}
+
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
struct mlx5_eswitch *esw)
@@ -852,6 +1027,10 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por
err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
if (err)
goto err_vlan_push;
+
+ err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
+ if (err)
+ goto err_vlan_push_mark;
}
if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
@@ -870,6 +1049,9 @@ err_xa_insert:
if (vlan->pkt_reformat_pop)
mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
err_vlan_pop:
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
+err_vlan_push_mark:
if (vlan->pkt_reformat_push)
mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push:
@@ -886,17 +1068,18 @@ static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
struct mlx5_esw_bridge *bridge)
{
+ struct mlx5_eswitch *esw = bridge->br_offloads->esw;
struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
- list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) {
- mlx5_esw_bridge_fdb_del_notify(entry);
- mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
- }
+ list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list)
+ mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
if (vlan->pkt_reformat_pop)
- mlx5_esw_bridge_vlan_pop_cleanup(vlan, bridge->br_offloads->esw);
+ mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
if (vlan->pkt_reformat_push)
- mlx5_esw_bridge_vlan_push_cleanup(vlan, bridge->br_offloads->esw);
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
}
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
@@ -949,6 +1132,17 @@ mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
}
static struct mlx5_esw_bridge_fdb_entry *
+mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge *bridge,
+ const unsigned char *addr, u16 vid)
+{
+ struct mlx5_esw_bridge_fdb_key key = {};
+
+ ether_addr_copy(key.addr, addr);
+ key.vid = vid;
+ return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
+}
+
+static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
@@ -966,6 +1160,10 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
return ERR_CAST(vlan);
}
+ entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid);
+ if (entry)
+ mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
+
entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return ERR_PTR(-ENOMEM);
@@ -1265,7 +1463,6 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
struct switchdev_notifier_fdb_info *fdb_info)
{
struct mlx5_esw_bridge_fdb_entry *entry;
- struct mlx5_esw_bridge_fdb_key key;
struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge *bridge;
@@ -1274,13 +1471,11 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
return;
bridge = port->bridge;
- ether_addr_copy(key.addr, fdb_info->addr);
- key.vid = fdb_info->vid;
- entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
+ entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
if (!entry) {
esw_debug(br_offloads->esw->dev,
"FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
- key.addr, key.vid, vport_num);
+ fdb_info->addr, fdb_info->vid, vport_num);
return;
}
@@ -1322,7 +1517,6 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_o
{
struct mlx5_eswitch *esw = br_offloads->esw;
struct mlx5_esw_bridge_fdb_entry *entry;
- struct mlx5_esw_bridge_fdb_key key;
struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge *bridge;
@@ -1331,18 +1525,15 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_o
return;
bridge = port->bridge;
- ether_addr_copy(key.addr, fdb_info->addr);
- key.vid = fdb_info->vid;
- entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
+ entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
if (!entry) {
esw_warn(esw->dev,
"FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
- key.addr, key.vid, vport_num);
+ fdb_info->addr, fdb_info->vid, vport_num);
return;
}
- mlx5_esw_bridge_fdb_del_notify(entry);
- mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+ mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
@@ -1358,13 +1549,11 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
continue;
- if (time_after(lastuse, entry->lastuse)) {
+ if (time_after(lastuse, entry->lastuse))
mlx5_esw_bridge_fdb_entry_refresh(entry);
- } else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
- time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) {
- mlx5_esw_bridge_fdb_del_notify(entry);
- mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
- }
+ else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
+ time_is_before_jiffies(entry->lastuse + bridge->ageing_time))
+ mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
}
}
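
Editor's note: the bridge.c changes install the egress miss machinery best-effort: each stage (flow group, packet reformat, miss rule) is optional, and a failure unwinds only the stages already built while the bridge still comes up without VLAN-pop restore. A minimal userspace sketch of that staged, skip-on-failure pattern; every name here is a hypothetical stand-in.

#include <stdio.h>
#include <stdlib.h>

struct miss_state { void *fg, *reformat, *handle; };

static void *stage(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "%s failed, skipping miss flow\n", name);
		return NULL;
	}
	return malloc(1);
}

static void setup_miss(struct miss_state *s, int fail_at)
{
	s->fg = stage("flow group", fail_at == 1);
	if (!s->fg)
		return;
	s->reformat = stage("pkt reformat", fail_at == 2);
	if (!s->reformat) {
		free(s->fg); s->fg = NULL;      /* unwind stage 1 */
		return;
	}
	s->handle = stage("miss rule", fail_at == 3);
	if (!s->handle) {
		free(s->reformat); free(s->fg); /* unwind stages 1-2 */
		s->reformat = s->fg = NULL;
	}
}

int main(void)
{
	struct miss_state s = {0};

	setup_miss(&s, 0); /* all stages succeed */
	printf("miss flow %s\n", s.handle ? "installed" : "skipped");
	free(s.handle); free(s.reformat); free(s.fg);
	return 0;
}
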
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
index 52964a82d6a6..878311fe950a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -49,6 +49,7 @@ struct mlx5_esw_bridge_vlan {
struct list_head fdb_list;
struct mlx5_pkt_reformat *pkt_reformat_push;
struct mlx5_pkt_reformat *pkt_reformat_pop;
+ struct mlx5_modify_hdr *pkt_mod_hdr_push_mark;
};
struct mlx5_esw_bridge_port {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 20af557ae30c..7f9b96d9537e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -36,7 +36,7 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
return NULL;
mlx5_esw_get_port_parent_id(dev, &ppid);
- pfnum = PCI_FUNC(dev->pdev->devfn);
+ pfnum = mlx5_get_dev_index(dev);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
@@ -149,7 +149,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
if (IS_ERR(vport))
return PTR_ERR(vport);
- pfnum = PCI_FUNC(dev->pdev->devfn);
+ pfnum = mlx5_get_dev_index(dev);
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 985e305179d1..c6cc67cb4f6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
err_min_rate:
list_del(&group->list);
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- group->tsar_ix);
- if (err)
+ if (mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ group->tsar_ix))
NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
err_sched_elem:
kfree(group);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2c7444101bb9..42f8ee2e5d9f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -51,6 +51,7 @@
enum mlx5_mapped_obj_type {
MLX5_MAPPED_OBJ_CHAIN,
MLX5_MAPPED_OBJ_SAMPLE,
+ MLX5_MAPPED_OBJ_INT_PORT_METADATA,
};
struct mlx5_mapped_obj {
@@ -63,6 +64,7 @@ struct mlx5_mapped_obj {
u32 trunc_size;
u32 tunnel_id;
} sample;
+ u32 int_port_metadata;
};
};
@@ -88,6 +90,7 @@ enum {
MAPPING_TYPE_TUNNEL_ENC_OPTS,
MAPPING_TYPE_LABELS,
MAPPING_TYPE_ZONE,
+ MAPPING_TYPE_INT_PORT,
};
struct vport_ingress {
@@ -336,6 +339,9 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);
+bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
+int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
+
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
@@ -433,7 +439,7 @@ enum mlx5_flow_match_level {
};
/* current maximum for flow based vport multicasting */
-#define MLX5_MAX_FLOW_FWD_VPORTS 2
+#define MLX5_MAX_FLOW_FWD_VPORTS 32
enum {
MLX5_ESW_DEST_ENCAP = BIT(0),
@@ -447,12 +453,22 @@ enum {
MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
+ MLX5_ESW_ATTR_FLAG_ACCEPT = BIT(5),
};
+/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
+static inline bool
+mlx5_esw_attr_flags_skip(u32 attr_flags)
+{
+ return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT);
+}
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_core_dev *in_mdev;
struct mlx5_core_dev *counter_dev;
+ struct mlx5e_tc_int_port *dest_int_port;
+ struct mlx5e_tc_int_port *int_port;
int split_count;
int out_count;
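
Editor's note: the eswitch.h hunk adds MLX5_ESW_ATTR_FLAG_ACCEPT and folds it with SLOW_PATH into one named predicate, so callers stop open-coding the mask. A minimal sketch of the helper; the bit positions below are illustrative, not the driver's actual values.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_SLOW_PATH (1u << 1) /* illustrative position */
#define FLAG_ACCEPT    (1u << 5) /* illustrative position */

/* both flags mean "skip normal destination processing" */
static bool attr_flags_skip(unsigned int flags)
{
	return flags & (FLAG_SLOW_PATH | FLAG_ACCEPT);
}

int main(void)
{
	printf("%d %d %d\n",
	       attr_flags_skip(FLAG_SLOW_PATH),
	       attr_flags_skip(FLAG_ACCEPT),
	       attr_flags_skip(0)); /* 1 1 0 */
	return 0;
}
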
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0d461e38add3..f4eaa5893886 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -86,12 +86,18 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
- if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
- attr && attr->in_rep)
- spec->flow_context.flow_source =
- attr->in_rep->vport == MLX5_VPORT_UPLINK ?
- MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
- MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
+ return;
+
+ if (attr->int_port) {
+ spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);
+
+ return;
+ }
+
+ spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
/* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits
@@ -121,6 +127,8 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_eswitch *src_esw,
u16 vport)
{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ u32 metadata;
void *misc2;
void *misc;
@@ -130,10 +138,16 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
if (mlx5_esw_indir_table_decap_vport(attr))
vport = mlx5_esw_indir_table_decap_vport(attr);
+
+ if (esw_attr->int_port)
+ metadata =
+ mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
+ else
+ metadata =
+ mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);
+
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
- MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(src_esw,
- vport));
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
@@ -290,8 +304,11 @@ esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
if (err)
goto err_setup_chain;
- flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
+
+ if (esw_attr->dests[j].pkt_reformat) {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
+ }
}
return 0;
@@ -315,7 +332,8 @@ esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
int i;
for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
- if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ if (esw_attr->dests[i].rep &&
+ mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
esw_attr->dests[i].mdev))
return true;
return false;
@@ -440,7 +458,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
} else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
- } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ } else if (mlx5_esw_attr_flags_skip(attr->flags)) {
esw_setup_slow_path_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->dest_chain) {
@@ -467,7 +485,7 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
if (attr->dest_ft) {
esw_cleanup_decap_indir(esw, attr);
- } else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ } else if (!mlx5_esw_attr_flags_skip(attr->flags)) {
if (attr->dest_chain)
esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
else if (esw_is_indir_table(esw, attr))
@@ -482,12 +500,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
bool split = !!(esw_attr->split_count);
struct mlx5_vport_tbl_attr fwd_attr;
+ struct mlx5_flow_destination *dest;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int i = 0;
@@ -495,6 +513,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
+ if (!dest)
+ return ERR_PTR(-ENOMEM);
+
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
@@ -574,6 +596,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
else
atomic64_inc(&esw->offloads.num_flows);
+ kfree(dest);
return rule;
err_add_rule:
@@ -584,6 +607,7 @@ err_add_rule:
err_esw_get:
esw_cleanup_dests(esw, attr);
err_create_goto_table:
+ kfree(dest);
return rule;
}
@@ -592,16 +616,20 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
struct mlx5_vport_tbl_attr fwd_attr;
+ struct mlx5_flow_destination *dest;
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
int i, err = 0;
+ dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
+ if (!dest)
+ return ERR_PTR(-ENOMEM);
+
fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
@@ -654,6 +682,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
atomic64_inc(&esw->offloads.num_flows);
+ kfree(dest);
return rule;
err_chain_src_rewrite:
esw_put_dest_tables_loop(esw, attr, 0, i);
@@ -661,6 +690,7 @@ err_chain_src_rewrite:
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
+ kfree(dest);
return rule;
}
@@ -678,7 +708,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_del_flow_rules(rule);
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ if (!mlx5_esw_attr_flags_skip(attr->flags)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (esw_attr->dests[i].termtbl)
@@ -1009,7 +1039,7 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
u16 vport_num;
num_vfs = esw->esw_funcs.num_vfs;
- flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
if (!flows)
return -ENOMEM;
@@ -1188,7 +1218,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
- flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
@@ -1845,6 +1875,17 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
atomic64_set(&esw->user_count, 0);
}
+static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw)
+{
+ int nvports;
+
+ nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
+ if (mlx5e_tc_int_port_supported(esw))
+ nvports += MLX5E_TC_MAX_INT_PORT_NUM;
+
+ return nvports;
+}
+
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
struct mlx5_flow_table_attr ft_attr = {};
@@ -1859,7 +1900,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
- ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
+ ft_attr.max_fte = esw_get_offloads_ft_size(esw);
ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
@@ -1888,7 +1929,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
int nvports;
int err = 0;
- nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
+ nvports = esw_get_offloads_ft_size(esw);
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -2793,12 +2834,13 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
- u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
+ /* Reserve 0xf for internal port offload */
+ u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
u32 pf_num;
int id;
/* Only 4 bits of pf_num */
- pf_num = PCI_FUNC(esw->dev->pdev->devfn);
+ pf_num = mlx5_get_dev_index(esw->dev);
if (pf_num > max_pf_num)
return 0;
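
Editor's note: with MLX5_MAX_FLOW_FWD_VPORTS raised from 2 to 32, the destination arrays in eswitch_offloads.c no longer fit comfortably on a kernel stack, so the hunks above switch them to heap allocation that every exit path frees. A minimal userspace model of that calloc/free discipline:

#include <stdio.h>
#include <stdlib.h>

#define MAX_FWD_VPORTS 32

struct dest { int type; };

static int add_rule(int fail)
{
	struct dest *dest = calloc(MAX_FWD_VPORTS + 1, sizeof(*dest));

	if (!dest)
		return -12;     /* -ENOMEM */
	if (fail) {
		free(dest);     /* error path frees the array too */
		return -22;     /* -EINVAL, stand-in failure */
	}
	/* ... program up to 33 destinations ... */
	free(dest);             /* success path frees as well */
	return 0;
}

int main(void)
{
	printf("%d %d\n", add_rule(0), add_rule(1)); /* 0 -22 */
	return 0;
}
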
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index b45954905845..182306bbefaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -219,8 +219,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
- attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
- !mlx5_eswitch_offload_is_uplink_port(esw, spec))
+ mlx5_esw_attr_flags_skip(attr->flags) ||
+ (!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port))
return false;
/* push vlan on RX */
@@ -229,7 +229,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
/* hairpin */
for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
- if (esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+ if (!esw_attr->dest_int_port && esw_attr->dests[i].rep &&
+ esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
return true;
return false;
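
Editor's note: internal-port destinations carry no representor, so the termtbl hairpin scan above must test dests[i].rep before dereferencing its vport. A minimal sketch of the guarded loop, with hypothetical userspace types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define VPORT_UPLINK 0xffff

struct rep { unsigned short vport; };
struct dest { struct rep *rep; };

static bool hairpin_needed(struct dest *dests, int n, bool dest_int_port)
{
	int i;

	for (i = 0; i < n; i++)
		if (!dest_int_port && dests[i].rep &&
		    dests[i].rep->vport == VPORT_UPLINK)
			return true;
	return false;
}

int main(void)
{
	struct rep uplink = { VPORT_UPLINK };
	/* first dest models an internal port (no rep), second the uplink */
	struct dest dests[2] = { { NULL }, { &uplink } };

	printf("%d\n", hairpin_needed(dests, 2, false)); /* 1 */
	return 0;
}
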
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 306279b7f9e7..12abe991583a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -115,7 +115,7 @@ static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
data->byte_count = cpu_to_be32(buf->sg[0].size);
- data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
+ data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey);
data->addr = cpu_to_be64(buf->sg[0].dma_addr);
conn->qp.rq.pc++;
@@ -155,7 +155,7 @@ static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
if (!buf->sg[sgi].data)
break;
data->byte_count = cpu_to_be32(buf->sg[sgi].size);
- data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
+ data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey);
data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
data++;
size++;
@@ -221,7 +221,7 @@ static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
}
static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- struct mlx5_core_mkey *mkey)
+ u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
@@ -978,7 +978,7 @@ int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
goto err_dealloc_pd;
}
- mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key);
+ mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey);
return 0;
@@ -994,7 +994,7 @@ out:
void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
{
- mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
+ mlx5_core_destroy_mkey(fdev->mdev, fdev->conn_res.mkey);
mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
mlx5_nic_vport_disable_roce(fdev->mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 52c9dee91ea4..2a984e82ae16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -54,7 +54,7 @@ struct mlx5_fpga_device {
/* QP Connection resources */
struct {
u32 pdn;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
struct mlx5_uars_page *uar;
} conn_res;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 7db8df64a60e..750b21124a1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -185,6 +185,20 @@ static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}
+static int
+mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return 0;
+}
+
+static int
+mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return 0;
+}
+
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
@@ -563,8 +577,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = dst->dest_attr.ft->id;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
- id = dst->dest_attr.vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
!!(dst->dest_attr.vport.flags &
@@ -572,6 +586,12 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
+ if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
+ /* destination_id is reserved */
+ id = 0;
+ break;
+ }
+ id = dst->dest_attr.vport.num;
if (extended_dest &&
dst->dest_attr.vport.pkt_reformat) {
MLX5_SET(dest_format_struct, in_dests,
@@ -909,6 +929,45 @@ static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
+static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_OBJ_TYPE_MATCH_DEFINER);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
+
+ return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+ void *ptr;
+ int err;
+
+ MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
+ MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
+ MLX5_SET(match_definer, ptr, format_id, format_id);
+
+ ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
+ memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
+
+ err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
+ return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+}
+
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
.destroy_flow_table = mlx5_cmd_destroy_flow_table,
@@ -923,6 +982,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
@@ -942,6 +1003,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_stub_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_stub_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
@@ -969,6 +1032,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_NIC_TX:
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
+ case FS_FT_PORT_SEL:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
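
Editor's note: fs_cmd.c extends the per-namespace command table with a match-definer create/destroy pair, wiring real firmware commands into one table and success-returning stubs into the other. A minimal sketch of that op-table-plus-stubs pattern; the functions and IDs below are hypothetical.

#include <stdio.h>

struct flow_cmds {
	int (*create_match_definer)(unsigned short format_id);
	int (*destroy_match_definer)(int definer_id);
};

static int fw_create(unsigned short format_id) { return 100 + format_id; }
static int fw_destroy(int definer_id) { (void)definer_id; return 0; }
static int stub_create(unsigned short format_id) { (void)format_id; return 0; }
static int stub_destroy(int definer_id) { (void)definer_id; return 0; }

static const struct flow_cmds fw_cmds   = { fw_create, fw_destroy };
static const struct flow_cmds stub_cmds = { stub_create, stub_destroy };

int main(void)
{
	int id = fw_cmds.create_match_definer(7);

	printf("fw id %d, stub id %d\n", id, stub_cmds.create_match_definer(7));
	fw_cmds.destroy_match_definer(id);
	return 0;
}
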
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 5ecd33cdc087..220ec632d35a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -97,6 +97,10 @@ struct mlx5_flow_cmds {
int (*create_ns)(struct mlx5_flow_root_namespace *ns);
int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
+ int (*create_match_definer)(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask);
+ int (*destroy_match_definer)(struct mlx5_flow_root_namespace *ns,
+ int definer_id);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index fe501ba88bea..386ab9a2d490 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -99,6 +99,9 @@
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
+#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
+#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
+
#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_NUM_PRIOS)
@@ -206,34 +209,63 @@ static struct init_tree_node egress_root_fs = {
}
};
-#define RDMA_RX_BYPASS_PRIO 0
-#define RDMA_RX_KERNEL_PRIO 1
+enum {
+ RDMA_RX_COUNTERS_PRIO,
+ RDMA_RX_BYPASS_PRIO,
+ RDMA_RX_KERNEL_PRIO,
+};
+
+#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
+#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
+#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
+
static struct init_tree_node rdma_rx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 2,
+ .ar_size = 3,
.children = (struct init_tree_node[]) {
+ [RDMA_RX_COUNTERS_PRIO] =
+ ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
+ RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
[RDMA_RX_BYPASS_PRIO] =
- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
+ ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
[RDMA_RX_KERNEL_PRIO] =
- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
+ ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
ADD_MULTIPLE_PRIO(1, 1))),
}
};
+enum {
+ RDMA_TX_COUNTERS_PRIO,
+ RDMA_TX_BYPASS_PRIO,
+};
+
+#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
+#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
+
static struct init_tree_node rdma_tx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 1,
+ .ar_size = 2,
.children = (struct init_tree_node[]) {
- ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+ [RDMA_TX_COUNTERS_PRIO] =
+ ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
+ RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
+ [RDMA_TX_BYPASS_PRIO] =
+ ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS_RDMA_TX,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
- ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
BY_PASS_PRIO_NUM_LEVELS))),
}
};
@@ -2191,6 +2223,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->fdb_root_ns)
return &steering->fdb_root_ns->ns;
return NULL;
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ if (steering->port_sel_root_ns)
+ return &steering->port_sel_root_ns->ns;
+ return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@@ -2215,6 +2251,12 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
prio = RDMA_RX_KERNEL_PRIO;
} else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
root_ns = steering->rdma_tx_root_ns;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) {
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_COUNTERS_PRIO;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) {
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_COUNTERS_PRIO;
} else { /* Must be NIC RX */
root_ns = steering->root_ns;
prio = type;
@@ -2596,6 +2638,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
steering->fdb_sub_ns = NULL;
+ cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
@@ -2634,6 +2677,21 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
return PTR_ERR_OR_ZERO(prio);
}
+#define PORT_SEL_NUM_LEVELS 3
+static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
+ if (!steering->port_sel_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
+ PORT_SEL_NUM_LEVELS);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
int err;
@@ -3020,6 +3078,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
+ err = init_port_sel_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
err = init_rdma_rx_root_ns(steering);
@@ -3224,6 +3288,52 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
+{
+ return definer->id;
+}
+
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_definer *definer;
+ int id;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return ERR_PTR(-ENOMEM);
+
+ definer->ns_type = ns_type;
+ id = root->cmds->create_match_definer(root, format_id, match_mask);
+ if (id < 0) {
+ mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
+ kfree(definer);
+ return ERR_PTR(id);
+ }
+ definer->id = id;
+ return definer;
+}
+
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer)
+{
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, definer->ns_type);
+ if (WARN_ON(!root))
+ return;
+
+ root->cmds->destroy_match_definer(root, definer->id);
+ kfree(definer);
+}
+
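/* Editor's sketch (not part of the patch): a plausible caller-side lifecycle
 * for the match definer API added above. The function name and the format_id
 * value are illustrative assumptions, not driver code.
 */
static int example_definer_lifecycle(struct mlx5_core_dev *dev, u32 *mask)
{
	struct mlx5_flow_definer *definer;
	int id;

	definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
					    22 /* outer 5-tuple format */, mask);
	if (IS_ERR(definer))
		return PTR_ERR(definer);

	/* The HW-assigned id is what hash flow groups reference. */
	id = mlx5_get_match_definer_id(definer);

	mlx5_destroy_match_definer(dev, definer);
	return id;
}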
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 98240badc342..7711db245c63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -49,6 +49,11 @@
#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2
+struct mlx5_flow_definer {
+ enum mlx5_flow_namespace_type ns_type;
+ u32 id;
+};
+
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
union {
@@ -97,7 +102,8 @@ enum fs_flow_table_type {
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
FS_FT_RDMA_TX = 0X8,
- FS_FT_MAX_TYPE = FS_FT_RDMA_TX,
+ FS_FT_PORT_SEL = 0X9,
+ FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
};
enum fs_flow_table_op_mod {
@@ -129,6 +135,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
+ struct mlx5_flow_root_namespace *port_sel_root_ns;
int esw_egress_acl_vports;
int esw_ingress_acl_vports;
};
@@ -341,7 +348,8 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 18e5aec14641..31c99d53faf7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -40,6 +40,7 @@
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
+#define MLX5_SF_NUM_COUNTERS_BULK 6
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
@@ -146,8 +147,12 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
- return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
- (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+ int num_counters_bulk = mlx5_core_is_sf(dev) ?
+ MLX5_SF_NUM_COUNTERS_BULK :
+ MLX5_SW_MAX_COUNTERS_BULK;
+
+ return min_t(int, num_counters_bulk,
+ (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}
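/* Editor's note (worked example for the change above): on an SF whose
 * log_max_flow_counter_bulk is 10, the result is
 * min(MLX5_SF_NUM_COUNTERS_BULK = 6, 1 << 10 = 1024) = 6, while a PF/VF
 * still gets min(MLX5_SW_MAX_COUNTERS_BULK = 32768, 1024) = 1024, keeping
 * SF bulk-query buffers small.
 */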
static void update_counter_cache(int index, u32 *bulk_raw_data,
@@ -296,7 +301,7 @@ static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
return mlx5_fc_single_alloc(dev);
}
-struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
{
struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
@@ -327,8 +332,6 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
goto err_out_alloc;
llist_add(&counter->addlist, &fc_stats->addlist);
-
- mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
}
return counter;
@@ -337,6 +340,16 @@ err_out_alloc:
mlx5_fc_release(dev, counter);
return ERR_PTR(err);
}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging);
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ if (aging)
+ mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+ return counter;
+}
EXPORT_SYMBOL(mlx5_fc_create);
u32 mlx5_fc_id(struct mlx5_fc *counter)
@@ -497,8 +510,7 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
- GFP_KERNEL);
+ bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
if (!bulk)
goto err_alloc_bulk;
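/* Editor's sketch (stand-alone example, not driver code): struct_size()
 * sizes a struct with a trailing flexible array and saturates on overflow,
 * unlike the open-coded multiplication it replaces above.
 */
struct example_bulk {
	int bulk_len;
	struct mlx5_fc fcs[];	/* flexible array member */
};

static struct example_bulk *example_bulk_alloc(int bulk_len)
{
	struct example_bulk *bulk;

	/* Equivalent to sizeof(*bulk) + bulk_len * sizeof(bulk->fcs[0]),
	 * but evaluates to SIZE_MAX (so the allocation fails) on overflow.
	 */
	bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
	return bulk;
}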
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 016d26f809a5..2d8406fab844 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
+#include "lib/tout.h"
#include "accel/tls.h"
enum {
@@ -148,6 +149,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (err)
return err;
+ if (MLX5_CAP_GEN(dev, port_selection_cap)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
+ if (err)
+ return err;
+ }
+
if (MLX5_CAP_GEN(dev, hca_cap_2)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
if (err)
@@ -262,6 +269,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, shampo)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -317,10 +330,9 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
return 0;
}
-#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
- unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+ unsigned long end, delay_ms = mlx5_tout_ms(dev, TEARDOWN);
u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
int state;
@@ -618,17 +630,18 @@ static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
fwhandle, 0);
}
-#define MLX5_FSM_REACTIVATE_TOUT 5000 /* msecs */
static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status)
{
- unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5_FSM_REACTIVATE_TOUT);
struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
u32 out[MLX5_ST_SZ_DW(mirc_reg)];
u32 in[MLX5_ST_SZ_DW(mirc_reg)];
+ unsigned long exp_time;
int err;
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FSM_REACTIVATE));
+
if (!MLX5_CAP_MCAM_REG2(dev, mirc))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 106b50e42b46..0b0234f9d694 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -3,6 +3,7 @@
#include "fw_reset.h"
#include "diag/fw_tracer.h"
+#include "lib/tout.h"
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
@@ -228,8 +229,6 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}
-#define MLX5_PCI_LINK_UP_TIMEOUT 2000
-
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
@@ -286,7 +285,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
goto restore;
}
- timeout = jiffies + msecs_to_jiffies(MLX5_PCI_LINK_UP_TIMEOUT);
+ timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, PCI_TOGGLE));
do {
err = pci_read_config_word(bridge, cap + PCI_EXP_LNKSTA, &reg16);
if (err)
@@ -299,8 +298,8 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
if (reg16 & PCI_EXP_LNKSTA_DLLLA) {
mlx5_core_info(dev, "PCI Link up\n");
} else {
- mlx5_core_err(dev, "PCI link not ready (0x%04x) after %d ms\n",
- reg16, MLX5_PCI_LINK_UP_TIMEOUT);
+ mlx5_core_err(dev, "PCI link not ready (0x%04x) after %llu ms\n",
+ reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
err = -ETIMEDOUT;
}
@@ -395,16 +394,16 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
return NOTIFY_OK;
}
-#define MLX5_FW_RESET_TIMEOUT_MSEC 5000
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
{
- unsigned long timeout = msecs_to_jiffies(MLX5_FW_RESET_TIMEOUT_MSEC);
+ unsigned long pci_sync_update_timeout = mlx5_tout_ms(dev, PCI_SYNC_UPDATE);
+ unsigned long timeout = msecs_to_jiffies(pci_sync_update_timeout);
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
int err;
if (!wait_for_completion_timeout(&fw_reset->done, timeout)) {
- mlx5_core_warn(dev, "FW sync reset timeout after %d seconds\n",
- MLX5_FW_RESET_TIMEOUT_MSEC / 1000);
+ mlx5_core_warn(dev, "FW sync reset timeout after %lu seconds\n",
+ pci_sync_update_timeout / 1000);
err = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 037e18dd4be0..64f1abc4dc36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -36,14 +36,15 @@
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
+#include <linux/kern_levels.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
#include "lib/pci_vsc.h"
+#include "lib/tout.h"
#include "diag/fw_tracer.h"
enum {
- MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
MAX_MISSES = 3,
};
@@ -74,6 +75,11 @@ enum {
MLX5_SENSOR_FW_SYND_RFR = 5,
};
+enum {
+ MLX5_SEVERITY_MASK = 0x7,
+ MLX5_SEVERITY_VALID_MASK = 0x8,
+};
+
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
@@ -98,12 +104,19 @@ static bool sensor_pci_not_working(struct mlx5_core_dev *dev)
return (ioread32be(&h->fw_ver) == 0xffffffff);
}
+static int mlx5_health_get_rfr(u8 rfr_severity)
+{
+ return rfr_severity >> MLX5_RFR_BIT_OFFSET;
+}
+
static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
u8 synd = ioread8(&h->synd);
+ u8 rfr;
+
+ rfr = mlx5_health_get_rfr(ioread8(&h->rfr_severity));
if (rfr && synd)
mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);
@@ -219,11 +232,9 @@ unlock:
mutex_unlock(&dev->intf_state_mutex);
}
-#define MLX5_CRDUMP_WAIT_MS 60000
-#define MLX5_FW_RESET_WAIT_MS 1000
void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
{
- unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS;
+ unsigned long end, delay_ms = mlx5_tout_ms(dev, PCI_TOGGLE);
int lock = -EBUSY;
mutex_lock(&dev->intf_state_mutex);
@@ -237,7 +248,7 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
lock = lock_sem_sw_reset(dev, true);
if (lock == -EBUSY) {
- delay_ms = MLX5_CRDUMP_WAIT_MS;
+ delay_ms = mlx5_tout_ms(dev, FULL_CRDUMP);
goto recover_from_sw_reset;
}
/* Execute SW reset */
@@ -307,13 +318,11 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
mlx5_disable_device(dev);
}
-/* How much time to wait until health resetting the driver (in msecs) */
-#define MLX5_RECOVERY_WAIT_MSECS 60000
int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
{
unsigned long end;
- end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS);
+ end = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FW_RESET));
while (sensor_pci_not_working(dev)) {
if (time_after(jiffies, end))
return -ETIMEDOUT;
@@ -370,35 +379,69 @@ static const char *hsynd_str(u8 synd)
}
}
+static const char *mlx5_loglevel_str(int level)
+{
+ switch (level) {
+ case LOGLEVEL_EMERG:
+ return "EMERGENCY";
+ case LOGLEVEL_ALERT:
+ return "ALERT";
+ case LOGLEVEL_CRIT:
+ return "CRITICAL";
+ case LOGLEVEL_ERR:
+ return "ERROR";
+ case LOGLEVEL_WARNING:
+ return "WARNING";
+ case LOGLEVEL_NOTICE:
+ return "NOTICE";
+ case LOGLEVEL_INFO:
+ return "INFO";
+ case LOGLEVEL_DEBUG:
+ return "DEBUG";
+ }
+ return "Unknown log level";
+}
+
+static int mlx5_health_get_severity(u8 rfr_severity)
+{
+ return rfr_severity & MLX5_SEVERITY_VALID_MASK ?
+ rfr_severity & MLX5_SEVERITY_MASK : LOGLEVEL_ERR;
+}
+
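/* Editor's note (worked example, assuming MLX5_RFR_BIT_OFFSET is 7):
 * rfr_severity = 0x8a = 10001010b
 *   rfr      = 0x8a >> 7                  = 1 -> FW requests reset
 *   valid    = 0x8a & MLX5_SEVERITY_VALID_MASK -> severity field is valid
 *   severity = 0x8a & MLX5_SEVERITY_MASK  = 2 -> LOGLEVEL_CRIT ("CRITICAL")
 * With the valid bit clear, the code falls back to LOGLEVEL_ERR.
 */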
static void print_health_info(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- char fw_str[18];
- u32 fw;
+ u8 rfr_severity;
+ int severity;
int i;
/* If the syndrome is 0, the device is OK and no need to print buffer */
if (!ioread8(&h->synd))
return;
+ rfr_severity = ioread8(&h->rfr_severity);
+ severity = mlx5_health_get_severity(rfr_severity);
+ mlx5_log(dev, severity, "Health issue observed, %s, severity(%d) %s:\n",
+ hsynd_str(ioread8(&h->synd)), severity, mlx5_loglevel_str(severity));
+
for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
- mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i,
- ioread32be(h->assert_var + i));
-
- mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n",
- ioread32be(&h->assert_exit_ptr));
- mlx5_core_err(dev, "assert_callra 0x%08x\n",
- ioread32be(&h->assert_callra));
- sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
- mlx5_core_err(dev, "fw_ver %s\n", fw_str);
- mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
- mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
- mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd),
- hsynd_str(ioread8(&h->synd)));
- mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
- fw = ioread32be(&h->fw_ver);
- mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw);
+ mlx5_log(dev, severity, "assert_var[%d] 0x%08x\n", i,
+ ioread32be(h->assert_var + i));
+
+ mlx5_log(dev, severity, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
+ mlx5_log(dev, severity, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+ mlx5_log(dev, severity, "fw_ver %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev),
+ fw_rev_sub(dev));
+ mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time));
+ mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+ mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity));
+ mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity));
+ mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index));
+ mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd),
+ hsynd_str(ioread8(&h->synd)));
+ mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+ mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver));
}
static int
@@ -447,6 +490,7 @@ mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
+ u8 rfr_severity;
int err;
int i;
@@ -479,9 +523,19 @@ mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
ioread32be(&h->assert_callra));
if (err)
return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time));
+ if (err)
+ return err;
err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
if (err)
return err;
+ rfr_severity = ioread8(&h->rfr_severity);
+ err = devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity));
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity));
+ if (err)
+ return err;
err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index",
ioread8(&h->irisc_index));
if (err)
@@ -674,13 +728,13 @@ static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
devlink_health_reporter_destroy(health->fw_fatal_reporter);
}
-static unsigned long get_next_poll_jiffies(void)
+static unsigned long get_next_poll_jiffies(struct mlx5_core_dev *dev)
{
unsigned long next;
get_random_bytes(&next, sizeof(next));
next %= HZ;
- next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+ next += jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL));
return next;
}
@@ -698,6 +752,31 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
spin_unlock_irqrestore(&health->wq_lock, flags);
}
+#define MLX5_MSEC_PER_HOUR (MSEC_PER_SEC * 60 * 60)
+static void mlx5_health_log_ts_update(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ u32 out[MLX5_ST_SZ_DW(mrtc_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mrtc_reg)] = {};
+ struct mlx5_core_health *health;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ u64 now_us;
+
+ health = container_of(dwork, struct mlx5_core_health, update_fw_log_ts_work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ now_us = ktime_to_us(ktime_get_real());
+
+ MLX5_SET(mrtc_reg, in, time_h, now_us >> 32);
+ MLX5_SET(mrtc_reg, in, time_l, now_us & 0xFFFFFFFF);
+ mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MRTC, 0, 1);
+
+ queue_delayed_work(health->wq, &health->update_fw_log_ts_work,
+ msecs_to_jiffies(MLX5_MSEC_PER_HOUR));
+}
+
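/* Editor's note (worked example for the register write above):
 * now_us = 0x0000012389abcdefULL
 *   time_h = now_us >> 32        = 0x00000123
 *   time_l = now_us & 0xFFFFFFFF = 0x89abcdef
 * The work then re-arms itself every MLX5_MSEC_PER_HOUR, so the FW log
 * timestamp is refreshed hourly on the PF.
 */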
static void poll_health(struct timer_list *t)
{
struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
@@ -740,11 +819,12 @@ static void poll_health(struct timer_list *t)
queue_work(health->wq, &health->report_work);
out:
- mod_timer(&health->timer, get_next_poll_jiffies());
+ mod_timer(&health->timer, get_next_poll_jiffies(dev));
}
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
+ u64 poll_interval_ms = mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL);
struct mlx5_core_health *health = &dev->priv.health;
timer_setup(&health->timer, poll_health, 0);
@@ -753,7 +833,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
- health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
+ health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
}
@@ -779,6 +859,7 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
spin_unlock_irqrestore(&health->wq_lock, flags);
+ cancel_delayed_work_sync(&health->update_fw_log_ts_work);
cancel_work_sync(&health->report_work);
cancel_work_sync(&health->fatal_report_work);
}
@@ -794,6 +875,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
+ cancel_delayed_work_sync(&health->update_fw_log_ts_work);
destroy_workqueue(health->wq);
mlx5_fw_reporters_destroy(dev);
}
@@ -819,6 +901,9 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
spin_lock_init(&health->wq_lock);
INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
+ INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
+ if (mlx5_core_is_pf(dev))
+ queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 0c8594c7df21..962d41418ce7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -217,6 +217,32 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_MLX5_EN_RXNFC
+static u32 mlx5i_flow_type_mask(u32 flow_type)
+{
+ return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+}
+
+static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+
+ if (mlx5i_flow_type_mask(fs->flow_type) == ETHER_FLOW)
+ return -EINVAL;
+
+ return mlx5e_ethtool_set_rxnfc(priv, cmd);
+}
+
+static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
+}
+#endif
+
const struct ethtool_ops mlx5i_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -233,6 +259,10 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_coalesce = mlx5i_get_coalesce,
.set_coalesce = mlx5i_set_coalesce,
.get_ts_info = mlx5i_get_ts_info,
+#ifdef CONFIG_MLX5_EN_RXNFC
+ .get_rxnfc = mlx5i_get_rxnfc,
+ .set_rxnfc = mlx5i_set_rxnfc,
+#endif
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 269ebb53eda6..ea1efdecc88c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -67,7 +67,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
- params->lro_en = false;
+ params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
params->tunneled_offload_en = false;
}
@@ -219,7 +219,7 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
- unsigned char *dev_addr = priv->netdev->dev_addr;
+ const unsigned char *dev_addr = priv->netdev->dev_addr;
u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
struct mlx5i_priv *ipriv = priv->ppriv;
@@ -336,6 +336,8 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
goto err_destroy_arfs_tables;
}
+ mlx5e_ethtool_init_steering(priv);
+
return 0;
err_destroy_arfs_tables:
@@ -348,12 +350,12 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
+ mlx5e_ethtool_cleanup_steering(priv);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
@@ -368,9 +370,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
- priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
@@ -472,11 +474,13 @@ int mlx5i_dev_init(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
struct mlx5i_priv *ipriv = priv->ppriv;
+ u8 addr_mod[3];
/* Set dev address using underlay QP */
- dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
- dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff;
- dev->dev_addr[3] = (ipriv->qpn) & 0xff;
+ addr_mod[0] = (ipriv->qpn >> 16) & 0xff;
+ addr_mod[1] = (ipriv->qpn >> 8) & 0xff;
+ addr_mod[2] = (ipriv->qpn) & 0xff;
+ dev_addr_mod(dev, 1, addr_mod, sizeof(addr_mod));
/* Add QPN to net-device mapping to HT */
mlx5i_pkey_add_qpn(dev, ipriv->qpn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index ca5690b0a7ab..48d2ea690d7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -38,7 +38,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "lag.h"
-#include "lag_mp.h"
+#include "mp.h"
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -47,16 +47,21 @@
static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
- u8 remap_port2, bool shared_fdb)
+ u8 remap_port2, bool shared_fdb, u8 flags)
{
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
+ if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+ } else {
+ MLX5_SET(lagc, lag_ctx, port_select_mode,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
+ }
return mlx5_cmd_exec_in(dev, create_lag, in);
}
@@ -199,6 +204,15 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
*port1 = 2;
}
+static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+
+ if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
+ return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2);
+ return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
@@ -211,39 +225,56 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
+ err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2);
+ if (err) {
+ mlx5_core_err(dev0,
+ "Failed to modify LAG (%d)\n",
+ err);
+ return;
+ }
ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
-
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2]);
-
- err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
- if (err)
- mlx5_core_err(dev0,
- "Failed to modify LAG (%d)\n",
- err);
}
}
+static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker, u8 *flags)
+{
+ bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
+ struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+
+ if (roce_lag ||
+ !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) ||
+ tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return;
+ *flags |= MLX5_LAG_FLAG_HASH_BASED;
+}
+
+static char *get_str_port_sel_mode(u8 flags)
+{
+ if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ return "hash";
+ return "queue_affinity";
+}
+
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- bool shared_fdb)
+ bool shared_fdb, u8 flags)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err;
- mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
- &ldev->v2p_map[MLX5_LAG_P2]);
-
- mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d",
+ mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s",
ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2],
- shared_fdb);
+ shared_fdb, get_str_port_sel_mode(flags));
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
- ldev->v2p_map[MLX5_LAG_P2], shared_fdb);
+ ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -279,16 +310,32 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
- err = mlx5_create_lag(ldev, tracker, shared_fdb);
+ mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
+ &ldev->v2p_map[MLX5_LAG_P2]);
+ mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
+ if (flags & MLX5_LAG_FLAG_HASH_BASED) {
+ err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
+ ldev->v2p_map[MLX5_LAG_P1],
+ ldev->v2p_map[MLX5_LAG_P2]);
+ if (err) {
+ mlx5_core_err(dev0,
+ "Failed to create LAG port selection (%d)\n",
+ err);
+ return err;
+ }
+ }
+
+ err = mlx5_create_lag(ldev, tracker, shared_fdb, flags);
if (err) {
- if (roce_lag) {
+ if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ mlx5_lag_port_sel_destroy(ldev);
+ if (roce_lag)
mlx5_core_err(dev0,
"Failed to activate RoCE LAG\n");
- } else {
+ else
mlx5_core_err(dev0,
"Failed to activate VF LAG\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
- }
return err;
}
@@ -302,6 +349,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
+ u8 flags = ldev->flags;
int err;
ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
@@ -324,6 +372,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
"Failed to deactivate VF LAG; driver restart required\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
+ } else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
+ mlx5_lag_port_sel_destroy(ldev);
}
return err;
@@ -442,6 +492,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
+ /* VF LAG is in multipath mode, ignore bond change requests */
+ if (mlx5_lag_is_multipath(dev0))
+ return;
+
tracker = ldev->tracker;
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -588,8 +642,10 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
if (!(bond_status & 0x3))
return 0;
- if (lag_upper_info)
+ if (lag_upper_info) {
tracker->tx_type = lag_upper_info->tx_type;
+ tracker->hash_type = lag_upper_info->hash_type;
+ }
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
@@ -688,7 +744,7 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev,
struct net_device *netdev)
{
- unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+ unsigned int fn = mlx5_get_dev_index(dev);
if (fn >= MLX5_MAX_PORTS)
return;
@@ -718,7 +774,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+ unsigned int fn = mlx5_get_dev_index(dev);
if (fn >= MLX5_MAX_PORTS)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index d4bae528954e..e5d231c31b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -5,7 +5,8 @@
#define __MLX5_LAG_H__
#include "mlx5_core.h"
-#include "lag_mp.h"
+#include "mp.h"
+#include "port_sel.h"
enum {
MLX5_LAG_P1,
@@ -17,10 +18,12 @@ enum {
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
MLX5_LAG_FLAG_READY = 1 << 3,
+ MLX5_LAG_FLAG_HASH_BASED = 1 << 4,
};
#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
- MLX5_LAG_FLAG_MULTIPATH)
+ MLX5_LAG_FLAG_MULTIPATH | \
+ MLX5_LAG_FLAG_HASH_BASED)
struct lag_func {
struct mlx5_core_dev *dev;
@@ -32,6 +35,7 @@ struct lag_tracker {
enum netdev_lag_tx_type tx_type;
struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
unsigned int is_bonded:1;
+ enum netdev_lag_hash hash_type;
};
/* LAG data of a ConnectX card.
@@ -49,6 +53,7 @@ struct mlx5_lag {
struct delayed_work bond_work;
struct notifier_block nb;
struct lag_mp lag_mp;
+ struct mlx5_lag_port_sel port_sel;
};
static inline struct mlx5_lag *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index f239b352a58a..bf4d3cbefa63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -3,26 +3,29 @@
#include <linux/netdevice.h>
#include <net/nexthop.h>
-#include "lag.h"
-#include "lag_mp.h"
+#include "lag/lag.h"
+#include "lag/mp.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "lib/mlx5.h"
+static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
+}
+
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
if (!mlx5_lag_is_ready(ldev))
return false;
+ if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
+ return false;
+
return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
ldev->pf[MLX5_LAG_P2].dev);
}
-static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
-{
- return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
-}
-
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
index 729c839397a8..57af962cad29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
@@ -24,12 +24,14 @@ struct lag_mp {
void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
int mlx5_lag_mp_init(struct mlx5_lag *ldev);
void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
+static inline bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_LAG_MP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
new file mode 100644
index 000000000000..adc836b3d857
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/netdevice.h>
+#include "lag.h"
+
+enum {
+ MLX5_LAG_FT_LEVEL_TTC,
+ MLX5_LAG_FT_LEVEL_INNER_TTC,
+ MLX5_LAG_FT_LEVEL_DEFINER,
+};
+
+static struct mlx5_flow_group *
+mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_definer *definer)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_definer_id,
+ mlx5_get_match_definer_id(definer));
+ MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
+ MLX5_SET(create_flow_group_in, in, group_type,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
+
+ fg = mlx5_create_flow_group(ft, in);
+ kvfree(in);
+ return fg;
+}
+
+static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer *lag_definer,
+ u8 port1, u8 port2)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_namespace *ns;
+ int err, i;
+
+ ft_attr.max_fte = MLX5_MAX_PORTS;
+ ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
+ if (!ns) {
+ mlx5_core_warn(dev, "Failed to get port selection namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(lag_definer->ft)) {
+ mlx5_core_warn(dev, "Failed to create port selection table\n");
+ return PTR_ERR(lag_definer->ft);
+ }
+
+ lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
+ lag_definer->definer);
+ if (IS_ERR(lag_definer->fg)) {
+ err = PTR_ERR(lag_definer->fg);
+ goto destroy_ft;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ u8 affinity = i == 0 ? port1 : port2;
+
+ dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
+ vhca_id);
+ lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft,
+ NULL, &flow_act,
+ &dest, 1);
+ if (IS_ERR(lag_definer->rules[i])) {
+ err = PTR_ERR(lag_definer->rules[i]);
+ while (i--)
+ mlx5_del_flow_rules(lag_definer->rules[i]);
+ goto destroy_fg;
+ }
+ }
+
+ return 0;
+
+destroy_fg:
+ mlx5_destroy_flow_group(lag_definer->fg);
+destroy_ft:
+ mlx5_destroy_flow_table(lag_definer->ft);
+ return err;
+}
+
+static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
+ enum mlx5_traffic_types tt)
+{
+ int format_id;
+ u8 *ipv6;
+
+ switch (tt) {
+ case MLX5_TT_IPV4_UDP:
+ case MLX5_TT_IPV4_TCP:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l4_dport);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV4:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV6_TCP:
+ case MLX5_TT_IPV6_UDP:
+ format_id = 31;
+ MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
+ inner_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
+ inner_l4_dport);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
+ inner_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
+ inner_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ break;
+ case MLX5_TT_IPV6:
+ format_id = 32;
+ ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
+ inner_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
+ inner_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_smac_15_0);
+ break;
+ default:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_15_0);
+ break;
+ }
+
+ return format_id;
+}
+
+static int mlx5_lag_set_definer(u32 *match_definer_mask,
+ enum mlx5_traffic_types tt, bool tunnel,
+ enum netdev_lag_hash hash)
+{
+ int format_id;
+ u8 *ipv6;
+
+ if (tunnel)
+ return mlx5_lag_set_definer_inner(match_definer_mask, tt);
+
+ switch (tt) {
+ case MLX5_TT_IPV4_UDP:
+ case MLX5_TT_IPV4_TCP:
+ format_id = 22;
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l4_dport);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV4:
+ format_id = 22;
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_smac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV6_TCP:
+ case MLX5_TT_IPV6_UDP:
+ format_id = 29;
+ MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
+ outer_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
+ outer_l4_dport);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
+ outer_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
+ outer_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ break;
+ case MLX5_TT_IPV6:
+ format_id = 30;
+ ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
+ outer_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
+ outer_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_smac_15_0);
+ break;
+ default:
+ format_id = 0;
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_smac_15_0);
+
+ if (hash == NETDEV_LAG_HASH_VLAN_SRCMAC) {
+ MLX5_SET_TO_ONES(match_definer_format_0,
+ match_definer_mask,
+ outer_first_vlan_vid);
+ break;
+ }
+
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_ethertype);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_dmac_15_0);
+ break;
+ }
+
+ return format_id;
+}
+
+static struct mlx5_lag_definer *
+mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
+ enum mlx5_traffic_types tt, bool tunnel, u8 port1,
+ u8 port2)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_definer *lag_definer;
+ u32 *match_definer_mask;
+ int format_id, err;
+
+ lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
+ if (!lag_definer)
+ return ERR_PTR(-ENOMEM);
+
+ match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
+ match_mask),
+ GFP_KERNEL);
+ if (!match_definer_mask) {
+ err = -ENOMEM;
+ goto free_lag_definer;
+ }
+
+ format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
+ lag_definer->definer =
+ mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
+ format_id, match_definer_mask);
+ if (IS_ERR(lag_definer->definer)) {
+ err = PTR_ERR(lag_definer->definer);
+ goto free_mask;
+ }
+
+ err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2);
+ if (err)
+ goto destroy_match_definer;
+
+ kvfree(match_definer_mask);
+
+ return lag_definer;
+
+destroy_match_definer:
+ mlx5_destroy_match_definer(dev, lag_definer->definer);
+free_mask:
+ kvfree(match_definer_mask);
+free_lag_definer:
+ kfree(lag_definer);
+ return ERR_PTR(err);
+}
+
+static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer *lag_definer)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ mlx5_del_flow_rules(lag_definer->rules[i]);
+ mlx5_destroy_flow_group(lag_definer->fg);
+ mlx5_destroy_flow_table(lag_definer->ft);
+ mlx5_destroy_match_definer(dev, lag_definer->definer);
+ kfree(lag_definer);
+}
+
+static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int tt;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ if (port_sel->outer.definers[tt])
+ mlx5_lag_destroy_definer(ldev,
+ port_sel->outer.definers[tt]);
+ if (port_sel->inner.definers[tt])
+ mlx5_lag_destroy_definer(ldev,
+ port_sel->inner.definers[tt]);
+ }
+}
+
+static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type,
+ u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_lag_definer *lag_definer;
+ int tt, err;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
+ false, port1, port2);
+ if (IS_ERR(lag_definer)) {
+ err = PTR_ERR(lag_definer);
+ goto destroy_definers;
+ }
+ port_sel->outer.definers[tt] = lag_definer;
+
+ if (!port_sel->tunnel)
+ continue;
+
+ lag_definer =
+ mlx5_lag_create_definer(ldev, hash_type, tt,
+ true, port1, port2);
+ if (IS_ERR(lag_definer)) {
+ err = PTR_ERR(lag_definer);
+ goto destroy_definers;
+ }
+ port_sel->inner.definers[tt] = lag_definer;
+ }
+
+ return 0;
+
+destroy_definers:
+ mlx5_lag_destroy_definers(ldev);
+ return err;
+}
+
+static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
+ enum netdev_lag_hash hash)
+{
+ port_sel->tunnel = false;
+
+ switch (hash) {
+ case NETDEV_LAG_HASH_E34:
+ port_sel->tunnel = true;
+ fallthrough;
+ case NETDEV_LAG_HASH_L34:
+ set_bit(MLX5_TT_IPV4_TCP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV4_UDP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6_TCP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6_UDP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV4, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6, port_sel->tt_map);
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ case NETDEV_LAG_HASH_E23:
+ port_sel->tunnel = true;
+ fallthrough;
+ case NETDEV_LAG_HASH_L23:
+ set_bit(MLX5_TT_IPV4, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6, port_sel->tt_map);
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ default:
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ }
+}
+
+#define SET_IGNORE_DESTS_BITS(tt_map, dests) \
+ do { \
+ int idx; \
+ \
+ for_each_clear_bit(idx, tt_map, MLX5_NUM_TT) \
+ set_bit(idx, dests); \
+ } while (0)
+
+static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_table_attr *ft_attr;
+ int tt;
+
+ ttc_params->ns = mlx5_get_flow_namespace(dev,
+ MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ft_attr = &ttc_params->ft_attr;
+ ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ ttc_params->dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->dests[tt].ft = port_sel->inner.definers[tt]->ft;
+ }
+ SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
+}
+
+static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_table_attr *ft_attr;
+ int tt;
+
+ ttc_params->ns = mlx5_get_flow_namespace(dev,
+ MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ft_attr = &ttc_params->ft_attr;
+ ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ ttc_params->dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->dests[tt].ft = port_sel->outer.definers[tt]->ft;
+ }
+ SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
+
+ ttc_params->inner_ttc = port_sel->tunnel;
+ if (!port_sel->tunnel)
+ return;
+
+ for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
+ ttc_params->tunnel_dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->tunnel_dests[tt].ft =
+ mlx5_get_ttc_flow_table(port_sel->inner.ttc);
+ }
+}
+
+static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct ttc_params ttc_params = {};
+
+ mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
+ port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+ if (IS_ERR(port_sel->outer.ttc))
+ return PTR_ERR(port_sel->outer.ttc);
+
+ return 0;
+}
+
+static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct ttc_params ttc_params = {};
+
+ mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
+ port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+ if (IS_ERR(port_sel->inner.ttc))
+ return PTR_ERR(port_sel->inner.ttc);
+
+ return 0;
+}
+
+int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type, u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int err;
+
+ set_tt_map(port_sel, hash_type);
+ err = mlx5_lag_create_definers(ldev, hash_type, port1, port2);
+ if (err)
+ return err;
+
+ if (port_sel->tunnel) {
+ err = mlx5_lag_create_inner_ttc_table(ldev);
+ if (err)
+ goto destroy_definers;
+ }
+
+ err = mlx5_lag_create_ttc_table(ldev);
+ if (err)
+ goto destroy_inner;
+
+ return 0;
+
+destroy_inner:
+ if (port_sel->tunnel)
+ mlx5_destroy_ttc_table(port_sel->inner.ttc);
+destroy_definers:
+ mlx5_lag_destroy_definers(ldev);
+ return err;
+}
+
+static int
+mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer **definers,
+ u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_destination dest = {};
+ int err;
+ int tt;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ struct mlx5_flow_handle **rules = definers[tt]->rules;
+
+ if (ldev->v2p_map[MLX5_LAG_P1] != port1) {
+ dest.vport.vhca_id =
+ MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id);
+ err = mlx5_modify_rule_destination(rules[MLX5_LAG_P1],
+ &dest, NULL);
+ if (err)
+ return err;
+ }
+
+ if (ldev->v2p_map[MLX5_LAG_P2] != port2) {
+ dest.vport.vhca_id =
+ MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id);
+ err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2],
+ &dest, NULL);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int err;
+
+ err = mlx5_lag_modify_definers_destinations(ldev,
+ port_sel->outer.definers,
+ port1, port2);
+ if (err)
+ return err;
+
+ if (!port_sel->tunnel)
+ return 0;
+
+ return mlx5_lag_modify_definers_destinations(ldev,
+ port_sel->inner.definers,
+ port1, port2);
+}
+
+void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+
+ mlx5_destroy_ttc_table(port_sel->outer.ttc);
+ if (port_sel->tunnel)
+ mlx5_destroy_ttc_table(port_sel->inner.ttc);
+ mlx5_lag_destroy_definers(ldev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
new file mode 100644
index 000000000000..6d15b28a42fc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_LAG_FS_H__
+#define __MLX5_LAG_FS_H__
+
+#include "lib/fs_ttc.h"
+
+struct mlx5_lag_definer {
+ struct mlx5_flow_definer *definer;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *rules[MLX5_MAX_PORTS];
+};
+
+struct mlx5_lag_ttc {
+ struct mlx5_ttc_table *ttc;
+ struct mlx5_lag_definer *definers[MLX5_NUM_TT];
+};
+
+struct mlx5_lag_port_sel {
+ DECLARE_BITMAP(tt_map, MLX5_NUM_TT);
+ bool tunnel;
+ struct mlx5_lag_ttc outer;
+ struct mlx5_lag_ttc inner;
+};
+
+#ifdef CONFIG_MLX5_ESWITCH
+
+int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2);
+void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev);
+int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type, u8 port1,
+ u8 port2);
+
+#else /* CONFIG_MLX5_ESWITCH */
+static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type,
+ u8 port1, u8 port2)
+{
+ return 0;
+}
+
+static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1,
+ u8 port2)
+{
+ return 0;
+}
+
+static inline void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) {}
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_LAG_FS_H__ */
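/* Editor's sketch (illustrative only, mirroring the lag.c changes above):
 * how the three entry points compose over a hash-based LAG's lifetime.
 * The hash type and port values are assumptions made for the example.
 */
static int example_port_sel_lifetime(struct mlx5_lag *ldev)
{
	int err;

	/* On activation, once MLX5_LAG_FLAG_HASH_BASED is selected: */
	err = mlx5_lag_port_sel_create(ldev, NETDEV_LAG_HASH_L34, 1, 2);
	if (err)
		return err;

	/* On a tx-affinity remap, e.g. port 1 going down: */
	err = mlx5_lag_port_sel_modify(ldev, 2, 2);

	/* On deactivation, or to unwind a failed LAG creation: */
	mlx5_lag_port_sel_destroy(ldev);
	return err;
}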
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 749d17c0057d..b63dec24747a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -247,6 +247,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (test_bit(tt, params->ignore_dests))
+ continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@@ -266,6 +268,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
if (!mlx5_tunnel_proto_supported_rx(dev,
ttc_tunnel_rules[tt].proto))
continue;
+ if (test_bit(tt, params->ignore_tunnel_dests))
+ continue;
trules[tt] = mlx5_generate_ttc_rule(dev, ft,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index ce95be8f8382..85fef0cd1c07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -43,7 +43,9 @@ struct ttc_params {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table_attr ft_attr;
struct mlx5_flow_destination dests[MLX5_NUM_TT];
+ DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT);
bool inner_ttc;
+ DECLARE_BITMAP(ignore_tunnel_dests, MLX5_NUM_TUNNEL_TT);
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
new file mode 100644
index 000000000000..0dd96a6b140d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/mlx5/driver.h>
+#include "lib/tout.h"
+
+struct mlx5_timeouts {
+ u64 to[MAX_TIMEOUT_TYPES];
+};
+
+static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = {
+ [MLX5_TO_FW_PRE_INIT_TIMEOUT_MS] = 120000,
+ [MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS] = 20000,
+ [MLX5_TO_FW_PRE_INIT_WAIT_MS] = 2,
+ [MLX5_TO_FW_INIT_MS] = 2000,
+ [MLX5_TO_CMD_MS] = 60000,
+ [MLX5_TO_PCI_TOGGLE_MS] = 2000,
+ [MLX5_TO_HEALTH_POLL_INTERVAL_MS] = 2000,
+ [MLX5_TO_FULL_CRDUMP_MS] = 60000,
+ [MLX5_TO_FW_RESET_MS] = 60000,
+ [MLX5_TO_FLUSH_ON_ERROR_MS] = 2000,
+ [MLX5_TO_PCI_SYNC_UPDATE_MS] = 5000,
+ [MLX5_TO_TEARDOWN_MS] = 3000,
+ [MLX5_TO_FSM_REACTIVATE_MS] = 5000,
+ [MLX5_TO_RECLAIM_PAGES_MS] = 5000,
+ [MLX5_TO_RECLAIM_VFS_PAGES_MS] = 120000
+};
+
+static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_types type)
+{
+ dev->timeouts->to[type] = val;
+}
+
+static void tout_set_def_val(struct mlx5_core_dev *dev)
+{
+ int i;
+
+ for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+ tout_set(dev, tout_def_sw_val[i], i);
+}
+
+int mlx5_tout_init(struct mlx5_core_dev *dev)
+{
+ dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
+ if (!dev->timeouts)
+ return -ENOMEM;
+
+ tout_set_def_val(dev);
+ return 0;
+}
+
+void mlx5_tout_cleanup(struct mlx5_core_dev *dev)
+{
+ kfree(dev->timeouts);
+}
+
+/* The time register consists of two fields: to_multiplier (timeout
+ * multiplier) and to_value (timeout value). to_value is the quantity of time
+ * units and to_multiplier is the unit type, one of these four values:
+ * 0x0: milliseconds
+ * 0x1: seconds
+ * 0x2: minutes
+ * 0x3: hours
+ * This function converts the time stored in the two register fields into
+ * milliseconds.
+ */
+static u64 tout_convert_reg_field_to_ms(u32 to_mul, u32 to_val)
+{
+ u64 msec = to_val;
+
+ to_mul &= 0x3;
+ /* convert hours/minutes/seconds to milliseconds */
+ if (to_mul)
+ msec *= 1000 * int_pow(60, to_mul - 1);
+
+ return msec;
+}
+
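/* Editor's note (worked example for the conversion above):
 * to_mul = 0x2 (minutes), to_val = 3:
 *   msec = 3 * 1000 * int_pow(60, 2 - 1) = 180000 ms (3 minutes)
 * to_mul = 0x0 returns to_val unchanged, already in milliseconds.
 */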
+static u64 tout_convert_iseg_to_ms(u32 iseg_to)
+{
+ return tout_convert_reg_field_to_ms(iseg_to >> 29, iseg_to & 0xfffff);
+}
+
+static bool tout_is_supported(struct mlx5_core_dev *dev)
+{
+ return !!ioread32be(&dev->iseg->cmd_q_init_to);
+}
+
+void mlx5_tout_query_iseg(struct mlx5_core_dev *dev)
+{
+ u32 to;
+
+ if (!tout_is_supported(dev))
+ return;
+
+ to = ioread32be(&dev->iseg->cmd_q_init_to);
+ tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_FW_INIT_MS);
+
+ to = ioread32be(&dev->iseg->cmd_exec_to);
+ tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_CMD_MS);
+}
+
+u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type)
+{
+ return dev->timeouts->to[type];
+}
+
+#define MLX5_TIMEOUT_QUERY(fld, reg_out) \
+ ({ \
+ struct mlx5_ifc_default_timeout_bits *time_field; \
+ u32 to_multi, to_value; \
+ u64 to_val_ms; \
+ \
+ time_field = MLX5_ADDR_OF(dtor_reg, reg_out, fld); \
+ to_multi = MLX5_GET(default_timeout, time_field, to_multiplier); \
+ to_value = MLX5_GET(default_timeout, time_field, to_value); \
+ to_val_ms = tout_convert_reg_field_to_ms(to_multi, to_value); \
+ to_val_ms; \
+ })
+
+#define MLX5_TIMEOUT_FILL(fld, reg_out, dev, to_type, to_extra) \
+ ({ \
+ u64 fw_to = MLX5_TIMEOUT_QUERY(fld, reg_out); \
+ tout_set(dev, fw_to + (to_extra), to_type); \
+ fw_to; \
+ })
+
+static int tout_query_dtor(struct mlx5_core_dev *dev)
+{
+ u64 pcie_toggle_to_val, tear_down_to_val;
+ u32 out[MLX5_ST_SZ_DW(dtor_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(dtor_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_DTOR, 0, 0);
+ if (err)
+ return err;
+
+ pcie_toggle_to_val = MLX5_TIMEOUT_FILL(pcie_toggle_to, out, dev, MLX5_TO_PCI_TOGGLE_MS, 0);
+ MLX5_TIMEOUT_FILL(fw_reset_to, out, dev, MLX5_TO_FW_RESET_MS, pcie_toggle_to_val);
+
+ tear_down_to_val = MLX5_TIMEOUT_FILL(tear_down_to, out, dev, MLX5_TO_TEARDOWN_MS, 0);
+ MLX5_TIMEOUT_FILL(pci_sync_update_to, out, dev, MLX5_TO_PCI_SYNC_UPDATE_MS,
+ tear_down_to_val);
+
+ MLX5_TIMEOUT_FILL(health_poll_to, out, dev, MLX5_TO_HEALTH_POLL_INTERVAL_MS, 0);
+ MLX5_TIMEOUT_FILL(full_crdump_to, out, dev, MLX5_TO_FULL_CRDUMP_MS, 0);
+ MLX5_TIMEOUT_FILL(flush_on_err_to, out, dev, MLX5_TO_FLUSH_ON_ERROR_MS, 0);
+ MLX5_TIMEOUT_FILL(fsm_reactivate_to, out, dev, MLX5_TO_FSM_REACTIVATE_MS, 0);
+ MLX5_TIMEOUT_FILL(reclaim_pages_to, out, dev, MLX5_TO_RECLAIM_PAGES_MS, 0);
+ MLX5_TIMEOUT_FILL(reclaim_vfs_pages_to, out, dev, MLX5_TO_RECLAIM_VFS_PAGES_MS, 0);
+
+ return 0;
+}
+
+int mlx5_tout_query_dtor(struct mlx5_core_dev *dev)
+{
+ if (tout_is_supported(dev))
+ return tout_query_dtor(dev);
+
+ return 0;
+}
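
The register-to-milliseconds conversion above is easiest to check with a worked example; the values below are illustrative, not read from any device. Note that tout_convert_iseg_to_ms() shifts the multiplier down from bits 31:29 (masked to two bits) and takes the value from the low 20 bits of the init-segment word.

	/* Worked examples for tout_convert_reg_field_to_ms(); msec scales by
	 * 1000 * 60^(to_mul - 1) for non-millisecond units:
	 *   to_mul=0 (ms),  to_val=2000 -> 2000 ms
	 *   to_mul=1 (s),   to_val=5    -> 5 * 1000           = 5000 ms
	 *   to_mul=2 (min), to_val=3    -> 3 * 1000 * 60      = 180000 ms
	 *   to_mul=3 (h),   to_val=1    -> 1 * 1000 * 60 * 60 = 3600000 ms
	 */
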
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
new file mode 100644
index 000000000000..31faa5c17aa9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef MLX5_TIMEOUTS_H
+#define MLX5_TIMEOUTS_H
+
+enum mlx5_timeouts_types {
+ /* pre init timeouts (not read from FW) */
+ MLX5_TO_FW_PRE_INIT_TIMEOUT_MS,
+ MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS,
+ MLX5_TO_FW_PRE_INIT_WAIT_MS,
+
+ /* init segment timeouts */
+ MLX5_TO_FW_INIT_MS,
+ MLX5_TO_CMD_MS,
+
+ /* DTOR timeouts */
+ MLX5_TO_PCI_TOGGLE_MS,
+ MLX5_TO_HEALTH_POLL_INTERVAL_MS,
+ MLX5_TO_FULL_CRDUMP_MS,
+ MLX5_TO_FW_RESET_MS,
+ MLX5_TO_FLUSH_ON_ERROR_MS,
+ MLX5_TO_PCI_SYNC_UPDATE_MS,
+ MLX5_TO_TEARDOWN_MS,
+ MLX5_TO_FSM_REACTIVATE_MS,
+ MLX5_TO_RECLAIM_PAGES_MS,
+ MLX5_TO_RECLAIM_VFS_PAGES_MS,
+
+ MAX_TIMEOUT_TYPES
+};
+
+struct mlx5_core_dev;
+int mlx5_tout_init(struct mlx5_core_dev *dev);
+void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
+void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
+int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
+
+#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
+
+#endif /* MLX5_TIMEOUTS_H */
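
A hedged usage sketch of this API (not part of the patch; fw_is_busy() is a hypothetical placeholder): the mlx5_tout_ms() macro token-pastes its argument, so mlx5_tout_ms(dev, FW_INIT) reads the MLX5_TO_FW_INIT_MS slot.

	/* Hedged usage sketch only -- fw_is_busy() is hypothetical. */
	static int example_wait_fw(struct mlx5_core_dev *dev)
	{
		unsigned long end = jiffies +
			msecs_to_jiffies(mlx5_tout_ms(dev, FW_INIT));

		while (fw_is_busy(dev)) {
			if (time_after(jiffies, end))
				return -EBUSY;
			msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
		}
		return 0;
	}
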
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 79482824c64f..a92a92a52346 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -60,6 +60,7 @@
#include "devlink.h"
#include "fw_reset.h"
#include "lib/mlx5.h"
+#include "lib/tout.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
@@ -176,11 +177,6 @@ static struct mlx5_profile profile[] = {
},
};
-#define FW_INIT_TIMEOUT_MILI 2000
-#define FW_INIT_WAIT_MS 2
-#define FW_PRE_INIT_TIMEOUT_MILI 120000
-#define FW_INIT_WARN_MESSAGE_INTERVAL 20000
-
static int fw_initializing(struct mlx5_core_dev *dev)
{
return ioread32be(&dev->iseg->initializing) >> 31;
@@ -193,8 +189,6 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
int err = 0;
- BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);
-
while (fw_initializing(dev)) {
if (time_after(jiffies, end)) {
err = -EBUSY;
@@ -205,7 +199,7 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
jiffies_to_msecs(end - warn) / 1000);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
- msleep(FW_INIT_WAIT_MS);
+ msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
}
return err;
@@ -564,15 +558,38 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
+/* The cached MLX5_CAP_GEN(dev, roce) value can be out of sync this early in
+ * the boot process: if the RoCE cap is writable in FW and user/devlink
+ * requested a change, the final state of the cap has not been queried yet.
+ * Hence the need for this function.
+ *
+ * Returns true when either:
+ * 1) the RoCE cap is read-only in FW and already disabled, or
+ * 2) the RoCE cap is writable in FW and user/devlink requested it off.
+ *
+ * In any other case, returns false.
+ */
+static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
+ (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
+}
+
static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
int err;
- if (!MLX5_CAP_GEN(dev, roce))
+ if (is_roce_fw_disabled(dev))
return 0;
err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
@@ -975,25 +992,34 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
+ err = mlx5_tout_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+ return err;
+ }
+
/* wait for firmware to accept initialization segment configurations
*/
- err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT),
+ mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
if (err) {
- mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
- FW_PRE_INIT_TIMEOUT_MILI);
- return err;
+ mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
+ mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
+ goto err_tout_cleanup;
}
err = mlx5_cmd_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
- return err;
+ goto err_tout_cleanup;
}
- err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
+ mlx5_tout_query_iseg(dev);
+
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
if (err) {
- mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
- FW_INIT_TIMEOUT_MILI);
+ mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
+ mlx5_tout_ms(dev, FW_INIT));
goto err_cmd_cleanup;
}
@@ -1017,6 +1043,12 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
goto err_disable_hca;
}
+ err = mlx5_tout_query_dtor(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to read dtor\n");
+ goto reclaim_boot_pages;
+ }
+
err = set_hca_ctrl(dev);
if (err) {
mlx5_core_err(dev, "set_hca_ctrl failed\n");
@@ -1062,6 +1094,8 @@ err_disable_hca:
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
+err_tout_cleanup:
+ mlx5_tout_cleanup(dev);
return err;
}
@@ -1080,6 +1114,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
+ mlx5_tout_cleanup(dev);
return 0;
}
@@ -1112,8 +1147,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
- mlx5_core_err(dev, "Failed to init FW tracer\n");
- goto err_fw_tracer;
+ mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
+ mlx5_fw_tracer_destroy(dev->tracer);
+ dev->tracer = NULL;
}
mlx5_fw_reset_events_start(dev);
@@ -1121,8 +1157,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_rsc_dump_init(dev);
if (err) {
- mlx5_core_err(dev, "Failed to init Resource dump\n");
- goto err_rsc_dump;
+ mlx5_core_err(dev, "Failed to init Resource dump %d\n", err);
+ mlx5_rsc_dump_destroy(dev);
+ dev->rsc_dump = NULL;
}
err = mlx5_fpga_device_start(dev);
@@ -1192,11 +1229,9 @@ err_tls_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
-err_rsc_dump:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
-err_fw_tracer:
mlx5_eq_table_destroy(dev);
err_eq_table:
mlx5_irq_table_destroy(dev);
@@ -1381,6 +1416,8 @@ static const int types[] = {
MLX5_CAP_TLS,
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
+ MLX5_CAP_PORT_SELECTION,
+ MLX5_CAP_DEV_SHAMPO,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1537,8 +1574,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
pci_save_state(pdev);
- if (!mlx5_core_is_mp_slave(dev))
- devlink_reload_enable(devlink);
+ devlink_register(devlink);
return 0;
err_init_one:
@@ -1558,7 +1594,7 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
mlx5_crdump_disable(dev);
mlx5_drain_health_wq(dev);
mlx5_uninit_one(dev);
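
The new err_tout_cleanup label keeps the unwind symmetric with allocation order: timeouts are allocated before the command interface, so they are released after it on failure (and last in mlx5_function_teardown()). A minimal, hedged sketch of the shape (not driver code; middle steps elided):

	/* Hedged sketch: release in reverse order of acquisition. */
	static int example_setup(struct mlx5_core_dev *dev)
	{
		int err;

		err = mlx5_tout_init(dev);	/* acquired first */
		if (err)
			return err;

		err = mlx5_cmd_init(dev);	/* acquired second */
		if (err)
			goto err_tout_cleanup;

		return 0;

	err_tout_cleanup:
		mlx5_tout_cleanup(dev);		/* undo the first step */
		return err;
	}
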
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 230eab7e3bc9..bb677329ea08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -97,6 +97,30 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
+{
+ struct device *device = dev->device;
+ struct va_format vaf;
+ va_list args;
+
+ if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG,
+ "Level %d is out of range, set to default level\n", level))
+ level = LOGLEVEL_DEFAULT;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ dev_printk_emit(level, device, "%s %s: %pV", dev_driver_string(device), dev_name(device),
+ &vaf);
+ va_end(args);
+}
+
+#define mlx5_log(__dev, level, format, ...) \
+ mlx5_printk(__dev, level, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
{
return &dev->pdev->dev;
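
A hedged usage sketch of the new mlx5_log() helper (the message, qid and stall_ms are illustrative, not driver code): unlike the fixed-level mlx5_core_warn()/mlx5_core_err() macros, the printk level is chosen at the call site.

	/* Illustrative call only -- qid and stall_ms are hypothetical locals. */
	mlx5_log(dev, LOGLEVEL_WARNING, "queue %d stalled for %u ms\n",
		 qid, stall_ms);

	/* Out-of-range levels are clamped: mlx5_printk() WARNs once and
	 * falls back to LOGLEVEL_DEFAULT.
	 */
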
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index abd024173c42..8116815663a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -8,8 +8,6 @@
#define MLX5_COMP_EQS_PER_SF 8
-#define MLX5_IRQ_EQ_CTRL (0)
-
struct mlx5_irq;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 174f71ed5280..f099a087400e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -35,13 +35,11 @@
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen)
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
+ int inlen)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
u32 mkey_index;
- void *mkc;
int err;
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
@@ -50,38 +48,33 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
if (err)
return err;
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
- mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
- mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
- mkey->pd = MLX5_GET(mkc, mkc, pd);
- init_waitqueue_head(&mkey->wait);
+ *mkey = MLX5_GET(create_mkey_in, in, memory_key_mkey_entry.mkey_7_0) |
+ mlx5_idx_to_mkey(mkey_index);
- mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, mkey->key);
+ mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, *mkey);
return 0;
}
EXPORT_SYMBOL(mlx5_core_create_mkey);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey)
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey)
{
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
- MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
return mlx5_cmd_exec_in(dev, destroy_mkey, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *out, int outlen)
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
+ int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {};
memset(out, 0, outlen);
MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
- MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
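
The mkey handle is now the raw u32 the hardware uses: a 24-bit index in the high bits plus an 8-bit variant in the low byte. The helpers from include/linux/mlx5/driver.h make the packing explicit; the numeric values below are illustrative:

	/* Illustrative values only:
	 *   mlx5_idx_to_mkey(0x000012)    == 0x00001200  (index << 8)
	 *   mlx5_mkey_to_idx(0x00001234)  == 0x000012    (mkey >> 8)
	 *   mlx5_mkey_variant(0x00001234) == 0x34        (mkey & 0xff)
	 */
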
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 110c0837f95b..f6b5451328fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -38,6 +38,7 @@
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
+#include "lib/tout.h"
enum {
MLX5_PAGES_CANT_GIVE = 0,
@@ -65,11 +66,6 @@ struct fw_page {
};
enum {
- MAX_RECLAIM_TIME_MSECS = 5000,
- MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
-};
-
-enum {
MLX5_MAX_RECLAIM_TIME_MILI = 5000,
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
@@ -641,7 +637,8 @@ static int optimal_reclaimed_pages(void)
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
struct rb_root *root, u16 func_id)
{
- unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
+ unsigned long end = jiffies + recl_pages_to_jiffies;
while (!RB_EMPTY_ROOT(root)) {
int nclaimed;
@@ -656,7 +653,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
}
if (nclaimed)
- end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ end = jiffies + recl_pages_to_jiffies;
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
@@ -727,7 +724,8 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
- unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
+ u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
+ unsigned long end = jiffies + recl_vf_pages_to_jiffies;
int prev_pages = *pages;
/* In case of internal error we will free the pages manually later */
@@ -743,7 +741,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
return -ETIMEDOUT;
}
if (*pages < prev_pages) {
- end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
+ end = jiffies + recl_vf_pages_to_jiffies;
prev_pages = *pages;
}
msleep(50);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 763c83a02380..830444f927d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -194,15 +194,25 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
}
-static void irq_set_name(char *name, int vecidx)
+static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
- if (vecidx == 0) {
+ if (!pool->xa_num_irqs.max) {
+ /* in case we only have a single irq for the device */
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx);
+ return;
+ }
+
+ if (vecidx == pool->xa_num_irqs.max) {
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
return;
}
- snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
- vecidx - MLX5_IRQ_VEC_COMP_BASE);
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
+}
+
+static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
+{
+ return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
}
static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
@@ -216,8 +226,8 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
if (!irq)
return ERR_PTR(-ENOMEM);
irq->irqn = pci_irq_vector(dev->pdev, i);
- if (!pool->name[0])
- irq_set_name(name, i);
+ if (!irq_pool_is_sf_pool(pool))
+ irq_set_name(pool, name, i);
else
irq_sf_set_name(pool, name, i);
ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
@@ -386,6 +396,9 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
if (IS_ERR(irq) || !affinity)
goto unlock;
cpumask_copy(irq->mask, affinity);
+ if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
+ cpumask_empty(irq->mask))
+ cpumask_set_cpu(0, irq->mask);
irq_set_affinity_hint(irq->irqn, irq->mask);
unlock:
mutex_unlock(&pool->lock);
@@ -440,6 +453,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
}
pf_irq:
pool = irq_table->pf_pool;
+ vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
irq = irq_pool_request_vector(pool, vecidx, affinity);
out:
if (IS_ERR(irq))
@@ -577,6 +591,8 @@ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
+ if (!table->pf_pool->xa_num_irqs.max)
+ return 1;
return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
}
@@ -592,19 +608,15 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
if (mlx5_core_is_sf(dev))
return 0;
- pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
- MLX5_IRQ_VEC_COMP_BASE;
+ pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
pf_vec = min_t(int, pf_vec, num_eqs);
- if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE)
- return -ENOMEM;
total_vec = pf_vec;
if (mlx5_sf_max_functions(dev))
total_vec += MLX5_IRQ_CTRL_SF_MAX +
MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
- total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
- total_vec, PCI_IRQ_MSIX);
+ total_vec = pci_alloc_irq_vectors(dev->pdev, 1, total_vec, PCI_IRQ_MSIX);
if (total_vec < 0)
return total_vec;
pf_vec = min(pf_vec, total_vec);
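
With MLX5_IRQ_VEC_COMP_BASE gone, completion vectors start at index 0 and the control/async EQ vector moves to the pool's last index; requests made with MLX5_IRQ_EQ_CTRL are remapped there in mlx5_irq_request(). Illustratively, for pf_vec == 4 (so xa_num_irqs.max == 3):

	/* Illustrative PF-pool layout after this patch (pf_vec == 4):
	 *   vecidx 0..2 -> "mlx5_comp0".."mlx5_comp2"  (completion EQs)
	 *   vecidx 3    -> "mlx5_async3"               (control/async EQ)
	 * With a single vector (xa_num_irqs.max == 0) everything shares
	 * "mlx5_combined0".
	 */
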
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 871c2fbe18d3..f37db7cc32a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -9,6 +9,8 @@
#include "sf/sf.h"
#include "sf/mlx5_ifc_vhca_event.h"
#include "ecpf.h"
+#define CREATE_TRACE_POINTS
+#include "diag/dev_tracepoint.h"
struct mlx5_sf_dev_table {
struct xarray devices;
@@ -66,13 +68,18 @@ static void mlx5_sf_dev_release(struct device *device)
kfree(sf_dev);
}
-static void mlx5_sf_dev_remove(struct mlx5_sf_dev *sf_dev)
+static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev)
{
+ int id;
+
+ id = sf_dev->adev.id;
+ trace_mlx5_sf_dev_del(dev, sf_dev, id);
+
auxiliary_device_delete(&sf_dev->adev);
auxiliary_device_uninit(&sf_dev->adev);
}
-static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
+static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id, u32 sfnum)
{
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
struct mlx5_sf_dev *sf_dev;
@@ -100,6 +107,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
sf_dev->adev.dev.groups = sf_attr_groups;
sf_dev->sfnum = sfnum;
sf_dev->parent_mdev = dev;
+ sf_dev->fn_id = fn_id;
if (!table->max_sfs) {
mlx5_adev_idx_free(id);
@@ -109,6 +117,8 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
}
sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
+ trace_mlx5_sf_dev_add(dev, sf_dev, id);
+
err = auxiliary_device_init(&sf_dev->adev);
if (err) {
mlx5_adev_idx_free(id);
@@ -128,7 +138,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
return;
xa_err:
- mlx5_sf_dev_remove(sf_dev);
+ mlx5_sf_dev_remove(dev, sf_dev);
add_err:
mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
sf_index, sfnum, err);
@@ -139,7 +149,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
xa_erase(&table->devices, sf_index);
- mlx5_sf_dev_remove(sf_dev);
+ mlx5_sf_dev_remove(dev, sf_dev);
}
static int
@@ -178,7 +188,8 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
break;
case MLX5_VHCA_STATE_ACTIVE:
if (!sf_dev)
- mlx5_sf_dev_add(table->dev, sf_index, event->sw_function_id);
+ mlx5_sf_dev_add(table->dev, sf_index, event->function_id,
+ event->sw_function_id);
break;
default:
break;
@@ -260,7 +271,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
xa_for_each(&table->devices, index, sf_dev) {
xa_erase(&table->devices, index);
- mlx5_sf_dev_remove(sf_dev);
+ mlx5_sf_dev_remove(table->dev, sf_dev);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index 149fd9e698cf..2a66a427ef15 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -16,6 +16,7 @@ struct mlx5_sf_dev {
struct mlx5_core_dev *mdev;
phys_addr_t bar_base_addr;
u32 sfnum;
+ u16 fn_id;
};
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
new file mode 100644
index 000000000000..7f7c9af5deed
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_SF_DEV_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_SF_DEV_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/mlx5/driver.h>
+#include "../../dev/dev.h"
+
+DECLARE_EVENT_CLASS(mlx5_sf_dev_template,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ const struct mlx5_sf_dev *sfdev,
+ int aux_id),
+ TP_ARGS(dev, sfdev, aux_id),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(const struct mlx5_sf_dev*, sfdev)
+ __field(int, aux_id)
+ __field(u16, hw_fn_id)
+ __field(u32, sfnum)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->sfdev = sfdev;
+ __entry->aux_id = aux_id;
+ __entry->hw_fn_id = sfdev->fn_id;
+ __entry->sfnum = sfdev->sfnum;
+ ),
+ TP_printk("(%s) sfdev=%pK aux_id=%d hw_id=0x%x sfnum=%u\n",
+ __get_str(devname), __entry->sfdev,
+ __entry->aux_id, __entry->hw_fn_id,
+ __entry->sfnum)
+);
+
+DEFINE_EVENT(mlx5_sf_dev_template, mlx5_sf_dev_add,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ const struct mlx5_sf_dev *sfdev,
+ int aux_id),
+ TP_ARGS(dev, sfdev, aux_id)
+ );
+
+DEFINE_EVENT(mlx5_sf_dev_template, mlx5_sf_dev_del,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ const struct mlx5_sf_dev *sfdev,
+ int aux_id),
+ TP_ARGS(dev, sfdev, aux_id)
+ );
+
+#endif /* _MLX5_SF_DEV_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH sf/dev/diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dev_tracepoint
+#include <trace/define_trace.h>
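
DEFINE_EVENT() above generates trace_mlx5_sf_dev_add() and trace_mlx5_sf_dev_del(), which the dev.c hunks call around auxiliary device creation and removal. A hedged note on runtime use:

	/* The generated call sites look like (see mlx5_sf_dev_add() above): */
	trace_mlx5_sf_dev_add(dev, sf_dev, id);

	/* With TRACE_SYSTEM set to mlx5, the event can typically be enabled
	 * from tracefs, e.g.
	 * /sys/kernel/debug/tracing/events/mlx5/mlx5_sf_dev_add/enable.
	 */
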
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 052f48068dc1..7b4783ce213e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -46,7 +46,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
goto init_one_err;
}
- devlink_reload_enable(devlink);
+ devlink_register(devlink);
return 0;
init_one_err:
@@ -61,10 +61,9 @@ mdev_err:
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- struct devlink *devlink;
+ struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
- devlink = priv_to_devlink(sf_dev->mdev);
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
mlx5_uninit_one(sf_dev->mdev);
iounmap(sf_dev->mdev->iseg);
mlx5_mdev_uninit(sf_dev->mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 13891fdc607e..3be659cd91f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -8,6 +8,8 @@
#include "mlx5_ifc_vhca_event.h"
#include "vhca_event.h"
#include "ecpf.h"
+#define CREATE_TRACE_POINTS
+#include "diag/sf_tracepoint.h"
struct mlx5_sf {
struct devlink_port dl_port;
@@ -112,6 +114,7 @@ static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
mlx5_sf_id_erase(table, sf);
mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
+ trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
kfree(sf);
}
@@ -209,6 +212,7 @@ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
return err;
sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
+ trace_mlx5_sf_activate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
return 0;
}
@@ -224,6 +228,7 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
return err;
sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
+ trace_mlx5_sf_deactivate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
return 0;
}
@@ -293,6 +298,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
if (err)
goto esw_err;
*new_port_index = sf->port_index;
+ trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
return 0;
esw_err:
@@ -323,7 +329,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
- if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
+ if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
return -EOPNOTSUPP;
}
@@ -442,6 +448,8 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
if (update)
sf->hw_state = event->new_vhca_state;
+ trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
+ sf->hw_fn_id, sf->hw_state);
sf_err:
mutex_unlock(&table->sf_state_lock);
mlx5_sf_table_put(table);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h
new file mode 100644
index 000000000000..8bf1cd90930d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_SF_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_SF_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/mlx5/driver.h>
+#include "sf/vhca_event.h"
+
+TRACE_EVENT(mlx5_sf_add,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ unsigned int port_index,
+ u32 controller,
+ u16 hw_fn_id,
+ u32 sfnum),
+ TP_ARGS(dev, port_index, controller, hw_fn_id, sfnum),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(unsigned int, port_index)
+ __field(u32, controller)
+ __field(u16, hw_fn_id)
+ __field(u32, sfnum)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->port_index = port_index;
+ __entry->controller = controller;
+ __entry->hw_fn_id = hw_fn_id;
+ __entry->sfnum = sfnum;
+ ),
+ TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x sfnum=%u\n",
+ __get_str(devname), __entry->port_index, __entry->controller,
+ __entry->hw_fn_id, __entry->sfnum)
+);
+
+TRACE_EVENT(mlx5_sf_free,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ unsigned int port_index,
+ u32 controller,
+ u16 hw_fn_id),
+ TP_ARGS(dev, port_index, controller, hw_fn_id),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(unsigned int, port_index)
+ __field(u32, controller)
+ __field(u16, hw_fn_id)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->port_index = port_index;
+ __entry->controller = controller;
+ __entry->hw_fn_id = hw_fn_id;
+ ),
+ TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x\n",
+ __get_str(devname), __entry->port_index, __entry->controller,
+ __entry->hw_fn_id)
+);
+
+TRACE_EVENT(mlx5_sf_hwc_alloc,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u32 controller,
+ u16 hw_fn_id,
+ u32 sfnum),
+ TP_ARGS(dev, controller, hw_fn_id, sfnum),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(u32, controller)
+ __field(u16, hw_fn_id)
+ __field(u32, sfnum)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->controller = controller;
+ __entry->hw_fn_id = hw_fn_id;
+ __entry->sfnum = sfnum;
+ ),
+ TP_printk("(%s) controller=%u hw_id=0x%x sfnum=%u\n",
+ __get_str(devname), __entry->controller, __entry->hw_fn_id,
+ __entry->sfnum)
+);
+
+TRACE_EVENT(mlx5_sf_hwc_free,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u16 hw_fn_id),
+ TP_ARGS(dev, hw_fn_id),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(u16, hw_fn_id)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->hw_fn_id = hw_fn_id;
+ ),
+ TP_printk("(%s) hw_id=0x%x\n", __get_str(devname), __entry->hw_fn_id)
+);
+
+TRACE_EVENT(mlx5_sf_hwc_deferred_free,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u16 hw_fn_id),
+ TP_ARGS(dev, hw_fn_id),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(u16, hw_fn_id)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->hw_fn_id = hw_fn_id;
+ ),
+ TP_printk("(%s) hw_id=0x%x\n", __get_str(devname), __entry->hw_fn_id)
+);
+
+DECLARE_EVENT_CLASS(mlx5_sf_state_template,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u32 port_index,
+ u32 controller,
+ u16 hw_fn_id),
+ TP_ARGS(dev, port_index, controller, hw_fn_id),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(unsigned int, port_index)
+ __field(u32, controller)
+ __field(u16, hw_fn_id)),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->port_index = port_index;
+ __entry->controller = controller;
+ __entry->hw_fn_id = hw_fn_id;
+ ),
+ TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x\n",
+ __get_str(devname), __entry->port_index, __entry->controller,
+ __entry->hw_fn_id)
+);
+
+DEFINE_EVENT(mlx5_sf_state_template, mlx5_sf_activate,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u32 port_index,
+ u32 controller,
+ u16 hw_fn_id),
+ TP_ARGS(dev, port_index, controller, hw_fn_id)
+ );
+
+DEFINE_EVENT(mlx5_sf_state_template, mlx5_sf_deactivate,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u32 port_index,
+ u32 controller,
+ u16 hw_fn_id),
+ TP_ARGS(dev, port_index, controller, hw_fn_id)
+ );
+
+TRACE_EVENT(mlx5_sf_update_state,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ unsigned int port_index,
+ u32 controller,
+ u16 hw_fn_id,
+ u8 state),
+ TP_ARGS(dev, port_index, controller, hw_fn_id, state),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(unsigned int, port_index)
+ __field(u32, controller)
+ __field(u16, hw_fn_id)
+ __field(u8, state)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->port_index = port_index;
+ __entry->controller = controller;
+ __entry->hw_fn_id = hw_fn_id;
+ __entry->state = state;
+ ),
+ TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x state=%u\n",
+ __get_str(devname), __entry->port_index, __entry->controller,
+ __entry->hw_fn_id, __entry->state)
+);
+
+#endif /* _MLX5_SF_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH sf/diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE sf_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h
new file mode 100644
index 000000000000..fd814a190b8b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_SF_VHCA_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_SF_VHCA_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/mlx5/driver.h>
+#include "sf/vhca_event.h"
+
+TRACE_EVENT(mlx5_sf_vhca_event,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ const struct mlx5_vhca_state_event *event),
+ TP_ARGS(dev, event),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(u16, hw_fn_id)
+ __field(u32, sfnum)
+ __field(u8, vhca_state)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->hw_fn_id = event->function_id;
+ __entry->sfnum = event->sw_function_id;
+ __entry->vhca_state = event->new_vhca_state;
+ ),
+ TP_printk("(%s) hw_id=0x%x sfnum=%u vhca_state=%d\n",
+ __get_str(devname), __entry->hw_fn_id,
+ __entry->sfnum, __entry->vhca_state)
+);
+
+#endif /* _MLX5_SF_VHCA_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH sf/diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vhca_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index d9c69123c1ab..252d6017387d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -8,6 +8,7 @@
#include "ecpf.h"
#include "mlx5_core.h"
#include "eswitch.h"
+#include "diag/sf_tracepoint.h"
struct mlx5_sf_hw {
u32 usr_sfnum;
@@ -142,6 +143,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
goto vhca_err;
}
+ trace_mlx5_sf_hwc_alloc(dev, controller, hw_fn_id, usr_sfnum);
mutex_unlock(&table->table_lock);
return sw_id;
@@ -172,6 +174,7 @@ static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev,
mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx);
hwc->sfs[idx].allocated = false;
hwc->sfs[idx].pending_delete = false;
+ trace_mlx5_sf_hwc_free(dev, hwc->start_fn_id + idx);
}
void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
@@ -195,6 +198,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller
hwc->sfs[id].allocated = false;
} else {
hwc->sfs[id].pending_delete = true;
+ trace_mlx5_sf_hwc_deferred_free(dev, hw_fn_id);
}
err:
mutex_unlock(&table->table_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
index 28b14b05086f..d908fba968f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -6,6 +6,8 @@
#include "mlx5_core.h"
#include "vhca_event.h"
#include "ecpf.h"
+#define CREATE_TRACE_POINTS
+#include "diag/vhca_tracepoint.h"
struct mlx5_vhca_state_notifier {
struct mlx5_core_dev *dev;
@@ -82,6 +84,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
vhca_state_context.vhca_state);
mlx5_vhca_event_arm(dev, event->function_id);
+ trace_mlx5_sf_vhca_event(dev, event);
blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index a5b9f65db23c..07936841ce99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -39,6 +39,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT",
[DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN",
[DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN",
+ [DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
@@ -513,9 +514,9 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
/* If destination is vport we will get the FW flow table
* that recalculates the CS and forwards to the vport.
*/
- ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn,
- dest_action->vport->caps->num,
- final_icm_addr);
+ ret = mlx5dr_domain_get_recalc_cs_ft_addr(dest_action->vport->dmn,
+ dest_action->vport->caps->num,
+ final_icm_addr);
if (ret) {
mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
return ret;
@@ -632,7 +633,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
return -EOPNOTSUPP;
case DR_ACTION_TYP_CTR:
attr.ctr_id = action->ctr->ctr_id +
- action->ctr->offeset;
+ action->ctr->offset;
break;
case DR_ACTION_TYP_TAG:
attr.flow_tag = action->flow_tag->flow_tag;
@@ -669,7 +670,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
if (rx_rule) {
- if (action->vport->caps->num == WIRE_PORT) {
+ if (action->vport->caps->num == MLX5_VPORT_UPLINK) {
mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n");
return -EOPNOTSUPP;
}
@@ -853,6 +854,7 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action;
bool reformat_req = false;
u32 num_of_ref = 0;
+ u32 ref_act_cnt;
int ret;
int i;
@@ -861,11 +863,14 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
return NULL;
}
- hw_dests = kzalloc(sizeof(*hw_dests) * num_of_dests, GFP_KERNEL);
+ hw_dests = kcalloc(num_of_dests, sizeof(*hw_dests), GFP_KERNEL);
if (!hw_dests)
return NULL;
- ref_actions = kzalloc(sizeof(*ref_actions) * num_of_dests * 2, GFP_KERNEL);
+ if (unlikely(check_mul_overflow(num_of_dests, 2u, &ref_act_cnt)))
+ goto free_hw_dests;
+
+ ref_actions = kcalloc(ref_act_cnt, sizeof(*ref_actions), GFP_KERNEL);
if (!ref_actions)
goto free_hw_dests;
@@ -1747,7 +1752,7 @@ dec_ref:
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
- u32 vport, u8 vhca_id_valid,
+ u16 vport, u8 vhca_id_valid,
u16 vhca_id)
{
struct mlx5dr_cmd_vport_cap *vport_cap;
@@ -1767,9 +1772,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
return NULL;
}
- vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, vport);
if (!vport_cap) {
- mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport);
+ mlx5dr_err(dmn,
+ "Failed to get vport 0x%x caps - vport is disabled or invalid\n",
+ vport);
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 56307283bf9b..1d8febed0d76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -195,6 +195,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
+ caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
return 0;
}
@@ -272,7 +274,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id,
u32 modify_header_id,
- u32 vport_id)
+ u16 vport)
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
void *in_flow_context;
@@ -303,7 +305,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format_struct, in_dests, destination_type,
MLX5_FLOW_DESTINATION_TYPE_VPORT);
- MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id);
+ MLX5_SET(dest_format_struct, in_dests, destination_id, vport);
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
kvfree(in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 0fe159809ba1..49089cbe897c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -9,48 +9,45 @@
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
(dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
-static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
+static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
/* Per vport cached FW FT for checksum recalculation, this
- * recalculation is needed due to a HW bug.
+ * recalculation is needed due to a HW bug in STEv0.
*/
- dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
- sizeof(dmn->cache.recalc_cs_ft[0]),
- GFP_KERNEL);
- if (!dmn->cache.recalc_cs_ft)
- return -ENOMEM;
-
- return 0;
+ xa_init(&dmn->csum_fts_xa);
}
-static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
+static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
- int i;
-
- for (i = 0; i < dmn->info.caps.num_vports; i++) {
- if (!dmn->cache.recalc_cs_ft[i])
- continue;
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+ unsigned long i;
- mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
+ xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
+ if (recalc_cs_ft)
+ mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
}
- kfree(dmn->cache.recalc_cs_ft);
+ xa_destroy(&dmn->csum_fts_xa);
}
-int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
- u32 vport_num,
- u64 *rx_icm_addr)
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u16 vport_num,
+ u64 *rx_icm_addr)
{
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+ int ret;
- recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
+ recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
if (!recalc_cs_ft) {
- /* Table not in cache, need to allocate a new one */
+ /* Table hasn't been created yet */
recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
if (!recalc_cs_ft)
return -EINVAL;
- dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
+ ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
+ recalc_cs_ft, GFP_KERNEL));
+ if (ret)
+ return ret;
}
*rx_icm_addr = recalc_cs_ft->rx_icm_addr;
@@ -124,18 +121,39 @@ static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}
+static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
+ struct mlx5dr_cmd_vport_cap *uplink_vport)
+{
+ struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
+
+ uplink_vport->num = MLX5_VPORT_UPLINK;
+ uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
+ uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
+ uplink_vport->vport_gvmi = 0;
+ uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
+}
+
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
- bool other_vport,
- u16 vport_number)
+ u16 vport_number,
+ struct mlx5dr_cmd_vport_cap *vport_caps)
{
- struct mlx5dr_cmd_vport_cap *vport_caps;
+ u16 cmd_vport = vport_number;
+ bool other_vport = true;
int ret;
- vport_caps = &dmn->info.caps.vports_caps[vport_number];
+ if (vport_number == MLX5_VPORT_UPLINK) {
+ dr_domain_fill_uplink_caps(dmn, vport_caps);
+ return 0;
+ }
+
+ if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
+ other_vport = false;
+ cmd_vport = 0;
+ }
ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
other_vport,
- vport_number,
+ cmd_vport,
&vport_caps->icm_address_rx,
&vport_caps->icm_address_tx);
if (ret)
@@ -143,7 +161,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
other_vport,
- vport_number,
+ cmd_vport,
&vport_caps->vport_gvmi);
if (ret)
return ret;
@@ -154,27 +172,82 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
return 0;
}
-static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
+static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
- struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
- struct mlx5dr_cmd_vport_cap *wire_vport;
- int vport;
+ return dr_domain_query_vport(dmn,
+ dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
+ &dmn->info.caps.vports.esw_manager_caps);
+}
+
+static struct mlx5dr_cmd_vport_cap *
+dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+ struct mlx5dr_cmd_vport_cap *vport_caps;
int ret;
- /* Query vports (except wire vport) */
- for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
- ret = dr_domain_query_vport(dmn, !!vport, vport);
- if (ret)
- return ret;
+ vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
+ if (!vport_caps)
+ return NULL;
+
+ ret = dr_domain_query_vport(dmn, vport, vport_caps);
+ if (ret) {
+ kvfree(vport_caps);
+ return NULL;
}
- /* Last vport is the wire port */
- wire_vport = &dmn->info.caps.vports_caps[vport];
- wire_vport->num = WIRE_PORT;
- wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
- wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
- wire_vport->vport_gvmi = 0;
- wire_vport->vhca_gvmi = dmn->info.caps.gvmi;
+ ret = xa_insert(&caps->vports.vports_caps_xa, vport,
+ vport_caps, GFP_KERNEL);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
+ kvfree(vport_caps);
+ return ERR_PTR(ret);
+ }
+
+ return vport_caps;
+}
+
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+
+ if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
+ (!caps->is_ecpf && vport == 0))
+ return &caps->vports.esw_manager_caps;
+
+vport_load:
+ vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
+ if (vport_caps)
+ return vport_caps;
+
+ vport_caps = dr_domain_add_vport_cap(dmn, vport);
+ if (PTR_ERR(vport_caps) == -EBUSY)
+ /* caps were already stored by another thread */
+ goto vport_load;
+
+ return vport_caps;
+}
+
+static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+ unsigned long i;
+
+ xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
+ vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
+ kvfree(vport_caps);
+ }
+}
+
+static int dr_domain_query_uplink(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+
+ vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK);
+ if (!vport_caps)
+ return -EINVAL;
return 0;
}
@@ -196,25 +269,29 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
- dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
- sizeof(dmn->info.caps.vports_caps[0]),
- GFP_KERNEL);
- if (!dmn->info.caps.vports_caps)
- return -ENOMEM;
+ xa_init(&dmn->info.caps.vports.vports_caps_xa);
- ret = dr_domain_query_vports(dmn);
+ /* Query eswitch manager and uplink vports only. Rest of the
+ * vports (vport 0, VFs and SFs) will be queried dynamically.
+ */
+
+ ret = dr_domain_query_esw_mngr(dmn);
if (ret) {
- mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
- goto free_vports_caps;
+ mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
+ goto free_vports_caps_xa;
}
- dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;
+ ret = dr_domain_query_uplink(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret);
+ goto free_vports_caps_xa;
+ }
return 0;
-free_vports_caps:
- kfree(dmn->info.caps.vports_caps);
- dmn->info.caps.vports_caps = NULL;
+free_vports_caps_xa:
+ xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
+
return ret;
}
@@ -229,8 +306,6 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
return -EOPNOTSUPP;
}
- dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
-
ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
if (ret)
return ret;
@@ -267,11 +342,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
- vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
- if (!vport_cap) {
- mlx5dr_err(dmn, "Failed to get esw manager vport\n");
- return -ENOENT;
- }
+ vport_cap = &dmn->info.caps.vports.esw_manager_caps;
dmn->info.supp_sw_steering = true;
dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
@@ -290,7 +361,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
- kfree(dmn->info.caps.vports_caps);
+ dr_domain_clear_vports(dmn);
+ xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
}
struct mlx5dr_domain *
@@ -333,16 +405,10 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
goto uninit_caps;
}
- ret = dr_domain_init_cache(dmn);
- if (ret) {
- mlx5dr_err(dmn, "Failed initialize domain cache\n");
- goto uninit_resourses;
- }
+ dr_domain_init_csum_recalc_fts(dmn);
return dmn;
-uninit_resourses:
- dr_domain_uninit_resources(dmn);
uninit_caps:
dr_domain_caps_uninit(dmn);
free_domain:
@@ -381,7 +447,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
/* make sure resources are not used by the hardware */
mlx5dr_cmd_sync_steering(dmn->mdev);
- dr_domain_uninit_cache(dmn);
+ dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
mutex_destroy(&dmn->info.tx.mutex);
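
mlx5dr_domain_get_vport_cap() above is a lockless get-or-create over an xarray: xa_insert() returning -EBUSY means another thread stored the entry first, so the loser frees its copy and reloads. A generic, hedged sketch of the same pattern (struct foo and the function name are hypothetical):

	#include <linux/slab.h>
	#include <linux/xarray.h>

	struct foo { unsigned long id; };

	/* Hedged sketch of the xa_insert() get-or-create used above. */
	static struct foo *foo_get_or_create(struct xarray *xa, unsigned long id)
	{
		struct foo *f;
		int err;

	retry:
		f = xa_load(xa, id);
		if (f)
			return f;

		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			return NULL;
		f->id = id;

		err = xa_insert(xa, id, f, GFP_KERNEL);
		if (err == -EBUSY) {
			kfree(f);	/* lost the race; entry already stored */
			goto retry;
		}
		if (err) {
			kfree(f);
			return NULL;
		}
		return f;
	}
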
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 0d6f86eb248b..68a4c32d5f34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -5,7 +5,7 @@
#include "dr_types.h"
struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 66c24767e3b0..7f6fd9c5e371 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -24,7 +24,7 @@ struct mlx5dr_icm_dm {
};
struct mlx5dr_icm_mr {
- struct mlx5_core_mkey mkey;
+ u32 mkey;
struct mlx5dr_icm_dm dm;
struct mlx5dr_domain *dmn;
size_t length;
@@ -33,7 +33,7 @@ struct mlx5dr_icm_mr {
static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
u32 pd, u64 length, u64 start_addr, int mode,
- struct mlx5_core_mkey *mkey)
+ u32 *mkey)
{
u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
@@ -116,7 +116,7 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
return icm_mr;
free_mkey:
- mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
+ mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
free_dm:
mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
icm_mr->dm.addr, icm_mr->dm.obj_id);
@@ -130,7 +130,7 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
struct mlx5dr_icm_dm *dm = &icm_mr->dm;
- mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
+ mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
dm->addr, dm->obj_id);
kvfree(icm_mr);
@@ -252,7 +252,7 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
- chunk->rkey = buddy_mem_pool->icm_mr->mkey.key;
+ chunk->rkey = buddy_mem_pool->icm_mr->mkey;
chunk->mr_addr = offset;
chunk->icm_addr =
(uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index b5409cc021d3..75c775bee351 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -875,9 +875,10 @@ uninit_nic_rx:
static int dr_matcher_init(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *mask)
{
+ struct mlx5dr_match_parameters consumed_mask;
struct mlx5dr_table *tbl = matcher->tbl;
struct mlx5dr_domain *dmn = tbl->dmn;
- int ret;
+ int i, ret;
if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
mlx5dr_err(dmn, "Invalid match criteria attribute\n");
@@ -889,8 +890,16 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
mlx5dr_err(dmn, "Invalid match size attribute\n");
return -EINVAL;
}
+
+ consumed_mask.match_buf = kzalloc(mask->match_sz, GFP_KERNEL);
+ if (!consumed_mask.match_buf)
+ return -ENOMEM;
+
+ consumed_mask.match_sz = mask->match_sz;
+ memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz);
mlx5dr_ste_copy_param(matcher->match_criteria,
- &matcher->mask, mask);
+ &matcher->mask, &consumed_mask,
+ true);
}
switch (dmn->type) {
@@ -909,9 +918,22 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
break;
default:
WARN_ON(true);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_consumed_mask;
+ }
+
+ /* Check that all mask data was consumed */
+ for (i = 0; i < consumed_mask.match_sz; i++) {
+ if (consumed_mask.match_buf[i]) {
+ mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
+ ret = -EOPNOTSUPP;
+ goto free_consumed_mask;
+ }
}
+ ret = 0;
+free_consumed_mask:
+ kfree(consumed_mask.match_buf);
return ret;
}
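
The consumed-mask check works because mlx5dr_ste_copy_param() is now called with clear=true, so every field an STE builder understands is zeroed out of the copied buffer (via the IFC_GET_CLR/memcpy_and_clear helpers in dr_ste.c below); any byte left nonzero names a match parameter no builder consumed. A hedged restatement of the final scan:

	/* Hedged restatement: a fully-consumed mask is all-zero afterwards. */
	static bool example_all_consumed(const u8 *buf, size_t sz)
	{
		size_t i;

		for (i = 0; i < sz; i++)
			if (buf[i])
				return false;	/* unsupported mask bits remain */
		return true;
	}
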
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index aca80efc28fa..6a390e981b09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -917,7 +917,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
return false;
}
- mlx5dr_ste_copy_param(matcher->match_criteria, param, value);
+ mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false);
if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
s_idx = offsetof(struct mlx5dr_match_param, outer);
@@ -1042,10 +1042,10 @@ static bool dr_rule_skip(enum mlx5dr_domain_type domain,
return false;
if (mask->misc.source_port) {
- if (rx && value->misc.source_port != WIRE_PORT)
+ if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
return true;
- if (!rx && value->misc.source_port == WIRE_PORT)
+ if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index bfb14b4b1906..00aef47d7682 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -350,7 +350,7 @@ static void dr_fill_data_segs(struct mlx5dr_send_ring *send_ring,
send_info->read.length = send_info->write.length;
/* Read into the same write area */
send_info->read.addr = (uintptr_t)send_info->write.addr;
- send_info->read.lkey = send_ring->mr->mkey.key;
+ send_info->read.lkey = send_ring->mr->mkey;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->read.send_flags = IB_SEND_SIGNALED;
@@ -388,7 +388,7 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
(void *)(uintptr_t)send_info->write.addr,
send_info->write.length);
send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
- send_info->write.lkey = send_ring->mr->mkey.key;
+ send_info->write.lkey = send_ring->mr->mkey;
}
send_ring->tx_head++;
@@ -848,8 +848,7 @@ static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq)
kfree(cq);
}
-static int
-dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey)
+static int dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
{
u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
void *mkc;
@@ -908,7 +907,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
{
- mlx5_core_destroy_mkey(mdev, &mr->mkey);
+ mlx5_core_destroy_mkey(mdev, mr->mkey);
dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
DMA_BIDIRECTIONAL);
kfree(mr);
@@ -1039,7 +1038,7 @@ int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
send_info.write.lkey = 0;
/* Using the sync_mr in order to write/read */
send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
- send_info.rkey = send_ring->sync_mr->mkey.key;
+ send_info.rkey = send_ring->sync_mr->mkey;
for (i = 0; i < num_of_sends_req; i++) {
ret = dr_postsend_icm_data(dmn, &send_info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 1cdfe4fccc7a..219a5474a8a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -668,101 +668,116 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
return 0;
}
-static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
-{
- spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
- spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
- spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
- spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
- spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);
-
- spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
- spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
- source_eswitch_owner_vhca_id);
-
- spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
- spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
- spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
- spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
- spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
- spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);
+#define IFC_GET_CLR(typ, p, fld, clear) ({ \
+ void *__p = (p); \
+ u32 __t = MLX5_GET(typ, __p, fld); \
+ if (clear) \
+ MLX5_SET(typ, __p, fld, 0); \
+ __t; \
+})
+
+#define memcpy_and_clear(to, from, len, clear) ({ \
+ void *__to = (to), *__from = (from); \
+ size_t __len = (len); \
+ memcpy(__to, __from, __len); \
+ if (clear) \
+ memset(__from, 0, __len); \
+})
+
+static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
+{
+ spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
+ spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
+ spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
+ spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
+ spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);
+
+ spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
+ spec->source_eswitch_owner_vhca_id =
+ IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);
+
+ spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
+ spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
+ spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
+ spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
+ spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
+ spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);
spec->outer_second_cvlan_tag =
- MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
+ IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
spec->inner_second_cvlan_tag =
- MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
+ IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
spec->outer_second_svlan_tag =
- MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
+ IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
spec->inner_second_svlan_tag =
- MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);
-
- spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);
+ IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
+ spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);
- spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
- spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);
+ spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
+ spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);
- spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);
+ spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);
- spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
- spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);
+ spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
+ spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);
spec->outer_ipv6_flow_label =
- MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);
+ IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);
spec->inner_ipv6_flow_label =
- MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);
+ IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);
- spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
+ spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
spec->geneve_protocol_type =
- MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);
+ IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);
- spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
+ spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
}
-static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
+static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
{
__be32 raw_ip[4];
- spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);
+ spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);
- spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
- spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);
+ spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
+ spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);
- spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);
+ spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);
- spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
- spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
- spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
- spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);
+ spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
+ spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
+ spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
+ spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);
- spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
- spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
- spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
- spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
- spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
- spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
- spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
- spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
- spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
- spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);
+ spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
+ spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
+ spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
+ spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
+ spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
+ spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
+ spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
+ spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
+ spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
+ spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);
- spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);
+ spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);
- spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
- spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);
+ spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
+ spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);
- memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- sizeof(raw_ip));
+ memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ sizeof(raw_ip), clr);
spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);
- memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- sizeof(raw_ip));
+ memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ sizeof(raw_ip), clr);
spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
@@ -770,104 +785,105 @@ static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
-static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
+static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
{
spec->outer_first_mpls_label =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
spec->outer_first_mpls_exp =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
spec->outer_first_mpls_s_bos =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
spec->outer_first_mpls_ttl =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
spec->inner_first_mpls_label =
- MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
+ IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
spec->inner_first_mpls_exp =
- MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
+ IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
spec->inner_first_mpls_s_bos =
- MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
+ IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
spec->inner_first_mpls_ttl =
- MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
+ IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
spec->outer_first_mpls_over_gre_label =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
spec->outer_first_mpls_over_gre_exp =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
spec->outer_first_mpls_over_gre_s_bos =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
spec->outer_first_mpls_over_gre_ttl =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
spec->outer_first_mpls_over_udp_label =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
spec->outer_first_mpls_over_udp_exp =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
spec->outer_first_mpls_over_udp_s_bos =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
spec->outer_first_mpls_over_udp_ttl =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
- spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
- spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
- spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
- spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
- spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
- spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
- spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
- spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
- spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
-}
-
-static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
-{
- spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
- spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
- spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
- spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
+ spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
+ spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
+ spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
+ spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
+ spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
+ spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
+ spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
+ spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
+ spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
+}
+
+static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
+{
+ spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
+ spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
+ spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
+ spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
spec->outer_vxlan_gpe_vni =
- MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
+ IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
spec->outer_vxlan_gpe_next_protocol =
- MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
+ IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
spec->outer_vxlan_gpe_flags =
- MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
- spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
+ IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
+ spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
spec->icmpv6_header_data =
- MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
- spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
- spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
- spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
- spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
+ IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
+ spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
+ spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
+ spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
+ spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
spec->geneve_tlv_option_0_data =
- MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data);
- spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags);
- spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type);
- spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid);
- spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0);
- spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2);
+ IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
+ spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
+ spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
+ spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
+ spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
+ spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
spec->gtpu_first_ext_dw_0 =
- MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0);
+ IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
}
-static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec)
+static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
{
spec->prog_sample_field_id_0 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
spec->prog_sample_field_value_0 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
spec->prog_sample_field_id_1 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
spec->prog_sample_field_value_1 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
spec->prog_sample_field_id_2 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
spec->prog_sample_field_value_2 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
spec->prog_sample_field_id_3 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
spec->prog_sample_field_value_3 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param,
- struct mlx5dr_match_parameters *mask)
+ struct mlx5dr_match_parameters *mask,
+ bool clr)
{
u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
u8 *data = (u8 *)mask->match_buf;
@@ -881,7 +897,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = mask->match_buf;
}
- dr_ste_copy_mask_spec(buff, &set_param->outer);
+ dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
}
param_location = sizeof(struct mlx5dr_match_spec);
@@ -894,7 +910,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = data + param_location;
}
- dr_ste_copy_mask_misc(buff, &set_param->misc);
+ dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
}
param_location += sizeof(struct mlx5dr_match_misc);
@@ -907,7 +923,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = data + param_location;
}
- dr_ste_copy_mask_spec(buff, &set_param->inner);
+ dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
}
param_location += sizeof(struct mlx5dr_match_spec);
@@ -920,7 +936,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = data + param_location;
}
- dr_ste_copy_mask_misc2(buff, &set_param->misc2);
+ dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
}
param_location += sizeof(struct mlx5dr_match_misc2);
@@ -934,7 +950,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = data + param_location;
}
- dr_ste_copy_mask_misc3(buff, &set_param->misc3);
+ dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
}
param_location += sizeof(struct mlx5dr_match_misc3);
@@ -948,7 +964,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = data + param_location;
}
- dr_ste_copy_mask_misc4(buff, &set_param->misc4);
+ dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
}
}
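The IFC_GET_CLR() and memcpy_and_clear() helpers introduced above turn every mask read into an optional "consume": when clr is true the field is zeroed in place, which is what lets dr_matcher_init() detect unconsumed mask bits afterwards. A standalone sketch of the same statement-expression idiom over a plain struct; GET()/SET() here are simplified stand-ins for MLX5_GET()/MLX5_SET(), not the real accessors:

#include <stdio.h>

struct toy_mask {
	unsigned int vni;
	unsigned int proto;
};

/* Simplified stand-ins for the MLX5_GET/MLX5_SET field accessors. */
#define GET(p, fld)	((p)->fld)
#define SET(p, fld, v)	((p)->fld = (v))

/* Read a field; if 'clear' is set, zero it in place (i.e. consume it). */
#define GET_CLR(p, fld, clear) ({		\
	typeof((p)->fld) __t = GET(p, fld);	\
	if (clear)				\
		SET(p, fld, 0);			\
	__t;					\
})

int main(void)
{
	struct toy_mask m = { .vni = 42, .proto = 7 };

	unsigned int vni = GET_CLR(&m, vni, 1);		/* read and consume */
	unsigned int proto = GET_CLR(&m, proto, 0);	/* read only */

	printf("vni=%u (now %u), proto=%u (still %u)\n",
	       vni, m.vni, proto, m.proto);	/* vni=42 (now 0), proto=7 (still 7) */
	return 0;
}

This is why mlx5dr_ste_copy_param() gains the extra flag: dr_rule_verify() passes false so rule values are read non-destructively, while matcher init passes true to consume the mask.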
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 9c704bce3c12..b0649c2877dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -1645,7 +1645,7 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
+ struct mlx5dr_domain *vport_dmn;
u8 *bit_mask = sb->bit_mask;
bool source_gvmi_set;
@@ -1654,23 +1654,24 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
+ vport_dmn = dmn->peer_dmn;
else
return -EINVAL;
misc->source_eswitch_owner_vhca_id = 0;
} else {
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
}
source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
if (source_gvmi_set) {
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
+ misc->source_port);
if (!vport_cap) {
- mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
+ mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
misc->source_port);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index b2481c99da79..cb9cf67b0a02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -586,9 +586,11 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
u8 *d_action;
- dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
- action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
- action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
d_action = action + DR_STE_ACTION_SINGLE_SZ;
dr_ste_v1_set_encap_l3(last_ste,
@@ -1776,7 +1778,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
+ struct mlx5dr_domain *vport_dmn;
u8 *bit_mask = sb->bit_mask;
DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
@@ -1784,22 +1786,22 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
+ vport_dmn = dmn->peer_dmn;
else
return -EINVAL;
- misc->source_eswitch_owner_vhca_id = 0;
+ misc->source_eswitch_owner_vhca_id = 0;
} else {
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
}
if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
return 0;
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
if (!vport_cap) {
mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
misc->source_port);
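The dr_ste_v1_set_actions_tx() fix above stops unconditionally chaining a new STE for an encap-L3 action and instead opens one only when the current entry's remaining action budget is below DR_STE_ACTION_TRIPLE_SZ. A toy sketch of that "extend only when the budget is short" check, with invented sizes:

#include <stdio.h>

#define ACTION_TRIPLE_SZ 3
#define STE_ACTION_SLOTS 4	/* invented capacity per entry */

static int stes_used;

/* Pretend to chain a new entry; resets the action budget. */
static void init_next_entry(int *action_sz)
{
	stes_used++;
	*action_sz = STE_ACTION_SLOTS;
}

static void add_triple_action(int *action_sz)
{
	if (*action_sz < ACTION_TRIPLE_SZ)	/* only grow when needed */
		init_next_entry(action_sz);
	*action_sz -= ACTION_TRIPLE_SZ;
}

int main(void)
{
	int action_sz = STE_ACTION_SLOTS;

	stes_used = 1;
	add_triple_action(&action_sz);	/* fits in the first entry */
	add_triple_action(&action_sz);	/* 1 slot left -> chain a new entry */
	printf("entries used: %d\n", stes_used);	/* 2, not 3 */
	return 0;
}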
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index b20e8aabb861..3028b776da00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -4,7 +4,7 @@
#ifndef _DR_TYPES_
#define _DR_TYPES_
-#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include <linux/refcount.h>
#include "fs_core.h"
#include "wq.h"
@@ -14,7 +14,6 @@
#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
-#define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
@@ -752,9 +751,9 @@ struct mlx5dr_esw_caps {
struct mlx5dr_cmd_vport_cap {
u16 vport_gvmi;
u16 vhca_gvmi;
+ u16 num;
u64 icm_address_rx;
u64 icm_address_tx;
- u32 num;
};
struct mlx5dr_roce_cap {
@@ -763,6 +762,11 @@ struct mlx5dr_roce_cap {
u8 fl_rc_qp_when_roce_enabled:1;
};
+struct mlx5dr_vports {
+ struct mlx5dr_cmd_vport_cap esw_manager_caps;
+ struct xarray vports_caps_xa;
+};
+
struct mlx5dr_cmd_caps {
u16 gvmi;
u64 nic_rx_drop_address;
@@ -786,7 +790,6 @@ struct mlx5dr_cmd_caps {
u8 flex_parser_id_gtpu_first_ext_dw_0;
u8 max_ft_level;
u16 roce_min_src_udp;
- u8 num_esw_ports;
u8 sw_format_ver;
bool eswitch_manager;
bool rx_sw_owner;
@@ -795,11 +798,11 @@ struct mlx5dr_cmd_caps {
u8 rx_sw_owner_v2:1;
u8 tx_sw_owner_v2:1;
u8 fdb_sw_owner_v2:1;
- u32 num_vports;
struct mlx5dr_esw_caps esw_caps;
- struct mlx5dr_cmd_vport_cap *vports_caps;
+ struct mlx5dr_vports vports;
bool prio_tag_required;
struct mlx5dr_roce_cap roce_caps;
+ u8 is_ecpf:1;
u8 isolate_vl_tc:1;
};
@@ -826,10 +829,6 @@ struct mlx5dr_domain_info {
struct mlx5dr_cmd_caps caps;
};
-struct mlx5dr_domain_cache {
- struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
-};
-
struct mlx5dr_domain {
struct mlx5dr_domain *peer_dmn;
struct mlx5_core_dev *mdev;
@@ -841,7 +840,7 @@ struct mlx5dr_domain {
struct mlx5dr_icm_pool *action_icm_pool;
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
- struct mlx5dr_domain_cache cache;
+ struct xarray csum_fts_xa;
struct mlx5dr_ste_ctx *ste_ctx;
};
@@ -942,7 +941,7 @@ struct mlx5dr_action_dest_tbl {
struct mlx5dr_action_ctr {
u32 ctr_id;
- u32 offeset;
+ u32 offset;
};
struct mlx5dr_action_vport {
@@ -1102,18 +1101,8 @@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
return true;
}
-static inline struct mlx5dr_cmd_vport_cap *
-mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
-{
- if (!caps->vports_caps ||
- (vport >= caps->num_vports && vport != WIRE_PORT))
- return NULL;
-
- if (vport == WIRE_PORT)
- vport = caps->num_vports;
-
- return &caps->vports_caps[vport];
-}
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);
struct mlx5dr_cmd_query_flow_table_details {
u8 status;
@@ -1154,7 +1143,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id,
u32 modify_header_id,
- u32 vport_id);
+ u16 vport_id);
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
u32 table_type,
u32 table_id);
@@ -1241,7 +1230,8 @@ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_htbl_connect_info *connect_info);
void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param,
- struct mlx5dr_match_parameters *mask);
+ struct mlx5dr_match_parameters *mask,
+ bool clear);
struct mlx5dr_qp {
struct mlx5_core_dev *mdev;
@@ -1275,7 +1265,7 @@ struct mlx5dr_cq {
struct mlx5dr_mr {
struct mlx5_core_dev *mdev;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
dma_addr_t dma_addr;
void *addr;
size_t size;
@@ -1372,12 +1362,12 @@ struct mlx5dr_fw_recalc_cs_ft {
};
struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
-int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
- u32 vport_num,
- u64 *rx_icm_addr);
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u16 vport_num,
+ u64 *rx_icm_addr);
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_cmd_flow_destination_hw_info *dest,
int num_dest,
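The dr_types.h rework above replaces the fixed vports_caps array (plus the WIRE_PORT index trick) with an xarray keyed by vport number and a dedicated esw_manager_caps slot, so sparse vport numbers no longer dictate array sizing. A userspace sketch of the resulting lookup-or-query-and-cache shape, with a linked list standing in for the kernel xarray and all names invented:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct vport_cap {
	uint16_t vport;
	uint16_t gvmi;
	struct vport_cap *next;	/* stand-in for xarray chaining */
};

static struct vport_cap *vports;	/* sparse vport -> caps "map" */

/* Pretend firmware query for one vport's caps. */
static int query_vport_gvmi(uint16_t vport, uint16_t *gvmi)
{
	*gvmi = 0x100 + vport;
	return 0;
}

static struct vport_cap *get_vport_cap(uint16_t vport)
{
	struct vport_cap *cap;

	for (cap = vports; cap; cap = cap->next)	/* xa_load() analogue */
		if (cap->vport == vport)
			return cap;

	cap = calloc(1, sizeof(*cap));	/* miss: query once and cache */
	if (!cap)
		return NULL;
	cap->vport = vport;
	if (query_vport_gvmi(vport, &cap->gvmi)) {
		free(cap);
		return NULL;
	}
	cap->next = vports;	/* xa_insert() analogue */
	vports = cap;
	return cap;
}

int main(void)
{
	struct vport_cap *c = get_vport_cap(7);

	printf("vport 7 gvmi=0x%x\n", c ? c->gvmi : 0);
	printf("cached: %s\n", get_vport_cap(7) == c ? "yes" : "no");
	return 0;
}

In the kernel code mlx5dr_domain_get_vport_cap() plays the get_vport_cap() role here, and the eswitch manager keeps its own slot outside the xarray.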
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 7e58f4e594b7..2632d5ae9bc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -222,7 +222,7 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
-#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 32
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
@@ -625,6 +625,19 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n
mlx5dr_action_destroy(modify_hdr->action.dr_action);
}
+static int
+mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return -EOPNOTSUPP;
+}
+
static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
@@ -727,6 +740,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
.packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_dr_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_dr_destroy_match_definer,
.set_peer = mlx5_cmd_dr_set_peer,
.create_ns = mlx5_cmd_dr_create_ns,
.destroy_ns = mlx5_cmd_dr_destroy_ns,
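The two definer callbacks added above are deliberate stubs: software steering has no match-definer support yet, so the mlx5_flow_cmds table advertises the hooks but answers -EOPNOTSUPP, letting callers distinguish "unsupported by this backend" from a real failure. A tiny sketch of that ops-table convention with invented types:

#include <errno.h>
#include <stdio.h>

struct flow_ops {
	int (*create_definer)(int format_id);
	int (*destroy_definer)(int definer_id);
};

/* Feature not implemented by this backend: advertise that explicitly. */
static int stub_create_definer(int format_id)
{
	(void)format_id;
	return -EOPNOTSUPP;
}

static int stub_destroy_definer(int definer_id)
{
	(void)definer_id;
	return -EOPNOTSUPP;
}

static const struct flow_ops sw_steering_ops = {
	.create_definer = stub_create_definer,
	.destroy_definer = stub_destroy_definer,
};

int main(void)
{
	int err = sw_steering_ops.create_definer(1);

	/* Callers can tell "unsupported" apart from real failures. */
	printf("create_definer: %d (%s)\n", err,
	       err == -EOPNOTSUPP ? "unsupported" : "error");
	return 0;
}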
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index c5a8b1601999..c7c93131b762 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -89,7 +89,7 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
- u32 vport, u8 vhca_id_valid,
+ u16 vport, u8 vhca_id_valid,
u16 vhca_id);
struct mlx5dr_action *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index da481a7c12f4..01e9c412977c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -36,7 +36,7 @@
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
+static int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
@@ -44,13 +44,14 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
- if (!err)
- *uarn = MLX5_GET(alloc_uar_out, out, uar);
- return err;
+ if (err)
+ return err;
+
+ *uarn = MLX5_GET(alloc_uar_out, out, uar);
+ return 0;
}
-EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
+static int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};
@@ -58,7 +59,6 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
MLX5_SET(dealloc_uar_in, in, uar, uarn);
return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
-EXPORT_SYMBOL(mlx5_cmd_free_uar);
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
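The uar.c hunk above converts "assign on success, return err either way" into the kernel's preferred early-return shape, and drops the exports now that the helpers have no external callers. A minimal before/after in plain C; hw_query() is an invented stand-in for the firmware command:

#include <stdio.h>

static int hw_query(unsigned int *out)
{
	*out = 42;	/* invented "firmware" result */
	return 0;
}

/* Before: success work buried in a condition, single mixed return. */
static int alloc_uar_old(unsigned int *uarn)
{
	unsigned int out;
	int err = hw_query(&out);

	if (!err)
		*uarn = out;
	return err;
}

/* After: bail out early, keep the happy path unindented. */
static int alloc_uar_new(unsigned int *uarn)
{
	unsigned int out;
	int err = hw_query(&out);

	if (err)
		return err;

	*uarn = out;
	return 0;
}

int main(void)
{
	unsigned int a, b;

	printf("%d %d\n", alloc_uar_old(&a), alloc_uar_new(&b));
	printf("%u %u\n", a, b);
	return 0;
}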
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 4c1440a95ad7..8846d30a380a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -421,19 +421,21 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.system_image_guid);
-
+out:
kvfree(out);
-
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
@@ -1133,19 +1135,20 @@ EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
{
int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
- u64 tmp = 0;
+ u64 tmp;
+ int err;
if (mdev->sys_image_guid)
return mdev->sys_image_guid;
if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
- mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
+ err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
else
- mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+ err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
- mdev->sys_image_guid = tmp;
+ mdev->sys_image_guid = err ? 0 : tmp;
- return tmp;
+ return mdev->sys_image_guid;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
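The vport.c changes above make the GUID query propagate errors instead of silently returning zero, and the cached getter now stores 0 on failure so a later call retries rather than serving a bogus cached value. A userspace sketch of that cache-on-success getter; the failing query_guid() backend is invented:

#include <stdint.h>
#include <stdio.h>

static uint64_t cached_guid;
static int query_attempts;

/* Invented backend: fails on the first call, succeeds afterwards. */
static int query_guid(uint64_t *guid)
{
	if (query_attempts++ == 0)
		return -1;
	*guid = 0xdeadbeefULL;
	return 0;
}

static uint64_t get_system_guid(void)
{
	uint64_t tmp;
	int err;

	if (cached_guid)	/* fast path: already resolved */
		return cached_guid;

	err = query_guid(&tmp);
	cached_guid = err ? 0 : tmp;	/* keep 0 so failures retry */

	return cached_guid;
}

int main(void)
{
	printf("first:  0x%llx\n", (unsigned long long)get_system_guid());
	printf("second: 0x%llx\n", (unsigned long long)get_system_guid());
	printf("third:  0x%llx (served from cache)\n",
	       (unsigned long long)get_system_guid());
	printf("queries: %d\n", query_attempts);	/* 2, not 3 */
	return 0;
}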
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 6704f5c1aa32..b990782c1eb1 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -75,7 +75,7 @@ static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
u64_to_ether_addr(local_mac, mac);
if (is_valid_ether_addr(mac)) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ eth_hw_addr_set(priv->netdev, mac);
} else {
/* Provide a random MAC if for some reason the device has
* not been configured with a valid MAC address already.
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index 7654841a05c2..e6475ea77cd1 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -19,7 +19,7 @@ struct mlxfw_dev {
static inline
struct device *mlxfw_dev_dev(struct mlxfw_dev *mlxfw_dev)
{
- return mlxfw_dev->devlink->dev;
+ return devlink_to_dev(mlxfw_dev->devlink);
}
#define MLXFW_PRFX "mlxfw: "
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f080fab3de2b..3fd3812b8f31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -90,7 +90,6 @@ struct mlxsw_core {
struct devlink_health_reporter *fw_fatal;
} health;
struct mlxsw_env *env;
- bool is_initialized; /* Denotes if core was already initialized. */
unsigned long driver_priv[];
/* driver_priv has to be always the last item */
};
@@ -1975,12 +1974,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_emad_init;
if (!reload) {
- err = devlink_register(devlink);
- if (err)
- goto err_devlink_register;
- }
-
- if (!reload) {
err = mlxsw_core_params_register(mlxsw_core);
if (err)
goto err_register_params;
@@ -1995,12 +1988,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_health_init;
- if (mlxsw_driver->init) {
- err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
- if (err)
- goto err_driver_init;
- }
-
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
goto err_hwmon_init;
@@ -2014,31 +2001,31 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_env_init;
- mlxsw_core->is_initialized = true;
- devlink_params_publish(devlink);
-
- if (!reload)
- devlink_reload_enable(devlink);
+ if (mlxsw_driver->init) {
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
+ if (err)
+ goto err_driver_init;
+ }
+ if (!reload) {
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
+ }
return 0;
+err_driver_init:
+ mlxsw_env_fini(mlxsw_core->env);
err_env_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
- if (mlxsw_core->driver->fini)
- mlxsw_core->driver->fini(mlxsw_core);
-err_driver_init:
mlxsw_core_health_fini(mlxsw_core);
err_health_init:
err_fw_rev_validate:
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
- if (!reload)
- devlink_unregister(devlink);
-err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
kfree(mlxsw_core->lag.mapping);
@@ -2088,7 +2075,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
struct devlink *devlink = priv_to_devlink(mlxsw_core);
if (!reload)
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
+
if (devlink_is_reload_failed(devlink)) {
if (!reload)
/* Only the parts that were not de-initialized in the
@@ -2099,18 +2087,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
}
- devlink_params_unpublish(devlink);
- mlxsw_core->is_initialized = false;
+ if (mlxsw_core->driver->fini)
+ mlxsw_core->driver->fini(mlxsw_core);
mlxsw_env_fini(mlxsw_core->env);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
- if (mlxsw_core->driver->fini)
- mlxsw_core->driver->fini(mlxsw_core);
mlxsw_core_health_fini(mlxsw_core);
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
- if (!reload)
- devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core, reload);
@@ -2124,7 +2108,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
- devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
}
@@ -2939,49 +2922,6 @@ struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
return mlxsw_core->env;
}
-bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core)
-{
- return mlxsw_core->is_initialized;
-}
-
-int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
-{
- enum mlxsw_reg_pmtm_module_type module_type;
- char pmtm_pl[MLXSW_REG_PMTM_LEN];
- int err;
-
- mlxsw_reg_pmtm_pack(pmtm_pl, module);
- err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
- if (err)
- return err;
- mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);
-
- /* Here we need to get the module width according to the module type. */
-
- switch (module_type) {
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X:
- case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD:
- case MLXSW_REG_PMTM_MODULE_TYPE_OSFP:
- return 8;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X:
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X:
- case MLXSW_REG_PMTM_MODULE_TYPE_QSFP:
- return 4;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X:
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
- case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD:
- case MLXSW_REG_PMTM_MODULE_TYPE_DSFP:
- return 2;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X:
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
- case MLXSW_REG_PMTM_MODULE_TYPE_SFP:
- return 1;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(mlxsw_core_module_max_width);
-
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
{
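The core.c reordering above moves the driver init after all core sub-systems and defers devlink_register() to the very last step (with unregister first on teardown), so userspace can never reach a half-initialized device; the reload_enable/disable pair is replaced by setting DEVLINK_F_RELOAD before registration. A sketch of that register-last/unregister-first discipline with invented stage names:

#include <stdio.h>

/* Invented stand-ins for the init stages and the user-visible handle. */
static int init_resources(void)	{ puts("resources ready"); return 0; }
static int init_driver(void)	{ puts("driver ready");    return 0; }
static void publish_interface(void)	{ puts("interface visible"); }
static void unpublish_interface(void)	{ puts("interface hidden"); }
static void fini_driver(void)	{ puts("driver down"); }
static void fini_resources(void)	{ puts("resources down"); }

static int device_register(void)
{
	int err;

	err = init_resources();
	if (err)
		return err;
	err = init_driver();
	if (err)
		goto err_driver;

	/* Last step: only now can userspace reach the device. */
	publish_interface();
	return 0;

err_driver:
	fini_resources();
	return err;
}

static void device_unregister(void)
{
	/* First step: cut userspace off before tearing anything down. */
	unpublish_interface();
	fini_driver();
	fini_resources();
}

int main(void)
{
	if (!device_register())
		device_unregister();
	return 0;
}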
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 80712dc803d0..12023a550007 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -249,8 +249,6 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
-bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core);
-int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 3713c45cfa1e..6dd4ae2f45f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -5,6 +5,7 @@
#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/sfp.h>
+#include <linux/mutex.h>
#include "core.h"
#include "core_env.h"
@@ -14,12 +15,15 @@
struct mlxsw_env_module_info {
u64 module_overheat_counter;
bool is_overheat;
+ int num_ports_mapped;
+ int num_ports_up;
+ enum ethtool_module_power_mode_policy power_mode_policy;
};
struct mlxsw_env {
struct mlxsw_core *core;
u8 module_count;
- spinlock_t module_info_lock; /* Protects 'module_info'. */
+ struct mutex module_info_lock; /* Protects 'module_info'. */
struct mlxsw_env_module_info module_info[];
};
@@ -389,6 +393,205 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
+static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_rst_set(pmaos_pl, true);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}
+
+int mlxsw_env_reset_module(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 module, u32 *flags)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ u32 req = *flags;
+ int err;
+
+ if (!(req & ETH_RESET_PHY) &&
+ !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
+ return 0;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].num_ports_up) {
+ netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mlxsw_env->module_info[module].num_ports_mapped > 1 &&
+ !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) {
+ netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlxsw_env_module_reset(mlxsw_core, module);
+ if (err) {
+ netdev_err(netdev, "Failed to reset module\n");
+ goto out;
+ }
+
+ *flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT));
+
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_reset_module);
+
+int
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ char mcion_pl[MLXSW_REG_MCION_LEN];
+ u32 status_bits;
+ int err;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ params->policy = mlxsw_env->module_info[module].power_mode_policy;
+
+ mlxsw_reg_mcion_pack(mcion_pl, module);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode");
+ goto out;
+ }
+
+ status_bits = mlxsw_reg_mcion_module_status_bits_get(mcion_pl);
+ if (!(status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK))
+ goto out;
+
+ if (status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK)
+ params->mode = ETHTOOL_MODULE_POWER_MODE_LOW;
+ else
+ params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH;
+
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_power_mode);
+
+static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
+ u8 module, bool enable)
+{
+ enum mlxsw_reg_pmaos_admin_status admin_status;
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ admin_status = enable ? MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED :
+ MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED;
+ mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status);
+ mlxsw_reg_pmaos_ase_set(pmaos_pl, true);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}
+
+static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
+ u8 module, bool low_power)
+{
+ u16 eeprom_override_mask, eeprom_override;
+ char pmmp_pl[MLXSW_REG_PMMP_LEN];
+
+ mlxsw_reg_pmmp_pack(pmmp_pl, module);
+ mlxsw_reg_pmmp_sticky_set(pmmp_pl, true);
+ /* Mask all the bits except low power mode. */
+ eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
+ mlxsw_reg_pmmp_eeprom_override_mask_set(pmmp_pl, eeprom_override_mask);
+ eeprom_override = low_power ? MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK :
+ 0;
+ mlxsw_reg_pmmp_eeprom_override_set(pmmp_pl, eeprom_override);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmmp), pmmp_pl);
+}
+
+static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
+ u8 module, bool low_power,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, module, false);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to disable module");
+ return err;
+ }
+
+ err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode");
+ goto err_module_low_power_set;
+ }
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to enable module");
+ goto err_module_enable_set;
+ }
+
+ return 0;
+
+err_module_enable_set:
+ mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power);
+err_module_low_power_set:
+ mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ return err;
+}
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ bool low_power;
+ int err = 0;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
+ policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].power_mode_policy == policy)
+ goto out;
+
+ /* If any ports are up, we are already in high power mode. */
+ if (mlxsw_env->module_info[module].num_ports_up)
+ goto out_set_policy;
+
+ low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO;
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power,
+ extack);
+ if (err)
+ goto out;
+
+out_set_policy:
+ mlxsw_env->module_info[module].power_mode_policy = policy;
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_set_module_power_mode);
+
static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
u8 module,
bool *p_has_temp_sensor)
@@ -482,22 +685,32 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
return 0;
}
-static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
- char *mtwe_pl, void *priv)
+struct mlxsw_env_module_temp_warn_event {
+ struct mlxsw_env *mlxsw_env;
+ char mtwe_pl[MLXSW_REG_MTWE_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_env_mtwe_event_work(struct work_struct *work)
{
- struct mlxsw_env *mlxsw_env = priv;
+ struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env *mlxsw_env;
int i, sensor_warning;
bool is_overheat;
+ event = container_of(work, struct mlxsw_env_module_temp_warn_event,
+ work);
+ mlxsw_env = event->mlxsw_env;
+
for (i = 0; i < mlxsw_env->module_count; i++) {
/* 64-127 of sensor_index are mapped to the port modules
* sequentially (module 0 is mapped to sensor_index 64,
* module 1 to sensor_index 65 and so on)
*/
sensor_warning =
- mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl,
+ mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl,
i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
- spin_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->module_info_lock);
is_overheat =
mlxsw_env->module_info[i].is_overheat;
@@ -507,13 +720,13 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
* warning OR current state in "no warning" and MTWE
* does not report warning.
*/
- spin_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
continue;
} else if (is_overheat && !sensor_warning) {
/* MTWE reports "no warning", turn is_overheat off.
*/
mlxsw_env->module_info[i].is_overheat = false;
- spin_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
} else {
/* Current state is "no warning" and MTWE reports
* "warning", increase the counter and turn is_overheat
@@ -521,13 +734,32 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
*/
mlxsw_env->module_info[i].is_overheat = true;
mlxsw_env->module_info[i].module_overheat_counter++;
- spin_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
}
}
+
+ kfree(event);
+}
+
+static void
+mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl,
+ void *priv)
+{
+ struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env *mlxsw_env = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+
+ event->mlxsw_env = mlxsw_env;
+ memcpy(event->mtwe_pl, mtwe_pl, MLXSW_REG_MTWE_LEN);
+ INIT_WORK(&event->work, mlxsw_env_mtwe_event_work);
+ mlxsw_core_schedule_work(&event->work);
}
static const struct mlxsw_listener mlxsw_env_temp_warn_listener =
- MLXSW_EVENTL(mlxsw_env_mtwe_event_func, MTWE, MTWE);
+ MLXSW_EVENTL(mlxsw_env_mtwe_listener_func, MTWE, MTWE);
static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
{
@@ -568,9 +800,9 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- spin_lock_bh(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->module_info_lock);
mlxsw_env->module_info[event->module].is_overheat = false;
- spin_unlock_bh(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module,
&has_temp_sensor);
@@ -652,8 +884,10 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
for (i = 0; i < module_count; i++) {
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, i,
- MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+ mlxsw_reg_pmaos_pack(pmaos_pl, i);
+ mlxsw_reg_pmaos_e_set(pmaos_pl,
+ MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+ mlxsw_reg_pmaos_ee_set(pmaos_pl, true);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
if (err)
return err;
@@ -667,29 +901,110 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- /* Prevent switch driver from accessing uninitialized data. */
- if (!mlxsw_core_is_initialized(mlxsw_core)) {
- *p_counter = 0;
- return 0;
- }
-
if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
return -EINVAL;
- spin_lock_bh(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->module_info_lock);
*p_counter = mlxsw_env->module_info[module].module_overheat_counter;
- spin_unlock_bh(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
return 0;
}
EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+ mlxsw_env->module_info[module].num_ports_mapped++;
+ mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_map);
+
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+ mlxsw_env->module_info[module].num_ports_mapped--;
+ mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_unmap);
+
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int err = 0;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].power_mode_policy !=
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
+ goto out_inc;
+
+ if (mlxsw_env->module_info[module].num_ports_up != 0)
+ goto out_inc;
+
+ /* Transition to high power mode following first port using the module
+ * being put administratively up.
+ */
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false,
+ NULL);
+ if (err)
+ goto out_unlock;
+
+out_inc:
+ mlxsw_env->module_info[module].num_ports_up++;
+out_unlock:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_up);
+
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ mlxsw_env->module_info[module].num_ports_up--;
+
+ if (mlxsw_env->module_info[module].power_mode_policy !=
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
+ goto out_unlock;
+
+ if (mlxsw_env->module_info[module].num_ports_up != 0)
+ goto out_unlock;
+
+ /* Transition to low power mode following last port using the module
+ * being put administratively down.
+ */
+ __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL);
+
+out_unlock:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_down);
+
int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
{
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_env *env;
u8 module_count;
- int err;
+ int i, err;
mlxsw_reg_mgpir_pack(mgpir_pl);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
@@ -702,7 +1017,14 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
if (!env)
return -ENOMEM;
- spin_lock_init(&env->module_info_lock);
+ /* Firmware defaults to high power mode policy where modules are
+ * transitioned to high power mode following plug-in.
+ */
+ for (i = 0; i < module_count; i++)
+ env->module_info[i].power_mode_policy =
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
+
+ mutex_init(&env->module_info_lock);
env->core = mlxsw_core;
env->module_count = module_count;
*p_env = env;
@@ -732,6 +1054,7 @@ err_oper_state_event_enable:
err_module_plug_event_register:
mlxsw_env_temp_warn_event_unregister(env);
err_temp_warn_event_register:
+ mutex_destroy(&env->module_info_lock);
kfree(env);
return err;
}
@@ -742,5 +1065,6 @@ void mlxsw_env_fini(struct mlxsw_env *env)
/* Make sure there is no more event work scheduled. */
mlxsw_core_flush_owq();
mlxsw_env_temp_warn_event_unregister(env);
+ mutex_destroy(&env->module_info_lock);
kfree(env);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index 0bf5bd0f8a7e..da121b1a84b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -24,9 +24,32 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
+int mlxsw_env_reset_module(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 module,
+ u32 *flags);
+
+int
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack);
+
int
mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
u64 *p_counter);
+
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module);
+
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module);
+
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module);
+
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module);
+
int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
void mlxsw_env_fini(struct mlxsw_env *env);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index e92cadc98128..ab70a873a01a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -270,11 +270,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u8 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val) \
{ \
__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -290,13 +292,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 \
+static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u8 val) \
{ \
@@ -311,11 +313,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u16 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val) \
{ \
__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -331,13 +335,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u16 \
+static inline u16 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u16 val) \
{ \
@@ -352,11 +356,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u32 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \
{ \
__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -372,13 +378,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u32 \
+static inline u32 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u32 val) \
{ \
@@ -393,11 +399,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u64 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val) \
{ \
__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -413,13 +421,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u64 \
+static inline u64 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u64 val) \
{ \
@@ -433,19 +441,19 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \
{ \
__mlxsw_item_memcpy_from(buf, dst, \
&__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
{ \
__mlxsw_item_memcpy_to(buf, src, \
&__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline char * \
+static inline char * __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \
{ \
return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
@@ -460,7 +468,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
unsigned short index, \
char *dst) \
@@ -468,7 +476,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
__mlxsw_item_memcpy_from(buf, dst, \
&__ITEM_NAME(_type, _cname, _iname), index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
unsigned short index, \
const char *src) \
@@ -476,7 +484,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
__mlxsw_item_memcpy_to(buf, src, \
&__ITEM_NAME(_type, _cname, _iname), index); \
} \
-static inline char * \
+static inline char * __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \
{ \
return __mlxsw_item_data(buf, \
@@ -491,14 +499,14 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 \
+static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \
{ \
return __mlxsw_item_bit_array_get(buf, \
&__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
{ \
return __mlxsw_item_bit_array_set(buf, \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index d9d56c44e994..5d4dfa5ddbb5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -54,8 +54,20 @@ static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m)
return 0;
}
-static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
+static int mlxsw_m_port_open(struct net_device *dev)
{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module);
+}
+
+static int mlxsw_m_port_stop(struct net_device *dev)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module);
return 0;
}
@@ -70,8 +82,8 @@ mlxsw_m_port_get_devlink_port(struct net_device *dev)
}
static const struct net_device_ops mlxsw_m_port_netdev_ops = {
- .ndo_open = mlxsw_m_port_dummy_open_stop,
- .ndo_stop = mlxsw_m_port_dummy_open_stop,
+ .ndo_open = mlxsw_m_port_open,
+ .ndo_stop = mlxsw_m_port_stop,
.ndo_get_devlink_port = mlxsw_m_port_get_devlink_port,
};
@@ -124,11 +136,47 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
page, extack);
}
+static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module,
+ flags);
+}
+
+static int
+mlxsw_m_get_module_power_mode(struct net_device *netdev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module,
+ params, extack);
+}
+
+static int
+mlxsw_m_set_module_power_mode(struct net_device *netdev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module,
+ params->policy, extack);
+}
+
static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
.get_drvinfo = mlxsw_m_module_get_drvinfo,
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
+ .reset = mlxsw_m_reset,
+ .get_module_power_mode = mlxsw_m_get_module_power_mode,
+ .set_module_power_mode = mlxsw_m_set_module_power_mode,
};
static int
@@ -152,20 +200,16 @@ static int
mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
{
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- struct net_device *dev = mlxsw_m_port->dev;
char ppad_pl[MLXSW_REG_PPAD_LEN];
+ u8 addr[ETH_ALEN];
int err;
mlxsw_reg_ppad_pack(ppad_pl, false, 0);
err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(ppad), ppad_pl);
if (err)
return err;
- mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
- /* The last byte value in base mac address is guaranteed
- * to be such it does not overflow when adding local_port
- * value.
- */
- dev->dev_addr[ETH_ALEN - 1] += mlxsw_m_port->module + 1;
+ mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, addr);
+ eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1);
return 0;
}
@@ -266,6 +310,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
if (WARN_ON_ONCE(module >= max_ports))
return -EINVAL;
+ mlxsw_env_module_port_map(mlxsw_m->core, module);
mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
return 0;
@@ -274,6 +319,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
{
mlxsw_m->module_to_port[module] = -1;
+ mlxsw_env_module_port_unmap(mlxsw_m->core, module);
}
static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 13b0259f7ea6..fcace73eae40 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -353,13 +353,10 @@ static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
struct sk_buff *skb;
int err;
- elem_info->u.rdq.skb = NULL;
skb = netdev_alloc_skb_ip_align(NULL, buf_len);
if (!skb)
return -ENOMEM;
- /* Assume that wqe was previously zeroed. */
-
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
buf_len, DMA_FROM_DEVICE);
if (err)
@@ -597,21 +594,26 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
struct mlxsw_rx_info rx_info = {};
- char *wqe;
+ char wqe[MLXSW_PCI_WQE_SIZE];
struct sk_buff *skb;
u16 byte_count;
int err;
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- skb = elem_info->u.sdq.skb;
- if (!skb)
- return;
- wqe = elem_info->elem;
- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ skb = elem_info->u.rdq.skb;
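+ /* Copy the WQE aside: mlxsw_pci_rdq_skb_alloc() below maps the
+ * replacement buffer into elem_info->elem, so the old fragment is
+ * unmapped from the copy afterwards.
+ */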
+ memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err) {
+ dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ goto out;
+ }
+
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
rx_info.is_lag = true;
rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
@@ -647,10 +649,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
- memset(wqe, 0, q->elem_size);
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
- if (err)
- dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+out:
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6fbda6ebd590..8d420eb8ade2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4951,7 +4951,7 @@ enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_DISCARD_CNT = 0x6,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
- MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
+ MLXSW_REG_PPCNT_TC_CONG_CNT = 0x13,
};
/* reg_ppcnt_grp
@@ -5371,7 +5371,7 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_duration,
MLXSW_ITEM64(reg, ppcnt, tx_pause_transition,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
-/* Ethernet Per Traffic Group Counters */
+/* Ethernet Per Traffic Class Counters */
/* reg_ppcnt_tc_transmit_queue
* Contains the transmit queue depth in cells of traffic class
@@ -5398,6 +5398,12 @@ MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc,
MLXSW_ITEM64(reg, ppcnt, wred_discard,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+/* reg_ppcnt_ecn_marked_tc
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
enum mlxsw_reg_ppcnt_grp grp,
u8 prio_tc)
@@ -5681,6 +5687,14 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN);
+/* reg_pmaos_rst
+ * Module reset toggle.
+ * Note: Setting reset while the module is plugged in results in a transition
+ * to the "initializing" operational state.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pmaos, rst, 0x00, 31, 1);
+
/* reg_pmaos_slot_index
* Slot index.
* Access: Index
@@ -5693,6 +5707,24 @@ MLXSW_ITEM32(reg, pmaos, slot_index, 0x00, 24, 4);
*/
MLXSW_ITEM32(reg, pmaos, module, 0x00, 16, 8);
+enum mlxsw_reg_pmaos_admin_status {
+ MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED = 1,
+ MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED = 2,
+ /* If the module is active and then unplugged, or experiences an error
+ * event, the operational status goes to "disabled" and the module can
+ * only be enabled again by an explicit enable command.
+ */
+ MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED_ONCE = 3,
+};
+
+/* reg_pmaos_admin_status
+ * Module administrative state (the desired state of the module).
+ * Note: To disable a module, all ports associated with the module must be
+ * administratively down first.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmaos, admin_status, 0x00, 8, 4);
+
/* reg_pmaos_ase
* Admin state update enable.
* If this bit is set, admin state will be updated based on the admin_status field.
@@ -5721,13 +5753,10 @@ enum mlxsw_reg_pmaos_e {
*/
MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
-static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module,
- enum mlxsw_reg_pmaos_e e)
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module)
{
MLXSW_REG_ZERO(pmaos, payload);
mlxsw_reg_pmaos_module_set(payload, module);
- mlxsw_reg_pmaos_e_set(payload, e);
- mlxsw_reg_pmaos_ee_set(payload, true);
}
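Since mlxsw_reg_pmaos_pack() no longer sets the event fields, callers set only
what they need. A minimal sketch of a module reset through the new rst bit (the
function name is hypothetical; mlxsw_env_reset_module() introduced by this
patch is the real consumer):

static int example_module_reset(struct mlxsw_core *mlxsw_core, u8 module)
{
	char pmaos_pl[MLXSW_REG_PMAOS_LEN];

	mlxsw_reg_pmaos_pack(pmaos_pl, module);
	mlxsw_reg_pmaos_rst_set(pmaos_pl, true);	/* toggle module reset */
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
}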
/* PPLR - Port Physical Loopback Register
@@ -5766,6 +5795,69 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
}
+/* PMTDB - Port Module To local DataBase Register
+ * ----------------------------------------------
+ * The PMTDB register allows querying the possible module<->local port
+ * mappings that can be used in PMLP. It does not represent the actual/current
+ * mapping of a local port to a module. The actual mapping is only defined by
+ * PMLP.
+ */
+#define MLXSW_REG_PMTDB_ID 0x501A
+#define MLXSW_REG_PMTDB_LEN 0x40
+
+MLXSW_REG_DEFINE(pmtdb, MLXSW_REG_PMTDB_ID, MLXSW_REG_PMTDB_LEN);
+
+/* reg_pmtdb_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, slot_index, 0x00, 24, 4);
+
+/* reg_pmtdb_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, module, 0x00, 16, 8);
+
+/* reg_pmtdb_ports_width
+ * Port's width.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, ports_width, 0x00, 12, 4);
+
+/* reg_pmtdb_num_ports
+ * Number of ports in a single module (split/breakout).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, num_ports, 0x00, 8, 4);
+
+enum mlxsw_reg_pmtdb_status {
+ MLXSW_REG_PMTDB_STATUS_SUCCESS,
+};
+
+/* reg_pmtdb_status
+ * Status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4);
+
+/* reg_pmtdb_port_num
+ * The local_port value that can be assigned to the module.
+ * In case of more than one port, port<x> represents the /<x> port of
+ * the module.
+ * Access: RO
+ */
+MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false);
+
+static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
+ u8 ports_width, u8 num_ports)
+{
+ MLXSW_REG_ZERO(pmtdb, payload);
+ mlxsw_reg_pmtdb_slot_index_set(payload, slot_index);
+ mlxsw_reg_pmtdb_module_set(payload, module);
+ mlxsw_reg_pmtdb_ports_width_set(payload, ports_width);
+ mlxsw_reg_pmtdb_num_ports_set(payload, num_ports);
+}
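A sketch of how a driver can query PMTDB for the local ports of a planned
split; this mirrors the later use in mlxsw_sp_port_split(), and the function
name here is illustrative only:

static int example_split_ports_show(struct mlxsw_core *mlxsw_core, u8 module,
				    u8 module_width, unsigned int count)
{
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i, err;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, module, module_width / count, count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err)
		return err;
	if (mlxsw_reg_pmtdb_status_get(pmtdb_pl) !=
	    MLXSW_REG_PMTDB_STATUS_SUCCESS)
		return -EINVAL;	/* unsupported split configuration */

	for (i = 0; i < count; i++)
		pr_info("split port %d -> local port %u\n", i,
			mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i));
	return 0;
}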
+
/* PMPE - Port Module Plug/Unplug Event Register
* ---------------------------------------------
* This register reports any operational status change of a module.
@@ -5860,67 +5952,100 @@ static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port,
mlxsw_reg_pddr_page_select_set(payload, page_select);
}
-/* PMTM - Port Module Type Mapping Register
- * ----------------------------------------
- * The PMTM allows query or configuration of module types.
+/* PMMP - Port Module Memory Map Properties Register
+ * -------------------------------------------------
+ * The PMMP register allows overriding the module memory map advertisement.
+ * The register can only be set while the module is disabled via the PMAOS
+ * register.
*/
-#define MLXSW_REG_PMTM_ID 0x5067
-#define MLXSW_REG_PMTM_LEN 0x10
+#define MLXSW_REG_PMMP_ID 0x5044
+#define MLXSW_REG_PMMP_LEN 0x2C
-MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN);
-/* reg_pmtm_module
+/* reg_pmmp_module
* Module number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8);
-enum mlxsw_reg_pmtm_module_type {
- /* Backplane with 4 lanes */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_4X,
- /* QSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_QSFP,
- /* SFP */
- MLXSW_REG_PMTM_MODULE_TYPE_SFP,
- /* Backplane with single lane */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4,
- /* Backplane with two lane */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8,
- /* Chip2Chip4x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C4X = 10,
- /* Chip2Chip2x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C2X,
- /* Chip2Chip1x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C1X,
- /* QSFP-DD */
- MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14,
- /* OSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_OSFP,
- /* SFP-DD */
- MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD,
- /* DSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_DSFP,
- /* Chip2Chip8x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C8X,
+/* reg_pmmp_sticky
+ * When set, the eeprom_override values are kept after a plug-out event.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pmmp, sticky, 0x00, 0, 1);
+
+/* reg_pmmp_eeprom_override_mask
+ * Write mask bit (negative polarity).
+ * 0 - Allow write
+ * 1 - Ignore write
+ * On write, indicates which of the bits from eeprom_override field are
+ * updated.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmmp, eeprom_override_mask, 0x04, 16, 16);
+
+enum {
+ /* Set module to low power mode */
+ MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK = BIT(8),
};
-/* reg_pmtm_module_type
- * Module type.
+/* reg_pmmp_eeprom_override
+ * Override / ignore EEPROM advertisement properties bitmask
* Access: RW
*/
-MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4);
+MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16);
-static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module)
{
- MLXSW_REG_ZERO(pmtm, payload);
- mlxsw_reg_pmtm_module_set(payload, module);
+ MLXSW_REG_ZERO(pmmp, payload);
+ mlxsw_reg_pmmp_module_set(payload, module);
}
-static inline void
-mlxsw_reg_pmtm_unpack(char *payload,
- enum mlxsw_reg_pmtm_module_type *module_type)
+/* PLLP - Port Local port to Label Port mapping Register
+ * -----------------------------------------------------
+ * The PLLP register returns the mapping from a local port to its front panel
+ * (label) port.
+ */
+#define MLXSW_REG_PLLP_ID 0x504A
+#define MLXSW_REG_PLLP_LEN 0x10
+
+MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN);
+
+/* reg_pllp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8);
+
+/* reg_pllp_label_port
+ * Front panel label of the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, label_port, 0x00, 0, 8);
+
+/* reg_pllp_split_num
+ * Label split mapping for local_port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4);
+
+/* reg_pllp_slot_index
+ * Slot index (0: Main board).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4);
+
+static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port)
{
- *module_type = mlxsw_reg_pmtm_module_type_get(payload);
+ MLXSW_REG_ZERO(pllp, payload);
+ mlxsw_reg_pllp_local_port_set(payload, local_port);
+}
+
+static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port,
+ u8 *split_num, u8 *slot_index)
+{
+ *label_port = mlxsw_reg_pllp_label_port_get(payload);
+ *split_num = mlxsw_reg_pllp_split_num_get(payload);
+ *slot_index = mlxsw_reg_pllp_slot_index_get(payload);
}
/* HTGT - Host Trap Group Table
@@ -6401,6 +6526,12 @@ MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16);
*/
MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
+/* reg_ritr_if_mac_profile_id
+ * MAC msb profile ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_mac_profile_id, 0x10, 16, 4);
+
/* reg_ritr_if_mac
* Router interface MAC address.
* In Spectrum, all MAC addresses must have the same 38 MSBits.
@@ -6664,6 +6795,23 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload,
mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip);
}
+static inline void
+mlxsw_reg_ritr_loopback_ipip6_pack(char *payload,
+ enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
+ enum mlxsw_reg_ritr_loopback_ipip_options options,
+ u16 uvr_id, u16 underlay_rif,
+ const struct in6_addr *usip, u32 gre_key)
+{
+ enum mlxsw_reg_ritr_loopback_protocol protocol =
+ MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6;
+
+ mlxsw_reg_ritr_loopback_protocol_set(payload, protocol);
+ mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options,
+ uvr_id, underlay_rif, gre_key);
+ mlxsw_reg_ritr_loopback_ipip_usip6_memcpy_to(payload,
+ (const char *)usip);
+}
+
/* RTAR - Router TCAM Allocation Register
* --------------------------------------
* This register is used for allocation of regions in the TCAM table.
@@ -6932,6 +7080,12 @@ static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip)
mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip);
}
+static inline void mlxsw_reg_ratr_ipip6_entry_pack(char *payload, u32 ipv6_ptr)
+{
+ mlxsw_reg_ratr_ipip_type_set(payload, MLXSW_REG_RATR_IPIP_TYPE_IPV6);
+ mlxsw_reg_ratr_ipip_ipv6_ptr_set(payload, ipv6_ptr);
+}
+
static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
bool counter_enable)
{
@@ -8117,19 +8271,71 @@ static inline void mlxsw_reg_rtdp_pack(char *payload,
}
static inline void
-mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
- enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
- unsigned int type_check, bool gre_key_check,
- u32 ipv4_usip, u32 expected_gre_key)
+mlxsw_reg_rtdp_ipip_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 expected_gre_key)
{
mlxsw_reg_rtdp_ipip_irif_set(payload, irif);
mlxsw_reg_rtdp_ipip_sip_check_set(payload, sip_check);
mlxsw_reg_rtdp_ipip_type_check_set(payload, type_check);
mlxsw_reg_rtdp_ipip_gre_key_check_set(payload, gre_key_check);
- mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip);
mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key);
}
+static inline void
+mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 ipv4_usip, u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check,
+ gre_key_check, expected_gre_key);
+ mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip);
+}
+
+static inline void
+mlxsw_reg_rtdp_ipip6_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 ipv6_usip_ptr, u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check,
+ gre_key_check, expected_gre_key);
+ mlxsw_reg_rtdp_ipip_ipv6_usip_ptr_set(payload, ipv6_usip_ptr);
+}
+
+/* RIPS - Router IP version Six Register
+ * -------------------------------------
+ * The RIPS register is used to store IPv6 addresses for use by the NVE and
+ * IPinIP tunnels.
+ */
+#define MLXSW_REG_RIPS_ID 0x8021
+#define MLXSW_REG_RIPS_LEN 0x14
+
+MLXSW_REG_DEFINE(rips, MLXSW_REG_RIPS_ID, MLXSW_REG_RIPS_LEN);
+
+/* reg_rips_index
+ * Index to IPv6 address.
+ * For Spectrum, the index is into the KVD linear memory.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rips, index, 0x00, 0, 24);
+
+/* reg_rips_ipv6
+ * IPv6 address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rips, ipv6, 0x04, 16);
+
+static inline void mlxsw_reg_rips_pack(char *payload, u32 index,
+ const struct in6_addr *ipv6)
+{
+ MLXSW_REG_ZERO(rips, payload);
+ mlxsw_reg_rips_index_set(payload, index);
+ mlxsw_reg_rips_ipv6_memcpy_to(payload, (const char *)ipv6);
+}
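A hedged sketch of programming an underlay IPv6 address: the index written via
RIPS is what mlxsw_reg_rtdp_ipip6_pack() and mlxsw_reg_ratr_ipip6_entry_pack()
above take as their pointer argument. The KVDL allocator calls are assumptions
based on the IPV6_ADDRESS entry type added later in this patch:

static int example_ipv6_addr_set(struct mlxsw_sp *mlxsw_sp,
				 const struct in6_addr *addr,
				 u32 *p_kvdl_index)
{
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		mlxsw_sp_kvdl_free(mlxsw_sp,
				   MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				   *p_kvdl_index);
	return err;
}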
+
/* RATRAD - Router Adjacency Table Activity Dump Register
* ------------------------------------------------------
* The RATRAD register is used to dump and optionally clear activity bits of
@@ -10208,6 +10414,39 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
MLXSW_REG_MLCR_DURATION_MAX : 0);
}
+/* MCION - Management Cable IO and Notifications Register
+ * ------------------------------------------------------
+ * The MCION register is used to query transceiver modules' IO pins and other
+ * notifications.
+ */
+#define MLXSW_REG_MCION_ID 0x9052
+#define MLXSW_REG_MCION_LEN 0x18
+
+MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN);
+
+/* reg_mcion_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8);
+
+enum {
+ MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0),
+ MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8),
+};
+
+/* reg_mcion_module_status_bits
+ * Module IO status as defined by SFF.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16);
+
+static inline void mlxsw_reg_mcion_pack(char *payload, u8 module)
+{
+ MLXSW_REG_ZERO(mcion, payload);
+ mlxsw_reg_mcion_module_set(payload, module);
+}
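A sketch of reading a module's presence and low power pins through MCION, as
the new power mode query is expected to do (the function name is illustrative):

static int example_module_status(struct mlxsw_core *mlxsw_core, u8 module,
				 bool *p_present, bool *p_low_power)
{
	char mcion_pl[MLXSW_REG_MCION_LEN];
	u16 status_bits;
	int err;

	mlxsw_reg_mcion_pack(mcion_pl, module);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl);
	if (err)
		return err;

	status_bits = mlxsw_reg_mcion_module_status_bits_get(mcion_pl);
	*p_present = status_bits &
		     MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK;
	*p_low_power = status_bits &
		       MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK;
	return 0;
}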
+
/* MTPPS - Management Pulse Per Second Register
* --------------------------------------------
* This register provides the device PPS capabilities, configure the PPS in and
@@ -12200,9 +12439,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pspa),
MLXSW_REG(pmaos),
MLXSW_REG(pplr),
+ MLXSW_REG(pmtdb),
MLXSW_REG(pmpe),
MLXSW_REG(pddr),
- MLXSW_REG(pmtm),
+ MLXSW_REG(pmmp),
+ MLXSW_REG(pllp),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
@@ -12210,6 +12451,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(rips),
MLXSW_REG(ratrad),
MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
@@ -12249,6 +12491,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mgir),
MLXSW_REG(mrsr),
MLXSW_REG(mlcr),
+ MLXSW_REG(mcion),
MLXSW_REG(mtpps),
MLXSW_REG(mtutc),
MLXSW_REG(mpsc),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index a56c9e19a390..c7fc650608eb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -25,9 +25,6 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_SYSTEM_PORT,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
- MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
- MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
- MLXSW_RES_ID_LOCAL_PORTS_IN_4X,
MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_MAX_HEADROOM_SIZE,
@@ -52,6 +49,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES,
+ MLXSW_RES_ID_MAX_RIF_MAC_PROFILES,
MLXSW_RES_ID_MAX_LPM_TREES,
MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4,
MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6,
@@ -84,9 +82,6 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
- [MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
- [MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
- [MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612,
[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
@@ -111,6 +106,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
[MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10,
+ [MLXSW_RES_ID_MAX_RIF_MAC_PROFILES] = 0x2C14,
[MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
[MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02,
[MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 250c5a24264d..5925db386b1b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -47,7 +47,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 2406
+#define MLXSW_SP1_FWREV_SUBMINOR 3326
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -64,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 2406
+#define MLXSW_SP2_FWREV_SUBMINOR 3326
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -79,7 +79,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 2406
+#define MLXSW_SP3_FWREV_SUBMINOR 3326
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -316,11 +316,11 @@ static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
- ether_addr_copy(addr, mlxsw_sp->base_mac);
- addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
- return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
+ eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
+ mlxsw_sp_port->local_port);
+ return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
+ mlxsw_sp_port->dev->dev_addr);
}
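eth_hw_addr_gen() replaces the open-coded "add local_port to the last byte"
logic and, unlike it, handles carry across address bytes. Its effect is roughly
equivalent to the following sketch (for illustration only):

static void example_addr_gen(u8 *addr, const u8 *base_addr, unsigned int id)
{
	/* Treat the 48-bit MAC as an integer so the addition carries
	 * across bytes instead of overflowing the last one.
	 */
	u64 u = ether_addr_to_u64(base_addr);

	u += id;
	u64_to_ether_addr(u, addr);
}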
static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
@@ -351,12 +351,12 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 swid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
@@ -529,55 +529,80 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
port_mapping->module = module;
port_mapping->width = width;
+ port_mapping->module_width = width;
port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
return 0;
}
-static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
+static int
+mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const struct mlxsw_sp_port_mapping *port_mapping)
{
- struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
- int i;
+ int i, err;
+
+ mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);
- mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
for (i = 0; i < port_mapping->width; i++) {
mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
}
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ goto err_pmlp_write;
+ return 0;
+
+err_pmlp_write:
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
+ return err;
}
-static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 module)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
- mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
}
static int mlxsw_sp_port_open(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ err = mlxsw_env_module_port_up(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module);
if (err)
return err;
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_admin_status_set;
netif_start_queue(dev);
return 0;
+
+err_port_admin_status_set:
+ mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module);
+ return err;
}
static int mlxsw_sp_port_stop(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
netif_stop_queue(dev);
- return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module);
+ return 0;
}
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
@@ -649,7 +674,7 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
if (err)
return err;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -799,12 +824,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
for (i = 0; i < TC_MAX_QUEUE; i++) {
err = mlxsw_sp_port_get_stats_raw(dev,
- MLXSW_REG_PPCNT_TC_CONG_TC,
+ MLXSW_REG_PPCNT_TC_CONG_CNT,
i, ppcnt_pl);
- if (!err)
- xstats->wred_drop[i] =
- mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+ if (err)
+ goto tc_cnt;
+ xstats->wred_drop[i] =
+ mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+ xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);
+
+tc_cnt:
err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
i, ppcnt_pl);
if (err)
@@ -1010,6 +1039,8 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
+ case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
+ return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
default:
return -EOPNOTSUPP;
}
@@ -1442,29 +1473,68 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}
+static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 *port_number,
+ u8 *split_port_subnumber,
+ u8 *slot_index)
+{
+ char pllp_pl[MLXSW_REG_PLLP_LEN];
+ int err;
+
+ mlxsw_reg_pllp_pack(pllp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pllp_unpack(pllp_pl, port_number,
+ split_port_subnumber, slot_index);
+ return 0;
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u8 split_base_local_port,
+ bool split,
struct mlxsw_sp_port_mapping *port_mapping)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- bool split = !!split_base_local_port;
struct mlxsw_sp_port *mlxsw_sp_port;
u32 lanes = port_mapping->width;
+ u8 split_port_subnumber;
struct net_device *dev;
+ u8 port_number;
+ u8 slot_index;
bool splittable;
int err;
+ err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
+ local_port);
+ return err;
+ }
+
+ err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
+ &split_port_subnumber, &slot_index);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
+ local_port);
+ goto err_port_label_info_get;
+ }
+
splittable = lanes > 1 && !split;
err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
- port_mapping->module + 1, split,
- port_mapping->lane / lanes,
- splittable, lanes,
- mlxsw_sp->base_mac,
+ port_number, split, split_port_subnumber,
+ splittable, lanes, mlxsw_sp->base_mac,
sizeof(mlxsw_sp->base_mac));
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
- return err;
+ goto err_core_port_init;
}
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
@@ -1480,7 +1550,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port->split = split;
- mlxsw_sp_port->split_base_local_port = split_base_local_port;
mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
@@ -1498,20 +1567,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
- err = mlxsw_sp_port_module_map(mlxsw_sp_port);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
- mlxsw_sp_port->local_port);
- goto err_port_module_map;
- }
-
- err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
- mlxsw_sp_port->local_port);
- goto err_port_swid_set;
- }
-
err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -1712,21 +1767,24 @@ err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
- mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
-err_port_swid_set:
- mlxsw_sp_port_module_unmap(mlxsw_sp_port);
-err_port_module_map:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
free_netdev(dev);
err_alloc_etherdev:
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+err_core_port_init:
+err_port_label_info_get:
+ mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module);
return err;
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ u8 module = mlxsw_sp_port->mapping.module;
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
@@ -1742,12 +1800,13 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
- mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
- mlxsw_sp_port_module_unmap(mlxsw_sp_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev);
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+ mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module);
}
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
@@ -1789,8 +1848,15 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp_port);
}
+static bool mlxsw_sp_local_port_valid(u8 local_port)
+{
+ return local_port != MLXSW_PORT_CPU_PORT;
+}
+
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
+ if (!mlxsw_sp_local_port_valid(local_port))
+ return false;
return mlxsw_sp->ports[local_port] != NULL;
}
@@ -1827,7 +1893,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
port_mapping = mlxsw_sp->port_mapping[i];
if (!port_mapping)
continue;
- err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
if (err)
goto err_port_create;
}
@@ -1894,17 +1960,10 @@ static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->port_mapping);
}
-static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
-{
- u8 offset = (local_port - 1) % max_width;
-
- return local_port - offset;
-}
-
static int
-mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_port_mapping *port_mapping,
- unsigned int count, u8 offset)
+ unsigned int count, const char *pmtdb_pl)
{
struct mlxsw_sp_port_mapping split_port_mapping;
int err, i;
@@ -1912,8 +1971,13 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
split_port_mapping = *port_mapping;
split_port_mapping.width /= count;
for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
- base_port, &split_port_mapping);
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ if (!mlxsw_sp_local_port_valid(s_local_port))
+ continue;
+
+ err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
+ true, &split_port_mapping);
if (err)
goto err_port_create;
split_port_mapping.lane += split_port_mapping.width;
@@ -1922,49 +1986,34 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
return 0;
err_port_create:
- for (i--; i >= 0; i--)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
+ for (i--; i >= 0; i--) {
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+ }
return err;
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
- u8 base_port,
- unsigned int count, u8 offset)
+ unsigned int count,
+ const char *pmtdb_pl)
{
struct mlxsw_sp_port_mapping *port_mapping;
int i;
/* Go over original unsplit ports in the gap and recreate them. */
- for (i = 0; i < count * offset; i++) {
- port_mapping = mlxsw_sp->port_mapping[base_port + i];
- if (!port_mapping)
+ for (i = 0; i < count; i++) {
+ u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ port_mapping = mlxsw_sp->port_mapping[local_port];
+ if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
continue;
- mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
+ mlxsw_sp_port_create(mlxsw_sp, local_port,
+ false, port_mapping);
}
}
-static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
- unsigned int count,
- unsigned int max_width)
-{
- enum mlxsw_res_id local_ports_in_x_res_id;
- int split_width = max_width / count;
-
- if (split_width == 1)
- local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
- else if (split_width == 2)
- local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
- else if (split_width == 4)
- local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
- else
- return -EINVAL;
-
- if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
- return -EINVAL;
- return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
-}
-
static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
@@ -1980,9 +2029,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port_mapping port_mapping;
struct mlxsw_sp_port *mlxsw_sp_port;
- int max_width;
- u8 base_port;
- int offset;
+ enum mlxsw_reg_pmtdb_status status;
+ char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
int i;
int err;
@@ -1994,57 +2042,37 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
- max_width = mlxsw_core_module_max_width(mlxsw_core,
- mlxsw_sp_port->mapping.module);
- if (max_width < 0) {
- netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
- return max_width;
+ if (mlxsw_sp_port->split) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is already split");
+ return -EINVAL;
}
- /* Split port with non-max cannot be split. */
- if (mlxsw_sp_port->mapping.width != max_width) {
- netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
- NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
- return -EINVAL;
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_sp_port->mapping.module_width / count,
+ count);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
+ return err;
}
- offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
- if (offset < 0) {
- netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+ status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
+ if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
return -EINVAL;
}
- /* Only in case max split is being done, the local port and
- * base port may differ.
- */
- base_port = count == max_width ?
- mlxsw_sp_cluster_base_port_get(local_port, max_width) :
- local_port;
+ port_mapping = mlxsw_sp_port->mapping;
- for (i = 0; i < count * offset; i++) {
- /* Expect base port to exist and also the one in the middle in
- * case of maximal split count.
- */
- if (i == 0 || (count == max_width && i == count / 2))
- continue;
+ for (i = 0; i < count; i++) {
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
- netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
- NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
- return -EINVAL;
- }
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
}
- port_mapping = mlxsw_sp_port->mapping;
-
- for (i = 0; i < count; i++)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
-
- err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
- count, offset);
+ err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
+ count, pmtdb_pl);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
goto err_port_split_create;
@@ -2053,7 +2081,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return 0;
err_port_split_create:
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
return err;
}
@@ -2062,11 +2090,10 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
+ char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
unsigned int count;
- int max_width;
- u8 base_port;
- int offset;
int i;
+ int err;
mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
if (!mlxsw_sp_port) {
@@ -2077,35 +2104,30 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
}
if (!mlxsw_sp_port->split) {
- netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
NL_SET_ERR_MSG_MOD(extack, "Port was not split");
return -EINVAL;
}
- max_width = mlxsw_core_module_max_width(mlxsw_core,
- mlxsw_sp_port->mapping.module);
- if (max_width < 0) {
- netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
- return max_width;
- }
-
- count = max_width / mlxsw_sp_port->mapping.width;
+ count = mlxsw_sp_port->mapping.module_width /
+ mlxsw_sp_port->mapping.width;
- offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
- if (WARN_ON(offset < 0)) {
- netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
- return -EINVAL;
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_sp_port->mapping.module_width / count,
+ count);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
+ return err;
}
- base_port = mlxsw_sp_port->split_base_local_port;
+ for (i = 0; i < count; i++) {
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
- for (i = 0; i < count; i++)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+ }
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
return 0;
}
@@ -3260,6 +3282,30 @@ static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
&span_size_params);
}
+static int
+mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ u8 max_rif_mac_profiles;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
+ return -EIO;
+
+ max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
+ MAX_RIF_MAC_PROFILES);
+ devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
+ max_rif_mac_profiles, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink,
+ "rif_mac_profiles",
+ max_rif_mac_profiles,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
int err;
@@ -3278,10 +3324,16 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
err = mlxsw_sp_policer_resources_register(mlxsw_core);
if (err)
- goto err_resources_counter_register;
+ goto err_policer_resources_register;
+
+ err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
+ if (err)
+ goto err_resources_rif_mac_profile_register;
return 0;
+err_resources_rif_mac_profile_register:
+err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
@@ -3306,10 +3358,16 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
err = mlxsw_sp_policer_resources_register(mlxsw_core);
if (err)
- goto err_resources_counter_register;
+ goto err_policer_resources_register;
+
+ err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
+ if (err)
+ goto err_resources_rif_mac_profile_register;
return 0;
+err_resources_rif_mac_profile_register:
+err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
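The error paths added in the two registration hunks above follow the kernel's stacked-goto unwind idiom: each registration step gets its own label, the labels fall through, and a single devlink_resources_unregister() call tears down everything registered so far. A minimal sketch of the idiom, with hypothetical register_first()/register_second() helpers:

static int example_resources_register(struct devlink *devlink)
{
	int err;

	err = register_first(devlink);		/* hypothetical step */
	if (err)
		goto err_register_first;

	err = register_second(devlink);		/* hypothetical step */
	if (err)
		goto err_register_second;

	return 0;

err_register_second:
err_register_first:
	/* Empty labels fall through to one shared teardown. */
	devlink_resources_unregister(devlink, NULL);
	return err;
}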
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3a43cba6d23c..32fdd37657dd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -67,6 +67,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_COUNTERS_RIF,
MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
};
struct mlxsw_sp_port;
@@ -144,7 +145,8 @@ struct mlxsw_sp_mall_entry;
struct mlxsw_sp_port_mapping {
u8 module;
- u8 width;
+ u8 width; /* Number of lanes used by the port */
+ u8 module_width; /* Number of lanes in the module (static) */
u8 lane;
};
@@ -284,6 +286,7 @@ struct mlxsw_sp_port_vlan {
/* No internal lock needed; at worst we miss a single periodic iteration */
struct mlxsw_sp_port_xstats {
u64 ecn;
+ u64 tc_ecn[TC_MAX_QUEUE];
u64 wred_drop[TC_MAX_QUEUE];
u64 tail_drop[TC_MAX_QUEUE];
u64 backlog[TC_MAX_QUEUE];
@@ -345,7 +348,6 @@ struct mlxsw_sp_port {
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
- u8 split_base_local_port;
int max_mtu;
u32 max_speed;
struct mlxsw_sp_hdroom *hdroom;
@@ -747,6 +749,7 @@ enum mlxsw_sp_kvdl_entry_type {
MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS,
MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT,
};
@@ -758,6 +761,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET:
case MLXSW_SP_KVDL_ENTRY_TYPE_PBS:
case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS:
case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT:
default:
return 1;
@@ -1193,6 +1197,8 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_fifo_qopt_offload *p);
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f);
+int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 3a73d654017f..10ae1115de6c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
MAX_KVD_ACTION_SETS),
MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(IPV6_ADDRESS, 0x28, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE),
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index ded4cf658680..4b713832fdd5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -119,7 +119,6 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
{
struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb;
- size_t alloc_size;
u64 max_lkey_id;
int err;
@@ -131,8 +130,7 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
if (!region_12kb)
return -ENOMEM;
- alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long);
- region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL);
+ region_12kb->used_lkey_id = bitmap_zalloc(max_lkey_id, GFP_KERNEL);
if (!region_12kb->used_lkey_id) {
err = -ENOMEM;
goto err_used_lkey_id_alloc;
@@ -149,7 +147,7 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
return 0;
err_rhashtable_init:
- kfree(region_12kb->used_lkey_id);
+ bitmap_free(region_12kb->used_lkey_id);
err_used_lkey_id_alloc:
kfree(region_12kb);
return err;
@@ -161,7 +159,7 @@ mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion)
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
rhashtable_destroy(&region_12kb->lkey_ht);
- kfree(region_12kb->used_lkey_id);
+ bitmap_free(region_12kb->used_lkey_id);
kfree(region_12kb);
}
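This file, spectrum_acl_tcam.c and spectrum_cnt.c below all make the same conversion: the hand-rolled BITS_TO_LONGS()/kzalloc() allocation becomes bitmap_zalloc(), paired with bitmap_free() instead of kfree(). Both forms yield the same zeroed unsigned-long array; the helpers just make the intent explicit. Equivalence sketch, with nbits standing in for max_lkey_id:

	unsigned long *map;

	/* Before: size computed by hand, freed with kfree(). */
	map = kzalloc(BITS_TO_LONGS(nbits) * sizeof(unsigned long), GFP_KERNEL);
	kfree(map);

	/* After: the same zeroed bitmap via the dedicated helpers. */
	map = bitmap_zalloc(nbits, GFP_KERNEL);
	bitmap_free(map);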
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7cccc41dd69c..31f7f4c3acc3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -36,7 +36,6 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
u64 max_tcam_regions;
u64 max_regions;
u64 max_groups;
- size_t alloc_size;
int err;
mutex_init(&tcam->lock);
@@ -52,15 +51,13 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
if (max_tcam_regions < max_regions)
max_regions = max_tcam_regions;
- alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
- tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
+ tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
if (!tcam->used_regions)
return -ENOMEM;
tcam->max_regions = max_regions;
max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
- tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
+ tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
if (!tcam->used_groups) {
err = -ENOMEM;
goto err_alloc_used_groups;
@@ -76,9 +73,9 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_tcam_init:
- kfree(tcam->used_groups);
+ bitmap_free(tcam->used_groups);
err_alloc_used_groups:
- kfree(tcam->used_regions);
+ bitmap_free(tcam->used_regions);
return err;
}
@@ -89,8 +86,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
mutex_destroy(&tcam->lock);
ops->fini(mlxsw_sp, tcam->priv);
- kfree(tcam->used_groups);
- kfree(tcam->used_regions);
+ bitmap_free(tcam->used_groups);
+ bitmap_free(tcam->used_regions);
}
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 9de160e740b2..d78cf5a7220a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1583,7 +1583,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
- unsigned long cb_priv;
+ unsigned long cb_priv = 0;
LIST_HEAD(bulk_list);
char *sbsr_pl;
u8 masked_count;
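The lone change here zero-initializes cb_priv. The value is filled in through an out-parameter by a completion callback deep inside the bulk-transaction machinery, which the compiler cannot follow, so the initializer silences a maybe-uninitialized warning without changing behavior. The shape of the problem, with hypothetical names:

static void fill_on_success(unsigned long *out, bool ok)
{
	if (ok)
		*out = 42;	/* the only write, invisible to the caller's analysis */
}

static unsigned long example(bool ok)
{
	unsigned long cb_priv = 0;	/* zeroed at declaration */

	fill_on_success(&cb_priv, ok);
	return cb_priv;
}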
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index b65b93a2b9bc..fc2257753b9b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -122,7 +122,6 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_counter_pool *pool;
- unsigned int map_size;
int err;
pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
@@ -143,9 +142,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
mlxsw_sp_counter_pool_occ_get, pool);
- map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
-
- pool->usage = kzalloc(map_size, GFP_KERNEL);
+ pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);
if (!pool->usage) {
err = -ENOMEM;
goto err_usage_alloc;
@@ -158,7 +155,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_sub_pools_init:
- kfree(pool->usage);
+ bitmap_free(pool->usage);
err_usage_alloc:
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_COUNTERS);
@@ -176,7 +173,7 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
pool->pool_size);
WARN_ON(atomic_read(&pool->active_entries_count));
- kfree(pool->usage);
+ bitmap_free(pool->usage);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_COUNTERS);
kfree(pool);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 267590a0eee7..84d4460f3dcd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -96,6 +96,9 @@ mlxsw_sp_link_ext_state_opcode_map[] = {
{1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0},
{1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0},
+
+ {1042, ETHTOOL_LINK_EXT_STATE_MODULE,
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY},
};
static void
@@ -124,6 +127,10 @@ mlxsw_sp_port_set_link_ext_state(struct mlxsw_sp_ethtool_link_ext_state_opcode_m
link_ext_state_info->cable_issue =
link_ext_state_mapping.link_ext_substate;
break;
+ case ETHTOOL_LINK_EXT_STATE_MODULE:
+ link_ext_state_info->module =
+ link_ext_state_mapping.link_ext_substate;
+ break;
default:
break;
}
@@ -1197,6 +1204,41 @@ mlxsw_sp_get_rmon_stats(struct net_device *dev,
*ranges = mlxsw_rmon_ranges;
}
+static int mlxsw_sp_reset(struct net_device *dev, u32 *flags)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags);
+}
+
+static int
+mlxsw_sp_get_module_power_mode(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params,
+ extack);
+}
+
+static int
+mlxsw_sp_set_module_power_mode(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module,
+ params->policy, extack);
+}
+
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.cap_link_lanes_supported = true,
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
@@ -1218,6 +1260,9 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
.get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats,
.get_rmon_stats = mlxsw_sp_get_rmon_stats,
+ .reset = mlxsw_sp_reset,
+ .get_module_power_mode = mlxsw_sp_get_module_power_mode,
+ .set_module_power_mode = mlxsw_sp_set_module_power_mode,
};
struct mlxsw_sp1_port_link_mode {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 5facabd86882..ad3926de88f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -24,50 +24,72 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
return tun->parms;
}
-static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms)
{
- return !!(parms.i_flags & TUNNEL_KEY);
+ return !!(parms->i_flags & TUNNEL_KEY);
}
-static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms)
{
- return !!(parms.o_flags & TUNNEL_KEY);
+ return !!(parms->i_flags & TUNNEL_KEY);
}
-static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms)
+{
+ return !!(parms->o_flags & TUNNEL_KEY);
+}
+
+static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms)
+{
+ return !!(parms->o_flags & TUNNEL_KEY);
+}
+
+static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms)
{
return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
- be32_to_cpu(parms.i_key) : 0;
+ be32_to_cpu(parms->i_key) : 0;
}
-static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms)
+static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms)
+{
+ return mlxsw_sp_ipip_parms6_has_ikey(parms) ?
+ be32_to_cpu(parms->i_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms)
{
return mlxsw_sp_ipip_parms4_has_okey(parms) ?
- be32_to_cpu(parms.o_key) : 0;
+ be32_to_cpu(parms->o_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms)
+{
+ return mlxsw_sp_ipip_parms6_has_okey(parms) ?
+ be32_to_cpu(parms->o_key) : 0;
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr };
+ return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms)
+mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms->laddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr };
+ return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms)
+mlxsw_sp_ipip_parms6_daddr(const struct __ip6_tnl_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms->raddr };
}
union mlxsw_sp_l3addr
@@ -80,10 +102,10 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_saddr(parms4);
+ return mlxsw_sp_ipip_parms4_saddr(&parms4);
case MLXSW_SP_L3_PROTO_IPV6:
parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
- return mlxsw_sp_ipip_parms6_saddr(parms6);
+ return mlxsw_sp_ipip_parms6_saddr(&parms6);
}
WARN_ON(1);
@@ -95,7 +117,7 @@ static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_daddr(parms4).addr4;
+ return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4;
}
static union mlxsw_sp_l3addr
@@ -108,10 +130,10 @@ mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_daddr(parms4);
+ return mlxsw_sp_ipip_parms4_daddr(&parms4);
case MLXSW_SP_L3_PROTO_IPV6:
parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
- return mlxsw_sp_ipip_parms6_daddr(parms6);
+ return mlxsw_sp_ipip_parms6_daddr(&parms6);
}
WARN_ON(1);
@@ -125,6 +147,21 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
return !memcmp(&addr, &naddr, sizeof(naddr));
}
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev)
+{
+ struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+
+ return (struct mlxsw_sp_ipip_parms) {
+ .proto = MLXSW_SP_L3_PROTO_IPV4,
+ .saddr = mlxsw_sp_ipip_parms4_saddr(&parms),
+ .daddr = mlxsw_sp_ipip_parms4_daddr(&parms),
+ .link = parms.link,
+ .ikey = mlxsw_sp_ipip_parms4_ikey(&parms),
+ .okey = mlxsw_sp_ipip_parms4_okey(&parms),
+ };
+}
+
static int
mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -158,8 +195,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
u32 ikey;
parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
- has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms);
- ikey = mlxsw_sp_ipip_parms4_ikey(parms);
+ has_ikey = mlxsw_sp_ipip_parms4_has_ikey(&parms);
+ ikey = mlxsw_sp_ipip_parms4_ikey(&parms);
mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
@@ -218,12 +255,12 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
- lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ?
+ lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ?
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
return (struct mlxsw_sp_rif_ipip_lb_config){
.lb_ipipt = lb_ipipt,
- .okey = mlxsw_sp_ipip_parms4_okey(parms),
+ .okey = mlxsw_sp_ipip_parms4_okey(&parms),
.ul_protocol = MLXSW_SP_L3_PROTO_IPV4,
.saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
ol_dev),
@@ -231,48 +268,39 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_ipip_entry *ipip_entry,
- struct netlink_ext_ack *extack)
+mlxsw_sp_ipip_ol_netdev_change_gre(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ const struct mlxsw_sp_ipip_parms *new_parms,
+ struct netlink_ext_ack *extack)
{
- union mlxsw_sp_l3addr old_saddr, new_saddr;
- union mlxsw_sp_l3addr old_daddr, new_daddr;
- struct ip_tunnel_parm new_parms;
+ const struct mlxsw_sp_ipip_parms *old_parms = &ipip_entry->parms;
bool update_tunnel = false;
bool update_decap = false;
bool update_nhs = false;
int err = 0;
- new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
-
- new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms);
- old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4);
- new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms);
- old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4);
-
- if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) {
+ if (!mlxsw_sp_l3addr_eq(&new_parms->saddr, &old_parms->saddr)) {
u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
/* Since the local address has changed, if there is another
* tunnel with a matching saddr, both need to be demoted.
*/
if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp,
- MLXSW_SP_L3_PROTO_IPV4,
- new_saddr, ul_tb_id,
+ new_parms->proto,
+ new_parms->saddr,
+ ul_tb_id,
ipip_entry)) {
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
return 0;
}
update_tunnel = true;
- } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) !=
- mlxsw_sp_ipip_parms4_okey(new_parms)) ||
- ipip_entry->parms4.link != new_parms.link) {
+ } else if (old_parms->okey != new_parms->okey ||
+ old_parms->link != new_parms->link) {
update_tunnel = true;
- } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) {
+ } else if (!mlxsw_sp_l3addr_eq(&new_parms->daddr, &old_parms->daddr)) {
update_nhs = true;
- } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) !=
- mlxsw_sp_ipip_parms4_ikey(new_parms)) {
+ } else if (old_parms->ikey != new_parms->ikey) {
update_decap = true;
}
@@ -288,23 +316,308 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
false, false, false,
extack);
+ if (err)
+ return err;
- ipip_entry->parms4 = new_parms;
- return err;
+ ipip_entry->parms = *new_parms;
+ return 0;
+}
+
+static int
+mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_parms new_parms;
+
+ new_parms = mlxsw_sp_ipip_netdev_parms_init_gre4(ipip_entry->ol_dev);
+ return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+}
+
+static int
+mlxsw_sp_ipip_rem_addr_set_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp_ipip_rem_addr_unset_gre4(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.dev_type = ARPHRD_IPGRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV4,
+ .inc_parsing_depth = false,
+ .parms_init = mlxsw_sp_ipip_netdev_parms_init_gre4,
.nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4,
.decap_config = mlxsw_sp_ipip_decap_config_gre4,
.can_offload = mlxsw_sp_ipip_can_offload_gre4,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4,
.ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4,
+ .rem_ip_addr_set = mlxsw_sp_ipip_rem_addr_set_gre4,
+ .rem_ip_addr_unset = mlxsw_sp_ipip_rem_addr_unset_gre4,
};
-const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = {
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp1_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_parms parms = {0};
+
+ WARN_ON_ONCE(1);
+ return parms;
+}
+
+static int
+mlxsw_sp1_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp1_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static bool mlxsw_sp1_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ return false;
+}
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp1_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_rif_ipip_lb_config config = {0};
+
+ WARN_ON_ONCE(1);
+ return config;
+}
+
+static int
+mlxsw_sp1_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp1_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static void
+mlxsw_sp1_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ WARN_ON_ONCE(1);
+}
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp1_ipip_gre6_ops = {
+ .dev_type = ARPHRD_IP6GRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV6,
+ .inc_parsing_depth = true,
+ .parms_init = mlxsw_sp1_ipip_netdev_parms_init_gre6,
+ .nexthop_update = mlxsw_sp1_ipip_nexthop_update_gre6,
+ .decap_config = mlxsw_sp1_ipip_decap_config_gre6,
+ .can_offload = mlxsw_sp1_ipip_can_offload_gre6,
+ .ol_loopback_config = mlxsw_sp1_ipip_ol_loopback_config_gre6,
+ .ol_netdev_change = mlxsw_sp1_ipip_ol_netdev_change_gre6,
+ .rem_ip_addr_set = mlxsw_sp1_ipip_rem_addr_set_gre6,
+ .rem_ip_addr_unset = mlxsw_sp1_ipip_rem_addr_unset_gre6,
+};
+
+const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[] = {
[MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
+ [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp1_ipip_gre6_ops,
+};
+
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp2_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+
+ return (struct mlxsw_sp_ipip_parms) {
+ .proto = MLXSW_SP_L3_PROTO_IPV6,
+ .saddr = mlxsw_sp_ipip_parms6_saddr(&parms),
+ .daddr = mlxsw_sp_ipip_parms6_daddr(&parms),
+ .link = parms.link,
+ .ikey = mlxsw_sp_ipip_parms6_ikey(&parms),
+ .okey = mlxsw_sp_ipip_parms6_okey(&parms),
+ };
+}
+
+static int
+mlxsw_sp2_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ enum mlxsw_reg_ratr_op op;
+
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP,
+ adj_index, rif_index);
+ mlxsw_reg_ratr_ipip6_entry_pack(ratr_pl,
+ ipip_entry->dip_kvdl_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int
+mlxsw_sp2_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+ struct __ip6_tnl_parm parms;
+ unsigned int type_check;
+ bool has_ikey;
+ u32 ikey;
+
+ parms = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ has_ikey = mlxsw_sp_ipip_parms6_has_ikey(&parms);
+ ikey = mlxsw_sp_ipip_parms6_ikey(&parms);
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
+
+ type_check = has_ikey ?
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY :
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE;
+
+ /* Linux demuxes tunnels based on packet SIP (which must match tunnel
+ * remote IP). Thus configure decap so that it filters out packets that
+ * are not IPv6 or have the wrong SIP. IPIP_DECAP_ERROR trap is
+ * generated for packets that fail this criterion. Linux then handles
+ * such packets in the slow path and generates an ICMP destination unreachable.
+ */
+ mlxsw_reg_rtdp_ipip6_pack(rtdp_pl, rif_index,
+ MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6,
+ type_check, has_ikey,
+ ipip_entry->dip_kvdl_index, ikey);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
+}
+
+static bool mlxsw_sp2_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
+ bool inherit_ttl = tparm.hop_limit == 0;
+ __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
+
+ return (tparm.i_flags & ~okflags) == 0 &&
+ (tparm.o_flags & ~okflags) == 0 &&
+ inherit_ttl && inherit_tos &&
+ mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev);
+}
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp2_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
+
+ lb_ipipt = mlxsw_sp_ipip_parms6_has_okey(&parms) ?
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
+ return (struct mlxsw_sp_rif_ipip_lb_config){
+ .lb_ipipt = lb_ipipt,
+ .okey = mlxsw_sp_ipip_parms6_okey(&parms),
+ .ul_protocol = MLXSW_SP_L3_PROTO_IPV6,
+ .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV6,
+ ol_dev),
+ };
+}
+
+static int
+mlxsw_sp2_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_parms new_parms;
+
+ new_parms = mlxsw_sp2_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev);
+ return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+}
+
+static int
+mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ char rips_pl[MLXSW_REG_RIPS_LEN];
+ struct __ip6_tnl_parm parms6;
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ &ipip_entry->dip_kvdl_index);
+ if (err)
+ return err;
+
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index,
+ &parms6.raddr);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
+ if (err)
+ goto err_rips_write;
+
+ return 0;
+
+err_rips_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ ipip_entry->dip_kvdl_index);
+ return err;
+}
+
+static void
+mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ ipip_entry->dip_kvdl_index);
+}
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
+ .dev_type = ARPHRD_IP6GRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV6,
+ .inc_parsing_depth = true,
+ .parms_init = mlxsw_sp2_ipip_netdev_parms_init_gre6,
+ .nexthop_update = mlxsw_sp2_ipip_nexthop_update_gre6,
+ .decap_config = mlxsw_sp2_ipip_decap_config_gre6,
+ .can_offload = mlxsw_sp2_ipip_can_offload_gre6,
+ .ol_loopback_config = mlxsw_sp2_ipip_ol_loopback_config_gre6,
+ .ol_netdev_change = mlxsw_sp2_ipip_ol_netdev_change_gre6,
+ .rem_ip_addr_set = mlxsw_sp2_ipip_rem_addr_set_gre6,
+ .rem_ip_addr_unset = mlxsw_sp2_ipip_rem_addr_unset_gre6,
+};
+
+const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[] = {
+ [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
+ [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp2_ipip_gre6_ops,
};
static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp,
@@ -363,3 +676,22 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+
+struct net_device *
+mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
+{
+ struct net *net = dev_net(ol_dev);
+ struct ip_tunnel *tun4;
+ struct ip6_tnl *tun6;
+
+ switch (ol_dev->type) {
+ case ARPHRD_IPGRE:
+ tun4 = netdev_priv(ol_dev);
+ return dev_get_by_index_rcu(net, tun4->parms.link);
+ case ARPHRD_IP6GRE:
+ tun6 = netdev_priv(ol_dev);
+ return dev_get_by_index_rcu(net, tun6->parms.link);
+ default:
+ return NULL;
+ }
+}
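Note that mlxsw_sp_ipip_netdev_ul_dev_get() resolves the underlay device with dev_get_by_index_rcu(), which takes no reference of its own, so callers must hold the RCU read lock across both the lookup and any use of the returned device. A hedged usage sketch (the caller is illustrative):

	struct net_device *ul_dev;
	unsigned int mtu = 0;

	rcu_read_lock();
	ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (ul_dev)
		mtu = ul_dev->mtu;	/* valid only under the read lock */
	rcu_read_unlock();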
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index f0837b42d1d6..8cc259dcc8d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -7,6 +7,7 @@
#include "spectrum_router.h"
#include <net/ip_fib.h>
#include <linux/if_tunnel.h>
+#include <net/ip6_tunnel.h>
struct ip_tunnel_parm
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
@@ -21,23 +22,36 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr);
enum mlxsw_sp_ipip_type {
MLXSW_SP_IPIP_TYPE_GRE4,
+ MLXSW_SP_IPIP_TYPE_GRE6,
MLXSW_SP_IPIP_TYPE_MAX,
};
+struct mlxsw_sp_ipip_parms {
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr saddr;
+ union mlxsw_sp_l3addr daddr;
+ int link;
+ u32 ikey;
+ u32 okey;
+};
+
struct mlxsw_sp_ipip_entry {
enum mlxsw_sp_ipip_type ipipt;
struct net_device *ol_dev; /* Overlay. */
struct mlxsw_sp_rif_ipip_lb *ol_lb;
struct mlxsw_sp_fib_entry *decap_fib_entry;
struct list_head ipip_list_node;
- union {
- struct ip_tunnel_parm parms4;
- };
+ struct mlxsw_sp_ipip_parms parms;
+ u32 dip_kvdl_index;
};
struct mlxsw_sp_ipip_ops {
int dev_type;
enum mlxsw_sp_l3proto ul_proto; /* Underlay. */
+ bool inc_parsing_depth;
+
+ struct mlxsw_sp_ipip_parms
+ (*parms_init)(const struct net_device *ol_dev);
int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -58,8 +72,13 @@ struct mlxsw_sp_ipip_ops {
int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct netlink_ext_ack *extack);
+ int (*rem_ip_addr_set)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry);
+ void (*rem_ip_addr_unset)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry);
};
-extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[];
+extern const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[];
+extern const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[];
#endif /* _MLXSW_IPIP_H_*/
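With the single ops array split per ASIC generation, Spectrum-1 exports a GRE6 table of WARN_ON_ONCE() stubs while Spectrum-2 offloads GRE6 for real; common code is presumably handed the matching table at init and dispatches by tunnel type. Sketch of the expected dispatch, assuming an ipip_ops_arr pointer hanging off struct mlxsw_sp:

	const struct mlxsw_sp_ipip_ops *ops =
		mlxsw_sp->ipip_ops_arr[ipip_entry->ipipt];	/* assumed field */

	err = ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err)
		return err;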
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 9958d503bf0e..4243d3b883ff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -50,12 +50,24 @@ struct mlxsw_sp_qdisc_ops {
struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u32 parent);
unsigned int num_classes;
+
+ u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child);
+ int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child);
+};
+
+struct mlxsw_sp_qdisc_ets_band {
+ u8 prio_bitmap;
+ int tclass_num;
+};
+
+struct mlxsw_sp_qdisc_ets_data {
+ struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS];
};
struct mlxsw_sp_qdisc {
u32 handle;
- int tclass_num;
- u8 prio_bitmap;
union {
struct red_stats red;
} xstats_base;
@@ -67,6 +79,10 @@ struct mlxsw_sp_qdisc {
u64 backlog;
} stats_base;
+ union {
+ struct mlxsw_sp_qdisc_ets_data *ets_data;
+ };
+
struct mlxsw_sp_qdisc_ops *ops;
struct mlxsw_sp_qdisc *parent;
struct mlxsw_sp_qdisc *qdiscs;
@@ -141,8 +157,7 @@ mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
}
static struct mlxsw_sp_qdisc *
-mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
- bool root_only)
+mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent)
{
struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
@@ -150,8 +165,6 @@ mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
return NULL;
if (parent == TC_H_ROOT)
return &qdisc_state->root_qdisc;
- if (root_only)
- return NULL;
return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
mlxsw_sp_qdisc_walk_cb_find, &parent);
}
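Qdisc lookup is keyed on the TC parent handle, a u32 that packs a 16-bit major (the qdisc) and a 16-bit minor (the class), with TC_H_ROOT as the all-ones sentinel handled first. A worked example of the encoding, matching the later FIFO code's band = TC_H_MIN(p->parent) - 1:

	u32 parent = 0x00010004;	/* "1:4", i.e. qdisc 1, class 4 */
	u32 major = TC_H_MAJ(parent);	/* 0x00010000 */
	u32 minor = TC_H_MIN(parent);	/* 0x4 */
	unsigned int band = minor - 1;	/* prio/ets band 3 */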
@@ -187,6 +200,32 @@ mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
}
+static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
+
+ if (!parent)
+ return 0xff;
+ if (!parent->ops->get_prio_bitmap)
+ return mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, parent);
+ return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc);
+}
+
+#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+
+static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
+
+ if (!parent)
+ return MLXSW_SP_PORT_DEFAULT_TCLASS;
+ if (!parent->ops->get_tclass_num)
+ return mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, parent);
+ return parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc);
+}
+
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
@@ -194,6 +233,7 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
int err_hdroom = 0;
int err = 0;
+ int i;
if (!mlxsw_sp_qdisc)
return 0;
@@ -211,6 +251,9 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
if (!mlxsw_sp_qdisc->ops)
return 0;
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++)
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_qdisc->qdiscs[i]);
mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
if (mlxsw_sp_qdisc->ops->destroy)
err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
@@ -226,6 +269,87 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
return err_hdroom ?: err;
}
+struct mlxsw_sp_qdisc_tree_validate {
+ bool forbid_ets;
+ bool forbid_root_tbf;
+ bool forbid_tbf;
+ bool forbid_red;
+};
+
+static int
+__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate);
+
+static int
+mlxsw_sp_qdisc_tree_validate_children(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
+ err = __mlxsw_sp_qdisc_tree_validate(&mlxsw_sp_qdisc->qdiscs[i],
+ validate);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate)
+{
+ if (!mlxsw_sp_qdisc->ops)
+ return 0;
+
+ switch (mlxsw_sp_qdisc->ops->type) {
+ case MLXSW_SP_QDISC_FIFO:
+ break;
+ case MLXSW_SP_QDISC_RED:
+ if (validate.forbid_red)
+ return -EINVAL;
+ validate.forbid_red = true;
+ validate.forbid_root_tbf = true;
+ validate.forbid_ets = true;
+ break;
+ case MLXSW_SP_QDISC_TBF:
+ if (validate.forbid_root_tbf) {
+ if (validate.forbid_tbf)
+ return -EINVAL;
+ /* This is a TC TBF. */
+ validate.forbid_tbf = true;
+ validate.forbid_ets = true;
+ } else {
+ /* This is a root TBF. */
+ validate.forbid_root_tbf = true;
+ }
+ break;
+ case MLXSW_SP_QDISC_PRIO:
+ case MLXSW_SP_QDISC_ETS:
+ if (validate.forbid_ets)
+ return -EINVAL;
+ validate.forbid_root_tbf = true;
+ validate.forbid_ets = true;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return mlxsw_sp_qdisc_tree_validate_children(mlxsw_sp_qdisc, validate);
+}
+
+static int mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_qdisc_tree_validate validate = {};
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ return __mlxsw_sp_qdisc_tree_validate(mlxsw_sp_qdisc, validate);
+}
+
static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
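The validator threads struct mlxsw_sp_qdisc_tree_validate by value on purpose: every recursion level mutates only its own copy, so a restriction imposed at one node binds that node's descendants but never its siblings. A toy illustration of the by-value propagation (not driver code):

struct vflags {
	bool forbid_ets;
};

static int check_node(bool is_red, struct vflags v, int nchildren)
{
	int i;

	if (is_red)
		v.forbid_ets = true;	/* changes this copy only */
	for (i = 0; i < nchildren; i++)
		/* Each child starts from this node's copy; the caller's
		 * copy, passed to this node's siblings, is untouched.
		 */
		if (check_node(false, v, 0))
			return -EINVAL;
	return 0;
}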
@@ -268,6 +392,10 @@ static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc->num_classes = ops->num_classes;
mlxsw_sp_qdisc->ops = ops;
mlxsw_sp_qdisc->handle = handle;
+ err = mlxsw_sp_qdisc_tree_validate(mlxsw_sp_port);
+ if (err)
+ goto err_replace;
+
err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
goto err_replace;
@@ -406,13 +534,17 @@ mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *p_tx_bytes, u64 *p_tx_packets,
u64 *p_drops, u64 *p_backlog)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
u64 tx_bytes, tx_packets;
+ u8 prio_bitmap;
+ int tclass_num;
+ prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
&tx_packets, &tx_bytes);
*p_tx_packets += tx_packets;
@@ -506,19 +638,24 @@ static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *red_base;
+ u8 prio_bitmap;
+ int tclass_num;
+ prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
red_base = &mlxsw_sp_qdisc->xstats_base.red;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
&stats_base->tx_packets,
&stats_base->tx_bytes);
+ red_base->prob_mark = xstats->tc_ecn[tclass_num];
red_base->prob_drop = xstats->wred_drop[tclass_num];
red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
@@ -532,8 +669,10 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
- mlxsw_sp_qdisc->tclass_num);
+ int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
}
static int
@@ -564,15 +703,33 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
+mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle, unsigned int band,
+ struct mlxsw_sp_qdisc *child_qdisc);
+static void
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle);
+
+static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct tc_red_qopt_offload_params *p = params;
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num;
u32 min, max;
u64 prob;
+ int err;
+
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
+ &mlxsw_sp_qdisc->qdiscs[0]);
+ if (err)
+ return err;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
/* calculate probability in percentage */
prob = p->probability;
@@ -615,22 +772,27 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
void *xstats_ptr)
{
struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *res = xstats_ptr;
- int early_drops, pdrops;
+ int early_drops, marks, pdrops;
+ int tclass_num;
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark;
pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
xstats_base->pdrop;
res->pdrop += pdrops;
res->prob_drop += early_drops;
+ res->prob_mark += marks;
xstats_base->pdrop += pdrops;
xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
return 0;
}
@@ -639,16 +801,19 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
u64 overlimits;
+ int tclass_num;
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
- overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;
+ overlimits = xstats->wred_drop[tclass_num] +
+ xstats->tc_ecn[tclass_num] - stats_base->overlimits;
stats_ptr->qstats->overlimits += overlimits;
stats_base->overlimits += overlimits;
@@ -660,11 +825,12 @@ static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u32 parent)
{
- return NULL;
+ /* RED and TBF are formally classful qdiscs, but all class references,
+ * including X:0, just refer to the same one class.
+ */
+ return &mlxsw_sp_qdisc->qdiscs[0];
}
-#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
-
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
.type = MLXSW_SP_QDISC_RED,
.check_params = mlxsw_sp_qdisc_red_check_params,
@@ -675,14 +841,19 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
.find_class = mlxsw_sp_qdisc_leaf_find_class,
+ .num_classes = 1,
};
+static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle);
+
static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -704,6 +875,9 @@ static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_RED_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_RED_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
+ p->child_handle);
default:
return -EOPNOTSUPP;
}
@@ -740,13 +914,34 @@ mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc->stats_base.backlog = 0;
}
+static enum mlxsw_reg_qeec_hr
+mlxsw_sp_qdisc_tbf_hr(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ if (mlxsw_sp_qdisc == &mlxsw_sp_port->qdisc->root_qdisc)
+ return MLXSW_REG_QEEC_HR_PORT;
+
+ /* Configure a subgroup shaper, so that both UC and MC traffic is
+ * subject to shaping. That is unlike RED, where UC queue lengths are
+ * going to differ from MC ones due to different pool and quota
+ * configurations, so the configuration is not applicable. For a shaper,
+ * on the other hand, subjecting the overall stream to the configured
+ * shaper makes sense. Also note that this is what we do for
+ * ieee_setmaxrate().
+ */
+ return MLXSW_REG_QEEC_HR_SUBGROUP;
+}
+
static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HR_SUBGROUP,
- mlxsw_sp_qdisc->tclass_num, 0,
+ enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
MLXSW_REG_QEEC_MAS_DIS, 0);
}
@@ -828,27 +1023,29 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
+ enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
struct tc_tbf_qopt_offload_replace_params *p = params;
u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ int tclass_num;
u8 burst_size;
int err;
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
+ &mlxsw_sp_qdisc->qdiscs[0]);
+ if (err)
+ return err;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
if (WARN_ON_ONCE(err))
/* check_params above was supposed to reject this value. */
return -EINVAL;
- /* Configure subgroup shaper, so that both UC and MC traffic is subject
- * to shaping. That is unlike RED, however UC queue lengths are going to
- * be different than MC ones due to different pool and quota
- * configurations, so the configuration is not applicable. For shaper on
- * the other hand, subjecting the overall stream to the configured
- * shaper makes sense. Also note that that is what we do for
- * ieee_setmaxrate().
- */
- return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HR_SUBGROUP,
- mlxsw_sp_qdisc->tclass_num, 0,
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
rate_kbps, burst_size);
}
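The shaper programmed through mlxsw_sp_port_ets_maxrate_set() takes kilobits per second, while TC hands the driver bytes per second; mlxsw_sp_qdisc_tbf_rate_kbps() (outside this hunk) does the conversion. A worked example, assuming the usual x8/1000 scaling:

	u64 rate_bytes_ps = 125000000ULL;		/* 1 Gbit/s in bytes per second */
	u64 rate_kbps = rate_bytes_ps * 8 / 1000;	/* = 1000000 kbps */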
@@ -881,6 +1078,7 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
.find_class = mlxsw_sp_qdisc_leaf_find_class,
+ .num_classes = 1,
};
static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -888,7 +1086,7 @@ static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -907,6 +1105,9 @@ static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_TBF_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_TBF_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
+ p->child_handle);
default:
return -EOPNOTSUPP;
}
@@ -957,6 +1158,32 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
+static int
+mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle, unsigned int band,
+ struct mlxsw_sp_qdisc *child_qdisc)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ if (handle == qdisc_state->future_handle &&
+ qdisc_state->future_fifos[band])
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
+ child_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo,
+ NULL);
+ return 0;
+}
+
+static void
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ qdisc_state->future_handle = handle;
+ memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
+}
+
static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_fifo_qopt_offload *p)
{
@@ -965,16 +1192,15 @@ static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned int band;
u32 parent_handle;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
parent_handle = TC_H_MAJ(p->parent);
if (parent_handle != qdisc_state->future_handle) {
/* This notification is for a different Qdisc than
* previously. Wipe the future cache.
*/
- memset(qdisc_state->future_fifos, 0,
- sizeof(qdisc_state->future_fifos));
- qdisc_state->future_handle = parent_handle;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port,
+ parent_handle);
}
band = TC_H_MIN(p->parent) - 1;
@@ -1033,11 +1259,10 @@ static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0, false, 0);
- mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &mlxsw_sp_qdisc->qdiscs[i]);
- mlxsw_sp_qdisc->qdiscs[i].prio_bitmap = 0;
}
+ kfree(mlxsw_sp_qdisc->ets_data);
+ mlxsw_sp_qdisc->ets_data = NULL;
return 0;
}
@@ -1066,6 +1291,31 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *mlxsw_sp_port)
+{
+ u64 backlog;
+
+ if (mlxsw_sp_qdisc->ops) {
+ backlog = mlxsw_sp_qdisc->stats_base.backlog;
+ if (mlxsw_sp_qdisc->ops->clean_stats)
+ mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ mlxsw_sp_qdisc->stats_base.backlog = backlog;
+ }
+
+ return NULL;
+}
+
+static void
+mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats,
+ mlxsw_sp_port);
+}
+
static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -1074,69 +1324,80 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
const unsigned int *weights,
const u8 *priomap)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data;
+ struct mlxsw_sp_qdisc_ets_band *ets_band;
struct mlxsw_sp_qdisc *child_qdisc;
- int tclass, i, band, backlog;
- u8 old_priomap;
+ u8 old_priomap, new_priomap;
+ int i, band;
int err;
+ if (!ets_data) {
+ ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL);
+ if (!ets_data)
+ return -ENOMEM;
+ mlxsw_sp_qdisc->ets_data = ets_data;
+
+ for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) {
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+
+ ets_band = &ets_data->bands[band];
+ ets_band->tclass_num = tclass_num;
+ }
+ }
+
for (band = 0; band < nbands; band++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ int tclass_num;
+
child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
- old_priomap = child_qdisc->prio_bitmap;
- child_qdisc->prio_bitmap = 0;
+ ets_band = &ets_data->bands[band];
+
+ tclass_num = ets_band->tclass_num;
+ old_priomap = ets_band->prio_bitmap;
+ new_priomap = 0;
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- tclass, 0, !!quanta[band],
+ tclass_num, 0, !!quanta[band],
weights[band]);
if (err)
return err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (priomap[i] == band) {
- child_qdisc->prio_bitmap |= BIT(i);
+ new_priomap |= BIT(i);
if (BIT(i) & old_priomap)
continue;
err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
- i, tclass);
+ i, tclass_num);
if (err)
return err;
}
}
- child_qdisc->tclass_num = tclass;
+ ets_band->prio_bitmap = new_priomap;
- if (old_priomap != child_qdisc->prio_bitmap &&
- child_qdisc->ops && child_qdisc->ops->clean_stats) {
- backlog = child_qdisc->stats_base.backlog;
- child_qdisc->ops->clean_stats(mlxsw_sp_port,
- child_qdisc);
- child_qdisc->stats_base.backlog = backlog;
- }
+ if (old_priomap != new_priomap)
+ mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port,
+ child_qdisc);
- if (handle == qdisc_state->future_handle &&
- qdisc_state->future_fifos[band]) {
- err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
- child_qdisc,
- &mlxsw_sp_qdisc_ops_fifo,
- NULL);
- if (err)
- return err;
- }
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle,
+ band, child_qdisc);
+ if (err)
+ return err;
}
for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ ets_band = &ets_data->bands[band];
+ ets_band->prio_bitmap = 0;
+
child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
- child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
+
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- tclass, 0, false, 0);
+ ets_band->tclass_num, 0, false, 0);
}
- qdisc_state->future_handle = TC_H_UNSPEC;
- memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
return 0;
}
@@ -1238,6 +1499,31 @@ mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
return &mlxsw_sp_qdisc->qdiscs[band];
}
+static struct mlxsw_sp_qdisc_ets_band *
+mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ unsigned int band = child - mlxsw_sp_qdisc->qdiscs;
+
+ if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS))
+ band = 0;
+ return &mlxsw_sp_qdisc->ets_data->bands[band];
+}
+
+static u8
+mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap;
+}
+
+static int
+mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num;
+}
+
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.type = MLXSW_SP_QDISC_PRIO,
.check_params = mlxsw_sp_qdisc_prio_check_params,
@@ -1248,6 +1534,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
.find_class = mlxsw_sp_qdisc_prio_find_class,
.num_classes = IEEE_8021QAZ_MAX_TCS,
+ .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
+ .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
static int
@@ -1299,6 +1587,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
.find_class = mlxsw_sp_qdisc_prio_find_class,
.num_classes = IEEE_8021QAZ_MAX_TCS,
+ .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
+ .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
@@ -1326,10 +1616,9 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
* grafted corresponds to the parent handle. If the two don't match, we
* unoffload the child.
*/
-static int
-__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- u8 band, u32 child_handle)
+static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle)
{
struct mlxsw_sp_qdisc *old_qdisc;
u32 parent;
@@ -1362,21 +1651,12 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static int
-mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- struct tc_prio_qopt_offload_graft_params *p)
-{
- return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- p->band, p->child_handle);
-}
-
static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -1396,8 +1676,9 @@ static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
case TC_PRIO_GRAFT:
- return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- &p->graft_params);
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
default:
return -EOPNOTSUPP;
}
@@ -1420,7 +1701,7 @@ static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -1440,9 +1721,9 @@ static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
case TC_ETS_GRAFT:
- return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- p->graft_params.band,
- p->graft_params.child_handle);
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
default:
return -EOPNOTSUPP;
}
@@ -1472,6 +1753,7 @@ struct mlxsw_sp_qevent_binding {
u32 handle;
int tclass_num;
enum mlxsw_sp_span_trigger span_trigger;
+ unsigned int action_mask;
};
static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
@@ -1482,8 +1764,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_span_agent_parms *agent_parms,
int *p_span_id)
{
+ enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ bool ingress;
int span_id;
int err;
@@ -1491,18 +1775,19 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
+ ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
if (err)
goto err_analyzed_port_get;
trigger_parms.span_id = span_id;
trigger_parms.probability_rate = 1;
- err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
if (err)
goto err_agent_bind;
- err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
+ err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger,
qevent_binding->tclass_num);
if (err)
goto err_trigger_enable;
@@ -1511,10 +1796,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
return 0;
err_trigger_enable:
- mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
err_agent_bind:
- mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
err_analyzed_port_get:
mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
return err;
@@ -1524,16 +1809,20 @@ static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_qevent_binding *qevent_binding,
int span_id)
{
+ enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {
.span_id = span_id,
};
+ bool ingress;
+
+ ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
- mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
+ mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger,
qevent_binding->tclass_num);
- mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
- mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
@@ -1583,10 +1872,17 @@ static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}
-static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mall_entry *mall_entry,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static int
+mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ struct netlink_ext_ack *extack)
{
+ if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) {
+ NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
+ return -EOPNOTSUPP;
+ }
+
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
@@ -1614,15 +1910,17 @@ static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
}
}
-static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static int
+mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_mall_entry *mall_entry;
int err;
list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
- qevent_binding);
+ qevent_binding, extack);
if (err)
goto err_entry_configure;
}
@@ -1646,13 +1944,17 @@ static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qe
qevent_binding);
}
-static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
+static int
+mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_qevent_binding *qevent_binding;
int err;
list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
- err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+ err = mlxsw_sp_qevent_binding_configure(qevent_block,
+ qevent_binding,
+ extack);
if (err)
goto err_binding_configure;
}
@@ -1737,7 +2039,7 @@ static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
- err = mlxsw_sp_qevent_block_configure(qevent_block);
+ err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack);
if (err)
goto err_block_configure;
@@ -1825,7 +2127,8 @@ static void mlxsw_sp_qevent_block_release(void *cb_priv)
static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
- enum mlxsw_sp_span_trigger span_trigger)
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
struct mlxsw_sp_qevent_binding *binding;
@@ -1837,6 +2140,7 @@ mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
binding->handle = handle;
binding->tclass_num = tclass_num;
binding->span_trigger = span_trigger;
+ binding->action_mask = action_mask;
return binding;
}
@@ -1862,9 +2166,11 @@ mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
return NULL;
}
-static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f,
- enum mlxsw_sp_span_trigger span_trigger)
+static int
+mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_qevent_binding *qevent_binding;
@@ -1872,6 +2178,7 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po
struct flow_block_cb *block_cb;
struct mlxsw_sp_qdisc *qdisc;
bool register_block = false;
+ int tclass_num;
int err;
block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
@@ -1904,14 +2211,19 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po
goto err_binding_exists;
}
- qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
- qdisc->tclass_num, span_trigger);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc);
+ qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port,
+ f->sch->handle,
+ tclass_num,
+ span_trigger,
+ action_mask);
if (IS_ERR(qevent_binding)) {
err = PTR_ERR(qevent_binding);
goto err_binding_create;
}
- err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+ err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding,
+ f->extack);
if (err)
goto err_binding_configure;
@@ -1963,15 +2275,19 @@ static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp
}
}
-static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f,
- enum mlxsw_sp_span_trigger span_trigger)
+static int
+mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger);
+ return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
+ span_trigger,
+ action_mask);
case FLOW_BLOCK_UNBIND:
mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
return 0;
@@ -1983,7 +2299,22 @@ static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f)
{
- return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
+ unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) |
+ BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP);
+
+ return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
+ MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
+ action_mask);
+}
+
+int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f)
+{
+ unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR);
+
+ return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
+ MLXSW_SP_SPAN_TRIGGER_ECN,
+ action_mask);
}
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1995,8 +2326,6 @@ int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
return -ENOMEM;
mutex_init(&qdisc_state->lock);
- qdisc_state->root_qdisc.prio_bitmap = 0xff;
- qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
mlxsw_sp_port->qdisc = qdisc_state;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 19bb3ca0515e..217e3b351dfe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -57,6 +57,7 @@ struct mlxsw_sp_rif {
unsigned char addr[ETH_ALEN];
int mtu;
u16 rif_index;
+ u8 mac_profile_id;
u16 vr_id;
const struct mlxsw_sp_rif_ops *ops;
struct mlxsw_sp *mlxsw_sp;
@@ -106,15 +107,23 @@ struct mlxsw_sp_rif_ops {
void (*setup)(struct mlxsw_sp_rif *rif,
const struct mlxsw_sp_rif_params *params);
- int (*configure)(struct mlxsw_sp_rif *rif);
+ int (*configure)(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack);
void (*deconfigure)(struct mlxsw_sp_rif *rif);
struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack);
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
+struct mlxsw_sp_rif_mac_profile {
+ unsigned char mac_prefix[ETH_ALEN];
+ refcount_t ref_count;
+ u8 id;
+};
+
struct mlxsw_sp_router_ops {
int (*init)(struct mlxsw_sp *mlxsw_sp);
+ int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};
static struct mlxsw_sp_rif *
@@ -1055,22 +1064,13 @@ static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->router->vrs);
}
-static struct net_device *
-__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
-{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
- struct net *net = dev_net(ol_dev);
-
- return dev_get_by_index_rcu(net, tun->parms.link);
-}
-
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
struct net_device *d;
u32 tb_id;
rcu_read_lock();
- d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
if (d)
tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
else
@@ -1116,6 +1116,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_ipip_entry *ret = NULL;
+ int err;
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
@@ -1131,26 +1132,30 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
ipip_entry->ipipt = ipipt;
ipip_entry->ol_dev = ol_dev;
+ ipip_entry->parms = ipip_ops->parms_init(ol_dev);
- switch (ipip_ops->ul_proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- WARN_ON(1);
- break;
+ err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_rem_ip_addr_set;
}
return ipip_entry;
+err_rem_ip_addr_set:
+ mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
kfree(ipip_entry);
return ret;
}
-static void
-mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
+static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
{
+ const struct mlxsw_sp_ipip_ops *ipip_ops =
+ mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+
+ ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
kfree(ipip_entry);
}
@@ -1174,6 +1179,32 @@ mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}
+static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ /* Not all tunnels need to increase the default parsing depth
+ * (96 bytes).
+ */
+ if (ipip_ops->inc_parsing_depth)
+ return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
+
+ return 0;
+}
+
+static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops =
+ mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ if (ipip_ops->inc_parsing_depth)
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+}
+
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
@@ -1187,18 +1218,32 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
+ err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
+ ipip_entry->ipipt);
+ if (err)
+ goto err_parsing_depth_inc;
+
ipip_entry->decap_fib_entry = fib_entry;
fib_entry->decap.ipip_entry = ipip_entry;
fib_entry->decap.tunnel_index = tunnel_index;
+
return 0;
+
+err_parsing_depth_inc:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ fib_entry->decap.tunnel_index);
+ return err;
}
static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
+ enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
+
/* Unlink this node from the IPIP entry that it's the decap entry of. */
fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
fib_entry->decap.ipip_entry = NULL;
+ mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1, fib_entry->decap.tunnel_index);
}
@@ -1309,6 +1354,11 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
saddr_len = 4;
saddr_prefix_len = 32;
break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ saddrp = &saddr.addr6;
+ saddr_len = 16;
+ saddr_prefix_len = 128;
+ break;
default:
WARN_ON(1);
return NULL;
@@ -1345,7 +1395,7 @@ mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
list_del(&ipip_entry->ipip_list_node);
- mlxsw_sp_ipip_entry_dealloc(ipip_entry);
+ mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}
static bool
@@ -1450,7 +1500,7 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
struct net_device *ipip_ul_dev;
rcu_read_lock();
- ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
@@ -1536,23 +1586,34 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
u16 ul_rif_id, bool enable)
{
struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
+ enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
struct mlxsw_sp_rif *rif = &lb_rif->common;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
+ struct in6_addr *saddr6;
u32 saddr4;
+ ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
switch (lb_cf.ul_protocol) {
case MLXSW_SP_L3_PROTO_IPV4:
saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
- MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
- ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
+ ipip_options, ul_vr_id,
+ ul_rif_id, saddr4,
+ lb_cf.okey);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- return -EAFNOSUPPORT;
+ saddr6 = &lb_cf.saddr.addr6;
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu);
+ mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
+ ipip_options, ul_vr_id,
+ ul_rif_id, saddr6,
+ lb_cf.okey);
+ break;
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
@@ -1827,7 +1888,7 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
struct net_device *ipip_ul_dev;
rcu_read_lock();
- ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
@@ -4152,7 +4213,7 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
bool is_up;
rcu_read_lock();
- ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
rcu_read_unlock();
@@ -4376,6 +4437,66 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
}
}
+static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
+{
+ enum mlxsw_reg_ratr_trap_action trap_action;
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ &mlxsw_sp->router->adj_trap_index);
+ if (err)
+ return err;
+
+ trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
+ MLXSW_REG_RATR_TYPE_ETHERNET,
+ mlxsw_sp->router->adj_trap_index,
+ mlxsw_sp->router->lb_rif_index);
+ mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
+ mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+ if (err)
+ goto err_ratr_write;
+
+ return 0;
+
+err_ratr_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_trap_index);
+ return err;
+}
+
+static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_trap_index);
+}
+
+static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
+ return 0;
+
+ err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ refcount_set(&mlxsw_sp->router->num_groups, 1);
+
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
+{
+ if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
+ return;
+
+ mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
+}
+
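mlxsw_sp_nexthop_group_inc()/_dec() above implement a common kernel idiom: the first nexthop group allocates the shared adjacency trap entry, later groups only bump a refcount_t, and the last _dec() frees it — refcount_inc_not_zero() fails exactly while the count is zero. A single-threaded userspace analogue with made-up names; the real code relies on refcount_t atomics plus the router lock:

	static unsigned int num_groups; /* refcount_t in the driver */
	static int trap_entry_live;     /* stands in for the KVDL/RATR entry */

	static int shared_trap_get(void)
	{
		if (num_groups > 0) {   /* refcount_inc_not_zero() */
			num_groups++;
			return 0;
		}
		trap_entry_live = 1;    /* adj_trap_entry_init() */
		num_groups = 1;         /* refcount_set(..., 1) */
		return 0;
	}

	static void shared_trap_put(void)
	{
		if (--num_groups)       /* refcount_dec_and_test() */
			return;
		trap_entry_live = 0;    /* adj_trap_entry_fini() */
	}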
static void
mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_nexthop_group *nh_grp,
@@ -4790,6 +4911,9 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop_obj_init;
}
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
if (err) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
@@ -4808,6 +4932,8 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
i = nhgi->count;
err_nexthop_obj_init:
for (i--; i >= 0; i--) {
@@ -4832,6 +4958,7 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
cancel_delayed_work(&router->nh_grp_activity_dw);
}
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -5223,6 +5350,9 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop4_init;
}
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
if (err)
goto err_group_refresh;
@@ -5230,6 +5360,8 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
i = nhgi->count;
err_nexthop4_init:
for (i--; i >= 0; i--) {
@@ -5247,6 +5379,7 @@ mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
int i;
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -5725,41 +5858,6 @@ static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
return err;
}
-static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp)
-{
- enum mlxsw_reg_ratr_trap_action trap_action;
- char ratr_pl[MLXSW_REG_RATR_LEN];
- int err;
-
- if (mlxsw_sp->router->adj_discard_index_valid)
- return 0;
-
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
- &mlxsw_sp->router->adj_discard_index);
- if (err)
- return err;
-
- trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
- mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
- MLXSW_REG_RATR_TYPE_ETHERNET,
- mlxsw_sp->router->adj_discard_index,
- mlxsw_sp->router->lb_rif_index);
- mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
- mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
- if (err)
- goto err_ratr_write;
-
- mlxsw_sp->router->adj_discard_index_valid = true;
-
- return 0;
-
-err_ratr_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
- mlxsw_sp->router->adj_discard_index);
- return err;
-}
-
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
@@ -5772,7 +5870,6 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
u16 trap_id = 0;
u32 adjacency_index = 0;
u16 ecmp_size = 0;
- int err;
/* In case the nexthop group adjacency index is valid, use it
* with provided ECMP size. Otherwise, setup trap and pass
@@ -5783,11 +5880,8 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
adjacency_index = nhgi->adj_index;
ecmp_size = nhgi->ecmp_size;
} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
- err = mlxsw_sp_adj_discard_write(mlxsw_sp);
- if (err)
- return err;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
- adjacency_index = mlxsw_sp->router->adj_discard_index;
+ adjacency_index = mlxsw_sp->router->adj_trap_index;
ecmp_size = 1;
} else {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
@@ -6036,8 +6130,8 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
@@ -6048,6 +6142,13 @@ mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
}
}
+static void
+mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib4_entry *fib4_entry)
+{
+ mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+}
+
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node,
@@ -6108,7 +6209,7 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
fib_info_put(fib4_entry->fi);
- mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
@@ -6641,6 +6742,9 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
}
nh_grp->nhgi = nhgi;
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
if (err)
goto err_group_refresh;
@@ -6648,6 +6752,8 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
i = nhgi->count;
err_nexthop6_init:
for (i--; i >= 0; i--) {
@@ -6665,6 +6771,7 @@ mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
int i;
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -6888,11 +6995,38 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}
-static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry,
- const struct fib6_info *rt)
+static int
+mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ const struct fib6_info *rt)
{
- if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
+ struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
+ union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
+ int ifindex = nhgi->nexthops[0].ifindex;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
+ MLXSW_SP_L3_PROTO_IPV6,
+ dip);
+
+ if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
+ return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
+ ipip_entry);
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ const struct fib6_info *rt)
+{
+ if (rt->fib6_flags & RTF_LOCAL)
+ return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
+ rt);
+ if (rt->fib6_flags & RTF_ANYCAST)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
else if (rt->fib6_type == RTN_BLACKHOLE)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
@@ -6902,6 +7036,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+
+ return 0;
}
static void
@@ -6959,12 +7095,16 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop_group_vr_link;
- mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+ err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+ if (err)
+ goto err_fib6_entry_type_set;
fib_entry->fib_node = fib_node;
return fib6_entry;
+err_fib6_entry_type_set:
+ mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_nexthop6_group_get:
@@ -6983,11 +7123,19 @@ err_fib_entry_priv_create:
return ERR_PTR(err);
}
+static void
+mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
+}
+
static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+ mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
@@ -7340,16 +7488,6 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
continue;
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
}
-
- /* After flushing all the routes, it is not possible anyone is still
- * using the adjacency index that is discarding packets, so free it in
- * case it was allocated.
- */
- if (!mlxsw_sp->router->adj_discard_index_valid)
- return;
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
- mlxsw_sp->router->adj_discard_index);
- mlxsw_sp->router->adj_discard_index_valid = false;
}
struct mlxsw_sp_fib6_event {
@@ -8056,7 +8194,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
if (ops->setup)
ops->setup(rif, params);
- err = ops->configure(rif);
+ err = ops->configure(rif, extack);
if (err)
goto err_configure;
@@ -8175,6 +8313,200 @@ static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
mlxsw_sp_rif_destroy(rif);
}
+static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif_mac_profile *profile,
+ struct netlink_ext_ack *extack)
+{
+ u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ int id;
+
+ id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
+ max_rif_mac_profiles, GFP_KERNEL);
+
+ if (id >= 0) {
+ profile->id = id;
+ return 0;
+ }
+
+ if (id == -ENOSPC)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Exceeded number of supported router interface MAC profiles");
+
+ return id;
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
+ mac_profile);
+ WARN_ON(!profile);
+ return profile;
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_alloc(const char *mac)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return NULL;
+
+ ether_addr_copy(profile->mac_prefix, mac);
+ refcount_set(&profile->ref_count, 1);
+ return profile;
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
+{
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ struct mlxsw_sp_rif_mac_profile *profile;
+ int id;
+
+ idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
+ if (!profile)
+ continue;
+
+ if (ether_addr_equal_masked(profile->mac_prefix, mac,
+ mlxsw_sp->mac_mask))
+ return profile;
+ }
+
+ return NULL;
+}
+
+static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+
+ return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+ int err;
+
+ profile = mlxsw_sp_rif_mac_profile_alloc(mac);
+ if (!profile)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
+ if (err)
+ goto profile_index_alloc_err;
+
+ atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
+ return profile;
+
+profile_index_alloc_err:
+ kfree(profile);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
+ u8 mac_profile)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
+ profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
+ kfree(profile);
+}
+
+static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u8 *p_mac_profile,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
+ if (profile) {
+ refcount_inc(&profile->ref_count);
+ goto out;
+ }
+
+ profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
+ if (IS_ERR(profile))
+ return PTR_ERR(profile);
+
+out:
+ *p_mac_profile = profile->id;
+ return 0;
+}
+
+static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
+ u8 mac_profile)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
+ mac_profile);
+ if (WARN_ON(!profile))
+ return;
+
+ if (!refcount_dec_and_test(&profile->ref_count))
+ return;
+
+ mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
+}
+
+static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
+ rif->mac_profile_id);
+ if (WARN_ON(!profile))
+ return false;
+
+ return refcount_read(&profile->ref_count) > 1;
+}
+
+static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
+ const char *new_mac)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
+ rif->mac_profile_id);
+ if (WARN_ON(!profile))
+ return -EINVAL;
+
+ ether_addr_copy(profile->mac_prefix, new_mac);
+ return 0;
+}
+
+static int
+mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ const char *new_mac,
+ struct netlink_ext_ack *extack)
+{
+ u8 mac_profile;
+ int err;
+
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+ return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
+ &mac_profile, extack);
+ if (err)
+ return err;
+
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
+ rif->mac_profile_id = mac_profile;
+ return 0;
+}
+
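mlxsw_sp_rif_mac_profile_replace() above encodes the central invariant of the new profile table: a profile may be edited in place only while this RIF is its sole user; otherwise the new profile is acquired before the old reference is dropped, so a failed get leaves the RIF untouched. A compressed userspace sketch — every name below is hypothetical:

	#include <stdbool.h>

	struct rif { int profile_id; };

	/* placeholders for the IDR-backed helpers in the driver */
	bool profile_is_shared(const struct rif *rif);
	int  profile_edit(struct rif *rif, const char *mac);
	int  profile_get(const char *mac, int *id);
	void profile_put(int id);

	static int profile_replace(struct rif *rif, const char *mac)
	{
		int id, err;

		if (!profile_is_shared(rif))  /* sole user: edit in place */
			return profile_edit(rif, mac);

		err = profile_get(mac, &id);  /* acquire the new one first... */
		if (err)
			return err;           /* ...so failure changes nothing */

		profile_put(rif->profile_id); /* ...then drop the old reference */
		rif->profile_id = id;
		return 0;
	}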
static int
__mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
struct net_device *l3_dev,
@@ -8523,36 +8855,6 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev,
- const unsigned char *dev_addr,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_rif *rif;
- int i;
-
- /* A RIF is not created for macvlan netdevs. Their MAC is used to
- * populate the FDB
- */
- if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
- return 0;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
- rif = mlxsw_sp->router->rifs[i];
- if (rif && rif->ops &&
- rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
- continue;
- if (rif && rif->dev && rif->dev != dev &&
- !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
- mlxsw_sp->mac_mask)) {
- NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev,
unsigned long event,
@@ -8618,11 +8920,6 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
- ivi->extack);
- if (err)
- goto out;
-
err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
mutex_unlock(&mlxsw_sp->router->lock);
@@ -8706,11 +9003,6 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
- i6vi->extack);
- if (err)
- goto out;
-
err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
mutex_unlock(&mlxsw_sp->router->lock);
@@ -8718,7 +9010,7 @@ out:
}
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
- const char *mac, int mtu)
+ const char *mac, int mtu, u8 mac_profile)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
int err;
@@ -8730,15 +9022,18 @@ static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+ mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int
mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif)
+ struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = rif->dev;
+ u8 old_mac_profile;
u16 fid_index;
int err;
@@ -8748,8 +9043,14 @@ mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
+ old_mac_profile = rif->mac_profile_id;
+ err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
+ extack);
+ if (err)
+ goto err_rif_mac_profile_replace;
+
err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
- dev->mtu);
+ dev->mtu, rif->mac_profile_id);
if (err)
goto err_rif_edit;
@@ -8779,8 +9080,11 @@ mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
return 0;
err_rif_fdb_op:
- mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
+ mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
+ old_mac_profile);
err_rif_edit:
+ mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
+err_rif_mac_profile_replace:
mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
return err;
}
@@ -8788,16 +9092,34 @@ err_rif_edit:
static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
struct netdev_notifier_pre_changeaddr_info *info)
{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_mac_profile *profile;
struct netlink_ext_ack *extack;
+ u8 max_rif_mac_profiles;
+ u64 occ;
extack = netdev_notifier_info_to_extack(&info->info);
- return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
- info->dev_addr, extack);
+
+ profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
+ if (profile)
+ return 0;
+
+ max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
+ occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
+ if (occ < max_rif_mac_profiles)
+ return 0;
+
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
+ return -ENOBUFS;
}
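The PRE_CHANGEADDR check above vetoes an address change only when all three escape hatches fail: no existing profile matches the new prefix, no free profile slot remains, and the RIF cannot recycle its own profile because it is shared. Condensed into one predicate (a hypothetical helper, not driver code):

	#include <stdbool.h>

	/* allowed unless a new slot is needed, none is free, and the RIF's
	 * current profile cannot be edited in place
	 */
	static bool mac_change_allowed(bool profile_exists, bool slot_free,
				       bool profile_shared)
	{
		return profile_exists || slot_free || !profile_shared;
	}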
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
unsigned long event, void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
int err = 0;
@@ -8814,7 +9136,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
switch (event) {
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
- err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
+ err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
break;
case NETDEV_PRE_CHANGEADDR:
err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
@@ -8937,6 +9259,7 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
+ mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
rif_subport->lag ? rif_subport->lag_id :
rif_subport->system_port,
@@ -8945,13 +9268,21 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
+static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
+ u8 mac_profile;
int err;
- err = mlxsw_sp_rif_subport_op(rif, true);
+ err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
+ &mac_profile, extack);
if (err)
return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_subport_op(rif, true);
+ if (err)
+ goto err_rif_subport_op;
err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
@@ -8963,6 +9294,8 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
+err_rif_subport_op:
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
return err;
}
@@ -8975,6 +9308,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_rif_subport_op(rif, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
static struct mlxsw_sp_fid *
@@ -9003,6 +9337,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
+ mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
@@ -9013,16 +9348,24 @@ u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
-static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
+static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
u16 fid_index = mlxsw_sp_fid_index(rif->fid);
+ u8 mac_profile;
int err;
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
true);
if (err)
- return err;
+ goto err_rif_vlan_fid_op;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), true);
@@ -9050,6 +9393,8 @@ err_fid_bc_flood_set:
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+err_rif_vlan_fid_op:
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
return err;
}
@@ -9068,6 +9413,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
static struct mlxsw_sp_fid *
@@ -9172,7 +9518,8 @@ mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
}
static int
-mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
+mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
@@ -9359,7 +9706,8 @@ out:
}
static int
-mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
+mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
@@ -9414,6 +9762,13 @@ static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_core *core = mlxsw_sp->core;
+
+ if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
+ return -EIO;
+ mlxsw_sp->router->max_rif_mac_profile =
+ MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
mlxsw_sp->router->rifs = kcalloc(max_rifs,
sizeof(struct mlxsw_sp_rif *),
@@ -9421,16 +9776,28 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->router->rifs)
return -ENOMEM;
+ idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
+ atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ mlxsw_sp_rif_mac_profiles_occ_get,
+ mlxsw_sp);
+
return 0;
}
static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
+ WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
+ idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
kfree(mlxsw_sp->router->rifs);
}
@@ -9447,7 +9814,6 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
- mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
@@ -9460,6 +9826,18 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
+static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
+ return mlxsw_sp_ipips_init(mlxsw_sp);
+}
+
+static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
+ return mlxsw_sp_ipips_init(mlxsw_sp);
+}
+
static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
@@ -9874,6 +10252,7 @@ static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
.init = mlxsw_sp1_router_init,
+ .ipips_init = mlxsw_sp1_ipips_init,
};
static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
@@ -9889,6 +10268,7 @@ static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
.init = mlxsw_sp2_router_init,
+ .ipips_init = mlxsw_sp2_ipips_init,
};
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
@@ -9934,7 +10314,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rifs_init;
- err = mlxsw_sp_ipips_init(mlxsw_sp);
+ err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
if (err)
goto err_ipips_init;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 25d3eae63501..99e8371a82a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -39,6 +39,9 @@ mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif **rifs;
+ struct idr rif_mac_profiles_idr;
+ atomic_t rif_mac_profiles_count;
+ u8 max_rif_mac_profile;
struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
struct rhashtable nexthop_group_ht;
@@ -65,8 +68,6 @@ struct mlxsw_sp_router {
struct notifier_block inet6addr_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
- u32 adj_discard_index;
- bool adj_discard_index_valid;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
struct work_struct fib_event_work;
@@ -82,6 +83,8 @@ struct mlxsw_sp_router {
struct delayed_work nh_grp_activity_dw;
struct list_head nh_res_grp_list;
bool inc_parsing_depth;
+ refcount_t num_groups;
+ u32 adj_trap_index;
};
struct mlxsw_sp_fib_entry_priv {
@@ -226,6 +229,8 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
+struct net_device *
+mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 3398cc01e5ec..f5f819aa9a65 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -1650,6 +1650,22 @@ void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}
+bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger)
+{
+ switch (trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
+ case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+ return true;
+ case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_ECN:
+ return false;
+ }
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+
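mlxsw_sp_span_trigger_is_ingress() above deliberately omits a default: label, so -Wswitch flags any newly added trigger at compile time rather than letting it fall silently to the WARN_ON_ONCE() tail. A self-contained userspace sketch of the idiom, with an invented enum:

	#include <stdbool.h>

	enum trigger { TRIG_INGRESS, TRIG_EARLY_DROP, TRIG_EGRESS, TRIG_ECN };

	static bool trigger_is_ingress(enum trigger t)
	{
		switch (t) { /* no default: -Wswitch warns on unhandled values */
		case TRIG_INGRESS:
		case TRIG_EARLY_DROP:
			return true;
		case TRIG_EGRESS:
		case TRIG_ECN:
			return false;
		}
		return false; /* unreachable while the switch stays exhaustive */
	}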
static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index efaefd1ae863..82e711afb02b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -120,6 +120,7 @@ int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc);
void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc);
+bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger);
extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops;
extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 22fede5cb32c..81c7e8a7fcf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1635,16 +1635,13 @@ mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
u16 fid)
{
struct mlxsw_sp_mid *mid;
- size_t alloc_size;
mid = kzalloc(sizeof(*mid), GFP_KERNEL);
if (!mid)
return NULL;
- alloc_size = sizeof(unsigned long) *
- BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
-
- mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
+ mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
+ GFP_KERNEL);
if (!mid->ports_in_mid)
goto err_ports_in_mid_alloc;
@@ -1663,7 +1660,7 @@ out:
return mid;
err_write_mdb_entry:
- kfree(mid->ports_in_mid);
+ bitmap_free(mid->ports_in_mid);
err_ports_in_mid_alloc:
kfree(mid);
return NULL;
@@ -1680,7 +1677,7 @@ static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_core_max_ports(mlxsw_sp->core))) {
err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
list_del(&mid->list);
- kfree(mid->ports_in_mid);
+ bitmap_free(mid->ports_in_mid);
kfree(mid);
}
return err;
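The hunk above replaces an open-coded sizeof(unsigned long) * BITS_TO_LONGS() allocation with the dedicated bitmap allocator, so allocation and free now name the same abstraction and the size is stated in bits. A minimal kernel-context fragment — not a standalone program; max_ports and port are placeholders:

	unsigned long *ports;

	ports = bitmap_zalloc(max_ports, GFP_KERNEL); /* zero-filled, sized in bits */
	if (!ports)
		return -ENOMEM;
	set_bit(port, ports);
	/* ... */
	bitmap_free(ports);                           /* pairs with bitmap_zalloc() */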
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index b27713906d3a..c11b118dc415 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -348,13 +348,15 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
}
-static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
+static void ks8842_init_mac_addr(struct ks8842_adapter *adapter)
{
+ u8 addr[ETH_ALEN];
int i;
u16 mac;
for (i = 0; i < ETH_ALEN; i++)
- dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
+ addr[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
+ eth_hw_addr_set(adapter->netdev, addr);
if (adapter->conf_flags & MICREL_KS884X) {
/*
@@ -380,7 +382,7 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
}
}
-static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, const u8 *mac)
{
unsigned long flags;
unsigned i;
@@ -1064,7 +1066,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ eth_hw_addr_set(netdev, mac);
ks8842_write_mac_addr(adapter, mac);
return 0;
@@ -1191,12 +1193,11 @@ static int ks8842_probe(struct platform_device *pdev)
if (i < netdev->addr_len)
/* an address was passed, use it */
- memcpy(netdev->dev_addr, pdata->macaddr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->macaddr);
}
if (i == netdev->addr_len) {
- ks8842_read_mac_addr(adapter, netdev->dev_addr);
+ ks8842_init_mac_addr(adapter);
if (!is_valid_ether_addr(netdev->dev_addr))
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index e2eb0caeac82..6f34a61739b6 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -427,7 +427,7 @@ struct ks8851_net {
int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int msg_en);
-int ks8851_remove_common(struct device *dev);
+void ks8851_remove_common(struct device *dev);
int ks8851_suspend(struct device *dev);
int ks8851_resume(struct device *dev);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index a6db1a8156e1..691206f19ea7 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -165,6 +165,7 @@ static void ks8851_read_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
unsigned long flags;
+ u8 addr[ETH_ALEN];
u16 reg;
int i;
@@ -172,9 +173,10 @@ static void ks8851_read_mac_addr(struct net_device *dev)
for (i = 0; i < ETH_ALEN; i += 2) {
reg = ks8851_rdreg16(ks, KS_MAR(i));
- dev->dev_addr[i] = reg >> 8;
- dev->dev_addr[i + 1] = reg & 0xff;
+ addr[i] = reg >> 8;
+ addr[i + 1] = reg & 0xff;
}
+ eth_hw_addr_set(dev, addr);
ks8851_unlock(ks, &flags);
}
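The same shape repeats across the remaining drivers in this series: netdev->dev_addr is no longer written in place, so the bytes are assembled in a local buffer and installed through eth_hw_addr_set(). Kernel-context sketch; read_mac_byte() is a placeholder for the device-specific register read:

	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = read_mac_byte(i); /* hypothetical register accessor */
	eth_hw_addr_set(dev, addr);         /* the only sanctioned writer */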
@@ -195,7 +197,7 @@ static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np)
struct net_device *dev = ks->netdev;
int ret;
- ret = of_get_mac_address(np, dev->dev_addr);
+ ret = of_get_ethdev_address(np, dev);
if (!ret) {
ks8851_write_mac_addr(dev);
return;
@@ -672,7 +674,7 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
return ks8851_write_mac_addr(dev);
}
@@ -1247,7 +1249,7 @@ err_reg_io:
}
EXPORT_SYMBOL_GPL(ks8851_probe_common);
-int ks8851_remove_common(struct device *dev)
+void ks8851_remove_common(struct device *dev)
{
struct ks8851_net *priv = dev_get_drvdata(dev);
@@ -1261,8 +1263,6 @@ int ks8851_remove_common(struct device *dev)
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);
regulator_disable(priv->vdd_io);
-
- return 0;
}
EXPORT_SYMBOL_GPL(ks8851_remove_common);
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2e8fcce50f9d..2e25798c610e 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -327,7 +327,9 @@ static int ks8851_probe_par(struct platform_device *pdev)
static int ks8851_remove_par(struct platform_device *pdev)
{
- return ks8851_remove_common(&pdev->dev);
+ ks8851_remove_common(&pdev->dev);
+
+ return 0;
}
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 479406ecbaa3..0303e727e99f 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -454,7 +454,9 @@ static int ks8851_probe_spi(struct spi_device *spi)
static int ks8851_remove_spi(struct spi_device *spi)
{
- return ks8851_remove_common(&spi->dev);
+ ks8851_remove_common(&spi->dev);
+
+ return 0;
}
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index a0ee155f9f51..99c0c1491af2 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4033,7 +4033,7 @@ static void hw_set_add_addr(struct ksz_hw *hw)
}
}
-static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
+static int hw_add_addr(struct ksz_hw *hw, const u8 *mac_addr)
{
int i;
int j = ADDITIONAL_ENTRIES;
@@ -4054,7 +4054,7 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
return -1;
}
-static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
+static int hw_del_addr(struct ksz_hw *hw, const u8 *mac_addr)
{
int i;
@@ -5581,7 +5581,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
}
- memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, mac->sa_data);
interrupt = hw_block_intr(hw);
@@ -7005,12 +7005,14 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
dev->mem_end = dev->mem_start + reg_len - 1;
dev->irq = pdev->irq;
if (MAIN_PORT == i)
- memcpy(dev->dev_addr, hw_priv->hw.override_addr,
- ETH_ALEN);
+ eth_hw_addr_set(dev, hw_priv->hw.override_addr);
else {
- memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
+ u8 addr[ETH_ALEN];
+
+ ether_addr_copy(addr, sw->other_addr);
if (ether_addr_equal(sw->other_addr, hw->override_addr))
- dev->dev_addr[5] += port->first_port;
+ addr[5] += port->first_port;
+ eth_hw_addr_set(dev, addr);
}
dev->netdev_ops = &netdev_ops;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 09cdc2f2e7ff..634ac7649c43 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -517,7 +517,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(dev->dev_addr, address->sa_data);
+ eth_hw_addr_set(dev, address->sa_data);
return enc28j60_set_hw_macaddr(dev);
}
@@ -1539,7 +1539,6 @@ static const struct net_device_ops enc28j60_netdev_ops = {
static int enc28j60_probe(struct spi_device *spi)
{
- unsigned char macaddr[ETH_ALEN];
struct net_device *dev;
struct enc28j60_net *priv;
int ret = 0;
@@ -1572,9 +1571,7 @@ static int enc28j60_probe(struct spi_device *spi)
goto error_irq;
}
- if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr)))
- ether_addr_copy(dev->dev_addr, macaddr);
- else
+ if (device_get_ethdev_address(&spi->dev, dev))
eth_hw_addr_random(dev);
enc28j60_set_hw_macaddr(dev);
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 0bc6b3176fbf..b90efc80fb59 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -761,7 +761,7 @@ static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, address->sa_data);
return encx24j600_set_hw_macaddr(dev);
}
@@ -1001,6 +1001,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
struct net_device *ndev;
struct encx24j600_priv *priv;
u16 eidled;
+ u8 addr[ETH_ALEN];
ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
@@ -1056,7 +1057,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)
}
/* Get the MAC address from the chip */
- encx24j600_hw_get_macaddr(priv, ndev->dev_addr);
+ encx24j600_hw_get_macaddr(priv, addr);
+ eth_hw_addr_set(ndev, addr);
ndev->ethtool_ops = &encx24j600_ethtool_ops;
@@ -1125,4 +1127,3 @@ module_spi_driver(encx24j600_spi_net_driver);
MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 9e8561cdc32a..4fc97823bc84 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -816,7 +816,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
eth_random_addr(adapter->mac_address);
}
lan743x_mac_set_address(adapter, adapter->mac_address);
- ether_addr_copy(netdev->dev_addr, adapter->mac_address);
+ eth_hw_addr_set(netdev, adapter->mac_address);
return 0;
}
@@ -1743,6 +1743,16 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&tx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(tx->ring_size *
sizeof(struct lan743x_tx_descriptor),
PAGE_SIZE);
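The added block is the standard DMA-mask negotiation: prefer 64-bit addressing, fall back to 32-bit, and only then give up. A self-contained sketch of the same logic (the example_ name is illustrative):

    static int example_set_dma_mask(struct device *dev)
    {
            if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                    return 0;     /* 64-bit DMA is available */
            if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                    return 0;     /* constrained but workable */
            dev_warn(dev, "no suitable DMA available\n");
            return -ENOMEM;
    }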
@@ -1934,7 +1944,8 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
index);
}
-static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+ gfp_t gfp)
{
struct net_device *netdev = rx->adapter->netdev;
struct device *dev = &rx->adapter->pdev->dev;
@@ -1948,7 +1959,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
+ skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
if (!skb)
return -ENOMEM;
dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
@@ -2110,7 +2121,8 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
/* save existing skb, allocate new skb and map to dma */
skb = buffer_info->skb;
- if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
+ if (lan743x_rx_init_ring_element(rx, rx->last_head,
+ GFP_ATOMIC | GFP_DMA)) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
@@ -2276,6 +2288,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&rx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(rx->ring_size *
sizeof(struct lan743x_rx_descriptor),
PAGE_SIZE);
@@ -2315,13 +2337,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
- ret = lan743x_rx_init_ring_element(rx, index);
+ ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
if (ret)
goto cleanup;
}
return 0;
cleanup:
+ netif_warn(rx->adapter, ifup, rx->adapter->netdev,
+ "Error allocating memory for LAN743x\n");
+
lan743x_rx_ring_cleanup(rx);
return ret;
}
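Threading gfp through lan743x_rx_init_ring_element() lets one helper serve two contexts: ring setup at open time runs in process context and may sleep, while the refill in the receive path runs under NAPI and must not. The two call sites after this patch:

    /* open/init path: sleeping allocation is fine */
    ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);

    /* RX processing path: atomic context, no sleeping allowed */
    ret = lan743x_rx_init_ring_element(rx, rx->last_head,
                                       GFP_ATOMIC | GFP_DMA);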
@@ -2645,7 +2670,7 @@ static int lan743x_netdev_set_mac_address(struct net_device *netdev,
ret = eth_prepare_mac_addr_change(netdev, sock_addr);
if (ret)
return ret;
- ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
lan743x_mac_set_address(adapter, sock_addr->sa_data);
lan743x_rfe_update_mac_address(adapter);
return 0;
@@ -3019,6 +3044,8 @@ static int lan743x_pm_resume(struct device *dev)
if (ret) {
netif_err(adapter, probe, adapter->netdev,
"lan743x_hardware_init returned %d\n", ret);
+ lan743x_pci_cleanup(adapter);
+ return ret;
}
/* open netdev when netdev is at running state while resume.
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 6080028c1df2..aaf7aaeaba0c 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -279,6 +279,7 @@
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_ (6)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
(((value) & 0x7) << (1 + ((channel) << 2)))
#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
@@ -830,7 +831,7 @@ struct lan743x_rx_buffer_info {
unsigned int buffer_length;
};
-#define LAN743X_RX_RING_SIZE (65)
+#define LAN743X_RX_RING_SIZE (128)
#define RX_PROCESS_RESULT_NOTHING_TO_DO (0)
#define RX_PROCESS_RESULT_BUFFER_RECEIVED (1)
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index ab6d719d40f0..9380e396f648 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -491,9 +491,10 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
int perout_pin = 0;
unsigned int index = perout_request->index;
struct lan743x_ptp_perout *perout = &ptp->perout[index];
+ int ret = 0;
/* Reject requests with unsupported flags */
- if (perout_request->flags)
+ if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE)
return -EOPNOTSUPP;
if (on) {
@@ -518,6 +519,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve event channel %d for PEROUT\n",
index);
+ ret = -EBUSY;
goto failed;
}
@@ -529,6 +531,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve gpio %d for PEROUT\n",
perout_pin);
+ ret = -EBUSY;
goto failed;
}
@@ -540,27 +543,93 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
period_sec += perout_request->period.nsec / 1000000000;
period_nsec = perout_request->period.nsec % 1000000000;
- if (period_sec == 0) {
- if (period_nsec >= 400000000) {
+ if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on, ts_period;
+ s64 wf_high, period64, half;
+ s32 remainder;
+
+ ts_on.tv_sec = perout_request->on.sec;
+ ts_on.tv_nsec = perout_request->on.nsec;
+ wf_high = timespec64_to_ns(&ts_on);
+ ts_period.tv_sec = perout_request->period.sec;
+ ts_period.tv_nsec = perout_request->period.nsec;
+ period64 = timespec64_to_ns(&ts_period);
+
+ if (period64 < 200) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ if (wf_high >= period64) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "pulse width must be smaller than period\n");
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* Check whether we can do a 50% toggle on an even period.
+ * If the period is odd, check whether the requested pulse
+ * width matches one of the pre-defined width values.
+ * Otherwise, fail the request.
+ */
+ half = div_s64_rem(period64, 2, &remainder);
+ if (!remainder) {
+ if (half == wf_high) {
+ /* It's 50% match. Use the toggle option */
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_;
+ /* In this case, divide the period value by 2 */
+ ts_period = ns_to_timespec64(div_s64(period64, 2));
+ period_sec = ts_period.tv_sec;
+ period_nsec = ts_period.tv_nsec;
+
+ goto program;
+ }
+ }
+ /* If we can't do toggle, the pulse width must exactly match one of the pre-defined values */
+ if (wf_high == 200000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
- } else if (period_nsec >= 20000000) {
+ } else if (wf_high == 10000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
- } else if (period_nsec >= 2000000) {
+ } else if (wf_high == 1000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
- } else if (period_nsec >= 200000) {
+ } else if (wf_high == 100000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
- } else if (period_nsec >= 20000) {
+ } else if (wf_high == 10000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
- } else if (period_nsec >= 200) {
+ } else if (wf_high == 100) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
} else {
netif_warn(adapter, drv, adapter->netdev,
- "perout period too small, minimum is 200nS\n");
+ "duty cycle specified is not supported\n");
+ ret = -EOPNOTSUPP;
goto failed;
}
} else {
- pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ if (period_sec == 0) {
+ if (period_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (period_nsec >= 20000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (period_nsec >= 2000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (period_nsec >= 200000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (period_nsec >= 20000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (period_nsec >= 200) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ } else {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
}
+program:
/* turn off by setting target far in future */
lan743x_csr_write(adapter,
@@ -599,7 +668,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
failed:
lan743x_ptp_perout_off(adapter, index);
- return -ENODEV;
+ return ret;
}
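For PTP_PEROUT_DUTY_CYCLE the hardware offers no free-form pulse width: either the request is exactly 50% of an even period, handled by the TOGGLE event with the period halved, or the high time must equal one of the six fixed widths. A worked example of the 50% path, using the same helpers as above:

    s64 period64 = 1000000;   /* requested period: 1 ms, in ns */
    s64 wf_high  = 500000;    /* requested high time: 500 us */
    s32 remainder;
    s64 half = div_s64_rem(period64, 2, &remainder);

    /* remainder == 0 and half == wf_high, so the TOGGLE event is
     * programmed with period64 / 2 = 500000 ns; the pin flips on
     * each event, giving a 1 ms period at 50% duty.
     */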
static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index cbece6e9bff2..4625d4fb4cde 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -234,8 +234,7 @@ static int sparx5_create_targets(struct sparx5 *sparx5)
}
iomem[idx] = devm_ioremap(sparx5->dev,
iores[idx]->start,
- iores[idx]->end - iores[idx]->start
- + 1);
+ resource_size(iores[idx]));
if (!iomem[idx]) {
dev_err(sparx5->dev, "Unable to get switch registers: %s\n",
iores[idx]->name);
@@ -758,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
"port %u: missing serdes\n",
portno);
+ of_node_put(portnp);
goto cleanup_config;
}
config->portno = portno;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index cb68eaaac881..e042f117dc7a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -162,7 +162,7 @@ static int sparx5_set_mac_address(struct net_device *dev, void *p)
sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid);
/* Record the address */
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -200,7 +200,6 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
{
struct sparx5_port *spx5_port;
struct net_device *ndev;
- u64 val;
ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
if (!ndev)
@@ -216,8 +215,7 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
ndev->netdev_ops = &sparx5_port_netdev_ops;
ndev->ethtool_ops = &sparx5_ethtool_ops;
- val = ether_addr_to_u64(sparx5->base_mac) + portno + 1;
- u64_to_ether_addr(val, ndev->dev_addr);
+ eth_hw_addr_gen(ndev, sparx5->base_mac, portno + 1);
return ndev;
}
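eth_hw_addr_gen() wraps the u64 round-trip the old code open-coded: convert the base MAC to an integer, add the offset with carry across all six octets, convert back and commit. Roughly equivalent open-coded form:

    u8 addr[ETH_ALEN];
    u64 v = ether_addr_to_u64(sparx5->base_mac) + portno + 1;

    u64_to_ether_addr(v, addr);   /* carry propagates across octets */
    eth_hw_addr_set(ndev, addr);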
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index af70e2795125..fb74752de0ca 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -92,12 +92,11 @@ static void sparx5_phylink_validate(struct phylink_config *config,
}
break;
default:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void sparx5_phylink_mac_config(struct phylink_config *config,
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index cee75b561f59..c96ac81212f7 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -3,6 +3,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
#include "mana.h"
@@ -848,6 +850,15 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
+ req.drv_ver = 0; /* Unused */
+ req.os_type = 0x10; /* Linux */
+ req.os_ver_major = LINUX_VERSION_MAJOR;
+ req.os_ver_minor = LINUX_VERSION_PATCHLEVEL;
+ req.os_ver_build = LINUX_VERSION_SUBLEVEL;
+ strscpy(req.os_ver_str1, utsname()->sysname, sizeof(req.os_ver_str1));
+ strscpy(req.os_ver_str2, utsname()->release, sizeof(req.os_ver_str2));
+ strscpy(req.os_ver_str3, utsname()->version, sizeof(req.os_ver_str3));
+
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
@@ -1247,6 +1258,52 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
gc->irq_contexts = NULL;
}
+static int mana_gd_setup(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ int err;
+
+ mana_gd_init_registers(pdev);
+ mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
+
+ err = mana_gd_setup_irqs(pdev);
+ if (err)
+ return err;
+
+ err = mana_hwc_create_channel(gc);
+ if (err)
+ goto remove_irq;
+
+ err = mana_gd_verify_vf_version(pdev);
+ if (err)
+ goto destroy_hwc;
+
+ err = mana_gd_query_max_resources(pdev);
+ if (err)
+ goto destroy_hwc;
+
+ err = mana_gd_detect_devices(pdev);
+ if (err)
+ goto destroy_hwc;
+
+ return 0;
+
+destroy_hwc:
+ mana_hwc_destroy_channel(gc);
+remove_irq:
+ mana_gd_remove_irqs(pdev);
+ return err;
+}
+
+static void mana_gd_cleanup(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ mana_hwc_destroy_channel(gc);
+
+ mana_gd_remove_irqs(pdev);
+}
+
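Factoring mana_gd_setup()/mana_gd_cleanup() out of probe and remove lets the new PM and shutdown callbacks reuse exactly the same bring-up and tear-down sequence, so the paths cannot drift apart. How the helpers pair up across entry points, as wired below:

    /* probe:    mana_gd_setup() then mana_probe(gd, false)
     * resume:   mana_gd_setup() then mana_probe(gd, true)
     * suspend:  mana_remove(gd, true)  then mana_gd_cleanup()
     * remove:   mana_remove(gd, false) then mana_gd_cleanup()
     * shutdown: like suspend, plus pci_disable_device()
     */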
static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct gdma_context *gc;
@@ -1276,6 +1333,9 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!gc)
goto release_region;
+ mutex_init(&gc->eq_test_event_mutex);
+ pci_set_drvdata(pdev, gc);
+
bar0_va = pci_iomap(pdev, bar, 0);
if (!bar0_va)
goto free_gc;
@@ -1283,49 +1343,23 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
- pci_set_drvdata(pdev, gc);
- mana_gd_init_registers(pdev);
-
- mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
-
- err = mana_gd_setup_irqs(pdev);
+ err = mana_gd_setup(pdev);
if (err)
goto unmap_bar;
- mutex_init(&gc->eq_test_event_mutex);
-
- err = mana_hwc_create_channel(gc);
- if (err)
- goto remove_irq;
-
- err = mana_gd_verify_vf_version(pdev);
- if (err)
- goto remove_irq;
-
- err = mana_gd_query_max_resources(pdev);
- if (err)
- goto remove_irq;
-
- err = mana_gd_detect_devices(pdev);
+ err = mana_probe(&gc->mana, false);
if (err)
- goto remove_irq;
-
- err = mana_probe(&gc->mana);
- if (err)
- goto clean_up_gdma;
+ goto cleanup_gd;
return 0;
-clean_up_gdma:
- mana_hwc_destroy_channel(gc);
- vfree(gc->cq_table);
- gc->cq_table = NULL;
-remove_irq:
- mana_gd_remove_irqs(pdev);
+cleanup_gd:
+ mana_gd_cleanup(pdev);
unmap_bar:
pci_iounmap(pdev, bar0_va);
free_gc:
+ pci_set_drvdata(pdev, NULL);
vfree(gc);
release_region:
pci_release_regions(pdev);
@@ -1340,13 +1374,9 @@ static void mana_gd_remove(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
- mana_remove(&gc->mana);
+ mana_remove(&gc->mana, false);
- mana_hwc_destroy_channel(gc);
- vfree(gc->cq_table);
- gc->cq_table = NULL;
-
- mana_gd_remove_irqs(pdev);
+ mana_gd_cleanup(pdev);
pci_iounmap(pdev, gc->bar0_va);
@@ -1357,6 +1387,52 @@ static void mana_gd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+/* The 'state' parameter is not used. */
+static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ mana_remove(&gc->mana, true);
+
+ mana_gd_cleanup(pdev);
+
+ return 0;
+}
+
+/* In case the NIC hardware stops working, the suspend and resume callbacks will
+ * fail -- if this happens, it's safer to just report an error than try to undo
+ * what has been done.
+ */
+static int mana_gd_resume(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ int err;
+
+ err = mana_gd_setup(pdev);
+ if (err)
+ return err;
+
+ err = mana_probe(&gc->mana, true);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Quiesce the device for kexec. This is also called upon reboot/shutdown. */
+static void mana_gd_shutdown(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "Shutdown was calledd\n");
+
+ mana_remove(&gc->mana, true);
+
+ mana_gd_cleanup(pdev);
+
+ pci_disable_device(pdev);
+}
+
#ifndef PCI_VENDOR_ID_MICROSOFT
#define PCI_VENDOR_ID_MICROSOFT 0x1414
#endif
@@ -1371,6 +1447,9 @@ static struct pci_driver mana_driver = {
.id_table = mana_id_table,
.probe = mana_gd_probe,
.remove = mana_gd_remove,
+ .suspend = mana_gd_suspend,
+ .resume = mana_gd_resume,
+ .shutdown = mana_gd_shutdown,
};
module_pci_driver(mana_driver);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index d5c485a6d284..34b971ff8ef8 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -309,9 +309,6 @@ static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
- if (!hwc_cq)
- return;
-
kfree(hwc_cq->comp_buf);
if (hwc_cq->gdma_cq)
@@ -363,7 +360,7 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
}
hwc_cq->gdma_cq = cq;
- comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL);
+ comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
if (!comp_buf) {
err = -ENOMEM;
goto out;
@@ -446,9 +443,6 @@ static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
struct hwc_wq *hwc_wq)
{
- if (!hwc_wq)
- return;
-
mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);
if (hwc_wq->gdma_wq)
@@ -580,7 +574,7 @@ static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
return err;
}
- ctx = kzalloc(q_depth * sizeof(struct hwc_caller_ctx), GFP_KERNEL);
+ ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
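The kcalloc() conversions are the usual hardening idiom: the element-count multiplication is overflow-checked inside the allocator, and sizeof(*ptr) stays correct if the pointee type ever changes. Sketch:

    struct hwc_caller_ctx *ctx;

    ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);  /* checked q_depth * size */
    if (!ctx)
            return -ENOMEM;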
@@ -621,6 +615,7 @@ static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;
+ /* Both were set in mana_hwc_init_event_handler(). */
if (WARN_ON(cq->id >= gc->max_num_cqs))
return -EPROTO;
@@ -636,9 +631,6 @@ static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
u32 max_req_msg_size, u32 max_resp_msg_size)
{
- struct hwc_wq *hwc_rxq = NULL;
- struct hwc_wq *hwc_txq = NULL;
- struct hwc_cq *hwc_cq = NULL;
int err;
err = mana_hwc_init_inflight_msg(hwc, q_depth);
@@ -651,44 +643,32 @@ static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
err = mana_hwc_create_cq(hwc, q_depth * 2,
mana_hwc_init_event_handler, hwc,
mana_hwc_rx_event_handler, hwc,
- mana_hwc_tx_event_handler, hwc, &hwc_cq);
+ mana_hwc_tx_event_handler, hwc, &hwc->cq);
if (err) {
dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
goto out;
}
- hwc->cq = hwc_cq;
err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
- hwc_cq, &hwc_rxq);
+ hwc->cq, &hwc->rxq);
if (err) {
dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
goto out;
}
- hwc->rxq = hwc_rxq;
err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
- hwc_cq, &hwc_txq);
+ hwc->cq, &hwc->txq);
if (err) {
dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
goto out;
}
- hwc->txq = hwc_txq;
hwc->num_inflight_msg = q_depth;
hwc->max_req_msg_size = max_req_msg_size;
return 0;
out:
- if (hwc_txq)
- mana_hwc_destroy_wq(hwc, hwc_txq);
-
- if (hwc_rxq)
- mana_hwc_destroy_wq(hwc, hwc_rxq);
-
- if (hwc_cq)
- mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
-
- mana_gd_free_res_map(&hwc->inflight_msg_res);
+ /* mana_hwc_create_channel() will do the cleanup. */
return err;
}
@@ -716,6 +696,9 @@ int mana_hwc_create_channel(struct gdma_context *gc)
gd->pdid = INVALID_PDID;
gd->doorbell = INVALID_DOORBELL;
+ /* mana_hwc_init_queues() only creates the required data structures,
+ * and doesn't touch the HWC device.
+ */
err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
HW_CHANNEL_MAX_REQUEST_SIZE,
HW_CHANNEL_MAX_RESPONSE_SIZE);
@@ -741,42 +724,50 @@ int mana_hwc_create_channel(struct gdma_context *gc)
return 0;
out:
- kfree(hwc);
+ mana_hwc_destroy_channel(gc);
return err;
}
void mana_hwc_destroy_channel(struct gdma_context *gc)
{
struct hw_channel_context *hwc = gc->hwc.driver_data;
- struct hwc_caller_ctx *ctx;
- mana_smc_teardown_hwc(&gc->shm_channel, false);
+ if (!hwc)
+ return;
+
+ /* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
+ * non-zero, the HWC worked and we should tear down the HWC here.
+ */
+ if (gc->max_num_cqs > 0) {
+ mana_smc_teardown_hwc(&gc->shm_channel, false);
+ gc->max_num_cqs = 0;
+ }
- ctx = hwc->caller_ctx;
- kfree(ctx);
+ kfree(hwc->caller_ctx);
hwc->caller_ctx = NULL;
- mana_hwc_destroy_wq(hwc, hwc->txq);
- hwc->txq = NULL;
+ if (hwc->txq)
+ mana_hwc_destroy_wq(hwc, hwc->txq);
- mana_hwc_destroy_wq(hwc, hwc->rxq);
- hwc->rxq = NULL;
+ if (hwc->rxq)
+ mana_hwc_destroy_wq(hwc, hwc->rxq);
- mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
- hwc->cq = NULL;
+ if (hwc->cq)
+ mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
mana_gd_free_res_map(&hwc->inflight_msg_res);
hwc->num_inflight_msg = 0;
- if (hwc->gdma_dev->pdid != INVALID_PDID) {
- hwc->gdma_dev->doorbell = INVALID_DOORBELL;
- hwc->gdma_dev->pdid = INVALID_PDID;
- }
+ hwc->gdma_dev->doorbell = INVALID_DOORBELL;
+ hwc->gdma_dev->pdid = INVALID_PDID;
kfree(hwc);
gc->hwc.driver_data = NULL;
gc->hwc.gdma_context = NULL;
+
+ vfree(gc->cq_table);
+ gc->cq_table = NULL;
}
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index fc98a5ba5ed0..d047ee876f12 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -374,8 +374,8 @@ int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);
-int mana_probe(struct gdma_dev *gd);
-void mana_remove(struct gdma_dev *gd);
+int mana_probe(struct gdma_dev *gd, bool resuming);
+void mana_remove(struct gdma_dev *gd, bool suspending);
extern const struct ethtool_ops mana_ethtool_ops;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 030ae89f3a33..72cbf45c42d8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1599,7 +1599,8 @@ static int mana_init_port(struct net_device *ndev)
err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
&num_indirect_entries);
if (err) {
- netdev_err(ndev, "Failed to query info for vPort 0\n");
+ netdev_err(ndev, "Failed to query info for vPort %d\n",
+ port_idx);
goto reset_apc;
}
@@ -1610,7 +1611,7 @@ static int mana_init_port(struct net_device *ndev)
if (apc->num_queues > apc->max_queues)
apc->num_queues = apc->max_queues;
- ether_addr_copy(ndev->dev_addr, apc->mac_addr);
+ eth_hw_addr_set(ndev, apc->mac_addr);
return 0;
@@ -1667,24 +1668,23 @@ int mana_attach(struct net_device *ndev)
if (err)
return err;
- err = mana_alloc_queues(ndev);
- if (err) {
- kfree(apc->rxqs);
- apc->rxqs = NULL;
- return err;
+ if (apc->port_st_save) {
+ err = mana_alloc_queues(ndev);
+ if (err) {
+ mana_cleanup_port_context(apc);
+ return err;
+ }
}
- netif_device_attach(ndev);
-
apc->port_is_up = apc->port_st_save;
/* Ensure port state updated before txq state */
smp_wmb();
- if (apc->port_is_up) {
+ if (apc->port_is_up)
netif_carrier_on(ndev);
- netif_tx_wake_all_queues(ndev);
- }
+
+ netif_device_attach(ndev);
return 0;
}
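The reworked mana_attach() only reallocates queues when the port was up before the matching detach, and it defers netif_device_attach() to the very end, after port_is_up and the carrier state are settled, so the stack cannot transmit against a half-restored device. The resulting sequence:

    /* 1. queue allocation, only if port_st_save
     * 2. apc->port_is_up = apc->port_st_save;
     * 3. smp_wmb();                  -- port state before txq state
     * 4. netif_carrier_on(ndev);     -- if the port is up
     * 5. netif_device_attach(ndev);  -- last: stack may now transmit
     */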
@@ -1828,11 +1828,12 @@ free_net:
return err;
}
-int mana_probe(struct gdma_dev *gd)
+int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
+ struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
- struct mana_context *ac;
+ u16 num_ports = 0;
int err;
int i;
@@ -1844,44 +1845,70 @@ int mana_probe(struct gdma_dev *gd)
if (err)
return err;
- ac = kzalloc(sizeof(*ac), GFP_KERNEL);
- if (!ac)
- return -ENOMEM;
+ if (!resuming) {
+ ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return -ENOMEM;
- ac->gdma_dev = gd;
- ac->num_ports = 1;
- gd->driver_data = ac;
+ ac->gdma_dev = gd;
+ gd->driver_data = ac;
+ }
err = mana_create_eq(ac);
if (err)
goto out;
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
- MANA_MICRO_VERSION, &ac->num_ports);
+ MANA_MICRO_VERSION, &num_ports);
if (err)
goto out;
+ if (!resuming) {
+ ac->num_ports = num_ports;
+ } else {
+ if (ac->num_ports != num_ports) {
+ dev_err(dev, "The number of vPorts changed: %d->%d\n",
+ ac->num_ports, num_ports);
+ err = -EPROTO;
+ goto out;
+ }
+ }
+
+ if (ac->num_ports == 0)
+ dev_err(dev, "Failed to detect any vPort\n");
+
if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
ac->num_ports = MAX_PORTS_IN_MANA_DEV;
- for (i = 0; i < ac->num_ports; i++) {
- err = mana_probe_port(ac, i, &ac->ports[i]);
- if (err)
- break;
+ if (!resuming) {
+ for (i = 0; i < ac->num_ports; i++) {
+ err = mana_probe_port(ac, i, &ac->ports[i]);
+ if (err)
+ break;
+ }
+ } else {
+ for (i = 0; i < ac->num_ports; i++) {
+ rtnl_lock();
+ err = mana_attach(ac->ports[i]);
+ rtnl_unlock();
+ if (err)
+ break;
+ }
}
out:
if (err)
- mana_remove(gd);
+ mana_remove(gd, false);
return err;
}
-void mana_remove(struct gdma_dev *gd)
+void mana_remove(struct gdma_dev *gd, bool suspending)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
struct net_device *ndev;
+ int err;
int i;
for (i = 0; i < ac->num_ports; i++) {
@@ -1897,7 +1924,16 @@ void mana_remove(struct gdma_dev *gd)
*/
rtnl_lock();
- mana_detach(ndev, false);
+ err = mana_detach(ndev, false);
+ if (err)
+ netdev_err(ndev, "Failed to detach vPort %d: %d\n",
+ i, err);
+
+ if (suspending) {
+ /* No need to unregister the ndev. */
+ rtnl_unlock();
+ continue;
+ }
unregister_netdevice(ndev);
@@ -1910,6 +1946,10 @@ void mana_remove(struct gdma_dev *gd)
out:
mana_gd_deregister_device(gd);
+
+ if (suspending)
+ return;
+
gd->driver_data = NULL;
gd->gdma_context = NULL;
kfree(ac);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 7e74339f39ae..c3c81ae3fafd 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -211,9 +211,6 @@ static int mana_set_channels(struct net_device *ndev,
unsigned int old_count = apc->num_queues;
int err, err2;
- if (!apc->port_is_up)
- return -EOPNOTSUPP;
-
err = mana_detach(ndev, false);
if (err) {
netdev_err(ndev, "mana_detach failed: %d\n", err);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 49def6934cad..15179b9529e1 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -65,7 +65,7 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
return 0;
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index b6a73d151dec..8dd8c7f425d2 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -28,7 +28,7 @@ config MSCC_OCELOT_SWITCH
depends on BRIDGE || BRIDGE=n
depends on NET_SWITCHDEV
depends on HAS_IOMEM
- depends on OF_NET
+ depends on OF
select MSCC_OCELOT_SWITCH_LIB
select GENERIC_PHY
help
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a08e4f530c1c..e6c18b598d5c 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -20,11 +20,13 @@ struct ocelot_mact_entry {
enum macaccess_entry_type type;
};
+/* Caller must hold &ocelot->mact_lock */
static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
}
+/* Caller must hold &ocelot->mact_lock */
static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
{
u32 val;
@@ -36,6 +38,7 @@ static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
+/* Caller must hold &ocelot->mact_lock */
static void ocelot_mact_select(struct ocelot *ocelot,
const unsigned char mac[ETH_ALEN],
unsigned int vid)
@@ -67,6 +70,7 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
unsigned int mc_ports;
+ int err;
/* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
if (type == ENTRYTYPE_MACv4)
@@ -79,18 +83,28 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
if (mc_ports & BIT(ocelot->num_phys_ports))
cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+ mutex_lock(&ocelot->mact_lock);
+
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
- return ocelot_mact_wait_for_completion(ocelot);
+ err = ocelot_mact_wait_for_completion(ocelot);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ return err;
}
EXPORT_SYMBOL(ocelot_mact_learn);
int ocelot_mact_forget(struct ocelot *ocelot,
const unsigned char mac[ETH_ALEN], unsigned int vid)
{
+ int err;
+
+ mutex_lock(&ocelot->mact_lock);
+
ocelot_mact_select(ocelot, mac, vid);
/* Issue a forget command */
@@ -98,7 +112,11 @@ int ocelot_mact_forget(struct ocelot *ocelot,
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
ANA_TABLES_MACACCESS);
- return ocelot_mact_wait_for_completion(ocelot);
+ err = ocelot_mact_wait_for_completion(ocelot);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ return err;
}
EXPORT_SYMBOL(ocelot_mact_forget);
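The MAC table is driven by a select-then-command-then-poll sequence spread across several registers, so two concurrent callers could interleave and corrupt each other's operation; every caller therefore wraps the full sequence in &ocelot->mact_lock. The locked pattern now shared by learn, forget and read:

    mutex_lock(&ocelot->mact_lock);
    ocelot_mact_select(ocelot, mac, vid);              /* stage MAC + VID */
    ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);   /* issue the command */
    err = ocelot_mact_wait_for_completion(ocelot);     /* poll until done */
    mutex_unlock(&ocelot->mact_lock);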
@@ -114,7 +132,9 @@ static void ocelot_mact_init(struct ocelot *ocelot)
| ANA_AGENCTRL_LEARN_IGNORE_VLAN,
ANA_AGENCTRL);
- /* Clear the MAC table */
+ /* Clear the MAC table. We are not concurrent with anyone, so
+ * holding &ocelot->mact_lock is pointless.
+ */
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
@@ -162,48 +182,117 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
return ocelot_vlant_wait_for_completion(ocelot);
}
-static void ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
- struct ocelot_vlan native_vlan)
+static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- u32 val = 0;
+ struct ocelot_bridge_vlan *vlan;
+ int num_untagged = 0;
- ocelot_port->native_vlan = native_vlan;
+ list_for_each_entry(vlan, &ocelot->vlans, list) {
+ if (!(vlan->portmask & BIT(port)))
+ continue;
- ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(native_vlan.vid),
- REW_PORT_VLAN_CFG_PORT_VID_M,
- REW_PORT_VLAN_CFG, port);
+ if (vlan->untagged & BIT(port))
+ num_untagged++;
+ }
+
+ return num_untagged;
+}
+
+static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port)
+{
+ struct ocelot_bridge_vlan *vlan;
+ int num_tagged = 0;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list) {
+ if (!(vlan->portmask & BIT(port)))
+ continue;
+
+ if (!(vlan->untagged & BIT(port)))
+ num_tagged++;
+ }
+
+ return num_tagged;
+}
+
+/* We use native VLAN when we have to mix egress-tagged VLANs with exactly
+ * _one_ egress-untagged VLAN (_the_ native VLAN)
+ */
+static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port)
+{
+ return ocelot_port_num_tagged_vlans(ocelot, port) &&
+ ocelot_port_num_untagged_vlans(ocelot, port) == 1;
+}
+
+static struct ocelot_bridge_vlan *
+ocelot_port_find_native_vlan(struct ocelot *ocelot, int port)
+{
+ struct ocelot_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list)
+ if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port))
+ return vlan;
+
+ return NULL;
+}
+
+/* Keep REW_TAG_CFG_TAG_CFG and, if applicable, REW_PORT_VLAN_CFG_PORT_VID
+ * in sync with the bridge VLAN table and the VLAN awareness state of the
+ * port.
+ */
+static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ enum ocelot_port_tag_config tag_cfg;
+ bool uses_native_vlan = false;
if (ocelot_port->vlan_aware) {
- if (native_vlan.valid)
- /* Tag all frames except when VID == DEFAULT_VLAN */
- val = REW_TAG_CFG_TAG_CFG(1);
+ uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port);
+
+ if (uses_native_vlan)
+ tag_cfg = OCELOT_PORT_TAG_NATIVE;
+ else if (ocelot_port_num_untagged_vlans(ocelot, port))
+ tag_cfg = OCELOT_PORT_TAG_DISABLED;
else
- /* Tag all frames */
- val = REW_TAG_CFG_TAG_CFG(3);
+ tag_cfg = OCELOT_PORT_TAG_TRUNK;
} else {
- /* Port tagging disabled. */
- val = REW_TAG_CFG_TAG_CFG(0);
+ tag_cfg = OCELOT_PORT_TAG_DISABLED;
}
- ocelot_rmw_gix(ocelot, val,
+
+ ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg),
REW_TAG_CFG_TAG_CFG_M,
REW_TAG_CFG, port);
+
+ if (uses_native_vlan) {
+ struct ocelot_bridge_vlan *native_vlan;
+
+ /* Not having a native VLAN is impossible, because
+ * ocelot_port_num_untagged_vlans has returned 1.
+ * So there is no use in checking for NULL here.
+ */
+ native_vlan = ocelot_port_find_native_vlan(ocelot, port);
+
+ ocelot_rmw_gix(ocelot,
+ REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid),
+ REW_PORT_VLAN_CFG_PORT_VID_M,
+ REW_PORT_VLAN_CFG, port);
+ }
}
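ocelot_port_manage_port_tag() reduces the bridge VLAN table to one of three rewriter configurations per port; a summary of the decision above:

    /* vlan_aware, tagged VLANs + exactly one untagged -> NATIVE
     *   (tag everything except the native VID, written to PORT_VID)
     * vlan_aware, any untagged VLANs otherwise        -> DISABLED
     *   (egress everything untagged)
     * vlan_aware, only tagged VLANs                   -> TRUNK
     *   (tag everything)
     * not vlan_aware                                  -> DISABLED
     */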
/* Default VLAN to classify untagged frames to (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
- struct ocelot_vlan pvid_vlan)
+ const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u16 pvid = OCELOT_VLAN_UNAWARE_PVID;
u32 val = 0;
ocelot_port->pvid_vlan = pvid_vlan;
- if (!ocelot_port->vlan_aware)
- pvid_vlan.vid = 0;
+ if (ocelot_port->vlan_aware && pvid_vlan)
+ pvid = pvid_vlan->vid;
ocelot_rmw_gix(ocelot,
- ANA_PORT_VLAN_CFG_VLAN_VID(pvid_vlan.vid),
+ ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
ANA_PORT_VLAN_CFG_VLAN_VID_M,
ANA_PORT_VLAN_CFG, port);
@@ -212,7 +301,7 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
* classified to VLAN 0, but that is always in our RX filter, so it
* would get accepted were it not for this setting.
*/
- if (!pvid_vlan.valid && ocelot_port->vlan_aware)
+ if (!pvid_vlan && ocelot_port->vlan_aware)
val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
@@ -222,31 +311,90 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
ANA_PORT_DROP_CFG, port);
}
-static int ocelot_vlan_member_set(struct ocelot *ocelot, u32 vlan_mask, u16 vid)
+static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
+ u16 vid)
{
+ struct ocelot_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list)
+ if (vlan->vid == vid)
+ return vlan;
+
+ return NULL;
+}
+
+static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid,
+ bool untagged)
+{
+ struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
+ unsigned long portmask;
int err;
- err = ocelot_vlant_set_mask(ocelot, vid, vlan_mask);
- if (err)
+ if (vlan) {
+ portmask = vlan->portmask | BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err)
+ return err;
+
+ vlan->portmask = portmask;
+ /* Bridge VLANs can be overwritten with a different
+ * egress-tagging setting, so make sure to update this
+ * port's untagged bit either way.
+ */
+ if (untagged)
+ vlan->untagged |= BIT(port);
+ else
+ vlan->untagged &= ~BIT(port);
+
+ return 0;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ portmask = BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err) {
+ kfree(vlan);
return err;
+ }
- ocelot->vlan_mask[vid] = vlan_mask;
+ vlan->vid = vid;
+ vlan->portmask = portmask;
+ if (untagged)
+ vlan->untagged = BIT(port);
+ INIT_LIST_HEAD(&vlan->list);
+ list_add_tail(&vlan->list, &ocelot->vlans);
return 0;
}
-static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid)
-{
- return ocelot_vlan_member_set(ocelot,
- ocelot->vlan_mask[vid] | BIT(port),
- vid);
-}
-
static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
{
- return ocelot_vlan_member_set(ocelot,
- ocelot->vlan_mask[vid] & ~BIT(port),
- vid);
+ struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
+ unsigned long portmask;
+ int err;
+
+ if (!vlan)
+ return 0;
+
+ portmask = vlan->portmask & ~BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err)
+ return err;
+
+ vlan->portmask = portmask;
+ if (vlan->portmask)
+ return 0;
+
+ list_del(&vlan->list);
+ kfree(vlan);
+
+ return 0;
}
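The flat vlan_mask[VLAN_N_VID] array becomes a list of bridge VLAN entries that also track per-port untagged state; an entry exists only while at least one port is a member. The shape of the entry as inferred from these helpers (the actual declaration lives in the shared ocelot headers):

    struct ocelot_bridge_vlan {
            u16              vid;       /* VLAN ID of this entry */
            unsigned long    portmask;  /* ports that are members */
            unsigned long    untagged;  /* ports egressing this VID untagged */
            struct list_head list;      /* linked on ocelot->vlans */
    };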
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
@@ -279,7 +427,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG, port);
ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
- ocelot_port_set_native_vlan(ocelot, port, ocelot_port->native_vlan);
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -288,14 +436,20 @@ EXPORT_SYMBOL(ocelot_port_vlan_filtering);
int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged, struct netlink_ext_ack *extack)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
-
- /* Deny changing the native VLAN, but always permit deleting it */
- if (untagged && ocelot_port->native_vlan.vid != vid &&
- ocelot_port->native_vlan.valid) {
- NL_SET_ERR_MSG_MOD(extack,
- "Port already has a native VLAN");
- return -EBUSY;
+ if (untagged) {
+ /* We are adding an egress-untagged VLAN */
+ if (ocelot_port_uses_native_vlan(ocelot, port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN");
+ return -EBUSY;
+ }
+ } else {
+ /* We are adding an egress-tagged VLAN */
+ if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs");
+ return -EBUSY;
+ }
}
return 0;
@@ -307,27 +461,17 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
{
int err;
- err = ocelot_vlan_member_add(ocelot, port, vid);
+ err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
if (err)
return err;
/* Default ingress vlan classification */
- if (pvid) {
- struct ocelot_vlan pvid_vlan;
-
- pvid_vlan.vid = vid;
- pvid_vlan.valid = true;
- ocelot_port_set_pvid(ocelot, port, pvid_vlan);
- }
+ if (pvid)
+ ocelot_port_set_pvid(ocelot, port,
+ ocelot_bridge_vlan_find(ocelot, vid));
/* Untagged egress vlan classification */
- if (untagged) {
- struct ocelot_vlan native_vlan;
-
- native_vlan.vid = vid;
- native_vlan.valid = true;
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
- }
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -343,18 +487,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
return err;
/* Ingress */
- if (ocelot_port->pvid_vlan.vid == vid) {
- struct ocelot_vlan pvid_vlan = {0};
-
- ocelot_port_set_pvid(ocelot, port, pvid_vlan);
- }
+ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
- if (ocelot_port->native_vlan.vid == vid) {
- struct ocelot_vlan native_vlan = {0};
-
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
- }
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -372,13 +509,13 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
/* Configure the port VLAN memberships */
for (vid = 1; vid < VLAN_N_VID; vid++)
- ocelot_vlan_member_set(ocelot, 0, vid);
+ ocelot_vlant_set_mask(ocelot, vid, 0);
/* Because VLAN filtering is enabled, we need VID 0 to get untagged
* traffic. It is added automatically if 8021q module is loaded, but
* we can't rely on it since module may be not loaded.
*/
- ocelot_vlan_member_set(ocelot, all_ports, 0);
+ ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports);
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
@@ -951,7 +1088,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
ocelot_ifh_set_bypass(ifh, 1);
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
- ocelot_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb));
ocelot_ifh_set_rew_op(ifh, rew_op);
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
@@ -1053,6 +1190,7 @@ nla_put_failure:
}
EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
+/* Caller must hold &ocelot->mact_lock */
static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
struct ocelot_mact_entry *entry)
{
@@ -1103,33 +1241,40 @@ static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
int ocelot_fdb_dump(struct ocelot *ocelot, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ int err = 0;
int i, j;
+ /* We could take the lock just around ocelot_mact_read, but doing so
+ * thousands of times in a row seems rather pointless and inefficient.
+ */
+ mutex_lock(&ocelot->mact_lock);
+
/* Loop through all the mac tables entries. */
for (i = 0; i < ocelot->num_mact_rows; i++) {
for (j = 0; j < 4; j++) {
struct ocelot_mact_entry entry;
bool is_static;
- int ret;
- ret = ocelot_mact_read(ocelot, port, i, j, &entry);
+ err = ocelot_mact_read(ocelot, port, i, j, &entry);
/* If the entry is invalid (wrong port, invalid...),
* skip it.
*/
- if (ret == -EINVAL)
+ if (err == -EINVAL)
continue;
- else if (ret)
- return ret;
+ else if (err)
+ break;
is_static = (entry.type == ENTRYTYPE_LOCKED);
- ret = cb(entry.mac, entry.vid, is_static, data);
- if (ret)
- return ret;
+ err = cb(entry.mac, entry.vid, is_static, data);
+ if (err)
+ break;
}
}
- return 0;
+ mutex_unlock(&ocelot->mact_lock);
+
+ return err;
}
EXPORT_SYMBOL(ocelot_fdb_dump);
@@ -1680,12 +1825,11 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct ocelot_vlan pvid = {0}, native_vlan = {0};
ocelot_port->bridge = NULL;
- ocelot_port_set_pvid(ocelot, port, pvid);
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
+ ocelot_port_set_pvid(ocelot, port, NULL);
+ ocelot_port_manage_port_tag(ocelot, port);
ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
@@ -2071,9 +2215,10 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
OCELOT_TAG_PREFIX_NONE);
/* Configure the CPU port to be VLAN aware */
- ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
- ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ocelot_write_gix(ocelot,
+ ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
}
@@ -2114,6 +2259,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
+ mutex_init(&ocelot->mact_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
@@ -2130,6 +2276,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
+ INIT_LIST_HEAD(&ocelot->vlans);
ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 1952d6a1b98a..e43da09b8f91 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -25,6 +25,7 @@
#include "ocelot_rew.h"
#include "ocelot_qs.h"
+#define OCELOT_VLAN_UNAWARE_PVID 0
#define OCELOT_BUFFER_CELL_SZ 60
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 8b843d3c9189..769a8159373e 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -142,17 +142,77 @@ ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain)
return NULL;
}
+static int
+ocelot_flower_parse_ingress_vlan_modify(struct ocelot *ocelot, int port,
+ struct ocelot_vcap_filter *filter,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ocelot_port->vlan_aware) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only modify VLAN under VLAN aware bridge");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.vid_replace_ena = true;
+ filter->action.pcp_dei_ena = true;
+ filter->action.vid = a->vlan.vid;
+ filter->action.pcp = a->vlan.prio;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+
+ return 0;
+}
+
+static int
+ocelot_flower_parse_egress_vlan_modify(struct ocelot_vcap_filter *filter,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ enum ocelot_tag_tpid_sel tpid;
+
+ switch (ntohs(a->vlan.proto)) {
+ case ETH_P_8021Q:
+ tpid = OCELOT_TAG_TPID_SEL_8021Q;
+ break;
+ case ETH_P_8021AD:
+ tpid = OCELOT_TAG_TPID_SEL_8021AD;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify custom TPID");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.tag_a_tpid_sel = tpid;
+ filter->action.push_outer_tag = OCELOT_ES0_TAG;
+ filter->action.tag_a_vid_sel = OCELOT_ES0_VID_PLUS_CLASSIFIED_VID;
+ filter->action.vid_a_val = a->vlan.vid;
+ filter->action.pcp_a_val = a->vlan.prio;
+ filter->action.tag_a_pcp_sel = OCELOT_ES0_PCP;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+
+ return 0;
+}
+
static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
struct netlink_ext_ack *extack = f->common.extack;
bool allow_missing_goto_target = false;
const struct flow_action_entry *a;
enum ocelot_tag_tpid_sel tpid;
int i, chain, egress_port;
u64 rate;
+ int err;
if (!flow_action_basic_hw_stats_check(&f->rule->action,
f->common.extack))
@@ -273,26 +333,20 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_VLAN_MANGLE:
- if (filter->block_id != VCAP_IS1) {
- NL_SET_ERR_MSG_MOD(extack,
- "VLAN modify action can only be offloaded to VCAP IS1");
- return -EOPNOTSUPP;
- }
- if (filter->goto_target != -1) {
+ if (filter->block_id == VCAP_IS1) {
+ err = ocelot_flower_parse_ingress_vlan_modify(ocelot, port,
+ filter, a,
+ extack);
+ } else if (filter->block_id == VCAP_ES0) {
+ err = ocelot_flower_parse_egress_vlan_modify(filter, a,
+ extack);
+ } else {
NL_SET_ERR_MSG_MOD(extack,
- "Last action must be GOTO");
- return -EOPNOTSUPP;
+ "VLAN modify action can only be offloaded to VCAP IS1 or ES0");
+ err = -EOPNOTSUPP;
}
- if (!ocelot_port->vlan_aware) {
- NL_SET_ERR_MSG_MOD(extack,
- "Can only modify VLAN under VLAN aware bridge");
- return -EOPNOTSUPP;
- }
- filter->action.vid_replace_ena = true;
- filter->action.pcp_dei_ena = true;
- filter->action.vid = a->vlan.vid;
- filter->action.pcp = a->vlan.prio;
- filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ if (err)
+ return err;
break;
case FLOW_ACTION_PRIORITY:
if (filter->block_id != VCAP_IS1) {
@@ -340,7 +394,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
}
filter->action.tag_a_tpid_sel = tpid;
filter->action.push_outer_tag = OCELOT_ES0_TAG;
- filter->action.tag_a_vid_sel = 1;
+ filter->action.tag_a_vid_sel = OCELOT_ES0_VID;
filter->action.vid_a_val = a->vlan.vid;
filter->action.pcp_a_val = a->vlan.prio;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -678,6 +732,31 @@ static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot,
return 0;
}
+/* If we have an egress VLAN modification rule, we need to actually write the
+ * delta between the input VLAN (from the key) and the output VLAN (from the
+ * action), but the action was parsed first. So we need to patch the delta into
+ * the action here.
+ */
+static int
+ocelot_flower_patch_es0_vlan_modify(struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
+{
+ if (filter->block_id != VCAP_ES0 ||
+ filter->action.tag_a_vid_sel != OCELOT_ES0_VID_PLUS_CLASSIFIED_VID)
+ return 0;
+
+ if (filter->vlan.vid.mask != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 VLAN rewriting needs a full VLAN in the key");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.vid_a_val -= filter->vlan.vid.value;
+ filter->action.vid_a_val &= VLAN_VID_MASK;
+
+ return 0;
+}
+
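VCAP ES0 in this mode adds vid_a_val to the classified VID (OCELOT_ES0_VID_PLUS_CLASSIFIED_VID), so the action must store the delta rather than the target. A worked example of the arithmetic above:

    /* Key matches VID 100, action requests VID 200:
     *   vid_a_val = (200 - 100) & VLAN_VID_MASK = 100
     *   hardware emits 100 + 100 = 200.
     * Key 300, target 200 wraps the same way:
     *   vid_a_val = (200 - 300) & 0xfff = 3996
     *   (300 + 3996) mod 4096 = 200.
     */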
int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
@@ -701,6 +780,12 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
return ret;
}
+ ret = ocelot_flower_patch_es0_vlan_modify(filter, extack);
+ if (ret) {
+ kfree(filter);
+ return ret;
+ }
+
/* The non-optional GOTOs for the TCAM skeleton don't need
* to be actually offloaded.
*/
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 4b0941f09f71..1fa58546abdc 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -116,16 +116,16 @@ static void ocelot_mrp_save_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac,
- port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac,
- port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
}
static void ocelot_mrp_del_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
- ocelot_mact_forget(ocelot, mrp_test_dmac, port->pvid_vlan.vid);
- ocelot_mact_forget(ocelot, mrp_control_dmac, port->pvid_vlan.vid);
+ ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_VLAN_UNAWARE_PVID);
+ ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_VLAN_UNAWARE_PVID);
}
int ocelot_mrp_add(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 2545727fd5b2..eaeba60b1bba 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -418,7 +418,7 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
* with VLAN filtering feature. We need to keep it to receive
* untagged traffic.
*/
- if (vid == 0)
+ if (vid == OCELOT_VLAN_UNAWARE_PVID)
return 0;
ret = ocelot_vlan_del(ocelot, port, vid);
@@ -553,7 +553,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.forget.addr, addr);
- w.forget.vid = ocelot_port->pvid_vlan.vid;
+ w.forget.vid = OCELOT_VLAN_UNAWARE_PVID;
w.type = OCELOT_MACT_FORGET;
return ocelot_enqueue_mact_action(ocelot, &w);
@@ -567,7 +567,7 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.learn.addr, addr);
- w.learn.vid = ocelot_port->pvid_vlan.vid;
+ w.learn.vid = OCELOT_VLAN_UNAWARE_PVID;
w.learn.pgid = PGID_CPU;
w.learn.entry_type = ENTRYTYPE_LOCKED;
w.type = OCELOT_MACT_LEARN;
@@ -602,11 +602,11 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
/* Learn the new net device MAC address in the mac table. */
ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data,
- ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid_vlan.vid);
+ ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID);
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -1509,7 +1509,7 @@ static void vsc7514_phylink_validate(struct phylink_config *config,
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != ocelot_port->phy_mode) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -1528,9 +1528,8 @@ static void vsc7514_phylink_validate(struct phylink_config *config,
phylink_set(mask, 2500baseT_Full);
phylink_set(mask, 2500baseX_Full);
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void vsc7514_phylink_mac_config(struct phylink_config *config,
@@ -1705,10 +1704,9 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
NETIF_F_HW_TC;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
- memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
- dev->dev_addr[ETH_ALEN - 1] += port;
+ eth_hw_addr_gen(dev, ocelot->base_mac, port);
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
- ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
ocelot_init_port(ocelot, port);
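
The eth_hw_addr_gen() conversion above is not purely mechanical: the helper adds the port number to the base MAC as a 48-bit quantity, so it carries across octets where the old dev_addr[ETH_ALEN - 1] += port could only wrap the final byte. A userspace model of that addition (the in-kernel helper goes through ether_addr_to_u64()/u64_to_ether_addr()):

#include <stdio.h>
#include <stdint.h>

static void mac_gen(uint8_t out[6], const uint8_t base[6], unsigned int id)
{
        uint64_t u = 0;
        int i;

        for (i = 0; i < 6; i++)
                u = (u << 8) | base[i];
        u += id;                        /* carries across octets */
        for (i = 5; i >= 0; i--) {
                out[i] = u & 0xff;
                u >>= 8;
        }
}

int main(void)
{
        const uint8_t base[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0xff };
        uint8_t mac[6];
        int i;

        mac_gen(mac, base, 1);          /* 02:00:00:00:01:00, not ...:00:00 */
        for (i = 0; i < 6; i++)
                printf("%02x%c", mac[i], i == 5 ? '\n' : ':');
        return 0;
}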
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 291ae6817c26..38103b0255b0 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -969,6 +969,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
err = PTR_ERR(target);
+ of_node_put(portnp);
goto out_teardown;
}
@@ -1134,10 +1135,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_put_ports;
- err = devlink_register(devlink);
- if (err)
- goto out_ocelot_deinit;
-
err = mscc_ocelot_init_ports(pdev, ports);
if (err)
goto out_ocelot_devlink_unregister;
@@ -1160,6 +1157,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
of_node_put(ports);
+ devlink_register(devlink);
dev_info(&pdev->dev, "Ocelot switch probed\n");
@@ -1169,8 +1167,6 @@ out_ocelot_release_ports:
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
out_ocelot_devlink_unregister:
- devlink_unregister(devlink);
-out_ocelot_deinit:
ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
@@ -1183,11 +1179,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ devlink_unregister(ocelot->devlink);
ocelot_deinit_timestamp(ocelot);
ocelot_devlink_sb_unregister(ocelot);
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
- devlink_unregister(ocelot->devlink);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
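
The of_node_put(portnp) added in mscc_ocelot_init_ports() follows the usual iterator rule: for_each_available_child_of_node() holds a reference on the child it hands out and only drops it when advancing to the next iteration, so any early exit leaks the node unless the caller puts it. A hedged sketch of the rule, with do_setup() as a hypothetical stand-in for the per-port body:

for_each_available_child_of_node(ports, portnp) {
        err = do_setup(portnp);         /* hypothetical per-port setup */
        if (err) {
                of_node_put(portnp);    /* balance the iterator's reference */
                goto out_teardown;
        }
}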
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c1a75b08ced7..5736fcdafd7a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -796,7 +796,8 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
return status;
}
-static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
+static int myri10ge_update_mac_address(struct myri10ge_priv *mgp,
+ const u8 * addr)
{
struct myri10ge_cmd cmd;
int status;
@@ -3022,7 +3023,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
}
/* change the dev structure */
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
return 0;
}
@@ -3738,7 +3739,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct myri10ge_priv *mgp;
struct device *dev = &pdev->dev;
- int i;
int status = -ENXIO;
int dac_enabled;
unsigned hdr_offset, ss_offset;
@@ -3828,8 +3828,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (status)
goto abort_with_ioremap;
- for (i = 0; i < ETH_ALEN; i++)
- netdev->dev_addr[i] = mgp->mac_addr[i];
+ eth_hw_addr_set(netdev, mgp->mac_addr);
myri10ge_select_firmware(mgp);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 3f982033944b..82a22711ce45 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -809,6 +809,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned long iosize;
void __iomem *ioaddr;
const int pcibar = 1; /* PCI base address register */
+ u8 addr[ETH_ALEN];
int prev_eedata;
u32 tmp;
@@ -859,10 +860,11 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
prev_eedata = eeprom_read(ioaddr, 6);
for (i = 0; i < 3; i++) {
int eedata = eeprom_read(ioaddr, i + 7);
- dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
- dev->dev_addr[i*2+1] = eedata >> 7;
+ addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+ addr[i*2+1] = eedata >> 7;
prev_eedata = eedata;
}
+ eth_hw_addr_set(dev, addr);
np = netdev_priv(dev);
np->ioaddr = ioaddr;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 72794d158871..49ea130c9067 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1649,9 +1649,11 @@ failed:
return ret;
}
-static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
+static void ns83820_getmac(struct ns83820 *dev, struct net_device *ndev)
{
+ u8 mac[ETH_ALEN];
unsigned i;
+
for (i=0; i<3; i++) {
u32 data;
@@ -1661,9 +1663,10 @@ static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
writel(i*2, dev->base + RFCR);
data = readl(dev->base + RFDR);
- *mac++ = data;
- *mac++ = data >> 8;
+ mac[i * 2] = data;
+ mac[i * 2 + 1] = data >> 8;
}
+ eth_hw_addr_set(ndev, mac);
}
static void ns83820_set_multicast(struct net_device *ndev)
@@ -2136,7 +2139,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
/* Disable Wake On Lan */
writel(0, dev->base + WCSR);
- ns83820_getmac(dev, ndev->dev_addr);
+ ns83820_getmac(dev, ndev);
/* Yes, we support dumb IP checksum on transmit */
ndev->features |= NETIF_F_SG;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 3b6b2e61139e..d1c32c65db05 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5202,7 +5202,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/* store the MAC address in CAM */
return do_s2io_prog_unicast(dev, dev->dev_addr);
@@ -5217,7 +5217,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
* as defined in errno.h file on failure.
*/
-static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
+static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
{
struct s2io_nic *sp = netdev_priv(dev);
register u64 mac_addr = 0, perm_addr = 0;
@@ -7954,7 +7954,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Set the factory defined MAC address initially */
dev->addr_len = ETH_ALEN;
- memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr);
/* initialize number of multicast & unicast MAC entries variables */
if (sp->device_type == XFRAME_I_DEVICE) {
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 5a6032212c19..a4266d1544ab 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1073,7 +1073,7 @@ static void s2io_reset(struct s2io_nic * sp);
static int s2io_poll_msix(struct napi_struct *napi, int budget);
static int s2io_poll_inta(struct napi_struct *napi, int budget);
static void s2io_init_pci(struct s2io_nic * sp);
-static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
+static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr);
static void s2io_alarm_handle(struct timer_list *t);
static irqreturn_t
s2io_msix_ring_handle(int irq, void *dev_id);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index df4a3f3da83a..1969009a91e7 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1328,7 +1328,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
}
if (unlikely(!is_vxge_card_up(vdev))) {
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return VXGE_HW_OK;
}
@@ -1341,7 +1341,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
return -EINVAL;
}
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return status;
}
@@ -4663,7 +4663,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Store the fw version for ethtool option */
strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
- memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
+ eth_hw_addr_set(vdev->ndev, (u8 *)vdev->vpaths[0].macaddr);
/* Copy the station mac address to the list */
for (i = 0; i < vdev->no_of_vpath; i++) {
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 605a1617b195..5d3df28c648f 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -305,7 +305,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
return;
}
- ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(nn->dp.netdev, mac_addr);
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
index 2473fb5f75e5..2a5cc64227e9 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
static void
nfp_abm_stats_calculate(struct nfp_alink_stats *new,
struct nfp_alink_stats *old,
- struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_sync *bstats,
struct gnet_stats_queue *qstats)
{
_bstats_update(bstats, new->tx_bytes - old->tx_bytes,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 11c83a99b014..f469950c7265 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -182,15 +182,21 @@ static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
- unsigned int max_mtu;
+ struct nfp_bpf_vnic *bv;
+ struct bpf_prog *prog;
if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return 0;
- max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- if (new_mtu > max_mtu) {
- nn_info(nn, "BPF offload active, MTU over %u not supported\n",
- max_mtu);
+ if (nn->xdp_hw.prog) {
+ prog = nn->xdp_hw.prog;
+ } else {
+ bv = nn->app_priv;
+ prog = bv->tc_prog;
+ }
+
+ if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) {
+ nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary\n");
return -EBUSY;
}
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index d0e17eebddd9..16841bb750b7 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -560,6 +560,8 @@ bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+ unsigned int mtu);
int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
int prev_insn_idx);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 53851853562c..9d97cd281f18 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -481,19 +481,28 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
return 0;
}
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+ unsigned int mtu)
+{
+ unsigned int fw_mtu, pkt_off;
+
+ fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ pkt_off = min(prog->aux->max_pkt_offset, mtu);
+
+ return fw_mtu < pkt_off;
+}
+
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
+ unsigned int max_stack, max_prog_len;
dma_addr_t dma_addr;
void *img;
int err;
- fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
- if (fw_mtu < pkt_off) {
+ if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
return -EOPNOTSUPP;
}
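
The helper factored out above keeps both callers on the same arithmetic: the firmware value counts 64-byte units, the driver subtracts a fixed 32 bytes, and the offload is rejected only when the program can actually reach past that budget (its max packet offset, capped by the MTU). A userspace model with illustrative numbers:

#include <stdio.h>

static int offload_mtu_bad(unsigned int inl_mtu_reg,
                           unsigned int max_pkt_offset, unsigned int mtu)
{
        unsigned int fw_mtu = inl_mtu_reg * 64 - 32;
        unsigned int pkt_off = max_pkt_offset < mtu ? max_pkt_offset : mtu;

        return fw_mtu < pkt_off;        /* nonzero means reject */
}

int main(void)
{
        /* budget 2016 bytes; program reads up to 4096 but MTU caps at 1500 */
        printf("%d\n", offload_mtu_bad(32, 4096, 1500));    /* prints 0 */
        return 0;
}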
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index 36491835ac65..db297ee4d7ad 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -233,13 +233,8 @@ int nfp_devlink_params_register(struct nfp_pf *pf)
if (err <= 0)
return err;
- err = devlink_params_register(devlink, nfp_devlink_params,
- ARRAY_SIZE(nfp_devlink_params));
- if (err)
- return err;
-
- devlink_params_publish(devlink);
- return 0;
+ return devlink_params_register(devlink, nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
}
void nfp_devlink_params_unregister(struct nfp_pf *pf)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 2a432de11858..a3242b36e216 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -272,7 +272,8 @@ nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx)
for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
act[act_idx].id == FLOW_ACTION_MIRRED)
- return netif_is_gretap(act[act_idx].dev);
+ return netif_is_gretap(act[act_idx].dev) ||
+ netif_is_ip6gretap(act[act_idx].dev);
return false;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index a2926b1b3cff..784292b16290 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -703,7 +703,7 @@ nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
{
if (netif_is_vxlan(netdev))
return tun_type == NFP_FL_TUNNEL_VXLAN;
- if (netif_is_gretap(netdev))
+ if (netif_is_gretap(netdev) || netif_is_ip6gretap(netdev))
return tun_type == NFP_FL_TUNNEL_GRE;
if (netif_is_geneve(netdev))
return tun_type == NFP_FL_TUNNEL_GENEVE;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 64c0ef57ad42..224089d04d98 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -360,7 +360,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
/* check if GRE, which has no enc_ports */
- if (!netif_is_gretap(netdev)) {
+ if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
return -EOPNOTSUPP;
}
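
The same two-netdev test now appears at three nfp flower call sites (action.c, cmsg.h and offload.c). If it grew more users, a local helper could capture it as below; this is a hypothetical refactor, the patch itself open-codes the check:

static bool nfp_netdev_is_any_gretap(struct net_device *netdev)
{
        /* matches both ipgre (gretap) and ip6gre (ip6gretap) netdevs */
        return netif_is_gretap(netdev) || netif_is_ip6gretap(netdev);
}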
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index ab70179728f6..dfb4468fe287 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -837,7 +837,7 @@ nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
}
static int
-__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
+__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
{
struct nfp_tun_mac_addr_offload payload;
@@ -886,7 +886,7 @@ static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
}
static struct nfp_tun_offloaded_mac *
-nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
+nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
{
struct nfp_flower_priv *priv = app->priv;
@@ -1005,7 +1005,7 @@ err_free_ida:
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
- u8 *mac, bool mod)
+ const u8 *mac, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_flower_repr_priv *repr_priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 2643ea5948f4..154399c5453f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
}
reg->dst_lmextn = swreg_lmextn(dst);
- reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+ reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
@@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
}
reg->dst_lmextn = swreg_lmextn(dst);
- reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+ reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
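
The swreg change is behavior-preserving for 0/1 operands; it swaps a bitwise OR for a logical one, which states the boolean intent and short-circuits the second swreg_lmextn() call when the first already returned true. A trivial standalone check:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        bool l = true, r = false;

        /* identical results for bool operands; || may skip evaluating r */
        printf("bitwise %d, logical %d\n", l | r, l || r);
        return 0;
}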
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 5bfa22accf2c..850bfdf83d0a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2067,7 +2067,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
if (napi_complete_done(napi, pkts_polled))
nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
- if (r_vec->nfp_net->rx_coalesce_adapt_on) {
+ if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {};
unsigned int start;
u64 pkts, bytes;
@@ -2082,7 +2082,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
net_dim(&r_vec->rx_dim, dim_sample);
}
- if (r_vec->nfp_net->tx_coalesce_adapt_on) {
+ if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {};
unsigned int start;
u64 pkts, bytes;
@@ -3016,10 +3016,8 @@ static void nfp_net_rx_dim_work(struct work_struct *work)
/* copy RX interrupt coalesce parameters */
value = (moder.pkts << 16) | (factor * moder.usec);
- rtnl_lock();
nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value);
(void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
- rtnl_unlock();
dim->state = DIM_START_MEASURE;
}
@@ -3047,10 +3045,8 @@ static void nfp_net_tx_dim_work(struct work_struct *work)
/* copy TX interrupt coalesce parameters */
value = (moder.pkts << 16) | (factor * moder.usec);
- rtnl_lock();
nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value);
(void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
- rtnl_unlock();
dim->state = DIM_START_MEASURE;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index d10a93801344..751f76cd4f79 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -55,7 +55,7 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
return;
}
- ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
+ eth_hw_addr_set(netdev, eth_port->mac_addr);
ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
}
@@ -701,10 +701,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_unmap;
- err = devlink_register(devlink);
- if (err)
- goto err_app_clean;
-
err = nfp_shared_buf_register(pf);
if (err)
goto err_devlink_unreg;
@@ -734,6 +730,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
goto err_stop_app;
mutex_unlock(&pf->lock);
+ devlink_register(devlink);
return 0;
@@ -751,8 +748,6 @@ err_shared_buf_unreg:
nfp_shared_buf_unregister(pf);
err_devlink_unreg:
cancel_work_sync(&pf->port_refresh_work);
- devlink_unregister(devlink);
-err_app_clean:
nfp_net_pf_app_clean(pf);
err_unmap:
nfp_net_pci_unmap_mem(pf);
@@ -763,6 +758,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
{
struct nfp_net *nn, *next;
+ devlink_unregister(priv_to_devlink(pf));
mutex_lock(&pf->lock);
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
if (!nfp_net_is_data_vnic(nn))
@@ -779,7 +775,6 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
- devlink_unregister(priv_to_devlink(pf));
nfp_net_pf_free_irqs(pf);
nfp_net_pf_app_clean(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 3b8e675087de..369f6ae700c7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -499,8 +499,7 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
{
struct nfp_reprs *reprs;
- reprs = kzalloc(sizeof(*reprs) +
- num_reprs * sizeof(struct net_device *), GFP_KERNEL);
+ reprs = kzalloc(struct_size(reprs, reprs, num_reprs), GFP_KERNEL);
if (!reprs)
return NULL;
reprs->num_reprs = num_reprs;
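
struct_size() from <linux/overflow.h> computes sizeof(*reprs) plus num_reprs trailing pointers and saturates to SIZE_MAX on overflow, so an absurd count makes kzalloc() fail cleanly instead of allocating a short buffer. A userspace model of the saturating size calculation:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct reprs_model {
        unsigned int num_reprs;
        void *reprs[];                  /* flexible array member */
};

static size_t struct_size_model(size_t base, size_t elem, size_t n)
{
        if (elem && n > (SIZE_MAX - base) / elem)
                return SIZE_MAX;        /* saturate; allocation will fail */
        return base + n * elem;
}

int main(void)
{
        printf("%zu\n", struct_size_model(sizeof(struct reprs_model),
                                          sizeof(void *), 4));
        return 0;
}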
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index c0e2f4394aef..87f2268b16d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -58,7 +58,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
return;
}
- ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(nn->dp.netdev, mac_addr);
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 346145d3180e..cfeb7620ae20 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1283,7 +1283,7 @@ static int nixge_probe(struct platform_device *pdev)
mac_addr = nixge_get_nvmem_address(&pdev->dev);
if (mac_addr && is_valid_ether_addr(mac_addr)) {
- ether_addr_copy(ndev->dev_addr, mac_addr);
+ eth_hw_addr_set(ndev, mac_addr);
kfree(mac_addr);
} else {
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ef3fb4cc90af..9b530d7509a4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3175,7 +3175,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
return -EADDRNOTAVAIL;
/* synchronized against open : rtnl_lock() held by caller */
- memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, macaddr->sa_data);
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
@@ -5711,6 +5711,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
u32 phystate_orig = 0, phystate;
int phyinitialized = 0;
static int printed_version;
+ u8 mac[ETH_ALEN];
if (!printed_version++)
pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
@@ -5884,50 +5885,52 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
txreg = readl(base + NvRegTransmitPoll);
if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
/* mac address is already in correct order */
- dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[0] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[1] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[4] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[5] = (np->orig_mac[1] >> 8) & 0xff;
} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
/* mac address is already in correct order */
- dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[0] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[1] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[4] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[5] = (np->orig_mac[1] >> 8) & 0xff;
/*
* Set orig mac address back to the reversed version.
* This flag will be cleared during low power transition.
* Therefore, we should always put back the reversed address.
*/
- np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
- (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
- np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
+ np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) +
+ (mac[3] << 16) + (mac[2] << 24);
+ np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8);
} else {
/* need to reverse mac address to correct order */
- dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[0] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[1] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[4] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[5] = (np->orig_mac[0] >> 0) & 0xff;
writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
dev_dbg(&pci_dev->dev,
"%s: set workaround bit for reversed mac addr\n",
__func__);
}
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (is_valid_ether_addr(mac)) {
+ eth_hw_addr_set(dev, mac);
+ } else {
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
*/
dev_err(&pci_dev->dev,
"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
- dev->dev_addr);
+ mac);
eth_hw_addr_random(dev);
dev_err(&pci_dev->dev,
"Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index d29fe562b3de..bc39558fe82b 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -419,7 +419,7 @@ struct netdata_local {
/*
* MAC support functions
*/
-static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
+static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac)
{
u32 tmp;
@@ -1015,9 +1015,6 @@ static int lpc_eth_close(struct net_device *ndev)
napi_disable(&pldat->napi);
netif_stop_queue(ndev);
- if (ndev->phydev)
- phy_stop(ndev->phydev);
-
spin_lock_irqsave(&pldat->lock, flags);
__lpc_eth_reset(pldat);
netif_carrier_off(ndev);
@@ -1025,6 +1022,8 @@ static int lpc_eth_close(struct net_device *ndev)
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
clk_disable_unprepare(pldat->clk);
return 0;
@@ -1093,7 +1092,7 @@ static int lpc_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&pldat->lock, flags);
@@ -1232,6 +1231,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
dma_addr_t dma_handle;
struct resource *res;
+ u8 addr[ETH_ALEN];
int irq, ret;
/* Setup network interface for RMII or MII mode */
@@ -1347,10 +1347,11 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Get MAC address from current HW setting (POR state is all zeros) */
- __lpc_get_mac(pldat, ndev->dev_addr);
+ __lpc_get_mac(pldat, addr);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- of_get_mac_address(np, ndev->dev_addr);
+ of_get_ethdev_address(np, ndev);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ec3e558f890e..71d234291fc5 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2137,7 +2137,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(skaddr->sa_data)) {
ret_val = -EADDRNOTAVAIL;
} else {
- memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, skaddr->sa_data);
memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
ret_val = 0;
@@ -2555,7 +2555,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
goto err_free_adapter;
}
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
/*
* If the MAC is invalid (or just missing), display a warning
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 1a6336a56d3d..9c408328be0d 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -592,6 +592,7 @@ static int hamachi_init_one(struct pci_dev *pdev,
void *ring_space;
dma_addr_t ring_dma;
int ret = -ENOMEM;
+ u8 addr[ETH_ALEN];
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -628,8 +629,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i)
- : readb(ioaddr + StationAddr + i);
+ addr[i] = read_eeprom(ioaddr, 4 + i);
+ eth_hw_addr_set(dev, addr);
#if ! defined(final_version)
if (hamachi_debug > 4)
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index f5cd8f51be7c..12105f62cbdd 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -384,6 +384,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
#else
int bar = 1;
#endif
+ u8 addr[ETH_ALEN];
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -416,12 +417,13 @@ static int yellowfin_init_one(struct pci_dev *pdev,
if (drv_flags & DontUseEeprom)
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
+ addr[i] = ioread8(ioaddr + StnAddr + i);
else {
int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
+ addr[i] = read_eeprom(ioaddr, ee_offset + i);
}
+ eth_hw_addr_set(dev, addr);
/* Reset the chip. */
iowrite32(0x80000000, ioaddr + DMACtrl);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 7e096b2888b9..f0ace3a0e85c 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -221,7 +221,7 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
adr0 = dev->dev_addr[2] << 24 |
dev->dev_addr[3] << 16 |
@@ -1722,7 +1722,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENODEV;
goto out;
}
- memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
+ eth_hw_addr_set(dev, mac->mac_addr);
ret = mac_to_intf(mac);
if (ret < 0) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 66204106f83e..5e25411ff02f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -19,6 +19,7 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
#define DEVCMD_TIMEOUT 10
+#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
#define IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */
#define NORMAL_PPB 1000000000 /* one billion parts per billion */
@@ -69,8 +70,13 @@ struct ionic_admin_ctx {
};
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
-int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err);
+int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
+ const int err, const bool do_msg);
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
+ u8 status, int err);
+
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_set_dma_mask(struct ionic *ionic);
int ionic_setup(struct ionic *ionic);
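
IONIC_ADMINQ_TIME_SLICE pairs with the reworked ionic_adminq_wait() later in this patch: instead of one long wait_for_completion_timeout() of DEVCMD_TIMEOUT seconds, the driver polls in 100 ms slices so it can give up early when the firmware dies. A minimal sketch of that shape, where fw_is_dead() is a stand-in for the driver's firmware-status check rather than a real API:

unsigned long limit = jiffies + HZ * (ulong)DEVCMD_TIMEOUT;

do {
        if (wait_for_completion_timeout(&ctx->work,
                                        IONIC_ADMINQ_TIME_SLICE))
                break;                  /* command completed */
        if (fw_is_dead(lif))            /* hypothetical early-out check */
                return -ENXIO;
} while (time_before(jiffies, limit));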
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 39f59849720d..c58217027564 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -143,8 +143,6 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index);
debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type);
debugfs_create_u64("drop", 0400, q_dentry, &q->drop);
- debugfs_create_u64("stop", 0400, q_dentry, &q->stop);
- debugfs_create_u64("wake", 0400, q_dentry, &q->wake);
debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops);
debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops);
@@ -228,6 +226,50 @@ static int netdev_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(netdev);
+static int lif_filters_show(struct seq_file *seq, void *v)
+{
+ struct ionic_lif *lif = seq->private;
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ seq_puts(seq, "id flow state type filter\n");
+ spin_lock_bh(&lif->rx_filters.lock);
+ for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
+ head = &lif->rx_filters.by_id[i];
+ hlist_for_each_entry_safe(f, tmp, head, by_id) {
+ switch (le16_to_cpu(f->cmd.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x vlan 0x%04x\n",
+ f->filter_id, f->flow_id, f->state,
+ le16_to_cpu(f->cmd.vlan.vlan));
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x mac %pM\n",
+ f->filter_id, f->flow_id, f->state,
+ f->cmd.mac.addr);
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC_VLAN:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x macvl 0x%04x %pM\n",
+ f->filter_id, f->flow_id, f->state,
+ le16_to_cpu(f->cmd.vlan.vlan),
+ f->cmd.mac.addr);
+ break;
+ case IONIC_RX_FILTER_STEER_PKTCLASS:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x rxstr 0x%llx\n",
+ f->filter_id, f->flow_id, f->state,
+ le64_to_cpu(f->cmd.pkt_class));
+ break;
+ }
+ }
+ }
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(lif_filters);
+
void ionic_debugfs_add_lif(struct ionic_lif *lif)
{
struct dentry *lif_dentry;
@@ -239,6 +281,8 @@ void ionic_debugfs_add_lif(struct ionic_lif *lif)
debugfs_create_file("netdev", 0400, lif->dentry,
lif->netdev, &netdev_fops);
+ debugfs_create_file("filters", 0400, lif->dentry,
+ lif, &lif_filters_fops);
}
void ionic_debugfs_del_lif(struct ionic_lif *lif)
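
Reading the new "filters" debugfs file prints one line per entry in the by_id hash, formatted by the seq_printf() calls above. With invented filter IDs and state values, the output would look roughly like:

id flow state type filter
0x0001 0x00000000 0x02 mac 02:00:00:00:00:01
0x0002 0x00000000 0x02 vlan 0x0064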
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 0d6858ab511c..d57e80d44c9d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -581,7 +581,6 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq_info = &cq->info[cq->tail_idx];
- DEBUG_STATS_CQE_CNT(cq);
if (++work_done >= work_to_do)
break;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 8311086fb1f4..e5acf3bd62b2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -220,9 +220,6 @@ struct ionic_queue {
unsigned int num_descs;
unsigned int max_sg_elems;
u64 features;
- u64 dbell_count;
- u64 stop;
- u64 wake;
u64 drop;
struct ionic_dev *idev;
unsigned int type;
@@ -269,7 +266,6 @@ struct ionic_cq {
bool done_color;
unsigned int num_descs;
unsigned int desc_size;
- u64 compl_count;
void *base;
dma_addr_t base_pa;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index c7d0e195d176..4297ed9024c0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -82,22 +82,16 @@ int ionic_devlink_register(struct ionic *ionic)
struct devlink_port_attrs attrs = {};
int err;
- err = devlink_register(dl);
- if (err) {
- dev_warn(ionic->dev, "devlink_register failed: %d\n", err);
- return err;
- }
-
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
devlink_port_attrs_set(&ionic->dl_port, &attrs);
err = devlink_port_register(dl, &ionic->dl_port, 0);
if (err) {
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
- devlink_unregister(dl);
return err;
}
devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
+ devlink_register(dl);
return 0;
}
@@ -105,6 +99,6 @@ void ionic_devlink_unregister(struct ionic *ionic)
{
struct devlink *dl = priv_to_devlink(ionic);
- devlink_port_unregister(&ionic->dl_port);
devlink_unregister(dl);
+ devlink_port_unregister(&ionic->dl_port);
}
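
This hunk shows the ordering rule the whole series converts drivers to: devlink_register() is now void, is called only once the device is fully set up, and devlink_unregister() runs first on teardown, closing the window where user space could drive devlink against a half-initialized device. A hedged sketch of the pattern, with struct example_dev and setup()/teardown() as hypothetical stand-ins:

static int example_probe(struct example_dev *ed)
{
        int err = setup(ed);            /* ports, queues, etc. */

        if (err)
                return err;
        devlink_register(priv_to_devlink(ed));  /* last; cannot fail */
        return 0;
}

static void example_remove(struct example_dev *ed)
{
        devlink_unregister(priv_to_devlink(ed));        /* first */
        teardown(ed);
}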
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 3de1a03839e2..c54d735b9e2e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -11,13 +11,6 @@
#include "ionic_ethtool.h"
#include "ionic_stats.h"
-static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define IONIC_PRIV_F_SW_DBG_STATS BIT(0)
- "sw-dbg-stats",
-};
-
-#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
-
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
u32 i;
@@ -59,9 +52,6 @@ static int ionic_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_STATS:
count = ionic_get_stats_count(lif);
break;
- case ETH_SS_PRIV_FLAGS:
- count = IONIC_PRIV_FLAGS_COUNT;
- break;
}
return count;
}
@@ -75,10 +65,6 @@ static void ionic_get_strings(struct net_device *netdev,
case ETH_SS_STATS:
ionic_get_stats_strings(lif, buf);
break;
- case ETH_SS_PRIV_FLAGS:
- memcpy(buf, ionic_priv_flags_strings,
- IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
- break;
}
}
@@ -228,8 +214,7 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
break;
}
- bitmap_copy(ks->link_modes.advertising, ks->link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_copy(ks->link_modes.advertising, ks->link_modes.supported);
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
@@ -691,28 +676,6 @@ static int ionic_set_channels(struct net_device *netdev,
return err;
}
-static u32 ionic_get_priv_flags(struct net_device *netdev)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- u32 priv_flags = 0;
-
- if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- priv_flags |= IONIC_PRIV_F_SW_DBG_STATS;
-
- return priv_flags;
-}
-
-static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
-
- clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
- if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS)
- set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
-
- return 0;
-}
-
static int ionic_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *info, u32 *rules)
{
@@ -1013,8 +976,6 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_strings = ionic_get_strings,
.get_ethtool_stats = ionic_get_stats,
.get_sset_count = ionic_get_sset_count,
- .get_priv_flags = ionic_get_priv_flags,
- .set_priv_flags = ionic_set_priv_flags,
.get_rxnfc = ionic_get_rxnfc,
.get_rxfh_indir_size = ionic_get_rxfh_indir_size,
.get_rxfh_key_size = ionic_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 7f3322ce044c..63f8a8163b5f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -287,11 +287,9 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
-static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
+static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
struct ionic_queue *q;
- struct ionic_lif *lif;
- int err = 0;
struct ionic_admin_ctx ctx = {
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
@@ -301,11 +299,12 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
},
};
- if (!qcq)
+ if (!qcq) {
+ netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
return -ENXIO;
+ }
q = &qcq->q;
- lif = q->lif;
if (qcq->flags & IONIC_QCQ_F_INTR) {
struct ionic_dev *idev = &lif->ionic->idev;
@@ -318,17 +317,19 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
napi_disable(&qcq->napi);
}
- if (send_to_hw) {
- ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
- ctx.cmd.q_control.type = q->type;
- ctx.cmd.q_control.index = cpu_to_le32(q->index);
- dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
- ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ /* If there was a previous fw communication error, don't bother with
+ * sending the adminq command and just return the same error value.
+ */
+ if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
+ return fw_err;
- err = ionic_adminq_post_wait(lif, &ctx);
- }
+ ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
+ ctx.cmd.q_control.type = q->type;
+ ctx.cmd.q_control.index = cpu_to_le32(q->index);
+ dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
+ ctx.cmd.q_control.index, ctx.cmd.q_control.type);
- return err;
+ return ionic_adminq_post_wait(lif, &ctx);
}
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -1241,137 +1242,6 @@ void ionic_get_stats64(struct net_device *netdev,
ns->tx_errors = ns->tx_aborted_errors;
}
-int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_add = {
- .opcode = IONIC_CMD_RX_FILTER_ADD,
- .lif_index = cpu_to_le16(lif->index),
- .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
- },
- };
- int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
- bool mc = is_multicast_ether_addr(addr);
- struct ionic_rx_filter *f;
- int err = 0;
-
- memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
-
- spin_lock_bh(&lif->rx_filters.lock);
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f) {
- /* don't bother if we already have it and it is sync'd */
- if (f->state == IONIC_FILTER_STATE_SYNCED) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return 0;
- }
-
- /* mark preemptively as sync'd to block any parallel attempts */
- f->state = IONIC_FILTER_STATE_SYNCED;
- } else {
- /* save as SYNCED to catch any DEL requests while processing */
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- }
- spin_unlock_bh(&lif->rx_filters.lock);
- if (err)
- return err;
-
- netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
-
- /* Don't bother with the write to FW if we know there's no room,
- * we can try again on the next sync attempt.
- */
- if ((lif->nucast + lif->nmcast) >= nfilters)
- err = -ENOSPC;
- else
- err = ionic_adminq_post_wait(lif, &ctx);
-
- spin_lock_bh(&lif->rx_filters.lock);
- if (err && err != -EEXIST) {
- /* set the state back to NEW so we can try again later */
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
- f->state = IONIC_FILTER_STATE_NEW;
- set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
- }
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- if (err == -ENOSPC)
- return 0;
- else
- return err;
- }
-
- if (mc)
- lif->nmcast++;
- else
- lif->nucast++;
-
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f && f->state == IONIC_FILTER_STATE_OLD) {
- /* Someone requested a delete while we were adding
- * so update the filter info with the results from the add
- * and the data will be there for the delete on the next
- * sync cycle.
- */
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_OLD);
- } else {
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- }
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- return err;
-}
-
-int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_del = {
- .opcode = IONIC_CMD_RX_FILTER_DEL,
- .lif_index = cpu_to_le16(lif->index),
- },
- };
- struct ionic_rx_filter *f;
- int state;
- int err;
-
- spin_lock_bh(&lif->rx_filters.lock);
- f = ionic_rx_filter_by_addr(lif, addr);
- if (!f) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return -ENOENT;
- }
-
- netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
- addr, f->filter_id);
-
- state = f->state;
- ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
- ionic_rx_filter_free(lif, f);
-
- if (is_multicast_ether_addr(addr) && lif->nmcast)
- lif->nmcast--;
- else if (!is_multicast_ether_addr(addr) && lif->nucast)
- lif->nucast--;
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- if (state != IONIC_FILTER_STATE_NEW) {
- err = ionic_adminq_post_wait(lif, &ctx);
- if (err && err != -EEXIST)
- return err;
- }
-
- return 0;
-}
-
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
@@ -1407,7 +1277,7 @@ void ionic_lif_rx_mode(struct ionic_lif *lif)
rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
- /* sync the mac filters */
+ /* sync the filters */
ionic_rx_filter_sync(lif);
/* check for overflow state
@@ -1417,14 +1287,12 @@ void ionic_lif_rx_mode(struct ionic_lif *lif)
* to see if we can disable NIC PROMISC
*/
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
- if ((lif->nucast + lif->nmcast) >= nfilters) {
+
+ if (((lif->nucast + lif->nmcast) >= nfilters) ||
+ (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
- lif->uc_overflow = true;
- lif->mc_overflow = true;
- } else if (lif->uc_overflow) {
- lif->uc_overflow = false;
- lif->mc_overflow = false;
+ } else {
if (!(nd_flags & IFF_PROMISC))
rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
if (!(nd_flags & IFF_ALLMULTI))
@@ -1809,59 +1677,30 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
u16 vid)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_add = {
- .opcode = IONIC_CMD_RX_FILTER_ADD,
- .lif_index = cpu_to_le16(lif->index),
- .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
- .vlan.vlan = cpu_to_le16(vid),
- },
- };
int err;
- netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
- err = ionic_adminq_post_wait(lif, &ctx);
+ err = ionic_lif_vlan_add(lif, vid);
if (err)
return err;
- spin_lock_bh(&lif->rx_filters.lock);
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- spin_unlock_bh(&lif->rx_filters.lock);
+ ionic_lif_rx_mode(lif);
- return err;
+ return 0;
}
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
u16 vid)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_del = {
- .opcode = IONIC_CMD_RX_FILTER_DEL,
- .lif_index = cpu_to_le16(lif->index),
- },
- };
- struct ionic_rx_filter *f;
-
- spin_lock_bh(&lif->rx_filters.lock);
-
- f = ionic_rx_filter_by_vlan(lif, vid);
- if (!f) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return -ENOENT;
- }
+ int err;
- netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
- vid, f->filter_id);
+ err = ionic_lif_vlan_del(lif, vid);
+ if (err)
+ return err;
- ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
- ionic_rx_filter_free(lif, f);
- spin_unlock_bh(&lif->rx_filters.lock);
+ ionic_lif_rx_mode(lif);
- return ionic_adminq_post_wait(lif, &ctx);
+ return 0;
}
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
@@ -1953,19 +1792,19 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
if (lif->txqcqs) {
for (i = 0; i < lif->nxqs; i++)
- err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
}
if (lif->hwstamp_txq)
- err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs; i++)
- err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
}
if (lif->hwstamp_rxq)
- err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);
ionic_lif_quiesce(lif);
}
@@ -2165,7 +2004,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err = ionic_qcq_enable(lif->txqcqs[i]);
if (err) {
- derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
goto err_out;
}
}
@@ -2187,13 +2026,13 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out_hwstamp_tx:
if (lif->hwstamp_rxq)
- derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
err_out_hwstamp_rx:
i = lif->nxqs;
err_out:
while (i--) {
- derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
- derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
+ derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
return err;
@@ -2896,6 +2735,9 @@ int ionic_lif_alloc(struct ionic *ionic)
snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
+ mutex_init(&lif->queue_lock);
+ mutex_init(&lif->config_lock);
+
spin_lock_init(&lif->adminq_lock);
spin_lock_init(&lif->deferred.lock);
@@ -2909,7 +2751,7 @@ int ionic_lif_alloc(struct ionic *ionic)
if (!lif->info) {
dev_err(dev, "Failed to allocate lif info, aborting\n");
err = -ENOMEM;
- goto err_out_free_netdev;
+ goto err_out_free_mutex;
}
ionic_debugfs_add_lif(lif);
@@ -2944,6 +2786,9 @@ err_out_free_lif_info:
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
lif->info = NULL;
lif->info_pa = 0;
+err_out_free_mutex:
+ mutex_destroy(&lif->config_lock);
+ mutex_destroy(&lif->queue_lock);
err_out_free_netdev:
free_netdev(lif->netdev);
lif = NULL;
@@ -2974,11 +2819,10 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
netif_device_detach(lif->netdev);
+ mutex_lock(&lif->queue_lock);
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
- mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
- mutex_unlock(&lif->queue_lock);
}
if (netif_running(lif->netdev)) {
@@ -2989,6 +2833,8 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
ionic_reset(ionic);
ionic_qcqs_free(lif);
+ mutex_unlock(&lif->queue_lock);
+
dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
@@ -3012,9 +2858,12 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
err = ionic_port_init(ionic);
if (err)
goto err_out;
+
+ mutex_lock(&lif->queue_lock);
+
err = ionic_qcqs_alloc(lif);
if (err)
- goto err_out;
+ goto err_unlock;
err = ionic_lif_init(lif);
if (err)
@@ -3035,6 +2884,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
goto err_txrx_free;
}
+ mutex_unlock(&lif->queue_lock);
+
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
@@ -3051,6 +2902,8 @@ err_lifs_deinit:
ionic_lif_deinit(lif);
err_qcqs_free:
ionic_qcqs_free(lif);
+err_unlock:
+ mutex_unlock(&lif->queue_lock);
err_out:
dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
@@ -3084,6 +2937,9 @@ void ionic_lif_free(struct ionic_lif *lif)
kfree(lif->dbid_inuse);
lif->dbid_inuse = NULL;
+ mutex_destroy(&lif->config_lock);
+ mutex_destroy(&lif->queue_lock);
+
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
free_netdev(lif->netdev);
@@ -3106,8 +2962,6 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
- mutex_destroy(&lif->config_lock);
- mutex_destroy(&lif->queue_lock);
ionic_lif_reset(lif);
}
@@ -3273,8 +3127,6 @@ int ionic_lif_init(struct ionic_lif *lif)
return err;
lif->hw_index = le16_to_cpu(comp.hw_index);
- mutex_init(&lif->queue_lock);
- mutex_init(&lif->config_lock);
/* now that we have the hw_index we can figure out our doorbell page */
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
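
ionic_qcq_disable() now threads the caller's previous error through, so once one disable hits a fatal firmware error (-ETIMEDOUT or -ENXIO) the remaining teardown calls skip posting adminq commands and just report the same value. A userspace model of that policy:

#include <stdio.h>
#include <errno.h>

static int qcq_disable_model(int prev_err)
{
        if (prev_err == -ETIMEDOUT || prev_err == -ENXIO)
                return prev_err;        /* FW already gone; skip the command */
        return 0;                       /* pretend the adminq post succeeded */
}

int main(void)
{
        int err = 0, i;

        for (i = 0; i < 4; i++) {
                if (i == 1)
                        err = -ETIMEDOUT;       /* simulate a FW timeout */
                err = qcq_disable_model(err);
                printf("q%d: %d\n", i, err);
        }
        return 0;
}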
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 4915184f3efb..9f7ab2f17f93 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -14,9 +14,6 @@
#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */
#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */
-#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1)
-#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1)
-
#define ADD_ADDR true
#define DEL_ADDR false
#define CAN_SLEEP true
@@ -37,7 +34,6 @@ struct ionic_tx_stats {
u64 clean;
u64 linearize;
u64 crc32_csum;
- u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
u64 dma_map_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
@@ -48,7 +44,6 @@ struct ionic_rx_stats {
u64 bytes;
u64 csum_none;
u64 csum_complete;
- u64 buffers_posted;
u64 dropped;
u64 vlan_stripped;
u64 csum_error;
@@ -65,11 +60,6 @@ struct ionic_rx_stats {
#define IONIC_QCQ_F_RX_STATS BIT(4)
#define IONIC_QCQ_F_NOTIFYQ BIT(5)
-struct ionic_napi_stats {
- u64 poll_count;
- u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
-};
-
struct ionic_qcq {
void *q_base;
dma_addr_t q_base_pa;
@@ -85,7 +75,6 @@ struct ionic_qcq {
struct ionic_cq cq;
struct ionic_intr_info intr;
struct napi_struct napi;
- struct ionic_napi_stats napi_stats;
unsigned int flags;
struct dentry *dentry;
};
@@ -142,7 +131,6 @@ struct ionic_lif_sw_stats {
enum ionic_lif_state_flags {
IONIC_LIF_F_INITED,
- IONIC_LIF_F_SW_DEBUG_STATS,
IONIC_LIF_F_UP,
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FILTER_SYNC_NEEDED,
@@ -201,11 +189,11 @@ struct ionic_lif {
u16 rx_mode;
u64 hw_features;
bool registered;
- bool mc_overflow;
- bool uc_overflow;
u16 lif_type;
unsigned int nmcast;
unsigned int nucast;
+ unsigned int nvlans;
+ unsigned int max_vlans;
char name[IONIC_LIF_NAME_MAX_SZ];
union ionic_lif_identity *identity;
@@ -350,37 +338,4 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
void ionic_lif_rx_mode(struct ionic_lif *lif);
int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_queue_params *qparam);
-
-static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
-{
- struct ionic_txq_desc *desc = &q->txq[q->head_idx];
- u8 num_sg_elems;
-
- q->dbell_count += dbell;
-
- num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
- & IONIC_TXQ_DESC_NSGE_MASK);
- if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
- num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;
-
- q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++;
-}
-
-static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
- unsigned int work_done)
-{
- qcq->napi_stats.poll_count++;
-
- if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1))
- work_done = IONIC_MAX_NUM_NAPI_CNTR - 1;
-
- qcq->napi_stats.work_done_cntr[work_done]++;
-}
-
-#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
-#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
-#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
-#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
- debug_stats_napi_poll(qcq, work_done)
-
#endif /* _IONIC_LIF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 6f07bf509efe..875f4ec42efe 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/utsname.h>
#include <generated/utsrelease.h>
+#include <linux/ctype.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -211,24 +212,28 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
}
+void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
+ u8 status, int err)
+{
+ netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n",
+ ionic_opcode_to_str(opcode), opcode,
+ ionic_error_to_str(status), err);
+}
+
static int ionic_adminq_check_err(struct ionic_lif *lif,
struct ionic_admin_ctx *ctx,
- bool timeout)
+ const bool timeout,
+ const bool do_msg)
{
- struct net_device *netdev = lif->netdev;
- const char *opcode_str;
- const char *status_str;
int err = 0;
if (ctx->comp.comp.status || timeout) {
- opcode_str = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
- status_str = ionic_error_to_str(ctx->comp.comp.status);
err = timeout ? -ETIMEDOUT :
ionic_error_to_errno(ctx->comp.comp.status);
- netdev_err(netdev, "%s (%d) failed: %s (%d)\n",
- opcode_str, ctx->cmd.cmd.opcode,
- timeout ? "TIMEOUT" : status_str, err);
+ if (do_msg)
+ ionic_adminq_netdev_err_print(lif, ctx->cmd.cmd.opcode,
+ ctx->comp.comp.status, err);
if (timeout)
ionic_adminq_flush(lif);
@@ -297,24 +302,52 @@ err_out:
return err;
}
-int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err)
+int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
+ const int err, const bool do_msg)
{
struct net_device *netdev = lif->netdev;
+ unsigned long time_limit;
+ unsigned long time_start;
+ unsigned long time_done;
unsigned long remaining;
const char *name;
+ name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+
if (err) {
- if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
- name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+ if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
name, ctx->cmd.cmd.opcode, err);
- }
return err;
}
- remaining = wait_for_completion_timeout(&ctx->work,
- HZ * (ulong)DEVCMD_TIMEOUT);
- return ionic_adminq_check_err(lif, ctx, (remaining == 0));
+ time_start = jiffies;
+ time_limit = time_start + HZ * (ulong)DEVCMD_TIMEOUT;
+ do {
+ remaining = wait_for_completion_timeout(&ctx->work,
+ IONIC_ADMINQ_TIME_SLICE);
+
+ /* check for done */
+ if (remaining)
+ break;
+
+ /* interrupt the wait if FW stopped */
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ if (do_msg)
+ netdev_err(netdev, "%s (%d) interrupted, FW in reset\n",
+ name, ctx->cmd.cmd.opcode);
+ return -ENXIO;
+ }
+
+ } while (time_before(jiffies, time_limit));
+ time_done = jiffies;
+
+ dev_dbg(lif->ionic->dev, "%s: elapsed %d msecs\n",
+ __func__, jiffies_to_msecs(time_done - time_start));
+
+ return ionic_adminq_check_err(lif, ctx,
+ time_after_eq(time_done, time_limit),
+ do_msg);
}
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
@@ -323,7 +356,16 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
err = ionic_adminq_post(lif, ctx);
- return ionic_adminq_wait(lif, ctx, err);
+ return ionic_adminq_wait(lif, ctx, err, true);
+}
+
+int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+{
+ int err;
+
+ err = ionic_adminq_post(lif, ctx);
+
+ return ionic_adminq_wait(lif, ctx, err, false);
}
static void ionic_dev_cmd_clean(struct ionic *ionic)
@@ -450,13 +492,23 @@ int ionic_identify(struct ionic *ionic)
}
mutex_unlock(&ionic->dev_cmd_lock);
- dev_info(ionic->dev, "FW: %s\n", idev->dev_info.fw_version);
-
if (err) {
- dev_err(ionic->dev, "Cannot identify ionic: %dn", err);
+ dev_err(ionic->dev, "Cannot identify ionic: %d\n", err);
goto err_out;
}
+ if (isprint(idev->dev_info.fw_version[0]) &&
+ isascii(idev->dev_info.fw_version[0]))
+ dev_info(ionic->dev, "FW: %.*s\n",
+ (int)(sizeof(idev->dev_info.fw_version) - 1),
+ idev->dev_info.fw_version);
+ else
+ dev_info(ionic->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n",
+ (u8)idev->dev_info.fw_version[0],
+ (u8)idev->dev_info.fw_version[1],
+ (u8)idev->dev_info.fw_version[2],
+ (u8)idev->dev_info.fw_version[3]);
+
err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC,
&ionic->ident.lif);
if (err) {
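
ionic_adminq_wait() above replaces the single long wait_for_completion_timeout() with a sliced wait, so a firmware reset can interrupt the sleep early instead of burning the full DEVCMD_TIMEOUT. A standalone sketch of the pattern, assuming a caller-supplied fw_stopped() check in place of the IONIC_LIF_F_FW_RESET bit; DEMO_TIME_SLICE stands in for IONIC_ADMINQ_TIME_SLICE:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define DEMO_TIME_SLICE	(HZ / 10)	/* stand-in for IONIC_ADMINQ_TIME_SLICE */

static int demo_sliced_wait(struct completion *done,
			    unsigned long timeout_jiffies,
			    bool (*fw_stopped)(void))
{
	unsigned long limit = jiffies + timeout_jiffies;

	do {
		/* wake up every slice instead of once at the deadline */
		if (wait_for_completion_timeout(done, DEMO_TIME_SLICE))
			return 0;		/* command completed */
		if (fw_stopped())
			return -ENXIO;		/* FW reset interrupted the wait */
	} while (time_before(jiffies, limit));

	return -ETIMEDOUT;
}
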
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index eed2db69d708..887046838b3b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -348,7 +348,7 @@ static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
@@ -373,7 +373,7 @@ static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_settime64(struct ptp_clock_info *info,
@@ -402,7 +402,7 @@ static int ionic_phc_settime64(struct ptp_clock_info *info,
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_gettimex64(struct ptp_clock_info *info,
@@ -459,7 +459,7 @@ static long ionic_phc_aux_work(struct ptp_clock_info *info)
spin_unlock_irqrestore(&phc->lock, irqflags);
- ionic_adminq_wait(phc->lif, &ctx, err);
+ ionic_adminq_wait(phc->lif, &ctx, err, true);
return phc->aux_work_delay;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 69728f9013cb..f6e785f949f9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -239,6 +239,21 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
return NULL;
}
+static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
+ case IONIC_RX_FILTER_MATCH_MAC:
+ return ionic_rx_filter_by_addr(lif, ac->mac.addr);
+ default:
+ netdev_err(lif->netdev, "unsupported filter match %d\n",
+ le16_to_cpu(ac->match));
+ return NULL;
+ }
+}
+
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
{
struct ionic_rx_filter *f;
@@ -286,6 +301,228 @@ int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
return 0;
}
+static int ionic_lif_filter_add(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ };
+ struct ionic_rx_filter *f;
+ int nfilters;
+ int err = 0;
+
+ ctx.cmd.rx_filter_add = *ac;
+ ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD;
+ ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index);
+
+ spin_lock_bh(&lif->rx_filters.lock);
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f) {
+ /* don't bother if we already have it and it is sync'd */
+ if (f->state == IONIC_FILTER_STATE_SYNCED) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return 0;
+ }
+
+ /* mark preemptively as sync'd to block any parallel attempts */
+ f->state = IONIC_FILTER_STATE_SYNCED;
+ } else {
+ /* save as SYNCED to catch any DEL requests while processing */
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_SYNCED);
+ }
+ spin_unlock_bh(&lif->rx_filters.lock);
+ if (err)
+ return err;
+
+ /* Don't bother with the write to FW if we know there's no room,
+ * we can try again on the next sync attempt.
+ * Since the FW doesn't have a way to tell us the vlan limit,
+ * we start max_vlans at 0 until we hit the ENOSPC error.
+ */
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_dbg(lif->netdev, "%s: rx_filter add VLAN %d\n",
+ __func__, ctx.cmd.rx_filter_add.vlan.vlan);
+ if (lif->max_vlans && lif->nvlans >= lif->max_vlans)
+ err = -ENOSPC;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_dbg(lif->netdev, "%s: rx_filter add ADDR %pM\n",
+ __func__, ctx.cmd.rx_filter_add.mac.addr);
+ nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
+ if ((lif->nucast + lif->nmcast) >= nfilters)
+ err = -ENOSPC;
+ break;
+ }
+
+ if (err != -ENOSPC)
+ err = ionic_adminq_post_wait_nomsg(lif, &ctx);
+
+ spin_lock_bh(&lif->rx_filters.lock);
+
+ if (err && err != -EEXIST) {
+ /* set the state back to NEW so we can try again later */
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
+ f->state = IONIC_FILTER_STATE_NEW;
+
+ /* If -ENOSPC we won't waste time trying to sync again
+ * until there is a delete that might make room
+ */
+ if (err != -ENOSPC)
+ set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
+ }
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ if (err == -ENOSPC) {
+ if (le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
+ lif->max_vlans = lif->nvlans;
+ return 0;
+ }
+
+ ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
+ ctx.comp.comp.status, err);
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_info(lif->netdev, "rx_filter add failed: VLAN %d\n",
+ ctx.cmd.rx_filter_add.vlan.vlan);
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_info(lif->netdev, "rx_filter add failed: ADDR %pM\n",
+ ctx.cmd.rx_filter_add.mac.addr);
+ break;
+ }
+
+ return err;
+ }
+
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ lif->nvlans++;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ if (is_multicast_ether_addr(ctx.cmd.rx_filter_add.mac.addr))
+ lif->nmcast++;
+ else
+ lif->nucast++;
+ break;
+ }
+
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f && f->state == IONIC_FILTER_STATE_OLD) {
+ /* Someone requested a delete while we were adding
+ * so update the filter info with the results from the add
+ * and the data will be there for the delete on the next
+ * sync cycle.
+ */
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_OLD);
+ } else {
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_SYNCED);
+ }
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ return err;
+}
+
+int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
+ };
+
+ memcpy(&ac.mac.addr, addr, ETH_ALEN);
+
+ return ionic_lif_filter_add(lif, &ac);
+}
+
+int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
+ .vlan.vlan = cpu_to_le16(vid),
+ };
+
+ return ionic_lif_filter_add(lif, &ac);
+}
+
+static int ionic_lif_filter_del(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_del = {
+ .opcode = IONIC_CMD_RX_FILTER_DEL,
+ .lif_index = cpu_to_le16(lif->index),
+ },
+ };
+ struct ionic_rx_filter *f;
+ int state;
+ int err;
+
+ spin_lock_bh(&lif->rx_filters.lock);
+ f = ionic_rx_filter_find(lif, ac);
+ if (!f) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return -ENOENT;
+ }
+
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_dbg(lif->netdev, "%s: rx_filter del VLAN %d id %d\n",
+ __func__, ac->vlan.vlan, f->filter_id);
+ lif->nvlans--;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_dbg(lif->netdev, "%s: rx_filter del ADDR %pM id %d\n",
+ __func__, ac->mac.addr, f->filter_id);
+ if (is_multicast_ether_addr(ac->mac.addr) && lif->nmcast)
+ lif->nmcast--;
+ else if (!is_multicast_ether_addr(ac->mac.addr) && lif->nucast)
+ lif->nucast--;
+ break;
+ }
+
+ state = f->state;
+ ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
+ ionic_rx_filter_free(lif, f);
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ if (state != IONIC_FILTER_STATE_NEW) {
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err && err != -EEXIST)
+ return err;
+ }
+
+ return 0;
+}
+
+int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
+ };
+
+ memcpy(&ac.mac.addr, addr, ETH_ALEN);
+
+ return ionic_lif_filter_del(lif, &ac);
+}
+
+int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
+ .vlan.vlan = cpu_to_le16(vid),
+ };
+
+ return ionic_lif_filter_del(lif, &ac);
+}
+
struct sync_item {
struct list_head list;
struct ionic_rx_filter f;
@@ -340,14 +577,14 @@ loop_out:
* they can clear room for some new filters
*/
list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
- (void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr);
+ (void)ionic_lif_filter_del(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
}
list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
- (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);
+ (void)ionic_lif_filter_add(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
index a66e35f0833b..87b2666f248b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
@@ -44,5 +44,7 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif);
void ionic_rx_filter_sync(struct ionic_lif *lif);
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode);
int ionic_rx_filters_need_sync(struct ionic_lif *lif);
+int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid);
+int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid);
#endif /* _IONIC_RX_FILTER_H_ */
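
The filter-add path above discovers the device's VLAN capacity empirically: max_vlans starts at 0 (unknown), and the first -ENOSPC from the firmware latches it to the current count, so later adds can fail fast locally until a delete frees a slot. A minimal sketch of that latch, with illustrative names and a caller-supplied post_to_fw() standing in for the adminq post:

#include <linux/errno.h>

struct demo_caps {
	unsigned int nvlans;		/* current count */
	unsigned int max_vlans;		/* 0 = not yet discovered */
};

static int demo_vlan_add(struct demo_caps *c, int (*post_to_fw)(void))
{
	int err;

	if (c->max_vlans && c->nvlans >= c->max_vlans)
		return 0;		/* known-full: retry after a delete */

	err = post_to_fw();
	if (err == -ENOSPC) {
		c->max_vlans = c->nvlans;	/* latch the discovered limit */
		return 0;
	}
	if (!err)
		c->nvlans++;
	return err;
}
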
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index c14de5fcedea..fd6806b4a1b9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -151,33 +151,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(vlan_stripped),
};
-static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
- IONIC_TX_Q_STAT_DESC(stop),
- IONIC_TX_Q_STAT_DESC(wake),
- IONIC_TX_Q_STAT_DESC(drop),
- IONIC_TX_Q_STAT_DESC(dbell_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = {
- IONIC_CQ_STAT_DESC(compl_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = {
- IONIC_INTR_STAT_DESC(rearm_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
- IONIC_NAPI_STAT_DESC(poll_count),
-};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc)
-#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc)
-#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc)
-#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc)
-#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc)
#define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues)
@@ -253,21 +231,6 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
total += tx_queues * IONIC_NUM_TX_STATS;
total += rx_queues * IONIC_NUM_RX_STATS;
- if (test_bit(IONIC_LIF_F_UP, lif->state) &&
- test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- /* tx debug stats */
- total += tx_queues * (IONIC_NUM_DBG_CQ_STATS +
- IONIC_NUM_TX_Q_STATS +
- IONIC_NUM_DBG_INTR_STATS +
- IONIC_MAX_NUM_SG_CNTR);
-
- /* rx debug stats */
- total += rx_queues * (IONIC_NUM_DBG_CQ_STATS +
- IONIC_NUM_DBG_INTR_STATS +
- IONIC_NUM_DBG_NAPI_STATS +
- IONIC_MAX_NUM_NAPI_CNTR);
- }
-
return total;
}
@@ -279,22 +242,6 @@ static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf,
for (i = 0; i < IONIC_NUM_TX_STATS; i++)
ethtool_sprintf(buf, "tx_%d_%s", q_num,
ionic_tx_stats_desc[i].name);
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_%s", q_num,
- ionic_txq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_cq_%s", q_num,
- ionic_dbg_cq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_intr_%s", q_num,
- ionic_dbg_intr_stats_desc[i].name);
- for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++)
- ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i);
}
static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf,
@@ -305,22 +252,6 @@ static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf,
for (i = 0; i < IONIC_NUM_RX_STATS; i++)
ethtool_sprintf(buf, "rx_%d_%s", q_num,
ionic_rx_stats_desc[i].name);
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num,
- ionic_dbg_cq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num,
- ionic_dbg_intr_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num,
- ionic_dbg_napi_stats_desc[i].name);
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++)
- ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i);
}
static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
@@ -350,7 +281,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
int q_num)
{
struct ionic_tx_stats *txstats;
- struct ionic_qcq *txqcq;
int i;
txstats = &lif->txqstats[q_num];
@@ -359,38 +289,12 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
**buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]);
(*buf)++;
}
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- txqcq = lif->txqcqs[q_num];
- for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->q,
- &ionic_txq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->cq,
- &ionic_dbg_cq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->intr,
- &ionic_dbg_intr_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
- **buf = txstats->sg_cntr[i];
- (*buf)++;
- }
}
static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf,
int q_num)
{
struct ionic_rx_stats *rxstats;
- struct ionic_qcq *rxqcq;
int i;
rxstats = &lif->rxqstats[q_num];
@@ -399,31 +303,6 @@ static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf,
**buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]);
(*buf)++;
}
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- rxqcq = lif->rxqcqs[q_num];
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->cq,
- &ionic_dbg_cq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->intr,
- &ionic_dbg_intr_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->napi_stats,
- &ionic_dbg_napi_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
- **buf = rxqcq->napi_stats.work_done_cntr[i];
- (*buf)++;
- }
}
static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 37c39581b659..94384f5d2a22 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -14,8 +14,6 @@
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
- DEBUG_STATS_TXQ_POST(q, ring_dbell);
-
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
@@ -23,8 +21,6 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
-
- DEBUG_STATS_RX_BUFF_CNT(q);
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
@@ -507,8 +503,6 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
return work_done;
}
@@ -546,8 +540,6 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
return work_done;
}
@@ -591,9 +583,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
- DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
-
return rx_work_done;
}
@@ -735,7 +724,6 @@ static void ionic_tx_clean(struct ionic_queue *q,
} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
netif_wake_subqueue(q->lif->netdev, qi);
- q->wake++;
}
desc_info->bytes = skb->len;
@@ -1174,7 +1162,6 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
if (unlikely(!ionic_q_has_space(q, ndescs))) {
netif_stop_subqueue(q->lif->netdev, q->index);
- q->stop++;
stopped = 1;
/* Might race with ionic_tx_clean, check again */
@@ -1269,7 +1256,6 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
err_out_drop:
- q->stop++;
q->drop++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 344ea1143454..07dd3c3b1771 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -463,6 +463,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
u64 mac_addr;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u8 addr[ETH_ALEN];
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
@@ -474,7 +475,8 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
p = (unsigned char *)&mac_addr;
for (i = 0; i < 6; i++)
- netdev->dev_addr[i] = *(p + 5 - i);
+ addr[i] = *(p + 5 - i);
+ eth_hw_addr_set(netdev, addr);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
@@ -500,7 +502,7 @@ static int netxen_nic_set_mac(struct net_device *netdev, void *p)
}
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
adapter->macaddr_set(adapter, addr->sa_data);
if (netif_running(netdev)) {
@@ -842,7 +844,7 @@ netxen_check_options(struct netxen_adapter *adapter)
adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
/* Get FW Mini Coredump template and store it */
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (adapter->mdump.md_template == NULL ||
adapter->fw_version > prev_fw_version) {
kfree(adapter->mdump.md_template);
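
The netxen hunks above follow the treewide conversion that makes netdev->dev_addr effectively read-only: instead of writing the bytes in place, the driver assembles the (byte-reversed) address in a local buffer and installs it with eth_hw_addr_set(). A sketch of the same pattern in isolation; demo_set_reversed_mac() is a hypothetical helper:

#include <linux/etherdevice.h>

static void demo_set_reversed_mac(struct net_device *netdev, u64 mac_addr)
{
	unsigned char *p = (unsigned char *)&mac_addr;
	u8 addr[ETH_ALEN];
	int i;

	/* device stores the bytes reversed, mirroring *(p + 5 - i) above */
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = *(p + 5 - i);
	eth_hw_addr_set(netdev, addr);	/* one call replaces the in-place writes */
}
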
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index d58e021614cd..d613095b78e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -23,6 +23,8 @@
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"
+#include "qed_dbg_hsi.h"
+#include "qed_mfw_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
@@ -89,14 +91,14 @@ static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
}
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
- ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+ ((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \
~((1 << (p_hwfn->cdev->cache_shift)) - 1))
-#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
+#define for_each_hwfn(cdev, i) for (i = 0; i < (cdev)->num_hwfns; i++)
#define D_TRINE(val, cond1, cond2, true1, true2, def) \
- (val == (cond1) ? true1 : \
- (val == (cond2) ? true2 : def))
+ ((val) == (cond1) ? true1 : \
+ ((val) == (cond2) ? true2 : def))
/* forward */
struct qed_ptt_pool;
@@ -510,7 +512,7 @@ enum qed_hsi_def_type {
struct qed_simd_fp_handler {
void *token;
- void (*func)(void *);
+ void (*func)(void *cookie);
};
enum qed_slowpath_wq_flag {
@@ -703,8 +705,6 @@ struct qed_dev {
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev) QED_IS_AH(dev)
-#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev))
-#define QED_IS_E5(dev) ((dev)->type == QED_DEV_TYPE_E5)
u16 vendor_id;
@@ -875,14 +875,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
#define NUM_OF_BTB_BLOCKS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
-
/**
- * @brief qed_concrete_to_sw_fid - get the sw function id from
- * the concrete value.
+ * qed_concrete_to_sw_fid(): Get the sw function id from
+ * the concrete value.
*
- * @param concrete_fid
+ * @cdev: Qed dev pointer.
+ * @concrete_fid: Concrete fid.
*
- * @return inline u8
+ * Return: inline u8.
*/
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
u32 concrete_fid)
@@ -902,7 +902,6 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
}
#define PKT_LB_TC 9
-#define MAX_NUM_VOQS_E4 20
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
@@ -914,7 +913,7 @@ int qed_device_num_engines(struct qed_dev *cdev);
void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
-#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+#define QED_LEADING_HWFN(dev) (&(dev)->hwfns[0])
#define QED_IS_CMT(dev) ((dev)->num_hwfns > 1)
/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */
#define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin])
@@ -935,7 +934,7 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
#define PQ_FLAGS_LLT (BIT(7))
#define PQ_FLAGS_MTC (BIT(8))
-/* physical queue index for cm context intialization */
+/* physical queue index for cm context initialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
@@ -947,12 +946,18 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
+#define GET_GTT_REG_ADDR(__base, __offset, __idx) \
+ ((__base) + __offset ## _GTT_OFFSET((__idx)))
+
+#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \
+ ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx)))
+
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)
-#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
- (cdev->regview) + \
- (offset))
+#define REG_ADDR(cdev, offset) ((void __iomem *)((u8 __iomem *)\
+ ((cdev)->regview) + \
+ (offset)))
#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
@@ -960,7 +965,7 @@ bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
#define DOORBELL(cdev, db_addr, val) \
writel((u32)val, (void __iomem *)((u8 __iomem *)\
- (cdev->doorbells) + (db_addr)))
+ ((cdev)->doorbells) + (db_addr)))
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
qed_device_num_ports((_p_hwfn)->cdev))
@@ -998,4 +1003,5 @@ int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
void qed_llh_clear_all_filters(struct qed_dev *cdev);
+unsigned long qed_get_epoch_time(void);
#endif /* _QED_H */
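
GET_GTT_REG_ADDR() above relies on preprocessor token pasting: __offset must be the bare register macro name, and ## appends _GTT_OFFSET to it, so the per-index offset macro is selected at compile time. A worked expansion with a hypothetical DEMO_REG:

#define DEMO_REG_GTT_OFFSET(idx)	((idx) * 0x8)	/* illustrative offset macro */

#define GET_GTT_REG_ADDR(__base, __offset, __idx) \
	((__base) + __offset ## _GTT_OFFSET((__idx)))

/* GET_GTT_REG_ADDR(0x1000, DEMO_REG, 3)
 *   expands to (0x1000 + DEMO_REG_GTT_OFFSET((3)))
 *   == 0x1000 + 3 * 0x8 == 0x1018
 */
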
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index cb0f2a3a1ac9..452494f8c298 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -54,22 +54,22 @@
/* connection context union */
union conn_context {
- struct e4_core_conn_context core_ctx;
- struct e4_eth_conn_context eth_ctx;
- struct e4_iscsi_conn_context iscsi_ctx;
- struct e4_fcoe_conn_context fcoe_ctx;
- struct e4_roce_conn_context roce_ctx;
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+ struct iscsi_conn_context iscsi_ctx;
+ struct fcoe_conn_context fcoe_ctx;
+ struct roce_conn_context roce_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
- struct e4_iscsi_task_context iscsi_ctx;
- struct e4_fcoe_task_context fcoe_ctx;
+ struct iscsi_task_context iscsi_ctx;
+ struct fcoe_task_context fcoe_ctx;
};
/* TYPE-1 task context - ROCE */
union type1_task_context {
- struct e4_rdma_task_context roce_ctx;
+ struct rdma_task_context roce_ctx;
};
struct src_ent {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 8adb7ed0c12d..168ce2c50385 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -28,24 +28,23 @@ struct qed_tid_mem {
};
/**
- * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid
+ * qed_cxt_get_cid_info(): Returns the context info for a specific cid.
*
+ * @p_hwfn: HW device data.
+ * @p_info: In/out.
*
- * @param p_hwfn
- * @param p_info in/out
- *
- * @return int
+ * Return: Int.
*/
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
struct qed_cxt_info *p_info);
/**
- * @brief qed_cxt_get_tid_mem_info
+ * qed_cxt_get_tid_mem_info(): Returns the tid mem info.
*
- * @param p_hwfn
- * @param p_info
+ * @p_hwfn: HW device data.
+ * @p_info: in/out.
*
- * @return int
+ * Return: int.
*/
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
struct qed_tid_mem *p_info);
@@ -64,142 +63,155 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *vf_cid);
/**
- * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ * qed_cxt_set_pf_params(): Set the PF params for cxt init.
+ *
+ * @p_hwfn: HW device data.
+ * @rdma_tasks: Requested maximum.
*
- * @param p_hwfn
- * @param rdma_tasks - requested maximum
- * @return int
+ * Return: int.
*/
int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
/**
- * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters.
*
- * @param p_hwfn
- * @param last_line
+ * @p_hwfn: HW device data.
+ * @last_line: Last line.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
/**
- * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased
+ * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased.
+ *
+ * @p_hwfn: HW device data.
+ * @used_lines: Used lines.
*
- * @param p_hwfn
- * @param used_lines
+ * Return: Int.
*/
u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
/**
- * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ * qed_cxt_mngr_alloc(): Allocate and init the context manager struct.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_mngr_free
+ * qed_cxt_mngr_free() - Context manager free.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ * qed_cxt_mngr_setup(): Reset the acquired CIDs.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*/
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path.
- *
+ * qed_cxt_hw_init_common(): Initialize ILT and DQ, common phase, per path.
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path.
+ * qed_cxt_hw_init_pf(): Initialize ILT and DQ, PF phase, per path.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief qed_qm_init_pf - Initailze the QM PF phase, per path
+ * qed_qm_init_pf(): Initialize the QM PF phase, per path.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @is_pf_loading: Whether the PF is currently loading.
*
- * @param p_hwfn
- * @param p_ptt
- * @param is_pf_loading
+ * Return: Void.
*/
void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool is_pf_loading);
/**
- * @brief Reconfigures QM pf on the fly
+ * qed_qm_reconf(): Reconfigures QM pf on the fly.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_CXT_PF_CID (0xff)
/**
- * @brief qed_cxt_release - Release a cid
+ * qed_cxt_release_cid(): Release a cid.
*
- * @param p_hwfn
- * @param cid
+ * @p_hwfn: HW device data.
+ * @cid: Cid.
+ *
+ * Return: Void.
*/
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
/**
- * @brief qed_cxt_release - Release a cid belonging to a vf-queue
+ * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue.
+ *
+ * @p_hwfn: HW device data.
+ * @cid: Cid.
+ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
*
- * @param p_hwfn
- * @param cid
- * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ * Return: Void.
*/
void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
/**
- * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
+ * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type.
*
- * @param p_hwfn
- * @param type
- * @param p_cid
+ * @p_hwfn: HW device data.
+ * @type: Type.
+ * @p_cid: Pointer cid.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid);
/**
- * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type
- * for a vf-queue
+ * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type
+ * for a vf-queue.
*
- * @param p_hwfn
- * @param type
- * @param p_cid
- * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ * @p_hwfn: HW device data.
+ * @type: Type.
+ * @p_cid: Pointer cid.
+ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
*
- * @return int
+ * Return: Int.
*/
int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid, u8 vfid);
@@ -334,7 +346,10 @@ struct qed_cxt_mngr {
/* Maximal number of L2 steering filters */
u32 arfs_count;
- u8 task_type_id;
+ u16 iscsi_task_pages;
+ u16 fcoe_task_pages;
+ u16 roce_task_pages;
+ u16 eth_task_pages;
u16 task_ctx_size;
u16 conn_ctx_size;
};
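
The comment rewrites above convert the driver's old @brief/@param blocks into kernel-doc, the format scripts/kernel-doc can parse. The converted shape, shown on a hypothetical declaration with an invented @flags parameter:

/**
 * demo_function(): One-line summary ending in a period.
 *
 * @p_hwfn: HW device data.
 * @flags: Description of the parameter (hypothetical here).
 *
 * Return: Int.
 */
int demo_function(struct qed_hwfn *p_hwfn, u32 flags);
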
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
new file mode 100644
index 000000000000..9d5a0c9e1ca0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -0,0 +1,1491 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+#ifndef _QED_DBG_HSI_H
+#define _QED_DBG_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_DMAE,
+ BLOCK_PTU,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_MSTAT,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_PXPREQBUS,
+ BLOCK_BAR0_MAP,
+ BLOCK_MCP_FIO,
+ BLOCK_LAST_INIT,
+ BLOCK_PRS_FC,
+ BLOCK_PBF_FC,
+ BLOCK_NIG_LB_FC,
+ BLOCK_NIG_LB_FC_PLLH,
+ BLOCK_NIG_TX_FC_PLLH,
+ BLOCK_NIG_TX_FC,
+ BLOCK_NIG_RX_FC_PLLH,
+ BLOCK_NIG_RX_FC,
+ MAX_BLOCK_ID
+};
+
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE,
+ BIN_BUF_DBG_DUMP_REG,
+ BIN_BUF_DBG_DUMP_MEM,
+ BIN_BUF_DBG_IDLE_CHK_REGS,
+ BIN_BUF_DBG_IDLE_CHK_IMMS,
+ BIN_BUF_DBG_IDLE_CHK_RULES,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+ BIN_BUF_DBG_ATTN_BLOCKS,
+ BIN_BUF_DBG_ATTN_REGS,
+ BIN_BUF_DBG_ATTN_INDEXES,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_BLOCKS,
+ BIN_BUF_DBG_BLOCKS_CHIP_DATA,
+ BIN_BUF_DBG_BUS_LINES,
+ BIN_BUF_DBG_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
+ BIN_BUF_DBG_RESET_REGS,
+ BIN_BUF_DBG_PARSING_STRINGS,
+ MAX_BIN_DBG_BUFFER_TYPE
+};
+
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+ u16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
+};
+
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+ u16 names_offset;
+ u16 reserved1;
+ u8 num_regs;
+ u8 reserved2;
+ u16 regs_offset;
+};
+
+/* Block attentions */
+struct dbg_attn_block {
+ struct dbg_attn_block_type_data per_type_data[2];
+};
+
+/* Attention register result */
+struct dbg_attn_reg_result {
+ u32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+ u16 block_attn_offset;
+ u16 reserved;
+ u32 sts_val;
+ u32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+ u8 block_id;
+ u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+ u16 names_offset;
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+/* Mode header */
+struct dbg_mode_hdr {
+ u16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/* Attention register */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode;
+ u16 block_attn_offset;
+ u32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
+ u32 sts_clr_address;
+ u32 mask_address;
+};
+
+/* Attention types */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+/* Block debug data */
+struct dbg_block {
+ u8 name[15];
+ u8 associated_storm_letter;
+};
+
+/* Chip-specific block debug data */
+struct dbg_block_chip {
+ u8 flags;
+#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
+#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
+#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
+#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
+ u8 dbg_client_id;
+ u8 reset_reg_id;
+ u8 reset_reg_bit_offset;
+ struct dbg_mode_hdr dbg_bus_mode;
+ u16 reserved1;
+ u8 reserved2;
+ u8 num_of_dbg_bus_lines;
+ u16 dbg_bus_lines_offset;
+ u32 dbg_select_reg_addr;
+ u32 dbg_dword_enable_reg_addr;
+ u32 dbg_shift_reg_addr;
+ u32 dbg_force_valid_reg_addr;
+ u32 dbg_force_frame_reg_addr;
+};
+
+/* Chip-specific block user debug data */
+struct dbg_block_chip_user {
+ u8 num_of_dbg_bus_lines;
+ u8 has_latency_events;
+ u16 names_offset;
+};
+
+/* Block user debug data */
+struct dbg_block_user {
+ u8 name[16];
+};
+
+/* Block Debug line data */
+struct dbg_bus_line {
+ u8 data;
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+ u8 group_sizes;
+};
+
+/* Condition header for registers dump */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u8 block_id; /* block ID */
+ u8 data_size; /* size in dwords of the data following this header */
+};
+
+/* Memory data for registers dump */
+struct dbg_dump_mem {
+ u32 dword0;
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ u32 dword1;
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
+};
+
+/* Register data for registers dump */
+struct dbg_dump_reg {
+ u32 data;
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+/* Split header for registers dump */
+struct dbg_dump_split_hdr {
+ u32 hdr;
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+/* Condition header for idle check */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u16 data_size; /* size in dwords of the data following this header */
+};
+
+/* Idle Check condition register */
+struct dbg_idle_chk_cond_reg {
+ u32 data;
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ u16 num_entries;
+ u8 entry_size;
+ u8 start_entry;
+};
+
+/* Idle Check info register */
+struct dbg_idle_chk_info_reg {
+ u32 data;
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ u16 size; /* register size in dwords */
+ struct dbg_mode_hdr mode; /* Mode header */
+};
+
+/* Idle Check register */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
+ struct dbg_idle_chk_info_reg info_reg; /* info register */
+};
+
+/* Idle Check result header */
+struct dbg_idle_chk_result_hdr {
+ u16 rule_id; /* Failing rule index */
+ u16 mem_entry_id; /* Failing memory entry index */
+ u8 num_dumped_cond_regs; /* number of dumped condition registers */
+ u8 num_dumped_info_regs; /* number of dumped info registers */
+ u8 severity; /* from dbg_idle_chk_severity_types enum */
+ u8 reserved;
+};
+
+/* Idle Check result register header */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry; /* index of the first checked entry */
+ u16 size; /* register size in dwords */
+};
+
+/* Idle Check rule */
+struct dbg_idle_chk_rule {
+ u16 rule_id; /* Idle Check rule ID */
+ u8 severity; /* value from dbg_idle_chk_severity_types enum */
+ u8 cond_id; /* Condition ID */
+ u8 num_cond_regs; /* number of condition registers */
+ u8 num_info_regs; /* number of info registers */
+ u8 num_imms; /* number of immediates in the condition */
+ u8 reserved1;
+ u16 reg_offset; /* offset of this rule's registers in the idle check
+ * register array (in dbg_idle_chk_reg units).
+ */
+ u16 imm_offset; /* offset of this rule's immediate values in the
+ * immediate values array (in dwords).
+ */
+};
+
+/* Idle Check rule parsing data */
+struct dbg_idle_chk_rule_parsing_data {
+ u32 data;
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+/* Idle check severity types */
+enum dbg_idle_chk_severity_types {
+ /* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+ /* idle check failure should cause an error only if there's no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ /* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+/* Reset register */
+struct dbg_reset_reg {
+ u32 data;
+#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
+#define DBG_RESET_REG_ADDR_SHIFT 0
+#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
+#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
+#define DBG_RESET_REG_RESERVED_MASK 0x7F
+#define DBG_RESET_REG_RESERVED_SHIFT 25
+};
+
+/* Debug Bus block data */
+struct dbg_bus_block_data {
+ u8 enable_mask;
+ u8 right_shift;
+ u8 force_valid_mask;
+ u8 force_frame_mask;
+ u8 dword_mask;
+ u8 line_num;
+ u8 hw_id;
+ u8 flags;
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
+#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
+};
+
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+/* Debug Bus constraint operation types */
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ,
+ DBG_BUS_CONSTRAINT_OP_NE,
+ DBG_BUS_CONSTRAINT_OP_LT,
+ DBG_BUS_CONSTRAINT_OP_LTC,
+ DBG_BUS_CONSTRAINT_OP_LE,
+ DBG_BUS_CONSTRAINT_OP_LEC,
+ DBG_BUS_CONSTRAINT_OP_GT,
+ DBG_BUS_CONSTRAINT_OP_GTC,
+ DBG_BUS_CONSTRAINT_OP_GE,
+ DBG_BUS_CONSTRAINT_OP_GEC,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+/* Debug Bus trigger state data */
+struct dbg_bus_trigger_state_data {
+ u8 msg_len;
+ u8 constraint_dword_mask;
+ u8 storm_id;
+ u8 reserved;
+};
+
+/* Debug Bus memory address */
+struct dbg_bus_mem_addr {
+ u32 lo;
+ u32 hi;
+};
+
+/* Debug Bus PCI buffer data */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
+ struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
+ u32 size; /* PCI buffer size in bytes */
+};
+
+/* Debug Bus Storm EID range filter params */
+struct dbg_bus_storm_eid_range_params {
+ u8 min; /* Minimal event ID to filter on */
+ u8 max; /* Maximal event ID to filter on */
+};
+
+/* Debug Bus Storm EID mask filter params */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val; /* Event ID value */
+ u8 mask; /* Event ID mask. 1s in the mask = don't care bits. */
+};
+
+/* Debug Bus Storm EID filter params */
+union dbg_bus_storm_eid_params {
+ struct dbg_bus_storm_eid_range_params range;
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/* Debug Bus Storm data */
+struct dbg_bus_storm_data {
+ u8 enabled;
+ u8 mode;
+ u8 hw_id;
+ u8 eid_filter_en;
+ u8 eid_range_not_mask;
+ u8 cid_filter_en;
+ union dbg_bus_storm_eid_params eid_filter_params;
+ u32 cid;
+};
+
+/* Debug Bus data */
+struct dbg_bus_data {
+ u32 app_version;
+ u8 state;
+ u8 mode_256b_en;
+ u8 num_enabled_blocks;
+ u8 num_enabled_storms;
+ u8 target;
+ u8 one_shot_en;
+ u8 grc_input_en;
+ u8 timestamp_input_en;
+ u8 filter_en;
+ u8 adding_filter;
+ u8 filter_pre_trigger;
+ u8 filter_post_trigger;
+ u8 trigger_en;
+ u8 filter_constraint_dword_mask;
+ u8 next_trigger_state;
+ u8 next_constraint_id;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_msg_len;
+ u8 rcv_from_other_engine;
+ u8 blocks_dword_mask;
+ u8 blocks_dword_overlap;
+ u32 hw_id_mask;
+ struct dbg_bus_pci_buf_data pci_buf;
+ struct dbg_bus_block_data blocks[132];
+ struct dbg_bus_storm_data storms[6];
+};
+
+/* Debug bus states */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE,
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING,
+ DBG_BUS_STATE_STOPPED,
+ MAX_DBG_BUS_STATES
+};
+
+/* Debug Bus Storm modes */
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF,
+ DBG_BUS_STORM_MODE_PRAM_ADDR,
+ DBG_BUS_STORM_MODE_DRA_RW,
+ DBG_BUS_STORM_MODE_DRA_W,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR,
+ DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_FAST_DBGMUX,
+ DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_RH_WITH_STORE,
+ DBG_BUS_STORM_MODE_FOC,
+ DBG_BUS_STORM_MODE_EXT_STORE,
+ MAX_DBG_BUS_STORM_MODES
+};
+
+/* Debug bus target IDs */
+enum dbg_bus_targets {
+ DBG_BUS_TARGET_ID_INT_BUF,
+ DBG_BUS_TARGET_ID_NIG,
+ DBG_BUS_TARGET_ID_PCI,
+ MAX_DBG_BUS_TARGETS
+};
+
+/* GRC Dump data */
+struct dbg_grc_data {
+ u8 params_initialized;
+ u8 reserved1;
+ u16 reserved2;
+ u32 param_val[48];
+};
+
+/* Debug GRC params */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM,
+ DBG_GRC_PARAM_DUMP_MSTORM,
+ DBG_GRC_PARAM_DUMP_USTORM,
+ DBG_GRC_PARAM_DUMP_XSTORM,
+ DBG_GRC_PARAM_DUMP_YSTORM,
+ DBG_GRC_PARAM_DUMP_PSTORM,
+ DBG_GRC_PARAM_DUMP_REGS,
+ DBG_GRC_PARAM_DUMP_RAM,
+ DBG_GRC_PARAM_DUMP_PBUF,
+ DBG_GRC_PARAM_DUMP_IOR,
+ DBG_GRC_PARAM_DUMP_VFC,
+ DBG_GRC_PARAM_DUMP_CM_CTX,
+ DBG_GRC_PARAM_DUMP_PXP,
+ DBG_GRC_PARAM_DUMP_RSS,
+ DBG_GRC_PARAM_DUMP_CAU,
+ DBG_GRC_PARAM_DUMP_QM,
+ DBG_GRC_PARAM_DUMP_MCP,
+ DBG_GRC_PARAM_DUMP_DORQ,
+ DBG_GRC_PARAM_DUMP_CFC,
+ DBG_GRC_PARAM_DUMP_IGU,
+ DBG_GRC_PARAM_DUMP_BRB,
+ DBG_GRC_PARAM_DUMP_BTB,
+ DBG_GRC_PARAM_DUMP_BMB,
+ DBG_GRC_PARAM_RESERVD1,
+ DBG_GRC_PARAM_DUMP_MULD,
+ DBG_GRC_PARAM_DUMP_PRS,
+ DBG_GRC_PARAM_DUMP_DMAE,
+ DBG_GRC_PARAM_DUMP_TM,
+ DBG_GRC_PARAM_DUMP_SDM,
+ DBG_GRC_PARAM_DUMP_DIF,
+ DBG_GRC_PARAM_DUMP_STATIC,
+ DBG_GRC_PARAM_UNSTALL,
+ DBG_GRC_PARAM_RESERVED2,
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+ DBG_GRC_PARAM_CRASH,
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM,
+ DBG_GRC_PARAM_DUMP_PHY,
+ DBG_GRC_PARAM_NO_MCP,
+ DBG_GRC_PARAM_NO_FW_VER,
+ DBG_GRC_PARAM_RESERVED3,
+ DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
+ DBG_GRC_PARAM_DUMP_ILT_CDUC,
+ DBG_GRC_PARAM_DUMP_ILT_CDUT,
+ DBG_GRC_PARAM_DUMP_CAU_EXT,
+ MAX_DBG_GRC_PARAMS
+};
+
+/* Debug status codes */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
+ DBG_STATUS_NO_MATCHING_FRAMING_MODE,
+ DBG_STATUS_VFC_READ_ERROR,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_256B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_RESERVED0,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_RESERVED1,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INSUFFICIENT_HW_IDS,
+ DBG_STATUS_DBG_BUS_IN_USE,
+ DBG_STATUS_INVALID_STORM_DBG_MODE,
+ DBG_STATUS_OTHER_ENGINE_BB_ONLY,
+ DBG_STATUS_FILTER_SINGLE_HW_ID,
+ DBG_STATUS_TRIGGER_SINGLE_HW_ID,
+ DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
+ MAX_DBG_STATUS
+};
+
+/* Debug Storms IDs */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+/* Idle Check data */
+struct idle_chk_data {
+ u32 buf_size;
+ u8 buf_size_set;
+ u8 reserved1;
+ u16 reserved2;
+};
+
+struct pretend_params {
+ u8 split_type;
+ u8 reserved;
+ u16 split_id;
+};
+
+/* Debug Tools data (per HW function) */
+struct dbg_tools_data {
+ struct dbg_grc_data grc;
+ struct dbg_bus_data bus;
+ struct idle_chk_data idle_chk;
+ u8 mode_enable[40];
+ u8 block_in_reset[132];
+ u8 chip_id;
+ u8 hw_type;
+ u8 num_ports;
+ u8 num_pfs_per_port;
+ u8 num_vfs;
+ u8 initialized;
+ u8 use_dmae;
+ u8 reserved;
+ struct pretend_params pretend;
+ u32 num_regs_read;
+};
+
+/* ILT Clients */
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_RGFS,
+ ILT_CLI_TGFS,
+ MAX_ILT_CLIENTS
+};
+
+/***************************** Public Functions *******************************/
+
+/**
+ * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug
+ * arrays.
+ *
+ * @p_hwfn: HW device data.
+ * @bin_ptr: A pointer to the binary data with debug arrays.
+ *
+ * Return: enum dbg status.
+ */
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+/**
+ * qed_read_regs(): Reads registers into a buffer (using GRC).
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for reading the registers.
+ * @buf: Destination buffer.
+ * @addr: Source GRC address in dwords.
+ * @len: Number of registers to read.
+ *
+ * Return: Void.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+
+/**
+ * qed_read_fw_info(): Reads FW info from the chip.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for reading the registers.
+ * @fw_info: (Out) a pointer to write the FW info into.
+ *
+ * Return: True if the FW info was read successfully from one of the Storms,
+ * or false if all Storms are in reset.
+ *
+ * The FW info contains FW-related information, such as the FW version,
+ * FW image (main/L2B/kuku), FW timestamp, etc.
+ * The FW info is read from the internal RAM of the first Storm that is not in
+ * reset.
+ */
+bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct fw_info *fw_info);
+/**
+ * qed_dbg_grc_config(): Sets the value of a GRC parameter.
+ *
+ * @p_hwfn: HW device data.
+ * @grc_param: GRC parameter.
+ * @val: Value to set.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - Grc_param is invalid.
+ * - Val is outside the allowed boundaries.
+ */
+enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param, u32 val);
+
+/**
+ * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their
+ * default value.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
+ * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for
+ * GRC Dump.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the collected GRC data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified dump buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
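+/* Illustrative usage (a sketch, not part of this API): GRC dumping follows
+ * a size-query/allocate/dump sequence. vzalloc()/vfree() are assumed kernel
+ * allocators and process() is a hypothetical consumer; error handling is
+ * elided:
+ *
+ *	u32 size_dwords, dumped_dwords;
+ *	u32 *buf;
+ *
+ *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dwords) !=
+ *	    DBG_STATUS_OK)
+ *		return;
+ *	buf = vzalloc(size_dwords * sizeof(u32));
+ *	if (buf && qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dwords,
+ *				    &dumped_dwords) == DBG_STATUS_OK)
+ *		process(buf, dumped_dwords);
+ *	vfree(buf);
+ */
+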
+/**
+ * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size
+ * for idle check results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) required buffer size (in dwords) for the idle check
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_idle_chk_dump(): Performs idle check and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the idle check data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size
+ * for mcp trace results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The trace data in MCP scratchpad contains an invalid signature.
+ * - The bundle ID in NVRAM is invalid.
+ * - The trace meta data cannot be found (in NVRAM or image file).
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the mcp trace data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - The trace data in MCP scratchpad contains an invalid signature.
+ * - The bundle ID in NVRAM is invalid.
+ * - The trace meta data cannot be found (in NVRAM or image file).
+ * - The trace meta data cannot be read (from NVRAM or image file).
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size
+ * for reg fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into
+ * the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the reg fifo data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size
+ * for the IGU fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into
+ * the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the IGU fifo data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_protection_override_get_dump_buf_size(): Returns the required
+ * buffer size for protection override window results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for protection
+ * override data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * qed_dbg_protection_override_dump(): Reads protection override window
+ * entries and writes the results into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the protection override data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer
+ * size for FW Asserts results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the FW Asserts data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_read_attn(): Reads the attention registers of the specified
+ * block and type, and writes the results into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @block: Block ID.
+ * @attn_type: Attention type.
+ * @clear_status: Indicates if the attention status should be cleared.
+ * @results: (OUT) Pointer to write the read results into.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results);
+
+/**
+ * qed_dbg_print_attn(): Prints attention registers values in the
+ * specified results struct.
+ *
+ * @p_hwfn: HW device data.
+ * @results: Pointer to the attention read results
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+
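+/* Illustrative pairing (a sketch; BLOCK_MISCS and ATTN_TYPE_PARITY are
+ * assumed enum values from qed_hsi.h): qed_dbg_read_attn() fills the
+ * results struct, which qed_dbg_print_attn() then renders:
+ *
+ *	struct dbg_attn_block_result res;
+ *
+ *	if (qed_dbg_read_attn(p_hwfn, p_ptt, BLOCK_MISCS, ATTN_TYPE_PARITY,
+ *			      true, &res) == DBG_STATUS_OK)
+ *		qed_dbg_print_attn(p_hwfn, &res);
+ */
+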
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+ u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
+#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
+#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
+#define MCP_TRACE_FORMAT_LEN_OFFSET 24
+
+ char *format_str;
+};
+
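+/* The data dword above packs the module ID, log level, parameter sizes and
+ * format length. An illustrative decode (a sketch; fmt is a hypothetical
+ * struct mcp_trace_format pointer) using the masks and offsets defined
+ * above:
+ *
+ *	u32 module = (fmt->data & MCP_TRACE_FORMAT_MODULE_MASK) >>
+ *		     MCP_TRACE_FORMAT_MODULE_OFFSET;
+ *	u32 len = (fmt->data & MCP_TRACE_FORMAT_LEN_MASK) >>
+ *		  MCP_TRACE_FORMAT_LEN_OFFSET;
+ */
+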
+/* MCP Trace Meta data structure */
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+ bool is_allocated;
+};
+
+/* Debug Tools user data */
+struct dbg_tools_user_data {
+ struct mcp_trace_meta mcp_trace_meta;
+ const u32 *mcp_trace_user_meta_buf;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_NAME_LEN 16
+
+/***************************** Public Functions *******************************/
+
+/**
+ * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with
+ * debug arrays.
+ *
+ * @p_hwfn: HW device data.
+ * @bin_ptr: a pointer to the binary data with debug arrays.
+ *
+ * Return: dbg_status.
+ */
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+/**
+ * qed_dbg_alloc_user_data(): Allocates user debug data.
+ *
+ * @p_hwfn: HW device data.
+ * @user_data_ptr: (OUT) a pointer to the allocated memory.
+ *
+ * Return: dbg_status.
+ */
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr);
+
+/**
+ * qed_dbg_get_status_str(): Returns a string for the specified status.
+ *
+ * @status: A debug status code.
+ *
+ * Return: A string for the specified status.
+ */
+const char *qed_dbg_get_status_str(enum dbg_status status);
+
+/**
+ * qed_get_idle_chk_results_buf_size(): Returns the required buffer size
+ * for idle check results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * qed_print_idle_chk_results(): Prints idle check results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the idle check results.
+ * @num_errors: (OUT) number of errors found in idle check.
+ * @num_warnings: (OUT) number of warnings found in idle check.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors,
+ u32 *num_warnings);
+
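+/* Illustrative flow (a sketch, error handling elided; vzalloc() is the
+ * assumed allocator): parsing a collected idle check dump is a
+ * size-query/print sequence:
+ *
+ *	u32 parsed_size, num_errors, num_warnings;
+ *	char *text;
+ *
+ *	qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
+ *					  num_dumped_dwords, &parsed_size);
+ *	text = vzalloc(parsed_size);
+ *	if (text)
+ *		qed_print_idle_chk_results(p_hwfn, dump_buf,
+ *					   num_dumped_dwords, text,
+ *					   &num_errors, &num_warnings);
+ */
+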
+/**
+ * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @meta_buf: Meta buffer.
+ *
+ * Return: Void.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ */
+void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf);
+
+/**
+ * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size
+ * for MCP Trace results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP Trace dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_mcp_trace_results(): Prints MCP Trace results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and
+ * keeps the MCP trace meta data allocated, to support continuous MCP Trace
+ * parsing. After the continuous parsing ends, qed_mcp_trace_free_meta_data()
+ * should be called to free the meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ char *results_buf);
+
+/**
+ * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_bytes: Number of bytes that were dumped.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+ u8 *dump_buf,
+ u32 num_dumped_bytes,
+ char *results_buf);
+
+/**
+ * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
+ * Should be called after continuous MCP Trace parsing.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
+
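+/* Illustrative sequence for continuous MCP Trace parsing (a sketch based on
+ * the comments above; chunk0_buf/chunk1_buf are hypothetical dump buffers):
+ *
+ *	qed_print_mcp_trace_results_cont(p_hwfn, chunk0_buf, results_buf);
+ *	qed_print_mcp_trace_results_cont(p_hwfn, chunk1_buf, results_buf);
+ *	qed_mcp_trace_free_meta_data(p_hwfn);
+ */
+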
+/**
+ * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size
+ * for reg_fifo results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Reg fifo dump buffer.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_reg_fifo_results(): Prints reg fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Reg fifo dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the reg fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size
+ * for igu_fifo results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: IGU fifo dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_igu_fifo_results(): Prints IGU fifo results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: IGU fifo dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the IGU fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_protection_override_results_buf_size(): Returns the required
+ * buffer size for protection override results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Protection override dump buffer.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_protection_override_results(): Prints protection override
+ * results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Protection override dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the protection override results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size
+ * for FW Asserts results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: FW Asserts dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_fw_asserts_results(): Prints FW Asserts results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: FW Asserts dump buffer, starting from the header.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the FW Asserts results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_dbg_parse_attn(): Parses and prints attention registers values in
+ * the specified results struct.
+ *
+ * @p_hwfn: HW device data.
+ * @results: Pointer to the attention read results
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index e1798925b444..ea839e605577 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -84,16 +84,17 @@ struct qed_dcbx_mib_meta_data {
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#ifdef CONFIG_DCB
-int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params);
-int qed_dcbx_config_params(struct qed_hwfn *,
- struct qed_ptt *, struct qed_dcbx_set *, bool);
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit);
#endif
/* QED local interface routines */
int
-qed_dcbx_mib_update_event(struct qed_hwfn *,
- struct qed_ptt *, enum qed_mib_read_type);
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type);
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 6ab3e60d4928..e3edca187ddf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#include <linux/module.h>
@@ -10,6 +10,7 @@
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
+#include "qed_dbg_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -121,6 +122,11 @@ static u32 cond0(const u32 *r, const u32 *imm)
return (r[0] & ~r[1]) != imm[0];
}
+static u32 cond14(const u32 *r, const u32 *imm)
+{
+ return (r[0] | imm[0]) != imm[1];
+}
+
static u32 cond1(const u32 *r, const u32 *imm)
{
return r[0] != imm[0];
@@ -172,6 +178,7 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond11,
cond12,
cond13,
+ cond14,
};
#define NUM_PHYS_BLOCKS 84
@@ -208,10 +215,61 @@ enum dbg_bus_frame_modes {
DBG_BUS_NUM_FRAME_MODES
};
+/* Debug bus SEMI frame modes */
+enum dbg_bus_semi_frame_modes {
+ DBG_BUS_SEMI_FRAME_MODE_4FAST = 0, /* 4 fast dw */
+ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */
+	DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw, 3 slow dw */
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3, /* 4 slow dw */
+ DBG_BUS_SEMI_NUM_FRAME_MODES
+};
+
+/* Debug bus filter types */
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF, /* Filter always off */
+ DBG_BUS_FILTER_TYPE_PRE, /* Filter before trigger only */
+ DBG_BUS_FILTER_TYPE_POST, /* Filter after trigger only */
+ DBG_BUS_FILTER_TYPE_ON /* Filter always on */
+};
+
+/* Debug bus pre-trigger recording types */
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_FROM_ZERO, /* Record from time 0 */
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, /* Record some chunks before trigger */
+ DBG_BUS_PRE_TRIGGER_DROP /* Drop data before trigger */
+};
+
+/* Debug bus post-trigger recording types */
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD, /* Start recording after trigger */
+ DBG_BUS_POST_TRIGGER_DROP /* Drop data after trigger */
+};
+
+/* Debug bus other engine mode */
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX
+};
+
+/* DBG block Framing mode definitions */
+struct framing_mode_defs {
+ u8 id;
+ u8 blocks_dword_mask;
+ u8 storms_dword_mask;
+ u8 semi_framing_mode_id;
+ u8 full_buf_thr;
+};
+
/* Chip constant definitions */
struct chip_defs {
const char *name;
+ u8 dwords_per_cycle;
+ u8 num_framing_modes;
u32 num_ilt_pages;
+ struct framing_mode_defs *framing_modes;
};
/* HW type constant definitions */
@@ -334,7 +392,7 @@ struct split_type_defs {
#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
- (int)(FIELD_BIT_OFFSET(type, field) / 32)
+ ((int)(FIELD_BIT_OFFSET(type, field) / 32))
#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
@@ -431,11 +489,13 @@ struct split_type_defs {
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_COMMON_GLOBAL_PARAMS 9
+#define NUM_COMMON_GLOBAL_PARAMS 11
#define MAX_RECURSION_DEPTH 10
+#define FW_IMG_KUKU 0
#define FW_IMG_MAIN 1
+#define FW_IMG_L2B 2
#define REG_FIFO_ELEMENT_DWORDS 2
#define REG_FIFO_DEPTH_ELEMENTS 32
@@ -464,10 +524,25 @@ struct split_type_defs {
/***************************** Constant Arrays *******************************/
+/* DBG block framing mode definitions, in descending preference order */
+static struct framing_mode_defs s_framing_mode_defs[4] = {
+ {DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf,
+ DBG_BUS_SEMI_FRAME_MODE_4FAST,
+ 10},
+ {DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW,
+ 10},
+ {DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc,
+ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10},
+ {DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8,
+ DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10}
+};
+
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
- {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
+ {"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2,
+ s_framing_mode_defs},
+ {"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2,
+ s_framing_mode_defs}
};
/* Storm constant definitions array */
@@ -477,8 +552,8 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
true,
TSEM_REG_FAST_MEMORY,
- TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+ TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
+ TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
TCM_REG_CTX_RBC_ACCS,
{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
@@ -491,10 +566,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
false,
MSEM_REG_FAST_MEMORY,
- MSEM_REG_DBG_FRAME_MODE_BB_K2,
- MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- MSEM_REG_SLOW_DBG_MODE_BB_K2,
- MSEM_REG_DBG_MODE1_CFG_BB_K2,
+ MSEM_REG_DBG_FRAME_MODE,
+ MSEM_REG_SLOW_DBG_ACTIVE,
+ MSEM_REG_SLOW_DBG_MODE,
+ MSEM_REG_DBG_MODE1_CFG,
MSEM_REG_SYNC_DBG_EMPTY,
MSEM_REG_DBG_GPRE_VECT,
MCM_REG_CTX_RBC_ACCS,
@@ -508,10 +583,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
false,
USEM_REG_FAST_MEMORY,
- USEM_REG_DBG_FRAME_MODE_BB_K2,
- USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- USEM_REG_SLOW_DBG_MODE_BB_K2,
- USEM_REG_DBG_MODE1_CFG_BB_K2,
+ USEM_REG_DBG_FRAME_MODE,
+ USEM_REG_SLOW_DBG_ACTIVE,
+ USEM_REG_SLOW_DBG_MODE,
+ USEM_REG_DBG_MODE1_CFG,
USEM_REG_SYNC_DBG_EMPTY,
USEM_REG_DBG_GPRE_VECT,
UCM_REG_CTX_RBC_ACCS,
@@ -525,10 +600,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
false,
XSEM_REG_FAST_MEMORY,
- XSEM_REG_DBG_FRAME_MODE_BB_K2,
- XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- XSEM_REG_SLOW_DBG_MODE_BB_K2,
- XSEM_REG_DBG_MODE1_CFG_BB_K2,
+ XSEM_REG_DBG_FRAME_MODE,
+ XSEM_REG_SLOW_DBG_ACTIVE,
+ XSEM_REG_SLOW_DBG_MODE,
+ XSEM_REG_DBG_MODE1_CFG,
XSEM_REG_SYNC_DBG_EMPTY,
XSEM_REG_DBG_GPRE_VECT,
XCM_REG_CTX_RBC_ACCS,
@@ -541,10 +616,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
false,
YSEM_REG_FAST_MEMORY,
- YSEM_REG_DBG_FRAME_MODE_BB_K2,
- YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- YSEM_REG_SLOW_DBG_MODE_BB_K2,
- YSEM_REG_DBG_MODE1_CFG_BB_K2,
+ YSEM_REG_DBG_FRAME_MODE,
+ YSEM_REG_SLOW_DBG_ACTIVE,
+ YSEM_REG_SLOW_DBG_MODE,
+ YSEM_REG_DBG_MODE1_CFG,
YSEM_REG_SYNC_DBG_EMPTY,
YSEM_REG_DBG_GPRE_VECT,
YCM_REG_CTX_RBC_ACCS,
@@ -558,10 +633,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
true,
PSEM_REG_FAST_MEMORY,
- PSEM_REG_DBG_FRAME_MODE_BB_K2,
- PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- PSEM_REG_SLOW_DBG_MODE_BB_K2,
- PSEM_REG_DBG_MODE1_CFG_BB_K2,
+ PSEM_REG_DBG_FRAME_MODE,
+ PSEM_REG_SLOW_DBG_ACTIVE,
+ PSEM_REG_SLOW_DBG_MODE,
+ PSEM_REG_DBG_MODE1_CFG,
PSEM_REG_SYNC_DBG_EMPTY,
PSEM_REG_DBG_GPRE_VECT,
PCM_REG_CTX_RBC_ACCS,
@@ -575,7 +650,8 @@ static struct hw_type_defs s_hw_type_defs[] = {
{"asic", 1, 256, 32768},
{"reserved", 0, 0, 0},
{"reserved2", 0, 0, 0},
- {"reserved3", 0, 0, 0}
+ {"reserved3", 0, 0, 0},
+ {"reserved4", 0, 0, 0}
};
static struct grc_param_defs s_grc_param_defs[] = {
@@ -772,25 +848,25 @@ static struct rbc_reset_defs s_rbc_reset_defs[] = {
static struct phy_defs s_phy_defs[] = {
{"nw_phy", NWS_REG_NWS_CMU_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
- {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
- {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
- {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
+ {"sgmii_phy", MS_REG_MS_CMU_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};
static struct split_type_defs s_split_type_defs[] = {
@@ -810,8 +886,17 @@ static struct split_type_defs s_split_type_defs[] = {
{"vf"}
};
+/******************************** Variables **********************************/
+
+/* The version of the calling app */
+static u32 s_app_ver;
+
/**************************** Private Functions ******************************/
+static void qed_static_asserts(void)
+{
+}
+
/* Reads and returns a single dword from the specified unaligned buffer */
static u32 qed_read_unaligned_dword(u8 *buf)
{
@@ -870,6 +955,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
if (dev_data->initialized)
return DBG_STATUS_OK;
+ if (!s_app_ver)
+ return DBG_STATUS_APP_VERSION_NOT_SET;
+
/* Set chip */
if (QED_IS_K2(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_K2;
@@ -990,11 +1078,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
- /* qed_rq() fetches data in CPU byteorder. Swap it back to
- * the device's to get right structure layout.
- */
- cpu_to_le32_array(dest, size);
-
/* Read FW version info from Storm RAM */
size = le32_to_cpu(fw_info_location.size);
if (!size || size > sizeof(*fw_info))
@@ -1006,8 +1089,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
-
- cpu_to_le32_array(dest, size);
}
/* Dumps the specified string to the specified buffer.
@@ -1117,9 +1198,15 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"Unexpected debug error: invalid FW version string\n");
switch (fw_info.ver.image_id) {
+ case FW_IMG_KUKU:
+ strcpy(fw_img_str, "kuku");
+ break;
case FW_IMG_MAIN:
strcpy(fw_img_str, "main");
break;
+ case FW_IMG_L2B:
+ strcpy(fw_img_str, "l2b");
+ break;
default:
strcpy(fw_img_str, "unknown");
break;
@@ -1255,6 +1342,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
s_hw_type_defs[dev_data->hw_type].name);
offset += qed_dump_num_param(dump_buf + offset,
dump, "pci-func", p_hwfn->abs_pf_id);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "epoch", qed_get_epoch_time());
if (dev_data->chip_id == CHIP_BB)
offset += qed_dump_num_param(dump_buf + offset,
dump, "path", QED_PATH_ID(p_hwfn));
@@ -1590,7 +1679,7 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
continue;
reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STALL_0_BB_K2;
+ SEM_FAST_REG_STALL_0;
qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
}
@@ -1703,8 +1792,8 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
const struct dbg_attn_reg *attn_reg_arr;
+ u32 block_id, sts_clr_address;
u8 reg_idx, num_attn_regs;
- u32 block_id;
for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
if (dev_data->block_in_reset[block_id])
@@ -1728,16 +1817,103 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
GET_FIELD(reg_data->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
+ sts_clr_address = reg_data->sts_clr_address;
/* If Mode match: clear parity status */
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset))
qed_rd(p_hwfn, p_ptt,
- DWORDS_TO_BYTES(reg_data->
- sts_clr_address));
+ DWORDS_TO_BYTES(sts_clr_address));
}
}
}
+/* Finds the meta data image in NVRAM */
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+ struct mcp_file_att file_att;
+ int nvm_result;
+
+ /* Call NVRAM get file command */
+ nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size,
+ (u32 *)&file_att, false);
+
+ /* Check response */
+ if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
+ FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Update return values */
+ *nvram_offset_bytes = file_att.nvm_start_addr;
+ *nvram_size_bytes = file_att.len;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
+ image_type, *nvram_offset_bytes, *nvram_size_bytes);
+
+ /* Check alignment */
+ if (*nvram_size_bytes & 0x3)
+ return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+
+ return DBG_STATUS_OK;
+}
+
+/* Reads data from NVRAM */
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
+ s32 bytes_left = nvram_size_bytes;
+ u32 read_offset = 0, param = 0;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "nvram_read: reading image of size %d bytes from NVRAM\n",
+ nvram_size_bytes);
+
+ do {
+ bytes_to_copy =
+ (bytes_left >
+ MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
+
+ /* Call NVRAM read command */
+ SET_MFW_FIELD(param,
+ DRV_MB_PARAM_NVM_OFFSET,
+ nvram_offset_bytes + read_offset);
+ SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM, param,
+ &ret_mcp_resp,
+ &ret_mcp_param, &ret_read_size,
+ (u32 *)((u8 *)ret_buf + read_offset),
+ false))
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Update read offset */
+ read_offset += ret_read_size;
+ bytes_left -= ret_read_size;
+ } while (bytes_left > 0);
+
+ return DBG_STATUS_OK;
+}
+
/* Dumps GRC registers section header. Returns the dumped size in dwords.
* the following parameters are dumped:
* - count: no. of dumped entries
@@ -3189,17 +3365,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
return offset;
}
-static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 image_type,
- u32 *nvram_offset_bytes,
- u32 *nvram_size_bytes);
-
-static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 nvram_offset_bytes,
- u32 nvram_size_bytes, u32 *ret_buf);
-
/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3283,10 +3448,6 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
has_dbg_bus = GET_FIELD(block_per_chip->flags,
DBG_BLOCK_CHIP_HAS_DBG_BUS);
- /* read+clear for NWS parity is not working, skip NWS block */
- if (block_id == BLOCK_NWS)
- continue;
-
if (!is_removed && has_dbg_bus &&
GET_FIELD(block_per_chip->dbg_bus_mode.data,
DBG_MODE_HDR_EVAL_MODE) > 0) {
@@ -3375,8 +3536,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 dwords_read, offset = 0;
bool parities_masked = false;
+ u32 dwords_read, offset = 0;
u8 i;
*num_dumped_dwords = 0;
@@ -3545,8 +3706,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
*/
static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 *
- dump_buf,
+ u32 *dump_buf,
bool dump,
u16 rule_id,
const struct dbg_idle_chk_rule *rule,
@@ -3894,91 +4054,6 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
return offset;
}
-/* Finds the meta data image in NVRAM */
-static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 image_type,
- u32 *nvram_offset_bytes,
- u32 *nvram_size_bytes)
-{
- u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
- struct mcp_file_att file_att;
- int nvm_result;
-
- /* Call NVRAM get file command */
- nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
- p_ptt,
- DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type,
- &ret_mcp_resp,
- &ret_mcp_param,
- &ret_txn_size, (u32 *)&file_att);
-
- /* Check response */
- if (nvm_result ||
- (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
- return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
-
- /* Update return values */
- *nvram_offset_bytes = file_att.nvm_start_addr;
- *nvram_size_bytes = file_att.len;
-
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
- image_type, *nvram_offset_bytes, *nvram_size_bytes);
-
- /* Check alignment */
- if (*nvram_size_bytes & 0x3)
- return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
-
- return DBG_STATUS_OK;
-}
-
-/* Reads data from NVRAM */
-static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 nvram_offset_bytes,
- u32 nvram_size_bytes, u32 *ret_buf)
-{
- u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
- s32 bytes_left = nvram_size_bytes;
- u32 read_offset = 0, param = 0;
-
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "nvram_read: reading image of size %d bytes from NVRAM\n",
- nvram_size_bytes);
-
- do {
- bytes_to_copy =
- (bytes_left >
- MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
-
- /* Call NVRAM read command */
- SET_MFW_FIELD(param,
- DRV_MB_PARAM_NVM_OFFSET,
- nvram_offset_bytes + read_offset);
- SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
- if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NVM_READ_NVRAM, param,
- &ret_mcp_resp,
- &ret_mcp_param, &ret_read_size,
- (u32 *)((u8 *)ret_buf + read_offset)))
- return DBG_STATUS_NVRAM_READ_FAILED;
-
- /* Check response */
- if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
- return DBG_STATUS_NVRAM_READ_FAILED;
-
- /* Update read offset */
- read_offset += ret_read_size;
- bytes_left -= ret_read_size;
- } while (bytes_left > 0);
-
- return DBG_STATUS_OK;
-}
-
/* Get info on the MCP Trace data in the scratchpad:
* - trace_data_grc_addr (OUT): trace data GRC address in bytes
* - trace_data_size (OUT): trace data size in bytes (without the header)
@@ -4480,14 +4555,18 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
/* Dumps the specified ILT pages to the specified buffer.
* Returns the dumped size in dwords.
*/
-static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
- bool dump,
- u32 start_page_id,
+static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset,
+ bool *dump, u32 start_page_id,
u32 num_pages,
struct phys_mem_desc *ilt_pages,
- bool dump_page_ids)
+ bool dump_page_ids, u32 buf_size_in_dwords,
+ u32 *given_actual_dump_size_in_dwords)
{
- u32 page_id, end_page_id, offset = 0;
+ u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
+ u32 page_id, end_page_id, offset = *given_offset;
+ struct phys_mem_desc *mem_desc = NULL;
+ bool continue_dump = *dump;
+ u32 partial_page_size = 0;
if (num_pages == 0)
return offset;
@@ -4495,31 +4574,51 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
end_page_id = start_page_id + num_pages - 1;
for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
- struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
-
- /**
- *
- * if (page_id >= ->p_cxt_mngr->ilt_shadow_size)
- * break;
- */
-
+ mem_desc = &ilt_pages[page_id];
if (!ilt_pages[page_id].virt_addr)
continue;
if (dump_page_ids) {
- /* Copy page ID to dump buffer */
- if (dump)
+ /* Copy page ID to dump buffer
+ * (if dump is needed and buffer is not full)
+ */
+ if ((continue_dump) &&
+ (offset + 1 > buf_size_in_dwords)) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ if (continue_dump)
*(dump_buf + offset) = page_id;
offset++;
} else {
/* Copy page memory to dump buffer */
- if (dump)
+			if ((continue_dump) &&
+			    (offset + BYTES_TO_DWORDS(mem_desc->size) >
+			     buf_size_in_dwords)) {
+				partial_page_size =
+					buf_size_in_dwords - offset;
+				memcpy(dump_buf + offset,
+				       mem_desc->virt_addr,
+				       partial_page_size);
+				continue_dump = false;
+				actual_dump_size_in_dwords =
+					offset + partial_page_size;
+			}
+
+ if (continue_dump)
memcpy(dump_buf + offset,
mem_desc->virt_addr, mem_desc->size);
offset += BYTES_TO_DWORDS(mem_desc->size);
}
}
+ *dump = continue_dump;
+ *given_offset = offset;
+ *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
+
return offset;
}
@@ -4528,21 +4627,30 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
*/
static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
- bool dump,
+ u32 *given_offset,
+ bool *dump,
u32 valid_conn_pf_pages,
u32 valid_conn_vf_pages,
struct phys_mem_desc *ilt_pages,
- bool dump_page_ids)
+ bool dump_page_ids,
+ u32 buf_size_in_dwords,
+ u32 *given_actual_dump_size_in_dwords)
{
struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
- u32 pf_start_line, start_page_id, offset = 0;
+ u32 pf_start_line, start_page_id, offset = *given_offset;
u32 cdut_pf_init_pages, cdut_vf_init_pages;
u32 cdut_pf_work_pages, cdut_vf_work_pages;
u32 base_data_offset, size_param_offset;
+ u32 src_pages;
+ u32 section_header_and_param_size;
u32 cdut_pf_pages, cdut_vf_pages;
+ u32 actual_dump_size_in_dwords;
+ bool continue_dump = *dump;
+ bool update_size = *dump;
const char *section_name;
- u8 i;
+ u32 i;
+ actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
@@ -4551,13 +4659,26 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
+ section_header_and_param_size = qed_dump_section_hdr(NULL,
+ false,
+ section_name,
+ 1) +
+ qed_dump_num_param(NULL, false, "size", 0);
+
+ if ((continue_dump) &&
+ (offset + section_header_and_param_size > buf_size_in_dwords)) {
+ continue_dump = false;
+ update_size = false;
+ actual_dump_size_in_dwords = offset;
+ }
- offset +=
- qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ continue_dump, section_name, 1);
/* Dump size parameter (0 for now, overwritten with real size later) */
size_param_offset = offset;
- offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+ offset += qed_dump_num_param(dump_buf + offset,
+ continue_dump, "size", 0);
base_data_offset = offset;
/* CDUC pages are ordered as follows:
@@ -4570,22 +4691,22 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
/* Dump connection PF pages */
start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- valid_conn_pf_pages,
- ilt_pages, dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, valid_conn_pf_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump connection VF pages */
start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- valid_conn_vf_pages,
- ilt_pages,
- dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset,
+ &continue_dump, start_page_id,
+ valid_conn_vf_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
}
/* CDUT pages are ordered as follows:
@@ -4599,63 +4720,84 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
/* Dump task PF pages */
start_page_id = clients[ILT_CLI_CDUT].first.val +
cdut_pf_init_pages - pf_start_line;
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- cdut_pf_work_pages,
- ilt_pages, dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, cdut_pf_work_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump task VF pages */
start_page_id = clients[ILT_CLI_CDUT].first.val +
cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
i++, start_page_id += cdut_vf_pages)
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- cdut_vf_work_pages,
- ilt_pages,
- dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset,
+ &continue_dump, start_page_id,
+ cdut_vf_work_pages, ilt_pages,
+ dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+ }
+
+	/* Dump Searcher pages */
+ if (clients[ILT_CLI_SRC].active) {
+ start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line;
+ src_pages = clients[ILT_CLI_SRC].last.val -
+ clients[ILT_CLI_SRC].first.val + 1;
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, src_pages, ilt_pages,
+ dump_page_ids, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
}
/* Overwrite size param */
- if (dump)
- qed_dump_num_param(dump_buf + size_param_offset,
- dump, "size", offset - base_data_offset);
+ if (update_size) {
+ u32 section_size = (*dump == continue_dump) ?
+ offset - base_data_offset :
+		    actual_dump_size_in_dwords - base_data_offset;
+
+ if (section_size > 0)
+ qed_dump_num_param(dump_buf + size_param_offset,
+ *dump, "size", section_size);
+ else if ((section_size == 0) && (*dump != continue_dump))
+ actual_dump_size_in_dwords -=
+ section_header_and_param_size;
+ }
+
+ *dump = continue_dump;
+ *given_offset = offset;
+ *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
return offset;
}
-/* Performs ILT Dump to the specified buffer.
+/* Dumps a section containing the global parameters.
+ * Part of ilt dump process
* Returns the dumped size in dwords.
*/
-static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+static u32
+qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 cduc_page_size,
+ u32 conn_ctx_size,
+ u32 cdut_page_size,
+ u32 *full_dump_size_param_offset,
+ u32 *actual_dump_size_param_offset)
{
struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
- u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
- u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
- u32 num_cids_per_page, conn_ctx_size;
- u32 cduc_page_size, cdut_page_size;
- struct phys_mem_desc *ilt_pages;
- u8 conn_type;
-
- cduc_page_size = 1 <<
- (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
- cdut_page_size = 1 <<
- (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
- conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
- num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
- ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 offset = 0;
- /* Dump global params - 22 must match number of params below */
offset += qed_dump_common_global_params(p_hwfn, p_ptt,
- dump_buf + offset, dump, 22);
+ dump_buf + offset,
+ dump, 30);
offset += qed_dump_str_param(dump_buf + offset,
- dump, "dump-type", "ilt-dump");
+ dump,
+ "dump-type", "ilt-dump");
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "cduc-page-size", cduc_page_size);
+ "cduc-page-size",
+ cduc_page_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-first-page-id",
@@ -4667,20 +4809,19 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-num-pf-pages",
- clients
- [ILT_CLI_CDUC].pf_total_lines);
+ clients[ILT_CLI_CDUC].pf_total_lines);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-num-vf-pages",
- clients
- [ILT_CLI_CDUC].vf_total_lines);
+ clients[ILT_CLI_CDUC].vf_total_lines);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"max-conn-ctx-size",
conn_ctx_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "cdut-page-size", cdut_page_size);
+ "cdut-page-size",
+ cdut_page_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cdut-first-page-id",
@@ -4711,19 +4852,16 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
p_hwfn->p_cxt_mngr->task_ctx_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "task-type-id",
- p_hwfn->p_cxt_mngr->task_type_id);
- offset += qed_dump_num_param(dump_buf + offset,
- dump,
"first-vf-id-in-pf",
p_hwfn->p_cxt_mngr->first_vf_in_pf);
- offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
- dump,
- "num-vfs-in-pf",
- p_hwfn->p_cxt_mngr->vf_count);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "ptr-size-bytes", sizeof(void *));
+ "num-vfs-in-pf",
+ p_hwfn->p_cxt_mngr->vf_count);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ptr-size-bytes",
+ sizeof(void *));
offset += qed_dump_num_param(dump_buf + offset,
dump,
"pf-start-line",
@@ -4736,58 +4874,281 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
dump,
"ilt-shadow-size",
p_hwfn->p_cxt_mngr->ilt_shadow_size);
+
+ *full_dump_size_param_offset = offset;
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "dump-size-full", 0);
+
+ *actual_dump_size_param_offset = offset;
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "dump-size-actual", 0);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "iscsi_task_pages",
+ p_hwfn->p_cxt_mngr->iscsi_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "fcoe_task_pages",
+ p_hwfn->p_cxt_mngr->fcoe_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "roce_task_pages",
+ p_hwfn->p_cxt_mngr->roce_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "eth_task_pages",
+ p_hwfn->p_cxt_mngr->eth_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-first-page-id",
+ clients[ILT_CLI_SRC].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-last-page-id",
+ clients[ILT_CLI_SRC].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-is-active",
+ clients[ILT_CLI_SRC].active);
+
/* Additional/Less parameters require matching of number in call to
* dump_common_global_params()
*/
- /* Dump section containing number of PF CIDs per connection type */
+ return offset;
+}
+
+/* Dump section containing number of PF CIDs per connection type.
+ * Part of ilt dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump, u32 *valid_conn_pf_cids)
+{
+ u32 num_pf_cids = 0;
+ u32 offset = 0;
+ u8 conn_type;
+
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "num_pf_cids_per_conn_type", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump, "size", NUM_OF_CONNECTION_TYPES_E4);
- for (conn_type = 0, valid_conn_pf_cids = 0;
- conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
- u32 num_pf_cids =
- p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
-
+ dump, "size", NUM_OF_CONNECTION_TYPES);
+ for (conn_type = 0, *valid_conn_pf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+ num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
if (dump)
*(dump_buf + offset) = num_pf_cids;
- valid_conn_pf_cids += num_pf_cids;
+ *valid_conn_pf_cids += num_pf_cids;
}
- /* Dump section containing number of VF CIDs per connection type */
- offset += qed_dump_section_hdr(dump_buf + offset,
- dump, "num_vf_cids_per_conn_type", 1);
+ return offset;
+}
+
+/* Dump section containing the number of VF CIDs per connection type.
+ * Part of the ILT dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump, u32 *valid_conn_vf_cids)
+{
+ u32 num_vf_cids = 0;
+ u32 offset = 0;
+ u8 conn_type;
+
+ offset += qed_dump_section_hdr(dump_buf + offset, dump,
+ "num_vf_cids_per_conn_type", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump, "size", NUM_OF_CONNECTION_TYPES_E4);
- for (conn_type = 0, valid_conn_vf_cids = 0;
- conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
- u32 num_vf_cids =
+ dump, "size", NUM_OF_CONNECTION_TYPES);
+ for (conn_type = 0, *valid_conn_vf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+ num_vf_cids =
p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
-
if (dump)
*(dump_buf + offset) = num_vf_cids;
- valid_conn_vf_cids += num_vf_cids;
+ *valid_conn_vf_cids += num_vf_cids;
+ }
+
+ return offset;
+}
+
+/* Performs an ILT dump to the specified buffer.
+ * buf_size_in_dwords - size of the dump buffer in dwords.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, u32 buf_size_in_dwords, bool dump)
+{
+#if ((!defined VMWARE) && (!defined UEFI))
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+#endif
+ u32 valid_conn_vf_cids = 0,
+ valid_conn_vf_pages, offset = 0, real_dumped_size = 0;
+ u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages;
+ u32 num_cids_per_page, conn_ctx_size;
+ u32 cduc_page_size, cdut_page_size;
+ u32 actual_dump_size_in_dwords = 0;
+ struct phys_mem_desc *ilt_pages;
+ u32 actul_dump_off = 0;
+ u32 last_section_size;
+ u32 full_dump_off = 0;
+ u32 section_size = 0;
+ bool continue_dump;
+ u32 page_id;
+
+ last_section_size = qed_dump_last_section(NULL, 0, false);
+ cduc_page_size = 1 <<
+ (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ cdut_page_size = 1 <<
+ (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
+ num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
+ ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+ continue_dump = dump;
+
+	/* If we need to dump, reserve room for the last section
+	 * (the last section calculates the CRC of the dumped data).
+	 */
+ if (dump) {
+ if (buf_size_in_dwords >= last_section_size) {
+ buf_size_in_dwords -= last_section_size;
+ } else {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
}
- /* Dump section containing physical memory descs for each ILT page */
+ /* Dump global params */
+
+	/* If we need to dump, first check that there is enough room in
+	 * the dump buffer for this section: calculate the section size
+	 * without dumping. If there is not enough room, stop the dump.
+	 */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_common_global_params(p_hwfn,
+ p_ptt,
+ NULL,
+ false,
+ cduc_page_size,
+ conn_ctx_size,
+ cdut_page_size,
+ &full_dump_off,
+ &actul_dump_off);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ continue_dump,
+ cduc_page_size,
+ conn_ctx_size,
+ cdut_page_size,
+ &full_dump_off,
+ &actul_dump_off);
+
+	/* Dump section containing the number of PF CIDs per connection
+	 * type. If we need to dump, first check that there is enough
+	 * room in the dump buffer for this section.
+	 */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_num_pf_cids(p_hwfn,
+ NULL,
+ false,
+ &valid_conn_pf_cids);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn,
+ dump_buf + offset,
+ continue_dump,
+ &valid_conn_pf_cids);
+
+	/* Dump section containing the number of VF CIDs per connection
+	 * type. If we need to dump, first check that there is enough
+	 * room in the dump buffer for this section.
+	 */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_num_vf_cids(p_hwfn,
+ NULL,
+ false,
+ &valid_conn_vf_cids);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn,
+ dump_buf + offset,
+ continue_dump,
+ &valid_conn_vf_cids);
+
+ /* Dump section containing physical memory descriptors for each
+ * ILT page.
+ */
num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
+
+	/* If we need to dump, first check that there is enough room
+	 * in the dump buffer for the section header.
+	 */
+ if (continue_dump) {
+ section_size = qed_dump_section_hdr(NULL,
+ false,
+ "ilt_page_desc",
+ 1) +
+ qed_dump_num_param(NULL,
+ false,
+ "size",
+ num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
offset += qed_dump_section_hdr(dump_buf + offset,
- dump, "ilt_page_desc", 1);
+ continue_dump, "ilt_page_desc", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump,
+ continue_dump,
"size",
num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
- /* Copy memory descriptors to dump buffer */
- if (dump) {
- u32 page_id;
-
+	/* Copy memory descriptors to the dump buffer.
+	 * If we need to dump, copy only up to the dump buffer size.
+	 */
+ if (continue_dump) {
for (page_id = 0; page_id < num_pages;
- page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
- memcpy(dump_buf + offset,
- &ilt_pages[page_id],
- DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
+ page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) {
+ if (continue_dump &&
+ (offset + PAGE_MEM_DESC_SIZE_DWORDS <=
+ buf_size_in_dwords)) {
+ memcpy(dump_buf + offset,
+ &ilt_pages[page_id],
+ DWORDS_TO_BYTES
+ (PAGE_MEM_DESC_SIZE_DWORDS));
+ } else {
+ if (continue_dump) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+ }
} else {
offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
}
@@ -4798,25 +5159,31 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
num_cids_per_page);
/* Dump ILT pages IDs */
- offset += qed_ilt_dump_pages_section(p_hwfn,
- dump_buf + offset,
- dump,
- valid_conn_pf_pages,
- valid_conn_vf_pages,
- ilt_pages, true);
+ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
+ valid_conn_pf_pages, valid_conn_vf_pages,
+ ilt_pages, true, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump ILT pages memory */
- offset += qed_ilt_dump_pages_section(p_hwfn,
- dump_buf + offset,
- dump,
- valid_conn_pf_pages,
- valid_conn_vf_pages,
- ilt_pages, false);
+ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
+ valid_conn_pf_pages, valid_conn_vf_pages,
+ ilt_pages, false, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+
+ real_dumped_size =
+ (continue_dump == dump) ? offset : actual_dump_size_in_dwords;
+ qed_dump_num_param(dump_buf + full_dump_off, dump,
+ "full-dump-size", offset + last_section_size);
+ qed_dump_num_param(dump_buf + actul_dump_off,
+ dump,
+ "actual-dump-size",
+ real_dumped_size + last_section_size);
/* Dump last section */
- offset += qed_dump_last_section(dump_buf, offset, dump);
+ real_dumped_size += qed_dump_last_section(dump_buf,
+ real_dumped_size, dump);
- return offset;
+ return real_dumped_size;
}
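
The rewritten qed_ilt_dump() applies the same guard to every section: call the section routine once with a NULL buffer and dump=false to learn its size, clear continue_dump when the buffer would overflow, and keep advancing offset so the full (untruncated) size is still known at the end. A minimal sketch of that size-then-dump convention (the section() callback type is assumed for illustration):

static u32 dump_section_safely(u32 *dump_buf, u32 buf_size_in_dwords,
			       u32 offset, bool *continue_dump,
			       u32 (*section)(u32 *buf, bool dump))
{
	if (*continue_dump) {
		u32 section_size = section(NULL, false);	/* size only */

		if (offset + section_size > buf_size_in_dwords)
			*continue_dump = false;	/* truncate from here on */
	}

	/* Even with *continue_dump false this advances by the section
	 * size, so the caller can still report the full dump size.
	 */
	return offset + section(dump_buf + offset, *continue_dump);
}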
/***************************** Public Functions *******************************/
@@ -4837,6 +5204,16 @@ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
return DBG_STATUS_OK;
}
+static enum dbg_status qed_dbg_set_app_ver(u32 ver)
+{
+ if (ver < TOOLS_VERSION)
+ return DBG_STATUS_UNSUPPORTED_APP_VERSION;
+
+ s_app_ver = ver;
+
+ return DBG_STATUS_OK;
+}
+
bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct fw_info *fw_info)
{
@@ -4975,6 +5352,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+	/* Doesn't do anything, needed for compile-time asserts */
+ qed_static_asserts();
+
/* GRC Dump */
status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
@@ -5296,7 +5676,7 @@ static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
if (status != DBG_STATUS_OK)
return status;
- *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
+ *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false);
return DBG_STATUS_OK;
}
@@ -5307,21 +5687,9 @@ static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords)
{
- u32 needed_buf_size_in_dwords;
- enum dbg_status status;
-
- *num_dumped_dwords = 0;
-
- status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
- p_ptt,
- &needed_buf_size_in_dwords);
- if (status != DBG_STATUS_OK)
- return status;
-
- if (buf_size_in_dwords < needed_buf_size_in_dwords)
- return DBG_STATUS_DUMP_BUF_TOO_SMALL;
-
- *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
+ *num_dumped_dwords = qed_ilt_dump(p_hwfn,
+ p_ptt,
+ dump_buf, buf_size_in_dwords, true);
	/* Revert GRC params to their default */
qed_dbg_grc_set_params_default(p_hwfn);
@@ -5724,7 +6092,46 @@ static const char * const s_status_str[] = {
"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",
/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
- "When triggering on Storm data, the Storm to trigger on must be specified"
+ "When triggering on Storm data, the Storm to trigger on must be specified",
+
+ /* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */
+ "Failed to request MDUMP2 Offsize",
+
+ /* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */
+ "Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data",
+
+ /* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */
+ "Invalid Signature found at start of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */
+ "Invalid Log Size of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */
+ "Invalid Log Header of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */
+ "Invalid Log Data of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */
+ "Could not extract number of ports from regval buf of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */
+ "Could not extract MFW (link) status from regval buf of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */
+ "Could not display linkdump of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */
+ "Could not read PHY CFG of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */
+ "Could not read PLL Mode of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */
+ "Could not read TSCF/TSCE Lane Regs of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */
+ "Could not allocate MDUMP2 reg-val internal buffer"
};
/* Idle check severity names array */
@@ -5874,6 +6281,10 @@ static char s_temp_buf[MAX_MSG_LEN];
/**************************** Private Functions ******************************/
+static void qed_user_static_asserts(void)
+{
+}
+
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
return (a + b) % size;
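
qed_cyclic_add() is the wrap-around step for walking circular buffers such as the MCP trace log. A small usage sketch with illustrative values:

static u32 example_cyclic_wrap(void)
{
	/* (10 + 8) % 16 == 2: the index wraps past the end */
	return qed_cyclic_add(10, 8, 16);
}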
@@ -6153,9 +6564,8 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
/* Skip register names until the required reg_id is
* reached.
*/
- for (; reg_id > curr_reg_id;
- curr_reg_id++,
- parsing_str += strlen(parsing_str) + 1);
+ for (; reg_id > curr_reg_id; curr_reg_id++)
+ parsing_str += strlen(parsing_str) + 1;
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
@@ -6208,9 +6618,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
u32 *num_errors,
u32 *num_warnings)
{
+ u32 num_section_params = 0, num_rules, num_rules_not_dumped;
const char *section_name, *param_name, *param_str_val;
u32 *dump_buf_end = dump_buf + num_dumped_dwords;
- u32 num_section_params = 0, num_rules;
/* Offset in results_buf in bytes */
u32 results_offset = 0;
@@ -6234,15 +6644,31 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
num_section_params,
results_buf, &results_offset);
- /* Read idle_chk section */
+ /* Read idle_chk section
+ * There may be 1 or 2 idle_chk section parameters:
+ * - 1st is "num_rules"
+ * - 2nd is "num_rules_not_dumped" (optional)
+ */
+
dump_buf += qed_read_section_hdr(dump_buf,
&section_name, &num_section_params);
- if (strcmp(section_name, "idle_chk") || num_section_params != 1)
+ if (strcmp(section_name, "idle_chk") ||
+ (num_section_params != 2 && num_section_params != 1))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &num_rules);
if (strcmp(param_name, "num_rules"))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ if (num_section_params > 1) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &num_rules_not_dumped);
+ if (strcmp(param_name, "num_rules_not_dumped"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ } else {
+ num_rules_not_dumped = 0;
+ }
if (num_rules) {
u32 rules_print_size;
@@ -6309,6 +6735,13 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset),
"\nIdle Check completed successfully\n");
+ if (num_rules_not_dumped)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n",
+ num_rules_not_dumped);
+
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
@@ -7160,6 +7593,9 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
+	/* Doesn't do anything, needed for compile-time asserts */
+ qed_user_static_asserts();
+
return qed_parse_mcp_trace_dump(p_hwfn,
dump_buf,
results_buf, &parsed_buf_size, true);
@@ -7336,7 +7772,7 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
reg_result->block_attn_offset;
/* Go over attention status bits */
- for (j = 0; j < num_reg_attn; j++, bit_idx++) {
+ for (j = 0; j < num_reg_attn; j++) {
u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_VAL);
const char *attn_name, *attn_type_str, *masked_str;
@@ -7353,35 +7789,36 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
}
/* Check current bit index */
- if (!(reg_result->sts_val & BIT(bit_idx)))
- continue;
+ if (reg_result->sts_val & BIT(bit_idx)) {
+ /* An attention bit with value=1 was found
+ * Find attention name
+ */
+ attn_name_offset =
+ block_attn_name_offsets[attn_idx_val];
+ attn_name = attn_name_base + attn_name_offset;
+ attn_type_str =
+ (attn_type ==
+ ATTN_TYPE_INTERRUPT ? "Interrupt" :
+ "Parity");
+ masked_str = reg_result->mask_val &
+ BIT(bit_idx) ?
+ " [masked]" : "";
+ sts_addr =
+ GET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_STS_ADDRESS);
+ DP_NOTICE(p_hwfn,
+ "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
+ block_name, attn_type_str, attn_name,
+ sts_addr * 4, bit_idx, masked_str);
+ }
- /* An attention bit with value=1 was found
- * Find attention name
- */
- attn_name_offset =
- block_attn_name_offsets[attn_idx_val];
- attn_name = attn_name_base + attn_name_offset;
- attn_type_str =
- (attn_type ==
- ATTN_TYPE_INTERRUPT ? "Interrupt" :
- "Parity");
- masked_str = reg_result->mask_val & BIT(bit_idx) ?
- " [masked]" : "";
- sts_addr = GET_FIELD(reg_result->data,
- DBG_ATTN_REG_RESULT_STS_ADDRESS);
- DP_NOTICE(p_hwfn,
- "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
- block_name, attn_type_str, attn_name,
- sts_addr * 4, bit_idx, masked_str);
+ bit_idx++;
}
}
return DBG_STATUS_OK;
}
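
The refactor above replaces the continue with an if-block so that bit_idx advances exactly once per mapped attention bit, and unused mappings still consume a bit position. A standalone sketch of that scan (names and the BIT() definition are illustrative, not the driver's):

#include <stdio.h>

#define BIT(n) (1U << (n))

static void report_attn_bits(unsigned int sts_val, unsigned int mask_val,
			     int num_reg_attn)
{
	int j, bit_idx = 0;

	for (j = 0; j < num_reg_attn; j++) {
		if (sts_val & BIT(bit_idx))
			printf("bit %d set%s\n", bit_idx,
			       (mask_val & BIT(bit_idx)) ? " [masked]" : "");
		bit_idx++;	/* advance even when the bit is clear */
	}
}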
-static DEFINE_MUTEX(qed_dbg_lock);
-
/* Wrapper for unifying the idle_chk and mcp_trace api */
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
@@ -7396,9 +7833,26 @@ qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
&num_warnnings);
}
+static DEFINE_MUTEX(qed_dbg_lock);
+
+#define MAX_PHY_RESULT_BUFFER 9000
+
+/******************************** Feature Meta data section ******************/
+
+#define GRC_NUM_STR_FUNCS 2
+#define IDLE_CHK_NUM_STR_FUNCS 1
+#define MCP_TRACE_NUM_STR_FUNCS 1
+#define REG_FIFO_NUM_STR_FUNCS 1
+#define IGU_FIFO_NUM_STR_FUNCS 1
+#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1
+#define FW_ASSERTS_NUM_STR_FUNCS 1
+#define ILT_NUM_STR_FUNCS 1
+#define PHY_NUM_STR_FUNCS 20
+
/* Feature meta data lookup table */
static struct {
char *name;
+ u32 num_funcs;
enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *size);
enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
@@ -7411,40 +7865,46 @@ static struct {
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+ const struct qed_func_lookup *hsi_func_lookup;
} qed_features_lookup[] = {
{
- "grc", qed_dbg_grc_get_dump_buf_size,
- qed_dbg_grc_dump, NULL, NULL}, {
- "idle_chk",
+ "grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size,
+ qed_dbg_grc_dump, NULL, NULL, NULL}, {
+ "idle_chk", IDLE_CHK_NUM_STR_FUNCS,
qed_dbg_idle_chk_get_dump_buf_size,
qed_dbg_idle_chk_dump,
qed_print_idle_chk_results_wrapper,
- qed_get_idle_chk_results_buf_size}, {
- "mcp_trace",
+ qed_get_idle_chk_results_buf_size,
+ NULL}, {
+ "mcp_trace", MCP_TRACE_NUM_STR_FUNCS,
qed_dbg_mcp_trace_get_dump_buf_size,
qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
- qed_get_mcp_trace_results_buf_size}, {
- "reg_fifo",
+ qed_get_mcp_trace_results_buf_size,
+ NULL}, {
+ "reg_fifo", REG_FIFO_NUM_STR_FUNCS,
qed_dbg_reg_fifo_get_dump_buf_size,
qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
- qed_get_reg_fifo_results_buf_size}, {
- "igu_fifo",
+ qed_get_reg_fifo_results_buf_size,
+ NULL}, {
+ "igu_fifo", IGU_FIFO_NUM_STR_FUNCS,
qed_dbg_igu_fifo_get_dump_buf_size,
qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
- qed_get_igu_fifo_results_buf_size}, {
- "protection_override",
+ qed_get_igu_fifo_results_buf_size,
+ NULL}, {
+ "protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS,
qed_dbg_protection_override_get_dump_buf_size,
qed_dbg_protection_override_dump,
qed_print_protection_override_results,
- qed_get_protection_override_results_buf_size}, {
- "fw_asserts",
+ qed_get_protection_override_results_buf_size,
+ NULL}, {
+ "fw_asserts", FW_ASSERTS_NUM_STR_FUNCS,
qed_dbg_fw_asserts_get_dump_buf_size,
qed_dbg_fw_asserts_dump,
qed_print_fw_asserts_results,
- qed_get_fw_asserts_results_buf_size}, {
- "ilt",
- qed_dbg_ilt_get_dump_buf_size,
- qed_dbg_ilt_dump, NULL, NULL},};
+ qed_get_fw_asserts_results_buf_size,
+ NULL}, {
+ "ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size,
+ qed_dbg_ilt_dump, NULL, NULL, NULL},};
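
Each table entry now carries its string-function count and an optional HSI lookup pointer alongside the size/dump/print callbacks, some of which stay NULL. A simplified sketch of how such a lookup table is typically consumed (types are assumptions, not the driver's real signatures):

#include <stdlib.h>

struct feature_ops {
	const char *name;
	int (*get_size)(unsigned int *size);
	int (*perform_dump)(unsigned int *buf, unsigned int size);
	int (*print_results)(const unsigned int *buf);	/* may be NULL */
};

static int run_feature(const struct feature_ops *ops)
{
	unsigned int size, *buf;
	int rc = ops->get_size(&size);

	if (rc)
		return rc;

	buf = malloc(size * sizeof(*buf));
	if (!buf)
		return -1;

	rc = ops->perform_dump(buf, size);
	if (!rc && ops->print_results)	/* formatting stage is optional */
		rc = ops->print_results(buf);

	free(buf);
	return rc;
}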
static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
{
@@ -7466,7 +7926,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_features[feature_idx];
- u32 text_size_bytes, null_char_pos, i;
+ u32 txt_size_bytes, null_char_pos, i;
+ u32 *dbuf, dwords;
enum dbg_status rc;
char *text_buf;
@@ -7474,33 +7935,43 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
if (!qed_features_lookup[feature_idx].results_buf_size)
return DBG_STATUS_OK;
+ dbuf = (u32 *)feature->dump_buf;
+ dwords = feature->dumped_dwords;
+
/* Obtain size of formatted output */
- rc = qed_features_lookup[feature_idx].
- results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
- feature->dumped_dwords, &text_size_bytes);
+ rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn,
+ dbuf,
+ dwords,
+ &txt_size_bytes);
if (rc != DBG_STATUS_OK)
return rc;
- /* Make sure that the allocated size is a multiple of dword (4 bytes) */
- null_char_pos = text_size_bytes - 1;
- text_size_bytes = (text_size_bytes + 3) & ~0x3;
+ /* Make sure that the allocated size is a multiple of dword
+ * (4 bytes).
+ */
+ null_char_pos = txt_size_bytes - 1;
+ txt_size_bytes = (txt_size_bytes + 3) & ~0x3;
- if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
+ if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
DP_NOTICE(p_hwfn->cdev,
"formatted size of feature was too small %d. Aborting\n",
- text_size_bytes);
+ txt_size_bytes);
return DBG_STATUS_INVALID_ARGS;
}
- /* Allocate temp text buf */
- text_buf = vzalloc(text_size_bytes);
- if (!text_buf)
+	/* Allocate temp text buf */
+ text_buf = vzalloc(txt_size_bytes);
+ if (!text_buf) {
+ DP_NOTICE(p_hwfn->cdev,
+ "failed to allocate text buffer. Aborting\n");
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
/* Decode feature opcodes to string on temp buf */
- rc = qed_features_lookup[feature_idx].
- print_results(p_hwfn, (u32 *)feature->dump_buf,
- feature->dumped_dwords, text_buf);
+ rc = qed_features_lookup[feature_idx].print_results(p_hwfn,
+ dbuf,
+ dwords,
+ text_buf);
if (rc != DBG_STATUS_OK) {
vfree(text_buf);
return rc;
@@ -7510,26 +7981,27 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
* The bytes that were added as a result of the dword alignment are also
* padded with '\n' characters.
*/
- for (i = null_char_pos; i < text_size_bytes; i++)
+ for (i = null_char_pos; i < txt_size_bytes; i++)
text_buf[i] = '\n';
/* Dump printable feature to log */
if (p_hwfn->cdev->print_dbg_data)
- qed_dbg_print_feature(text_buf, text_size_bytes);
+ qed_dbg_print_feature(text_buf, txt_size_bytes);
- /* Just return the original binary buffer if requested */
+ /* Dump binary data as is to the output file */
if (p_hwfn->cdev->dbg_bin_dump) {
vfree(text_buf);
- return DBG_STATUS_OK;
+ return rc;
}
- /* Free the old dump_buf and point the dump_buf to the newly allocagted
+ /* Free the old dump_buf and point the dump_buf to the newly allocated
* and formatted text buffer.
*/
vfree(feature->dump_buf);
feature->dump_buf = text_buf;
- feature->buf_size = text_size_bytes;
- feature->dumped_dwords = text_size_bytes / 4;
+ feature->buf_size = txt_size_bytes;
+ feature->dumped_dwords = txt_size_bytes / 4;
+
return rc;
}
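
The alignment step in format_feature() rounds the text size up to a dword multiple and pads the tail, from the old NUL position onward, with '\n' so the buffer stays printable. A small standalone example with illustrative sizes:

#include <stdio.h>

int main(void)
{
	char buf[16] = "formatted";	/* 9 chars + NUL = 10 bytes */
	unsigned int size = 10, null_pos = size - 1, i;

	size = (size + 3) & ~0x3u;	/* rounds 10 up to 12 */
	for (i = null_pos; i < size; i++)
		buf[i] = '\n';		/* pads positions 9..11 */

	printf("%u bytes = %u dwords\n", size, size / 4);
	return 0;
}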
@@ -7542,7 +8014,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_features[feature_idx];
- u32 buf_size_dwords;
+ u32 buf_size_dwords, *dbuf, *dwords;
enum dbg_status rc;
DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
@@ -7580,13 +8052,16 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
if (!feature->dump_buf)
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
- rc = qed_features_lookup[feature_idx].
- perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
- feature->buf_size / sizeof(u32),
- &feature->dumped_dwords);
+ dbuf = (u32 *)feature->dump_buf;
+ dwords = &feature->dumped_dwords;
+ rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt,
+ dbuf,
+ feature->buf_size /
+ sizeof(u32),
+ dwords);
/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
- * In this case the buffer holds valid binary data, but we wont able
+	 * In this case the buffer holds valid binary data, but we won't be able
* to parse it (since parsing relies on data in NVRAM which is only
	 * accessible when MFW is responsive). Skip the formatting but return
* success so that binary data is provided.
@@ -7777,7 +8252,8 @@ enum debug_print_features {
static u32 qed_calc_regdump_header(struct qed_dev *cdev,
enum debug_print_features feature,
- int engine, u32 feature_size, u8 omit_engine)
+ int engine, u32 feature_size,
+ u8 omit_engine, u8 dbg_bin_dump)
{
u32 res = 0;
@@ -7788,7 +8264,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
feature, feature_size);
SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
- SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
+ SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump);
SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
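
qed_calc_regdump_header() packs the payload size, feature id, binary-dump flag and engine index into a single u32 via SET_FIELD(). An illustrative packing sketch; the real REGDUMP_HEADER_* field positions live in the qed headers, and the shifts below are assumptions for demonstration only:

static unsigned int pack_regdump_header(unsigned int feature_size,
					unsigned int feature,
					unsigned int bin_dump,
					unsigned int engine)
{
	return (feature_size & 0xffffff) |	/* assumed bits 0..23 */
	       (feature << 24) |		/* assumed bits 24..29 */
	       (bin_dump << 30) |		/* assumed bit 30 */
	       (engine << 31);			/* assumed bit 31 */
}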
@@ -7798,12 +8274,10 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
u8 cur_engine, omit_engine = 0, org_engine;
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- int grc_params[MAX_DBG_GRC_PARAMS], i;
+ int grc_params[MAX_DBG_GRC_PARAMS], rc, i;
u32 offset = 0, feature_size;
- int rc;
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
grc_params[i] = dev_data->grc.param_val[i];
@@ -7811,8 +8285,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!QED_IS_CMT(cdev))
omit_engine = 1;
+ cdev->dbg_bin_dump = 1;
mutex_lock(&qed_dbg_lock);
- cdev->dbg_bin_dump = true;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
@@ -7826,8 +8300,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IDLE_CHK,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
@@ -7838,8 +8315,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IDLE_CHK,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
@@ -7850,8 +8330,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, REG_FIFO,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
@@ -7862,8 +8345,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IGU_FIFO,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
@@ -7875,9 +8361,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
+ qed_calc_regdump_header(cdev,
+ PROTECTION_OVERRIDE,
cur_engine,
- feature_size, omit_engine);
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev,
@@ -7891,8 +8380,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, FW_ASSERTS,
- cur_engine, feature_size,
- omit_engine);
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
@@ -7900,8 +8391,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
}
feature_size = qed_dbg_ilt_size(cdev);
- if (!cdev->disable_ilt_dump &&
- feature_size < ILT_DUMP_MAX_SIZE) {
+ if (!cdev->disable_ilt_dump && feature_size <
+ ILT_DUMP_MAX_SIZE) {
rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
@@ -7909,15 +8400,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
qed_calc_regdump_header(cdev, ILT_DUMP,
cur_engine,
feature_size,
- omit_engine);
- offset += feature_size + REGDUMP_HEADER_SIZE;
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
rc);
}
}
- /* GRC dump - must be last because when mcp stuck it will
+	/* GRC dump - must be last because when the MCP is stuck it will
* clutter idle_chk, reg_fifo, ...
*/
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
@@ -7929,7 +8421,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, GRC_DUMP,
cur_engine,
- feature_size, omit_engine);
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
@@ -7944,16 +8438,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
- feature_size, omit_engine);
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
}
- /* Re-populate nvm attribute info */
- qed_mcp_nvm_info_free(p_hwfn);
- qed_mcp_nvm_info_populate(p_hwfn);
-
/* nvm cfg1 */
rc = qed_dbg_nvm_image(cdev,
(u8 *)buffer + offset +
@@ -7962,43 +8453,51 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
- feature_size, omit_engine);
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
+ QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1",
+ rc);
}
- /* nvm default */
+ /* nvm default */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_DEFAULT_CFG);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, DEFAULT_CFG,
+ cur_engine, feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
- rc);
+ QED_NVM_IMAGE_DEFAULT_CFG,
+ "QED_NVM_IMAGE_DEFAULT_CFG", rc);
}
/* nvm meta */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_NVM_META);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_NVM_META);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, NVM_META, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, NVM_META, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
+ QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META",
+ rc);
}
/* nvm mdump */
@@ -8007,8 +8506,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_MDUMP);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, MDUMP, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, MDUMP, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
@@ -8016,17 +8516,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
}
- cdev->dbg_bin_dump = false;
mutex_unlock(&qed_dbg_lock);
+ cdev->dbg_bin_dump = 0;
return 0;
}
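
qed_dbg_all_data() lays the features out as a sequence of records, each a one-dword header followed by its payload, with offset advancing by REGDUMP_HEADER_SIZE plus the payload size. A sketch of that append step (the 4-byte header size is an assumption for illustration):

#include <string.h>

static unsigned int append_record(unsigned char *buffer, unsigned int offset,
				  unsigned int header, const void *payload,
				  unsigned int payload_size)
{
	memcpy(buffer + offset, &header, sizeof(header));
	memcpy(buffer + offset + sizeof(header), payload, payload_size);

	return offset + sizeof(header) + payload_size;
}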
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
u8 cur_engine, org_engine;
cdev->disable_ilt_dump = false;
@@ -8037,14 +8536,13 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
"calculating idle_chk and grcdump register length for current engine\n");
qed_set_debug_engine(cdev, cur_engine);
regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
- REGDUMP_HEADER_SIZE +
- qed_dbg_protection_override_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
-
+ REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE +
+ qed_dbg_protection_override_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
if (ilt_len < ILT_DUMP_MAX_SIZE) {
total_ilt_len += ilt_len;
@@ -8055,7 +8553,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
qed_set_debug_engine(cdev, org_engine);
/* Engine common */
- regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev);
qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
if (image_len)
regs_len += REGDUMP_HEADER_SIZE + image_len;
@@ -8083,10 +8582,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
- struct qed_dbg_feature *qed_feature =
- &cdev->dbg_features[feature];
+ struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
enum dbg_status dbg_rc;
struct qed_ptt *p_ptt;
int rc = 0;
@@ -8119,9 +8616,8 @@ out:
int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 buf_size_dwords;
enum dbg_status rc;
@@ -8143,6 +8639,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
return qed_feature->buf_size;
}
+int qed_dbg_phy_size(struct qed_dev *cdev)
+{
+	/* Return the max size of the phy info plus the max size of
+	 * the phy mac_stat multiplied by the number of ports.
+	 */
+ return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev));
+}
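
A worked example under the MAX_PHY_RESULT_BUFFER value defined earlier in this patch: on a 2-port device the reserved size is 9000 * (1 + 2) = 27000 bytes, one slot for the phy info plus one mac_stat slot per port:

static int example_phy_size_two_ports(void)
{
	return 9000 * (1 + 2);	/* == 27000 bytes */
}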
+
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
return cdev->engine_for_debug;
@@ -8160,6 +8664,9 @@ void qed_dbg_pf_init(struct qed_dev *cdev)
const u8 *dbg_values = NULL;
int i;
+	/* Sync the version with the qed debug-bus code */
+ qed_dbg_set_app_ver(TOOLS_VERSION);
+
/* Debug values are after init values.
* The offset is the first dword of the file.
*/
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
index e71af82d3200..b0d4b937cf4a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
-#ifndef _QED_DEBUGFS_H
-#define _QED_DEBUGFS_H
+#ifndef _QED_DEBUG_H
+#define _QED_DEBUG_H
enum qed_dbg_features {
DBG_FEATURE_GRC,
@@ -45,6 +45,7 @@ int qed_dbg_ilt_size(struct qed_dev *cdev);
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
+int qed_dbg_phy_size(struct qed_dev *cdev);
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer);
int qed_dbg_all_data_size(struct qed_dev *cdev);
u8 qed_get_debug_engine(struct qed_dev *cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0410c3604abd..cc4ec2bb36db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -25,6 +25,7 @@
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
@@ -951,7 +952,7 @@ qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
}
int qed_llh_add_mac_filter(struct qed_dev *cdev,
- u8 ppfid, u8 mac_addr[ETH_ALEN])
+ u8 ppfid, const u8 mac_addr[ETH_ALEN])
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1396,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev)
qed_rdma_info_free(p_hwfn);
}
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
- qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
+ qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);
/* Destroy doorbell recovery mechanism */
qed_db_recovery_teardown(p_hwfn);
@@ -1483,8 +1485,8 @@ static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
/* num RLs can't exceed resource amount of rls or vports */
- num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
- RESC_NUM(p_hwfn, QED_VPORT));
+ num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL),
+ RESC_NUM(p_hwfn, QED_VPORT));
/* Make sure after we reserve there's something left */
if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
@@ -1532,8 +1534,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
bool four_port;
/* pq and vport bases for this PF */
- qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
- qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
/* rate limiting and weighted fair queueing are always enabled */
qm_info->vport_rl_en = true;
@@ -1628,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
*/
/* flags for pq init */
-#define PQ_INIT_SHARE_VPORT (1 << 0)
-#define PQ_INIT_PF_RL (1 << 1)
-#define PQ_INIT_VF_RL (1 << 2)
+#define PQ_INIT_SHARE_VPORT BIT(0)
+#define PQ_INIT_PF_RL BIT(1)
+#define PQ_INIT_VF_RL BIT(2)
/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP 1
@@ -2290,7 +2292,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_no_mem;
}
- rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+ rc = qed_eq_alloc(p_hwfn, (u16)n_eqes);
if (rc)
goto alloc_err;
@@ -2375,6 +2377,49 @@ alloc_err:
return rc;
}
+static int qed_fw_err_handler(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data, u8 fw_return_code)
+{
+ if (fw_return_code != COMMON_ERR_CODE_ERROR)
+ goto eqe_unexpected;
+
+ if (data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
+ le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) {
+ qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
+ return 0;
+ }
+
+eqe_unexpected:
+ DP_ERR(p_hwfn,
+ "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
+ opcode, fw_return_code, echo);
+ return -EINVAL;
+}
+
+static int qed_common_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ case COMMON_EVENT_VF_FLR:
+ return qed_sriov_eqe_event(p_hwfn, opcode, echo, data,
+ fw_return_code);
+ case COMMON_EVENT_FW_ERROR:
+ return qed_fw_err_handler(p_hwfn, opcode,
+ le16_to_cpu(echo), data,
+ fw_return_code);
+ default:
+ DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n",
+ opcode, echo);
+ return -EINVAL;
+ }
+}
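
qed_spq_register_async_cb() (used in qed_resc_setup() below) keys handlers like qed_common_eqe_event() by protocol id, and incoming event-queue entries are dispatched through that slot. A simplified sketch of such a registry (types and the table size are assumptions):

typedef int (*async_eqe_cb)(unsigned char opcode, unsigned short echo);

static async_eqe_cb async_cb[16];	/* indexed by protocol id */

static void register_async_cb(unsigned int protocol, async_eqe_cb cb)
{
	async_cb[protocol] = cb;
}

static int dispatch_eqe(unsigned int protocol, unsigned char opcode,
			unsigned short echo)
{
	return async_cb[protocol] ? async_cb[protocol](opcode, echo) : -1;
}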
+
void qed_resc_setup(struct qed_dev *cdev)
{
int i;
@@ -2403,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_l2_setup(p_hwfn);
qed_iov_setup(p_hwfn);
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ qed_common_eqe_event);
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2)
qed_ll2_setup(p_hwfn);
@@ -2430,9 +2477,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
- addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
-
+ addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id);
if (is_vf)
id += 0x10;
@@ -2592,7 +2638,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
cache_line_size);
}
- if (L1_CACHE_BYTES > wr_mbs)
+ if (wr_mbs < L1_CACHE_BYTES)
DP_INFO(p_hwfn,
"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
L1_CACHE_BYTES, wr_mbs);
@@ -2608,13 +2654,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
- struct qed_qm_common_rt_init_params params;
+ struct qed_qm_common_rt_init_params *params;
struct qed_dev *cdev = p_hwfn->cdev;
u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id;
u32 concrete_fid;
int rc = 0;
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to allocate common init params\n");
+
+ return -ENOMEM;
+ }
+
qed_init_cau_rt_data(cdev);
/* Program GTT windows */
@@ -2627,16 +2681,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qm_info->pf_wfq_en = true;
}
- memset(&params, 0, sizeof(params));
- params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
- params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
- params.pf_rl_en = qm_info->pf_rl_en;
- params.pf_wfq_en = qm_info->pf_wfq_en;
- params.global_rl_en = qm_info->vport_rl_en;
- params.vport_wfq_en = qm_info->vport_wfq_en;
- params.port_params = qm_info->qm_port_params;
+ params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
+ params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params->pf_rl_en = qm_info->pf_rl_en;
+ params->pf_wfq_en = qm_info->pf_wfq_en;
+ params->global_rl_en = qm_info->vport_rl_en;
+ params->vport_wfq_en = qm_info->vport_wfq_en;
+ params->port_params = qm_info->qm_port_params;
- qed_qm_common_rt_init(p_hwfn, &params);
+ qed_qm_common_rt_init(p_hwfn, params);
qed_cxt_hw_init_common(p_hwfn);
@@ -2644,7 +2697,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc)
- return rc;
+ goto out;
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
@@ -2663,7 +2716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
- qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -2672,6 +2725,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+out:
+ kfree(params);
+
return rc;
}
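
The hunk above moves the large qed_qm_common_rt_init_params struct off the kernel stack onto the heap, freeing it on every exit path through a single out: label. A minimal sketch of that conversion (the struct body and do_phase_init() are stand-ins):

#include <linux/slab.h>
#include <linux/types.h>

struct big_init_params { u32 fields[256]; };

static int do_phase_init(struct big_init_params *p) { return 0; }

static int init_common_sketch(void)
{
	struct big_init_params *params;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = do_phase_init(params);
	if (rc)
		goto out;	/* every error path frees params below */

	/* ... further init stages would go here ... */
out:
	kfree(params);
	return rc;
}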
@@ -2784,7 +2840,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_bar(p_hwfn, p_ptt);
}
- p_hwfn->wid_count = (u16) n_cpus;
+ p_hwfn->wid_count = (u16)n_cpus;
DP_INFO(p_hwfn,
"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
@@ -3503,8 +3559,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
static void get_function_id(struct qed_hwfn *p_hwfn)
{
/* ME Register */
- p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
- PXP_PF_ME_OPAQUE_ADDR);
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
@@ -3670,12 +3726,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
return qed_hsi_def_val[type][chip_id];
}
+
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 resc_max_val, mcp_resp;
u8 res_id;
int rc;
+
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
switch (res_id) {
case QED_LL2_RAM_QUEUE:
@@ -3921,7 +3979,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
* resources allocation queries should be atomic. Since several PFs can
* run in parallel - a resource lock is needed.
* If either the resource lock or resource set value commands are not
- * supported - skip the the max values setting, release the lock if
+ * supported - skip the max values setting, release the lock if
* needed, and proceed to the queries. Other failures, including a
* failure to acquire the lock, will cause this function to fail.
*/
@@ -3934,7 +3992,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
} else if (rc == -EINVAL) {
DP_INFO(p_hwfn,
"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
- } else if (!rc && !resc_lock_params.b_granted) {
+ } else if (!resc_lock_params.b_granted) {
DP_NOTICE(p_hwfn,
"Failed to acquire the resource lock for the resource allocation commands\n");
return -EBUSY;
@@ -4775,7 +4833,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
u16 min, max;
- min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
+ min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
DP_NOTICE(p_hwfn,
"l2_queue id [%d] is not valid, available indices [%d - %d]\n",
@@ -4909,7 +4967,7 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
goto out;
address = BAR0_MAP_REG_USDM_RAM +
- USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct ustorm_eth_queue_zone), timeset);
@@ -4948,7 +5006,7 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
goto out;
address = BAR0_MAP_REG_XSDM_RAM +
- XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index d3c1f3879be8..f8682356d0cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -15,44 +15,52 @@
#include "qed_int.h"
/**
- * @brief qed_init_dp - initialize the debug level
+ * qed_init_dp(): Initialize the debug level.
*
- * @param cdev
- * @param dp_module
- * @param dp_level
+ * @cdev: Qed dev pointer.
+ * @dp_module: Module debug parameter.
+ * @dp_level: Module debug level.
+ *
+ * Return: Void.
*/
void qed_init_dp(struct qed_dev *cdev,
u32 dp_module,
u8 dp_level);
/**
- * @brief qed_init_struct - initialize the device structure to
- * its defaults
+ * qed_init_struct(): Initialize the device structure to
+ * its defaults.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_init_struct(struct qed_dev *cdev);
/**
- * @brief qed_resc_free -
+ * qed_resc_free: Free device resources.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_resc_free(struct qed_dev *cdev);
/**
- * @brief qed_resc_alloc -
+ * qed_resc_alloc(): Alloc device resources.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_resc_alloc(struct qed_dev *cdev);
/**
- * @brief qed_resc_setup -
+ * qed_resc_setup(): Setup device resources.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
*/
void qed_resc_setup(struct qed_dev *cdev);
@@ -105,94 +113,96 @@ struct qed_hw_init_params {
};
/**
- * @brief qed_hw_init -
+ * qed_hw_init(): Init Qed hardware.
*
- * @param cdev
- * @param p_params
+ * @cdev: Qed dev pointer.
+ * @p_params: Pointers to params.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
/**
- * @brief qed_hw_timers_stop_all - stop the timers HW block
+ * qed_hw_timers_stop_all(): Stop the timers HW block.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return void
+ * Return: void.
*/
void qed_hw_timers_stop_all(struct qed_dev *cdev);
/**
- * @brief qed_hw_stop -
+ * qed_hw_stop(): Stop Qed hardware.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: int.
*/
int qed_hw_stop(struct qed_dev *cdev);
/**
- * @brief qed_hw_stop_fastpath -should be called incase
- * slowpath is still required for the device,
- * but fastpath is not.
+ * qed_hw_stop_fastpath(): Should be called in case
+ * slowpath is still required for the device,
+ * but fastpath is not.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_stop_fastpath(struct qed_dev *cdev);
/**
- * @brief qed_hw_start_fastpath -restart fastpath traffic,
- * only if hw_stop_fastpath was called
+ * qed_hw_start_fastpath(): Restart fastpath traffic,
+ * only if hw_stop_fastpath was called.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
-
/**
- * @brief qed_hw_prepare -
+ * qed_hw_prepare(): Prepare Qed hardware.
*
- * @param cdev
- * @param personality - personality to initialize
+ * @cdev: Qed dev pointer.
+ * @personality: Personality to initialize.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_prepare(struct qed_dev *cdev,
int personality);
/**
- * @brief qed_hw_remove -
+ * qed_hw_remove(): Remove Qed hardware.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_hw_remove(struct qed_dev *cdev);
/**
- * @brief qed_ptt_acquire - Allocate a PTT window
+ * qed_ptt_acquire(): Allocate a PTT window.
*
- * Should be called at the entry point to the driver (at the beginning of an
- * exported function)
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: struct qed_ptt.
*
- * @return struct qed_ptt
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function).
*/
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_release - Release PTT Window
+ * qed_ptt_release(): Release PTT Window.
*
- * Should be called at the end of a flow - at the end of the function that
- * acquired the PTT.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
+ * Return: Void.
*
- * @param p_hwfn
- * @param p_ptt
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
*/
void qed_ptt_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
@@ -205,15 +215,17 @@ enum qed_dmae_address_type_t {
};
/**
- * @brief qed_dmae_host2grc - copy data from source addr to
- * dmae registers using the given ptt
+ * qed_dmae_host2grc(): Copy data from source addr to
+ * dmae registers using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @source_addr: Source address.
+ * @grc_addr: GRC address (dmae_data_offset).
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
*
- * @param p_hwfn
- * @param p_ptt
- * @param source_addr
- * @param grc_addr (dmae_data_offset)
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * Return: Int.
*/
int
qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
@@ -224,29 +236,34 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_dmae_params *p_params);
/**
- * @brief qed_dmae_grc2host - Read data from dmae data offset
- * to source address using the given ptt
+ * qed_dmae_grc2host(): Read data from dmae data offset
+ * to the destination address using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @grc_addr: GRC address (dmae_data_offset).
+ * @dest_addr: Destination Address.
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
*
- * @param p_ptt
- * @param grc_addr (dmae_data_offset)
- * @param dest_addr
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * Return: Int.
*/
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
struct qed_dmae_params *p_params);
/**
- * @brief qed_dmae_host2host - copy data from to source address
- * to a destination adress (for SRIOV) using the given ptt
+ * qed_dmae_host2host(): Copy data from a source address
+ * to a destination address (for SRIOV) using the given
+ * ptt.
*
- * @param p_hwfn
- * @param p_ptt
- * @param source_addr
- * @param dest_addr
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @source_addr: Source address.
+ * @dest_addr: Destination address.
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
+ *
+ * Return: Int.
*/
int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -259,51 +276,51 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain);
/**
- * @@brief qed_fw_l2_queue - Get absolute L2 queue ID
+ * qed_fw_l2_queue(): Get absolute L2 queue ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
u16 src_id,
u16 *dst_id);
/**
- * @@brief qed_fw_vport - Get absolute vport ID
+ * qed_fw_vport(): Get absolute vport ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_vport(struct qed_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
/**
- * @@brief qed_fw_rss_eng - Get absolute RSS engine ID
+ * qed_fw_rss_eng(): Get absolute RSS engine ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
/**
- * @brief qed_llh_get_num_ppfid - Return the allocated number of LLH filter
- * banks that are allocated to the PF.
+ * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter
+ * banks that are allocated to the PF.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return u8 - Number of LLH filter banks
+ * Return: u8 Number of LLH filter banks.
*/
u8 qed_llh_get_num_ppfid(struct qed_dev *cdev);
@@ -314,45 +331,50 @@ enum qed_eng {
};
/**
- * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given
- * LLH filter bank.
+ * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given
+ * LLH filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param eng
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @eng: Engine.
*
- * @return int
+ * Return: Int.
*/
int qed_llh_set_ppfid_affinity(struct qed_dev *cdev,
u8 ppfid, enum qed_eng eng);
/**
- * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity
+ * qed_llh_set_roce_affinity(): Set the RoCE engine affinity.
*
- * @param cdev
- * @param eng
+ * @cdev: Qed dev pointer.
+ * @eng: Engine.
*
- * @return int
+ * Return: Int.
*/
int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng);
/**
- * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter
- * bank.
+ * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter
+ * bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @mac_addr: MAC to add.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param mac_addr - MAC to add
+ * Return: Int.
*/
int qed_llh_add_mac_filter(struct qed_dev *cdev,
- u8 ppfid, u8 mac_addr[ETH_ALEN]);
+ u8 ppfid, const u8 mac_addr[ETH_ALEN]);
/**
- * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given
- * filter bank.
+ * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given
+ * filter bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @mac_addr: MAC to remove.
*
- * @param p_ptt
- * @param p_filter - MAC to remove
+ * Return: Void.
*/
void qed_llh_remove_mac_filter(struct qed_dev *cdev,
u8 ppfid, u8 mac_addr[ETH_ALEN]);
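A minimal sketch of the intended add/remove pairing, with a made-up MAC value (names otherwise as in the prototypes above):

	u8 addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	int rc;

	rc = qed_llh_add_mac_filter(cdev, 0 /* default ppfid */, addr);
	if (rc)
		return rc;
	/* ... filter active while traffic flows ... */
	qed_llh_remove_mac_filter(cdev, 0, addr);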
@@ -368,15 +390,16 @@ enum qed_llh_prot_filter_type_t {
};
/**
- * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the
- * given filter bank.
+ * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the
+ * given filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param type - type of filters and comparing
- * @param source_port_or_eth_type - source port or ethertype to add
- * @param dest_port - destination port to add
- * @param type - type of filters and comparing
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @type: Type of filters and comparing.
+ * @source_port_or_eth_type: Source port or ethertype to add.
+ * @dest_port: Destination port to add.
+ *
+ * Return: Int.
*/
int
qed_llh_add_protocol_filter(struct qed_dev *cdev,
@@ -385,14 +408,14 @@ qed_llh_add_protocol_filter(struct qed_dev *cdev,
u16 source_port_or_eth_type, u16 dest_port);
/**
- * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from
- * the given filter bank.
+ * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from
+ * the given filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param type - type of filters and comparing
- * @param source_port_or_eth_type - source port or ethertype to add
- * @param dest_port - destination port to add
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @type: Type of filters and comparing.
+ * @source_port_or_eth_type: Source port or ethertype to add.
+ * @dest_port: Destination port to add.
*/
void
qed_llh_remove_protocol_filter(struct qed_dev *cdev,
@@ -401,31 +424,31 @@ qed_llh_remove_protocol_filter(struct qed_dev *cdev,
u16 source_port_or_eth_type, u16 dest_port);
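The protocol-filter pair is used the same way; a sketch assuming the QED_LLH_FILTER_ETHERTYPE enumerator elided from the qed_llh_prot_filter_type_t hunk above (ETH_P_FCOE comes from <linux/if_ether.h>):

	rc = qed_llh_add_protocol_filter(cdev, 0, QED_LLH_FILTER_ETHERTYPE,
					 ETH_P_FCOE, 0 /* dest_port unused */);
	if (rc)
		return rc;
	/* ... */
	qed_llh_remove_protocol_filter(cdev, 0, QED_LLH_FILTER_ETHERTYPE,
				       ETH_P_FCOE, 0);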
/**
- * *@brief Cleanup of previous driver remains prior to load
+ * qed_final_cleanup(): Cleanup of previous driver remains prior to load.
*
- * @param p_hwfn
- * @param p_ptt
- * @param id - For PF, engine-relative. For VF, PF-relative.
- * @param is_vf - true iff cleanup is made for a VF.
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT entry.
+ * @id: For PF, engine-relative. For VF, PF-relative.
+ * @is_vf: True iff cleanup is made for a VF.
*
- * @return int
+ * Return: Int.
*/
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 id, bool is_vf);
/**
- * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue.
+ * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue.
*
- * @param p_hwfn
- * @param p_coal - store coalesce value read from the hardware.
- * @param p_handle
+ * @p_hwfn: HW device data.
+ * @coal: Store coalesce value read from the hardware.
+ * @handle: Queue handle.
*
- * @return int
+ * Return: Int.
**/
int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
/**
- * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and
+ * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and
 * Tx queue. Coalescing can be configured up to 511 usec, though with
 * decreasing accuracy [the bigger the value the less accurate], up to an
 * error of 3 usec for the highest values.
@@ -433,37 +456,38 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
 * should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff],
 * otherwise the configuration would break.
*
+ * @rx_coal: Rx Coalesce value in micro seconds.
+ * @tx_coal: TX Coalesce value in micro seconds.
+ * @p_handle: Queue handle.
*
- * @param rx_coal - Rx Coalesce value in micro seconds.
- * @param tx_coal - TX Coalesce value in micro seconds.
- * @param p_handle
- *
- * @return int
+ * Return: Int.
**/
int
qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
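Since the kernel-doc above only states the range rule, an illustrative validation helper (not from this patch) would look like:

	static int qed_check_coalesce_range(u16 rx_coal, u16 tx_coal)
	{
		/* Accuracy ranges named in the comment above. */
		static const u16 bound[] = { 0x80, 0x100, 0x200 };
		int rx_range = -1, tx_range = -1, i;

		for (i = 0; i < 3; i++) {
			if (rx_range < 0 && rx_coal < bound[i])
				rx_range = i;
			if (tx_range < 0 && tx_coal < bound[i])
				tx_range = i;
		}

		/* Reject values above 0x1ff or values in different ranges. */
		if (rx_range < 0 || tx_range < 0 || rx_range != tx_range)
			return -EINVAL;

		return 0;
	}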
/**
- * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER.
*
- * @param p_hwfn
- * @param p_ptt
- * @param b_enable - true/false
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT entry.
+ * @b_enable: True to enable, false to disable.
*
- * @return int
+ * Return: Int.
*/
int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool b_enable);
/**
- * @brief db_recovery_add - add doorbell information to the doorbell
- * recovery mechanism.
+ * qed_db_recovery_add(): Add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address of where db_data is stored
- * @param db_width - doorbell is 32b pr 64b
- * @param db_space - doorbell recovery addresses are user or kernel space
+ * Return: Int.
*/
int qed_db_recovery_add(struct qed_dev *cdev,
void __iomem *db_addr,
@@ -472,17 +496,18 @@ int qed_db_recovery_add(struct qed_dev *cdev,
enum qed_db_rec_space db_space);
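The add/del pair brackets a doorbell's lifetime, keyed by db_data. A sketch, assuming the DB_REC_WIDTH_32B and DB_REC_KERNEL enumerators from include/linux/qed/qed_if.h:

	rc = qed_db_recovery_add(cdev, db_addr, &db_data,
				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc)
		return rc;
	/* ... doorbell in use; db_data also serves as the deletion key ... */
	qed_db_recovery_del(cdev, db_addr, &db_data);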
/**
- * @brief db_recovery_del - remove doorbell information from the doorbell
+ * qed_db_recovery_del(): Remove doorbell information from the doorbell
* recovery mechanism. db_data serves as key (db_addr is not unique).
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address where db_data is stored. Serves as key for the
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address where db_data is stored. Serves as key for the
* entry to delete.
+ *
+ * Return: Int.
*/
int qed_db_recovery_del(struct qed_dev *cdev,
void __iomem *db_addr, void *db_data);
-
const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 78070682f2df..6bb4e165b592 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -215,10 +215,6 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
qdevlink = devlink_priv(dl);
qdevlink->cdev = cdev;
- rc = devlink_register(dl);
- if (rc)
- goto err_free;
-
rc = devlink_params_register(dl, qed_devlink_params,
ARRAY_SIZE(qed_devlink_params));
if (rc)
@@ -229,17 +225,13 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
QED_DEVLINK_PARAM_ID_IWARP_CMT,
value);
- devlink_params_publish(dl);
cdev->iwarp_cmt = false;
qed_fw_reporters_create(dl);
-
+ devlink_register(dl);
return dl;
err_unregister:
- devlink_unregister(dl);
-
-err_free:
devlink_free(dl);
return ERR_PTR(rc);
@@ -250,11 +242,11 @@ void qed_devlink_unregister(struct devlink *devlink)
if (!devlink)
return;
+ devlink_unregister(devlink);
qed_fw_reporters_destroy(devlink);
devlink_params_unregister(devlink, qed_devlink_params,
ARRAY_SIZE(qed_devlink_params));
- devlink_unregister(devlink);
devlink_free(devlink);
}
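The reorder above tracks the devlink core's register-last convention: devlink_register() no longer fails, so the instance is published only after its params and reporters exist, and is unpublished first on teardown. In outline (a sketch, not the driver's exact code):

	dl = devlink_alloc(&ops, sizeof(*priv), dev);
	/* ... devlink_params_register(), health reporters ... */
	devlink_register(dl);		/* setup: very last step */

	devlink_unregister(dl);		/* teardown: very first step */
	/* ... unregister params and reporters ... */
	devlink_free(dl);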
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index b768f0698170..3764190b948e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -30,6 +30,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_iro_hsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -89,7 +90,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
struct fcoe_init_ramrod_params *p_ramrod = NULL;
struct fcoe_init_func_ramrod_data *p_data;
- struct e4_fcoe_conn_context *p_cxt = NULL;
+ struct fcoe_conn_context *p_cxt = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_cxt_info cxt_info;
@@ -144,7 +145,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
memset(p_cxt, 0, sizeof(*p_cxt));
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
- E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+ TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
fcoe_pf_params->dummy_icid = (u16)dummy_cid;
@@ -506,10 +507,9 @@ static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -521,10 +521,9 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -549,7 +548,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
- struct e4_fcoe_task_context *p_task_ctx = NULL;
+ struct fcoe_task_context *p_task_ctx = NULL;
u32 i, lc;
int rc;
@@ -561,7 +560,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
if (rc)
continue;
- memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
lc = 0;
SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1);
@@ -572,7 +571,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc);
SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
- E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+ TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
}
}
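Note how the renamed constants are consumed: qed's SET_FIELD()/GET_FIELD() helpers token-paste _MASK and _SHIFT onto the field name, which is why the E4_ prefix must be dropped from the defines and from every caller in the same patch. Roughly, for the call above:

	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
		  TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
	/*
	 * expands to approximately:
	 *   flags3 &= ~(TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK <<
	 *		 TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT);
	 *   flags3 |= 1 << TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT;
	 */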
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index fb1baa2da2d0..f2cedbd9489c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#ifndef _QED_HSI_H
@@ -38,7 +38,7 @@ enum common_event_opcode {
COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_VF_FLR,
COMMON_EVENT_PF_UPDATE,
- COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_FW_ERROR,
COMMON_EVENT_RL_UPDATE,
COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
@@ -84,6 +84,13 @@ enum core_l4_pseudo_checksum_mode {
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
+/* LL2 SP error code */
+enum core_ll2_error_code {
+ LL2_OK = 0,
+ LL2_ERROR,
+ MAX_CORE_LL2_ERROR_CODE
+};
+
/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
@@ -123,6 +130,15 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+struct core_ll2_rx_per_queue_stat {
+ struct core_ll2_tstorm_per_queue_stat tstorm_stat;
+ struct core_ll2_ustorm_per_queue_stat ustorm_stat;
+};
+
+struct core_ll2_tx_per_queue_stat {
+ struct core_ll2_pstorm_per_queue_stat pstorm_stat;
+};
+
/* Structure for doorbell data, in PWM mode, for RX producers update. */
struct core_pwm_prod_update_data {
__le16 icid; /* internal CID */
@@ -135,6 +151,15 @@ struct core_pwm_prod_update_data {
struct core_ll2_rx_prod prod; /* Producers */
};
+/* Ramrod data for rx/tx queue statistics query ramrod */
+struct core_queue_stats_query_ramrod_data {
+ u8 rx_stat;
+ u8 tx_stat;
+ __le16 reserved[3];
+ struct regpair rx_stat_addr;
+ struct regpair tx_stat_addr;
+};
+
/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
@@ -210,7 +235,8 @@ struct core_rx_fast_path_cqe {
__le16 vlan;
struct core_rx_cqe_opaque_data opaque_data;
struct parsing_err_flags err_flags;
- __le16 reserved0;
+ u8 packet_source;
+ u8 reserved0;
__le32 reserved1[3];
};
@@ -226,7 +252,8 @@ struct core_rx_gsi_offload_cqe {
__le16 qp_id;
__le32 src_qp;
struct core_rx_cqe_opaque_data opaque_data;
- __le32 reserved;
+ u8 packet_source;
+ u8 reserved[3];
};
/* Core RX CQE for Light L2 */
@@ -245,6 +272,15 @@ union core_rx_cqe_union {
struct core_rx_slow_path_cqe rx_cqe_sp;
};
+/* RX packet source. */
+enum core_rx_pkt_source {
+ CORE_RX_PKT_SOURCE_NETWORK = 0,
+ CORE_RX_PKT_SOURCE_LB,
+ CORE_RX_PKT_SOURCE_TX,
+ CORE_RX_PKT_SOURCE_LL2_TX,
+ MAX_CORE_RX_PKT_SOURCE
+};
+
/* Ramrod data for rx queue start ramrod */
struct core_rx_start_ramrod_data {
struct regpair bd_base;
@@ -362,7 +398,7 @@ struct core_tx_update_ramrod_data {
u8 update_qm_pq_id_flg;
u8 reserved0;
__le16 qm_pq_id;
- __le32 reserved1;
+ __le32 reserved1[1];
};
/* Enum flag for what type of dcb data to update */
@@ -386,224 +422,222 @@ struct pstorm_core_conn_st_ctx {
/* Core Slowpath Connection storm context of Xstorm */
struct xstorm_core_conn_st_ctx {
- __le32 spq_base_lo;
- __le32 spq_base_hi;
- struct regpair consolid_base_addr;
+ struct regpair spq_base_addr;
+ __le32 reserved0[2];
__le16 spq_cons;
- __le16 consolid_cons;
- __le32 reserved0[55];
+ __le16 reserved1[111];
};
-struct e4_xstorm_core_conn_ag_ctx {
+struct xstorm_core_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 consolid_prod;
@@ -657,89 +691,89 @@ struct e4_xstorm_core_conn_ag_ctx {
__le16 word15;
};
-struct e4_tstorm_core_conn_ag_ctx {
+struct tstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -761,63 +795,63 @@ struct e4_tstorm_core_conn_ag_ctx {
__le32 reg10;
};
-struct e4_ustorm_core_conn_ag_ctx {
+struct ustorm_core_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -846,15 +880,15 @@ struct tstorm_core_conn_st_ctx {
};
/* core connection context */
-struct e4_core_conn_context {
+struct core_conn_context {
struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_core_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_core_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
- struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
struct mstorm_core_conn_st_ctx mstorm_st_context;
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
@@ -930,12 +964,12 @@ struct eth_rx_rate_limit {
/* Update RSS indirection table entry command */
struct eth_tstorm_rss_update_data {
- u8 valid;
u8 vport_id;
u8 ind_table_index;
- u8 reserved;
__le16 ind_table_value;
__le16 reserved1;
+ u8 reserved;
+ u8 valid;
};
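The shuffle above moves valid to the structure's last byte. The patch does not state why, but a plausible reading is that the producer can fill the payload first and raise the flag last, e.g. (hypothetical usage, not in this patch):

	data->vport_id = vport_id;
	data->ind_table_index = ind_index;
	data->ind_table_value = cpu_to_le16(ind_value);
	wmb();			/* payload visible before the flag */
	data->valid = 1;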
struct eth_ustorm_per_pf_stat {
@@ -967,19 +1001,20 @@ struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
-/* Event Ring malicious VF data */
-struct malicious_vf_eqe_data {
- u8 vf_id;
- u8 err_id;
- __le16 reserved[3];
-};
-
/* Event Ring initial cleanup data */
struct initial_cleanup_eqe_data {
u8 vf_id;
u8 reserved[7];
};
+/* FW error data */
+struct fw_err_data {
+ u8 recovery_scope;
+ u8 err_id;
+ __le16 entity_id;
+ u8 reserved[4];
+};
+
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
@@ -987,8 +1022,8 @@ union event_ring_data {
struct iscsi_eqe_data iscsi_info;
struct iscsi_connect_done_results iscsi_conn_done_info;
union rdma_eqe_data rdma_data;
- struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
+ struct fw_err_data err_data;
};
/* Event Ring Entry */
@@ -1042,6 +1077,15 @@ struct hsi_fp_ver_struct {
u8 major_ver_arr[2];
};
+/* Integration Phase */
+enum integ_phase {
+ INTEG_PHASE_BB_A0_LATEST = 3,
+ INTEG_PHASE_BB_B0_NO_MCP = 10,
+ INTEG_PHASE_BB_B0_WITH_MCP = 11,
+ MAX_INTEG_PHASE
+};
+
+/* iWARP LL2 Tx queues */
enum iwarp_ll2_tx_queues {
IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
IWARP_LL2_ALIGNED_TX_QUEUE,
@@ -1050,9 +1094,9 @@ enum iwarp_ll2_tx_queues {
MAX_IWARP_LL2_TX_QUEUES
};
-/* Malicious VF error ID */
-enum malicious_vf_error_id {
- MALICIOUS_VF_NO_ERROR,
+/* Function error ID */
+enum func_err_id {
+ FUNC_NO_ERROR,
VF_PF_CHANNEL_NOT_READY,
VF_ZONE_MSG_NOT_VALID,
VF_ZONE_FUNC_NOT_ENABLED,
@@ -1087,13 +1131,33 @@ enum malicious_vf_error_id {
CORE_PACKET_SIZE_TOO_LARGE,
CORE_ILLEGAL_BD_FLAGS,
CORE_GSI_PACKET_VIOLATION,
- MAX_MALICIOUS_VF_ERROR_ID,
+ MAX_FUNC_ERR_ID
+};
+
+/* FW error handling mode */
+enum fw_err_mode {
+ FW_ERR_FATAL_ASSERT,
+ FW_ERR_DRV_REPORT,
+ MAX_FW_ERR_MODE
+};
+
+/* FW error recovery scope */
+enum fw_err_recovery_scope {
+ ERR_SCOPE_INVALID,
+ ERR_SCOPE_TX_Q,
+ ERR_SCOPE_RX_Q,
+ ERR_SCOPE_QP,
+ ERR_SCOPE_VPORT,
+ ERR_SCOPE_FUNC,
+ ERR_SCOPE_PORT,
+ ERR_SCOPE_ENGINE,
+ MAX_FW_ERR_RECOVERY_SCOPE
};
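Taken together, fw_err_data and fw_err_recovery_scope define an error-reporting contract: the firmware names the failing entity and the blast radius of the recovery. An illustrative consumer (not part of this patch) could dispatch on the scope:

	static void example_handle_fw_err(struct fw_err_data *err)
	{
		switch (err->recovery_scope) {
		case ERR_SCOPE_TX_Q:
		case ERR_SCOPE_RX_Q:
		case ERR_SCOPE_QP:
			/* narrow scope: recover only le16_to_cpu(err->entity_id) */
			break;
		case ERR_SCOPE_FUNC:
		case ERR_SCOPE_ENGINE:
			/* wide scope: escalate to function/engine recovery */
			break;
		default:
			break;
		}
	}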
/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
- struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_RXQ_VF_QUAD];
};
/* Mstorm VF zone */
@@ -1148,7 +1212,7 @@ struct pf_start_tunnel_config {
/* Ramrod data for PF start ramrod */
struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr;
- struct regpair consolid_q_pbl_addr;
+ struct regpair consolid_q_pbl_base_addr;
struct pf_start_tunnel_config tunnel_config;
__le16 event_ring_sb_id;
u8 base_vf_id;
@@ -1166,6 +1230,9 @@ struct pf_start_ramrod_data {
u8 reserved0;
struct hsi_fp_ver_struct hsi_fp_ver;
struct outer_tag_config_struct outer_tag_config;
+ u8 pf_fp_err_mode;
+ u8 consolid_q_num_pages;
+ u8 reserved[6];
};
/* Data for port update ramrod */
@@ -1230,6 +1297,13 @@ enum ports_mode {
MAX_PORTS_MODE
};
+/* Protocol-common error code */
+enum protocol_common_error_code {
+ COMMON_ERR_CODE_OK = 0,
+ COMMON_ERR_CODE_ERROR,
+ MAX_PROTOCOL_COMMON_ERROR_CODE
+};
+
/* use to index in hsi_fp_[major|minor]_ver_arr per protocol */
enum protocol_version_array_key {
ETH_VER_KEY = 0,
@@ -1525,74 +1599,74 @@ enum dmae_cmd_src_enum {
MAX_DMAE_CMD_SRC_ENUM
};
-struct e4_mstorm_core_conn_ag_ctx {
+struct mstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_ystorm_core_conn_ag_ctx {
+struct ystorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -1704,6 +1778,7 @@ struct igu_msix_vector {
#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
+
/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {
u8 flags;
@@ -1778,22 +1853,22 @@ struct qm_rf_opportunistic_mask {
};
/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map_e4 {
+struct qm_rf_pq_map {
__le32 reg;
-#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF
-#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F
-#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3
-#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
};
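
/*
 * Field-access sketch (illustrative; rl_id, voq and map are hypothetical
 * locals), assuming the driver's usual SET_FIELD()/GET_FIELD() helpers
 * built on the _MASK/_SHIFT pairs above:
 *
 *	struct qm_rf_pq_map map;
 *	u32 reg = 0;
 *
 *	SET_FIELD(reg, QM_RF_PQ_MAP_PQ_VALID, 1);
 *	SET_FIELD(reg, QM_RF_PQ_MAP_RL_ID, rl_id);
 *	SET_FIELD(reg, QM_RF_PQ_MAP_VOQ, voq);
 *	map.reg = cpu_to_le32(reg);
 *
 *	rl_id = GET_FIELD(le32_to_cpu(map.reg), QM_RF_PQ_MAP_RL_ID);
 */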
/* Completion params for aggregated interrupt completion */
@@ -1831,769 +1906,6 @@ struct virt_mem_desc {
u32 size; /* In bytes */
};
-/****************************************/
-/* Debug Tools HSI constants and macros */
-/****************************************/
-
-enum block_id {
- BLOCK_GRC,
- BLOCK_MISCS,
- BLOCK_MISC,
- BLOCK_DBU,
- BLOCK_PGLUE_B,
- BLOCK_CNIG,
- BLOCK_CPMU,
- BLOCK_NCSI,
- BLOCK_OPTE,
- BLOCK_BMB,
- BLOCK_PCIE,
- BLOCK_MCP,
- BLOCK_MCP2,
- BLOCK_PSWHST,
- BLOCK_PSWHST2,
- BLOCK_PSWRD,
- BLOCK_PSWRD2,
- BLOCK_PSWWR,
- BLOCK_PSWWR2,
- BLOCK_PSWRQ,
- BLOCK_PSWRQ2,
- BLOCK_PGLCS,
- BLOCK_DMAE,
- BLOCK_PTU,
- BLOCK_TCM,
- BLOCK_MCM,
- BLOCK_UCM,
- BLOCK_XCM,
- BLOCK_YCM,
- BLOCK_PCM,
- BLOCK_QM,
- BLOCK_TM,
- BLOCK_DORQ,
- BLOCK_BRB,
- BLOCK_SRC,
- BLOCK_PRS,
- BLOCK_TSDM,
- BLOCK_MSDM,
- BLOCK_USDM,
- BLOCK_XSDM,
- BLOCK_YSDM,
- BLOCK_PSDM,
- BLOCK_TSEM,
- BLOCK_MSEM,
- BLOCK_USEM,
- BLOCK_XSEM,
- BLOCK_YSEM,
- BLOCK_PSEM,
- BLOCK_RSS,
- BLOCK_TMLD,
- BLOCK_MULD,
- BLOCK_YULD,
- BLOCK_XYLD,
- BLOCK_PRM,
- BLOCK_PBF_PB1,
- BLOCK_PBF_PB2,
- BLOCK_RPB,
- BLOCK_BTB,
- BLOCK_PBF,
- BLOCK_RDIF,
- BLOCK_TDIF,
- BLOCK_CDU,
- BLOCK_CCFC,
- BLOCK_TCFC,
- BLOCK_IGU,
- BLOCK_CAU,
- BLOCK_UMAC,
- BLOCK_XMAC,
- BLOCK_MSTAT,
- BLOCK_DBG,
- BLOCK_NIG,
- BLOCK_WOL,
- BLOCK_BMBN,
- BLOCK_IPC,
- BLOCK_NWM,
- BLOCK_NWS,
- BLOCK_MS,
- BLOCK_PHY_PCIE,
- BLOCK_LED,
- BLOCK_AVS_WRAP,
- BLOCK_PXPREQBUS,
- BLOCK_BAR0_MAP,
- BLOCK_MCP_FIO,
- BLOCK_LAST_INIT,
- BLOCK_PRS_FC,
- BLOCK_PBF_FC,
- BLOCK_NIG_LB_FC,
- BLOCK_NIG_LB_FC_PLLH,
- BLOCK_NIG_TX_FC_PLLH,
- BLOCK_NIG_TX_FC,
- BLOCK_NIG_RX_FC_PLLH,
- BLOCK_NIG_RX_FC,
- MAX_BLOCK_ID
-};
-
-/* binary debug buffer types */
-enum bin_dbg_buffer_type {
- BIN_BUF_DBG_MODE_TREE,
- BIN_BUF_DBG_DUMP_REG,
- BIN_BUF_DBG_DUMP_MEM,
- BIN_BUF_DBG_IDLE_CHK_REGS,
- BIN_BUF_DBG_IDLE_CHK_IMMS,
- BIN_BUF_DBG_IDLE_CHK_RULES,
- BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
- BIN_BUF_DBG_ATTN_BLOCKS,
- BIN_BUF_DBG_ATTN_REGS,
- BIN_BUF_DBG_ATTN_INDEXES,
- BIN_BUF_DBG_ATTN_NAME_OFFSETS,
- BIN_BUF_DBG_BLOCKS,
- BIN_BUF_DBG_BLOCKS_CHIP_DATA,
- BIN_BUF_DBG_BUS_LINES,
- BIN_BUF_DBG_BLOCKS_USER_DATA,
- BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
- BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
- BIN_BUF_DBG_RESET_REGS,
- BIN_BUF_DBG_PARSING_STRINGS,
- MAX_BIN_DBG_BUFFER_TYPE
-};
-
-
-/* Attention bit mapping */
-struct dbg_attn_bit_mapping {
- u16 data;
-#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
-#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
-#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
-#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
-};
-
-/* Attention block per-type data */
-struct dbg_attn_block_type_data {
- u16 names_offset;
- u16 reserved1;
- u8 num_regs;
- u8 reserved2;
- u16 regs_offset;
-
-};
-
-/* Block attentions */
-struct dbg_attn_block {
- struct dbg_attn_block_type_data per_type_data[2];
-};
-
-/* Attention register result */
-struct dbg_attn_reg_result {
- u32 data;
-#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
-#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
- u16 block_attn_offset;
- u16 reserved;
- u32 sts_val;
- u32 mask_val;
-};
-
-/* Attention block result */
-struct dbg_attn_block_result {
- u8 block_id;
- u8 data;
-#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
-#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
-#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
-#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
- u16 names_offset;
- struct dbg_attn_reg_result reg_results[15];
-};
-
-/* Mode header */
-struct dbg_mode_hdr {
- u16 data;
-#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
-#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
-#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
-#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
-};
-
-/* Attention register */
-struct dbg_attn_reg {
- struct dbg_mode_hdr mode;
- u16 block_attn_offset;
- u32 data;
-#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
-#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
-#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
- u32 sts_clr_address;
- u32 mask_address;
-};
-
-/* Attention types */
-enum dbg_attn_type {
- ATTN_TYPE_INTERRUPT,
- ATTN_TYPE_PARITY,
- MAX_DBG_ATTN_TYPE
-};
-
-/* Block debug data */
-struct dbg_block {
- u8 name[15];
- u8 associated_storm_letter;
-};
-
-/* Chip-specific block debug data */
-struct dbg_block_chip {
- u8 flags;
-#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
-#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
-#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
-#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
-#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
-#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
-#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
-#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
-#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
- u8 dbg_client_id;
- u8 reset_reg_id;
- u8 reset_reg_bit_offset;
- struct dbg_mode_hdr dbg_bus_mode;
- u16 reserved1;
- u8 reserved2;
- u8 num_of_dbg_bus_lines;
- u16 dbg_bus_lines_offset;
- u32 dbg_select_reg_addr;
- u32 dbg_dword_enable_reg_addr;
- u32 dbg_shift_reg_addr;
- u32 dbg_force_valid_reg_addr;
- u32 dbg_force_frame_reg_addr;
-};
-
-/* Chip-specific block user debug data */
-struct dbg_block_chip_user {
- u8 num_of_dbg_bus_lines;
- u8 has_latency_events;
- u16 names_offset;
-};
-
-/* Block user debug data */
-struct dbg_block_user {
- u8 name[16];
-};
-
-/* Block Debug line data */
-struct dbg_bus_line {
- u8 data;
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
-#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
-#define DBG_BUS_LINE_IS_256B_MASK 0x1
-#define DBG_BUS_LINE_IS_256B_SHIFT 4
-#define DBG_BUS_LINE_RESERVED_MASK 0x7
-#define DBG_BUS_LINE_RESERVED_SHIFT 5
- u8 group_sizes;
-};
-
-/* Condition header for registers dump */
-struct dbg_dump_cond_hdr {
- struct dbg_mode_hdr mode; /* Mode header */
- u8 block_id; /* block ID */
- u8 data_size; /* size in dwords of the data following this header */
-};
-
-/* Memory data for registers dump */
-struct dbg_dump_mem {
- u32 dword0;
-#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
-#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
-#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
- u32 dword1;
-#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
-#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
-#define DBG_DUMP_MEM_RESERVED_SHIFT 25
-};
-
-/* Register data for registers dump */
-struct dbg_dump_reg {
- u32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
-};
-
-/* Split header for registers dump */
-struct dbg_dump_split_hdr {
- u32 hdr;
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
-};
-
-/* Condition header for idle check */
-struct dbg_idle_chk_cond_hdr {
- struct dbg_mode_hdr mode; /* Mode header */
- u16 data_size; /* size in dwords of the data following this header */
-};
-
-/* Idle Check condition register */
-struct dbg_idle_chk_cond_reg {
- u32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
- u16 num_entries;
- u8 entry_size;
- u8 start_entry;
-};
-
-/* Idle Check info register */
-struct dbg_idle_chk_info_reg {
- u32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
- u16 size; /* register size in dwords */
- struct dbg_mode_hdr mode; /* Mode header */
-};
-
-/* Idle Check register */
-union dbg_idle_chk_reg {
- struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
- struct dbg_idle_chk_info_reg info_reg; /* info register */
-};
-
-/* Idle Check result header */
-struct dbg_idle_chk_result_hdr {
- u16 rule_id; /* Failing rule index */
- u16 mem_entry_id; /* Failing memory entry index */
- u8 num_dumped_cond_regs; /* number of dumped condition registers */
- u8 num_dumped_info_regs; /* number of dumped condition registers */
- u8 severity; /* from dbg_idle_chk_severity_types enum */
- u8 reserved;
-};
-
-/* Idle Check result register header */
-struct dbg_idle_chk_result_reg_hdr {
- u8 data;
-#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
-#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
-#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
-#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
- u8 start_entry; /* index of the first checked entry */
- u16 size; /* register size in dwords */
-};
-
-/* Idle Check rule */
-struct dbg_idle_chk_rule {
- u16 rule_id; /* Idle Check rule ID */
- u8 severity; /* value from dbg_idle_chk_severity_types enum */
- u8 cond_id; /* Condition ID */
- u8 num_cond_regs; /* number of condition registers */
- u8 num_info_regs; /* number of info registers */
- u8 num_imms; /* number of immediates in the condition */
- u8 reserved1;
- u16 reg_offset; /* offset of this rules registers in the idle check
- * register array (in dbg_idle_chk_reg units).
- */
- u16 imm_offset; /* offset of this rules immediate values in the
- * immediate values array (in dwords).
- */
-};
-
-/* Idle Check rule parsing data */
-struct dbg_idle_chk_rule_parsing_data {
- u32 data;
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
-};
-
-/* Idle check severity types */
-enum dbg_idle_chk_severity_types {
- /* idle check failure should cause an error */
- IDLE_CHK_SEVERITY_ERROR,
- /* idle check failure should cause an error only if theres no traffic */
- IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
- /* idle check failure should cause a warning */
- IDLE_CHK_SEVERITY_WARNING,
- MAX_DBG_IDLE_CHK_SEVERITY_TYPES
-};
-
-/* Reset register */
-struct dbg_reset_reg {
- u32 data;
-#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
-#define DBG_RESET_REG_ADDR_SHIFT 0
-#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
-#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
-#define DBG_RESET_REG_RESERVED_MASK 0x7F
-#define DBG_RESET_REG_RESERVED_SHIFT 25
-};
-
-/* Debug Bus block data */
-struct dbg_bus_block_data {
- u8 enable_mask;
- u8 right_shift;
- u8 force_valid_mask;
- u8 force_frame_mask;
- u8 dword_mask;
- u8 line_num;
- u8 hw_id;
- u8 flags;
-#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
-#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
-#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
-};
-
-enum dbg_bus_clients {
- DBG_BUS_CLIENT_RBCN,
- DBG_BUS_CLIENT_RBCP,
- DBG_BUS_CLIENT_RBCR,
- DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCF,
- DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCZ,
- DBG_BUS_CLIENT_OTHER_ENGINE,
- DBG_BUS_CLIENT_TIMESTAMP,
- DBG_BUS_CLIENT_CPU,
- DBG_BUS_CLIENT_RBCY,
- DBG_BUS_CLIENT_RBCQ,
- DBG_BUS_CLIENT_RBCM,
- DBG_BUS_CLIENT_RBCB,
- DBG_BUS_CLIENT_RBCW,
- DBG_BUS_CLIENT_RBCV,
- MAX_DBG_BUS_CLIENTS
-};
-
-/* Debug Bus constraint operation types */
-enum dbg_bus_constraint_ops {
- DBG_BUS_CONSTRAINT_OP_EQ,
- DBG_BUS_CONSTRAINT_OP_NE,
- DBG_BUS_CONSTRAINT_OP_LT,
- DBG_BUS_CONSTRAINT_OP_LTC,
- DBG_BUS_CONSTRAINT_OP_LE,
- DBG_BUS_CONSTRAINT_OP_LEC,
- DBG_BUS_CONSTRAINT_OP_GT,
- DBG_BUS_CONSTRAINT_OP_GTC,
- DBG_BUS_CONSTRAINT_OP_GE,
- DBG_BUS_CONSTRAINT_OP_GEC,
- MAX_DBG_BUS_CONSTRAINT_OPS
-};
-
-/* Debug Bus trigger state data */
-struct dbg_bus_trigger_state_data {
- u8 msg_len;
- u8 constraint_dword_mask;
- u8 storm_id;
- u8 reserved;
-};
-
-/* Debug Bus memory address */
-struct dbg_bus_mem_addr {
- u32 lo;
- u32 hi;
-};
-
-/* Debug Bus PCI buffer data */
-struct dbg_bus_pci_buf_data {
- struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
- struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
- u32 size; /* PCI buffer size in bytes */
-};
-
-/* Debug Bus Storm EID range filter params */
-struct dbg_bus_storm_eid_range_params {
- u8 min; /* Minimal event ID to filter on */
- u8 max; /* Maximal event ID to filter on */
-};
-
-/* Debug Bus Storm EID mask filter params */
-struct dbg_bus_storm_eid_mask_params {
- u8 val; /* Event ID value */
- u8 mask; /* Event ID mask. 1s in the mask = dont care bits. */
-};
-
-/* Debug Bus Storm EID filter params */
-union dbg_bus_storm_eid_params {
- struct dbg_bus_storm_eid_range_params range;
- struct dbg_bus_storm_eid_mask_params mask;
-};
-
-/* Debug Bus Storm data */
-struct dbg_bus_storm_data {
- u8 enabled;
- u8 mode;
- u8 hw_id;
- u8 eid_filter_en;
- u8 eid_range_not_mask;
- u8 cid_filter_en;
- union dbg_bus_storm_eid_params eid_filter_params;
- u32 cid;
-};
-
-/* Debug Bus data */
-struct dbg_bus_data {
- u32 app_version;
- u8 state;
- u8 mode_256b_en;
- u8 num_enabled_blocks;
- u8 num_enabled_storms;
- u8 target;
- u8 one_shot_en;
- u8 grc_input_en;
- u8 timestamp_input_en;
- u8 filter_en;
- u8 adding_filter;
- u8 filter_pre_trigger;
- u8 filter_post_trigger;
- u8 trigger_en;
- u8 filter_constraint_dword_mask;
- u8 next_trigger_state;
- u8 next_constraint_id;
- struct dbg_bus_trigger_state_data trigger_states[3];
- u8 filter_msg_len;
- u8 rcv_from_other_engine;
- u8 blocks_dword_mask;
- u8 blocks_dword_overlap;
- u32 hw_id_mask;
- struct dbg_bus_pci_buf_data pci_buf;
- struct dbg_bus_block_data blocks[132];
- struct dbg_bus_storm_data storms[6];
-};
-
-/* Debug bus states */
-enum dbg_bus_states {
- DBG_BUS_STATE_IDLE,
- DBG_BUS_STATE_READY,
- DBG_BUS_STATE_RECORDING,
- DBG_BUS_STATE_STOPPED,
- MAX_DBG_BUS_STATES
-};
-
-/* Debug Bus Storm modes */
-enum dbg_bus_storm_modes {
- DBG_BUS_STORM_MODE_PRINTF,
- DBG_BUS_STORM_MODE_PRAM_ADDR,
- DBG_BUS_STORM_MODE_DRA_RW,
- DBG_BUS_STORM_MODE_DRA_W,
- DBG_BUS_STORM_MODE_LD_ST_ADDR,
- DBG_BUS_STORM_MODE_DRA_FSM,
- DBG_BUS_STORM_MODE_FAST_DBGMUX,
- DBG_BUS_STORM_MODE_RH,
- DBG_BUS_STORM_MODE_RH_WITH_STORE,
- DBG_BUS_STORM_MODE_FOC,
- DBG_BUS_STORM_MODE_EXT_STORE,
- MAX_DBG_BUS_STORM_MODES
-};
-
-/* Debug bus target IDs */
-enum dbg_bus_targets {
- DBG_BUS_TARGET_ID_INT_BUF,
- DBG_BUS_TARGET_ID_NIG,
- DBG_BUS_TARGET_ID_PCI,
- MAX_DBG_BUS_TARGETS
-};
-
-/* GRC Dump data */
-struct dbg_grc_data {
- u8 params_initialized;
- u8 reserved1;
- u16 reserved2;
- u32 param_val[48];
-};
-
-/* Debug GRC params */
-enum dbg_grc_params {
- DBG_GRC_PARAM_DUMP_TSTORM,
- DBG_GRC_PARAM_DUMP_MSTORM,
- DBG_GRC_PARAM_DUMP_USTORM,
- DBG_GRC_PARAM_DUMP_XSTORM,
- DBG_GRC_PARAM_DUMP_YSTORM,
- DBG_GRC_PARAM_DUMP_PSTORM,
- DBG_GRC_PARAM_DUMP_REGS,
- DBG_GRC_PARAM_DUMP_RAM,
- DBG_GRC_PARAM_DUMP_PBUF,
- DBG_GRC_PARAM_DUMP_IOR,
- DBG_GRC_PARAM_DUMP_VFC,
- DBG_GRC_PARAM_DUMP_CM_CTX,
- DBG_GRC_PARAM_DUMP_PXP,
- DBG_GRC_PARAM_DUMP_RSS,
- DBG_GRC_PARAM_DUMP_CAU,
- DBG_GRC_PARAM_DUMP_QM,
- DBG_GRC_PARAM_DUMP_MCP,
- DBG_GRC_PARAM_DUMP_DORQ,
- DBG_GRC_PARAM_DUMP_CFC,
- DBG_GRC_PARAM_DUMP_IGU,
- DBG_GRC_PARAM_DUMP_BRB,
- DBG_GRC_PARAM_DUMP_BTB,
- DBG_GRC_PARAM_DUMP_BMB,
- DBG_GRC_PARAM_RESERVD1,
- DBG_GRC_PARAM_DUMP_MULD,
- DBG_GRC_PARAM_DUMP_PRS,
- DBG_GRC_PARAM_DUMP_DMAE,
- DBG_GRC_PARAM_DUMP_TM,
- DBG_GRC_PARAM_DUMP_SDM,
- DBG_GRC_PARAM_DUMP_DIF,
- DBG_GRC_PARAM_DUMP_STATIC,
- DBG_GRC_PARAM_UNSTALL,
- DBG_GRC_PARAM_RESERVED2,
- DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
- DBG_GRC_PARAM_EXCLUDE_ALL,
- DBG_GRC_PARAM_CRASH,
- DBG_GRC_PARAM_PARITY_SAFE,
- DBG_GRC_PARAM_DUMP_CM,
- DBG_GRC_PARAM_DUMP_PHY,
- DBG_GRC_PARAM_NO_MCP,
- DBG_GRC_PARAM_NO_FW_VER,
- DBG_GRC_PARAM_RESERVED3,
- DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
- DBG_GRC_PARAM_DUMP_ILT_CDUC,
- DBG_GRC_PARAM_DUMP_ILT_CDUT,
- DBG_GRC_PARAM_DUMP_CAU_EXT,
- MAX_DBG_GRC_PARAMS
-};
-
-/* Debug status codes */
-enum dbg_status {
- DBG_STATUS_OK,
- DBG_STATUS_APP_VERSION_NOT_SET,
- DBG_STATUS_UNSUPPORTED_APP_VERSION,
- DBG_STATUS_DBG_BLOCK_NOT_RESET,
- DBG_STATUS_INVALID_ARGS,
- DBG_STATUS_OUTPUT_ALREADY_SET,
- DBG_STATUS_INVALID_PCI_BUF_SIZE,
- DBG_STATUS_PCI_BUF_ALLOC_FAILED,
- DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
- DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
- DBG_STATUS_NO_MATCHING_FRAMING_MODE,
- DBG_STATUS_VFC_READ_ERROR,
- DBG_STATUS_STORM_ALREADY_ENABLED,
- DBG_STATUS_STORM_NOT_ENABLED,
- DBG_STATUS_BLOCK_ALREADY_ENABLED,
- DBG_STATUS_BLOCK_NOT_ENABLED,
- DBG_STATUS_NO_INPUT_ENABLED,
- DBG_STATUS_NO_FILTER_TRIGGER_256B,
- DBG_STATUS_FILTER_ALREADY_ENABLED,
- DBG_STATUS_TRIGGER_ALREADY_ENABLED,
- DBG_STATUS_TRIGGER_NOT_ENABLED,
- DBG_STATUS_CANT_ADD_CONSTRAINT,
- DBG_STATUS_TOO_MANY_TRIGGER_STATES,
- DBG_STATUS_TOO_MANY_CONSTRAINTS,
- DBG_STATUS_RECORDING_NOT_STARTED,
- DBG_STATUS_DATA_DIDNT_TRIGGER,
- DBG_STATUS_NO_DATA_RECORDED,
- DBG_STATUS_DUMP_BUF_TOO_SMALL,
- DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
- DBG_STATUS_UNKNOWN_CHIP,
- DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
- DBG_STATUS_BLOCK_IN_RESET,
- DBG_STATUS_INVALID_TRACE_SIGNATURE,
- DBG_STATUS_INVALID_NVRAM_BUNDLE,
- DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
- DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
- DBG_STATUS_NVRAM_READ_FAILED,
- DBG_STATUS_IDLE_CHK_PARSE_FAILED,
- DBG_STATUS_MCP_TRACE_BAD_DATA,
- DBG_STATUS_MCP_TRACE_NO_META,
- DBG_STATUS_MCP_COULD_NOT_HALT,
- DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_RESERVED0,
- DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
- DBG_STATUS_IGU_FIFO_BAD_DATA,
- DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
- DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
- DBG_STATUS_REG_FIFO_BAD_DATA,
- DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
- DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_RESERVED1,
- DBG_STATUS_NON_MATCHING_LINES,
- DBG_STATUS_INSUFFICIENT_HW_IDS,
- DBG_STATUS_DBG_BUS_IN_USE,
- DBG_STATUS_INVALID_STORM_DBG_MODE,
- DBG_STATUS_OTHER_ENGINE_BB_ONLY,
- DBG_STATUS_FILTER_SINGLE_HW_ID,
- DBG_STATUS_TRIGGER_SINGLE_HW_ID,
- DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
- MAX_DBG_STATUS
-};
-
-/* Debug Storms IDs */
-enum dbg_storms {
- DBG_TSTORM_ID,
- DBG_MSTORM_ID,
- DBG_USTORM_ID,
- DBG_XSTORM_ID,
- DBG_YSTORM_ID,
- DBG_PSTORM_ID,
- MAX_DBG_STORMS
-};
-
-/* Idle Check data */
-struct idle_chk_data {
- u32 buf_size;
- u8 buf_size_set;
- u8 reserved1;
- u16 reserved2;
-};
-
-struct pretend_params {
- u8 split_type;
- u8 reserved;
- u16 split_id;
-};
-
-/* Debug Tools data (per HW function)
- */
-struct dbg_tools_data {
- struct dbg_grc_data grc;
- struct dbg_bus_data bus;
- struct idle_chk_data idle_chk;
- u8 mode_enable[40];
- u8 block_in_reset[132];
- u8 chip_id;
- u8 hw_type;
- u8 num_ports;
- u8 num_pfs_per_port;
- u8 num_vfs;
- u8 initialized;
- u8 use_dmae;
- u8 reserved;
- struct pretend_params pretend;
- u32 num_regs_read;
-};
-
-/* ILT Clients */
-enum ilt_clients {
- ILT_CLI_CDUC,
- ILT_CLI_CDUT,
- ILT_CLI_QM,
- ILT_CLI_TM,
- ILT_CLI_SRC,
- ILT_CLI_TSDM,
- ILT_CLI_RGFS,
- ILT_CLI_TGFS,
- MAX_ILT_CLIENTS
-};
-
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -2644,6 +1956,9 @@ struct init_nig_pri_tc_map_req {
/* QM per global RL init parameters */
struct init_qm_global_rl_params {
+ u8 type;
+ u8 reserved0;
+ u16 reserved1;
u32 rate_limit;
};
@@ -2658,18 +1973,33 @@ struct init_qm_port_params {
/* QM per-PQ init parameters */
struct init_qm_pq_params {
- u8 vport_id;
+ u16 vport_id;
+ u16 rl_id;
+ u8 rl_valid;
u8 tc_id;
u8 wrr_group;
- u8 rl_valid;
- u16 rl_id;
u8 port_id;
- u8 reserved;
+};
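+
+/*
+ * Initialization sketch (illustrative; all field values hypothetical):
+ * a PF PQ on vport 0, TC 0, port 0, with no rate limiter attached:
+ *
+ *	struct init_qm_pq_params pq = {
+ *		.vport_id = 0,
+ *		.rl_valid = 0,
+ *		.tc_id = 0,
+ *		.wrr_group = 1,
+ *		.port_id = 0,
+ *	};
+ */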
+
+/* QM per RL init parameters */
+struct init_qm_rl_params {
+ u32 vport_rl;
+ u8 vport_rl_type;
+ u8 reserved[3];
+};
+
+/* QM Rate Limiter types */
+enum init_qm_rl_type {
+ QM_RL_TYPE_NORMAL,
+ QM_RL_TYPE_QCN,
+ MAX_INIT_QM_RL_TYPE
};
/* QM per-vport init parameters */
struct init_qm_vport_params {
u16 wfq;
+ u16 reserved;
+ u16 tc_wfq[NUM_OF_TCS];
u16 first_tx_pq_id[NUM_OF_TCS];
};
@@ -2728,14 +2058,14 @@ struct fw_info_location {
};
enum init_modes {
- MODE_RESERVED,
+ MODE_BB_A0_DEPRECATED,
MODE_BB,
MODE_K2,
MODE_ASIC,
- MODE_RESERVED2,
- MODE_RESERVED3,
- MODE_RESERVED4,
- MODE_RESERVED5,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
@@ -2743,8 +2073,8 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_RESERVED6,
- MODE_RESERVED7,
+ MODE_SKIP_PRAM_INIT,
+ MODE_EMUL_MAC,
MAX_INIT_MODES
};
@@ -3009,706 +2339,6 @@ struct iro {
u16 size;
};
-/***************************** Public Functions *******************************/
-
-/**
- * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
- * arrays.
- *
- * @param p_hwfn - HW device data
- * @param bin_ptr - a pointer to the binary data with debug arrays.
- */
-enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
- const u8 * const bin_ptr);
-
-/**
- * @brief qed_read_regs - Reads registers into a buffer (using GRC).
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf - Destination buffer.
- * @param addr - Source GRC address in dwords.
- * @param len - Number of registers to read.
- */
-void qed_read_regs(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
-
-/**
- * @brief qed_read_fw_info - Reads FW info from the chip.
- *
- * The FW info contains FW-related information, such as the FW version,
- * FW image (main/L2B/kuku), FW timestamp, etc.
- * The FW info is read from the internal RAM of the first Storm that is not in
- * reset.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param fw_info - Out: a pointer to write the FW info into.
- *
- * @return true if the FW info was read successfully from one of the Storms,
- * or false if all Storms are in reset.
- */
-bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, struct fw_info *fw_info);
-/**
- * @brief qed_dbg_grc_config - Sets the value of a GRC parameter.
- *
- * @param p_hwfn - HW device data
- * @param grc_param - GRC parameter
- * @param val - Value to set.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - grc_param is invalid
- * - val is outside the allowed boundaries
- */
-enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
- enum dbg_grc_params grc_param, u32 val);
-
-/**
- * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
- * default value.
- *
- * @param p_hwfn - HW device data
- */
-void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
-/**
- * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
- * GRC Dump.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the collected GRC data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified dump buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
- * for idle check results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the idle check
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the idle check data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
- * for mcp trace results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the trace data in MCP scratchpad contain an invalid signature
- * - the bundle ID in NVRAM is invalid
- * - the trace meta data cannot be found (in NVRAM or image file)
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the mcp trace data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - the trace data in MCP scratchpad contain an invalid signature
- * - the bundle ID in NVRAM is invalid
- * - the trace meta data cannot be found (in NVRAM or image file)
- * - the trace meta data cannot be read (from NVRAM or image file)
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
- * for grc trace fifo results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
- * the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the reg fifo data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
- * for the IGU fifo results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
- * the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the IGU fifo data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
- * buffer size for protection override window results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for protection
- * override data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status
-qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-/**
- * @brief qed_dbg_protection_override_dump - Reads protection override window
- * entries and writes the results into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the protection override data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-/**
- * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
- * size for FW Asserts results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-/**
- * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the FW Asserts data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_read_attn - Reads the attention registers of the specified
- * block and type, and writes the results into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param block - Block ID.
- * @param attn_type - Attention type.
- * @param clear_status - Indicates if the attention status should be cleared.
- * @param results - OUT: Pointer to write the read results into
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum block_id block,
- enum dbg_attn_type attn_type,
- bool clear_status,
- struct dbg_attn_block_result *results);
-
-/**
- * @brief qed_dbg_print_attn - Prints attention registers values in the
- * specified results struct.
- *
- * @param p_hwfn
- * @param results - Pointer to the attention read results
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
- struct dbg_attn_block_result *results);
-
-/******************************* Data Types **********************************/
-
-struct mcp_trace_format {
- u32 data;
-#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
-#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
-#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
-#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
-#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
-#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
-#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
-#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
-#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
-#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
-#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
-#define MCP_TRACE_FORMAT_LEN_OFFSET 24
-
- char *format_str;
-};
-
-/* MCP Trace Meta data structure */
-struct mcp_trace_meta {
- u32 modules_num;
- char **modules;
- u32 formats_num;
- struct mcp_trace_format *formats;
- bool is_allocated;
-};
-
-/* Debug Tools user data */
-struct dbg_tools_user_data {
- struct mcp_trace_meta mcp_trace_meta;
- const u32 *mcp_trace_user_meta_buf;
-};
-
-/******************************** Constants **********************************/
-
-#define MAX_NAME_LEN 16
-
-/***************************** Public Functions *******************************/
-
-/**
- * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
- * debug arrays.
- *
- * @param p_hwfn - HW device data
- * @param bin_ptr - a pointer to the binary data with debug arrays.
- */
-enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
- const u8 * const bin_ptr);
-
-/**
- * @brief qed_dbg_alloc_user_data - Allocates user debug data.
- *
- * @param p_hwfn - HW device data
- * @param user_data_ptr - OUT: a pointer to the allocated memory.
- */
-enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
- void **user_data_ptr);
-
-/**
- * @brief qed_dbg_get_status_str - Returns a string for the specified status.
- *
- * @param status - a debug status code.
- *
- * @return a string for the specified status
- */
-const char *qed_dbg_get_status_str(enum dbg_status status);
-
-/**
- * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
- * for idle check results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - idle check dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-/**
- * @brief qed_print_idle_chk_results - Prints idle check results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - idle check dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the idle check results.
- * @param num_errors - OUT: number of errors found in idle check.
- * @param num_warnings - OUT: number of warnings found in idle check.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf,
- u32 *num_errors,
- u32 *num_warnings);
-
-/**
- * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data.
- *
- * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
- * no NVRAM access).
- *
- * @param data - pointer to MCP Trace meta data
- * @param size - size of MCP Trace meta data in dwords
- */
-void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
- const u32 *meta_buf);
-
-/**
- * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
- * for MCP Trace results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - MCP Trace dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_mcp_trace_results - Prints MCP Trace results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and
- * keeps the MCP trace meta data allocated, to support continuous MCP Trace
- * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
- * be called to free the meta data.
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf);
-
-/**
- * @brief print_mcp_trace_line - Prints MCP Trace results for a single line
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param num_dumped_bytes - number of bytes that were dumped.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf);
-
-/**
- * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data.
- * Should be called after continuous MCP Trace parsing.
- *
- * @param p_hwfn - HW device data
- */
-void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
-
-/**
- * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
- * for reg_fifo results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - reg fifo dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_reg_fifo_results - Prints reg fifo results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - reg fifo dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the reg fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
- * for igu_fifo results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - IGU fifo dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_igu_fifo_results - Prints IGU fifo results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - IGU fifo dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the IGU fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_protection_override_results_buf_size - Returns the required
- * buffer size for protection override results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - protection override dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status
-qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_protection_override_results - Prints protection override
- * results.
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - protection override dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the reg fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
- * for FW Asserts results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - FW Asserts dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_fw_asserts_results - Prints FW Asserts results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - FW Asserts dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the FW Asserts results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_dbg_parse_attn - Parses and prints attention registers values in
- * the specified results struct.
- *
- * @param p_hwfn - HW device data
- * @param results - Pointer to the attention read results
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
- struct dbg_attn_block_result *results);
-
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -3745,19 +2375,28 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
/* Win 13 */
#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL
+/* Returns the VOQ based on port and TC */
+#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \
+ PURE_LB_TC ? NUM_OF_PHYS_TCS *\
+ MAX_NUM_PORTS_BB + \
+ (port) : (port) * \
+ (max_phys_tcs_per_port) + (tc))
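+/*
+ * Worked example (illustrative): with max_phys_tcs_per_port == 4, a
+ * physical TC maps to VOQ(port, tc, 4) == (port) * 4 + (tc), so port 1,
+ * TC 2 yields VOQ 6; the pure LB TC instead maps past the physical
+ * range, to NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + (port).
+ */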
+
+struct init_qm_pq_params;
+
/**
- * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ * qed_qm_pf_mem_size(): Prepare QM ILT sizes.
*
- * Returns the required host memory size in 4KB units.
- * Must be called before all QM init HSI functions.
+ * @num_pf_cids: Number of connections used by this PF.
+ * @num_vf_cids: Number of connections used by VFs of this PF.
+ * @num_tids: Number of tasks used by this PF.
+ * @num_pf_pqs: Number of PQs used by this PF.
+ * @num_vf_pqs: Number of PQs used by VFs of this PF.
*
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * Return: The required host memory size in 4KB units.
*
- * @return The required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
*/
u32 qed_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
@@ -3771,8 +2410,19 @@ struct qed_qm_common_rt_init_params {
bool global_rl_en;
bool vport_wfq_en;
struct init_qm_port_params *port_params;
+ struct init_qm_global_rl_params
+ global_rl_params[COMMON_MAX_QM_GLOBAL_RLS];
};
+/**
+ * qed_qm_common_rt_init(): Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @p_hwfn: HW device data.
+ * @p_params: Parameters.
+ *
+ * Return: 0 on success, -1 on error.
+ */
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_common_rt_init_params *p_params);
@@ -3789,85 +2439,116 @@ struct qed_qm_pf_rt_init_params {
u16 num_vf_pqs;
u16 start_vport;
u16 num_vports;
+ u16 start_rl;
+ u16 num_rls;
u16 pf_wfq;
u32 pf_rl;
+ u32 link_speed;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
+ struct init_qm_rl_params *rl_params;
};
+/**
+ * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @p_params: Parameters.
+ *
+ * Return: 0 on success, -1 on error.
+ */
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params);
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
/**
- * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
+ * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_wfq - WFQ weight. Must be non-zero.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF ID.
+ * @pf_wfq: WFQ weight. Must be non-zero.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
/**
- * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ * qed_init_pf_rl(): Initializes the rate limit of the specified PF.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_rl - rate limit in Mb/sec units
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF ID.
+ * @pf_rl: Rate limit in Mb/sec units.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
/**
- * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT
+ * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param first_tx_pq_id- An array containing the first Tx PQ ID associated
- * with the VPORT for each TC. This array is filled by
- * qed_qm_pf_rt_init
- * @param vport_wfq - WFQ weight. Must be non-zero.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @first_tx_pq_id: An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * qed_qm_pf_rt_init().
+ * @wfq: WFQ weight. Must be non-zero.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);
/**
- * @brief qed_init_global_rl - Initializes the rate limit of the specified
- * rate limiter
+ * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified
+ * VPORT and TC.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param rl_id - RL ID
- * @param rate_limit - rate limit in Mb/sec units
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC.
+ * (filled by qed_qm_pf_rt_init).
+ * @weight: VPORT+TC WFQ weight.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
+ */
+int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id, u16 weight);
+
+/**
+ * qed_init_global_rl(): Initializes the rate limit of the specified
+ * rate limiter.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @rl_id: RL ID.
+ * @rate_limit: Rate limit in Mb/sec units.
+ * @vport_rl_type: Vport RL type.
+ *
+ * Return: 0 on success, -1 on error.
*/
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 rl_id, u32 rate_limit);
+ u16 rl_id, u32 rate_limit,
+ enum init_qm_rl_type vport_rl_type);
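+
+/*
+ * Usage sketch (illustrative; rl_id and the 1000 Mb/sec value are
+ * hypothetical): program rate limiter rl_id as a normal (non-QCN) RL
+ * capped at 1 Gb/sec:
+ *
+ *	rc = qed_init_global_rl(p_hwfn, p_ptt, rl_id, 1000,
+ *				QM_RL_TYPE_NORMAL);
+ */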
/**
- * @brief qed_send_qm_stop_cmd Sends a stop command to the QM
+ * qed_send_qm_stop_cmd(): Sends a stop command to the QM.
*
- * @param p_hwfn
- * @param p_ptt
- * @param is_release_cmd - true for release, false for stop.
- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
- * @param start_pq - first PQ ID to stop
- * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @is_release_cmd: true for release, false for stop.
+ * @is_tx_pq: true for Tx PQs, false for Other PQs.
+ * @start_pq: First PQ ID to stop.
+ * @num_pqs: Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occurred while waiting for
- * QM command done.
+ * Return: Bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
*/
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3875,53 +2556,64 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
bool is_tx_pq, u16 start_pq, u16 num_pqs);
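
/*
 * Usage sketch (illustrative; start_pq/num_pqs are hypothetical): stop a
 * range of Tx PQs, then release them once they are drained. A false
 * return means the wait for QM command completion timed out:
 *
 *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
 *				  start_pq, num_pqs))
 *		return -EBUSY;
 *	...
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs);
 */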
/**
- * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
+ * qed_set_vxlan_dest_port(): Initializes VXLAN tunnel destination UDP port.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param dest_port - vxlan destination udp port.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dest_port: VXLAN destination UDP port.
+ *
+ * Return: Void.
*/
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
/**
- * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @vxlan_enable: VXLAN enable flag.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param vxlan_enable - vxlan enable flag.
+ * Return: Void.
*/
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool vxlan_enable);
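
/*
 * Usage sketch (illustrative; 4789 is the IANA-assigned VXLAN port):
 * point the HW parser at the standard VXLAN UDP port, then enable the
 * tunnel:
 *
 *	qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
 *	qed_set_vxlan_enable(p_hwfn, p_ptt, true);
 */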
/**
- * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ * qed_set_gre_enable(): Enable or disable GRE tunnel in HW.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param eth_gre_enable - eth GRE enable enable flag.
- * @param ip_gre_enable - IP GRE enable enable flag.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @eth_gre_enable: Eth GRE enable flag.
+ * @ip_gre_enable: IP GRE enable flag.
+ *
+ * Return: Void.
*/
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_gre_enable, bool ip_gre_enable);
/**
- * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
+ * qed_set_geneve_dest_port(): Initializes GENEVE tunnel destination UDP port.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dest_port: GENEVE destination UDP port.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param dest_port - geneve destination udp port.
+ * Return: Void.
*/
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
/**
- * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @eth_geneve_enable: Eth GENEVE enable flag.
+ * @ip_geneve_enable: IP GENEVE enable flag.
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param eth_geneve_enable - eth GENEVE enable enable flag.
- * @param ip_geneve_enable - IP GENEVE enable enable flag.
+ * Return: Void.
*/
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3931,25 +2623,29 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable);
/**
- * @brief qed_gft_disable - Disable GFT
+ * qed_gft_disable(): Disable GFT.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param pf_id - pf on which to disable GFT.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF on which to disable GFT.
+ *
+ * Return: Void.
*/
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
/**
- * @brief qed_gft_config - Enable and configure HW for GFT
+ * qed_gft_config(): Enable and configure HW for GFT.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF on which to enable GFT.
+ * @tcp: Set profile for TCP packets.
+ * @udp: Set profile for UDP packets.
+ * @ipv4: Set profile for IPv4 packets.
+ * @ipv6: Set profile for IPv6 packets.
+ * @profile_type: Defines which packet fields must match (see enum
+ * gft_profile_type).
*
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
- * @param pf_id - pf on which to enable GFT.
- * @param tcp - set profile tcp packets.
- * @param udp - set profile udp packet.
- * @param ipv4 - set profile ipv4 packet.
- * @param ipv6 - set profile ipv6 packet.
- * @param profile_type - define packet same fields. Use enum gft_profile_type.
+ * Return: Void.
*/
void qed_gft_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3959,438 +2655,135 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool ipv4, bool ipv6, enum gft_profile_type profile_type);
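A hedged example of enabling GFT for a PF (GFT_PROFILE_TYPE_4_TUPLE is
assumed from the gft_profile_type enum elsewhere in this header, and
rel_pf_id from struct qed_hwfn):

	/* Classify TCP-over-IPv4 flows by their full 4-tuple on this PF. */
	qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
		       true, false, true, false, GFT_PROFILE_TYPE_4_TUPLE);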
/**
- * @brief qed_enable_context_validation - Enable and configure context
- * validation.
+ * qed_enable_context_validation(): Enable and configure context
+ * validation.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
+ * Return: Void.
*/
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_calc_session_ctx_validation - Calcualte validation byte for
- * session context.
+ * qed_calc_session_ctx_validation(): Calculate validation byte for
+ * session context.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param cid - context cid.
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Context size.
+ * @ctx_type: Context type.
+ * @cid: Context cid.
+ *
+ * Return: Void.
*/
void qed_calc_session_ctx_validation(void *p_ctx_mem,
u16 ctx_size, u8 ctx_type, u32 cid);
/**
- * @brief qed_calc_task_ctx_validation - Calcualte validation byte for task
- * context.
+ * qed_calc_task_ctx_validation(): Calculate validation byte for task
+ * context.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Context size.
+ * @ctx_type: Context type.
+ * @tid: Context tid.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param tid - context tid.
+ * Return: Void.
*/
void qed_calc_task_ctx_validation(void *p_ctx_mem,
u16 ctx_size, u8 ctx_type, u32 tid);
/**
- * @brief qed_memset_session_ctx - Memset session context to 0 while
- * preserving validation bytes.
+ * qed_memset_session_ctx(): Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Size to initialize.
+ * @ctx_type: Context type.
*
- * @param p_hwfn -
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * Return: Void.
*/
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
/**
- * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
- * validation bytes.
+ * qed_memset_task_ctx(): Memset task context to 0 while preserving
+ * validation bytes.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Size to initialize.
+ * @ctx_type: Context type.
+ *
+ * Return: Void.
*/
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
#define NUM_STORMS 6
/**
- * @brief qed_set_rdma_error_level - Sets the RDMA assert level.
- * If the severity of the error will be
- * above the level, the FW will assert.
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers
- * @param assert_level - An array of assert levels for each storm.
+ * qed_set_rdma_error_level(): Sets the RDMA assert level.
+ *                             If the severity of the error exceeds
+ *                             this level, the FW will assert.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @assert_level: An array of assert levels for each storm.
*
+ * Return: Void.
*/
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 assert_level[NUM_STORMS]);
/**
- * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory.
+ * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory.
*
- * @param p_hwfn - HW device data
- * @param fw_overlay_in_buf - the input FW overlay buffer.
- * @param buf_size - the size of the input FW overlay buffer in bytes.
- * must be aligned to dwords.
- * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory.
+ * @p_hwfn: HW device data.
+ * @fw_overlay_in_buf: The input FW overlay buffer.
+ * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes.
+ * Must be aligned to dwords.
*
- * @return a pointer to the allocated overlays memory,
+ * Return: A pointer to the allocated overlays memory,
* or NULL in case of failures.
*/
struct phys_mem_desc *
qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
- const u32 * const fw_overlay_in_buf,
+ const u32 *const fw_overlay_in_buf,
u32 buf_size_in_bytes);
/**
- * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM.
+ * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @fw_overlay_mem: The allocated FW overlay memory.
*
- * @param p_hwfn - HW device data.
- * @param p_ptt - ptt window used for writing the registers.
- * @param fw_overlay_mem - the allocated FW overlay memory.
+ * Return: Void.
*/
void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct phys_mem_desc *fw_overlay_mem);
/**
- * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory.
+ * qed_fw_overlay_mem_free(): Frees the FW overlay memory.
*
- * @param p_hwfn - HW device data.
- * @param fw_overlay_mem - the allocated FW overlay memory to free.
+ * @p_hwfn: HW device data.
+ * @fw_overlay_mem: The allocated FW overlay memory to free.
+ *
+ * Return: Void.
*/
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
- struct phys_mem_desc *fw_overlay_mem);
+ struct phys_mem_desc **fw_overlay_mem);
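A minimal lifecycle sketch for the three FW overlay helpers above, assuming
the overlay buffer comes from the loaded firmware image and a PTT window is
already held; note that the free helper takes a double pointer so it can
clear the caller's reference:

	static int example_overlay_setup(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 const u32 *fw_overlay_in_buf,
					 u32 buf_size_in_bytes)
	{
		struct phys_mem_desc *overlays;

		overlays = qed_fw_overlay_mem_alloc(p_hwfn, fw_overlay_in_buf,
						    buf_size_in_bytes);
		if (!overlays)
			return -ENOMEM;

		qed_fw_overlay_init_ram(p_hwfn, p_ptt, overlays);

		/* ... on teardown: */
		qed_fw_overlay_mem_free(p_hwfn, &overlays);

		return 0;
	}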
-/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-
-/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) \
- (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
-
-/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
- (IRO[2].base + ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-
-/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
- (IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-
-/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
- (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-
-/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) \
- (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-
-/* Ustorm eth queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
- (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
-
-/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
- (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
-
-/* Xstorm common PQ info */
-#define XSTORM_PQ_INFO_OFFSET(pq_id) \
- (IRO[8].base + ((pq_id) * IRO[8].m1))
-#define XSTORM_PQ_INFO_SIZE (IRO[8].size)
-
-/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
-
-/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
-
-/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
-
-/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
-
-/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
-
-/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
-
-/* Xstorm overlay buffer host address */
-#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base)
-#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size)
-
-/* Ystorm overlay buffer host address */
-#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base)
-#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size)
-
-/* Pstorm overlay buffer host address */
-#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
-#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
-
-/* Tstorm overlay buffer host address */
-#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
-#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
-
-/* Mstorm overlay buffer host address */
-#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
-#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
-
-/* Ustorm overlay buffer host address */
-#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
-#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
-
-/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size)
-
-/* Tstorm LightL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size)
-
-/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size)
-
-/* Pstorm LiteL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
-
-/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size)
-
-/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
- * mode
- */
-#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
- (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2))
-#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size)
-
-/* Mstorm ETH PF queues producers */
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
- (IRO[28].base + ((queue_id) * IRO[28].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size)
-
-/* Mstorm pf statistics */
-#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[29].base + ((pf_id) * IRO[29].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size)
-
-/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[30].base + ((stat_counter_id) * IRO[30].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[30].size)
-
-/* Ustorm pf statistics */
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[31].base + ((pf_id) * IRO[31].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size)
-
-/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[32].base + ((stat_counter_id) * IRO[32].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size)
-
-/* Pstorm pf statistics */
-#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[33].base + ((pf_id) * IRO[33].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size)
-
-/* Control frame's EthType configuration for TX control frame security */
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
- (IRO[34].base + ((eth_type_id) * IRO[34].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size)
-
-/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size)
-
-/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[36].base + ((pf_id) * IRO[36].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size)
-
-/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
- * Use eth_tstorm_rss_update_data for update
- */
-#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size)
-
-/* Xstorm queue zone */
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[38].base + ((queue_id) * IRO[38].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size)
-
-/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[39].base + ((rss_id) * IRO[39].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size)
-
-/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[40].base + ((rss_id) * IRO[40].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size)
-
-/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size)
-
-/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size)
-
-/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
- * BDqueue-id
+#define PCICFG_OFFSET 0x2000
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+
+/* First VF_NUM for PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on Total Number of VFs programmed
+ * for each PF.
+ * Since registers from 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits.
*/
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
- (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \
- ((bdq_id) * IRO[43].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size)
-
-/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
- (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \
- ((bdq_id) * IRO[44].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size)
-
-/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[45].base + ((storage_func_id) * IRO[45].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size)
-
-/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[46].base + ((storage_func_id) * IRO[46].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size)
-
-/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[47].base + ((storage_func_id) * IRO[47].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size)
-
-/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[48].base + ((storage_func_id) * IRO[48].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size)
-
-/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[49].base + ((storage_func_id) * IRO[49].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size)
-
-/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[50].base + ((storage_func_id) * IRO[50].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size)
-
-/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[51].base + ((pf_id) * IRO[51].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size)
-
-/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[52].base + ((pf_id) * IRO[52].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size)
-
-/* Pstorm RDMA queue statistics */
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size)
-
-/* Tstorm RDMA queue statistics */
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size)
-
-/* Xstorm error level for assert */
-#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[55].base + ((pf_id) * IRO[55].m1))
-#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size)
-
-/* Ystorm error level for assert */
-#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[56].base + ((pf_id) * IRO[56].m1))
-#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size)
-
-/* Pstorm error level for assert */
-#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[57].base + ((pf_id) * IRO[57].m1))
-#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size)
-
-/* Tstorm error level for assert */
-#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[58].base + ((pf_id) * IRO[58].m1))
-#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size)
-
-/* Mstorm error level for assert */
-#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[59].base + ((pf_id) * IRO[59].m1))
-#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size)
-
-/* Ustorm error level for assert */
-#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[60].base + ((pf_id) * IRO[60].m1))
-#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size)
-
-/* Xstorm iWARP rxmit stats */
-#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
- (IRO[61].base + ((pf_id) * IRO[61].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size)
-
-/* Tstorm RoCE Event Statistics */
-#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
- (IRO[62].base + ((roce_pf_id) * IRO[62].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size)
-
-/* DCQCN Received Statistics */
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id)\
- (IRO[63].base + ((roce_pf_id) * IRO[63].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size)
-
-/* RoCE Error Statistics */
-#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
- (IRO[64].base + ((roce_pf_id) * IRO[64].m1))
-#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size)
-
-/* DCQCN Sent Statistics */
-#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
- (IRO[65].base + ((roce_pf_id) * IRO[65].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size)
-
-/* RoCE CQEs Statistics */
-#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
- (IRO[66].base + ((roce_pf_id) * IRO[66].m1))
-#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size)
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff
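An illustrative read of the field described above, assuming the driver's
qed_rd() register-read helper; the address arithmetic follows directly from
the offsets defined here:

	static u8 example_first_vf_num(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt)
	{
		u32 val = qed_rd(p_hwfn, p_ptt,
				 PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);

		return val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK;
	}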
/* Runtime array offsets */
#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
@@ -4721,116 +3114,118 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
#define QM_REG_TXPQMAP_RT_SIZE 512
#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32068
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32580
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 32580
+#define QM_REG_WFQVPMAP_RT_OFFSET 33092
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_PTRTBLTX_RT_OFFSET 33092
+#define QM_REG_PTRTBLTX_RT_OFFSET 33604
#define QM_REG_PTRTBLTX_RT_SIZE 1024
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276
-#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471
-
-#define RUNTIME_ARRAY_SIZE 34472
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34950
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34953
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34956
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34959
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34962
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34965
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34968
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34971
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34974
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34977
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34980
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34982
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34983
+
+#define RUNTIME_ARRAY_SIZE 34984
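The wholesale offset shift above follows from the new QM_REG_WFQVPUPPERBOUND
block: 512 new runtime entries at offset 32068 push every subsequent offset up
by exactly 512 (QM_REG_WFQVPCRD 32068 -> 32580, XCM_REG_CON_PHY_Q3
34471 -> 34983, and RUNTIME_ARRAY_SIZE 34472 -> 34984).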
/* Init Callbacks */
#define DMAE_READY_CB 0
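The context structs below pack several sub-fields into each flags byte via
paired _MASK/_SHIFT macros. A minimal sketch of the accessor pattern (shown
as standalone macros for illustration; the driver's own field helpers follow
the same shape):

	#define EX_GET_FIELD(value, name) \
		(((value) >> name##_SHIFT) & name##_MASK)

	#define EX_SET_FIELD(value, name, flag)				       \
		do {							       \
			(value) &= ~((name##_MASK) << (name##_SHIFT));	       \
			(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT); \
		} while (0)

	/* e.g.: EX_SET_FIELD(ctx->flags0,
	 *		      XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0, 1);
	 */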
@@ -4850,216 +3245,216 @@ struct xstorm_eth_conn_st_ctx {
__le32 reserved[60];
};
-struct e4_xstorm_eth_conn_ag_ctx {
+struct xstorm_eth_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
__le16 e5_reserved1;
@@ -5118,37 +3513,37 @@ struct ystorm_eth_conn_st_ctx {
__le32 reserved[8];
};
-struct e4_ystorm_eth_conn_ag_ctx {
+struct ystorm_eth_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset;
u8 byte3;
__le16 word0;
@@ -5162,89 +3557,89 @@ struct e4_ystorm_eth_conn_ag_ctx {
__le32 reg3;
};
-struct e4_tstorm_eth_conn_ag_ctx {
+struct tstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -5266,63 +3661,63 @@ struct e4_tstorm_eth_conn_ag_ctx {
__le32 reg10;
};
-struct e4_ustorm_eth_conn_ag_ctx {
+struct ustorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -5346,16 +3741,16 @@ struct mstorm_eth_conn_st_ctx {
};
/* eth connection context */
-struct e4_eth_conn_context {
+struct eth_conn_context {
struct tstorm_eth_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct pstorm_eth_conn_st_ctx pstorm_st_context;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
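/* Annotation (not from the patch): eth_conn_context concatenates one
 * per-connection slice for each firmware "storm" processor (T, P, X, Y,
 * U, M), each split into a storm state context (*_st_ctx) and, where
 * needed, an aggregation context (*_ag_ctx).  The rename only drops the
 * e4_/E4_ chip-generation prefix; the field layout is unchanged.
 */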
@@ -5512,7 +3907,7 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_ADD_UDP_FILTER,
ETH_RAMROD_RX_DELETE_UDP_FILTER,
ETH_RAMROD_RX_CREATE_GFT_ACTION,
- ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_RX_UPDATE_GFT_FILTER,
ETH_RAMROD_TX_QUEUE_UPDATE,
ETH_RAMROD_RGFS_FILTER_ADD,
ETH_RAMROD_RGFS_FILTER_DEL,
@@ -5596,10 +3991,12 @@ struct eth_vport_rss_config {
u8 update_rss_ind_table;
u8 update_rss_capabilities;
u8 tbl_size;
- __le32 reserved2[2];
+ u8 ind_table_mask_valid;
+ u8 reserved2[3];
__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+ __le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS];
__le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
- __le32 reserved3[2];
+ __le32 reserved3;
};
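/* Annotation (not from the patch): RSS steers a packet by indexing
 * indirection_table[] with the low bits of the packet hash; the entry
 * names the destination queue.  A minimal sketch, assuming the table size
 * is a power of two:
 *
 *	static u16 rss_queue(const struct eth_vport_rss_config *cfg,
 *			     u32 hash)
 *	{
 *		u32 idx = hash & (ETH_RSS_IND_TABLE_ENTRIES_NUM - 1);
 *
 *		return le16_to_cpu(cfg->indirection_table[idx]);
 *	}
 *
 * The added ind_table_mask[]/ind_table_mask_valid fields appear to mark
 * which entries a partial table update covers - an inference from the
 * field names, not stated in this hunk.
 */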
/* eth vport RSS mode */
@@ -5674,8 +4071,20 @@ enum gft_filter_update_action {
MAX_GFT_FILTER_UPDATE_ACTION
};
+/* Ramrod data for rx create gft action */
+struct rx_create_gft_action_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+/* Ramrod data for rx create openflow action */
+struct rx_create_openflow_action_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
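/* Annotation (not from the patch): the two structs above are the former
 * rx_create_gft_action_data and rx_create_openflow_action_data, given the
 * uniform _ramrod_data suffix and moved ahead of the openflow filter
 * struct; their old definitions are deleted further down.
 */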
/* Ramrod data for rx add openflow filter */
-struct rx_add_openflow_filter_data {
+struct rx_openflow_filter_ramrod_data {
__le16 action_icid;
u8 priority;
u8 reserved0;
@@ -5698,18 +4107,6 @@ struct rx_add_openflow_filter_data {
__le16 l4_src_port;
};
-/* Ramrod data for rx create gft action */
-struct rx_create_gft_action_data {
- u8 vport_id;
- u8 reserved[7];
-};
-
-/* Ramrod data for rx create openflow action */
-struct rx_create_openflow_action_data {
- u8 vport_id;
- u8 reserved[7];
-};
-
/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id;
@@ -5768,7 +4165,7 @@ struct rx_queue_update_ramrod_data {
};
/* Ramrod data for rx Add UDP Filter */
-struct rx_udp_filter_data {
+struct rx_udp_filter_ramrod_data {
__le16 action_icid;
__le16 vlan_id;
u8 ip_type;
@@ -5784,7 +4181,7 @@ struct rx_udp_filter_data {
/* Add or delete a GFT filter - the filter is the packet header of the
 * packet type that should be steered to a given FW flow.
*/
-struct rx_update_gft_filter_data {
+struct rx_update_gft_filter_ramrod_data {
struct regpair pkt_hdr_addr;
__le16 pkt_hdr_length;
__le16 action_icid;
@@ -5824,7 +4221,8 @@ struct tx_queue_start_ramrod_data {
u8 pxp_tph_valid_bd;
u8 pxp_tph_valid_pkt;
__le16 pxp_st_index;
- __le16 comp_agg_size;
+ u8 comp_agg_size;
+ u8 reserved3;
__le16 queue_zone_id;
__le16 reserved2;
__le16 pbl_size;
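/* Annotation (not from the patch): narrowing comp_agg_size from __le16 to
 * u8 and adding u8 reserved3 keeps the struct size and the 2-byte
 * alignment of the __le16 fields that follow, so only the usable field
 * width changes.
 */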
@@ -5945,7 +4343,12 @@ struct vport_update_ramrod_data_cmn {
u8 ctl_frame_ethtype_check_en;
u8 update_in_to_in_pri_map_mode;
u8 in_to_in_pri_map[8];
- u8 reserved[6];
+ u8 update_tx_dst_port_mode_flg;
+ u8 tx_dst_port_mode_config;
+ u8 dst_vport_id;
+ u8 tx_dst_port_mode;
+ u8 dst_vport_id_valid;
+ u8 reserved[1];
};
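/* Annotation (not from the patch): the old u8 reserved[6] is repurposed as
 * five tx-destination-port-mode fields plus one remaining reserved byte,
 * so the common vport-update block keeps its size.
 */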
struct vport_update_ramrod_mcast {
@@ -5964,7 +4367,7 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
+struct xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
u8 reserved0;
u8 state;
u8 flags0;
@@ -6193,253 +4596,253 @@ struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
__le32 reg4;
};
-struct e4_mstorm_eth_conn_ag_ctx {
+struct mstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_xstorm_eth_hw_conn_ag_ctx {
+struct xstorm_eth_hw_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
__le16 e5_reserved1;
@@ -6479,7 +4882,6 @@ struct gft_cam_line_mapped {
#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
};
-
/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
GFT_PROFILE_IPV4 = 0,
@@ -6640,49 +5042,49 @@ struct ystorm_rdma_task_st_ctx {
struct regpair temp[4];
};
-struct e4_ystorm_rdma_task_ag_ctx {
+struct ystorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 msem_ctx_upd_seq;
u8 flags0;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
@@ -6696,49 +5098,49 @@ struct e4_ystorm_rdma_task_ag_ctx {
__le32 fbo_hi;
};
-struct e4_mstorm_rdma_task_ag_ctx {
+struct mstorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
@@ -6762,56 +5164,56 @@ struct ustorm_rdma_task_st_ctx {
struct regpair temp[6];
};
-struct e4_ustorm_rdma_task_ag_ctx {
+struct ustorm_rdma_task_ag_ctx {
u8 reserved;
u8 state;
__le16 icid;
u8 flags0;
-#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 dif_rxmit_cons;
@@ -6828,16 +5230,853 @@ struct e4_ustorm_rdma_task_ag_ctx {
};
/* RDMA task context */
-struct e4_rdma_task_context {
+struct rdma_task_context {
struct ystorm_rdma_task_st_ctx ystorm_st_context;
- struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
struct tdif_task_context tdif_context;
- struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context;
struct ustorm_rdma_task_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
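/* Annotation (not from the patch): the rdif/tdif task contexts and the
 * USTORM_RDMA_TASK_AG_CTX_DIF_* flags above belong to the T10 protection
 * information (DIF) offload - rdif/tdif presumably being its receive and
 * transmit sides - tracking per-task interval counts and the first
 * erroneous interval.
 */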
+#define TOE_MAX_RAMROD_PER_PF 8
+#define TOE_TX_PAGE_SIZE_BYTES 4096
+#define TOE_GRQ_PAGE_SIZE_BYTES 4096
+#define TOE_RX_CQ_PAGE_SIZE_BYTES 4096
+
+#define TOE_RX_MAX_RSS_CHAINS 64
+#define TOE_TX_MAX_TSS_CHAINS 64
+#define TOE_RSS_INDIRECTION_TABLE_SIZE 128
+
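/* Annotation (not from the patch): the constants above size the TOE tx,
 * GRQ and rx-CQ rings in 4 KiB pages.  Elements per page would follow as,
 * e.g.:
 *
 *	#define TOE_TX_BDS_PER_PAGE \
 *		(TOE_TX_PAGE_SIZE_BYTES / sizeof(struct toe_tx_bd))
 *
 * struct toe_tx_bd is assumed here for illustration; the element types
 * are not part of this hunk.
 */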
+/* The toe storm context of Mstorm */
+struct mstorm_toe_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The toe storm context of Pstorm */
+struct pstorm_toe_conn_st_ctx {
+ __le32 reserved[36];
+};
+
+/* The toe storm context of Ystorm */
+struct ystorm_toe_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The toe storm context of Xstorm */
+struct xstorm_toe_conn_st_ctx {
+ __le32 reserved[44];
+};
+
+struct ystorm_toe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define YSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_SHIFT 4
+#define YSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 0
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_SHIFT 1
+#define YSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_SHIFT 3
+#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_SHIFT 7
+ u8 completion_opcode;
+ u8 byte3;
+ __le16 word0;
+ __le32 rel_seq;
+ __le32 rel_seq_threshold;
+ __le16 app_prod;
+ __le16 app_cons;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
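/* Annotation (not from the patch): unlike the generic eth templates above,
 * this TOE ystorm aggregation context gives several slots concrete names
 * (completion_opcode, rel_seq/rel_seq_threshold, app_prod/app_cons) in
 * place of the placeholder byteN/wordN/regN fields.
 */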
+struct xstorm_toe_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_TOE_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT15_SHIFT 7
+ u8 flags2;
+#define XSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_TOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_TOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_TOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_TOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 word2;
+ __le16 word3;
+ __le16 bd_prod;
+ __le16 word5;
+ __le16 word6;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 local_adv_wnd_seq;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
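/*
 * Editor's sketch (annotation, not part of the patch): every *_MASK/
 * *_SHIFT pair above names a sub-byte field packed into one of the
 * flagsN bytes. The qed driver accesses such fields through the
 * GET_FIELD()/SET_FIELD() helpers from include/linux/qed/common_hsi.h,
 * which are roughly:
 *
 *	#define GET_FIELD(value, name) \
 *		(((value) >> (name ## _SHIFT)) & name ## _MASK)
 *	#define SET_FIELD(value, name, flag)				\
 *		do {							\
 *			(value) &= ~(name ## _MASK << name ## _SHIFT);	\
 *			(value) |= ((u64)(flag) << name ## _SHIFT);	\
 *		} while (0)
 *
 * For example, SET_FIELD(ctx->flags8, XSTORM_TOE_CONN_AG_CTX_CF2EN, 1)
 * sets bit 0 of flags8 in the struct above.
 */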
+
+struct tstorm_toe_conn_ag_ctx {
+ u8 reserved0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+};
+
+struct ustorm_toe_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 3
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 5
+#define USTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The toe storm context of Tstorm */
+struct tstorm_toe_conn_st_ctx {
+ __le32 reserved[16];
+};
+
+/* The toe storm context of Ustorm */
+struct ustorm_toe_conn_st_ctx {
+ __le32 reserved[52];
+};
+
+/* toe connection context */
+struct toe_conn_context {
+ struct ystorm_toe_conn_st_ctx ystorm_st_context;
+ struct pstorm_toe_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_toe_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct ystorm_toe_conn_ag_ctx ystorm_ag_context;
+ struct xstorm_toe_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_toe_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_toe_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_toe_conn_st_ctx tstorm_st_context;
+ struct mstorm_toe_conn_st_ctx mstorm_st_context;
+ struct ustorm_toe_conn_st_ctx ustorm_st_context;
+};
+
+/* toe init ramrod header */
+struct toe_init_ramrod_header {
+ u8 first_rss;
+ u8 num_rss;
+ u8 reserved[6];
+};
+
+/* toe pf init parameters */
+struct toe_pf_init_params {
+ __le32 push_timeout;
+ __le16 grq_buffer_size;
+ __le16 grq_sb_id;
+ u8 grq_sb_index;
+ u8 max_seg_retransmit;
+ u8 doubt_reachability;
+ u8 ll2_rx_queue_id;
+ __le16 grq_fetch_threshold;
+ u8 reserved1[2];
+ struct regpair grq_page_addr;
+};
+
+/* toe tss parameters */
+struct toe_tss_params {
+ struct regpair curr_page_addr;
+ struct regpair next_page_addr;
+ u8 reserved0;
+ u8 status_block_index;
+ __le16 status_block_id;
+ __le16 reserved1[2];
+};
+
+/* toe rss parameters */
+struct toe_rss_params {
+ struct regpair curr_page_addr;
+ struct regpair next_page_addr;
+ u8 reserved0;
+ u8 status_block_index;
+ __le16 status_block_id;
+ __le16 reserved1[2];
+};
+
+/* toe init ramrod data */
+struct toe_init_ramrod_data {
+ struct toe_init_ramrod_header hdr;
+ struct tcp_init_params tcp_params;
+ struct toe_pf_init_params pf_params;
+ struct toe_tss_params tss_params[TOE_TX_MAX_TSS_CHAINS];
+ struct toe_rss_params rss_params[TOE_RX_MAX_RSS_CHAINS];
+};
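/*
 * Editor's sketch (illustrative only): a PF init flow would fill this
 * ramrod data before posting it, along the lines of
 *
 *	struct toe_init_ramrod_data *data = ramrod_buf;
 *	data->hdr.first_rss = first_rss_chain;
 *	data->hdr.num_rss = num_rss_chains;
 *	data->pf_params.push_timeout = cpu_to_le32(push_to);
 *	data->pf_params.grq_buffer_size = cpu_to_le16(grq_buf_sz);
 *	DMA_REGPAIR_LE(data->pf_params.grq_page_addr, grq_dma_addr);
 *
 * DMA_REGPAIR_LE() is qed's little-endian regpair fill helper from
 * include/linux/qed/qed_if.h; the local variable names here are
 * assumptions.
 */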
+
+/* toe offload parameters */
+struct toe_offload_params {
+ struct regpair tx_bd_page_addr;
+ struct regpair tx_app_page_addr;
+ __le32 more_to_send_seq;
+ __le16 rcv_indication_size;
+ u8 rss_tss_id;
+ u8 ignore_grq_push;
+ struct regpair rx_db_data_ptr;
+};
+
+/* TOE offload ramrod data - DMAed by firmware */
+struct toe_offload_ramrod_data {
+ struct tcp_offload_params tcp_ofld_params;
+ struct toe_offload_params toe_ofld_params;
+};
+
+/* TOE ramrod command IDs */
+enum toe_ramrod_cmd_id {
+ TOE_RAMROD_UNUSED,
+ TOE_RAMROD_FUNC_INIT,
+ TOE_RAMROD_INITATE_OFFLOAD,
+ TOE_RAMROD_FUNC_CLOSE,
+ TOE_RAMROD_SEARCHER_DELETE,
+ TOE_RAMROD_TERMINATE,
+ TOE_RAMROD_QUERY,
+ TOE_RAMROD_UPDATE,
+ TOE_RAMROD_EMPTY,
+ TOE_RAMROD_RESET_SEND,
+ TOE_RAMROD_INVALIDATE,
+ MAX_TOE_RAMROD_CMD_ID
+};
+
+/* Toe RQ buffer descriptor */
+struct toe_rx_bd {
+ struct regpair addr;
+ __le16 size;
+ __le16 flags;
+#define TOE_RX_BD_START_MASK 0x1
+#define TOE_RX_BD_START_SHIFT 0
+#define TOE_RX_BD_END_MASK 0x1
+#define TOE_RX_BD_END_SHIFT 1
+#define TOE_RX_BD_NO_PUSH_MASK 0x1
+#define TOE_RX_BD_NO_PUSH_SHIFT 2
+#define TOE_RX_BD_SPLIT_MASK 0x1
+#define TOE_RX_BD_SPLIT_SHIFT 3
+#define TOE_RX_BD_RESERVED0_MASK 0xFFF
+#define TOE_RX_BD_RESERVED0_SHIFT 4
+ __le32 reserved1;
+};
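/*
 * Editor's sketch (illustrative only, assuming the SET_FIELD() helper
 * shown earlier): posting a buffer that both starts and ends an RX
 * placement could look like
 *
 *	u16 flags = 0;
 *
 *	DMA_REGPAIR_LE(bd->addr, buf_dma);
 *	bd->size = cpu_to_le16(buf_len);
 *	SET_FIELD(flags, TOE_RX_BD_START, 1);
 *	SET_FIELD(flags, TOE_RX_BD_END, 1);
 *	bd->flags = cpu_to_le16(flags);
 *
 * The flags are built in host order and converted once at the end,
 * since the struct field is __le16.
 */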
+
+/* TOE RX completion queue opcodes (opcode 0 is illegal) */
+enum toe_rx_cmp_opcode {
+ TOE_RX_CMP_OPCODE_GA = 1,
+ TOE_RX_CMP_OPCODE_GR = 2,
+ TOE_RX_CMP_OPCODE_GNI = 3,
+ TOE_RX_CMP_OPCODE_GAIR = 4,
+ TOE_RX_CMP_OPCODE_GAIL = 5,
+ TOE_RX_CMP_OPCODE_GRI = 6,
+ TOE_RX_CMP_OPCODE_GJ = 7,
+ TOE_RX_CMP_OPCODE_DGI = 8,
+ TOE_RX_CMP_OPCODE_CMP = 9,
+ TOE_RX_CMP_OPCODE_REL = 10,
+ TOE_RX_CMP_OPCODE_SKP = 11,
+ TOE_RX_CMP_OPCODE_URG = 12,
+ TOE_RX_CMP_OPCODE_RT_TO = 13,
+ TOE_RX_CMP_OPCODE_KA_TO = 14,
+ TOE_RX_CMP_OPCODE_MAX_RT = 15,
+ TOE_RX_CMP_OPCODE_DBT_RE = 16,
+ TOE_RX_CMP_OPCODE_SYN = 17,
+ TOE_RX_CMP_OPCODE_OPT_ERR = 18,
+ TOE_RX_CMP_OPCODE_FW2_TO = 19,
+ TOE_RX_CMP_OPCODE_2WY_CLS = 20,
+ TOE_RX_CMP_OPCODE_RST_RCV = 21,
+ TOE_RX_CMP_OPCODE_FIN_RCV = 22,
+ TOE_RX_CMP_OPCODE_FIN_UPL = 23,
+ TOE_RX_CMP_OPCODE_INIT = 32,
+ TOE_RX_CMP_OPCODE_RSS_UPDATE = 33,
+ TOE_RX_CMP_OPCODE_CLOSE = 34,
+ TOE_RX_CMP_OPCODE_INITIATE_OFFLOAD = 80,
+ TOE_RX_CMP_OPCODE_SEARCHER_DELETE = 81,
+ TOE_RX_CMP_OPCODE_TERMINATE = 82,
+ TOE_RX_CMP_OPCODE_QUERY = 83,
+ TOE_RX_CMP_OPCODE_RESET_SEND = 84,
+ TOE_RX_CMP_OPCODE_INVALIDATE = 85,
+ TOE_RX_CMP_OPCODE_EMPTY = 86,
+ TOE_RX_CMP_OPCODE_UPDATE = 87,
+ MAX_TOE_RX_CMP_OPCODE
+};
+
+/* TOE rx ooo completion data */
+struct toe_rx_cqe_ooo_params {
+ __le32 nbytes;
+ __le16 grq_buff_id;
+ u8 isle_num;
+ u8 reserved0;
+};
+
+/* TOE rx in order completion data */
+struct toe_rx_cqe_in_order_params {
+ __le32 nbytes;
+ __le16 grq_buff_id;
+ __le16 reserved1;
+};
+
+/* Union for TOE rx completion data */
+union toe_rx_cqe_data_union {
+ struct toe_rx_cqe_ooo_params ooo_params;
+ struct toe_rx_cqe_in_order_params in_order_params;
+ struct regpair raw_data;
+};
+
+/* TOE rx completion element */
+struct toe_rx_cqe {
+ __le16 icid;
+ u8 completion_opcode;
+ u8 reserved0;
+ __le32 reserved1;
+ union toe_rx_cqe_data_union data;
+};
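/*
 * Editor's sketch (illustrative only): a CQ consumer would dispatch on
 * completion_opcode and then read the matching union member, e.g.
 *
 *	switch (cqe->completion_opcode) {
 *	case TOE_RX_CMP_OPCODE_GA:
 *		nbytes = le32_to_cpu(cqe->data.in_order_params.nbytes);
 *		break;
 *	case TOE_RX_CMP_OPCODE_GAIR:
 *		isle = cqe->data.ooo_params.isle_num;
 *		break;
 *	}
 *
 * Which opcodes carry in-order versus out-of-order data is an
 * assumption here; the firmware interface above only defines the
 * layouts.
 */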
+
+/* toe RX doorbell data */
+struct toe_rx_db_data {
+ __le32 local_adv_wnd_seq;
+ __le32 reserved[3];
+};
+
+/* Toe GRQ buffer descriptor */
+struct toe_rx_grq_bd {
+ struct regpair addr;
+ __le16 buff_id;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+/* Toe transmission application buffer descriptor */
+struct toe_tx_app_buff_desc {
+ __le32 next_buffer_start_seq;
+ __le32 reserved;
+};
+
+/* Toe transmission application buffer descriptor page pointer */
+struct toe_tx_app_buff_page_pointer {
+ struct regpair next_page_addr;
+};
+
+/* Toe transmission buffer descriptor */
+struct toe_tx_bd {
+ struct regpair addr;
+ __le16 size;
+ __le16 flags;
+#define TOE_TX_BD_PUSH_MASK 0x1
+#define TOE_TX_BD_PUSH_SHIFT 0
+#define TOE_TX_BD_NOTIFY_MASK 0x1
+#define TOE_TX_BD_NOTIFY_SHIFT 1
+#define TOE_TX_BD_LARGE_IO_MASK 0x1
+#define TOE_TX_BD_LARGE_IO_SHIFT 2
+#define TOE_TX_BD_BD_CONS_MASK 0x1FFF
+#define TOE_TX_BD_BD_CONS_SHIFT 3
+ __le32 next_bd_start_seq;
+};
+
+/* TOE completion opcodes */
+enum toe_tx_cmp_opcode {
+ TOE_TX_CMP_OPCODE_DATA,
+ TOE_TX_CMP_OPCODE_TERMINATE,
+ TOE_TX_CMP_OPCODE_EMPTY,
+ TOE_TX_CMP_OPCODE_RESET_SEND,
+ TOE_TX_CMP_OPCODE_INVALIDATE,
+ TOE_TX_CMP_OPCODE_RST_RCV,
+ MAX_TOE_TX_CMP_OPCODE
+};
+
+/* Toe transmission completion element */
+struct toe_tx_cqe {
+ __le16 icid;
+ u8 opcode;
+ u8 reserved;
+ __le32 size;
+};
+
+/* Toe transmission page pointer bd */
+struct toe_tx_page_pointer_bd {
+ struct regpair next_page_addr;
+ struct regpair prev_page_addr;
+};
+
+/* Toe transmission completion element page pointer */
+struct toe_tx_page_pointer_cqe {
+ struct regpair next_page_addr;
+};
+
+/* toe update parameters */
+struct toe_update_params {
+ __le16 flags;
+#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_MASK 0x1
+#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_SHIFT 0
+#define TOE_UPDATE_PARAMS_RESERVED_MASK 0x7FFF
+#define TOE_UPDATE_PARAMS_RESERVED_SHIFT 1
+ __le16 rcv_indication_size;
+ __le16 reserved1[2];
+};
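/*
 * Editor's sketch (illustrative only): changing the receive indication
 * size would set both the new value and its "changed" flag, e.g.
 *
 *	u16 flags = 0;
 *
 *	SET_FIELD(flags, TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED, 1);
 *	params->flags = cpu_to_le16(flags);
 *	params->rcv_indication_size = cpu_to_le16(new_size);
 *
 * new_size is a hypothetical local; fields whose "changed" bit stays
 * clear are left untouched by the firmware, as the flag name suggests.
 */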
+
+/* TOE update ramrod data - DMAed by firmware */
+struct toe_update_ramrod_data {
+ struct tcp_update_params tcp_upd_params;
+ struct toe_update_params toe_upd_params;
+};
+
+struct mstorm_toe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+/* TOE doorbell data */
+struct toe_db_data {
+ u8 params;
+#define TOE_DB_DATA_DEST_MASK 0x3
+#define TOE_DB_DATA_DEST_SHIFT 0
+#define TOE_DB_DATA_AGG_CMD_MASK 0x3
+#define TOE_DB_DATA_AGG_CMD_SHIFT 2
+#define TOE_DB_DATA_BYPASS_EN_MASK 0x1
+#define TOE_DB_DATA_BYPASS_EN_SHIFT 4
+#define TOE_DB_DATA_RESERVED_MASK 0x1
+#define TOE_DB_DATA_RESERVED_SHIFT 5
+#define TOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define TOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 bd_prod;
};
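/*
 * Editor's sketch (illustrative only): ringing a TX doorbell would pack
 * the params byte with SET_FIELD() and the destination/aggregation
 * selectors from common_hsi.h, roughly
 *
 *	struct toe_db_data db = { 0 };
 *
 *	SET_FIELD(db.params, TOE_DB_DATA_DEST, DB_DEST_XCM);
 *	SET_FIELD(db.params, TOE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
 *	db.bd_prod = cpu_to_le16(txq_bd_prod);
 *
 * DB_DEST_XCM and DB_AGG_CMD_SET are the common qed doorbell selector
 * enums; txq_bd_prod is a hypothetical local.
 */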
/* rdma function init ramrod data */
@@ -6911,6 +6150,8 @@ enum rdma_event_opcode {
RDMA_EVENT_CREATE_SRQ,
RDMA_EVENT_MODIFY_SRQ,
RDMA_EVENT_DESTROY_SRQ,
+ RDMA_EVENT_START_NAMESPACE_TRACKING,
+ RDMA_EVENT_STOP_NAMESPACE_TRACKING,
MAX_RDMA_EVENT_OPCODE
};
@@ -6935,18 +6176,33 @@ struct rdma_init_func_hdr {
u8 relaxed_ordering;
__le16 first_reg_srq_id;
__le32 reg_srq_base_addr;
- u8 searcher_mode;
- u8 pvrdma_mode;
+ u8 flags;
+#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_SHIFT 0
+#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_SHIFT 1
+#define RDMA_INIT_FUNC_HDR_DPT_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_DPT_MODE_SHIFT 2
+#define RDMA_INIT_FUNC_HDR_RESERVED0_MASK 0x1F
+#define RDMA_INIT_FUNC_HDR_RESERVED0_SHIFT 3
+ u8 dpt_byte_threshold_log;
+ u8 dpt_common_queue_id;
u8 max_num_ns_log;
- u8 reserved;
};
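/*
 * Editor's sketch (illustrative only): with searcher_mode/pvrdma_mode
 * folded into the new flags bitfield, callers now pack them with
 * SET_FIELD() instead of writing separate bytes, e.g.
 *
 *	u8 flags = 0;
 *
 *	SET_FIELD(flags, RDMA_INIT_FUNC_HDR_SEARCHER_MODE, searcher_en);
 *	SET_FIELD(flags, RDMA_INIT_FUNC_HDR_PVRDMA_MODE, pvrdma_en);
 *	SET_FIELD(flags, RDMA_INIT_FUNC_HDR_DPT_MODE, dpt_en);
 *	params_header->flags = flags;
 *
 * searcher_en/pvrdma_en/dpt_en are hypothetical locals standing in for
 * the caller's configuration.
 */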
/* rdma function init ramrod data */
struct rdma_init_func_ramrod_data {
struct rdma_init_func_hdr params_header;
+ struct rdma_cnq_params dptq_params;
struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
};
+/* rdma namespace tracking ramrod data */
+struct rdma_namespace_tracking_ramrod_data {
+ u8 name_space;
+ u8 reserved[7];
+};
+
/* RDMA ramrod command IDs */
enum rdma_ramrod_cmd_id {
RDMA_RAMROD_UNUSED,
@@ -6960,6 +6216,8 @@ enum rdma_ramrod_cmd_id {
RDMA_RAMROD_CREATE_SRQ,
RDMA_RAMROD_MODIFY_SRQ,
RDMA_RAMROD_DESTROY_SRQ,
+ RDMA_RAMROD_START_NS_TRACKING,
+ RDMA_RAMROD_STOP_NS_TRACKING,
MAX_RDMA_RAMROD_CMD_ID
};
@@ -7093,73 +6351,73 @@ struct rdma_xrc_srq_context {
struct regpair temp[9];
};
-struct e4_tstorm_rdma_task_ag_ctx {
+struct tstorm_rdma_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
@@ -7172,63 +6430,63 @@ struct e4_tstorm_rdma_task_ag_ctx {
__le32 reg2;
};
-struct e4_ustorm_rdma_conn_ag_ctx {
+struct ustorm_rdma_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 nvmf_only;
__le16 conn_dpi;
@@ -7241,214 +6499,214 @@ struct e4_ustorm_rdma_conn_ag_ctx {
__le16 word3;
};
-struct e4_xstorm_roce_conn_ag_ctx {
+struct xstorm_roce_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -7470,89 +6728,89 @@ struct e4_xstorm_roce_conn_ag_ctx {
__le32 reg6;
};
-struct e4_tstorm_roce_conn_ag_ctx {
+struct tstorm_roce_conn_ag_ctx {
u8 reserved0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -7605,15 +6863,15 @@ struct ustorm_roce_conn_st_ctx {
};
/* roce connection context */
-struct e4_roce_conn_context {
+struct roce_conn_context {
struct ystorm_roce_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_roce_conn_st_ctx pstorm_st_context;
struct xstorm_roce_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_roce_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_roce_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_roce_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_roce_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_roce_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_roce_conn_st_ctx mstorm_st_context;
@@ -7681,8 +6939,10 @@ struct roce_create_qp_req_ramrod_data {
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1F
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 3
u8 name_space;
u8 reserved3[3];
__le16 regular_latency_phy_queue;
@@ -7714,8 +6974,10 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 18
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x1FFF
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 19
__le16 xrc_domain;
u8 max_ird;
u8 traffic_class;
@@ -7752,10 +7014,85 @@ struct roce_create_qp_resp_ramrod_data {
u8 reserved3[3];
};
+/* RoCE Create Suspended QP requester runtime ramrod data */
+struct roce_create_suspended_qp_req_runtime_ramrod_data {
+ __le32 flags;
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_SHIFT 0
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_MASK \
+ 0x7FFFFFFF
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_SHIFT 1
+ __le32 send_msg_psn;
+ __le32 inflight_sends;
+ __le32 ssn;
+};
+
+/* RoCE Create Suspended QP requester ramrod data */
+struct roce_create_suspended_qp_req_ramrod_data {
+ struct roce_create_qp_req_ramrod_data qp_params;
+ struct roce_create_suspended_qp_req_runtime_ramrod_data
+ qp_runtime_params;
+};
+
+/* RoCE Create Suspended QP responder runtime params */
+struct roce_create_suspended_qp_resp_runtime_params {
+ __le32 flags;
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2
+ __le32 receive_msg_psn;
+ __le32 inflight_receives;
+ __le32 rmsn;
+ __le32 rdma_key;
+ struct regpair rdma_va;
+ __le32 rdma_length;
+ __le32 num_rdb_entries;
+ __le32 reserved;
+};
+
+/* RoCE RDB array entry */
+struct roce_resp_qp_rdb_entry {
+ struct regpair atomic_data;
+ struct regpair va;
+ __le32 psn;
+ __le32 rkey;
+ __le32 byte_count;
+ u8 op_type;
+ u8 reserved[3];
+};
+
+/* RoCE Create Suspended QP responder runtime ramrod data */
+struct roce_create_suspended_qp_resp_runtime_ramrod_data {
+ struct roce_create_suspended_qp_resp_runtime_params params;
+ struct roce_resp_qp_rdb_entry
+ rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE];
+};
+
+/* RoCE Create Suspended QP responder ramrod data */
+struct roce_create_suspended_qp_resp_ramrod_data {
+ struct roce_create_qp_resp_ramrod_data
+ qp_params;
+ struct roce_create_suspended_qp_resp_runtime_ramrod_data
+ qp_runtime_params;
+};
+
+/* RoCE create ud qp ramrod data */
+struct roce_create_ud_qp_ramrod_data {
+ __le16 local_mac_addr[3];
+ __le16 vlan_id;
+ __le32 src_qp_id;
+ u8 name_space;
+ u8 reserved[3];
+};
+
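(The __le16/__le32 fields in the UD-QP ramrod are little-endian on the wire, so the host must convert when filling them. A hypothetical fill helper as a sketch, for kernel context: the function name and the assumption that local_mac_addr[] packs the MAC as three little-endian 16-bit words are illustrative, not taken from the patch:)

static void roce_fill_create_ud_qp(struct roce_create_ud_qp_ramrod_data *p,
				   const u8 mac[6], u16 vlan_id, u32 src_qp_id)
{
	int i;

	/* Assumed layout: two MAC bytes per little-endian 16-bit word. */
	for (i = 0; i < 3; i++)
		p->local_mac_addr[i] =
			cpu_to_le16((mac[2 * i + 1] << 8) | mac[2 * i]);
	p->vlan_id = cpu_to_le16(vlan_id);
	p->src_qp_id = cpu_to_le32(src_qp_id);
	p->name_space = 0;
	memset(p->reserved, 0, sizeof(p->reserved));
}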
/* roce DCQCN received statistics */
struct roce_dcqcn_received_stats {
struct regpair ecn_pkt_rcv;
struct regpair cnp_pkt_rcv;
+ struct regpair cnp_pkt_reject;
};
/* roce DCQCN sent statistics */
@@ -7787,6 +7124,12 @@ struct roce_destroy_qp_resp_ramrod_data {
__le32 reserved;
};
+/* RoCE destroy ud qp ramrod data */
+struct roce_destroy_ud_qp_ramrod_data {
+ __le32 src_qp_id;
+ __le32 reserved;
+};
+
/* roce error statistics */
struct roce_error_stats {
__le32 resp_remote_access_errors;
@@ -7809,13 +7152,21 @@ struct roce_events_stats {
/* roce slow path EQ cmd IDs */
enum roce_event_opcode {
- ROCE_EVENT_CREATE_QP = 11,
+ ROCE_EVENT_CREATE_QP = 13,
ROCE_EVENT_MODIFY_QP,
ROCE_EVENT_QUERY_QP,
ROCE_EVENT_DESTROY_QP,
ROCE_EVENT_CREATE_UD_QP,
ROCE_EVENT_DESTROY_UD_QP,
ROCE_EVENT_FUNC_UPDATE,
+ ROCE_EVENT_SUSPEND_QP,
+ ROCE_EVENT_QUERY_SUSPENDED_QP,
+ ROCE_EVENT_CREATE_SUSPENDED_QP,
+ ROCE_EVENT_RESUME_QP,
+ ROCE_EVENT_SUSPEND_UD_QP,
+ ROCE_EVENT_RESUME_UD_QP,
+ ROCE_EVENT_CREATE_SUSPENDED_UD_QP,
+ ROCE_EVENT_FLUSH_DPT_QP,
MAX_ROCE_EVENT_OPCODE
};
@@ -7843,6 +7194,18 @@ struct roce_init_func_ramrod_data {
struct roce_init_func_params roce;
};
+/* roce_ll2_cqe_data */
+struct roce_ll2_cqe_data {
+ u8 name_space;
+ u8 flags;
+#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_MASK 0x1
+#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_SHIFT 0
+#define ROCE_LL2_CQE_DATA_RESERVED0_MASK 0x7F
+#define ROCE_LL2_CQE_DATA_RESERVED0_SHIFT 1
+ u8 reserved1[2];
+ __le32 cid;
+};
+
/* roce modify qp requester ramrod data */
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
@@ -7870,8 +7233,10 @@ struct roce_modify_qp_req_ramrod_data {
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 14
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 15
u8 fields;
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
@@ -7917,8 +7282,10 @@ struct roce_modify_qp_resp_ramrod_data {
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 11
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0xF
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 12
u8 fields;
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
@@ -7969,18 +7336,84 @@ struct roce_query_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+/* RoCE Query Suspended QP requester output params */
+struct roce_query_suspended_qp_req_output_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+ __le32 send_msg_psn;
+ __le32 inflight_sends;
+ __le32 ssn;
+ __le32 reserved;
+};
+
+/* RoCE Query Suspended QP requester ramrod data */
+struct roce_query_suspended_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* RoCE Query Suspended QP responder runtime params */
+struct roce_query_suspended_qp_resp_runtime_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2
+ __le32 receive_msg_psn;
+ __le32 inflight_receives;
+ __le32 rmsn;
+ __le32 rdma_key;
+ struct regpair rdma_va;
+ __le32 rdma_length;
+ __le32 num_rdb_entries;
+};
+
+/* RoCE Query Suspended QP responder output params */
+struct roce_query_suspended_qp_resp_output_params {
+ struct roce_query_suspended_qp_resp_runtime_params runtime_params;
+ struct roce_resp_qp_rdb_entry
+ rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE];
+};
+
+/* RoCE Query Suspended QP responder ramrod data */
+struct roce_query_suspended_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
/* ROCE ramrod command IDs */
enum roce_ramrod_cmd_id {
- ROCE_RAMROD_CREATE_QP = 11,
+ ROCE_RAMROD_CREATE_QP = 13,
ROCE_RAMROD_MODIFY_QP,
ROCE_RAMROD_QUERY_QP,
ROCE_RAMROD_DESTROY_QP,
ROCE_RAMROD_CREATE_UD_QP,
ROCE_RAMROD_DESTROY_UD_QP,
ROCE_RAMROD_FUNC_UPDATE,
+ ROCE_RAMROD_SUSPEND_QP,
+ ROCE_RAMROD_QUERY_SUSPENDED_QP,
+ ROCE_RAMROD_CREATE_SUSPENDED_QP,
+ ROCE_RAMROD_RESUME_QP,
+ ROCE_RAMROD_SUSPEND_UD_QP,
+ ROCE_RAMROD_RESUME_UD_QP,
+ ROCE_RAMROD_CREATE_SUSPENDED_UD_QP,
+ ROCE_RAMROD_FLUSH_DPT_QP,
MAX_ROCE_RAMROD_CMD_ID
};
+/* ROCE RDB array entry type */
+enum roce_resp_qp_rdb_entry_type {
+ ROCE_QP_RDB_ENTRY_RDMA_RESPONSE = 0,
+ ROCE_QP_RDB_ENTRY_ATOMIC_RESPONSE = 1,
+ ROCE_QP_RDB_ENTRY_INVALID = 2,
+ MAX_ROCE_RESP_QP_RDB_ENTRY_TYPE
+};
+
/* RoCE func init ramrod data */
struct roce_update_func_params {
u8 cnp_vlan_priority;
@@ -7995,7 +7428,7 @@ struct roce_update_func_params {
__le32 cnp_send_timeout;
};
-struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
u8 reserved0;
u8 state;
u8 flags0;
@@ -8222,200 +7655,200 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
__le32 reg4;
};
-struct e4_mstorm_roce_conn_ag_ctx {
+struct mstorm_roce_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_mstorm_roce_req_conn_ag_ctx {
+struct mstorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_mstorm_roce_resp_conn_ag_ctx {
+struct mstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_tstorm_roce_req_conn_ag_ctx {
+struct tstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 dif_rxmit_cnt;
__le32 snd_nxt_psn;
__le32 snd_max_psn;
@@ -8437,89 +7870,89 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
__le32 reg10;
};
-struct e4_tstorm_roce_resp_conn_ag_ctx {
+struct tstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 psn_and_rxmit_id_echo;
__le32 reg1;
__le32 reg2;
@@ -8541,63 +7974,63 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
__le32 reg10;
};
-struct e4_ustorm_roce_req_conn_ag_ctx {
+struct ustorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8610,63 +8043,63 @@ struct e4_ustorm_roce_req_conn_ag_ctx {
__le16 word3;
};
-struct e4_ustorm_roce_resp_conn_ag_ctx {
+struct ustorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8679,214 +8112,214 @@ struct e4_ustorm_roce_resp_conn_ag_ctx {
__le16 word3;
};
-struct e4_xstorm_roce_req_conn_ag_ctx {
+struct xstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
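
A property visible in the diff itself (restated here with an illustrative compile-time check, not anything added by the patch): within each flagsN byte, the mask/shift pairs tile the byte exactly, with no overlap and no gaps — four 2-bit CF fields at shifts 0/2/4/6, or eight 1-bit enables at shifts 0..7. A minimal sketch, assuming hypothetical CTX_CF* names standing in for any of the CF groups above:

#include <assert.h>	/* static_assert, C11 */

#define CTX_CF0_MASK	0x3
#define CTX_CF0_SHIFT	0
#define CTX_CF1_MASK	0x3
#define CTX_CF1_SHIFT	2
#define CTX_CF2_MASK	0x3
#define CTX_CF2_SHIFT	4
#define CTX_CF3_MASK	0x3
#define CTX_CF3_SHIFT	6

/* The OR of the shifted masks covering 0xff, with a total field width
 * of 8 bits, means the four fields partition the byte exactly. */
static_assert(((CTX_CF0_MASK << CTX_CF0_SHIFT) |
	       (CTX_CF1_MASK << CTX_CF1_SHIFT) |
	       (CTX_CF2_MASK << CTX_CF2_SHIFT) |
	       (CTX_CF3_MASK << CTX_CF3_SHIFT)) == 0xff,
	      "CF0..CF3 must tile the flags byte");
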
@@ -8908,216 +8341,216 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
__le32 orq_cons;
};
-struct e4_xstorm_roce_resp_conn_ag_ctx {
+struct xstorm_roce_resp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 irq_prod_shadow;
@@ -9139,37 +8572,37 @@ struct e4_xstorm_roce_resp_conn_ag_ctx {
__le32 msn_and_syndrome;
};
-struct e4_ystorm_roce_conn_ag_ctx {
+struct ystorm_roce_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9183,37 +8616,37 @@ struct e4_ystorm_roce_conn_ag_ctx {
__le32 reg3;
};
-struct e4_ystorm_roce_req_conn_ag_ctx {
+struct ystorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9227,37 +8660,37 @@ struct e4_ystorm_roce_req_conn_ag_ctx {
__le32 reg3;
};
-struct e4_ystorm_roce_resp_conn_ag_ctx {
+struct ystorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
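
Since only the E4_/e4_ prefix is dropped and every value is preserved, old and new names stay interchangeable at the preprocessor level. A hypothetical compatibility shim — not part of this patch — for out-of-tree code still using the prefixed names would reduce to plain aliases:

/* Illustrative aliases only: the numeric values behind both names are
 * identical, so each old name can simply expand to the new one. */
#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK \
	YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK
#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT \
	YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT
/* Struct tags rename the same way; a textual alias covers uses of
 * "struct e4_ystorm_roce_resp_conn_ag_ctx" as well. */
#define e4_ystorm_roce_resp_conn_ag_ctx ystorm_roce_resp_conn_ag_ctx
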
@@ -9294,216 +8727,216 @@ struct xstorm_iwarp_conn_st_ctx {
__le32 reserved[48];
};
-struct e4_xstorm_iwarp_conn_ag_ctx {
+struct xstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
u8 flags2;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -9551,89 +8984,89 @@ struct e4_xstorm_iwarp_conn_ag_ctx {
__le32 reg17;
};
-struct e4_tstorm_iwarp_conn_ag_ctx {
+struct tstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 unaligned_nxt_seq;
@@ -9671,16 +9104,16 @@ struct ustorm_iwarp_conn_st_ctx {
};
/* iwarp connection context */
-struct e4_iwarp_conn_context {
+struct iwarp_conn_context {
struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
@@ -9731,8 +9164,8 @@ enum iwarp_eqe_async_opcode {
IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
- IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT,
+ IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
MAX_IWARP_EQE_ASYNC_OPCODE
};
@@ -9750,8 +9183,7 @@ struct iwarp_eqe_data_tcp_async_completion {
/* iWARP completion queue types */
enum iwarp_eqe_sync_opcode {
- IWARP_EVENT_TYPE_TCP_OFFLOAD =
- 11,
+ IWARP_EVENT_TYPE_TCP_OFFLOAD = 13,
IWARP_EVENT_TYPE_MPA_OFFLOAD,
IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR,
IWARP_EVENT_TYPE_CREATE_QP,
@@ -9783,8 +9215,6 @@ enum iwarp_fw_return_code {
IWARP_EXCEPTION_DETECTED_LLP_RESET,
IWARP_EXCEPTION_DETECTED_IRQ_FULL,
IWARP_EXCEPTION_DETECTED_RQ_EMPTY,
- IWARP_EXCEPTION_DETECTED_SRQ_EMPTY,
- IWARP_EXCEPTION_DETECTED_SRQ_LIMIT,
IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT,
IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR,
IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW,
@@ -9878,9 +9308,10 @@ struct iwarp_mpa_offload_ramrod_data {
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
struct regpair shared_queue_addr;
+ __le32 additional_setup_time;
__le16 rcv_wnd;
u8 stats_counter_id;
- u8 reserved3[13];
+ u8 reserved3[9];
};
/* iWARP TCP connection offload params passed by driver to FW */
@@ -9888,11 +9319,13 @@ struct iwarp_offload_params {
struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
+ __le32 additional_setup_time;
__le16 physical_q0;
__le16 physical_q1;
u8 stats_counter_id;
u8 mpa_mode;
- u8 reserved[10];
+ u8 src_vport_id;
+ u8 reserved[5];
};
/* iWARP query QP output params */
@@ -9912,7 +9345,7 @@ struct iwarp_query_qp_ramrod_data {
/* iWARP Ramrod Command IDs */
enum iwarp_ramrod_cmd_id {
- IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 13,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
IWARP_RAMROD_CMD_ID_CREATE_QP,
@@ -9971,100 +9404,100 @@ struct unaligned_opaque_data {
__le32 cid;
};
-struct e4_mstorm_iwarp_conn_ag_ctx {
+struct mstorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 rcq_cons;
__le16 rcq_cons_th;
__le32 reg0;
__le32 reg1;
};
-struct e4_ustorm_iwarp_conn_ag_ctx {
+struct ustorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10077,37 +9510,37 @@ struct e4_ustorm_iwarp_conn_ag_ctx {
__le16 word3;
};
-struct e4_ystorm_iwarp_conn_ag_ctx {
+struct ystorm_iwarp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10297,216 +9730,216 @@ struct xstorm_fcoe_conn_st_ctx {
struct fcoe_wqe cached_wqes[16];
};
-struct e4_xstorm_fcoe_conn_ag_ctx {
+struct xstorm_fcoe_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
u8 flags2;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -10544,150 +9977,150 @@ struct ustorm_fcoe_conn_st_ctx {
u8 reserved[2];
};
-struct e4_tstorm_fcoe_conn_ag_ctx {
+struct tstorm_fcoe_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
u8 flags1;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
};
-struct e4_ustorm_fcoe_conn_ag_ctx {
+struct ustorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10728,37 +10161,37 @@ struct tstorm_fcoe_conn_st_ctx {
u8 reserved0[4];
};
-struct e4_mstorm_fcoe_conn_ag_ctx {
+struct mstorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
@@ -10804,21 +10237,21 @@ struct mstorm_fcoe_conn_st_ctx {
};
/* fcoe connection context */
-struct e4_fcoe_conn_context {
+struct fcoe_conn_context {
struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+ struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
struct regpair xstorm_ag_padding[6];
struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+ struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
- struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
};
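
The MASK/SHIFT pairs above pack several one- and two-bit fields into each flagsN byte of the aggregation context. A minimal sketch of how such a field is typically read and written, assuming the generic GET_FIELD()/SET_FIELD() helpers from the qed core headers (the helper names are from the driver, the wrapper functions below are illustrative only):

/* Sketch: accessing the SQ decision-enable bit in flags12, assuming
 * GET_FIELD(value, name) and SET_FIELD(value, name, flag) expand the
 * name##_MASK / name##_SHIFT pairs defined in this header.
 */
static void fcoe_set_sq_decision(struct xstorm_fcoe_conn_ag_ctx *ctx, u8 en)
{
	/* Clears the field in flags12, then ORs in 'en' at its shift. */
	SET_FIELD(ctx->flags12, XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN, en);
}

static u8 fcoe_get_sq_decision(const struct xstorm_fcoe_conn_ag_ctx *ctx)
{
	return GET_FIELD(ctx->flags12, XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN);
}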
@@ -10869,37 +10302,37 @@ struct fcoe_stat_ramrod_params {
struct fcoe_stat_ramrod_data stat_ramrod_data;
};
-struct e4_ystorm_fcoe_conn_ag_ctx {
+struct ystorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10930,216 +10363,216 @@ struct xstorm_iscsi_tcp_conn_st_ctx {
__le32 reserved_iscsi[44];
};
-struct e4_xstorm_iscsi_conn_ag_ctx {
+struct xstorm_iscsi_conn_ag_ctx {
u8 cdu_validation;
u8 state;
u8 flags0;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -11187,89 +10620,89 @@ struct e4_xstorm_iscsi_conn_ag_ctx {
__le32 reg17;
};
-struct e4_tstorm_iscsi_conn_ag_ctx {
+struct tstorm_iscsi_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 rx_tcp_checksum_err_cnt;
@@ -11284,63 +10717,63 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
__le16 word0;
};
-struct e4_ustorm_iscsi_conn_ag_ctx {
+struct ustorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11358,37 +10791,37 @@ struct tstorm_iscsi_conn_st_ctx {
__le32 reserved[44];
};
-struct e4_mstorm_iscsi_conn_ag_ctx {
+struct mstorm_iscsi_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
@@ -11407,22 +10840,22 @@ struct ustorm_iscsi_conn_st_ctx {
};
/* iscsi connection context */
-struct e4_iscsi_conn_context {
+struct iscsi_conn_context {
struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct pb_context xpb2_context;
struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
struct pb_context upb_context;
struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
};
@@ -11433,37 +10866,37 @@ struct iscsi_init_ramrod_params {
struct tcp_init_params tcp_init;
};
-struct e4_ystorm_iscsi_conn_ag_ctx {
+struct ystorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11477,1922 +10910,4 @@ struct e4_ystorm_iscsi_conn_ag_ctx {
__le32 reg3;
};
-#define MFW_TRACE_SIGNATURE 0x25071946
-
-/* Format of a single trace entry in the buffer */
-#define MFW_TRACE_EVENTID_MASK 0x00ffff
-#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
-#define MFW_TRACE_PRM_SIZE_OFFSET 16
-#define MFW_TRACE_ENTRY_SIZE 3
-
-struct mcp_trace {
- u32 signature; /* Helps to identify that the trace is valid */
- u32 size; /* The size of the trace buffer in bytes */
- u32 curr_level; /* 2 - everything will be written to the buffer
- * 1 - debug traces will not be written
- * 0 - only errors will be written to the buffer
- */
- u32 modules_mask[2]; /* A bit per module: 1 means write it, 0 means
- * mask it.
- */
-
- /* Warning: the following pointers are assumed to be 32 bits as they
- * are used only in the MFW.
- */
- u32 trace_prod; /* The next trace will be written to this offset */
- u32 trace_oldest; /* The oldest valid trace starts at this offset
- * (usually very close after the current producer).
- */
-};
-
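The mcp_trace header above describes a circular log: trace_prod is the offset the MFW will write next and trace_oldest marks the start of the oldest valid data, so the valid window runs from trace_oldest to trace_prod modulo size. A minimal host-side sketch of walking that window follows; it is an illustration only (the struct copy is trimmed, and real entries are variable-length records beginning with an MFW_TRACE_ENTRY_SIZE-byte header, whereas this just steps over bytes):

#include <stdint.h>
#include <stdio.h>

/* Trimmed, standalone copy of the deleted struct mcp_trace layout. */
struct mcp_trace_hdr {
	uint32_t signature;    /* must equal MFW_TRACE_SIGNATURE */
	uint32_t size;         /* trace buffer size in bytes */
	uint32_t trace_prod;   /* next write offset */
	uint32_t trace_oldest; /* oldest valid offset */
};

static void walk_trace(const struct mcp_trace_hdr *h, const uint8_t *buf)
{
	uint32_t off = h->trace_oldest;

	if (h->signature != 0x25071946) /* MFW_TRACE_SIGNATURE */
		return;

	while (off != h->trace_prod) {
		printf("byte at %u: 0x%02x\n", (unsigned)off, buf[off]);
		off = (off + 1) % h->size; /* wrap around the ring */
	}
}
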
-#define VF_MAX_STATIC 192
-
-#define MCP_GLOB_PATH_MAX 2
-#define MCP_PORT_MAX 2
-#define MCP_GLOB_PORT_MAX 4
-#define MCP_GLOB_FUNC_MAX 16
-
-typedef u32 offsize_t; /* In DWORDS !!! */
-/* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT 0
-#define OFFSIZE_OFFSET_MASK 0x0000ffff
-/* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT 16
-#define OFFSIZE_SIZE_MASK 0xffff0000
-
-#define SECTION_OFFSET(_offsize) ((((_offsize & \
- OFFSIZE_OFFSET_MASK) >> \
- OFFSIZE_OFFSET_SHIFT) << 2))
-
-#define QED_SECTION_SIZE(_offsize) (((_offsize & \
- OFFSIZE_SIZE_MASK) >> \
- OFFSIZE_SIZE_SHIFT) << 2)
-
-#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
- SECTION_OFFSET(_offsize) + \
- (QED_SECTION_SIZE(_offsize) * idx))
-
-#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
- (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
-
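An offsize_t packs a section's location into one dword: the offset from the scratchpad base lives in the low 16 bits and the per-element size in the high 16 bits, both counted in dwords, which is why SECTION_OFFSET() and QED_SECTION_SIZE() shift the extracted field left by 2 to get bytes. A standalone sketch of the same unpacking (mask and shift values copied from the definitions above; the sample offsize value is made up):

#include <stdint.h>
#include <stdio.h>

#define OFFSIZE_OFFSET_SHIFT 0
#define OFFSIZE_OFFSET_MASK  0x0000ffff
#define OFFSIZE_SIZE_SHIFT   16
#define OFFSIZE_SIZE_MASK    0xffff0000

int main(void)
{
	uint32_t offsize = (0x0040u << OFFSIZE_SIZE_SHIFT) | 0x0123; /* sample */
	uint32_t off_bytes = ((offsize & OFFSIZE_OFFSET_MASK) >>
			      OFFSIZE_OFFSET_SHIFT) << 2; /* dwords -> bytes */
	uint32_t size_bytes = ((offsize & OFFSIZE_SIZE_MASK) >>
			       OFFSIZE_SIZE_SHIFT) << 2;

	/* Element idx then sits at scratchpad base + off_bytes +
	 * size_bytes * idx, which is what SECTION_ADDR() computes.
	 */
	printf("offset %u bytes, element size %u bytes\n",
	       (unsigned)off_bytes, (unsigned)size_bytes);
	return 0;
}
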
-/* PHY configuration */
-struct eth_phy_cfg {
- u32 speed;
-#define ETH_SPEED_AUTONEG 0x0
-#define ETH_SPEED_SMARTLINQ 0x8
-
- u32 pause;
-#define ETH_PAUSE_NONE 0x0
-#define ETH_PAUSE_AUTONEG 0x1
-#define ETH_PAUSE_RX 0x2
-#define ETH_PAUSE_TX 0x4
-
- u32 adv_speed;
-
- u32 loopback_mode;
-#define ETH_LOOPBACK_NONE 0x0
-#define ETH_LOOPBACK_INT_PHY 0x1
-#define ETH_LOOPBACK_EXT_PHY 0x2
-#define ETH_LOOPBACK_EXT 0x3
-#define ETH_LOOPBACK_MAC 0x4
-#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5
-#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6
-#define ETH_LOOPBACK_PCS_AH_ONLY 0x7
-#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8
-#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9
-
- u32 eee_cfg;
-#define EEE_CFG_EEE_ENABLED BIT(0)
-#define EEE_CFG_TX_LPI BIT(1)
-#define EEE_CFG_ADV_SPEED_1G BIT(2)
-#define EEE_CFG_ADV_SPEED_10G BIT(3)
-#define EEE_TX_TIMER_USEC_MASK 0xfffffff0
-#define EEE_TX_TIMER_USEC_OFFSET 4
-#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00
-#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100
-#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000
-
- u32 deprecated;
-
- u32 fec_mode;
-#define FEC_FORCE_MODE_MASK 0x000000ff
-#define FEC_FORCE_MODE_OFFSET 0
-#define FEC_FORCE_MODE_NONE 0x00
-#define FEC_FORCE_MODE_FIRECODE 0x01
-#define FEC_FORCE_MODE_RS 0x02
-#define FEC_FORCE_MODE_AUTO 0x07
-#define FEC_EXTENDED_MODE_MASK 0xffffff00
-#define FEC_EXTENDED_MODE_OFFSET 8
-#define ETH_EXT_FEC_NONE 0x00000100
-#define ETH_EXT_FEC_10G_NONE 0x00000200
-#define ETH_EXT_FEC_10G_BASE_R 0x00000400
-#define ETH_EXT_FEC_20G_NONE 0x00000800
-#define ETH_EXT_FEC_20G_BASE_R 0x00001000
-#define ETH_EXT_FEC_25G_NONE 0x00002000
-#define ETH_EXT_FEC_25G_BASE_R 0x00004000
-#define ETH_EXT_FEC_25G_RS528 0x00008000
-#define ETH_EXT_FEC_40G_NONE 0x00010000
-#define ETH_EXT_FEC_40G_BASE_R 0x00020000
-#define ETH_EXT_FEC_50G_NONE 0x00040000
-#define ETH_EXT_FEC_50G_BASE_R 0x00080000
-#define ETH_EXT_FEC_50G_RS528 0x00100000
-#define ETH_EXT_FEC_50G_RS544 0x00200000
-#define ETH_EXT_FEC_100G_NONE 0x00400000
-#define ETH_EXT_FEC_100G_BASE_R 0x00800000
-#define ETH_EXT_FEC_100G_RS528 0x01000000
-#define ETH_EXT_FEC_100G_RS544 0x02000000
-
- u32 extended_speed;
-#define ETH_EXT_SPEED_MASK 0x0000ffff
-#define ETH_EXT_SPEED_OFFSET 0
-#define ETH_EXT_SPEED_AN 0x00000001
-#define ETH_EXT_SPEED_1G 0x00000002
-#define ETH_EXT_SPEED_10G 0x00000004
-#define ETH_EXT_SPEED_20G 0x00000008
-#define ETH_EXT_SPEED_25G 0x00000010
-#define ETH_EXT_SPEED_40G 0x00000020
-#define ETH_EXT_SPEED_50G_BASE_R 0x00000040
-#define ETH_EXT_SPEED_50G_BASE_R2 0x00000080
-#define ETH_EXT_SPEED_100G_BASE_R2 0x00000100
-#define ETH_EXT_SPEED_100G_BASE_R4 0x00000200
-#define ETH_EXT_SPEED_100G_BASE_P4 0x00000400
-#define ETH_EXT_ADV_SPEED_MASK 0xffff0000
-#define ETH_EXT_ADV_SPEED_OFFSET 16
-#define ETH_EXT_ADV_SPEED_RESERVED 0x00010000
-#define ETH_EXT_ADV_SPEED_1G 0x00020000
-#define ETH_EXT_ADV_SPEED_10G 0x00040000
-#define ETH_EXT_ADV_SPEED_20G 0x00080000
-#define ETH_EXT_ADV_SPEED_25G 0x00100000
-#define ETH_EXT_ADV_SPEED_40G 0x00200000
-#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00400000
-#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00800000
-#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x01000000
-#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x02000000
-#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x04000000
-};
-
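In eth_phy_cfg above, fec_mode splits into a forced-FEC selector in the low byte and an extended-FEC advertisement bitmask in the upper bits. A hedged sketch of composing the word to force RS-FEC while also advertising 25G RS528 (constants copied from the defines above; whether these two settings belong together depends on the actual link configuration):

#include <stdint.h>

#define FEC_FORCE_MODE_MASK   0x000000ff
#define FEC_FORCE_MODE_OFFSET 0
#define FEC_FORCE_MODE_RS     0x02
#define ETH_EXT_FEC_25G_RS528 0x00008000

static uint32_t make_fec_mode(void)
{
	uint32_t fec = 0;

	/* Forced mode occupies the low byte... */
	fec |= (FEC_FORCE_MODE_RS << FEC_FORCE_MODE_OFFSET) &
	       FEC_FORCE_MODE_MASK;
	/* ...extended advertisement bits sit above it. */
	fec |= ETH_EXT_FEC_25G_RS528;
	return fec;
}
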
-struct port_mf_cfg {
- u32 dynamic_cfg;
-#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_OV_TAG_SHIFT 0
-#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
-
- u32 reserved[1];
-};
-
-struct eth_stats {
- u64 r64;
- u64 r127;
- u64 r255;
- u64 r511;
- u64 r1023;
- u64 r1518;
-
- union {
- struct {
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
- } bb0;
- struct {
- u64 unused1;
- u64 r1519_to_max;
- u64 unused2;
- u64 unused3;
- u64 unused4;
- } ah0;
- } u0;
-
- u64 rfcs;
- u64 rxcf;
- u64 rxpf;
- u64 rxpp;
- u64 raln;
- u64 rfcr;
- u64 rovr;
- u64 rjbr;
- u64 rund;
- u64 rfrg;
- u64 t64;
- u64 t127;
- u64 t255;
- u64 t511;
- u64 t1023;
- u64 t1518;
-
- union {
- struct {
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
- } bb1;
- struct {
- u64 t1519_to_max;
- u64 unused6;
- u64 unused7;
- u64 unused8;
- } ah1;
- } u1;
-
- u64 txpf;
- u64 txpp;
-
- union {
- struct {
- u64 tlpiec;
- u64 tncl;
- } bb2;
- struct {
- u64 unused9;
- u64 unused10;
- } ah2;
- } u2;
-
- u64 rbyte;
- u64 rxuca;
- u64 rxmca;
- u64 rxbca;
- u64 rxpok;
- u64 tbyte;
- u64 txuca;
- u64 txmca;
- u64 txbca;
- u64 txcf;
-};
-
-struct brb_stats {
- u64 brb_truncate[8];
- u64 brb_discard[8];
-};
-
-struct port_stats {
- struct brb_stats brb;
- struct eth_stats eth;
-};
-
-struct couple_mode_teaming {
- u8 port_cmt[MCP_GLOB_PORT_MAX];
-#define PORT_CMT_IN_TEAM (1 << 0)
-
-#define PORT_CMT_PORT_ROLE (1 << 1)
-#define PORT_CMT_PORT_INACTIVE (0 << 1)
-#define PORT_CMT_PORT_ACTIVE (1 << 1)
-
-#define PORT_CMT_TEAM_MASK (1 << 2)
-#define PORT_CMT_TEAM0 (0 << 2)
-#define PORT_CMT_TEAM1 (1 << 2)
-};
-
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#define LLDP_PORT_ID_STAT_LEN 4
-#define DCBX_MAX_APP_PROTOCOL 32
-#define MAX_SYSTEM_LLDP_TLV_DATA 32
-
-enum _lldp_agent {
- LLDP_NEAREST_BRIDGE = 0,
- LLDP_NEAREST_NON_TPMR_BRIDGE,
- LLDP_NEAREST_CUSTOMER_BRIDGE,
- LLDP_MAX_LLDP_AGENTS
-};
-
-struct lldp_config_params_s {
- u32 config;
-#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
-#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
-#define LLDP_CONFIG_HOLD_MASK 0x00000f00
-#define LLDP_CONFIG_HOLD_SHIFT 8
-#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
-#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
-#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
-#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
-#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
-#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
- u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
-};
-
-struct lldp_status_params_s {
- u32 prefix_seq_num;
- u32 status;
- u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
- u32 suffix_seq_num;
-};
-
-struct dcbx_ets_feature {
- u32 flags;
-#define DCBX_ETS_ENABLED_MASK 0x00000001
-#define DCBX_ETS_ENABLED_SHIFT 0
-#define DCBX_ETS_WILLING_MASK 0x00000002
-#define DCBX_ETS_WILLING_SHIFT 1
-#define DCBX_ETS_ERROR_MASK 0x00000004
-#define DCBX_ETS_ERROR_SHIFT 2
-#define DCBX_ETS_CBS_MASK 0x00000008
-#define DCBX_ETS_CBS_SHIFT 3
-#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
-#define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_OOO_TC_MASK 0x00000f00
-#define DCBX_OOO_TC_SHIFT 8
- u32 pri_tc_tbl[1];
-#define DCBX_TCP_OOO_TC (4)
-
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
-#define DCBX_CEE_STRICT_PRIORITY 0xf
- u32 tc_bw_tbl[2];
- u32 tc_tsa_tbl[2];
-#define DCBX_ETS_TSA_STRICT 0
-#define DCBX_ETS_TSA_CBS 1
-#define DCBX_ETS_TSA_ETS 2
-};
-
-#define DCBX_TCP_OOO_TC (4)
-#define DCBX_TCP_OOO_K2_4PORT_TC (3)
-
-struct dcbx_app_priority_entry {
- u32 entry;
-#define DCBX_APP_PRI_MAP_MASK 0x000000ff
-#define DCBX_APP_PRI_MAP_SHIFT 0
-#define DCBX_APP_PRI_0 0x01
-#define DCBX_APP_PRI_1 0x02
-#define DCBX_APP_PRI_2 0x04
-#define DCBX_APP_PRI_3 0x08
-#define DCBX_APP_PRI_4 0x10
-#define DCBX_APP_PRI_5 0x20
-#define DCBX_APP_PRI_6 0x40
-#define DCBX_APP_PRI_7 0x80
-#define DCBX_APP_SF_MASK 0x00000300
-#define DCBX_APP_SF_SHIFT 8
-#define DCBX_APP_SF_ETHTYPE 0
-#define DCBX_APP_SF_PORT 1
-#define DCBX_APP_SF_IEEE_MASK 0x0000f000
-#define DCBX_APP_SF_IEEE_SHIFT 12
-#define DCBX_APP_SF_IEEE_RESERVED 0
-#define DCBX_APP_SF_IEEE_ETHTYPE 1
-#define DCBX_APP_SF_IEEE_TCP_PORT 2
-#define DCBX_APP_SF_IEEE_UDP_PORT 3
-#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
-
-#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
-#define DCBX_APP_PROTOCOL_ID_SHIFT 16
-};
-
-struct dcbx_app_priority_feature {
- u32 flags;
-#define DCBX_APP_ENABLED_MASK 0x00000001
-#define DCBX_APP_ENABLED_SHIFT 0
-#define DCBX_APP_WILLING_MASK 0x00000002
-#define DCBX_APP_WILLING_SHIFT 1
-#define DCBX_APP_ERROR_MASK 0x00000004
-#define DCBX_APP_ERROR_SHIFT 2
-#define DCBX_APP_MAX_TCS_MASK 0x0000f000
-#define DCBX_APP_MAX_TCS_SHIFT 12
-#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
-#define DCBX_APP_NUM_ENTRIES_SHIFT 16
- struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
-};
-
-struct dcbx_features {
- struct dcbx_ets_feature ets;
- u32 pfc;
-#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
-#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
-
-#define DCBX_PFC_FLAGS_MASK 0x0000ff00
-#define DCBX_PFC_FLAGS_SHIFT 8
-#define DCBX_PFC_CAPS_MASK 0x00000f00
-#define DCBX_PFC_CAPS_SHIFT 8
-#define DCBX_PFC_MBC_MASK 0x00004000
-#define DCBX_PFC_MBC_SHIFT 14
-#define DCBX_PFC_WILLING_MASK 0x00008000
-#define DCBX_PFC_WILLING_SHIFT 15
-#define DCBX_PFC_ENABLED_MASK 0x00010000
-#define DCBX_PFC_ENABLED_SHIFT 16
-#define DCBX_PFC_ERROR_MASK 0x00020000
-#define DCBX_PFC_ERROR_SHIFT 17
-
- struct dcbx_app_priority_feature app;
-};
-
-struct dcbx_local_params {
- u32 config;
-#define DCBX_CONFIG_VERSION_MASK 0x00000007
-#define DCBX_CONFIG_VERSION_SHIFT 0
-#define DCBX_CONFIG_VERSION_DISABLED 0
-#define DCBX_CONFIG_VERSION_IEEE 1
-#define DCBX_CONFIG_VERSION_CEE 2
-#define DCBX_CONFIG_VERSION_STATIC 4
-
- u32 flags;
- struct dcbx_features features;
-};
-
-struct dcbx_mib {
- u32 prefix_seq_num;
- u32 flags;
- struct dcbx_features features;
- u32 suffix_seq_num;
-};
-
-struct lldp_system_tlvs_buffer_s {
- u16 valid;
- u16 length;
- u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
-};
-
-struct dcb_dscp_map {
- u32 flags;
-#define DCB_DSCP_ENABLE_MASK 0x1
-#define DCB_DSCP_ENABLE_SHIFT 0
-#define DCB_DSCP_ENABLE 1
- u32 dscp_pri_map[8];
-};
-
-struct public_global {
- u32 max_path;
- u32 max_ports;
-#define MODE_1P 1
-#define MODE_2P 2
-#define MODE_3P 3
-#define MODE_4P 4
- u32 debug_mb_offset;
- u32 phymod_dbg_mb_offset;
- struct couple_mode_teaming cmt;
- s32 internal_temperature;
- u32 mfw_ver;
- u32 running_bundle_id;
- s32 external_temperature;
- u32 mdump_reason;
- u64 reserved;
- u32 data_ptr;
- u32 data_size;
-};
-
-struct fw_flr_mb {
- u32 aggint;
- u32 opgen_addr;
- u32 accum_ack;
-};
-
-struct public_path {
- struct fw_flr_mb flr_mb;
- u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 process_kill;
-#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
-#define PROCESS_KILL_COUNTER_SHIFT 0
-#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
-#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
-#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
-};
-
-struct public_port {
- u32 validity_map;
-
- u32 link_status;
-#define LINK_STATUS_LINK_UP 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-#define LINK_STATUS_PFC_ENABLED 0x00000100
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
-#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
-#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
-#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
-#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
-#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
-#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000
-#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
-#define LINK_STATUS_SFP_TX_FAULT 0x00100000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
-#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
-#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
-#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
-#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
-
-#define LINK_STATUS_FEC_MODE_MASK 0x38000000
-#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
-#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
-#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
-
- u32 link_status1;
- u32 ext_phy_fw_version;
- u32 drv_phy_cfg_addr;
-
- u32 port_stx;
-
- u32 stat_nig_timer;
-
- struct port_mf_cfg port_mf_config;
- struct port_stats stats;
-
- u32 media_type;
-#define MEDIA_UNSPECIFIED 0x0
-#define MEDIA_SFPP_10G_FIBER 0x1
-#define MEDIA_XFP_FIBER 0x2
-#define MEDIA_DA_TWINAX 0x3
-#define MEDIA_BASE_T 0x4
-#define MEDIA_SFP_1G_FIBER 0x5
-#define MEDIA_MODULE_FIBER 0x6
-#define MEDIA_KR 0xf0
-#define MEDIA_NOT_PRESENT 0xff
-
- u32 lfa_status;
- u32 link_change_count;
-
- struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
- struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
- struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
-
- /* DCBX related MIB */
- struct dcbx_local_params local_admin_dcbx_mib;
- struct dcbx_mib remote_dcbx_mib;
- struct dcbx_mib operational_dcbx_mib;
-
- u32 reserved[2];
-
- u32 transceiver_data;
-#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff
-#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
-#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
-#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
-#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
-#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00
-#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
-#define ETH_TRANSCEIVER_TYPE_NONE 0x00
-#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff
-#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
-#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
-#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
-#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
-#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
-#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
-#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
-#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
-#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
-#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
-#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
-#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
-#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
-#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
-#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
-#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
-#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
-#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
-#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
-#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
-#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
-#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
-#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
-#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
-#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
-#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
-#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
-#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
-#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
-#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
-#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a
-
- u32 wol_info;
- u32 wol_pkt_len;
- u32 wol_pkt_details;
- struct dcb_dscp_map dcb_dscp_map;
-
- u32 eee_status;
-#define EEE_ACTIVE_BIT BIT(0)
-#define EEE_LD_ADV_STATUS_MASK 0x000000f0
-#define EEE_LD_ADV_STATUS_OFFSET 4
-#define EEE_1G_ADV BIT(1)
-#define EEE_10G_ADV BIT(2)
-#define EEE_LP_ADV_STATUS_MASK 0x00000f00
-#define EEE_LP_ADV_STATUS_OFFSET 8
-#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
-#define EEE_SUPPORTED_SPEED_OFFSET 12
-#define EEE_1G_SUPPORTED BIT(1)
-#define EEE_10G_SUPPORTED BIT(2)
-
- u32 eee_remote;
-#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
-#define EEE_REMOTE_TW_TX_OFFSET 0
-#define EEE_REMOTE_TW_RX_MASK 0xffff0000
-#define EEE_REMOTE_TW_RX_OFFSET 16
-
- u32 reserved1;
- u32 oem_cfg_port;
-#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
-#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
-#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
-#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
-#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
-#define OEM_CFG_SCHED_TYPE_OFFSET 2
-#define OEM_CFG_SCHED_TYPE_ETS 0x1
-#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
-};
-
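link_status above encodes speed as a small enumeration in bits 1..4 rather than one bit per speed, so decoding means masking with LINK_STATUS_SPEED_AND_DUPLEX_MASK and comparing against the shifted constants. A minimal decode sketch (only a few of the speed codes are mirrored here):

#include <stdint.h>
#include <stdio.h>

#define LINK_STATUS_LINK_UP               0x00000001
#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
#define LINK_STATUS_SPEED_AND_DUPLEX_10G  (3 << 1)
#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
#define LINK_STATUS_SPEED_AND_DUPLEX_25G  (8 << 1)

static const char *link_speed_str(uint32_t link_status)
{
	if (!(link_status & LINK_STATUS_LINK_UP))
		return "down";

	switch (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:  return "10G";
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:  return "25G";
	case LINK_STATUS_SPEED_AND_DUPLEX_100G: return "100G";
	default:                                return "other";
	}
}

int main(void)
{
	printf("%s\n", link_speed_str(LINK_STATUS_LINK_UP |
				      LINK_STATUS_SPEED_AND_DUPLEX_25G));
	return 0;
}
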
-struct public_func {
- u32 reserved0[2];
-
- u32 mtu_size;
-
- u32 reserved[7];
-
- u32 config;
-#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
-
-#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
-#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
-#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
-#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
-#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
-#define FUNC_MF_CFG_PROTOCOL_NVMETCP 0x00000040
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000040
-
-#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
-#define FUNC_MF_CFG_MIN_BW_SHIFT 8
-#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
-#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
-#define FUNC_MF_CFG_MAX_BW_SHIFT 16
-#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
-
- u32 status;
-#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
-
- u32 mac_upper;
-#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
-#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
- u32 mac_lower;
-#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
-
- u32 fcoe_wwn_port_name_upper;
- u32 fcoe_wwn_port_name_lower;
-
- u32 fcoe_wwn_node_name_upper;
- u32 fcoe_wwn_node_name_lower;
-
- u32 ovlan_stag;
-#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
-#define FUNC_MF_CFG_OV_STAG_SHIFT 0
-#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
-
- u32 pf_allocation;
-
- u32 preserve_data;
-
- u32 driver_last_activity_ts;
-
- u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 drv_id;
-#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
-#define DRV_ID_PDA_COMP_VER_SHIFT 0
-
-#define LOAD_REQ_HSI_VERSION 2
-#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
-#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
- DRV_ID_MCP_HSI_VER_SHIFT)
-
-#define DRV_ID_DRV_TYPE_MASK 0x7f000000
-#define DRV_ID_DRV_TYPE_SHIFT 24
-#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-
-#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
-#define DRV_ID_DRV_INIT_HW_SHIFT 31
-#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
-
- u32 oem_cfg_func;
-#define OEM_CFG_FUNC_TC_MASK 0x0000000F
-#define OEM_CFG_FUNC_TC_OFFSET 0
-#define OEM_CFG_FUNC_TC_0 0x0
-#define OEM_CFG_FUNC_TC_1 0x1
-#define OEM_CFG_FUNC_TC_2 0x2
-#define OEM_CFG_FUNC_TC_3 0x3
-#define OEM_CFG_FUNC_TC_4 0x4
-#define OEM_CFG_FUNC_TC_5 0x5
-#define OEM_CFG_FUNC_TC_6 0x6
-#define OEM_CFG_FUNC_TC_7 0x7
-
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
-};
-
-struct mcp_mac {
- u32 mac_upper;
- u32 mac_lower;
-};
-
-struct mcp_val64 {
- u32 lo;
- u32 hi;
-};
-
-struct mcp_file_att {
- u32 nvm_start_addr;
- u32 len;
-};
-
-struct bist_nvm_image_att {
- u32 return_code;
- u32 image_type;
- u32 nvm_start_addr;
- u32 len;
-};
-
-#define MCP_DRV_VER_STR_SIZE 16
-#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
-#define MCP_DRV_NVM_BUF_LEN 32
-struct drv_version_stc {
- u32 version;
- u8 name[MCP_DRV_VER_STR_SIZE - 4];
-};
-
-struct lan_stats_stc {
- u64 ucast_rx_pkts;
- u64 ucast_tx_pkts;
- u32 fcs_err;
- u32 rserved;
-};
-
-struct fcoe_stats_stc {
- u64 rx_pkts;
- u64 tx_pkts;
- u32 fcs_err;
- u32 login_failure;
-};
-
-struct ocbb_data_stc {
- u32 ocbb_host_addr;
- u32 ocsd_host_addr;
- u32 ocsd_req_update_interval;
-};
-
-#define MAX_NUM_OF_SENSORS 7
-struct temperature_status_stc {
- u32 num_of_sensors;
- u32 sensor[MAX_NUM_OF_SENSORS];
-};
-
-/* crash dump configuration header */
-struct mdump_config_stc {
- u32 version;
- u32 config;
- u32 epoc;
- u32 num_of_logs;
- u32 valid_logs;
-};
-
-enum resource_id_enum {
- RESOURCE_NUM_SB_E = 0,
- RESOURCE_NUM_L2_QUEUE_E = 1,
- RESOURCE_NUM_VPORT_E = 2,
- RESOURCE_NUM_VMQ_E = 3,
- RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
- RESOURCE_FACTOR_RSS_PER_VF_E = 5,
- RESOURCE_NUM_RL_E = 6,
- RESOURCE_NUM_PQ_E = 7,
- RESOURCE_NUM_VF_E = 8,
- RESOURCE_VFC_FILTER_E = 9,
- RESOURCE_ILT_E = 10,
- RESOURCE_CQS_E = 11,
- RESOURCE_GFT_PROFILES_E = 12,
- RESOURCE_NUM_TC_E = 13,
- RESOURCE_NUM_RSS_ENGINES_E = 14,
- RESOURCE_LL2_QUEUE_E = 15,
- RESOURCE_RDMA_STATS_QUEUE_E = 16,
- RESOURCE_BDQ_E = 17,
- RESOURCE_QCN_E = 18,
- RESOURCE_LLH_FILTER_E = 19,
- RESOURCE_VF_MAC_ADDR = 20,
- RESOURCE_LL2_CQS_E = 21,
- RESOURCE_VF_CNQS = 22,
- RESOURCE_MAX_NUM,
- RESOURCE_NUM_INVALID = 0xFFFFFFFF
-};
-
-/* The resource ID is filled by the driver in the MB request;
- * the size, offset and flags are filled by the MFW in the MB response.
- */
-struct resource_info {
- enum resource_id_enum res_id;
- u32 size; /* Number of allocated resources */
- u32 offset; /* Offset of the first resource */
- u32 vf_size;
- u32 vf_offset;
- u32 flags;
-#define RESOURCE_ELEMENT_STRICT (1 << 0)
-};
-
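Per the comment above, a resource_info exchange is asymmetric: the driver fills only res_id in the request, and the MFW returns size, offset and flags in the mailbox response. A small sketch of preparing such a request (the trimmed struct copy and the helper name are illustrative):

#include <stdint.h>
#include <string.h>

/* Trimmed copy of the deleted definitions, for illustration. */
enum resource_id_enum { RESOURCE_NUM_VPORT_E = 2 };

struct resource_info {
	enum resource_id_enum res_id;
	uint32_t size;   /* filled by the MFW in the response */
	uint32_t offset; /* filled by the MFW in the response */
	uint32_t vf_size;
	uint32_t vf_offset;
	uint32_t flags;
};

/* Prepare a resource-allocation request: only res_id is meaningful;
 * everything else is zeroed and owned by the MFW's reply. */
static void prep_resc_req(struct resource_info *info)
{
	memset(info, 0, sizeof(*info));
	info->res_id = RESOURCE_NUM_VPORT_E;
}
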
-#define DRV_ROLE_NONE 0
-#define DRV_ROLE_PREBOOT 1
-#define DRV_ROLE_OS 2
-#define DRV_ROLE_KDUMP 3
-
-struct load_req_stc {
- u32 drv_ver_0;
- u32 drv_ver_1;
- u32 fw_ver;
- u32 misc0;
-#define LOAD_REQ_ROLE_MASK 0x000000FF
-#define LOAD_REQ_ROLE_SHIFT 0
-#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
-#define LOAD_REQ_LOCK_TO_SHIFT 8
-#define LOAD_REQ_LOCK_TO_DEFAULT 0
-#define LOAD_REQ_LOCK_TO_NONE 255
-#define LOAD_REQ_FORCE_MASK 0x000F0000
-#define LOAD_REQ_FORCE_SHIFT 16
-#define LOAD_REQ_FORCE_NONE 0
-#define LOAD_REQ_FORCE_PF 1
-#define LOAD_REQ_FORCE_ALL 2
-#define LOAD_REQ_FLAGS0_MASK 0x00F00000
-#define LOAD_REQ_FLAGS0_SHIFT 20
-#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
-};
-
-struct load_rsp_stc {
- u32 drv_ver_0;
- u32 drv_ver_1;
- u32 fw_ver;
- u32 misc0;
-#define LOAD_RSP_ROLE_MASK 0x000000FF
-#define LOAD_RSP_ROLE_SHIFT 0
-#define LOAD_RSP_HSI_MASK 0x0000FF00
-#define LOAD_RSP_HSI_SHIFT 8
-#define LOAD_RSP_FLAGS0_MASK 0x000F0000
-#define LOAD_RSP_FLAGS0_SHIFT 16
-#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
-};
-
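Both load_req_stc and load_rsp_stc pack several fields into misc0; on the request side the driver role, lock timeout and force policy each occupy their own field. A sketch of composing a request word for an OS-role driver with the default timeout and no forced unload of other functions (constants copied from the defines above):

#include <stdint.h>

#define DRV_ROLE_OS              2
#define LOAD_REQ_ROLE_MASK       0x000000FF
#define LOAD_REQ_ROLE_SHIFT      0
#define LOAD_REQ_LOCK_TO_MASK    0x0000FF00
#define LOAD_REQ_LOCK_TO_SHIFT   8
#define LOAD_REQ_LOCK_TO_DEFAULT 0
#define LOAD_REQ_FORCE_MASK      0x000F0000
#define LOAD_REQ_FORCE_SHIFT     16
#define LOAD_REQ_FORCE_NONE      0

static uint32_t build_load_req_misc0(void)
{
	uint32_t misc0 = 0;

	misc0 |= (DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT) & LOAD_REQ_ROLE_MASK;
	misc0 |= (LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT) &
		 LOAD_REQ_LOCK_TO_MASK;
	misc0 |= (LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT) &
		 LOAD_REQ_FORCE_MASK;
	return misc0;
}
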
-struct mdump_retain_data_stc {
- u32 valid;
- u32 epoch;
- u32 pf;
- u32 status;
-};
-
-union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
- struct mcp_mac wol_mac;
-
- struct eth_phy_cfg drv_phy_cfg;
-
- struct mcp_val64 val64;
-
- u8 raw_data[MCP_DRV_NVM_BUF_LEN];
-
- struct mcp_file_att file_att;
-
- u32 ack_vf_disabled[VF_MAX_STATIC / 32];
-
- struct drv_version_stc drv_version;
-
- struct lan_stats_stc lan_stats;
- struct fcoe_stats_stc fcoe_stats;
- struct ocbb_data_stc ocbb_info;
- struct temperature_status_stc temp_info;
- struct resource_info resource;
- struct bist_nvm_image_att nvm_image_att;
- struct mdump_config_stc mdump_config;
-};
-
-struct public_drv_mb {
- u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK 0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ 0x10000000
-#define DRV_MSG_CODE_LOAD_DONE 0x11000000
-#define DRV_MSG_CODE_INIT_HW 0x12000000
-#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
-#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
-#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
-#define DRV_MSG_CODE_INIT_PHY 0x22000000
-#define DRV_MSG_CODE_LINK_RESET 0x23000000
-#define DRV_MSG_CODE_SET_DCBX 0x25000000
-#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
-#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
-#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
-#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
-#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
-#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
-#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
-#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
-#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
-#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
-#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
-
-#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
-#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
-#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000
-#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000
-#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
-#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
-#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
-#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
-#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
-#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
-#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
-#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
-#define DRV_MSG_CODE_MCP_RESET 0x00090000
-#define DRV_MSG_CODE_SET_VERSION 0x000f0000
-#define DRV_MSG_CODE_MCP_HALT 0x00100000
-#define DRV_MSG_CODE_SET_VMAC 0x00110000
-#define DRV_MSG_CODE_GET_VMAC 0x00120000
-#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
-#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
-#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
-#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
-#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
-
-#define DRV_MSG_CODE_GET_STATS 0x00130000
-#define DRV_MSG_CODE_STATS_TYPE_LAN 1
-#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
-#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
-#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
-
-#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
-
-#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
-
-#define DRV_MSG_CODE_BIST_TEST 0x001e0000
-#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
-#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
-/* Send crash dump commands with the opcode in param[3:0] */
-#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
-#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
-#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000
-#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
-
-#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000
-
-#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
-#define RESOURCE_CMD_REQ_RESC_SHIFT 0
-#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
-#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
-#define RESOURCE_OPCODE_REQ 1
-#define RESOURCE_OPCODE_REQ_WO_AGING 2
-#define RESOURCE_OPCODE_REQ_W_AGING 3
-#define RESOURCE_OPCODE_RELEASE 4
-#define RESOURCE_OPCODE_FORCE_RELEASE 5
-#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
-#define RESOURCE_CMD_REQ_AGE_SHIFT 8
-
-#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
-#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
-#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
-#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
-#define RESOURCE_OPCODE_GNT 1
-#define RESOURCE_OPCODE_BUSY 2
-#define RESOURCE_OPCODE_RELEASED 3
-#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
-#define RESOURCE_OPCODE_WRONG_OWNER 5
-#define RESOURCE_OPCODE_UNKNOWN_CMD 255
-
-#define RESOURCE_DUMP 0
-
-/* DRV_MSG_CODE_MDUMP_CMD parameters */
-#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
-#define DRV_MSG_CODE_MDUMP_ACK 0x01
-#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
-#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
-#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
-#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
-#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
-#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
-#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
-
-#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
-#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b
-#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c
-
-#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
-#define DRV_MSG_CODE_OS_WOL 0x002e0000
-
-#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000
-#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000
-#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
- u32 drv_mb_param;
-#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
-#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
-#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
-#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
-#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
-#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
-
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
-#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
-#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
-#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
-#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
-
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
-#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
-#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
-
-#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
-#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
-#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
-#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
-#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
-#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
-
-#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
-#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
-#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
-
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
-
-#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
-#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
-
-#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
- DRV_MB_PARAM_WOL_DISABLED | \
- DRV_MB_PARAM_WOL_ENABLED)
-#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
-#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
-#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
-
-#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
- DRV_MB_PARAM_ESWITCH_MODE_VEB | \
- DRV_MB_PARAM_ESWITCH_MODE_VEPA)
-#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
-#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
-#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
-
-#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
-#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
-
-#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
-#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
-#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
-
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000
-
- /* Resource Allocation params - Driver version support */
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
-
-#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
-#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
-#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
-#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
-
-#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
-#define DRV_MB_PARAM_BIST_RC_PASSED 1
-#define DRV_MB_PARAM_BIST_RC_FAILED 2
-#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
-
-#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
-#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00
-
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008
-#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
-
-/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
-#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
-#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff
-
-/* Driver attributes params */
-#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
-#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff
-#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
-#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000
-
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
-#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
-#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
-#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
-
- u32 fw_mb_header;
-#define FW_MSG_CODE_MASK 0xffff0000
-#define FW_MSG_CODE_UNSUPPORTED 0x00000000
-#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
-#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
-#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
-#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
-#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
-#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
-#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
-#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
-#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
-#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
-#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
-#define FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE 0x3b000000
-#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
-
-#define FW_MSG_CODE_NVM_OK 0x00010000
-#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
-#define FW_MSG_CODE_PHY_OK 0x00110000
-#define FW_MSG_CODE_OK 0x00160000
-#define FW_MSG_CODE_ERROR 0x00170000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
-#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
-
-#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
-#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
-#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
-#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
-#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000
-#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000
-
-#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
-
- u32 fw_mb_param;
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
-
- /* Get PF RDMA protocol command response */
-#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
-#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
-#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
-#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
-
- /* Get MFW feature support response */
-#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0)
-#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1)
-#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5)
-#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6)
-#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16)
-
-#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0)
-
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3
-
-#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff
-#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
-
- u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK 0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
-#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
-
- u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK 0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
-#define MCP_EVENT_MASK 0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
-
- union drv_union_data union_data;
-};
-
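public_drv_mb above is the driver/MFW mailbox pair: the command code occupies the top 16 bits of drv_mb_header while a sequence number takes the low 16 (DRV_MSG_SEQ_NUMBER_MASK), and the MFW echoes that sequence in fw_mb_header alongside its response code. A minimal sketch of the sequence pairing this layout implies (plain integers here; a real driver reads and writes these fields through device memory):

#include <stdint.h>

#define DRV_MSG_CODE_LOAD_REQ   0x10000000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
#define FW_MSG_SEQ_NUMBER_MASK  0x0000ffff

/* Compose the next mailbox command word from a command code and the
 * previous header's sequence number. */
static uint32_t next_mb_cmd(uint32_t cmd, uint32_t prev_header)
{
	uint32_t seq = (prev_header + 1) & DRV_MSG_SEQ_NUMBER_MASK;

	return cmd | seq;
}

/* The response is accepted once the MFW echoes our sequence number. */
static int mb_response_ready(uint32_t fw_header, uint32_t drv_header)
{
	return (fw_header & FW_MSG_SEQ_NUMBER_MASK) ==
	       (drv_header & DRV_MSG_SEQ_NUMBER_MASK);
}
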
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
-
-enum MFW_DRV_MSG_TYPE {
- MFW_DRV_MSG_LINK_CHANGE,
- MFW_DRV_MSG_FLR_FW_ACK_FAILED,
- MFW_DRV_MSG_VF_DISABLED,
- MFW_DRV_MSG_LLDP_DATA_UPDATED,
- MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
- MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
- MFW_DRV_MSG_ERROR_RECOVERY,
- MFW_DRV_MSG_BW_UPDATE,
- MFW_DRV_MSG_S_TAG_UPDATE,
- MFW_DRV_MSG_GET_LAN_STATS,
- MFW_DRV_MSG_GET_FCOE_STATS,
- MFW_DRV_MSG_GET_ISCSI_STATS,
- MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_FAILURE_DETECTED,
- MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
- MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
- MFW_DRV_MSG_RESERVED,
- MFW_DRV_MSG_GET_TLV_REQ,
- MFW_DRV_MSG_OEM_CFG_UPDATE,
- MFW_DRV_MSG_MAX
-};
-
-#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
-#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
-#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
-#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
-
-struct public_mfw_mb {
- u32 sup_msgs;
- u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
- u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
-};
-
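The MFW_DRV_MSG_* helpers above treat public_mfw_mb's msg[] as a packed array of one byte per message ID: MFW_DRV_MSG_DWORD() picks the dword, MFW_DRV_MSG_OFFSET() the byte's bit position within it, and MFW_DRV_MSG_MASK() the whole byte. A message is pending while its byte in msg[] differs from the driver's acknowledged copy in ack[]. A standalone sketch (0xffU replaces 0xff so the shift stays well-defined in portable C):

#include <stdint.h>

#define MFW_DRV_MSG_DWORD(msg_id)  ((msg_id) >> 2)
#define MFW_DRV_MSG_OFFSET(msg_id) (((msg_id) & 0x3) << 3)
#define MFW_DRV_MSG_MASK(msg_id)   (0xffU << MFW_DRV_MSG_OFFSET(msg_id))

/* Nonzero while message 'id' has been raised by the MFW but not yet
 * acknowledged by the driver. */
static uint32_t mfw_msg_pending(const uint32_t *msg, const uint32_t *ack,
				unsigned int id)
{
	uint32_t diff = msg[MFW_DRV_MSG_DWORD(id)] ^
			ack[MFW_DRV_MSG_DWORD(id)];

	return diff & MFW_DRV_MSG_MASK(id);
}
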
-enum public_sections {
- PUBLIC_DRV_MB,
- PUBLIC_MFW_MB,
- PUBLIC_GLOBAL,
- PUBLIC_PATH,
- PUBLIC_PORT,
- PUBLIC_FUNC,
- PUBLIC_MAX_SECTIONS
-};
-
-struct mcp_public_data {
- u32 num_sections;
- u32 sections[PUBLIC_MAX_SECTIONS];
- struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
- struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
- struct public_global global;
- struct public_path path[MCP_GLOB_PATH_MAX];
- struct public_port port[MCP_GLOB_PORT_MAX];
- struct public_func func[MCP_GLOB_FUNC_MAX];
-};
-
-#define MAX_I2C_TRANSACTION_SIZE 16
-
-/* OCBB definitions */
-enum tlvs {
- /* Category 1: Device Properties */
- DRV_TLV_CLP_STR,
- DRV_TLV_CLP_STR_CTD,
- /* Category 6: Device Configuration */
- DRV_TLV_SCSI_TO,
- DRV_TLV_R_T_TOV,
- DRV_TLV_R_A_TOV,
- DRV_TLV_E_D_TOV,
- DRV_TLV_CR_TOV,
- DRV_TLV_BOOT_TYPE,
- /* Category 8: Port Configuration */
- DRV_TLV_NPIV_ENABLED,
- /* Category 10: Function Configuration */
- DRV_TLV_FEATURE_FLAGS,
- DRV_TLV_LOCAL_ADMIN_ADDR,
- DRV_TLV_ADDITIONAL_MAC_ADDR_1,
- DRV_TLV_ADDITIONAL_MAC_ADDR_2,
- DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
- DRV_TLV_LSO_MIN_SEGMENT_COUNT,
- DRV_TLV_PROMISCUOUS_MODE,
- DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
- DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
- DRV_TLV_OS_DRIVER_STATES,
- DRV_TLV_PXE_BOOT_PROGRESS,
- /* Category 12: FC/FCoE Configuration */
- DRV_TLV_NPIV_STATE,
- DRV_TLV_NUM_OF_NPIV_IDS,
- DRV_TLV_SWITCH_NAME,
- DRV_TLV_SWITCH_PORT_NUM,
- DRV_TLV_SWITCH_PORT_ID,
- DRV_TLV_VENDOR_NAME,
- DRV_TLV_SWITCH_MODEL,
- DRV_TLV_SWITCH_FW_VER,
- DRV_TLV_QOS_PRIORITY_PER_802_1P,
- DRV_TLV_PORT_ALIAS,
- DRV_TLV_PORT_STATE,
- DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_LINK_FAILURE_COUNT,
- DRV_TLV_FCOE_BOOT_PROGRESS,
- /* Category 13: iSCSI Configuration */
- DRV_TLV_TARGET_LLMNR_ENABLED,
- DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
- DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
- DRV_TLV_AUTHENTICATION_METHOD,
- DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
- DRV_TLV_MAX_FRAME_SIZE,
- DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_ISCSI_BOOT_PROGRESS,
- /* Category 20: Device Data */
- DRV_TLV_PCIE_BUS_RX_UTILIZATION,
- DRV_TLV_PCIE_BUS_TX_UTILIZATION,
- DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
- DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
- DRV_TLV_NCSI_RX_BYTES_RECEIVED,
- DRV_TLV_NCSI_TX_BYTES_SENT,
- /* Category 22: Base Port Data */
- DRV_TLV_RX_DISCARDS,
- DRV_TLV_RX_ERRORS,
- DRV_TLV_TX_ERRORS,
- DRV_TLV_TX_DISCARDS,
- DRV_TLV_RX_FRAMES_RECEIVED,
- DRV_TLV_TX_FRAMES_SENT,
- /* Category 23: FC/FCoE Port Data */
- DRV_TLV_RX_BROADCAST_PACKETS,
- DRV_TLV_TX_BROADCAST_PACKETS,
- /* Category 28: Base Function Data */
- DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
- DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
- DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_PF_RX_FRAMES_RECEIVED,
- DRV_TLV_RX_BYTES_RECEIVED,
- DRV_TLV_PF_TX_FRAMES_SENT,
- DRV_TLV_TX_BYTES_SENT,
- DRV_TLV_IOV_OFFLOAD,
- DRV_TLV_PCI_ERRORS_CAP_ID,
- DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
- DRV_TLV_UNCORRECTABLE_ERROR_MASK,
- DRV_TLV_CORRECTABLE_ERROR_STATUS,
- DRV_TLV_CORRECTABLE_ERROR_MASK,
- DRV_TLV_PCI_ERRORS_AECC_REGISTER,
- DRV_TLV_TX_QUEUES_EMPTY,
- DRV_TLV_RX_QUEUES_EMPTY,
- DRV_TLV_TX_QUEUES_FULL,
- DRV_TLV_RX_QUEUES_FULL,
- /* Category 29: FC/FCoE Function Data */
- DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
- DRV_TLV_FCOE_RX_BYTES_RECEIVED,
- DRV_TLV_FCOE_TX_FRAMES_SENT,
- DRV_TLV_FCOE_TX_BYTES_SENT,
- DRV_TLV_CRC_ERROR_COUNT,
- DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_1_TIMESTAMP,
- DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_2_TIMESTAMP,
- DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_3_TIMESTAMP,
- DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_4_TIMESTAMP,
- DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_5_TIMESTAMP,
- DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
- DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
- DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
- DRV_TLV_DISPARITY_ERROR_COUNT,
- DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
- DRV_TLV_LAST_FLOGI_TIMESTAMP,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
- DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
- DRV_TLV_LAST_FLOGI_RJT,
- DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
- DRV_TLV_FDISCS_SENT_COUNT,
- DRV_TLV_FDISC_ACCS_RECEIVED,
- DRV_TLV_FDISC_RJTS_RECEIVED,
- DRV_TLV_PLOGI_SENT_COUNT,
- DRV_TLV_PLOGI_ACCS_RECEIVED,
- DRV_TLV_PLOGI_RJTS_RECEIVED,
- DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_1_TIMESTAMP,
- DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_2_TIMESTAMP,
- DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_3_TIMESTAMP,
- DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_4_TIMESTAMP,
- DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_5_TIMESTAMP,
- DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
- DRV_TLV_LOGOS_ISSUED,
- DRV_TLV_LOGO_ACCS_RECEIVED,
- DRV_TLV_LOGO_RJTS_RECEIVED,
- DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_1_TIMESTAMP,
- DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_2_TIMESTAMP,
- DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_3_TIMESTAMP,
- DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_4_TIMESTAMP,
- DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_5_TIMESTAMP,
- DRV_TLV_LOGOS_RECEIVED,
- DRV_TLV_ACCS_ISSUED,
- DRV_TLV_PRLIS_ISSUED,
- DRV_TLV_ACCS_RECEIVED,
- DRV_TLV_ABTS_SENT_COUNT,
- DRV_TLV_ABTS_ACCS_RECEIVED,
- DRV_TLV_ABTS_RJTS_RECEIVED,
- DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_1_TIMESTAMP,
- DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_2_TIMESTAMP,
- DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_3_TIMESTAMP,
- DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_4_TIMESTAMP,
- DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_5_TIMESTAMP,
- DRV_TLV_RSCNS_RECEIVED,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
- DRV_TLV_LUN_RESETS_ISSUED,
- DRV_TLV_ABORT_TASK_SETS_ISSUED,
- DRV_TLV_TPRLOS_SENT,
- DRV_TLV_NOS_SENT_COUNT,
- DRV_TLV_NOS_RECEIVED_COUNT,
- DRV_TLV_OLS_COUNT,
- DRV_TLV_LR_COUNT,
- DRV_TLV_LRR_COUNT,
- DRV_TLV_LIP_SENT_COUNT,
- DRV_TLV_LIP_RECEIVED_COUNT,
- DRV_TLV_EOFA_COUNT,
- DRV_TLV_EOFNI_COUNT,
- DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
- DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
- DRV_TLV_SCSI_STATUS_BUSY_COUNT,
- DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
- DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
- DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
- DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
- DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
- DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
- DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
- /* Category 30: iSCSI Function Data */
- DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
- DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
- DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
- DRV_TLV_ISCSI_PDU_TX_BYTES_SENT
-};
-
-struct nvm_cfg_mac_address {
- u32 mac_addr_hi;
-#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff
-#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
-
- u32 mac_addr_lo;
-};
-
-struct nvm_cfg1_glob {
- u32 generic_cont0;
-#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0
-#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
-#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
-#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
-#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
-#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
-#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
-
- u32 engineering_change[3];
- u32 manufacturing_id;
- u32 serial_number[4];
- u32 pcie_cfg;
- u32 mgmt_traffic;
-
- u32 core_cfg;
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15
-
- u32 e_lane_cfg1;
- u32 e_lane_cfg2;
- u32 f_lane_cfg1;
- u32 f_lane_cfg2;
- u32 mps10_preemphasis;
- u32 mps10_driver_current;
- u32 mps25_preemphasis;
- u32 mps25_driver_current;
- u32 pci_id;
- u32 pci_subsys_id;
- u32 bar;
- u32 mps10_txfir_main;
- u32 mps10_txfir_post;
- u32 mps25_txfir_main;
- u32 mps25_txfir_post;
- u32 manufacture_ver;
- u32 manufacture_time;
- u32 led_global_settings;
- u32 generic_cont1;
-
- u32 mbi_version;
-#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff
-#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
-#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00
-#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
-#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000
-#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
-
- u32 mbi_date;
- u32 misc_sig;
-
- u32 device_capabilities;
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
-
- u32 power_dissipated;
- u32 power_consumed;
- u32 efi_version;
- u32 multi_net_modes_cap;
- u32 reserved[41];
-};
-
-struct nvm_cfg1_path {
- u32 reserved[30];
-};
-
-struct nvm_cfg1_port {
- u32 rel_to_opt123;
- u32 rel_to_opt124;
-
- u32 generic_cont0;
-#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000
-#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
-#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
-#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
-#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
-
- u32 pcie_cfg;
- u32 features;
-
- u32 speed_cap_mask;
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
-
- u32 link_settings;
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
-
- u32 phy_cfg;
- u32 mgmt_traffic;
-
- u32 ext_phy;
- /* EEE power saving mode */
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
-
- u32 mba_cfg1;
- u32 mba_cfg2;
- u32 vf_cfg;
- struct nvm_cfg_mac_address lldp_mac_address;
- u32 led_port_settings;
- u32 transceiver_00;
- u32 device_ids;
-
- u32 board_cfg;
-#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff
-#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
-#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
-#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
-#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
-
- u32 mnm_10g_cap;
- u32 mnm_10g_ctrl;
- u32 mnm_10g_misc;
- u32 mnm_25g_cap;
- u32 mnm_25g_ctrl;
- u32 mnm_25g_misc;
- u32 mnm_40g_cap;
- u32 mnm_40g_ctrl;
- u32 mnm_40g_misc;
- u32 mnm_50g_cap;
- u32 mnm_50g_ctrl;
- u32 mnm_50g_misc;
- u32 mnm_100g_cap;
- u32 mnm_100g_ctrl;
- u32 mnm_100g_misc;
-
- u32 temperature;
- u32 ext_phy_cfg1;
-
- u32 extended_speed;
-#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff
-#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400
-
- u32 extended_fec_mode;
-
- u32 reserved[112];
-};
-
-struct nvm_cfg1_func {
- struct nvm_cfg_mac_address mac_address;
- u32 rsrv1;
- u32 rsrv2;
- u32 device_id;
- u32 cmn_cfg;
- u32 pci_cfg;
- struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
- struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
- u32 preboot_generic_cfg;
- u32 reserved[8];
-};
-
-struct nvm_cfg1 {
- struct nvm_cfg1_glob glob;
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
- struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
- struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
-};
-
-enum spad_sections {
- SPAD_SECTION_TRACE,
- SPAD_SECTION_NVM_CFG,
- SPAD_SECTION_PUBLIC,
- SPAD_SECTION_PRIVATE,
- SPAD_SECTION_MAX
-};
-
-#define MCP_TRACE_SIZE 2048 /* 2kb */
-
-/* This section is located at a fixed location in the beginning of the
- * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade.
- * All the rest of data has a floating location which differs from version to
- * version, and is pointed by the mcp_meta_data below.
- * Moreover, the spad_layout section is part of the MFW firmware, and is loaded
- * with it from nvram in order to clear this portion.
- */
-struct static_init {
- u32 num_sections;
- offsize_t sections[SPAD_SECTION_MAX];
-#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
-
- struct mcp_trace trace;
-#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace)))
- u8 trace_buffer[MCP_TRACE_SIZE];
-#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
- /* running_mfw has the same definition as in nvm_map.h.
- * This bit indicate both the running dir, and the running bundle.
- * It is set once when the LIM is loaded.
- */
- u32 running_mfw;
-#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw))))
- u32 build_time;
-#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time))))
- u32 reset_type;
-#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type))))
- u32 mfw_secure_mode;
-#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode))))
- u16 pme_status_pf_bitmap;
-#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap))))
- u16 pme_enable_pf_bitmap;
-#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap))))
- u32 mim_nvm_addr;
- u32 mim_start_addr;
- u32 ah_pcie_link_params;
-#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff)
-#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0)
-#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00)
-#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8)
-#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000)
-#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16)
-#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
-#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24)
-#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params))))
-
- u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */
-};
-
-#define NVM_MAGIC_VALUE 0x669955aa
-
-enum nvm_image_type {
- NVM_TYPE_TIM1 = 0x01,
- NVM_TYPE_TIM2 = 0x02,
- NVM_TYPE_MIM1 = 0x03,
- NVM_TYPE_MIM2 = 0x04,
- NVM_TYPE_MBA = 0x05,
- NVM_TYPE_MODULES_PN = 0x06,
- NVM_TYPE_VPD = 0x07,
- NVM_TYPE_MFW_TRACE1 = 0x08,
- NVM_TYPE_MFW_TRACE2 = 0x09,
- NVM_TYPE_NVM_CFG1 = 0x0a,
- NVM_TYPE_L2B = 0x0b,
- NVM_TYPE_DIR1 = 0x0c,
- NVM_TYPE_EAGLE_FW1 = 0x0d,
- NVM_TYPE_FALCON_FW1 = 0x0e,
- NVM_TYPE_PCIE_FW1 = 0x0f,
- NVM_TYPE_HW_SET = 0x10,
- NVM_TYPE_LIM = 0x11,
- NVM_TYPE_AVS_FW1 = 0x12,
- NVM_TYPE_DIR2 = 0x13,
- NVM_TYPE_CCM = 0x14,
- NVM_TYPE_EAGLE_FW2 = 0x15,
- NVM_TYPE_FALCON_FW2 = 0x16,
- NVM_TYPE_PCIE_FW2 = 0x17,
- NVM_TYPE_AVS_FW2 = 0x18,
- NVM_TYPE_INIT_HW = 0x19,
- NVM_TYPE_DEFAULT_CFG = 0x1a,
- NVM_TYPE_MDUMP = 0x1b,
- NVM_TYPE_META = 0x1c,
- NVM_TYPE_ISCSI_CFG = 0x1d,
- NVM_TYPE_FCOE_CFG = 0x1f,
- NVM_TYPE_ETH_PHY_FW1 = 0x20,
- NVM_TYPE_ETH_PHY_FW2 = 0x21,
- NVM_TYPE_BDN = 0x22,
- NVM_TYPE_8485X_PHY_FW = 0x23,
- NVM_TYPE_PUB_KEY = 0x24,
- NVM_TYPE_RECOVERY = 0x25,
- NVM_TYPE_PLDM = 0x26,
- NVM_TYPE_UPK1 = 0x27,
- NVM_TYPE_UPK2 = 0x28,
- NVM_TYPE_MASTER_KC = 0x29,
- NVM_TYPE_BACKUP_KC = 0x2a,
- NVM_TYPE_HW_DUMP = 0x2b,
- NVM_TYPE_HW_DUMP_OUT = 0x2c,
- NVM_TYPE_BIN_NVM_META = 0x30,
- NVM_TYPE_ROM_TEST = 0xf0,
- NVM_TYPE_88X33X0_PHY_FW = 0x31,
- NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
- NVM_TYPE_MAX,
-};
-
-#define DIR_ID_1 (0)
-
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 2734f49956f7..e535983ce21b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -53,85 +53,94 @@ enum _dmae_cmd_crc_mask {
#define DMAE_MAX_CLIENTS 32
/**
- * @brief qed_gtt_init - Initialize GTT windows
+ * qed_gtt_init(): Initialize GTT windows.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_gtt_init(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ * qed_ptt_invalidate(): Forces all ptt entries to be re-configured.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ * qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return struct _qed_status - success (0), negative - error.
+ * Return: Int - success (0), negative - error.
*/
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_pool_free -
+ * qed_ptt_pool_free(): Free PTT pool.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return u32
+ * Return: u32.
*/
u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address
+ * qed_ptt_get_bar_addr(): Get PTT's external BAR address.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_ptt: P_ptt.
*
- * @return u32
+ * Return: u32.
*/
u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
/**
- * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ * qed_ptt_set_win(): Set PTT Window's GRC BAR address.
*
- * @param p_hwfn
- * @param new_hw_addr
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @new_hw_addr: New HW address.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 new_hw_addr);
/**
- * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ * qed_get_reserved_ptt(): Get a specific reserved PTT.
*
- * @param p_hwfn
- * @param ptt_idx
+ * @p_hwfn: HW device data.
+ * @ptt_idx: PTT index.
*
- * @return struct qed_ptt *
+ * Return: struct qed_ptt *.
*/
struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
enum reserved_ptts ptt_idx);
/**
- * @brief qed_wr - Write value to BAR using the given ptt
+ * qed_wr(): Write value to BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @val: Value to write.
+ * @hw_addr: HW address.
*
- * @param p_hwfn
- * @param p_ptt
- * @param val
- * @param hw_addr
+ * Return: Void.
*/
void qed_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -139,26 +148,28 @@ void qed_wr(struct qed_hwfn *p_hwfn,
u32 val);
/**
- * @brief qed_rd - Read value from BAR using the given ptt
+ * qed_rd(): Read value from BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @hw_addr: HW address.
*
- * @param p_hwfn
- * @param p_ptt
- * @param val
- * @param hw_addr
+ * Return: u32 - Read value.
*/
u32 qed_rd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr);
/**
- * @brief qed_memcpy_from - copy n bytes from BAR using the given
- * ptt
- *
- * @param p_hwfn
- * @param p_ptt
- * @param dest
- * @param hw_addr
- * @param n
+ * qed_memcpy_from(): Copy n bytes from BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @dest: Destination.
+ * @hw_addr: HW address.
+ * @n: Number of bytes to copy.
+ *
+ * Return: Void.
*/
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -167,14 +178,15 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
size_t n);
/**
- * @brief qed_memcpy_to - copy n bytes to BAR using the given
- * ptt
- *
- * @param p_hwfn
- * @param p_ptt
- * @param hw_addr
- * @param src
- * @param n
+ * qed_memcpy_to(): Copy n bytes to BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @hw_addr: HW address.
+ * @src: Source buffer.
+ * @n: Number of bytes to copy.
+ *
+ * Return: Void.
*/
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -182,83 +194,97 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
void *src,
size_t n);
/**
- * @brief qed_fid_pretend - pretend to another function when
- * accessing the ptt window. There is no way to unpretend
- * a function. The only way to cancel a pretend is to
- * pretend back to the original function.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param fid - fid field of pxp_pretend structure. Can contain
- * either pf / vf, port/path fields are don't care.
+ * qed_fid_pretend(): Pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @fid: fid field of pxp_pretend structure. Can contain
+ * either pf / vf, port/path fields are don't care.
+ *
+ * Return: Void.
*/
void qed_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 fid);
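The pretend semantics documented above are subtle: there is no explicit "unpretend" operation. A minimal caller sketch may help (a hypothetical helper, not part of this patch; my_fid stands in for however the caller saved its own FID):

	/* Hypothetical usage of qed_fid_pretend(): access GRC on behalf of
	 * another function, then pretend back to self - the only way to
	 * cancel a pretend.
	 */
	static void example_pretend_access(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u16 other_fid, u16 my_fid,
					   u32 grc_addr)
	{
		qed_fid_pretend(p_hwfn, p_ptt, other_fid); /* window now maps other_fid */
		qed_wr(p_hwfn, p_ptt, grc_addr, 0);        /* write lands in other_fid's context */
		qed_fid_pretend(p_hwfn, p_ptt, my_fid);    /* restore the original function */
	}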
/**
- * @brief qed_port_pretend - pretend to another port when
- * accessing the ptt window
+ * qed_port_pretend(): Pretend to another port when accessing the ptt window.
*
- * @param p_hwfn
- * @param p_ptt
- * @param port_id - the port to pretend to
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @port_id: The port to pretend to.
+ *
+ * Return: Void.
*/
void qed_port_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 port_id);
/**
- * @brief qed_port_unpretend - cancel any previously set port
- * pretend
+ * qed_port_unpretend(): Cancel any previously set port pretend.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Void.
*/
void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_port_fid_pretend - pretend to another port and another function
- * when accessing the ptt window
+ * qed_port_fid_pretend(): Pretend to another port and another function
+ * when accessing the ptt window.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @port_id: The port to pretend to.
+ * @fid: fid field of pxp_pretend structure. Can contain either pf / vf.
*
- * @param p_hwfn
- * @param p_ptt
- * @param port_id - the port to pretend to
- * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf.
+ * Return: Void.
*/
void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 port_id, u16 fid);
/**
- * @brief qed_vfid_to_concrete - build a concrete FID for a
- * given VF ID
+ * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID.
*
- * @param p_hwfn
- * @param p_ptt
- * @param vfid
+ * @p_hwfn: HW device data.
+ * @vfid: VFID.
+ *
+ * Return: u32 - Concrete FID.
*/
u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
/**
- * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
- * this is declared here since other files will require it.
- * @param idx
+ * qed_dmae_idx_to_go_cmd(): Map the idx to the DMAE GO command;
+ * declared here since other files require it.
+ *
+ * @idx: Index.
+ *
+ * Return: u32 - DMAE GO command for the given index.
*/
u32 qed_dmae_idx_to_go_cmd(u8 idx);
/**
- * @brief qed_dmae_info_alloc - Init the dmae_info structure
- * which is part of p_hwfn.
- * @param p_hwfn
+ * qed_dmae_info_alloc(): Init the dmae_info structure
+ * which is part of p_hwfn.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int - 0 on success, negative on error.
*/
int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_dmae_info_free - Free the dmae_info structure
- * which is part of p_hwfn
+ * qed_dmae_info_free(): Free the dmae_info structure
+ * which is part of p_hwfn.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
@@ -292,14 +318,16 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
#define QED_HW_ERR_MAX_STR_SIZE 256
/**
- * @brief qed_hw_err_notify - Notify upper layer driver and management FW
- * about a HW error.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param err_type
- * @param fmt - debug data buffer to send to the MFW
- * @param ... - buffer format args
+ * qed_hw_err_notify(): Notify upper layer driver and management FW
+ * about a HW error.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @err_type: Error type.
+ * @fmt: Debug data buffer to send to the MFW.
+ * @...: Buffer format args.
+ *
+ * Return: Void.
*/
void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index ea888a2c6ddb..321c43408153 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#include <linux/types.h>
@@ -13,17 +13,18 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
+#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
-#define CDU_VALIDATION_DEFAULT_CFG 61
+#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG
-static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
{400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
{528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
{608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
};
-static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
{240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
@@ -42,25 +43,49 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
+/* Initial VOQ byte credit */
+#define QM_INITIAL_VOQ_BYTE_CRD 98304
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
+/* VOQ constants */
+#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
+#define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1)
+
/* WFQ constants */
-/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
-#define QM_WFQ_UPPER_BOUND 62500000
+/* PF WFQ increment value, 0x9000 = 4*9*1024 */
+#define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_WFQ_UPPER_BOUND 62500000
+
+/* PF WFQ max increment value, 0.7 * upper bound */
+#define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10)
+
+/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */
+#define QM_PF_WFQ_CRD_E5_NUM_VOQS 16
+
+/* VP WFQ increment value */
+#define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL)
-/* Bit of VOQ in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+/* VP WFQ min increment value */
+#define QM_VP_WFQ_MIN_INC_VAL 10800
-/* Bit of PF in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+/* VP WFQ max increment value, 2^30 */
+#define QM_VP_WFQ_MAX_INC_VAL 0x40000000
-/* 0x9000 = 4*9*1024 */
-#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+/* VP WFQ bypass threshold */
+#define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100)
-/* Max WFQ increment value is 0.7 * upper bound */
-#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
+/* VP RL credit task cost */
+#define QM_VP_RL_CRD_TASK_COST 9700
+
+/* Bit of VOQ in VP WFQ PQ map */
+#define QM_VP_WFQ_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in VP WFQ PQ map */
+#define QM_VP_WFQ_PQ_PF_SHIFT 5
/* RL constants */
@@ -71,12 +96,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
/* RL increment value - rate is specified in mbps */
-#define QM_RL_INC_VAL(rate) ({ \
- typeof(rate) __rate = (rate); \
- max_t(u32, \
- (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
- (8 * 100)), \
- 1); })
+#define QM_RL_INC_VAL(rate) ({ \
+	typeof(rate) __rate = (rate); \
+	max_t(u32, \
+	      (u32)(((__rate ? __rate : 100000) * QM_RL_PERIOD * 101) / \
+		    (8 * 100)), 1); })
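A worked example of the increment arithmetic may help; it assumes QM_RL_PERIOD (defined earlier in this file) is the rate-limiter period in microseconds, taken as 5 purely for illustration:

	/* inc = rate[Mbps] * QM_RL_PERIOD[us] * 101 / (8 * 100)
	 *
	 * rate * period / 8 converts megabits-per-second into bytes credited
	 * per period; the 101/100 factor adds a ~1% margin. For a 10G rate,
	 * assuming QM_RL_PERIOD == 5:
	 *
	 *   10000 * 5 * 101 / 800 = 6312 (integer division)
	 *
	 * A rate of 0 now falls back to 100000 Mbps instead of 1000000.
	 */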
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000
@@ -84,16 +110,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
-/* Vport RL Upper bound, link speed is in Mpbs */
-#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \
- QM_RL_INC_VAL(speed), \
- 9700 + 1000))
-
-/* Max Vport RL increment value is the Vport RL upper bound */
-#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
-
-/* Vport RL credit threshold in case of QM bypass */
-#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
+/* QCN RL Upper bound, speed is in Mbps */
+#define QM_GLOBAL_RL_UPPER_BOUND(speed) \
+	((u32)max_t(u32, \
+		    (u32)(((speed) * QM_RL_PERIOD * 101) / (8 * 100)), \
+		    QM_VP_RL_CRD_TASK_COST + 1000))
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
@@ -156,20 +179,20 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
cmd ## _ ## field, \
value)
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \
rl_id, ext_voq, wrr) \
do { \
u32 __reg = 0; \
\
BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \
- \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \
+ memset(&(map), 0, sizeof(map)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \
!!(rl_valid)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, (vp_pq_id)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \
(wrr)); \
\
STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
@@ -184,8 +207,8 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
(((rl) >> 8) << 9))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
- XSTORM_PQ_INFO_OFFSET(pq_id)
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+ XSTORM_PQ_INFO_OFFSET(pq_id))
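The added outer parentheses above are a macro-hygiene fix (the same treatment is applied to STORE_RT_REG_AGG further down). A minimal illustration, with made-up names, of what unparenthesized expansion can do:

	/* Hypothetical example: with
	 *   #define ADDR(x)  BASE + OFF(x)
	 * the expression 4 * ADDR(pq) expands to 4 * BASE + OFF(pq), scaling
	 * only BASE. With
	 *   #define ADDR(x)  (BASE + OFF(x))
	 * the whole sum is scaled, as intended.
	 */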
/******************** INTERNAL IMPLEMENTATION *********************/
@@ -204,7 +227,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u8 num_ext_voqs = MAX_NUM_VOQS;
u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
/* Enable RLs for all VOQs */
@@ -236,7 +259,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
- QM_WFQ_UPPER_BOUND);
+ QM_PF_WFQ_UPPER_BOUND);
}
/* Prepare global RL enable/disable runtime init values */
@@ -257,7 +280,7 @@ static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
- QM_VP_RL_BYPASS_THRESH_SPEED);
+ QM_GLOBAL_RL_UPPER_BOUND(10000) - 1);
}
}
@@ -271,7 +294,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
- QM_WFQ_UPPER_BOUND);
+ QM_VP_WFQ_BYPASS_THRESH);
}
/* Prepare runtime init values to allocate PBF command queue lines for
@@ -291,14 +314,14 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
}
/* Prepare runtime init values to allocate PBF command queue lines. */
-static void qed_cmdq_lines_rt_init(
- struct qed_hwfn *p_hwfn,
- u8 max_ports_per_engine,
- u8 max_phys_tcs_per_port,
- struct init_qm_port_params port_params[MAX_NUM_PORTS])
+static void
+qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u8 tc, ext_voq, port_id, num_tcs_in_port;
- u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u8 num_ext_voqs = MAX_NUM_VOQS;
/* Clear PBF lines of all VOQs */
for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
@@ -364,11 +387,11 @@ static void qed_cmdq_lines_rt_init(
* - No optimization for lossy TC (all are considered lossless). Shared space
* is not enabled and allocated for each TC.
*/
-static void qed_btb_blocks_rt_init(
- struct qed_hwfn *p_hwfn,
- u8 max_ports_per_engine,
- u8 max_phys_tcs_per_port,
- struct init_qm_port_params port_params[MAX_NUM_PORTS])
+static void
+qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
u8 tc, ext_voq, port_id, num_tcs_in_port;
@@ -428,7 +451,7 @@ static void qed_btb_blocks_rt_init(
*/
static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
{
- u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+ u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
(u32)QM_RL_CRD_REG_SIGN_BIT;
u32 inc_val;
u16 rl_id;
@@ -450,11 +473,73 @@ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
return 0;
}
+/* Returns the upper bound for the specified Vport RL parameters.
+ * link_speed is in Mbps.
+ * Returns 0 in case of error.
+ */
+static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
+ u32 link_speed)
+{
+ switch (vport_rl_type) {
+ case QM_RL_TYPE_NORMAL:
+ return QM_INITIAL_VOQ_BYTE_CRD;
+ case QM_RL_TYPE_QCN:
+ return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
+ default:
+ return 0;
+ }
+}
+
+/* Prepare VPORT RL runtime init values.
+ * Return -1 on error.
+ */
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u16 start_rl,
+ u16 num_rls,
+ u32 link_speed,
+ struct init_qm_rl_params *rl_params)
+{
+ u16 i, rl_id;
+
+ if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n");
+ return -1;
+ }
+
+ /* Go over all PF VPORTs */
+ for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) {
+ u32 upper_bound, inc_val;
+
+ upper_bound =
+ qed_get_vport_rl_upper_bound((enum init_qm_rl_type)
+ rl_params[i].vport_rl_type,
+ link_speed);
+
+ inc_val =
+ QM_RL_INC_VAL(rl_params[i].vport_rl ?
+ rl_params[i].vport_rl : link_speed);
+ if (inc_val > upper_bound) {
+ DP_NOTICE(p_hwfn,
+				  "Invalid RL rate limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
/* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params,
- u32 base_mem_addr_4kb)
+static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
{
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
struct init_qm_vport_params *vport_params = p_params->vport_params;
@@ -487,7 +572,7 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
u16 *p_first_tx_pq_id, vport_id_in_pf;
- struct qm_rf_pq_map_e4 tx_pq_map;
+ struct qm_rf_pq_map tx_pq_map;
u8 tc_id = pq_params[i].tc_id;
bool is_vf_pq;
u8 ext_voq;
@@ -504,8 +589,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
&vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
u32 map_val =
- (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
- (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
+ (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) |
+ (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT);
/* Create new VP PQ */
*p_first_tx_pq_id = pq_id;
@@ -520,7 +605,6 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Prepare PQ map entry */
QM_INIT_TX_PQ_MAP(p_hwfn,
tx_pq_map,
- E4,
pq_id,
*p_first_tx_pq_id,
pq_params[i].rl_valid,
@@ -570,6 +654,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn,
QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
tx_pq_vf_mask[i]);
+
+ return 0;
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
@@ -620,7 +706,6 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
* Return -1 on error.
*/
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
-
struct qed_qm_pf_rt_init_params *p_params)
{
u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
@@ -629,8 +714,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
u8 ext_voq;
u16 i;
- inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq);
+ if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -652,7 +737,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
- QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
inc_val);
@@ -689,34 +774,38 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u16 num_vports,
struct init_qm_vport_params *vport_params)
{
- u16 vport_pq_id, i;
+ u16 vport_pq_id, wfq, i;
u32 inc_val;
u8 tc;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (!vport_params[i].wfq)
- continue;
-
- inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
- if (inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT WFQ weight configuration\n");
- return -1;
- }
-
/* Each VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ /* Check if VPORT/TC is valid */
vport_pq_id = vport_params[i].first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID) {
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPCRD_RT_OFFSET +
- vport_pq_id,
- (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPWEIGHT_RT_OFFSET +
- vport_pq_id, inc_val);
+ if (vport_pq_id == QM_INVALID_PQ_ID)
+ continue;
+
+ /* Find WFQ weight (per VPORT or per VPORT+TC) */
+ wfq = vport_params[i].wfq;
+ wfq = wfq ? wfq : vport_params[i].tc_wfq[tc];
+ inc_val = QM_VP_WFQ_INC_VAL(wfq);
+ if (inc_val > QM_VP_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
}
+
+ /* Config registers */
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET +
+ vport_pq_id,
+ inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
}
}
@@ -780,11 +869,14 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
QM_OPPOR_LINE_VOQ_DEF);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ,
+ p_params->pf_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ,
+ p_params->vport_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL,
+ p_params->pf_rl_en ? 1 : 0);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
- p_params->global_rl_en);
+ p_params->global_rl_en ? 1 : 0);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
SET_FIELD(mask,
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
@@ -830,7 +922,6 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
u16 i;
u8 tc;
-
/* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
@@ -843,7 +934,8 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
p_params->num_tids, 0);
/* Map Tx PQs */
- qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+ if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb))
+ return -1;
/* Init PF WFQ */
if (p_params->pf_wfq)
@@ -858,15 +950,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
+ /* Set VPORT RL */
+ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl,
+ p_params->num_rls, p_params->link_speed,
+ p_params->rl_params))
+ return -1;
+
return 0;
}
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
- u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -897,41 +995,66 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
+ int result = 0;
u16 vport_pq_id;
- u32 inc_val;
u8 tc;
- inc_val = QM_WFQ_INC_VAL(wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ for (tc = 0; tc < NUM_OF_TCS && !result; tc++) {
+ vport_pq_id = first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID)
+ result = qed_init_vport_tc_wfq(p_hwfn, p_ptt,
+ vport_pq_id, wfq);
+ }
+
+ return result;
+}
+
+int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id, u16 wfq)
+{
+ u32 inc_val;
+
+ if (first_tx_pq_id == QM_INVALID_PQ_ID)
+ return -1;
+
+ inc_val = QM_VP_WFQ_INC_VAL(wfq);
+ if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
return -1;
}
- /* A VPORT can have several VPORT PQ IDs for various TCs */
- for (tc = 0; tc < NUM_OF_TCS; tc++) {
- vport_pq_id = first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID)
- qed_wr(p_hwfn,
- p_ptt,
- QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
- }
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4,
+ inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4,
+ inc_val);
return 0;
}
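As a usage sketch of the new split (a hypothetical caller; the PQ id and weight are illustrative), the per-TC helper introduced above can now be invoked directly for a single known PQ, while qed_init_vport_wfq() keeps its old behavior by looping over all TCs and delegating:

	/* Hypothetical caller: set WFQ weight 50 on one PQ. */
	static int example_set_tc_weight(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt, u16 pq_id)
	{
		return qed_init_vport_tc_wfq(p_hwfn, p_ptt, pq_id, 50);
	}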
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
+ struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
+ enum init_qm_rl_type vport_rl_type)
{
- u32 inc_val;
+ u32 inc_val, upper_bound;
+	upper_bound = (vport_rl_type == QM_RL_TYPE_QCN) ?
+		      QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) :
+		      QM_INITIAL_VOQ_BYTE_CRD;
inc_val = QM_RL_INC_VAL(rate_limit);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
- DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
+ if (inc_val > upper_bound) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n");
return -1;
}
qed_wr(p_hwfn, p_ptt,
QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_RLGLBLUPPERBOUND + rl_id * 4,
+ upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
return 0;
@@ -1013,7 +1136,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
__le32 *p_data, u32 addr, u32 len_in_dwords)
{
- struct qed_dmae_params params = {};
+ struct qed_dmae_params params = { 0 };
u32 *data_cpu;
int rc;
@@ -1066,16 +1189,16 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
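The same SET_TUNNEL_TYPE_ENABLE_BIT-to-SET_FIELD conversion repeats in the GRE and GENEVE helpers below. The underlying pattern is an ordinary read-modify-write; a generic sketch (register and field names illustrative):

	/* reg_val = qed_rd(p_hwfn, p_ptt, REG);        read current value
	 * SET_FIELD(reg_val, REG_SOME_FIELD, enable);  mask/shift the named
	 *                                              field via its
	 *                                              _MASK/_SHIFT defines
	 * qed_wr(p_hwfn, p_ptt, REG, reg_val);         write back
	 *
	 * SET_FIELD derives the shift from the field definition, so the
	 * open-coded 'shift' local variable is no longer needed.
	 */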
@@ -1099,18 +1222,20 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE,
+ eth_gre_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE,
+ ip_gre_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
@@ -1148,22 +1273,23 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
bool eth_geneve_enable, bool ip_geneve_enable)
{
u32 reg_val;
- u8 shift;
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE,
+ eth_geneve_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE,
+ ip_geneve_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
@@ -1179,16 +1305,16 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
/* Update DORQ registers */
qed_wr(p_hwfn,
p_ptt,
- DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn,
p_ptt,
- DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910
void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -1208,7 +1334,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
/* update PRS FIC register */
qed_wr(p_hwfn,
p_ptt,
- PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
} else {
/* clear VXLAN_NO_L2_ENABLE flag */
@@ -1229,7 +1355,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
- struct regpair ram_line = { };
+ struct regpair ram_line = { 0 };
/* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
@@ -1621,6 +1747,8 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
storm_buf_size = GET_FIELD(hdr->data,
FW_OVERLAY_BUF_HDR_BUF_SIZE);
storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
+ if (storm_id >= NUM_STORMS)
+ break;
storm_mem_desc = allocated_mem + storm_id;
storm_mem_desc->size = storm_buf_size * sizeof(u32);
@@ -1645,7 +1773,7 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
/* If memory allocation has failed, free all allocated memory */
if (buf_offset < buf_size) {
- qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
+ qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
return NULL;
}
@@ -1679,16 +1807,16 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
}
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
- struct phys_mem_desc *fw_overlay_mem)
+ struct phys_mem_desc **fw_overlay_mem)
{
u8 storm_id;
- if (!fw_overlay_mem)
+ if (!fw_overlay_mem || !(*fw_overlay_mem))
return;
for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
struct phys_mem_desc *storm_mem_desc =
- (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ (struct phys_mem_desc *)*fw_overlay_mem + storm_id;
/* Free Storm's physical memory */
if (storm_mem_desc->virt_addr)
@@ -1699,5 +1827,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
}
/* Free allocated virtual memory */
- kfree(fw_overlay_mem);
+ kfree(*fw_overlay_mem);
+ *fw_overlay_mem = NULL;
}
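A short sketch of why the API now takes a double pointer (a hypothetical caller, not code from this patch): the callee can NULL the caller's handle, so a repeated free degrades into a no-op instead of a double-free:

	static void example_teardown(struct qed_hwfn *p_hwfn,
				     struct phys_mem_desc **mem)
	{
		qed_fw_overlay_mem_free(p_hwfn, mem); /* frees, sets *mem = NULL */
		qed_fw_overlay_mem_free(p_hwfn, mem); /* *mem is NULL: returns early */
	}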
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 7e6c6389523b..b3bf9899c1a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -15,6 +15,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
+#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
@@ -46,30 +47,32 @@ static u32 pxp_global_win[] = {
/* IRO Array */
static const u32 iro_arr[] = {
0x00000000, 0x00000000, 0x00080000,
+ 0x00004478, 0x00000008, 0x00080000,
0x00003288, 0x00000088, 0x00880000,
- 0x000058e8, 0x00000020, 0x00200000,
+ 0x000058a8, 0x00000020, 0x00200000,
+ 0x00003188, 0x00000008, 0x00080000,
0x00000b00, 0x00000008, 0x00040000,
0x00000a80, 0x00000008, 0x00040000,
0x00000000, 0x00000008, 0x00020000,
0x00000080, 0x00000008, 0x00040000,
0x00000084, 0x00000008, 0x00020000,
- 0x00005718, 0x00000004, 0x00040000,
- 0x00004dd0, 0x00000000, 0x00780000,
+ 0x00005798, 0x00000004, 0x00040000,
+ 0x00004e50, 0x00000000, 0x00780000,
0x00003e40, 0x00000000, 0x00780000,
- 0x00004480, 0x00000000, 0x00780000,
+ 0x00004500, 0x00000000, 0x00780000,
0x00003210, 0x00000000, 0x00780000,
0x00003b50, 0x00000000, 0x00780000,
0x00007f58, 0x00000000, 0x00780000,
- 0x00005f58, 0x00000000, 0x00080000,
+ 0x00005fd8, 0x00000000, 0x00080000,
0x00007100, 0x00000000, 0x00080000,
- 0x0000aea0, 0x00000000, 0x00080000,
+ 0x0000af20, 0x00000000, 0x00080000,
0x00004398, 0x00000000, 0x00080000,
0x0000a5a0, 0x00000000, 0x00080000,
0x0000bde8, 0x00000000, 0x00080000,
0x00000020, 0x00000004, 0x00040000,
- 0x000056c8, 0x00000010, 0x00100000,
+ 0x00005688, 0x00000010, 0x00100000,
0x0000c210, 0x00000030, 0x00300000,
- 0x0000b088, 0x00000038, 0x00380000,
+ 0x0000b108, 0x00000038, 0x00380000,
0x00003d20, 0x00000080, 0x00400000,
0x0000bf60, 0x00000000, 0x00040000,
0x00004560, 0x00040080, 0x00040000,
@@ -77,11 +80,11 @@ static const u32 iro_arr[] = {
0x00003d60, 0x00000080, 0x00200000,
0x00008960, 0x00000040, 0x00300000,
0x0000e840, 0x00000060, 0x00600000,
- 0x00004618, 0x00000080, 0x00380000,
- 0x00010738, 0x000000c0, 0x00c00000,
+ 0x00004698, 0x00000080, 0x00380000,
+ 0x000107b8, 0x000000c0, 0x00c00000,
0x000001f8, 0x00000002, 0x00020000,
- 0x0000a2a0, 0x00000000, 0x01080000,
- 0x0000a3a8, 0x00000008, 0x00080000,
+ 0x0000a260, 0x00000000, 0x01080000,
+ 0x0000a368, 0x00000008, 0x00080000,
0x000001c0, 0x00000008, 0x00080000,
0x000001f8, 0x00000008, 0x00080000,
0x00000ac0, 0x00000008, 0x00080000,
@@ -90,39 +93,46 @@ static const u32 iro_arr[] = {
0x00000280, 0x00000008, 0x00080000,
0x00000680, 0x00080018, 0x00080000,
0x00000b78, 0x00080018, 0x00020000,
- 0x0000c640, 0x00000050, 0x003c0000,
- 0x00012038, 0x00000018, 0x00100000,
- 0x00011b00, 0x00000040, 0x00180000,
- 0x000095d0, 0x00000050, 0x00200000,
+ 0x0000c600, 0x00000058, 0x003c0000,
+ 0x00012038, 0x00000020, 0x00100000,
+ 0x00011b00, 0x00000048, 0x00180000,
+ 0x00009650, 0x00000050, 0x00200000,
0x00008b10, 0x00000040, 0x00280000,
- 0x00011640, 0x00000018, 0x00100000,
- 0x0000c828, 0x00000048, 0x00380000,
- 0x00011710, 0x00000020, 0x00200000,
- 0x00004650, 0x00000080, 0x00100000,
+ 0x000116c0, 0x00000018, 0x00100000,
+ 0x0000c808, 0x00000048, 0x00380000,
+ 0x00011790, 0x00000020, 0x00200000,
+ 0x000046d0, 0x00000080, 0x00100000,
0x00003618, 0x00000010, 0x00100000,
- 0x0000a968, 0x00000008, 0x00010000,
+ 0x0000a9e8, 0x00000008, 0x00010000,
0x000097a0, 0x00000008, 0x00010000,
- 0x00011990, 0x00000008, 0x00010000,
- 0x0000f018, 0x00000008, 0x00010000,
- 0x00012628, 0x00000008, 0x00010000,
- 0x00011da8, 0x00000008, 0x00010000,
- 0x0000aa78, 0x00000030, 0x00100000,
- 0x0000d768, 0x00000028, 0x00280000,
- 0x00009a58, 0x00000018, 0x00180000,
- 0x00009bd8, 0x00000008, 0x00080000,
- 0x00013a18, 0x00000008, 0x00080000,
- 0x000126e8, 0x00000018, 0x00180000,
- 0x0000e608, 0x00500288, 0x00100000,
- 0x00012970, 0x00000138, 0x00280000,
+ 0x00011a10, 0x00000008, 0x00010000,
+ 0x0000e9f8, 0x00000008, 0x00010000,
+ 0x00012648, 0x00000008, 0x00010000,
+ 0x000121c8, 0x00000008, 0x00010000,
+ 0x0000af08, 0x00000030, 0x00100000,
+ 0x0000d748, 0x00000028, 0x00280000,
+ 0x00009e68, 0x00000018, 0x00180000,
+ 0x00009fe8, 0x00000008, 0x00080000,
+ 0x00013ea8, 0x00000008, 0x00080000,
+ 0x00012f18, 0x00000018, 0x00180000,
+ 0x0000dfe8, 0x00500288, 0x00100000,
+ 0x000131a0, 0x00000138, 0x00280000,
};
void qed_init_iro_array(struct qed_dev *cdev)
{
- cdev->iro_arr = iro_arr;
+ cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
}
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
+ if (rt_offset >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing %u in rt_data at index %u!\n",
+ val, rt_offset);
+ return;
+ }
+
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
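The new range check above turns a silent out-of-bounds store into a logged no-op. A hypothetical caller illustrating both paths:

	static void example_rt_store(struct qed_hwfn *p_hwfn)
	{
		/* In range: value stored and marked valid. */
		qed_init_store_rt_reg(p_hwfn, 0, 0x1);

		/* Out of range: previously scribbled past init_val[];
		 * now rejected with a DP_ERR message.
		 */
		qed_init_store_rt_reg(p_hwfn, RUNTIME_ARRAY_SIZE, 0x1);
	}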
@@ -132,6 +142,14 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
{
size_t i;
+ if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing values in rt_data at indices %u-%u!\n",
+ rt_offset,
+ (u32)(rt_offset + size - 1));
+ return;
+ }
+
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
@@ -175,7 +193,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
return rc;
/* invalidate after writing */
- for (j = i; j < i + segment; j++)
+ for (j = i; j < (u32)(i + segment); j++)
p_valid[j] = false;
/* Jump over the entire segment, including invalid entry */
@@ -245,7 +263,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr, u32 fill, u32 fill_count)
+ u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
struct qed_dmae_params params = {};
@@ -372,7 +390,7 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
case INIT_SRC_ZEROS:
data = le32_to_cpu(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
- rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data);
else
qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
@@ -419,7 +437,6 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
-
val = qed_rd(p_hwfn, p_ptt, addr);
if (poll == INIT_POLL_NONE)
@@ -515,8 +532,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
INIT_IF_MODE_OP_CMD_OFFSET);
}
-static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
- struct init_if_phase_op *p_cmd,
+static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = le32_to_cpu(p_cmd->phase_data);
@@ -563,7 +579,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
modes);
break;
case INIT_OP_IF_PHASE:
- cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+ cmd_num += qed_init_cmd_phase(&cmd->if_phase,
phase, phase_id);
break;
case INIT_OP_DELAY:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
index a573c8921982..12e5c4e370d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -12,23 +12,24 @@
#include "qed.h"
/**
- * @brief qed_init_iro_array - init iro_arr.
+ * qed_init_iro_array(): Initialize iro_arr.
*
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_init_iro_array(struct qed_dev *cdev);
/**
- * @brief qed_init_run - Run the init-sequence.
+ * qed_init_run(): Run the init-sequence.
*
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @phase: Phase.
+ * @phase_id: Phase ID.
+ * @modes: Mode.
*
- * @param p_hwfn
- * @param p_ptt
- * @param phase
- * @param phase_id
- * @param modes
- * @return _qed_status_t
+ * Return: _qed_status_t.
*/
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -37,30 +38,31 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
int modes);
/**
- * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
+ * qed_init_alloc(): Allocate RT array, store 'values' ptrs.
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
- *
- * @return _qed_status_t
+ * Return: _qed_status_t.
*/
int qed_init_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_hwfn_deallocate
+ * qed_init_free(): Free the resources allocated by qed_init_alloc().
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ * qed_init_store_rt_reg(): Store a configuration value in the RT array.
*
+ * @p_hwfn: HW device data.
+ * @rt_offset: RT offset.
+ * @val: Value to store.
*
- * @param p_hwfn
- * @param rt_offset
- * @param val
+ * Return: Void.
*/
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
@@ -72,29 +74,21 @@ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
#define OVERWRITE_RT_REG(hwfn, offset, val) \
qed_init_store_rt_reg(hwfn, offset, val)
-/**
- * @brief
- *
- *
- * @param p_hwfn
- * @param rt_offset
- * @param val
- * @param size
- */
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 *val,
size_t size);
#define STORE_RT_REG_AGG(hwfn, offset, val) \
- qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+ qed_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val))
/**
- * @brief
- * Initialize GTT global windows and set admin window
- * related params of GTT/PTT to default values.
+ * qed_gtt_init(): Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_gtt_init(struct qed_hwfn *p_hwfn);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index f78e6055f654..a97f691839e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -36,7 +36,7 @@ struct qed_sb_sp_info {
struct qed_sb_info sb_info;
/* per protocol index data */
- struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
+ struct qed_pi_info pi_info_arr[PIS_PER_SB];
};
enum qed_attention_type {
@@ -1507,7 +1507,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
else
SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);
- sb_offset = igu_sb_id * PIS_PER_SB_E4;
+ sb_offset = igu_sb_id * PIS_PER_SB;
pi_offset = sb_offset + pi_index;
if (p_hwfn->hw_init_done)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index c5550e96bbe1..84c17e97f569 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -53,51 +53,54 @@ enum qed_coalescing_fsm {
};
/**
- * @brief qed_int_igu_enable_int - enable device interrupts
+ * qed_int_igu_enable_int(): Enable device interrupts.
*
- * @param p_hwfn
- * @param p_ptt
- * @param int_mode - interrupt mode to use
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @int_mode: Interrupt mode to use.
+ *
+ * Return: Void.
*/
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_int_mode int_mode);
/**
- * @brief qed_int_igu_disable_int - disable device interrupts
+ * qed_int_igu_disable_int(): Disable device interrupts.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Void.
*/
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
- * register from igu.
+ * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc
+ * register from igu.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u64
+ * Return: u64.
*/
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
#define QED_SP_SB_ID 0xffff
/**
- * @brief qed_int_sb_init - Initializes the sb_info structure.
+ * qed_int_sb_init(): Initializes the sb_info structure.
*
- * once the structure is initialized it can be passed to sb related functions.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_info: Points to an uninitialized (but allocated) sb_info structure.
+ * @sb_virt_addr: SB virtual address.
+ * @sb_phy_addr: SB physical address.
+ * @sb_id: The sb_id to be used (zero based in driver);
+ * should use QED_SP_SB_ID for the SP status block.
*
- * @param p_hwfn
- * @param p_ptt
- * @param sb_info points to an uninitialized (but
- * allocated) sb_info structure
- * @param sb_virt_addr
- * @param sb_phy_addr
- * @param sb_id the sb_id to be used (zero based in driver)
- * should use QED_SP_SB_ID for SP Status block
+ * Return: int.
*
- * @return int
+ * Once the structure is initialized it can be passed to sb related functions.
*/
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -106,82 +109,91 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
dma_addr_t sb_phy_addr,
u16 sb_id);
/**
- * @brief qed_int_sb_setup - Setup the sb.
+ * qed_int_sb_setup(): Setup the sb.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_info: Initialized sb_info structure.
*
- * @param p_hwfn
- * @param p_ptt
- * @param sb_info initialized sb_info structure
+ * Return: Void.
*/
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info);
/**
- * @brief qed_int_sb_release - releases the sb_info structure.
+ * qed_int_sb_release(): Releases the sb_info structure.
*
- * once the structure is released, it's memory can be freed
+ * @p_hwfn: HW device data.
+ * @sb_info: Points to an allocated sb_info structure.
+ * @sb_id: The sb_id to be used (zero based in driver)
+ * should never be equal to QED_SP_SB_ID
+ * (SP Status block).
*
- * @param p_hwfn
- * @param sb_info points to an allocated sb_info structure
- * @param sb_id the sb_id to be used (zero based in driver)
- * should never be equal to QED_SP_SB_ID
- * (SP Status block)
+ * Return: int.
*
- * @return int
+ * Once the structure is released, its memory can be freed.
*/
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
struct qed_sb_info *sb_info,
u16 sb_id);
/**
- * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
- * default status block.
+ * qed_int_sp_dpc(): To be called when an interrupt is received on the
+ * default status block.
*
- * @param p_hwfn - pointer to hwfn
+ * @t: Tasklet.
+ *
+ * Return: Void.
*
*/
void qed_int_sp_dpc(struct tasklet_struct *t);
/**
- * @brief qed_int_get_num_sbs - get the number of status
- * blocks configured for this funciton in the igu.
+ * qed_int_get_num_sbs(): Get the number of status blocks configured
+ * for this function in the igu.
*
- * @param p_hwfn
- * @param p_sb_cnt_info
+ * @p_hwfn: HW device data.
+ * @p_sb_cnt_info: Pointer to SB count info.
*
- * @return int - number of status blocks configured
+ * Return: Void.
*/
void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
struct qed_sb_cnt_info *p_sb_cnt_info);
/**
- * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ * qed_int_disable_post_isr_release(): Performs the cleanup post ISR
 * release. The API needs to be called after releasing all slowpath IRQs
* of the device.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
+ * Return: Void.
*/
void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
- * @brief qed_int_attn_clr_enable - sets whether the general behavior is
+ * qed_int_attn_clr_enable(): Sets whether the general behavior is
* preventing attentions from being reasserted, or following the
* attributes of the specific attention.
*
- * @param cdev
- * @param clr_enable
+ * @cdev: Qed dev pointer.
+ * @clr_enable: If true, prevent attentions from being reasserted.
+ *
+ * Return: Void.
*
*/
void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
/**
- * @brief - Doorbell Recovery handler.
+ * qed_db_rec_handler(): Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
*/
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
@@ -192,7 +204,7 @@ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
- ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
#define QED_SB_INVALID_IDX 0xffff
@@ -223,30 +235,34 @@ struct qed_igu_info {
};
/**
- * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources
+ * provided by MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Int.
*/
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into
+ * an IGU sb-id.
*
- * @param p_hwfn
- * @param sb_id - user provided sb_id
+ * @p_hwfn: HW device data.
+ * @sb_id: User provided sb_id.
*
- * @return an index inside IGU CAM where the SB resides
+ * Return: An index inside IGU CAM where the SB resides.
*/
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
- * @brief return a pointer to an unused valid SB
+ * qed_get_igu_free_sb(): Return a pointer to an unused valid SB.
*
- * @param p_hwfn
- * @param b_is_pf - true iff we want a SB belonging to a PF
+ * @p_hwfn: HW device data.
+ * @b_is_pf: True iff we want a SB belonging to a PF.
*
- * @return point to an igu_block, NULL if none is available
+ * Return: Pointer to an igu_block, NULL if none is available.
*/
struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
bool b_is_pf);
@@ -259,15 +275,15 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ * qed_int_igu_read_cam(): Reads the IGU CAM.
* This function needs to be called during hardware
* prepare. It reads the info from igu cam to know which
* status block is the default / base status block etc.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
@@ -275,24 +291,22 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
void *cookie);
/**
- * @brief qed_int_register_cb - Register callback func for
- * slowhwfn statusblock.
- *
- * Every protocol that uses the slowhwfn status block
- * should register a callback function that will be called
- * once there is an update of the sp status block.
- *
- * @param p_hwfn
- * @param comp_cb - function to be called when there is an
- * interrupt on the sp sb
- *
- * @param cookie - passed to the callback function
- * @param sb_idx - OUT parameter which gives the chosen index
- * for this protocol.
- * @param p_fw_cons - pointer to the actual address of the
- * consumer for this protocol.
- *
- * @return int
+ * qed_int_register_cb(): Register callback func for slowhwfn statusblock.
+ *
+ * @p_hwfn: HW device data.
+ * @comp_cb: Function to be called when there is an
+ * interrupt on the sp sb.
+ * @cookie: Passed to the callback function.
+ * @sb_idx: (OUT) parameter which gives the chosen index
+ * for this protocol.
+ * @p_fw_cons: Pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * Return: Int.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
*/
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
@@ -301,37 +315,40 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn,
__le16 **p_fw_cons);
/**
- * @brief qed_int_unregister_cb - Unregisters callback
- * function from sp sb.
- * Partner of qed_int_register_cb -> should be called
- * when no longer required.
+ * qed_int_unregister_cb(): Unregisters callback function from sp sb.
+ *
+ * @p_hwfn: HW device data.
+ * @pi: Producer Index.
*
- * @param p_hwfn
- * @param pi
+ * Return: Int.
*
- * @return int
+ * Partner of qed_int_register_cb(); should be called
+ * when no longer required.
*/
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
u8 pi);
/**
- * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ * qed_int_get_sp_sb_id(): Get the slowhwfn sb id.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u16
+ * Return: u16.
*/
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
/**
- * @brief Status block cleanup. Should be called for each status
- * block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param igu_sb_id - igu status block id
- * @param opaque - opaque fid of the sb owner.
- * @param b_set - set(1) / clear(0)
+ * qed_int_igu_init_pure_rt_single(): Status block cleanup.
+ * Should be called for each status
+ * block that will be used, for both PF and VF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @igu_sb_id: IGU status block id.
+ * @opaque: Opaque fid of the sb owner.
+ * @b_set: Set(1) / Clear(0).
+ *
+ * Return: Void.
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -340,15 +357,16 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
bool b_set);
/**
- * @brief qed_int_cau_conf - configure cau for a given status
- * block
- *
- * @param p_hwfn
- * @param ptt
- * @param sb_phys
- * @param igu_sb_id
- * @param vf_number
- * @param vf_valid
+ * qed_int_cau_conf_sb(): Configure cau for a given status block.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_phys: SB Physical.
+ * @igu_sb_id: IGU status block id.
+ * @vf_number: VF number.
+ * @vf_valid: VF valid or not.
+ *
+ * Return: Void.
*/
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -358,52 +376,58 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
u8 vf_valid);
/**
- * @brief qed_int_alloc
+ * qed_int_alloc(): QED interrupt alloc.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_int_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_int_free
+ * qed_int_free(): QED interrupt free.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_int_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_int_setup
+ * qed_int_setup(): QED interrupt setup.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Enable Interrupt & Attention for hw function
+ * qed_int_igu_enable(): Enable Interrupt & Attention for hw function.
*
- * @param p_hwfn
- * @param p_ptt
- * @param int_mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @int_mode: Interrupt mode.
*
- * @return int
+ * Return: Int.
*/
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_int_mode int_mode);
/**
- * @brief - Initialize CAU status block entry
+ * qed_init_cau_sb_entry(): Initialize CAU status block entry.
+ *
+ * @p_hwfn: HW device data.
+ * @p_sb_entry: Pointer to the SB entry.
+ * @pf_id: PF number.
+ * @vf_number: VF number.
+ * @vf_valid: VF valid or not.
*
- * @param p_hwfn
- * @param p_sb_entry
- * @param pf_id
- * @param vf_number
- * @param vf_valid
+ * Return: Void.
*/
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
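
As the kernel-doc above spells out, each slowpath protocol hooks itself to the SP status block through qed_int_register_cb(). A hedged usage sketch follows; the protocol-private structure and the callback body are hypothetical, and only the registration call follows the signature documented in this header:

/* Hypothetical protocol-private state -- not part of the qed driver. */
struct my_proto_info {
	__le16 *fw_cons;	/* filled in with the FW consumer address */
	u8 sb_idx;		/* PI index chosen for this protocol */
};

static int my_proto_sp_comp(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct my_proto_info *info = cookie;

	/* Called on every SP status-block update for this protocol. */
	return 0;
}

static int my_proto_start(struct qed_hwfn *p_hwfn, struct my_proto_info *info)
{
	return qed_int_register_cb(p_hwfn, my_proto_sp_comp, info,
				   &info->sb_idx, &info->fw_cons);
}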
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h
new file mode 100644
index 000000000000..3ccdd3b1d8cb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_IRO_HSI_H
+#define _QED_IRO_HSI_H
+
+#include <linux/types.h>
+
+enum {
+ IRO_YSTORM_FLOW_CONTROL_MODE_GTT,
+ IRO_PSTORM_PKT_DUPLICATION_CFG,
+ IRO_TSTORM_PORT_STAT,
+ IRO_TSTORM_LL2_PORT_STAT,
+ IRO_TSTORM_PKT_DUPLICATION_CFG,
+ IRO_USTORM_VF_PF_CHANNEL_READY_GTT,
+ IRO_USTORM_FLR_FINAL_ACK_GTT,
+ IRO_USTORM_EQE_CONS_GTT,
+ IRO_USTORM_ETH_QUEUE_ZONE_GTT,
+ IRO_USTORM_COMMON_QUEUE_CONS_GTT,
+ IRO_XSTORM_PQ_INFO,
+ IRO_XSTORM_INTEG_TEST_DATA,
+ IRO_YSTORM_INTEG_TEST_DATA,
+ IRO_PSTORM_INTEG_TEST_DATA,
+ IRO_TSTORM_INTEG_TEST_DATA,
+ IRO_MSTORM_INTEG_TEST_DATA,
+ IRO_USTORM_INTEG_TEST_DATA,
+ IRO_XSTORM_OVERLAY_BUF_ADDR,
+ IRO_YSTORM_OVERLAY_BUF_ADDR,
+ IRO_PSTORM_OVERLAY_BUF_ADDR,
+ IRO_TSTORM_OVERLAY_BUF_ADDR,
+ IRO_MSTORM_OVERLAY_BUF_ADDR,
+ IRO_USTORM_OVERLAY_BUF_ADDR,
+ IRO_TSTORM_LL2_RX_PRODS_GTT,
+ IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT,
+ IRO_CORE_LL2_USTORM_PER_QUEUE_STAT,
+ IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT,
+ IRO_MSTORM_QUEUE_STAT,
+ IRO_MSTORM_TPA_TIMEOUT_US,
+ IRO_MSTORM_ETH_VF_PRODS,
+ IRO_MSTORM_ETH_PF_PRODS_GTT,
+ IRO_MSTORM_ETH_PF_STAT,
+ IRO_USTORM_QUEUE_STAT,
+ IRO_USTORM_ETH_PF_STAT,
+ IRO_PSTORM_QUEUE_STAT,
+ IRO_PSTORM_ETH_PF_STAT,
+ IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT,
+ IRO_TSTORM_ETH_PRS_INPUT,
+ IRO_ETH_RX_RATE_LIMIT,
+ IRO_TSTORM_ETH_RSS_UPDATE_GTT,
+ IRO_XSTORM_ETH_QUEUE_ZONE_GTT,
+ IRO_YSTORM_TOE_CQ_PROD,
+ IRO_USTORM_TOE_CQ_PROD,
+ IRO_USTORM_TOE_GRQ_PROD,
+ IRO_TSTORM_SCSI_CMDQ_CONS_GTT,
+ IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT,
+ IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT,
+ IRO_TSTORM_ISCSI_RX_STATS,
+ IRO_MSTORM_ISCSI_RX_STATS,
+ IRO_USTORM_ISCSI_RX_STATS,
+ IRO_XSTORM_ISCSI_TX_STATS,
+ IRO_YSTORM_ISCSI_TX_STATS,
+ IRO_PSTORM_ISCSI_TX_STATS,
+ IRO_TSTORM_FCOE_RX_STATS,
+ IRO_PSTORM_FCOE_TX_STATS,
+ IRO_PSTORM_RDMA_QUEUE_STAT,
+ IRO_TSTORM_RDMA_QUEUE_STAT,
+ IRO_XSTORM_RDMA_ASSERT_LEVEL,
+ IRO_YSTORM_RDMA_ASSERT_LEVEL,
+ IRO_PSTORM_RDMA_ASSERT_LEVEL,
+ IRO_TSTORM_RDMA_ASSERT_LEVEL,
+ IRO_MSTORM_RDMA_ASSERT_LEVEL,
+ IRO_USTORM_RDMA_ASSERT_LEVEL,
+ IRO_XSTORM_IWARP_RXMIT_STATS,
+ IRO_TSTORM_ROCE_EVENTS_STAT,
+ IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS,
+ IRO_YSTORM_ROCE_ERROR_STATS,
+ IRO_PSTORM_ROCE_DCQCN_SENT_STATS,
+ IRO_USTORM_ROCE_CQE_STATS,
+};
+
+/* Pstorm LiteL2 queue statistics */
+
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].base \
+ + ((core_tx_stats_id) * IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].size)
+
+/* Tstorm LightL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].base \
+ + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].size)
+
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].base \
+ + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[IRO_ETH_RX_RATE_LIMIT].base \
+ + ((pf_id) * IRO[IRO_ETH_RX_RATE_LIMIT].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[IRO_ETH_RX_RATE_LIMIT].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_GTT_OFFSET(queue_id) \
+ (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].base \
+ + ((queue_id) * IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].m1))
+#define MSTORM_ETH_PF_PRODS_GTT_SIZE (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_MSTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_MSTORM_ETH_PF_STAT].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[IRO_MSTORM_ETH_PF_STAT].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone
+ * size mode.
+ */
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+ (IRO[IRO_MSTORM_ETH_VF_PRODS].base \
+ + ((vf_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m1) \
+ + ((vf_queue_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[IRO_MSTORM_ETH_VF_PRODS].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_MSTORM_INTEG_TEST_DATA].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_MSTORM_INTEG_TEST_DATA].size)
+
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_MSTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_MSTORM_ISCSI_RX_STATS].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_MSTORM_ISCSI_RX_STATS].size)
+
+/* Mstorm overlay buffer host address */
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_MSTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_MSTORM_QUEUE_STAT].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[IRO_MSTORM_QUEUE_STAT].size)
+
+/* Mstorm error level for assert */
+#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \
+ (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].base \
+ + ((storage_func_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \
+ + ((bdq_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \
+ (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].size)
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[IRO_MSTORM_TPA_TIMEOUT_US].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[IRO_MSTORM_TPA_TIMEOUT_US].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_GTT_OFFSET(ethtype_id) \
+ (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].base \
+ + ((ethtype_id) * IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_GTT_SIZE \
+ (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_PSTORM_ETH_PF_STAT].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[IRO_PSTORM_ETH_PF_STAT].size)
+
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_FCOE_TX_STATS].base \
+ + ((pf_id) * IRO[IRO_PSTORM_FCOE_TX_STATS].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[IRO_PSTORM_FCOE_TX_STATS].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_PSTORM_INTEG_TEST_DATA].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_PSTORM_INTEG_TEST_DATA].size)
+
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_PSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_PSTORM_ISCSI_TX_STATS].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_PSTORM_ISCSI_TX_STATS].size)
+
+/* Pstorm overlay buffer host address */
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Pstorm LL2 packet duplication configuration. Use pstorm_pkt_dup_cfg
+ * data type.
+ */
+#define PSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].base \
+ + ((pf_id) * IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].m1))
+#define PSTORM_PKT_DUPLICATION_CFG_SIZE \
+ (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_PSTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_PSTORM_QUEUE_STAT].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_QUEUE_STAT].size)
+
+/* Pstorm error level for assert */
+#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].base \
+ + ((rdma_stat_counter_id) * IRO[IRO_PSTORM_RDMA_QUEUE_STAT].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE \
+ (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[IRO_TSTORM_ETH_PRS_INPUT].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[IRO_TSTORM_ETH_PRS_INPUT].size)
+
+/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
+ * Use eth_tstorm_rss_update_data for update.
+ */
+#define TSTORM_ETH_RSS_UPDATE_GTT_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].base \
+ + ((pf_id) * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].m1))
+#define TSTORM_ETH_RSS_UPDATE_GTT_SIZE \
+ (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].size)
+
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_FCOE_RX_STATS].base \
+ + ((pf_id) * IRO[IRO_TSTORM_FCOE_RX_STATS].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[IRO_TSTORM_FCOE_RX_STATS].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_TSTORM_INTEG_TEST_DATA].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_TSTORM_INTEG_TEST_DATA].size)
+
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_TSTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_TSTORM_ISCSI_RX_STATS].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_TSTORM_ISCSI_RX_STATS].size)
+
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[IRO_TSTORM_LL2_PORT_STAT].base \
+ + ((port_id) * IRO[IRO_TSTORM_LL2_PORT_STAT].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[IRO_TSTORM_LL2_PORT_STAT].size)
+
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_GTT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].base \
+ + ((core_rx_queue_id) * IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].m1))
+#define TSTORM_LL2_RX_PRODS_GTT_SIZE (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].size)
+
+/* Tstorm overlay buffer host address */
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].base)
+
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Tstorm LL2 packet duplication configuration.
+ * Use tstorm_pkt_dup_cfg data type.
+ */
+#define TSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].base \
+ + ((pf_id) * IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].m1))
+#define TSTORM_PKT_DUPLICATION_CFG_SIZE \
+ (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+ (IRO[IRO_TSTORM_PORT_STAT].base \
+ + ((port_id) * IRO[IRO_TSTORM_PORT_STAT].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[IRO_TSTORM_PORT_STAT].size)
+
+/* Tstorm error level for assert */
+#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].base \
+ + ((rdma_stat_counter_id) * IRO[IRO_TSTORM_RDMA_QUEUE_STAT].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].base \
+ + ((roce_pf_id) * IRO[IRO_TSTORM_ROCE_EVENTS_STAT].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id.
+ */
+#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \
+ (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].base \
+ + ((storage_func_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \
+ + ((bdq_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \
+ (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_GTT_OFFSET(cmdq_queue_id) \
+ (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].base \
+ + ((cmdq_queue_id) * IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].m1))
+#define TSTORM_SCSI_CMDQ_CONS_GTT_SIZE \
+ (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_GTT_OFFSET(queue_zone_id) \
+ (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].base \
+ + ((queue_zone_id) * IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].m1))
+#define USTORM_COMMON_QUEUE_CONS_GTT_SIZE \
+ (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_GTT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_EQE_CONS_GTT].base \
+ + ((pf_id) * IRO[IRO_USTORM_EQE_CONS_GTT].m1))
+#define USTORM_EQE_CONS_GTT_SIZE (IRO[IRO_USTORM_EQE_CONS_GTT].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_USTORM_ETH_PF_STAT].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[IRO_USTORM_ETH_PF_STAT].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_zone_id) \
+ (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].base \
+ + ((queue_zone_id) * IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].m1))
+#define USTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_GTT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].base \
+ + ((pf_id) * IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].m1))
+#define USTORM_FLR_FINAL_ACK_GTT_SIZE (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_USTORM_INTEG_TEST_DATA].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_USTORM_INTEG_TEST_DATA].size)
+
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_USTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_USTORM_ISCSI_RX_STATS].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_USTORM_ISCSI_RX_STATS].size)
+
+/* Ustorm overlay buffer host address */
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_USTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_USTORM_QUEUE_STAT].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[IRO_USTORM_QUEUE_STAT].size)
+
+/* Ustorm error level for assert */
+#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].size)
+
+/* RoCE CQEs Statistics */
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_USTORM_ROCE_CQE_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_USTORM_ROCE_CQE_STATS].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[IRO_USTORM_ROCE_CQE_STATS].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[IRO_USTORM_TOE_CQ_PROD].base \
+ + ((rss_id) * IRO[IRO_USTORM_TOE_CQ_PROD].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[IRO_USTORM_TOE_CQ_PROD].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_TOE_GRQ_PROD].base \
+ + ((pf_id) * IRO[IRO_USTORM_TOE_GRQ_PROD].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[IRO_USTORM_TOE_GRQ_PROD].size)
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_GTT_OFFSET(vf_id) \
+ (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].base \
+ + ((vf_id) * IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].m1))
+#define USTORM_VF_PF_CHANNEL_READY_GTT_SIZE \
+ (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_id) \
+ (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].base \
+ + ((queue_id) * IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].m1))
+#define XSTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].size)
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_XSTORM_INTEG_TEST_DATA].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_XSTORM_INTEG_TEST_DATA].size)
+
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_XSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_XSTORM_ISCSI_TX_STATS].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_XSTORM_ISCSI_TX_STATS].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+ (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].base \
+ + ((pf_id) * IRO[IRO_XSTORM_IWARP_RXMIT_STATS].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].size)
+
+/* Xstorm overlay buffer host address */
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Xstorm common PQ info */
+#define XSTORM_PQ_INFO_OFFSET(pq_id) \
+ (IRO[IRO_XSTORM_PQ_INFO].base \
+ + ((pq_id) * IRO[IRO_XSTORM_PQ_INFO].m1))
+#define XSTORM_PQ_INFO_SIZE (IRO[IRO_XSTORM_PQ_INFO].size)
+
+/* Xstorm error level for assert */
+#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_GTT_OFFSET \
+ (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].base)
+#define YSTORM_FLOW_CONTROL_MODE_GTT_SIZE \
+ (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_YSTORM_INTEG_TEST_DATA].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_YSTORM_INTEG_TEST_DATA].size)
+
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_YSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_YSTORM_ISCSI_TX_STATS].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_YSTORM_ISCSI_TX_STATS].size)
+
+/* Ystorm overlay buffer host address */
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Ystorm error level for assert */
+#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE \
+ (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].size)
+
+/* RoCE Error Statistics */
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_YSTORM_ROCE_ERROR_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_ERROR_STATS].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[IRO_YSTORM_ROCE_ERROR_STATS].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[IRO_YSTORM_TOE_CQ_PROD].base \
+ + ((rss_id) * IRO[IRO_YSTORM_TOE_CQ_PROD].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[IRO_YSTORM_TOE_CQ_PROD].size)
+
+/* Per-chip offsets in iro_arr in dwords */
+#define E4_IRO_ARR_OFFSET 0
+#endif
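
All of these accessors follow one formula: address = IRO[id].base + index1 * IRO[id].m1 (+ index2 * IRO[id].m2 for two-dimensional producers), with IRO[id].size giving the element width. A worked sketch of the arithmetic with made-up table values (the real iro_arr contents ship with the firmware and are not shown in this diff):

/* Suppose, hypothetically, IRO[IRO_MSTORM_ETH_VF_PRODS] held
 *   .base = 0x4000, .m1 = 0x40, .m2 = 0x8, .size = 0x8
 * Then for vf_id = 3 and vf_queue_id = 2:
 *   MSTORM_ETH_VF_PRODS_OFFSET(3, 2)
 *     = 0x4000 + 3 * 0x40 + 2 * 0x8
 *     = 0x40d0
 * i.e. the VF producers form a two-dimensional array in storm RAM:
 * one 0x40-byte row per VF, one 0x8-byte entry per VF queue.
 */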
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index db926d8b3033..511ab214eb9c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -29,6 +29,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_iro_hsi.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
@@ -627,10 +628,9 @@ static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -642,10 +642,9 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
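
Both hunks replace open-coded "GTT window base + storm RAM offset" sums with the GET_GTT_BDQ_REG_ADDR() helper. Its definition is outside this diff; a plausible shape, stated purely as an assumption, is token-pasting the instance name onto the per-instance _GTT_OFFSET() generators defined in qed_iro_hsi.h above:

/* Assumed shape only -- the real definitions live in a header not
 * shown here.  The instance argument (e.g. MSTORM_SCSI_BDQ_EXT_PROD)
 * would be pasted onto its per-instance offset macro.
 */
#define GET_GTT_REG_ADDR(base, instance, id) \
	((base) + instance##_GTT_OFFSET(id))
#define GET_GTT_BDQ_REG_ADDR(base, instance, fid, bdq_id) \
	((base) + instance##_GTT_OFFSET((fid), (bdq_id)))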
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index dab7a5d09f87..dec2b00259d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -34,10 +34,13 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn);
void qed_iscsi_free(struct qed_hwfn *p_hwfn);
/**
- * @brief - Fills provided statistics struct with statistics.
+ * qed_get_protocol_stats_iscsi(): Fills provided statistics
+ * struct with statistics.
*
- * @param cdev
- * @param stats - points to struct that will be filled with statistics.
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ *
+ * Return: Void.
*/
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats);
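
Every comment block touched by this patch lands on the same kernel-doc skeleton, which scripts/kernel-doc can parse and which W=1 builds can check. The general shape, as a template rather than code from this driver:

/**
 * function_name(): One-line summary of what the function does.
 *
 * @arg1: What the first argument means.
 * @arg2: What the second argument means.
 *
 * Return: What the function returns (or Void).
 *
 * In this driver's convention, any longer discussion follows the
 * Return line, as several hunks above show.
 */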
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 186d0048a9d1..1d1d4caad680 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -114,6 +114,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+ p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT);
+ p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT);
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
return;
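
The two added assignments exist because ramrod structures are defined in the firmware's byte order: every multi-byte field is fixed little-endian, so host-order defaults must be converted at the boundary. A minimal sketch of the pattern, with a hypothetical struct (only the two constants come from this diff):

/* Hypothetical wire-format struct; __le16/__le32 mark fields the
 * firmware reads as little-endian regardless of host endianness.
 */
struct fw_tcp_timers {
	__le16 tx_sws_timer;
	__le32 two_msl_timer;
};

static void fill_tcp_timers(struct fw_tcp_timers *t)
{
	/* cpu_to_le*() is a no-op on little-endian hosts and a byte
	 * swap on big-endian ones, so the struct image is identical
	 * either way.
	 */
	t->tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT);
	t->two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT);
}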
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index dfaf10edfabf..2edd6bf64a3c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -28,6 +28,7 @@
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
@@ -37,7 +38,6 @@
#include "qed_sp.h"
#include "qed_sriov.h"
-
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
@@ -904,9 +904,10 @@ qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
{
u32 init_prod_val = 0;
- *pp_prod = p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
+ *pp_prod = (u8 __iomem *)
+ p_hwfn->regview +
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
@@ -1111,7 +1112,6 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
{
int rc;
-
rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
pbl_addr, pbl_size,
qed_get_cm_pq_idx_mcos(p_hwfn, tc));
@@ -2010,7 +2010,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_cb,
struct qed_ntuple_filter_params *p_params)
{
- struct rx_update_gft_filter_data *p_ramrod = NULL;
+ struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u16 abs_rx_q_id = 0;
@@ -2031,7 +2031,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
}
rc = qed_sp_init_request(p_hwfn, &p_ent,
- ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_RX_UPDATE_GFT_FILTER,
PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -2100,7 +2100,7 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
CAU_SB_ENTRY_TIMER_RES0);
address = BAR0_MAP_REG_USDM_RAM +
- USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
@@ -2134,7 +2134,7 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
CAU_SB_ENTRY_TIMER_RES1);
address = BAR0_MAP_REG_XSDM_RAM +
- XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
@@ -2763,25 +2763,6 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev,
return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}
-static int qed_configure_filter(struct qed_dev *cdev,
- struct qed_filter_params *params)
-{
- enum qed_filter_rx_mode_type accept_flags;
-
- switch (params->type) {
- case QED_FILTER_TYPE_UCAST:
- return qed_configure_filter_ucast(cdev, &params->filter.ucast);
- case QED_FILTER_TYPE_MCAST:
- return qed_configure_filter_mcast(cdev, &params->filter.mcast);
- case QED_FILTER_TYPE_RX_MODE:
- accept_flags = params->filter.accept_flags;
- return qed_configure_filter_rx_mode(cdev, accept_flags);
- default:
- DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
- return -EINVAL;
- }
-}
-
static int qed_configure_arfs_searcher(struct qed_dev *cdev,
enum qed_filter_config_mode mode)
{
@@ -2867,7 +2848,7 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
-static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
+static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac)
{
int i, ret;
@@ -2904,7 +2885,9 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.q_rx_stop = &qed_stop_rxq,
.q_tx_start = &qed_start_txq,
.q_tx_stop = &qed_stop_txq,
- .filter_config = &qed_configure_filter,
+ .filter_config_rx_mode = &qed_configure_filter_rx_mode,
+ .filter_config_ucast = &qed_configure_filter_ucast,
+ .filter_config_mcast = &qed_configure_filter_mcast,
.fastpath_stop = &qed_fastpath_stop,
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
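
Replacing the single filter_config() multiplexer with three typed ops moves the dispatch from a runtime switch on params->type to the call site itself, and deletes the union packing that went with it. A hedged sketch of how a consumer's call changes; the edev/qede-side spelling here is an assumption:

/* Before: one entry point, discriminated by a parameter struct. */
struct qed_filter_params params;

memset(&params, 0, sizeof(params));
params.type = QED_FILTER_TYPE_RX_MODE;
params.filter.accept_flags = accept_flags;
edev->ops->filter_config(edev->cdev, &params);

/* After: the operation is chosen directly, nothing to pack. */
edev->ops->filter_config_rx_mode(edev->cdev, accept_flags);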
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8eceeebb1a7b..a538cf478c14 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -92,18 +92,18 @@ struct qed_filter_mcast {
};
/**
- * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
+ * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue.
*
- * @param p_hwfn
- * @param p_rxq Handler of queue to close
- * @param eq_completion_only If True completion will be on
- * EQe, if False completion will be
- * on EQe if p_hwfn opaque
- * different from the RXQ opaque
- * otherwise on CQe.
- * @param cqe_completion If True completion will be
- * receive on CQe.
- * @return int
+ * @p_hwfn: HW device data.
+ * @p_rxq: Handler of queue to close.
+ * @eq_completion_only: If True, completion will be on
+ * EQe; if False, completion will be
+ * on EQe if p_hwfn opaque
+ * differs from the RXQ opaque,
+ * otherwise on CQe.
+ * @cqe_completion: If True, completion will be received on CQe.
+ *
+ * Return: Int.
*/
int
qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
@@ -111,12 +111,12 @@ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
bool eq_completion_only, bool cqe_completion);
/**
- * @brief qed_eth_tx_queue_stop - closes a Tx queue
+ * qed_eth_tx_queue_stop(): Closes a Tx queue.
*
- * @param p_hwfn
- * @param p_txq - handle to Tx queue needed to be closed
+ * @p_hwfn: HW device data.
+ * @p_txq: Handle to the Tx queue to be closed.
*
- * @return int
+ * Return: Int.
*/
int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
@@ -146,7 +146,6 @@ struct qed_sp_vport_start_params {
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
-
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
u8 update_tx_mode_config;
@@ -205,16 +204,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_vport_stop -
- *
- * This ramrod closes a VPort after all its RX and TX queues are terminated.
- * An Assert is generated if any queues are left open.
+ * qed_sp_vport_stop(): This ramrod closes a VPort after all its
+ * RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
*
- * @param p_hwfn
- * @param opaque_fid
- * @param vport_id VPort ID
+ * @p_hwfn: HW device data.
+ * @opaque_fid: Opaque FID.
+ * @vport_id: VPort ID.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
@@ -225,22 +223,21 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_rx_eth_queues_update -
+ * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue.
+ * It is used for setting the active state
+ * of the queue and updating the TPA and
+ * SGE parameters.
+ * @p_hwfn: HW device data.
+ * @pp_rxq_handlers: An array of queue handlers to be updated.
+ * @num_rxqs: Number of queues to update.
+ * @complete_cqe_flg: Post completion to the CQE Ring if set.
+ * @complete_event_flg: Post completion to the Event Ring if set.
+ * @comp_mode: Comp mode.
+ * @p_comp_data: Pointer to Comp data.
*
- * This ramrod updates an RX queue. It is used for setting the active state
- * of the queue and updating the TPA and SGE parameters.
+ * Return: Int.
*
- * @note At the moment - only used by non-linux VFs.
- *
- * @param p_hwfn
- * @param pp_rxq_handlers An array of queue handlers to be updated.
- * @param num_rxqs number of queues to update.
- * @param complete_cqe_flg Post completion to the CQE Ring if set
- * @param complete_event_flg Post completion to the Event Ring if set
- * @param comp_mode
- * @param p_comp_data
- *
- * @return int
+ * Note: At the moment, only used by non-Linux VFs.
*/
int
@@ -257,30 +254,32 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
/**
- * *@brief qed_arfs_mode_configure -
- *
- **Enable or disable rfs mode. It must accept atleast one of tcp or udp true
- **and atleast one of ipv4 or ipv6 true to enable rfs mode.
+ * qed_arfs_mode_configure(): Enable or disable rfs mode.
+ * It must accept at least one of tcp or udp true
+ * and at least one of ipv4 or ipv6 true to enable
+ * rfs mode.
*
- **@param p_hwfn
- **@param p_ptt
- **@param p_cfg_params - arfs mode configuration parameters.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_cfg_params: arfs mode configuration parameters.
*
+ * Return: Void.
*/
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_arfs_config_params *p_cfg_params);
/**
- * @brief - qed_configure_rfs_ntuple_filter
+ * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add
+ * or remove arfs hw filter.
*
- * This ramrod should be used to add or remove arfs hw filter
+ * @p_hwfn: HW device data.
+ * @p_cb: Used for QED_SPQ_MODE_CB, where client would initialize
+ * it with cookie and callback function address, if not
+ * using this mode then client must pass NULL.
+ * @p_params: Pointer to params.
*
- * @params p_hwfn
- * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize
- * it with cookie and callback function address, if not
- * using this mode then client must pass NULL.
- * @params p_params
+ * Return: Int.
*/
int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
@@ -374,16 +373,17 @@ qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
/**
- * @brief - Starts an Rx queue, when queue_cid is already prepared
+ * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is
+ * already prepared.
*
- * @param p_hwfn
- * @param p_cid
- * @param bd_max_bytes
- * @param bd_chain_phys_addr
- * @param cqe_pbl_addr
- * @param cqe_pbl_size
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer to CID.
+ * @bd_max_bytes: Max bytes.
+ * @bd_chain_phys_addr: Chain physical address.
+ * @cqe_pbl_addr: PBL address.
+ * @cqe_pbl_size: PBL size.
*
- * @return int
+ * Return: Int.
*/
int
qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
@@ -393,15 +393,16 @@ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
/**
- * @brief - Starts a Tx queue, where queue_cid is already prepared
+ * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is
+ * already prepared.
*
- * @param p_hwfn
- * @param p_cid
- * @param pbl_addr
- * @param pbl_size
- * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer to CID.
+ * @pbl_addr: PBL address.
+ * @pbl_size: PBL size.
+ * @pq_id: The PQ ID to use for this Tx queue.
*
- * @return int
+ * Return: Int.
*/
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c46a7f756ed5..ed274f033626 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -28,6 +28,7 @@
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
@@ -43,6 +44,8 @@
#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
+#define QED_LL2_INVALID_STATS_ID 0xff
+
struct qed_cb_ll2_info {
int rx_cnt;
u32 rx_size;
@@ -62,6 +65,29 @@ struct qed_ll2_buffer {
dma_addr_t phys_addr;
};
+static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn,
+ u8 ll2_queue_type, u8 qid)
+{
+ u8 stats_id;
+
+ /* For legacy (RAM based) queues, the stats_id will be set as the
+ * queue_id. Otherwise (context based queue), it will be set to
+ * the "abs_pf_id" offset from the end of the RAM based queue IDs.
+ * If the final value exceeds the total counters amount, return
+ * INVALID value to indicate that the stats for this connection should
+ * be disabled.
+ */
+ if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
+ stats_id = qid;
+ else
+ stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id;
+
+ if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS)
+ return stats_id;
+ else
+ return QED_LL2_INVALID_STATS_ID;
+}
+
static void qed_ll2b_complete_tx_packet(void *cxt,
u8 connection_handle,
void *cookie,
@@ -106,7 +132,7 @@ static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
}
static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
- struct qed_ll2_buffer *buffer)
+ struct qed_ll2_buffer *buffer)
{
spin_lock_bh(&cdev->ll2->lock);
@@ -352,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
num_bds_in_packet = p_pkt->bd_used;
list_del(&p_pkt->list_entry);
- if (num_bds < num_bds_in_packet) {
+ if (unlikely(num_bds < num_bds_in_packet)) {
DP_NOTICE(p_hwfn,
"Rest of BDs does not cover whole packet\n");
goto out;
@@ -462,7 +488,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
if (!list_empty(&p_rx->active_descq))
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
- if (!p_pkt) {
+ if (unlikely(!p_pkt)) {
DP_NOTICE(p_hwfn,
"[%d] LL2 Rx completion but active_descq is empty\n",
p_ll2_conn->input.conn_type);
@@ -475,7 +501,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
else
qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
- if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd))
DP_NOTICE(p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
@@ -597,18 +623,18 @@ static bool
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
struct core_rx_slow_path_cqe *p_cqe)
{
- struct ooo_opaque *iscsi_ooo;
+ struct ooo_opaque *ooo_opq;
u32 cid;
if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
return false;
- iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
- if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
+ ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data;
+ if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES)
return false;
/* Need to make a flush */
- cid = le32_to_cpu(iscsi_ooo->cid);
+ cid = le32_to_cpu(ooo_opq->cid);
qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
return true;
@@ -624,7 +650,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0;
struct qed_ooo_buffer *p_buffer;
- struct ooo_opaque *iscsi_ooo;
+ struct ooo_opaque *ooo_opq;
u8 placement_offset = 0;
u8 cqe_type;
@@ -645,7 +671,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
&cqe->rx_cqe_sp))
continue;
- if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+ if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) {
DP_NOTICE(p_hwfn,
"Got a non-regular LB LL2 completion [type 0x%02x]\n",
cqe_type);
@@ -657,22 +683,21 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
packet_length = le16_to_cpu(p_cqe_fp->packet_length);
vlan = le16_to_cpu(p_cqe_fp->vlan);
- iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
- qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
- iscsi_ooo);
- cid = le32_to_cpu(iscsi_ooo->cid);
+ ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
+ qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq);
+ cid = le32_to_cpu(ooo_opq->cid);
/* Process delete isle first */
- if (iscsi_ooo->drop_size)
+ if (ooo_opq->drop_size)
qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
- iscsi_ooo->drop_isle,
- iscsi_ooo->drop_size);
+ ooo_opq->drop_isle,
+ ooo_opq->drop_size);
- if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
+ if (ooo_opq->ooo_opcode == TCP_EVENT_NOP)
continue;
/* Now process create/add/join isles */
- if (list_empty(&p_rx->active_descq)) {
+ if (unlikely(list_empty(&p_rx->active_descq))) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX chain has no submitted buffers\n"
);
@@ -682,12 +707,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
- if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
- if (!p_pkt) {
+ if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN ||
+ ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) {
+ if (unlikely(!p_pkt)) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX packet is not valid\n");
return -EIO;
@@ -701,19 +726,19 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_chain_consume(&p_rx->rxq_chain);
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
- switch (iscsi_ooo->ooo_opcode) {
+ switch (ooo_opq->ooo_opcode) {
case TCP_EVENT_ADD_NEW_ISLE:
qed_ooo_add_new_isle(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer);
break;
case TCP_EVENT_ADD_ISLE_RIGHT:
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer,
QED_OOO_RIGHT_BUF);
break;
@@ -721,7 +746,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer,
QED_OOO_LEFT_BUF);
break;
@@ -729,13 +754,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle +
- 1,
+ ooo_opq->ooo_isle + 1,
p_buffer,
QED_OOO_LEFT_BUF);
qed_ooo_join_isles(p_hwfn,
p_hwfn->p_ooo_info,
- cid, iscsi_ooo->ooo_isle);
+ cid, ooo_opq->ooo_isle);
break;
case TCP_EVENT_ADD_PEN:
num_ooo_add_to_peninsula++;
@@ -747,7 +771,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
} else {
DP_NOTICE(p_hwfn,
"Unexpected event (%d) TX OOO completion\n",
- iscsi_ooo->ooo_opcode);
+ ooo_opq->ooo_opcode);
}
}
@@ -859,16 +883,16 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return 0;
- if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn)))
return 0;
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
- if (!num_bds)
+ if (unlikely(!num_bds))
return 0;
while (num_bds) {
@@ -877,10 +901,10 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
- if (!p_pkt)
+ if (unlikely(!p_pkt))
return -EINVAL;
- if (p_pkt->bd_used != 1) {
+ if (unlikely(p_pkt->bd_used != 1)) {
DP_NOTICE(p_hwfn,
"Unexpectedly many BDs(%d) in TX OOO completion\n",
p_pkt->bd_used);
@@ -1008,7 +1032,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
- if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
+ if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO))
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
@@ -1124,6 +1148,7 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+
qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
/* Get SPQ entry */
@@ -1533,7 +1558,7 @@ static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
- struct e4_core_conn_context *p_cxt;
+ struct core_conn_context *p_cxt;
struct qed_ll2_tx_packet *p_pkt;
struct qed_ll2_info *p_ll2_conn;
struct qed_hwfn *p_hwfn = cxt;
@@ -1544,7 +1569,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
int rc = -EINVAL;
u32 i, capacity;
size_t desc_size;
- u8 qid;
+ u8 qid, stats_id;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
@@ -1610,16 +1635,32 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
p_ll2_conn->input.rx_conn_type);
+ stats_id = qed_ll2_handle_to_stats_id(p_hwfn,
+ p_ll2_conn->input.rx_conn_type,
+ qid);
p_ll2_conn->queue_id = qid;
- p_ll2_conn->tx_stats_id = qid;
+ p_ll2_conn->tx_stats_id = stats_id;
- DP_VERBOSE(p_hwfn, QED_MSG_LL2,
- "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
- p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
+ /* If there is no valid stats id for this connection, disable stats */
+ if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) {
+ p_ll2_conn->tx_stats_en = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "Disabling stats for queue %d - not enough counters\n",
+ qid);
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n",
+ p_hwfn->rel_pf_id,
+ p_ll2_conn->input.rx_conn_type, qid, stats_id);
if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
- p_rx->set_prod_addr = p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ p_rx->set_prod_addr =
+ (u8 __iomem *)p_hwfn->regview +
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_LL2_RX_PRODS, qid);
} else {
/* QED_LL2_RX_TYPE_CTX - using doorbell */
p_rx->ctx_based = 1;
@@ -1762,7 +1803,7 @@ int qed_ll2_post_rx_buffer(void *cxt,
}
}
- /* If we're lacking entires, let's try to flush buffers to FW */
+ /* If we're lacking entries, let's try to flush buffers to FW */
if (!p_curp || !p_curb) {
rc = -EBUSY;
p_curp = NULL;
@@ -1842,8 +1883,8 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
}
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
- p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
+ if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) &&
+ p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) {
start_bd->nw_vlan_or_lb_echo =
cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
} else {
@@ -1964,28 +2005,29 @@ int qed_ll2_prepare_tx_packet(void *cxt,
int rc = 0;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return -EINVAL;
p_tx = &p_ll2_conn->tx_queue;
p_tx_chain = &p_tx->txq_chain;
- if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
+ if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet))
return -EIO;
spin_lock_irqsave(&p_tx->lock, flags);
- if (p_tx->cur_send_packet) {
+ if (unlikely(p_tx->cur_send_packet)) {
rc = -EEXIST;
goto out;
}
/* Get entry, but only if we have tx elements for it */
- if (!list_empty(&p_tx->free_descq))
+ if (likely(!list_empty(&p_tx->free_descq)))
p_curp = list_first_entry(&p_tx->free_descq,
struct qed_ll2_tx_packet, list_entry);
- if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
+ if (unlikely(p_curp &&
+ qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds))
p_curp = NULL;
- if (!p_curp) {
+ if (unlikely(!p_curp)) {
rc = -EBUSY;
goto out;
}
@@ -2014,16 +2056,16 @@ int qed_ll2_set_fragment_of_tx_packet(void *cxt,
unsigned long flags;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return -EINVAL;
- if (!p_ll2_conn->tx_queue.cur_send_packet)
+ if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet))
return -EINVAL;
p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
- if (cur_send_frag_num >= p_cur_send_packet->bd_used)
+ if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used))
return -EINVAL;
/* Fill the BD information, and possibly notify FW */
@@ -2609,7 +2651,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
DP_NOTICE(cdev, "Failed to add an LLH filter\n");
goto err3;
}
-
}
ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
@@ -2651,7 +2692,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
*/
nr_frags = skb_shinfo(skb)->nr_frags;
- if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) {
DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
1 + nr_frags);
return -EINVAL;
@@ -2693,7 +2734,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
*/
rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
&pkt, 1);
- if (rc)
+ if (unlikely(rc))
goto err;
for (i = 0; i < nr_frags; i++) {
@@ -2717,7 +2758,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
/* if failed not much to do here, partial packet has been posted
* we can't free memory, will need to wait for completion
*/
- if (rc)
+ if (unlikely(rc))
goto err2;
}
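The annotations in these hunks are branch-prediction hints; likely()/unlikely() expand to __builtin_expect(). A minimal standalone sketch of the pattern (generic code, not taken from the driver; the txq type is invented for illustration):

#include <errno.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

struct txq { int busy; };

/* Error exits are hinted cold so the compiler keeps the common
 * case on the straight-line path, mirroring the LL2 TX changes.
 */
static int tx_submit(struct txq *q)
{
	if (unlikely(!q))		/* malformed call: cold */
		return -EINVAL;
	if (unlikely(q->busy))		/* transient contention: cold */
		return -EEXIST;
	return 0;			/* hot path falls through */
}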
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index df88d00053a2..0bfc375161ed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -32,7 +32,6 @@
#define QED_LL2_LEGACY_CONN_BASE_PF 0
#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF
-
struct qed_ll2_rx_packet {
struct list_head list_entry;
struct core_rx_bd_with_buff_len *rxq_bd;
@@ -119,41 +118,41 @@ struct qed_ll2_info {
extern const struct qed_ll2_ops qed_ll2_ops_pass;
/**
- * @brief qed_ll2_acquire_connection - allocate resources,
- * starts rx & tx (if relevant) queues pair. Provides
- * connecion handler as output parameter.
+ * qed_ll2_acquire_connection(): Allocate resources,
+ * start the Rx & Tx (if relevant) queue pair.
+ * Provides the connection handle as an output
+ * parameter.
*
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @data: Describes connection parameters.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param data - describes connection parameters
- * @return int
+ * Return: Int.
*/
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
/**
- * @brief qed_ll2_establish_connection - start previously
- * allocated LL2 queues pair
+ * qed_ll2_establish_connection(): Start a previously allocated LL2 queue pair.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param p_ptt
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue.
+ * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param addr rx (physical address) buffers to submit
- * @param cookie
- * @param notify_fw produce corresponding Rx BD immediately
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
+ * @addr: RX (physical address) buffers to submit.
+ * @buf_len: Buffer length.
+ * @cookie: Opaque cookie returned with the buffer's completion.
+ * @notify_fw: Produce corresponding Rx BD immediately.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle,
@@ -161,15 +160,15 @@ int qed_ll2_post_rx_buffer(void *cxt,
u16 buf_len, void *cookie, u8 notify_fw);
/**
- * @brief qed_ll2_prepare_tx_packet - request for start Tx BD
- * to prepare Tx packet submission to FW.
+ * qed_ll2_prepare_tx_packet(): Request a start Tx BD
+ * to prepare a Tx packet submission to the FW.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle
- * @param pkt - info regarding the tx packet
- * @param notify_fw - issue doorbell to fw for this packet
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: Connection handle.
+ * @pkt: Info regarding the tx packet.
+ * @notify_fw: Issue doorbell to fw for this packet.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle,
@@ -177,81 +176,83 @@ int qed_ll2_prepare_tx_packet(void *cxt,
bool notify_fw);
/**
- * @brief qed_ll2_release_connection - releases resources
- * allocated for LL2 connection
+ * qed_ll2_release_connection(): Releases resources allocated for LL2
+ * connection.
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
+ * Return: Void.
*/
void qed_ll2_release_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill
- * Tx BD of BDs requested by
- * qed_ll2_prepare_tx_packet
+ * qed_ll2_set_fragment_of_tx_packet(): Provides a fragment to fill
+ * a Tx BD of the BDs requested by
+ * qed_ll2_prepare_tx_packet().
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle
- * obtained from
- * qed_ll2_require_connection
- * @param addr
- * @param nbytes
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
+ * @addr: DMA address of the fragment.
+ * @nbytes: Number of bytes.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle,
dma_addr_t addr, u16 nbytes);
/**
- * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
- *
+ * qed_ll2_terminate_connection(): Stops Tx/Rx queues.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle
- * obtained from
- * qed_ll2_require_connection
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_get_stats - get LL2 queue's statistics
+ * qed_ll2_get_stats(): Get LL2 queue's statistics.
*
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
+ * @p_stats: Pointer to the statistics structure to fill.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param p_stats
- *
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_get_stats(void *cxt,
u8 connection_handle, struct qed_ll2_stats *p_stats);
/**
- * @brief qed_ll2_alloc - Allocates LL2 connections set
+ * qed_ll2_alloc(): Allocates LL2 connections set.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_ll2_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ll2_setup - Inits LL2 connections set
+ * qed_ll2_setup(): Inits LL2 connections set.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*
*/
void qed_ll2_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ll2_free - Releases LL2 connections set
+ * qed_ll2_free(): Releases LL2 connections set.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn);
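Read together, the kernel-doc above implies a connection lifecycle: acquire, establish, post Rx buffers / prepare Tx packets plus fragments, terminate, release. A hedged sketch (error handling trimmed; assumes, as in qed_ll2_if.h, that the acquire-data struct returns the handle through a p_connection_handle field):

static int ll2_lifecycle_sketch(void *cxt, struct qed_ll2_acquire_data *data,
				dma_addr_t buf, u16 len,
				struct qed_ll2_tx_pkt_info *pkt)
{
	u8 handle;
	int rc;

	data->p_connection_handle = &handle;	/* assumed output field */
	rc = qed_ll2_acquire_connection(cxt, data);
	if (rc)
		return rc;

	rc = qed_ll2_establish_connection(cxt, handle);
	if (rc)
		goto release;

	/* Rx: feed buffers; Tx: packet first, then one call per extra BD */
	qed_ll2_post_rx_buffer(cxt, handle, buf, len, NULL, 1);
	qed_ll2_prepare_tx_packet(cxt, handle, pkt, true);
	qed_ll2_set_fragment_of_tx_packet(cxt, handle, buf, len);

	qed_ll2_terminate_connection(cxt, handle);
release:
	qed_ll2_release_connection(cxt, handle);
	return rc;
}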
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index d10e1cd6d2ba..7673b3e07736 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -99,10 +99,6 @@ static const u32 qed_mfw_ext_10g[] __initconst = {
ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};
-static const u32 qed_mfw_ext_20g[] __initconst = {
- ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
-};
-
static const u32 qed_mfw_ext_25g[] __initconst = {
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
@@ -148,7 +144,6 @@ static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
- QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
@@ -262,7 +257,7 @@ module_exit(qed_exit);
/* Check if the DMA controller on the machine can properly handle the DMA
* addressing required by the device.
-*/
+ */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
struct device *dev = &cdev->pdev->dev;
@@ -547,7 +542,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
goto err2;
}
- DP_INFO(cdev, "qed_probe completed successfully\n");
+ DP_INFO(cdev, "%s completed successfully\n", __func__);
return cdev;
@@ -980,7 +975,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
rc = qed_set_int_mode(cdev, false);
if (rc) {
- DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+ DP_ERR(cdev, "%s ERR\n", __func__);
return rc;
}
@@ -1161,6 +1156,7 @@ static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
/* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(wq_flag, &hwfn->slowpath_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
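The barrier comments added in this hunk document a standard kernel idiom: set_bit() is atomic but, on its own, unordered with respect to surrounding accesses, so it is fenced on both sides before the work item is queued. In isolation the pattern looks like this (generic sketch; MY_FLAG, flags, wq and work are placeholders):

smp_mb__before_atomic();		/* order earlier stores before the flag */
set_bit(MY_FLAG, &flags);		/* atomic, but not an ordering barrier */
smp_mb__after_atomic();			/* publish the flag before queueing */
queue_delayed_work(wq, &work, delay);	/* the worker then observes the flag */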
@@ -1382,7 +1378,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
(params->drv_minor << 16) |
(params->drv_rev << 8) |
(params->drv_eng);
- strlcpy(drv_version.name, params->name,
+ strscpy(drv_version.name, params->name,
MCP_DRV_VER_STR_SIZE - 4);
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
@@ -2892,7 +2888,7 @@ static int qed_update_drv_state(struct qed_dev *cdev, bool active)
return status;
}
-static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
+static int qed_update_mac(struct qed_dev *cdev, const u8 *mac)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
@@ -3079,8 +3075,10 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
"Scheduling slowpath task [Flag: %d]\n",
QED_SLOWPATH_MFW_TLV_REQ);
+ /* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
@@ -3159,3 +3157,8 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
return 0;
}
+
+unsigned long qed_get_epoch_time(void)
+{
+ return ktime_get_real_seconds();
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 24cd41567775..64678a256f3b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -17,6 +17,7 @@
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
+#include "qed_mfw_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -30,11 +31,11 @@
#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
- qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \
_val)
#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
- qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+ qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)))
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
@@ -384,7 +385,7 @@ qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
/* Get the union data */
- if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
+ if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
offsetof(struct public_drv_mb,
union_data);
@@ -410,7 +411,7 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
offsetof(struct public_drv_mb, union_data);
memset(&union_data, 0, sizeof(union_data));
- if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
+ if (p_mb_params->p_data_src && p_mb_params->data_src_size)
memcpy(&union_data, p_mb_params->p_data_src,
p_mb_params->data_src_size);
qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
@@ -671,7 +672,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf, bool b_can_sleep)
{
struct qed_mcp_mb_params mb_params;
u8 raw_data[MCP_DRV_NVM_BUF_LEN];
@@ -684,6 +686,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
/* Use the maximal value since the actual one is part of the response */
mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+ if (b_can_sleep)
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
@@ -916,7 +920,6 @@ enum qed_load_req_force {
};
static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
-
enum qed_load_req_force force_cmd,
u8 *p_mfw_force_cmd)
{
@@ -1526,15 +1529,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
ext_speed = 0;
if (params->ext_speed.autoneg)
- ext_speed |= ETH_EXT_SPEED_AN;
+ ext_speed |= ETH_EXT_SPEED_NONE;
val = params->ext_speed.forced_speed;
if (val & QED_EXT_SPEED_1G)
ext_speed |= ETH_EXT_SPEED_1G;
if (val & QED_EXT_SPEED_10G)
ext_speed |= ETH_EXT_SPEED_10G;
- if (val & QED_EXT_SPEED_20G)
- ext_speed |= ETH_EXT_SPEED_20G;
if (val & QED_EXT_SPEED_25G)
ext_speed |= ETH_EXT_SPEED_25G;
if (val & QED_EXT_SPEED_40G)
@@ -1560,8 +1561,6 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
ext_speed |= ETH_EXT_ADV_SPEED_1G;
if (val & QED_EXT_SPEED_MASK_10G)
ext_speed |= ETH_EXT_ADV_SPEED_10G;
- if (val & QED_EXT_SPEED_MASK_20G)
- ext_speed |= ETH_EXT_ADV_SPEED_20G;
if (val & QED_EXT_SPEED_MASK_25G)
ext_speed |= ETH_EXT_ADV_SPEED_25G;
if (val & QED_EXT_SPEED_MASK_40G)
@@ -2081,7 +2080,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
- u32 global_offsize;
+ u32 global_offsize, public_base;
if (IS_VF(p_hwfn->cdev)) {
if (p_hwfn->vf_iov_info) {
@@ -2098,16 +2097,16 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
}
}
+ public_base = p_hwfn->mcp_info->public_base;
global_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_hwfn->
- mcp_info->public_base,
+ SECTION_OFFSIZE_ADDR(public_base,
PUBLIC_GLOBAL));
*p_mfw_ver =
qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize,
0) + offsetof(struct public_global, mfw_ver));
- if (p_running_bundle_id != NULL) {
+ if (p_running_bundle_id) {
*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize, 0) +
offsetof(struct public_global,
@@ -2209,6 +2208,7 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
return 0;
}
+
static bool qed_is_transceiver_ready(u32 transceiver_state,
u32 transceiver_type)
{
@@ -2378,7 +2378,7 @@ qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
"According to Legacy capabilities, L2 personality is %08x\n",
- (u32) *p_proto);
+ (u32)*p_proto);
}
static int
@@ -2423,7 +2423,7 @@ qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFUP,
"According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
- (u32) *p_proto, resp, param);
+ (u32)*p_proto, resp, param);
return 0;
}
@@ -2445,9 +2445,6 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
case FUNC_MF_CFG_PROTOCOL_ISCSI:
*p_proto = QED_PCI_ISCSI;
break;
- case FUNC_MF_CFG_PROTOCOL_NVMETCP:
- *p_proto = QED_PCI_NVMETCP;
- break;
case FUNC_MF_CFG_PROTOCOL_FCOE:
*p_proto = QED_PCI_FCOE;
break;
@@ -2854,7 +2851,7 @@ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
}
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *mac)
+ struct qed_ptt *p_ptt, const u8 *mac)
{
struct qed_mcp_mb_params mb_params;
u32 mfw_mac[2];
@@ -3026,7 +3023,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
DRV_MB_PARAM_NVM_LEN_OFFSET),
&resp, &resp_param,
&read_len,
- (u32 *)(p_buf + offset));
+ (u32 *)(p_buf + offset), false);
if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
@@ -3034,7 +3031,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
}
/* This can be a lengthy process, and it's possible scheduler
- * isn't preemptable. Sleep a bit to prevent CPU hogging.
+ * isn't preemptible. Sleep a bit to prevent CPU hogging.
*/
if (bytes_left % 0x1000 <
(bytes_left - read_len) % 0x1000)
@@ -3129,10 +3126,12 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
* to be delivered to MFW.
*/
if (param && cmd == QED_PUT_FILE_DATA) {
- buf_idx = QED_MFW_GET_FIELD(param,
- FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
- buf_size = QED_MFW_GET_FIELD(param,
- FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
+ buf_idx =
+ QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
+ buf_size =
+ QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
} else {
buf_idx += buf_size;
buf_size = min_t(u32, (len - buf_idx),
@@ -3176,7 +3175,7 @@ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_TRANSCEIVER_READ,
nvm_offset, &resp, &param, &buf_size,
- (u32 *)(p_buf + offset));
+ (u32 *)(p_buf + offset), true);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to send a transceiver read command to the MFW. rc = %d.\n",
@@ -3275,7 +3274,7 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
DRV_MSG_CODE_BIST_TEST, param,
&resp, &resp_param,
&buf_size,
- (u32 *)p_image_att);
+ (u32 *)p_image_att, false);
if (rc)
return rc;
@@ -3388,7 +3387,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
type = NVM_TYPE_DEFAULT_CFG;
break;
case QED_NVM_IMAGE_NVM_META:
- type = NVM_TYPE_META;
+ type = NVM_TYPE_NVM_META;
break;
default:
DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
@@ -3905,10 +3904,6 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
- if (QED_IS_E5(p_hwfn->cdev))
- features |=
- DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;
-
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
}
@@ -4002,7 +3997,8 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_GET_NVM_CFG_OPTION,
- mb_param, &resp, &param, p_len, (u32 *)p_buf);
+ mb_param, &resp, &param, p_len,
+ (u32 *)p_buf, false);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 8edb450d0abf..564723800d15 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -266,97 +266,97 @@ union qed_mfw_tlv_data {
#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
/**
- * @brief - returns the link params of the hw function
+ * qed_mcp_get_link_params(): Returns the link params of the hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link params
+ * Returns: Pointer to link params.
*/
-struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link state of the hw function
+ * qed_mcp_get_link_state(): Return the link state of the hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link state
+ * Returns: Pointer to link state.
*/
-struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link capabilities of the hw function
+ * qed_mcp_get_link_capabilities(): Return the link capabilities of the
+ * hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link capabilities
+ * Returns: Pointer to link capabilities.
*/
struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
/**
- * @brief Request the MFW to set the the link according to 'link_input'.
+ * qed_mcp_set_link(): Request the MFW to set the link according
+ * to 'link_input'.
*
- * @param p_hwfn
- * @param p_ptt
- * @param b_up - raise link if `true'. Reset link if `false'.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @b_up: Raise link if `true'. Reset link if `false'.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_up);
/**
- * @brief Get the management firmware version value
+ * qed_mcp_get_mfw_ver(): Get the management firmware version value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mfw_ver - mfw version value
- * @param p_running_bundle_id - image id in nvram; Optional.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mfw_ver: MFW version value.
+ * @p_running_bundle_id: Image id in nvram; Optional.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
- * @brief Get the MBI version value
+ * qed_mcp_get_mbi_ver(): Get the MBI version value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mbi_ver: A pointer to a variable to be filled with the MBI version.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_mbi_ver);
/**
- * @brief Get media type value of the port.
+ * qed_mcp_get_media_type(): Get media type value of the port.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param mfw_ver - media type value
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @media_type: Media type value.
+ *
- * @return int -
- * 0 - Operation was successul.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed.
*/
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *media_type);
/**
- * @brief Get transceiver data of the port.
+ * qed_mcp_get_transceiver_data(): Get transceiver data of the port.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_transceiver_state - transceiver state.
- * @param p_transceiver_type - media type value
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_transceiver_state: Transceiver state.
+ * @p_tranceiver_type: Media type value.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed.
*/
int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -364,50 +364,48 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
u32 *p_tranceiver_type);
/**
- * @brief Get transceiver supported speed mask.
+ * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_speed_mask - Bit mask of all supported speeds.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_speed_mask: Bit mask of all supported speeds.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed.
*/
int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_speed_mask);
/**
- * @brief Get board configuration.
+ * qed_mcp_get_board_config(): Get board configuration.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_board_config - Board config.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_board_config: Board config.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed.
*/
int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_board_config);
/**
- * @brief General function for sending commands to the MCP
- * mailbox. It acquire mutex lock for the entire
- * operation, from sending the request until the MCP
- * response. Waiting for MCP response will be checked up
- * to 5 seconds every 5ms.
+ * qed_mcp_cmd(): General function for sending commands to the MCP
+ * mailbox. It acquires a mutex lock for the entire
+ * operation, from sending the request until the MCP
+ * response arrives. The MCP response is polled for up
+ * to 5 seconds, every 5 ms.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param cmd - command to be sent to the MCP.
- * @param param - Optional param
- * @param o_mcp_resp - The MCP response code (exclude sequence).
- * @param o_mcp_param- Optional parameter provided by the MCP
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @cmd: Command to be sent to the MCP.
+ * @param: Optional param.
+ * @o_mcp_resp: The MCP response code (excluding sequence).
+ * @o_mcp_param: Optional parameter provided by the MCP
* response.
- * @return int - 0 - operation
- * was successul.
+ *
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -417,37 +415,39 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param);
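A hedged call-site sketch for the prototype above, shaped like the DRV_MSG_CODE_FEATURE_SUPPORT call in qed_mcp.c (the wrapper function itself is hypothetical):

static int feature_support_sketch(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt, u32 features)
{
	u32 mcp_resp = 0, mcp_param = 0;

	/* Blocks, polling up to ~5 seconds, until the MFW answers */
	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			   features, &mcp_resp, &mcp_param);
}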
/**
- * @brief - drains the nig, allowing completion to pass in case of pauses.
- * (Should be called only from sleepable context)
+ * qed_mcp_drain(): Drains the NIG, allowing completions to pass in
+ * case of pauses.
+ * (Should be called only from sleepable context)
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int.
*/
int qed_mcp_drain(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Get the flash size value
+ * qed_mcp_get_flash_size(): Get the flash size value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_flash_size - flash size in bytes to be filled.
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_flash_size: Flash size in bytes to be filled.
*
- * @return int - 0 - operation was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_flash_size);
/**
- * @brief Send driver version to MFW
+ * qed_mcp_send_drv_version(): Send driver version to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param version - Version value
- * @param name - Protocol driver name
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_ver: Version value.
*
- * @return int - 0 - operation was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
@@ -455,146 +455,148 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_mcp_drv_version *p_ver);
/**
- * @brief Read the MFW process kill counter
+ * qed_get_process_kill_counter(): Read the MFW process kill counter.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return u32
+ * Return: u32.
*/
u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Trigger a recovery process
+ * qed_start_recovery_process(): Trigger a recovery process.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int
+ * Return: Int.
*/
int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief A recovery handler must call this function as its first step.
- * It is assumed that the handler is not run from an interrupt context.
+ * qed_recovery_prolog(): A recovery handler must call this function
+ * as its first step.
+ * It is assumed that the handler is not run from
+ * an interrupt context.
*
- * @param cdev
- * @param p_ptt
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_recovery_prolog(struct qed_dev *cdev);
/**
- * @brief Notify MFW about the change in base device properties
+ * qed_mcp_ov_update_current_config(): Notify MFW about the change in base
+ * device properties.
*
- * @param p_hwfn
- * @param p_ptt
- * @param client - qed client type
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @client: Qed client type.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_client client);
/**
- * @brief Notify MFW about the driver state
+ * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state.
*
- * @param p_hwfn
- * @param p_ptt
- * @param drv_state - Driver state
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @drv_state: Driver state.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_driver_state drv_state);
/**
- * @brief Send MTU size to MFW
+ * qed_mcp_ov_update_mtu(): Send MTU size to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mtu - MTU size
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mtu: MTU size.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 mtu);
/**
- * @brief Send MAC address to MFW
+ * qed_mcp_ov_update_mac(): Send MAC address to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mac - MAC address
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mac: MAC address.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *mac);
+ struct qed_ptt *p_ptt, const u8 *mac);
/**
- * @brief Send WOL mode to MFW
+ * qed_mcp_ov_update_wol(): Send WOL mode to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param wol - WOL mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @wol: WOL mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_wol wol);
/**
- * @brief Set LED status
+ * qed_mcp_set_led(): Set LED status.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mode - LED mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mode: LED mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_led_mode mode);
/**
- * @brief Read from nvm
+ * qed_mcp_nvm_read(): Read from NVM.
*
- * @param cdev
- * @param addr - nvm offset
- * @param p_buf - nvm read buffer
- * @param len - buffer len
+ * @cdev: Qed dev pointer.
+ * @addr: NVM offset.
+ * @p_buf: NVM read buffer.
+ * @len: Buffer len.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Write to nvm
+ * qed_mcp_nvm_write(): Write to NVM.
*
- * @param cdev
- * @param addr - nvm offset
- * @param cmd - nvm command
- * @param p_buf - nvm write buffer
- * @param len - buffer len
+ * @cdev: Qed dev pointer.
+ * @addr: NVM offset.
+ * @cmd: NVM command.
+ * @p_buf: NVM write buffer.
+ * @len: Buffer len.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Check latest response
+ * qed_mcp_nvm_resp(): Check latest response.
*
- * @param cdev
- * @param p_buf - nvm write buffer
+ * @cdev: Qed dev pointer.
+ * @p_buf: NVM write buffer.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);
@@ -604,13 +606,13 @@ struct qed_nvm_image_att {
};
/**
- * @brief Allows reading a whole nvram image
+ * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image.
*
- * @param p_hwfn
- * @param image_id - image to get attributes for
- * @param p_image_att - image attributes structure into which to fill data
+ * @p_hwfn: HW device data.
+ * @image_id: Image to get attributes for.
+ * @p_image_att: Image attributes structure into which to fill data.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
@@ -618,64 +620,65 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
struct qed_nvm_image_att *p_image_att);
/**
- * @brief Allows reading a whole nvram image
+ * qed_mcp_get_nvm_image(): Allows reading a whole nvram image.
*
- * @param p_hwfn
- * @param image_id - image requested for reading
- * @param p_buffer - allocated buffer into which to fill data
- * @param buffer_len - length of the allocated buffer.
+ * @p_hwfn: HW device data.
+ * @image_id: Image requested for reading.
+ * @p_buffer: Allocated buffer into which to fill data.
+ * @buffer_len: Length of the allocated buffer.
*
- * @return 0 iff p_buffer now contains the nvram image.
+ * Return: 0 if p_buffer now contains the nvram image.
*/
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
enum qed_nvm_images image_id,
u8 *p_buffer, u32 buffer_len);
/**
- * @brief Bist register test
+ * qed_mcp_bist_register_test(): Bist register test.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Bist clock test
+ * qed_mcp_bist_clock_test(): Bist clock test.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Bist nvm test - get number of images
+ * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param num_images - number of images if operation was
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @num_images: Number of images if operation was
* successful. 0 if not.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *num_images);
/**
- * @brief Bist nvm test - get image attributes by index
+ * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes
+ * by index.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_image_att - Attributes of image
- * @param image_index - Index of image to get information for
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_image_att: Attributes of image.
+ * @image_index: Index of image to get information for.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -683,23 +686,26 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
u32 image_index);
/**
- * @brief - Processes the TLV request from MFW i.e., get the required TLV info
- * from the qed client and send it to the MFW.
+ * qed_mfw_process_tlv_req(): Processes the TLV request from the MFW,
+ * i.e. gets the required TLV info
+ * from the qed client and sends it to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Send raw debug data to the MFW
+ * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_buf: Raw debug data buffer.
+ * @size: Buffer size.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_buf - raw debug data buffer
- * @param size - buffer size
+ * Return: Int.
*/
int
qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
@@ -796,47 +802,49 @@ qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
}
/**
- * @brief Initialize the interface with the MCP
+ * qed_mcp_cmd_init(): Initialize the interface with the MCP.
*
- * @param p_hwfn - HW func
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Initialize the port interface with the MCP
+ * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*
- * @param p_hwfn
- * @param p_ptt
* Can only be called after `num_ports_in_engines' is set
*/
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Releases resources allocated during the init process.
+ * qed_mcp_free(): Releases resources allocated during the init process.
*
- * @param p_hwfn - HW func
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW function.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_free(struct qed_hwfn *p_hwfn);
/**
- * @brief This function is called from the DPC context. After
- * pointing PTT to the mfw mb, check for events sent by the MCP
- * to the driver and ack them. In case a critical event
- * detected, it will be handled here, otherwise the work will be
- * queued to a sleepable work-queue.
+ * qed_mcp_handle_events(): This function is called from the DPC context.
+ * After pointing PTT to the mfw mb, check for events sent by
+ * the MCP to the driver and ack them. In case a critical event is
+ * detected, it will be handled here; otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @p_hwfn: HW function.
+ * @p_ptt: PTT required for register access.
*
- * @param p_hwfn - HW function
- * @param p_ptt - PTT required for register access
- * @return int - 0 - operation
- * was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
@@ -858,169 +866,177 @@ struct qed_load_req_params {
};
/**
- * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
- * returns whether this PF is the first on the engine/port or function.
+ * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the
+ * operation succeeds, returns whether this PF is
+ * the first on the engine/port or function.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_load_req_params *p_params);
/**
- * @brief Sends a LOAD_DONE message to the MFW
+ * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Sends a UNLOAD_REQ message to the MFW
+ * qed_mcp_unload_req(): Sends an UNLOAD_REQ message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Sends a UNLOAD_DONE message to the MFW
+ * qed_mcp_unload_done(): Sends an UNLOAD_DONE message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Read the MFW mailbox into Current buffer.
+ * qed_mcp_read_mb(): Read the MFW mailbox into the current buffer.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Ack to mfw that driver finished FLR process for VFs
+ * qed_mcp_ack_vf_flr(): Ack to the MFW that the driver finished the FLR process for VFs.
*
- * @param p_hwfn
- * @param p_ptt
- * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @vfs_to_ack: Bit mask of all engine VFs for which the PF acks.
*
- * @param return int - 0 upon success.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *vfs_to_ack);
/**
- * @brief - calls during init to read shmem of all function-related info.
+ * qed_mcp_fill_shmem_func_info(): Called during init to read shmem of
+ * all function-related info.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Reset the MCP using mailbox command.
+ * qed_mcp_reset(): Reset the MCP using mailbox command.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Sends an NVM read command request to the MFW to get
- * a buffer.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
- * DRV_MSG_CODE_NVM_READ_NVRAM commands
- * @param param - [0:23] - Offset [24:31] - Size
- * @param o_mcp_resp - MCP response
- * @param o_mcp_param - MCP response param
- * @param o_txn_size - Buffer size output
- * @param o_buf - Pointer to the buffer returned by the MFW.
+ * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get
+ * a buffer.
*
- * @param return 0 upon success.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands.
+ * @param: [0:23] - Offset [24:31] - Size.
+ * @o_mcp_resp: MCP response.
+ * @o_mcp_param: MCP response param.
+ * @o_txn_size: Buffer size output.
+ * @o_buf: Pointer to the buffer returned by the MFW.
+ * @b_can_sleep: Whether the caller may sleep while waiting for the MFW response.
+ *
+ * Return: 0 upon success.
*/
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf, bool b_can_sleep);
/**
- * @brief Read from sfp
+ * qed_mcp_phy_sfp_read(): Read from SFP.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param port - transceiver port
- * @param addr - I2C address
- * @param offset - offset in sfp
- * @param len - buffer length
- * @param p_buf - buffer to read into
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @port: Transceiver port.
+ * @addr: I2C address.
+ * @offset: Offset in SFP.
+ * @len: Buffer length.
+ * @p_buf: Buffer to read into.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
/**
- * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ * qed_mcp_is_init(): Indicates whether the MFW objects [under mcp_info]
+ * are accessible.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return true iff MFW is running and mcp_info is initialized
+ * Return: true if MFW is running and mcp_info is initialized.
*/
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
/**
- * @brief request MFW to configure MSI-X for a VF
+ * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF.
*
- * @param p_hwfn
- * @param p_ptt
- * @param vf_id - absolute inside engine
- * @param num_sbs - number of entries to request
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @vf_id: Absolute VF ID inside the engine.
+ * @num: Number of entries to request.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num);
/**
- * @brief - Halt the MCP.
+ * qed_mcp_halt(): Halt the MCP.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief - Wake up the MCP.
+ * qed_mcp_resume(): Wake up the MCP.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
@@ -1038,13 +1054,13 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
-/* @brief - Gets the mdump retained data from the MFW.
+/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mdump_retain
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mdump_retain: Pointer to the mdump retained data.
*
- * @param return 0 upon success.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
@@ -1052,15 +1068,15 @@ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
struct mdump_retain_data_stc *p_mdump_retain);
/**
- * @brief - Sets the MFW's max value for the given resource
+ * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource.
*
- * @param p_hwfn
- * @param p_ptt
- * @param res_id
- * @param resc_max_val
- * @param p_mcp_resp
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @res_id: Resource ID.
+ * @resc_max_val: Resource max value.
+ * @p_mcp_resp: MCP response.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
@@ -1069,16 +1085,17 @@ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
u32 resc_max_val, u32 *p_mcp_resp);
/**
- * @brief - Gets the MFW allocation info for the given resource
+ * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given
+ * resource.
*
- * @param p_hwfn
- * @param p_ptt
- * @param res_id
- * @param p_mcp_resp
- * @param p_resc_num
- * @param p_resc_start
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @res_id: Resource ID.
+ * @p_mcp_resp: MCP response.
+ * @p_resc_num: Resource number.
+ * @p_resc_start: Resource start.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
@@ -1087,13 +1104,13 @@ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
/**
- * @brief Send eswitch mode to MFW
+ * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param eswitch - eswitch mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @eswitch: Eswitch mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1113,12 +1130,12 @@ enum qed_resc_lock {
};
/**
- * @brief - Initiates PF FLR
+ * qed_mcp_initiate_pf_flr(): Initiates PF FLR.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
struct qed_resc_lock_params {
@@ -1151,13 +1168,13 @@ struct qed_resc_lock_params {
};
/**
- * @brief Acquires MFW generic resource lock
+ * qed_mcp_resc_lock(): Acquires MFW generic resource lock.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
@@ -1175,13 +1192,13 @@ struct qed_resc_unlock_params {
};
/**
- * @brief Releases MFW generic resource lock
+ * qed_mcp_resc_unlock(): Releases MFW generic resource lock.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
@@ -1189,12 +1206,15 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
struct qed_resc_unlock_params *p_params);
/**
- * @brief - default initialization for lock/unlock resource structs
+ * qed_mcp_resc_lock_default_init(): Default initialization for
+ * lock/unlock resource structs.
+ *
+ * @p_lock: Lock params struct to be initialized; can be NULL.
+ * @p_unlock: Unlock params struct to be initialized; can be NULL.
+ * @resource: The requested resource.
+ * @b_is_permanent: Disable retries & aging when set.
*
- * @param p_lock - lock params struct to be initialized; Can be NULL
- * @param p_unlock - unlock params struct to be initialized; Can be NULL
- * @param resource - the requested resource
- * @paral b_is_permanent - disable retries & aging when set
+ * Return: Void.
*/
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
struct qed_resc_unlock_params *p_unlock,
@@ -1202,94 +1222,117 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
resource, bool b_is_permanent);
/**
- * @brief - Return whether management firmware support smart AN
+ * qed_mcp_is_smart_an_supported(): Return whether management firmware
+ * supports smart AN.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return bool - true if feature is supported.
+ * Return: True if the feature is supported.
*/
bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);
/**
- * @brief Learn of supported MFW features; To be done during early init
+ * qed_mcp_get_capabilities(): Learn of supported MFW features;
+ * to be done during early init.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Int.
*/
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Inform MFW of set of features supported by driver. Should be done
- * inside the content of the LOAD_REQ.
+ * qed_mcp_set_capabilities(): Inform MFW of set of features supported
+ * by driver. Should be done inside the content
+ * of the LOAD_REQ.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
*/
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Read ufp config from the shared memory.
+ * qed_mcp_read_ufp_config(): Read the UFP config from the shared memory.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Populate the nvm info shadow in the given hardware function
+ * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given
+ * hardware function.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Int.
*/
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
/**
- * @brief Delete nvm info shadow in the given hardware function
+ * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given
+ * hardware function.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
/**
- * @brief Get the engine affinity configuration.
+ * qed_mcp_get_engine_config(): Get the engine affinity configuration.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Int.
*/
int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Get the PPFID bitmap.
+ * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
*/
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Get NVM config attribute value.
+ * qed_mcp_nvm_get_cfg(): Get NVM config attribute value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param option_id
- * @param entity_id
- * @param flags
- * @param p_buf
- * @param p_len
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @option_id: Option ID.
+ * @entity_id: Entity ID.
+ * @flags: Flags.
+ * @p_buf: Buffer to hold the attribute value.
+ * @p_len: Length of the returned data.
+ *
+ * Return: Int.
*/
int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
u32 *p_len);
/**
- * @brief Set NVM config attribute value.
+ * qed_mcp_nvm_set_cfg(): Set NVM config attribute value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param option_id
- * @param entity_id
- * @param flags
- * @param p_buf
- * @param len
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window used for register access.
+ * @option_id: ID of the NVM config option to set.
+ * @entity_id: ID of the entity the option belongs to.
+ * @flags: NVM config option flags.
+ * @p_buf: Buffer holding the value to write.
+ * @len: Length of the value in @p_buf.
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
u32 len);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
new file mode 100644
index 000000000000..8a0e3c5d4bda
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
@@ -0,0 +1,2474 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_MFW_HSI_H
+#define _QED_MFW_HSI_H
+
+#define MFW_TRACE_SIGNATURE 0x25071946
+
+/* Format of a trace entry in the buffer */
+#define MFW_TRACE_EVENTID_MASK 0x00ffff
+#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
+#define MFW_TRACE_PRM_SIZE_OFFSET 16
+#define MFW_TRACE_ENTRY_SIZE 3
+
+struct mcp_trace {
+ u32 signature; /* Help to identify that the trace is valid */
+ u32 size; /* The size of the trace buffer in bytes */
+ u32 curr_level; /* 2 - all will be written to the buffer
+ * 1 - debug trace will not be written
+ * 0 - just errors will be written to the buffer
+ */
+ u32 modules_mask[2]; /* A bit per module: 1 means write it, 0 means
+ * mask it.
+ */
+
+ /* Warning: the following pointers are assumed to be 32bits as they are
+ * used only in the MFW.
+ */
+ u32 trace_prod; /* The next trace will be written to this offset */
+ u32 trace_oldest; /* The oldest valid trace starts at this offset
+ * (usually very close after the current producer).
+ */
+};
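+
+/* Illustration only, not part of the ABI above: a minimal sketch of how a
+ * consumer could decode a trace entry header using the MFW_TRACE_* fields.
+ * The header occupies MFW_TRACE_ENTRY_SIZE (3) bytes; anything beyond what
+ * the masks define is an assumption of this sketch.
+ */
+static inline void mfw_trace_decode_header(u32 header, u16 *event_id,
+ u8 *num_params)
+{
+ *event_id = header & MFW_TRACE_EVENTID_MASK;
+ *num_params = (header & MFW_TRACE_PRM_SIZE_MASK) >>
+ MFW_TRACE_PRM_SIZE_OFFSET;
+}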
+
+#define VF_MAX_STATIC 192
+#define VF_BITMAP_SIZE_IN_DWORDS (VF_MAX_STATIC / 32)
+#define VF_BITMAP_SIZE_IN_BYTES (VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32))
+
+#define EXT_VF_MAX_STATIC 240
+#define EXT_VF_BITMAP_SIZE_IN_DWORDS (((EXT_VF_MAX_STATIC - 1) / 32) + 1)
+#define EXT_VF_BITMAP_SIZE_IN_BYTES (EXT_VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32))
+#define ADDED_VF_BITMAP_SIZE 2
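+
+/* Worked example of the sizing above: VF_MAX_STATIC = 192 needs
+ * 192 / 32 = 6 dwords (24 bytes), while EXT_VF_MAX_STATIC = 240 needs
+ * ((240 - 1) / 32) + 1 = 8 dwords (32 bytes); the round-up form also
+ * covers maxima that are not a multiple of 32.
+ */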
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2
+#define MCP_GLOB_PORT_MAX 4
+#define MCP_GLOB_FUNC_MAX 16
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+#define SECTION_OFFSET(_offsize) (((((_offsize) & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
+
+#define QED_SECTION_SIZE(_offsize) ((((_offsize) & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
+
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET((_offsize)) + \
+ (QED_SECTION_SIZE((_offsize)) * (idx)))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+ ((_pub_base) + offsetof(struct mcp_public_data, sections[_section]))
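+
+/* Illustration only, with a made-up value: for _offsize = 0x00080010 the
+ * offset field is 0x10 dwords (0x40 bytes) and the per-element size is
+ * 0x8 dwords (0x20 bytes), so SECTION_ADDR(0x00080010, 1) evaluates to
+ * MCP_REG_SCRATCH + 0x40 + 0x20.
+ */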
+
+/* PHY configuration */
+struct eth_phy_cfg {
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0x0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+ u32 pause;
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed;
+
+ u32 loopback_mode;
+#define ETH_LOOPBACK_NONE 0x0
+#define ETH_LOOPBACK_INT_PHY 0x1
+#define ETH_LOOPBACK_EXT_PHY 0x2
+#define ETH_LOOPBACK_EXT 0x3
+#define ETH_LOOPBACK_MAC 0x4
+#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5
+#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6
+#define ETH_LOOPBACK_PCS_AH_ONLY 0x7
+#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8
+#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9
+
+ u32 eee_cfg;
+#define EEE_CFG_EEE_ENABLED BIT(0)
+#define EEE_CFG_TX_LPI BIT(1)
+#define EEE_CFG_ADV_SPEED_1G BIT(2)
+#define EEE_CFG_ADV_SPEED_10G BIT(3)
+#define EEE_TX_TIMER_USEC_MASK 0xfffffff0
+#define EEE_TX_TIMER_USEC_OFFSET 4
+#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00
+#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100
+#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000
+
+ u32 link_modes;
+
+ u32 fec_mode;
+#define FEC_FORCE_MODE_MASK 0x000000ff
+#define FEC_FORCE_MODE_OFFSET 0
+#define FEC_FORCE_MODE_NONE 0x00
+#define FEC_FORCE_MODE_FIRECODE 0x01
+#define FEC_FORCE_MODE_RS 0x02
+#define FEC_FORCE_MODE_AUTO 0x07
+#define FEC_EXTENDED_MODE_MASK 0xffffff00
+#define FEC_EXTENDED_MODE_OFFSET 8
+#define ETH_EXT_FEC_NONE 0x00000000
+#define ETH_EXT_FEC_10G_NONE 0x00000100
+#define ETH_EXT_FEC_10G_BASE_R 0x00000200
+#define ETH_EXT_FEC_25G_NONE 0x00000400
+#define ETH_EXT_FEC_25G_BASE_R 0x00000800
+#define ETH_EXT_FEC_25G_RS528 0x00001000
+#define ETH_EXT_FEC_40G_NONE 0x00002000
+#define ETH_EXT_FEC_40G_BASE_R 0x00004000
+#define ETH_EXT_FEC_50G_NONE 0x00008000
+#define ETH_EXT_FEC_50G_BASE_R 0x00010000
+#define ETH_EXT_FEC_50G_RS528 0x00020000
+#define ETH_EXT_FEC_50G_RS544 0x00040000
+#define ETH_EXT_FEC_100G_NONE 0x00080000
+#define ETH_EXT_FEC_100G_BASE_R 0x00100000
+#define ETH_EXT_FEC_100G_RS528 0x00200000
+#define ETH_EXT_FEC_100G_RS544 0x00400000
+
+ u32 extended_speed;
+#define ETH_EXT_SPEED_MASK 0x0000ffff
+#define ETH_EXT_SPEED_OFFSET 0
+#define ETH_EXT_SPEED_NONE 0x00000001
+#define ETH_EXT_SPEED_1G 0x00000002
+#define ETH_EXT_SPEED_10G 0x00000004
+#define ETH_EXT_SPEED_25G 0x00000008
+#define ETH_EXT_SPEED_40G 0x00000010
+#define ETH_EXT_SPEED_50G_BASE_R 0x00000020
+#define ETH_EXT_SPEED_50G_BASE_R2 0x00000040
+#define ETH_EXT_SPEED_100G_BASE_R2 0x00000080
+#define ETH_EXT_SPEED_100G_BASE_R4 0x00000100
+#define ETH_EXT_SPEED_100G_BASE_P4 0x00000200
+#define ETH_EXT_ADV_SPEED_MASK 0xFFFF0000
+#define ETH_EXT_ADV_SPEED_OFFSET 16
+#define ETH_EXT_ADV_SPEED_1G 0x00010000
+#define ETH_EXT_ADV_SPEED_10G 0x00020000
+#define ETH_EXT_ADV_SPEED_25G 0x00040000
+#define ETH_EXT_ADV_SPEED_40G 0x00080000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00100000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00200000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x00400000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x00800000
+#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x01000000
+};
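+
+/* Illustration only, not consumed by the firmware: composing a forced-FEC
+ * request with the mask/offset pairs above, to show the field convention
+ * used throughout this file.
+ */
+static inline u32 eth_phy_cfg_force_rs_fec(void)
+{
+ return (FEC_FORCE_MODE_RS << FEC_FORCE_MODE_OFFSET) &
+ FEC_FORCE_MODE_MASK;
+}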
+
+struct port_mf_cfg {
+ u32 dynamic_cfg;
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
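+
+/* Illustration only: extracting the outer-VLAN tag from dynamic_cfg. A
+ * result equal to PORT_MF_CFG_OV_TAG_DEFAULT (all ones) is assumed here
+ * to mean that no tag is configured.
+ */
+static inline u16 port_mf_cfg_ov_tag(const struct port_mf_cfg *cfg)
+{
+ return (cfg->dynamic_cfg & PORT_MF_CFG_OV_TAG_MASK) >>
+ PORT_MF_CFG_OV_TAG_SHIFT;
+}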
+
+struct eth_stats {
+ u64 r64;
+ u64 r127;
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+